/* Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/phy.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>

/* RX Fifo Registers */
#define MVPP2_RX_DATA_FIFO_SIZE_REG(port)	(0x00 + 4 * (port))
#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port)	(0x20 + 4 * (port))
#define MVPP2_RX_MIN_PKT_SIZE_REG	0x60
#define MVPP2_RX_FIFO_INIT_REG	0x64

/* RX DMA Top Registers */
#define MVPP2_RX_CTRL_REG(port)	(0x140 + 4 * (port))
#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s)	(((s) & 0xfff) << 16)
#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK	BIT(31)
#define MVPP2_POOL_BUF_SIZE_REG(pool)	(0x180 + 4 * (pool))
#define MVPP2_POOL_BUF_SIZE_OFFSET	5
#define MVPP2_RXQ_CONFIG_REG(rxq)	(0x800 + 4 * (rxq))
#define MVPP2_SNOOP_PKT_SIZE_MASK	0x1ff
#define MVPP2_SNOOP_BUF_HDR_MASK	BIT(9)
#define MVPP2_RXQ_POOL_SHORT_OFFS	20
#define MVPP21_RXQ_POOL_SHORT_MASK	0x700000
#define MVPP22_RXQ_POOL_SHORT_MASK	0xf00000
#define MVPP2_RXQ_POOL_LONG_OFFS	24
#define MVPP21_RXQ_POOL_LONG_MASK	0x7000000
#define MVPP22_RXQ_POOL_LONG_MASK	0xf000000
#define MVPP2_RXQ_PACKET_OFFSET_OFFS	28
#define MVPP2_RXQ_PACKET_OFFSET_MASK	0x70000000
#define MVPP2_RXQ_DISABLE_MASK	BIT(31)

/* Parser Registers */
#define MVPP2_PRS_INIT_LOOKUP_REG	0x1000
#define MVPP2_PRS_PORT_LU_MAX	0xf
#define MVPP2_PRS_PORT_LU_MASK(port)	(0xff << ((port) * 4))
#define MVPP2_PRS_PORT_LU_VAL(port, val)	((val) << ((port) * 4))
#define MVPP2_PRS_INIT_OFFS_REG(port)	(0x1004 + ((port) & 4))
#define MVPP2_PRS_INIT_OFF_MASK(port)	(0x3f << (((port) % 4) * 8))
#define MVPP2_PRS_INIT_OFF_VAL(port, val)	((val) << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_REG(port)	(0x100c + ((port) & 4))
#define MVPP2_PRS_MAX_LOOP_MASK(port)	(0xff << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_VAL(port, val)	((val) << (((port) % 4) * 8))
#define MVPP2_PRS_TCAM_IDX_REG	0x1100
#define MVPP2_PRS_TCAM_DATA_REG(idx)	(0x1104 + (idx) * 4)
#define MVPP2_PRS_TCAM_INV_MASK	BIT(31)
#define MVPP2_PRS_SRAM_IDX_REG	0x1200
#define MVPP2_PRS_SRAM_DATA_REG(idx)	(0x1204 + (idx) * 4)
#define MVPP2_PRS_TCAM_CTRL_REG	0x1230
#define MVPP2_PRS_TCAM_EN_MASK	BIT(0)

/* Classifier Registers */
#define MVPP2_CLS_MODE_REG	0x1800
#define MVPP2_CLS_MODE_ACTIVE_MASK	BIT(0)
#define MVPP2_CLS_PORT_WAY_REG	0x1810
#define MVPP2_CLS_PORT_WAY_MASK(port)	(1 << (port))
#define MVPP2_CLS_LKP_INDEX_REG	0x1814
#define MVPP2_CLS_LKP_INDEX_WAY_OFFS	6
#define MVPP2_CLS_LKP_TBL_REG	0x1818
#define MVPP2_CLS_LKP_TBL_RXQ_MASK	0xff
#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK	BIT(25)
#define MVPP2_CLS_FLOW_INDEX_REG	0x1820
#define MVPP2_CLS_FLOW_TBL0_REG	0x1824
#define MVPP2_CLS_FLOW_TBL1_REG	0x1828
#define MVPP2_CLS_FLOW_TBL2_REG	0x182c
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port)	(0x1980 + ((port) * 4))
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS	3
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK	0x7
#define MVPP2_CLS_SWFWD_P2HQ_REG(port)	(0x19b0 + ((port) * 4))
#define MVPP2_CLS_SWFWD_PCTRL_REG	0x19d0
#define MVPP2_CLS_SWFWD_PCTRL_MASK(port)	(1 << (port))

/* Descriptor Manager Top Registers */
#define MVPP2_RXQ_NUM_REG	0x2040
#define MVPP2_RXQ_DESC_ADDR_REG	0x2044
#define MVPP22_DESC_ADDR_OFFS	8
#define MVPP2_RXQ_DESC_SIZE_REG	0x2048
#define MVPP2_RXQ_DESC_SIZE_MASK	0x3ff0
#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq)	(0x3000 + 4 * (rxq))
#define MVPP2_RXQ_NUM_PROCESSED_OFFSET	0
#define MVPP2_RXQ_NUM_NEW_OFFSET	16
#define MVPP2_RXQ_STATUS_REG(rxq)	(0x3400 + 4 * (rxq))
#define MVPP2_RXQ_OCCUPIED_MASK	0x3fff
#define MVPP2_RXQ_NON_OCCUPIED_OFFSET	16
#define MVPP2_RXQ_NON_OCCUPIED_MASK	0x3fff0000
#define MVPP2_RXQ_THRESH_REG	0x204c
#define MVPP2_OCCUPIED_THRESH_OFFSET	0
#define MVPP2_OCCUPIED_THRESH_MASK	0x3fff
#define MVPP2_RXQ_INDEX_REG	0x2050
#define MVPP2_TXQ_NUM_REG	0x2080
#define MVPP2_TXQ_DESC_ADDR_REG	0x2084
#define MVPP2_TXQ_DESC_SIZE_REG	0x2088
#define MVPP2_TXQ_DESC_SIZE_MASK	0x3ff0
#define MVPP2_AGGR_TXQ_UPDATE_REG	0x2090
#define MVPP2_TXQ_INDEX_REG	0x2098
#define MVPP2_TXQ_PREF_BUF_REG	0x209c
#define MVPP2_PREF_BUF_PTR(desc)	((desc) & 0xfff)
#define MVPP2_PREF_BUF_SIZE_4	(BIT(12) | BIT(13))
#define MVPP2_PREF_BUF_SIZE_16	(BIT(12) | BIT(14))
#define MVPP2_PREF_BUF_THRESH(val)	((val) << 17)
#define MVPP2_TXQ_DRAIN_EN_MASK	BIT(31)
#define MVPP2_TXQ_PENDING_REG	0x20a0
#define MVPP2_TXQ_PENDING_MASK	0x3fff
#define MVPP2_TXQ_INT_STATUS_REG	0x20a4
#define MVPP2_TXQ_SENT_REG(txq)	(0x3c00 + 4 * (txq))
#define MVPP2_TRANSMITTED_COUNT_OFFSET	16
#define MVPP2_TRANSMITTED_COUNT_MASK	0x3fff0000
#define MVPP2_TXQ_RSVD_REQ_REG	0x20b0
#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET	16
#define MVPP2_TXQ_RSVD_RSLT_REG	0x20b4
#define MVPP2_TXQ_RSVD_RSLT_MASK	0x3fff
#define MVPP2_TXQ_RSVD_CLR_REG	0x20b8
#define MVPP2_TXQ_RSVD_CLR_OFFSET	16
#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu)	(0x2100 + 4 * (cpu))
#define MVPP22_AGGR_TXQ_DESC_ADDR_OFFS	8
#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu)	(0x2140 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK	0x3ff0
#define MVPP2_AGGR_TXQ_STATUS_REG(cpu)	(0x2180 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_PENDING_MASK	0x3fff
#define MVPP2_AGGR_TXQ_INDEX_REG(cpu)	(0x21c0 + 4 * (cpu))

/* MBUS bridge registers */
#define MVPP2_WIN_BASE(w)	(0x4000 + ((w) << 2))
#define MVPP2_WIN_SIZE(w)	(0x4020 + ((w) << 2))
#define MVPP2_WIN_REMAP(w)	(0x4040 + ((w) << 2))
#define MVPP2_BASE_ADDR_ENABLE	0x4060

/* AXI Bridge Registers */
#define MVPP22_AXI_BM_WR_ATTR_REG	0x4100
#define MVPP22_AXI_BM_RD_ATTR_REG	0x4104
#define MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG	0x4110
#define MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG	0x4114
#define MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG	0x4118
#define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG	0x411c
#define MVPP22_AXI_RX_DATA_WR_ATTR_REG	0x4120
#define MVPP22_AXI_TX_DATA_RD_ATTR_REG	0x4130
#define MVPP22_AXI_RD_NORMAL_CODE_REG	0x4150
#define MVPP22_AXI_RD_SNOOP_CODE_REG	0x4154
#define MVPP22_AXI_WR_NORMAL_CODE_REG	0x4160
#define MVPP22_AXI_WR_SNOOP_CODE_REG	0x4164

/* Values for AXI Bridge registers */
#define MVPP22_AXI_ATTR_CACHE_OFFS	0
#define MVPP22_AXI_ATTR_DOMAIN_OFFS	12

#define MVPP22_AXI_CODE_CACHE_OFFS	0
#define MVPP22_AXI_CODE_DOMAIN_OFFS	4

#define MVPP22_AXI_CODE_CACHE_NON_CACHE	0x3
#define MVPP22_AXI_CODE_CACHE_WR_CACHE	0x7
#define MVPP22_AXI_CODE_CACHE_RD_CACHE	0xb

#define MVPP22_AXI_CODE_DOMAIN_OUTER_DOM	2
#define MVPP22_AXI_CODE_DOMAIN_SYSTEM	3

/* Interrupt Cause and Mask registers */
#define MVPP2_ISR_RX_THRESHOLD_REG(rxq)	(0x5200 + 4 * (rxq))
#define MVPP2_MAX_ISR_RX_THRESHOLD	0xfffff0
#define MVPP21_ISR_RXQ_GROUP_REG(rxq)	(0x5400 + 4 * (rxq))

#define MVPP22_ISR_RXQ_GROUP_INDEX_REG	0x5400
#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK	0xf
#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK	0x380
#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET	7

#define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG	0x5404
#define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK	0x1f
#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK	0xf00
#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET	8

#define MVPP2_ISR_ENABLE_REG(port)	(0x5420 + 4 * (port))
#define MVPP2_ISR_ENABLE_INTERRUPT(mask)	((mask) & 0xffff)
#define MVPP2_ISR_DISABLE_INTERRUPT(mask)	(((mask) << 16) & 0xffff0000)
#define MVPP2_ISR_RX_TX_CAUSE_REG(port)	(0x5480 + 4 * (port))
#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK	0xff0000
#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK	BIT(24)
#define MVPP2_CAUSE_FCS_ERR_MASK	BIT(25)
#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK	BIT(26)
#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK	BIT(29)
#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK	BIT(30)
#define MVPP2_CAUSE_MISC_SUM_MASK	BIT(31)
#define MVPP2_ISR_RX_TX_MASK_REG(port)	(0x54a0 + 4 * (port))
#define MVPP2_ISR_PON_RX_TX_MASK_REG	0x54bc
#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK	0x3fc00000
#define MVPP2_PON_CAUSE_MISC_SUM_MASK	BIT(31)
#define MVPP2_ISR_MISC_CAUSE_REG	0x55b0

/* Buffer Manager registers */
#define MVPP2_BM_POOL_BASE_REG(pool)	(0x6000 + ((pool) * 4))
#define MVPP2_BM_POOL_BASE_ADDR_MASK	0xfffff80
#define MVPP2_BM_POOL_SIZE_REG(pool)	(0x6040 + ((pool) * 4))
#define MVPP2_BM_POOL_SIZE_MASK	0xfff0
#define MVPP2_BM_POOL_READ_PTR_REG(pool)	(0x6080 + ((pool) * 4))
#define MVPP2_BM_POOL_GET_READ_PTR_MASK	0xfff0
#define MVPP2_BM_POOL_PTRS_NUM_REG(pool)	(0x60c0 + ((pool) * 4))
#define MVPP2_BM_POOL_PTRS_NUM_MASK	0xfff0
#define MVPP2_BM_BPPI_READ_PTR_REG(pool)	(0x6100 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool)	(0x6140 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTR_NUM_MASK	0x7ff
#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK	BIT(16)
#define MVPP2_BM_POOL_CTRL_REG(pool)	(0x6200 + ((pool) * 4))
#define MVPP2_BM_START_MASK	BIT(0)
#define MVPP2_BM_STOP_MASK	BIT(1)
#define MVPP2_BM_STATE_MASK	BIT(4)
#define MVPP2_BM_LOW_THRESH_OFFS	8
#define MVPP2_BM_LOW_THRESH_MASK	0x7f00
#define MVPP2_BM_LOW_THRESH_VALUE(val)	((val) << \
					MVPP2_BM_LOW_THRESH_OFFS)
#define MVPP2_BM_HIGH_THRESH_OFFS	16
#define MVPP2_BM_HIGH_THRESH_MASK	0x7f0000
#define MVPP2_BM_HIGH_THRESH_VALUE(val)	((val) << \
					MVPP2_BM_HIGH_THRESH_OFFS)
#define MVPP2_BM_INTR_CAUSE_REG(pool)	(0x6240 + ((pool) * 4))
#define MVPP2_BM_RELEASED_DELAY_MASK	BIT(0)
#define MVPP2_BM_ALLOC_FAILED_MASK	BIT(1)
#define MVPP2_BM_BPPE_EMPTY_MASK	BIT(2)
#define MVPP2_BM_BPPE_FULL_MASK	BIT(3)
#define MVPP2_BM_AVAILABLE_BP_LOW_MASK	BIT(4)
#define MVPP2_BM_INTR_MASK_REG(pool)	(0x6280 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_REG(pool)	(0x6400 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK	BIT(0)
#define MVPP2_BM_VIRT_ALLOC_REG	0x6440
#define MVPP22_BM_ADDR_HIGH_ALLOC	0x6444
#define MVPP22_BM_ADDR_HIGH_PHYS_MASK	0xff
#define MVPP22_BM_ADDR_HIGH_VIRT_MASK	0xff00
#define MVPP22_BM_ADDR_HIGH_VIRT_SHIFT	8
#define MVPP2_BM_PHY_RLS_REG(pool)	(0x6480 + ((pool) * 4))
#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK	BIT(0)
#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK	BIT(1)
#define MVPP2_BM_PHY_RLS_GRNTD_MASK	BIT(2)
#define MVPP2_BM_VIRT_RLS_REG	0x64c0
#define MVPP22_BM_ADDR_HIGH_RLS_REG	0x64c4
#define MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK	0xff
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK	0xff00
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT	8

/* TX Scheduler registers */
#define MVPP2_TXP_SCHED_PORT_INDEX_REG	0x8000
#define MVPP2_TXP_SCHED_Q_CMD_REG	0x8004
#define MVPP2_TXP_SCHED_ENQ_MASK	0xff
#define MVPP2_TXP_SCHED_DISQ_OFFSET	8
#define MVPP2_TXP_SCHED_CMD_1_REG	0x8010
#define MVPP2_TXP_SCHED_PERIOD_REG	0x8018
#define MVPP2_TXP_SCHED_MTU_REG	0x801c
#define MVPP2_TXP_MTU_MAX	0x7FFFF
#define MVPP2_TXP_SCHED_REFILL_REG	0x8020
#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK	0x7ffff
#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK	0x3ff00000
#define MVPP2_TXP_REFILL_PERIOD_MASK(v)	((v) << 20)
#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG	0x8024
#define MVPP2_TXP_TOKEN_SIZE_MAX	0xffffffff
#define MVPP2_TXQ_SCHED_REFILL_REG(q)	(0x8040 + ((q) << 2))
#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK	0x7ffff
#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK	0x3ff00000
#define MVPP2_TXQ_REFILL_PERIOD_MASK(v)	((v) << 20)
#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q)	(0x8060 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_SIZE_MAX	0x7fffffff
#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q)	(0x8080 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_CNTR_MAX	0xffffffff

/* TX general registers */
#define MVPP2_TX_SNOOP_REG	0x8800
#define MVPP2_TX_PORT_FLUSH_REG	0x8810
#define MVPP2_TX_PORT_FLUSH_MASK(port)	(1 << (port))

/* LMS registers */
#define MVPP2_SRC_ADDR_MIDDLE	0x24
#define MVPP2_SRC_ADDR_HIGH	0x28
#define MVPP2_PHY_AN_CFG0_REG	0x34
#define MVPP2_PHY_AN_STOP_SMI0_MASK	BIT(7)
#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG	0x305c
#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT	0x27

/* Per-port registers */
#define MVPP2_GMAC_CTRL_0_REG	0x0
#define MVPP2_GMAC_PORT_EN_MASK	BIT(0)
#define MVPP2_GMAC_MAX_RX_SIZE_OFFS	2
#define MVPP2_GMAC_MAX_RX_SIZE_MASK	0x7ffc
#define MVPP2_GMAC_MIB_CNTR_EN_MASK	BIT(15)
#define MVPP2_GMAC_CTRL_1_REG	0x4
#define MVPP2_GMAC_PERIODIC_XON_EN_MASK	BIT(1)
#define MVPP2_GMAC_GMII_LB_EN_MASK	BIT(5)
#define MVPP2_GMAC_PCS_LB_EN_BIT	6
#define MVPP2_GMAC_PCS_LB_EN_MASK	BIT(6)
#define MVPP2_GMAC_SA_LOW_OFFS	7
#define MVPP2_GMAC_CTRL_2_REG	0x8
#define MVPP2_GMAC_INBAND_AN_MASK	BIT(0)
#define MVPP2_GMAC_PCS_ENABLE_MASK	BIT(3)
#define MVPP2_GMAC_PORT_RGMII_MASK	BIT(4)
#define MVPP2_GMAC_PORT_RESET_MASK	BIT(6)
#define MVPP2_GMAC_AUTONEG_CONFIG	0xc
#define MVPP2_GMAC_FORCE_LINK_DOWN	BIT(0)
#define MVPP2_GMAC_FORCE_LINK_PASS	BIT(1)
#define MVPP2_GMAC_CONFIG_MII_SPEED	BIT(5)
#define MVPP2_GMAC_CONFIG_GMII_SPEED	BIT(6)
#define MVPP2_GMAC_AN_SPEED_EN	BIT(7)
#define MVPP2_GMAC_FC_ADV_EN	BIT(9)
#define MVPP2_GMAC_CONFIG_FULL_DUPLEX	BIT(12)
#define MVPP2_GMAC_AN_DUPLEX_EN	BIT(13)
#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG	0x1c
#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS	6
#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK	0x1fc0
#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v)	(((v) << 6) & \
					MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
#define MVPP22_GMAC_CTRL_4_REG	0x90
#define MVPP22_CTRL4_EXT_PIN_GMII_SEL	BIT(0)
#define MVPP22_CTRL4_DP_CLK_SEL	BIT(5)
#define MVPP22_CTRL4_SYNC_BYPASS	BIT(6)
#define MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE	BIT(7)

/* Per-port XGMAC registers. PPv2.2 only, only for GOP port 0,
 * relative to port->base.
 */
#define MVPP22_XLG_CTRL0_REG	0x100
#define MVPP22_XLG_CTRL0_PORT_EN	BIT(0)
#define MVPP22_XLG_CTRL0_MAC_RESET_DIS	BIT(1)
#define MVPP22_XLG_CTRL0_MIB_CNT_DIS	BIT(14)

#define MVPP22_XLG_CTRL3_REG	0x11c
#define MVPP22_XLG_CTRL3_MACMODESELECT_MASK	(7 << 13)
#define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC	(0 << 13)
#define MVPP22_XLG_CTRL3_MACMODESELECT_10G	(1 << 13)

/* SMI registers. PPv2.2 only, relative to priv->iface_base. */
#define MVPP22_SMI_MISC_CFG_REG	0x1204
#define MVPP22_SMI_POLLING_EN	BIT(10)

#define MVPP22_GMAC_BASE(port)	(0x7000 + (port) * 0x1000 + 0xe00)

#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK	0xff

/* Descriptor ring Macros */
#define MVPP2_QUEUE_NEXT_DESC(q, index) \
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
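
/* e.g. with a 256-descriptor ring (last_desc == 255),
 * MVPP2_QUEUE_NEXT_DESC(q, 254) yields 255 and
 * MVPP2_QUEUE_NEXT_DESC(q, 255) wraps back to 0.
 */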

/* Various constants */

/* Coalescing */
#define MVPP2_TXDONE_COAL_PKTS_THRESH	15
#define MVPP2_TXDONE_HRTIMER_PERIOD_NS	1000000UL
#define MVPP2_RX_COAL_PKTS	32
#define MVPP2_RX_COAL_USEC	100

/* The two bytes Marvell header. Either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is filled automatically by zeroes on
 * the RX side. Those two bytes being at the front of the Ethernet
 * header, they allow the IP header to be aligned on a 4-byte
 * boundary automatically: the hardware skips those two bytes on its
 * own.
 */
#define MVPP2_MH_SIZE	2
#define MVPP2_ETH_TYPE_LEN	2
#define MVPP2_PPPOE_HDR_SIZE	8
#define MVPP2_VLAN_TAG_LEN	4
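
/* Worked example of the alignment described above: with the Marvell
 * header in place, the IP header starts at MVPP2_MH_SIZE + ETH_HLEN =
 * 2 + 14 = 16 bytes from the start of the packet, i.e. on a 4-byte
 * boundary.
 */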

/* Lbtd 802.3 type */
#define MVPP2_IP_LBDT_TYPE	0xfffa

#define MVPP2_TX_CSUM_MAX_SIZE	9800

/* Timeout constants */
#define MVPP2_TX_DISABLE_TIMEOUT_MSEC	1000
#define MVPP2_TX_PENDING_TIMEOUT_MSEC	1000

#define MVPP2_TX_MTU_MAX	0x7ffff

/* Maximum number of T-CONTs of PON port */
#define MVPP2_MAX_TCONT	16

/* Maximum number of supported ports */
#define MVPP2_MAX_PORTS	4

/* Maximum number of TXQs used by single port */
#define MVPP2_MAX_TXQ	8

/* Default number of RXQs in use */
#define MVPP2_DEFAULT_RXQ	4

/* Max number of Rx descriptors */
#define MVPP2_MAX_RXD	128

/* Max number of Tx descriptors */
#define MVPP2_MAX_TXD	1024

/* Amount of Tx descriptors that can be reserved at once by CPU */
#define MVPP2_CPU_DESC_CHUNK	64

/* Max number of Tx descriptors in each aggregated queue */
#define MVPP2_AGGR_TXQ_SIZE	256

/* Descriptor aligned size */
#define MVPP2_DESC_ALIGNED_SIZE	32

/* Descriptor alignment mask */
#define MVPP2_TX_DESC_ALIGN	(MVPP2_DESC_ALIGNED_SIZE - 1)

/* RX FIFO constants */
#define MVPP2_RX_FIFO_PORT_DATA_SIZE	0x2000
#define MVPP2_RX_FIFO_PORT_ATTR_SIZE	0x80
#define MVPP2_RX_FIFO_PORT_MIN_PKT	0x80

/* RX buffer constants */
#define MVPP2_SKB_SHINFO_SIZE \
	SKB_DATA_ALIGN(sizeof(struct skb_shared_info))

#define MVPP2_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN, cache_line_size())

#define MVPP2_RX_BUF_SIZE(pkt_size)	((pkt_size) + NET_SKB_PAD)
#define MVPP2_RX_TOTAL_SIZE(buf_size)	((buf_size) + MVPP2_SKB_SHINFO_SIZE)
#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
	((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
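
/* Illustrative sizing: for an MTU of 1500 and 64-byte cache lines,
 * MVPP2_RX_PKT_SIZE(1500) = ALIGN(1500 + 2 + 4 + 14 + 4, 64) = 1536
 * bytes; MVPP2_RX_BUF_SIZE() then adds NET_SKB_PAD of headroom and
 * MVPP2_RX_TOTAL_SIZE() the skb_shared_info area on top of that.
 */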

#define MVPP2_BIT_TO_BYTE(bit)	((bit) / 8)

/* IPv6 max L3 address size */
#define MVPP2_MAX_L3_ADDR_SIZE	16

/* Port flags */
#define MVPP2_F_LOOPBACK	BIT(0)

/* Marvell tag types */
enum mvpp2_tag_type {
	MVPP2_TAG_TYPE_NONE = 0,
	MVPP2_TAG_TYPE_MH = 1,
	MVPP2_TAG_TYPE_DSA = 2,
	MVPP2_TAG_TYPE_EDSA = 3,
	MVPP2_TAG_TYPE_VLAN = 4,
	MVPP2_TAG_TYPE_LAST = 5
};

/* Parser constants */
#define MVPP2_PRS_TCAM_SRAM_SIZE	256
#define MVPP2_PRS_TCAM_WORDS	6
#define MVPP2_PRS_SRAM_WORDS	4
#define MVPP2_PRS_FLOW_ID_SIZE	64
#define MVPP2_PRS_FLOW_ID_MASK	0x3f
#define MVPP2_PRS_TCAM_ENTRY_INVALID	1
#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT	BIT(5)
#define MVPP2_PRS_IPV4_HEAD	0x40
#define MVPP2_PRS_IPV4_HEAD_MASK	0xf0
#define MVPP2_PRS_IPV4_MC	0xe0
#define MVPP2_PRS_IPV4_MC_MASK	0xf0
#define MVPP2_PRS_IPV4_BC_MASK	0xff
#define MVPP2_PRS_IPV4_IHL	0x5
#define MVPP2_PRS_IPV4_IHL_MASK	0xf
#define MVPP2_PRS_IPV6_MC	0xff
#define MVPP2_PRS_IPV6_MC_MASK	0xff
#define MVPP2_PRS_IPV6_HOP_MASK	0xff
#define MVPP2_PRS_TCAM_PROTO_MASK	0xff
#define MVPP2_PRS_TCAM_PROTO_MASK_L	0x3f
#define MVPP2_PRS_DBL_VLANS_MAX	100

/* Tcam structure:
 * - lookup ID - 4 bits
 * - port ID - 1 byte
 * - additional information - 1 byte
 * - header data - 8 bytes
 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
 */
#define MVPP2_PRS_AI_BITS	8
#define MVPP2_PRS_PORT_MASK	0xff
#define MVPP2_PRS_LU_MASK	0xf
#define MVPP2_PRS_TCAM_DATA_BYTE(offs)	\
	(((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)	\
	(((offs) * 2) - ((offs) % 2) + 2)
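
/* Illustrative mapping from the two macros above: header-data offsets
 * 0,1,2,3,4,5 land in tcam.byte[] indices 0,1,4,5,8,9, while their
 * enable (mask) bytes land in indices 2,3,6,7,10,11 - each 16-bit data
 * half-word is followed by its 16-bit enable half-word.
 */
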
#define MVPP2_PRS_TCAM_AI_BYTE	16
#define MVPP2_PRS_TCAM_PORT_BYTE	17
#define MVPP2_PRS_TCAM_LU_BYTE	20
#define MVPP2_PRS_TCAM_EN_OFFS(offs)	((offs) + 2)
#define MVPP2_PRS_TCAM_INV_WORD	5
/* Tcam entries ID */
#define MVPP2_PE_DROP_ALL	0
#define MVPP2_PE_FIRST_FREE_TID	1
#define MVPP2_PE_LAST_FREE_TID	(MVPP2_PRS_TCAM_SRAM_SIZE - 31)
#define MVPP2_PE_IP6_EXT_PROTO_UN	(MVPP2_PRS_TCAM_SRAM_SIZE - 30)
#define MVPP2_PE_MAC_MC_IP6	(MVPP2_PRS_TCAM_SRAM_SIZE - 29)
#define MVPP2_PE_IP6_ADDR_UN	(MVPP2_PRS_TCAM_SRAM_SIZE - 28)
#define MVPP2_PE_IP4_ADDR_UN	(MVPP2_PRS_TCAM_SRAM_SIZE - 27)
#define MVPP2_PE_LAST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 26)
#define MVPP2_PE_FIRST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 19)
#define MVPP2_PE_EDSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 18)
#define MVPP2_PE_EDSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 17)
#define MVPP2_PE_DSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 16)
#define MVPP2_PE_DSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 15)
#define MVPP2_PE_ETYPE_EDSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 14)
#define MVPP2_PE_ETYPE_EDSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 13)
#define MVPP2_PE_ETYPE_DSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 12)
#define MVPP2_PE_ETYPE_DSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 11)
#define MVPP2_PE_MH_DEFAULT	(MVPP2_PRS_TCAM_SRAM_SIZE - 10)
#define MVPP2_PE_DSA_DEFAULT	(MVPP2_PRS_TCAM_SRAM_SIZE - 9)
#define MVPP2_PE_IP6_PROTO_UN	(MVPP2_PRS_TCAM_SRAM_SIZE - 8)
#define MVPP2_PE_IP4_PROTO_UN	(MVPP2_PRS_TCAM_SRAM_SIZE - 7)
#define MVPP2_PE_ETH_TYPE_UN	(MVPP2_PRS_TCAM_SRAM_SIZE - 6)
#define MVPP2_PE_VLAN_DBL	(MVPP2_PRS_TCAM_SRAM_SIZE - 5)
#define MVPP2_PE_VLAN_NONE	(MVPP2_PRS_TCAM_SRAM_SIZE - 4)
#define MVPP2_PE_MAC_MC_ALL	(MVPP2_PRS_TCAM_SRAM_SIZE - 3)
#define MVPP2_PE_MAC_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 2)
#define MVPP2_PE_MAC_NON_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 1)

/* Sram structure
 * The fields are represented by MVPP2_PRS_SRAM_DATA_REG(3)->(0).
 */
#define MVPP2_PRS_SRAM_RI_OFFS	0
#define MVPP2_PRS_SRAM_RI_WORD	0
#define MVPP2_PRS_SRAM_RI_CTRL_OFFS	32
#define MVPP2_PRS_SRAM_RI_CTRL_WORD	1
#define MVPP2_PRS_SRAM_RI_CTRL_BITS	32
#define MVPP2_PRS_SRAM_SHIFT_OFFS	64
#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT	72
#define MVPP2_PRS_SRAM_UDF_OFFS	73
#define MVPP2_PRS_SRAM_UDF_BITS	8
#define MVPP2_PRS_SRAM_UDF_MASK	0xff
#define MVPP2_PRS_SRAM_UDF_SIGN_BIT	81
#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS	82
#define MVPP2_PRS_SRAM_UDF_TYPE_MASK	0x7
#define MVPP2_PRS_SRAM_UDF_TYPE_L3	1
#define MVPP2_PRS_SRAM_UDF_TYPE_L4	4
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS	85
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK	0x3
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD	1
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD	2
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD	3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS	87
#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS	2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK	0x3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD	0
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD	2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD	3
#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS	89
#define MVPP2_PRS_SRAM_AI_OFFS	90
#define MVPP2_PRS_SRAM_AI_CTRL_OFFS	98
#define MVPP2_PRS_SRAM_AI_CTRL_BITS	8
#define MVPP2_PRS_SRAM_AI_MASK	0xff
#define MVPP2_PRS_SRAM_NEXT_LU_OFFS	106
#define MVPP2_PRS_SRAM_NEXT_LU_MASK	0xf
#define MVPP2_PRS_SRAM_LU_DONE_BIT	110
#define MVPP2_PRS_SRAM_LU_GEN_BIT	111
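
/* Example of the bit-to-byte arithmetic used with these offsets: the
 * next-lookup field at bit offset 106 lives in
 * sram.byte[MVPP2_BIT_TO_BYTE(106)] = sram.byte[13], starting at bit
 * 106 % 8 = 2 within that byte.
 */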

/* Sram result info bits assignment */
#define MVPP2_PRS_RI_MAC_ME_MASK	0x1
#define MVPP2_PRS_RI_DSA_MASK	0x2
#define MVPP2_PRS_RI_VLAN_MASK	(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_VLAN_NONE	0x0
#define MVPP2_PRS_RI_VLAN_SINGLE	BIT(2)
#define MVPP2_PRS_RI_VLAN_DOUBLE	BIT(3)
#define MVPP2_PRS_RI_VLAN_TRIPLE	(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_CPU_CODE_MASK	0x70
#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC	BIT(4)
#define MVPP2_PRS_RI_L2_CAST_MASK	(BIT(9) | BIT(10))
#define MVPP2_PRS_RI_L2_UCAST	0x0
#define MVPP2_PRS_RI_L2_MCAST	BIT(9)
#define MVPP2_PRS_RI_L2_BCAST	BIT(10)
#define MVPP2_PRS_RI_PPPOE_MASK	0x800
#define MVPP2_PRS_RI_L3_PROTO_MASK	(BIT(12) | BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_UN	0x0
#define MVPP2_PRS_RI_L3_IP4	BIT(12)
#define MVPP2_PRS_RI_L3_IP4_OPT	BIT(13)
#define MVPP2_PRS_RI_L3_IP4_OTHER	(BIT(12) | BIT(13))
#define MVPP2_PRS_RI_L3_IP6	BIT(14)
#define MVPP2_PRS_RI_L3_IP6_EXT	(BIT(12) | BIT(14))
#define MVPP2_PRS_RI_L3_ARP	(BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_ADDR_MASK	(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_L3_UCAST	0x0
#define MVPP2_PRS_RI_L3_MCAST	BIT(15)
#define MVPP2_PRS_RI_L3_BCAST	(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_IP_FRAG_MASK	0x20000
#define MVPP2_PRS_RI_UDF3_MASK	0x300000
#define MVPP2_PRS_RI_UDF3_RX_SPECIAL	BIT(21)
#define MVPP2_PRS_RI_L4_PROTO_MASK	0x1c00000
#define MVPP2_PRS_RI_L4_TCP	BIT(22)
#define MVPP2_PRS_RI_L4_UDP	BIT(23)
#define MVPP2_PRS_RI_L4_OTHER	(BIT(22) | BIT(23))
#define MVPP2_PRS_RI_UDF7_MASK	0x60000000
#define MVPP2_PRS_RI_UDF7_IP6_LITE	BIT(29)
#define MVPP2_PRS_RI_DROP_MASK	0x80000000

/* Sram additional info bits assignment */
#define MVPP2_PRS_IPV4_DIP_AI_BIT	BIT(0)
#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT	BIT(0)
#define MVPP2_PRS_IPV6_EXT_AI_BIT	BIT(1)
#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT	BIT(2)
#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT	BIT(3)
#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT	BIT(4)
#define MVPP2_PRS_SINGLE_VLAN_AI	0
#define MVPP2_PRS_DBL_VLAN_AI_BIT	BIT(7)

/* DSA/EDSA type */
#define MVPP2_PRS_TAGGED	true
#define MVPP2_PRS_UNTAGGED	false
#define MVPP2_PRS_EDSA	true
#define MVPP2_PRS_DSA	false

/* MAC entries, shadow udf */
enum mvpp2_prs_udf {
	MVPP2_PRS_UDF_MAC_DEF,
	MVPP2_PRS_UDF_MAC_RANGE,
	MVPP2_PRS_UDF_L2_DEF,
	MVPP2_PRS_UDF_L2_DEF_COPY,
	MVPP2_PRS_UDF_L2_USER,
};

enum mvpp2_prs_lookup {
	MVPP2_PRS_LU_MH,
	MVPP2_PRS_LU_MAC,
	MVPP2_PRS_LU_DSA,
	MVPP2_PRS_LU_VLAN,
	MVPP2_PRS_LU_L2,
	MVPP2_PRS_LU_PPPOE,
	MVPP2_PRS_LU_IP4,
	MVPP2_PRS_LU_IP6,
	MVPP2_PRS_LU_FLOWS,
	MVPP2_PRS_LU_LAST,
};

enum mvpp2_prs_l3_cast {
	MVPP2_PRS_L3_UNI_CAST,
	MVPP2_PRS_L3_MULTI_CAST,
	MVPP2_PRS_L3_BROAD_CAST
};

/* Classifier constants */
#define MVPP2_CLS_FLOWS_TBL_SIZE	512
#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS	3
#define MVPP2_CLS_LKP_TBL_SIZE	64

/* BM constants */
#define MVPP2_BM_POOLS_NUM	8
#define MVPP2_BM_LONG_BUF_NUM	1024
#define MVPP2_BM_SHORT_BUF_NUM	2048
#define MVPP2_BM_POOL_SIZE_MAX	(16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
#define MVPP2_BM_POOL_PTR_ALIGN	128
#define MVPP2_BM_SWF_LONG_POOL(port)	((port > 2) ? 2 : port)
#define MVPP2_BM_SWF_SHORT_POOL	3

/* BM cookie (32 bits) definition */
#define MVPP2_BM_COOKIE_POOL_OFFS	8
#define MVPP2_BM_COOKIE_CPU_OFFS	24

/* BM short pool packet size
 * These values assure that for SWF the total number
 * of bytes allocated for each buffer will be 512
 */
#define MVPP2_BM_SHORT_PKT_SIZE	MVPP2_RX_MAX_PKT_SIZE(512)
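
/* That is, MVPP2_RX_MAX_PKT_SIZE(512) = 512 - NET_SKB_PAD -
 * MVPP2_SKB_SHINFO_SIZE, so packet data, headroom and the
 * skb_shared_info area together occupy exactly 512 bytes per buffer.
 */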

#define MVPP21_ADDR_SPACE_SZ	0
#define MVPP22_ADDR_SPACE_SZ	SZ_64K

#define MVPP2_MAX_CPUS	4

enum mvpp2_bm_type {
	MVPP2_BM_FREE,
	MVPP2_BM_SWF_LONG,
	MVPP2_BM_SWF_SHORT
};

/* Definitions */

/* Shared Packet Processor resources */
struct mvpp2 {
	/* Shared registers' base addresses */
	void __iomem *lms_base;
	void __iomem *iface_base;

	/* On PPv2.2, each CPU can access the base register through a
	 * separate address space, each 64 KB apart from each other.
	 */
	void __iomem *cpu_base[MVPP2_MAX_CPUS];

	/* Common clocks */
	struct clk *pp_clk;
	struct clk *gop_clk;
	struct clk *mg_clk;

	/* List of pointers to port structures */
	struct mvpp2_port **port_list;

	/* Aggregated TXQs */
	struct mvpp2_tx_queue *aggr_txqs;

	/* BM pools */
	struct mvpp2_bm_pool *bm_pools;

	/* PRS shadow table */
	struct mvpp2_prs_shadow *prs_shadow;
	/* PRS auxiliary table for double vlan entries control */
	bool *prs_double_vlans;

	/* Tclk value */
	u32 tclk;

	/* HW version */
	enum { MVPP21, MVPP22 } hw_version;

	/* Maximum number of RXQs per port */
	unsigned int max_port_rxqs;
};

struct mvpp2_pcpu_stats {
	struct u64_stats_sync syncp;
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
};

/* Per-CPU port control */
struct mvpp2_port_pcpu {
	struct hrtimer tx_done_timer;
	bool timer_scheduled;
	/* Tasklet for egress finalization */
	struct tasklet_struct tx_done_tasklet;
};

struct mvpp2_port {
	u8 id;

	/* Index of the port from the "group of ports" complex point
	 * of view
	 */
	int gop_id;

	int irq;

	struct mvpp2 *priv;

	/* Per-port registers' base address */
	void __iomem *base;

	struct mvpp2_rx_queue **rxqs;
	struct mvpp2_tx_queue **txqs;
	struct net_device *dev;

	int pkt_size;

	u32 pending_cause_rx;
	struct napi_struct napi;

	/* Per-CPU port control */
	struct mvpp2_port_pcpu __percpu *pcpu;

	/* Flags */
	unsigned long flags;

	u16 tx_ring_size;
	u16 rx_ring_size;
	struct mvpp2_pcpu_stats __percpu *stats;

	phy_interface_t phy_interface;
	struct device_node *phy_node;
	unsigned int link;
	unsigned int duplex;
	unsigned int speed;

	struct mvpp2_bm_pool *pool_long;
	struct mvpp2_bm_pool *pool_short;

	/* Index of first port's physical RXQ */
	u8 first_rxq;
};

/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design.
 */

#define MVPP2_TXD_L3_OFF_SHIFT	0
#define MVPP2_TXD_IP_HLEN_SHIFT	8
#define MVPP2_TXD_L4_CSUM_FRAG	BIT(13)
#define MVPP2_TXD_L4_CSUM_NOT	BIT(14)
#define MVPP2_TXD_IP_CSUM_DISABLE	BIT(15)
#define MVPP2_TXD_PADDING_DISABLE	BIT(23)
#define MVPP2_TXD_L4_UDP	BIT(24)
#define MVPP2_TXD_L3_IP6	BIT(26)
#define MVPP2_TXD_L_DESC	BIT(28)
#define MVPP2_TXD_F_DESC	BIT(29)

#define MVPP2_RXD_ERR_SUMMARY	BIT(15)
#define MVPP2_RXD_ERR_CODE_MASK	(BIT(13) | BIT(14))
#define MVPP2_RXD_ERR_CRC	0x0
#define MVPP2_RXD_ERR_OVERRUN	BIT(13)
#define MVPP2_RXD_ERR_RESOURCE	(BIT(13) | BIT(14))
#define MVPP2_RXD_BM_POOL_ID_OFFS	16
#define MVPP2_RXD_BM_POOL_ID_MASK	(BIT(16) | BIT(17) | BIT(18))
#define MVPP2_RXD_HWF_SYNC	BIT(21)
#define MVPP2_RXD_L4_CSUM_OK	BIT(22)
#define MVPP2_RXD_IP4_HEADER_ERR	BIT(24)
#define MVPP2_RXD_L4_TCP	BIT(25)
#define MVPP2_RXD_L4_UDP	BIT(26)
#define MVPP2_RXD_L3_IP4	BIT(28)
#define MVPP2_RXD_L3_IP6	BIT(30)
#define MVPP2_RXD_BUF_HDR	BIT(31)

/* HW TX descriptor for PPv2.1 */
struct mvpp21_tx_desc {
	u32 command;		/* Options used by HW for packet transmitting.*/
	u8  packet_offset;	/* the offset from the buffer beginning	*/
	u8  phys_txq;		/* destination queue ID			*/
	u16 data_size;		/* data size of transmitted packet in bytes */
	u32 buf_dma_addr;	/* physical addr of transmitted buffer	*/
	u32 buf_cookie;		/* cookie for access to TX buffer in tx path */
	u32 reserved1[3];	/* hw_cmd (for future use, BM, PON, PNC) */
	u32 reserved2;		/* reserved (for future use)		*/
};

/* HW RX descriptor for PPv2.1 */
struct mvpp21_rx_desc {
	u32 status;		/* info about received packet		*/
	u16 reserved1;		/* parser_info (for future use, PnC)	*/
	u16 data_size;		/* size of received packet in bytes	*/
	u32 buf_dma_addr;	/* physical address of the buffer	*/
	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */
	u16 reserved2;		/* gem_port_id (for future use, PON)	*/
	u16 reserved3;		/* csum_l4 (for future use, PnC)	*/
	u8  reserved4;		/* bm_qset (for future use, BM)		*/
	u8  reserved5;
	u16 reserved6;		/* classify_info (for future use, PnC)	*/
	u32 reserved7;		/* flow_id (for future use, PnC)	*/
	u32 reserved8;
};

/* HW TX descriptor for PPv2.2 */
struct mvpp22_tx_desc {
	u32 command;
	u8  packet_offset;
	u8  phys_txq;
	u16 data_size;
	u64 reserved1;
	u64 buf_dma_addr_ptp;
	u64 buf_cookie_misc;
};

/* HW RX descriptor for PPv2.2 */
struct mvpp22_rx_desc {
	u32 status;
	u16 reserved1;
	u16 data_size;
	u32 reserved2;
	u32 reserved3;
	u64 buf_dma_addr_key_hash;
	u64 buf_cookie_misc;
};

/* Opaque type used by the driver to manipulate the HW TX and RX
 * descriptors
 */
struct mvpp2_tx_desc {
	union {
		struct mvpp21_tx_desc pp21;
		struct mvpp22_tx_desc pp22;
	};
};

struct mvpp2_rx_desc {
	union {
		struct mvpp21_rx_desc pp21;
		struct mvpp22_rx_desc pp22;
	};
};

struct mvpp2_txq_pcpu_buf {
	/* Transmitted SKB */
	struct sk_buff *skb;

	/* Physical address of transmitted buffer */
	dma_addr_t dma;

	/* Size transmitted */
	size_t size;
};

/* Per-CPU Tx queue control */
struct mvpp2_txq_pcpu {
	int cpu;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptor in the
	 * descriptor ring
	 */
	int count;

	/* Number of Tx DMA descriptors reserved for each CPU */
	int reserved_num;

	/* Infos about transmitted buffers */
	struct mvpp2_txq_pcpu_buf *buffs;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;
};

struct mvpp2_tx_queue {
	/* Physical number of this Tx queue */
	u8 id;

	/* Logical number of this Tx queue */
	u8 log_id;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptor in the descriptor ring */
	int count;

	/* Per-CPU control of physical Tx queues */
	struct mvpp2_txq_pcpu __percpu *pcpu;

	u32 done_pkts_coal;

	/* Virtual address of the Tx DMA descriptors array */
	struct mvpp2_tx_desc *descs;

	/* DMA address of the Tx DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last Tx DMA descriptor */
	int last_desc;

	/* Index of the next Tx DMA descriptor to process */
	int next_desc_to_proc;
};

struct mvpp2_rx_queue {
	/* RX queue number, in the range 0-31 for physical RXQs */
	u8 id;

	/* Num of rx descriptors in the rx descriptor ring */
	int size;

	u32 pkts_coal;
	u32 time_coal;

	/* Virtual address of the RX DMA descriptors array */
	struct mvpp2_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;

	/* ID of port to which physical RXQ is mapped */
	int port;

	/* Port's logic RXQ number to which physical RXQ is mapped */
	int logic_rxq;
};

union mvpp2_prs_tcam_entry {
	u32 word[MVPP2_PRS_TCAM_WORDS];
	u8  byte[MVPP2_PRS_TCAM_WORDS * 4];
};

union mvpp2_prs_sram_entry {
	u32 word[MVPP2_PRS_SRAM_WORDS];
	u8  byte[MVPP2_PRS_SRAM_WORDS * 4];
};

struct mvpp2_prs_entry {
	u32 index;
	union mvpp2_prs_tcam_entry tcam;
	union mvpp2_prs_sram_entry sram;
};

struct mvpp2_prs_shadow {
	bool valid;
	bool finish;

	/* Lookup ID */
	int lu;

	/* User defined offset */
	int udf;

	/* Result info */
	u32 ri;
	u32 ri_mask;
};

struct mvpp2_cls_flow_entry {
	u32 index;
	u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
};

struct mvpp2_cls_lookup_entry {
	u32 lkpid;
	u32 way;
	u32 data;
};

struct mvpp2_bm_pool {
	/* Pool number in the range 0-7 */
	int id;
	enum mvpp2_bm_type type;

	/* Buffer Pointers Pool External (BPPE) size */
	int size;
	/* BPPE size in bytes */
	int size_bytes;
	/* Number of buffers for this pool */
	int buf_num;
	/* Pool buffer size */
	int buf_size;
	/* Packet size */
	int pkt_size;

	/* BPPE virtual base address */
	u32 *virt_addr;
	/* BPPE DMA base address */
	dma_addr_t dma_addr;

	/* Ports using BM pool */
	u32 port_map;
};

/* Static declarations */

/* Number of RXQs used by single port */
static int rxq_number = MVPP2_DEFAULT_RXQ;
/* Number of TXQs used by single port */
static int txq_number = MVPP2_MAX_TXQ;

#define MVPP2_DRIVER_NAME "mvpp2"
#define MVPP2_DRIVER_VERSION "1.0"

/* Utility/helper methods */

static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->cpu_base[0] + offset);
}

static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->cpu_base[0] + offset);
}

/* These accessors should be used to access:
 *
 * - per-CPU registers, where each CPU has its own copy of the
 *   register.
 *
 *   MVPP2_BM_VIRT_ALLOC_REG
 *   MVPP22_BM_ADDR_HIGH_ALLOC
 *   MVPP22_BM_ADDR_HIGH_RLS_REG
 *   MVPP2_BM_VIRT_RLS_REG
 *   MVPP2_ISR_RX_TX_CAUSE_REG
 *   MVPP2_ISR_RX_TX_MASK_REG
 *   MVPP2_AGGR_TXQ_UPDATE_REG
 *   MVPP2_TXQ_RSVD_REQ_REG
 *   MVPP2_TXQ_RSVD_RSLT_REG
 *   MVPP2_TXQ_SENT_REG
 *
 * - global registers that must be accessed through a specific CPU
 *   window, because they are related to an access to a per-CPU
 *   register
 *
 *   MVPP2_BM_PHY_ALLOC_REG   (related to MVPP2_BM_VIRT_ALLOC_REG)
 *   MVPP2_BM_PHY_RLS_REG     (related to MVPP2_BM_VIRT_RLS_REG)
 *   MVPP2_RXQ_THRESH_REG     (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_ADDR_REG  (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_SIZE_REG  (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_INDEX_REG      (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_TXQ_PENDING_REG    (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_ADDR_REG  (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_SIZE_REG  (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_INDEX_REG      (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_PREF_BUF_REG   (related to MVPP2_TXQ_NUM_REG)
 */
static void mvpp2_percpu_write(struct mvpp2 *priv, int cpu,
			       u32 offset, u32 data)
{
	writel(data, priv->cpu_base[cpu] + offset);
}

static u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu,
			     u32 offset)
{
	return readl(priv->cpu_base[cpu] + offset);
}
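
/* Typical usage (illustrative sketch): an indirect queue access first
 * selects the queue, then touches the related registers from the same
 * CPU window:
 *
 *	cpu = get_cpu();
 *	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
 *	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, addr);
 *	put_cpu();
 */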

static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return tx_desc->pp21.buf_dma_addr;
	else
		return tx_desc->pp22.buf_dma_addr_ptp & GENMASK_ULL(40, 0);
}

static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
				      struct mvpp2_tx_desc *tx_desc,
				      dma_addr_t dma_addr)
{
	if (port->priv->hw_version == MVPP21) {
		tx_desc->pp21.buf_dma_addr = dma_addr;
	} else {
		u64 val = (u64)dma_addr;

		tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0);
		tx_desc->pp22.buf_dma_addr_ptp |= val;
	}
}

static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return tx_desc->pp21.data_size;
	else
		return tx_desc->pp22.data_size;
}

static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
				  struct mvpp2_tx_desc *tx_desc,
				  size_t size)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.data_size = size;
	else
		tx_desc->pp22.data_size = size;
}

static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int txq)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.phys_txq = txq;
	else
		tx_desc->pp22.phys_txq = txq;
}

static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int command)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.command = command;
	else
		tx_desc->pp22.command = command;
}

static void mvpp2_txdesc_offset_set(struct mvpp2_port *port,
				    struct mvpp2_tx_desc *tx_desc,
				    unsigned int offset)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.packet_offset = offset;
	else
		tx_desc->pp22.packet_offset = offset;
}

static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return tx_desc->pp21.packet_offset;
	else
		return tx_desc->pp22.packet_offset;
}

static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.buf_dma_addr;
	else
		return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0);
}

static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
					     struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.buf_cookie;
	else
		return rx_desc->pp22.buf_cookie_misc & GENMASK_ULL(40, 0);
}

static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.data_size;
	else
		return rx_desc->pp22.data_size;
}

static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
				   struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.status;
	else
		return rx_desc->pp22.status;
}

static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{
	txq_pcpu->txq_get_index++;
	if (txq_pcpu->txq_get_index == txq_pcpu->size)
		txq_pcpu->txq_get_index = 0;
}

static void mvpp2_txq_inc_put(struct mvpp2_port *port,
			      struct mvpp2_txq_pcpu *txq_pcpu,
			      struct sk_buff *skb,
			      struct mvpp2_tx_desc *tx_desc)
{
	struct mvpp2_txq_pcpu_buf *tx_buf =
		txq_pcpu->buffs + txq_pcpu->txq_put_index;
	tx_buf->skb = skb;
	tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc);
	tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) +
		mvpp2_txdesc_offset_get(port, tx_desc);
	txq_pcpu->txq_put_index++;
	if (txq_pcpu->txq_put_index == txq_pcpu->size)
		txq_pcpu->txq_put_index = 0;
}

/* Get number of physical egress port */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
	return MVPP2_MAX_TCONT + port->id;
}

/* Get number of physical TXQ */
static inline int mvpp2_txq_phys(int port, int txq)
{
	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}
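
/* e.g. port 1, logical txq 3 maps to physical TXQ
 * (MVPP2_MAX_TCONT + 1) * MVPP2_MAX_TXQ + 3 = (16 + 1) * 8 + 3 = 139.
 */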

/* Parser configuration routines */

/* Update parser tcam and sram hw entries */
static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Clear entry invalidation bit */
	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);

	return 0;
}

/* Read tcam entry from hw */
static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);

	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
			      MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
	if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
		return MVPP2_PRS_TCAM_ENTRY_INVALID;

	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));

	return 0;
}

/* Invalidate tcam hw entry */
static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
{
	/* Write index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
	mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
		    MVPP2_PRS_TCAM_INV_MASK);
}

/* Enable shadow table entry and set its lookup ID */
static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
{
	priv->prs_shadow[index].valid = true;
	priv->prs_shadow[index].lu = lu;
}

/* Update ri fields in shadow table entry */
static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
				    unsigned int ri, unsigned int ri_mask)
{
	priv->prs_shadow[index].ri_mask = ri_mask;
	priv->prs_shadow[index].ri = ri;
}

/* Update lookup field in tcam sw entry */
static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
	pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
}

/* Update mask for single port in tcam sw entry */
static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
				    unsigned int port, bool add)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	if (add)
		pe->tcam.byte[enable_off] &= ~(1 << port);
	else
		pe->tcam.byte[enable_off] |= 1 << port;
}

/* Update port map in tcam sw entry */
static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
					unsigned int ports)
{
	unsigned char port_mask = MVPP2_PRS_PORT_MASK;
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
	pe->tcam.byte[enable_off] &= ~port_mask;
	pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
}

/* Obtain port map from tcam sw entry */
static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
}
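
/* Note the inverted encoding: mvpp2_prs_tcam_port_map_set(pe, BIT(0))
 * (match port 0 only) stores ~BIT(0) & 0xff = 0xfe in the enable byte,
 * and mvpp2_prs_tcam_port_map_get() inverts it back to 0x01.
 */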

/* Set byte of data and its enable bits in tcam sw entry */
static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char byte,
					 unsigned char enable)
{
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
}

/* Get byte of data and its enable bits from tcam sw entry */
static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char *byte,
					 unsigned char *enable)
{
	*byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
	*enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
}

/* Compare tcam data bytes with a pattern */
static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
				    u16 data)
{
	int off = MVPP2_PRS_TCAM_DATA_BYTE(offs);
	u16 tcam_data;

	tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off];
	if (tcam_data != data)
		return false;
	return true;
}
1437 static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
1438 unsigned int bits, unsigned int enable)
1440 int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;
1442 for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
1444 if (!(enable & BIT(i)))
1448 pe->tcam.byte[ai_idx] |= 1 << i;
1450 pe->tcam.byte[ai_idx] &= ~(1 << i);
1453 pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable;
1456 /* Get ai bits from tcam sw entry */
1457 static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
1459 return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];

/* Set ethertype in tcam sw entry */
static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
				  unsigned short ethertype)
{
	mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
}

/* Set bits in sram sw entry */
static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
				    int val)
{
	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
}

/* Clear bits in sram sw entry */
static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
				      int val)
{
	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
}

/* Update ri bits in sram sw entry */
static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;

	for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
		int ri_off = MVPP2_PRS_SRAM_RI_OFFS;

		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
	}
}

/* Obtain ri bits from sram sw entry */
static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
{
	return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
}

/* Update ai bits in sram sw entry */
static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;
	int ai_off = MVPP2_PRS_SRAM_AI_OFFS;

	for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
	}
}

/* Read ai bits from sram sw entry */
static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
{
	u8 bits;
	int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
	int ai_en_off = ai_off + 1;
	int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;

	bits = (pe->sram.byte[ai_off] >> ai_shift) |
	       (pe->sram.byte[ai_en_off] << (8 - ai_shift));

	return bits;
}

/* In sram sw entry set lookup ID field of the tcam key to be used in the next
 * lookup iteration
 */
static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
				       unsigned int lu)
{
	int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;

	mvpp2_prs_sram_bits_clear(pe, sram_next_off,
				  MVPP2_PRS_SRAM_NEXT_LU_MASK);
	mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
}

/* In the sram sw entry set sign and value of the next lookup offset
 * and the offset value generated to the classifier
 */
static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
				     unsigned int op)
{
	/* Set sign */
	if (shift < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
		shift = 0 - shift;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
	}

	/* Set value */
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
		(unsigned char)shift;

	/* Reset and set operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}

/* In the sram sw entry set sign and value of the user defined offset
 * generated to the classifier
 */
static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
				      unsigned int type, int offset,
				      unsigned int op)
{
	/* Set sign */
	if (offset < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
		offset = 0 - offset;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
	}

	/* Set value */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
				  MVPP2_PRS_SRAM_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] &=
	      ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] |=
				(offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));

	/* Set offset type */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
				  MVPP2_PRS_SRAM_UDF_TYPE_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);

	/* Set offset operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
					     ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
				    (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
			     (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}

/* Find parser flow entry */
static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);

	/* Go through all the entries with MVPP2_PRS_LU_FLOWS */
	for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
		u8 bits;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
			continue;

		pe->index = tid;
		mvpp2_prs_hw_read(priv, pe);
		bits = mvpp2_prs_sram_ai_get(pe);

		/* Sram stores the classification lookup ID in AI bits [5:0] */
		if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
			return pe;
	}
	kfree(pe);

	return NULL;
}

/* Return first free tcam index, seeking from start to end */
static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
				     unsigned char end)
{
	int tid;

	if (start > end)
		swap(start, end);

	if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
		end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;

	for (tid = start; tid <= end; tid++) {
		if (!priv->prs_shadow[tid].valid)
			return tid;
	}

	return -EINVAL;
}

/* Enable/disable dropping all mac da's */
static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
		/* Entry exists - update port only */
		pe.index = MVPP2_PE_DROP_ALL;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_DROP_ALL;

		/* Non-promiscuous mode for all ports - DROP unknown packets */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
					 MVPP2_PRS_RI_DROP_MASK);

		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}

/* Set port to promiscuous mode */
static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	/* Promiscuous mode - Accept unknown packets */

	if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
		/* Entry exists - update port only */
		pe.index = MVPP2_PE_MAC_PROMISCUOUS;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_MAC_PROMISCUOUS;

		/* Continue - set next lookup */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

		/* Set result info bits */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
					 MVPP2_PRS_RI_L2_CAST_MASK);

		/* Shift to ethertype */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}

/* Accept multicast */
static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index,
				    bool add)
{
	struct mvpp2_prs_entry pe;
	unsigned char da_mc;

	/* Ethernet multicast address first byte is
	 * 0x01 for IPv4 and 0x33 for IPv6
	 */
	da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33;

	if (priv->prs_shadow[index].valid) {
		/* Entry exists - update port only */
		pe.index = index;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = index;

		/* Continue - set next lookup */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

		/* Set result info bits */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST,
					 MVPP2_PRS_RI_L2_CAST_MASK);

		/* Update tcam entry data first byte */
		mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff);

		/* Shift to ethertype */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}

/* Set entry for dsa packets */
static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
				  bool tagged, bool extend)
{
	struct mvpp2_prs_entry pe;
	int tid, shift;

	if (extend) {
		tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
		shift = 8;
	} else {
		tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
		shift = 4;
	}

	if (priv->prs_shadow[tid].valid) {
		/* Entry exists - update port only */
		pe.index = tid;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;

		/* Shift 4 bytes if DSA tag or 8 bytes in case of EDSA tag */
		mvpp2_prs_sram_shift_set(&pe, shift,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);

		if (tagged) {
			/* Set tagged bit in DSA tag */
			mvpp2_prs_tcam_data_byte_set(&pe, 0,
						     MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
						     MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
			/* Clear all ai bits for next iteration */
			mvpp2_prs_sram_ai_update(&pe, 0,
						 MVPP2_PRS_SRAM_AI_MASK);
			/* If packet is tagged continue check vlans */
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		} else {
			/* Set result info bits to 'no vlans' */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
						 MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
1871 /* Set entry for dsa ethertype */
1872 static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
1873 bool add, bool tagged, bool extend)
1875 struct mvpp2_prs_entry pe;
1876 int tid, shift, port_mask;
1879 tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED :
1880 MVPP2_PE_ETYPE_EDSA_UNTAGGED;
1884 tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED :
1885 MVPP2_PE_ETYPE_DSA_UNTAGGED;
1886 port_mask = MVPP2_PRS_PORT_MASK;
1890 if (priv->prs_shadow[tid].valid) {
1891 /* Entry exists - update port only */
1893 mvpp2_prs_hw_read(priv, &pe);
1895 /* Entry doesn't exist - create new */
1896 memset(&pe, 0, sizeof(pe));
1897 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
1901 mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA);
1902 mvpp2_prs_match_etype(&pe, 2, 0);
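/* The two matches above cover ETH_P_EDSA in the first two bytes plus the
 * zero 2 byte reserved field that follows it.
 */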
1904 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
1905 MVPP2_PRS_RI_DSA_MASK);
1906 /* Shift ethertype + 2 reserved bytes + tag */
1907 mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift,
1908 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1910 /* Update shadow table */
1911 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
1914 /* Set tagged bit in DSA tag */
1915 mvpp2_prs_tcam_data_byte_set(&pe,
1916 MVPP2_ETH_TYPE_LEN + 2 + 3,
1917 MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
1918 MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
1919 /* Clear all ai bits for next iteration */
1920 mvpp2_prs_sram_ai_update(&pe, 0,
1921 MVPP2_PRS_SRAM_AI_MASK);
1922 /* If packet is tagged, continue checking vlans */
1923 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
1925 /* Set result info bits to 'no vlans' */
1926 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
1927 MVPP2_PRS_RI_VLAN_MASK);
1928 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
1930 /* Mask/unmask all ports, depending on dsa type */
1931 mvpp2_prs_tcam_port_map_set(&pe, port_mask);
1934 /* Update port mask */
1935 mvpp2_prs_tcam_port_set(&pe, port, add);
1937 mvpp2_prs_hw_write(priv, &pe);
1940 /* Search for existing single/triple vlan entry */
1941 static struct mvpp2_prs_entry *mvpp2_prs_vlan_find(struct mvpp2 *priv,
1942 unsigned short tpid, int ai)
1944 struct mvpp2_prs_entry *pe;
1947 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1950 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
1952 /* Go through all entries with MVPP2_PRS_LU_VLAN */
1953 for (tid = MVPP2_PE_FIRST_FREE_TID;
1954 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
1955 unsigned int ri_bits, ai_bits;
1958 if (!priv->prs_shadow[tid].valid ||
1959 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
1964 mvpp2_prs_hw_read(priv, pe);
1965 match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid));
1970 ri_bits = mvpp2_prs_sram_ri_get(pe);
1971 ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
1973 /* Get current ai value from tcam */
1974 ai_bits = mvpp2_prs_tcam_ai_get(pe);
1975 /* Clear double vlan bit */
1976 ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;
1981 if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
1982 ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
1990 /* Add/update single/triple vlan entry */
1991 static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
1992 unsigned int port_map)
1994 struct mvpp2_prs_entry *pe;
1998 pe = mvpp2_prs_vlan_find(priv, tpid, ai);
2001 /* Create new tcam entry */
2002 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
2003 MVPP2_PE_FIRST_FREE_TID);
2007 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2011 /* Get last double vlan tid */
2012 for (tid_aux = MVPP2_PE_LAST_FREE_TID;
2013 tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
2014 unsigned int ri_bits;
2016 if (!priv->prs_shadow[tid_aux].valid ||
2017 priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
2020 pe->index = tid_aux;
2021 mvpp2_prs_hw_read(priv, pe);
2022 ri_bits = mvpp2_prs_sram_ri_get(pe);
2023 if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
2024 MVPP2_PRS_RI_VLAN_DOUBLE)
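/* The new single/triple vlan entry must be placed after the last double
 * vlan entry, so that double-tagged packets keep hitting the more specific
 * double vlan rules; any other layout is rejected below.
 */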
2028 if (tid <= tid_aux) {
2033 memset(pe, 0, sizeof(*pe));
2034 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
2037 mvpp2_prs_match_etype(pe, 0, tpid);
2039 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_L2);
2040 /* Shift 4 bytes - skip 1 vlan tag */
2041 mvpp2_prs_sram_shift_set(pe, MVPP2_VLAN_TAG_LEN,
2042 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2043 /* Clear all ai bits for next iteration */
2044 mvpp2_prs_sram_ai_update(pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2046 if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
2047 mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_SINGLE,
2048 MVPP2_PRS_RI_VLAN_MASK);
2050 ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
2051 mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_TRIPLE,
2052 MVPP2_PRS_RI_VLAN_MASK);
2054 mvpp2_prs_tcam_ai_update(pe, ai, MVPP2_PRS_SRAM_AI_MASK);
2056 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
2058 /* Update ports' mask */
2059 mvpp2_prs_tcam_port_map_set(pe, port_map);
2061 mvpp2_prs_hw_write(priv, pe);
2068 /* Get first free double vlan ai number */
2069 static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv)
2073 for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) {
2074 if (!priv->prs_double_vlans[i])
2081 /* Search for existing double vlan entry */
2082 static struct mvpp2_prs_entry *mvpp2_prs_double_vlan_find(struct mvpp2 *priv,
2083 unsigned short tpid1,
2084 unsigned short tpid2)
2086 struct mvpp2_prs_entry *pe;
2089 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2092 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
2094 /* Go through all entries with MVPP2_PRS_LU_VLAN */
2095 for (tid = MVPP2_PE_FIRST_FREE_TID;
2096 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
2097 unsigned int ri_mask;
2100 if (!priv->prs_shadow[tid].valid ||
2101 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
2105 mvpp2_prs_hw_read(priv, pe);
2107 match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid1)) &&
2108 mvpp2_prs_tcam_data_cmp(pe, 4, swab16(tpid2));
2113 ri_mask = mvpp2_prs_sram_ri_get(pe) & MVPP2_PRS_RI_VLAN_MASK;
2114 if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
2122 /* Add or update double vlan entry */
2123 static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
2124 unsigned short tpid2,
2125 unsigned int port_map)
2127 struct mvpp2_prs_entry *pe;
2128 int tid_aux, tid, ai, ret = 0;
2130 pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);
2133 /* Create new tcam entry */
2134 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2135 MVPP2_PE_LAST_FREE_TID);
2139 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2143 /* Set ai value for new double vlan entry */
2144 ai = mvpp2_prs_double_vlan_ai_free_get(priv);
2150 /* Get first single/triple vlan tid */
2151 for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
2152 tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
2153 unsigned int ri_bits;
2155 if (!priv->prs_shadow[tid_aux].valid ||
2156 priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
2159 pe->index = tid_aux;
2160 mvpp2_prs_hw_read(priv, pe);
2161 ri_bits = mvpp2_prs_sram_ri_get(pe);
2162 ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
2163 if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
2164 ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
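/* Conversely, the new double vlan entry must be placed before the first
 * single/triple vlan entry; otherwise the layout is rejected below.
 */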
2168 if (tid >= tid_aux) {
2173 memset(pe, 0, sizeof(*pe));
2174 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
2177 priv->prs_double_vlans[ai] = true;
2179 mvpp2_prs_match_etype(pe, 0, tpid1);
2180 mvpp2_prs_match_etype(pe, 4, tpid2);
2182 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VLAN);
2183 /* Shift 8 bytes - skip 2 vlan tags */
2184 mvpp2_prs_sram_shift_set(pe, 2 * MVPP2_VLAN_TAG_LEN,
2185 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2186 mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_DOUBLE,
2187 MVPP2_PRS_RI_VLAN_MASK);
2188 mvpp2_prs_sram_ai_update(pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
2189 MVPP2_PRS_SRAM_AI_MASK);
2191 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
2194 /* Update ports' mask */
2195 mvpp2_prs_tcam_port_map_set(pe, port_map);
2196 mvpp2_prs_hw_write(priv, pe);
2202 /* IPv4 header parsing for fragmentation and L4 offset */
2203 static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
2204 unsigned int ri, unsigned int ri_mask)
2206 struct mvpp2_prs_entry pe;
2209 if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
2210 (proto != IPPROTO_IGMP))
2213 /* Fragmented packet */
2214 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2215 MVPP2_PE_LAST_FREE_TID);
2219 memset(&pe, 0, sizeof(pe));
2220 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2223 /* Set next lu to IPv4 */
2224 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2225 mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2227 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2228 sizeof(struct iphdr) - 4,
2229 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2230 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2231 MVPP2_PRS_IPV4_DIP_AI_BIT);
2232 mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_MASK,
2233 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
2235 mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
2236 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
2237 /* Unmask all ports */
2238 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2240 /* Update shadow table and hw entry */
2241 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2242 mvpp2_prs_hw_write(priv, &pe);
2244 /* Non-fragmented packet */
2245 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2246 MVPP2_PE_LAST_FREE_TID);
2251 /* Clear ri before updating */
2252 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
2253 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
2254 mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
2256 mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, MVPP2_PRS_TCAM_PROTO_MASK_L);
2257 mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, MVPP2_PRS_TCAM_PROTO_MASK);
2259 /* Update shadow table and hw entry */
2260 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2261 mvpp2_prs_hw_write(priv, &pe);
2266 /* IPv4 L3 multicast or broadcast */
2267 static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
2269 struct mvpp2_prs_entry pe;
2272 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2273 MVPP2_PE_LAST_FREE_TID);
2277 memset(&pe, 0, sizeof(pe));
2278 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2282 case MVPP2_PRS_L3_MULTI_CAST:
2283 mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
2284 MVPP2_PRS_IPV4_MC_MASK);
2285 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
2286 MVPP2_PRS_RI_L3_ADDR_MASK);
2288 case MVPP2_PRS_L3_BROAD_CAST:
2289 mask = MVPP2_PRS_IPV4_BC_MASK;
2290 mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
2291 mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
2292 mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
2293 mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
2294 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
2295 MVPP2_PRS_RI_L3_ADDR_MASK);
2301 /* Finished: go to flowid generation */
2302 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2303 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2305 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2306 MVPP2_PRS_IPV4_DIP_AI_BIT);
2307 /* Unmask all ports */
2308 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2310 /* Update shadow table and hw entry */
2311 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2312 mvpp2_prs_hw_write(priv, &pe);
2317 /* Set entries for protocols over IPv6 */
2318 static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
2319 unsigned int ri, unsigned int ri_mask)
2321 struct mvpp2_prs_entry pe;
2324 if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
2325 (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
2328 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2329 MVPP2_PE_LAST_FREE_TID);
2333 memset(&pe, 0, sizeof(pe));
2334 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2337 /* Finished: go to flowid generation */
2338 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2339 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2340 mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
2341 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2342 sizeof(struct ipv6hdr) - 6,
2343 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2345 mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
2346 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2347 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2348 /* Unmask all ports */
2349 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2352 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2353 mvpp2_prs_hw_write(priv, &pe);
2358 /* IPv6 L3 multicast entry */
2359 static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
2361 struct mvpp2_prs_entry pe;
2364 if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
2367 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2368 MVPP2_PE_LAST_FREE_TID);
2372 memset(&pe, 0, sizeof(pe));
2373 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2376 /* Continue - go to IPv6 again */
2377 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2378 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
2379 MVPP2_PRS_RI_L3_ADDR_MASK);
2380 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2381 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2382 /* Shift back to IPv6 NH */
2383 mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2385 mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
2386 MVPP2_PRS_IPV6_MC_MASK);
2387 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2388 /* Unmask all ports */
2389 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2391 /* Update shadow table and hw entry */
2392 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2393 mvpp2_prs_hw_write(priv, &pe);
2398 /* Parser per-port initialization */
2399 static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
2400 int lu_max, int offset)
2405 val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
2406 val &= ~MVPP2_PRS_PORT_LU_MASK(port);
2407 val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
2408 mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);
2410 /* Set maximum number of loops for packet received from port */
2411 val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
2412 val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
2413 val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
2414 mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);
2416 /* Set initial offset for packet header extraction for the first searching loop */
2419 val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
2420 val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
2421 val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
2422 mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
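/* Illustrative call (it mirrors what mvpp2_prs_default_init() does below):
 * start parsing on port 0 at the Marvell Header lookup, with the maximum
 * loop count and a zero initial header offset:
 *
 *	mvpp2_prs_hw_port_init(priv, 0, MVPP2_PRS_LU_MH,
 *			       MVPP2_PRS_PORT_LU_MAX, 0);
 */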
2425 /* Default flow entries initialization for all ports */
2426 static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
2428 struct mvpp2_prs_entry pe;
2431 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
2432 memset(&pe, 0, sizeof(pe));
2433 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2434 pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;
2436 /* Mask all ports */
2437 mvpp2_prs_tcam_port_map_set(&pe, 0);
2440 mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
2441 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
2443 /* Update shadow table and hw entry */
2444 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
2445 mvpp2_prs_hw_write(priv, &pe);
2449 /* Set default entry for Marvell Header field */
2450 static void mvpp2_prs_mh_init(struct mvpp2 *priv)
2452 struct mvpp2_prs_entry pe;
2454 memset(&pe, 0, sizeof(pe));
2456 pe.index = MVPP2_PE_MH_DEFAULT;
2457 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
2458 mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
2459 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2460 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
2462 /* Unmask all ports */
2463 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2465 /* Update shadow table and hw entry */
2466 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
2467 mvpp2_prs_hw_write(priv, &pe);
2470 /* Set default entries (placeholders) for promiscuous, non-promiscuous and
2471 * multicast MAC addresses
2473 static void mvpp2_prs_mac_init(struct mvpp2 *priv)
2475 struct mvpp2_prs_entry pe;
2477 memset(&pe, 0, sizeof(pe));
2479 /* Non-promiscuous mode for all ports - DROP unknown packets */
2480 pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
2481 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
2483 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
2484 MVPP2_PRS_RI_DROP_MASK);
2485 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2486 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2488 /* Unmask all ports */
2489 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2491 /* Update shadow table and hw entry */
2492 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
2493 mvpp2_prs_hw_write(priv, &pe);
2495 /* placeholders only - no ports */
2496 mvpp2_prs_mac_drop_all_set(priv, 0, false);
2497 mvpp2_prs_mac_promisc_set(priv, 0, false);
2498 mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false);
2499 mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false);
2502 /* Set default entries for various types of dsa packets */
2503 static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
2505 struct mvpp2_prs_entry pe;
2507 /* Untagged EDSA entry - placeholder */
2508 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
2511 /* Tagged EDSA entry - placeholder */
2512 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2514 /* Untagged DSA entry - placeholder */
2515 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
2518 /* Tagged DSA entry - placeholder */
2519 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2521 /* Untagged EDSA ethertype entry - placeholder */
2522 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
2523 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
2525 /* Tagged EDSA ethertype entry - placeholder */
2526 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
2527 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2529 /* Untagged DSA ethertype entry */
2530 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
2531 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
2533 /* Tagged DSA ethertype entry */
2534 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
2535 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2537 /* Set default entry, in case the DSA or EDSA tag is not found */
2538 memset(&pe, 0, sizeof(pe));
2539 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
2540 pe.index = MVPP2_PE_DSA_DEFAULT;
2541 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2544 mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2545 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
2547 /* Clear all sram ai bits for next iteration */
2548 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2550 /* Unmask all ports */
2551 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2553 mvpp2_prs_hw_write(priv, &pe);
2556 /* Match basic ethertypes */
2557 static int mvpp2_prs_etype_init(struct mvpp2 *priv)
2559 struct mvpp2_prs_entry pe;
2562 /* Ethertype: PPPoE */
2563 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2564 MVPP2_PE_LAST_FREE_TID);
2568 memset(&pe, 0, sizeof(pe));
2569 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2572 mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);
2574 mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
2575 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2576 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2577 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
2578 MVPP2_PRS_RI_PPPOE_MASK);
2580 /* Update shadow table and hw entry */
2581 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2582 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2583 priv->prs_shadow[pe.index].finish = false;
2584 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
2585 MVPP2_PRS_RI_PPPOE_MASK);
2586 mvpp2_prs_hw_write(priv, &pe);
2588 /* Ethertype: ARP */
2589 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2590 MVPP2_PE_LAST_FREE_TID);
2594 memset(&pe, 0, sizeof(pe));
2595 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2598 mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);
2600 /* Generate flow in the next iteration */
2601 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2602 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2603 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
2604 MVPP2_PRS_RI_L3_PROTO_MASK);
2606 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2608 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2610 /* Update shadow table and hw entry */
2611 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2612 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2613 priv->prs_shadow[pe.index].finish = true;
2614 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
2615 MVPP2_PRS_RI_L3_PROTO_MASK);
2616 mvpp2_prs_hw_write(priv, &pe);
2618 /* Ethertype: LBTD */
2619 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2620 MVPP2_PE_LAST_FREE_TID);
2624 memset(&pe, 0, sizeof(pe));
2625 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2628 mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);
2630 /* Generate flow in the next iteration */
2631 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2632 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2633 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2634 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2635 MVPP2_PRS_RI_CPU_CODE_MASK |
2636 MVPP2_PRS_RI_UDF3_MASK);
2638 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2640 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2642 /* Update shadow table and hw entry */
2643 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2644 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2645 priv->prs_shadow[pe.index].finish = true;
2646 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2647 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2648 MVPP2_PRS_RI_CPU_CODE_MASK |
2649 MVPP2_PRS_RI_UDF3_MASK);
2650 mvpp2_prs_hw_write(priv, &pe);
2652 /* Ethertype: IPv4 without options */
2653 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2654 MVPP2_PE_LAST_FREE_TID);
2658 memset(&pe, 0, sizeof(pe));
2659 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2662 mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
2663 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2664 MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
2665 MVPP2_PRS_IPV4_HEAD_MASK |
2666 MVPP2_PRS_IPV4_IHL_MASK);
2668 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2669 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
2670 MVPP2_PRS_RI_L3_PROTO_MASK);
2671 /* Skip eth_type + 4 bytes of IP header */
2672 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2673 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2675 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2677 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2679 /* Update shadow table and hw entry */
2680 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2681 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2682 priv->prs_shadow[pe.index].finish = false;
2683 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
2684 MVPP2_PRS_RI_L3_PROTO_MASK);
2685 mvpp2_prs_hw_write(priv, &pe);
2687 /* Ethertype: IPv4 with options */
2688 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2689 MVPP2_PE_LAST_FREE_TID);
2695 /* Clear tcam data before updating */
2696 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
2697 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;
2699 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2700 MVPP2_PRS_IPV4_HEAD,
2701 MVPP2_PRS_IPV4_HEAD_MASK);
2703 /* Clear ri before updating */
2704 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
2705 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
2706 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
2707 MVPP2_PRS_RI_L3_PROTO_MASK);
2709 /* Update shadow table and hw entry */
2710 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2711 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2712 priv->prs_shadow[pe.index].finish = false;
2713 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
2714 MVPP2_PRS_RI_L3_PROTO_MASK);
2715 mvpp2_prs_hw_write(priv, &pe);
2717 /* Ethertype: IPv6 without options */
2718 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2719 MVPP2_PE_LAST_FREE_TID);
2723 memset(&pe, 0, sizeof(pe));
2724 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2727 mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);
2729 /* Skip DIP of IPv6 header */
2730 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
2731 MVPP2_MAX_L3_ADDR_SIZE,
2732 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2733 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2734 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
2735 MVPP2_PRS_RI_L3_PROTO_MASK);
2737 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2739 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2741 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2742 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2743 priv->prs_shadow[pe.index].finish = false;
2744 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
2745 MVPP2_PRS_RI_L3_PROTO_MASK);
2746 mvpp2_prs_hw_write(priv, &pe);
2748 /* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
2749 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2750 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2751 pe.index = MVPP2_PE_ETH_TYPE_UN;
2753 /* Unmask all ports */
2754 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2756 /* Generate flow in the next iteration */
2757 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2758 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2759 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
2760 MVPP2_PRS_RI_L3_PROTO_MASK);
2761 /* Set L3 offset even if it's unknown L3 */
2762 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2764 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2766 /* Update shadow table and hw entry */
2767 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2768 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2769 priv->prs_shadow[pe.index].finish = true;
2770 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
2771 MVPP2_PRS_RI_L3_PROTO_MASK);
2772 mvpp2_prs_hw_write(priv, &pe);
2777 /* Configure vlan entries and detect up to 2 successive VLAN tags. Possible combinations: 0x8100 + 0x88a8, 0x8100 + 0x8100, 0x8100 alone, or 0x88a8 alone. */
2784 static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
2786 struct mvpp2_prs_entry pe;
2789 priv->prs_double_vlans = devm_kcalloc(&pdev->dev, MVPP2_PRS_DBL_VLANS_MAX,
2790 sizeof(bool),
2792 if (!priv->prs_double_vlans)
2795 /* Double VLAN: 0x8100, 0x88A8 */
2796 err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
2797 MVPP2_PRS_PORT_MASK);
2801 /* Double VLAN: 0x8100, 0x8100 */
2802 err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q,
2803 MVPP2_PRS_PORT_MASK);
2807 /* Single VLAN: 0x88a8 */
2808 err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
2809 MVPP2_PRS_PORT_MASK);
2813 /* Single VLAN: 0x8100 */
2814 err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
2815 MVPP2_PRS_PORT_MASK);
2819 /* Set default double vlan entry */
2820 memset(&pe, 0, sizeof(pe));
2821 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2822 pe.index = MVPP2_PE_VLAN_DBL;
2824 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2825 /* Clear ai for next iterations */
2826 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2827 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
2828 MVPP2_PRS_RI_VLAN_MASK);
2830 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
2831 MVPP2_PRS_DBL_VLAN_AI_BIT);
2832 /* Unmask all ports */
2833 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2835 /* Update shadow table and hw entry */
2836 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
2837 mvpp2_prs_hw_write(priv, &pe);
2839 /* Set default vlan none entry */
2840 memset(&pe, 0, sizeof(pe));
2841 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2842 pe.index = MVPP2_PE_VLAN_NONE;
2844 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2845 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
2846 MVPP2_PRS_RI_VLAN_MASK);
2848 /* Unmask all ports */
2849 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2851 /* Update shadow table and hw entry */
2852 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
2853 mvpp2_prs_hw_write(priv, &pe);
2858 /* Set entries for PPPoE ethertype */
2859 static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
2861 struct mvpp2_prs_entry pe;
2864 /* IPv4 over PPPoE with options */
2865 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2866 MVPP2_PE_LAST_FREE_TID);
2870 memset(&pe, 0, sizeof(pe));
2871 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2874 mvpp2_prs_match_etype(&pe, 0, PPP_IP);
2876 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2877 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
2878 MVPP2_PRS_RI_L3_PROTO_MASK);
2879 /* Skip eth_type + 4 bytes of IP header */
2880 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2881 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2883 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2885 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2887 /* Update shadow table and hw entry */
2888 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2889 mvpp2_prs_hw_write(priv, &pe);
2891 /* IPv4 over PPPoE without options */
2892 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2893 MVPP2_PE_LAST_FREE_TID);
2899 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2900 MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
2901 MVPP2_PRS_IPV4_HEAD_MASK |
2902 MVPP2_PRS_IPV4_IHL_MASK);
2904 /* Clear ri before updating */
2905 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
2906 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
2907 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
2908 MVPP2_PRS_RI_L3_PROTO_MASK);
2910 /* Update shadow table and hw entry */
2911 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2912 mvpp2_prs_hw_write(priv, &pe);
2914 /* IPv6 over PPPoE */
2915 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2916 MVPP2_PE_LAST_FREE_TID);
2920 memset(&pe, 0, sizeof(pe));
2921 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2924 mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);
2926 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2927 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
2928 MVPP2_PRS_RI_L3_PROTO_MASK);
2929 /* Skip eth_type + 4 bytes of IPv6 header */
2930 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2931 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2933 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2935 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2937 /* Update shadow table and hw entry */
2938 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2939 mvpp2_prs_hw_write(priv, &pe);
2941 /* Non-IP over PPPoE */
2942 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2943 MVPP2_PE_LAST_FREE_TID);
2947 memset(&pe, 0, sizeof(pe));
2948 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2951 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
2952 MVPP2_PRS_RI_L3_PROTO_MASK);
2954 /* Finished: go to flowid generation */
2955 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2956 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2957 /* Set L3 offset even if it's unknown L3 */
2958 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2960 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2962 /* Update shadow table and hw entry */
2963 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2964 mvpp2_prs_hw_write(priv, &pe);
2969 /* Initialize entries for IPv4 */
2970 static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
2972 struct mvpp2_prs_entry pe;
2975 /* Set entries for TCP, UDP and IGMP over IPv4 */
2976 err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
2977 MVPP2_PRS_RI_L4_PROTO_MASK);
2981 err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
2982 MVPP2_PRS_RI_L4_PROTO_MASK);
2986 err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
2987 MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2988 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2989 MVPP2_PRS_RI_CPU_CODE_MASK |
2990 MVPP2_PRS_RI_UDF3_MASK);
2994 /* IPv4 Broadcast */
2995 err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
2999 /* IPv4 Multicast */
3000 err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
3004 /* Default IPv4 entry for unknown protocols */
3005 memset(&pe, 0, sizeof(pe));
3006 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
3007 pe.index = MVPP2_PE_IP4_PROTO_UN;
3009 /* Set next lu to IPv4 */
3010 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
3011 mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3013 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
3014 sizeof(struct iphdr) - 4,
3015 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
3016 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
3017 MVPP2_PRS_IPV4_DIP_AI_BIT);
3018 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
3019 MVPP2_PRS_RI_L4_PROTO_MASK);
3021 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
3022 /* Unmask all ports */
3023 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3025 /* Update shadow table and hw entry */
3026 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
3027 mvpp2_prs_hw_write(priv, &pe);
3029 /* Default IPv4 entry for unicast address */
3030 memset(&pe, 0, sizeof(pe));
3031 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
3032 pe.index = MVPP2_PE_IP4_ADDR_UN;
3034 /* Finished: go to flowid generation */
3035 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3036 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3037 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
3038 MVPP2_PRS_RI_L3_ADDR_MASK);
3040 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
3041 MVPP2_PRS_IPV4_DIP_AI_BIT);
3042 /* Unmask all ports */
3043 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3045 /* Update shadow table and hw entry */
3046 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
3047 mvpp2_prs_hw_write(priv, &pe);
3052 /* Initialize entries for IPv6 */
3053 static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
3055 struct mvpp2_prs_entry pe;
3058 /* Set entries for TCP, UDP and ICMP over IPv6 */
3059 err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
3060 MVPP2_PRS_RI_L4_TCP,
3061 MVPP2_PRS_RI_L4_PROTO_MASK);
3065 err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
3066 MVPP2_PRS_RI_L4_UDP,
3067 MVPP2_PRS_RI_L4_PROTO_MASK);
3071 err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
3072 MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
3073 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
3074 MVPP2_PRS_RI_CPU_CODE_MASK |
3075 MVPP2_PRS_RI_UDF3_MASK);
3079 /* IPv4 is the last header. This is a case similar to 6-TCP or 17-UDP */
3080 /* Result Info: UDF7=1, DS lite */
3081 err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
3082 MVPP2_PRS_RI_UDF7_IP6_LITE,
3083 MVPP2_PRS_RI_UDF7_MASK);
3087 /* IPv6 multicast */
3088 err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
3092 /* Entry for checking hop limit */
3093 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
3094 MVPP2_PE_LAST_FREE_TID);
3098 memset(&pe, 0, sizeof(pe));
3099 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
3102 /* Finished: go to flowid generation */
3103 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3104 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3105 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
3106 MVPP2_PRS_RI_DROP_MASK,
3107 MVPP2_PRS_RI_L3_PROTO_MASK |
3108 MVPP2_PRS_RI_DROP_MASK);
3110 mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
3111 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
3112 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
3114 /* Update shadow table and hw entry */
3115 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
3116 mvpp2_prs_hw_write(priv, &pe);
3118 /* Default IPv6 entry for unknown protocols */
3119 memset(&pe, 0, sizeof(pe));
3120 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
3121 pe.index = MVPP2_PE_IP6_PROTO_UN;
3123 /* Finished: go to flowid generation */
3124 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3125 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3126 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
3127 MVPP2_PRS_RI_L4_PROTO_MASK);
3128 /* Set L4 offset relative to our current place */
3129 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
3130 sizeof(struct ipv6hdr) - 4,
3131 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
3133 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
3134 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
3135 /* Unmask all ports */
3136 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3138 /* Update shadow table and hw entry */
3139 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
3140 mvpp2_prs_hw_write(priv, &pe);
3142 /* Default IPv6 entry for unknown ext protocols */
3143 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
3144 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
3145 pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;
3147 /* Finished: go to flowid generation */
3148 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3149 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3150 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
3151 MVPP2_PRS_RI_L4_PROTO_MASK);
3153 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
3154 MVPP2_PRS_IPV6_EXT_AI_BIT);
3155 /* Unmask all ports */
3156 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3158 /* Update shadow table and hw entry */
3159 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
3160 mvpp2_prs_hw_write(priv, &pe);
3162 /* Default IPv6 entry for unicast address */
3163 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
3164 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
3165 pe.index = MVPP2_PE_IP6_ADDR_UN;
3167 /* Finished: go to IPv6 again */
3168 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
3169 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
3170 MVPP2_PRS_RI_L3_ADDR_MASK);
3171 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
3172 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
3173 /* Shift back to IPv6 NH */
3174 mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3176 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
3177 /* Unmask all ports */
3178 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3180 /* Update shadow table and hw entry */
3181 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
3182 mvpp2_prs_hw_write(priv, &pe);
3187 /* Parser default initialization */
3188 static int mvpp2_prs_default_init(struct platform_device *pdev,
3193 /* Enable tcam table */
3194 mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
3196 /* Clear all tcam and sram entries */
3197 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
3198 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
3199 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
3200 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);
3202 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
3203 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
3204 mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
3207 /* Invalidate all tcam entries */
3208 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
3209 mvpp2_prs_hw_inv(priv, index);
3211 priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
3212 sizeof(*priv->prs_shadow),
3214 if (!priv->prs_shadow)
3217 /* Always start from lookup = 0 */
3218 for (index = 0; index < MVPP2_MAX_PORTS; index++)
3219 mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
3220 MVPP2_PRS_PORT_LU_MAX, 0);
3222 mvpp2_prs_def_flow_init(priv);
3224 mvpp2_prs_mh_init(priv);
3226 mvpp2_prs_mac_init(priv);
3228 mvpp2_prs_dsa_init(priv);
3230 err = mvpp2_prs_etype_init(priv);
3234 err = mvpp2_prs_vlan_init(pdev, priv);
3238 err = mvpp2_prs_pppoe_init(priv);
3242 err = mvpp2_prs_ip6_init(priv);
3246 err = mvpp2_prs_ip4_init(priv);
3253 /* Compare MAC DA with tcam entry data */
3254 static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
3255 const u8 *da, unsigned char *mask)
3257 unsigned char tcam_byte, tcam_mask;
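/* A set mask bit means "must match"; clear bits act as wildcards, so an
 * all-0xff mask (the common case) is an exact DA compare.
 */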
3260 for (index = 0; index < ETH_ALEN; index++) {
3261 mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
3262 if (tcam_mask != mask[index])
3265 if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
3272 /* Find tcam entry with matched pair <MAC DA, port> */
3273 static struct mvpp2_prs_entry *
3274 mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
3275 unsigned char *mask, int udf_type)
3277 struct mvpp2_prs_entry *pe;
3280 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
3283 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
3285 /* Go through all entries with MVPP2_PRS_LU_MAC */
3286 for (tid = MVPP2_PE_FIRST_FREE_TID;
3287 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
3288 unsigned int entry_pmap;
3290 if (!priv->prs_shadow[tid].valid ||
3291 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
3292 (priv->prs_shadow[tid].udf != udf_type))
3296 mvpp2_prs_hw_read(priv, pe);
3297 entry_pmap = mvpp2_prs_tcam_port_map_get(pe);
3299 if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
3308 /* Update parser's mac da entry */
3309 static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
3310 const u8 *da, bool add)
3312 struct mvpp2_prs_entry *pe;
3313 unsigned int pmap, len, ri;
3314 unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
3317 /* Scan TCAM and see if entry with this <MAC DA, port> already exists */
3318 pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask,
3319 MVPP2_PRS_UDF_MAC_DEF);
3326 /* Create new TCAM entry */
3327 /* Find the first range mac entry */
3328 for (tid = MVPP2_PE_FIRST_FREE_TID;
3329 tid <= MVPP2_PE_LAST_FREE_TID; tid++)
3330 if (priv->prs_shadow[tid].valid &&
3331 (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) &&
3332 (priv->prs_shadow[tid].udf ==
3333 MVPP2_PRS_UDF_MAC_RANGE))
3336 /* Go through all entries from first to last */
3337 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
3342 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
3345 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
3348 /* Mask all ports */
3349 mvpp2_prs_tcam_port_map_set(pe, 0);
3352 /* Update port mask */
3353 mvpp2_prs_tcam_port_set(pe, port, add);
3355 /* Invalidate the entry if no ports are left enabled */
3356 pmap = mvpp2_prs_tcam_port_map_get(pe);
3362 mvpp2_prs_hw_inv(priv, pe->index);
3363 priv->prs_shadow[pe->index].valid = false;
3368 /* Continue - set next lookup */
3369 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);
3371 /* Set match on DA */
3374 mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);
3376 /* Set result info bits */
3377 if (is_broadcast_ether_addr(da))
3378 ri = MVPP2_PRS_RI_L2_BCAST;
3379 else if (is_multicast_ether_addr(da))
3380 ri = MVPP2_PRS_RI_L2_MCAST;
3382 ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;
3384 mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
3385 MVPP2_PRS_RI_MAC_ME_MASK);
3386 mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
3387 MVPP2_PRS_RI_MAC_ME_MASK);
3389 /* Shift to ethertype */
3390 mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
3391 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3393 /* Update shadow table and hw entry */
3394 priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
3395 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC);
3396 mvpp2_prs_hw_write(priv, pe);
3403 static int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
3405 struct mvpp2_port *port = netdev_priv(dev);
3408 /* Remove old parser entry */
3409 err = mvpp2_prs_mac_da_accept(port->priv, port->id, dev->dev_addr,
3414 /* Add new parser entry */
3415 err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true);
3419 /* Set addr in the device */
3420 ether_addr_copy(dev->dev_addr, da);
3425 /* Delete all of the port's simple (non-range) multicast entries */
3426 static void mvpp2_prs_mcast_del_all(struct mvpp2 *priv, int port)
3428 struct mvpp2_prs_entry pe;
3431 for (tid = MVPP2_PE_FIRST_FREE_TID;
3432 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
3433 unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];
3435 if (!priv->prs_shadow[tid].valid ||
3436 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
3437 (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
3440 /* Only simple mac entries */
3442 mvpp2_prs_hw_read(priv, &pe);
3444 /* Read mac addr from entry */
3445 for (index = 0; index < ETH_ALEN; index++)
3446 mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
3449 if (is_multicast_ether_addr(da) && !is_broadcast_ether_addr(da))
3450 /* Delete this entry */
3451 mvpp2_prs_mac_da_accept(priv, port, da, false);
3455 static int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
3458 case MVPP2_TAG_TYPE_EDSA:
3459 /* Add port to EDSA entries */
3460 mvpp2_prs_dsa_tag_set(priv, port, true,
3461 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3462 mvpp2_prs_dsa_tag_set(priv, port, true,
3463 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3464 /* Remove port from DSA entries */
3465 mvpp2_prs_dsa_tag_set(priv, port, false,
3466 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3467 mvpp2_prs_dsa_tag_set(priv, port, false,
3468 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3471 case MVPP2_TAG_TYPE_DSA:
3472 /* Add port to DSA entries */
3473 mvpp2_prs_dsa_tag_set(priv, port, true,
3474 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3475 mvpp2_prs_dsa_tag_set(priv, port, true,
3476 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3477 /* Remove port from EDSA entries */
3478 mvpp2_prs_dsa_tag_set(priv, port, false,
3479 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3480 mvpp2_prs_dsa_tag_set(priv, port, false,
3481 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3484 case MVPP2_TAG_TYPE_MH:
3485 case MVPP2_TAG_TYPE_NONE:
3486 /* Remove port from EDSA and DSA entries */
3487 mvpp2_prs_dsa_tag_set(priv, port, false,
3488 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3489 mvpp2_prs_dsa_tag_set(priv, port, false,
3490 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3491 mvpp2_prs_dsa_tag_set(priv, port, false,
3492 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3493 mvpp2_prs_dsa_tag_set(priv, port, false,
3494 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3498 if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
3505 /* Set prs flow for the port */
3506 static int mvpp2_prs_def_flow(struct mvpp2_port *port)
3508 struct mvpp2_prs_entry *pe;
3511 pe = mvpp2_prs_flow_find(port->priv, port->id);
3513 /* Such an entry does not exist - create it */
3515 /* Go through all entries from last to first */
3516 tid = mvpp2_prs_tcam_first_free(port->priv,
3517 MVPP2_PE_LAST_FREE_TID,
3518 MVPP2_PE_FIRST_FREE_TID);
3522 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
3526 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
3530 mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
3531 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
3533 /* Update shadow table */
3534 mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS);
3537 mvpp2_prs_tcam_port_map_set(pe, (1 << port->id));
3538 mvpp2_prs_hw_write(port->priv, pe);
3544 /* Classifier configuration routines */
3546 /* Update classification flow table registers */
3547 static void mvpp2_cls_flow_write(struct mvpp2 *priv,
3548 struct mvpp2_cls_flow_entry *fe)
3550 mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
3551 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
3552 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
3553 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
3556 /* Update classification lookup table register */
3557 static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
3558 struct mvpp2_cls_lookup_entry *le)
3562 val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
3563 mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
3564 mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
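/* The lookup table is addressed by the {way, lkpid} pair packed into the
 * index register; mvpp2_cls_port_config() below, for instance, uses the
 * port id as lkpid.
 */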
3567 /* Classifier default initialization */
3568 static void mvpp2_cls_init(struct mvpp2 *priv)
3570 struct mvpp2_cls_lookup_entry le;
3571 struct mvpp2_cls_flow_entry fe;
3574 /* Enable classifier */
3575 mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
3577 /* Clear classifier flow table */
3578 memset(&fe.data, 0, sizeof(fe.data));
3579 for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
3581 mvpp2_cls_flow_write(priv, &fe);
3584 /* Clear classifier lookup table */
3586 for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
3589 mvpp2_cls_lookup_write(priv, &le);
3592 mvpp2_cls_lookup_write(priv, &le);
3596 static void mvpp2_cls_port_config(struct mvpp2_port *port)
3598 struct mvpp2_cls_lookup_entry le;
3601 /* Set way for the port */
3602 val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
3603 val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
3604 mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);
3606 /* Pick the entry to be accessed in lookup ID decoding table
3607 * according to the way and lkpid.
3609 le.lkpid = port->id;
3613 /* Set initial CPU queue for receiving packets */
3614 le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
3615 le.data |= port->first_rxq;
3617 /* Disable classification engines */
3618 le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
3620 /* Update lookup ID table entry */
3621 mvpp2_cls_lookup_write(port->priv, &le);
3624 /* Set CPU queue number for oversize packets */
3625 static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
3629 mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
3630 port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);
3632 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
3633 (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));
3635 val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
3636 val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
3637 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
3640 static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool)
3642 if (likely(pool->frag_size <= PAGE_SIZE))
3643 return netdev_alloc_frag(pool->frag_size);
3645 return kmalloc(pool->frag_size, GFP_ATOMIC);
3648 static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, void *data)
3650 if (likely(pool->frag_size <= PAGE_SIZE))
3651 skb_free_frag(data);
3656 /* Buffer Manager configuration routines */
3659 static int mvpp2_bm_pool_create(struct platform_device *pdev,
3661 struct mvpp2_bm_pool *bm_pool, int size)
3665 /* Number of buffer pointers must be a multiple of 16, as per
3666 * hardware constraints
3668 if (!IS_ALIGNED(size, 16))
3671 /* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 needs 16
3672 * bytes per buffer pointer
3674 if (priv->hw_version == MVPP21)
3675 bm_pool->size_bytes = 2 * sizeof(u32) * size;
3677 bm_pool->size_bytes = 2 * sizeof(u64) * size;
3679 bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, bm_pool->size_bytes,
3682 if (!bm_pool->virt_addr)
3685 if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
3686 MVPP2_BM_POOL_PTR_ALIGN)) {
3687 dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
3688 bm_pool->virt_addr, bm_pool->dma_addr);
3689 dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
3690 bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
3694 mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
3695 lower_32_bits(bm_pool->dma_addr));
3696 mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);
3698 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
3699 val |= MVPP2_BM_START_MASK;
3700 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
3702 bm_pool->type = MVPP2_BM_FREE;
3703 bm_pool->size = size;
3704 bm_pool->pkt_size = 0;
3705 bm_pool->buf_num = 0;
3710 /* Set pool buffer size */
3711 static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
3712 struct mvpp2_bm_pool *bm_pool,
3717 bm_pool->buf_size = buf_size;
3719 val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
3720 mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
3723 static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
3724 struct mvpp2_bm_pool *bm_pool,
3725 dma_addr_t *dma_addr,
3726 phys_addr_t *phys_addr)
3728 int cpu = get_cpu();
3730 *dma_addr = mvpp2_percpu_read(priv, cpu,
3731 MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
3732 *phys_addr = mvpp2_percpu_read(priv, cpu, MVPP2_BM_VIRT_ALLOC_REG);
3734 if (priv->hw_version == MVPP22) {
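/* On PPv2.2 the high bits of the dma and phys addresses are packed
 * together in one per-cpu register and only applied when the kernel's
 * address types are 64 bit wide.
 */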
3736 u32 dma_addr_highbits, phys_addr_highbits;
3738 val = mvpp2_percpu_read(priv, cpu, MVPP22_BM_ADDR_HIGH_ALLOC);
3739 dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
3740 phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
3741 MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;
3743 if (sizeof(dma_addr_t) == 8)
3744 *dma_addr |= (u64)dma_addr_highbits << 32;
3746 if (sizeof(phys_addr_t) == 8)
3747 *phys_addr |= (u64)phys_addr_highbits << 32;
3753 /* Free all buffers from the pool */
3754 static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
3755 struct mvpp2_bm_pool *bm_pool)
3759 for (i = 0; i < bm_pool->buf_num; i++) {
3760 dma_addr_t buf_dma_addr;
3761 phys_addr_t buf_phys_addr;
3764 mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool,
3765 &buf_dma_addr, &buf_phys_addr);
3767 dma_unmap_single(dev, buf_dma_addr,
3768 bm_pool->buf_size, DMA_FROM_DEVICE);
3770 data = (void *)phys_to_virt(buf_phys_addr);
3774 mvpp2_frag_free(bm_pool, data);
3777 /* Update BM driver with number of buffers removed from pool */
3778 bm_pool->buf_num -= i;
3782 static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
3784 struct mvpp2_bm_pool *bm_pool)
3788 mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool);
3789 if (bm_pool->buf_num) {
3790 WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);
3794 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
3795 val |= MVPP2_BM_STOP_MASK;
3796 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
3798 dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
3804 static int mvpp2_bm_pools_init(struct platform_device *pdev,
3808 struct mvpp2_bm_pool *bm_pool;
3810 /* Create all pools with maximum size */
3811 size = MVPP2_BM_POOL_SIZE_MAX;
3812 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
3813 bm_pool = &priv->bm_pools[i];
3815 err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size);
3817 goto err_unroll_pools;
3818 mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
3823 dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size);
3824 for (i = i - 1; i >= 0; i--)
3825 mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]);
3829 static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
3833 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
3834 /* Mask all BM interrupts */
3835 mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
3836 /* Clear BM cause register */
3837 mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
3840 /* Allocate and initialize BM pools */
3841 priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM,
3842 sizeof(*priv->bm_pools), GFP_KERNEL);
3843 if (!priv->bm_pools)
3846 err = mvpp2_bm_pools_init(pdev, priv);
3852 /* Attach long pool to rxq */
3853 static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
3854 int lrxq, int long_pool)
3859 /* Get queue physical ID */
3860 prxq = port->rxqs[lrxq]->id;
3862 if (port->priv->hw_version == MVPP21)
3863 mask = MVPP21_RXQ_POOL_LONG_MASK;
3865 mask = MVPP22_RXQ_POOL_LONG_MASK;
3867 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
3869 val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
3870 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
3873 /* Attach short pool to rxq */
3874 static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
3875 int lrxq, int short_pool)
3880 /* Get queue physical ID */
3881 prxq = port->rxqs[lrxq]->id;
3883 if (port->priv->hw_version == MVPP21)
3884 mask = MVPP21_RXQ_POOL_SHORT_MASK;
3886 mask = MVPP22_RXQ_POOL_SHORT_MASK;
3888 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
3890 val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask;
3891 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
3894 static void *mvpp2_buf_alloc(struct mvpp2_port *port,
3895 struct mvpp2_bm_pool *bm_pool,
3896 dma_addr_t *buf_dma_addr,
3897 phys_addr_t *buf_phys_addr,
3900 dma_addr_t dma_addr;
3903 data = mvpp2_frag_alloc(bm_pool);
3907 dma_addr = dma_map_single(port->dev->dev.parent, data,
3908 MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
3910 if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
3911 mvpp2_frag_free(bm_pool, data);
3914 *buf_dma_addr = dma_addr;
3915 *buf_phys_addr = virt_to_phys(data);
3920 /* Release buffer to BM */
3921 static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
3922 dma_addr_t buf_dma_addr,
3923 phys_addr_t buf_phys_addr)
3925 int cpu = get_cpu();
3927 if (port->priv->hw_version == MVPP22) {
3930 if (sizeof(dma_addr_t) == 8)
3931 val |= upper_32_bits(buf_dma_addr) &
3932 MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;
3934 if (sizeof(phys_addr_t) == 8)
3935 val |= (upper_32_bits(buf_phys_addr)
3936 << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
3937 MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;
3939 mvpp2_percpu_write(port->priv, cpu,
3940 MVPP22_BM_ADDR_HIGH_RLS_REG, val);
3943 /* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
3944 * returned in the "cookie" field of the RX
3945 * descriptor. Instead of storing the virtual address, we
3946 * store the physical address
3948 mvpp2_percpu_write(port->priv, cpu,
3949 MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
3950 mvpp2_percpu_write(port->priv, cpu,
3951 MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
3956 /* Allocate buffers for the pool */
3957 static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
3958 struct mvpp2_bm_pool *bm_pool, int buf_num)
3960 int i, buf_size, total_size;
3961 dma_addr_t dma_addr;
3962 phys_addr_t phys_addr;
3965 buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
3966 total_size = MVPP2_RX_TOTAL_SIZE(buf_size);
3969 (buf_num + bm_pool->buf_num > bm_pool->size)) {
3970 netdev_err(port->dev,
3971 "cannot allocate %d buffers for pool %d\n",
3972 buf_num, bm_pool->id);
3976 for (i = 0; i < buf_num; i++) {
3977 buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr,
3978 &phys_addr, GFP_KERNEL);
3982 mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
3986 /* Update BM driver with number of buffers added to pool */
3987 bm_pool->buf_num += i;
3989 netdev_dbg(port->dev,
3990 "%s pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
3991 bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
3992 bm_pool->id, bm_pool->pkt_size, buf_size, total_size);
3994 netdev_dbg(port->dev,
3995 "%s pool %d: %d of %d buffers added\n",
3996 bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
3997 bm_pool->id, i, buf_num);
4001 /* Notify the driver that the BM pool is being used as a specific type and return the
4002 * pool pointer on success
4004 static struct mvpp2_bm_pool *
4005 mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
4008 struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
4011 if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
4012 netdev_err(port->dev, "mixing pool types is forbidden\n");
4016 if (new_pool->type == MVPP2_BM_FREE)
4017 new_pool->type = type;
4019 /* Allocate buffers in case BM pool is used as long pool, but packet
4020 * size doesn't match MTU or BM pool hasn't been used yet
4022 if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) ||
4023 (new_pool->pkt_size == 0)) {
4026 /* Set default buffer number or free all the buffers in case
4027 * the pool is not empty
4029 pkts_num = new_pool->buf_num;
4031 pkts_num = type == MVPP2_BM_SWF_LONG ?
4032 MVPP2_BM_LONG_BUF_NUM :
4033 MVPP2_BM_SHORT_BUF_NUM;
4035 mvpp2_bm_bufs_free(port->dev->dev.parent,
4036 port->priv, new_pool);
4038 new_pool->pkt_size = pkt_size;
4039 new_pool->frag_size =
4040 SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
4041 MVPP2_SKB_SHINFO_SIZE;
4043 /* Allocate buffers for this pool */
4044 num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
4045 if (num != pkts_num) {
4046 WARN(1, "pool %d: %d of %d allocated\n",
4047 new_pool->id, num, pkts_num);
4052 mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
4053 MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
4058 /* Initialize pools for swf */
4059 static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
4063 if (!port->pool_long) {
4065 mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id),
4068 if (!port->pool_long)
4071 port->pool_long->port_map |= (1 << port->id);
4073 for (rxq = 0; rxq < rxq_number; rxq++)
4074 mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
4077 if (!port->pool_short) {
4079 mvpp2_bm_pool_use(port, MVPP2_BM_SWF_SHORT_POOL,
4081 MVPP2_BM_SHORT_PKT_SIZE);
4082 if (!port->pool_short)
4085 port->pool_short->port_map |= (1 << port->id);
4087 for (rxq = 0; rxq < rxq_number; rxq++)
4088 mvpp2_rxq_short_pool_set(port, rxq,
4089 port->pool_short->id);
4095 static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
4097 struct mvpp2_port *port = netdev_priv(dev);
4098 struct mvpp2_bm_pool *port_pool = port->pool_long;
4099 int num, pkts_num = port_pool->buf_num;
4100 int pkt_size = MVPP2_RX_PKT_SIZE(mtu);
4102 /* Update BM pool with new buffer size */
4103 mvpp2_bm_bufs_free(dev->dev.parent, port->priv, port_pool);
4104 if (port_pool->buf_num) {
4105 WARN(1, "cannot free all buffers in pool %d\n", port_pool->id);
4109 port_pool->pkt_size = pkt_size;
4110 port_pool->frag_size = SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
4111 MVPP2_SKB_SHINFO_SIZE;
4112 num = mvpp2_bm_bufs_add(port, port_pool, pkts_num);
4113 if (num != pkts_num) {
4114 WARN(1, "pool %d: %d of %d allocated\n",
4115 port_pool->id, num, pkts_num);
4119 mvpp2_bm_pool_bufsize_set(port->priv, port_pool,
4120 MVPP2_RX_BUF_SIZE(port_pool->pkt_size));
4122 netdev_update_features(dev);
4126 static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
4128 int cpu, cpu_mask = 0;
4130 for_each_present_cpu(cpu)
4131 cpu_mask |= 1 << cpu;
4132 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
4133 MVPP2_ISR_ENABLE_INTERRUPT(cpu_mask));
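/* E.g. with four present CPUs the loop above builds cpu_mask = 0xf, so
 * a single register write enables the port interrupt for CPUs 0-3; the
 * disable path below uses the same mask with the disable encoding.
 */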
4136 static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
4138 int cpu, cpu_mask = 0;
4140 for_each_present_cpu(cpu)
4141 cpu_mask |= 1 << cpu;
4142 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
4143 MVPP2_ISR_DISABLE_INTERRUPT(cpu_mask));
4146 /* Mask the current CPU's Rx/Tx interrupts.
4147 * Called by on_each_cpu(), guaranteed to run with migration disabled,
4148 * using smp_processor_id() is OK.
4150 static void mvpp2_interrupts_mask(void *arg)
4152 struct mvpp2_port *port = arg;
4154 mvpp2_percpu_write(port->priv, smp_processor_id(),
4155 MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
4158 /* Unmask the current CPU's Rx/Tx interrupts.
4159 * Called by on_each_cpu(), guaranteed to run with migration disabled,
4160 * using smp_processor_id() is OK.
4162 static void mvpp2_interrupts_unmask(void *arg)
4164 struct mvpp2_port *port = arg;
4166 mvpp2_percpu_write(port->priv, smp_processor_id(),
4167 MVPP2_ISR_RX_TX_MASK_REG(port->id),
4168 (MVPP2_CAUSE_MISC_SUM_MASK |
4169 MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK));
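/* Only the misc summary and RX-occupied bits are unmasked here; TX-done
 * interrupts stay masked because TX completion is driven by the per-CPU
 * hrtimer/tasklet path (see mvpp2_timer_set() below) rather than by an
 * interrupt.
 */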
4172 /* Port configuration routines */
4174 static void mvpp22_port_mii_set(struct mvpp2_port *port)
4178 /* Only GOP port 0 has an XLG MAC */
4179 if (port->gop_id == 0) {
4180 val = readl(port->base + MVPP22_XLG_CTRL3_REG);
4181 val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
4183 if (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
4184 port->phy_interface == PHY_INTERFACE_MODE_10GKR)
4185 val |= MVPP22_XLG_CTRL3_MACMODESELECT_10G;
4187 val |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
4189 writel(val, port->base + MVPP22_XLG_CTRL3_REG);
4192 val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
4193 if (port->phy_interface == PHY_INTERFACE_MODE_RGMII)
4194 val |= MVPP22_CTRL4_EXT_PIN_GMII_SEL;
4196 val &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
4197 val &= ~MVPP22_CTRL4_DP_CLK_SEL;
4198 val |= MVPP22_CTRL4_SYNC_BYPASS;
4199 val |= MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
4200 writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
4203 static void mvpp2_port_mii_set(struct mvpp2_port *port)
4207 if (port->priv->hw_version == MVPP22)
4208 mvpp22_port_mii_set(port);
4210 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
4212 switch (port->phy_interface) {
4213 case PHY_INTERFACE_MODE_SGMII:
4214 val |= MVPP2_GMAC_INBAND_AN_MASK;
4216 case PHY_INTERFACE_MODE_RGMII:
4217 val |= MVPP2_GMAC_PORT_RGMII_MASK;
4219 val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
4222 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
4225 static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
4229 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4230 val |= MVPP2_GMAC_FC_ADV_EN;
4231 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4234 static void mvpp2_port_enable(struct mvpp2_port *port)
4238 /* Only GOP port 0 has an XLG MAC */
4239 if (port->gop_id == 0 &&
4240 (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
4241 port->phy_interface == PHY_INTERFACE_MODE_10GKR)) {
4242 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
4243 val |= MVPP22_XLG_CTRL0_PORT_EN |
4244 MVPP22_XLG_CTRL0_MAC_RESET_DIS;
4245 val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS;
4246 writel(val, port->base + MVPP22_XLG_CTRL0_REG);
4248 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
4249 val |= MVPP2_GMAC_PORT_EN_MASK;
4250 val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
4251 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
4255 static void mvpp2_port_disable(struct mvpp2_port *port)
4259 /* Only GOP port 0 has an XLG MAC */
4260 if (port->gop_id == 0 &&
4261 (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
4262 port->phy_interface == PHY_INTERFACE_MODE_10GKR)) {
4263 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
4264 val &= ~(MVPP22_XLG_CTRL0_PORT_EN |
4265 MVPP22_XLG_CTRL0_MAC_RESET_DIS);
4266 writel(val, port->base + MVPP22_XLG_CTRL0_REG);
4268 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
4269 val &= ~(MVPP2_GMAC_PORT_EN_MASK);
4270 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
4274 /* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
4275 static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
4279 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
4280 ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
4281 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
4284 /* Configure loopback port */
4285 static void mvpp2_port_loopback_set(struct mvpp2_port *port)
4289 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
4291 if (port->speed == 1000)
4292 val |= MVPP2_GMAC_GMII_LB_EN_MASK;
4294 val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
4296 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
4297 val |= MVPP2_GMAC_PCS_LB_EN_MASK;
4299 val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
4301 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
4304 static void mvpp2_port_reset(struct mvpp2_port *port)
4308 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
4309 ~MVPP2_GMAC_PORT_RESET_MASK;
4310 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
4312 while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
4313 MVPP2_GMAC_PORT_RESET_MASK)
4317 /* Change maximum receive size of the port */
4318 static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
4322 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
4323 val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
4324 val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
4325 MVPP2_GMAC_MAX_RX_SIZE_OFFS);
4326 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
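/* The MAC expresses the maximum RX size in units of two bytes, hence
 * the divide by 2 after stripping the 2-byte Marvell header
 * (MVPP2_MH_SIZE): e.g. pkt_size = 1518 is programmed as
 * (1518 - 2) / 2 = 758.
 */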
4329 /* Set defaults for the MVPP2 port */
4330 static void mvpp2_defaults_set(struct mvpp2_port *port)
4332 int tx_port_num, val, queue, ptxq, lrxq;
4334 if (port->priv->hw_version == MVPP21) {
4335 /* Configure port to loopback if needed */
4336 if (port->flags & MVPP2_F_LOOPBACK)
4337 mvpp2_port_loopback_set(port);
4339 /* Update TX FIFO MIN Threshold */
4340 val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
4341 val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
4342 /* Min. TX threshold must be less than minimal packet length */
4343 val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
4344 writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
4347 /* Disable Legacy WRR, Disable EJP, Release from reset */
4348 tx_port_num = mvpp2_egress_port(port);
4349 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
4351 mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
4353 /* Close bandwidth for all queues */
4354 for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
4355 ptxq = mvpp2_txq_phys(port->id, queue);
4356 mvpp2_write(port->priv,
4357 MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
4360 /* Set refill period to 1 usec, refill tokens
4361 * and bucket size to maximum
4363 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
4364 port->priv->tclk / USEC_PER_SEC);
4365 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
4366 val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
4367 val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
4368 val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
4369 mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
4370 val = MVPP2_TXP_TOKEN_SIZE_MAX;
4371 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
4373 /* Set MaximumLowLatencyPacketSize value to 256 */
4374 mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
4375 MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
4376 MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));
4378 /* Enable Rx cache snoop */
4379 for (lrxq = 0; lrxq < rxq_number; lrxq++) {
4380 queue = port->rxqs[lrxq]->id;
4381 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
4382 val |= MVPP2_SNOOP_PKT_SIZE_MASK |
4383 MVPP2_SNOOP_BUF_HDR_MASK;
4384 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
4387 /* By default, mask all interrupts on all present CPUs */
4388 mvpp2_interrupts_disable(port);
4391 /* Enable/disable receiving packets */
4392 static void mvpp2_ingress_enable(struct mvpp2_port *port)
4397 for (lrxq = 0; lrxq < rxq_number; lrxq++) {
4398 queue = port->rxqs[lrxq]->id;
4399 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
4400 val &= ~MVPP2_RXQ_DISABLE_MASK;
4401 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
4405 static void mvpp2_ingress_disable(struct mvpp2_port *port)
4410 for (lrxq = 0; lrxq < rxq_number; lrxq++) {
4411 queue = port->rxqs[lrxq]->id;
4412 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
4413 val |= MVPP2_RXQ_DISABLE_MASK;
4414 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
4418 /* Enable transmit via physical egress queue
4419 * - HW starts to take descriptors from DRAM
4421 static void mvpp2_egress_enable(struct mvpp2_port *port)
4425 int tx_port_num = mvpp2_egress_port(port);
4427 /* Enable all initialized TXs. */
4429 for (queue = 0; queue < txq_number; queue++) {
4430 struct mvpp2_tx_queue *txq = port->txqs[queue];
4433 qmap |= (1 << queue);
4436 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4437 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
4440 /* Disable transmit via physical egress queue
4441 * - HW doesn't take descriptors from DRAM
4443 static void mvpp2_egress_disable(struct mvpp2_port *port)
4447 int tx_port_num = mvpp2_egress_port(port);
4449 /* Issue stop command for active channels only */
4450 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4451 reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
4452 MVPP2_TXP_SCHED_ENQ_MASK;
4454 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
4455 (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));
4457 /* Wait for all Tx activity to terminate. */
4460 if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
4461 netdev_warn(port->dev,
4462 "Tx stop timed out, status=0x%08x\n",
4469 /* Check the port TX Command register to verify that
4470 * all Tx queues are stopped
4472 reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
4473 } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
4476 /* Rx descriptors helper methods */
4478 /* Get number of Rx descriptors occupied by received packets */
4480 mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
4482 u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));
4484 return val & MVPP2_RXQ_OCCUPIED_MASK;
4487 /* Update Rx queue status with the number of occupied and available
4488 * Rx descriptor slots.
4491 mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
4492 int used_count, int free_count)
4494 /* Decrement the number of used descriptors and increment
4495 * the number of free descriptors.
4497 u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
4499 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
4502 /* Get pointer to next RX descriptor to be processed by SW */
4503 static inline struct mvpp2_rx_desc *
4504 mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
4506 int rx_desc = rxq->next_desc_to_proc;
4508 rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
4509 prefetch(rxq->descs + rxq->next_desc_to_proc);
4510 return rxq->descs + rx_desc;
4513 /* Set rx queue offset */
4514 static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
4515 int prxq, int offset)
4519 /* Convert offset from bytes to units of 32 bytes */
4520 offset = offset >> 5;
4522 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
4523 val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
4526 val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
4527 MVPP2_RXQ_PACKET_OFFSET_MASK);
4529 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
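/* The packet-offset field is expressed in 32-byte units, so the
 * NET_SKB_PAD offset passed by mvpp2_rxq_init() is divided by 32:
 * e.g. a 64-byte pad is programmed as 64 >> 5 = 2.
 */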
4532 /* Tx descriptors helper methods */
4534 /* Get pointer to next Tx descriptor to be processed (send) by HW */
4535 static struct mvpp2_tx_desc *
4536 mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
4538 int tx_desc = txq->next_desc_to_proc;
4540 txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
4541 return txq->descs + tx_desc;
4544 /* Update HW with number of aggregated Tx descriptors to be sent
4546 * Called only from mvpp2_tx(), so migration is disabled, using
4547 * smp_processor_id() is OK.
4549 static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
4551 /* aggregated access - relevant TXQ number is written in TX desc */
4552 mvpp2_percpu_write(port->priv, smp_processor_id(),
4553 MVPP2_AGGR_TXQ_UPDATE_REG, pending);
4557 /* Check if there are enough free descriptors in aggregated txq.
4558 * If not, update the number of occupied descriptors and repeat the check.
4560 * Called only from mvpp2_tx(), so migration is disabled, using
4561 * smp_processor_id() is OK.
4563 static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
4564 struct mvpp2_tx_queue *aggr_txq, int num)
4566 if ((aggr_txq->count + num) > aggr_txq->size) {
4567 /* Update number of occupied aggregated Tx descriptors */
4568 int cpu = smp_processor_id();
4569 u32 val = mvpp2_read(priv, MVPP2_AGGR_TXQ_STATUS_REG(cpu));
4571 aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
4574 if ((aggr_txq->count + num) > aggr_txq->size)
4580 /* Reserved Tx descriptors allocation request
4582 * Called only from mvpp2_txq_reserved_desc_num_proc(), itself called
4583 * only by mvpp2_tx(), so migration is disabled, using
4584 * smp_processor_id() is OK.
4586 static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
4587 struct mvpp2_tx_queue *txq, int num)
4590 int cpu = smp_processor_id();
4592 val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
4593 mvpp2_percpu_write(priv, cpu, MVPP2_TXQ_RSVD_REQ_REG, val);
4595 val = mvpp2_percpu_read(priv, cpu, MVPP2_TXQ_RSVD_RSLT_REG);
4597 return val & MVPP2_TXQ_RSVD_RSLT_MASK;
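/* The request/result pair is a HW handshake: the RSVD_REQ write asks
 * for "num" descriptors on txq->id, and the RSVD_RSLT read returns how
 * many the HW actually granted - possibly fewer - which is why the
 * caller below re-checks reserved_num after adding the result.
 */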
4600 /* Check if there are enough reserved descriptors for transmission.
4601 * If not, request chunk of reserved descriptors and check again.
4603 static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv,
4604 struct mvpp2_tx_queue *txq,
4605 struct mvpp2_txq_pcpu *txq_pcpu,
4608 int req, cpu, desc_count;
4610 if (txq_pcpu->reserved_num >= num)
4613 /* Not enough descriptors reserved! Update the reserved descriptor
4614 * count and check again.
4618 /* Compute total of used descriptors */
4619 for_each_present_cpu(cpu) {
4620 struct mvpp2_txq_pcpu *txq_pcpu_aux;
4622 txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu);
4623 desc_count += txq_pcpu_aux->count;
4624 desc_count += txq_pcpu_aux->reserved_num;
4627 req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
4631 (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK)))
4634 txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req);
4636 /* OK, the descriptor count has been updated: check again. */
4637 if (txq_pcpu->reserved_num < num)
4642 /* Release the last allocated Tx descriptor. Useful to handle DMA
4643 * mapping failures in the Tx path.
4645 static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
4647 if (txq->next_desc_to_proc == 0)
4648 txq->next_desc_to_proc = txq->last_desc - 1;
4650 txq->next_desc_to_proc--;
4653 /* Set Tx descriptors fields relevant for CSUM calculation */
4654 static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
4655 int ip_hdr_len, int l4_proto)
4659 /* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
4660 * G_L4_chk, L4_type required only for checksum calculation
4662 command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
4663 command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
4664 command |= MVPP2_TXD_IP_CSUM_DISABLE;
4666 if (l3_proto == swab16(ETH_P_IP)) {
4667 command &= ~MVPP2_TXD_IP_CSUM_DISABLE; /* enable IPv4 csum */
4668 command &= ~MVPP2_TXD_L3_IP6; /* enable IPv4 */
4670 command |= MVPP2_TXD_L3_IP6; /* enable IPv6 */
4673 if (l4_proto == IPPROTO_TCP) {
4674 command &= ~MVPP2_TXD_L4_UDP; /* enable TCP */
4675 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
4676 } else if (l4_proto == IPPROTO_UDP) {
4677 command |= MVPP2_TXD_L4_UDP; /* enable UDP */
4678 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
4680 command |= MVPP2_TXD_L4_CSUM_NOT;
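/* Worked example (illustrative): TCP over IPv4 with no IP options
 * behind a plain Ethernet header gives l3_offs = 14 and ip_hdr_len = 5
 * (32-bit words, as the caller passes ip4h->ihl), so the command word
 * carries both offsets with IPv4 and TCP checksum generation enabled;
 * any other L4 protocol ends up with MVPP2_TXD_L4_CSUM_NOT.
 */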
4686 /* Get number of sent descriptors and decrement counter.
4687 * The number of sent descriptors is returned.
4690 * Called only from mvpp2_txq_done(), called from mvpp2_tx()
4691 * (migration disabled) and from the TX completion tasklet (migration
4692 * disabled) so using smp_processor_id() is OK.
4694 static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
4695 struct mvpp2_tx_queue *txq)
4699 /* Reading status reg resets transmitted descriptor counter */
4700 val = mvpp2_percpu_read(port->priv, smp_processor_id(),
4701 MVPP2_TXQ_SENT_REG(txq->id));
4703 return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
4704 MVPP2_TRANSMITTED_COUNT_OFFSET;
4707 /* Called through on_each_cpu(), so runs on all CPUs, with migration
4708 * disabled, therefore using smp_processor_id() is OK.
4710 static void mvpp2_txq_sent_counter_clear(void *arg)
4712 struct mvpp2_port *port = arg;
4715 for (queue = 0; queue < txq_number; queue++) {
4716 int id = port->txqs[queue]->id;
4718 mvpp2_percpu_read(port->priv, smp_processor_id(),
4719 MVPP2_TXQ_SENT_REG(id));
4723 /* Set max sizes for Tx queues */
4724 static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
4727 int txq, tx_port_num;
4729 mtu = port->pkt_size * 8;
4730 if (mtu > MVPP2_TXP_MTU_MAX)
4731 mtu = MVPP2_TXP_MTU_MAX;
4733 /* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
4736 /* Indirect access to registers */
4737 tx_port_num = mvpp2_egress_port(port);
4738 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4741 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
4742 val &= ~MVPP2_TXP_MTU_MAX;
4744 mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);
4746 /* TXP token size and all TXQs token size must be larger than MTU */
4747 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
4748 size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
4751 val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
4753 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
4756 for (txq = 0; txq < txq_number; txq++) {
4757 val = mvpp2_read(port->priv,
4758 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
4759 size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;
4763 val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
4765 mvpp2_write(port->priv,
4766 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
4772 /* Set the number of packets that will be received before an Rx interrupt
4773 * is generated by HW.
4775 static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
4776 struct mvpp2_rx_queue *rxq)
4778 int cpu = get_cpu();
4780 if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
4781 rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;
4783 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
4784 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG,
4790 static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
4792 u64 tmp = (u64)clk_hz * usec;
4794 do_div(tmp, USEC_PER_SEC);
4796 return tmp > U32_MAX ? U32_MAX : tmp;
4799 static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz)
4801 u64 tmp = (u64)cycles * USEC_PER_SEC;
4803 do_div(tmp, clk_hz);
4805 return tmp > U32_MAX ? U32_MAX : tmp;
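/* Worked example (illustrative, values assumed): with tclk = 250 MHz,
 * a 100 usec coalescing delay converts to
 * 250000000ULL * 100 / USEC_PER_SEC = 25000 cycles, and 25000 cycles
 * convert back to 100 usec. Results above U32_MAX are clamped rather
 * than allowed to wrap.
 */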
4808 /* Set the time delay in usec before Rx interrupt */
4809 static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
4810 struct mvpp2_rx_queue *rxq)
4812 unsigned long freq = port->priv->tclk;
4813 u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
4815 if (val > MVPP2_MAX_ISR_RX_THRESHOLD) {
4817 mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq);
4819 /* re-evaluate to get actual register value */
4820 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
4823 mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
4826 /* Free Tx queue skbuffs */
4827 static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
4828 struct mvpp2_tx_queue *txq,
4829 struct mvpp2_txq_pcpu *txq_pcpu, int num)
4833 for (i = 0; i < num; i++) {
4834 struct mvpp2_txq_pcpu_buf *tx_buf =
4835 txq_pcpu->buffs + txq_pcpu->txq_get_index;
4837 dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
4838 tx_buf->size, DMA_TO_DEVICE);
4840 dev_kfree_skb_any(tx_buf->skb);
4842 mvpp2_txq_inc_get(txq_pcpu);
4846 static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
4849 int queue = fls(cause) - 1;
4851 return port->rxqs[queue];
4854 static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
4857 int queue = fls(cause) - 1;
4859 return port->txqs[queue];
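/* fls(cause) - 1 selects the highest-numbered queue with a pending
 * bit: e.g. cause = 0b1010 gives fls() = 4 and hence queue 3. Callers
 * clear the serviced bit and loop until cause is empty.
 */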
4862 /* Handle end of transmission */
4863 static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
4864 struct mvpp2_txq_pcpu *txq_pcpu)
4866 struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
4869 if (txq_pcpu->cpu != smp_processor_id())
4870 netdev_err(port->dev, "wrong cpu on the end of Tx processing\n");
4872 tx_done = mvpp2_txq_sent_desc_proc(port, txq);
4875 mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);
4877 txq_pcpu->count -= tx_done;
4879 if (netif_tx_queue_stopped(nq))
4880 if (txq_pcpu->size - txq_pcpu->count >= MAX_SKB_FRAGS + 1)
4881 netif_tx_wake_queue(nq);
4884 static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause)
4886 struct mvpp2_tx_queue *txq;
4887 struct mvpp2_txq_pcpu *txq_pcpu;
4888 unsigned int tx_todo = 0;
4891 txq = mvpp2_get_tx_queue(port, cause);
4895 txq_pcpu = this_cpu_ptr(txq->pcpu);
4897 if (txq_pcpu->count) {
4898 mvpp2_txq_done(port, txq, txq_pcpu);
4899 tx_todo += txq_pcpu->count;
4902 cause &= ~(1 << txq->log_id);
4907 /* Rx/Tx queue initialization/cleanup methods */
4909 /* Allocate and initialize descriptors for aggr TXQ */
4910 static int mvpp2_aggr_txq_init(struct platform_device *pdev,
4911 struct mvpp2_tx_queue *aggr_txq,
4912 int desc_num, int cpu,
4917 /* Allocate memory for TX descriptors */
4918 aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
4919 desc_num * MVPP2_DESC_ALIGNED_SIZE,
4920 &aggr_txq->descs_dma, GFP_KERNEL);
4921 if (!aggr_txq->descs)
4924 aggr_txq->last_desc = aggr_txq->size - 1;
4926 /* Aggr TXQ cannot be reset (no-reset WA), so resume from the index kept in HW */
4927 aggr_txq->next_desc_to_proc = mvpp2_read(priv,
4928 MVPP2_AGGR_TXQ_INDEX_REG(cpu));
4930 /* Set Tx descriptors queue starting address indirect
4933 if (priv->hw_version == MVPP21)
4934 txq_dma = aggr_txq->descs_dma;
4936 txq_dma = aggr_txq->descs_dma >>
4937 MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;
4939 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma);
4940 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num);
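/* On PPv2.2 the DESC_ADDR register holds the DMA address shifted right
 * by MVPP22_AGGR_TXQ_DESC_ADDR_OFFS, which implicitly requires the
 * descriptor block to be aligned accordingly; dma_alloc_coherent()
 * above returns at least page-aligned memory, which is expected to be
 * sufficient.
 */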
4945 /* Create a specified Rx queue */
4946 static int mvpp2_rxq_init(struct mvpp2_port *port,
4947 struct mvpp2_rx_queue *rxq)
4953 rxq->size = port->rx_ring_size;
4955 /* Allocate memory for RX descriptors */
4956 rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
4957 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
4958 &rxq->descs_dma, GFP_KERNEL);
4962 rxq->last_desc = rxq->size - 1;
4964 /* Zero occupied and non-occupied counters - direct access */
4965 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
4967 /* Set Rx descriptors queue starting address - indirect access */
4969 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
4970 if (port->priv->hw_version == MVPP21)
4971 rxq_dma = rxq->descs_dma;
4973 rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
4974 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
4975 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
4976 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0);
4980 mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);
4982 /* Set coalescing pkts and time */
4983 mvpp2_rx_pkts_coal_set(port, rxq);
4984 mvpp2_rx_time_coal_set(port, rxq);
4986 /* Add number of descriptors ready for receiving packets */
4987 mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
4992 /* Push packets received by the RXQ to BM pool */
4993 static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
4994 struct mvpp2_rx_queue *rxq)
4998 rx_received = mvpp2_rxq_received(port, rxq->id);
5002 for (i = 0; i < rx_received; i++) {
5003 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
5004 u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
5007 pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >>
5008 MVPP2_RXD_BM_POOL_ID_OFFS;
5010 mvpp2_bm_pool_put(port, pool,
5011 mvpp2_rxdesc_dma_addr_get(port, rx_desc),
5012 mvpp2_rxdesc_cookie_get(port, rx_desc));
5014 mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
5017 /* Cleanup Rx queue */
5018 static void mvpp2_rxq_deinit(struct mvpp2_port *port,
5019 struct mvpp2_rx_queue *rxq)
5023 mvpp2_rxq_drop_pkts(port, rxq);
5026 dma_free_coherent(port->dev->dev.parent,
5027 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
5033 rxq->next_desc_to_proc = 0;
5036 /* Clear Rx descriptors queue starting address and size;
5037 * free descriptor number
5039 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
5041 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
5042 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0);
5043 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0);
5047 /* Create and initialize a Tx queue */
5048 static int mvpp2_txq_init(struct mvpp2_port *port,
5049 struct mvpp2_tx_queue *txq)
5052 int cpu, desc, desc_per_txq, tx_port_num;
5053 struct mvpp2_txq_pcpu *txq_pcpu;
5055 txq->size = port->tx_ring_size;
5057 /* Allocate memory for Tx descriptors */
5058 txq->descs = dma_alloc_coherent(port->dev->dev.parent,
5059 txq->size * MVPP2_DESC_ALIGNED_SIZE,
5060 &txq->descs_dma, GFP_KERNEL);
5064 txq->last_desc = txq->size - 1;
5066 /* Set Tx descriptors queue starting address - indirect access */
5068 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
5069 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG,
5071 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG,
5072 txq->size & MVPP2_TXQ_DESC_SIZE_MASK);
5073 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_INDEX_REG, 0);
5074 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_RSVD_CLR_REG,
5075 txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
5076 val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PENDING_REG);
5077 val &= ~MVPP2_TXQ_PENDING_MASK;
5078 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PENDING_REG, val);
5080 /* Calculate base address in prefetch buffer. We reserve 16 descriptors
5081 * for each existing TXQ.
5082 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT
5083 * GBE ports are assumed to be continuous from 0 to MVPP2_MAX_PORTS
5086 desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
5087 (txq->log_id * desc_per_txq);
5089 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG,
5090 MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
5091 MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
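/* Worked example (illustrative, assuming desc_per_txq = 16 as per the
 * comment above and MVPP2_MAX_TXQ = 8): the TXQ with port->id = 1 and
 * log_id = 2 gets desc = 1 * 8 * 16 + 2 * 16 = 160 as its prefetch
 * base, with a 16-descriptor window and a threshold of 8.
 */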
5094 /* WRR / EJP configuration - indirect access */
5095 tx_port_num = mvpp2_egress_port(port);
5096 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
5098 val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
5099 val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
5100 val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
5101 val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
5102 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);
5104 val = MVPP2_TXQ_TOKEN_SIZE_MAX;
5105 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
5108 for_each_present_cpu(cpu) {
5109 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
5110 txq_pcpu->size = txq->size;
5111 txq_pcpu->buffs = kmalloc_array(txq_pcpu->size,
5112 sizeof(*txq_pcpu->buffs),
5114 if (!txq_pcpu->buffs)
5117 txq_pcpu->count = 0;
5118 txq_pcpu->reserved_num = 0;
5119 txq_pcpu->txq_put_index = 0;
5120 txq_pcpu->txq_get_index = 0;
5125 for_each_present_cpu(cpu) {
5126 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
5127 kfree(txq_pcpu->buffs);
5130 dma_free_coherent(port->dev->dev.parent,
5131 txq->size * MVPP2_DESC_ALIGNED_SIZE,
5132 txq->descs, txq->descs_dma);
5137 /* Free allocated TXQ resources */
5138 static void mvpp2_txq_deinit(struct mvpp2_port *port,
5139 struct mvpp2_tx_queue *txq)
5141 struct mvpp2_txq_pcpu *txq_pcpu;
5144 for_each_present_cpu(cpu) {
5145 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
5146 kfree(txq_pcpu->buffs);
5150 dma_free_coherent(port->dev->dev.parent,
5151 txq->size * MVPP2_DESC_ALIGNED_SIZE,
5152 txq->descs, txq->descs_dma);
5156 txq->next_desc_to_proc = 0;
5159 /* Set minimum bandwidth for disabled TXQs */
5160 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
5162 /* Set Tx descriptors queue starting address and size */
5164 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
5165 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0);
5166 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0);
5170 /* Cleanup Tx ports */
5171 static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
5173 struct mvpp2_txq_pcpu *txq_pcpu;
5174 int delay, pending, cpu;
5178 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
5179 val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG);
5180 val |= MVPP2_TXQ_DRAIN_EN_MASK;
5181 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
5183 /* The napi queue has been stopped so wait for all packets
5184 * to be transmitted.
5188 if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
5189 netdev_warn(port->dev,
5190 "port %d: cleaning queue %d timed out\n",
5191 port->id, txq->log_id);
5197 pending = mvpp2_percpu_read(port->priv, cpu,
5198 MVPP2_TXQ_PENDING_REG);
5199 pending &= MVPP2_TXQ_PENDING_MASK;
5202 val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
5203 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
5206 for_each_present_cpu(cpu) {
5207 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
5209 /* Release all packets */
5210 mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);
5213 txq_pcpu->count = 0;
5214 txq_pcpu->txq_put_index = 0;
5215 txq_pcpu->txq_get_index = 0;
5219 /* Cleanup all Tx queues */
5220 static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
5222 struct mvpp2_tx_queue *txq;
5226 val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);
5228 /* Reset Tx ports and delete Tx queues */
5229 val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
5230 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
5232 for (queue = 0; queue < txq_number; queue++) {
5233 txq = port->txqs[queue];
5234 mvpp2_txq_clean(port, txq);
5235 mvpp2_txq_deinit(port, txq);
5238 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
5240 val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
5241 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
5244 /* Cleanup all Rx queues */
5245 static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
5249 for (queue = 0; queue < rxq_number; queue++)
5250 mvpp2_rxq_deinit(port, port->rxqs[queue]);
5253 /* Init all Rx queues for port */
5254 static int mvpp2_setup_rxqs(struct mvpp2_port *port)
5258 for (queue = 0; queue < rxq_number; queue++) {
5259 err = mvpp2_rxq_init(port, port->rxqs[queue]);
5266 mvpp2_cleanup_rxqs(port);
5270 /* Init all tx queues for port */
5271 static int mvpp2_setup_txqs(struct mvpp2_port *port)
5273 struct mvpp2_tx_queue *txq;
5276 for (queue = 0; queue < txq_number; queue++) {
5277 txq = port->txqs[queue];
5278 err = mvpp2_txq_init(port, txq);
5283 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
5287 mvpp2_cleanup_txqs(port);
5291 /* The callback for per-port interrupt */
5292 static irqreturn_t mvpp2_isr(int irq, void *dev_id)
5294 struct mvpp2_port *port = (struct mvpp2_port *)dev_id;
5296 mvpp2_interrupts_disable(port);
5298 napi_schedule(&port->napi);
5304 static void mvpp2_link_event(struct net_device *dev)
5306 struct mvpp2_port *port = netdev_priv(dev);
5307 struct phy_device *phydev = dev->phydev;
5308 int status_change = 0;
5312 if ((port->speed != phydev->speed) ||
5313 (port->duplex != phydev->duplex)) {
5316 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5317 val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
5318 MVPP2_GMAC_CONFIG_GMII_SPEED |
5319 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
5320 MVPP2_GMAC_AN_SPEED_EN |
5321 MVPP2_GMAC_AN_DUPLEX_EN);
5324 val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
5326 if (phydev->speed == SPEED_1000)
5327 val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
5328 else if (phydev->speed == SPEED_100)
5329 val |= MVPP2_GMAC_CONFIG_MII_SPEED;
5331 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5333 port->duplex = phydev->duplex;
5334 port->speed = phydev->speed;
5338 if (phydev->link != port->link) {
5339 if (!phydev->link) {
5344 port->link = phydev->link;
5348 if (status_change) {
5350 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5351 val |= (MVPP2_GMAC_FORCE_LINK_PASS |
5352 MVPP2_GMAC_FORCE_LINK_DOWN);
5353 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5354 mvpp2_egress_enable(port);
5355 mvpp2_ingress_enable(port);
5357 mvpp2_ingress_disable(port);
5358 mvpp2_egress_disable(port);
5360 phy_print_status(phydev);
5364 static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
5368 if (!port_pcpu->timer_scheduled) {
5369 port_pcpu->timer_scheduled = true;
5370 interval = MVPP2_TXDONE_HRTIMER_PERIOD_NS;
5371 hrtimer_start(&port_pcpu->tx_done_timer, interval,
5372 HRTIMER_MODE_REL_PINNED);
5376 static void mvpp2_tx_proc_cb(unsigned long data)
5378 struct net_device *dev = (struct net_device *)data;
5379 struct mvpp2_port *port = netdev_priv(dev);
5380 struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
5381 unsigned int tx_todo, cause;
5383 if (!netif_running(dev))
5385 port_pcpu->timer_scheduled = false;
5387 /* Process all the Tx queues */
5388 cause = (1 << txq_number) - 1;
5389 tx_todo = mvpp2_tx_done(port, cause);
5391 /* Set the timer in case not all the packets were processed */
5393 mvpp2_timer_set(port_pcpu);
5396 static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
5398 struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
5399 struct mvpp2_port_pcpu,
5402 tasklet_schedule(&port_pcpu->tx_done_tasklet);
5404 return HRTIMER_NORESTART;
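/* The deferred TX-done path is thus: the hrtimer fires, the tasklet
 * (initialized to mvpp2_tx_proc_cb() at port init) runs and calls
 * mvpp2_tx_done(), and mvpp2_tx_proc_cb() re-arms the timer via
 * mvpp2_timer_set() while any queue still holds unprocessed packets.
 */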
5407 /* Main RX/TX processing routines */
5409 /* Display more error info */
5410 static void mvpp2_rx_error(struct mvpp2_port *port,
5411 struct mvpp2_rx_desc *rx_desc)
5413 u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
5414 size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);
5416 switch (status & MVPP2_RXD_ERR_CODE_MASK) {
5417 case MVPP2_RXD_ERR_CRC:
5418 netdev_err(port->dev, "bad rx status %08x (crc error), size=%zu\n",
5421 case MVPP2_RXD_ERR_OVERRUN:
5422 netdev_err(port->dev, "bad rx status %08x (overrun error), size=%zu\n",
5425 case MVPP2_RXD_ERR_RESOURCE:
5426 netdev_err(port->dev, "bad rx status %08x (resource error), size=%zu\n",
5432 /* Handle RX checksum offload */
5433 static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
5434 struct sk_buff *skb)
5436 if (((status & MVPP2_RXD_L3_IP4) &&
5437 !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
5438 (status & MVPP2_RXD_L3_IP6))
5439 if (((status & MVPP2_RXD_L4_UDP) ||
5440 (status & MVPP2_RXD_L4_TCP)) &&
5441 (status & MVPP2_RXD_L4_CSUM_OK)) {
5443 skb->ip_summed = CHECKSUM_UNNECESSARY;
5447 skb->ip_summed = CHECKSUM_NONE;
5450 /* Reuse skb if possible, or allocate a new skb and add it to BM pool */
5451 static int mvpp2_rx_refill(struct mvpp2_port *port,
5452 struct mvpp2_bm_pool *bm_pool, int pool)
5454 dma_addr_t dma_addr;
5455 phys_addr_t phys_addr;
5458 /* No recycle or too many buffers are in use, so allocate a new skb */
5459 buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, &phys_addr,
5464 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
5469 /* Handle tx checksum */
5470 static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
5472 if (skb->ip_summed == CHECKSUM_PARTIAL) {
5476 if (skb->protocol == htons(ETH_P_IP)) {
5477 struct iphdr *ip4h = ip_hdr(skb);
5479 /* Calculate IPv4 checksum and L4 checksum */
5480 ip_hdr_len = ip4h->ihl;
5481 l4_proto = ip4h->protocol;
5482 } else if (skb->protocol == htons(ETH_P_IPV6)) {
5483 struct ipv6hdr *ip6h = ipv6_hdr(skb);
5485 /* Read l4_protocol from one of IPv6 extra headers */
5486 if (skb_network_header_len(skb) > 0)
5487 ip_hdr_len = (skb_network_header_len(skb) >> 2);
5488 l4_proto = ip6h->nexthdr;
5490 return MVPP2_TXD_L4_CSUM_NOT;
5493 return mvpp2_txq_desc_csum(skb_network_offset(skb),
5494 skb->protocol, ip_hdr_len, l4_proto);
5497 return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
5500 /* Main rx processing */
5501 static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
5502 struct mvpp2_rx_queue *rxq)
5504 struct net_device *dev = port->dev;
5510 /* Get number of received packets and clamp the to-do */
5511 rx_received = mvpp2_rxq_received(port, rxq->id);
5512 if (rx_todo > rx_received)
5513 rx_todo = rx_received;
5515 while (rx_done < rx_todo) {
5516 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
5517 struct mvpp2_bm_pool *bm_pool;
5518 struct sk_buff *skb;
5519 unsigned int frag_size;
5520 dma_addr_t dma_addr;
5521 phys_addr_t phys_addr;
5523 int pool, rx_bytes, err;
5527 rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
5528 rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
5529 rx_bytes -= MVPP2_MH_SIZE;
5530 dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
5531 phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
5532 data = (void *)phys_to_virt(phys_addr);
5534 pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
5535 MVPP2_RXD_BM_POOL_ID_OFFS;
5536 bm_pool = &port->priv->bm_pools[pool];
5538 /* In case of an error, release the requested buffer pointer
5539 * to the Buffer Manager. This request process is controlled
5540 * by the hardware, and the information about the buffer is
5541 * carried in the RX descriptor.
5543 if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
5545 dev->stats.rx_errors++;
5546 mvpp2_rx_error(port, rx_desc);
5547 /* Return the buffer to the pool */
5548 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
5552 if (bm_pool->frag_size > PAGE_SIZE)
5555 frag_size = bm_pool->frag_size;
5557 skb = build_skb(data, frag_size);
5559 netdev_warn(port->dev, "skb build failed\n");
5560 goto err_drop_frame;
5563 err = mvpp2_rx_refill(port, bm_pool, pool);
5565 netdev_err(port->dev, "failed to refill BM pools\n");
5566 goto err_drop_frame;
5569 dma_unmap_single(dev->dev.parent, dma_addr,
5570 bm_pool->buf_size, DMA_FROM_DEVICE);
5573 rcvd_bytes += rx_bytes;
5575 skb_reserve(skb, MVPP2_MH_SIZE + NET_SKB_PAD);
5576 skb_put(skb, rx_bytes);
5577 skb->protocol = eth_type_trans(skb, dev);
5578 mvpp2_rx_csum(port, rx_status, skb);
5580 napi_gro_receive(&port->napi, skb);
5584 struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
5586 u64_stats_update_begin(&stats->syncp);
5587 stats->rx_packets += rcvd_pkts;
5588 stats->rx_bytes += rcvd_bytes;
5589 u64_stats_update_end(&stats->syncp);
5592 /* Update Rx queue management counters */
5594 mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);
5600 tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
5601 struct mvpp2_tx_desc *desc)
5603 dma_addr_t buf_dma_addr =
5604 mvpp2_txdesc_dma_addr_get(port, desc);
5606 mvpp2_txdesc_size_get(port, desc);
5607 dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
5608 buf_sz, DMA_TO_DEVICE);
5609 mvpp2_txq_desc_put(txq);
5612 /* Handle tx fragmentation processing */
5613 static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
5614 struct mvpp2_tx_queue *aggr_txq,
5615 struct mvpp2_tx_queue *txq)
5617 struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
5618 struct mvpp2_tx_desc *tx_desc;
5620 dma_addr_t buf_dma_addr;
5622 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5623 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5624 void *addr = page_address(frag->page.p) + frag->page_offset;
5626 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
5627 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
5628 mvpp2_txdesc_size_set(port, tx_desc, frag->size);
5630 buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
5633 if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
5634 mvpp2_txq_desc_put(txq);
5638 mvpp2_txdesc_offset_set(port, tx_desc,
5639 buf_dma_addr & MVPP2_TX_DESC_ALIGN);
5640 mvpp2_txdesc_dma_addr_set(port, tx_desc,
5641 buf_dma_addr & ~MVPP2_TX_DESC_ALIGN);
5643 if (i == (skb_shinfo(skb)->nr_frags - 1)) {
5644 /* Last descriptor */
5645 mvpp2_txdesc_cmd_set(port, tx_desc,
5647 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
5649 /* Descriptor in the middle: Not First, Not Last */
5650 mvpp2_txdesc_cmd_set(port, tx_desc, 0);
5651 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
5657 /* Release all descriptors that were used to map fragments of
5658 * this packet, as well as the corresponding DMA mappings
5660 for (i = i - 1; i >= 0; i--) {
5661 tx_desc = txq->descs + i;
5662 tx_desc_unmap_put(port, txq, tx_desc);
5668 /* Main tx processing */
5669 static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
5671 struct mvpp2_port *port = netdev_priv(dev);
5672 struct mvpp2_tx_queue *txq, *aggr_txq;
5673 struct mvpp2_txq_pcpu *txq_pcpu;
5674 struct mvpp2_tx_desc *tx_desc;
5675 dma_addr_t buf_dma_addr;
5680 txq_id = skb_get_queue_mapping(skb);
5681 txq = port->txqs[txq_id];
5682 txq_pcpu = this_cpu_ptr(txq->pcpu);
5683 aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];
5685 frags = skb_shinfo(skb)->nr_frags + 1;
5687 /* Check number of available descriptors */
5688 if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) ||
5689 mvpp2_txq_reserved_desc_num_proc(port->priv, txq,
5695 /* Get a descriptor for the first part of the packet */
5696 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
5697 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
5698 mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb));
5700 buf_dma_addr = dma_map_single(dev->dev.parent, skb->data,
5701 skb_headlen(skb), DMA_TO_DEVICE);
5702 if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
5703 mvpp2_txq_desc_put(txq);
5708 mvpp2_txdesc_offset_set(port, tx_desc,
5709 buf_dma_addr & MVPP2_TX_DESC_ALIGN);
5710 mvpp2_txdesc_dma_addr_set(port, tx_desc,
5711 buf_dma_addr & ~MVPP2_TX_DESC_ALIGN);
5713 tx_cmd = mvpp2_skb_tx_csum(port, skb);
5716 /* First and Last descriptor */
5717 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
5718 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
5719 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
5721 /* First but not Last */
5722 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
5723 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
5724 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
5726 /* Continue with other skb fragments */
5727 if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
5728 tx_desc_unmap_put(port, txq, tx_desc);
5734 txq_pcpu->reserved_num -= frags;
5735 txq_pcpu->count += frags;
5736 aggr_txq->count += frags;
5738 /* Enable transmit */
5740 mvpp2_aggr_txq_pend_desc_add(port, frags);
5742 if (txq_pcpu->size - txq_pcpu->count < MAX_SKB_FRAGS + 1) {
5743 struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
5745 netif_tx_stop_queue(nq);
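/* This stop threshold mirrors the wake threshold in mvpp2_txq_done():
 * the queue is stopped once fewer than MAX_SKB_FRAGS + 1 descriptors
 * remain, so a maximally fragmented skb can always be enqueued right
 * after the queue is woken.
 */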
5749 struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
5751 u64_stats_update_begin(&stats->syncp);
5752 stats->tx_packets++;
5753 stats->tx_bytes += skb->len;
5754 u64_stats_update_end(&stats->syncp);
5756 dev->stats.tx_dropped++;
5757 dev_kfree_skb_any(skb);
5760 /* Finalize TX processing */
5761 if (txq_pcpu->count >= txq->done_pkts_coal)
5762 mvpp2_txq_done(port, txq, txq_pcpu);
5764 /* Set the timer in case not all frags were processed */
5765 if (txq_pcpu->count <= frags && txq_pcpu->count > 0) {
5766 struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
5768 mvpp2_timer_set(port_pcpu);
5771 return NETDEV_TX_OK;
5774 static inline void mvpp2_cause_error(struct net_device *dev, int cause)
5776 if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
5777 netdev_err(dev, "FCS error\n");
5778 if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
5779 netdev_err(dev, "rx fifo overrun error\n");
5780 if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
5781 netdev_err(dev, "tx fifo underrun error\n");
5784 static int mvpp2_poll(struct napi_struct *napi, int budget)
5786 u32 cause_rx_tx, cause_rx, cause_misc;
5788 struct mvpp2_port *port = netdev_priv(napi->dev);
5789 int cpu = smp_processor_id();
5791 /* Rx/Tx cause register
5793 * Bits 0-15: each bit indicates received packets on the Rx queue
5794 * (bit 0 is for Rx queue 0).
5796 * Bits 16-23: each bit indicates transmitted packets on the Tx queue
5797 * (bit 16 is for Tx queue 0).
5799 * Each CPU has its own Rx/Tx cause register
5801 cause_rx_tx = mvpp2_percpu_read(port->priv, cpu,
5802 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
5803 cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
5804 cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
5807 mvpp2_cause_error(port->dev, cause_misc);
5809 /* Clear the cause register */
5810 mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
5811 mvpp2_percpu_write(port->priv, cpu,
5812 MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
5813 cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
5816 cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
5818 /* Process RX packets */
5819 cause_rx |= port->pending_cause_rx;
5820 while (cause_rx && budget > 0) {
5822 struct mvpp2_rx_queue *rxq;
5824 rxq = mvpp2_get_rx_queue(port, cause_rx);
5828 count = mvpp2_rx(port, budget, rxq);
5832 /* Clear the bit associated with this Rx queue
5833 * so that next iteration will continue from
5834 * the next Rx queue.
5836 cause_rx &= ~(1 << rxq->logic_rxq);
5842 napi_complete_done(napi, rx_done);
5844 mvpp2_interrupts_enable(port);
5846 port->pending_cause_rx = cause_rx;
5850 /* Set hw internals when starting port */
5851 static void mvpp2_start_dev(struct mvpp2_port *port)
5853 struct net_device *ndev = port->dev;
5855 mvpp2_gmac_max_rx_size_set(port);
5856 mvpp2_txp_max_tx_size_set(port);
5858 napi_enable(&port->napi);
5860 /* Enable interrupts on all CPUs */
5861 mvpp2_interrupts_enable(port);
5863 mvpp2_port_enable(port);
5864 phy_start(ndev->phydev);
5865 netif_tx_start_all_queues(port->dev);
5868 /* Set hw internals when stopping port */
5869 static void mvpp2_stop_dev(struct mvpp2_port *port)
5871 struct net_device *ndev = port->dev;
5873 /* Stop new packets from arriving to RXQs */
5874 mvpp2_ingress_disable(port);
5878 /* Disable interrupts on all CPUs */
5879 mvpp2_interrupts_disable(port);
5881 napi_disable(&port->napi);
5883 netif_carrier_off(port->dev);
5884 netif_tx_stop_all_queues(port->dev);
5886 mvpp2_egress_disable(port);
5887 mvpp2_port_disable(port);
5888 phy_stop(ndev->phydev);
5891 static int mvpp2_check_ringparam_valid(struct net_device *dev,
5892 struct ethtool_ringparam *ring)
5894 u16 new_rx_pending = ring->rx_pending;
5895 u16 new_tx_pending = ring->tx_pending;
5897 if (ring->rx_pending == 0 || ring->tx_pending == 0)
5900 if (ring->rx_pending > MVPP2_MAX_RXD)
5901 new_rx_pending = MVPP2_MAX_RXD;
5902 else if (!IS_ALIGNED(ring->rx_pending, 16))
5903 new_rx_pending = ALIGN(ring->rx_pending, 16);
5905 if (ring->tx_pending > MVPP2_MAX_TXD)
5906 new_tx_pending = MVPP2_MAX_TXD;
5907 else if (!IS_ALIGNED(ring->tx_pending, 32))
5908 new_tx_pending = ALIGN(ring->tx_pending, 32);
5910 if (ring->rx_pending != new_rx_pending) {
5911 netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
5912 ring->rx_pending, new_rx_pending);
5913 ring->rx_pending = new_rx_pending;
5916 if (ring->tx_pending != new_tx_pending) {
5917 netdev_info(dev, "illegal Tx ring size value %d, round to %d\n",
5918 ring->tx_pending, new_tx_pending);
5919 ring->tx_pending = new_tx_pending;
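/* Worked example (illustrative): a request for 100 Rx descriptors is
 * rounded up to ALIGN(100, 16) = 112 and reported back to userspace in
 * the adjusted ring parameters; Tx rings use a granularity of 32.
 */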
5925 static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
5927 u32 mac_addr_l, mac_addr_m, mac_addr_h;
5929 mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
5930 mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
5931 mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
5932 addr[0] = (mac_addr_h >> 24) & 0xFF;
5933 addr[1] = (mac_addr_h >> 16) & 0xFF;
5934 addr[2] = (mac_addr_h >> 8) & 0xFF;
5935 addr[3] = mac_addr_h & 0xFF;
5936 addr[4] = mac_addr_m & 0xFF;
5937 addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
5940 static int mvpp2_phy_connect(struct mvpp2_port *port)
5942 struct phy_device *phy_dev;
5944 phy_dev = of_phy_connect(port->dev, port->phy_node, mvpp2_link_event, 0,
5945 port->phy_interface);
5947 netdev_err(port->dev, "cannot connect to phy\n");
5950 phy_dev->supported &= PHY_GBIT_FEATURES;
5951 phy_dev->advertising = phy_dev->supported;
5960 static void mvpp2_phy_disconnect(struct mvpp2_port *port)
5962 struct net_device *ndev = port->dev;
5964 phy_disconnect(ndev->phydev);
5967 static int mvpp2_open(struct net_device *dev)
5969 struct mvpp2_port *port = netdev_priv(dev);
5970 unsigned char mac_bcast[ETH_ALEN] = {
5971 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
5974 err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true);
5976 netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
5979 err = mvpp2_prs_mac_da_accept(port->priv, port->id,
5980 dev->dev_addr, true);
5982 netdev_err(dev, "mvpp2_prs_mac_da_accept MC failed\n");
5985 err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
5987 netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
5990 err = mvpp2_prs_def_flow(port);
5992 netdev_err(dev, "mvpp2_prs_def_flow failed\n");
5996 /* Allocate the Rx/Tx queues */
5997 err = mvpp2_setup_rxqs(port);
5999 netdev_err(port->dev, "cannot allocate Rx queues\n");
6003 err = mvpp2_setup_txqs(port);
6005 netdev_err(port->dev, "cannot allocate Tx queues\n");
6006 goto err_cleanup_rxqs;
6009 err = request_irq(port->irq, mvpp2_isr, 0, dev->name, port);
6011 netdev_err(port->dev, "cannot request IRQ %d\n", port->irq);
6012 goto err_cleanup_txqs;
6015 /* By default, the link is down */
6016 netif_carrier_off(port->dev);
6018 err = mvpp2_phy_connect(port);
6022 /* Unmask interrupts on all CPUs */
6023 on_each_cpu(mvpp2_interrupts_unmask, port, 1);
6025 mvpp2_start_dev(port);
6030 free_irq(port->irq, port);
6032 mvpp2_cleanup_txqs(port);
6034 mvpp2_cleanup_rxqs(port);
6038 static int mvpp2_stop(struct net_device *dev)
6040 struct mvpp2_port *port = netdev_priv(dev);
6041 struct mvpp2_port_pcpu *port_pcpu;
6044 mvpp2_stop_dev(port);
6045 mvpp2_phy_disconnect(port);
6047 /* Mask interrupts on all CPUs */
6048 on_each_cpu(mvpp2_interrupts_mask, port, 1);
6050 free_irq(port->irq, port);
6051 for_each_present_cpu(cpu) {
6052 port_pcpu = per_cpu_ptr(port->pcpu, cpu);
6054 hrtimer_cancel(&port_pcpu->tx_done_timer);
6055 port_pcpu->timer_scheduled = false;
6056 tasklet_kill(&port_pcpu->tx_done_tasklet);
6058 mvpp2_cleanup_rxqs(port);
6059 mvpp2_cleanup_txqs(port);
6064 static void mvpp2_set_rx_mode(struct net_device *dev)
6066 struct mvpp2_port *port = netdev_priv(dev);
6067 struct mvpp2 *priv = port->priv;
6068 struct netdev_hw_addr *ha;
6070 bool allmulti = dev->flags & IFF_ALLMULTI;
6072 mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC);
6073 mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti);
6074 mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti);
6076 /* Remove all port->id's mcast entries */
6077 mvpp2_prs_mcast_del_all(priv, id);
6079 if (allmulti && !netdev_mc_empty(dev)) {
6080 netdev_for_each_mc_addr(ha, dev)
6081 mvpp2_prs_mac_da_accept(priv, id, ha->addr, true);
6085 static int mvpp2_set_mac_address(struct net_device *dev, void *p)
6087 struct mvpp2_port *port = netdev_priv(dev);
6088 const struct sockaddr *addr = p;
6091 if (!is_valid_ether_addr(addr->sa_data)) {
6092 err = -EADDRNOTAVAIL;
6096 if (!netif_running(dev)) {
6097 err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
6100 /* Reconfigure parser to accept the original MAC address */
6101 err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
6106 mvpp2_stop_dev(port);
6108 err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
6112 /* Reconfigure parser to accept the original MAC address */
6113 err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
6117 mvpp2_start_dev(port);
6118 mvpp2_egress_enable(port);
6119 mvpp2_ingress_enable(port);
6122 netdev_err(dev, "failed to change MAC address\n");
6126 static int mvpp2_change_mtu(struct net_device *dev, int mtu)
6128 struct mvpp2_port *port = netdev_priv(dev);
6131 if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
6132 netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
6133 ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
6134 mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
6137 if (!netif_running(dev)) {
6138 err = mvpp2_bm_update_mtu(dev, mtu);
6140 port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
6144 /* Reconfigure BM to the original MTU */
6145 err = mvpp2_bm_update_mtu(dev, dev->mtu);
6150 mvpp2_stop_dev(port);
6152 err = mvpp2_bm_update_mtu(dev, mtu);
6154 port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
6158 /* Reconfigure BM to the original MTU */
6159 err = mvpp2_bm_update_mtu(dev, dev->mtu);
6164 mvpp2_start_dev(port);
6165 mvpp2_egress_enable(port);
6166 mvpp2_ingress_enable(port);
6170 netdev_err(dev, "failed to change MTU\n");
static void
mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mvpp2_port *port = netdev_priv(dev);
	unsigned int start;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct mvpp2_pcpu_stats *cpu_stats;
		u64 rx_packets;
		u64 rx_bytes;
		u64 tx_packets;
		u64 tx_bytes;

		cpu_stats = per_cpu_ptr(port->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			rx_packets = cpu_stats->rx_packets;
			rx_bytes = cpu_stats->rx_bytes;
			tx_packets = cpu_stats->tx_packets;
			tx_bytes = cpu_stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
	}

	stats->rx_errors = dev->stats.rx_errors;
	stats->rx_dropped = dev->stats.rx_dropped;
	stats->tx_dropped = dev->stats.tx_dropped;
}
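/* The u64_stats_fetch_begin_irq()/_retry_irq() pair is a seqcount read
 * loop: on 32-bit systems a 64-bit counter update is not atomic, so the
 * snapshot of all four counters is retried until it is observed without
 * a concurrent writer. On 64-bit systems the loop normally compiles
 * down to a single pass.
 */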
static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int ret;

	if (!dev->phydev)
		return -ENOTSUPP;

	ret = phy_mii_ioctl(dev->phydev, ifr, cmd);
	if (!ret)
		mvpp2_link_event(dev);

	return ret;
}
/* Ethtool methods */

/* Set interrupt coalescing for ethtool */
static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
				      struct ethtool_coalesce *c)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int queue;

	for (queue = 0; queue < rxq_number; queue++) {
		struct mvpp2_rx_queue *rxq = port->rxqs[queue];

		rxq->time_coal = c->rx_coalesce_usecs;
		rxq->pkts_coal = c->rx_max_coalesced_frames;
		mvpp2_rx_pkts_coal_set(port, rxq);
		mvpp2_rx_time_coal_set(port, rxq);
	}

	for (queue = 0; queue < txq_number; queue++) {
		struct mvpp2_tx_queue *txq = port->txqs[queue];

		txq->done_pkts_coal = c->tx_max_coalesced_frames;
	}

	return 0;
}

/* Get interrupt coalescing for ethtool */
static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
				      struct ethtool_coalesce *c)
{
	struct mvpp2_port *port = netdev_priv(dev);

	c->rx_coalesce_usecs = port->rxqs[0]->time_coal;
	c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
	c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;

	return 0;
}
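/* ethtool exposes one coalescing setting per direction, so the getter
 * reports the values of rxq 0 / txq 0 while the setter applies the same
 * values to every queue; as long as queues are only configured through
 * this interface, the two stay consistent.
 */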
static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
		sizeof(drvinfo->bus_info));
}

static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
					struct ethtool_ringparam *ring)
{
	struct mvpp2_port *port = netdev_priv(dev);

	ring->rx_max_pending = MVPP2_MAX_RXD;
	ring->tx_max_pending = MVPP2_MAX_TXD;
	ring->rx_pending = port->rx_ring_size;
	ring->tx_pending = port->tx_ring_size;
}
static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
				       struct ethtool_ringparam *ring)
{
	struct mvpp2_port *port = netdev_priv(dev);
	u16 prev_rx_ring_size = port->rx_ring_size;
	u16 prev_tx_ring_size = port->tx_ring_size;
	int err;

	err = mvpp2_check_ringparam_valid(dev, ring);
	if (err)
		return err;

	if (!netif_running(dev)) {
		port->rx_ring_size = ring->rx_pending;
		port->tx_ring_size = ring->tx_pending;
		return 0;
	}

	/* The interface is running, so we have to force a
	 * reallocation of the queues
	 */
	mvpp2_stop_dev(port);
	mvpp2_cleanup_rxqs(port);
	mvpp2_cleanup_txqs(port);

	port->rx_ring_size = ring->rx_pending;
	port->tx_ring_size = ring->tx_pending;

	err = mvpp2_setup_rxqs(port);
	if (err) {
		/* Reallocate Rx queues with the original ring size */
		port->rx_ring_size = prev_rx_ring_size;
		ring->rx_pending = prev_rx_ring_size;
		err = mvpp2_setup_rxqs(port);
		if (err)
			goto err_out;
	}
	err = mvpp2_setup_txqs(port);
	if (err) {
		/* Reallocate Tx queues with the original ring size */
		port->tx_ring_size = prev_tx_ring_size;
		ring->tx_pending = prev_tx_ring_size;
		err = mvpp2_setup_txqs(port);
		if (err)
			goto err_clean_rxqs;
	}

	mvpp2_start_dev(port);
	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);

	return 0;

err_clean_rxqs:
	mvpp2_cleanup_rxqs(port);
err_out:
	netdev_err(dev, "failed to change ring parameters\n");
	return err;
}
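/* Rollback note: if reallocating with the new ring sizes fails, the
 * previous sizes are restored both in the port and in *ring, so that
 * ethtool reports the sizes actually in effect; only if that second
 * attempt also fails is the error propagated with the queues torn down.
 */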
/* Device ops */

static const struct net_device_ops mvpp2_netdev_ops = {
	.ndo_open		= mvpp2_open,
	.ndo_stop		= mvpp2_stop,
	.ndo_start_xmit		= mvpp2_tx,
	.ndo_set_rx_mode	= mvpp2_set_rx_mode,
	.ndo_set_mac_address	= mvpp2_set_mac_address,
	.ndo_change_mtu		= mvpp2_change_mtu,
	.ndo_get_stats64	= mvpp2_get_stats64,
	.ndo_do_ioctl		= mvpp2_ioctl,
};
static const struct ethtool_ops mvpp2_eth_tool_ops = {
	.nway_reset		= phy_ethtool_nway_reset,
	.get_link		= ethtool_op_get_link,
	.set_coalesce		= mvpp2_ethtool_set_coalesce,
	.get_coalesce		= mvpp2_ethtool_get_coalesce,
	.get_drvinfo		= mvpp2_ethtool_get_drvinfo,
	.get_ringparam		= mvpp2_ethtool_get_ringparam,
	.set_ringparam		= mvpp2_ethtool_set_ringparam,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};
/* Initialize port HW */
static int mvpp2_port_init(struct mvpp2_port *port)
{
	struct device *dev = port->dev->dev.parent;
	struct mvpp2 *priv = port->priv;
	struct mvpp2_txq_pcpu *txq_pcpu;
	int queue, cpu, err;

	if (port->first_rxq + rxq_number >
	    MVPP2_MAX_PORTS * priv->max_port_rxqs)
		return -EINVAL;

	/* Disable port */
	mvpp2_egress_disable(port);
	mvpp2_port_disable(port);

	port->txqs = devm_kcalloc(dev, txq_number, sizeof(*port->txqs),
				  GFP_KERNEL);
	if (!port->txqs)
		return -ENOMEM;

	/* Associate physical Tx queues to this port and initialize.
	 * The mapping is predefined.
	 */
	for (queue = 0; queue < txq_number; queue++) {
		int queue_phy_id = mvpp2_txq_phys(port->id, queue);
		struct mvpp2_tx_queue *txq;

		txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
		if (!txq) {
			err = -ENOMEM;
			goto err_free_percpu;
		}

		txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
		if (!txq->pcpu) {
			err = -ENOMEM;
			goto err_free_percpu;
		}

		txq->id = queue_phy_id;
		txq->log_id = queue;
		txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
		for_each_present_cpu(cpu) {
			txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
			txq_pcpu->cpu = cpu;
		}

		port->txqs[queue] = txq;
	}

	port->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*port->rxqs),
				  GFP_KERNEL);
	if (!port->rxqs) {
		err = -ENOMEM;
		goto err_free_percpu;
	}

	/* Allocate and initialize Rx queue for this port */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvpp2_rx_queue *rxq;

		/* Map physical Rx queue to port's logical Rx queue */
		rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
		if (!rxq) {
			err = -ENOMEM;
			goto err_free_percpu;
		}
		/* Map this Rx queue to a physical queue */
		rxq->id = port->first_rxq + queue;
		rxq->port = port->id;
		rxq->logic_rxq = queue;

		port->rxqs[queue] = rxq;
	}

	/* Configure Rx queue group interrupt for this port */
	if (priv->hw_version == MVPP21) {
		mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id),
			    rxq_number);
	} else {
		u32 val;

		val = (port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET);
		mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);

		val = (rxq_number << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET);
		mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
	}

	/* Create Rx descriptor rings */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvpp2_rx_queue *rxq = port->rxqs[queue];

		rxq->size = port->rx_ring_size;
		rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
		rxq->time_coal = MVPP2_RX_COAL_USEC;
	}

	mvpp2_ingress_disable(port);

	/* Port default configuration */
	mvpp2_defaults_set(port);

	/* Port's classifier configuration */
	mvpp2_cls_oversize_rxq_set(port);
	mvpp2_cls_port_config(port);

	/* Provide an initial Rx packet size */
	port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);

	/* Initialize pools for swf */
	err = mvpp2_swf_bm_pool_init(port);
	if (err)
		goto err_free_percpu;

	return 0;

err_free_percpu:
	for (queue = 0; queue < txq_number; queue++) {
		if (!port->txqs[queue])
			continue;
		free_percpu(port->txqs[queue]->pcpu);
	}
	return err;
}
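/* On the "predefined mapping" above: mvpp2_txq_phys() (defined earlier
 * in this file) gives each port a fixed, contiguous block of physical
 * TX queues, e.g. physical id = port->id * MVPP2_MAX_TXQ + logical id,
 * while RX queues start at port->first_rxq, computed per hardware
 * version in mvpp2_port_probe().
 */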
/* Ports initialization */
static int mvpp2_port_probe(struct platform_device *pdev,
			    struct device_node *port_node,
			    struct mvpp2 *priv)
{
	struct device_node *phy_node;
	struct mvpp2_port *port;
	struct mvpp2_port_pcpu *port_pcpu;
	struct net_device *dev;
	struct resource *res;
	const char *dt_mac_addr;
	const char *mac_from;
	char hw_mac_addr[ETH_ALEN] = {0};
	u32 id;
	int features;
	int phy_mode;
	int err, i, cpu;

	dev = alloc_etherdev_mqs(sizeof(*port), txq_number, rxq_number);
	if (!dev)
		return -ENOMEM;

	phy_node = of_parse_phandle(port_node, "phy", 0);
	if (!phy_node) {
		dev_err(&pdev->dev, "missing phy\n");
		err = -ENODEV;
		goto err_free_netdev;
	}

	phy_mode = of_get_phy_mode(port_node);
	if (phy_mode < 0) {
		dev_err(&pdev->dev, "incorrect phy mode\n");
		err = phy_mode;
		goto err_free_netdev;
	}

	if (of_property_read_u32(port_node, "port-id", &id)) {
		err = -EINVAL;
		dev_err(&pdev->dev, "missing port-id value\n");
		goto err_free_netdev;
	}

	dev->tx_queue_len = MVPP2_MAX_TXD;
	dev->watchdog_timeo = 5 * HZ;
	dev->netdev_ops = &mvpp2_netdev_ops;
	dev->ethtool_ops = &mvpp2_eth_tool_ops;

	port = netdev_priv(dev);

	port->irq = irq_of_parse_and_map(port_node, 0);
	if (port->irq <= 0) {
		err = -EINVAL;
		goto err_free_netdev;
	}

	if (of_property_read_bool(port_node, "marvell,loopback"))
		port->flags |= MVPP2_F_LOOPBACK;

	port->priv = priv;
	port->id = id;
	if (priv->hw_version == MVPP21)
		port->first_rxq = port->id * rxq_number;
	else
		port->first_rxq = port->id * priv->max_port_rxqs;

	port->phy_node = phy_node;
	port->phy_interface = phy_mode;

	if (priv->hw_version == MVPP21) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 2 + id);
		port->base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(port->base)) {
			err = PTR_ERR(port->base);
			goto err_free_irq;
		}
	} else {
		if (of_property_read_u32(port_node, "gop-port-id",
					 &port->gop_id)) {
			err = -EINVAL;
			dev_err(&pdev->dev, "missing gop-port-id value\n");
			goto err_free_irq;
		}

		port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id);
	}

	/* Alloc per-cpu stats */
	port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
	if (!port->stats) {
		err = -ENOMEM;
		goto err_free_irq;
	}

	dt_mac_addr = of_get_mac_address(port_node);
	if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) {
		mac_from = "device tree";
		ether_addr_copy(dev->dev_addr, dt_mac_addr);
	} else {
		if (priv->hw_version == MVPP21)
			mvpp21_get_mac_address(port, hw_mac_addr);
		if (is_valid_ether_addr(hw_mac_addr)) {
			mac_from = "hardware";
			ether_addr_copy(dev->dev_addr, hw_mac_addr);
		} else {
			mac_from = "random";
			eth_hw_addr_random(dev);
		}
	}

	port->tx_ring_size = MVPP2_MAX_TXD;
	port->rx_ring_size = MVPP2_MAX_RXD;
	port->dev = dev;
	SET_NETDEV_DEV(dev, &pdev->dev);

	err = mvpp2_port_init(port);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to init port %d\n", id);
		goto err_free_stats;
	}

	mvpp2_port_mii_set(port);
	mvpp2_port_periodic_xon_disable(port);

	if (priv->hw_version == MVPP21)
		mvpp2_port_fc_adv_enable(port);

	mvpp2_port_reset(port);

	port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
	if (!port->pcpu) {
		err = -ENOMEM;
		goto err_free_txq_pcpu;
	}

	for_each_present_cpu(cpu) {
		port_pcpu = per_cpu_ptr(port->pcpu, cpu);

		hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL_PINNED);
		port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
		port_pcpu->timer_scheduled = false;

		tasklet_init(&port_pcpu->tx_done_tasklet, mvpp2_tx_proc_cb,
			     (unsigned long)dev);
	}

	netif_napi_add(dev, &port->napi, mvpp2_poll, NAPI_POLL_WEIGHT);
	features = NETIF_F_SG | NETIF_F_IP_CSUM;
	dev->features = features | NETIF_F_RXCSUM;
	dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO;
	dev->vlan_features |= features;

	/* MTU range: 68 - 9676 */
	dev->min_mtu = ETH_MIN_MTU;
	/* 9676 == 9700 - 20 and rounding to 8 */
	dev->max_mtu = 9676;

	err = register_netdev(dev);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register netdev\n");
		goto err_free_port_pcpu;
	}
	netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);

	priv->port_list[id] = port;
	return 0;

err_free_port_pcpu:
	free_percpu(port->pcpu);
err_free_txq_pcpu:
	for (i = 0; i < txq_number; i++)
		free_percpu(port->txqs[i]->pcpu);
err_free_stats:
	free_percpu(port->stats);
err_free_irq:
	irq_dispose_mapping(port->irq);
err_free_netdev:
	of_node_put(phy_node);
	free_netdev(dev);
	return err;
}
/* Ports removal routine */
static void mvpp2_port_remove(struct mvpp2_port *port)
{
	int i;

	unregister_netdev(port->dev);
	of_node_put(port->phy_node);
	free_percpu(port->pcpu);
	free_percpu(port->stats);
	for (i = 0; i < txq_number; i++)
		free_percpu(port->txqs[i]->pcpu);
	irq_dispose_mapping(port->irq);
	free_netdev(port->dev);
}
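/* mvpp2_port_remove() releases resources in roughly the reverse order
 * of mvpp2_port_probe(): unregister the netdev first so no new traffic
 * can start, then drop the phy OF-node reference, the per-CPU areas,
 * the IRQ mapping, and finally the netdev itself.
 */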
/* Initialize decoding windows */
static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
				    struct mvpp2 *priv)
{
	u32 win_enable;
	int i;

	/* Disable and clear all windows */
	for (i = 0; i < 6; i++) {
		mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
		mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);

		if (i < 4)
			mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
	}

	win_enable = 0;

	/* Configure one window per DRAM chip-select */
	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		mvpp2_write(priv, MVPP2_WIN_BASE(i),
			    (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
			    dram->mbus_dram_target_id);

		mvpp2_write(priv, MVPP2_WIN_SIZE(i),
			    (cs->size - 1) & 0xffff0000);

		win_enable |= (1 << i);
	}

	mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
}
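/* Each enabled window maps one DRAM chip-select: MVPP2_WIN_BASE() packs
 * the 64KiB-aligned base address with the mbus attribute and target id,
 * MVPP2_WIN_SIZE() holds (size - 1) truncated to 64KiB granularity, and
 * MVPP2_BASE_ADDR_ENABLE turns the programmed windows on, one bit per
 * window.
 */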
/* Initialize Rx FIFO's */
static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
{
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_DATA_SIZE);
		mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_ATTR_SIZE);
	}

	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
		    MVPP2_RX_FIFO_PORT_MIN_PKT);
	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}
static void mvpp2_axi_init(struct mvpp2 *priv)
{
	u32 val, rdval, wrval;

	mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);

	/* AXI Bridge Configuration */

	rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
		<< MVPP22_AXI_ATTR_CACHE_OFFS;
	rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;

	wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
		<< MVPP22_AXI_ATTR_CACHE_OFFS;
	wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;

	/* Buffer manager accesses */
	mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
	mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);

	/* Descriptor accesses */
	mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);

	/* Packet data accesses */
	mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);

	val = MVPP22_AXI_CODE_CACHE_NON_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;
	mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
	mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);

	val = MVPP22_AXI_CODE_CACHE_RD_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;

	mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);

	val = MVPP22_AXI_CODE_CACHE_WR_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;

	mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
}
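/* Summary of the PPv2.2 AXI setup above: buffer manager, descriptor and
 * packet-data transactions are tagged cacheable/outer-shareable so they
 * can be snooped by the CPU caches, "normal" (non-snooped) accesses are
 * left non-cacheable in the system domain, and the read and write
 * directions get separate cache-code encodings (RD_CACHE vs WR_CACHE)
 * since AXI attributes are programmed per channel.
 */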
/* Initialize network controller common part HW */
static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	const struct mbus_dram_target_info *dram_target_info;
	int err, i;
	u32 val;

	/* Checks for hardware constraints */
	if (rxq_number % 4 || (rxq_number > priv->max_port_rxqs) ||
	    (txq_number > MVPP2_MAX_TXQ)) {
		dev_err(&pdev->dev, "invalid queue size parameter\n");
		return -EINVAL;
	}

	/* MBUS windows configuration */
	dram_target_info = mv_mbus_dram_info();
	if (dram_target_info)
		mvpp2_conf_mbus_windows(dram_target_info, priv);

	if (priv->hw_version == MVPP22)
		mvpp2_axi_init(priv);

	/* Disable HW PHY polling */
	if (priv->hw_version == MVPP21) {
		val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
		val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
		writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
	} else {
		val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
		val &= ~MVPP22_SMI_POLLING_EN;
		writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
	}

	/* Allocate and initialize aggregated TXQs */
	priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(),
				       sizeof(*priv->aggr_txqs),
				       GFP_KERNEL);
	if (!priv->aggr_txqs)
		return -ENOMEM;

	for_each_present_cpu(i) {
		priv->aggr_txqs[i].id = i;
		priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
		err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i],
					  MVPP2_AGGR_TXQ_SIZE, i, priv);
		if (err < 0)
			return err;
	}

	/* Rx FIFO Initialization */
	mvpp2_rx_fifo_init(priv);

	/* Reset Rx queue group interrupt configuration */
	for (i = 0; i < MVPP2_MAX_PORTS; i++) {
		if (priv->hw_version == MVPP21) {
			mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(i),
				    rxq_number);
		} else {
			val = (i << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET);
			mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);

			val = (rxq_number << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET);
			mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
		}
	}

	if (priv->hw_version == MVPP21)
		writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
		       priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);

	/* Allow cache snoop when transmitting packets */
	mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);

	/* Buffer Manager initialization */
	err = mvpp2_bm_init(pdev, priv);
	if (err < 0)
		return err;

	/* Parser default initialization */
	err = mvpp2_prs_default_init(pdev, priv);
	if (err < 0)
		return err;

	/* Classifier default initialization */
	mvpp2_cls_init(priv);

	return 0;
}
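/* mvpp2_init() only brings up the blocks shared by all ports (address
 * windows/AXI, aggregated per-CPU TXQs, RX FIFOs, buffer manager,
 * parser and classifier); per-port setup happens later, from
 * mvpp2_port_probe() -> mvpp2_port_init().
 */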
static int mvpp2_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	struct device_node *port_node;
	struct mvpp2 *priv;
	struct resource *res;
	void __iomem *base;
	int port_count, cpu;
	int err;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->hw_version =
		(unsigned long)of_device_get_match_data(&pdev->dev);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	if (priv->hw_version == MVPP21) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(priv->lms_base))
			return PTR_ERR(priv->lms_base);
	} else {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(priv->iface_base))
			return PTR_ERR(priv->iface_base);
	}

	for_each_present_cpu(cpu) {
		u32 addr_space_sz;

		addr_space_sz = (priv->hw_version == MVPP21 ?
				 MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ);
		priv->cpu_base[cpu] = base + cpu * addr_space_sz;
	}

	if (priv->hw_version == MVPP21)
		priv->max_port_rxqs = 8;
	else
		priv->max_port_rxqs = 32;

	priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
	if (IS_ERR(priv->pp_clk))
		return PTR_ERR(priv->pp_clk);
	err = clk_prepare_enable(priv->pp_clk);
	if (err < 0)
		return err;

	priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
	if (IS_ERR(priv->gop_clk)) {
		err = PTR_ERR(priv->gop_clk);
		goto err_pp_clk;
	}
	err = clk_prepare_enable(priv->gop_clk);
	if (err < 0)
		goto err_pp_clk;

	if (priv->hw_version == MVPP22) {
		priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
		if (IS_ERR(priv->mg_clk)) {
			err = PTR_ERR(priv->mg_clk);
			goto err_gop_clk;
		}

		err = clk_prepare_enable(priv->mg_clk);
		if (err < 0)
			goto err_gop_clk;
	}

	/* Get system's tclk rate */
	priv->tclk = clk_get_rate(priv->pp_clk);

	if (priv->hw_version == MVPP22) {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
		if (err)
			goto err_mg_clk;
		/* Sadly, the BM pools all share the same register to
		 * store the high 32 bits of their address. So they
		 * must all have the same high 32 bits, which forces
		 * us to restrict coherent memory to DMA_BIT_MASK(32).
		 */
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			goto err_mg_clk;
	}

	/* Initialize network controller */
	err = mvpp2_init(pdev, priv);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to initialize controller\n");
		goto err_mg_clk;
	}

	port_count = of_get_available_child_count(dn);
	if (port_count == 0) {
		dev_err(&pdev->dev, "no ports enabled\n");
		err = -ENODEV;
		goto err_mg_clk;
	}

	priv->port_list = devm_kcalloc(&pdev->dev, port_count,
				       sizeof(*priv->port_list),
				       GFP_KERNEL);
	if (!priv->port_list) {
		err = -ENOMEM;
		goto err_mg_clk;
	}

	/* Initialize ports */
	for_each_available_child_of_node(dn, port_node) {
		err = mvpp2_port_probe(pdev, port_node, priv);
		if (err < 0)
			goto err_mg_clk;
	}

	platform_set_drvdata(pdev, priv);
	return 0;

err_mg_clk:
	if (priv->hw_version == MVPP22)
		clk_disable_unprepare(priv->mg_clk);
err_gop_clk:
	clk_disable_unprepare(priv->gop_clk);
err_pp_clk:
	clk_disable_unprepare(priv->pp_clk);
	return err;
}
static int mvpp2_remove(struct platform_device *pdev)
{
	struct mvpp2 *priv = platform_get_drvdata(pdev);
	struct device_node *dn = pdev->dev.of_node;
	struct device_node *port_node;
	int i = 0;

	for_each_available_child_of_node(dn, port_node) {
		if (priv->port_list[i])
			mvpp2_port_remove(priv->port_list[i]);
		i++;
	}

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];

		mvpp2_bm_pool_destroy(pdev, priv, bm_pool);
	}

	for_each_present_cpu(i) {
		struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];

		dma_free_coherent(&pdev->dev,
				  MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
				  aggr_txq->descs,
				  aggr_txq->descs_dma);
	}

	clk_disable_unprepare(priv->mg_clk);
	clk_disable_unprepare(priv->pp_clk);
	clk_disable_unprepare(priv->gop_clk);

	return 0;
}
static const struct of_device_id mvpp2_match[] = {
	{
		.compatible = "marvell,armada-375-pp2",
		.data = (void *)MVPP21,
	},
	{
		.compatible = "marvell,armada-7k-pp22",
		.data = (void *)MVPP22,
	},
	{ }
};
MODULE_DEVICE_TABLE(of, mvpp2_match);
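/* The .data field of each match entry carries the hardware version;
 * mvpp2_probe() fetches it with of_device_get_match_data() and stores
 * it in priv->hw_version, which selects between the MVPP21 and MVPP22
 * paths throughout the driver.
 */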
static struct platform_driver mvpp2_driver = {
	.probe = mvpp2_probe,
	.remove = mvpp2_remove,
	.driver = {
		.name = MVPP2_DRIVER_NAME,
		.of_match_table = mvpp2_match,
	},
};

module_platform_driver(mvpp2_driver);

MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
MODULE_LICENSE("GPL v2");