/*
 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/mfd/syscon.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/phy.h>
#include <linux/phy/phy.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/regmap.h>
#include <uapi/linux/ppp_defs.h>

/* RX Fifo Registers */
#define MVPP2_RX_DATA_FIFO_SIZE_REG(port)	(0x00 + 4 * (port))
#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port)	(0x20 + 4 * (port))
#define MVPP2_RX_MIN_PKT_SIZE_REG		0x60
#define MVPP2_RX_FIFO_INIT_REG			0x64

/* RX DMA Top Registers */
#define MVPP2_RX_CTRL_REG(port)			(0x140 + 4 * (port))
#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s)	(((s) & 0xfff) << 16)
#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK	BIT(31)
#define MVPP2_POOL_BUF_SIZE_REG(pool)		(0x180 + 4 * (pool))
#define MVPP2_POOL_BUF_SIZE_OFFSET		5
#define MVPP2_RXQ_CONFIG_REG(rxq)		(0x800 + 4 * (rxq))
#define MVPP2_SNOOP_PKT_SIZE_MASK		0x1ff
#define MVPP2_SNOOP_BUF_HDR_MASK		BIT(9)
#define MVPP2_RXQ_POOL_SHORT_OFFS		20
#define MVPP21_RXQ_POOL_SHORT_MASK		0x700000
#define MVPP22_RXQ_POOL_SHORT_MASK		0xf00000
#define MVPP2_RXQ_POOL_LONG_OFFS		24
#define MVPP21_RXQ_POOL_LONG_MASK		0x7000000
#define MVPP22_RXQ_POOL_LONG_MASK		0xf000000
#define MVPP2_RXQ_PACKET_OFFSET_OFFS		28
#define MVPP2_RXQ_PACKET_OFFSET_MASK		0x70000000
#define MVPP2_RXQ_DISABLE_MASK			BIT(31)
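
/* Illustrative sketch (not part of the original driver): composing an
 * MVPP2_RXQ_CONFIG_REG value on PPv2.1. Long/short pool numbers and the
 * packet offset (in 32-byte units, per the driver's use of the field)
 * are packed into one 32-bit word; the pool and offset numbers below
 * are arbitrary example values.
 *
 *	u32 val = 0;
 *
 *	val |= 2 << MVPP2_RXQ_POOL_LONG_OFFS;	  // long pool 2
 *	val |= 3 << MVPP2_RXQ_POOL_SHORT_OFFS;	  // short pool 3
 *	val |= 1 << MVPP2_RXQ_PACKET_OFFSET_OFFS; // 32-byte RX offset
 *	mvpp2_write(priv, MVPP2_RXQ_CONFIG_REG(rxq), val);
 */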

/* Parser Registers */
#define MVPP2_PRS_INIT_LOOKUP_REG		0x1000
#define MVPP2_PRS_PORT_LU_MAX			0xf
#define MVPP2_PRS_PORT_LU_MASK(port)		(0xff << ((port) * 4))
#define MVPP2_PRS_PORT_LU_VAL(port, val)	((val) << ((port) * 4))
#define MVPP2_PRS_INIT_OFFS_REG(port)		(0x1004 + ((port) & 4))
#define MVPP2_PRS_INIT_OFF_MASK(port)		(0x3f << (((port) % 4) * 8))
#define MVPP2_PRS_INIT_OFF_VAL(port, val)	((val) << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_REG(port)		(0x100c + ((port) & 4))
#define MVPP2_PRS_MAX_LOOP_MASK(port)		(0xff << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_VAL(port, val)	((val) << (((port) % 4) * 8))
#define MVPP2_PRS_TCAM_IDX_REG			0x1100
#define MVPP2_PRS_TCAM_DATA_REG(idx)		(0x1104 + (idx) * 4)
#define MVPP2_PRS_TCAM_INV_MASK			BIT(31)
#define MVPP2_PRS_SRAM_IDX_REG			0x1200
#define MVPP2_PRS_SRAM_DATA_REG(idx)		(0x1204 + (idx) * 4)
#define MVPP2_PRS_TCAM_CTRL_REG			0x1230
#define MVPP2_PRS_TCAM_EN_MASK			BIT(0)

/* Classifier Registers */
#define MVPP2_CLS_MODE_REG			0x1800
#define MVPP2_CLS_MODE_ACTIVE_MASK		BIT(0)
#define MVPP2_CLS_PORT_WAY_REG			0x1810
#define MVPP2_CLS_PORT_WAY_MASK(port)		(1 << (port))
#define MVPP2_CLS_LKP_INDEX_REG			0x1814
#define MVPP2_CLS_LKP_INDEX_WAY_OFFS		6
#define MVPP2_CLS_LKP_TBL_REG			0x1818
#define MVPP2_CLS_LKP_TBL_RXQ_MASK		0xff
#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK	BIT(25)
#define MVPP2_CLS_FLOW_INDEX_REG		0x1820
#define MVPP2_CLS_FLOW_TBL0_REG			0x1824
#define MVPP2_CLS_FLOW_TBL1_REG			0x1828
#define MVPP2_CLS_FLOW_TBL2_REG			0x182c
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port)	(0x1980 + ((port) * 4))
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS		3
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK		0x7
#define MVPP2_CLS_SWFWD_P2HQ_REG(port)		(0x19b0 + ((port) * 4))
#define MVPP2_CLS_SWFWD_PCTRL_REG		0x19d0
#define MVPP2_CLS_SWFWD_PCTRL_MASK(port)	(1 << (port))

/* Descriptor Manager Top Registers */
#define MVPP2_RXQ_NUM_REG			0x2040
#define MVPP2_RXQ_DESC_ADDR_REG			0x2044
#define MVPP22_DESC_ADDR_OFFS			8
#define MVPP2_RXQ_DESC_SIZE_REG			0x2048
#define MVPP2_RXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq)	(0x3000 + 4 * (rxq))
#define MVPP2_RXQ_NUM_PROCESSED_OFFSET		0
#define MVPP2_RXQ_NUM_NEW_OFFSET		16
#define MVPP2_RXQ_STATUS_REG(rxq)		(0x3400 + 4 * (rxq))
#define MVPP2_RXQ_OCCUPIED_MASK			0x3fff
#define MVPP2_RXQ_NON_OCCUPIED_OFFSET		16
#define MVPP2_RXQ_NON_OCCUPIED_MASK		0x3fff0000
#define MVPP2_RXQ_THRESH_REG			0x204c
#define MVPP2_OCCUPIED_THRESH_OFFSET		0
#define MVPP2_OCCUPIED_THRESH_MASK		0x3fff
#define MVPP2_RXQ_INDEX_REG			0x2050
#define MVPP2_TXQ_NUM_REG			0x2080
#define MVPP2_TXQ_DESC_ADDR_REG			0x2084
#define MVPP2_TXQ_DESC_SIZE_REG			0x2088
#define MVPP2_TXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_TXQ_THRESH_REG			0x2094
#define MVPP2_TXQ_THRESH_OFFSET			16
#define MVPP2_TXQ_THRESH_MASK			0x3fff
#define MVPP2_AGGR_TXQ_UPDATE_REG		0x2090
#define MVPP2_TXQ_INDEX_REG			0x2098
#define MVPP2_TXQ_PREF_BUF_REG			0x209c
#define MVPP2_PREF_BUF_PTR(desc)		((desc) & 0xfff)
#define MVPP2_PREF_BUF_SIZE_4			(BIT(12) | BIT(13))
#define MVPP2_PREF_BUF_SIZE_16			(BIT(12) | BIT(14))
#define MVPP2_PREF_BUF_THRESH(val)		((val) << 17)
#define MVPP2_TXQ_DRAIN_EN_MASK			BIT(31)
#define MVPP2_TXQ_PENDING_REG			0x20a0
#define MVPP2_TXQ_PENDING_MASK			0x3fff
#define MVPP2_TXQ_INT_STATUS_REG		0x20a4
#define MVPP2_TXQ_SENT_REG(txq)			(0x3c00 + 4 * (txq))
#define MVPP2_TRANSMITTED_COUNT_OFFSET		16
#define MVPP2_TRANSMITTED_COUNT_MASK		0x3fff0000
#define MVPP2_TXQ_RSVD_REQ_REG			0x20b0
#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET		16
#define MVPP2_TXQ_RSVD_RSLT_REG			0x20b4
#define MVPP2_TXQ_RSVD_RSLT_MASK		0x3fff
#define MVPP2_TXQ_RSVD_CLR_REG			0x20b8
#define MVPP2_TXQ_RSVD_CLR_OFFSET		16
#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu)	(0x2100 + 4 * (cpu))
#define MVPP22_AGGR_TXQ_DESC_ADDR_OFFS		8
#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu)	(0x2140 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_AGGR_TXQ_STATUS_REG(cpu)		(0x2180 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_PENDING_MASK		0x3fff
#define MVPP2_AGGR_TXQ_INDEX_REG(cpu)		(0x21c0 + 4 * (cpu))

/* MBUS bridge registers */
#define MVPP2_WIN_BASE(w)			(0x4000 + ((w) << 2))
#define MVPP2_WIN_SIZE(w)			(0x4020 + ((w) << 2))
#define MVPP2_WIN_REMAP(w)			(0x4040 + ((w) << 2))
#define MVPP2_BASE_ADDR_ENABLE			0x4060

/* AXI Bridge Registers */
#define MVPP22_AXI_BM_WR_ATTR_REG		0x4100
#define MVPP22_AXI_BM_RD_ATTR_REG		0x4104
#define MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG	0x4110
#define MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG	0x4114
#define MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG	0x4118
#define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG	0x411c
#define MVPP22_AXI_RX_DATA_WR_ATTR_REG		0x4120
#define MVPP22_AXI_TX_DATA_RD_ATTR_REG		0x4130
#define MVPP22_AXI_RD_NORMAL_CODE_REG		0x4150
#define MVPP22_AXI_RD_SNOOP_CODE_REG		0x4154
#define MVPP22_AXI_WR_NORMAL_CODE_REG		0x4160
#define MVPP22_AXI_WR_SNOOP_CODE_REG		0x4164

/* Values for AXI Bridge registers */
#define MVPP22_AXI_ATTR_CACHE_OFFS		0
#define MVPP22_AXI_ATTR_DOMAIN_OFFS		12

#define MVPP22_AXI_CODE_CACHE_OFFS		0
#define MVPP22_AXI_CODE_DOMAIN_OFFS		4

#define MVPP22_AXI_CODE_CACHE_NON_CACHE		0x3
#define MVPP22_AXI_CODE_CACHE_WR_CACHE		0x7
#define MVPP22_AXI_CODE_CACHE_RD_CACHE		0xb

#define MVPP22_AXI_CODE_DOMAIN_OUTER_DOM	2
#define MVPP22_AXI_CODE_DOMAIN_SYSTEM		3

/* Interrupt Cause and Mask registers */
#define MVPP2_ISR_TX_THRESHOLD_REG(port)	(0x5140 + 4 * (port))
#define MVPP2_MAX_ISR_TX_THRESHOLD		0xfffff0

#define MVPP2_ISR_RX_THRESHOLD_REG(rxq)		(0x5200 + 4 * (rxq))
#define MVPP2_MAX_ISR_RX_THRESHOLD		0xfffff0
#define MVPP21_ISR_RXQ_GROUP_REG(port)		(0x5400 + 4 * (port))

#define MVPP22_ISR_RXQ_GROUP_INDEX_REG		0x5400
#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf
#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK	0x380
#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET	7
#define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG	0x5404
#define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK	0x1f
#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK	0xf00
#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET	8

#define MVPP2_ISR_ENABLE_REG(port)		(0x5420 + 4 * (port))
#define MVPP2_ISR_ENABLE_INTERRUPT(mask)	((mask) & 0xffff)
#define MVPP2_ISR_DISABLE_INTERRUPT(mask)	(((mask) << 16) & 0xffff0000)
#define MVPP2_ISR_RX_TX_CAUSE_REG(port)		(0x5480 + 4 * (port))
#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK	0xff0000
#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET	16
#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK	BIT(24)
#define MVPP2_CAUSE_FCS_ERR_MASK		BIT(25)
#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK	BIT(26)
#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK	BIT(29)
#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK	BIT(30)
#define MVPP2_CAUSE_MISC_SUM_MASK		BIT(31)
#define MVPP2_ISR_RX_TX_MASK_REG(port)		(0x54a0 + 4 * (port))
#define MVPP2_ISR_PON_RX_TX_MASK_REG		0x54bc
#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK	0x3fc00000
#define MVPP2_PON_CAUSE_MISC_SUM_MASK		BIT(31)
#define MVPP2_ISR_MISC_CAUSE_REG		0x55b0

/* Buffer Manager registers */
#define MVPP2_BM_POOL_BASE_REG(pool)		(0x6000 + ((pool) * 4))
#define MVPP2_BM_POOL_BASE_ADDR_MASK		0xfffff80
#define MVPP2_BM_POOL_SIZE_REG(pool)		(0x6040 + ((pool) * 4))
#define MVPP2_BM_POOL_SIZE_MASK			0xfff0
#define MVPP2_BM_POOL_READ_PTR_REG(pool)	(0x6080 + ((pool) * 4))
#define MVPP2_BM_POOL_GET_READ_PTR_MASK		0xfff0
#define MVPP2_BM_POOL_PTRS_NUM_REG(pool)	(0x60c0 + ((pool) * 4))
#define MVPP2_BM_POOL_PTRS_NUM_MASK		0xfff0
#define MVPP2_BM_BPPI_READ_PTR_REG(pool)	(0x6100 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool)	(0x6140 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTR_NUM_MASK		0x7ff
#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK	BIT(16)
#define MVPP2_BM_POOL_CTRL_REG(pool)		(0x6200 + ((pool) * 4))
#define MVPP2_BM_START_MASK			BIT(0)
#define MVPP2_BM_STOP_MASK			BIT(1)
#define MVPP2_BM_STATE_MASK			BIT(4)
#define MVPP2_BM_LOW_THRESH_OFFS		8
#define MVPP2_BM_LOW_THRESH_MASK		0x7f00
#define MVPP2_BM_LOW_THRESH_VALUE(val)		((val) << \
						MVPP2_BM_LOW_THRESH_OFFS)
#define MVPP2_BM_HIGH_THRESH_OFFS		16
#define MVPP2_BM_HIGH_THRESH_MASK		0x7f0000
#define MVPP2_BM_HIGH_THRESH_VALUE(val)		((val) << \
						MVPP2_BM_HIGH_THRESH_OFFS)
#define MVPP2_BM_INTR_CAUSE_REG(pool)		(0x6240 + ((pool) * 4))
#define MVPP2_BM_RELEASED_DELAY_MASK		BIT(0)
#define MVPP2_BM_ALLOC_FAILED_MASK		BIT(1)
#define MVPP2_BM_BPPE_EMPTY_MASK		BIT(2)
#define MVPP2_BM_BPPE_FULL_MASK			BIT(3)
#define MVPP2_BM_AVAILABLE_BP_LOW_MASK		BIT(4)
#define MVPP2_BM_INTR_MASK_REG(pool)		(0x6280 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_REG(pool)		(0x6400 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK		BIT(0)
#define MVPP2_BM_VIRT_ALLOC_REG			0x6440
#define MVPP22_BM_ADDR_HIGH_ALLOC		0x6444
#define MVPP22_BM_ADDR_HIGH_PHYS_MASK		0xff
#define MVPP22_BM_ADDR_HIGH_VIRT_MASK		0xff00
#define MVPP22_BM_ADDR_HIGH_VIRT_SHIFT		8
#define MVPP2_BM_PHY_RLS_REG(pool)		(0x6480 + ((pool) * 4))
#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK		BIT(0)
#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK		BIT(1)
#define MVPP2_BM_PHY_RLS_GRNTD_MASK		BIT(2)
#define MVPP2_BM_VIRT_RLS_REG			0x64c0
#define MVPP22_BM_ADDR_HIGH_RLS_REG		0x64c4
#define MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK	0xff
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK	0xff00
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT	8

/* TX Scheduler registers */
#define MVPP2_TXP_SCHED_PORT_INDEX_REG		0x8000
#define MVPP2_TXP_SCHED_Q_CMD_REG		0x8004
#define MVPP2_TXP_SCHED_ENQ_MASK		0xff
#define MVPP2_TXP_SCHED_DISQ_OFFSET		8
#define MVPP2_TXP_SCHED_CMD_1_REG		0x8010
#define MVPP2_TXP_SCHED_PERIOD_REG		0x8018
#define MVPP2_TXP_SCHED_MTU_REG			0x801c
#define MVPP2_TXP_MTU_MAX			0x7FFFF
#define MVPP2_TXP_SCHED_REFILL_REG		0x8020
#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK	0x7ffff
#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK	0x3ff00000
#define MVPP2_TXP_REFILL_PERIOD_MASK(v)		((v) << 20)
#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG		0x8024
#define MVPP2_TXP_TOKEN_SIZE_MAX		0xffffffff
#define MVPP2_TXQ_SCHED_REFILL_REG(q)		(0x8040 + ((q) << 2))
#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK	0x7ffff
#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK	0x3ff00000
#define MVPP2_TXQ_REFILL_PERIOD_MASK(v)		((v) << 20)
#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q)	(0x8060 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_SIZE_MAX		0x7fffffff
#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q)	(0x8080 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_CNTR_MAX		0xffffffff

/* TX general registers */
#define MVPP2_TX_SNOOP_REG			0x8800
#define MVPP2_TX_PORT_FLUSH_REG			0x8810
#define MVPP2_TX_PORT_FLUSH_MASK(port)		(1 << (port))

/* LMS registers */
#define MVPP2_SRC_ADDR_MIDDLE			0x24
#define MVPP2_SRC_ADDR_HIGH			0x28
#define MVPP2_PHY_AN_CFG0_REG			0x34
#define MVPP2_PHY_AN_STOP_SMI0_MASK		BIT(7)
#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG	0x305c
#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT		0x27

/* Per-port registers */
#define MVPP2_GMAC_CTRL_0_REG			0x0
#define MVPP2_GMAC_PORT_EN_MASK			BIT(0)
#define MVPP2_GMAC_PORT_TYPE_MASK		BIT(1)
#define MVPP2_GMAC_MAX_RX_SIZE_OFFS		2
#define MVPP2_GMAC_MAX_RX_SIZE_MASK		0x7ffc
#define MVPP2_GMAC_MIB_CNTR_EN_MASK		BIT(15)
#define MVPP2_GMAC_CTRL_1_REG			0x4
#define MVPP2_GMAC_PERIODIC_XON_EN_MASK		BIT(1)
#define MVPP2_GMAC_GMII_LB_EN_MASK		BIT(5)
#define MVPP2_GMAC_PCS_LB_EN_BIT		6
#define MVPP2_GMAC_PCS_LB_EN_MASK		BIT(6)
#define MVPP2_GMAC_SA_LOW_OFFS			7
#define MVPP2_GMAC_CTRL_2_REG			0x8
#define MVPP2_GMAC_INBAND_AN_MASK		BIT(0)
#define MVPP2_GMAC_FLOW_CTRL_MASK		GENMASK(2, 1)
#define MVPP2_GMAC_PCS_ENABLE_MASK		BIT(3)
#define MVPP2_GMAC_PORT_RGMII_MASK		BIT(4)
#define MVPP2_GMAC_DISABLE_PADDING		BIT(5)
#define MVPP2_GMAC_PORT_RESET_MASK		BIT(6)
#define MVPP2_GMAC_AUTONEG_CONFIG		0xc
#define MVPP2_GMAC_FORCE_LINK_DOWN		BIT(0)
#define MVPP2_GMAC_FORCE_LINK_PASS		BIT(1)
#define MVPP2_GMAC_IN_BAND_AUTONEG		BIT(2)
#define MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS	BIT(3)
#define MVPP2_GMAC_CONFIG_MII_SPEED		BIT(5)
#define MVPP2_GMAC_CONFIG_GMII_SPEED		BIT(6)
#define MVPP2_GMAC_AN_SPEED_EN			BIT(7)
#define MVPP2_GMAC_FC_ADV_EN			BIT(9)
#define MVPP2_GMAC_FLOW_CTRL_AUTONEG		BIT(11)
#define MVPP2_GMAC_CONFIG_FULL_DUPLEX		BIT(12)
#define MVPP2_GMAC_AN_DUPLEX_EN			BIT(13)
#define MVPP2_GMAC_STATUS0			0x10
#define MVPP2_GMAC_STATUS0_LINK_UP		BIT(0)
#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG		0x1c
#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS		6
#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK	0x1fc0
#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v)	(((v) << 6) & \
					MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
#define MVPP22_GMAC_INT_STAT			0x20
#define MVPP22_GMAC_INT_STAT_LINK		BIT(1)
#define MVPP22_GMAC_INT_MASK			0x24
#define MVPP22_GMAC_INT_MASK_LINK_STAT		BIT(1)
#define MVPP22_GMAC_CTRL_4_REG			0x90
#define MVPP22_CTRL4_EXT_PIN_GMII_SEL		BIT(0)
#define MVPP22_CTRL4_DP_CLK_SEL			BIT(5)
#define MVPP22_CTRL4_SYNC_BYPASS_DIS		BIT(6)
#define MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE	BIT(7)
#define MVPP22_GMAC_INT_SUM_MASK		0xa4
#define MVPP22_GMAC_INT_SUM_MASK_LINK_STAT	BIT(1)

/* Per-port XGMAC registers. PPv2.2 only, only for GOP port 0,
 * relative to port->base.
 */
#define MVPP22_XLG_CTRL0_REG			0x100
#define MVPP22_XLG_CTRL0_PORT_EN		BIT(0)
#define MVPP22_XLG_CTRL0_MAC_RESET_DIS		BIT(1)
#define MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN	BIT(7)
#define MVPP22_XLG_CTRL0_MIB_CNT_DIS		BIT(14)
#define MVPP22_XLG_CTRL1_REG			0x104
#define MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS	0
#define MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK	0x1fff
#define MVPP22_XLG_STATUS			0x10c
#define MVPP22_XLG_STATUS_LINK_UP		BIT(0)
#define MVPP22_XLG_INT_STAT			0x114
#define MVPP22_XLG_INT_STAT_LINK		BIT(1)
#define MVPP22_XLG_INT_MASK			0x118
#define MVPP22_XLG_INT_MASK_LINK		BIT(1)
#define MVPP22_XLG_CTRL3_REG			0x11c
#define MVPP22_XLG_CTRL3_MACMODESELECT_MASK	(7 << 13)
#define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC	(0 << 13)
#define MVPP22_XLG_CTRL3_MACMODESELECT_10G	(1 << 13)
#define MVPP22_XLG_EXT_INT_MASK			0x15c
#define MVPP22_XLG_EXT_INT_MASK_XLG		BIT(1)
#define MVPP22_XLG_EXT_INT_MASK_GIG		BIT(2)
#define MVPP22_XLG_CTRL4_REG			0x184
#define MVPP22_XLG_CTRL4_FWD_FC			BIT(5)
#define MVPP22_XLG_CTRL4_FWD_PFC		BIT(6)
#define MVPP22_XLG_CTRL4_MACMODSELECT_GMAC	BIT(12)

/* SMI registers. PPv2.2 only, relative to priv->iface_base. */
#define MVPP22_SMI_MISC_CFG_REG			0x1204
#define MVPP22_SMI_POLLING_EN			BIT(10)

#define MVPP22_GMAC_BASE(port)		(0x7000 + (port) * 0x1000 + 0xe00)

#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK	0xff

/* Descriptor ring Macros */
#define MVPP2_QUEUE_NEXT_DESC(q, index) \
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
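
/* Example (illustrative only): with a 256-entry ring, last_desc is 255,
 * so MVPP2_QUEUE_NEXT_DESC(q, 254) yields 255 and
 * MVPP2_QUEUE_NEXT_DESC(q, 255) wraps the index back to 0.
 */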

/* MPCS registers. PPv2.2 only */
#define MVPP22_MPCS_BASE(port)			(0x7000 + (port) * 0x1000)
#define MVPP22_MPCS_CTRL			0x14
#define MVPP22_MPCS_CTRL_FWD_ERR_CONN		BIT(10)
#define MVPP22_MPCS_CLK_RESET			0x14c
#define MAC_CLK_RESET_SD_TX			BIT(0)
#define MAC_CLK_RESET_SD_RX			BIT(1)
#define MAC_CLK_RESET_MAC			BIT(2)
#define MVPP22_MPCS_CLK_RESET_DIV_RATIO(n)	((n) << 4)
#define MVPP22_MPCS_CLK_RESET_DIV_SET		BIT(11)

/* XPCS registers. PPv2.2 only */
#define MVPP22_XPCS_BASE(port)			(0x7400 + (port) * 0x1000)
#define MVPP22_XPCS_CFG0			0x0
#define MVPP22_XPCS_CFG0_PCS_MODE(n)		((n) << 3)
#define MVPP22_XPCS_CFG0_ACTIVE_LANE(n)		((n) << 5)

/* System controller registers. Accessed through a regmap. */
#define GENCONF_SOFT_RESET1			0x1108
#define GENCONF_SOFT_RESET1_GOP			BIT(6)
#define GENCONF_PORT_CTRL0			0x1110
#define GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT	BIT(1)
#define GENCONF_PORT_CTRL0_RX_DATA_SAMPLE	BIT(29)
#define GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR	BIT(31)
#define GENCONF_PORT_CTRL1			0x1114
#define GENCONF_PORT_CTRL1_EN(p)		BIT(p)
#define GENCONF_PORT_CTRL1_RESET(p)		(BIT(p) << 28)
#define GENCONF_CTRL0				0x1120
#define GENCONF_CTRL0_PORT0_RGMII		BIT(0)
#define GENCONF_CTRL0_PORT1_RGMII_MII		BIT(1)
#define GENCONF_CTRL0_PORT1_RGMII		BIT(2)

/* Various constants */

/* Coalescing */
#define MVPP2_TXDONE_COAL_PKTS_THRESH	15
#define MVPP2_TXDONE_HRTIMER_PERIOD_NS	1000000UL
#define MVPP2_TXDONE_COAL_USEC		1000
#define MVPP2_RX_COAL_PKTS		32
#define MVPP2_RX_COAL_USEC		100

/* The two bytes Marvell header. Either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is filled automatically by zeroes on
 * the RX side. Those two bytes being at the front of the Ethernet
 * header, they allow the IP header to be aligned on a 4-byte boundary
 * automatically: the hardware skips those two bytes on its own.
 */
#define MVPP2_MH_SIZE			2
#define MVPP2_ETH_TYPE_LEN		2
#define MVPP2_PPPOE_HDR_SIZE		8
#define MVPP2_VLAN_TAG_LEN		4

/* Lbtd 802.3 type */
#define MVPP2_IP_LBDT_TYPE		0xfffa

#define MVPP2_TX_CSUM_MAX_SIZE		9800

/* Timeout constants */
#define MVPP2_TX_DISABLE_TIMEOUT_MSEC	1000
#define MVPP2_TX_PENDING_TIMEOUT_MSEC	1000

#define MVPP2_TX_MTU_MAX		0x7ffff

/* Maximum number of T-CONTs of PON port */
#define MVPP2_MAX_TCONT			16

/* Maximum number of supported ports */
#define MVPP2_MAX_PORTS			4

/* Maximum number of TXQs used by single port */
#define MVPP2_MAX_TXQ			8

/* Default number of RXQs in use */
#define MVPP2_DEFAULT_RXQ		4

/* Max number of Rx descriptors */
#define MVPP2_MAX_RXD			128

/* Max number of Tx descriptors */
#define MVPP2_MAX_TXD			1024

/* Amount of Tx descriptors that can be reserved at once by CPU */
#define MVPP2_CPU_DESC_CHUNK		64

/* Max number of Tx descriptors in each aggregated queue */
#define MVPP2_AGGR_TXQ_SIZE		256

/* Descriptor aligned size */
#define MVPP2_DESC_ALIGNED_SIZE		32

/* Descriptor alignment mask */
#define MVPP2_TX_DESC_ALIGN		(MVPP2_DESC_ALIGNED_SIZE - 1)

/* RX FIFO constants */
#define MVPP2_RX_FIFO_PORT_DATA_SIZE	0x2000
#define MVPP2_RX_FIFO_PORT_ATTR_SIZE	0x80
#define MVPP2_RX_FIFO_PORT_MIN_PKT	0x80

/* RX buffer constants */
#define MVPP2_SKB_SHINFO_SIZE \
	SKB_DATA_ALIGN(sizeof(struct skb_shared_info))

#define MVPP2_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN, cache_line_size())

#define MVPP2_RX_BUF_SIZE(pkt_size)	((pkt_size) + NET_SKB_PAD)
#define MVPP2_RX_TOTAL_SIZE(buf_size)	((buf_size) + MVPP2_SKB_SHINFO_SIZE)
#define MVPP2_RX_MAX_PKT_SIZE(total_size)	\
	((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
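
/* Worked example (illustrative, assuming a 64-byte cache line and a
 * NET_SKB_PAD of 64): for an MTU of 1500,
 *
 *	MVPP2_RX_PKT_SIZE(1500)   = ALIGN(1500 + 2 + 4 + 14 + 4, 64) = 1536
 *	MVPP2_RX_BUF_SIZE(1536)   = 1536 + 64 = 1600
 *	MVPP2_RX_TOTAL_SIZE(1600) = 1600 + MVPP2_SKB_SHINFO_SIZE
 *
 * MVPP2_RX_MAX_PKT_SIZE() inverts the two additions, recovering the
 * largest packet that fits in a buffer of a given total size.
 */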

#define MVPP2_BIT_TO_BYTE(bit)		((bit) / 8)

/* IPv6 max L3 address size */
#define MVPP2_MAX_L3_ADDR_SIZE		16

/* Port flags */
#define MVPP2_F_LOOPBACK		BIT(0)

/* Marvell tag types */
enum mvpp2_tag_type {
	MVPP2_TAG_TYPE_NONE = 0,
	MVPP2_TAG_TYPE_MH = 1,
	MVPP2_TAG_TYPE_DSA = 2,
	MVPP2_TAG_TYPE_EDSA = 3,
	MVPP2_TAG_TYPE_VLAN = 4,
	MVPP2_TAG_TYPE_LAST = 5
};

/* Parser constants */
#define MVPP2_PRS_TCAM_SRAM_SIZE	256
#define MVPP2_PRS_TCAM_WORDS		6
#define MVPP2_PRS_SRAM_WORDS		4
#define MVPP2_PRS_FLOW_ID_SIZE		64
#define MVPP2_PRS_FLOW_ID_MASK		0x3f
#define MVPP2_PRS_TCAM_ENTRY_INVALID	1
#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT	BIT(5)
#define MVPP2_PRS_IPV4_HEAD		0x40
#define MVPP2_PRS_IPV4_HEAD_MASK	0xf0
#define MVPP2_PRS_IPV4_MC		0xe0
#define MVPP2_PRS_IPV4_MC_MASK		0xf0
#define MVPP2_PRS_IPV4_BC_MASK		0xff
#define MVPP2_PRS_IPV4_IHL		0x5
#define MVPP2_PRS_IPV4_IHL_MASK		0xf
#define MVPP2_PRS_IPV6_MC		0xff
#define MVPP2_PRS_IPV6_MC_MASK		0xff
#define MVPP2_PRS_IPV6_HOP_MASK		0xff
#define MVPP2_PRS_TCAM_PROTO_MASK	0xff
#define MVPP2_PRS_TCAM_PROTO_MASK_L	0x3f
#define MVPP2_PRS_DBL_VLANS_MAX		100

/* Tcam structure:
 * - lookup ID - 4 bits
 * - port ID - 1 byte
 * - additional information - 1 byte
 * - header data - 8 bytes
 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
 */
#define MVPP2_PRS_AI_BITS			8
#define MVPP2_PRS_PORT_MASK			0xff
#define MVPP2_PRS_LU_MASK			0xf
#define MVPP2_PRS_TCAM_DATA_BYTE(offs)		\
	(((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)	\
	(((offs) * 2) - ((offs) % 2) + 2)
#define MVPP2_PRS_TCAM_AI_BYTE			16
#define MVPP2_PRS_TCAM_PORT_BYTE		17
#define MVPP2_PRS_TCAM_LU_BYTE			20
#define MVPP2_PRS_TCAM_EN_OFFS(offs)		((offs) + 2)
#define MVPP2_PRS_TCAM_INV_WORD			5
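
/* Illustrative sketch of the byte swizzle above (not driver code): each
 * 32-bit TCAM data register holds two data bytes in its low half and
 * their enable (mask) bytes in its high half, so
 *
 *	MVPP2_PRS_TCAM_DATA_BYTE(0) = 0,  MVPP2_PRS_TCAM_DATA_BYTE_EN(0) = 2
 *	MVPP2_PRS_TCAM_DATA_BYTE(1) = 1,  MVPP2_PRS_TCAM_DATA_BYTE_EN(1) = 3
 *	MVPP2_PRS_TCAM_DATA_BYTE(2) = 4,  MVPP2_PRS_TCAM_DATA_BYTE_EN(2) = 6
 *	MVPP2_PRS_TCAM_DATA_BYTE(3) = 5,  MVPP2_PRS_TCAM_DATA_BYTE_EN(3) = 7
 *
 * i.e. header-data byte N lands in tcam.byte[] next to the byte that
 * enables its comparison.
 */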

/* Tcam entries ID */
#define MVPP2_PE_DROP_ALL		0
#define MVPP2_PE_FIRST_FREE_TID		1
#define MVPP2_PE_LAST_FREE_TID		(MVPP2_PRS_TCAM_SRAM_SIZE - 31)
#define MVPP2_PE_IP6_EXT_PROTO_UN	(MVPP2_PRS_TCAM_SRAM_SIZE - 30)
#define MVPP2_PE_MAC_MC_IP6		(MVPP2_PRS_TCAM_SRAM_SIZE - 29)
#define MVPP2_PE_IP6_ADDR_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 28)
#define MVPP2_PE_IP4_ADDR_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 27)
#define MVPP2_PE_LAST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 26)
#define MVPP2_PE_FIRST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 19)
#define MVPP2_PE_EDSA_TAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 18)
#define MVPP2_PE_EDSA_UNTAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 17)
#define MVPP2_PE_DSA_TAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 16)
#define MVPP2_PE_DSA_UNTAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 15)
#define MVPP2_PE_ETYPE_EDSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 14)
#define MVPP2_PE_ETYPE_EDSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 13)
#define MVPP2_PE_ETYPE_DSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 12)
#define MVPP2_PE_ETYPE_DSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 11)
#define MVPP2_PE_MH_DEFAULT		(MVPP2_PRS_TCAM_SRAM_SIZE - 10)
#define MVPP2_PE_DSA_DEFAULT		(MVPP2_PRS_TCAM_SRAM_SIZE - 9)
#define MVPP2_PE_IP6_PROTO_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 8)
#define MVPP2_PE_IP4_PROTO_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 7)
#define MVPP2_PE_ETH_TYPE_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 6)
#define MVPP2_PE_VLAN_DBL		(MVPP2_PRS_TCAM_SRAM_SIZE - 5)
#define MVPP2_PE_VLAN_NONE		(MVPP2_PRS_TCAM_SRAM_SIZE - 4)
#define MVPP2_PE_MAC_MC_ALL		(MVPP2_PRS_TCAM_SRAM_SIZE - 3)
#define MVPP2_PE_MAC_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 2)
#define MVPP2_PE_MAC_NON_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 1)

/* Sram structure
 * The fields are represented by MVPP2_PRS_SRAM_DATA_REG(3)->(0).
 */
#define MVPP2_PRS_SRAM_RI_OFFS			0
#define MVPP2_PRS_SRAM_RI_WORD			0
#define MVPP2_PRS_SRAM_RI_CTRL_OFFS		32
#define MVPP2_PRS_SRAM_RI_CTRL_WORD		1
#define MVPP2_PRS_SRAM_RI_CTRL_BITS		32
#define MVPP2_PRS_SRAM_SHIFT_OFFS		64
#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT		72
#define MVPP2_PRS_SRAM_UDF_OFFS			73
#define MVPP2_PRS_SRAM_UDF_BITS			8
#define MVPP2_PRS_SRAM_UDF_MASK			0xff
#define MVPP2_PRS_SRAM_UDF_SIGN_BIT		81
#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS		82
#define MVPP2_PRS_SRAM_UDF_TYPE_MASK		0x7
#define MVPP2_PRS_SRAM_UDF_TYPE_L3		1
#define MVPP2_PRS_SRAM_UDF_TYPE_L4		4
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS	85
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK	0x3
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD		1
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD	2
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD	3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS		87
#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS		2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK		0x3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD		0
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD	2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD	3
#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS		89
#define MVPP2_PRS_SRAM_AI_OFFS			90
#define MVPP2_PRS_SRAM_AI_CTRL_OFFS		98
#define MVPP2_PRS_SRAM_AI_CTRL_BITS		8
#define MVPP2_PRS_SRAM_AI_MASK			0xff
#define MVPP2_PRS_SRAM_NEXT_LU_OFFS		106
#define MVPP2_PRS_SRAM_NEXT_LU_MASK		0xf
#define MVPP2_PRS_SRAM_LU_DONE_BIT		110
#define MVPP2_PRS_SRAM_LU_GEN_BIT		111

/* Sram result info bits assignment */
#define MVPP2_PRS_RI_MAC_ME_MASK		0x1
#define MVPP2_PRS_RI_DSA_MASK			0x2
#define MVPP2_PRS_RI_VLAN_MASK			(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_VLAN_NONE			0x0
#define MVPP2_PRS_RI_VLAN_SINGLE		BIT(2)
#define MVPP2_PRS_RI_VLAN_DOUBLE		BIT(3)
#define MVPP2_PRS_RI_VLAN_TRIPLE		(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_CPU_CODE_MASK		0x70
#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC		BIT(4)
#define MVPP2_PRS_RI_L2_CAST_MASK		(BIT(9) | BIT(10))
#define MVPP2_PRS_RI_L2_UCAST			0x0
#define MVPP2_PRS_RI_L2_MCAST			BIT(9)
#define MVPP2_PRS_RI_L2_BCAST			BIT(10)
#define MVPP2_PRS_RI_PPPOE_MASK			0x800
#define MVPP2_PRS_RI_L3_PROTO_MASK		(BIT(12) | BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_UN			0x0
#define MVPP2_PRS_RI_L3_IP4			BIT(12)
#define MVPP2_PRS_RI_L3_IP4_OPT			BIT(13)
#define MVPP2_PRS_RI_L3_IP4_OTHER		(BIT(12) | BIT(13))
#define MVPP2_PRS_RI_L3_IP6			BIT(14)
#define MVPP2_PRS_RI_L3_IP6_EXT			(BIT(12) | BIT(14))
#define MVPP2_PRS_RI_L3_ARP			(BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_ADDR_MASK		(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_L3_UCAST			0x0
#define MVPP2_PRS_RI_L3_MCAST			BIT(15)
#define MVPP2_PRS_RI_L3_BCAST			(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_IP_FRAG_MASK		0x20000
#define MVPP2_PRS_RI_UDF3_MASK			0x300000
#define MVPP2_PRS_RI_UDF3_RX_SPECIAL		BIT(21)
#define MVPP2_PRS_RI_L4_PROTO_MASK		0x1c00000
#define MVPP2_PRS_RI_L4_TCP			BIT(22)
#define MVPP2_PRS_RI_L4_UDP			BIT(23)
#define MVPP2_PRS_RI_L4_OTHER			(BIT(22) | BIT(23))
#define MVPP2_PRS_RI_UDF7_MASK			0x60000000
#define MVPP2_PRS_RI_UDF7_IP6_LITE		BIT(29)
#define MVPP2_PRS_RI_DROP_MASK			0x80000000

/* Sram additional info bits assignment */
#define MVPP2_PRS_IPV4_DIP_AI_BIT		BIT(0)
#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT		BIT(0)
#define MVPP2_PRS_IPV6_EXT_AI_BIT		BIT(1)
#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT		BIT(2)
#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT	BIT(3)
#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT		BIT(4)
#define MVPP2_PRS_SINGLE_VLAN_AI		0
#define MVPP2_PRS_DBL_VLAN_AI_BIT		BIT(7)

/* DSA/EDSA type */
#define MVPP2_PRS_TAGGED		true
#define MVPP2_PRS_UNTAGGED		false
#define MVPP2_PRS_EDSA			true
#define MVPP2_PRS_DSA			false

/* MAC entries, shadow udf */
enum mvpp2_prs_udf {
	MVPP2_PRS_UDF_MAC_DEF,
	MVPP2_PRS_UDF_MAC_RANGE,
	MVPP2_PRS_UDF_L2_DEF,
	MVPP2_PRS_UDF_L2_DEF_COPY,
	MVPP2_PRS_UDF_L2_USER,
};

enum mvpp2_prs_lookup {
	MVPP2_PRS_LU_MH,
	MVPP2_PRS_LU_MAC,
	MVPP2_PRS_LU_DSA,
	MVPP2_PRS_LU_VLAN,
	MVPP2_PRS_LU_L2,
	MVPP2_PRS_LU_PPPOE,
	MVPP2_PRS_LU_IP4,
	MVPP2_PRS_LU_IP6,
	MVPP2_PRS_LU_FLOWS,
	MVPP2_PRS_LU_LAST,
};

enum mvpp2_prs_l3_cast {
	MVPP2_PRS_L3_UNI_CAST,
	MVPP2_PRS_L3_MULTI_CAST,
	MVPP2_PRS_L3_BROAD_CAST
};

/* Classifier constants */
#define MVPP2_CLS_FLOWS_TBL_SIZE	512
#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS	3
#define MVPP2_CLS_LKP_TBL_SIZE		64

/* BM constants */
#define MVPP2_BM_POOLS_NUM		8
#define MVPP2_BM_LONG_BUF_NUM		1024
#define MVPP2_BM_SHORT_BUF_NUM		2048
#define MVPP2_BM_POOL_SIZE_MAX		(16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
#define MVPP2_BM_POOL_PTR_ALIGN		128
#define MVPP2_BM_SWF_LONG_POOL(port)	((port > 2) ? 2 : port)
#define MVPP2_BM_SWF_SHORT_POOL		3

/* BM cookie (32 bits) definition */
#define MVPP2_BM_COOKIE_POOL_OFFS	8
#define MVPP2_BM_COOKIE_CPU_OFFS	24

/* BM short pool packet size
 * These values ensure that for SWF the total number
 * of bytes allocated for each buffer will be 512
 */
#define MVPP2_BM_SHORT_PKT_SIZE		MVPP2_RX_MAX_PKT_SIZE(512)
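
/* Worked example (illustrative; the exact numbers depend on the kernel
 * configuration): with NET_SKB_PAD = 64 and MVPP2_SKB_SHINFO_SIZE = 320,
 *
 *	MVPP2_BM_SHORT_PKT_SIZE = 512 - 64 - 320 = 128
 *
 * so a short-pool packet plus skb overhead still fits in 512 bytes.
 */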

#define MVPP21_ADDR_SPACE_SZ		0
#define MVPP22_ADDR_SPACE_SZ		SZ_64K

#define MVPP2_MAX_THREADS		8
#define MVPP2_MAX_QVECS			MVPP2_MAX_THREADS

/* Shared Packet Processor resources */
struct mvpp2 {
	/* Shared registers' base addresses */
	void __iomem *lms_base;
	void __iomem *iface_base;

	/* On PPv2.2, each "software thread" can access the base
	 * register through a separate address space, each 64 KB apart
	 * from each other. Typically, such address spaces will be
	 * used per CPU.
	 */
	void __iomem *swth_base[MVPP2_MAX_THREADS];

	/* On PPv2.2, some port control registers are located in the system
	 * controller space. These registers are accessible through a regmap.
	 */
	struct regmap *sysctrl_base;

	/* List of pointers to port structures */
	struct mvpp2_port **port_list;

	/* Aggregated TXQs */
	struct mvpp2_tx_queue *aggr_txqs;

	/* BM pools */
	struct mvpp2_bm_pool *bm_pools;

	/* PRS shadow table */
	struct mvpp2_prs_shadow *prs_shadow;
	/* PRS auxiliary table for double vlan entries control */
	bool *prs_double_vlans;

	/* Tclk value */
	u32 tclk;

	/* HW version */
	enum { MVPP21, MVPP22 } hw_version;

	/* Maximum number of RXQs per port */
	unsigned int max_port_rxqs;
};

struct mvpp2_pcpu_stats {
	struct u64_stats_sync syncp;
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
};

/* Per-CPU port control */
struct mvpp2_port_pcpu {
	struct hrtimer tx_done_timer;
	bool timer_scheduled;
	/* Tasklet for egress finalization */
	struct tasklet_struct tx_done_tasklet;
};

struct mvpp2_queue_vector {
	int irq;
	struct napi_struct napi;
	enum { MVPP2_QUEUE_VECTOR_SHARED, MVPP2_QUEUE_VECTOR_PRIVATE } type;
	int sw_thread_id;
	u16 sw_thread_mask;
	int first_rxq;
	int nrxqs;
	u32 pending_cause_rx;
	struct mvpp2_port *port;
};

struct mvpp2_port {
	u8 id;

	/* Index of the port from the "group of ports" complex point
	 * of view
	 */
	int gop_id;

	struct mvpp2 *priv;

	/* Per-port registers' base address */
	void __iomem *base;

	struct mvpp2_rx_queue **rxqs;
	unsigned int nrxqs;
	struct mvpp2_tx_queue **txqs;
	unsigned int ntxqs;
	struct net_device *dev;

	/* Per-CPU port control */
	struct mvpp2_port_pcpu __percpu *pcpu;

	/* Flags */
	unsigned long flags;

	u16 tx_ring_size;
	u16 rx_ring_size;
	struct mvpp2_pcpu_stats __percpu *stats;

	phy_interface_t phy_interface;
	struct device_node *phy_node;

	struct mvpp2_bm_pool *pool_long;
	struct mvpp2_bm_pool *pool_short;

	/* Index of first port's physical RXQ */
	u8 first_rxq;

	struct mvpp2_queue_vector qvecs[MVPP2_MAX_QVECS];
	unsigned int nqvecs;
};

/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design
 */

#define MVPP2_TXD_L3_OFF_SHIFT		0
#define MVPP2_TXD_IP_HLEN_SHIFT		8
#define MVPP2_TXD_L4_CSUM_FRAG		BIT(13)
#define MVPP2_TXD_L4_CSUM_NOT		BIT(14)
#define MVPP2_TXD_IP_CSUM_DISABLE	BIT(15)
#define MVPP2_TXD_PADDING_DISABLE	BIT(23)
#define MVPP2_TXD_L4_UDP		BIT(24)
#define MVPP2_TXD_L3_IP6		BIT(26)
#define MVPP2_TXD_L_DESC		BIT(28)
#define MVPP2_TXD_F_DESC		BIT(29)

#define MVPP2_RXD_ERR_SUMMARY		BIT(15)
#define MVPP2_RXD_ERR_CODE_MASK		(BIT(13) | BIT(14))
#define MVPP2_RXD_ERR_CRC		0x0
#define MVPP2_RXD_ERR_OVERRUN		BIT(13)
#define MVPP2_RXD_ERR_RESOURCE		(BIT(13) | BIT(14))
#define MVPP2_RXD_BM_POOL_ID_OFFS	16
#define MVPP2_RXD_BM_POOL_ID_MASK	(BIT(16) | BIT(17) | BIT(18))
#define MVPP2_RXD_HWF_SYNC		BIT(21)
#define MVPP2_RXD_L4_CSUM_OK		BIT(22)
#define MVPP2_RXD_IP4_HEADER_ERR	BIT(24)
#define MVPP2_RXD_L4_TCP		BIT(25)
#define MVPP2_RXD_L4_UDP		BIT(26)
#define MVPP2_RXD_L3_IP4		BIT(28)
#define MVPP2_RXD_L3_IP6		BIT(30)
#define MVPP2_RXD_BUF_HDR		BIT(31)

/* HW TX descriptor for PPv2.1 */
struct mvpp21_tx_desc {
	u32 command;		/* Options used by HW for packet transmitting.*/
	u8  packet_offset;	/* the offset from the buffer beginning	*/
	u8  phys_txq;		/* destination queue ID			*/
	u16 data_size;		/* data size of transmitted packet in bytes */
	u32 buf_dma_addr;	/* physical addr of transmitted buffer	*/
	u32 buf_cookie;		/* cookie for access to TX buffer in tx path */
	u32 reserved1[3];	/* hw_cmd (for future use, BM, PON, PNC) */
	u32 reserved2;		/* reserved (for future use)		*/
};

/* HW RX descriptor for PPv2.1 */
struct mvpp21_rx_desc {
	u32 status;		/* info about received packet		*/
	u16 reserved1;		/* parser_info (for future use, PnC)	*/
	u16 data_size;		/* size of received packet in bytes	*/
	u32 buf_dma_addr;	/* physical address of the buffer	*/
	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */
	u16 reserved2;		/* gem_port_id (for future use, PON)	*/
	u16 reserved3;		/* csum_l4 (for future use, PnC)	*/
	u8  reserved4;		/* bm_qset (for future use, BM)		*/
	u8  reserved5;
	u16 reserved6;		/* classify_info (for future use, PnC)	*/
	u32 reserved7;		/* flow_id (for future use, PnC)	*/
	u32 reserved8;
};

/* HW TX descriptor for PPv2.2 */
struct mvpp22_tx_desc {
	u32 command;
	u8  packet_offset;
	u8  phys_txq;
	u16 data_size;
	u64 reserved1;
	u64 buf_dma_addr_ptp;
	u64 buf_cookie_misc;
};

/* HW RX descriptor for PPv2.2 */
struct mvpp22_rx_desc {
	u32 status;
	u16 reserved1;
	u16 data_size;
	u32 reserved2;
	u32 reserved3;
	u64 buf_dma_addr_key_hash;
	u64 buf_cookie_misc;
};

/* Opaque type used by the driver to manipulate the HW TX and RX
 * descriptors
 */
struct mvpp2_tx_desc {
	union {
		struct mvpp21_tx_desc pp21;
		struct mvpp22_tx_desc pp22;
	};
};

struct mvpp2_rx_desc {
	union {
		struct mvpp21_rx_desc pp21;
		struct mvpp22_rx_desc pp22;
	};
};

struct mvpp2_txq_pcpu_buf {
	/* Transmitted SKB */
	struct sk_buff *skb;

	/* Physical address of transmitted buffer */
	dma_addr_t dma;

	/* Size transmitted */
	size_t size;
};

/* Per-CPU Tx queue control */
struct mvpp2_txq_pcpu {
	int cpu;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptor in the
	 * descriptor ring
	 */
	int count;

	/* Number of Tx DMA descriptors reserved for each CPU */
	int reserved_num;

	/* Infos about transmitted buffers */
	struct mvpp2_txq_pcpu_buf *buffs;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;

	/* DMA buffer for TSO headers */
	char *tso_headers;
	dma_addr_t tso_headers_dma;
};

struct mvpp2_tx_queue {
	/* Physical number of this Tx queue */
	u8 id;

	/* Logical number of this Tx queue */
	u8 log_id;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptor in the descriptor ring */
	int count;

	/* Per-CPU control of physical Tx queues */
	struct mvpp2_txq_pcpu __percpu *pcpu;

	/* Virtual address of the Tx DMA descriptors array */
	struct mvpp2_tx_desc *descs;

	/* DMA address of the Tx DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last Tx DMA descriptor */
	int last_desc;

	/* Index of the next Tx DMA descriptor to process */
	int next_desc_to_proc;
};

struct mvpp2_rx_queue {
	/* RX queue number, in the range 0-31 for physical RXQs */
	u8 id;

	/* Num of rx descriptors in the rx descriptor ring */
	int size;

	/* Virtual address of the RX DMA descriptors array */
	struct mvpp2_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;

	/* ID of port to which physical RXQ is mapped */
	int port;

	/* Port's logic RXQ number to which physical RXQ is mapped */
	int logic_rxq;
};

union mvpp2_prs_tcam_entry {
	u32 word[MVPP2_PRS_TCAM_WORDS];
	u8  byte[MVPP2_PRS_TCAM_WORDS * 4];
};

union mvpp2_prs_sram_entry {
	u32 word[MVPP2_PRS_SRAM_WORDS];
	u8  byte[MVPP2_PRS_SRAM_WORDS * 4];
};

struct mvpp2_prs_entry {
	u32 index;
	union mvpp2_prs_tcam_entry tcam;
	union mvpp2_prs_sram_entry sram;
};

struct mvpp2_prs_shadow {
	bool valid;
	bool finish;

	/* Lookup ID */
	int lu;

	/* User defined offset */
	int udf;

	/* Result info */
	u32 ri;
	u32 ri_mask;
};

struct mvpp2_cls_flow_entry {
	u32 index;
	u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
};

struct mvpp2_cls_lookup_entry {
	u32 lkpid;
	u32 way;
	u32 data;
};

struct mvpp2_bm_pool {
	/* Pool number in the range 0-7 */
	int id;
	enum mvpp2_bm_type type;

	/* Buffer Pointers Pool External (BPPE) size */
	int size;
	/* BPPE size in bytes */
	int size_bytes;
	/* Number of buffers for this pool */
	int buf_num;
	/* Pool buffer size */
	int buf_size;

	/* BPPE virtual base address */
	u32 *virt_addr;
	/* BPPE DMA base address */
	dma_addr_t dma_addr;

	/* Ports using BM pool */
	u32 port_map;
};

#define MVPP2_QDIST_SINGLE_MODE	0
#define MVPP2_QDIST_MULTI_MODE	1

static int queue_mode = MVPP2_QDIST_SINGLE_MODE;

module_param(queue_mode, int, 0444);
MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)");
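
/* Usage note (illustrative): the RX queue distribution mode can be
 * chosen at module load time, e.g.
 *
 *	modprobe mvpp2 queue_mode=1
 *
 * to select MVPP2_QDIST_MULTI_MODE; single mode is the default.
 */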

#define MVPP2_DRIVER_NAME "mvpp2"
#define MVPP2_DRIVER_VERSION "1.0"

/* Utility/helper methods */

static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->swth_base[0] + offset);
}

static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->swth_base[0] + offset);
}

/* These accessors should be used to access:
 *
 * - per-CPU registers, where each CPU has its own copy of the
 *   register.
 *
 *   MVPP2_BM_VIRT_ALLOC_REG
 *   MVPP2_BM_ADDR_HIGH_ALLOC
 *   MVPP22_BM_ADDR_HIGH_RLS_REG
 *   MVPP2_BM_VIRT_RLS_REG
 *   MVPP2_ISR_RX_TX_CAUSE_REG
 *   MVPP2_ISR_RX_TX_MASK_REG
 *   MVPP2_TXQ_NUM_REG
 *   MVPP2_AGGR_TXQ_UPDATE_REG
 *   MVPP2_TXQ_RSVD_REQ_REG
 *   MVPP2_TXQ_RSVD_RSLT_REG
 *   MVPP2_TXQ_SENT_REG
 *   MVPP2_RXQ_NUM_REG
 *
 * - global registers that must be accessed through a specific CPU
 *   window, because they are related to an access to a per-CPU
 *   register
 *
 *   MVPP2_BM_PHY_ALLOC_REG    (related to MVPP2_BM_VIRT_ALLOC_REG)
 *   MVPP2_BM_PHY_RLS_REG      (related to MVPP2_BM_VIRT_RLS_REG)
 *   MVPP2_RXQ_THRESH_REG      (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_ADDR_REG   (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_SIZE_REG   (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_INDEX_REG       (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_TXQ_PENDING_REG     (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_ADDR_REG   (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_SIZE_REG   (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_INDEX_REG       (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_PREF_BUF_REG    (related to MVPP2_TXQ_NUM_REG)
 */
static void mvpp2_percpu_write(struct mvpp2 *priv, int cpu,
			       u32 offset, u32 data)
{
	writel(data, priv->swth_base[cpu] + offset);
}

static u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu,
			     u32 offset)
{
	return readl(priv->swth_base[cpu] + offset);
}
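
/* Example (illustrative only) of the indirect access pattern described
 * above: configuring the occupied-descriptor threshold of one RXQ. The
 * queue is first selected through MVPP2_RXQ_NUM_REG, then the related
 * per-queue register is written through the same CPU window:
 *
 *	mvpp2_percpu_write(priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
 *	mvpp2_percpu_write(priv, cpu, MVPP2_RXQ_THRESH_REG, pkts_coal);
 *
 * Both accesses must use the same address space, otherwise the
 * select/access pair could be split across CPU windows.
 */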

static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return tx_desc->pp21.buf_dma_addr;
	else
		return tx_desc->pp22.buf_dma_addr_ptp & GENMASK_ULL(40, 0);
}

static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
				      struct mvpp2_tx_desc *tx_desc,
				      dma_addr_t dma_addr)
{
	if (port->priv->hw_version == MVPP21) {
		tx_desc->pp21.buf_dma_addr = dma_addr;
	} else {
		u64 val = (u64)dma_addr;

		tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0);
		tx_desc->pp22.buf_dma_addr_ptp |= val;
	}
}
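
/* Illustrative note (not from the original source): on PPv2.2 the
 * 64-bit buf_dma_addr_ptp field packs the DMA address in bits [40:0]
 * (hence GENMASK_ULL(40, 0)) together with other state (PTP-related,
 * per the field name) in the upper bits, which is why the setter masks
 * and merges instead of assigning the whole field:
 *
 *	mvpp2_txdesc_dma_addr_set(port, desc, dma);
 *	WARN_ON(mvpp2_txdesc_dma_addr_get(port, desc) !=
 *		(dma & GENMASK_ULL(40, 0)));
 */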

static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return tx_desc->pp21.data_size;
	else
		return tx_desc->pp22.data_size;
}

static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
				  struct mvpp2_tx_desc *tx_desc,
				  size_t size)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.data_size = size;
	else
		tx_desc->pp22.data_size = size;
}

static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int txq)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.phys_txq = txq;
	else
		tx_desc->pp22.phys_txq = txq;
}

static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int command)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.command = command;
	else
		tx_desc->pp22.command = command;
}

static void mvpp2_txdesc_offset_set(struct mvpp2_port *port,
				    struct mvpp2_tx_desc *tx_desc,
				    unsigned int offset)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.packet_offset = offset;
	else
		tx_desc->pp22.packet_offset = offset;
}

static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return tx_desc->pp21.packet_offset;
	else
		return tx_desc->pp22.packet_offset;
}

static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.buf_dma_addr;
	else
		return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0);
}

static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
					     struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.buf_cookie;
	else
		return rx_desc->pp22.buf_cookie_misc & GENMASK_ULL(40, 0);
}

static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.data_size;
	else
		return rx_desc->pp22.data_size;
}

static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
				   struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.status;
	else
		return rx_desc->pp22.status;
}

static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{
	txq_pcpu->txq_get_index++;
	if (txq_pcpu->txq_get_index == txq_pcpu->size)
		txq_pcpu->txq_get_index = 0;
}

static void mvpp2_txq_inc_put(struct mvpp2_port *port,
			      struct mvpp2_txq_pcpu *txq_pcpu,
			      struct sk_buff *skb,
			      struct mvpp2_tx_desc *tx_desc)
{
	struct mvpp2_txq_pcpu_buf *tx_buf =
		txq_pcpu->buffs + txq_pcpu->txq_put_index;
	tx_buf->skb = skb;
	tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc);
	tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) +
		mvpp2_txdesc_offset_get(port, tx_desc);
	txq_pcpu->txq_put_index++;
	if (txq_pcpu->txq_put_index == txq_pcpu->size)
		txq_pcpu->txq_put_index = 0;
}

/* Get number of physical egress port */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
	return MVPP2_MAX_TCONT + port->id;
}

/* Get number of physical TXQ */
static inline int mvpp2_txq_phys(int port, int txq)
{
	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}
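
/* Worked example (illustrative only): for port 1, logical TXQ 2, the
 * physical TXQ number is (16 + 1) * 8 + 2 = 138; the first
 * MVPP2_MAX_TCONT * MVPP2_MAX_TXQ entries are left for the PON port's
 * T-CONT queues, mirroring the egress-port numbering above.
 */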

/* Parser configuration routines */

/* Update parser tcam and sram hw entries */
static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Clear entry invalidation bit */
	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);

	return 0;
}

/* Read tcam entry from hw */
static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);

	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
			      MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
	if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
		return MVPP2_PRS_TCAM_ENTRY_INVALID;

	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));

	return 0;
}

/* Invalidate tcam hw entry */
static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
{
	/* Write index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
	mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
		    MVPP2_PRS_TCAM_INV_MASK);
}

/* Enable shadow table entry and set its lookup ID */
static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
{
	priv->prs_shadow[index].valid = true;
	priv->prs_shadow[index].lu = lu;
}

/* Update ri fields in shadow table entry */
static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
				    unsigned int ri, unsigned int ri_mask)
{
	priv->prs_shadow[index].ri_mask = ri_mask;
	priv->prs_shadow[index].ri = ri;
}

/* Update lookup field in tcam sw entry */
static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
	pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
}

/* Update mask for single port in tcam sw entry */
static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
				    unsigned int port, bool add)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	if (add)
		pe->tcam.byte[enable_off] &= ~(1 << port);
	else
		pe->tcam.byte[enable_off] |= 1 << port;
}

/* Update port map in tcam sw entry */
static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
					unsigned int ports)
{
	unsigned char port_mask = MVPP2_PRS_PORT_MASK;
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
	pe->tcam.byte[enable_off] &= ~port_mask;
	pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
}

/* Obtain port map from tcam sw entry */
static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
}
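
/* Example (illustrative only): the port enable byte stores the port map
 * inverted, so after
 *
 *	mvpp2_prs_tcam_port_map_set(pe, BIT(0) | BIT(1));
 *
 * the enable byte holds ~0x03 & 0xff = 0xfc, and
 * mvpp2_prs_tcam_port_map_get() inverts it again, returning 0x03.
 */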

/* Set byte of data and its enable bits in tcam sw entry */
static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char byte,
					 unsigned char enable)
{
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
}

/* Get byte of data and its enable bits from tcam sw entry */
static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char *byte,
					 unsigned char *enable)
{
	*byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
	*enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
}

/* Compare tcam data bytes with a pattern */
static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
				    u16 data)
{
	int off = MVPP2_PRS_TCAM_DATA_BYTE(offs);
	u16 tcam_data;

	tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off];
	if (tcam_data != data)
		return false;
	return true;
}

/* Update ai bits in tcam sw entry */
static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int enable)
{
	int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;

	for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
		if (!(enable & BIT(i)))
			continue;

		if (bits & BIT(i))
			pe->tcam.byte[ai_idx] |= 1 << i;
		else
			pe->tcam.byte[ai_idx] &= ~(1 << i);
	}

	pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable;
}

/* Get ai bits from tcam sw entry */
static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
{
	return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
}

/* Set ethertype in tcam sw entry */
static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
				  unsigned short ethertype)
{
	mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
}
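
/* Example (illustrative only): matching IPv4 frames at the current
 * lookup offset stores the ethertype big-endian with full byte enables:
 *
 *	mvpp2_prs_match_etype(pe, 0, ETH_P_IP);
 *
 * writes 0x08 at data byte 0 and 0x00 at data byte 1, each with an
 * enable mask of 0xff.
 */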

/* Set bits in sram sw entry */
static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
				    int val)
{
	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
}

/* Clear bits in sram sw entry */
static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
				      int val)
{
	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
}

/* Update ri bits in sram sw entry */
static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;

	for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
		int ri_off = MVPP2_PRS_SRAM_RI_OFFS;

		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
	}
}

/* Obtain ri bits from sram sw entry */
static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
{
	return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
}

/* Update ai bits in sram sw entry */
static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;
	int ai_off = MVPP2_PRS_SRAM_AI_OFFS;

	for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
	}
}

/* Read ai bits from sram sw entry */
static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
{
	u8 bits;
	int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
	int ai_en_off = ai_off + 1;
	int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;

	bits = (pe->sram.byte[ai_off] >> ai_shift) |
	       (pe->sram.byte[ai_en_off] << (8 - ai_shift));

	return bits;
}

/* In sram sw entry set lookup ID field of the tcam key to be used in the next
 * lookup iteration
 */
static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
				       unsigned int lu)
{
	int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;

	mvpp2_prs_sram_bits_clear(pe, sram_next_off,
				  MVPP2_PRS_SRAM_NEXT_LU_MASK);
	mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
}

/* In the sram sw entry set sign and value of the next lookup offset
 * and the offset value generated to the classifier
 */
static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
				     unsigned int op)
{
	/* Set sign */
	if (shift < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
		shift = 0 - shift;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
	}

	/* Set value */
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
							(unsigned char)shift;

	/* Reset and set operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
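
/* Example (illustrative only): skipping the two MAC addresses before an
 * ethertype lookup uses a positive shift of 2 * ETH_ALEN:
 *
 *	mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
 *				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
 *
 * A negative shift would set MVPP2_PRS_SRAM_SHIFT_SIGN_BIT and store
 * the absolute value instead.
 */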

/* In the sram sw entry set sign and value of the user defined offset
 * generated to the classifier
 */
static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
				      unsigned int type, int offset,
				      unsigned int op)
{
	/* Set sign */
	if (offset < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
		offset = 0 - offset;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
	}

	/* Set value */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
				  MVPP2_PRS_SRAM_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] &=
	      ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] |=
				(offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));

	/* Set offset type */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
				  MVPP2_PRS_SRAM_UDF_TYPE_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);

	/* Set offset operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
					~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
				(8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
			(op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}

/* Find parser flow entry */
static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);

	/* Go through all the entries with MVPP2_PRS_LU_FLOWS */
	for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
		u8 bits;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
			continue;

		pe->index = tid;
		mvpp2_prs_hw_read(priv, pe);
		bits = mvpp2_prs_sram_ai_get(pe);

		/* Sram stores the classification lookup ID in AI bits [5:0] */
		if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
			return pe;
	}
	kfree(pe);

	return NULL;
}

/* Return first free tcam index, seeking from start to end */
static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
				     unsigned char end)
{
	int tid;

	if (start > end)
		swap(start, end);

	if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
		end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;

	for (tid = start; tid <= end; tid++) {
		if (!priv->prs_shadow[tid].valid)
			return tid;
	}

	return -EINVAL;
}
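
/* Example (illustrative only): allocating a TID in the range kept free
 * for dynamically added entries:
 *
 *	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
 *					MVPP2_PE_LAST_FREE_TID);
 *	if (tid < 0)
 *		return tid;	(table full, -EINVAL)
 */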

/* Enable/disable dropping all mac da's */
static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
		/* Entry exists - update port only */
		pe.index = MVPP2_PE_DROP_ALL;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_DROP_ALL;

		/* Non-promiscuous mode for all ports - DROP unknown packets */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
					 MVPP2_PRS_RI_DROP_MASK);

		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}

/* Set port to promiscuous mode */
static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	/* Promiscuous mode - Accept unknown packets */

	if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
		/* Entry exists - update port only */
		pe.index = MVPP2_PE_MAC_PROMISCUOUS;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_MAC_PROMISCUOUS;

		/* Continue - set next lookup */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

		/* Set result info bits */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
					 MVPP2_PRS_RI_L2_CAST_MASK);

		/* Shift to ethertype */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
1868 /* Accept multicast */
1869 static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index,
1872 struct mvpp2_prs_entry pe;
1873 unsigned char da_mc;
1875 /* The first byte of an Ethernet multicast address is
1876 * 0x01 for IPv4 and 0x33 for IPv6
1877 */
1878 da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33;
1880 if (priv->prs_shadow[index].valid) {
1881 /* Entry exists - update port only */
1883 mvpp2_prs_hw_read(priv, &pe);
1885 /* Entry doesn't exist - create new */
1886 memset(&pe, 0, sizeof(pe));
1887 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1890 /* Continue - set next lookup */
1891 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
1893 /* Set result info bits */
1894 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST,
1895 MVPP2_PRS_RI_L2_CAST_MASK);
1897 /* Update tcam entry data first byte */
1898 mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff);
1900 /* Shift to ethertype */
1901 mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
1902 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1904 /* Mask all ports */
1905 mvpp2_prs_tcam_port_map_set(&pe, 0);
1907 /* Update shadow table */
1908 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1911 /* Update port mask */
1912 mvpp2_prs_tcam_port_set(&pe, port, add);
1914 mvpp2_prs_hw_write(priv, &pe);
1917 /* Set entry for dsa packets */
1918 static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
1919 bool tagged, bool extend)
1921 struct mvpp2_prs_entry pe;
1925 tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
1928 tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
1932 if (priv->prs_shadow[tid].valid) {
1933 /* Entry exists - update port only */
1935 mvpp2_prs_hw_read(priv, &pe);
1937 /* Entry doesn't exist - create new */
1938 memset(&pe, 0, sizeof(pe));
1939 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
1942 /* Shift 4 bytes if DSA tag or 8 bytes in case of EDSA tag */
1943 mvpp2_prs_sram_shift_set(&pe, shift,
1944 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1946 /* Update shadow table */
1947 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
1950 /* Set tagged bit in DSA tag */
1951 mvpp2_prs_tcam_data_byte_set(&pe, 0,
1952 MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
1953 MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
1954 /* Clear all ai bits for next iteration */
1955 mvpp2_prs_sram_ai_update(&pe, 0,
1956 MVPP2_PRS_SRAM_AI_MASK);
1957 /* If packet is tagged continue check vlans */
1958 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
1960 /* Set result info bits to 'no vlans' */
1961 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
1962 MVPP2_PRS_RI_VLAN_MASK);
1963 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
1966 /* Mask all ports */
1967 mvpp2_prs_tcam_port_map_set(&pe, 0);
1970 /* Update port mask */
1971 mvpp2_prs_tcam_port_set(&pe, port, add);
1973 mvpp2_prs_hw_write(priv, &pe);
1976 /* Set entry for dsa ethertype */
1977 static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
1978 bool add, bool tagged, bool extend)
1980 struct mvpp2_prs_entry pe;
1981 int tid, shift, port_mask;
1984 tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED :
1985 MVPP2_PE_ETYPE_EDSA_UNTAGGED;
1989 tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED :
1990 MVPP2_PE_ETYPE_DSA_UNTAGGED;
1991 port_mask = MVPP2_PRS_PORT_MASK;
1995 if (priv->prs_shadow[tid].valid) {
1996 /* Entry exists - update port only */
1998 mvpp2_prs_hw_read(priv, &pe);
2000 /* Entry doesn't exist - create new */
2001 memset(&pe, 0, sizeof(pe));
2002 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
2006 mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA);
2007 mvpp2_prs_match_etype(&pe, 2, 0);
2009 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
2010 MVPP2_PRS_RI_DSA_MASK);
2011 /* Shift ethertype + 2 reserved bytes + tag */
2012 mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift,
2013 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2015 /* Update shadow table */
2016 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
2019 /* Set tagged bit in DSA tag */
2020 mvpp2_prs_tcam_data_byte_set(&pe,
2021 MVPP2_ETH_TYPE_LEN + 2 + 3,
2022 MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
2023 MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
2024 /* Clear all ai bits for next iteration */
2025 mvpp2_prs_sram_ai_update(&pe, 0,
2026 MVPP2_PRS_SRAM_AI_MASK);
2027 /* If packet is tagged continue check vlans */
2028 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2030 /* Set result info bits to 'no vlans' */
2031 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
2032 MVPP2_PRS_RI_VLAN_MASK);
2033 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2035 /* Mask/unmask all ports, depending on dsa type */
2036 mvpp2_prs_tcam_port_map_set(&pe, port_mask);
2039 /* Update port mask */
2040 mvpp2_prs_tcam_port_set(&pe, port, add);
2042 mvpp2_prs_hw_write(priv, &pe);
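/* Assumed (E)DSA frame layout behind the shift above (illustrative, not
 * taken from a datasheet): the (E)DSA ethertype (MVPP2_ETH_TYPE_LEN
 * bytes) is followed by 2 reserved bytes and a 4-byte DSA or 8-byte
 * EDSA tag, so skipping 2 + MVPP2_ETH_TYPE_LEN + shift bytes leaves the
 * parser on the inner ethertype.
 */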
2045 /* Search for existing single/triple vlan entry */
2046 static struct mvpp2_prs_entry *mvpp2_prs_vlan_find(struct mvpp2 *priv,
2047 unsigned short tpid, int ai)
2049 struct mvpp2_prs_entry *pe;
2052 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2055 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
2057 /* Go through all entries with MVPP2_PRS_LU_VLAN */
2058 for (tid = MVPP2_PE_FIRST_FREE_TID;
2059 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
2060 unsigned int ri_bits, ai_bits;
2063 if (!priv->prs_shadow[tid].valid ||
2064 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
2069 mvpp2_prs_hw_read(priv, pe);
2070 match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid));
2075 ri_bits = mvpp2_prs_sram_ri_get(pe);
2076 ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
2078 /* Get current ai value from tcam */
2079 ai_bits = mvpp2_prs_tcam_ai_get(pe);
2080 /* Clear double vlan bit */
2081 ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;
2086 if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
2087 ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
2095 /* Add/update single/triple vlan entry */
2096 static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
2097 unsigned int port_map)
2099 struct mvpp2_prs_entry *pe;
2103 pe = mvpp2_prs_vlan_find(priv, tpid, ai);
2106 /* Create new tcam entry */
2107 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
2108 MVPP2_PE_FIRST_FREE_TID);
2112 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2116 /* Get last double vlan tid */
2117 for (tid_aux = MVPP2_PE_LAST_FREE_TID;
2118 tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
2119 unsigned int ri_bits;
2121 if (!priv->prs_shadow[tid_aux].valid ||
2122 priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
2125 pe->index = tid_aux;
2126 mvpp2_prs_hw_read(priv, pe);
2127 ri_bits = mvpp2_prs_sram_ri_get(pe);
2128 if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
2129 MVPP2_PRS_RI_VLAN_DOUBLE)
2133 if (tid <= tid_aux) {
2138 memset(pe, 0, sizeof(*pe));
2139 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
2142 mvpp2_prs_match_etype(pe, 0, tpid);
2144 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_L2);
2145 /* Shift 4 bytes - skip 1 vlan tag */
2146 mvpp2_prs_sram_shift_set(pe, MVPP2_VLAN_TAG_LEN,
2147 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2148 /* Clear all ai bits for next iteration */
2149 mvpp2_prs_sram_ai_update(pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2151 if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
2152 mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_SINGLE,
2153 MVPP2_PRS_RI_VLAN_MASK);
2155 ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
2156 mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_TRIPLE,
2157 MVPP2_PRS_RI_VLAN_MASK);
2159 mvpp2_prs_tcam_ai_update(pe, ai, MVPP2_PRS_SRAM_AI_MASK);
2161 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
2163 /* Update ports' mask */
2164 mvpp2_prs_tcam_port_map_set(pe, port_map);
2166 mvpp2_prs_hw_write(priv, pe);
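/* Ordering note (inferred from the tid checks above): a new single or
 * triple vlan entry must receive a higher tid than every existing
 * double vlan entry, so that the more specific double vlan entries keep
 * lookup priority; if the first free tid lies at or below the last
 * double vlan tid, the add is rejected.
 */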
2173 /* Get first free double vlan ai number */
2174 static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv)
2178 for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) {
2179 if (!priv->prs_double_vlans[i])
2186 /* Search for existing double vlan entry */
2187 static struct mvpp2_prs_entry *mvpp2_prs_double_vlan_find(struct mvpp2 *priv,
2188 unsigned short tpid1,
2189 unsigned short tpid2)
2191 struct mvpp2_prs_entry *pe;
2194 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2197 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
2199 /* Go through all entries with MVPP2_PRS_LU_VLAN */
2200 for (tid = MVPP2_PE_FIRST_FREE_TID;
2201 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
2202 unsigned int ri_mask;
2205 if (!priv->prs_shadow[tid].valid ||
2206 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
2210 mvpp2_prs_hw_read(priv, pe);
2212 match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid1))
2213 && mvpp2_prs_tcam_data_cmp(pe, 4, swab16(tpid2));
2218 ri_mask = mvpp2_prs_sram_ri_get(pe) & MVPP2_PRS_RI_VLAN_MASK;
2219 if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
2227 /* Add or update double vlan entry */
2228 static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
2229 unsigned short tpid2,
2230 unsigned int port_map)
2232 struct mvpp2_prs_entry *pe;
2233 int tid_aux, tid, ai, ret = 0;
2235 pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);
2238 /* Create new tcam entry */
2239 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2240 MVPP2_PE_LAST_FREE_TID);
2244 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2248 /* Set ai value for new double vlan entry */
2249 ai = mvpp2_prs_double_vlan_ai_free_get(priv);
2255 /* Get first single/triple vlan tid */
2256 for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
2257 tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
2258 unsigned int ri_bits;
2260 if (!priv->prs_shadow[tid_aux].valid ||
2261 priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
2264 pe->index = tid_aux;
2265 mvpp2_prs_hw_read(priv, pe);
2266 ri_bits = mvpp2_prs_sram_ri_get(pe);
2267 ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
2268 if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
2269 ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
2273 if (tid >= tid_aux) {
2278 memset(pe, 0, sizeof(*pe));
2279 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
2282 priv->prs_double_vlans[ai] = true;
2284 mvpp2_prs_match_etype(pe, 0, tpid1);
2285 mvpp2_prs_match_etype(pe, 4, tpid2);
2287 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VLAN);
2288 /* Shift 8 bytes - skip 2 vlan tags */
2289 mvpp2_prs_sram_shift_set(pe, 2 * MVPP2_VLAN_TAG_LEN,
2290 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2291 mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_DOUBLE,
2292 MVPP2_PRS_RI_VLAN_MASK);
2293 mvpp2_prs_sram_ai_update(pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
2294 MVPP2_PRS_SRAM_AI_MASK);
2296 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
2299 /* Update ports' mask */
2300 mvpp2_prs_tcam_port_map_set(pe, port_map);
2301 mvpp2_prs_hw_write(priv, pe);
2307 /* IPv4 header parsing for fragmentation and L4 offset */
2308 static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
2309 unsigned int ri, unsigned int ri_mask)
2311 struct mvpp2_prs_entry pe;
2314 if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
2315 (proto != IPPROTO_IGMP))
2318 /* Fragmented packet */
2319 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2320 MVPP2_PE_LAST_FREE_TID);
2324 memset(&pe, 0, sizeof(pe));
2325 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2328 /* Set next lu to IPv4 */
2329 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2330 mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2332 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2333 sizeof(struct iphdr) - 4,
2334 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2335 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2336 MVPP2_PRS_IPV4_DIP_AI_BIT);
2337 mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_MASK,
2338 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
2340 mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
2341 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
2342 /* Unmask all ports */
2343 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2345 /* Update shadow table and hw entry */
2346 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2347 mvpp2_prs_hw_write(priv, &pe);
2349 /* Non-fragmented packet */
2350 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2351 MVPP2_PE_LAST_FREE_TID);
2356 /* Clear ri before updating */
2357 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
2358 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
2359 mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
2361 mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, MVPP2_PRS_TCAM_PROTO_MASK_L);
2362 mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, MVPP2_PRS_TCAM_PROTO_MASK);
2364 /* Update shadow table and hw entry */
2365 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2366 mvpp2_prs_hw_write(priv, &pe);
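/* Offset arithmetic used above (worked example, assuming a 20-byte IPv4
 * header without options): the L2 ethertype entry has already advanced
 * the parser 4 bytes into the IP header, so the L4 header begins
 * sizeof(struct iphdr) - 4 = 16 bytes from the current position.
 */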
2371 /* IPv4 L3 multicast or broadcast */
2372 static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
2374 struct mvpp2_prs_entry pe;
2377 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2378 MVPP2_PE_LAST_FREE_TID);
2382 memset(&pe, 0, sizeof(pe));
2383 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2387 case MVPP2_PRS_L3_MULTI_CAST:
2388 mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
2389 MVPP2_PRS_IPV4_MC_MASK);
2390 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
2391 MVPP2_PRS_RI_L3_ADDR_MASK);
2393 case MVPP2_PRS_L3_BROAD_CAST:
2394 mask = MVPP2_PRS_IPV4_BC_MASK;
2395 mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
2396 mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
2397 mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
2398 mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
2399 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
2400 MVPP2_PRS_RI_L3_ADDR_MASK);
2406 /* Finished: go to flowid generation */
2407 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2408 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2410 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2411 MVPP2_PRS_IPV4_DIP_AI_BIT);
2412 /* Unmask all ports */
2413 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2415 /* Update shadow table and hw entry */
2416 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2417 mvpp2_prs_hw_write(priv, &pe);
2422 /* Set entries for protocols over IPv6 */
2423 static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
2424 unsigned int ri, unsigned int ri_mask)
2426 struct mvpp2_prs_entry pe;
2429 if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
2430 (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
2433 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2434 MVPP2_PE_LAST_FREE_TID);
2438 memset(&pe, 0, sizeof(pe));
2439 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2442 /* Finished: go to flowid generation */
2443 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2444 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2445 mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
2446 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2447 sizeof(struct ipv6hdr) - 6,
2448 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2450 mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
2451 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2452 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2453 /* Unmask all ports */
2454 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2457 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2458 mvpp2_prs_hw_write(priv, &pe);
2463 /* IPv6 L3 multicast entry */
2464 static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
2466 struct mvpp2_prs_entry pe;
2469 if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
2472 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2473 MVPP2_PE_LAST_FREE_TID);
2477 memset(&pe, 0, sizeof(pe));
2478 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2481 /* Finished: go to flowid generation */
2482 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2483 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
2484 MVPP2_PRS_RI_L3_ADDR_MASK);
2485 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2486 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2487 /* Shift back to IPv6 NH */
2488 mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2490 mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
2491 MVPP2_PRS_IPV6_MC_MASK);
2492 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2493 /* Unmask all ports */
2494 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2496 /* Update shadow table and hw entry */
2497 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2498 mvpp2_prs_hw_write(priv, &pe);
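/* Shift arithmetic assumed above (illustrative): the L2 ethertype entry
 * leaves the parser 24 bytes into the IPv6 header (8 fixed bytes plus
 * the 16-byte source address), so shifting by -18 lands on the Next
 * Header field at offset 6; the L4 offset of sizeof(struct ipv6hdr) - 6
 * = 34 bytes used by the IP6 proto entries is measured from there.
 */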
2503 /* Parser per-port initialization */
2504 static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
2505 int lu_max, int offset)
2510 val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
2511 val &= ~MVPP2_PRS_PORT_LU_MASK(port);
2512 val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
2513 mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);
2515 /* Set maximum number of loops for packet received from port */
2516 val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
2517 val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
2518 val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
2519 mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);
2521 /* Set initial offset for packet header extraction for the first
2522 * searching loop
2523 */
2524 val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
2525 val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
2526 val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
2527 mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
2530 /* Default flow entries initialization for all ports */
2531 static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
2533 struct mvpp2_prs_entry pe;
2536 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
2537 memset(&pe, 0, sizeof(pe));
2538 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2539 pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;
2541 /* Mask all ports */
2542 mvpp2_prs_tcam_port_map_set(&pe, 0);
2545 mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
2546 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
2548 /* Update shadow table and hw entry */
2549 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
2550 mvpp2_prs_hw_write(priv, &pe);
2554 /* Set default entry for Marvell Header field */
2555 static void mvpp2_prs_mh_init(struct mvpp2 *priv)
2557 struct mvpp2_prs_entry pe;
2559 memset(&pe, 0, sizeof(pe));
2561 pe.index = MVPP2_PE_MH_DEFAULT;
2562 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
2563 mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
2564 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2565 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
2567 /* Unmask all ports */
2568 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2570 /* Update shadow table and hw entry */
2571 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
2572 mvpp2_prs_hw_write(priv, &pe);
2575 /* Set default entries (placeholders) for promiscuous, non-promiscuous and
2576 * multicast MAC addresses
2577 */
2578 static void mvpp2_prs_mac_init(struct mvpp2 *priv)
2580 struct mvpp2_prs_entry pe;
2582 memset(&pe, 0, sizeof(pe));
2584 /* Non-promiscuous mode for all ports - DROP unknown packets */
2585 pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
2586 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
2588 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
2589 MVPP2_PRS_RI_DROP_MASK);
2590 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2591 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2593 /* Unmask all ports */
2594 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2596 /* Update shadow table and hw entry */
2597 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
2598 mvpp2_prs_hw_write(priv, &pe);
2600 /* Placeholders only - no ports */
2601 mvpp2_prs_mac_drop_all_set(priv, 0, false);
2602 mvpp2_prs_mac_promisc_set(priv, 0, false);
2603 mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false);
2604 mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false);
2607 /* Set default entries for various types of dsa packets */
2608 static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
2610 struct mvpp2_prs_entry pe;
2612 /* Untagged EDSA entry - placeholder */
2613 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
2616 /* Tagged EDSA entry - place holder */
2617 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2619 /* Untagged DSA entry - placeholder */
2620 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
2623 /* Tagged DSA entry - place holder */
2624 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2626 /* Untagged EDSA ethertype entry - placeholder */
2627 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
2628 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
2630 /* Tagged EDSA ethertype entry - placeholder */
2631 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
2632 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2634 /* Untagged DSA ethertype entry */
2635 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
2636 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
2638 /* Tagged DSA ethertype entry */
2639 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
2640 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2642 /* Set default entry, in case DSA or EDSA tag not found */
2643 memset(&pe, 0, sizeof(pe));
2644 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
2645 pe.index = MVPP2_PE_DSA_DEFAULT;
2646 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2649 mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2650 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
2652 /* Clear all sram ai bits for next iteration */
2653 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2655 /* Unmask all ports */
2656 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2658 mvpp2_prs_hw_write(priv, &pe);
2661 /* Match basic ethertypes */
2662 static int mvpp2_prs_etype_init(struct mvpp2 *priv)
2664 struct mvpp2_prs_entry pe;
2667 /* Ethertype: PPPoE */
2668 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2669 MVPP2_PE_LAST_FREE_TID);
2673 memset(&pe, 0, sizeof(pe));
2674 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2677 mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);
2679 mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
2680 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2681 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2682 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
2683 MVPP2_PRS_RI_PPPOE_MASK);
2685 /* Update shadow table and hw entry */
2686 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2687 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2688 priv->prs_shadow[pe.index].finish = false;
2689 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
2690 MVPP2_PRS_RI_PPPOE_MASK);
2691 mvpp2_prs_hw_write(priv, &pe);
2693 /* Ethertype: ARP */
2694 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2695 MVPP2_PE_LAST_FREE_TID);
2699 memset(&pe, 0, sizeof(pe));
2700 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2703 mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);
2705 /* Generate flow in the next iteration */
2706 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2707 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2708 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
2709 MVPP2_PRS_RI_L3_PROTO_MASK);
2711 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2713 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2715 /* Update shadow table and hw entry */
2716 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2717 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2718 priv->prs_shadow[pe.index].finish = true;
2719 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
2720 MVPP2_PRS_RI_L3_PROTO_MASK);
2721 mvpp2_prs_hw_write(priv, &pe);
2723 /* Ethertype: LBTD */
2724 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2725 MVPP2_PE_LAST_FREE_TID);
2729 memset(&pe, 0, sizeof(pe));
2730 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2733 mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);
2735 /* Generate flow in the next iteration */
2736 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2737 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2738 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2739 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2740 MVPP2_PRS_RI_CPU_CODE_MASK |
2741 MVPP2_PRS_RI_UDF3_MASK);
2743 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2745 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2747 /* Update shadow table and hw entry */
2748 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2749 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2750 priv->prs_shadow[pe.index].finish = true;
2751 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2752 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2753 MVPP2_PRS_RI_CPU_CODE_MASK |
2754 MVPP2_PRS_RI_UDF3_MASK);
2755 mvpp2_prs_hw_write(priv, &pe);
2757 /* Ethertype: IPv4 without options */
2758 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2759 MVPP2_PE_LAST_FREE_TID);
2763 memset(&pe, 0, sizeof(pe));
2764 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2767 mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
2768 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2769 MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
2770 MVPP2_PRS_IPV4_HEAD_MASK |
2771 MVPP2_PRS_IPV4_IHL_MASK);
2773 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2774 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
2775 MVPP2_PRS_RI_L3_PROTO_MASK);
2776 /* Skip eth_type + 4 bytes of IP header */
2777 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2778 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2780 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2782 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2784 /* Update shadow table and hw entry */
2785 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2786 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2787 priv->prs_shadow[pe.index].finish = false;
2788 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
2789 MVPP2_PRS_RI_L3_PROTO_MASK);
2790 mvpp2_prs_hw_write(priv, &pe);
2792 /* Ethertype: IPv4 with options */
2793 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2794 MVPP2_PE_LAST_FREE_TID);
2800 /* Clear tcam data before updating */
2801 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
2802 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;
2804 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2805 MVPP2_PRS_IPV4_HEAD,
2806 MVPP2_PRS_IPV4_HEAD_MASK);
2808 /* Clear ri before updating */
2809 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
2810 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
2811 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
2812 MVPP2_PRS_RI_L3_PROTO_MASK);
2814 /* Update shadow table and hw entry */
2815 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2816 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2817 priv->prs_shadow[pe.index].finish = false;
2818 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
2819 MVPP2_PRS_RI_L3_PROTO_MASK);
2820 mvpp2_prs_hw_write(priv, &pe);
2822 /* Ethertype: IPv6 without options */
2823 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2824 MVPP2_PE_LAST_FREE_TID);
2828 memset(&pe, 0, sizeof(pe));
2829 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2832 mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);
2834 /* Skip DIP of IPV6 header */
2835 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
2836 MVPP2_MAX_L3_ADDR_SIZE,
2837 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2838 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2839 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
2840 MVPP2_PRS_RI_L3_PROTO_MASK);
2842 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2844 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2846 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2847 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2848 priv->prs_shadow[pe.index].finish = false;
2849 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
2850 MVPP2_PRS_RI_L3_PROTO_MASK);
2851 mvpp2_prs_hw_write(priv, &pe);
2853 /* Default entry for MVPP2_PRS_LU_L2 - unknown ethertype */
2854 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2855 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2856 pe.index = MVPP2_PE_ETH_TYPE_UN;
2858 /* Unmask all ports */
2859 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2861 /* Generate flow in the next iteration */
2862 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2863 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2864 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
2865 MVPP2_PRS_RI_L3_PROTO_MASK);
2866 /* Set L3 offset even if it's unknown L3 */
2867 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2869 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2871 /* Update shadow table and hw entry */
2872 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2873 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2874 priv->prs_shadow[pe.index].finish = true;
2875 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
2876 MVPP2_PRS_RI_L3_PROTO_MASK);
2877 mvpp2_prs_hw_write(priv, &pe);
2882 /* Configure vlan entries and detect up to 2 successive VLAN tags.
2883 * Possible options:
2884 * 0x8100, 0x88A8
2885 * 0x8100, 0x8100
2886 * 0x8100
2887 * 0x88A8
2888 */
2889 static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
2891 struct mvpp2_prs_entry pe;
2894 priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool),
2895 MVPP2_PRS_DBL_VLANS_MAX,
2897 if (!priv->prs_double_vlans)
2900 /* Double VLAN: 0x8100, 0x88A8 */
2901 err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
2902 MVPP2_PRS_PORT_MASK);
2906 /* Double VLAN: 0x8100, 0x8100 */
2907 err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q,
2908 MVPP2_PRS_PORT_MASK);
2912 /* Single VLAN: 0x88a8 */
2913 err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
2914 MVPP2_PRS_PORT_MASK);
2918 /* Single VLAN: 0x8100 */
2919 err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
2920 MVPP2_PRS_PORT_MASK);
2924 /* Set default double vlan entry */
2925 memset(&pe, 0, sizeof(pe));
2926 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2927 pe.index = MVPP2_PE_VLAN_DBL;
2929 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2930 /* Clear ai for next iterations */
2931 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2932 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
2933 MVPP2_PRS_RI_VLAN_MASK);
2935 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
2936 MVPP2_PRS_DBL_VLAN_AI_BIT);
2937 /* Unmask all ports */
2938 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2940 /* Update shadow table and hw entry */
2941 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
2942 mvpp2_prs_hw_write(priv, &pe);
2944 /* Set default vlan none entry */
2945 memset(&pe, 0, sizeof(pe));
2946 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2947 pe.index = MVPP2_PE_VLAN_NONE;
2949 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2950 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
2951 MVPP2_PRS_RI_VLAN_MASK);
2953 /* Unmask all ports */
2954 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2956 /* Update shadow table and hw entry */
2957 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
2958 mvpp2_prs_hw_write(priv, &pe);
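/* Lookup outcome sketch (illustrative, derived from the entries added
 * above):
 *	outer 0x8100 + inner 0x88A8 -> double vlan entry (VLAN_DOUBLE)
 *	outer 0x8100 + inner 0x8100 -> double vlan entry (VLAN_DOUBLE)
 *	single 0x8100 or 0x88A8     -> single vlan entry (VLAN_SINGLE)
 *	no TPID match               -> MVPP2_PE_VLAN_NONE (VLAN_NONE)
 */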
2963 /* Set entries for PPPoE ethertype */
2964 static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
2966 struct mvpp2_prs_entry pe;
2969 /* IPv4 over PPPoE with options */
2970 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2971 MVPP2_PE_LAST_FREE_TID);
2975 memset(&pe, 0, sizeof(pe));
2976 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2979 mvpp2_prs_match_etype(&pe, 0, PPP_IP);
2981 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2982 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
2983 MVPP2_PRS_RI_L3_PROTO_MASK);
2984 /* Skip eth_type + 4 bytes of IP header */
2985 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2986 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2988 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2990 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2992 /* Update shadow table and hw entry */
2993 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2994 mvpp2_prs_hw_write(priv, &pe);
2996 /* IPv4 over PPPoE without options */
2997 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2998 MVPP2_PE_LAST_FREE_TID);
3004 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
3005 MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
3006 MVPP2_PRS_IPV4_HEAD_MASK |
3007 MVPP2_PRS_IPV4_IHL_MASK);
3009 /* Clear ri before updating */
3010 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
3011 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
3012 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
3013 MVPP2_PRS_RI_L3_PROTO_MASK);
3015 /* Update shadow table and hw entry */
3016 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
3017 mvpp2_prs_hw_write(priv, &pe);
3019 /* IPv6 over PPPoE */
3020 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
3021 MVPP2_PE_LAST_FREE_TID);
3025 memset(&pe, 0, sizeof(pe));
3026 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
3029 mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);
3031 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
3032 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
3033 MVPP2_PRS_RI_L3_PROTO_MASK);
3034 /* Skip eth_type + 4 bytes of IPv6 header */
3035 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
3036 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3038 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
3040 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
3042 /* Update shadow table and hw entry */
3043 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
3044 mvpp2_prs_hw_write(priv, &pe);
3046 /* Non-IP over PPPoE */
3047 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
3048 MVPP2_PE_LAST_FREE_TID);
3052 memset(&pe, 0, sizeof(pe));
3053 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
3056 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
3057 MVPP2_PRS_RI_L3_PROTO_MASK);
3059 /* Finished: go to flowid generation */
3060 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3061 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3062 /* Set L3 offset even if it's unknown L3 */
3063 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
3065 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
3067 /* Update shadow table and hw entry */
3068 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
3069 mvpp2_prs_hw_write(priv, &pe);
3074 /* Initialize entries for IPv4 */
3075 static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
3077 struct mvpp2_prs_entry pe;
3080 /* Set entries for TCP, UDP and IGMP over IPv4 */
3081 err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
3082 MVPP2_PRS_RI_L4_PROTO_MASK);
3086 err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
3087 MVPP2_PRS_RI_L4_PROTO_MASK);
3091 err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
3092 MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
3093 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
3094 MVPP2_PRS_RI_CPU_CODE_MASK |
3095 MVPP2_PRS_RI_UDF3_MASK);
3099 /* IPv4 Broadcast */
3100 err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
3104 /* IPv4 Multicast */
3105 err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
3109 /* Default IPv4 entry for unknown protocols */
3110 memset(&pe, 0, sizeof(pe));
3111 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
3112 pe.index = MVPP2_PE_IP4_PROTO_UN;
3114 /* Set next lu to IPv4 */
3115 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
3116 mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3118 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
3119 sizeof(struct iphdr) - 4,
3120 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
3121 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
3122 MVPP2_PRS_IPV4_DIP_AI_BIT);
3123 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
3124 MVPP2_PRS_RI_L4_PROTO_MASK);
3126 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
3127 /* Unmask all ports */
3128 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3130 /* Update shadow table and hw entry */
3131 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
3132 mvpp2_prs_hw_write(priv, &pe);
3134 /* Default IPv4 entry for unicast address */
3135 memset(&pe, 0, sizeof(pe));
3136 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
3137 pe.index = MVPP2_PE_IP4_ADDR_UN;
3139 /* Finished: go to flowid generation */
3140 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3141 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3142 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
3143 MVPP2_PRS_RI_L3_ADDR_MASK);
3145 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
3146 MVPP2_PRS_IPV4_DIP_AI_BIT);
3147 /* Unmask all ports */
3148 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3150 /* Update shadow table and hw entry */
3151 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
3152 mvpp2_prs_hw_write(priv, &pe);
3157 /* Initialize entries for IPv6 */
3158 static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
3160 struct mvpp2_prs_entry pe;
3163 /* Set entries for TCP, UDP and ICMP over IPv6 */
3164 err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
3165 MVPP2_PRS_RI_L4_TCP,
3166 MVPP2_PRS_RI_L4_PROTO_MASK);
3170 err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
3171 MVPP2_PRS_RI_L4_UDP,
3172 MVPP2_PRS_RI_L4_PROTO_MASK);
3176 err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
3177 MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
3178 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
3179 MVPP2_PRS_RI_CPU_CODE_MASK |
3180 MVPP2_PRS_RI_UDF3_MASK);
3184 /* IPv4 is the last header. This is a similar case to TCP (6) or UDP (17) */
3185 /* Result Info: UDF7=1, DS lite */
3186 err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
3187 MVPP2_PRS_RI_UDF7_IP6_LITE,
3188 MVPP2_PRS_RI_UDF7_MASK);
3192 /* IPv6 multicast */
3193 err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
3197 /* Entry for checking hop limit */
3198 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
3199 MVPP2_PE_LAST_FREE_TID);
3203 memset(&pe, 0, sizeof(pe));
3204 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
3207 /* Finished: go to flowid generation */
3208 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3209 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3210 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
3211 MVPP2_PRS_RI_DROP_MASK,
3212 MVPP2_PRS_RI_L3_PROTO_MASK |
3213 MVPP2_PRS_RI_DROP_MASK);
3215 mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
3216 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
3217 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
3219 /* Update shadow table and hw entry */
3220 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
3221 mvpp2_prs_hw_write(priv, &pe);
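/* Match detail (assumption): with the parser positioned on the IPv6
 * Next Header field, tcam data byte 1 is the Hop Limit (offset 7 of
 * struct ipv6hdr), so matching 0x00 there drops packets whose hop limit
 * is already exhausted.
 */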
3223 /* Default IPv6 entry for unknown protocols */
3224 memset(&pe, 0, sizeof(pe));
3225 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
3226 pe.index = MVPP2_PE_IP6_PROTO_UN;
3228 /* Finished: go to flowid generation */
3229 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3230 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3231 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
3232 MVPP2_PRS_RI_L4_PROTO_MASK);
3233 /* Set L4 offset relative to the current position */
3234 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
3235 sizeof(struct ipv6hdr) - 4,
3236 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
3238 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
3239 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
3240 /* Unmask all ports */
3241 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3243 /* Update shadow table and hw entry */
3244 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
3245 mvpp2_prs_hw_write(priv, &pe);
3247 /* Default IPv6 entry for unknown ext protocols */
3248 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
3249 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
3250 pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;
3252 /* Finished: go to flowid generation */
3253 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3254 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3255 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
3256 MVPP2_PRS_RI_L4_PROTO_MASK);
3258 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
3259 MVPP2_PRS_IPV6_EXT_AI_BIT);
3260 /* Unmask all ports */
3261 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3263 /* Update shadow table and hw entry */
3264 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
3265 mvpp2_prs_hw_write(priv, &pe);
3267 /* Default IPv6 entry for unicast address */
3268 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
3269 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
3270 pe.index = MVPP2_PE_IP6_ADDR_UN;
3272 /* Finished: go to IPv6 again */
3273 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
3274 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
3275 MVPP2_PRS_RI_L3_ADDR_MASK);
3276 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
3277 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
3278 /* Shift back to IPv6 NH */
3279 mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3281 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
3282 /* Unmask all ports */
3283 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3285 /* Update shadow table and hw entry */
3286 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
3287 mvpp2_prs_hw_write(priv, &pe);
3292 /* Parser default initialization */
3293 static int mvpp2_prs_default_init(struct platform_device *pdev,
3298 /* Enable tcam table */
3299 mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
3301 /* Clear all tcam and sram entries */
3302 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
3303 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
3304 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
3305 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);
3307 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
3308 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
3309 mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
3312 /* Invalidate all tcam entries */
3313 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
3314 mvpp2_prs_hw_inv(priv, index);
3316 priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
3317 sizeof(*priv->prs_shadow),
3319 if (!priv->prs_shadow)
3322 /* Always start from lookup = 0 */
3323 for (index = 0; index < MVPP2_MAX_PORTS; index++)
3324 mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
3325 MVPP2_PRS_PORT_LU_MAX, 0);
3327 mvpp2_prs_def_flow_init(priv);
3329 mvpp2_prs_mh_init(priv);
3331 mvpp2_prs_mac_init(priv);
3333 mvpp2_prs_dsa_init(priv);
3335 err = mvpp2_prs_etype_init(priv);
3339 err = mvpp2_prs_vlan_init(pdev, priv);
3343 err = mvpp2_prs_pppoe_init(priv);
3347 err = mvpp2_prs_ip6_init(priv);
3351 err = mvpp2_prs_ip4_init(priv);
3358 /* Compare MAC DA with tcam entry data */
3359 static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
3360 const u8 *da, unsigned char *mask)
3362 unsigned char tcam_byte, tcam_mask;
3365 for (index = 0; index < ETH_ALEN; index++) {
3366 mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
3367 if (tcam_mask != mask[index])
3370 if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
3377 /* Find tcam entry with matched pair <MAC DA, port> */
3378 static struct mvpp2_prs_entry *
3379 mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
3380 unsigned char *mask, int udf_type)
3382 struct mvpp2_prs_entry *pe;
3385 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
3388 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
3390 /* Go through all entries with MVPP2_PRS_LU_MAC */
3391 for (tid = MVPP2_PE_FIRST_FREE_TID;
3392 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
3393 unsigned int entry_pmap;
3395 if (!priv->prs_shadow[tid].valid ||
3396 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
3397 (priv->prs_shadow[tid].udf != udf_type))
3401 mvpp2_prs_hw_read(priv, pe);
3402 entry_pmap = mvpp2_prs_tcam_port_map_get(pe);
3404 if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
3413 /* Update parser's mac da entry */
3414 static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
3415 const u8 *da, bool add)
3417 struct mvpp2_prs_entry *pe;
3418 unsigned int pmap, len, ri;
3419 unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
3422 /* Scan TCAM and see if entry with this <MAC DA, port> already exists */
3423 pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask,
3424 MVPP2_PRS_UDF_MAC_DEF);
3431 /* Create new TCAM entry */
3432 /* Find the first range MAC entry */
3433 for (tid = MVPP2_PE_FIRST_FREE_TID;
3434 tid <= MVPP2_PE_LAST_FREE_TID; tid++)
3435 if (priv->prs_shadow[tid].valid &&
3436 (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) &&
3437 (priv->prs_shadow[tid].udf ==
3438 MVPP2_PRS_UDF_MAC_RANGE))
3441 /* Go through all entries from first to last */
3442 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
3447 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
3450 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
3453 /* Mask all ports */
3454 mvpp2_prs_tcam_port_map_set(pe, 0);
3457 /* Update port mask */
3458 mvpp2_prs_tcam_port_set(pe, port, add);
3460 /* Invalidate the entry if no ports are left enabled */
3461 pmap = mvpp2_prs_tcam_port_map_get(pe);
3467 mvpp2_prs_hw_inv(priv, pe->index);
3468 priv->prs_shadow[pe->index].valid = false;
3473 /* Continue - set next lookup */
3474 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);
3476 /* Set match on DA */
3479 mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);
3481 /* Set result info bits */
3482 if (is_broadcast_ether_addr(da))
3483 ri = MVPP2_PRS_RI_L2_BCAST;
3484 else if (is_multicast_ether_addr(da))
3485 ri = MVPP2_PRS_RI_L2_MCAST;
3487 ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;
3489 mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
3490 MVPP2_PRS_RI_MAC_ME_MASK);
3491 mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
3492 MVPP2_PRS_RI_MAC_ME_MASK);
3494 /* Shift to ethertype */
3495 mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
3496 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3498 /* Update shadow table and hw entry */
3499 priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
3500 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC);
3501 mvpp2_prs_hw_write(priv, pe);
3508 static int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
3510 struct mvpp2_port *port = netdev_priv(dev);
3513 /* Remove old parser entry */
3514 err = mvpp2_prs_mac_da_accept(port->priv, port->id, dev->dev_addr,
3519 /* Add new parser entry */
3520 err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true);
3524 /* Set addr in the device */
3525 ether_addr_copy(dev->dev_addr, da);
3530 /* Delete all of the port's simple (non-range) multicast entries */
3531 static void mvpp2_prs_mcast_del_all(struct mvpp2 *priv, int port)
3533 struct mvpp2_prs_entry pe;
3536 for (tid = MVPP2_PE_FIRST_FREE_TID;
3537 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
3538 unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];
3540 if (!priv->prs_shadow[tid].valid ||
3541 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
3542 (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
3545 /* Only simple MAC entries */
3547 mvpp2_prs_hw_read(priv, &pe);
3549 /* Read mac addr from entry */
3550 for (index = 0; index < ETH_ALEN; index++)
3551 mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
3554 if (is_multicast_ether_addr(da) && !is_broadcast_ether_addr(da))
3555 /* Delete this entry */
3556 mvpp2_prs_mac_da_accept(priv, port, da, false);
3560 static int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
3563 case MVPP2_TAG_TYPE_EDSA:
3564 /* Add port to EDSA entries */
3565 mvpp2_prs_dsa_tag_set(priv, port, true,
3566 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3567 mvpp2_prs_dsa_tag_set(priv, port, true,
3568 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3569 /* Remove port from DSA entries */
3570 mvpp2_prs_dsa_tag_set(priv, port, false,
3571 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3572 mvpp2_prs_dsa_tag_set(priv, port, false,
3573 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3576 case MVPP2_TAG_TYPE_DSA:
3577 /* Add port to DSA entries */
3578 mvpp2_prs_dsa_tag_set(priv, port, true,
3579 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3580 mvpp2_prs_dsa_tag_set(priv, port, true,
3581 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3582 /* Remove port from EDSA entries */
3583 mvpp2_prs_dsa_tag_set(priv, port, false,
3584 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3585 mvpp2_prs_dsa_tag_set(priv, port, false,
3586 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3589 case MVPP2_TAG_TYPE_MH:
3590 case MVPP2_TAG_TYPE_NONE:
3591 /* Remove port from EDSA and DSA entries */
3592 mvpp2_prs_dsa_tag_set(priv, port, false,
3593 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3594 mvpp2_prs_dsa_tag_set(priv, port, false,
3595 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3596 mvpp2_prs_dsa_tag_set(priv, port, false,
3597 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3598 mvpp2_prs_dsa_tag_set(priv, port, false,
3599 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3603 if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
3610 /* Set prs flow for the port */
3611 static int mvpp2_prs_def_flow(struct mvpp2_port *port)
3613 struct mvpp2_prs_entry *pe;
3616 pe = mvpp2_prs_flow_find(port->priv, port->id);
3618 /* Such an entry does not exist */
3620 /* Go through all entries from last to first */
3621 tid = mvpp2_prs_tcam_first_free(port->priv,
3622 MVPP2_PE_LAST_FREE_TID,
3623 MVPP2_PE_FIRST_FREE_TID);
3627 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
3631 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
3635 mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
3636 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
3638 /* Update shadow table */
3639 mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS);
3642 mvpp2_prs_tcam_port_map_set(pe, (1 << port->id));
3643 mvpp2_prs_hw_write(port->priv, pe);
3649 /* Classifier configuration routines */
3651 /* Update classification flow table registers */
3652 static void mvpp2_cls_flow_write(struct mvpp2 *priv,
3653 struct mvpp2_cls_flow_entry *fe)
3655 mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
3656 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
3657 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
3658 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
3661 /* Update classification lookup table register */
3662 static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
3663 struct mvpp2_cls_lookup_entry *le)
3667 val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
3668 mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
3669 mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
3672 /* Classifier default initialization */
3673 static void mvpp2_cls_init(struct mvpp2 *priv)
3675 struct mvpp2_cls_lookup_entry le;
3676 struct mvpp2_cls_flow_entry fe;
3679 /* Enable classifier */
3680 mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
3682 /* Clear classifier flow table */
3683 memset(&fe.data, 0, sizeof(fe.data));
3684 for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
3686 mvpp2_cls_flow_write(priv, &fe);
3689 /* Clear classifier lookup table */
3691 for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
3694 mvpp2_cls_lookup_write(priv, &le);
3697 mvpp2_cls_lookup_write(priv, &le);
3701 static void mvpp2_cls_port_config(struct mvpp2_port *port)
3703 struct mvpp2_cls_lookup_entry le;
3706 /* Set way for the port */
3707 val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
3708 val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
3709 mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);
3711 /* Pick the entry to be accessed in the lookup ID decoding table
3712 * according to the way and lkpid.
3713 */
3714 le.lkpid = port->id;
3718 /* Set initial CPU queue for receiving packets */
3719 le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
3720 le.data |= port->first_rxq;
3722 /* Disable classification engines */
3723 le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
3725 /* Update lookup ID table entry */
3726 mvpp2_cls_lookup_write(port->priv, &le);
3729 /* Set CPU queue number for oversize packets */
3730 static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
3734 mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
3735 port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);
3737 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
3738 (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));
3740 val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
3741 val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
3742 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
3745 static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool)
3747 if (likely(pool->frag_size <= PAGE_SIZE))
3748 return netdev_alloc_frag(pool->frag_size);
3750 return kmalloc(pool->frag_size, GFP_ATOMIC);
3753 static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, void *data)
3755 if (likely(pool->frag_size <= PAGE_SIZE))
3756 skb_free_frag(data);
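/* Allocation strategy note (illustrative): buffers that fit in a page
 * come from the per-CPU page fragment cache via netdev_alloc_frag(),
 * which is cheap to refill from softirq context; larger buffers fall
 * back to kmalloc(GFP_ATOMIC), and mvpp2_frag_free() is assumed to
 * mirror that size split with kfree() on the large path.
 */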
3761 /* Buffer Manager configuration routines */
3764 static int mvpp2_bm_pool_create(struct platform_device *pdev,
3766 struct mvpp2_bm_pool *bm_pool, int size)
3770 /* Number of buffer pointers must be a multiple of 16, as per
3771 * hardware constraints
3772 */
3773 if (!IS_ALIGNED(size, 16))
3776 /* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 needs 16
3777 * bytes per buffer pointer
3778 */
3779 if (priv->hw_version == MVPP21)
3780 bm_pool->size_bytes = 2 * sizeof(u32) * size;
3782 bm_pool->size_bytes = 2 * sizeof(u64) * size;
3784 bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, bm_pool->size_bytes,
3787 if (!bm_pool->virt_addr)
3790 if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
3791 MVPP2_BM_POOL_PTR_ALIGN)) {
3792 dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
3793 bm_pool->virt_addr, bm_pool->dma_addr);
3794 dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
3795 bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
3799 mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
3800 lower_32_bits(bm_pool->dma_addr));
3801 mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);
3803 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
3804 val |= MVPP2_BM_START_MASK;
3805 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
3807 bm_pool->type = MVPP2_BM_FREE;
3808 bm_pool->size = size;
3809 bm_pool->pkt_size = 0;
3810 bm_pool->buf_num = 0;
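/* Size arithmetic sketch (worked example): for size = 1024 buffer
 * pointers, PPv2.1 needs 2 * sizeof(u32) * 1024 = 8 KiB of coherent
 * memory and PPv2.2 needs 2 * sizeof(u64) * 1024 = 16 KiB; the factor
 * of 2 is assumed to cover the per-buffer pair of DMA address and
 * cookie.
 */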
3815 /* Set pool buffer size */
3816 static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
3817 struct mvpp2_bm_pool *bm_pool,
3822 bm_pool->buf_size = buf_size;
3824 val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
3825 mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
3828 static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
3829 struct mvpp2_bm_pool *bm_pool,
3830 dma_addr_t *dma_addr,
3831 phys_addr_t *phys_addr)
3833 int cpu = get_cpu();
3835 *dma_addr = mvpp2_percpu_read(priv, cpu,
3836 MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
3837 *phys_addr = mvpp2_percpu_read(priv, cpu, MVPP2_BM_VIRT_ALLOC_REG);
3839 if (priv->hw_version == MVPP22) {
3841 u32 dma_addr_highbits, phys_addr_highbits;
3843 val = mvpp2_percpu_read(priv, cpu, MVPP22_BM_ADDR_HIGH_ALLOC);
3844 dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
3845 phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
3846 MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;
3848 if (sizeof(dma_addr_t) == 8)
3849 *dma_addr |= (u64)dma_addr_highbits << 32;
3851 if (sizeof(phys_addr_t) == 8)
3852 *phys_addr |= (u64)phys_addr_highbits << 32;
3858 /* Free all buffers from the pool */
3859 static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
3860 struct mvpp2_bm_pool *bm_pool)
3864 for (i = 0; i < bm_pool->buf_num; i++) {
3865 dma_addr_t buf_dma_addr;
3866 phys_addr_t buf_phys_addr;
3869 mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool,
3870 &buf_dma_addr, &buf_phys_addr);
3872 dma_unmap_single(dev, buf_dma_addr,
3873 bm_pool->buf_size, DMA_FROM_DEVICE);
3875 data = (void *)phys_to_virt(buf_phys_addr);
3879 mvpp2_frag_free(bm_pool, data);
3882 /* Update BM driver with number of buffers removed from pool */
3883 bm_pool->buf_num -= i;
3887 static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
3889 struct mvpp2_bm_pool *bm_pool)
3893 mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool);
3894 if (bm_pool->buf_num) {
3895 WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);
3899 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
3900 val |= MVPP2_BM_STOP_MASK;
3901 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
3903 dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
static int mvpp2_bm_pools_init(struct platform_device *pdev,
			       struct mvpp2 *priv)
{
	int i, err, size;
	struct mvpp2_bm_pool *bm_pool;

	/* Create all pools with maximum size */
	size = MVPP2_BM_POOL_SIZE_MAX;
	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		bm_pool = &priv->bm_pools[i];
		bm_pool->id = i;
		err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size);
		if (err)
			goto err_unroll_pools;
		mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
	}
	return 0;

err_unroll_pools:
	dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size);
	for (i = i - 1; i >= 0; i--)
		mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]);
	return err;
}

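/* Mask and clear all BM interrupts, then allocate and initialize the pools */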
static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	int i, err;

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		/* Mask BM all interrupts */
		mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
		/* Clear BM cause register */
		mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
	}

	/* Allocate and initialize BM pools */
	priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM,
				      sizeof(*priv->bm_pools), GFP_KERNEL);
	if (!priv->bm_pools)
		return -ENOMEM;

	err = mvpp2_bm_pools_init(pdev, priv);
	if (err < 0)
		return err;
	return 0;
}

/* Attach long pool to rxq */
static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
				    int lrxq, int long_pool)
{
	u32 val, mask;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	if (port->priv->hw_version == MVPP21)
		mask = MVPP21_RXQ_POOL_LONG_MASK;
	else
		mask = MVPP22_RXQ_POOL_LONG_MASK;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~mask;
	val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

/* Attach short pool to rxq */
static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
				     int lrxq, int short_pool)
{
	u32 val, mask;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	if (port->priv->hw_version == MVPP21)
		mask = MVPP21_RXQ_POOL_SHORT_MASK;
	else
		mask = MVPP22_RXQ_POOL_SHORT_MASK;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~mask;
	val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask;
	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

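/* Allocate a buffer fragment for a BM pool and DMA-map it for reception */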
static void *mvpp2_buf_alloc(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool,
			     dma_addr_t *buf_dma_addr,
			     phys_addr_t *buf_phys_addr,
			     gfp_t gfp_mask)
{
	dma_addr_t dma_addr;
	void *data;

	data = mvpp2_frag_alloc(bm_pool);
	if (!data)
		return NULL;

	dma_addr = dma_map_single(port->dev->dev.parent, data,
				  MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
				  DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
		mvpp2_frag_free(bm_pool, data);
		return NULL;
	}
	*buf_dma_addr = dma_addr;
	*buf_phys_addr = virt_to_phys(data);

	return data;
}

4025 /* Release buffer to BM */
4026 static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
4027 dma_addr_t buf_dma_addr,
4028 phys_addr_t buf_phys_addr)
4030 int cpu = get_cpu();
4032 if (port->priv->hw_version == MVPP22) {
4035 if (sizeof(dma_addr_t) == 8)
4036 val |= upper_32_bits(buf_dma_addr) &
4037 MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;
4039 if (sizeof(phys_addr_t) == 8)
4040 val |= (upper_32_bits(buf_phys_addr)
4041 << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
4042 MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;
4044 mvpp2_percpu_write(port->priv, cpu,
4045 MVPP22_BM_ADDR_HIGH_RLS_REG, val);
4048 /* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
4049 * returned in the "cookie" field of the RX
4050 * descriptor. Instead of storing the virtual address, we
4051 * store the physical address
4053 mvpp2_percpu_write(port->priv, cpu,
4054 MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
4055 mvpp2_percpu_write(port->priv, cpu,
4056 MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
4061 /* Allocate buffers for the pool */
4062 static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
4063 struct mvpp2_bm_pool *bm_pool, int buf_num)
4065 int i, buf_size, total_size;
4066 dma_addr_t dma_addr;
4067 phys_addr_t phys_addr;
4070 buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
4071 total_size = MVPP2_RX_TOTAL_SIZE(buf_size);
4074 (buf_num + bm_pool->buf_num > bm_pool->size)) {
4075 netdev_err(port->dev,
4076 "cannot allocate %d buffers for pool %d\n",
4077 buf_num, bm_pool->id);
4081 for (i = 0; i < buf_num; i++) {
4082 buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr,
4083 &phys_addr, GFP_KERNEL);
4087 mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
4091 /* Update BM driver with number of buffers added to pool */
4092 bm_pool->buf_num += i;
4094 netdev_dbg(port->dev,
4095 "%s pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
4096 bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
4097 bm_pool->id, bm_pool->pkt_size, buf_size, total_size);
4099 netdev_dbg(port->dev,
4100 "%s pool %d: %d of %d buffers added\n",
4101 bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
4102 bm_pool->id, i, buf_num);
4106 /* Notify the driver that BM pool is being used as specific type and return the
4107 * pool pointer on success
4109 static struct mvpp2_bm_pool *
4110 mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
4113 struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
4116 if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
4117 netdev_err(port->dev, "mixing pool types is forbidden\n");
4121 if (new_pool->type == MVPP2_BM_FREE)
4122 new_pool->type = type;
4124 /* Allocate buffers in case BM pool is used as long pool, but packet
4125 * size doesn't match MTU or BM pool hasn't being used yet
4127 if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) ||
4128 (new_pool->pkt_size == 0)) {
4131 /* Set default buffer number or free all the buffers in case
4132 * the pool is not empty
4134 pkts_num = new_pool->buf_num;
4136 pkts_num = type == MVPP2_BM_SWF_LONG ?
4137 MVPP2_BM_LONG_BUF_NUM :
4138 MVPP2_BM_SHORT_BUF_NUM;
4140 mvpp2_bm_bufs_free(port->dev->dev.parent,
4141 port->priv, new_pool);
4143 new_pool->pkt_size = pkt_size;
4144 new_pool->frag_size =
4145 SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
4146 MVPP2_SKB_SHINFO_SIZE;
4148 /* Allocate buffers for this pool */
4149 num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
4150 if (num != pkts_num) {
4151 WARN(1, "pool %d: %d of %d allocated\n",
4152 new_pool->id, num, pkts_num);
4157 mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
4158 MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
4163 /* Initialize pools for swf */
4164 static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
4168 if (!port->pool_long) {
4170 mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id),
4173 if (!port->pool_long)
4176 port->pool_long->port_map |= (1 << port->id);
4178 for (rxq = 0; rxq < port->nrxqs; rxq++)
4179 mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
4182 if (!port->pool_short) {
4184 mvpp2_bm_pool_use(port, MVPP2_BM_SWF_SHORT_POOL,
4186 MVPP2_BM_SHORT_PKT_SIZE);
4187 if (!port->pool_short)
4190 port->pool_short->port_map |= (1 << port->id);
4192 for (rxq = 0; rxq < port->nrxqs; rxq++)
4193 mvpp2_rxq_short_pool_set(port, rxq,
4194 port->pool_short->id);
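/* Resize the port's long pool on MTU change: drain the pool, then refill it
 * with buffers sized for the new packet size.
 */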
static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_bm_pool *port_pool = port->pool_long;
	int num, pkts_num = port_pool->buf_num;
	int pkt_size = MVPP2_RX_PKT_SIZE(mtu);

	/* Update BM pool with new buffer size */
	mvpp2_bm_bufs_free(dev->dev.parent, port->priv, port_pool);
	if (port_pool->buf_num) {
		WARN(1, "cannot free all buffers in pool %d\n", port_pool->id);
		return -EIO;
	}

	port_pool->pkt_size = pkt_size;
	port_pool->frag_size = SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
		MVPP2_SKB_SHINFO_SIZE;
	num = mvpp2_bm_bufs_add(port, port_pool, pkts_num);
	if (num != pkts_num) {
		WARN(1, "pool %d: %d of %d allocated\n",
		     port_pool->id, num, pkts_num);
		return -EIO;
	}

	mvpp2_bm_pool_bufsize_set(port->priv, port_pool,
				  MVPP2_RX_BUF_SIZE(port_pool->pkt_size));

	dev->mtu = mtu;
	netdev_update_features(dev);
	return 0;
}

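/* Enable/disable the port interrupts of every software thread (queue vector)
 * attached to this port.
 */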
static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
{
	int i, sw_thread_mask = 0;

	for (i = 0; i < port->nqvecs; i++)
		sw_thread_mask |= port->qvecs[i].sw_thread_mask;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_ENABLE_INTERRUPT(sw_thread_mask));
}

static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
{
	int i, sw_thread_mask = 0;

	for (i = 0; i < port->nqvecs; i++)
		sw_thread_mask |= port->qvecs[i].sw_thread_mask;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_DISABLE_INTERRUPT(sw_thread_mask));
}

static inline void mvpp2_qvec_interrupt_enable(struct mvpp2_queue_vector *qvec)
{
	struct mvpp2_port *port = qvec->port;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_ENABLE_INTERRUPT(qvec->sw_thread_mask));
}

static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec)
{
	struct mvpp2_port *port = qvec->port;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask));
}

/* Mask the current CPU's Rx/Tx interrupts.
 * Called by on_each_cpu(), guaranteed to run with migration disabled,
 * using smp_processor_id() is OK.
 */
static void mvpp2_interrupts_mask(void *arg)
{
	struct mvpp2_port *port = arg;

	mvpp2_percpu_write(port->priv, smp_processor_id(),
			   MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
}

/* Unmask the current CPU's Rx/Tx interrupts.
 * Called by on_each_cpu(), guaranteed to run with migration disabled,
 * using smp_processor_id() is OK.
 */
static void mvpp2_interrupts_unmask(void *arg)
{
	struct mvpp2_port *port = arg;
	u32 val;

	val = MVPP2_CAUSE_MISC_SUM_MASK |
		MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
	if (port->has_tx_irqs)
		val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;

	mvpp2_percpu_write(port->priv, smp_processor_id(),
			   MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
}

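/* Mask or unmask the Rx interrupts of all shared queue vectors of a port;
 * only relevant on PPv2.2, where shared vectors have their own sw thread.
 */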
static void
mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
{
	u32 val;
	int i;

	if (port->priv->hw_version != MVPP22)
		return;

	if (mask)
		val = 0;
	else
		val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;

	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *v = port->qvecs + i;

		if (v->type != MVPP2_QUEUE_VECTOR_SHARED)
			continue;

		mvpp2_percpu_write(port->priv, v->sw_thread_id,
				   MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
	}
}

/* Port configuration routines */

static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
	if (port->gop_id == 2)
		val |= GENCONF_CTRL0_PORT0_RGMII | GENCONF_CTRL0_PORT1_RGMII;
	else if (port->gop_id == 3)
		val |= GENCONF_CTRL0_PORT1_RGMII_MII;
	regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
}

static void mvpp22_gop_init_sgmii(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT |
	       GENCONF_PORT_CTRL0_RX_DATA_SAMPLE;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	if (port->gop_id > 1) {
		regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
		if (port->gop_id == 2)
			val &= ~GENCONF_CTRL0_PORT0_RGMII;
		else if (port->gop_id == 3)
			val &= ~GENCONF_CTRL0_PORT1_RGMII_MII;
		regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
	}
}

4363 static void mvpp22_gop_init_10gkr(struct mvpp2_port *port)
4365 struct mvpp2 *priv = port->priv;
4366 void __iomem *mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
4367 void __iomem *xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
4371 val = readl(xpcs + MVPP22_XPCS_CFG0);
4372 val &= ~(MVPP22_XPCS_CFG0_PCS_MODE(0x3) |
4373 MVPP22_XPCS_CFG0_ACTIVE_LANE(0x3));
4374 val |= MVPP22_XPCS_CFG0_ACTIVE_LANE(2);
4375 writel(val, xpcs + MVPP22_XPCS_CFG0);
4378 val = readl(mpcs + MVPP22_MPCS_CTRL);
4379 val &= ~MVPP22_MPCS_CTRL_FWD_ERR_CONN;
4380 writel(val, mpcs + MVPP22_MPCS_CTRL);
4382 val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
4383 val &= ~(MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7) | MAC_CLK_RESET_MAC |
4384 MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX);
4385 val |= MVPP22_MPCS_CLK_RESET_DIV_RATIO(1);
4386 writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
4388 val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET;
4389 val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX;
4390 writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
static int mvpp22_gop_init(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	if (!priv->sysctrl_base)
		return 0;

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		if (port->gop_id == 0)
			goto invalid_conf;
		mvpp22_gop_init_rgmii(port);
		break;
	case PHY_INTERFACE_MODE_SGMII:
		mvpp22_gop_init_sgmii(port);
		break;
	case PHY_INTERFACE_MODE_10GKR:
		if (port->gop_id != 0)
			goto invalid_conf;
		mvpp22_gop_init_10gkr(port);
		break;
	default:
		goto unsupported_conf;
	}

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL1, &val);
	val |= GENCONF_PORT_CTRL1_RESET(port->gop_id) |
	       GENCONF_PORT_CTRL1_EN(port->gop_id);
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL1, val);

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	regmap_read(priv->sysctrl_base, GENCONF_SOFT_RESET1, &val);
	val |= GENCONF_SOFT_RESET1_GOP;
	regmap_write(priv->sysctrl_base, GENCONF_SOFT_RESET1, val);

unsupported_conf:
	return 0;

invalid_conf:
	netdev_err(port->dev, "Invalid port configuration\n");
	return -EINVAL;
}

static void mvpp22_gop_unmask_irq(struct mvpp2_port *port)
{
	u32 val;

	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		/* Enable the GMAC link status irq for this port */
		val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
		val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
	}

	if (port->gop_id == 0) {
		/* Enable the XLG/GIG irqs for this port */
		val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
		if (port->phy_interface == PHY_INTERFACE_MODE_10GKR)
			val |= MVPP22_XLG_EXT_INT_MASK_XLG;
		else
			val |= MVPP22_XLG_EXT_INT_MASK_GIG;
		writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
	}
}

static void mvpp22_gop_mask_irq(struct mvpp2_port *port)
{
	u32 val;

	if (port->gop_id == 0) {
		val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
		val &= ~(MVPP22_XLG_EXT_INT_MASK_XLG |
			 MVPP22_XLG_EXT_INT_MASK_GIG);
		writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
	}

	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
		val &= ~MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
	}
}

static void mvpp22_gop_setup_irq(struct mvpp2_port *port)
{
	u32 val;

	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		val = readl(port->base + MVPP22_GMAC_INT_MASK);
		val |= MVPP22_GMAC_INT_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_MASK);
	}

	if (port->gop_id == 0) {
		val = readl(port->base + MVPP22_XLG_INT_MASK);
		val |= MVPP22_XLG_INT_MASK_LINK;
		writel(val, port->base + MVPP22_XLG_INT_MASK);
	}

	mvpp22_gop_unmask_irq(port);
}

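/* Set the mode of and power on the common PHY (comphy) lane, when one is
 * attached to the port.
 */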
static int mvpp22_comphy_init(struct mvpp2_port *port)
{
	enum phy_mode mode;
	int ret;

	if (!port->comphy)
		return 0;

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_SGMII:
		mode = PHY_MODE_SGMII;
		break;
	case PHY_INTERFACE_MODE_10GKR:
		mode = PHY_MODE_10GKR;
		break;
	default:
		return -EINVAL;
	}

	ret = phy_set_mode(port->comphy, mode);
	if (ret)
		return ret;

	return phy_power_on(port->comphy);
}

static void mvpp2_port_mii_gmac_configure_mode(struct mvpp2_port *port)
{
	u32 val;

	if (port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
		val |= MVPP22_CTRL4_SYNC_BYPASS_DIS | MVPP22_CTRL4_DP_CLK_SEL |
		       MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
		val &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
		writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);

		val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
		val |= MVPP2_GMAC_DISABLE_PADDING;
		val &= ~MVPP2_GMAC_FLOW_CTRL_MASK;
		writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
	} else if (phy_interface_mode_is_rgmii(port->phy_interface)) {
		val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
		val |= MVPP22_CTRL4_EXT_PIN_GMII_SEL |
		       MVPP22_CTRL4_SYNC_BYPASS_DIS |
		       MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
		val &= ~MVPP22_CTRL4_DP_CLK_SEL;
		writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);

		val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
		val &= ~MVPP2_GMAC_DISABLE_PADDING;
		writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
	}

	/* The port is connected to a copper PHY */
	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~MVPP2_GMAC_PORT_TYPE_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);

	val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
	val |= MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS |
	       MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG |
	       MVPP2_GMAC_AN_DUPLEX_EN;
	if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
		val |= MVPP2_GMAC_IN_BAND_AUTONEG;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}

static void mvpp2_port_mii_gmac_configure(struct mvpp2_port *port)
{
	u32 val;

	/* Force link down */
	val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
	val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
	val |= MVPP2_GMAC_FORCE_LINK_DOWN;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);

	/* Set the GMAC in a reset state */
	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
	val |= MVPP2_GMAC_PORT_RESET_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	/* Configure the PCS and in-band AN */
	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
	if (port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		val |= MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK;
	} else if (phy_interface_mode_is_rgmii(port->phy_interface)) {
		val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
		val |= MVPP2_GMAC_PORT_RGMII_MASK;
	}
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	mvpp2_port_mii_gmac_configure_mode(port);

	/* Unset the GMAC reset state */
	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
	val &= ~MVPP2_GMAC_PORT_RESET_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	/* Stop forcing link down */
	val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
	val &= ~MVPP2_GMAC_FORCE_LINK_DOWN;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}

static void mvpp2_port_mii_xlg_configure(struct mvpp2_port *port)
{
	u32 val;

	if (port->gop_id != 0)
		return;

	val = readl(port->base + MVPP22_XLG_CTRL0_REG);
	val |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;
	writel(val, port->base + MVPP22_XLG_CTRL0_REG);

	val = readl(port->base + MVPP22_XLG_CTRL4_REG);
	val &= ~MVPP22_XLG_CTRL4_MACMODSELECT_GMAC;
	val |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC;
	writel(val, port->base + MVPP22_XLG_CTRL4_REG);
}

static void mvpp22_port_mii_set(struct mvpp2_port *port)
{
	u32 val;

	/* Only GOP port 0 has an XLG MAC */
	if (port->gop_id == 0) {
		val = readl(port->base + MVPP22_XLG_CTRL3_REG);
		val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;

		if (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
		    port->phy_interface == PHY_INTERFACE_MODE_10GKR)
			val |= MVPP22_XLG_CTRL3_MACMODESELECT_10G;
		else
			val |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;

		writel(val, port->base + MVPP22_XLG_CTRL3_REG);
	}
}

static void mvpp2_port_mii_set(struct mvpp2_port *port)
{
	if (port->priv->hw_version == MVPP22)
		mvpp22_port_mii_set(port);

	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII)
		mvpp2_port_mii_gmac_configure(port);
	else if (port->phy_interface == PHY_INTERFACE_MODE_10GKR)
		mvpp2_port_mii_xlg_configure(port);
}

static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
	val |= MVPP2_GMAC_FC_ADV_EN;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}

static void mvpp2_port_enable(struct mvpp2_port *port)
{
	u32 val;

	/* Only GOP port 0 has an XLG MAC */
	if (port->gop_id == 0 &&
	    (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
	     port->phy_interface == PHY_INTERFACE_MODE_10GKR)) {
		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
		val |= MVPP22_XLG_CTRL0_PORT_EN |
		       MVPP22_XLG_CTRL0_MAC_RESET_DIS;
		val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS;
		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
	} else {
		val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
		val |= MVPP2_GMAC_PORT_EN_MASK;
		val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
		writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
	}
}

4689 static void mvpp2_port_disable(struct mvpp2_port *port)
4693 /* Only GOP port 0 has an XLG MAC */
4694 if (port->gop_id == 0 &&
4695 (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
4696 port->phy_interface == PHY_INTERFACE_MODE_10GKR)) {
4697 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
4698 val &= ~(MVPP22_XLG_CTRL0_PORT_EN |
4699 MVPP22_XLG_CTRL0_MAC_RESET_DIS);
4700 writel(val, port->base + MVPP22_XLG_CTRL0_REG);
4702 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
4703 val &= ~(MVPP2_GMAC_PORT_EN_MASK);
4704 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
		    ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

/* Configure loopback port */
static void mvpp2_port_loopback_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);

	if (port->speed == 1000)
		val |= MVPP2_GMAC_GMII_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;

	if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
		val |= MVPP2_GMAC_PCS_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;

	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

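/* De-assert the GMAC port reset and busy-wait until the hardware reports the
 * reset bit as cleared.
 */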
static void mvpp2_port_reset(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
		    ~MVPP2_GMAC_PORT_RESET_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
	       MVPP2_GMAC_PORT_RESET_MASK)
		continue;
}

/* Change maximum receive size of the port */
static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
	val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
		MVPP2_GMAC_MAX_RX_SIZE_OFFS);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

/* Change maximum receive size of the port */
static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP22_XLG_CTRL1_REG);
	val &= ~MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK;
	val |= ((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
	       MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS;
	writel(val, port->base + MVPP22_XLG_CTRL1_REG);
}

/* Set defaults to the MVPP2 port */
static void mvpp2_defaults_set(struct mvpp2_port *port)
{
	int tx_port_num, val, queue, ptxq, lrxq;

	if (port->priv->hw_version == MVPP21) {
		/* Configure port to loopback if needed */
		if (port->flags & MVPP2_F_LOOPBACK)
			mvpp2_port_loopback_set(port);

		/* Update TX FIFO MIN Threshold */
		val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
		val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
		/* Min. TX threshold must be less than minimal packet length */
		val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
		writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	}

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
		    tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);

	/* Close bandwidth for all queues */
	for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
		ptxq = mvpp2_txq_phys(port->id, queue);
		mvpp2_write(port->priv,
			    MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
	}

	/* Set refill period to 1 usec, refill tokens
	 * and bucket size to maximum
	 */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
		    port->priv->tclk / USEC_PER_SEC);
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
	val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
	val = MVPP2_TXP_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);

	/* Set MaximumLowLatencyPacketSize value to 256 */
	mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
		    MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
		    MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));

	/* Enable Rx cache snoop */
	for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_SNOOP_PKT_SIZE_MASK |
		       MVPP2_SNOOP_BUF_HDR_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}

	/* At default, mask all interrupts to all present cpus */
	mvpp2_interrupts_disable(port);
}

/* Enable/disable receiving packets */
static void mvpp2_ingress_enable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val &= ~MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}

static void mvpp2_ingress_disable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}

/* Enable transmit via physical egress queue
 * - HW starts taking descriptors from DRAM
 */
static void mvpp2_egress_enable(struct mvpp2_port *port)
{
	u32 qmap;
	int queue;
	int tx_port_num = mvpp2_egress_port(port);

	/* Enable all initialized TXs. */
	qmap = 0;
	for (queue = 0; queue < port->ntxqs; queue++) {
		struct mvpp2_tx_queue *txq = port->txqs[queue];

		if (txq->descs)
			qmap |= (1 << queue);
	}

	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
}

/* Disable transmit via physical egress queue
 * - HW doesn't take descriptors from DRAM
 */
static void mvpp2_egress_disable(struct mvpp2_port *port)
{
	u32 reg_data;
	int delay;
	int tx_port_num = mvpp2_egress_port(port);

	/* Issue stop command for active channels only */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
		    MVPP2_TXP_SCHED_ENQ_MASK;
	if (reg_data != 0)
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
			    (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));

	/* Wait for all Tx activity to terminate. */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "Tx stop timed out, status=0x%08x\n",
				    reg_data);
			break;
		}
		mdelay(1);
		delay++;

		/* Check port TX Command register that all
		 * Tx queues are stopped
		 */
		reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
	} while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
}

/* Rx descriptors helper methods */

/* Get number of Rx descriptors occupied by received packets */
static inline int
mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
{
	u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));

	return val & MVPP2_RXQ_OCCUPIED_MASK;
}

/* Update Rx queue status with the number of occupied and available
 * Rx descriptor slots.
 */
static inline void
mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
			int used_count, int free_count)
{
	/* Decrement the number of used descriptors and increment the
	 * number of free descriptors.
	 */
	u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);

	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
}

/* Get pointer to next RX descriptor to be processed by SW */
static inline struct mvpp2_rx_desc *
mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
	prefetch(rxq->descs + rxq->next_desc_to_proc);
	return rxq->descs + rx_desc;
}

/* Set rx queue offset */
static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
				 int prxq, int offset)
{
	u32 val;

	/* Convert offset from bytes to units of 32 bytes */
	offset = offset >> 5;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;

	val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
		MVPP2_RXQ_PACKET_OFFSET_MASK);

	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

/* Tx descriptors helper methods */

/* Get pointer to next Tx descriptor to be processed (send) by HW */
static struct mvpp2_tx_desc *
mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}

/* Update HW with number of aggregated Tx descriptors to be sent
 *
 * Called only from mvpp2_tx(), so migration is disabled, using
 * smp_processor_id() is OK.
 */
static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
{
	/* aggregated access - relevant TXQ number is written in TX desc */
	mvpp2_percpu_write(port->priv, smp_processor_id(),
			   MVPP2_AGGR_TXQ_UPDATE_REG, pending);
}

/* Check if there are enough free descriptors in aggregated txq.
 * If not, update the number of occupied descriptors and repeat the check.
 *
 * Called only from mvpp2_tx(), so migration is disabled, using
 * smp_processor_id() is OK.
 */
static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
				     struct mvpp2_tx_queue *aggr_txq, int num)
{
	if ((aggr_txq->count + num) > aggr_txq->size) {
		/* Update number of occupied aggregated Tx descriptors */
		int cpu = smp_processor_id();
		u32 val = mvpp2_read(priv, MVPP2_AGGR_TXQ_STATUS_REG(cpu));

		aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
	}

	if ((aggr_txq->count + num) > aggr_txq->size)
		return -ENOMEM;

	return 0;
}

/* Reserved Tx descriptors allocation request
 *
 * Called only from mvpp2_txq_reserved_desc_num_proc(), itself called
 * only by mvpp2_tx(), so migration is disabled, using
 * smp_processor_id() is OK.
 */
static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
					 struct mvpp2_tx_queue *txq, int num)
{
	u32 val;
	int cpu = smp_processor_id();

	val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
	mvpp2_percpu_write(priv, cpu, MVPP2_TXQ_RSVD_REQ_REG, val);

	val = mvpp2_percpu_read(priv, cpu, MVPP2_TXQ_RSVD_RSLT_REG);

	return val & MVPP2_TXQ_RSVD_RSLT_MASK;
}

/* Check if there are enough reserved descriptors for transmission.
 * If not, request chunk of reserved descriptors and check again.
 */
static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv,
					    struct mvpp2_tx_queue *txq,
					    struct mvpp2_txq_pcpu *txq_pcpu,
					    int num)
{
	int req, cpu, desc_count;

	if (txq_pcpu->reserved_num >= num)
		return 0;

	/* Not enough descriptors reserved! Update the reserved descriptor
	 * count and check again.
	 */

	desc_count = 0;
	/* Compute total of used descriptors */
	for_each_present_cpu(cpu) {
		struct mvpp2_txq_pcpu *txq_pcpu_aux;

		txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu);
		desc_count += txq_pcpu_aux->count;
		desc_count += txq_pcpu_aux->reserved_num;
	}

	req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
	desc_count += req;

	if (desc_count >
	    (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK)))
		return -ENOMEM;

	txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req);

	/* OK, the descriptor count has been updated: check again. */
	if (txq_pcpu->reserved_num < num)
		return -ENOMEM;

	return 0;
}

/* Release the last allocated Tx descriptor. Useful to handle DMA
 * mapping failures in the Tx path.
 */
static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
{
	if (txq->next_desc_to_proc == 0)
		txq->next_desc_to_proc = txq->last_desc - 1;
	else
		txq->next_desc_to_proc--;
}

/* Set Tx descriptors fields relevant for CSUM calculation */
static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
			       int ip_hdr_len, int l4_proto)
{
	u32 command;

	/* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
	 * G_L4_chk, L4_type required only for checksum calculation
	 */
	command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
	command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
	command |= MVPP2_TXD_IP_CSUM_DISABLE;

	if (l3_proto == swab16(ETH_P_IP)) {
		command &= ~MVPP2_TXD_IP_CSUM_DISABLE;	/* enable IPv4 csum */
		command &= ~MVPP2_TXD_L3_IP6;		/* enable IPv4 */
	} else {
		command |= MVPP2_TXD_L3_IP6;		/* enable IPv6 */
	}

	if (l4_proto == IPPROTO_TCP) {
		command &= ~MVPP2_TXD_L4_UDP;		/* enable TCP */
		command &= ~MVPP2_TXD_L4_CSUM_FRAG;	/* generate L4 csum */
	} else if (l4_proto == IPPROTO_UDP) {
		command |= MVPP2_TXD_L4_UDP;		/* enable UDP */
		command &= ~MVPP2_TXD_L4_CSUM_FRAG;	/* generate L4 csum */
	} else {
		command |= MVPP2_TXD_L4_CSUM_NOT;
	}

	return command;
}

/* Get number of sent descriptors and decrement counter.
 * The number of sent descriptors is returned.
 *
 * Called only from mvpp2_txq_done(), called from mvpp2_tx()
 * (migration disabled) and from the TX completion tasklet (migration
 * disabled) so using smp_processor_id() is OK.
 */
static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
					   struct mvpp2_tx_queue *txq)
{
	u32 val;

	/* Reading status reg resets transmitted descriptor counter */
	val = mvpp2_percpu_read(port->priv, smp_processor_id(),
				MVPP2_TXQ_SENT_REG(txq->id));

	return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
		MVPP2_TRANSMITTED_COUNT_OFFSET;
}

/* Called through on_each_cpu(), so runs on all CPUs, with migration
 * disabled, therefore using smp_processor_id() is OK.
 */
static void mvpp2_txq_sent_counter_clear(void *arg)
{
	struct mvpp2_port *port = arg;
	int queue;

	for (queue = 0; queue < port->ntxqs; queue++) {
		int id = port->txqs[queue]->id;

		mvpp2_percpu_read(port->priv, smp_processor_id(),
				  MVPP2_TXQ_SENT_REG(id));
	}
}

5169 /* Set max sizes for Tx queues */
5170 static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
5173 int txq, tx_port_num;
5175 mtu = port->pkt_size * 8;
5176 if (mtu > MVPP2_TXP_MTU_MAX)
5177 mtu = MVPP2_TXP_MTU_MAX;
5179 /* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
5182 /* Indirect access to registers */
5183 tx_port_num = mvpp2_egress_port(port);
5184 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
5187 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
5188 val &= ~MVPP2_TXP_MTU_MAX;
5190 mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);
5192 /* TXP token size and all TXQs token size must be larger that MTU */
5193 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
5194 size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
5197 val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
5199 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
5202 for (txq = 0; txq < port->ntxqs; txq++) {
5203 val = mvpp2_read(port->priv,
5204 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
5205 size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;
5209 val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
5211 mvpp2_write(port->priv,
5212 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
/* Set the number of packets that will be received before Rx interrupt
 * will be generated by HW.
 */
static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
				   struct mvpp2_rx_queue *rxq)
{
	int cpu = get_cpu();

	if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
		rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;

	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG,
			   rxq->pkts_coal);

	put_cpu();
}

/* For some reason in the LSP this is done on each CPU. Why? */
static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port,
				   struct mvpp2_tx_queue *txq)
{
	int cpu = get_cpu();
	u32 val;

	if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK)
		txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK;

	val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_THRESH_REG, val);

	put_cpu();
}

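/* Convert between microseconds and core-clock (tclk) cycles, saturating at
 * U32_MAX.
 */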
static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
{
	u64 tmp = (u64)clk_hz * usec;

	do_div(tmp, USEC_PER_SEC);

	return tmp > U32_MAX ? U32_MAX : tmp;
}

static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz)
{
	u64 tmp = (u64)cycles * USEC_PER_SEC;

	do_div(tmp, clk_hz);

	return tmp > U32_MAX ? U32_MAX : tmp;
}

/* Set the time delay in usec before Rx interrupt */
static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
				   struct mvpp2_rx_queue *rxq)
{
	unsigned long freq = port->priv->tclk;
	u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);

	if (val > MVPP2_MAX_ISR_RX_THRESHOLD) {
		rxq->time_coal =
			mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq);

		/* re-evaluate to get actual register value */
		val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
	}

	mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
}

static void mvpp2_tx_time_coal_set(struct mvpp2_port *port)
{
	unsigned long freq = port->priv->tclk;
	u32 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);

	if (val > MVPP2_MAX_ISR_TX_THRESHOLD) {
		port->tx_time_coal =
			mvpp2_cycles_to_usec(MVPP2_MAX_ISR_TX_THRESHOLD, freq);

		/* re-evaluate to get actual register value */
		val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
	}

	mvpp2_write(port->priv, MVPP2_ISR_TX_THRESHOLD_REG(port->id), val);
}

/* Free Tx queue skbuffs */
static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
				struct mvpp2_tx_queue *txq,
				struct mvpp2_txq_pcpu *txq_pcpu, int num)
{
	int i;

	for (i = 0; i < num; i++) {
		struct mvpp2_txq_pcpu_buf *tx_buf =
			txq_pcpu->buffs + txq_pcpu->txq_get_index;

		dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
				 tx_buf->size, DMA_TO_DEVICE);
		if (tx_buf->skb)
			dev_kfree_skb_any(tx_buf->skb);

		mvpp2_txq_inc_get(txq_pcpu);
	}
}

static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
							u32 cause)
{
	int queue = fls(cause) - 1;

	return port->rxqs[queue];
}

static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
							u32 cause)
{
	int queue = fls(cause) - 1;

	return port->txqs[queue];
}

/* Handle end of transmission */
static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
			   struct mvpp2_txq_pcpu *txq_pcpu)
{
	struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
	int tx_done;

	if (txq_pcpu->cpu != smp_processor_id())
		netdev_err(port->dev, "wrong cpu on the end of Tx processing\n");

	tx_done = mvpp2_txq_sent_desc_proc(port, txq);
	if (!tx_done)
		return;
	mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);

	txq_pcpu->count -= tx_done;

	if (netif_tx_queue_stopped(nq))
		if (txq_pcpu->size - txq_pcpu->count >= MAX_SKB_FRAGS + 1)
			netif_tx_wake_queue(nq);
}

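/* Walk the Tx-done cause bitmap, reap completed descriptors on each queue,
 * and return the number of descriptors still pending.
 */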
static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause,
				  int cpu)
{
	struct mvpp2_tx_queue *txq;
	struct mvpp2_txq_pcpu *txq_pcpu;
	unsigned int tx_todo = 0;

	while (cause) {
		txq = mvpp2_get_tx_queue(port, cause);
		if (!txq)
			break;

		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);

		if (txq_pcpu->count) {
			mvpp2_txq_done(port, txq, txq_pcpu);
			tx_todo += txq_pcpu->count;
		}

		cause &= ~(1 << txq->log_id);
	}
	return tx_todo;
}

/* Rx/Tx queue initialization/cleanup methods */

/* Allocate and initialize descriptors for aggr TXQ */
static int mvpp2_aggr_txq_init(struct platform_device *pdev,
			       struct mvpp2_tx_queue *aggr_txq, int cpu,
			       struct mvpp2 *priv)
{
	u32 txq_dma;

	/* Allocate memory for TX descriptors */
	aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
				MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
				&aggr_txq->descs_dma, GFP_KERNEL);
	if (!aggr_txq->descs)
		return -ENOMEM;

	aggr_txq->last_desc = aggr_txq->size - 1;

	/* Aggr TXQ no reset WA */
	aggr_txq->next_desc_to_proc = mvpp2_read(priv,
						 MVPP2_AGGR_TXQ_INDEX_REG(cpu));

	/* Set Tx descriptors queue starting address (indirect access) */
	if (priv->hw_version == MVPP21)
		txq_dma = aggr_txq->descs_dma;
	else
		txq_dma = aggr_txq->descs_dma >>
			MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;

	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma);
	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu),
		    MVPP2_AGGR_TXQ_SIZE);

	return 0;
}

/* Create a specified Rx queue */
static int mvpp2_rxq_init(struct mvpp2_port *port,
			  struct mvpp2_rx_queue *rxq)
{
	u32 rxq_dma;
	int cpu;

	rxq->size = port->rx_ring_size;

	/* Allocate memory for RX descriptors */
	rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
					rxq->size * MVPP2_DESC_ALIGNED_SIZE,
					&rxq->descs_dma, GFP_KERNEL);
	if (!rxq->descs)
		return -ENOMEM;

	rxq->last_desc = rxq->size - 1;

	/* Zero occupied and non-occupied counters - direct access */
	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);

	/* Set Rx descriptors queue starting address - indirect access */
	cpu = get_cpu();
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
	if (port->priv->hw_version == MVPP21)
		rxq_dma = rxq->descs_dma;
	else
		rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0);
	put_cpu();

	/* Set Offset */
	mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);

	/* Set coalescing pkts and time */
	mvpp2_rx_pkts_coal_set(port, rxq);
	mvpp2_rx_time_coal_set(port, rxq);

	/* Add number of descriptors ready for receiving packets */
	mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);

	return 0;
}

/* Push packets received by the RXQ to BM pool */
static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
				struct mvpp2_rx_queue *rxq)
{
	int rx_received, i;

	rx_received = mvpp2_rxq_received(port, rxq->id);
	if (!rx_received)
		return;

	for (i = 0; i < rx_received; i++) {
		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
		u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
		int pool;

		pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >>
			MVPP2_RXD_BM_POOL_ID_OFFS;

		mvpp2_bm_pool_put(port, pool,
				  mvpp2_rxdesc_dma_addr_get(port, rx_desc),
				  mvpp2_rxdesc_cookie_get(port, rx_desc));
	}
	mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
}

/* Cleanup Rx queue */
static void mvpp2_rxq_deinit(struct mvpp2_port *port,
			     struct mvpp2_rx_queue *rxq)
{
	int cpu;

	mvpp2_rxq_drop_pkts(port, rxq);

	if (rxq->descs)
		dma_free_coherent(port->dev->dev.parent,
				  rxq->size * MVPP2_DESC_ALIGNED_SIZE,
				  rxq->descs,
				  rxq->descs_dma);

	rxq->descs             = NULL;
	rxq->last_desc         = 0;
	rxq->next_desc_to_proc = 0;
	rxq->descs_dma         = 0;

	/* Clear Rx descriptors queue starting address and size;
	 * free descriptor number
	 */
	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
	cpu = get_cpu();
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0);
	put_cpu();
}

/* Create and initialize a Tx queue */
static int mvpp2_txq_init(struct mvpp2_port *port,
			  struct mvpp2_tx_queue *txq)
{
	u32 val;
	int cpu, desc, desc_per_txq, tx_port_num;
	struct mvpp2_txq_pcpu *txq_pcpu;

	txq->size = port->tx_ring_size;

	/* Allocate memory for Tx descriptors */
	txq->descs = dma_alloc_coherent(port->dev->dev.parent,
					txq->size * MVPP2_DESC_ALIGNED_SIZE,
					&txq->descs_dma, GFP_KERNEL);
	if (!txq->descs)
		return -ENOMEM;

	txq->last_desc = txq->size - 1;

	/* Set Tx descriptors queue starting address - indirect access */
	cpu = get_cpu();
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG,
			   txq->descs_dma);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG,
			   txq->size & MVPP2_TXQ_DESC_SIZE_MASK);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_INDEX_REG, 0);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_RSVD_CLR_REG,
			   txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
	val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PENDING_REG);
	val &= ~MVPP2_TXQ_PENDING_MASK;
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PENDING_REG, val);

	/* Calculate base address in prefetch buffer. We reserve 16 descriptors
	 * for each existing TXQ.
	 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT
	 * GBE ports assumed to be continuous from 0 to MVPP2_MAX_PORTS
	 */
	desc_per_txq = 16;
	desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
	       (txq->log_id * desc_per_txq);

	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG,
			   MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
			   MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
	put_cpu();

	/* WRR / EJP configuration - indirect access */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
	val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);

	val = MVPP2_TXQ_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
		    val);

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
		txq_pcpu->size = txq->size;
		txq_pcpu->buffs = kmalloc_array(txq_pcpu->size,
						sizeof(*txq_pcpu->buffs),
						GFP_KERNEL);
		if (!txq_pcpu->buffs)
			goto cleanup;

		txq_pcpu->count = 0;
		txq_pcpu->reserved_num = 0;
		txq_pcpu->txq_put_index = 0;
		txq_pcpu->txq_get_index = 0;

		txq_pcpu->tso_headers =
			dma_alloc_coherent(port->dev->dev.parent,
					   MVPP2_AGGR_TXQ_SIZE * TSO_HEADER_SIZE,
					   &txq_pcpu->tso_headers_dma,
					   GFP_KERNEL);
		if (!txq_pcpu->tso_headers)
			goto cleanup;
	}

	return 0;
cleanup:
	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
		kfree(txq_pcpu->buffs);

		/* The free size must match the allocation size above */
		dma_free_coherent(port->dev->dev.parent,
				  MVPP2_AGGR_TXQ_SIZE * TSO_HEADER_SIZE,
				  txq_pcpu->tso_headers,
				  txq_pcpu->tso_headers_dma);
	}

	dma_free_coherent(port->dev->dev.parent,
			  txq->size * MVPP2_DESC_ALIGNED_SIZE,
			  txq->descs, txq->descs_dma);

	return -ENOMEM;
}

5630 /* Free allocated TXQ resources */
5631 static void mvpp2_txq_deinit(struct mvpp2_port *port,
5632 struct mvpp2_tx_queue *txq)
5634 struct mvpp2_txq_pcpu *txq_pcpu;
5637 for_each_present_cpu(cpu) {
5638 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
5639 kfree(txq_pcpu->buffs);
5641 dma_free_coherent(port->dev->dev.parent,
5642 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
5643 txq_pcpu->tso_headers,
5644 txq_pcpu->tso_headers_dma);
5648 dma_free_coherent(port->dev->dev.parent,
5649 txq->size * MVPP2_DESC_ALIGNED_SIZE,
5650 txq->descs, txq->descs_dma);
5654 txq->next_desc_to_proc = 0;
5657 /* Set minimum bandwidth for disabled TXQs */
5658 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
5660 /* Set Tx descriptors queue starting address and size */
5662 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
5663 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0);
5664 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0);
5668 /* Cleanup Tx ports */
5669 static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
5671 struct mvpp2_txq_pcpu *txq_pcpu;
5672 int delay, pending, cpu;
5676 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
5677 val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG);
5678 val |= MVPP2_TXQ_DRAIN_EN_MASK;
5679 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
5681 /* The napi queue has been stopped so wait for all packets
5682 * to be transmitted.
5686 if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
5687 netdev_warn(port->dev,
5688 "port %d: cleaning queue %d timed out\n",
5689 port->id, txq->log_id);
5695 pending = mvpp2_percpu_read(port->priv, cpu,
5696 MVPP2_TXQ_PENDING_REG);
5697 pending &= MVPP2_TXQ_PENDING_MASK;
5700 val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
5701 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
5704 for_each_present_cpu(cpu) {
5705 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
5707 /* Release all packets */
5708 mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);
5711 txq_pcpu->count = 0;
5712 txq_pcpu->txq_put_index = 0;
5713 txq_pcpu->txq_get_index = 0;
5717 /* Cleanup all Tx queues */
5718 static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
5720 struct mvpp2_tx_queue *txq;
5724 val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);
5726 /* Reset Tx ports and delete Tx queues */
5727 val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
5728 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
5730 for (queue = 0; queue < port->ntxqs; queue++) {
5731 txq = port->txqs[queue];
5732 mvpp2_txq_clean(port, txq);
5733 mvpp2_txq_deinit(port, txq);
5736 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
5738 val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
5739 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
5742 /* Cleanup all Rx queues */
5743 static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
5747 for (queue = 0; queue < port->nrxqs; queue++)
5748 mvpp2_rxq_deinit(port, port->rxqs[queue]);
5751 /* Init all Rx queues for port */
5752 static int mvpp2_setup_rxqs(struct mvpp2_port *port)
5756 for (queue = 0; queue < port->nrxqs; queue++) {
5757 err = mvpp2_rxq_init(port, port->rxqs[queue]);
5764 mvpp2_cleanup_rxqs(port);
5768 /* Init all tx queues for port */
5769 static int mvpp2_setup_txqs(struct mvpp2_port *port)
5771 struct mvpp2_tx_queue *txq;
5774 for (queue = 0; queue < port->ntxqs; queue++) {
5775 txq = port->txqs[queue];
5776 err = mvpp2_txq_init(port, txq);
5781 if (port->has_tx_irqs) {
5782 mvpp2_tx_time_coal_set(port);
5783 for (queue = 0; queue < port->ntxqs; queue++) {
5784 txq = port->txqs[queue];
5785 mvpp2_tx_pkts_coal_set(port, txq);
5789 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
5793 mvpp2_cleanup_txqs(port);
5797 /* The callback for per-port interrupt */
5798 static irqreturn_t mvpp2_isr(int irq, void *dev_id)
5800 struct mvpp2_queue_vector *qv = dev_id;
5802 mvpp2_qvec_interrupt_disable(qv);
5804 napi_schedule(&qv->napi);
5809 /* Per-port interrupt for link status changes */
5810 static irqreturn_t mvpp2_link_status_isr(int irq, void *dev_id)
5812 struct mvpp2_port *port = (struct mvpp2_port *)dev_id;
5813 struct net_device *dev = port->dev;
5814 bool event = false, link = false;
5817 mvpp22_gop_mask_irq(port);
5819 if (port->gop_id == 0 &&
5820 port->phy_interface == PHY_INTERFACE_MODE_10GKR) {
5821 val = readl(port->base + MVPP22_XLG_INT_STAT);
5822 if (val & MVPP22_XLG_INT_STAT_LINK) {
5824 val = readl(port->base + MVPP22_XLG_STATUS);
5825 if (val & MVPP22_XLG_STATUS_LINK_UP)
5828 } else if (phy_interface_mode_is_rgmii(port->phy_interface) ||
5829 port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
5830 val = readl(port->base + MVPP22_GMAC_INT_STAT);
5831 if (val & MVPP22_GMAC_INT_STAT_LINK) {
5833 val = readl(port->base + MVPP2_GMAC_STATUS0);
5834 if (val & MVPP2_GMAC_STATUS0_LINK_UP)
5839 if (!netif_running(dev) || !event)
5843 mvpp2_interrupts_enable(port);
5845 mvpp2_egress_enable(port);
5846 mvpp2_ingress_enable(port);
5847 netif_carrier_on(dev);
5848 netif_tx_wake_all_queues(dev);
5850 netif_tx_stop_all_queues(dev);
5851 netif_carrier_off(dev);
5852 mvpp2_ingress_disable(port);
5853 mvpp2_egress_disable(port);
5855 mvpp2_interrupts_disable(port);
5859 mvpp22_gop_unmask_irq(port);
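/* Program the GMAC speed/duplex configuration from the attached PHY state;
 * only meaningful for RGMII and SGMII ports.
 */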
static void mvpp2_gmac_set_autoneg(struct mvpp2_port *port,
				   struct phy_device *phydev)
{
	u32 val;

	if (port->phy_interface != PHY_INTERFACE_MODE_RGMII &&
	    port->phy_interface != PHY_INTERFACE_MODE_RGMII_ID &&
	    port->phy_interface != PHY_INTERFACE_MODE_RGMII_RXID &&
	    port->phy_interface != PHY_INTERFACE_MODE_RGMII_TXID &&
	    port->phy_interface != PHY_INTERFACE_MODE_SGMII)
		return;

	val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
	val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
		 MVPP2_GMAC_CONFIG_GMII_SPEED |
		 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
		 MVPP2_GMAC_AN_SPEED_EN |
		 MVPP2_GMAC_AN_DUPLEX_EN);

	if (phydev->duplex)
		val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;

	if (phydev->speed == SPEED_1000)
		val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
	else if (phydev->speed == SPEED_100)
		val |= MVPP2_GMAC_CONFIG_MII_SPEED;

	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}

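/* phylib adjust_link callback: reconfigure the comphy/GOP if the PHY mode
 * changed, propagate speed/duplex, and force the MAC link state up or down.
 */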
static void mvpp2_link_event(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	bool link_reconfigured = false;
	u32 val;

	if (phydev->link) {
		if (port->phy_interface != phydev->interface && port->comphy) {
			/* disable current port for reconfiguration */
			mvpp2_interrupts_disable(port);
			netif_carrier_off(port->dev);
			mvpp2_port_disable(port);
			phy_power_off(port->comphy);

			/* comphy reconfiguration */
			port->phy_interface = phydev->interface;
			mvpp22_comphy_init(port);

			/* gop/mac reconfiguration */
			mvpp22_gop_init(port);
			mvpp2_port_mii_set(port);

			link_reconfigured = true;
		}

		if ((port->speed != phydev->speed) ||
		    (port->duplex != phydev->duplex)) {
			mvpp2_gmac_set_autoneg(port, phydev);

			port->duplex = phydev->duplex;
			port->speed = phydev->speed;
		}
	}

	if (phydev->link != port->link || link_reconfigured) {
		port->link = phydev->link;

		if (phydev->link) {
			if (port->phy_interface == PHY_INTERFACE_MODE_RGMII ||
			    port->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
			    port->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID ||
			    port->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID ||
			    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
				val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
				val |= (MVPP2_GMAC_FORCE_LINK_PASS |
					MVPP2_GMAC_FORCE_LINK_DOWN);
				writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			}

			mvpp2_interrupts_enable(port);
			mvpp2_port_enable(port);

			mvpp2_egress_enable(port);
			mvpp2_ingress_enable(port);
			netif_carrier_on(dev);
			netif_tx_wake_all_queues(dev);
		} else {
			netif_tx_stop_all_queues(dev);
			netif_carrier_off(dev);
			mvpp2_ingress_disable(port);
			mvpp2_egress_disable(port);

			mvpp2_port_disable(port);
			mvpp2_interrupts_disable(port);
		}

		phy_print_status(phydev);
	}
}

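/* Arm the per-CPU Tx-done hrtimer (used on the Tx-done polling path) if it
 * is not already scheduled.
 */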
static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
{
	ktime_t interval;

	if (!port_pcpu->timer_scheduled) {
		port_pcpu->timer_scheduled = true;
		interval = MVPP2_TXDONE_HRTIMER_PERIOD_NS;
		hrtimer_start(&port_pcpu->tx_done_timer, interval,
			      HRTIMER_MODE_REL_PINNED);
	}
}

static void mvpp2_tx_proc_cb(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
	unsigned int tx_todo, cause;

	if (!netif_running(dev))
		return;
	port_pcpu->timer_scheduled = false;

	/* Process all the Tx queues */
	cause = (1 << port->ntxqs) - 1;
	tx_todo = mvpp2_tx_done(port, cause, smp_processor_id());

	/* Set the timer in case not all the packets were processed */
	if (tx_todo)
		mvpp2_timer_set(port_pcpu);
}

static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
{
	struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
							 struct mvpp2_port_pcpu,
							 tx_done_timer);

	tasklet_schedule(&port_pcpu->tx_done_tasklet);

	return HRTIMER_NORESTART;
}

/* Main RX/TX processing routines */

/* Display more error info */
static void mvpp2_rx_error(struct mvpp2_port *port,
			   struct mvpp2_rx_desc *rx_desc)
{
	u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
	size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);

	switch (status & MVPP2_RXD_ERR_CODE_MASK) {
	case MVPP2_RXD_ERR_CRC:
		netdev_err(port->dev, "bad rx status %08x (crc error), size=%zu\n",
			   status, sz);
		break;
	case MVPP2_RXD_ERR_OVERRUN:
		netdev_err(port->dev, "bad rx status %08x (overrun error), size=%zu\n",
			   status, sz);
		break;
	case MVPP2_RXD_ERR_RESOURCE:
		netdev_err(port->dev, "bad rx status %08x (resource error), size=%zu\n",
			   status, sz);
		break;
	}
}

/* Handle RX checksum offload */
static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
			  struct sk_buff *skb)
{
	if (((status & MVPP2_RXD_L3_IP4) &&
	     !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
	    (status & MVPP2_RXD_L3_IP6))
		if (((status & MVPP2_RXD_L4_UDP) ||
		     (status & MVPP2_RXD_L4_TCP)) &&
		     (status & MVPP2_RXD_L4_CSUM_OK)) {
			skb->csum = 0;
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return;
		}

	skb->ip_summed = CHECKSUM_NONE;
}

6054 /* Reuse skb if possible, or allocate a new skb and add it to BM pool */
6055 static int mvpp2_rx_refill(struct mvpp2_port *port,
6056 struct mvpp2_bm_pool *bm_pool, int pool)
6058 dma_addr_t dma_addr;
6059 phys_addr_t phys_addr;
6062 /* No recycle or too many buffers are in use, so allocate a new skb */
6063 buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, &phys_addr,
6068 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
6073 /* Handle tx checksum */
6074 static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
6076 if (skb->ip_summed == CHECKSUM_PARTIAL) {
6080 if (skb->protocol == htons(ETH_P_IP)) {
6081 struct iphdr *ip4h = ip_hdr(skb);
6083 /* Calculate IPv4 checksum and L4 checksum */
6084 ip_hdr_len = ip4h->ihl;
6085 l4_proto = ip4h->protocol;
6086 } else if (skb->protocol == htons(ETH_P_IPV6)) {
6087 struct ipv6hdr *ip6h = ipv6_hdr(skb);
6089 /* Read l4_protocol from one of IPv6 extension headers */
6090 if (skb_network_header_len(skb) > 0)
6091 ip_hdr_len = (skb_network_header_len(skb) >> 2);
6092 l4_proto = ip6h->nexthdr;
6094 return MVPP2_TXD_L4_CSUM_NOT;
6097 return mvpp2_txq_desc_csum(skb_network_offset(skb),
6098 skb->protocol, ip_hdr_len, l4_proto);
6101 return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
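/* Note: both IP branches above pass mvpp2_txq_desc_csum() the L3 header
 * length in 32-bit words -- ihl already uses that unit for IPv4, and the
 * IPv6 byte length is converted by the >> 2. Packets that are neither
 * IPv4 nor IPv6 get MVPP2_TXD_L4_CSUM_NOT, disabling hardware insertion.
 */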
6104 /* Main rx processing */
6105 static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
6106 int rx_todo, struct mvpp2_rx_queue *rxq)
6108 struct net_device *dev = port->dev;
6114 /* Get the number of received packets and clamp the to-do count */
6115 rx_received = mvpp2_rxq_received(port, rxq->id);
6116 if (rx_todo > rx_received)
6117 rx_todo = rx_received;
6119 while (rx_done < rx_todo) {
6120 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
6121 struct mvpp2_bm_pool *bm_pool;
6122 struct sk_buff *skb;
6123 unsigned int frag_size;
6124 dma_addr_t dma_addr;
6125 phys_addr_t phys_addr;
6127 int pool, rx_bytes, err;
6131 rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
6132 rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
6133 rx_bytes -= MVPP2_MH_SIZE;
6134 dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
6135 phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
6136 data = (void *)phys_to_virt(phys_addr);
6138 pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
6139 MVPP2_RXD_BM_POOL_ID_OFFS;
6140 bm_pool = &port->priv->bm_pools[pool];
6142 /* In case of an error, release the requested buffer pointer
6143 * to the Buffer Manager. This request process is controlled
6144 * by the hardware, and the information about the buffer is
6145 * carried in the RX descriptor.
6147 if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
6149 dev->stats.rx_errors++;
6150 mvpp2_rx_error(port, rx_desc);
6151 /* Return the buffer to the pool */
6152 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
6156 if (bm_pool->frag_size > PAGE_SIZE)
6159 frag_size = bm_pool->frag_size;
6161 skb = build_skb(data, frag_size);
6163 netdev_warn(port->dev, "skb build failed\n");
6164 goto err_drop_frame;
6167 err = mvpp2_rx_refill(port, bm_pool, pool);
6169 netdev_err(port->dev, "failed to refill BM pools\n");
6170 goto err_drop_frame;
6173 dma_unmap_single(dev->dev.parent, dma_addr,
6174 bm_pool->buf_size, DMA_FROM_DEVICE);
6177 rcvd_bytes += rx_bytes;
6179 skb_reserve(skb, MVPP2_MH_SIZE + NET_SKB_PAD);
6180 skb_put(skb, rx_bytes);
6181 skb->protocol = eth_type_trans(skb, dev);
6182 mvpp2_rx_csum(port, rx_status, skb);
6184 napi_gro_receive(napi, skb);
6188 struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
6190 u64_stats_update_begin(&stats->syncp);
6191 stats->rx_packets += rcvd_pkts;
6192 stats->rx_bytes += rcvd_bytes;
6193 u64_stats_update_end(&stats->syncp);
6196 /* Update Rx queue management counters */
6198 mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);
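/* Note: rx_done is reported both as the processed count and as the
 * number of buffers returned, since every handled descriptor gave its
 * buffer back to the BM pool -- via the error path or mvpp2_rx_refill().
 */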
6204 tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
6205 struct mvpp2_tx_desc *desc)
6207 dma_addr_t buf_dma_addr =
6208 mvpp2_txdesc_dma_addr_get(port, desc);
6210 mvpp2_txdesc_size_get(port, desc);
6211 dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
6212 buf_sz, DMA_TO_DEVICE);
6213 mvpp2_txq_desc_put(txq);
6216 /* Handle Tx fragment processing */
6217 static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
6218 struct mvpp2_tx_queue *aggr_txq,
6219 struct mvpp2_tx_queue *txq)
6221 struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
6222 struct mvpp2_tx_desc *tx_desc;
6224 dma_addr_t buf_dma_addr;
6226 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6227 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6228 void *addr = page_address(frag->page.p) + frag->page_offset;
6230 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
6231 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
6232 mvpp2_txdesc_size_set(port, tx_desc, frag->size);
6234 buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
6237 if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
6238 mvpp2_txq_desc_put(txq);
6242 mvpp2_txdesc_offset_set(port, tx_desc,
6243 buf_dma_addr & MVPP2_TX_DESC_ALIGN);
6244 mvpp2_txdesc_dma_addr_set(port, tx_desc,
6245 buf_dma_addr & ~MVPP2_TX_DESC_ALIGN);
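/* Illustration (assumed mask value): if MVPP2_TX_DESC_ALIGN were 0x1f
 * (32-byte alignment), buf_dma_addr = 0x12345678 would be stored as the
 * aligned base 0x12345660 (addr & ~mask) plus packet offset 0x18
 * (addr & mask), since the descriptor's address field can only hold an
 * aligned buffer address.
 */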
6247 if (i == (skb_shinfo(skb)->nr_frags - 1)) {
6248 /* Last descriptor */
6249 mvpp2_txdesc_cmd_set(port, tx_desc,
6251 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
6253 /* Descriptor in the middle: Not First, Not Last */
6254 mvpp2_txdesc_cmd_set(port, tx_desc, 0);
6255 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
6261 /* Release all descriptors that were used to map fragments of
6262 * this packet, as well as the corresponding DMA mappings
6264 for (i = i - 1; i >= 0; i--) {
6265 tx_desc = txq->descs + i;
6266 tx_desc_unmap_put(port, txq, tx_desc);
6272 static inline void mvpp2_tso_put_hdr(struct sk_buff *skb,
6273 struct net_device *dev,
6274 struct mvpp2_tx_queue *txq,
6275 struct mvpp2_tx_queue *aggr_txq,
6276 struct mvpp2_txq_pcpu *txq_pcpu,
6279 struct mvpp2_port *port = netdev_priv(dev);
6280 struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
6283 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
6284 mvpp2_txdesc_size_set(port, tx_desc, hdr_sz);
6286 addr = txq_pcpu->tso_headers_dma +
6287 txq_pcpu->txq_put_index * TSO_HEADER_SIZE;
6288 mvpp2_txdesc_offset_set(port, tx_desc, addr & MVPP2_TX_DESC_ALIGN);
6289 mvpp2_txdesc_dma_addr_set(port, tx_desc, addr & ~MVPP2_TX_DESC_ALIGN);
6291 mvpp2_txdesc_cmd_set(port, tx_desc, mvpp2_skb_tx_csum(port, skb) |
6293 MVPP2_TXD_PADDING_DISABLE);
6294 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
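/* Note: the TSO header does not live in the skb; it was built in the
 * per-CPU txq_pcpu->tso_headers area (one TSO_HEADER_SIZE slot per
 * descriptor, indexed by txq_put_index) that is mapped for DMA at queue
 * init time, so no per-packet dma_map_single() and no skb reference
 * (NULL) is recorded for this descriptor.
 */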
6297 static inline int mvpp2_tso_put_data(struct sk_buff *skb,
6298 struct net_device *dev, struct tso_t *tso,
6299 struct mvpp2_tx_queue *txq,
6300 struct mvpp2_tx_queue *aggr_txq,
6301 struct mvpp2_txq_pcpu *txq_pcpu,
6302 int sz, bool left, bool last)
6304 struct mvpp2_port *port = netdev_priv(dev);
6305 struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
6306 dma_addr_t buf_dma_addr;
6308 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
6309 mvpp2_txdesc_size_set(port, tx_desc, sz);
6311 buf_dma_addr = dma_map_single(dev->dev.parent, tso->data, sz,
6313 if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
6314 mvpp2_txq_desc_put(txq);
6318 mvpp2_txdesc_offset_set(port, tx_desc,
6319 buf_dma_addr & MVPP2_TX_DESC_ALIGN);
6320 mvpp2_txdesc_dma_addr_set(port, tx_desc,
6321 buf_dma_addr & ~MVPP2_TX_DESC_ALIGN);
6324 mvpp2_txdesc_cmd_set(port, tx_desc, MVPP2_TXD_L_DESC);
6326 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
6330 mvpp2_txdesc_cmd_set(port, tx_desc, 0);
6333 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
6337 static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev,
6338 struct mvpp2_tx_queue *txq,
6339 struct mvpp2_tx_queue *aggr_txq,
6340 struct mvpp2_txq_pcpu *txq_pcpu)
6342 struct mvpp2_port *port = netdev_priv(dev);
6344 int hdr_sz = skb_transport_offset(skb) + tcp_hdrlen(skb);
6345 int i, len, descs = 0;
6347 /* Check number of available descriptors */
6348 if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq,
6349 tso_count_descs(skb)) ||
6350 mvpp2_txq_reserved_desc_num_proc(port->priv, txq, txq_pcpu,
6351 tso_count_descs(skb)))
6354 tso_start(skb, &tso);
6355 len = skb->len - hdr_sz;
6357 int left = min_t(int, skb_shinfo(skb)->gso_size, len);
6358 char *hdr = txq_pcpu->tso_headers +
6359 txq_pcpu->txq_put_index * TSO_HEADER_SIZE;
6364 tso_build_hdr(skb, hdr, &tso, left, len == 0);
6365 mvpp2_tso_put_hdr(skb, dev, txq, aggr_txq, txq_pcpu, hdr_sz);
6368 int sz = min_t(int, tso.size, left);
6372 if (mvpp2_tso_put_data(skb, dev, &tso, txq, aggr_txq,
6373 txq_pcpu, sz, left, len == 0))
6375 tso_build_data(skb, &tso, sz);
6382 for (i = descs - 1; i >= 0; i--) {
6383 struct mvpp2_tx_desc *tx_desc = txq->descs + i;
6384 tx_desc_unmap_put(port, txq, tx_desc);
6389 /* Main tx processing */
6390 static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
6392 struct mvpp2_port *port = netdev_priv(dev);
6393 struct mvpp2_tx_queue *txq, *aggr_txq;
6394 struct mvpp2_txq_pcpu *txq_pcpu;
6395 struct mvpp2_tx_desc *tx_desc;
6396 dma_addr_t buf_dma_addr;
6401 txq_id = skb_get_queue_mapping(skb);
6402 txq = port->txqs[txq_id];
6403 txq_pcpu = this_cpu_ptr(txq->pcpu);
6404 aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];
6406 if (skb_is_gso(skb)) {
6407 frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu);
6410 frags = skb_shinfo(skb)->nr_frags + 1;
6412 /* Check number of available descriptors */
6413 if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) ||
6414 mvpp2_txq_reserved_desc_num_proc(port->priv, txq,
6420 /* Get a descriptor for the first part of the packet */
6421 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
6422 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
6423 mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb));
6425 buf_dma_addr = dma_map_single(dev->dev.parent, skb->data,
6426 skb_headlen(skb), DMA_TO_DEVICE);
6427 if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
6428 mvpp2_txq_desc_put(txq);
6433 mvpp2_txdesc_offset_set(port, tx_desc,
6434 buf_dma_addr & MVPP2_TX_DESC_ALIGN);
6435 mvpp2_txdesc_dma_addr_set(port, tx_desc,
6436 buf_dma_addr & ~MVPP2_TX_DESC_ALIGN);
6438 tx_cmd = mvpp2_skb_tx_csum(port, skb);
6441 /* First and Last descriptor */
6442 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
6443 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
6444 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
6446 /* First but not Last */
6447 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
6448 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
6449 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
6451 /* Continue with other skb fragments */
6452 if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
6453 tx_desc_unmap_put(port, txq, tx_desc);
6461 struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
6462 struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
6464 txq_pcpu->reserved_num -= frags;
6465 txq_pcpu->count += frags;
6466 aggr_txq->count += frags;
6468 /* Enable transmit */
6470 mvpp2_aggr_txq_pend_desc_add(port, frags);
6472 if (txq_pcpu->size - txq_pcpu->count < MAX_SKB_FRAGS + 1)
6473 netif_tx_stop_queue(nq);
6475 u64_stats_update_begin(&stats->syncp);
6476 stats->tx_packets++;
6477 stats->tx_bytes += skb->len;
6478 u64_stats_update_end(&stats->syncp);
6480 dev->stats.tx_dropped++;
6481 dev_kfree_skb_any(skb);
6484 /* Finalize TX processing */
6485 if (txq_pcpu->count >= txq->done_pkts_coal)
6486 mvpp2_txq_done(port, txq, txq_pcpu);
6488 /* Set the timer in case not all frags were processed */
6489 if (!port->has_tx_irqs && txq_pcpu->count <= frags &&
6490 txq_pcpu->count > 0) {
6491 struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
6493 mvpp2_timer_set(port_pcpu);
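/* Note: this arm happens only when the just-queued descriptors appear to
 * be the only outstanding ones (count <= frags); a larger count means
 * earlier traffic already scheduled the deferred Tx-done work.
 */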
6496 return NETDEV_TX_OK;
6499 static inline void mvpp2_cause_error(struct net_device *dev, int cause)
6501 if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
6502 netdev_err(dev, "FCS error\n");
6503 if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
6504 netdev_err(dev, "rx fifo overrun error\n");
6505 if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
6506 netdev_err(dev, "tx fifo underrun error\n");
6509 static int mvpp2_poll(struct napi_struct *napi, int budget)
6511 u32 cause_rx_tx, cause_rx, cause_tx, cause_misc;
6513 struct mvpp2_port *port = netdev_priv(napi->dev);
6514 struct mvpp2_queue_vector *qv;
6515 int cpu = smp_processor_id();
6517 qv = container_of(napi, struct mvpp2_queue_vector, napi);
6519 /* Rx/Tx cause register
6521 * Bits 0-15: each bit indicates received packets on the Rx queue
6522 * (bit 0 is for Rx queue 0).
6524 * Bits 16-23: each bit indicates transmitted packets on the Tx queue
6525 * (bit 16 is for Tx queue 0).
6527 * Each CPU has its own Rx/Tx cause register
6529 cause_rx_tx = mvpp2_percpu_read(port->priv, qv->sw_thread_id,
6530 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
6532 cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
6534 mvpp2_cause_error(port->dev, cause_misc);
6536 /* Clear the cause register */
6537 mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
6538 mvpp2_percpu_write(port->priv, cpu,
6539 MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
6540 cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
6543 cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
6545 cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
6546 mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
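/* Illustration: given the bit layout documented above, cause_rx_tx =
 * 0x00030005 would mean Rx work pending on queues 0 and 2 (bits 0 and 2)
 * and Tx-done work on queues 0 and 1 (bits 16 and 17); the Tx half is
 * shifted down by MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET before being
 * handed to mvpp2_tx_done().
 */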
6549 /* Process RX packets */
6550 cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
6551 cause_rx <<= qv->first_rxq;
6552 cause_rx |= qv->pending_cause_rx;
6553 while (cause_rx && budget > 0) {
6555 struct mvpp2_rx_queue *rxq;
6557 rxq = mvpp2_get_rx_queue(port, cause_rx);
6561 count = mvpp2_rx(port, napi, budget, rxq);
6565 /* Clear the bit associated to this Rx queue
6566 * so that next iteration will continue from
6567 * the next Rx queue.
6569 cause_rx &= ~(1 << rxq->logic_rxq);
6575 napi_complete_done(napi, rx_done);
6577 mvpp2_qvec_interrupt_enable(qv);
6579 qv->pending_cause_rx = cause_rx;
6583 /* Set hw internals when starting port */
6584 static void mvpp2_start_dev(struct mvpp2_port *port)
6586 struct net_device *ndev = port->dev;
6589 if (port->gop_id == 0 &&
6590 (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
6591 port->phy_interface == PHY_INTERFACE_MODE_10GKR))
6592 mvpp2_xlg_max_rx_size_set(port);
6594 mvpp2_gmac_max_rx_size_set(port);
6596 mvpp2_txp_max_tx_size_set(port);
6598 for (i = 0; i < port->nqvecs; i++)
6599 napi_enable(&port->qvecs[i].napi);
6601 /* Enable interrupts on all CPUs */
6602 mvpp2_interrupts_enable(port);
6604 if (port->priv->hw_version == MVPP22) {
6605 mvpp22_comphy_init(port);
6606 mvpp22_gop_init(port);
6609 mvpp2_port_mii_set(port);
6610 mvpp2_port_enable(port);
6612 phy_start(ndev->phydev);
6613 netif_tx_start_all_queues(port->dev);
6616 /* Set hw internals when stopping port */
6617 static void mvpp2_stop_dev(struct mvpp2_port *port)
6619 struct net_device *ndev = port->dev;
6622 /* Stop new packets from arriving to RXQs */
6623 mvpp2_ingress_disable(port);
6627 /* Disable interrupts on all CPUs */
6628 mvpp2_interrupts_disable(port);
6630 for (i = 0; i < port->nqvecs; i++)
6631 napi_disable(&port->qvecs[i].napi);
6633 netif_carrier_off(port->dev);
6634 netif_tx_stop_all_queues(port->dev);
6636 mvpp2_egress_disable(port);
6637 mvpp2_port_disable(port);
6639 phy_stop(ndev->phydev);
6640 phy_power_off(port->comphy);
6643 static int mvpp2_check_ringparam_valid(struct net_device *dev,
6644 struct ethtool_ringparam *ring)
6646 u16 new_rx_pending = ring->rx_pending;
6647 u16 new_tx_pending = ring->tx_pending;
6649 if (ring->rx_pending == 0 || ring->tx_pending == 0)
6652 if (ring->rx_pending > MVPP2_MAX_RXD)
6653 new_rx_pending = MVPP2_MAX_RXD;
6654 else if (!IS_ALIGNED(ring->rx_pending, 16))
6655 new_rx_pending = ALIGN(ring->rx_pending, 16);
6657 if (ring->tx_pending > MVPP2_MAX_TXD)
6658 new_tx_pending = MVPP2_MAX_TXD;
6659 else if (!IS_ALIGNED(ring->tx_pending, 32))
6660 new_tx_pending = ALIGN(ring->tx_pending, 32);
6662 if (ring->rx_pending != new_rx_pending) {
6663 netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
6664 ring->rx_pending, new_rx_pending);
6665 ring->rx_pending = new_rx_pending;
6668 if (ring->tx_pending != new_tx_pending) {
6669 netdev_info(dev, "illegal Tx ring size value %d, round to %d\n",
6670 ring->tx_pending, new_tx_pending);
6671 ring->tx_pending = new_tx_pending;
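/* Example: a request of rx_pending = 100 is not a multiple of 16, so it
 * is rounded up to ALIGN(100, 16) = 112 and reported via netdev_info();
 * likewise tx_pending = 100 becomes ALIGN(100, 32) = 128. Values above
 * MVPP2_MAX_RXD/MVPP2_MAX_TXD are capped instead of rounded.
 */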
6677 static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
6679 u32 mac_addr_l, mac_addr_m, mac_addr_h;
6681 mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
6682 mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
6683 mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
6684 addr[0] = (mac_addr_h >> 24) & 0xFF;
6685 addr[1] = (mac_addr_h >> 16) & 0xFF;
6686 addr[2] = (mac_addr_h >> 8) & 0xFF;
6687 addr[3] = mac_addr_h & 0xFF;
6688 addr[4] = mac_addr_m & 0xFF;
6689 addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
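/* Example: mac_addr_h = 0x0050432e with a middle byte of 0x9a and a
 * MAC-low field of 0xbc unpacks to 00:50:43:2e:9a:bc -- the high
 * register supplies the first four octets, the middle and low registers
 * one octet each.
 */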
6692 static int mvpp2_phy_connect(struct mvpp2_port *port)
6694 struct phy_device *phy_dev;
6696 /* No PHY is attached */
6697 if (!port->phy_node)
6700 phy_dev = of_phy_connect(port->dev, port->phy_node, mvpp2_link_event, 0,
6701 port->phy_interface);
6703 netdev_err(port->dev, "cannot connect to phy\n");
6706 phy_dev->supported &= PHY_GBIT_FEATURES;
6707 phy_dev->advertising = phy_dev->supported;
6716 static void mvpp2_phy_disconnect(struct mvpp2_port *port)
6718 struct net_device *ndev = port->dev;
6723 phy_disconnect(ndev->phydev);
6726 static int mvpp2_irqs_init(struct mvpp2_port *port)
6730 for (i = 0; i < port->nqvecs; i++) {
6731 struct mvpp2_queue_vector *qv = port->qvecs + i;
6733 err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv);
6737 if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE)
6738 irq_set_affinity_hint(qv->irq,
6739 cpumask_of(qv->sw_thread_id));
6744 for (i = 0; i < port->nqvecs; i++) {
6745 struct mvpp2_queue_vector *qv = port->qvecs + i;
6747 irq_set_affinity_hint(qv->irq, NULL);
6748 free_irq(qv->irq, qv);
6754 static void mvpp2_irqs_deinit(struct mvpp2_port *port)
6758 for (i = 0; i < port->nqvecs; i++) {
6759 struct mvpp2_queue_vector *qv = port->qvecs + i;
6761 irq_set_affinity_hint(qv->irq, NULL);
6762 free_irq(qv->irq, qv);
6766 static int mvpp2_open(struct net_device *dev)
6768 struct mvpp2_port *port = netdev_priv(dev);
6769 struct mvpp2 *priv = port->priv;
6770 unsigned char mac_bcast[ETH_ALEN] = {
6771 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
6774 err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true);
6776 netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
6779 err = mvpp2_prs_mac_da_accept(port->priv, port->id,
6780 dev->dev_addr, true);
6782 netdev_err(dev, "mvpp2_prs_mac_da_accept MC failed\n");
6785 err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
6787 netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
6790 err = mvpp2_prs_def_flow(port);
6792 netdev_err(dev, "mvpp2_prs_def_flow failed\n");
6796 /* Allocate the Rx/Tx queues */
6797 err = mvpp2_setup_rxqs(port);
6799 netdev_err(port->dev, "cannot allocate Rx queues\n");
6803 err = mvpp2_setup_txqs(port);
6805 netdev_err(port->dev, "cannot allocate Tx queues\n");
6806 goto err_cleanup_rxqs;
6809 err = mvpp2_irqs_init(port);
6811 netdev_err(port->dev, "cannot init IRQs\n");
6812 goto err_cleanup_txqs;
6815 if (priv->hw_version == MVPP22 && !port->phy_node && port->link_irq) {
6816 err = request_irq(port->link_irq, mvpp2_link_status_isr, 0,
6819 netdev_err(port->dev, "cannot request link IRQ %d\n",
6824 mvpp22_gop_setup_irq(port);
6827 /* The link is down by default */
6828 netif_carrier_off(port->dev);
6830 err = mvpp2_phy_connect(port);
6832 goto err_free_link_irq;
6834 /* Unmask interrupts on all CPUs */
6835 on_each_cpu(mvpp2_interrupts_unmask, port, 1);
6836 mvpp2_shared_interrupt_mask_unmask(port, false);
6838 mvpp2_start_dev(port);
6843 if (priv->hw_version == MVPP22 && !port->phy_node && port->link_irq)
6844 free_irq(port->link_irq, port);
6846 mvpp2_irqs_deinit(port);
6848 mvpp2_cleanup_txqs(port);
6850 mvpp2_cleanup_rxqs(port);
6854 static int mvpp2_stop(struct net_device *dev)
6856 struct mvpp2_port *port = netdev_priv(dev);
6857 struct mvpp2_port_pcpu *port_pcpu;
6858 struct mvpp2 *priv = port->priv;
6861 mvpp2_stop_dev(port);
6862 mvpp2_phy_disconnect(port);
6864 /* Mask interrupts on all CPUs */
6865 on_each_cpu(mvpp2_interrupts_mask, port, 1);
6866 mvpp2_shared_interrupt_mask_unmask(port, true);
6868 if (priv->hw_version == MVPP22 && !port->phy_node && port->link_irq)
6869 free_irq(port->link_irq, port);
6871 mvpp2_irqs_deinit(port);
6872 if (!port->has_tx_irqs) {
6873 for_each_present_cpu(cpu) {
6874 port_pcpu = per_cpu_ptr(port->pcpu, cpu);
6876 hrtimer_cancel(&port_pcpu->tx_done_timer);
6877 port_pcpu->timer_scheduled = false;
6878 tasklet_kill(&port_pcpu->tx_done_tasklet);
6881 mvpp2_cleanup_rxqs(port);
6882 mvpp2_cleanup_txqs(port);
6887 static void mvpp2_set_rx_mode(struct net_device *dev)
6889 struct mvpp2_port *port = netdev_priv(dev);
6890 struct mvpp2 *priv = port->priv;
6891 struct netdev_hw_addr *ha;
6893 bool allmulti = dev->flags & IFF_ALLMULTI;
6895 mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC);
6896 mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti);
6897 mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti);
6899 /* Remove all port->id's mcast entries */
6900 mvpp2_prs_mcast_del_all(priv, id);
6902 if (allmulti && !netdev_mc_empty(dev)) {
6903 netdev_for_each_mc_addr(ha, dev)
6904 mvpp2_prs_mac_da_accept(priv, id, ha->addr, true);
6908 static int mvpp2_set_mac_address(struct net_device *dev, void *p)
6910 struct mvpp2_port *port = netdev_priv(dev);
6911 const struct sockaddr *addr = p;
6914 if (!is_valid_ether_addr(addr->sa_data)) {
6915 err = -EADDRNOTAVAIL;
6919 if (!netif_running(dev)) {
6920 err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
6923 /* Reconfigure parser to accept the original MAC address */
6924 err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
6929 mvpp2_stop_dev(port);
6931 err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
6935 /* Reconfigure parser to accept the original MAC address */
6936 err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
6940 mvpp2_start_dev(port);
6941 mvpp2_egress_enable(port);
6942 mvpp2_ingress_enable(port);
6945 netdev_err(dev, "failed to change MAC address\n");
6949 static int mvpp2_change_mtu(struct net_device *dev, int mtu)
6951 struct mvpp2_port *port = netdev_priv(dev);
6954 if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
6955 netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
6956 ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
6957 mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
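/* Note: MVPP2_RX_PKT_SIZE() maps an MTU to the Rx buffer size the BM
 * pools must supply, and that size has to stay 8-byte aligned; the check
 * above rounds non-conforming requests and logs the value actually used.
 */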
6960 if (!netif_running(dev)) {
6961 err = mvpp2_bm_update_mtu(dev, mtu);
6963 port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
6967 /* Reconfigure BM to the original MTU */
6968 err = mvpp2_bm_update_mtu(dev, dev->mtu);
6973 mvpp2_stop_dev(port);
6975 err = mvpp2_bm_update_mtu(dev, mtu);
6977 port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
6981 /* Reconfigure BM to the original MTU */
6982 err = mvpp2_bm_update_mtu(dev, dev->mtu);
6987 mvpp2_start_dev(port);
6988 mvpp2_egress_enable(port);
6989 mvpp2_ingress_enable(port);
6993 netdev_err(dev, "failed to change MTU\n");
6998 mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
7000 struct mvpp2_port *port = netdev_priv(dev);
7004 for_each_possible_cpu(cpu) {
7005 struct mvpp2_pcpu_stats *cpu_stats;
7011 cpu_stats = per_cpu_ptr(port->stats, cpu);
7013 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
7014 rx_packets = cpu_stats->rx_packets;
7015 rx_bytes = cpu_stats->rx_bytes;
7016 tx_packets = cpu_stats->tx_packets;
7017 tx_bytes = cpu_stats->tx_bytes;
7018 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
7020 stats->rx_packets += rx_packets;
7021 stats->rx_bytes += rx_bytes;
7022 stats->tx_packets += tx_packets;
7023 stats->tx_bytes += tx_bytes;
7026 stats->rx_errors = dev->stats.rx_errors;
7027 stats->rx_dropped = dev->stats.rx_dropped;
7028 stats->tx_dropped = dev->stats.tx_dropped;
7031 static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7038 ret = phy_mii_ioctl(dev->phydev, ifr, cmd);
7040 mvpp2_link_event(dev);
7045 /* Ethtool methods */
7047 /* Set interrupt coalescing for ethtool */
7048 static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
7049 struct ethtool_coalesce *c)
7051 struct mvpp2_port *port = netdev_priv(dev);
7054 for (queue = 0; queue < port->nrxqs; queue++) {
7055 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
7057 rxq->time_coal = c->rx_coalesce_usecs;
7058 rxq->pkts_coal = c->rx_max_coalesced_frames;
7059 mvpp2_rx_pkts_coal_set(port, rxq);
7060 mvpp2_rx_time_coal_set(port, rxq);
7063 if (port->has_tx_irqs) {
7064 port->tx_time_coal = c->tx_coalesce_usecs;
7065 mvpp2_tx_time_coal_set(port);
7068 for (queue = 0; queue < port->ntxqs; queue++) {
7069 struct mvpp2_tx_queue *txq = port->txqs[queue];
7071 txq->done_pkts_coal = c->tx_max_coalesced_frames;
7073 if (port->has_tx_irqs)
7074 mvpp2_tx_pkts_coal_set(port, txq);
7080 /* Get interrupt coalescing for ethtool */
7081 static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
7082 struct ethtool_coalesce *c)
7084 struct mvpp2_port *port = netdev_priv(dev);
7086 c->rx_coalesce_usecs = port->rxqs[0]->time_coal;
7087 c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
7088 c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
7092 static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
7093 struct ethtool_drvinfo *drvinfo)
7095 strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
7096 sizeof(drvinfo->driver));
7097 strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
7098 sizeof(drvinfo->version));
7099 strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
7100 sizeof(drvinfo->bus_info));
7103 static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
7104 struct ethtool_ringparam *ring)
7106 struct mvpp2_port *port = netdev_priv(dev);
7108 ring->rx_max_pending = MVPP2_MAX_RXD;
7109 ring->tx_max_pending = MVPP2_MAX_TXD;
7110 ring->rx_pending = port->rx_ring_size;
7111 ring->tx_pending = port->tx_ring_size;
7114 static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
7115 struct ethtool_ringparam *ring)
7117 struct mvpp2_port *port = netdev_priv(dev);
7118 u16 prev_rx_ring_size = port->rx_ring_size;
7119 u16 prev_tx_ring_size = port->tx_ring_size;
7122 err = mvpp2_check_ringparam_valid(dev, ring);
7126 if (!netif_running(dev)) {
7127 port->rx_ring_size = ring->rx_pending;
7128 port->tx_ring_size = ring->tx_pending;
7132 /* The interface is running, so we have to force a
7133 * reallocation of the queues
7135 mvpp2_stop_dev(port);
7136 mvpp2_cleanup_rxqs(port);
7137 mvpp2_cleanup_txqs(port);
7139 port->rx_ring_size = ring->rx_pending;
7140 port->tx_ring_size = ring->tx_pending;
7142 err = mvpp2_setup_rxqs(port);
7144 /* Reallocate Rx queues with the original ring size */
7145 port->rx_ring_size = prev_rx_ring_size;
7146 ring->rx_pending = prev_rx_ring_size;
7147 err = mvpp2_setup_rxqs(port);
7151 err = mvpp2_setup_txqs(port);
7153 /* Reallocate Tx queues with the original ring size */
7154 port->tx_ring_size = prev_tx_ring_size;
7155 ring->tx_pending = prev_tx_ring_size;
7156 err = mvpp2_setup_txqs(port);
7158 goto err_clean_rxqs;
7161 mvpp2_start_dev(port);
7162 mvpp2_egress_enable(port);
7163 mvpp2_ingress_enable(port);
7168 mvpp2_cleanup_rxqs(port);
7170 netdev_err(dev, "failed to change ring parameters\n");
7176 static const struct net_device_ops mvpp2_netdev_ops = {
7177 .ndo_open = mvpp2_open,
7178 .ndo_stop = mvpp2_stop,
7179 .ndo_start_xmit = mvpp2_tx,
7180 .ndo_set_rx_mode = mvpp2_set_rx_mode,
7181 .ndo_set_mac_address = mvpp2_set_mac_address,
7182 .ndo_change_mtu = mvpp2_change_mtu,
7183 .ndo_get_stats64 = mvpp2_get_stats64,
7184 .ndo_do_ioctl = mvpp2_ioctl,
7187 static const struct ethtool_ops mvpp2_eth_tool_ops = {
7188 .nway_reset = phy_ethtool_nway_reset,
7189 .get_link = ethtool_op_get_link,
7190 .set_coalesce = mvpp2_ethtool_set_coalesce,
7191 .get_coalesce = mvpp2_ethtool_get_coalesce,
7192 .get_drvinfo = mvpp2_ethtool_get_drvinfo,
7193 .get_ringparam = mvpp2_ethtool_get_ringparam,
7194 .set_ringparam = mvpp2_ethtool_set_ringparam,
7195 .get_link_ksettings = phy_ethtool_get_link_ksettings,
7196 .set_link_ksettings = phy_ethtool_set_link_ksettings,
7199 /* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
7200 * had a single IRQ defined per-port.
7202 static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port,
7203 struct device_node *port_node)
7205 struct mvpp2_queue_vector *v = &port->qvecs[0];
7208 v->nrxqs = port->nrxqs;
7209 v->type = MVPP2_QUEUE_VECTOR_SHARED;
7210 v->sw_thread_id = 0;
7211 v->sw_thread_mask = *cpumask_bits(cpu_online_mask);
7213 v->irq = irq_of_parse_and_map(port_node, 0);
7216 netif_napi_add(port->dev, &v->napi, mvpp2_poll,
7224 static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
7225 struct device_node *port_node)
7227 struct mvpp2_queue_vector *v;
7230 port->nqvecs = num_possible_cpus();
7231 if (queue_mode == MVPP2_QDIST_SINGLE_MODE)
7234 for (i = 0; i < port->nqvecs; i++) {
7237 v = port->qvecs + i;
7240 v->type = MVPP2_QUEUE_VECTOR_PRIVATE;
7241 v->sw_thread_id = i;
7242 v->sw_thread_mask = BIT(i);
7244 snprintf(irqname, sizeof(irqname), "tx-cpu%d", i);
7246 if (queue_mode == MVPP2_QDIST_MULTI_MODE) {
7247 v->first_rxq = i * MVPP2_DEFAULT_RXQ;
7248 v->nrxqs = MVPP2_DEFAULT_RXQ;
7249 } else if (queue_mode == MVPP2_QDIST_SINGLE_MODE &&
7250 i == (port->nqvecs - 1)) {
7252 v->nrxqs = port->nrxqs;
7253 v->type = MVPP2_QUEUE_VECTOR_SHARED;
7254 strncpy(irqname, "rx-shared", sizeof(irqname));
7257 v->irq = of_irq_get_byname(port_node, irqname);
7263 netif_napi_add(port->dev, &v->napi, mvpp2_poll,
7270 for (i = 0; i < port->nqvecs; i++)
7271 irq_dispose_mapping(port->qvecs[i].irq);
7275 static int mvpp2_queue_vectors_init(struct mvpp2_port *port,
7276 struct device_node *port_node)
7278 if (port->has_tx_irqs)
7279 return mvpp2_multi_queue_vectors_init(port, port_node);
7281 return mvpp2_simple_queue_vectors_init(port, port_node);
7284 static void mvpp2_queue_vectors_deinit(struct mvpp2_port *port)
7288 for (i = 0; i < port->nqvecs; i++)
7289 irq_dispose_mapping(port->qvecs[i].irq);
7292 /* Configure Rx queue group interrupt for this port */
7293 static void mvpp2_rx_irqs_setup(struct mvpp2_port *port)
7295 struct mvpp2 *priv = port->priv;
7299 if (priv->hw_version == MVPP21) {
7300 mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id),
7305 /* Handle the more complicated PPv2.2 case */
7306 for (i = 0; i < port->nqvecs; i++) {
7307 struct mvpp2_queue_vector *qv = port->qvecs + i;
7312 val = qv->sw_thread_id;
7313 val |= port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET;
7314 mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);
7316 val = qv->first_rxq;
7317 val |= qv->nrxqs << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET;
7318 mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
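/* Note: PPv2.2 programs this mapping indirectly: the GROUP_INDEX write
 * selects a (port, sw_thread) slot, and the following SUB_GROUP_CONFIG
 * write describes that vector's Rx span as first_rxq plus an nrxqs count
 * packed in the upper field.
 */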
7322 /* Initialize port HW */
7323 static int mvpp2_port_init(struct mvpp2_port *port)
7325 struct device *dev = port->dev->dev.parent;
7326 struct mvpp2 *priv = port->priv;
7327 struct mvpp2_txq_pcpu *txq_pcpu;
7328 int queue, cpu, err;
7330 /* Check hardware constraints */
7331 if (port->first_rxq + port->nrxqs >
7332 MVPP2_MAX_PORTS * priv->max_port_rxqs)
7335 if (port->nrxqs % 4 || (port->nrxqs > priv->max_port_rxqs) ||
7336 (port->ntxqs > MVPP2_MAX_TXQ))
7340 mvpp2_egress_disable(port);
7341 mvpp2_port_disable(port);
7343 port->tx_time_coal = MVPP2_TXDONE_COAL_USEC;
7345 port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs),
7350 /* Associate physical Tx queues to this port and initialize.
7351 * The mapping is predefined.
7353 for (queue = 0; queue < port->ntxqs; queue++) {
7354 int queue_phy_id = mvpp2_txq_phys(port->id, queue);
7355 struct mvpp2_tx_queue *txq;
7357 txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
7360 goto err_free_percpu;
7363 txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
7366 goto err_free_percpu;
7369 txq->id = queue_phy_id;
7370 txq->log_id = queue;
7371 txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
7372 for_each_present_cpu(cpu) {
7373 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
7374 txq_pcpu->cpu = cpu;
7377 port->txqs[queue] = txq;
7380 port->rxqs = devm_kcalloc(dev, port->nrxqs, sizeof(*port->rxqs),
7384 goto err_free_percpu;
7387 /* Allocate and initialize Rx queue for this port */
7388 for (queue = 0; queue < port->nrxqs; queue++) {
7389 struct mvpp2_rx_queue *rxq;
7391 /* Map physical Rx queue to port's logical Rx queue */
7392 rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
7395 goto err_free_percpu;
7397 /* Map this Rx queue to a physical queue */
7398 rxq->id = port->first_rxq + queue;
7399 rxq->port = port->id;
7400 rxq->logic_rxq = queue;
7402 port->rxqs[queue] = rxq;
7405 mvpp2_rx_irqs_setup(port);
7407 /* Create Rx descriptor rings */
7408 for (queue = 0; queue < port->nrxqs; queue++) {
7409 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
7411 rxq->size = port->rx_ring_size;
7412 rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
7413 rxq->time_coal = MVPP2_RX_COAL_USEC;
7416 mvpp2_ingress_disable(port);
7418 /* Port default configuration */
7419 mvpp2_defaults_set(port);
7421 /* Port's classifier configuration */
7422 mvpp2_cls_oversize_rxq_set(port);
7423 mvpp2_cls_port_config(port);
7425 /* Provide an initial Rx packet size */
7426 port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);
7428 /* Initialize pools for software forwarding (swf) */
7429 err = mvpp2_swf_bm_pool_init(port);
7431 goto err_free_percpu;
7436 for (queue = 0; queue < port->ntxqs; queue++) {
7437 if (!port->txqs[queue])
7439 free_percpu(port->txqs[queue]->pcpu);
7444 /* Check whether the port DT description has the TX interrupts
7445 * described. On PPv2.1, there are no such interrupts. On PPv2.2,
7446 * they are available, but we need to keep support for old DTs.
7448 static bool mvpp2_port_has_tx_irqs(struct mvpp2 *priv,
7449 struct device_node *port_node)
7451 char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1",
7452 "tx-cpu2", "tx-cpu3" };
7455 if (priv->hw_version == MVPP21)
7458 for (i = 0; i < 5; i++) {
7459 ret = of_property_match_string(port_node, "interrupt-names",
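/* Illustration (hypothetical device tree fragment, not from a real board
 * file), showing the names this probe expects:
 *
 *	interrupts = <...>;
 *	interrupt-names = "rx-shared", "tx-cpu0", "tx-cpu1",
 *			  "tx-cpu2", "tx-cpu3";
 *
 * A port node carrying all five names is treated as having per-CPU Tx
 * interrupts; an old-style node with a single unnamed IRQ falls back to
 * the hrtimer/tasklet Tx-done path.
 */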
7468 /* Ports initialization */
7469 static int mvpp2_port_probe(struct platform_device *pdev,
7470 struct device_node *port_node,
7473 struct device_node *phy_node;
7475 struct mvpp2_port *port;
7476 struct mvpp2_port_pcpu *port_pcpu;
7477 struct net_device *dev;
7478 struct resource *res;
7479 const char *dt_mac_addr;
7480 const char *mac_from;
7481 char hw_mac_addr[ETH_ALEN];
7482 unsigned int ntxqs, nrxqs;
7489 has_tx_irqs = mvpp2_port_has_tx_irqs(priv, port_node);
7492 queue_mode = MVPP2_QDIST_SINGLE_MODE;
7494 ntxqs = MVPP2_MAX_TXQ;
7495 if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_MULTI_MODE)
7496 nrxqs = MVPP2_DEFAULT_RXQ * num_possible_cpus();
7498 nrxqs = MVPP2_DEFAULT_RXQ;
7500 dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs);
7504 phy_node = of_parse_phandle(port_node, "phy", 0);
7505 phy_mode = of_get_phy_mode(port_node);
7507 dev_err(&pdev->dev, "incorrect phy mode\n");
7509 goto err_free_netdev;
7512 comphy = devm_of_phy_get(&pdev->dev, port_node, NULL);
7513 if (IS_ERR(comphy)) {
7514 if (PTR_ERR(comphy) == -EPROBE_DEFER) {
7515 err = -EPROBE_DEFER;
7516 goto err_free_netdev;
7521 if (of_property_read_u32(port_node, "port-id", &id)) {
7523 dev_err(&pdev->dev, "missing port-id value\n");
7524 goto err_free_netdev;
7527 dev->tx_queue_len = MVPP2_MAX_TXD;
7528 dev->watchdog_timeo = 5 * HZ;
7529 dev->netdev_ops = &mvpp2_netdev_ops;
7530 dev->ethtool_ops = &mvpp2_eth_tool_ops;
7532 port = netdev_priv(dev);
7534 port->ntxqs = ntxqs;
7535 port->nrxqs = nrxqs;
7537 port->has_tx_irqs = has_tx_irqs;
7539 err = mvpp2_queue_vectors_init(port, port_node);
7541 goto err_free_netdev;
7543 port->link_irq = of_irq_get_byname(port_node, "link");
7544 if (port->link_irq == -EPROBE_DEFER) {
7545 err = -EPROBE_DEFER;
7546 goto err_deinit_qvecs;
7548 if (port->link_irq <= 0)
7549 /* the link irq is optional */
7552 if (of_property_read_bool(port_node, "marvell,loopback"))
7553 port->flags |= MVPP2_F_LOOPBACK;
7556 if (priv->hw_version == MVPP21)
7557 port->first_rxq = port->id * port->nrxqs;
7559 port->first_rxq = port->id * priv->max_port_rxqs;
7561 port->phy_node = phy_node;
7562 port->phy_interface = phy_mode;
7563 port->comphy = comphy;
7565 if (priv->hw_version == MVPP21) {
7566 res = platform_get_resource(pdev, IORESOURCE_MEM, 2 + id);
7567 port->base = devm_ioremap_resource(&pdev->dev, res);
7568 if (IS_ERR(port->base)) {
7569 err = PTR_ERR(port->base);
7573 if (of_property_read_u32(port_node, "gop-port-id",
7576 dev_err(&pdev->dev, "missing gop-port-id value\n");
7577 goto err_deinit_qvecs;
7580 port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id);
7583 /* Alloc per-cpu stats */
7584 port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
7590 dt_mac_addr = of_get_mac_address(port_node);
7591 if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) {
7592 mac_from = "device tree";
7593 ether_addr_copy(dev->dev_addr, dt_mac_addr);
7595 if (priv->hw_version == MVPP21)
7596 mvpp21_get_mac_address(port, hw_mac_addr);
7597 if (is_valid_ether_addr(hw_mac_addr)) {
7598 mac_from = "hardware";
7599 ether_addr_copy(dev->dev_addr, hw_mac_addr);
7601 mac_from = "random";
7602 eth_hw_addr_random(dev);
7606 port->tx_ring_size = MVPP2_MAX_TXD;
7607 port->rx_ring_size = MVPP2_MAX_RXD;
7608 SET_NETDEV_DEV(dev, &pdev->dev);
7610 err = mvpp2_port_init(port);
7612 dev_err(&pdev->dev, "failed to init port %d\n", id);
7613 goto err_free_stats;
7616 mvpp2_port_periodic_xon_disable(port);
7618 if (priv->hw_version == MVPP21)
7619 mvpp2_port_fc_adv_enable(port);
7621 mvpp2_port_reset(port);
7623 port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
7626 goto err_free_txq_pcpu;
7629 if (!port->has_tx_irqs) {
7630 for_each_present_cpu(cpu) {
7631 port_pcpu = per_cpu_ptr(port->pcpu, cpu);
7633 hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
7634 HRTIMER_MODE_REL_PINNED);
7635 port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
7636 port_pcpu->timer_scheduled = false;
7638 tasklet_init(&port_pcpu->tx_done_tasklet,
7640 (unsigned long)dev);
7644 features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
7645 dev->features = features | NETIF_F_RXCSUM;
7646 dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO;
7647 dev->vlan_features |= features;
7649 /* MTU range: 68 - 9676 */
7650 dev->min_mtu = ETH_MIN_MTU;
7651 /* 9676 == 9700 - 20 and rounding to 8 */
7652 dev->max_mtu = 9676;
7654 err = register_netdev(dev);
7656 dev_err(&pdev->dev, "failed to register netdev\n");
7657 goto err_free_port_pcpu;
7659 netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
7661 priv->port_list[id] = port;
7665 free_percpu(port->pcpu);
7667 for (i = 0; i < port->ntxqs; i++)
7668 free_percpu(port->txqs[i]->pcpu);
7670 free_percpu(port->stats);
7673 irq_dispose_mapping(port->link_irq);
7675 mvpp2_queue_vectors_deinit(port);
7677 of_node_put(phy_node);
7682 /* Ports removal routine */
7683 static void mvpp2_port_remove(struct mvpp2_port *port)
7687 unregister_netdev(port->dev);
7688 of_node_put(port->phy_node);
7689 free_percpu(port->pcpu);
7690 free_percpu(port->stats);
7691 for (i = 0; i < port->ntxqs; i++)
7692 free_percpu(port->txqs[i]->pcpu);
7693 mvpp2_queue_vectors_deinit(port);
7695 irq_dispose_mapping(port->link_irq);
7696 free_netdev(port->dev);
7699 /* Initialize decoding windows */
7700 static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
7706 for (i = 0; i < 6; i++) {
7707 mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
7708 mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);
7711 mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
7716 for (i = 0; i < dram->num_cs; i++) {
7717 const struct mbus_dram_window *cs = dram->cs + i;
7719 mvpp2_write(priv, MVPP2_WIN_BASE(i),
7720 (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
7721 dram->mbus_dram_target_id);
7723 mvpp2_write(priv, MVPP2_WIN_SIZE(i),
7724 (cs->size - 1) & 0xffff0000);
7726 win_enable |= (1 << i);
7729 mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
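/* Note: each DRAM chip-select gets one decoding window: a base register
 * holding the upper 16 address bits plus target/attribute, and a size
 * register holding (size - 1) masked to 64 KiB granularity; win_enable
 * accumulates one bit per programmed window for the single enable write
 * above.
 */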
7732 /* Initialize Rx FIFOs */
7733 static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
7737 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
7738 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
7739 MVPP2_RX_FIFO_PORT_DATA_SIZE);
7740 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
7741 MVPP2_RX_FIFO_PORT_ATTR_SIZE);
7744 mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
7745 MVPP2_RX_FIFO_PORT_MIN_PKT);
7746 mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
7749 static void mvpp2_axi_init(struct mvpp2 *priv)
7751 u32 val, rdval, wrval;
7753 mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);
7755 /* AXI Bridge Configuration */
7757 rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
7758 << MVPP22_AXI_ATTR_CACHE_OFFS;
7759 rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
7760 << MVPP22_AXI_ATTR_DOMAIN_OFFS;
7762 wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
7763 << MVPP22_AXI_ATTR_CACHE_OFFS;
7764 wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
7765 << MVPP22_AXI_ATTR_DOMAIN_OFFS;
7768 mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
7769 mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);
7772 mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
7773 mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
7774 mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
7775 mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);
7778 mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
7779 mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);
7781 val = MVPP22_AXI_CODE_CACHE_NON_CACHE
7782 << MVPP22_AXI_CODE_CACHE_OFFS;
7783 val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
7784 << MVPP22_AXI_CODE_DOMAIN_OFFS;
7785 mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
7786 mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);
7788 val = MVPP22_AXI_CODE_CACHE_RD_CACHE
7789 << MVPP22_AXI_CODE_CACHE_OFFS;
7790 val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
7791 << MVPP22_AXI_CODE_DOMAIN_OFFS;
7793 mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);
7795 val = MVPP22_AXI_CODE_CACHE_WR_CACHE
7796 << MVPP22_AXI_CODE_CACHE_OFFS;
7797 val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
7798 << MVPP22_AXI_CODE_DOMAIN_OFFS;
7800 mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
7803 /* Initialize network controller common part HW */
7804 static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
7806 const struct mbus_dram_target_info *dram_target_info;
7810 /* MBUS windows configuration */
7811 dram_target_info = mv_mbus_dram_info();
7812 if (dram_target_info)
7813 mvpp2_conf_mbus_windows(dram_target_info, priv);
7815 if (priv->hw_version == MVPP22)
7816 mvpp2_axi_init(priv);
7818 /* Disable HW PHY polling */
7819 if (priv->hw_version == MVPP21) {
7820 val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
7821 val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
7822 writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
7824 val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
7825 val &= ~MVPP22_SMI_POLLING_EN;
7826 writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
7829 /* Allocate and initialize aggregated TXQs */
7830 priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(),
7831 sizeof(*priv->aggr_txqs),
7833 if (!priv->aggr_txqs)
7836 for_each_present_cpu(i) {
7837 priv->aggr_txqs[i].id = i;
7838 priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
7839 err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv);
7845 mvpp2_rx_fifo_init(priv);
7847 if (priv->hw_version == MVPP21)
7848 writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
7849 priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);
7851 /* Allow cache snoop when transmitting packets */
7852 mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);
7854 /* Buffer Manager initialization */
7855 err = mvpp2_bm_init(pdev, priv);
7859 /* Parser default initialization */
7860 err = mvpp2_prs_default_init(pdev, priv);
7864 /* Classifier default initialization */
7865 mvpp2_cls_init(priv);
7870 static int mvpp2_probe(struct platform_device *pdev)
7872 struct device_node *dn = pdev->dev.of_node;
7873 struct device_node *port_node;
7875 struct resource *res;
7880 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
7885 (unsigned long)of_device_get_match_data(&pdev->dev);
7887 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
7888 base = devm_ioremap_resource(&pdev->dev, res);
7890 return PTR_ERR(base);
7892 if (priv->hw_version == MVPP21) {
7893 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
7894 priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
7895 if (IS_ERR(priv->lms_base))
7896 return PTR_ERR(priv->lms_base);
7898 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
7899 priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
7900 if (IS_ERR(priv->iface_base))
7901 return PTR_ERR(priv->iface_base);
7903 priv->sysctrl_base =
7904 syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
7905 "marvell,system-controller");
7906 if (IS_ERR(priv->sysctrl_base))
7907 /* The system controller regmap is optional for dt
7908 * compatibility reasons. When not provided, the
7909 * configuration of the GoP relies on the
7910 * firmware/bootloader.
7912 priv->sysctrl_base = NULL;
7915 for (i = 0; i < MVPP2_MAX_THREADS; i++) {
7918 addr_space_sz = (priv->hw_version == MVPP21 ?
7919 MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ);
7920 priv->swth_base[i] = base + i * addr_space_sz;
7923 if (priv->hw_version == MVPP21)
7924 priv->max_port_rxqs = 8;
7926 priv->max_port_rxqs = 32;
7928 priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
7929 if (IS_ERR(priv->pp_clk))
7930 return PTR_ERR(priv->pp_clk);
7931 err = clk_prepare_enable(priv->pp_clk);
7935 priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
7936 if (IS_ERR(priv->gop_clk)) {
7937 err = PTR_ERR(priv->gop_clk);
7940 err = clk_prepare_enable(priv->gop_clk);
7944 if (priv->hw_version == MVPP22) {
7945 priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
7946 if (IS_ERR(priv->mg_clk)) {
7947 err = PTR_ERR(priv->mg_clk);
7951 err = clk_prepare_enable(priv->mg_clk);
7956 /* Get system's tclk rate */
7957 priv->tclk = clk_get_rate(priv->pp_clk);
7959 if (priv->hw_version == MVPP22) {
7960 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
7963 /* Sadly, the BM pools all share the same register to
7964 * store the high 32 bits of their address. So they
7965 * must all have the same high 32 bits, which forces
7966 * us to restrict coherent memory to DMA_BIT_MASK(32).
7968 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
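/* Example: with a single shared high-bits register, one pool at
 * 0x1_0000_0000 and another at 0x2_0000_0000 could not coexist (their
 * upper 32 bits differ), hence coherent allocations stay below 4 GB
 * while streaming mappings may still use the full 40-bit mask above.
 */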
7973 /* Initialize network controller */
7974 err = mvpp2_init(pdev, priv);
7976 dev_err(&pdev->dev, "failed to initialize controller\n");
7980 port_count = of_get_available_child_count(dn);
7981 if (port_count == 0) {
7982 dev_err(&pdev->dev, "no ports enabled\n");
7987 priv->port_list = devm_kcalloc(&pdev->dev, port_count,
7988 sizeof(*priv->port_list),
7990 if (!priv->port_list) {
7995 /* Initialize ports */
7996 for_each_available_child_of_node(dn, port_node) {
7997 err = mvpp2_port_probe(pdev, port_node, priv);
8002 platform_set_drvdata(pdev, priv);
8006 if (priv->hw_version == MVPP22)
8007 clk_disable_unprepare(priv->mg_clk);
8009 clk_disable_unprepare(priv->gop_clk);
8011 clk_disable_unprepare(priv->pp_clk);
8015 static int mvpp2_remove(struct platform_device *pdev)
8017 struct mvpp2 *priv = platform_get_drvdata(pdev);
8018 struct device_node *dn = pdev->dev.of_node;
8019 struct device_node *port_node;
8022 for_each_available_child_of_node(dn, port_node) {
8023 if (priv->port_list[i])
8024 mvpp2_port_remove(priv->port_list[i]);
8028 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
8029 struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];
8031 mvpp2_bm_pool_destroy(pdev, priv, bm_pool);
8034 for_each_present_cpu(i) {
8035 struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];
8037 dma_free_coherent(&pdev->dev,
8038 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
8040 aggr_txq->descs_dma);
8043 clk_disable_unprepare(priv->mg_clk);
8044 clk_disable_unprepare(priv->pp_clk);
8045 clk_disable_unprepare(priv->gop_clk);
8050 static const struct of_device_id mvpp2_match[] = {
8052 .compatible = "marvell,armada-375-pp2",
8053 .data = (void *)MVPP21,
8056 .compatible = "marvell,armada-7k-pp22",
8057 .data = (void *)MVPP22,
8061 MODULE_DEVICE_TABLE(of, mvpp2_match);
8063 static struct platform_driver mvpp2_driver = {
8064 .probe = mvpp2_probe,
8065 .remove = mvpp2_remove,
8067 .name = MVPP2_DRIVER_NAME,
8068 .of_match_table = mvpp2_match,
8072 module_platform_driver(mvpp2_driver);
8074 MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
8075 MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
8076 MODULE_LICENSE("GPL v2");