/*
 * IOMMU API for ARM architected SMMUv3 implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) 2015 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver is powered by bad coffee and bombay mix.
 */

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/platform_device.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

#define ARM_SMMU_IDR0 0x0
#define IDR0_ST_LVL_SHIFT 27
#define IDR0_ST_LVL_MASK 0x3
#define IDR0_ST_LVL_2LVL (1 << IDR0_ST_LVL_SHIFT)
#define IDR0_STALL_MODEL_SHIFT 24
#define IDR0_STALL_MODEL_MASK 0x3
#define IDR0_STALL_MODEL_STALL (0 << IDR0_STALL_MODEL_SHIFT)
#define IDR0_STALL_MODEL_FORCE (2 << IDR0_STALL_MODEL_SHIFT)
#define IDR0_TTENDIAN_SHIFT 21
#define IDR0_TTENDIAN_MASK 0x3
#define IDR0_TTENDIAN_LE (2 << IDR0_TTENDIAN_SHIFT)
#define IDR0_TTENDIAN_BE (3 << IDR0_TTENDIAN_SHIFT)
#define IDR0_TTENDIAN_MIXED (0 << IDR0_TTENDIAN_SHIFT)
#define IDR0_CD2L (1 << 19)
#define IDR0_VMID16 (1 << 18)
#define IDR0_PRI (1 << 16)
#define IDR0_SEV (1 << 14)
#define IDR0_MSI (1 << 13)
#define IDR0_ASID16 (1 << 12)
#define IDR0_ATS (1 << 10)
#define IDR0_HYP (1 << 9)
#define IDR0_COHACC (1 << 4)
#define IDR0_TTF_SHIFT 2
#define IDR0_TTF_MASK 0x3
#define IDR0_TTF_AARCH64 (2 << IDR0_TTF_SHIFT)
#define IDR0_TTF_AARCH32_64 (3 << IDR0_TTF_SHIFT)
#define IDR0_S1P (1 << 1)
#define IDR0_S2P (1 << 0)

#define ARM_SMMU_IDR1 0x4
#define IDR1_TABLES_PRESET (1 << 30)
#define IDR1_QUEUES_PRESET (1 << 29)
#define IDR1_REL (1 << 28)
#define IDR1_CMDQ_SHIFT 21
#define IDR1_CMDQ_MASK 0x1f
#define IDR1_EVTQ_SHIFT 16
#define IDR1_EVTQ_MASK 0x1f
#define IDR1_PRIQ_SHIFT 11
#define IDR1_PRIQ_MASK 0x1f
#define IDR1_SSID_SHIFT 6
#define IDR1_SSID_MASK 0x1f
#define IDR1_SID_SHIFT 0
#define IDR1_SID_MASK 0x3f

#define ARM_SMMU_IDR5 0x14
#define IDR5_STALL_MAX_SHIFT 16
#define IDR5_STALL_MAX_MASK 0xffff
#define IDR5_GRAN64K (1 << 6)
#define IDR5_GRAN16K (1 << 5)
#define IDR5_GRAN4K (1 << 4)
#define IDR5_OAS_SHIFT 0
#define IDR5_OAS_MASK 0x7
#define IDR5_OAS_32_BIT (0 << IDR5_OAS_SHIFT)
#define IDR5_OAS_36_BIT (1 << IDR5_OAS_SHIFT)
#define IDR5_OAS_40_BIT (2 << IDR5_OAS_SHIFT)
#define IDR5_OAS_42_BIT (3 << IDR5_OAS_SHIFT)
#define IDR5_OAS_44_BIT (4 << IDR5_OAS_SHIFT)
#define IDR5_OAS_48_BIT (5 << IDR5_OAS_SHIFT)

#define ARM_SMMU_CR0 0x20
#define CR0_CMDQEN (1 << 3)
#define CR0_EVTQEN (1 << 2)
#define CR0_PRIQEN (1 << 1)
#define CR0_SMMUEN (1 << 0)

#define ARM_SMMU_CR0ACK 0x24

#define ARM_SMMU_CR1 0x28
#define CR1_SH_NSH 0
#define CR1_SH_OSH 2
#define CR1_SH_ISH 3
#define CR1_CACHE_NC 0
#define CR1_CACHE_WB 1
#define CR1_CACHE_WT 2
#define CR1_TABLE_SH_SHIFT 10
#define CR1_TABLE_OC_SHIFT 8
#define CR1_TABLE_IC_SHIFT 6
#define CR1_QUEUE_SH_SHIFT 4
#define CR1_QUEUE_OC_SHIFT 2
#define CR1_QUEUE_IC_SHIFT 0

#define ARM_SMMU_CR2 0x2c
#define CR2_PTM (1 << 2)
#define CR2_RECINVSID (1 << 1)
#define CR2_E2H (1 << 0)

#define ARM_SMMU_GBPA 0x44
#define GBPA_ABORT (1 << 20)
#define GBPA_UPDATE (1 << 31)

#define ARM_SMMU_IRQ_CTRL 0x50
#define IRQ_CTRL_EVTQ_IRQEN (1 << 2)
#define IRQ_CTRL_PRIQ_IRQEN (1 << 1)
#define IRQ_CTRL_GERROR_IRQEN (1 << 0)

#define ARM_SMMU_IRQ_CTRLACK 0x54

#define ARM_SMMU_GERROR 0x60
#define GERROR_SFM_ERR (1 << 8)
#define GERROR_MSI_GERROR_ABT_ERR (1 << 7)
#define GERROR_MSI_PRIQ_ABT_ERR (1 << 6)
#define GERROR_MSI_EVTQ_ABT_ERR (1 << 5)
#define GERROR_MSI_CMDQ_ABT_ERR (1 << 4)
#define GERROR_PRIQ_ABT_ERR (1 << 3)
#define GERROR_EVTQ_ABT_ERR (1 << 2)
#define GERROR_CMDQ_ERR (1 << 0)
#define GERROR_ERR_MASK 0xfd

#define ARM_SMMU_GERRORN 0x64

#define ARM_SMMU_GERROR_IRQ_CFG0 0x68
#define ARM_SMMU_GERROR_IRQ_CFG1 0x70
#define ARM_SMMU_GERROR_IRQ_CFG2 0x74

#define ARM_SMMU_STRTAB_BASE 0x80
#define STRTAB_BASE_RA (1UL << 62)
#define STRTAB_BASE_ADDR_SHIFT 6
#define STRTAB_BASE_ADDR_MASK 0x3ffffffffffUL

#define ARM_SMMU_STRTAB_BASE_CFG 0x88
#define STRTAB_BASE_CFG_LOG2SIZE_SHIFT 0
#define STRTAB_BASE_CFG_LOG2SIZE_MASK 0x3f
#define STRTAB_BASE_CFG_SPLIT_SHIFT 6
#define STRTAB_BASE_CFG_SPLIT_MASK 0x1f
#define STRTAB_BASE_CFG_FMT_SHIFT 16
#define STRTAB_BASE_CFG_FMT_MASK 0x3
#define STRTAB_BASE_CFG_FMT_LINEAR (0 << STRTAB_BASE_CFG_FMT_SHIFT)
#define STRTAB_BASE_CFG_FMT_2LVL (1 << STRTAB_BASE_CFG_FMT_SHIFT)

#define ARM_SMMU_CMDQ_BASE 0x90
#define ARM_SMMU_CMDQ_PROD 0x98
#define ARM_SMMU_CMDQ_CONS 0x9c

#define ARM_SMMU_EVTQ_BASE 0xa0
#define ARM_SMMU_EVTQ_PROD 0x100a8
#define ARM_SMMU_EVTQ_CONS 0x100ac
#define ARM_SMMU_EVTQ_IRQ_CFG0 0xb0
#define ARM_SMMU_EVTQ_IRQ_CFG1 0xb8
#define ARM_SMMU_EVTQ_IRQ_CFG2 0xbc

#define ARM_SMMU_PRIQ_BASE 0xc0
#define ARM_SMMU_PRIQ_PROD 0x100c8
#define ARM_SMMU_PRIQ_CONS 0x100cc
#define ARM_SMMU_PRIQ_IRQ_CFG0 0xd0
#define ARM_SMMU_PRIQ_IRQ_CFG1 0xd8
#define ARM_SMMU_PRIQ_IRQ_CFG2 0xdc

/* Common MSI config fields */
#define MSI_CFG0_ADDR_SHIFT 2
#define MSI_CFG0_ADDR_MASK 0x3fffffffffffUL
#define MSI_CFG2_SH_SHIFT 4
#define MSI_CFG2_SH_NSH (0UL << MSI_CFG2_SH_SHIFT)
#define MSI_CFG2_SH_OSH (2UL << MSI_CFG2_SH_SHIFT)
#define MSI_CFG2_SH_ISH (3UL << MSI_CFG2_SH_SHIFT)
#define MSI_CFG2_MEMATTR_SHIFT 0
#define MSI_CFG2_MEMATTR_DEVICE_nGnRE (0x1 << MSI_CFG2_MEMATTR_SHIFT)

#define Q_IDX(q, p) ((p) & ((1 << (q)->max_n_shift) - 1))
#define Q_WRP(q, p) ((p) & (1 << (q)->max_n_shift))
#define Q_OVERFLOW_FLAG (1 << 31)
#define Q_OVF(q, p) ((p) & Q_OVERFLOW_FLAG)
#define Q_ENT(q, p) ((q)->base + \
                     Q_IDX(q, p) * (q)->ent_dwords)

#define Q_BASE_RWA (1UL << 62)
#define Q_BASE_ADDR_SHIFT 5
#define Q_BASE_ADDR_MASK 0xfffffffffffUL
#define Q_BASE_LOG2SIZE_SHIFT 0
#define Q_BASE_LOG2SIZE_MASK 0x1fUL

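/*
 * Illustrative example (not part of the original source): for a queue with
 * max_n_shift == 3 (eight entries), prod == 0x9 decodes as Q_IDX == 1 with
 * the wrap bit set. If cons == 0x1, the indices match but the wrap bits
 * differ, so the queue is full; matching indices *and* matching wrap bits
 * would mean empty instead.
 */
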
/*
 * Linear: Enough to cover 1 << IDR1.SIDSIZE entries
 * 2lvl: 128k L1 entries,
 *       256 lazy entries per table (each table covers a PCI bus)
 */
#define STRTAB_L1_SZ_SHIFT 20
#define STRTAB_SPLIT 8

#define STRTAB_L1_DESC_DWORDS 1
#define STRTAB_L1_DESC_SPAN_SHIFT 0
#define STRTAB_L1_DESC_SPAN_MASK 0x1fUL
#define STRTAB_L1_DESC_L2PTR_SHIFT 6
#define STRTAB_L1_DESC_L2PTR_MASK 0x3ffffffffffUL

#define STRTAB_STE_DWORDS 8
#define STRTAB_STE_0_V (1UL << 0)
#define STRTAB_STE_0_CFG_SHIFT 1
#define STRTAB_STE_0_CFG_MASK 0x7UL
#define STRTAB_STE_0_CFG_ABORT (0UL << STRTAB_STE_0_CFG_SHIFT)
#define STRTAB_STE_0_CFG_BYPASS (4UL << STRTAB_STE_0_CFG_SHIFT)
#define STRTAB_STE_0_CFG_S1_TRANS (5UL << STRTAB_STE_0_CFG_SHIFT)
#define STRTAB_STE_0_CFG_S2_TRANS (6UL << STRTAB_STE_0_CFG_SHIFT)

#define STRTAB_STE_0_S1FMT_SHIFT 4
#define STRTAB_STE_0_S1FMT_LINEAR (0UL << STRTAB_STE_0_S1FMT_SHIFT)
#define STRTAB_STE_0_S1CTXPTR_SHIFT 6
#define STRTAB_STE_0_S1CTXPTR_MASK 0x3ffffffffffUL
#define STRTAB_STE_0_S1CDMAX_SHIFT 59
#define STRTAB_STE_0_S1CDMAX_MASK 0x1fUL

#define STRTAB_STE_1_S1C_CACHE_NC 0UL
#define STRTAB_STE_1_S1C_CACHE_WBRA 1UL
#define STRTAB_STE_1_S1C_CACHE_WT 2UL
#define STRTAB_STE_1_S1C_CACHE_WB 3UL
#define STRTAB_STE_1_S1C_SH_NSH 0UL
#define STRTAB_STE_1_S1C_SH_OSH 2UL
#define STRTAB_STE_1_S1C_SH_ISH 3UL
#define STRTAB_STE_1_S1CIR_SHIFT 2
#define STRTAB_STE_1_S1COR_SHIFT 4
#define STRTAB_STE_1_S1CSH_SHIFT 6

#define STRTAB_STE_1_S1STALLD (1UL << 27)

#define STRTAB_STE_1_EATS_ABT 0UL
#define STRTAB_STE_1_EATS_TRANS 1UL
#define STRTAB_STE_1_EATS_S1CHK 2UL
#define STRTAB_STE_1_EATS_SHIFT 28

#define STRTAB_STE_1_STRW_NSEL1 0UL
#define STRTAB_STE_1_STRW_EL2 2UL
#define STRTAB_STE_1_STRW_SHIFT 30

#define STRTAB_STE_1_SHCFG_INCOMING 1UL
#define STRTAB_STE_1_SHCFG_SHIFT 44

#define STRTAB_STE_2_S2VMID_SHIFT 0
#define STRTAB_STE_2_S2VMID_MASK 0xffffUL
#define STRTAB_STE_2_VTCR_SHIFT 32
#define STRTAB_STE_2_VTCR_MASK 0x7ffffUL
#define STRTAB_STE_2_S2AA64 (1UL << 51)
#define STRTAB_STE_2_S2ENDI (1UL << 52)
#define STRTAB_STE_2_S2PTW (1UL << 54)
#define STRTAB_STE_2_S2R (1UL << 58)

#define STRTAB_STE_3_S2TTB_SHIFT 4
#define STRTAB_STE_3_S2TTB_MASK 0xfffffffffffUL

/* Context descriptor (stage-1 only) */
#define CTXDESC_CD_DWORDS 8
#define CTXDESC_CD_0_TCR_T0SZ_SHIFT 0
#define ARM64_TCR_T0SZ_SHIFT 0
#define ARM64_TCR_T0SZ_MASK 0x1fUL
#define CTXDESC_CD_0_TCR_TG0_SHIFT 6
#define ARM64_TCR_TG0_SHIFT 14
#define ARM64_TCR_TG0_MASK 0x3UL
#define CTXDESC_CD_0_TCR_IRGN0_SHIFT 8
#define ARM64_TCR_IRGN0_SHIFT 8
#define ARM64_TCR_IRGN0_MASK 0x3UL
#define CTXDESC_CD_0_TCR_ORGN0_SHIFT 10
#define ARM64_TCR_ORGN0_SHIFT 10
#define ARM64_TCR_ORGN0_MASK 0x3UL
#define CTXDESC_CD_0_TCR_SH0_SHIFT 12
#define ARM64_TCR_SH0_SHIFT 12
#define ARM64_TCR_SH0_MASK 0x3UL
#define CTXDESC_CD_0_TCR_EPD0_SHIFT 14
#define ARM64_TCR_EPD0_SHIFT 7
#define ARM64_TCR_EPD0_MASK 0x1UL
#define CTXDESC_CD_0_TCR_EPD1_SHIFT 30
#define ARM64_TCR_EPD1_SHIFT 23
#define ARM64_TCR_EPD1_MASK 0x1UL

#define CTXDESC_CD_0_ENDI (1UL << 15)
#define CTXDESC_CD_0_V (1UL << 31)

#define CTXDESC_CD_0_TCR_IPS_SHIFT 32
#define ARM64_TCR_IPS_SHIFT 32
#define ARM64_TCR_IPS_MASK 0x7UL
#define CTXDESC_CD_0_TCR_TBI0_SHIFT 38
#define ARM64_TCR_TBI0_SHIFT 37
#define ARM64_TCR_TBI0_MASK 0x1UL

#define CTXDESC_CD_0_AA64 (1UL << 41)
#define CTXDESC_CD_0_R (1UL << 45)
#define CTXDESC_CD_0_A (1UL << 46)
#define CTXDESC_CD_0_ASET_SHIFT 47
#define CTXDESC_CD_0_ASET_SHARED (0UL << CTXDESC_CD_0_ASET_SHIFT)
#define CTXDESC_CD_0_ASET_PRIVATE (1UL << CTXDESC_CD_0_ASET_SHIFT)
#define CTXDESC_CD_0_ASID_SHIFT 48
#define CTXDESC_CD_0_ASID_MASK 0xffffUL

#define CTXDESC_CD_1_TTB0_SHIFT 4
#define CTXDESC_CD_1_TTB0_MASK 0xfffffffffffUL

#define CTXDESC_CD_3_MAIR_SHIFT 0

/* Convert between AArch64 (CPU) TCR format and SMMU CD format */
#define ARM_SMMU_TCR2CD(tcr, fld) \
        (((tcr) >> ARM64_TCR_##fld##_SHIFT & ARM64_TCR_##fld##_MASK) \
         << CTXDESC_CD_0_TCR_##fld##_SHIFT)

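/*
 * Worked example (illustrative): for fld == TG0, the CPU TCR keeps the
 * granule size in bits [15:14] (ARM64_TCR_TG0_SHIFT == 14) while the CD
 * wants it at bits [7:6] (CTXDESC_CD_0_TCR_TG0_SHIFT == 6), so
 * ARM_SMMU_TCR2CD(tcr, TG0) extracts the two-bit field and re-seats it.
 */
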
#define CMDQ_ENT_DWORDS 2
#define CMDQ_MAX_SZ_SHIFT 8

#define CMDQ_ERR_SHIFT 24
#define CMDQ_ERR_MASK 0x7f
#define CMDQ_ERR_CERROR_NONE_IDX 0
#define CMDQ_ERR_CERROR_ILL_IDX 1
#define CMDQ_ERR_CERROR_ABT_IDX 2

#define CMDQ_0_OP_SHIFT 0
#define CMDQ_0_OP_MASK 0xffUL
#define CMDQ_0_SSV (1UL << 11)

#define CMDQ_PREFETCH_0_SID_SHIFT 32
#define CMDQ_PREFETCH_1_SIZE_SHIFT 0
#define CMDQ_PREFETCH_1_ADDR_MASK ~0xfffUL

#define CMDQ_CFGI_0_SID_SHIFT 32
#define CMDQ_CFGI_0_SID_MASK 0xffffffffUL
#define CMDQ_CFGI_1_LEAF (1UL << 0)
#define CMDQ_CFGI_1_RANGE_SHIFT 0
#define CMDQ_CFGI_1_RANGE_MASK 0x1fUL

#define CMDQ_TLBI_0_VMID_SHIFT 32
#define CMDQ_TLBI_0_ASID_SHIFT 48
#define CMDQ_TLBI_1_LEAF (1UL << 0)
#define CMDQ_TLBI_1_VA_MASK ~0xfffUL
#define CMDQ_TLBI_1_IPA_MASK 0xfffffffff000UL

#define CMDQ_PRI_0_SSID_SHIFT 12
#define CMDQ_PRI_0_SSID_MASK 0xfffffUL
#define CMDQ_PRI_0_SID_SHIFT 32
#define CMDQ_PRI_0_SID_MASK 0xffffffffUL
#define CMDQ_PRI_1_GRPID_SHIFT 0
#define CMDQ_PRI_1_GRPID_MASK 0x1ffUL
#define CMDQ_PRI_1_RESP_SHIFT 12
#define CMDQ_PRI_1_RESP_DENY (0UL << CMDQ_PRI_1_RESP_SHIFT)
#define CMDQ_PRI_1_RESP_FAIL (1UL << CMDQ_PRI_1_RESP_SHIFT)
#define CMDQ_PRI_1_RESP_SUCC (2UL << CMDQ_PRI_1_RESP_SHIFT)

#define CMDQ_SYNC_0_CS_SHIFT 12
#define CMDQ_SYNC_0_CS_NONE (0UL << CMDQ_SYNC_0_CS_SHIFT)
#define CMDQ_SYNC_0_CS_SEV (2UL << CMDQ_SYNC_0_CS_SHIFT)

#define EVTQ_ENT_DWORDS 4
#define EVTQ_MAX_SZ_SHIFT 7

#define EVTQ_0_ID_SHIFT 0
#define EVTQ_0_ID_MASK 0xffUL

#define PRIQ_ENT_DWORDS 2
#define PRIQ_MAX_SZ_SHIFT 8

#define PRIQ_0_SID_SHIFT 0
#define PRIQ_0_SID_MASK 0xffffffffUL
#define PRIQ_0_SSID_SHIFT 32
#define PRIQ_0_SSID_MASK 0xfffffUL
#define PRIQ_0_PERM_PRIV (1UL << 58)
#define PRIQ_0_PERM_EXEC (1UL << 59)
#define PRIQ_0_PERM_READ (1UL << 60)
#define PRIQ_0_PERM_WRITE (1UL << 61)
#define PRIQ_0_PRG_LAST (1UL << 62)
#define PRIQ_0_SSID_V (1UL << 63)

#define PRIQ_1_PRG_IDX_SHIFT 0
#define PRIQ_1_PRG_IDX_MASK 0x1ffUL
#define PRIQ_1_ADDR_SHIFT 12
#define PRIQ_1_ADDR_MASK 0xfffffffffffffUL

/* High-level queue structures */
#define ARM_SMMU_POLL_TIMEOUT_US 100

static bool disable_bypass;
module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
        "Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum pri_resp {
        PRI_RESP_DENY,
        PRI_RESP_FAIL,
        PRI_RESP_SUCC,
};

enum arm_smmu_msi_index {
        EVTQ_MSI_INDEX,
        GERROR_MSI_INDEX,
        PRIQ_MSI_INDEX,
        ARM_SMMU_MAX_MSIS,
};

static phys_addr_t arm_smmu_msi_cfg[ARM_SMMU_MAX_MSIS][3] = {
        [EVTQ_MSI_INDEX] = {
                ARM_SMMU_EVTQ_IRQ_CFG0,
                ARM_SMMU_EVTQ_IRQ_CFG1,
                ARM_SMMU_EVTQ_IRQ_CFG2,
        },
        [GERROR_MSI_INDEX] = {
                ARM_SMMU_GERROR_IRQ_CFG0,
                ARM_SMMU_GERROR_IRQ_CFG1,
                ARM_SMMU_GERROR_IRQ_CFG2,
        },
        [PRIQ_MSI_INDEX] = {
                ARM_SMMU_PRIQ_IRQ_CFG0,
                ARM_SMMU_PRIQ_IRQ_CFG1,
                ARM_SMMU_PRIQ_IRQ_CFG2,
        },
};

struct arm_smmu_cmdq_ent {
        /* Common fields */
        u8 opcode;
        bool substream_valid;

        /* Command-specific fields */
        union {
                #define CMDQ_OP_PREFETCH_CFG 0x1
                struct {
                        u32 sid;
                        u8 size;
                        u64 addr;
                } prefetch;

                #define CMDQ_OP_CFGI_STE 0x3
                #define CMDQ_OP_CFGI_ALL 0x4
                struct {
                        u32 sid;
                        union {
                                bool leaf;
                                u8 span;
                        };
                } cfgi;

                #define CMDQ_OP_TLBI_NH_ASID 0x11
                #define CMDQ_OP_TLBI_NH_VA 0x12
                #define CMDQ_OP_TLBI_EL2_ALL 0x20
                #define CMDQ_OP_TLBI_S12_VMALL 0x28
                #define CMDQ_OP_TLBI_S2_IPA 0x2a
                #define CMDQ_OP_TLBI_NSNH_ALL 0x30
                struct {
                        u16 asid;
                        u16 vmid;
                        bool leaf;
                        u64 addr;
                } tlbi;

                #define CMDQ_OP_PRI_RESP 0x41
                struct {
                        u32 sid;
                        u32 ssid;
                        u16 grpid;
                        enum pri_resp resp;
                } pri;

                #define CMDQ_OP_CMD_SYNC 0x46
        };
};

struct arm_smmu_queue {
        int irq; /* Wired interrupt */

        __le64 *base;
        dma_addr_t base_dma;
        u64 q_base;

        size_t ent_dwords;
        u32 max_n_shift;
        u32 prod;
        u32 cons;

        u32 __iomem *prod_reg;
        u32 __iomem *cons_reg;
};

struct arm_smmu_cmdq {
        struct arm_smmu_queue q;
        spinlock_t lock;
};

struct arm_smmu_evtq {
        struct arm_smmu_queue q;
};

struct arm_smmu_priq {
        struct arm_smmu_queue q;
};

/* High-level stream table and context descriptor structures */
struct arm_smmu_strtab_l1_desc {
        u8 span;

        __le64 *l2ptr;
        dma_addr_t l2ptr_dma;
};

struct arm_smmu_s1_cfg {
        __le64 *cdptr;
        dma_addr_t cdptr_dma;

        struct arm_smmu_ctx_desc {
                u16 asid;
                u64 ttbr;
                u64 tcr;
                u64 mair;
        } cd;
};

struct arm_smmu_s2_cfg {
        u16 vmid;
        u64 vttbr;
        u64 vtcr;
};

struct arm_smmu_strtab_ent {
        bool valid;

        bool bypass; /* Overrides s1/s2 config */
        struct arm_smmu_s1_cfg *s1_cfg;
        struct arm_smmu_s2_cfg *s2_cfg;
};

struct arm_smmu_strtab_cfg {
        __le64 *strtab;
        dma_addr_t strtab_dma;
        struct arm_smmu_strtab_l1_desc *l1_desc;
        unsigned int num_l1_ents;

        u64 strtab_base;
        u32 strtab_base_cfg;
};

/* An SMMUv3 instance */
struct arm_smmu_device {
        struct device *dev;
        void __iomem *base;

#define ARM_SMMU_FEAT_2_LVL_STRTAB (1 << 0)
#define ARM_SMMU_FEAT_2_LVL_CDTAB (1 << 1)
#define ARM_SMMU_FEAT_TT_LE (1 << 2)
#define ARM_SMMU_FEAT_TT_BE (1 << 3)
#define ARM_SMMU_FEAT_PRI (1 << 4)
#define ARM_SMMU_FEAT_ATS (1 << 5)
#define ARM_SMMU_FEAT_SEV (1 << 6)
#define ARM_SMMU_FEAT_MSI (1 << 7)
#define ARM_SMMU_FEAT_COHERENCY (1 << 8)
#define ARM_SMMU_FEAT_TRANS_S1 (1 << 9)
#define ARM_SMMU_FEAT_TRANS_S2 (1 << 10)
#define ARM_SMMU_FEAT_STALLS (1 << 11)
#define ARM_SMMU_FEAT_HYP (1 << 12)
        u32 features;

#define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0)
        u32 options;

        struct arm_smmu_cmdq cmdq;
        struct arm_smmu_evtq evtq;
        struct arm_smmu_priq priq;

        int gerr_irq;

        unsigned long ias; /* IPA */
        unsigned long oas; /* PA */
        unsigned long pgsize_bitmap;

#define ARM_SMMU_MAX_ASIDS (1 << 16)
        unsigned int asid_bits;
        DECLARE_BITMAP(asid_map, ARM_SMMU_MAX_ASIDS);

#define ARM_SMMU_MAX_VMIDS (1 << 16)
        unsigned int vmid_bits;
        DECLARE_BITMAP(vmid_map, ARM_SMMU_MAX_VMIDS);

        unsigned int ssid_bits;
        unsigned int sid_bits;

        struct arm_smmu_strtab_cfg strtab_cfg;
};

/* SMMU private data for each master */
struct arm_smmu_master_data {
        struct arm_smmu_device *smmu;
        struct arm_smmu_strtab_ent ste;
};

/* SMMU private data for an IOMMU domain */
enum arm_smmu_domain_stage {
        ARM_SMMU_DOMAIN_S1 = 0,
        ARM_SMMU_DOMAIN_S2,
        ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_domain {
        struct arm_smmu_device *smmu;
        struct mutex init_mutex; /* Protects smmu pointer */

        struct io_pgtable_ops *pgtbl_ops;
        spinlock_t pgtbl_lock;

        enum arm_smmu_domain_stage stage;
        union {
                struct arm_smmu_s1_cfg s1_cfg;
                struct arm_smmu_s2_cfg s2_cfg;
        };

        struct iommu_domain domain;
};

struct arm_smmu_option_prop {
        u32 opt;
        const char *prop;
};

static struct arm_smmu_option_prop arm_smmu_options[] = {
        { ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" },
        { 0, NULL},
};

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
        return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
        int i = 0;

        do {
                if (of_property_read_bool(smmu->dev->of_node,
                                          arm_smmu_options[i].prop)) {
                        smmu->options |= arm_smmu_options[i].opt;
                        dev_notice(smmu->dev, "option %s\n",
                                   arm_smmu_options[i].prop);
                }
        } while (arm_smmu_options[++i].opt);
}

/* Low-level queue manipulation functions */
static bool queue_full(struct arm_smmu_queue *q)
{
        return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
               Q_WRP(q, q->prod) != Q_WRP(q, q->cons);
}

static bool queue_empty(struct arm_smmu_queue *q)
{
        return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
               Q_WRP(q, q->prod) == Q_WRP(q, q->cons);
}

static void queue_sync_cons(struct arm_smmu_queue *q)
{
        q->cons = readl_relaxed(q->cons_reg);
}

static void queue_inc_cons(struct arm_smmu_queue *q)
{
        u32 cons = (Q_WRP(q, q->cons) | Q_IDX(q, q->cons)) + 1;

        q->cons = Q_OVF(q, q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons);
        writel(q->cons, q->cons_reg);
}

static int queue_sync_prod(struct arm_smmu_queue *q)
{
        int ret = 0;
        u32 prod = readl_relaxed(q->prod_reg);

        if (Q_OVF(q, prod) != Q_OVF(q, q->prod))
                ret = -EOVERFLOW;

        q->prod = prod;
        return ret;
}

static void queue_inc_prod(struct arm_smmu_queue *q)
{
        u32 prod = (Q_WRP(q, q->prod) | Q_IDX(q, q->prod)) + 1;

        q->prod = Q_OVF(q, q->prod) | Q_WRP(q, prod) | Q_IDX(q, prod);
        writel(q->prod, q->prod_reg);
}

/*
 * Wait for the SMMU to consume items. If drain is true, wait until the queue
 * is empty. Otherwise, wait until there is at least one free slot.
 */
static int queue_poll_cons(struct arm_smmu_queue *q, bool drain, bool wfe)
{
        ktime_t timeout = ktime_add_us(ktime_get(), ARM_SMMU_POLL_TIMEOUT_US);

        while (queue_sync_cons(q), (drain ? !queue_empty(q) : queue_full(q))) {
                if (ktime_compare(ktime_get(), timeout) > 0)
                        return -ETIMEDOUT;

                if (wfe) {
                        wfe();
                } else {
                        cpu_relax();
                        udelay(1);
                }
        }

        return 0;
}

static void queue_write(__le64 *dst, u64 *src, size_t n_dwords)
{
        int i;

        for (i = 0; i < n_dwords; ++i)
                *dst++ = cpu_to_le64(*src++);
}

static int queue_insert_raw(struct arm_smmu_queue *q, u64 *ent)
{
        if (queue_full(q))
                return -ENOSPC;

        queue_write(Q_ENT(q, q->prod), ent, q->ent_dwords);
        queue_inc_prod(q);
        return 0;
}

static void queue_read(__le64 *dst, u64 *src, size_t n_dwords)
{
        int i;

        for (i = 0; i < n_dwords; ++i)
                *dst++ = le64_to_cpu(*src++);
}

static int queue_remove_raw(struct arm_smmu_queue *q, u64 *ent)
{
        if (queue_empty(q))
                return -EAGAIN;

        queue_read(ent, Q_ENT(q, q->cons), q->ent_dwords);
        queue_inc_cons(q);
        return 0;
}

/* High-level queue accessors */
static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
{
        memset(cmd, 0, CMDQ_ENT_DWORDS << 3);
        cmd[0] |= (ent->opcode & CMDQ_0_OP_MASK) << CMDQ_0_OP_SHIFT;

        switch (ent->opcode) {
        case CMDQ_OP_TLBI_EL2_ALL:
        case CMDQ_OP_TLBI_NSNH_ALL:
                break;
        case CMDQ_OP_PREFETCH_CFG:
                cmd[0] |= (u64)ent->prefetch.sid << CMDQ_PREFETCH_0_SID_SHIFT;
                cmd[1] |= ent->prefetch.size << CMDQ_PREFETCH_1_SIZE_SHIFT;
                cmd[1] |= ent->prefetch.addr & CMDQ_PREFETCH_1_ADDR_MASK;
                break;
        case CMDQ_OP_CFGI_STE:
                cmd[0] |= (u64)ent->cfgi.sid << CMDQ_CFGI_0_SID_SHIFT;
                cmd[1] |= ent->cfgi.leaf ? CMDQ_CFGI_1_LEAF : 0;
                break;
        case CMDQ_OP_CFGI_ALL:
                /* Cover the entire SID range */
                cmd[1] |= CMDQ_CFGI_1_RANGE_MASK << CMDQ_CFGI_1_RANGE_SHIFT;
                break;
        case CMDQ_OP_TLBI_NH_VA:
                cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT;
                cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0;
                cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK;
                break;
        case CMDQ_OP_TLBI_S2_IPA:
                cmd[0] |= (u64)ent->tlbi.vmid << CMDQ_TLBI_0_VMID_SHIFT;
                cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0;
                cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_IPA_MASK;
                break;
        case CMDQ_OP_TLBI_NH_ASID:
                cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT;
                /* Fallthrough */
        case CMDQ_OP_TLBI_S12_VMALL:
                cmd[0] |= (u64)ent->tlbi.vmid << CMDQ_TLBI_0_VMID_SHIFT;
                break;
        case CMDQ_OP_PRI_RESP:
                cmd[0] |= ent->substream_valid ? CMDQ_0_SSV : 0;
                cmd[0] |= ent->pri.ssid << CMDQ_PRI_0_SSID_SHIFT;
                cmd[0] |= (u64)ent->pri.sid << CMDQ_PRI_0_SID_SHIFT;
                cmd[1] |= ent->pri.grpid << CMDQ_PRI_1_GRPID_SHIFT;
                switch (ent->pri.resp) {
                case PRI_RESP_DENY:
                        cmd[1] |= CMDQ_PRI_1_RESP_DENY;
                        break;
                case PRI_RESP_FAIL:
                        cmd[1] |= CMDQ_PRI_1_RESP_FAIL;
                        break;
                case PRI_RESP_SUCC:
                        cmd[1] |= CMDQ_PRI_1_RESP_SUCC;
                        break;
                default:
                        return -EINVAL;
                }
                break;
        case CMDQ_OP_CMD_SYNC:
                cmd[0] |= CMDQ_SYNC_0_CS_SEV;
                break;
        default:
                return -ENOENT;
        }

        return 0;
}

static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
{
        static const char *cerror_str[] = {
                [CMDQ_ERR_CERROR_NONE_IDX] = "No error",
                [CMDQ_ERR_CERROR_ILL_IDX] = "Illegal command",
                [CMDQ_ERR_CERROR_ABT_IDX] = "Abort on command fetch",
        };

        int i;
        u64 cmd[CMDQ_ENT_DWORDS];
        struct arm_smmu_queue *q = &smmu->cmdq.q;
        u32 cons = readl_relaxed(q->cons_reg);
        u32 idx = cons >> CMDQ_ERR_SHIFT & CMDQ_ERR_MASK;
        struct arm_smmu_cmdq_ent cmd_sync = {
                .opcode = CMDQ_OP_CMD_SYNC,
        };

        dev_err(smmu->dev, "CMDQ error (cons 0x%08x): %s\n", cons,
                idx < ARRAY_SIZE(cerror_str) ? cerror_str[idx] : "Unknown");

        switch (idx) {
        case CMDQ_ERR_CERROR_ABT_IDX:
                dev_err(smmu->dev, "retrying command fetch\n");
        case CMDQ_ERR_CERROR_NONE_IDX:
                return;
        case CMDQ_ERR_CERROR_ILL_IDX:
                /* Fallthrough */
        default:
                break;
        }

        /*
         * We may have concurrent producers, so we need to be careful
         * not to touch any of the shadow cmdq state.
         */
        queue_read(cmd, Q_ENT(q, cons), q->ent_dwords);
        dev_err(smmu->dev, "skipping command in error state:\n");
        for (i = 0; i < ARRAY_SIZE(cmd); ++i)
                dev_err(smmu->dev, "\t0x%016llx\n", (unsigned long long)cmd[i]);

        /* Convert the erroneous command into a CMD_SYNC */
        if (arm_smmu_cmdq_build_cmd(cmd, &cmd_sync)) {
                dev_err(smmu->dev, "failed to convert to CMD_SYNC\n");
                return;
        }

        queue_write(Q_ENT(q, cons), cmd, q->ent_dwords);
}

static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
                                    struct arm_smmu_cmdq_ent *ent)
{
        u64 cmd[CMDQ_ENT_DWORDS];
        unsigned long flags;
        bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
        struct arm_smmu_queue *q = &smmu->cmdq.q;

        if (arm_smmu_cmdq_build_cmd(cmd, ent)) {
                dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n",
                         ent->opcode);
                return;
        }

        spin_lock_irqsave(&smmu->cmdq.lock, flags);
        while (queue_insert_raw(q, cmd) == -ENOSPC) {
                if (queue_poll_cons(q, false, wfe))
                        dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
        }

        if (ent->opcode == CMDQ_OP_CMD_SYNC && queue_poll_cons(q, true, wfe))
                dev_err_ratelimited(smmu->dev, "CMD_SYNC timeout\n");
        spin_unlock_irqrestore(&smmu->cmdq.lock, flags);
}

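/*
 * Typical usage, sketched for reference (this mirrors the TLB and STE
 * invalidation callers below): build one command per operation and chase
 * it with a CMD_SYNC so that completion can be observed:
 *
 *      struct arm_smmu_cmdq_ent cmd = { .opcode = CMDQ_OP_TLBI_NH_ASID };
 *
 *      cmd.tlbi.asid = asid;
 *      arm_smmu_cmdq_issue_cmd(smmu, &cmd);
 *      cmd.opcode = CMDQ_OP_CMD_SYNC;
 *      arm_smmu_cmdq_issue_cmd(smmu, &cmd);
 */
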
/* Context descriptor manipulation functions */
static u64 arm_smmu_cpu_tcr_to_cd(u64 tcr)
{
        u64 val = 0;

        /* Repack the TCR. Just care about TTBR0 for now */
        val |= ARM_SMMU_TCR2CD(tcr, T0SZ);
        val |= ARM_SMMU_TCR2CD(tcr, TG0);
        val |= ARM_SMMU_TCR2CD(tcr, IRGN0);
        val |= ARM_SMMU_TCR2CD(tcr, ORGN0);
        val |= ARM_SMMU_TCR2CD(tcr, SH0);
        val |= ARM_SMMU_TCR2CD(tcr, EPD0);
        val |= ARM_SMMU_TCR2CD(tcr, EPD1);
        val |= ARM_SMMU_TCR2CD(tcr, IPS);
        val |= ARM_SMMU_TCR2CD(tcr, TBI0);

        return val;
}

static void arm_smmu_write_ctx_desc(struct arm_smmu_device *smmu,
                                    struct arm_smmu_s1_cfg *cfg)
{
        u64 val;

        /*
         * We don't need to issue any invalidation here, as we'll invalidate
         * the STE when installing the new entry anyway.
         */
        val = arm_smmu_cpu_tcr_to_cd(cfg->cd.tcr) |
#ifdef __BIG_ENDIAN
              CTXDESC_CD_0_ENDI |
#endif
              CTXDESC_CD_0_R | CTXDESC_CD_0_A | CTXDESC_CD_0_ASET_PRIVATE |
              CTXDESC_CD_0_AA64 | (u64)cfg->cd.asid << CTXDESC_CD_0_ASID_SHIFT |
              CTXDESC_CD_0_V;
        cfg->cdptr[0] = cpu_to_le64(val);

        val = cfg->cd.ttbr & CTXDESC_CD_1_TTB0_MASK << CTXDESC_CD_1_TTB0_SHIFT;
        cfg->cdptr[1] = cpu_to_le64(val);

        cfg->cdptr[3] = cpu_to_le64(cfg->cd.mair << CTXDESC_CD_3_MAIR_SHIFT);
}

/* Stream table manipulation functions */
static void
arm_smmu_write_strtab_l1_desc(__le64 *dst, struct arm_smmu_strtab_l1_desc *desc)
{
        u64 val = 0;

        val |= (desc->span & STRTAB_L1_DESC_SPAN_MASK)
                << STRTAB_L1_DESC_SPAN_SHIFT;
        val |= desc->l2ptr_dma &
               STRTAB_L1_DESC_L2PTR_MASK << STRTAB_L1_DESC_L2PTR_SHIFT;

        *dst = cpu_to_le64(val);
}

static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid)
{
        struct arm_smmu_cmdq_ent cmd = {
                .opcode = CMDQ_OP_CFGI_STE,
                .cfgi = {
                        .sid = sid,
                        .leaf = true,
                },
        };

        arm_smmu_cmdq_issue_cmd(smmu, &cmd);
        cmd.opcode = CMDQ_OP_CMD_SYNC;
        arm_smmu_cmdq_issue_cmd(smmu, &cmd);
}

static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
                                      __le64 *dst, struct arm_smmu_strtab_ent *ste)
{
        /*
         * This is hideously complicated, but we only really care about
         * three cases at the moment:
         *
         * 1. Invalid (all zero) -> bypass (init)
         * 2. Bypass -> translation (attach)
         * 3. Translation -> bypass (detach)
         *
         * Given that we can't update the STE atomically and the SMMU
         * doesn't read the thing in a defined order, that leaves us
         * with the following maintenance requirements:
         *
         * 1. Update Config, return (init time STEs aren't live)
         * 2. Write everything apart from dword 0, sync, write dword 0, sync
         * 3. Update Config, sync
         */
        u64 val = le64_to_cpu(dst[0]);
        bool ste_live = false;
        struct arm_smmu_cmdq_ent prefetch_cmd = {
                .opcode = CMDQ_OP_PREFETCH_CFG,
                .prefetch = {
                        .sid = sid,
                },
        };

        if (val & STRTAB_STE_0_V) {
                u64 cfg;

                cfg = val & STRTAB_STE_0_CFG_MASK << STRTAB_STE_0_CFG_SHIFT;
                switch (cfg) {
                case STRTAB_STE_0_CFG_BYPASS:
                        break;
                case STRTAB_STE_0_CFG_S1_TRANS:
                case STRTAB_STE_0_CFG_S2_TRANS:
                        ste_live = true;
                        break;
                case STRTAB_STE_0_CFG_ABORT:
                        if (disable_bypass)
                                break;
                default:
                        BUG(); /* STE corruption */
                }
        }

        /* Nuke the existing Config, as we're going to rewrite it */
        val &= ~(STRTAB_STE_0_CFG_MASK << STRTAB_STE_0_CFG_SHIFT);

        if (ste->valid)
                val |= STRTAB_STE_0_V;
        else
                val &= ~STRTAB_STE_0_V;

        if (ste->bypass) {
                val |= disable_bypass ? STRTAB_STE_0_CFG_ABORT
                                      : STRTAB_STE_0_CFG_BYPASS;
                dst[0] = cpu_to_le64(val);
                dst[1] = cpu_to_le64(STRTAB_STE_1_SHCFG_INCOMING
                         << STRTAB_STE_1_SHCFG_SHIFT);
                dst[2] = 0; /* Nuke the VMID */
                if (ste_live)
                        arm_smmu_sync_ste_for_sid(smmu, sid);
                return;
        }

        if (ste->s1_cfg) {
                BUG_ON(ste_live);
                dst[1] = cpu_to_le64(
                         STRTAB_STE_1_S1C_CACHE_WBRA
                         << STRTAB_STE_1_S1CIR_SHIFT |
                         STRTAB_STE_1_S1C_CACHE_WBRA
                         << STRTAB_STE_1_S1COR_SHIFT |
                         STRTAB_STE_1_S1C_SH_ISH << STRTAB_STE_1_S1CSH_SHIFT |
#ifdef CONFIG_PCI_ATS
                         STRTAB_STE_1_EATS_TRANS << STRTAB_STE_1_EATS_SHIFT |
#endif
                         STRTAB_STE_1_STRW_NSEL1 << STRTAB_STE_1_STRW_SHIFT);

                if (smmu->features & ARM_SMMU_FEAT_STALLS)
                        dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);

                val |= (ste->s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK
                        << STRTAB_STE_0_S1CTXPTR_SHIFT) |
                        STRTAB_STE_0_CFG_S1_TRANS;
        }

        if (ste->s2_cfg) {
                BUG_ON(ste_live);
                dst[2] = cpu_to_le64(
                         ste->s2_cfg->vmid << STRTAB_STE_2_S2VMID_SHIFT |
                         (ste->s2_cfg->vtcr & STRTAB_STE_2_VTCR_MASK)
                          << STRTAB_STE_2_VTCR_SHIFT |
#ifdef __BIG_ENDIAN
                         STRTAB_STE_2_S2ENDI |
#endif
                         STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2AA64 |
                         STRTAB_STE_2_S2R);

                dst[3] = cpu_to_le64(ste->s2_cfg->vttbr &
                         STRTAB_STE_3_S2TTB_MASK << STRTAB_STE_3_S2TTB_SHIFT);

                val |= STRTAB_STE_0_CFG_S2_TRANS;
        }

        arm_smmu_sync_ste_for_sid(smmu, sid);
        dst[0] = cpu_to_le64(val);
        arm_smmu_sync_ste_for_sid(smmu, sid);

        /* It's likely that we'll want to use the new STE soon */
        if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH))
                arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
}

static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent)
{
        unsigned int i;
        struct arm_smmu_strtab_ent ste = {
                .valid = true,
                .bypass = true,
        };

        for (i = 0; i < nent; ++i) {
                arm_smmu_write_strtab_ent(NULL, -1, strtab, &ste);
                strtab += STRTAB_STE_DWORDS;
        }
}

static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
{
        size_t size;
        void *strtab;
        struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
        struct arm_smmu_strtab_l1_desc *desc = &cfg->l1_desc[sid >> STRTAB_SPLIT];

        if (desc->l2ptr)
                return 0;

        size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3);
        strtab = &cfg->strtab[(sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS];

        desc->span = STRTAB_SPLIT + 1;
        desc->l2ptr = dmam_alloc_coherent(smmu->dev, size, &desc->l2ptr_dma,
                                          GFP_KERNEL | __GFP_ZERO);
        if (!desc->l2ptr) {
                dev_err(smmu->dev,
                        "failed to allocate l2 stream table for SID %u\n",
                        sid);
                return -ENOMEM;
        }

        arm_smmu_init_bypass_stes(desc->l2ptr, 1 << STRTAB_SPLIT);
        arm_smmu_write_strtab_l1_desc(strtab, desc);
        return 0;
}

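/*
 * Size arithmetic, for reference: with STRTAB_SPLIT == 8 each L2 table
 * holds 1 << 8 == 256 STEs of STRTAB_STE_DWORDS * 8 == 64 bytes each,
 * i.e. 16KB per table, and the matching L1 descriptor advertises a span
 * of STRTAB_SPLIT + 1 == 9.
 */
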
/* IRQ and event handlers */
static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
{
        int i;
        struct arm_smmu_device *smmu = dev;
        struct arm_smmu_queue *q = &smmu->evtq.q;
        u64 evt[EVTQ_ENT_DWORDS];

        do {
                while (!queue_remove_raw(q, evt)) {
                        u8 id = evt[0] >> EVTQ_0_ID_SHIFT & EVTQ_0_ID_MASK;

                        dev_info(smmu->dev, "event 0x%02x received:\n", id);
                        for (i = 0; i < ARRAY_SIZE(evt); ++i)
                                dev_info(smmu->dev, "\t0x%016llx\n",
                                         (unsigned long long)evt[i]);
                }

                /*
                 * Not much we can do on overflow, so scream and pretend we're
                 * trying harder.
                 */
                if (queue_sync_prod(q) == -EOVERFLOW)
                        dev_err(smmu->dev, "EVTQ overflow detected -- events lost\n");
        } while (!queue_empty(q));

        /* Sync our overflow flag, as we believe we're up to speed */
        q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons);
        return IRQ_HANDLED;
}

static void arm_smmu_handle_ppr(struct arm_smmu_device *smmu, u64 *evt)
{
        u32 sid, ssid;
        u16 grpid;
        bool ssv, last;

        sid = evt[0] >> PRIQ_0_SID_SHIFT & PRIQ_0_SID_MASK;
        ssv = evt[0] & PRIQ_0_SSID_V;
        ssid = ssv ? evt[0] >> PRIQ_0_SSID_SHIFT & PRIQ_0_SSID_MASK : 0;
        last = evt[0] & PRIQ_0_PRG_LAST;
        grpid = evt[1] >> PRIQ_1_PRG_IDX_SHIFT & PRIQ_1_PRG_IDX_MASK;

        dev_info(smmu->dev, "unexpected PRI request received:\n");
        dev_info(smmu->dev,
                 "\tsid 0x%08x.0x%05x: [%u%s] %sprivileged %s%s%s access at iova 0x%016llx\n",
                 sid, ssid, grpid, last ? "L" : "",
                 evt[0] & PRIQ_0_PERM_PRIV ? "" : "un",
                 evt[0] & PRIQ_0_PERM_READ ? "R" : "",
                 evt[0] & PRIQ_0_PERM_WRITE ? "W" : "",
                 evt[0] & PRIQ_0_PERM_EXEC ? "X" : "",
                 evt[1] & PRIQ_1_ADDR_MASK << PRIQ_1_ADDR_SHIFT);

        if (last) {
                struct arm_smmu_cmdq_ent cmd = {
                        .opcode = CMDQ_OP_PRI_RESP,
                        .substream_valid = ssv,
                        .pri = {
                                .sid = sid,
                                .ssid = ssid,
                                .grpid = grpid,
                                .resp = PRI_RESP_DENY,
                        },
                };

                arm_smmu_cmdq_issue_cmd(smmu, &cmd);
        }
}

static irqreturn_t arm_smmu_priq_thread(int irq, void *dev)
{
        struct arm_smmu_device *smmu = dev;
        struct arm_smmu_queue *q = &smmu->priq.q;
        u64 evt[PRIQ_ENT_DWORDS];

        do {
                while (!queue_remove_raw(q, evt))
                        arm_smmu_handle_ppr(smmu, evt);

                if (queue_sync_prod(q) == -EOVERFLOW)
                        dev_err(smmu->dev, "PRIQ overflow detected -- requests lost\n");
        } while (!queue_empty(q));

        /* Sync our overflow flag, as we believe we're up to speed */
        q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons);
        return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_cmdq_sync_handler(int irq, void *dev)
{
        /* We don't actually use CMD_SYNC interrupts for anything */
        return IRQ_HANDLED;
}

static int arm_smmu_device_disable(struct arm_smmu_device *smmu);

static irqreturn_t arm_smmu_gerror_handler(int irq, void *dev)
{
        u32 gerror, gerrorn, active;
        struct arm_smmu_device *smmu = dev;

        gerror = readl_relaxed(smmu->base + ARM_SMMU_GERROR);
        gerrorn = readl_relaxed(smmu->base + ARM_SMMU_GERRORN);

        active = gerror ^ gerrorn;
        if (!(active & GERROR_ERR_MASK))
                return IRQ_NONE; /* No errors pending */

        dev_warn(smmu->dev,
                 "unexpected global error reported (0x%08x), this could be serious\n",
                 active);

        if (active & GERROR_SFM_ERR) {
                dev_err(smmu->dev, "device has entered Service Failure Mode!\n");
                arm_smmu_device_disable(smmu);
        }

        if (active & GERROR_MSI_GERROR_ABT_ERR)
                dev_warn(smmu->dev, "GERROR MSI write aborted\n");

        if (active & GERROR_MSI_PRIQ_ABT_ERR)
                dev_warn(smmu->dev, "PRIQ MSI write aborted\n");

        if (active & GERROR_MSI_EVTQ_ABT_ERR)
                dev_warn(smmu->dev, "EVTQ MSI write aborted\n");

        if (active & GERROR_MSI_CMDQ_ABT_ERR) {
                dev_warn(smmu->dev, "CMDQ MSI write aborted\n");
                arm_smmu_cmdq_sync_handler(irq, smmu->dev);
        }

        if (active & GERROR_PRIQ_ABT_ERR)
                dev_err(smmu->dev, "PRIQ write aborted -- events may have been lost\n");

        if (active & GERROR_EVTQ_ABT_ERR)
                dev_err(smmu->dev, "EVTQ write aborted -- events may have been lost\n");

        if (active & GERROR_CMDQ_ERR)
                arm_smmu_cmdq_skip_err(smmu);

        writel(gerror, smmu->base + ARM_SMMU_GERRORN);
        return IRQ_HANDLED;
}

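/*
 * Worked example of the GERROR/GERRORN toggle protocol: if GERROR reads
 * 0x05 while GERRORN still reads 0x01, only bit 2 (EVTQ_ABT) differs and
 * is the error still pending; writing the GERROR value back to GERRORN
 * acknowledges everything that was active.
 */
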
/* IO_PGTABLE API */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
        struct arm_smmu_cmdq_ent cmd;

        cmd.opcode = CMDQ_OP_CMD_SYNC;
        arm_smmu_cmdq_issue_cmd(smmu, &cmd);
}

static void arm_smmu_tlb_sync(void *cookie)
{
        struct arm_smmu_domain *smmu_domain = cookie;
        __arm_smmu_tlb_sync(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context(void *cookie)
{
        struct arm_smmu_domain *smmu_domain = cookie;
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        struct arm_smmu_cmdq_ent cmd;

        if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
                cmd.opcode = CMDQ_OP_TLBI_NH_ASID;
                cmd.tlbi.asid = smmu_domain->s1_cfg.cd.asid;
                cmd.tlbi.vmid = 0;
        } else {
                cmd.opcode = CMDQ_OP_TLBI_S12_VMALL;
                cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
        }

        arm_smmu_cmdq_issue_cmd(smmu, &cmd);
        __arm_smmu_tlb_sync(smmu);
}

static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
                                          size_t granule, bool leaf, void *cookie)
{
        struct arm_smmu_domain *smmu_domain = cookie;
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        struct arm_smmu_cmdq_ent cmd = {
                .tlbi = {
                        .leaf = leaf,
                        .addr = iova,
                },
        };

        if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
                cmd.opcode = CMDQ_OP_TLBI_NH_VA;
                cmd.tlbi.asid = smmu_domain->s1_cfg.cd.asid;
        } else {
                cmd.opcode = CMDQ_OP_TLBI_S2_IPA;
                cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
        }

        do {
                arm_smmu_cmdq_issue_cmd(smmu, &cmd);
                cmd.tlbi.addr += granule;
        } while (size -= granule);
}

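/*
 * For example, invalidating an 8KB range mapped with 4KB granules emits
 * two TLBI commands (for iova and iova + 0x1000); the queue is only
 * drained later, when the ->tlb_sync callback issues its CMD_SYNC.
 */
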
static const struct iommu_gather_ops arm_smmu_gather_ops = {
        .tlb_flush_all = arm_smmu_tlb_inv_context,
        .tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
        .tlb_sync = arm_smmu_tlb_sync,
};

/* IOMMU API */
static bool arm_smmu_capable(enum iommu_cap cap)
{
        switch (cap) {
        case IOMMU_CAP_CACHE_COHERENCY:
                return true;
        case IOMMU_CAP_INTR_REMAP:
                return true; /* MSIs are just memory writes */
        case IOMMU_CAP_NOEXEC:
                return true;
        default:
                return false;
        }
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
        struct arm_smmu_domain *smmu_domain;

        if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
                return NULL;

        /*
         * Allocate the domain and initialise some of its data structures.
         * We can't really do anything meaningful until we've added a
         * master.
         */
        smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
        if (!smmu_domain)
                return NULL;

        if (type == IOMMU_DOMAIN_DMA &&
            iommu_get_dma_cookie(&smmu_domain->domain)) {
                kfree(smmu_domain);
                return NULL;
        }

        mutex_init(&smmu_domain->init_mutex);
        spin_lock_init(&smmu_domain->pgtbl_lock);
        return &smmu_domain->domain;
}

static int arm_smmu_bitmap_alloc(unsigned long *map, int span)
{
        int idx, size = 1 << span;

        do {
                idx = find_first_zero_bit(map, size);
                if (idx == size)
                        return -ENOSPC;
        } while (test_and_set_bit(idx, map));

        return idx;
}

static void arm_smmu_bitmap_free(unsigned long *map, int idx)
{
        clear_bit(idx, map);
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
        struct arm_smmu_device *smmu = smmu_domain->smmu;

        iommu_put_dma_cookie(domain);
        free_io_pgtable_ops(smmu_domain->pgtbl_ops);

        /* Free the CD and ASID, if we allocated them */
        if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
                struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;

                if (cfg->cdptr) {
                        dmam_free_coherent(smmu_domain->smmu->dev,
                                           CTXDESC_CD_DWORDS << 3,
                                           cfg->cdptr,
                                           cfg->cdptr_dma);

                        arm_smmu_bitmap_free(smmu->asid_map, cfg->cd.asid);
                }
        } else {
                struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
                if (cfg->vmid)
                        arm_smmu_bitmap_free(smmu->vmid_map, cfg->vmid);
        }

        kfree(smmu_domain);
}

static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
                                       struct io_pgtable_cfg *pgtbl_cfg)
{
        int ret;
        int asid;
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;

        asid = arm_smmu_bitmap_alloc(smmu->asid_map, smmu->asid_bits);
        if (asid < 0)
                return asid;

        cfg->cdptr = dmam_alloc_coherent(smmu->dev, CTXDESC_CD_DWORDS << 3,
                                         &cfg->cdptr_dma,
                                         GFP_KERNEL | __GFP_ZERO);
        if (!cfg->cdptr) {
                dev_warn(smmu->dev, "failed to allocate context descriptor\n");
                ret = -ENOMEM;
                goto out_free_asid;
        }

        cfg->cd.asid = (u16)asid;
        cfg->cd.ttbr = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
        cfg->cd.tcr = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
        cfg->cd.mair = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
        return 0;

out_free_asid:
        arm_smmu_bitmap_free(smmu->asid_map, asid);
        return ret;
}

static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
                                       struct io_pgtable_cfg *pgtbl_cfg)
{
        int vmid;
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;

        vmid = arm_smmu_bitmap_alloc(smmu->vmid_map, smmu->vmid_bits);
        if (vmid < 0)
                return vmid;

        cfg->vmid = (u16)vmid;
        cfg->vttbr = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
        cfg->vtcr = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
        return 0;
}

static int arm_smmu_domain_finalise(struct iommu_domain *domain)
{
        int ret;
        unsigned long ias, oas;
        enum io_pgtable_fmt fmt;
        struct io_pgtable_cfg pgtbl_cfg;
        struct io_pgtable_ops *pgtbl_ops;
        int (*finalise_stage_fn)(struct arm_smmu_domain *,
                                 struct io_pgtable_cfg *);
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
        struct arm_smmu_device *smmu = smmu_domain->smmu;

        /* Restrict the stage to what we can actually support */
        if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
                smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
        if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
                smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

        switch (smmu_domain->stage) {
        case ARM_SMMU_DOMAIN_S1:
                ias = VA_BITS;
                oas = smmu->ias;
                fmt = ARM_64_LPAE_S1;
                finalise_stage_fn = arm_smmu_domain_finalise_s1;
                break;
        case ARM_SMMU_DOMAIN_NESTED:
        case ARM_SMMU_DOMAIN_S2:
                ias = smmu->ias;
                oas = smmu->oas;
                fmt = ARM_64_LPAE_S2;
                finalise_stage_fn = arm_smmu_domain_finalise_s2;
                break;
        default:
                return -EINVAL;
        }

        pgtbl_cfg = (struct io_pgtable_cfg) {
                .pgsize_bitmap = smmu->pgsize_bitmap,
                .ias = ias,
                .oas = oas,
                .tlb = &arm_smmu_gather_ops,
                .iommu_dev = smmu->dev,
        };

        pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
        if (!pgtbl_ops)
                return -ENOMEM;

        domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
        domain->geometry.aperture_end = (1UL << ias) - 1;
        domain->geometry.force_aperture = true;
        smmu_domain->pgtbl_ops = pgtbl_ops;

        ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
        if (ret < 0)
                free_io_pgtable_ops(pgtbl_ops);

        return ret;
}

static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
{
        __le64 *step;
        struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;

        if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
                struct arm_smmu_strtab_l1_desc *l1_desc;
                int idx;

                /* Two-level walk */
                idx = (sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS;
                l1_desc = &cfg->l1_desc[idx];
                idx = (sid & ((1 << STRTAB_SPLIT) - 1)) * STRTAB_STE_DWORDS;
                step = &l1_desc->l2ptr[idx];
        } else {
                /* Simple linear lookup */
                step = &cfg->strtab[sid * STRTAB_STE_DWORDS];
        }

        return step;
}

static int arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
{
        int i;
        struct arm_smmu_master_data *master = fwspec->iommu_priv;
        struct arm_smmu_device *smmu = master->smmu;

        for (i = 0; i < fwspec->num_ids; ++i) {
                u32 sid = fwspec->ids[i];
                __le64 *step = arm_smmu_get_step_for_sid(smmu, sid);

                arm_smmu_write_strtab_ent(smmu, sid, step, &master->ste);
        }

        return 0;
}

static void arm_smmu_detach_dev(struct device *dev)
{
        struct arm_smmu_master_data *master = dev->iommu_fwspec->iommu_priv;

        master->ste.bypass = true;
        if (arm_smmu_install_ste_for_dev(dev->iommu_fwspec) < 0)
                dev_warn(dev, "failed to install bypass STE\n");
}

static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
        int ret = 0;
        struct arm_smmu_device *smmu;
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
        struct arm_smmu_master_data *master;
        struct arm_smmu_strtab_ent *ste;

        if (!dev->iommu_fwspec)
                return -ENOENT;

        master = dev->iommu_fwspec->iommu_priv;
        smmu = master->smmu;
        ste = &master->ste;

        /* Already attached to a different domain? */
        if (!ste->bypass)
                arm_smmu_detach_dev(dev);

        mutex_lock(&smmu_domain->init_mutex);

        if (!smmu_domain->smmu) {
                smmu_domain->smmu = smmu;
                ret = arm_smmu_domain_finalise(domain);
                if (ret) {
                        smmu_domain->smmu = NULL;
                        goto out_unlock;
                }
        } else if (smmu_domain->smmu != smmu) {
                dev_err(dev,
                        "cannot attach to SMMU %s (upstream of %s)\n",
                        dev_name(smmu_domain->smmu->dev),
                        dev_name(smmu->dev));
                ret = -ENXIO;
                goto out_unlock;
        }

        ste->bypass = false;
        ste->valid = true;

        if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
                ste->s1_cfg = &smmu_domain->s1_cfg;
                ste->s2_cfg = NULL;
                arm_smmu_write_ctx_desc(smmu, ste->s1_cfg);
        } else {
                ste->s1_cfg = NULL;
                ste->s2_cfg = &smmu_domain->s2_cfg;
        }

        ret = arm_smmu_install_ste_for_dev(dev->iommu_fwspec);
        if (ret < 0)
                ste->valid = false;

out_unlock:
        mutex_unlock(&smmu_domain->init_mutex);
        return ret;
}

static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
                        phys_addr_t paddr, size_t size, int prot)
{
        int ret;
        unsigned long flags;
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
        struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

        if (!ops)
                return -ENODEV;

        spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
        ret = ops->map(ops, iova, paddr, size, prot);
        spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
        return ret;
}

static size_t
arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
{
        size_t ret;
        unsigned long flags;
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
        struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

        if (!ops)
                return 0;

        spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
        ret = ops->unmap(ops, iova, size);
        spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
        return ret;
}

static phys_addr_t
arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
        phys_addr_t ret;
        unsigned long flags;
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
        struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

        if (!ops)
                return 0;

        spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
        ret = ops->iova_to_phys(ops, iova);
        spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
        return ret;
}

static struct platform_driver arm_smmu_driver;

static int arm_smmu_match_node(struct device *dev, void *data)
{
        return dev->fwnode == data;
}

static struct arm_smmu_device *
arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
{
        struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
                                                fwnode, arm_smmu_match_node);
        put_device(dev);
        return dev ? dev_get_drvdata(dev) : NULL;
}

static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
{
        unsigned long limit = smmu->strtab_cfg.num_l1_ents;

        if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
                limit *= 1UL << STRTAB_SPLIT;

        return sid < limit;
}

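/*
 * Concrete numbers, for orientation: with STRTAB_SPLIT == 8, a 2-level
 * configuration whose L1 holds 64 entries accepts SIDs below
 * 64 * 256 == 16384, whereas a linear table is bounded by num_l1_ents
 * alone.
 */
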
static struct iommu_ops arm_smmu_ops;

static int arm_smmu_add_device(struct device *dev)
{
        int i, ret;
        struct arm_smmu_device *smmu;
        struct arm_smmu_master_data *master;
        struct iommu_fwspec *fwspec = dev->iommu_fwspec;
        struct iommu_group *group;

        if (!fwspec || fwspec->ops != &arm_smmu_ops)
                return -ENODEV;
        /*
         * We _can_ actually withstand dodgy bus code re-calling add_device()
         * without an intervening remove_device()/of_xlate() sequence, but
         * we're not going to do so quietly...
         */
        if (WARN_ON_ONCE(fwspec->iommu_priv)) {
                master = fwspec->iommu_priv;
                smmu = master->smmu;
        } else {
                smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
                if (!smmu)
                        return -ENODEV;
                master = kzalloc(sizeof(*master), GFP_KERNEL);
                if (!master)
                        return -ENOMEM;

                master->smmu = smmu;
                fwspec->iommu_priv = master;
        }

        /* Check the SIDs are in range of the SMMU and our stream table */
        for (i = 0; i < fwspec->num_ids; i++) {
                u32 sid = fwspec->ids[i];

                if (!arm_smmu_sid_in_range(smmu, sid))
                        return -ERANGE;

                /* Ensure l2 strtab is initialised */
                if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
                        ret = arm_smmu_init_l2_strtab(smmu, sid);
                        if (ret)
                                return ret;
                }
        }

        group = iommu_group_get_for_dev(dev);
        if (!IS_ERR(group))
                iommu_group_put(group);

        return PTR_ERR_OR_ZERO(group);
}

static void arm_smmu_remove_device(struct device *dev)
{
        struct iommu_fwspec *fwspec = dev->iommu_fwspec;
        struct arm_smmu_master_data *master;

        if (!fwspec || fwspec->ops != &arm_smmu_ops)
                return;

        master = fwspec->iommu_priv;
        if (master && master->ste.valid)
                arm_smmu_detach_dev(dev);
        iommu_group_remove_device(dev);
        kfree(master);
        iommu_fwspec_free(dev);
}

static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
        struct iommu_group *group;

        /*
         * We don't support devices sharing stream IDs other than PCI RID
         * aliases, since the necessary ID-to-device lookup becomes rather
         * impractical given a potential sparse 32-bit stream ID space.
         */
        if (dev_is_pci(dev))
                group = pci_device_group(dev);
        else
                group = generic_device_group(dev);

        return group;
}

static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
                                    enum iommu_attr attr, void *data)
{
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

        switch (attr) {
        case DOMAIN_ATTR_NESTING:
                *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
                return 0;
        default:
                return -ENODEV;
        }
}

static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
                                    enum iommu_attr attr, void *data)
{
        int ret = 0;
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

        mutex_lock(&smmu_domain->init_mutex);

        switch (attr) {
        case DOMAIN_ATTR_NESTING:
                if (smmu_domain->smmu) {
                        ret = -EPERM;
                        goto out_unlock;
                }

                if (*(int *)data)
                        smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
                else
                        smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
                break;
        default:
                ret = -ENODEV;
        }

out_unlock:
        mutex_unlock(&smmu_domain->init_mutex);
        return ret;
}

static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
        return iommu_fwspec_add_ids(dev, args->args, 1);
}

static struct iommu_ops arm_smmu_ops = {
        .capable = arm_smmu_capable,
        .domain_alloc = arm_smmu_domain_alloc,
        .domain_free = arm_smmu_domain_free,
        .attach_dev = arm_smmu_attach_dev,
        .map = arm_smmu_map,
        .unmap = arm_smmu_unmap,
        .map_sg = default_iommu_map_sg,
        .iova_to_phys = arm_smmu_iova_to_phys,
        .add_device = arm_smmu_add_device,
        .remove_device = arm_smmu_remove_device,
        .device_group = arm_smmu_device_group,
        .domain_get_attr = arm_smmu_domain_get_attr,
        .domain_set_attr = arm_smmu_domain_set_attr,
        .of_xlate = arm_smmu_of_xlate,
        .pgsize_bitmap = -1UL, /* Restricted during device attach */
};

/* Probing and initialisation functions */
static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
                                   struct arm_smmu_queue *q,
                                   unsigned long prod_off,
                                   unsigned long cons_off,
                                   size_t dwords)
{
        size_t qsz = ((1 << q->max_n_shift) * dwords) << 3;

        q->base = dmam_alloc_coherent(smmu->dev, qsz, &q->base_dma, GFP_KERNEL);
        if (!q->base) {
                dev_err(smmu->dev, "failed to allocate queue (0x%zx bytes)\n",
                        qsz);
                return -ENOMEM;
        }

        q->prod_reg = smmu->base + prod_off;
        q->cons_reg = smmu->base + cons_off;
        q->ent_dwords = dwords;

        q->q_base = Q_BASE_RWA;
        q->q_base |= q->base_dma & Q_BASE_ADDR_MASK << Q_BASE_ADDR_SHIFT;
        q->q_base |= (q->max_n_shift & Q_BASE_LOG2SIZE_MASK)
                     << Q_BASE_LOG2SIZE_SHIFT;

        q->prod = q->cons = 0;
        return 0;
}

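/*
 * Example encoding (illustrative values): a 4KB command queue
 * (max_n_shift == 8, CMDQ_ENT_DWORDS == 2) based at 0x80001000 yields
 * q_base == Q_BASE_RWA | 0x80001000 | 8: read-allocate hint, base
 * address and LOG2SIZE packed into a single register value.
 */
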
static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
{
        int ret;

        /* cmdq */
        spin_lock_init(&smmu->cmdq.lock);
        ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, ARM_SMMU_CMDQ_PROD,
                                      ARM_SMMU_CMDQ_CONS, CMDQ_ENT_DWORDS);
        if (ret)
                return ret;

        /* evtq */
        ret = arm_smmu_init_one_queue(smmu, &smmu->evtq.q, ARM_SMMU_EVTQ_PROD,
                                      ARM_SMMU_EVTQ_CONS, EVTQ_ENT_DWORDS);
        if (ret)
                return ret;

        /* priq */
        if (!(smmu->features & ARM_SMMU_FEAT_PRI))
                return 0;

        return arm_smmu_init_one_queue(smmu, &smmu->priq.q, ARM_SMMU_PRIQ_PROD,
                                       ARM_SMMU_PRIQ_CONS, PRIQ_ENT_DWORDS);
}

static int arm_smmu_init_l1_strtab(struct arm_smmu_device *smmu)
{
        unsigned int i;
        struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
        size_t size = sizeof(*cfg->l1_desc) * cfg->num_l1_ents;
        void *strtab = smmu->strtab_cfg.strtab;

        cfg->l1_desc = devm_kzalloc(smmu->dev, size, GFP_KERNEL);
        if (!cfg->l1_desc) {
                dev_err(smmu->dev, "failed to allocate l1 stream table desc\n");
                return -ENOMEM;
        }

        for (i = 0; i < cfg->num_l1_ents; ++i) {
                arm_smmu_write_strtab_l1_desc(strtab, &cfg->l1_desc[i]);
                strtab += STRTAB_L1_DESC_DWORDS << 3;
        }

        return 0;
}

static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
{
        void *strtab;
        u64 reg;
        u32 size, l1size;
        struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;

        /*
         * If we can resolve everything with a single L2 table, then we
         * just need a single L1 descriptor. Otherwise, calculate the L1
         * size, capped to the SIDSIZE.
         */
        if (smmu->sid_bits < STRTAB_SPLIT) {
                size = 0;
        } else {
                size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
                size = min(size, smmu->sid_bits - STRTAB_SPLIT);
        }
        cfg->num_l1_ents = 1 << size;

        size += STRTAB_SPLIT;
        if (size < smmu->sid_bits)
                dev_warn(smmu->dev,
                         "2-level strtab only covers %u/%u bits of SID\n",
                         size, smmu->sid_bits);

        l1size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3);
        strtab = dmam_alloc_coherent(smmu->dev, l1size, &cfg->strtab_dma,
                                     GFP_KERNEL | __GFP_ZERO);
        if (!strtab) {
                dev_err(smmu->dev,
                        "failed to allocate l1 stream table (%u bytes)\n",
                        size);
                return -ENOMEM;
        }
        cfg->strtab = strtab;

        /* Configure strtab_base_cfg for 2 levels */
        reg = STRTAB_BASE_CFG_FMT_2LVL;
        reg |= (size & STRTAB_BASE_CFG_LOG2SIZE_MASK)
                << STRTAB_BASE_CFG_LOG2SIZE_SHIFT;
        reg |= (STRTAB_SPLIT & STRTAB_BASE_CFG_SPLIT_MASK)
                << STRTAB_BASE_CFG_SPLIT_SHIFT;
        cfg->strtab_base_cfg = reg;

        return arm_smmu_init_l1_strtab(smmu);
}

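/*
 * Worked example: sid_bits == 16 gives size == min(17, 8) == 8, hence
 * num_l1_ents == 256 and l1size == 2KB; LOG2SIZE is then programmed as
 * 8 + STRTAB_SPLIT == 16, covering the full 16-bit SID space.
 */
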
static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
{
        void *strtab;
        u64 reg;
        u32 size;
        struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;

        size = (1 << smmu->sid_bits) * (STRTAB_STE_DWORDS << 3);
        strtab = dmam_alloc_coherent(smmu->dev, size, &cfg->strtab_dma,
                                     GFP_KERNEL | __GFP_ZERO);
        if (!strtab) {
                dev_err(smmu->dev,
                        "failed to allocate linear stream table (%u bytes)\n",
                        size);
                return -ENOMEM;
        }
        cfg->strtab = strtab;
        cfg->num_l1_ents = 1 << smmu->sid_bits;

        /* Configure strtab_base_cfg for a linear table covering all SIDs */
        reg = STRTAB_BASE_CFG_FMT_LINEAR;
        reg |= (smmu->sid_bits & STRTAB_BASE_CFG_LOG2SIZE_MASK)
                << STRTAB_BASE_CFG_LOG2SIZE_SHIFT;
        cfg->strtab_base_cfg = reg;

        arm_smmu_init_bypass_stes(strtab, cfg->num_l1_ents);
        return 0;
}

static int arm_smmu_init_strtab(struct arm_smmu_device *smmu)
{
        u64 reg;
        int ret;

        if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
                ret = arm_smmu_init_strtab_2lvl(smmu);
        else
                ret = arm_smmu_init_strtab_linear(smmu);

        if (ret)
                return ret;

        /* Set the strtab base address */
        reg = smmu->strtab_cfg.strtab_dma &
              STRTAB_BASE_ADDR_MASK << STRTAB_BASE_ADDR_SHIFT;
        reg |= STRTAB_BASE_RA;
        smmu->strtab_cfg.strtab_base = reg;

        /* Allocate the first VMID for stage-2 bypass STEs */
        set_bit(0, smmu->vmid_map);
        return 0;
}

static int arm_smmu_init_structures(struct arm_smmu_device *smmu)
{
        int ret;

        ret = arm_smmu_init_queues(smmu);
        if (ret)
                return ret;

        return arm_smmu_init_strtab(smmu);
}

static int arm_smmu_write_reg_sync(struct arm_smmu_device *smmu, u32 val,
                                   unsigned int reg_off, unsigned int ack_off)
{
        u32 reg;

        writel_relaxed(val, smmu->base + reg_off);
        return readl_relaxed_poll_timeout(smmu->base + ack_off, reg, reg == val,
                                          1, ARM_SMMU_POLL_TIMEOUT_US);
}

/* GBPA is "special" */
static int arm_smmu_update_gbpa(struct arm_smmu_device *smmu, u32 set, u32 clr)
{
        int ret;
        u32 reg, __iomem *gbpa = smmu->base + ARM_SMMU_GBPA;

        ret = readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE),
                                         1, ARM_SMMU_POLL_TIMEOUT_US);
        if (ret)
                return ret;

        reg &= ~clr;
        reg |= set;
        writel_relaxed(reg | GBPA_UPDATE, gbpa);
        return readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE),
                                          1, ARM_SMMU_POLL_TIMEOUT_US);
}

static void arm_smmu_free_msis(void *data)
{
        struct device *dev = data;
        platform_msi_domain_free_irqs(dev);
}

static void arm_smmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
        phys_addr_t doorbell;
        struct device *dev = msi_desc_to_dev(desc);
        struct arm_smmu_device *smmu = dev_get_drvdata(dev);
        phys_addr_t *cfg = arm_smmu_msi_cfg[desc->platform.msi_index];

        doorbell = (((u64)msg->address_hi) << 32) | msg->address_lo;
        doorbell &= MSI_CFG0_ADDR_MASK << MSI_CFG0_ADDR_SHIFT;

        writeq_relaxed(doorbell, smmu->base + cfg[0]);
        writel_relaxed(msg->data, smmu->base + cfg[1]);
        writel_relaxed(MSI_CFG2_MEMATTR_DEVICE_nGnRE, smmu->base + cfg[2]);
}

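/*
 * For example, an MSI message with address_hi == 0x0 and address_lo ==
 * 0x8020040 is masked down to bits [47:2] of the doorbell address (so a
 * word-aligned value passes through unchanged) before being written to
 * the relevant *_IRQ_CFG0 register.
 */
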
static void arm_smmu_setup_msis(struct arm_smmu_device *smmu)
{
        struct msi_desc *desc;
        int ret, nvec = ARM_SMMU_MAX_MSIS;
        struct device *dev = smmu->dev;

        /* Clear the MSI address regs */
        writeq_relaxed(0, smmu->base + ARM_SMMU_GERROR_IRQ_CFG0);
        writeq_relaxed(0, smmu->base + ARM_SMMU_EVTQ_IRQ_CFG0);

        if (smmu->features & ARM_SMMU_FEAT_PRI)
                writeq_relaxed(0, smmu->base + ARM_SMMU_PRIQ_IRQ_CFG0);
        else
                nvec--;

        if (!(smmu->features & ARM_SMMU_FEAT_MSI))
                return;

        /* Allocate MSIs for evtq, gerror and priq. Ignore cmdq */
        ret = platform_msi_domain_alloc_irqs(dev, nvec, arm_smmu_write_msi_msg);
        if (ret) {
                dev_warn(dev, "failed to allocate MSIs\n");
                return;
        }

        for_each_msi_entry(desc, dev) {
                switch (desc->platform.msi_index) {
                case EVTQ_MSI_INDEX:
                        smmu->evtq.q.irq = desc->irq;
                        break;
                case GERROR_MSI_INDEX:
                        smmu->gerr_irq = desc->irq;
                        break;
                case PRIQ_MSI_INDEX:
                        smmu->priq.q.irq = desc->irq;
                        break;
                default: /* Unknown */
                        continue;
                }
        }

        /* Add callback to free MSIs on teardown */
        devm_add_action(dev, arm_smmu_free_msis, dev);
}

static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
{
        int ret, irq;
        u32 irqen_flags = IRQ_CTRL_EVTQ_IRQEN | IRQ_CTRL_GERROR_IRQEN;

        /* Disable IRQs first */
        ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_IRQ_CTRL,
                                      ARM_SMMU_IRQ_CTRLACK);
        if (ret) {
                dev_err(smmu->dev, "failed to disable irqs\n");
                return ret;
        }

        arm_smmu_setup_msis(smmu);

        /* Request interrupt lines */
        irq = smmu->evtq.q.irq;
        if (irq) {
                ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
                                                arm_smmu_evtq_thread,
                                                IRQF_ONESHOT,
                                                "arm-smmu-v3-evtq", smmu);
                if (ret < 0)
                        dev_warn(smmu->dev, "failed to enable evtq irq\n");
        }

        irq = smmu->cmdq.q.irq;
        if (irq) {
                ret = devm_request_irq(smmu->dev, irq,
                                       arm_smmu_cmdq_sync_handler, 0,
                                       "arm-smmu-v3-cmdq-sync", smmu);
                if (ret < 0)
                        dev_warn(smmu->dev, "failed to enable cmdq-sync irq\n");
        }

        irq = smmu->gerr_irq;
        if (irq) {
                ret = devm_request_irq(smmu->dev, irq, arm_smmu_gerror_handler,
                                       0, "arm-smmu-v3-gerror", smmu);
                if (ret < 0)
                        dev_warn(smmu->dev, "failed to enable gerror irq\n");
        }

        if (smmu->features & ARM_SMMU_FEAT_PRI) {
                irq = smmu->priq.q.irq;
                if (irq) {
                        ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
                                                        arm_smmu_priq_thread,
                                                        IRQF_ONESHOT,
                                                        "arm-smmu-v3-priq",
                                                        smmu);
                        if (ret < 0)
                                dev_warn(smmu->dev,
                                         "failed to enable priq irq\n");
                        else
                                irqen_flags |= IRQ_CTRL_PRIQ_IRQEN;
                }
        }

        /* Enable interrupt generation on the SMMU */
        ret = arm_smmu_write_reg_sync(smmu, irqen_flags,
                                      ARM_SMMU_IRQ_CTRL, ARM_SMMU_IRQ_CTRLACK);
        if (ret)
                dev_warn(smmu->dev, "failed to enable irqs\n");

        return 0;
}

static int arm_smmu_device_disable(struct arm_smmu_device *smmu)
{
        int ret;

        ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_CR0, ARM_SMMU_CR0ACK);
        if (ret)
                dev_err(smmu->dev, "failed to clear cr0\n");

        return ret;
}

static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
{
	int ret;
	u32 reg, enables;
	struct arm_smmu_cmdq_ent cmd;

	/* Clear CR0 and sync (disables SMMU and queue processing) */
	reg = readl_relaxed(smmu->base + ARM_SMMU_CR0);
	if (reg & CR0_SMMUEN)
		dev_warn(smmu->dev, "SMMU currently enabled! Resetting...\n");

	ret = arm_smmu_device_disable(smmu);
	if (ret)
		return ret;

	/* CR1 (table and queue memory attributes) */
	reg = (CR1_SH_ISH << CR1_TABLE_SH_SHIFT) |
	      (CR1_CACHE_WB << CR1_TABLE_OC_SHIFT) |
	      (CR1_CACHE_WB << CR1_TABLE_IC_SHIFT) |
	      (CR1_SH_ISH << CR1_QUEUE_SH_SHIFT) |
	      (CR1_CACHE_WB << CR1_QUEUE_OC_SHIFT) |
	      (CR1_CACHE_WB << CR1_QUEUE_IC_SHIFT);
	writel_relaxed(reg, smmu->base + ARM_SMMU_CR1);

	/* CR2 (private TLB maintenance, record invalid SIDs, E2H) */
	reg = CR2_PTM | CR2_RECINVSID | CR2_E2H;
	writel_relaxed(reg, smmu->base + ARM_SMMU_CR2);

	/* Stream table */
	writeq_relaxed(smmu->strtab_cfg.strtab_base,
		       smmu->base + ARM_SMMU_STRTAB_BASE);
	writel_relaxed(smmu->strtab_cfg.strtab_base_cfg,
		       smmu->base + ARM_SMMU_STRTAB_BASE_CFG);

	/* Command queue */
	writeq_relaxed(smmu->cmdq.q.q_base, smmu->base + ARM_SMMU_CMDQ_BASE);
	writel_relaxed(smmu->cmdq.q.prod, smmu->base + ARM_SMMU_CMDQ_PROD);
	writel_relaxed(smmu->cmdq.q.cons, smmu->base + ARM_SMMU_CMDQ_CONS);

	enables = CR0_CMDQEN;
	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
				      ARM_SMMU_CR0ACK);
	if (ret) {
		dev_err(smmu->dev, "failed to enable command queue\n");
		return ret;
	}
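
	/*
	 * The command queue must come up before the other queues: all of
	 * the invalidation below is issued as commands, with CMD_SYNC
	 * ensuring their completion before we continue.
	 */
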
	/* Invalidate any cached configuration */
	cmd.opcode = CMDQ_OP_CFGI_ALL;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	cmd.opcode = CMDQ_OP_CMD_SYNC;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);

	/* Invalidate any stale TLB entries */
	if (smmu->features & ARM_SMMU_FEAT_HYP) {
		cmd.opcode = CMDQ_OP_TLBI_EL2_ALL;
		arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	}

	cmd.opcode = CMDQ_OP_TLBI_NSNH_ALL;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	cmd.opcode = CMDQ_OP_CMD_SYNC;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);

	/* Event queue */
	writeq_relaxed(smmu->evtq.q.q_base, smmu->base + ARM_SMMU_EVTQ_BASE);
	writel_relaxed(smmu->evtq.q.prod, smmu->base + ARM_SMMU_EVTQ_PROD);
	writel_relaxed(smmu->evtq.q.cons, smmu->base + ARM_SMMU_EVTQ_CONS);

	enables |= CR0_EVTQEN;
	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
				      ARM_SMMU_CR0ACK);
	if (ret) {
		dev_err(smmu->dev, "failed to enable event queue\n");
		return ret;
	}

	/* PRI queue */
	if (smmu->features & ARM_SMMU_FEAT_PRI) {
		writeq_relaxed(smmu->priq.q.q_base,
			       smmu->base + ARM_SMMU_PRIQ_BASE);
		writel_relaxed(smmu->priq.q.prod,
			       smmu->base + ARM_SMMU_PRIQ_PROD);
		writel_relaxed(smmu->priq.q.cons,
			       smmu->base + ARM_SMMU_PRIQ_CONS);

		enables |= CR0_PRIQEN;
		ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
					      ARM_SMMU_CR0ACK);
		if (ret) {
			dev_err(smmu->dev, "failed to enable PRI queue\n");
			return ret;
		}
	}

	ret = arm_smmu_setup_irqs(smmu);
	if (ret) {
		dev_err(smmu->dev, "failed to setup irqs\n");
		return ret;
	}

	/* Enable the SMMU interface, or ensure bypass */
	if (!bypass || disable_bypass) {
		enables |= CR0_SMMUEN;
	} else {
		ret = arm_smmu_update_gbpa(smmu, 0, GBPA_ABORT);
		if (ret) {
			dev_err(smmu->dev, "GBPA not responding to update\n");
			return ret;
		}
	}

	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
				      ARM_SMMU_CR0ACK);
	if (ret) {
		dev_err(smmu->dev, "failed to enable SMMU interface\n");
		return ret;
	}

	return 0;
}
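
/*
 * Probe the hardware ID registers (IDR0, IDR1, IDR5) and translate them
 * into driver feature flags and limits. Note that firmware-described
 * properties such as coherency take precedence over the ID registers.
 */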
static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
{
	u32 reg;
	bool coherent = smmu->features & ARM_SMMU_FEAT_COHERENCY;

	/* IDR0 */
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR0);

	/* 2-level structures */
	if ((reg & IDR0_ST_LVL_MASK << IDR0_ST_LVL_SHIFT) == IDR0_ST_LVL_2LVL)
		smmu->features |= ARM_SMMU_FEAT_2_LVL_STRTAB;

	if (reg & IDR0_CD2L)
		smmu->features |= ARM_SMMU_FEAT_2_LVL_CDTAB;

	/*
	 * Translation table endianness.
	 * We currently require the same endianness as the CPU, but this
	 * could be changed later by adding a new IO_PGTABLE_QUIRK.
	 */
	switch (reg & IDR0_TTENDIAN_MASK << IDR0_TTENDIAN_SHIFT) {
	case IDR0_TTENDIAN_MIXED:
		smmu->features |= ARM_SMMU_FEAT_TT_LE | ARM_SMMU_FEAT_TT_BE;
		break;
#ifdef __BIG_ENDIAN
	case IDR0_TTENDIAN_BE:
		smmu->features |= ARM_SMMU_FEAT_TT_BE;
		break;
#else
	case IDR0_TTENDIAN_LE:
		smmu->features |= ARM_SMMU_FEAT_TT_LE;
		break;
#endif
	default:
		dev_err(smmu->dev, "unknown/unsupported TT endianness!\n");
		return -ENXIO;
	}

	/* Boolean feature flags */
	if (IS_ENABLED(CONFIG_PCI_PRI) && reg & IDR0_PRI)
		smmu->features |= ARM_SMMU_FEAT_PRI;

	if (IS_ENABLED(CONFIG_PCI_ATS) && reg & IDR0_ATS)
		smmu->features |= ARM_SMMU_FEAT_ATS;

	if (reg & IDR0_SEV)
		smmu->features |= ARM_SMMU_FEAT_SEV;

	if (reg & IDR0_MSI)
		smmu->features |= ARM_SMMU_FEAT_MSI;

	if (reg & IDR0_HYP)
		smmu->features |= ARM_SMMU_FEAT_HYP;

	/*
	 * The coherency feature as set by FW is used in preference to the ID
	 * register, but warn on mismatch.
	 */
	if (!!(reg & IDR0_COHACC) != coherent)
		dev_warn(smmu->dev, "IDR0.COHACC overridden by dma-coherent property (%s)\n",
			 coherent ? "true" : "false");

	switch (reg & IDR0_STALL_MODEL_MASK << IDR0_STALL_MODEL_SHIFT) {
	case IDR0_STALL_MODEL_STALL:
		/* Fallthrough */
	case IDR0_STALL_MODEL_FORCE:
		smmu->features |= ARM_SMMU_FEAT_STALLS;
	}

	if (reg & IDR0_S1P)
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;

	if (reg & IDR0_S2P)
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;

	if (!(reg & (IDR0_S1P | IDR0_S2P))) {
		dev_err(smmu->dev, "no translation support!\n");
		return -ENXIO;
	}

	/* We only support the AArch64 table format at present */
	switch (reg & IDR0_TTF_MASK << IDR0_TTF_SHIFT) {
	case IDR0_TTF_AARCH32_64:
		smmu->ias = 40;
		/* Fallthrough */
	case IDR0_TTF_AARCH64:
		break;
	default:
		dev_err(smmu->dev, "AArch64 table format not supported!\n");
		return -ENXIO;
	}

	/* ASID/VMID sizes */
	smmu->asid_bits = reg & IDR0_ASID16 ? 16 : 8;
	smmu->vmid_bits = reg & IDR0_VMID16 ? 16 : 8;
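
	/*
	 * IDR1 describes queue/table geometry and StreamID/SubstreamID
	 * widths. Implementations with preset tables or queues (or
	 * relative base addresses) are rejected, as this driver always
	 * allocates those structures itself.
	 */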
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR1);
	if (reg & (IDR1_TABLES_PRESET | IDR1_QUEUES_PRESET | IDR1_REL)) {
		dev_err(smmu->dev, "embedded implementation not supported\n");
		return -ENXIO;
	}

	/* Queue sizes, capped at 4k */
	smmu->cmdq.q.max_n_shift = min((u32)CMDQ_MAX_SZ_SHIFT,
				       reg >> IDR1_CMDQ_SHIFT & IDR1_CMDQ_MASK);
	if (!smmu->cmdq.q.max_n_shift) {
		/* Odd alignment restrictions on the base, so ignore for now */
		dev_err(smmu->dev, "unit-length command queue not supported\n");
		return -ENXIO;
	}

	smmu->evtq.q.max_n_shift = min((u32)EVTQ_MAX_SZ_SHIFT,
				       reg >> IDR1_EVTQ_SHIFT & IDR1_EVTQ_MASK);
	smmu->priq.q.max_n_shift = min((u32)PRIQ_MAX_SZ_SHIFT,
				       reg >> IDR1_PRIQ_SHIFT & IDR1_PRIQ_MASK);

	/* SID/SSID sizes */
	smmu->ssid_bits = reg >> IDR1_SSID_SHIFT & IDR1_SSID_MASK;
	smmu->sid_bits = reg >> IDR1_SID_SHIFT & IDR1_SID_MASK;
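
	/* IDR5: output address size, stall limits and supported page sizes */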
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5);

	/* Maximum number of outstanding stalls */
	smmu->evtq.max_stalls = reg >> IDR5_STALL_MAX_SHIFT
				& IDR5_STALL_MAX_MASK;

	/* Page sizes */
	if (reg & IDR5_GRAN64K)
		smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
	if (reg & IDR5_GRAN16K)
		smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
	if (reg & IDR5_GRAN4K)
		smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;

	if (arm_smmu_ops.pgsize_bitmap == -1UL)
		arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
	else
		arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
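
	/*
	 * Note that arm_smmu_ops is shared by all SMMUv3 instances in the
	 * system, so the page sizes advertised to the IOMMU core are the
	 * union of what the probed instances support.
	 */
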
	/* Output address size */
	switch (reg & IDR5_OAS_MASK << IDR5_OAS_SHIFT) {
	case IDR5_OAS_32_BIT:
		smmu->oas = 32;
		break;
	case IDR5_OAS_36_BIT:
		smmu->oas = 36;
		break;
	case IDR5_OAS_40_BIT:
		smmu->oas = 40;
		break;
	case IDR5_OAS_42_BIT:
		smmu->oas = 42;
		break;
	case IDR5_OAS_44_BIT:
		smmu->oas = 44;
		break;
	default:
		dev_info(smmu->dev,
			"unknown output address size. Truncating to 48-bit\n");
		/* Fallthrough */
	case IDR5_OAS_48_BIT:
		smmu->oas = 48;
	}

	/* Set the DMA mask for our table walker */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(smmu->oas)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	smmu->ias = max(smmu->ias, smmu->oas);

	dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n",
		 smmu->ias, smmu->oas, smmu->features);
	return 0;
}
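
/*
 * Firmware probing: on ACPI systems the SMMU is described by an IORT
 * node, on DT systems by the "arm,smmu-v3" binding. Both paths mainly
 * report coherency; the remaining capabilities come from the hardware
 * ID registers.
 */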
#ifdef CONFIG_ACPI
static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
				      struct arm_smmu_device *smmu)
{
	struct acpi_iort_smmu_v3 *iort_smmu;
	struct device *dev = smmu->dev;
	struct acpi_iort_node *node;

	node = *(struct acpi_iort_node **)dev_get_platdata(dev);

	/* Retrieve SMMUv3 specific data */
	iort_smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	if (iort_smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE)
		smmu->features |= ARM_SMMU_FEAT_COHERENCY;

	return 0;
}
#else
static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
					     struct arm_smmu_device *smmu)
{
	return -ENODEV;
}
#endif

static int arm_smmu_device_dt_probe(struct platform_device *pdev,
				    struct arm_smmu_device *smmu)
{
	struct device *dev = &pdev->dev;
	u32 cells;
	int ret = -EINVAL;

	if (of_property_read_u32(dev->of_node, "#iommu-cells", &cells))
		dev_err(dev, "missing #iommu-cells property\n");
	else if (cells != 1)
		dev_err(dev, "invalid #iommu-cells value (%d)\n", cells);
	else
		ret = 0;

	parse_driver_options(smmu);

	if (of_dma_is_coherent(dev->of_node))
		smmu->features |= ARM_SMMU_FEAT_COHERENCY;

	return ret;
}
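
/*
 * Common probe path for both firmware interfaces. If firmware probing
 * fails, the SMMU is still reset but left in bypass (subject to the
 * disable_bypass module option) rather than enabled for translation.
 */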
static int arm_smmu_device_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct resource *res;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	bool bypass;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	/* Base address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (resource_size(res) + 1 < SZ_128K) {
		dev_err(dev, "MMIO region too small (%pr)\n", res);
		return -EINVAL;
	}

	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);

	/* Interrupt lines */
	irq = platform_get_irq_byname(pdev, "eventq");
	if (irq > 0)
		smmu->evtq.q.irq = irq;

	irq = platform_get_irq_byname(pdev, "priq");
	if (irq > 0)
		smmu->priq.q.irq = irq;

	irq = platform_get_irq_byname(pdev, "cmdq-sync");
	if (irq > 0)
		smmu->cmdq.q.irq = irq;

	irq = platform_get_irq_byname(pdev, "gerror");
	if (irq > 0)
		smmu->gerr_irq = irq;

	if (dev->of_node) {
		ret = arm_smmu_device_dt_probe(pdev, smmu);
	} else {
		ret = arm_smmu_device_acpi_probe(pdev, smmu);
		if (ret == -ENODEV)
			return ret;
	}

	/* Set bypass mode according to firmware probing result */
	bypass = !!ret;

	/* Probe the h/w */
	ret = arm_smmu_device_hw_probe(smmu);
	if (ret)
		return ret;

	/* Initialise in-memory data structures */
	ret = arm_smmu_init_structures(smmu);
	if (ret)
		return ret;

	/* Record our private device structure */
	platform_set_drvdata(pdev, smmu);

	/* Reset the device */
	ret = arm_smmu_device_reset(smmu, bypass);
	if (ret)
		return ret;

	/* And we're up. Go go go! */
	iommu_register_instance(dev->fwnode, &arm_smmu_ops);
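
	/*
	 * Hook our IOMMU ops into each bus type. bus_set_iommu() applies
	 * system-wide, so each bus only needs setting up once, however
	 * many SMMU instances are probed.
	 */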
#ifdef CONFIG_PCI
	if (pci_bus_type.iommu_ops != &arm_smmu_ops) {
		pci_request_acs();
		ret = bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
		if (ret)
			return ret;
	}
#endif
#ifdef CONFIG_ARM_AMBA
	if (amba_bustype.iommu_ops != &arm_smmu_ops) {
		ret = bus_set_iommu(&amba_bustype, &arm_smmu_ops);
		if (ret)
			return ret;
	}
#endif
	if (platform_bus_type.iommu_ops != &arm_smmu_ops) {
		ret = bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
		if (ret)
			return ret;
	}
	return 0;
}

static int arm_smmu_device_remove(struct platform_device *pdev)
{
	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);

	arm_smmu_device_disable(smmu);

	return 0;
}
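
/*
 * An illustrative (not authoritative) devicetree node matching this
 * driver; addresses and interrupt numbers are placeholders. The 128K
 * "reg" size and the four interrupt names correspond to the checks in
 * arm_smmu_device_probe() above:
 *
 *	smmu@2b400000 {
 *		compatible = "arm,smmu-v3";
 *		reg = <0x0 0x2b400000 0x0 0x20000>;
 *		interrupts = <GIC_SPI 74 IRQ_TYPE_EDGE_RISING>,
 *			     <GIC_SPI 75 IRQ_TYPE_EDGE_RISING>,
 *			     <GIC_SPI 77 IRQ_TYPE_EDGE_RISING>,
 *			     <GIC_SPI 79 IRQ_TYPE_EDGE_RISING>;
 *		interrupt-names = "eventq", "priq", "cmdq-sync", "gerror";
 *		#iommu-cells = <1>;
 *	};
 */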
static struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v3", },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);

static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name		= "arm-smmu-v3",
		.of_match_table	= of_match_ptr(arm_smmu_of_match),
	},
	.probe	= arm_smmu_device_probe,
	.remove	= arm_smmu_device_remove,
};

static int __init arm_smmu_init(void)
{
	static bool registered;
	int ret = 0;

	if (!registered) {
		ret = platform_driver_register(&arm_smmu_driver);
		registered = !ret;
	}
	return ret;
}

static void __exit arm_smmu_exit(void)
{
	return platform_driver_unregister(&arm_smmu_driver);
}

subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);
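
/*
 * The OF and ACPI hooks below initialise the driver early (well before
 * device_initcall time), so the SMMU is ready before the devices it
 * translates for begin to probe.
 */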
static int __init arm_smmu_of_init(struct device_node *np)
{
	int ret = arm_smmu_init();

	if (ret)
		return ret;

	if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root))
		return -ENODEV;

	return 0;
}
IOMMU_OF_DECLARE(arm_smmuv3, "arm,smmu-v3", arm_smmu_of_init);

#ifdef CONFIG_ACPI
static int __init acpi_smmu_v3_init(struct acpi_table_header *table)
{
	if (iort_node_match(ACPI_IORT_NODE_SMMU_V3))
		return arm_smmu_init();

	return 0;
}
IORT_ACPI_DECLARE(arm_smmu_v3, ACPI_SIG_IORT, acpi_smmu_v3_init);
#endif

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");