/*
 * IOMMU API for ARM architected SMMUv3 implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) 2015 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver is powered by bad coffee and bombay mix.
 */
#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/platform_device.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"
#define ARM_SMMU_IDR0			0x0
#define IDR0_ST_LVL_SHIFT		27
#define IDR0_ST_LVL_MASK		0x3
#define IDR0_ST_LVL_2LVL		(1 << IDR0_ST_LVL_SHIFT)
#define IDR0_STALL_MODEL_SHIFT		24
#define IDR0_STALL_MODEL_MASK		0x3
#define IDR0_STALL_MODEL_STALL		(0 << IDR0_STALL_MODEL_SHIFT)
#define IDR0_STALL_MODEL_FORCE		(2 << IDR0_STALL_MODEL_SHIFT)
#define IDR0_TTENDIAN_SHIFT		21
#define IDR0_TTENDIAN_MASK		0x3
#define IDR0_TTENDIAN_LE		(2 << IDR0_TTENDIAN_SHIFT)
#define IDR0_TTENDIAN_BE		(3 << IDR0_TTENDIAN_SHIFT)
#define IDR0_TTENDIAN_MIXED		(0 << IDR0_TTENDIAN_SHIFT)
#define IDR0_CD2L			(1 << 19)
#define IDR0_VMID16			(1 << 18)
#define IDR0_PRI			(1 << 16)
#define IDR0_SEV			(1 << 14)
#define IDR0_MSI			(1 << 13)
#define IDR0_ASID16			(1 << 12)
#define IDR0_ATS			(1 << 10)
#define IDR0_HYP			(1 << 9)
#define IDR0_COHACC			(1 << 4)
#define IDR0_TTF_SHIFT			2
#define IDR0_TTF_MASK			0x3
#define IDR0_TTF_AARCH64		(2 << IDR0_TTF_SHIFT)
#define IDR0_TTF_AARCH32_64		(3 << IDR0_TTF_SHIFT)
#define IDR0_S1P			(1 << 1)
#define IDR0_S2P			(1 << 0)

#define ARM_SMMU_IDR1			0x4
#define IDR1_TABLES_PRESET		(1 << 30)
#define IDR1_QUEUES_PRESET		(1 << 29)
#define IDR1_REL			(1 << 28)
#define IDR1_CMDQ_SHIFT			21
#define IDR1_CMDQ_MASK			0x1f
#define IDR1_EVTQ_SHIFT			16
#define IDR1_EVTQ_MASK			0x1f
#define IDR1_PRIQ_SHIFT			11
#define IDR1_PRIQ_MASK			0x1f
#define IDR1_SSID_SHIFT			6
#define IDR1_SSID_MASK			0x1f
#define IDR1_SID_SHIFT			0
#define IDR1_SID_MASK			0x3f

#define ARM_SMMU_IDR5			0x14
#define IDR5_STALL_MAX_SHIFT		16
#define IDR5_STALL_MAX_MASK		0xffff
#define IDR5_GRAN64K			(1 << 6)
#define IDR5_GRAN16K			(1 << 5)
#define IDR5_GRAN4K			(1 << 4)
#define IDR5_OAS_SHIFT			0
#define IDR5_OAS_MASK			0x7
#define IDR5_OAS_32_BIT			(0 << IDR5_OAS_SHIFT)
#define IDR5_OAS_36_BIT			(1 << IDR5_OAS_SHIFT)
#define IDR5_OAS_40_BIT			(2 << IDR5_OAS_SHIFT)
#define IDR5_OAS_42_BIT			(3 << IDR5_OAS_SHIFT)
#define IDR5_OAS_44_BIT			(4 << IDR5_OAS_SHIFT)
#define IDR5_OAS_48_BIT			(5 << IDR5_OAS_SHIFT)

#define ARM_SMMU_CR0			0x20
#define CR0_CMDQEN			(1 << 3)
#define CR0_EVTQEN			(1 << 2)
#define CR0_PRIQEN			(1 << 1)
#define CR0_SMMUEN			(1 << 0)

#define ARM_SMMU_CR0ACK			0x24
#define ARM_SMMU_CR1			0x28
#define CR1_SH_NSH			0
#define CR1_SH_OSH			2
#define CR1_SH_ISH			3
#define CR1_CACHE_NC			0
#define CR1_CACHE_WB			1
#define CR1_CACHE_WT			2
#define CR1_TABLE_SH_SHIFT		10
#define CR1_TABLE_OC_SHIFT		8
#define CR1_TABLE_IC_SHIFT		6
#define CR1_QUEUE_SH_SHIFT		4
#define CR1_QUEUE_OC_SHIFT		2
#define CR1_QUEUE_IC_SHIFT		0
#define ARM_SMMU_CR2			0x2c
#define CR2_PTM				(1 << 2)
#define CR2_RECINVSID			(1 << 1)
#define CR2_E2H				(1 << 0)

#define ARM_SMMU_GBPA			0x44
#define GBPA_ABORT			(1 << 20)
#define GBPA_UPDATE			(1 << 31)

#define ARM_SMMU_IRQ_CTRL		0x50
#define IRQ_CTRL_EVTQ_IRQEN		(1 << 2)
#define IRQ_CTRL_PRIQ_IRQEN		(1 << 1)
#define IRQ_CTRL_GERROR_IRQEN		(1 << 0)

#define ARM_SMMU_IRQ_CTRLACK		0x54

#define ARM_SMMU_GERROR			0x60
#define GERROR_SFM_ERR			(1 << 8)
#define GERROR_MSI_GERROR_ABT_ERR	(1 << 7)
#define GERROR_MSI_PRIQ_ABT_ERR		(1 << 6)
#define GERROR_MSI_EVTQ_ABT_ERR		(1 << 5)
#define GERROR_MSI_CMDQ_ABT_ERR		(1 << 4)
#define GERROR_PRIQ_ABT_ERR		(1 << 3)
#define GERROR_EVTQ_ABT_ERR		(1 << 2)
#define GERROR_CMDQ_ERR			(1 << 0)
#define GERROR_ERR_MASK			0xfd

#define ARM_SMMU_GERRORN		0x64

#define ARM_SMMU_GERROR_IRQ_CFG0	0x68
#define ARM_SMMU_GERROR_IRQ_CFG1	0x70
#define ARM_SMMU_GERROR_IRQ_CFG2	0x74

#define ARM_SMMU_STRTAB_BASE		0x80
#define STRTAB_BASE_RA			(1UL << 62)
#define STRTAB_BASE_ADDR_SHIFT		6
#define STRTAB_BASE_ADDR_MASK		0x3ffffffffffUL

#define ARM_SMMU_STRTAB_BASE_CFG	0x88
#define STRTAB_BASE_CFG_LOG2SIZE_SHIFT	0
#define STRTAB_BASE_CFG_LOG2SIZE_MASK	0x3f
#define STRTAB_BASE_CFG_SPLIT_SHIFT	6
#define STRTAB_BASE_CFG_SPLIT_MASK	0x1f
#define STRTAB_BASE_CFG_FMT_SHIFT	16
#define STRTAB_BASE_CFG_FMT_MASK	0x3
#define STRTAB_BASE_CFG_FMT_LINEAR	(0 << STRTAB_BASE_CFG_FMT_SHIFT)
#define STRTAB_BASE_CFG_FMT_2LVL	(1 << STRTAB_BASE_CFG_FMT_SHIFT)

#define ARM_SMMU_CMDQ_BASE		0x90
#define ARM_SMMU_CMDQ_PROD		0x98
#define ARM_SMMU_CMDQ_CONS		0x9c

#define ARM_SMMU_EVTQ_BASE		0xa0
#define ARM_SMMU_EVTQ_PROD		0x100a8
#define ARM_SMMU_EVTQ_CONS		0x100ac
#define ARM_SMMU_EVTQ_IRQ_CFG0		0xb0
#define ARM_SMMU_EVTQ_IRQ_CFG1		0xb8
#define ARM_SMMU_EVTQ_IRQ_CFG2		0xbc

#define ARM_SMMU_PRIQ_BASE		0xc0
#define ARM_SMMU_PRIQ_PROD		0x100c8
#define ARM_SMMU_PRIQ_CONS		0x100cc
#define ARM_SMMU_PRIQ_IRQ_CFG0		0xd0
#define ARM_SMMU_PRIQ_IRQ_CFG1		0xd8
#define ARM_SMMU_PRIQ_IRQ_CFG2		0xdc

/* Common MSI config fields */
#define MSI_CFG0_ADDR_SHIFT		2
#define MSI_CFG0_ADDR_MASK		0x3fffffffffffUL
#define MSI_CFG2_SH_SHIFT		4
#define MSI_CFG2_SH_NSH			(0UL << MSI_CFG2_SH_SHIFT)
#define MSI_CFG2_SH_OSH			(2UL << MSI_CFG2_SH_SHIFT)
#define MSI_CFG2_SH_ISH			(3UL << MSI_CFG2_SH_SHIFT)
#define MSI_CFG2_MEMATTR_SHIFT		0
#define MSI_CFG2_MEMATTR_DEVICE_nGnRE	(0x1 << MSI_CFG2_MEMATTR_SHIFT)
#define Q_IDX(q, p)			((p) & ((1 << (q)->max_n_shift) - 1))
#define Q_WRP(q, p)			((p) & (1 << (q)->max_n_shift))
#define Q_OVERFLOW_FLAG			(1 << 31)
#define Q_OVF(q, p)			((p) & Q_OVERFLOW_FLAG)
#define Q_ENT(q, p)			((q)->base +			\
					 Q_IDX(q, p) * (q)->ent_dwords)

#define Q_BASE_RWA			(1UL << 62)
#define Q_BASE_ADDR_SHIFT		5
#define Q_BASE_ADDR_MASK		0xfffffffffffUL
#define Q_BASE_LOG2SIZE_SHIFT		0
#define Q_BASE_LOG2SIZE_MASK		0x1fUL
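/*
 * Worked example (illustrative only, not from the original source): with
 * max_n_shift == 8, a PROD/CONS value of 0x105 decodes as index 0x05
 * (bits [7:0]) with the wrap flag set (bit 8); bit 31 is the overflow
 * flag. Full/empty are then distinguished by the wrap bits alone: equal
 * indices with equal wrap bits mean the queue is empty, equal indices
 * with differing wrap bits mean it is full.
 */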
/*
 * Stream table.
 *
 * Linear: Enough to cover 1 << IDR1.SIDSIZE entries
 * 2lvl: 128k L1 entries,
 *       256 lazy entries per table (each table covers a PCI bus)
 */
#define STRTAB_L1_SZ_SHIFT		20
#define STRTAB_SPLIT			8

#define STRTAB_L1_DESC_DWORDS		1
#define STRTAB_L1_DESC_SPAN_SHIFT	0
#define STRTAB_L1_DESC_SPAN_MASK	0x1fUL
#define STRTAB_L1_DESC_L2PTR_SHIFT	6
#define STRTAB_L1_DESC_L2PTR_MASK	0x3ffffffffffUL
#define STRTAB_STE_DWORDS		8
#define STRTAB_STE_0_V			(1UL << 0)
#define STRTAB_STE_0_CFG_SHIFT		1
#define STRTAB_STE_0_CFG_MASK		0x7UL
#define STRTAB_STE_0_CFG_ABORT		(0UL << STRTAB_STE_0_CFG_SHIFT)
#define STRTAB_STE_0_CFG_BYPASS		(4UL << STRTAB_STE_0_CFG_SHIFT)
#define STRTAB_STE_0_CFG_S1_TRANS	(5UL << STRTAB_STE_0_CFG_SHIFT)
#define STRTAB_STE_0_CFG_S2_TRANS	(6UL << STRTAB_STE_0_CFG_SHIFT)

#define STRTAB_STE_0_S1FMT_SHIFT	4
#define STRTAB_STE_0_S1FMT_LINEAR	(0UL << STRTAB_STE_0_S1FMT_SHIFT)
#define STRTAB_STE_0_S1CTXPTR_SHIFT	6
#define STRTAB_STE_0_S1CTXPTR_MASK	0x3ffffffffffUL
#define STRTAB_STE_0_S1CDMAX_SHIFT	59
#define STRTAB_STE_0_S1CDMAX_MASK	0x1fUL

#define STRTAB_STE_1_S1C_CACHE_NC	0UL
#define STRTAB_STE_1_S1C_CACHE_WBRA	1UL
#define STRTAB_STE_1_S1C_CACHE_WT	2UL
#define STRTAB_STE_1_S1C_CACHE_WB	3UL
#define STRTAB_STE_1_S1C_SH_NSH		0UL
#define STRTAB_STE_1_S1C_SH_OSH		2UL
#define STRTAB_STE_1_S1C_SH_ISH		3UL
#define STRTAB_STE_1_S1CIR_SHIFT	2
#define STRTAB_STE_1_S1COR_SHIFT	4
#define STRTAB_STE_1_S1CSH_SHIFT	6

#define STRTAB_STE_1_S1STALLD		(1UL << 27)

#define STRTAB_STE_1_EATS_ABT		0UL
#define STRTAB_STE_1_EATS_TRANS		1UL
#define STRTAB_STE_1_EATS_S1CHK		2UL
#define STRTAB_STE_1_EATS_SHIFT		28

#define STRTAB_STE_1_STRW_NSEL1		0UL
#define STRTAB_STE_1_STRW_EL2		2UL
#define STRTAB_STE_1_STRW_SHIFT		30

#define STRTAB_STE_1_SHCFG_INCOMING	1UL
#define STRTAB_STE_1_SHCFG_SHIFT	44

#define STRTAB_STE_2_S2VMID_SHIFT	0
#define STRTAB_STE_2_S2VMID_MASK	0xffffUL
#define STRTAB_STE_2_VTCR_SHIFT		32
#define STRTAB_STE_2_VTCR_MASK		0x7ffffUL
#define STRTAB_STE_2_S2AA64		(1UL << 51)
#define STRTAB_STE_2_S2ENDI		(1UL << 52)
#define STRTAB_STE_2_S2PTW		(1UL << 54)
#define STRTAB_STE_2_S2R		(1UL << 58)

#define STRTAB_STE_3_S2TTB_SHIFT	4
#define STRTAB_STE_3_S2TTB_MASK		0xfffffffffffUL

/* Context descriptor (stage-1 only) */
#define CTXDESC_CD_DWORDS		8
#define CTXDESC_CD_0_TCR_T0SZ_SHIFT	0
#define ARM64_TCR_T0SZ_SHIFT		0
#define ARM64_TCR_T0SZ_MASK		0x1fUL
#define CTXDESC_CD_0_TCR_TG0_SHIFT	6
#define ARM64_TCR_TG0_SHIFT		14
#define ARM64_TCR_TG0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_IRGN0_SHIFT	8
#define ARM64_TCR_IRGN0_SHIFT		8
#define ARM64_TCR_IRGN0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_ORGN0_SHIFT	10
#define ARM64_TCR_ORGN0_SHIFT		10
#define ARM64_TCR_ORGN0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_SH0_SHIFT	12
#define ARM64_TCR_SH0_SHIFT		12
#define ARM64_TCR_SH0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_EPD0_SHIFT	14
#define ARM64_TCR_EPD0_SHIFT		7
#define ARM64_TCR_EPD0_MASK		0x1UL
#define CTXDESC_CD_0_TCR_EPD1_SHIFT	30
#define ARM64_TCR_EPD1_SHIFT		23
#define ARM64_TCR_EPD1_MASK		0x1UL

#define CTXDESC_CD_0_ENDI		(1UL << 15)
#define CTXDESC_CD_0_V			(1UL << 31)

#define CTXDESC_CD_0_TCR_IPS_SHIFT	32
#define ARM64_TCR_IPS_SHIFT		32
#define ARM64_TCR_IPS_MASK		0x7UL
#define CTXDESC_CD_0_TCR_TBI0_SHIFT	38
#define ARM64_TCR_TBI0_SHIFT		37
#define ARM64_TCR_TBI0_MASK		0x1UL

#define CTXDESC_CD_0_AA64		(1UL << 41)
#define CTXDESC_CD_0_R			(1UL << 45)
#define CTXDESC_CD_0_A			(1UL << 46)
#define CTXDESC_CD_0_ASET_SHIFT		47
#define CTXDESC_CD_0_ASET_SHARED	(0UL << CTXDESC_CD_0_ASET_SHIFT)
#define CTXDESC_CD_0_ASET_PRIVATE	(1UL << CTXDESC_CD_0_ASET_SHIFT)
#define CTXDESC_CD_0_ASID_SHIFT		48
#define CTXDESC_CD_0_ASID_MASK		0xffffUL

#define CTXDESC_CD_1_TTB0_SHIFT		4
#define CTXDESC_CD_1_TTB0_MASK		0xfffffffffffUL

#define CTXDESC_CD_3_MAIR_SHIFT		0

/* Convert between AArch64 (CPU) TCR format and SMMU CD format */
#define ARM_SMMU_TCR2CD(tcr, fld)					\
	(((tcr) >> ARM64_TCR_##fld##_SHIFT & ARM64_TCR_##fld##_MASK)	\
	 << CTXDESC_CD_0_TCR_##fld##_SHIFT)
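/*
 * Example (illustrative only, not from the original source):
 * ARM_SMMU_TCR2CD(tcr, TG0) expands to (((tcr) >> 14 & 0x3) << 6),
 * i.e. it extracts TCR_EL1.TG0 from its CPU position (bits [15:14])
 * and deposits it at the position the SMMU context descriptor expects
 * (bits [7:6]).
 */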
/* Command queue */
#define CMDQ_ENT_DWORDS			2
#define CMDQ_MAX_SZ_SHIFT		8

#define CMDQ_ERR_SHIFT			24
#define CMDQ_ERR_MASK			0x7f
#define CMDQ_ERR_CERROR_NONE_IDX	0
#define CMDQ_ERR_CERROR_ILL_IDX		1
#define CMDQ_ERR_CERROR_ABT_IDX		2

#define CMDQ_0_OP_SHIFT			0
#define CMDQ_0_OP_MASK			0xffUL
#define CMDQ_0_SSV			(1UL << 11)

#define CMDQ_PREFETCH_0_SID_SHIFT	32
#define CMDQ_PREFETCH_1_SIZE_SHIFT	0
#define CMDQ_PREFETCH_1_ADDR_MASK	~0xfffUL

#define CMDQ_CFGI_0_SID_SHIFT		32
#define CMDQ_CFGI_0_SID_MASK		0xffffffffUL
#define CMDQ_CFGI_1_LEAF		(1UL << 0)
#define CMDQ_CFGI_1_RANGE_SHIFT		0
#define CMDQ_CFGI_1_RANGE_MASK		0x1fUL

#define CMDQ_TLBI_0_VMID_SHIFT		32
#define CMDQ_TLBI_0_ASID_SHIFT		48
#define CMDQ_TLBI_1_LEAF		(1UL << 0)
#define CMDQ_TLBI_1_VA_MASK		~0xfffUL
#define CMDQ_TLBI_1_IPA_MASK		0xfffffffff000UL

#define CMDQ_PRI_0_SSID_SHIFT		12
#define CMDQ_PRI_0_SSID_MASK		0xfffffUL
#define CMDQ_PRI_0_SID_SHIFT		32
#define CMDQ_PRI_0_SID_MASK		0xffffffffUL
#define CMDQ_PRI_1_GRPID_SHIFT		0
#define CMDQ_PRI_1_GRPID_MASK		0x1ffUL
#define CMDQ_PRI_1_RESP_SHIFT		12
#define CMDQ_PRI_1_RESP_DENY		(0UL << CMDQ_PRI_1_RESP_SHIFT)
#define CMDQ_PRI_1_RESP_FAIL		(1UL << CMDQ_PRI_1_RESP_SHIFT)
#define CMDQ_PRI_1_RESP_SUCC		(2UL << CMDQ_PRI_1_RESP_SHIFT)

#define CMDQ_SYNC_0_CS_SHIFT		12
#define CMDQ_SYNC_0_CS_NONE		(0UL << CMDQ_SYNC_0_CS_SHIFT)
#define CMDQ_SYNC_0_CS_SEV		(2UL << CMDQ_SYNC_0_CS_SHIFT)

/* Event queue */
#define EVTQ_ENT_DWORDS			4
#define EVTQ_MAX_SZ_SHIFT		7

#define EVTQ_0_ID_SHIFT			0
#define EVTQ_0_ID_MASK			0xffUL

/* PRI queue */
#define PRIQ_ENT_DWORDS			2
#define PRIQ_MAX_SZ_SHIFT		8

#define PRIQ_0_SID_SHIFT		0
#define PRIQ_0_SID_MASK			0xffffffffUL
#define PRIQ_0_SSID_SHIFT		32
#define PRIQ_0_SSID_MASK		0xfffffUL
#define PRIQ_0_PERM_PRIV		(1UL << 58)
#define PRIQ_0_PERM_EXEC		(1UL << 59)
#define PRIQ_0_PERM_READ		(1UL << 60)
#define PRIQ_0_PERM_WRITE		(1UL << 61)
#define PRIQ_0_PRG_LAST			(1UL << 62)
#define PRIQ_0_SSID_V			(1UL << 63)

#define PRIQ_1_PRG_IDX_SHIFT		0
#define PRIQ_1_PRG_IDX_MASK		0x1ffUL
#define PRIQ_1_ADDR_SHIFT		12
#define PRIQ_1_ADDR_MASK		0xfffffffffffffUL

/* High-level queue structures */
#define ARM_SMMU_POLL_TIMEOUT_US	100

#define MSI_IOVA_BASE			0x8000000
#define MSI_IOVA_LENGTH			0x100000
static bool disable_bypass;
module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
enum pri_resp {
	PRI_RESP_DENY,
	PRI_RESP_FAIL,
	PRI_RESP_SUCC,
};

enum arm_smmu_msi_index {
	EVTQ_MSI_INDEX,
	GERROR_MSI_INDEX,
	PRIQ_MSI_INDEX,
	ARM_SMMU_MAX_MSIS,
};

static phys_addr_t arm_smmu_msi_cfg[ARM_SMMU_MAX_MSIS][3] = {
	[EVTQ_MSI_INDEX] = {
		ARM_SMMU_EVTQ_IRQ_CFG0,
		ARM_SMMU_EVTQ_IRQ_CFG1,
		ARM_SMMU_EVTQ_IRQ_CFG2,
	},
	[GERROR_MSI_INDEX] = {
		ARM_SMMU_GERROR_IRQ_CFG0,
		ARM_SMMU_GERROR_IRQ_CFG1,
		ARM_SMMU_GERROR_IRQ_CFG2,
	},
	[PRIQ_MSI_INDEX] = {
		ARM_SMMU_PRIQ_IRQ_CFG0,
		ARM_SMMU_PRIQ_IRQ_CFG1,
		ARM_SMMU_PRIQ_IRQ_CFG2,
	},
};
struct arm_smmu_cmdq_ent {
	/* Common fields */
	u8				opcode;
	bool				substream_valid;

	/* Command-specific fields */
	union {
		#define CMDQ_OP_PREFETCH_CFG	0x1
		struct {
			u32			sid;
			u8			size;
			u64			addr;
		} prefetch;

		#define CMDQ_OP_CFGI_STE	0x3
		#define CMDQ_OP_CFGI_ALL	0x4
		struct {
			u32			sid;
			union {
				bool		leaf;
				u8		span;
			};
		} cfgi;

		#define CMDQ_OP_TLBI_NH_ASID	0x11
		#define CMDQ_OP_TLBI_NH_VA	0x12
		#define CMDQ_OP_TLBI_EL2_ALL	0x20
		#define CMDQ_OP_TLBI_S12_VMALL	0x28
		#define CMDQ_OP_TLBI_S2_IPA	0x2a
		#define CMDQ_OP_TLBI_NSNH_ALL	0x30
		struct {
			u16			asid;
			u16			vmid;
			bool			leaf;
			u64			addr;
		} tlbi;

		#define CMDQ_OP_PRI_RESP	0x41
		struct {
			u32			sid;
			u32			ssid;
			u16			grpid;
			enum pri_resp		resp;
		} pri;

		#define CMDQ_OP_CMD_SYNC	0x46
	};
};
struct arm_smmu_queue {
	int				irq; /* Wired interrupt */

	__le64				*base;
	dma_addr_t			base_dma;
	u64				q_base;

	size_t				ent_dwords;
	u32				max_n_shift;
	u32				prod;
	u32				cons;

	u32 __iomem			*prod_reg;
	u32 __iomem			*cons_reg;
};

struct arm_smmu_cmdq {
	struct arm_smmu_queue		q;
	spinlock_t			lock;
};

struct arm_smmu_evtq {
	struct arm_smmu_queue		q;
	u32				max_stalls;
};

struct arm_smmu_priq {
	struct arm_smmu_queue		q;
};
/* High-level stream table and context descriptor structures */
struct arm_smmu_strtab_l1_desc {
	u8				span;

	__le64				*l2ptr;
	dma_addr_t			l2ptr_dma;
};

struct arm_smmu_s1_cfg {
	__le64				*cdptr;
	dma_addr_t			cdptr_dma;

	struct arm_smmu_ctx_desc {
		u16	asid;
		u64	ttbr;
		u64	tcr;
		u64	mair;
	}				cd;
};

struct arm_smmu_s2_cfg {
	u16				vmid;
	u64				vttbr;
	u64				vtcr;
};

struct arm_smmu_strtab_ent {
	bool				valid;

	bool				bypass;	/* Overrides s1/s2 config */
	struct arm_smmu_s1_cfg		*s1_cfg;
	struct arm_smmu_s2_cfg		*s2_cfg;
};

struct arm_smmu_strtab_cfg {
	__le64				*strtab;
	dma_addr_t			strtab_dma;
	struct arm_smmu_strtab_l1_desc	*l1_desc;
	unsigned int			num_l1_ents;

	u64				strtab_base;
	u32				strtab_base_cfg;
};
/* An SMMUv3 instance */
struct arm_smmu_device {
	struct device			*dev;
	void __iomem			*base;

	u32				features;
#define ARM_SMMU_FEAT_2_LVL_STRTAB	(1 << 0)
#define ARM_SMMU_FEAT_2_LVL_CDTAB	(1 << 1)
#define ARM_SMMU_FEAT_TT_LE		(1 << 2)
#define ARM_SMMU_FEAT_TT_BE		(1 << 3)
#define ARM_SMMU_FEAT_PRI		(1 << 4)
#define ARM_SMMU_FEAT_ATS		(1 << 5)
#define ARM_SMMU_FEAT_SEV		(1 << 6)
#define ARM_SMMU_FEAT_MSI		(1 << 7)
#define ARM_SMMU_FEAT_COHERENCY		(1 << 8)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 9)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 10)
#define ARM_SMMU_FEAT_STALLS		(1 << 11)
#define ARM_SMMU_FEAT_HYP		(1 << 12)

	u32				options;
#define ARM_SMMU_OPT_SKIP_PREFETCH	(1 << 0)

	struct arm_smmu_cmdq		cmdq;
	struct arm_smmu_evtq		evtq;
	struct arm_smmu_priq		priq;

	int				gerr_irq;

	unsigned long			ias; /* IPA */
	unsigned long			oas; /* PA */
	unsigned long			pgsize_bitmap;

#define ARM_SMMU_MAX_ASIDS		(1 << 16)
	unsigned int			asid_bits;
	DECLARE_BITMAP(asid_map, ARM_SMMU_MAX_ASIDS);

#define ARM_SMMU_MAX_VMIDS		(1 << 16)
	unsigned int			vmid_bits;
	DECLARE_BITMAP(vmid_map, ARM_SMMU_MAX_VMIDS);

	unsigned int			ssid_bits;
	unsigned int			sid_bits;

	struct arm_smmu_strtab_cfg	strtab_cfg;
};
/* SMMU private data for each master */
struct arm_smmu_master_data {
	struct arm_smmu_device		*smmu;
	struct arm_smmu_strtab_ent	ste;
};
/* SMMU private data for an IOMMU domain */
enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct mutex			init_mutex; /* Protects smmu pointer */

	struct io_pgtable_ops		*pgtbl_ops;
	spinlock_t			pgtbl_lock;

	enum arm_smmu_domain_stage	stage;
	union {
		struct arm_smmu_s1_cfg	s1_cfg;
		struct arm_smmu_s2_cfg	s2_cfg;
	};

	struct iommu_domain		domain;
};
struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" },
	{ 0, NULL},
};

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}
/* Low-level queue manipulation functions */
static bool queue_full(struct arm_smmu_queue *q)
{
	return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
	       Q_WRP(q, q->prod) != Q_WRP(q, q->cons);
}

static bool queue_empty(struct arm_smmu_queue *q)
{
	return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
	       Q_WRP(q, q->prod) == Q_WRP(q, q->cons);
}

static void queue_sync_cons(struct arm_smmu_queue *q)
{
	q->cons = readl_relaxed(q->cons_reg);
}

static void queue_inc_cons(struct arm_smmu_queue *q)
{
	u32 cons = (Q_WRP(q, q->cons) | Q_IDX(q, q->cons)) + 1;

	q->cons = Q_OVF(q, q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons);
	writel(q->cons, q->cons_reg);
}

static int queue_sync_prod(struct arm_smmu_queue *q)
{
	int ret = 0;
	u32 prod = readl_relaxed(q->prod_reg);

	if (Q_OVF(q, prod) != Q_OVF(q, q->prod))
		ret = -EOVERFLOW;

	q->prod = prod;
	return ret;
}

static void queue_inc_prod(struct arm_smmu_queue *q)
{
	u32 prod = (Q_WRP(q, q->prod) | Q_IDX(q, q->prod)) + 1;

	q->prod = Q_OVF(q, q->prod) | Q_WRP(q, prod) | Q_IDX(q, prod);
	writel(q->prod, q->prod_reg);
}
/*
 * Wait for the SMMU to consume items. If drain is true, wait until the queue
 * is empty. Otherwise, wait until there is at least one free slot.
 */
static int queue_poll_cons(struct arm_smmu_queue *q, bool drain, bool wfe)
{
	ktime_t timeout = ktime_add_us(ktime_get(), ARM_SMMU_POLL_TIMEOUT_US);

	while (queue_sync_cons(q), (drain ? !queue_empty(q) : queue_full(q))) {
		if (ktime_compare(ktime_get(), timeout) > 0)
			return -ETIMEDOUT;

		if (wfe) {
			wfe();
		} else {
			cpu_relax();
			udelay(1);
		}
	}

	return 0;
}
static void queue_write(__le64 *dst, u64 *src, size_t n_dwords)
{
	int i;

	for (i = 0; i < n_dwords; ++i)
		*dst++ = cpu_to_le64(*src++);
}

static int queue_insert_raw(struct arm_smmu_queue *q, u64 *ent)
{
	if (queue_full(q))
		return -ENOSPC;

	queue_write(Q_ENT(q, q->prod), ent, q->ent_dwords);
	queue_inc_prod(q);
	return 0;
}

static void queue_read(__le64 *dst, u64 *src, size_t n_dwords)
{
	int i;

	for (i = 0; i < n_dwords; ++i)
		*dst++ = le64_to_cpu(*src++);
}

static int queue_remove_raw(struct arm_smmu_queue *q, u64 *ent)
{
	if (queue_empty(q))
		return -EAGAIN;

	queue_read(ent, Q_ENT(q, q->cons), q->ent_dwords);
	queue_inc_cons(q);
	return 0;
}
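/*
 * Usage sketch (illustrative only, not part of the driver): a producer
 * retries on a full queue while a consumer drains until empty; "ent",
 * "wfe" and "process" here are hypothetical:
 *
 *	while (queue_insert_raw(q, ent) == -ENOSPC)
 *		queue_poll_cons(q, false, wfe);	// wait for a free slot
 *
 *	while (!queue_remove_raw(q, ent))
 *		process(ent);			// hypothetical handler
 */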
/* High-level queue accessors */
static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
{
	memset(cmd, 0, CMDQ_ENT_DWORDS << 3);
	cmd[0] |= (ent->opcode & CMDQ_0_OP_MASK) << CMDQ_0_OP_SHIFT;

	switch (ent->opcode) {
	case CMDQ_OP_TLBI_EL2_ALL:
	case CMDQ_OP_TLBI_NSNH_ALL:
		break;
	case CMDQ_OP_PREFETCH_CFG:
		cmd[0] |= (u64)ent->prefetch.sid << CMDQ_PREFETCH_0_SID_SHIFT;
		cmd[1] |= ent->prefetch.size << CMDQ_PREFETCH_1_SIZE_SHIFT;
		cmd[1] |= ent->prefetch.addr & CMDQ_PREFETCH_1_ADDR_MASK;
		break;
	case CMDQ_OP_CFGI_STE:
		cmd[0] |= (u64)ent->cfgi.sid << CMDQ_CFGI_0_SID_SHIFT;
		cmd[1] |= ent->cfgi.leaf ? CMDQ_CFGI_1_LEAF : 0;
		break;
	case CMDQ_OP_CFGI_ALL:
		/* Cover the entire SID range */
		cmd[1] |= CMDQ_CFGI_1_RANGE_MASK << CMDQ_CFGI_1_RANGE_SHIFT;
		break;
	case CMDQ_OP_TLBI_NH_VA:
		cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT;
		cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0;
		cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK;
		break;
	case CMDQ_OP_TLBI_S2_IPA:
		cmd[0] |= (u64)ent->tlbi.vmid << CMDQ_TLBI_0_VMID_SHIFT;
		cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0;
		cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_IPA_MASK;
		break;
	case CMDQ_OP_TLBI_NH_ASID:
		cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT;
		/* Fallthrough */
	case CMDQ_OP_TLBI_S12_VMALL:
		cmd[0] |= (u64)ent->tlbi.vmid << CMDQ_TLBI_0_VMID_SHIFT;
		break;
	case CMDQ_OP_PRI_RESP:
		cmd[0] |= ent->substream_valid ? CMDQ_0_SSV : 0;
		cmd[0] |= ent->pri.ssid << CMDQ_PRI_0_SSID_SHIFT;
		cmd[0] |= (u64)ent->pri.sid << CMDQ_PRI_0_SID_SHIFT;
		cmd[1] |= ent->pri.grpid << CMDQ_PRI_1_GRPID_SHIFT;
		switch (ent->pri.resp) {
		case PRI_RESP_DENY:
			cmd[1] |= CMDQ_PRI_1_RESP_DENY;
			break;
		case PRI_RESP_FAIL:
			cmd[1] |= CMDQ_PRI_1_RESP_FAIL;
			break;
		case PRI_RESP_SUCC:
			cmd[1] |= CMDQ_PRI_1_RESP_SUCC;
			break;
		default:
			return -EINVAL;
		}
		break;
	case CMDQ_OP_CMD_SYNC:
		cmd[0] |= CMDQ_SYNC_0_CS_SEV;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}
static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
{
	static const char *cerror_str[] = {
		[CMDQ_ERR_CERROR_NONE_IDX]	= "No error",
		[CMDQ_ERR_CERROR_ILL_IDX]	= "Illegal command",
		[CMDQ_ERR_CERROR_ABT_IDX]	= "Abort on command fetch",
	};

	int i;
	u64 cmd[CMDQ_ENT_DWORDS];
	struct arm_smmu_queue *q = &smmu->cmdq.q;
	u32 cons = readl_relaxed(q->cons_reg);
	u32 idx = cons >> CMDQ_ERR_SHIFT & CMDQ_ERR_MASK;
	struct arm_smmu_cmdq_ent cmd_sync = {
		.opcode = CMDQ_OP_CMD_SYNC,
	};

	dev_err(smmu->dev, "CMDQ error (cons 0x%08x): %s\n", cons,
		idx < ARRAY_SIZE(cerror_str) ? cerror_str[idx] : "Unknown");

	switch (idx) {
	case CMDQ_ERR_CERROR_ABT_IDX:
		dev_err(smmu->dev, "retrying command fetch\n");
	case CMDQ_ERR_CERROR_NONE_IDX:
		return;
	case CMDQ_ERR_CERROR_ILL_IDX:
		/* Fallthrough */
	default:
		break;
	}

	/*
	 * We may have concurrent producers, so we need to be careful
	 * not to touch any of the shadow cmdq state.
	 */
	queue_read(cmd, Q_ENT(q, cons), q->ent_dwords);
	dev_err(smmu->dev, "skipping command in error state:\n");
	for (i = 0; i < ARRAY_SIZE(cmd); ++i)
		dev_err(smmu->dev, "\t0x%016llx\n", (unsigned long long)cmd[i]);

	/* Convert the erroneous command into a CMD_SYNC */
	if (arm_smmu_cmdq_build_cmd(cmd, &cmd_sync)) {
		dev_err(smmu->dev, "failed to convert to CMD_SYNC\n");
		return;
	}

	queue_write(Q_ENT(q, cons), cmd, q->ent_dwords);
}
static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
				    struct arm_smmu_cmdq_ent *ent)
{
	u64 cmd[CMDQ_ENT_DWORDS];
	unsigned long flags;
	bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
	struct arm_smmu_queue *q = &smmu->cmdq.q;

	if (arm_smmu_cmdq_build_cmd(cmd, ent)) {
		dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n",
			 ent->opcode);
		return;
	}

	spin_lock_irqsave(&smmu->cmdq.lock, flags);
	while (queue_insert_raw(q, cmd) == -ENOSPC) {
		if (queue_poll_cons(q, false, wfe))
			dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
	}

	if (ent->opcode == CMDQ_OP_CMD_SYNC && queue_poll_cons(q, true, wfe))
		dev_err_ratelimited(smmu->dev, "CMD_SYNC timeout\n");
	spin_unlock_irqrestore(&smmu->cmdq.lock, flags);
}
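/*
 * Example (illustrative only, not from the original source): invalidating
 * a single stage-1 VA and waiting for completion uses the same two-step
 * pattern seen throughout this driver; "asid" and "iova" are hypothetical
 * values:
 *
 *	struct arm_smmu_cmdq_ent cmd = {
 *		.opcode	= CMDQ_OP_TLBI_NH_VA,
 *		.tlbi	= { .asid = asid, .addr = iova, .leaf = true },
 *	};
 *
 *	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
 *	cmd.opcode = CMDQ_OP_CMD_SYNC;
 *	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
 */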
/* Context descriptor manipulation functions */
static u64 arm_smmu_cpu_tcr_to_cd(u64 tcr)
{
	u64 val = 0;

	/* Repack the TCR. Just care about TTBR0 for now */
	val |= ARM_SMMU_TCR2CD(tcr, T0SZ);
	val |= ARM_SMMU_TCR2CD(tcr, TG0);
	val |= ARM_SMMU_TCR2CD(tcr, IRGN0);
	val |= ARM_SMMU_TCR2CD(tcr, ORGN0);
	val |= ARM_SMMU_TCR2CD(tcr, SH0);
	val |= ARM_SMMU_TCR2CD(tcr, EPD0);
	val |= ARM_SMMU_TCR2CD(tcr, EPD1);
	val |= ARM_SMMU_TCR2CD(tcr, IPS);
	val |= ARM_SMMU_TCR2CD(tcr, TBI0);

	return val;
}
static void arm_smmu_write_ctx_desc(struct arm_smmu_device *smmu,
				    struct arm_smmu_s1_cfg *cfg)
{
	u64 val;

	/*
	 * We don't need to issue any invalidation here, as we'll invalidate
	 * the STE when installing the new entry anyway.
	 */
	val = arm_smmu_cpu_tcr_to_cd(cfg->cd.tcr) |
#ifdef __BIG_ENDIAN
	      CTXDESC_CD_0_ENDI |
#endif
	      CTXDESC_CD_0_R | CTXDESC_CD_0_A | CTXDESC_CD_0_ASET_PRIVATE |
	      CTXDESC_CD_0_AA64 | (u64)cfg->cd.asid << CTXDESC_CD_0_ASID_SHIFT |
	      CTXDESC_CD_0_V;
	cfg->cdptr[0] = cpu_to_le64(val);

	val = cfg->cd.ttbr & CTXDESC_CD_1_TTB0_MASK << CTXDESC_CD_1_TTB0_SHIFT;
	cfg->cdptr[1] = cpu_to_le64(val);

	cfg->cdptr[3] = cpu_to_le64(cfg->cd.mair << CTXDESC_CD_3_MAIR_SHIFT);
}
/* Stream table manipulation functions */
static void
arm_smmu_write_strtab_l1_desc(__le64 *dst, struct arm_smmu_strtab_l1_desc *desc)
{
	u64 val = 0;

	val |= (desc->span & STRTAB_L1_DESC_SPAN_MASK)
		<< STRTAB_L1_DESC_SPAN_SHIFT;
	val |= desc->l2ptr_dma &
	       STRTAB_L1_DESC_L2PTR_MASK << STRTAB_L1_DESC_L2PTR_SHIFT;

	*dst = cpu_to_le64(val);
}
static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid)
{
	struct arm_smmu_cmdq_ent cmd = {
		.opcode	= CMDQ_OP_CFGI_STE,
		.cfgi	= {
			.sid	= sid,
			.leaf	= true,
		},
	};

	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	cmd.opcode = CMDQ_OP_CMD_SYNC;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
}
static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
				      __le64 *dst, struct arm_smmu_strtab_ent *ste)
{
	/*
	 * This is hideously complicated, but we only really care about
	 * three cases at the moment:
	 *
	 * 1. Invalid (all zero) -> bypass (init)
	 * 2. Bypass -> translation (attach)
	 * 3. Translation -> bypass (detach)
	 *
	 * Given that we can't update the STE atomically and the SMMU
	 * doesn't read the thing in a defined order, that leaves us
	 * with the following maintenance requirements:
	 *
	 * 1. Update Config, return (init time STEs aren't live)
	 * 2. Write everything apart from dword 0, sync, write dword 0, sync
	 * 3. Update Config, sync
	 */
	u64 val = le64_to_cpu(dst[0]);
	bool ste_live = false;
	struct arm_smmu_cmdq_ent prefetch_cmd = {
		.opcode		= CMDQ_OP_PREFETCH_CFG,
		.prefetch	= {
			.sid	= sid,
		},
	};

	if (val & STRTAB_STE_0_V) {
		u64 cfg;

		cfg = val & STRTAB_STE_0_CFG_MASK << STRTAB_STE_0_CFG_SHIFT;
		switch (cfg) {
		case STRTAB_STE_0_CFG_BYPASS:
			break;
		case STRTAB_STE_0_CFG_S1_TRANS:
		case STRTAB_STE_0_CFG_S2_TRANS:
			ste_live = true;
			break;
		case STRTAB_STE_0_CFG_ABORT:
			if (disable_bypass)
				break;
		default:
			BUG(); /* STE corruption */
		}
	}

	/* Nuke the existing Config, as we're going to rewrite it */
	val &= ~(STRTAB_STE_0_CFG_MASK << STRTAB_STE_0_CFG_SHIFT);

	if (ste->valid)
		val |= STRTAB_STE_0_V;
	else
		val &= ~STRTAB_STE_0_V;

	if (ste->bypass) {
		val |= disable_bypass ? STRTAB_STE_0_CFG_ABORT
				      : STRTAB_STE_0_CFG_BYPASS;
		dst[0] = cpu_to_le64(val);
		dst[1] = cpu_to_le64(STRTAB_STE_1_SHCFG_INCOMING
			 << STRTAB_STE_1_SHCFG_SHIFT);
		dst[2] = 0; /* Nuke the VMID */
		if (ste_live)
			arm_smmu_sync_ste_for_sid(smmu, sid);
		return;
	}

	if (ste->s1_cfg) {
		BUG_ON(ste_live);
		dst[1] = cpu_to_le64(
			 STRTAB_STE_1_S1C_CACHE_WBRA
			 << STRTAB_STE_1_S1CIR_SHIFT |
			 STRTAB_STE_1_S1C_CACHE_WBRA
			 << STRTAB_STE_1_S1COR_SHIFT |
			 STRTAB_STE_1_S1C_SH_ISH << STRTAB_STE_1_S1CSH_SHIFT |
#ifdef CONFIG_PCI_ATS
			 STRTAB_STE_1_EATS_TRANS << STRTAB_STE_1_EATS_SHIFT |
#endif
			 STRTAB_STE_1_STRW_NSEL1 << STRTAB_STE_1_STRW_SHIFT);

		if (smmu->features & ARM_SMMU_FEAT_STALLS)
			dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);

		val |= (ste->s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK
			<< STRTAB_STE_0_S1CTXPTR_SHIFT) |
			STRTAB_STE_0_CFG_S1_TRANS;
	}

	if (ste->s2_cfg) {
		BUG_ON(ste_live);
		dst[2] = cpu_to_le64(
			 ste->s2_cfg->vmid << STRTAB_STE_2_S2VMID_SHIFT |
			 (ste->s2_cfg->vtcr & STRTAB_STE_2_VTCR_MASK)
			  << STRTAB_STE_2_VTCR_SHIFT |
#ifdef __BIG_ENDIAN
			 STRTAB_STE_2_S2ENDI |
#endif
			 STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2AA64 |
			 STRTAB_STE_2_S2R);

		dst[3] = cpu_to_le64(ste->s2_cfg->vttbr &
			 STRTAB_STE_3_S2TTB_MASK << STRTAB_STE_3_S2TTB_SHIFT);

		val |= STRTAB_STE_0_CFG_S2_TRANS;
	}

	arm_smmu_sync_ste_for_sid(smmu, sid);
	dst[0] = cpu_to_le64(val);
	arm_smmu_sync_ste_for_sid(smmu, sid);

	/* It's likely that we'll want to use the new STE soon */
	if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH))
		arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
}
static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent)
{
	unsigned int i;
	struct arm_smmu_strtab_ent ste = {
		.valid	= true,
		.bypass	= true,
	};

	for (i = 0; i < nent; ++i) {
		arm_smmu_write_strtab_ent(NULL, -1, strtab, &ste);
		strtab += STRTAB_STE_DWORDS;
	}
}
static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
{
	size_t size;
	void *strtab;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
	struct arm_smmu_strtab_l1_desc *desc = &cfg->l1_desc[sid >> STRTAB_SPLIT];

	if (desc->l2ptr)
		return 0;

	size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3);
	strtab = &cfg->strtab[(sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS];

	desc->span = STRTAB_SPLIT + 1;
	desc->l2ptr = dmam_alloc_coherent(smmu->dev, size, &desc->l2ptr_dma,
					  GFP_KERNEL | __GFP_ZERO);
	if (!desc->l2ptr) {
		dev_err(smmu->dev,
			"failed to allocate l2 stream table for SID %u\n",
			sid);
		return -ENOMEM;
	}

	arm_smmu_init_bypass_stes(desc->l2ptr, 1 << STRTAB_SPLIT);
	arm_smmu_write_strtab_l1_desc(strtab, desc);
	return 0;
}
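/*
 * Worked example (illustrative only, not from the original source): with
 * STRTAB_SPLIT == 8, SID 0x1234 selects L1 descriptor 0x12
 * (sid >> STRTAB_SPLIT) and STE 0x34 within that 256-entry L2 table, so
 * each lazily-allocated L2 table covers one PCI bus worth of RIDs, as
 * the "256 lazy entries per table" comment above describes.
 */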
/* IRQ and event handlers */
static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
{
	int i;
	struct arm_smmu_device *smmu = dev;
	struct arm_smmu_queue *q = &smmu->evtq.q;
	u64 evt[EVTQ_ENT_DWORDS];

	do {
		while (!queue_remove_raw(q, evt)) {
			u8 id = evt[0] >> EVTQ_0_ID_SHIFT & EVTQ_0_ID_MASK;

			dev_info(smmu->dev, "event 0x%02x received:\n", id);
			for (i = 0; i < ARRAY_SIZE(evt); ++i)
				dev_info(smmu->dev, "\t0x%016llx\n",
					 (unsigned long long)evt[i]);
		}

		/*
		 * Not much we can do on overflow, so scream and pretend we're
		 * trying harder.
		 */
		if (queue_sync_prod(q) == -EOVERFLOW)
			dev_err(smmu->dev, "EVTQ overflow detected -- events lost\n");
	} while (!queue_empty(q));

	/* Sync our overflow flag, as we believe we're up to speed */
	q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons);
	return IRQ_HANDLED;
}
static void arm_smmu_handle_ppr(struct arm_smmu_device *smmu, u64 *evt)
{
	u32 sid, ssid;
	u16 grpid;
	bool ssv, last;

	sid = evt[0] >> PRIQ_0_SID_SHIFT & PRIQ_0_SID_MASK;
	ssv = evt[0] & PRIQ_0_SSID_V;
	ssid = ssv ? evt[0] >> PRIQ_0_SSID_SHIFT & PRIQ_0_SSID_MASK : 0;
	last = evt[0] & PRIQ_0_PRG_LAST;
	grpid = evt[1] >> PRIQ_1_PRG_IDX_SHIFT & PRIQ_1_PRG_IDX_MASK;

	dev_info(smmu->dev, "unexpected PRI request received:\n");
	dev_info(smmu->dev,
		 "\tsid 0x%08x.0x%05x: [%u%s] %sprivileged %s%s%s access at iova 0x%016llx\n",
		 sid, ssid, grpid, last ? "L" : "",
		 evt[0] & PRIQ_0_PERM_PRIV ? "" : "un",
		 evt[0] & PRIQ_0_PERM_READ ? "R" : "",
		 evt[0] & PRIQ_0_PERM_WRITE ? "W" : "",
		 evt[0] & PRIQ_0_PERM_EXEC ? "X" : "",
		 evt[1] & PRIQ_1_ADDR_MASK << PRIQ_1_ADDR_SHIFT);

	if (last) {
		struct arm_smmu_cmdq_ent cmd = {
			.opcode			= CMDQ_OP_PRI_RESP,
			.substream_valid	= ssv,
			.pri			= {
				.sid	= sid,
				.ssid	= ssid,
				.grpid	= grpid,
				.resp	= PRI_RESP_DENY,
			},
		};

		arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	}
}
static irqreturn_t arm_smmu_priq_thread(int irq, void *dev)
{
	struct arm_smmu_device *smmu = dev;
	struct arm_smmu_queue *q = &smmu->priq.q;
	u64 evt[PRIQ_ENT_DWORDS];

	do {
		while (!queue_remove_raw(q, evt))
			arm_smmu_handle_ppr(smmu, evt);

		if (queue_sync_prod(q) == -EOVERFLOW)
			dev_err(smmu->dev, "PRIQ overflow detected -- requests lost\n");
	} while (!queue_empty(q));

	/* Sync our overflow flag, as we believe we're up to speed */
	q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons);
	return IRQ_HANDLED;
}
static irqreturn_t arm_smmu_cmdq_sync_handler(int irq, void *dev)
{
	/* We don't actually use CMD_SYNC interrupts for anything */
	return IRQ_HANDLED;
}

static int arm_smmu_device_disable(struct arm_smmu_device *smmu);
static irqreturn_t arm_smmu_gerror_handler(int irq, void *dev)
{
	u32 gerror, gerrorn, active;
	struct arm_smmu_device *smmu = dev;

	gerror = readl_relaxed(smmu->base + ARM_SMMU_GERROR);
	gerrorn = readl_relaxed(smmu->base + ARM_SMMU_GERRORN);

	active = gerror ^ gerrorn;
	if (!(active & GERROR_ERR_MASK))
		return IRQ_NONE; /* No errors pending */

	dev_warn(smmu->dev,
		 "unexpected global error reported (0x%08x), this could be serious\n",
		 active);

	if (active & GERROR_SFM_ERR) {
		dev_err(smmu->dev, "device has entered Service Failure Mode!\n");
		arm_smmu_device_disable(smmu);
	}

	if (active & GERROR_MSI_GERROR_ABT_ERR)
		dev_warn(smmu->dev, "GERROR MSI write aborted\n");

	if (active & GERROR_MSI_PRIQ_ABT_ERR)
		dev_warn(smmu->dev, "PRIQ MSI write aborted\n");

	if (active & GERROR_MSI_EVTQ_ABT_ERR)
		dev_warn(smmu->dev, "EVTQ MSI write aborted\n");

	if (active & GERROR_MSI_CMDQ_ABT_ERR) {
		dev_warn(smmu->dev, "CMDQ MSI write aborted\n");
		arm_smmu_cmdq_sync_handler(irq, smmu->dev);
	}

	if (active & GERROR_PRIQ_ABT_ERR)
		dev_err(smmu->dev, "PRIQ write aborted -- events may have been lost\n");

	if (active & GERROR_EVTQ_ABT_ERR)
		dev_err(smmu->dev, "EVTQ write aborted -- events may have been lost\n");

	if (active & GERROR_CMDQ_ERR)
		arm_smmu_cmdq_skip_err(smmu);

	writel(gerror, smmu->base + ARM_SMMU_GERRORN);
	return IRQ_HANDLED;
}
/* IO_PGTABLE API */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	struct arm_smmu_cmdq_ent cmd;

	cmd.opcode = CMDQ_OP_CMD_SYNC;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	__arm_smmu_tlb_sync(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cmdq_ent cmd;

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		cmd.opcode	= CMDQ_OP_TLBI_NH_ASID;
		cmd.tlbi.asid	= smmu_domain->s1_cfg.cd.asid;
		cmd.tlbi.vmid	= 0;
	} else {
		cmd.opcode	= CMDQ_OP_TLBI_S12_VMALL;
		cmd.tlbi.vmid	= smmu_domain->s2_cfg.vmid;
	}

	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	__arm_smmu_tlb_sync(smmu);
}

static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cmdq_ent cmd = {
		.tlbi = {
			.leaf	= leaf,
			.addr	= iova,
		},
	};

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		cmd.opcode	= CMDQ_OP_TLBI_NH_VA;
		cmd.tlbi.asid	= smmu_domain->s1_cfg.cd.asid;
	} else {
		cmd.opcode	= CMDQ_OP_TLBI_S2_IPA;
		cmd.tlbi.vmid	= smmu_domain->s2_cfg.vmid;
	}

	do {
		arm_smmu_cmdq_issue_cmd(smmu, &cmd);
		cmd.tlbi.addr += granule;
	} while (size -= granule);
}

static const struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
};
/* IOMMU API */
static bool arm_smmu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		return true;
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}
static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;

	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&smmu_domain->domain)) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->pgtbl_lock);
	return &smmu_domain->domain;
}
static int arm_smmu_bitmap_alloc(unsigned long *map, int span)
{
	int idx, size = 1 << span;

	do {
		idx = find_first_zero_bit(map, size);
		if (idx == size)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void arm_smmu_bitmap_free(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}
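/*
 * Usage sketch (illustrative only, not part of the driver): ASIDs and
 * VMIDs are carved out of the per-SMMU bitmaps declared above, e.g.:
 *
 *	int asid = arm_smmu_bitmap_alloc(smmu->asid_map, smmu->asid_bits);
 *	if (asid < 0)
 *		return asid;
 *	// ... use the ASID, then on teardown:
 *	arm_smmu_bitmap_free(smmu->asid_map, asid);
 */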
static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	iommu_put_dma_cookie(domain);
	free_io_pgtable_ops(smmu_domain->pgtbl_ops);

	/* Free the CD and ASID, if we allocated them */
	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;

		if (cfg->cdptr) {
			dmam_free_coherent(smmu_domain->smmu->dev,
					   CTXDESC_CD_DWORDS << 3,
					   cfg->cdptr,
					   cfg->cdptr_dma);

			arm_smmu_bitmap_free(smmu->asid_map, cfg->cd.asid);
		}
	} else {
		struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
		if (cfg->vmid)
			arm_smmu_bitmap_free(smmu->vmid_map, cfg->vmid);
	}

	kfree(smmu_domain);
}
static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	int ret;
	int asid;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;

	asid = arm_smmu_bitmap_alloc(smmu->asid_map, smmu->asid_bits);
	if (asid < 0)
		return asid;

	cfg->cdptr = dmam_alloc_coherent(smmu->dev, CTXDESC_CD_DWORDS << 3,
					 &cfg->cdptr_dma,
					 GFP_KERNEL | __GFP_ZERO);
	if (!cfg->cdptr) {
		dev_warn(smmu->dev, "failed to allocate context descriptor\n");
		ret = -ENOMEM;
		goto out_free_asid;
	}

	cfg->cd.asid	= (u16)asid;
	cfg->cd.ttbr	= pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
	cfg->cd.tcr	= pgtbl_cfg->arm_lpae_s1_cfg.tcr;
	cfg->cd.mair	= pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
	return 0;

out_free_asid:
	arm_smmu_bitmap_free(smmu->asid_map, asid);
	return ret;
}

static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	int vmid;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;

	vmid = arm_smmu_bitmap_alloc(smmu->vmid_map, smmu->vmid_bits);
	if (vmid < 0)
		return vmid;

	cfg->vmid	= (u16)vmid;
	cfg->vttbr	= pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
	cfg->vtcr	= pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
	return 0;
}
static int arm_smmu_domain_finalise(struct iommu_domain *domain)
{
	int ret;
	unsigned long ias, oas;
	enum io_pgtable_fmt fmt;
	struct io_pgtable_cfg pgtbl_cfg;
	struct io_pgtable_ops *pgtbl_ops;
	int (*finalise_stage_fn)(struct arm_smmu_domain *,
				 struct io_pgtable_cfg *);
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	/* Restrict the stage to what we can actually support */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		ias = VA_BITS;
		oas = smmu->ias;
		fmt = ARM_64_LPAE_S1;
		finalise_stage_fn = arm_smmu_domain_finalise_s1;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
	case ARM_SMMU_DOMAIN_S2:
		ias = smmu->ias;
		oas = smmu->oas;
		fmt = ARM_64_LPAE_S2;
		finalise_stage_fn = arm_smmu_domain_finalise_s2;
		break;
	default:
		return -EINVAL;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= &arm_smmu_gather_ops,
		.iommu_dev	= smmu->dev,
	};

	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops)
		return -ENOMEM;

	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	domain->geometry.aperture_end = (1UL << ias) - 1;
	domain->geometry.force_aperture = true;
	smmu_domain->pgtbl_ops = pgtbl_ops;

	ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
	if (ret < 0)
		free_io_pgtable_ops(pgtbl_ops);

	return ret;
}
static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
{
	__le64 *step;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;

	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
		struct arm_smmu_strtab_l1_desc *l1_desc;
		int idx;

		/* Two-level walk */
		idx = (sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS;
		l1_desc = &cfg->l1_desc[idx];
		idx = (sid & ((1 << STRTAB_SPLIT) - 1)) * STRTAB_STE_DWORDS;
		step = &l1_desc->l2ptr[idx];
	} else {
		/* Simple linear lookup */
		step = &cfg->strtab[sid * STRTAB_STE_DWORDS];
	}

	return step;
}
static int arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
{
	int i;
	struct arm_smmu_master_data *master = fwspec->iommu_priv;
	struct arm_smmu_device *smmu = master->smmu;

	for (i = 0; i < fwspec->num_ids; ++i) {
		u32 sid = fwspec->ids[i];
		__le64 *step = arm_smmu_get_step_for_sid(smmu, sid);

		arm_smmu_write_strtab_ent(smmu, sid, step, &master->ste);
	}

	return 0;
}
static void arm_smmu_detach_dev(struct device *dev)
{
	struct arm_smmu_master_data *master = dev->iommu_fwspec->iommu_priv;

	master->ste.bypass = true;
	if (arm_smmu_install_ste_for_dev(dev->iommu_fwspec) < 0)
		dev_warn(dev, "failed to install bypass STE\n");
}
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret = 0;
	struct arm_smmu_device *smmu;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_master_data *master;
	struct arm_smmu_strtab_ent *ste;

	if (!dev->iommu_fwspec)
		return -ENOENT;

	master = dev->iommu_fwspec->iommu_priv;
	smmu = master->smmu;
	ste = &master->ste;

	/* Already attached to a different domain? */
	if (!ste->bypass)
		arm_smmu_detach_dev(dev);

	mutex_lock(&smmu_domain->init_mutex);

	if (!smmu_domain->smmu) {
		smmu_domain->smmu = smmu;
		ret = arm_smmu_domain_finalise(domain);
		if (ret) {
			smmu_domain->smmu = NULL;
			goto out_unlock;
		}
	} else if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s (upstream of %s)\n",
			dev_name(smmu_domain->smmu->dev),
			dev_name(smmu->dev));
		ret = -ENXIO;
		goto out_unlock;
	}

	ste->bypass = false;
	ste->valid = true;

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		ste->s1_cfg = &smmu_domain->s1_cfg;
		ste->s2_cfg = NULL;
		arm_smmu_write_ctx_desc(smmu, ste->s1_cfg);
	} else {
		ste->s1_cfg = NULL;
		ste->s2_cfg = &smmu_domain->s2_cfg;
	}

	ret = arm_smmu_install_ste_for_dev(dev->iommu_fwspec);
	if (ret < 0)
		ste->valid = false;

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}
static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	int ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->map(ops, iova, paddr, size, prot);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static size_t
arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
{
	size_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->unmap(ops, iova, size);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static phys_addr_t
arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	phys_addr_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->iova_to_phys(ops, iova);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}
static struct platform_driver arm_smmu_driver;

static int arm_smmu_match_node(struct device *dev, void *data)
{
	return dev->fwnode == data;
}

static
struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
{
	struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
						fwnode, arm_smmu_match_node);
	put_device(dev);
	return dev ? dev_get_drvdata(dev) : NULL;
}

static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
{
	unsigned long limit = smmu->strtab_cfg.num_l1_ents;

	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
		limit *= 1UL << STRTAB_SPLIT;

	return sid < limit;
}
static struct iommu_ops arm_smmu_ops;

static int arm_smmu_add_device(struct device *dev)
{
	int i, ret;
	struct arm_smmu_device *smmu;
	struct arm_smmu_master_data *master;
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct iommu_group *group;

	if (!fwspec || fwspec->ops != &arm_smmu_ops)
		return -ENODEV;
	/*
	 * We _can_ actually withstand dodgy bus code re-calling add_device()
	 * without an intervening remove_device()/of_xlate() sequence, but
	 * we're not going to do so quietly...
	 */
	if (WARN_ON_ONCE(fwspec->iommu_priv)) {
		master = fwspec->iommu_priv;
		smmu = master->smmu;
	} else {
		smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
		if (!smmu)
			return -ENODEV;
		master = kzalloc(sizeof(*master), GFP_KERNEL);
		if (!master)
			return -ENOMEM;

		master->smmu = smmu;
		fwspec->iommu_priv = master;
	}

	/* Check the SIDs are in range of the SMMU and our stream table */
	for (i = 0; i < fwspec->num_ids; i++) {
		u32 sid = fwspec->ids[i];

		if (!arm_smmu_sid_in_range(smmu, sid))
			return -ERANGE;

		/* Ensure l2 strtab is initialised */
		if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
			ret = arm_smmu_init_l2_strtab(smmu, sid);
			if (ret)
				return ret;
		}
	}

	group = iommu_group_get_for_dev(dev);
	if (!IS_ERR(group))
		iommu_group_put(group);

	return PTR_ERR_OR_ZERO(group);
}
static void arm_smmu_remove_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_master_data *master;

	if (!fwspec || fwspec->ops != &arm_smmu_ops)
		return;

	master = fwspec->iommu_priv;
	if (master && master->ste.valid)
		arm_smmu_detach_dev(dev);
	iommu_group_remove_device(dev);
	kfree(master);
	iommu_fwspec_free(dev);
}
static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
	struct iommu_group *group;

	/*
	 * We don't support devices sharing stream IDs other than PCI RID
	 * aliases, since the necessary ID-to-device lookup becomes rather
	 * impractical given a potential sparse 32-bit stream ID space.
	 */
	if (dev_is_pci(dev))
		group = pci_device_group(dev);
	else
		group = generic_device_group(dev);

	return group;
}
static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
		return 0;
	default:
		return -ENODEV;
	}
}
static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	mutex_lock(&smmu_domain->init_mutex);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		if (smmu_domain->smmu) {
			ret = -EPERM;
			goto out_unlock;
		}

		if (*(int *)data)
			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
		else
			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

		break;
	default:
		ret = -ENODEV;
	}

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}
static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	return iommu_fwspec_add_ids(dev, args->args, 1);
}
static void arm_smmu_get_resv_regions(struct device *dev,
				      struct list_head *head)
{
	struct iommu_resv_region *region;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;

	region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
					 prot, IOMMU_RESV_MSI);
	if (!region)
		return;

	list_add_tail(&region->list, head);
}

static void arm_smmu_put_resv_regions(struct device *dev,
				      struct list_head *head)
{
	struct iommu_resv_region *entry, *next;

	list_for_each_entry_safe(entry, next, head, list)
		kfree(entry);
}
static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_alloc		= arm_smmu_domain_alloc,
	.domain_free		= arm_smmu_domain_free,
	.attach_dev		= arm_smmu_attach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.map_sg			= default_iommu_map_sg,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.device_group		= arm_smmu_device_group,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.of_xlate		= arm_smmu_of_xlate,
	.get_resv_regions	= arm_smmu_get_resv_regions,
	.put_resv_regions	= arm_smmu_put_resv_regions,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
};
/* Probing and initialisation functions */
static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
				   struct arm_smmu_queue *q,
				   unsigned long prod_off,
				   unsigned long cons_off,
				   size_t dwords)
{
	size_t qsz = ((1 << q->max_n_shift) * dwords) << 3;

	q->base = dmam_alloc_coherent(smmu->dev, qsz, &q->base_dma, GFP_KERNEL);
	if (!q->base) {
		dev_err(smmu->dev, "failed to allocate queue (0x%zx bytes)\n",
			qsz);
		return -ENOMEM;
	}

	q->prod_reg	= smmu->base + prod_off;
	q->cons_reg	= smmu->base + cons_off;
	q->ent_dwords	= dwords;

	q->q_base  = Q_BASE_RWA;
	q->q_base |= q->base_dma & Q_BASE_ADDR_MASK << Q_BASE_ADDR_SHIFT;
	q->q_base |= (q->max_n_shift & Q_BASE_LOG2SIZE_MASK)
		     << Q_BASE_LOG2SIZE_SHIFT;

	q->prod = q->cons = 0;
	return 0;
}
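/*
 * Worked example (illustrative only, not from the original source): for
 * the command queue with max_n_shift == CMDQ_MAX_SZ_SHIFT (8) and
 * dwords == CMDQ_ENT_DWORDS (2), qsz = ((1 << 8) * 2) << 3 = 4096 bytes,
 * i.e. 256 entries of 16 bytes each.
 */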
static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
{
	int ret;

	/* cmdq */
	spin_lock_init(&smmu->cmdq.lock);
	ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, ARM_SMMU_CMDQ_PROD,
				      ARM_SMMU_CMDQ_CONS, CMDQ_ENT_DWORDS);
	if (ret)
		return ret;

	/* evtq */
	ret = arm_smmu_init_one_queue(smmu, &smmu->evtq.q, ARM_SMMU_EVTQ_PROD,
				      ARM_SMMU_EVTQ_CONS, EVTQ_ENT_DWORDS);
	if (ret)
		return ret;

	/* priq */
	if (!(smmu->features & ARM_SMMU_FEAT_PRI))
		return 0;

	return arm_smmu_init_one_queue(smmu, &smmu->priq.q, ARM_SMMU_PRIQ_PROD,
				       ARM_SMMU_PRIQ_CONS, PRIQ_ENT_DWORDS);
}
static int arm_smmu_init_l1_strtab(struct arm_smmu_device *smmu)
{
	unsigned int i;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
	size_t size = sizeof(*cfg->l1_desc) * cfg->num_l1_ents;
	void *strtab = smmu->strtab_cfg.strtab;

	cfg->l1_desc = devm_kzalloc(smmu->dev, size, GFP_KERNEL);
	if (!cfg->l1_desc) {
		dev_err(smmu->dev, "failed to allocate l1 stream table desc\n");
		return -ENOMEM;
	}

	for (i = 0; i < cfg->num_l1_ents; ++i) {
		arm_smmu_write_strtab_l1_desc(strtab, &cfg->l1_desc[i]);
		strtab += STRTAB_L1_DESC_DWORDS << 3;
	}

	return 0;
}
static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
{
	void *strtab;
	u64 reg;
	u32 size, l1size;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;

	/*
	 * If we can resolve everything with a single L2 table, then we
	 * just need a single L1 descriptor. Otherwise, calculate the L1
	 * size, capped to the SIDSIZE.
	 */
	if (smmu->sid_bits < STRTAB_SPLIT) {
		size = 0;
	} else {
		size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
		size = min(size, smmu->sid_bits - STRTAB_SPLIT);
	}
	cfg->num_l1_ents = 1 << size;

	size += STRTAB_SPLIT;
	if (size < smmu->sid_bits)
		dev_warn(smmu->dev,
			 "2-level strtab only covers %u/%u bits of SID\n",
			 size, smmu->sid_bits);

	l1size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3);
	strtab = dmam_alloc_coherent(smmu->dev, l1size, &cfg->strtab_dma,
				     GFP_KERNEL | __GFP_ZERO);
	if (!strtab) {
		dev_err(smmu->dev,
			"failed to allocate l1 stream table (%u bytes)\n",
			size);
		return -ENOMEM;
	}
	cfg->strtab = strtab;

	/* Configure strtab_base_cfg for 2 levels */
	reg  = STRTAB_BASE_CFG_FMT_2LVL;
	reg |= (size & STRTAB_BASE_CFG_LOG2SIZE_MASK)
		<< STRTAB_BASE_CFG_LOG2SIZE_SHIFT;
	reg |= (STRTAB_SPLIT & STRTAB_BASE_CFG_SPLIT_MASK)
		<< STRTAB_BASE_CFG_SPLIT_SHIFT;
	cfg->strtab_base_cfg = reg;

	return arm_smmu_init_l1_strtab(smmu);
}
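/*
 * Worked example (illustrative only, not from the original source): with
 * STRTAB_L1_SZ_SHIFT == 20 and 8-byte L1 descriptors, size = 20 - 3 = 17,
 * so a 32-bit SID space gives min(17, 32 - 8) = 17 -> 128k L1 entries,
 * matching the "128k L1 entries" comment near the STRTAB defines. The
 * table then covers 17 + 8 = 25 bits of SID and the warning above fires
 * for the remaining 7 bits.
 */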
static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
{
	void *strtab;
	u64 reg;
	u32 size;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;

	size = (1 << smmu->sid_bits) * (STRTAB_STE_DWORDS << 3);
	strtab = dmam_alloc_coherent(smmu->dev, size, &cfg->strtab_dma,
				     GFP_KERNEL | __GFP_ZERO);
	if (!strtab) {
		dev_err(smmu->dev,
			"failed to allocate linear stream table (%u bytes)\n",
			size);
		return -ENOMEM;
	}
	cfg->strtab = strtab;
	cfg->num_l1_ents = 1 << smmu->sid_bits;

	/* Configure strtab_base_cfg for a linear table covering all SIDs */
	reg  = STRTAB_BASE_CFG_FMT_LINEAR;
	reg |= (smmu->sid_bits & STRTAB_BASE_CFG_LOG2SIZE_MASK)
		<< STRTAB_BASE_CFG_LOG2SIZE_SHIFT;
	cfg->strtab_base_cfg = reg;

	arm_smmu_init_bypass_stes(strtab, cfg->num_l1_ents);
	return 0;
}
static int arm_smmu_init_strtab(struct arm_smmu_device *smmu)
{
	u64 reg;
	int ret;

	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
		ret = arm_smmu_init_strtab_2lvl(smmu);
	else
		ret = arm_smmu_init_strtab_linear(smmu);

	if (ret)
		return ret;

	/* Set the strtab base address */
	reg  = smmu->strtab_cfg.strtab_dma &
	       STRTAB_BASE_ADDR_MASK << STRTAB_BASE_ADDR_SHIFT;
	reg |= STRTAB_BASE_RA;
	smmu->strtab_cfg.strtab_base = reg;

	/* Allocate the first VMID for stage-2 bypass STEs */
	set_bit(0, smmu->vmid_map);
	return 0;
}
static int arm_smmu_init_structures(struct arm_smmu_device *smmu)
{
	int ret;

	ret = arm_smmu_init_queues(smmu);
	if (ret)
		return ret;

	return arm_smmu_init_strtab(smmu);
}
static int arm_smmu_write_reg_sync(struct arm_smmu_device *smmu, u32 val,
				   unsigned int reg_off, unsigned int ack_off)
{
	u32 reg;

	writel_relaxed(val, smmu->base + reg_off);
	return readl_relaxed_poll_timeout(smmu->base + ack_off, reg, reg == val,
					  1, ARM_SMMU_POLL_TIMEOUT_US);
}
2122 /* GBPA is "special" */
2123 static int arm_smmu_update_gbpa(struct arm_smmu_device *smmu, u32 set, u32 clr)
2126 u32 reg, __iomem *gbpa = smmu->base + ARM_SMMU_GBPA;
2128 ret = readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE),
2129 1, ARM_SMMU_POLL_TIMEOUT_US);
2135 writel_relaxed(reg | GBPA_UPDATE, gbpa);
2136 return readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE),
2137 1, ARM_SMMU_POLL_TIMEOUT_US);
static void arm_smmu_free_msis(void *data)
{
	struct device *dev = data;
	platform_msi_domain_free_irqs(dev);
}

static void arm_smmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	phys_addr_t doorbell;
	struct device *dev = msi_desc_to_dev(desc);
	struct arm_smmu_device *smmu = dev_get_drvdata(dev);
	phys_addr_t *cfg = arm_smmu_msi_cfg[desc->platform.msi_index];

	doorbell = (((u64)msg->address_hi) << 32) | msg->address_lo;
	doorbell &= MSI_CFG0_ADDR_MASK << MSI_CFG0_ADDR_SHIFT;

	writeq_relaxed(doorbell, smmu->base + cfg[0]);
	writel_relaxed(msg->data, smmu->base + cfg[1]);
	writel_relaxed(MSI_CFG2_MEMATTR_DEVICE_nGnRE, smmu->base + cfg[2]);
}

static void arm_smmu_setup_msis(struct arm_smmu_device *smmu)
{
	struct msi_desc *desc;
	int ret, nvec = ARM_SMMU_MAX_MSIS;
	struct device *dev = smmu->dev;

	/* Clear the MSI address regs */
	writeq_relaxed(0, smmu->base + ARM_SMMU_GERROR_IRQ_CFG0);
	writeq_relaxed(0, smmu->base + ARM_SMMU_EVTQ_IRQ_CFG0);

	if (smmu->features & ARM_SMMU_FEAT_PRI)
		writeq_relaxed(0, smmu->base + ARM_SMMU_PRIQ_IRQ_CFG0);
	else
		nvec--;

	if (!(smmu->features & ARM_SMMU_FEAT_MSI))
		return;

	/* Allocate MSIs for evtq, gerror and priq. Ignore cmdq */
	ret = platform_msi_domain_alloc_irqs(dev, nvec, arm_smmu_write_msi_msg);
	if (ret) {
		dev_warn(dev, "failed to allocate MSIs\n");
		return;
	}

	for_each_msi_entry(desc, dev) {
		switch (desc->platform.msi_index) {
		case EVTQ_MSI_INDEX:
			smmu->evtq.q.irq = desc->irq;
			break;
		case GERROR_MSI_INDEX:
			smmu->gerr_irq = desc->irq;
			break;
		case PRIQ_MSI_INDEX:
			smmu->priq.q.irq = desc->irq;
			break;
		default:	/* Unknown */
			continue;
		}
	}

	/* Add callback to free MSIs on teardown */
	devm_add_action(dev, arm_smmu_free_msis, dev);
}
static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
{
	int ret, irq;
	u32 irqen_flags = IRQ_CTRL_EVTQ_IRQEN | IRQ_CTRL_GERROR_IRQEN;

	/* Disable IRQs first */
	ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_IRQ_CTRL,
				      ARM_SMMU_IRQ_CTRLACK);
	if (ret) {
		dev_err(smmu->dev, "failed to disable irqs\n");
		return ret;
	}

	arm_smmu_setup_msis(smmu);

	/* Request interrupt lines */
	irq = smmu->evtq.q.irq;
	if (irq) {
		ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
						arm_smmu_evtq_thread,
						IRQF_ONESHOT,
						"arm-smmu-v3-evtq", smmu);
		if (ret < 0)
			dev_warn(smmu->dev, "failed to enable evtq irq\n");
	}

	irq = smmu->cmdq.q.irq;
	if (irq) {
		ret = devm_request_irq(smmu->dev, irq,
				       arm_smmu_cmdq_sync_handler, 0,
				       "arm-smmu-v3-cmdq-sync", smmu);
		if (ret < 0)
			dev_warn(smmu->dev, "failed to enable cmdq-sync irq\n");
	}

	irq = smmu->gerr_irq;
	if (irq) {
		ret = devm_request_irq(smmu->dev, irq, arm_smmu_gerror_handler,
				       0, "arm-smmu-v3-gerror", smmu);
		if (ret < 0)
			dev_warn(smmu->dev, "failed to enable gerror irq\n");
	}

	if (smmu->features & ARM_SMMU_FEAT_PRI) {
		irq = smmu->priq.q.irq;
		if (irq) {
			ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
							arm_smmu_priq_thread,
							IRQF_ONESHOT,
							"arm-smmu-v3-priq",
							smmu);
			if (ret < 0)
				dev_warn(smmu->dev,
					 "failed to enable priq irq\n");
			else
				irqen_flags |= IRQ_CTRL_PRIQ_IRQEN;
		}
	}

	/* Enable interrupt generation on the SMMU */
	ret = arm_smmu_write_reg_sync(smmu, irqen_flags,
				      ARM_SMMU_IRQ_CTRL, ARM_SMMU_IRQ_CTRLACK);
	if (ret)
		dev_warn(smmu->dev, "failed to enable irqs\n");

	return 0;
}
static int arm_smmu_device_disable(struct arm_smmu_device *smmu)
{
	int ret;

	ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_CR0, ARM_SMMU_CR0ACK);
	if (ret)
		dev_err(smmu->dev, "failed to clear cr0\n");

	return ret;
}

static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
{
	int ret;
	u32 reg, enables;
	struct arm_smmu_cmdq_ent cmd;

	/* Clear CR0 and sync (disables SMMU and queue processing) */
	reg = readl_relaxed(smmu->base + ARM_SMMU_CR0);
	if (reg & CR0_SMMUEN)
		dev_warn(smmu->dev, "SMMU currently enabled! Resetting...\n");

	ret = arm_smmu_device_disable(smmu);
	if (ret)
		return ret;

	/* CR1 (table and queue memory attributes) */
	reg = (CR1_SH_ISH << CR1_TABLE_SH_SHIFT) |
	      (CR1_CACHE_WB << CR1_TABLE_OC_SHIFT) |
	      (CR1_CACHE_WB << CR1_TABLE_IC_SHIFT) |
	      (CR1_SH_ISH << CR1_QUEUE_SH_SHIFT) |
	      (CR1_CACHE_WB << CR1_QUEUE_OC_SHIFT) |
	      (CR1_CACHE_WB << CR1_QUEUE_IC_SHIFT);
	writel_relaxed(reg, smmu->base + ARM_SMMU_CR1);

	/* CR2 (miscellaneous global control) */
	reg = CR2_PTM | CR2_RECINVSID | CR2_E2H;
	writel_relaxed(reg, smmu->base + ARM_SMMU_CR2);
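
	/*
	 * For reference, the CR2 bits set above (meanings per the SMMUv3
	 * architecture): PTM keeps TLB maintenance private to the SMMU
	 * rather than participating in broadcast invalidation, RECINVSID
	 * records events for transactions with invalid StreamIDs, and E2H
	 * selects the EL2 host translation regime.
	 */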

	/* Stream table */
	writeq_relaxed(smmu->strtab_cfg.strtab_base,
		       smmu->base + ARM_SMMU_STRTAB_BASE);
	writel_relaxed(smmu->strtab_cfg.strtab_base_cfg,
		       smmu->base + ARM_SMMU_STRTAB_BASE_CFG);

	/* Command queue */
	writeq_relaxed(smmu->cmdq.q.q_base, smmu->base + ARM_SMMU_CMDQ_BASE);
	writel_relaxed(smmu->cmdq.q.prod, smmu->base + ARM_SMMU_CMDQ_PROD);
	writel_relaxed(smmu->cmdq.q.cons, smmu->base + ARM_SMMU_CMDQ_CONS);

	enables = CR0_CMDQEN;
	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
				      ARM_SMMU_CR0ACK);
	if (ret) {
		dev_err(smmu->dev, "failed to enable command queue\n");
		return ret;
	}

	/* Invalidate any cached configuration */
	cmd.opcode = CMDQ_OP_CFGI_ALL;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	cmd.opcode = CMDQ_OP_CMD_SYNC;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
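
	/*
	 * The pattern above recurs throughout this driver: queue a command,
	 * then queue CMD_SYNC, whose completion guarantees that the earlier
	 * command has been consumed and its effects made visible.
	 */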

	/* Invalidate any stale TLB entries */
	if (smmu->features & ARM_SMMU_FEAT_HYP) {
		cmd.opcode = CMDQ_OP_TLBI_EL2_ALL;
		arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	}

	cmd.opcode = CMDQ_OP_TLBI_NSNH_ALL;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	cmd.opcode = CMDQ_OP_CMD_SYNC;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
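
	/*
	 * TLBI_NSNH_ALL drops all non-secure, non-hypervisor TLB entries;
	 * the EL2 invalidation is only issued when the HYP feature indicates
	 * that an EL2 translation regime exists to hold stale entries.
	 */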

	/* Event queue */
	writeq_relaxed(smmu->evtq.q.q_base, smmu->base + ARM_SMMU_EVTQ_BASE);
	writel_relaxed(smmu->evtq.q.prod, smmu->base + ARM_SMMU_EVTQ_PROD);
	writel_relaxed(smmu->evtq.q.cons, smmu->base + ARM_SMMU_EVTQ_CONS);

	enables |= CR0_EVTQEN;
	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
				      ARM_SMMU_CR0ACK);
	if (ret) {
		dev_err(smmu->dev, "failed to enable event queue\n");
		return ret;
	}

	/* PRI queue */
	if (smmu->features & ARM_SMMU_FEAT_PRI) {
		writeq_relaxed(smmu->priq.q.q_base,
			       smmu->base + ARM_SMMU_PRIQ_BASE);
		writel_relaxed(smmu->priq.q.prod,
			       smmu->base + ARM_SMMU_PRIQ_PROD);
		writel_relaxed(smmu->priq.q.cons,
			       smmu->base + ARM_SMMU_PRIQ_CONS);

		enables |= CR0_PRIQEN;
		ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
					      ARM_SMMU_CR0ACK);
		if (ret) {
			dev_err(smmu->dev, "failed to enable PRI queue\n");
			return ret;
		}
	}

	ret = arm_smmu_setup_irqs(smmu);
	if (ret) {
		dev_err(smmu->dev, "failed to setup irqs\n");
		return ret;
	}
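
	/*
	 * Final step: either enable translation, or leave SMMUEN clear and
	 * make the global bypass attribute sane. Clearing GBPA_ABORT (the
	 * (set, clr) argument convention of arm_smmu_update_gbpa() is
	 * assumed from its definition earlier in this file) means incoming
	 * transactions bypass rather than abort while the SMMU is disabled.
	 */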

	/* Enable the SMMU interface, or ensure bypass */
	if (!bypass || disable_bypass) {
		enables |= CR0_SMMUEN;
	} else {
		ret = arm_smmu_update_gbpa(smmu, 0, GBPA_ABORT);
		if (ret) {
			dev_err(smmu->dev, "GBPA not responding to update\n");
			return ret;
		}
	}
	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
				      ARM_SMMU_CR0ACK);
	if (ret) {
		dev_err(smmu->dev, "failed to enable SMMU interface\n");
		return ret;
	}

	return 0;
}

static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
{
	u32 reg;
	bool coherent = smmu->features & ARM_SMMU_FEAT_COHERENCY;

	/* IDR0 */
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR0);

	/* 2-level structures */
	if ((reg & IDR0_ST_LVL_MASK << IDR0_ST_LVL_SHIFT) == IDR0_ST_LVL_2LVL)
		smmu->features |= ARM_SMMU_FEAT_2_LVL_STRTAB;

	if (reg & IDR0_CD2L)
		smmu->features |= ARM_SMMU_FEAT_2_LVL_CDTAB;
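
	/*
	 * Example of why the 2-level layout matters (STE size per the
	 * SMMUv3 architecture is 64 bytes): a linear stream table covering
	 * 32-bit StreamIDs would need (1 << 32) * 64 bytes up front, while
	 * a 2-level table allocates second-level pages only for StreamID
	 * ranges that are actually in use.
	 */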

	/*
	 * Translation table endianness.
	 * We currently require the same endianness as the CPU, but this
	 * could be changed later by adding a new IO_PGTABLE_QUIRK.
	 */
	switch (reg & IDR0_TTENDIAN_MASK << IDR0_TTENDIAN_SHIFT) {
	case IDR0_TTENDIAN_MIXED:
		smmu->features |= ARM_SMMU_FEAT_TT_LE | ARM_SMMU_FEAT_TT_BE;
		break;
#ifdef __BIG_ENDIAN
	case IDR0_TTENDIAN_BE:
		smmu->features |= ARM_SMMU_FEAT_TT_BE;
		break;
#else
	case IDR0_TTENDIAN_LE:
		smmu->features |= ARM_SMMU_FEAT_TT_LE;
		break;
#endif
	default:
		dev_err(smmu->dev, "unknown/unsupported TT endianness!\n");
		return -ENXIO;
	}

	/* Boolean feature flags */
	if (IS_ENABLED(CONFIG_PCI_PRI) && reg & IDR0_PRI)
		smmu->features |= ARM_SMMU_FEAT_PRI;
	if (IS_ENABLED(CONFIG_PCI_ATS) && reg & IDR0_ATS)
		smmu->features |= ARM_SMMU_FEAT_ATS;
	if (reg & IDR0_SEV)
		smmu->features |= ARM_SMMU_FEAT_SEV;
	if (reg & IDR0_MSI)
		smmu->features |= ARM_SMMU_FEAT_MSI;
	if (reg & IDR0_HYP)
		smmu->features |= ARM_SMMU_FEAT_HYP;

	/*
	 * The coherency feature as set by FW is used in preference to the ID
	 * register, but warn on mismatch.
	 */
	if (!!(reg & IDR0_COHACC) != coherent)
		dev_warn(smmu->dev, "IDR0.COHACC overridden by dma-coherent property (%s)\n",
			 coherent ? "true" : "false");

	switch (reg & IDR0_STALL_MODEL_MASK << IDR0_STALL_MODEL_SHIFT) {
	case IDR0_STALL_MODEL_STALL:
		/* Fallthrough */
	case IDR0_STALL_MODEL_FORCE:
		smmu->features |= ARM_SMMU_FEAT_STALLS;
	}

	if (reg & IDR0_S1P)
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
	if (reg & IDR0_S2P)
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
	if (!(reg & (IDR0_S1P | IDR0_S2P))) {
		dev_err(smmu->dev, "no translation support!\n");
		return -ENXIO;
	}

	/* We only support the AArch64 table format at present */
	switch (reg & IDR0_TTF_MASK << IDR0_TTF_SHIFT) {
	case IDR0_TTF_AARCH32_64:
		smmu->ias = 40;
		/* Fallthrough */
	case IDR0_TTF_AARCH64:
		break;
	default:
		dev_err(smmu->dev, "AArch64 table format not supported!\n");
		return -ENXIO;
	}

	/* ASID/VMID sizes */
	smmu->asid_bits = reg & IDR0_ASID16 ? 16 : 8;
	smmu->vmid_bits = reg & IDR0_VMID16 ? 16 : 8;

	/* IDR1 */
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR1);
	if (reg & (IDR1_TABLES_PRESET | IDR1_QUEUES_PRESET | IDR1_REL)) {
		dev_err(smmu->dev, "embedded implementation not supported\n");
		return -ENXIO;
	}

	/* Queue sizes, capped at 4k */
	smmu->cmdq.q.max_n_shift = min((u32)CMDQ_MAX_SZ_SHIFT,
				       reg >> IDR1_CMDQ_SHIFT & IDR1_CMDQ_MASK);
	if (!smmu->cmdq.q.max_n_shift) {
		/* Odd alignment restrictions on the base, so ignore for now */
		dev_err(smmu->dev, "unit-length command queue not supported\n");
		return -ENXIO;
	}

	smmu->evtq.q.max_n_shift = min((u32)EVTQ_MAX_SZ_SHIFT,
				       reg >> IDR1_EVTQ_SHIFT & IDR1_EVTQ_MASK);
	smmu->priq.q.max_n_shift = min((u32)PRIQ_MAX_SZ_SHIFT,
				       reg >> IDR1_PRIQ_SHIFT & IDR1_PRIQ_MASK);
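
	/*
	 * Worked example (cap values assumed from the *_MAX_SZ_SHIFT
	 * definitions at the top of this file): IDR1.CMDQS reports log2 of
	 * the supported command queue entries, so a hardware value of 10
	 * would allow 1024 entries, but the cap clamps max_n_shift so that
	 * each queue allocation stays within 4k, e.g. 256 16-byte commands.
	 */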

	/* SID/SSID sizes */
	smmu->ssid_bits = reg >> IDR1_SSID_SHIFT & IDR1_SSID_MASK;
	smmu->sid_bits = reg >> IDR1_SID_SHIFT & IDR1_SID_MASK;

	/* IDR5 */
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5);

	/* Maximum number of outstanding stalls */
	smmu->evtq.max_stalls = reg >> IDR5_STALL_MAX_SHIFT
				& IDR5_STALL_MAX_MASK;

	/* Page sizes */
	if (reg & IDR5_GRAN64K)
		smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
	if (reg & IDR5_GRAN16K)
		smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
	if (reg & IDR5_GRAN4K)
		smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
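
	/*
	 * The block sizes paired with each granule follow from the AArch64
	 * page-table geometry: a 4k granule resolves 9 bits per level, so a
	 * level-2 block maps 4k << 9 = 2M and a level-1 block 4k << 18 = 1G;
	 * a 16k granule resolves 11 bits (16k << 11 = 32M) and a 64k granule
	 * 13 bits (64k << 13 = 512M).
	 */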

	if (arm_smmu_ops.pgsize_bitmap == -1UL)
		arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
	else
		arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;

	/* Output address size */
	switch (reg & IDR5_OAS_MASK << IDR5_OAS_SHIFT) {
	case IDR5_OAS_32_BIT:
		smmu->oas = 32;
		break;
	case IDR5_OAS_36_BIT:
		smmu->oas = 36;
		break;
	case IDR5_OAS_40_BIT:
		smmu->oas = 40;
		break;
	case IDR5_OAS_42_BIT:
		smmu->oas = 42;
		break;
	case IDR5_OAS_44_BIT:
		smmu->oas = 44;
		break;
	default:
		dev_info(smmu->dev,
			"unknown output address size. Truncating to 48-bit\n");
		/* Fallthrough */
	case IDR5_OAS_48_BIT:
		smmu->oas = 48;
	}

	/* Set the DMA mask for our table walker */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(smmu->oas)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	smmu->ias = max(smmu->ias, smmu->oas);

	dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n",
		 smmu->ias, smmu->oas, smmu->features);
	return 0;
}

#ifdef CONFIG_ACPI
static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
				      struct arm_smmu_device *smmu)
{
	struct acpi_iort_smmu_v3 *iort_smmu;
	struct device *dev = smmu->dev;
	struct acpi_iort_node *node;

	node = *(struct acpi_iort_node **)dev_get_platdata(dev);

	/* Retrieve SMMUv3 specific data */
	iort_smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	if (iort_smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE)
		smmu->features |= ARM_SMMU_FEAT_COHERENCY;

	return 0;
}
#else
static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
					     struct arm_smmu_device *smmu)
{
	return -ENODEV;
}
#endif
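
/*
 * The IORT COHACC override flag is the ACPI analogue of the device-tree
 * dma-coherent property handled below: both feed ARM_SMMU_FEAT_COHERENCY,
 * which arm_smmu_device_hw_probe() trusts in preference to IDR0.COHACC.
 */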

static int arm_smmu_device_dt_probe(struct platform_device *pdev,
				    struct arm_smmu_device *smmu)
{
	struct device *dev = &pdev->dev;
	u32 cells;
	int ret = -EINVAL;

	if (of_property_read_u32(dev->of_node, "#iommu-cells", &cells))
		dev_err(dev, "missing #iommu-cells property\n");
	else if (cells != 1)
		dev_err(dev, "invalid #iommu-cells value (%d)\n", cells);
	else
		ret = 0;

	parse_driver_options(smmu);

	if (of_dma_is_coherent(dev->of_node))
		smmu->features |= ARM_SMMU_FEAT_COHERENCY;

	return ret;
}
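
/*
 * For reference, a device-tree node matching this probe path might look
 * like the sketch below (the unit address, region size and interrupt
 * specifiers are invented for illustration):
 *
 *	smmu@2b400000 {
 *		compatible = "arm,smmu-v3";
 *		reg = <0x0 0x2b400000 0x0 0x20000>;
 *		interrupts = <GIC_SPI 74 IRQ_TYPE_EDGE_RISING>,
 *			     <GIC_SPI 75 IRQ_TYPE_EDGE_RISING>,
 *			     <GIC_SPI 77 IRQ_TYPE_EDGE_RISING>,
 *			     <GIC_SPI 79 IRQ_TYPE_EDGE_RISING>;
 *		interrupt-names = "eventq", "priq", "cmdq-sync", "gerror";
 *		dma-coherent;
 *		#iommu-cells = <1>;
 *	};
 */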

static int arm_smmu_device_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct resource *res;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	bool bypass;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	/* Base address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (resource_size(res) + 1 < SZ_128K) {
		dev_err(dev, "MMIO region too small (%pr)\n", res);
		return -EINVAL;
	}
	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);

	/* Interrupt lines */
	irq = platform_get_irq_byname(pdev, "eventq");
	if (irq > 0)
		smmu->evtq.q.irq = irq;
	irq = platform_get_irq_byname(pdev, "priq");
	if (irq > 0)
		smmu->priq.q.irq = irq;
	irq = platform_get_irq_byname(pdev, "cmdq-sync");
	if (irq > 0)
		smmu->cmdq.q.irq = irq;
	irq = platform_get_irq_byname(pdev, "gerror");
	if (irq > 0)
		smmu->gerr_irq = irq;

	if (dev->of_node) {
		ret = arm_smmu_device_dt_probe(pdev, smmu);
	} else {
		ret = arm_smmu_device_acpi_probe(pdev, smmu);
		if (ret == -ENODEV)
			return ret;
	}

	/* Set bypass mode according to firmware probing result */
	bypass = !!ret;

	/* Probe the h/w */
	ret = arm_smmu_device_hw_probe(smmu);
	if (ret)
		return ret;

	/* Initialise in-memory data structures */
	ret = arm_smmu_init_structures(smmu);
	if (ret)
		return ret;

	/* Record our private device structure */
	platform_set_drvdata(pdev, smmu);

	/* Reset the device */
	ret = arm_smmu_device_reset(smmu, bypass);
	if (ret)
		return ret;

	/* And we're up. Go go go! */
	iommu_register_instance(dev->fwnode, &arm_smmu_ops);
#ifdef CONFIG_PCI
	if (pci_bus_type.iommu_ops != &arm_smmu_ops) {
		pci_request_acs();
		ret = bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
		if (ret)
			return ret;
	}
#endif
#ifdef CONFIG_ARM_AMBA
	if (amba_bustype.iommu_ops != &arm_smmu_ops) {
		ret = bus_set_iommu(&amba_bustype, &arm_smmu_ops);
		if (ret)
			return ret;
	}
#endif
	if (platform_bus_type.iommu_ops != &arm_smmu_ops) {
		ret = bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
		if (ret)
			return ret;
	}
	return 0;
}

static int arm_smmu_device_remove(struct platform_device *pdev)
{
	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);

	arm_smmu_device_disable(smmu);
	return 0;
}

static struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v3", },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);

static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name		= "arm-smmu-v3",
		.of_match_table	= of_match_ptr(arm_smmu_of_match),
	},
	.probe	= arm_smmu_device_probe,
	.remove	= arm_smmu_device_remove,
};

static int __init arm_smmu_init(void)
{
	static bool registered;
	int ret = 0;

	if (!registered) {
		ret = platform_driver_register(&arm_smmu_driver);
		registered = !ret;
	}
	return ret;
}

static void __exit arm_smmu_exit(void)
{
	return platform_driver_unregister(&arm_smmu_driver);
}

subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);

static int __init arm_smmu_of_init(struct device_node *np)
{
	int ret = arm_smmu_init();

	if (ret)
		return ret;

	if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root))
		return -ENODEV;

	return 0;
}
IOMMU_OF_DECLARE(arm_smmuv3, "arm,smmu-v3", arm_smmu_of_init);

#ifdef CONFIG_ACPI
static int __init acpi_smmu_v3_init(struct acpi_table_header *table)
{
	if (iort_node_match(ACPI_IORT_NODE_SMMU_V3))
		return arm_smmu_init();

	return 0;
}
IORT_ACPI_DECLARE(arm_smmu_v3, ACPI_SIG_IORT, acpi_smmu_v3_init);
#endif
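
/*
 * Note on the initialisation paths above: arm_smmu_init() can be reached
 * three ways (subsys_initcall, the IOMMU_OF_DECLARE early DT hook and the
 * IORT ACPI hook), so the static 'registered' flag makes the platform
 * driver registration idempotent instead of failing on a second call.
 */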

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");