/*
 * IOMMU API for ARM architected SMMUv3 implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) 2015 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver is powered by bad coffee and bombay mix.
 */
#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/platform_device.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"
/* MMIO registers */
#define ARM_SMMU_IDR0			0x0
#define IDR0_ST_LVL_SHIFT		27
#define IDR0_ST_LVL_MASK		0x3
#define IDR0_ST_LVL_2LVL		(1 << IDR0_ST_LVL_SHIFT)
#define IDR0_STALL_MODEL_SHIFT		24
#define IDR0_STALL_MODEL_MASK		0x3
#define IDR0_STALL_MODEL_STALL		(0 << IDR0_STALL_MODEL_SHIFT)
#define IDR0_STALL_MODEL_FORCE		(2 << IDR0_STALL_MODEL_SHIFT)
#define IDR0_TTENDIAN_SHIFT		21
#define IDR0_TTENDIAN_MASK		0x3
#define IDR0_TTENDIAN_LE		(2 << IDR0_TTENDIAN_SHIFT)
#define IDR0_TTENDIAN_BE		(3 << IDR0_TTENDIAN_SHIFT)
#define IDR0_TTENDIAN_MIXED		(0 << IDR0_TTENDIAN_SHIFT)
#define IDR0_CD2L			(1 << 19)
#define IDR0_VMID16			(1 << 18)
#define IDR0_PRI			(1 << 16)
#define IDR0_SEV			(1 << 14)
#define IDR0_MSI			(1 << 13)
#define IDR0_ASID16			(1 << 12)
#define IDR0_ATS			(1 << 10)
#define IDR0_HYP			(1 << 9)
#define IDR0_COHACC			(1 << 4)
#define IDR0_TTF_SHIFT			2
#define IDR0_TTF_MASK			0x3
#define IDR0_TTF_AARCH64		(2 << IDR0_TTF_SHIFT)
#define IDR0_TTF_AARCH32_64		(3 << IDR0_TTF_SHIFT)
#define IDR0_S1P			(1 << 1)
#define IDR0_S2P			(1 << 0)

#define ARM_SMMU_IDR1			0x4
#define IDR1_TABLES_PRESET		(1 << 30)
#define IDR1_QUEUES_PRESET		(1 << 29)
#define IDR1_REL			(1 << 28)
#define IDR1_CMDQ_SHIFT			21
#define IDR1_CMDQ_MASK			0x1f
#define IDR1_EVTQ_SHIFT			16
#define IDR1_EVTQ_MASK			0x1f
#define IDR1_PRIQ_SHIFT			11
#define IDR1_PRIQ_MASK			0x1f
#define IDR1_SSID_SHIFT			6
#define IDR1_SSID_MASK			0x1f
#define IDR1_SID_SHIFT			0
#define IDR1_SID_MASK			0x3f

#define ARM_SMMU_IDR5			0x14
#define IDR5_STALL_MAX_SHIFT		16
#define IDR5_STALL_MAX_MASK		0xffff
#define IDR5_GRAN64K			(1 << 6)
#define IDR5_GRAN16K			(1 << 5)
#define IDR5_GRAN4K			(1 << 4)
#define IDR5_OAS_SHIFT			0
#define IDR5_OAS_MASK			0x7
#define IDR5_OAS_32_BIT			(0 << IDR5_OAS_SHIFT)
#define IDR5_OAS_36_BIT			(1 << IDR5_OAS_SHIFT)
#define IDR5_OAS_40_BIT			(2 << IDR5_OAS_SHIFT)
#define IDR5_OAS_42_BIT			(3 << IDR5_OAS_SHIFT)
#define IDR5_OAS_44_BIT			(4 << IDR5_OAS_SHIFT)
#define IDR5_OAS_48_BIT			(5 << IDR5_OAS_SHIFT)
#define ARM_SMMU_CR0			0x20
#define CR0_CMDQEN			(1 << 3)
#define CR0_EVTQEN			(1 << 2)
#define CR0_PRIQEN			(1 << 1)
#define CR0_SMMUEN			(1 << 0)

#define ARM_SMMU_CR0ACK			0x24

#define ARM_SMMU_CR1			0x28
#define CR1_SH_NSH			0
#define CR1_SH_OSH			2
#define CR1_SH_ISH			3
#define CR1_CACHE_NC			0
#define CR1_CACHE_WB			1
#define CR1_CACHE_WT			2
#define CR1_TABLE_SH_SHIFT		10
#define CR1_TABLE_OC_SHIFT		8
#define CR1_TABLE_IC_SHIFT		6
#define CR1_QUEUE_SH_SHIFT		4
#define CR1_QUEUE_OC_SHIFT		2
#define CR1_QUEUE_IC_SHIFT		0
#define ARM_SMMU_CR2			0x2c
#define CR2_PTM				(1 << 2)
#define CR2_RECINVSID			(1 << 1)
#define CR2_E2H				(1 << 0)

#define ARM_SMMU_GBPA			0x44
#define GBPA_ABORT			(1 << 20)
#define GBPA_UPDATE			(1 << 31)

#define ARM_SMMU_IRQ_CTRL		0x50
#define IRQ_CTRL_EVTQ_IRQEN		(1 << 2)
#define IRQ_CTRL_PRIQ_IRQEN		(1 << 1)
#define IRQ_CTRL_GERROR_IRQEN		(1 << 0)

#define ARM_SMMU_IRQ_CTRLACK		0x54

#define ARM_SMMU_GERROR			0x60
#define GERROR_SFM_ERR			(1 << 8)
#define GERROR_MSI_GERROR_ABT_ERR	(1 << 7)
#define GERROR_MSI_PRIQ_ABT_ERR		(1 << 6)
#define GERROR_MSI_EVTQ_ABT_ERR		(1 << 5)
#define GERROR_MSI_CMDQ_ABT_ERR		(1 << 4)
#define GERROR_PRIQ_ABT_ERR		(1 << 3)
#define GERROR_EVTQ_ABT_ERR		(1 << 2)
#define GERROR_CMDQ_ERR			(1 << 0)
#define GERROR_ERR_MASK			0xfd

#define ARM_SMMU_GERRORN		0x64

#define ARM_SMMU_GERROR_IRQ_CFG0	0x68
#define ARM_SMMU_GERROR_IRQ_CFG1	0x70
#define ARM_SMMU_GERROR_IRQ_CFG2	0x74
#define ARM_SMMU_STRTAB_BASE		0x80
#define STRTAB_BASE_RA			(1UL << 62)
#define STRTAB_BASE_ADDR_SHIFT		6
#define STRTAB_BASE_ADDR_MASK		0x3ffffffffffUL

#define ARM_SMMU_STRTAB_BASE_CFG	0x88
#define STRTAB_BASE_CFG_LOG2SIZE_SHIFT	0
#define STRTAB_BASE_CFG_LOG2SIZE_MASK	0x3f
#define STRTAB_BASE_CFG_SPLIT_SHIFT	6
#define STRTAB_BASE_CFG_SPLIT_MASK	0x1f
#define STRTAB_BASE_CFG_FMT_SHIFT	16
#define STRTAB_BASE_CFG_FMT_MASK	0x3
#define STRTAB_BASE_CFG_FMT_LINEAR	(0 << STRTAB_BASE_CFG_FMT_SHIFT)
#define STRTAB_BASE_CFG_FMT_2LVL	(1 << STRTAB_BASE_CFG_FMT_SHIFT)

#define ARM_SMMU_CMDQ_BASE		0x90
#define ARM_SMMU_CMDQ_PROD		0x98
#define ARM_SMMU_CMDQ_CONS		0x9c

#define ARM_SMMU_EVTQ_BASE		0xa0
#define ARM_SMMU_EVTQ_PROD		0x100a8
#define ARM_SMMU_EVTQ_CONS		0x100ac
#define ARM_SMMU_EVTQ_IRQ_CFG0		0xb0
#define ARM_SMMU_EVTQ_IRQ_CFG1		0xb8
#define ARM_SMMU_EVTQ_IRQ_CFG2		0xbc

#define ARM_SMMU_PRIQ_BASE		0xc0
#define ARM_SMMU_PRIQ_PROD		0x100c8
#define ARM_SMMU_PRIQ_CONS		0x100cc
#define ARM_SMMU_PRIQ_IRQ_CFG0		0xd0
#define ARM_SMMU_PRIQ_IRQ_CFG1		0xd8
#define ARM_SMMU_PRIQ_IRQ_CFG2		0xdc
/* Common MSI config fields */
#define MSI_CFG0_ADDR_SHIFT		2
#define MSI_CFG0_ADDR_MASK		0x3fffffffffffUL
#define MSI_CFG2_SH_SHIFT		4
#define MSI_CFG2_SH_NSH			(0UL << MSI_CFG2_SH_SHIFT)
#define MSI_CFG2_SH_OSH			(2UL << MSI_CFG2_SH_SHIFT)
#define MSI_CFG2_SH_ISH			(3UL << MSI_CFG2_SH_SHIFT)
#define MSI_CFG2_MEMATTR_SHIFT		0
#define MSI_CFG2_MEMATTR_DEVICE_nGnRE	(0x1 << MSI_CFG2_MEMATTR_SHIFT)
#define Q_IDX(q, p)			((p) & ((1 << (q)->max_n_shift) - 1))
#define Q_WRP(q, p)			((p) & (1 << (q)->max_n_shift))
#define Q_OVERFLOW_FLAG			(1 << 31)
#define Q_OVF(q, p)			((p) & Q_OVERFLOW_FLAG)
#define Q_ENT(q, p)			((q)->base +			\
					 Q_IDX(q, p) * (q)->ent_dwords)
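/*
 * Note on the encoding above: a queue pointer packs three fields into one
 * 32-bit word -- the entry index in the low max_n_shift bits, a single
 * wrap bit immediately above it, and the overflow flag at bit 31.
 * Q_IDX/Q_WRP/Q_OVF extract each field in turn.
 */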
#define Q_BASE_RWA			(1UL << 62)
#define Q_BASE_ADDR_SHIFT		5
#define Q_BASE_ADDR_MASK		0xfffffffffffUL
#define Q_BASE_LOG2SIZE_SHIFT		0
#define Q_BASE_LOG2SIZE_MASK		0x1fUL
/*
 * Stream table.
 *
 * Linear: Enough to cover 1 << IDR1.SIDSIZE entries
 * 2lvl: 128k L1 entries,
 *       256 lazy entries per table (each table covers a PCI bus)
 */
#define STRTAB_L1_SZ_SHIFT		20
#define STRTAB_SPLIT			8
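/*
 * With STRTAB_SPLIT == 8, the low 8 SID bits index a 256-entry L2 table
 * (one PCI bus worth of RIDs); the remaining SID bits index the L1 table,
 * whose 1 << STRTAB_L1_SZ_SHIFT bytes of 8-byte descriptors give the
 * 128k entries mentioned above.
 */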
#define STRTAB_L1_DESC_DWORDS		1
#define STRTAB_L1_DESC_SPAN_SHIFT	0
#define STRTAB_L1_DESC_SPAN_MASK	0x1fUL
#define STRTAB_L1_DESC_L2PTR_SHIFT	6
#define STRTAB_L1_DESC_L2PTR_MASK	0x3ffffffffffUL

#define STRTAB_STE_DWORDS		8
#define STRTAB_STE_0_V			(1UL << 0)
#define STRTAB_STE_0_CFG_SHIFT		1
#define STRTAB_STE_0_CFG_MASK		0x7UL
#define STRTAB_STE_0_CFG_ABORT		(0UL << STRTAB_STE_0_CFG_SHIFT)
#define STRTAB_STE_0_CFG_BYPASS		(4UL << STRTAB_STE_0_CFG_SHIFT)
#define STRTAB_STE_0_CFG_S1_TRANS	(5UL << STRTAB_STE_0_CFG_SHIFT)
#define STRTAB_STE_0_CFG_S2_TRANS	(6UL << STRTAB_STE_0_CFG_SHIFT)

#define STRTAB_STE_0_S1FMT_SHIFT	4
#define STRTAB_STE_0_S1FMT_LINEAR	(0UL << STRTAB_STE_0_S1FMT_SHIFT)
#define STRTAB_STE_0_S1CTXPTR_SHIFT	6
#define STRTAB_STE_0_S1CTXPTR_MASK	0x3ffffffffffUL
#define STRTAB_STE_0_S1CDMAX_SHIFT	59
#define STRTAB_STE_0_S1CDMAX_MASK	0x1fUL

#define STRTAB_STE_1_S1C_CACHE_NC	0UL
#define STRTAB_STE_1_S1C_CACHE_WBRA	1UL
#define STRTAB_STE_1_S1C_CACHE_WT	2UL
#define STRTAB_STE_1_S1C_CACHE_WB	3UL
#define STRTAB_STE_1_S1C_SH_NSH		0UL
#define STRTAB_STE_1_S1C_SH_OSH		2UL
#define STRTAB_STE_1_S1C_SH_ISH		3UL
#define STRTAB_STE_1_S1CIR_SHIFT	2
#define STRTAB_STE_1_S1COR_SHIFT	4
#define STRTAB_STE_1_S1CSH_SHIFT	6

#define STRTAB_STE_1_S1STALLD		(1UL << 27)

#define STRTAB_STE_1_EATS_ABT		0UL
#define STRTAB_STE_1_EATS_TRANS		1UL
#define STRTAB_STE_1_EATS_S1CHK		2UL
#define STRTAB_STE_1_EATS_SHIFT		28

#define STRTAB_STE_1_STRW_NSEL1		0UL
#define STRTAB_STE_1_STRW_EL2		2UL
#define STRTAB_STE_1_STRW_SHIFT		30

#define STRTAB_STE_1_SHCFG_INCOMING	1UL
#define STRTAB_STE_1_SHCFG_SHIFT	44

#define STRTAB_STE_2_S2VMID_SHIFT	0
#define STRTAB_STE_2_S2VMID_MASK	0xffffUL
#define STRTAB_STE_2_VTCR_SHIFT		32
#define STRTAB_STE_2_VTCR_MASK		0x7ffffUL
#define STRTAB_STE_2_S2AA64		(1UL << 51)
#define STRTAB_STE_2_S2ENDI		(1UL << 52)
#define STRTAB_STE_2_S2PTW		(1UL << 54)
#define STRTAB_STE_2_S2R		(1UL << 58)

#define STRTAB_STE_3_S2TTB_SHIFT	4
#define STRTAB_STE_3_S2TTB_MASK		0xfffffffffffUL
/* Context descriptor (stage-1 only) */
#define CTXDESC_CD_DWORDS		8
#define CTXDESC_CD_0_TCR_T0SZ_SHIFT	0
#define ARM64_TCR_T0SZ_SHIFT		0
#define ARM64_TCR_T0SZ_MASK		0x1fUL
#define CTXDESC_CD_0_TCR_TG0_SHIFT	6
#define ARM64_TCR_TG0_SHIFT		14
#define ARM64_TCR_TG0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_IRGN0_SHIFT	8
#define ARM64_TCR_IRGN0_SHIFT		8
#define ARM64_TCR_IRGN0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_ORGN0_SHIFT	10
#define ARM64_TCR_ORGN0_SHIFT		10
#define ARM64_TCR_ORGN0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_SH0_SHIFT	12
#define ARM64_TCR_SH0_SHIFT		12
#define ARM64_TCR_SH0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_EPD0_SHIFT	14
#define ARM64_TCR_EPD0_SHIFT		7
#define ARM64_TCR_EPD0_MASK		0x1UL
#define CTXDESC_CD_0_TCR_EPD1_SHIFT	30
#define ARM64_TCR_EPD1_SHIFT		23
#define ARM64_TCR_EPD1_MASK		0x1UL

#define CTXDESC_CD_0_ENDI		(1UL << 15)
#define CTXDESC_CD_0_V			(1UL << 31)

#define CTXDESC_CD_0_TCR_IPS_SHIFT	32
#define ARM64_TCR_IPS_SHIFT		32
#define ARM64_TCR_IPS_MASK		0x7UL
#define CTXDESC_CD_0_TCR_TBI0_SHIFT	38
#define ARM64_TCR_TBI0_SHIFT		37
#define ARM64_TCR_TBI0_MASK		0x1UL

#define CTXDESC_CD_0_AA64		(1UL << 41)
#define CTXDESC_CD_0_R			(1UL << 45)
#define CTXDESC_CD_0_A			(1UL << 46)
#define CTXDESC_CD_0_ASET_SHIFT		47
#define CTXDESC_CD_0_ASET_SHARED	(0UL << CTXDESC_CD_0_ASET_SHIFT)
#define CTXDESC_CD_0_ASET_PRIVATE	(1UL << CTXDESC_CD_0_ASET_SHIFT)
#define CTXDESC_CD_0_ASID_SHIFT		48
#define CTXDESC_CD_0_ASID_MASK		0xffffUL

#define CTXDESC_CD_1_TTB0_SHIFT		4
#define CTXDESC_CD_1_TTB0_MASK		0xfffffffffffUL

#define CTXDESC_CD_3_MAIR_SHIFT		0
/* Convert between AArch64 (CPU) TCR format and SMMU CD format */
#define ARM_SMMU_TCR2CD(tcr, fld)					\
	(((tcr) >> ARM64_TCR_##fld##_SHIFT & ARM64_TCR_##fld##_MASK)	\
	 << CTXDESC_CD_0_TCR_##fld##_SHIFT)
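/*
 * For example, ARM_SMMU_TCR2CD(tcr, T0SZ) expands to:
 *
 *	((tcr >> ARM64_TCR_T0SZ_SHIFT & ARM64_TCR_T0SZ_MASK)
 *		<< CTXDESC_CD_0_TCR_T0SZ_SHIFT)
 *
 * i.e. it extracts a field from the CPU's TCR layout and shifts it into
 * place in the context descriptor's TCR layout.
 */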
/* Command queue */
#define CMDQ_ENT_DWORDS			2
#define CMDQ_MAX_SZ_SHIFT		8

#define CMDQ_ERR_SHIFT			24
#define CMDQ_ERR_MASK			0x7f
#define CMDQ_ERR_CERROR_NONE_IDX	0
#define CMDQ_ERR_CERROR_ILL_IDX		1
#define CMDQ_ERR_CERROR_ABT_IDX		2

#define CMDQ_0_OP_SHIFT			0
#define CMDQ_0_OP_MASK			0xffUL
#define CMDQ_0_SSV			(1UL << 11)

#define CMDQ_PREFETCH_0_SID_SHIFT	32
#define CMDQ_PREFETCH_1_SIZE_SHIFT	0
#define CMDQ_PREFETCH_1_ADDR_MASK	~0xfffUL

#define CMDQ_CFGI_0_SID_SHIFT		32
#define CMDQ_CFGI_0_SID_MASK		0xffffffffUL
#define CMDQ_CFGI_1_LEAF		(1UL << 0)
#define CMDQ_CFGI_1_RANGE_SHIFT		0
#define CMDQ_CFGI_1_RANGE_MASK		0x1fUL

#define CMDQ_TLBI_0_VMID_SHIFT		32
#define CMDQ_TLBI_0_ASID_SHIFT		48
#define CMDQ_TLBI_1_LEAF		(1UL << 0)
#define CMDQ_TLBI_1_VA_MASK		~0xfffUL
#define CMDQ_TLBI_1_IPA_MASK		0xfffffffff000UL

#define CMDQ_PRI_0_SSID_SHIFT		12
#define CMDQ_PRI_0_SSID_MASK		0xfffffUL
#define CMDQ_PRI_0_SID_SHIFT		32
#define CMDQ_PRI_0_SID_MASK		0xffffffffUL
#define CMDQ_PRI_1_GRPID_SHIFT		0
#define CMDQ_PRI_1_GRPID_MASK		0x1ffUL
#define CMDQ_PRI_1_RESP_SHIFT		12
#define CMDQ_PRI_1_RESP_DENY		(0UL << CMDQ_PRI_1_RESP_SHIFT)
#define CMDQ_PRI_1_RESP_FAIL		(1UL << CMDQ_PRI_1_RESP_SHIFT)
#define CMDQ_PRI_1_RESP_SUCC		(2UL << CMDQ_PRI_1_RESP_SHIFT)

#define CMDQ_SYNC_0_CS_SHIFT		12
#define CMDQ_SYNC_0_CS_NONE		(0UL << CMDQ_SYNC_0_CS_SHIFT)
#define CMDQ_SYNC_0_CS_SEV		(2UL << CMDQ_SYNC_0_CS_SHIFT)
/* Event queue */
#define EVTQ_ENT_DWORDS			4
#define EVTQ_MAX_SZ_SHIFT		7

#define EVTQ_0_ID_SHIFT			0
#define EVTQ_0_ID_MASK			0xffUL

/* PRI queue */
#define PRIQ_ENT_DWORDS			2
#define PRIQ_MAX_SZ_SHIFT		8

#define PRIQ_0_SID_SHIFT		0
#define PRIQ_0_SID_MASK			0xffffffffUL
#define PRIQ_0_SSID_SHIFT		32
#define PRIQ_0_SSID_MASK		0xfffffUL
#define PRIQ_0_PERM_PRIV		(1UL << 58)
#define PRIQ_0_PERM_EXEC		(1UL << 59)
#define PRIQ_0_PERM_READ		(1UL << 60)
#define PRIQ_0_PERM_WRITE		(1UL << 61)
#define PRIQ_0_PRG_LAST			(1UL << 62)
#define PRIQ_0_SSID_V			(1UL << 63)

#define PRIQ_1_PRG_IDX_SHIFT		0
#define PRIQ_1_PRG_IDX_MASK		0x1ffUL
#define PRIQ_1_ADDR_SHIFT		12
#define PRIQ_1_ADDR_MASK		0xfffffffffffffUL
/* High-level queue structures */
#define ARM_SMMU_POLL_TIMEOUT_US	100
#define ARM_SMMU_CMDQ_DRAIN_TIMEOUT_US	1000000 /* 1s! */

#define MSI_IOVA_BASE			0x8000000
#define MSI_IOVA_LENGTH			0x100000
static bool disable_bypass;
module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
enum pri_resp {
	PRI_RESP_DENY,
	PRI_RESP_FAIL,
	PRI_RESP_SUCC,
};

enum arm_smmu_msi_index {
	EVTQ_MSI_INDEX,
	GERROR_MSI_INDEX,
	PRIQ_MSI_INDEX,
	ARM_SMMU_MAX_MSIS,
};
static phys_addr_t arm_smmu_msi_cfg[ARM_SMMU_MAX_MSIS][3] = {
	[EVTQ_MSI_INDEX] = {
		ARM_SMMU_EVTQ_IRQ_CFG0,
		ARM_SMMU_EVTQ_IRQ_CFG1,
		ARM_SMMU_EVTQ_IRQ_CFG2,
	},
	[GERROR_MSI_INDEX] = {
		ARM_SMMU_GERROR_IRQ_CFG0,
		ARM_SMMU_GERROR_IRQ_CFG1,
		ARM_SMMU_GERROR_IRQ_CFG2,
	},
	[PRIQ_MSI_INDEX] = {
		ARM_SMMU_PRIQ_IRQ_CFG0,
		ARM_SMMU_PRIQ_IRQ_CFG1,
		ARM_SMMU_PRIQ_IRQ_CFG2,
	},
};
/* One queue entry */
struct arm_smmu_cmdq_ent {
	/* Common fields */
	u8				opcode;
	bool				substream_valid;

	/* Command-specific fields */
	union {
		#define CMDQ_OP_PREFETCH_CFG	0x1
		struct {
			u32		sid;
			u8		size;
			u64		addr;
		} prefetch;

		#define CMDQ_OP_CFGI_STE	0x3
		#define CMDQ_OP_CFGI_ALL	0x4
		struct {
			u32		sid;
			union {
				bool	leaf;
				u8	span;
			};
		} cfgi;

		#define CMDQ_OP_TLBI_NH_ASID	0x11
		#define CMDQ_OP_TLBI_NH_VA	0x12
		#define CMDQ_OP_TLBI_EL2_ALL	0x20
		#define CMDQ_OP_TLBI_S12_VMALL	0x28
		#define CMDQ_OP_TLBI_S2_IPA	0x2a
		#define CMDQ_OP_TLBI_NSNH_ALL	0x30
		struct {
			u16		asid;
			u16		vmid;
			bool		leaf;
			u64		addr;
		} tlbi;

		#define CMDQ_OP_PRI_RESP	0x41
		struct {
			u32		sid;
			u32		ssid;
			u16		grpid;
			enum pri_resp	resp;
		} pri;

		#define CMDQ_OP_CMD_SYNC	0x46
	};
};
struct arm_smmu_queue {
	int				irq; /* Wired interrupt */

	__le64				*base;
	dma_addr_t			base_dma;
	u64				q_base;

	size_t				ent_dwords;
	u32				max_n_shift;
	u32				prod;
	u32				cons;

	u32 __iomem			*prod_reg;
	u32 __iomem			*cons_reg;
};

struct arm_smmu_cmdq {
	struct arm_smmu_queue		q;
	spinlock_t			lock;
};

struct arm_smmu_evtq {
	struct arm_smmu_queue		q;
	u32				max_stalls;
};

struct arm_smmu_priq {
	struct arm_smmu_queue		q;
};
/* High-level stream table and context descriptor structures */
struct arm_smmu_strtab_l1_desc {
	u8				span;

	__le64				*l2ptr;
	dma_addr_t			l2ptr_dma;
};

struct arm_smmu_s1_cfg {
	__le64				*cdptr;
	dma_addr_t			cdptr_dma;

	struct arm_smmu_ctx_desc {
		u16	asid;
		u64	ttbr;
		u64	tcr;
		u64	mair;
	}				cd;
};

struct arm_smmu_s2_cfg {
	u16				vmid;
	u64				vttbr;
	u64				vtcr;
};
struct arm_smmu_strtab_ent {
	/*
	 * An STE is "assigned" if the master emitting the corresponding SID
	 * is attached to a domain. The behaviour of an unassigned STE is
	 * determined by the disable_bypass parameter, whereas an assigned
	 * STE behaves according to s1_cfg/s2_cfg, which themselves are
	 * configured according to the domain type.
	 */
	bool				assigned;
	struct arm_smmu_s1_cfg		*s1_cfg;
	struct arm_smmu_s2_cfg		*s2_cfg;
};
struct arm_smmu_strtab_cfg {
	__le64				*strtab;
	dma_addr_t			strtab_dma;
	struct arm_smmu_strtab_l1_desc	*l1_desc;
	unsigned int			num_l1_ents;

	u64				strtab_base;
	u32				strtab_base_cfg;
};
/* An SMMUv3 instance */
struct arm_smmu_device {
	struct device			*dev;
	void __iomem			*base;

#define ARM_SMMU_FEAT_2_LVL_STRTAB	(1 << 0)
#define ARM_SMMU_FEAT_2_LVL_CDTAB	(1 << 1)
#define ARM_SMMU_FEAT_TT_LE		(1 << 2)
#define ARM_SMMU_FEAT_TT_BE		(1 << 3)
#define ARM_SMMU_FEAT_PRI		(1 << 4)
#define ARM_SMMU_FEAT_ATS		(1 << 5)
#define ARM_SMMU_FEAT_SEV		(1 << 6)
#define ARM_SMMU_FEAT_MSI		(1 << 7)
#define ARM_SMMU_FEAT_COHERENCY		(1 << 8)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 9)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 10)
#define ARM_SMMU_FEAT_STALLS		(1 << 11)
#define ARM_SMMU_FEAT_HYP		(1 << 12)
	u32				features;

#define ARM_SMMU_OPT_SKIP_PREFETCH	(1 << 0)
	u32				options;

	struct arm_smmu_cmdq		cmdq;
	struct arm_smmu_evtq		evtq;
	struct arm_smmu_priq		priq;

	int				gerr_irq;

	unsigned long			ias; /* IPA */
	unsigned long			oas; /* PA */
	unsigned long			pgsize_bitmap;

#define ARM_SMMU_MAX_ASIDS		(1 << 16)
	unsigned int			asid_bits;
	DECLARE_BITMAP(asid_map, ARM_SMMU_MAX_ASIDS);

#define ARM_SMMU_MAX_VMIDS		(1 << 16)
	unsigned int			vmid_bits;
	DECLARE_BITMAP(vmid_map, ARM_SMMU_MAX_VMIDS);

	unsigned int			ssid_bits;
	unsigned int			sid_bits;

	struct arm_smmu_strtab_cfg	strtab_cfg;

	/* IOMMU core code handle */
	struct iommu_device		iommu;
};
/* SMMU private data for each master */
struct arm_smmu_master_data {
	struct arm_smmu_device		*smmu;
	struct arm_smmu_strtab_ent	ste;
};
/* SMMU private data for an IOMMU domain */
enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
	ARM_SMMU_DOMAIN_BYPASS,
};
struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct mutex			init_mutex; /* Protects smmu pointer */

	struct io_pgtable_ops		*pgtbl_ops;
	spinlock_t			pgtbl_lock;

	enum arm_smmu_domain_stage	stage;
	union {
		struct arm_smmu_s1_cfg	s1_cfg;
		struct arm_smmu_s2_cfg	s2_cfg;
	};

	struct iommu_domain		domain;
};
struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" },
	{ 0, NULL},
};

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}
static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}
/* Low-level queue manipulation functions */
static bool queue_full(struct arm_smmu_queue *q)
{
	return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
	       Q_WRP(q, q->prod) != Q_WRP(q, q->cons);
}

static bool queue_empty(struct arm_smmu_queue *q)
{
	return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
	       Q_WRP(q, q->prod) == Q_WRP(q, q->cons);
}
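/*
 * Note that a full queue and an empty queue have identical index bits;
 * the wrap bits disambiguate the two cases, differing when the producer
 * has lapped the consumer (full) and matching when it hasn't (empty).
 */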
static void queue_sync_cons(struct arm_smmu_queue *q)
{
	q->cons = readl_relaxed(q->cons_reg);
}

static void queue_inc_cons(struct arm_smmu_queue *q)
{
	u32 cons = (Q_WRP(q, q->cons) | Q_IDX(q, q->cons)) + 1;

	q->cons = Q_OVF(q, q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons);
	writel(q->cons, q->cons_reg);
}

static int queue_sync_prod(struct arm_smmu_queue *q)
{
	int ret = 0;
	u32 prod = readl_relaxed(q->prod_reg);

	if (Q_OVF(q, prod) != Q_OVF(q, q->prod))
		ret = -EOVERFLOW;

	q->prod = prod;
	return ret;
}

static void queue_inc_prod(struct arm_smmu_queue *q)
{
	u32 prod = (Q_WRP(q, q->prod) | Q_IDX(q, q->prod)) + 1;

	q->prod = Q_OVF(q, q->prod) | Q_WRP(q, prod) | Q_IDX(q, prod);
	writel(q->prod, q->prod_reg);
}
/*
 * Wait for the SMMU to consume items. If drain is true, wait until the queue
 * is empty. Otherwise, wait until there is at least one free slot.
 */
static int queue_poll_cons(struct arm_smmu_queue *q, bool drain, bool wfe)
{
	ktime_t timeout;
	unsigned int delay = 1;

	/* Wait longer if it's a queue drain */
	timeout = ktime_add_us(ktime_get(), drain ?
					    ARM_SMMU_CMDQ_DRAIN_TIMEOUT_US :
					    ARM_SMMU_POLL_TIMEOUT_US);

	while (queue_sync_cons(q), (drain ? !queue_empty(q) : queue_full(q))) {
		if (ktime_compare(ktime_get(), timeout) > 0)
			return -ETIMEDOUT;

		if (wfe) {
			wfe();
		} else {
			cpu_relax();
			udelay(delay);
			delay *= 2;
		}
	}

	return 0;
}
static void queue_write(__le64 *dst, u64 *src, size_t n_dwords)
{
	int i;

	for (i = 0; i < n_dwords; ++i)
		*dst++ = cpu_to_le64(*src++);
}

static int queue_insert_raw(struct arm_smmu_queue *q, u64 *ent)
{
	if (queue_full(q))
		return -ENOSPC;

	queue_write(Q_ENT(q, q->prod), ent, q->ent_dwords);
	queue_inc_prod(q);
	return 0;
}

static void queue_read(__le64 *dst, u64 *src, size_t n_dwords)
{
	int i;

	for (i = 0; i < n_dwords; ++i)
		*dst++ = le64_to_cpu(*src++);
}

static int queue_remove_raw(struct arm_smmu_queue *q, u64 *ent)
{
	if (queue_empty(q))
		return -EAGAIN;

	queue_read(ent, Q_ENT(q, q->cons), q->ent_dwords);
	queue_inc_cons(q);
	return 0;
}
/* High-level queue accessors */
static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
{
	memset(cmd, 0, CMDQ_ENT_DWORDS << 3);
	cmd[0] |= (ent->opcode & CMDQ_0_OP_MASK) << CMDQ_0_OP_SHIFT;

	switch (ent->opcode) {
	case CMDQ_OP_TLBI_EL2_ALL:
	case CMDQ_OP_TLBI_NSNH_ALL:
		break;
	case CMDQ_OP_PREFETCH_CFG:
		cmd[0] |= (u64)ent->prefetch.sid << CMDQ_PREFETCH_0_SID_SHIFT;
		cmd[1] |= ent->prefetch.size << CMDQ_PREFETCH_1_SIZE_SHIFT;
		cmd[1] |= ent->prefetch.addr & CMDQ_PREFETCH_1_ADDR_MASK;
		break;
	case CMDQ_OP_CFGI_STE:
		cmd[0] |= (u64)ent->cfgi.sid << CMDQ_CFGI_0_SID_SHIFT;
		cmd[1] |= ent->cfgi.leaf ? CMDQ_CFGI_1_LEAF : 0;
		break;
	case CMDQ_OP_CFGI_ALL:
		/* Cover the entire SID range */
		cmd[1] |= CMDQ_CFGI_1_RANGE_MASK << CMDQ_CFGI_1_RANGE_SHIFT;
		break;
	case CMDQ_OP_TLBI_NH_VA:
		cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT;
		cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0;
		cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK;
		break;
	case CMDQ_OP_TLBI_S2_IPA:
		cmd[0] |= (u64)ent->tlbi.vmid << CMDQ_TLBI_0_VMID_SHIFT;
		cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0;
		cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_IPA_MASK;
		break;
	case CMDQ_OP_TLBI_NH_ASID:
		cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT;
		/* Fallthrough */
	case CMDQ_OP_TLBI_S12_VMALL:
		cmd[0] |= (u64)ent->tlbi.vmid << CMDQ_TLBI_0_VMID_SHIFT;
		break;
	case CMDQ_OP_PRI_RESP:
		cmd[0] |= ent->substream_valid ? CMDQ_0_SSV : 0;
		cmd[0] |= ent->pri.ssid << CMDQ_PRI_0_SSID_SHIFT;
		cmd[0] |= (u64)ent->pri.sid << CMDQ_PRI_0_SID_SHIFT;
		cmd[1] |= ent->pri.grpid << CMDQ_PRI_1_GRPID_SHIFT;
		switch (ent->pri.resp) {
		case PRI_RESP_DENY:
			cmd[1] |= CMDQ_PRI_1_RESP_DENY;
			break;
		case PRI_RESP_FAIL:
			cmd[1] |= CMDQ_PRI_1_RESP_FAIL;
			break;
		case PRI_RESP_SUCC:
			cmd[1] |= CMDQ_PRI_1_RESP_SUCC;
			break;
		default:
			return -EINVAL;
		}
		break;
	case CMDQ_OP_CMD_SYNC:
		cmd[0] |= CMDQ_SYNC_0_CS_SEV;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}
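/*
 * As an illustration (the values here are made up), invalidating a single
 * leaf TLB entry by VA for ASID 1 builds and issues the following command:
 *
 *	struct arm_smmu_cmdq_ent ent = {
 *		.opcode	= CMDQ_OP_TLBI_NH_VA,
 *		.tlbi	= {
 *			.asid	= 1,
 *			.addr	= iova,
 *			.leaf	= true,
 *		},
 *	};
 *
 *	arm_smmu_cmdq_issue_cmd(smmu, &ent);
 *
 * which is essentially what arm_smmu_tlb_inv_range_nosync() does below.
 */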
static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
{
	static const char *cerror_str[] = {
		[CMDQ_ERR_CERROR_NONE_IDX]	= "No error",
		[CMDQ_ERR_CERROR_ILL_IDX]	= "Illegal command",
		[CMDQ_ERR_CERROR_ABT_IDX]	= "Abort on command fetch",
	};

	int i;
	u64 cmd[CMDQ_ENT_DWORDS];
	struct arm_smmu_queue *q = &smmu->cmdq.q;
	u32 cons = readl_relaxed(q->cons_reg);
	u32 idx = cons >> CMDQ_ERR_SHIFT & CMDQ_ERR_MASK;
	struct arm_smmu_cmdq_ent cmd_sync = {
		.opcode = CMDQ_OP_CMD_SYNC,
	};

	dev_err(smmu->dev, "CMDQ error (cons 0x%08x): %s\n", cons,
		idx < ARRAY_SIZE(cerror_str) ? cerror_str[idx] : "Unknown");

	switch (idx) {
	case CMDQ_ERR_CERROR_ABT_IDX:
		dev_err(smmu->dev, "retrying command fetch\n");
	case CMDQ_ERR_CERROR_NONE_IDX:
		return;
	case CMDQ_ERR_CERROR_ILL_IDX:
		/* Fallthrough */
	default:
		break;
	}

	/*
	 * We may have concurrent producers, so we need to be careful
	 * not to touch any of the shadow cmdq state.
	 */
	queue_read(cmd, Q_ENT(q, cons), q->ent_dwords);
	dev_err(smmu->dev, "skipping command in error state:\n");
	for (i = 0; i < ARRAY_SIZE(cmd); ++i)
		dev_err(smmu->dev, "\t0x%016llx\n", (unsigned long long)cmd[i]);

	/* Convert the erroneous command into a CMD_SYNC */
	if (arm_smmu_cmdq_build_cmd(cmd, &cmd_sync)) {
		dev_err(smmu->dev, "failed to convert to CMD_SYNC\n");
		return;
	}

	queue_write(Q_ENT(q, cons), cmd, q->ent_dwords);
}
static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
				    struct arm_smmu_cmdq_ent *ent)
{
	u64 cmd[CMDQ_ENT_DWORDS];
	unsigned long flags;
	bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
	struct arm_smmu_queue *q = &smmu->cmdq.q;

	if (arm_smmu_cmdq_build_cmd(cmd, ent)) {
		dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n",
			 ent->opcode);
		return;
	}

	spin_lock_irqsave(&smmu->cmdq.lock, flags);
	while (queue_insert_raw(q, cmd) == -ENOSPC) {
		if (queue_poll_cons(q, false, wfe))
			dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
	}

	if (ent->opcode == CMDQ_OP_CMD_SYNC && queue_poll_cons(q, true, wfe))
		dev_err_ratelimited(smmu->dev, "CMD_SYNC timeout\n");
	spin_unlock_irqrestore(&smmu->cmdq.lock, flags);
}
/* Context descriptor manipulation functions */
static u64 arm_smmu_cpu_tcr_to_cd(u64 tcr)
{
	u64 val = 0;

	/* Repack the TCR. Just care about TTBR0 for now */
	val |= ARM_SMMU_TCR2CD(tcr, T0SZ);
	val |= ARM_SMMU_TCR2CD(tcr, TG0);
	val |= ARM_SMMU_TCR2CD(tcr, IRGN0);
	val |= ARM_SMMU_TCR2CD(tcr, ORGN0);
	val |= ARM_SMMU_TCR2CD(tcr, SH0);
	val |= ARM_SMMU_TCR2CD(tcr, EPD0);
	val |= ARM_SMMU_TCR2CD(tcr, EPD1);
	val |= ARM_SMMU_TCR2CD(tcr, IPS);
	val |= ARM_SMMU_TCR2CD(tcr, TBI0);

	return val;
}
static void arm_smmu_write_ctx_desc(struct arm_smmu_device *smmu,
				    struct arm_smmu_s1_cfg *cfg)
{
	u64 val;

	/*
	 * We don't need to issue any invalidation here, as we'll invalidate
	 * the STE when installing the new entry anyway.
	 */
	val = arm_smmu_cpu_tcr_to_cd(cfg->cd.tcr) |
#ifdef __BIG_ENDIAN
	      CTXDESC_CD_0_ENDI |
#endif
	      CTXDESC_CD_0_R | CTXDESC_CD_0_A | CTXDESC_CD_0_ASET_PRIVATE |
	      CTXDESC_CD_0_AA64 | (u64)cfg->cd.asid << CTXDESC_CD_0_ASID_SHIFT |
	      CTXDESC_CD_0_V;
	cfg->cdptr[0] = cpu_to_le64(val);

	val = cfg->cd.ttbr & CTXDESC_CD_1_TTB0_MASK << CTXDESC_CD_1_TTB0_SHIFT;
	cfg->cdptr[1] = cpu_to_le64(val);

	cfg->cdptr[3] = cpu_to_le64(cfg->cd.mair << CTXDESC_CD_3_MAIR_SHIFT);
}
/* Stream table manipulation functions */
static void
arm_smmu_write_strtab_l1_desc(__le64 *dst, struct arm_smmu_strtab_l1_desc *desc)
{
	u64 val = 0;

	val |= (desc->span & STRTAB_L1_DESC_SPAN_MASK)
		<< STRTAB_L1_DESC_SPAN_SHIFT;
	val |= desc->l2ptr_dma &
	       STRTAB_L1_DESC_L2PTR_MASK << STRTAB_L1_DESC_L2PTR_SHIFT;

	*dst = cpu_to_le64(val);
}

static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid)
{
	struct arm_smmu_cmdq_ent cmd = {
		.opcode	= CMDQ_OP_CFGI_STE,
		.cfgi	= {
			.sid	= sid,
			.leaf	= true,
		},
	};

	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	cmd.opcode = CMDQ_OP_CMD_SYNC;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
}
static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
				      __le64 *dst, struct arm_smmu_strtab_ent *ste)
{
	/*
	 * This is hideously complicated, but we only really care about
	 * three cases at the moment:
	 *
	 * 1. Invalid (all zero) -> bypass/fault (init)
	 * 2. Bypass/fault -> translation/bypass (attach)
	 * 3. Translation/bypass -> bypass/fault (detach)
	 *
	 * Given that we can't update the STE atomically and the SMMU
	 * doesn't read the thing in a defined order, that leaves us
	 * with the following maintenance requirements:
	 *
	 * 1. Update Config, return (init time STEs aren't live)
	 * 2. Write everything apart from dword 0, sync, write dword 0, sync
	 * 3. Update Config, sync
	 */
	u64 val = le64_to_cpu(dst[0]);
	bool ste_live = false;
	struct arm_smmu_cmdq_ent prefetch_cmd = {
		.opcode		= CMDQ_OP_PREFETCH_CFG,
		.prefetch	= {
			.sid	= sid,
		},
	};

	if (val & STRTAB_STE_0_V) {
		u64 cfg;

		cfg = val & STRTAB_STE_0_CFG_MASK << STRTAB_STE_0_CFG_SHIFT;
		switch (cfg) {
		case STRTAB_STE_0_CFG_BYPASS:
			break;
		case STRTAB_STE_0_CFG_S1_TRANS:
		case STRTAB_STE_0_CFG_S2_TRANS:
			ste_live = true;
			break;
		case STRTAB_STE_0_CFG_ABORT:
			if (disable_bypass)
				break;
		default:
			BUG(); /* STE corruption */
		}
	}

	/* Nuke the existing STE_0 value, as we're going to rewrite it */
	val = STRTAB_STE_0_V;

	/* Bypass/fault */
	if (!ste->assigned || !(ste->s1_cfg || ste->s2_cfg)) {
		if (!ste->assigned && disable_bypass)
			val |= STRTAB_STE_0_CFG_ABORT;
		else
			val |= STRTAB_STE_0_CFG_BYPASS;

		dst[0] = cpu_to_le64(val);
		dst[1] = cpu_to_le64(STRTAB_STE_1_SHCFG_INCOMING
			 << STRTAB_STE_1_SHCFG_SHIFT);
		dst[2] = 0; /* Nuke the VMID */
		if (ste_live)
			arm_smmu_sync_ste_for_sid(smmu, sid);
		return;
	}

	if (ste->s1_cfg) {
		BUG_ON(ste_live);
		dst[1] = cpu_to_le64(
			 STRTAB_STE_1_S1C_CACHE_WBRA
			 << STRTAB_STE_1_S1CIR_SHIFT |
			 STRTAB_STE_1_S1C_CACHE_WBRA
			 << STRTAB_STE_1_S1COR_SHIFT |
			 STRTAB_STE_1_S1C_SH_ISH << STRTAB_STE_1_S1CSH_SHIFT |
#ifdef CONFIG_PCI_ATS
			 STRTAB_STE_1_EATS_TRANS << STRTAB_STE_1_EATS_SHIFT |
#endif
			 STRTAB_STE_1_STRW_NSEL1 << STRTAB_STE_1_STRW_SHIFT);

		if (smmu->features & ARM_SMMU_FEAT_STALLS)
			dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);

		val |= (ste->s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK
			<< STRTAB_STE_0_S1CTXPTR_SHIFT) |
			STRTAB_STE_0_CFG_S1_TRANS;
	}

	if (ste->s2_cfg) {
		BUG_ON(ste_live);
		dst[2] = cpu_to_le64(
			 ste->s2_cfg->vmid << STRTAB_STE_2_S2VMID_SHIFT |
			 (ste->s2_cfg->vtcr & STRTAB_STE_2_VTCR_MASK)
			  << STRTAB_STE_2_VTCR_SHIFT |
#ifdef __BIG_ENDIAN
			 STRTAB_STE_2_S2ENDI |
#endif
			 STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2AA64 |
			 STRTAB_STE_2_S2R);

		dst[3] = cpu_to_le64(ste->s2_cfg->vttbr &
			 STRTAB_STE_3_S2TTB_MASK << STRTAB_STE_3_S2TTB_SHIFT);

		val |= STRTAB_STE_0_CFG_S2_TRANS;
	}

	arm_smmu_sync_ste_for_sid(smmu, sid);
	dst[0] = cpu_to_le64(val);
	arm_smmu_sync_ste_for_sid(smmu, sid);

	/* It's likely that we'll want to use the new STE soon */
	if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH))
		arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
}
static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent)
{
	unsigned int i;
	struct arm_smmu_strtab_ent ste = { .assigned = false };

	for (i = 0; i < nent; ++i) {
		arm_smmu_write_strtab_ent(NULL, -1, strtab, &ste);
		strtab += STRTAB_STE_DWORDS;
	}
}

static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
{
	size_t size;
	void *strtab;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
	struct arm_smmu_strtab_l1_desc *desc = &cfg->l1_desc[sid >> STRTAB_SPLIT];

	if (desc->l2ptr)
		return 0;

	size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3);
	strtab = &cfg->strtab[(sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS];

	desc->span = STRTAB_SPLIT + 1;
	desc->l2ptr = dmam_alloc_coherent(smmu->dev, size, &desc->l2ptr_dma,
					  GFP_KERNEL | __GFP_ZERO);
	if (!desc->l2ptr) {
		dev_err(smmu->dev,
			"failed to allocate l2 stream table for SID %u\n",
			sid);
		return -ENOMEM;
	}

	arm_smmu_init_bypass_stes(desc->l2ptr, 1 << STRTAB_SPLIT);
	arm_smmu_write_strtab_l1_desc(strtab, desc);
	return 0;
}
/* IRQ and event handlers */
static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
{
	int i;
	struct arm_smmu_device *smmu = dev;
	struct arm_smmu_queue *q = &smmu->evtq.q;
	u64 evt[EVTQ_ENT_DWORDS];

	do {
		while (!queue_remove_raw(q, evt)) {
			u8 id = evt[0] >> EVTQ_0_ID_SHIFT & EVTQ_0_ID_MASK;

			dev_info(smmu->dev, "event 0x%02x received:\n", id);
			for (i = 0; i < ARRAY_SIZE(evt); ++i)
				dev_info(smmu->dev, "\t0x%016llx\n",
					 (unsigned long long)evt[i]);
		}

		/*
		 * Not much we can do on overflow, so scream and pretend we're
		 * nice.
		 */
		if (queue_sync_prod(q) == -EOVERFLOW)
			dev_err(smmu->dev, "EVTQ overflow detected -- events lost\n");
	} while (!queue_empty(q));

	/* Sync our overflow flag, as we believe we're up to speed */
	q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons);
	return IRQ_HANDLED;
}
static void arm_smmu_handle_ppr(struct arm_smmu_device *smmu, u64 *evt)
{
	u32 sid, ssid;
	u16 grpid;
	bool ssv, last;

	sid = evt[0] >> PRIQ_0_SID_SHIFT & PRIQ_0_SID_MASK;
	ssv = evt[0] & PRIQ_0_SSID_V;
	ssid = ssv ? evt[0] >> PRIQ_0_SSID_SHIFT & PRIQ_0_SSID_MASK : 0;
	last = evt[0] & PRIQ_0_PRG_LAST;
	grpid = evt[1] >> PRIQ_1_PRG_IDX_SHIFT & PRIQ_1_PRG_IDX_MASK;

	dev_info(smmu->dev, "unexpected PRI request received:\n");
	dev_info(smmu->dev,
		 "\tsid 0x%08x.0x%05x: [%u%s] %sprivileged %s%s%s access at iova 0x%016llx\n",
		 sid, ssid, grpid, last ? "L" : "",
		 evt[0] & PRIQ_0_PERM_PRIV ? "" : "un",
		 evt[0] & PRIQ_0_PERM_READ ? "R" : "",
		 evt[0] & PRIQ_0_PERM_WRITE ? "W" : "",
		 evt[0] & PRIQ_0_PERM_EXEC ? "X" : "",
		 evt[1] & PRIQ_1_ADDR_MASK << PRIQ_1_ADDR_SHIFT);

	if (last) {
		struct arm_smmu_cmdq_ent cmd = {
			.opcode			= CMDQ_OP_PRI_RESP,
			.substream_valid	= ssv,
			.pri			= {
				.sid	= sid,
				.ssid	= ssid,
				.grpid	= grpid,
				.resp	= PRI_RESP_DENY,
			},
		};

		arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	}
}
static irqreturn_t arm_smmu_priq_thread(int irq, void *dev)
{
	struct arm_smmu_device *smmu = dev;
	struct arm_smmu_queue *q = &smmu->priq.q;
	u64 evt[PRIQ_ENT_DWORDS];

	do {
		while (!queue_remove_raw(q, evt))
			arm_smmu_handle_ppr(smmu, evt);

		if (queue_sync_prod(q) == -EOVERFLOW)
			dev_err(smmu->dev, "PRIQ overflow detected -- requests lost\n");
	} while (!queue_empty(q));

	/* Sync our overflow flag, as we believe we're up to speed */
	q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons);
	return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_cmdq_sync_handler(int irq, void *dev)
{
	/* We don't actually use CMD_SYNC interrupts for anything */
	return IRQ_HANDLED;
}

static int arm_smmu_device_disable(struct arm_smmu_device *smmu);
static irqreturn_t arm_smmu_gerror_handler(int irq, void *dev)
{
	u32 gerror, gerrorn, active;
	struct arm_smmu_device *smmu = dev;

	gerror = readl_relaxed(smmu->base + ARM_SMMU_GERROR);
	gerrorn = readl_relaxed(smmu->base + ARM_SMMU_GERRORN);

	active = gerror ^ gerrorn;
	if (!(active & GERROR_ERR_MASK))
		return IRQ_NONE; /* No errors pending */

	dev_warn(smmu->dev,
		 "unexpected global error reported (0x%08x), this could be serious\n",
		 active);

	if (active & GERROR_SFM_ERR) {
		dev_err(smmu->dev, "device has entered Service Failure Mode!\n");
		arm_smmu_device_disable(smmu);
	}

	if (active & GERROR_MSI_GERROR_ABT_ERR)
		dev_warn(smmu->dev, "GERROR MSI write aborted\n");

	if (active & GERROR_MSI_PRIQ_ABT_ERR)
		dev_warn(smmu->dev, "PRIQ MSI write aborted\n");

	if (active & GERROR_MSI_EVTQ_ABT_ERR)
		dev_warn(smmu->dev, "EVTQ MSI write aborted\n");

	if (active & GERROR_MSI_CMDQ_ABT_ERR) {
		dev_warn(smmu->dev, "CMDQ MSI write aborted\n");
		arm_smmu_cmdq_sync_handler(irq, smmu->dev);
	}

	if (active & GERROR_PRIQ_ABT_ERR)
		dev_err(smmu->dev, "PRIQ write aborted -- events may have been lost\n");

	if (active & GERROR_EVTQ_ABT_ERR)
		dev_err(smmu->dev, "EVTQ write aborted -- events may have been lost\n");

	if (active & GERROR_CMDQ_ERR)
		arm_smmu_cmdq_skip_err(smmu);

	writel(gerror, smmu->base + ARM_SMMU_GERRORN);
	return IRQ_HANDLED;
}
/* IO_PGTABLE API */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	struct arm_smmu_cmdq_ent cmd;

	cmd.opcode = CMDQ_OP_CMD_SYNC;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	__arm_smmu_tlb_sync(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cmdq_ent cmd;

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		cmd.opcode	= CMDQ_OP_TLBI_NH_ASID;
		cmd.tlbi.asid	= smmu_domain->s1_cfg.cd.asid;
		cmd.tlbi.vmid	= 0;
	} else {
		cmd.opcode	= CMDQ_OP_TLBI_S12_VMALL;
		cmd.tlbi.vmid	= smmu_domain->s2_cfg.vmid;
	}

	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	__arm_smmu_tlb_sync(smmu);
}
static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cmdq_ent cmd = {
		.tlbi = {
			.leaf	= leaf,
			.addr	= iova,
		},
	};

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		cmd.opcode	= CMDQ_OP_TLBI_NH_VA;
		cmd.tlbi.asid	= smmu_domain->s1_cfg.cd.asid;
	} else {
		cmd.opcode	= CMDQ_OP_TLBI_S2_IPA;
		cmd.tlbi.vmid	= smmu_domain->s2_cfg.vmid;
	}

	do {
		arm_smmu_cmdq_issue_cmd(smmu, &cmd);
		cmd.tlbi.addr += granule;
	} while (size -= granule);
}
static const struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
};
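/*
 * io-pgtable invokes these callbacks from its map/unmap paths:
 * tlb_add_flush() posts the per-range invalidation commands and
 * tlb_sync() issues a CMD_SYNC to wait for them to complete.
 */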
static bool arm_smmu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		return true;
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED &&
	    type != IOMMU_DOMAIN_DMA &&
	    type != IOMMU_DOMAIN_IDENTITY)
		return NULL;

	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&smmu_domain->domain)) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->pgtbl_lock);
	return &smmu_domain->domain;
}
static int arm_smmu_bitmap_alloc(unsigned long *map, int span)
{
	int idx, size = 1 << span;

	do {
		idx = find_first_zero_bit(map, size);
		if (idx == size)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void arm_smmu_bitmap_free(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}
static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	iommu_put_dma_cookie(domain);
	free_io_pgtable_ops(smmu_domain->pgtbl_ops);

	/* Free the CD and ASID, if we allocated them */
	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;

		if (cfg->cdptr) {
			dmam_free_coherent(smmu_domain->smmu->dev,
					   CTXDESC_CD_DWORDS << 3,
					   cfg->cdptr,
					   cfg->cdptr_dma);

			arm_smmu_bitmap_free(smmu->asid_map, cfg->cd.asid);
		}
	} else {
		struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
		if (cfg->vmid)
			arm_smmu_bitmap_free(smmu->vmid_map, cfg->vmid);
	}

	kfree(smmu_domain);
}
static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	int ret;
	int asid;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;

	asid = arm_smmu_bitmap_alloc(smmu->asid_map, smmu->asid_bits);
	if (asid < 0)
		return asid;

	cfg->cdptr = dmam_alloc_coherent(smmu->dev, CTXDESC_CD_DWORDS << 3,
					 &cfg->cdptr_dma,
					 GFP_KERNEL | __GFP_ZERO);
	if (!cfg->cdptr) {
		dev_warn(smmu->dev, "failed to allocate context descriptor\n");
		ret = -ENOMEM;
		goto out_free_asid;
	}

	cfg->cd.asid	= (u16)asid;
	cfg->cd.ttbr	= pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
	cfg->cd.tcr	= pgtbl_cfg->arm_lpae_s1_cfg.tcr;
	cfg->cd.mair	= pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
	return 0;

out_free_asid:
	arm_smmu_bitmap_free(smmu->asid_map, asid);
	return ret;
}

static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	int vmid;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;

	vmid = arm_smmu_bitmap_alloc(smmu->vmid_map, smmu->vmid_bits);
	if (vmid < 0)
		return vmid;

	cfg->vmid	= (u16)vmid;
	cfg->vttbr	= pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
	cfg->vtcr	= pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
	return 0;
}
static int arm_smmu_domain_finalise(struct iommu_domain *domain)
{
	int ret;
	unsigned long ias, oas;
	enum io_pgtable_fmt fmt;
	struct io_pgtable_cfg pgtbl_cfg;
	struct io_pgtable_ops *pgtbl_ops;
	int (*finalise_stage_fn)(struct arm_smmu_domain *,
				 struct io_pgtable_cfg *);
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
		smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
		return 0;
	}

	/* Restrict the stage to what we can actually support */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		ias = VA_BITS;
		oas = smmu->ias;
		fmt = ARM_64_LPAE_S1;
		finalise_stage_fn = arm_smmu_domain_finalise_s1;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
	case ARM_SMMU_DOMAIN_S2:
		ias = smmu->ias;
		oas = smmu->oas;
		fmt = ARM_64_LPAE_S2;
		finalise_stage_fn = arm_smmu_domain_finalise_s2;
		break;
	default:
		return -EINVAL;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= &arm_smmu_gather_ops,
		.iommu_dev	= smmu->dev,
	};

	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops)
		return -ENOMEM;

	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	domain->geometry.aperture_end = (1UL << ias) - 1;
	domain->geometry.force_aperture = true;
	smmu_domain->pgtbl_ops = pgtbl_ops;

	ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
	if (ret < 0)
		free_io_pgtable_ops(pgtbl_ops);

	return ret;
}
static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
{
	__le64 *step;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;

	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
		struct arm_smmu_strtab_l1_desc *l1_desc;
		int idx;

		/* Two-level walk */
		idx = (sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS;
		l1_desc = &cfg->l1_desc[idx];
		idx = (sid & ((1 << STRTAB_SPLIT) - 1)) * STRTAB_STE_DWORDS;
		step = &l1_desc->l2ptr[idx];
	} else {
		/* Simple linear lookup */
		step = &cfg->strtab[sid * STRTAB_STE_DWORDS];
	}

	return step;
}

static void arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
{
	int i;
	struct arm_smmu_master_data *master = fwspec->iommu_priv;
	struct arm_smmu_device *smmu = master->smmu;

	for (i = 0; i < fwspec->num_ids; ++i) {
		u32 sid = fwspec->ids[i];
		__le64 *step = arm_smmu_get_step_for_sid(smmu, sid);

		arm_smmu_write_strtab_ent(smmu, sid, step, &master->ste);
	}
}
static void arm_smmu_detach_dev(struct device *dev)
{
	struct arm_smmu_master_data *master = dev->iommu_fwspec->iommu_priv;

	master->ste.assigned = false;
	arm_smmu_install_ste_for_dev(dev->iommu_fwspec);
}

static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret = 0;
	struct arm_smmu_device *smmu;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_master_data *master;
	struct arm_smmu_strtab_ent *ste;

	if (!dev->iommu_fwspec)
		return -ENOENT;

	master = dev->iommu_fwspec->iommu_priv;
	smmu = master->smmu;
	ste = &master->ste;

	/* Already attached to a different domain? */
	if (ste->assigned)
		arm_smmu_detach_dev(dev);

	mutex_lock(&smmu_domain->init_mutex);

	if (!smmu_domain->smmu) {
		smmu_domain->smmu = smmu;
		ret = arm_smmu_domain_finalise(domain);
		if (ret) {
			smmu_domain->smmu = NULL;
			goto out_unlock;
		}
	} else if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s (upstream of %s)\n",
			dev_name(smmu_domain->smmu->dev),
			dev_name(smmu->dev));
		ret = -ENXIO;
		goto out_unlock;
	}

	ste->assigned = true;

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS) {
		ste->s1_cfg = NULL;
		ste->s2_cfg = NULL;
	} else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		ste->s1_cfg = &smmu_domain->s1_cfg;
		ste->s2_cfg = NULL;
		arm_smmu_write_ctx_desc(smmu, ste->s1_cfg);
	} else {
		ste->s1_cfg = NULL;
		ste->s2_cfg = &smmu_domain->s2_cfg;
	}

	arm_smmu_install_ste_for_dev(dev->iommu_fwspec);
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}
static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	int ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->map(ops, iova, paddr, size, prot);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static size_t
arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
{
	size_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->unmap(ops, iova, size);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static phys_addr_t
arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	phys_addr_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (domain->type == IOMMU_DOMAIN_IDENTITY)
		return iova;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->iova_to_phys(ops, iova);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);

	return ret;
}
static struct platform_driver arm_smmu_driver;

static int arm_smmu_match_node(struct device *dev, void *data)
{
	return dev->fwnode == data;
}

static
struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
{
	struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
						fwnode, arm_smmu_match_node);
	put_device(dev);
	return dev ? dev_get_drvdata(dev) : NULL;
}
static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
{
	unsigned long limit = smmu->strtab_cfg.num_l1_ents;

	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
		limit *= 1UL << STRTAB_SPLIT;

	return sid < limit;
}
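/*
 * For example, with a two-level table of 128k L1 entries and
 * STRTAB_SPLIT == 8, SIDs 0 .. (128k * 256 - 1) are in range.
 */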
static struct iommu_ops arm_smmu_ops;

static int arm_smmu_add_device(struct device *dev)
{
	int i, ret;
	struct arm_smmu_device *smmu;
	struct arm_smmu_master_data *master;
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct iommu_group *group;

	if (!fwspec || fwspec->ops != &arm_smmu_ops)
		return -ENODEV;
	/*
	 * We _can_ actually withstand dodgy bus code re-calling add_device()
	 * without an intervening remove_device()/of_xlate() sequence, but
	 * we're not going to do so quietly...
	 */
	if (WARN_ON_ONCE(fwspec->iommu_priv)) {
		master = fwspec->iommu_priv;
		smmu = master->smmu;
	} else {
		smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
		if (!smmu)
			return -ENODEV;
		master = kzalloc(sizeof(*master), GFP_KERNEL);
		if (!master)
			return -ENOMEM;

		master->smmu = smmu;
		fwspec->iommu_priv = master;
	}

	/* Check the SIDs are in range of the SMMU and our stream table */
	for (i = 0; i < fwspec->num_ids; i++) {
		u32 sid = fwspec->ids[i];

		if (!arm_smmu_sid_in_range(smmu, sid))
			return -ERANGE;

		/* Ensure l2 strtab is initialised */
		if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
			ret = arm_smmu_init_l2_strtab(smmu, sid);
			if (ret)
				return ret;
		}
	}

	group = iommu_group_get_for_dev(dev);
	if (!IS_ERR(group)) {
		iommu_group_put(group);
		iommu_device_link(&smmu->iommu, dev);
	}

	return PTR_ERR_OR_ZERO(group);
}
static void arm_smmu_remove_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_master_data *master;
	struct arm_smmu_device *smmu;

	if (!fwspec || fwspec->ops != &arm_smmu_ops)
		return;

	master = fwspec->iommu_priv;
	smmu = master->smmu;
	if (master && master->ste.assigned)
		arm_smmu_detach_dev(dev);
	iommu_group_remove_device(dev);
	iommu_device_unlink(&smmu->iommu, dev);
	kfree(master);
	iommu_fwspec_free(dev);
}
static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
	struct iommu_group *group;

	/*
	 * We don't support devices sharing stream IDs other than PCI RID
	 * aliases, since the necessary ID-to-device lookup becomes rather
	 * impractical given a potential sparse 32-bit stream ID space.
	 */
	if (dev_is_pci(dev))
		group = pci_device_group(dev);
	else
		group = generic_device_group(dev);

	return group;
}
static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
		return 0;
	default:
		return -ENODEV;
	}
}

static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	mutex_lock(&smmu_domain->init_mutex);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		if (smmu_domain->smmu) {
			ret = -EPERM;
			goto out_unlock;
		}

		if (*(int *)data)
			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
		else
			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
		break;
	default:
		ret = -ENODEV;
	}

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}
static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	return iommu_fwspec_add_ids(dev, args->args, 1);
}

static void arm_smmu_get_resv_regions(struct device *dev,
				      struct list_head *head)
{
	struct iommu_resv_region *region;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;

	region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
					 prot, IOMMU_RESV_SW_MSI);
	if (!region)
		return;

	list_add_tail(&region->list, head);

	iommu_dma_get_resv_regions(dev, head);
}

static void arm_smmu_put_resv_regions(struct device *dev,
				      struct list_head *head)
{
	struct iommu_resv_region *entry, *next;

	list_for_each_entry_safe(entry, next, head, list)
		kfree(entry);
}
static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_alloc		= arm_smmu_domain_alloc,
	.domain_free		= arm_smmu_domain_free,
	.attach_dev		= arm_smmu_attach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.map_sg			= default_iommu_map_sg,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.device_group		= arm_smmu_device_group,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.of_xlate		= arm_smmu_of_xlate,
	.get_resv_regions	= arm_smmu_get_resv_regions,
	.put_resv_regions	= arm_smmu_put_resv_regions,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
};
/* Probing and initialisation functions */
static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
				   struct arm_smmu_queue *q,
				   unsigned long prod_off,
				   unsigned long cons_off,
				   size_t dwords)
{
	size_t qsz = ((1 << q->max_n_shift) * dwords) << 3;

	q->base = dmam_alloc_coherent(smmu->dev, qsz, &q->base_dma, GFP_KERNEL);
	if (!q->base) {
		dev_err(smmu->dev, "failed to allocate queue (0x%zx bytes)\n",
			qsz);
		return -ENOMEM;
	}

	q->prod_reg	= smmu->base + prod_off;
	q->cons_reg	= smmu->base + cons_off;
	q->ent_dwords	= dwords;

	q->q_base  = Q_BASE_RWA;
	q->q_base |= q->base_dma & Q_BASE_ADDR_MASK << Q_BASE_ADDR_SHIFT;
	q->q_base |= (q->max_n_shift & Q_BASE_LOG2SIZE_MASK)
		     << Q_BASE_LOG2SIZE_SHIFT;

	q->prod = q->cons = 0;
	return 0;
}
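/*
 * For example, a command queue with max_n_shift == CMDQ_MAX_SZ_SHIFT (8)
 * holds 256 entries of CMDQ_ENT_DWORDS (2) dwords each, i.e. a 4KB
 * allocation: (1 << 8) * 2 * 8 bytes.
 */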
static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
{
	int ret;

	/* cmdq */
	spin_lock_init(&smmu->cmdq.lock);
	ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, ARM_SMMU_CMDQ_PROD,
				      ARM_SMMU_CMDQ_CONS, CMDQ_ENT_DWORDS);
	if (ret)
		return ret;

	/* evtq */
	ret = arm_smmu_init_one_queue(smmu, &smmu->evtq.q, ARM_SMMU_EVTQ_PROD,
				      ARM_SMMU_EVTQ_CONS, EVTQ_ENT_DWORDS);
	if (ret)
		return ret;

	/* priq */
	if (!(smmu->features & ARM_SMMU_FEAT_PRI))
		return 0;

	return arm_smmu_init_one_queue(smmu, &smmu->priq.q, ARM_SMMU_PRIQ_PROD,
				       ARM_SMMU_PRIQ_CONS, PRIQ_ENT_DWORDS);
}
static int arm_smmu_init_l1_strtab(struct arm_smmu_device *smmu)
{
	unsigned int i;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
	size_t size = sizeof(*cfg->l1_desc) * cfg->num_l1_ents;
	void *strtab = smmu->strtab_cfg.strtab;

	cfg->l1_desc = devm_kzalloc(smmu->dev, size, GFP_KERNEL);
	if (!cfg->l1_desc) {
		dev_err(smmu->dev, "failed to allocate l1 stream table desc\n");
		return -ENOMEM;
	}

	for (i = 0; i < cfg->num_l1_ents; ++i) {
		arm_smmu_write_strtab_l1_desc(strtab, &cfg->l1_desc[i]);
		strtab += STRTAB_L1_DESC_DWORDS << 3;
	}

	return 0;
}
static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
{
	void *strtab;
	u64 reg;
	u32 size, l1size;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;

	/* Calculate the L1 size, capped to the SIDSIZE. */
	size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
	size = min(size, smmu->sid_bits - STRTAB_SPLIT);
	cfg->num_l1_ents = 1 << size;

	size += STRTAB_SPLIT;
	if (size < smmu->sid_bits)
		dev_warn(smmu->dev,
			 "2-level strtab only covers %u/%u bits of SID\n",
			 size, smmu->sid_bits);

	l1size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3);
	strtab = dmam_alloc_coherent(smmu->dev, l1size, &cfg->strtab_dma,
				     GFP_KERNEL | __GFP_ZERO);
	if (!strtab) {
		dev_err(smmu->dev,
			"failed to allocate l1 stream table (%u bytes)\n",
			size);
		return -ENOMEM;
	}
	cfg->strtab = strtab;

	/* Configure strtab_base_cfg for 2 levels */
	reg  = STRTAB_BASE_CFG_FMT_2LVL;
	reg |= (size & STRTAB_BASE_CFG_LOG2SIZE_MASK)
		<< STRTAB_BASE_CFG_LOG2SIZE_SHIFT;
	reg |= (STRTAB_SPLIT & STRTAB_BASE_CFG_SPLIT_MASK)
		<< STRTAB_BASE_CFG_SPLIT_SHIFT;
	cfg->strtab_base_cfg = reg;

	return arm_smmu_init_l1_strtab(smmu);
}
static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
{
	void *strtab;
	u64 reg;
	u32 size;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;

	size = (1 << smmu->sid_bits) * (STRTAB_STE_DWORDS << 3);
	strtab = dmam_alloc_coherent(smmu->dev, size, &cfg->strtab_dma,
				     GFP_KERNEL | __GFP_ZERO);
	if (!strtab) {
		dev_err(smmu->dev,
			"failed to allocate linear stream table (%u bytes)\n",
			size);
		return -ENOMEM;
	}
	cfg->strtab = strtab;
	cfg->num_l1_ents = 1 << smmu->sid_bits;

	/* Configure strtab_base_cfg for a linear table covering all SIDs */
	reg  = STRTAB_BASE_CFG_FMT_LINEAR;
	reg |= (smmu->sid_bits & STRTAB_BASE_CFG_LOG2SIZE_MASK)
		<< STRTAB_BASE_CFG_LOG2SIZE_SHIFT;
	cfg->strtab_base_cfg = reg;

	arm_smmu_init_bypass_stes(strtab, cfg->num_l1_ents);
	return 0;
}
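/*
 * For example, with sid_bits == 16 the linear table above needs
 * (1 << 16) * 64 bytes == 4MB of coherent DMA memory, one
 * STRTAB_STE_DWORDS (8 dwords == 64 bytes) entry per SID; this is why
 * the two-level format exists for sparse SID spaces.
 */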
static int arm_smmu_init_strtab(struct arm_smmu_device *smmu)
{
	u64 reg;
	int ret;

	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
		ret = arm_smmu_init_strtab_2lvl(smmu);
	else
		ret = arm_smmu_init_strtab_linear(smmu);

	if (ret)
		return ret;

	/* Set the strtab base address */
	reg  = smmu->strtab_cfg.strtab_dma &
	       STRTAB_BASE_ADDR_MASK << STRTAB_BASE_ADDR_SHIFT;
	reg |= STRTAB_BASE_RA;
	smmu->strtab_cfg.strtab_base = reg;

	/* Allocate the first VMID for stage-2 bypass STEs */
	set_bit(0, smmu->vmid_map);
	return 0;
}

static int arm_smmu_init_structures(struct arm_smmu_device *smmu)
{
	int ret;

	ret = arm_smmu_init_queues(smmu);
	if (ret)
		return ret;

	return arm_smmu_init_strtab(smmu);
}
static int arm_smmu_write_reg_sync(struct arm_smmu_device *smmu, u32 val,
				   unsigned int reg_off, unsigned int ack_off)
{
	u32 reg;

	writel_relaxed(val, smmu->base + reg_off);
	return readl_relaxed_poll_timeout(smmu->base + ack_off, reg, reg == val,
					  1, ARM_SMMU_POLL_TIMEOUT_US);
}
/* GBPA is "special" */
static int arm_smmu_update_gbpa(struct arm_smmu_device *smmu, u32 set, u32 clr)
{
	int ret;
	u32 reg, __iomem *gbpa = smmu->base + ARM_SMMU_GBPA;

	ret = readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE),
					 1, ARM_SMMU_POLL_TIMEOUT_US);
	if (ret)
		return ret;

	reg &= ~clr;
	reg |= set;
	writel_relaxed(reg | GBPA_UPDATE, gbpa);
	return readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE),
					  1, ARM_SMMU_POLL_TIMEOUT_US);
}
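/*
 * The update protocol above is: wait for GBPA.Update to clear, write the
 * new fields with GBPA.Update set, then wait for the SMMU to clear the
 * bit again to acknowledge the change.
 */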
static void arm_smmu_free_msis(void *data)
{
	struct device *dev = data;
	platform_msi_domain_free_irqs(dev);
}

static void arm_smmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	phys_addr_t doorbell;
	struct device *dev = msi_desc_to_dev(desc);
	struct arm_smmu_device *smmu = dev_get_drvdata(dev);
	phys_addr_t *cfg = arm_smmu_msi_cfg[desc->platform.msi_index];

	doorbell = (((u64)msg->address_hi) << 32) | msg->address_lo;
	doorbell &= MSI_CFG0_ADDR_MASK << MSI_CFG0_ADDR_SHIFT;

	writeq_relaxed(doorbell, smmu->base + cfg[0]);
	writel_relaxed(msg->data, smmu->base + cfg[1]);
	writel_relaxed(MSI_CFG2_MEMATTR_DEVICE_nGnRE, smmu->base + cfg[2]);
}
static void arm_smmu_setup_msis(struct arm_smmu_device *smmu)
{
	struct msi_desc *desc;
	int ret, nvec = ARM_SMMU_MAX_MSIS;
	struct device *dev = smmu->dev;

	/* Clear the MSI address regs */
	writeq_relaxed(0, smmu->base + ARM_SMMU_GERROR_IRQ_CFG0);
	writeq_relaxed(0, smmu->base + ARM_SMMU_EVTQ_IRQ_CFG0);

	if (smmu->features & ARM_SMMU_FEAT_PRI)
		writeq_relaxed(0, smmu->base + ARM_SMMU_PRIQ_IRQ_CFG0);
	else
		nvec--;

	if (!(smmu->features & ARM_SMMU_FEAT_MSI))
		return;

	/* Allocate MSIs for evtq, gerror and priq. Ignore cmdq */
	ret = platform_msi_domain_alloc_irqs(dev, nvec, arm_smmu_write_msi_msg);
	if (ret) {
		dev_warn(dev, "failed to allocate MSIs\n");
		return;
	}

	for_each_msi_entry(desc, dev) {
		switch (desc->platform.msi_index) {
		case EVTQ_MSI_INDEX:
			smmu->evtq.q.irq = desc->irq;
			break;
		case GERROR_MSI_INDEX:
			smmu->gerr_irq = desc->irq;
			break;
		case PRIQ_MSI_INDEX:
			smmu->priq.q.irq = desc->irq;
			break;
		default:	/* Unknown */
			continue;
		}
	}

	/* Add callback to free MSIs on teardown */
	devm_add_action(dev, arm_smmu_free_msis, dev);
}
static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
{
        int ret, irq;
        u32 irqen_flags = IRQ_CTRL_EVTQ_IRQEN | IRQ_CTRL_GERROR_IRQEN;

        /* Disable IRQs first */
        ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_IRQ_CTRL,
                                      ARM_SMMU_IRQ_CTRLACK);
        if (ret) {
                dev_err(smmu->dev, "failed to disable irqs\n");
                return ret;
        }

        arm_smmu_setup_msis(smmu);

        /* Request interrupt lines */
        irq = smmu->evtq.q.irq;
        if (irq) {
                ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
                                                arm_smmu_evtq_thread,
                                                IRQF_ONESHOT,
                                                "arm-smmu-v3-evtq", smmu);
                if (ret < 0)
                        dev_warn(smmu->dev, "failed to enable evtq irq\n");
        }

        irq = smmu->cmdq.q.irq;
        if (irq) {
                ret = devm_request_irq(smmu->dev, irq,
                                       arm_smmu_cmdq_sync_handler, 0,
                                       "arm-smmu-v3-cmdq-sync", smmu);
                if (ret < 0)
                        dev_warn(smmu->dev, "failed to enable cmdq-sync irq\n");
        }

        irq = smmu->gerr_irq;
        if (irq) {
                ret = devm_request_irq(smmu->dev, irq, arm_smmu_gerror_handler,
                                       0, "arm-smmu-v3-gerror", smmu);
                if (ret < 0)
                        dev_warn(smmu->dev, "failed to enable gerror irq\n");
        }

        if (smmu->features & ARM_SMMU_FEAT_PRI) {
                irq = smmu->priq.q.irq;
                if (irq) {
                        ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
                                                        arm_smmu_priq_thread,
                                                        IRQF_ONESHOT,
                                                        "arm-smmu-v3-priq",
                                                        smmu);
                        if (ret < 0)
                                dev_warn(smmu->dev,
                                         "failed to enable priq irq\n");
                        else
                                irqen_flags |= IRQ_CTRL_PRIQ_IRQEN;
                }
        }

        /* Enable interrupt generation on the SMMU */
        ret = arm_smmu_write_reg_sync(smmu, irqen_flags,
                                      ARM_SMMU_IRQ_CTRL, ARM_SMMU_IRQ_CTRLACK);
        if (ret)
                dev_warn(smmu->dev, "failed to enable irqs\n");

        return 0;
}
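/*
 * The ordering above matters: interrupt generation is masked via IRQ_CTRL
 * before any handler is installed, MSIs are wired up (falling back to the
 * wired interrupts discovered from DT/ACPI), and generation is only
 * re-enabled once the handlers are in place. Failing to request an
 * individual line is deliberately just a warning, so probing can continue
 * with reduced functionality.
 */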
static int arm_smmu_device_disable(struct arm_smmu_device *smmu)
{
        int ret;

        ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_CR0, ARM_SMMU_CR0ACK);
        if (ret)
                dev_err(smmu->dev, "failed to clear cr0\n");

        return ret;
}
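/*
 * arm_smmu_write_reg_sync() (defined earlier in this file) implements the
 * register/ACK-register handshake used throughout bring-up; as a rough
 * sketch, it amounts to:
 *
 *      writel_relaxed(val, smmu->base + reg_off);
 *      return readl_relaxed_poll_timeout(smmu->base + ack_off, reg,
 *                                        reg == val, 1,
 *                                        ARM_SMMU_POLL_TIMEOUT_US);
 */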
static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
{
        int ret;
        u32 reg, enables;
        struct arm_smmu_cmdq_ent cmd;

        /* Clear CR0 and sync (disables SMMU and queue processing) */
        reg = readl_relaxed(smmu->base + ARM_SMMU_CR0);
        if (reg & CR0_SMMUEN)
                dev_warn(smmu->dev, "SMMU currently enabled! Resetting...\n");

        ret = arm_smmu_device_disable(smmu);
        if (ret)
                return ret;

        /* CR1 (table and queue memory attributes) */
        reg = (CR1_SH_ISH << CR1_TABLE_SH_SHIFT) |
              (CR1_CACHE_WB << CR1_TABLE_OC_SHIFT) |
              (CR1_CACHE_WB << CR1_TABLE_IC_SHIFT) |
              (CR1_SH_ISH << CR1_QUEUE_SH_SHIFT) |
              (CR1_CACHE_WB << CR1_QUEUE_OC_SHIFT) |
              (CR1_CACHE_WB << CR1_QUEUE_IC_SHIFT);
        writel_relaxed(reg, smmu->base + ARM_SMMU_CR1);

        /* CR2 (random crap) */
        reg = CR2_PTM | CR2_RECINVSID | CR2_E2H;
        writel_relaxed(reg, smmu->base + ARM_SMMU_CR2);

        /* Stream table */
        writeq_relaxed(smmu->strtab_cfg.strtab_base,
                       smmu->base + ARM_SMMU_STRTAB_BASE);
        writel_relaxed(smmu->strtab_cfg.strtab_base_cfg,
                       smmu->base + ARM_SMMU_STRTAB_BASE_CFG);

        /* Command queue */
        writeq_relaxed(smmu->cmdq.q.q_base, smmu->base + ARM_SMMU_CMDQ_BASE);
        writel_relaxed(smmu->cmdq.q.prod, smmu->base + ARM_SMMU_CMDQ_PROD);
        writel_relaxed(smmu->cmdq.q.cons, smmu->base + ARM_SMMU_CMDQ_CONS);

        enables = CR0_CMDQEN;
        ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
                                      ARM_SMMU_CR0ACK);
        if (ret) {
                dev_err(smmu->dev, "failed to enable command queue\n");
                return ret;
        }

        /* Invalidate any cached configuration */
        cmd.opcode = CMDQ_OP_CFGI_ALL;
        arm_smmu_cmdq_issue_cmd(smmu, &cmd);
        cmd.opcode = CMDQ_OP_CMD_SYNC;
        arm_smmu_cmdq_issue_cmd(smmu, &cmd);

        /* Invalidate any stale TLB entries */
        if (smmu->features & ARM_SMMU_FEAT_HYP) {
                cmd.opcode = CMDQ_OP_TLBI_EL2_ALL;
                arm_smmu_cmdq_issue_cmd(smmu, &cmd);
        }

        cmd.opcode = CMDQ_OP_TLBI_NSNH_ALL;
        arm_smmu_cmdq_issue_cmd(smmu, &cmd);
        cmd.opcode = CMDQ_OP_CMD_SYNC;
        arm_smmu_cmdq_issue_cmd(smmu, &cmd);

        /* Event queue */
        writeq_relaxed(smmu->evtq.q.q_base, smmu->base + ARM_SMMU_EVTQ_BASE);
        writel_relaxed(smmu->evtq.q.prod, smmu->base + ARM_SMMU_EVTQ_PROD);
        writel_relaxed(smmu->evtq.q.cons, smmu->base + ARM_SMMU_EVTQ_CONS);

        enables |= CR0_EVTQEN;
        ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
                                      ARM_SMMU_CR0ACK);
        if (ret) {
                dev_err(smmu->dev, "failed to enable event queue\n");
                return ret;
        }

        /* PRI queue */
        if (smmu->features & ARM_SMMU_FEAT_PRI) {
                writeq_relaxed(smmu->priq.q.q_base,
                               smmu->base + ARM_SMMU_PRIQ_BASE);
                writel_relaxed(smmu->priq.q.prod,
                               smmu->base + ARM_SMMU_PRIQ_PROD);
                writel_relaxed(smmu->priq.q.cons,
                               smmu->base + ARM_SMMU_PRIQ_CONS);

                enables |= CR0_PRIQEN;
                ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
                                              ARM_SMMU_CR0ACK);
                if (ret) {
                        dev_err(smmu->dev, "failed to enable PRI queue\n");
                        return ret;
                }
        }

        ret = arm_smmu_setup_irqs(smmu);
        if (ret) {
                dev_err(smmu->dev, "failed to setup irqs\n");
                return ret;
        }

        /* Enable the SMMU interface, or ensure bypass */
        if (!bypass || disable_bypass) {
                enables |= CR0_SMMUEN;
        } else {
                ret = arm_smmu_update_gbpa(smmu, 0, GBPA_ABORT);
                if (ret) {
                        dev_err(smmu->dev, "GBPA not responding to update\n");
                        return ret;
                }
        }
        ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
                                      ARM_SMMU_CR0ACK);
        if (ret) {
                dev_err(smmu->dev, "failed to enable SMMU interface\n");
                return ret;
        }

        return 0;
}
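/*
 * Summary of the reset sequence above: disable the SMMU, program table and
 * queue memory attributes (CR1/CR2), install the stream table, then bring
 * the queues up in dependency order. The command queue must come up first
 * so that the CFGI/TLBI invalidation commands can be issued before any
 * translation occurs; SMMUEN (or bypass via GBPA) is set last.
 */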
static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
{
        u32 reg;
        bool coherent = smmu->features & ARM_SMMU_FEAT_COHERENCY;

        /* IDR0 */
        reg = readl_relaxed(smmu->base + ARM_SMMU_IDR0);

        /* 2-level structures */
        if ((reg & IDR0_ST_LVL_MASK << IDR0_ST_LVL_SHIFT) == IDR0_ST_LVL_2LVL)
                smmu->features |= ARM_SMMU_FEAT_2_LVL_STRTAB;

        if (reg & IDR0_CD2L)
                smmu->features |= ARM_SMMU_FEAT_2_LVL_CDTAB;

        /*
         * Translation table endianness.
         * We currently require the same endianness as the CPU, but this
         * could be changed later by adding a new IO_PGTABLE_QUIRK.
         */
        switch (reg & IDR0_TTENDIAN_MASK << IDR0_TTENDIAN_SHIFT) {
        case IDR0_TTENDIAN_MIXED:
                smmu->features |= ARM_SMMU_FEAT_TT_LE | ARM_SMMU_FEAT_TT_BE;
                break;
#ifdef __BIG_ENDIAN
        case IDR0_TTENDIAN_BE:
                smmu->features |= ARM_SMMU_FEAT_TT_BE;
                break;
#else
        case IDR0_TTENDIAN_LE:
                smmu->features |= ARM_SMMU_FEAT_TT_LE;
                break;
#endif
        default:
                dev_err(smmu->dev, "unknown/unsupported TT endianness!\n");
                return -ENXIO;
        }

        /* Boolean feature flags */
        if (IS_ENABLED(CONFIG_PCI_PRI) && reg & IDR0_PRI)
                smmu->features |= ARM_SMMU_FEAT_PRI;

        if (IS_ENABLED(CONFIG_PCI_ATS) && reg & IDR0_ATS)
                smmu->features |= ARM_SMMU_FEAT_ATS;

        if (reg & IDR0_SEV)
                smmu->features |= ARM_SMMU_FEAT_SEV;

        if (reg & IDR0_MSI)
                smmu->features |= ARM_SMMU_FEAT_MSI;

        if (reg & IDR0_HYP)
                smmu->features |= ARM_SMMU_FEAT_HYP;

        /*
         * The coherency feature as set by FW is used in preference to the ID
         * register, but warn on mismatch.
         */
        if (!!(reg & IDR0_COHACC) != coherent)
                dev_warn(smmu->dev, "IDR0.COHACC overridden by dma-coherent property (%s)\n",
                         coherent ? "true" : "false");

        switch (reg & IDR0_STALL_MODEL_MASK << IDR0_STALL_MODEL_SHIFT) {
        case IDR0_STALL_MODEL_STALL:
                /* Fallthrough */
        case IDR0_STALL_MODEL_FORCE:
                smmu->features |= ARM_SMMU_FEAT_STALLS;
        }

        if (reg & IDR0_S1P)
                smmu->features |= ARM_SMMU_FEAT_TRANS_S1;

        if (reg & IDR0_S2P)
                smmu->features |= ARM_SMMU_FEAT_TRANS_S2;

        if (!(reg & (IDR0_S1P | IDR0_S2P))) {
                dev_err(smmu->dev, "no translation support!\n");
                return -ENXIO;
        }

        /* We only support the AArch64 table format at present */
        switch (reg & IDR0_TTF_MASK << IDR0_TTF_SHIFT) {
        case IDR0_TTF_AARCH32_64:
                smmu->ias = 40;
                /* Fallthrough */
        case IDR0_TTF_AARCH64:
                break;
        default:
                dev_err(smmu->dev, "AArch64 table format not supported!\n");
                return -ENXIO;
        }

        /* ASID/VMID sizes */
        smmu->asid_bits = reg & IDR0_ASID16 ? 16 : 8;
        smmu->vmid_bits = reg & IDR0_VMID16 ? 16 : 8;

        /* IDR1 */
        reg = readl_relaxed(smmu->base + ARM_SMMU_IDR1);
        if (reg & (IDR1_TABLES_PRESET | IDR1_QUEUES_PRESET | IDR1_REL)) {
                dev_err(smmu->dev, "embedded implementation not supported\n");
                return -ENXIO;
        }

        /* Queue sizes, capped at 4k */
        smmu->cmdq.q.max_n_shift = min((u32)CMDQ_MAX_SZ_SHIFT,
                                       reg >> IDR1_CMDQ_SHIFT & IDR1_CMDQ_MASK);
        if (!smmu->cmdq.q.max_n_shift) {
                /* Odd alignment restrictions on the base, so ignore for now */
                dev_err(smmu->dev, "unit-length command queue not supported\n");
                return -ENXIO;
        }

        smmu->evtq.q.max_n_shift = min((u32)EVTQ_MAX_SZ_SHIFT,
                                       reg >> IDR1_EVTQ_SHIFT & IDR1_EVTQ_MASK);
        smmu->priq.q.max_n_shift = min((u32)PRIQ_MAX_SZ_SHIFT,
                                       reg >> IDR1_PRIQ_SHIFT & IDR1_PRIQ_MASK);

        /* SID/SSID sizes */
        smmu->ssid_bits = reg >> IDR1_SSID_SHIFT & IDR1_SSID_MASK;
        smmu->sid_bits = reg >> IDR1_SID_SHIFT & IDR1_SID_MASK;

        /*
         * If the SMMU supports fewer bits than would fill a single L2 stream
         * table, use a linear table instead.
         */
        if (smmu->sid_bits <= STRTAB_SPLIT)
                smmu->features &= ~ARM_SMMU_FEAT_2_LVL_STRTAB;

        /* IDR5 */
        reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5);

        /* Maximum number of outstanding stalls */
        smmu->evtq.max_stalls = reg >> IDR5_STALL_MAX_SHIFT
                                & IDR5_STALL_MAX_MASK;

        /* Page sizes */
        if (reg & IDR5_GRAN64K)
                smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
        if (reg & IDR5_GRAN16K)
                smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
        if (reg & IDR5_GRAN4K)
                smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;

        if (arm_smmu_ops.pgsize_bitmap == -1UL)
                arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
        else
                arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;

        /* Output address size */
        switch (reg & IDR5_OAS_MASK << IDR5_OAS_SHIFT) {
        case IDR5_OAS_32_BIT:
                smmu->oas = 32;
                break;
        case IDR5_OAS_36_BIT:
                smmu->oas = 36;
                break;
        case IDR5_OAS_40_BIT:
                smmu->oas = 40;
                break;
        case IDR5_OAS_42_BIT:
                smmu->oas = 42;
                break;
        case IDR5_OAS_44_BIT:
                smmu->oas = 44;
                break;
        default:
                dev_info(smmu->dev,
                        "unknown output address size. Truncating to 48-bit\n");
                /* Fallthrough */
        case IDR5_OAS_48_BIT:
                smmu->oas = 48;
        }

        /* Set the DMA mask for our table walker */
        if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(smmu->oas)))
                dev_warn(smmu->dev,
                         "failed to set DMA mask for table walker\n");

        smmu->ias = max(smmu->ias, smmu->oas);

        dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n",
                 smmu->ias, smmu->oas, smmu->features);
        return 0;
}
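/*
 * Note that arm_smmu_ops.pgsize_bitmap accumulates across all probed SMMUs,
 * so with heterogeneous instances the global mask is the union of their
 * grains. As a worked example, a 4K-granule-only SMMU contributes
 * SZ_4K | SZ_2M | SZ_1G == 0x40201000 to the bitmap.
 */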
#ifdef CONFIG_ACPI
static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
                                      struct arm_smmu_device *smmu)
{
        struct acpi_iort_smmu_v3 *iort_smmu;
        struct device *dev = smmu->dev;
        struct acpi_iort_node *node;

        node = *(struct acpi_iort_node **)dev_get_platdata(dev);

        /* Retrieve SMMUv3 specific data */
        iort_smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

        if (iort_smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE)
                smmu->features |= ARM_SMMU_FEAT_COHERENCY;

        return 0;
}
#else
static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
                                             struct arm_smmu_device *smmu)
{
        return -ENODEV;
}
#endif
static int arm_smmu_device_dt_probe(struct platform_device *pdev,
                                    struct arm_smmu_device *smmu)
{
        struct device *dev = &pdev->dev;
        u32 cells;
        int ret = -EINVAL;

        if (of_property_read_u32(dev->of_node, "#iommu-cells", &cells))
                dev_err(dev, "missing #iommu-cells property\n");
        else if (cells != 1)
                dev_err(dev, "invalid #iommu-cells value (%d)\n", cells);
        else
                ret = 0;

        parse_driver_options(smmu);

        if (of_dma_is_coherent(dev->of_node))
                smmu->features |= ARM_SMMU_FEAT_COHERENCY;

        return ret;
}
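/*
 * A device-tree binding sketch matching the checks above (addresses and
 * interrupt specifiers are hypothetical); #iommu-cells must be 1, the
 * single cell being the master's StreamID:
 *
 *      smmu: iommu@2b400000 {
 *              compatible = "arm,smmu-v3";
 *              reg = <0x0 0x2b400000 0x0 0x20000>;
 *              interrupts = <...>;
 *              interrupt-names = "eventq", "priq", "cmdq-sync", "gerror";
 *              #iommu-cells = <1>;
 *              dma-coherent;
 *      };
 */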
static int arm_smmu_device_probe(struct platform_device *pdev)
{
        int irq, ret;
        struct resource *res;
        resource_size_t ioaddr;
        struct arm_smmu_device *smmu;
        struct device *dev = &pdev->dev;
        bool bypass;

        smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
        if (!smmu) {
                dev_err(dev, "failed to allocate arm_smmu_device\n");
                return -ENOMEM;
        }
        smmu->dev = dev;

        /* Base address */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (resource_size(res) + 1 < SZ_128K) {
                dev_err(dev, "MMIO region too small (%pr)\n", res);
                return -EINVAL;
        }
        ioaddr = res->start;

        smmu->base = devm_ioremap_resource(dev, res);
        if (IS_ERR(smmu->base))
                return PTR_ERR(smmu->base);

        /* Interrupt lines */
        irq = platform_get_irq_byname(pdev, "eventq");
        if (irq > 0)
                smmu->evtq.q.irq = irq;

        irq = platform_get_irq_byname(pdev, "priq");
        if (irq > 0)
                smmu->priq.q.irq = irq;

        irq = platform_get_irq_byname(pdev, "cmdq-sync");
        if (irq > 0)
                smmu->cmdq.q.irq = irq;

        irq = platform_get_irq_byname(pdev, "gerror");
        if (irq > 0)
                smmu->gerr_irq = irq;

        if (dev->of_node) {
                ret = arm_smmu_device_dt_probe(pdev, smmu);
        } else {
                ret = arm_smmu_device_acpi_probe(pdev, smmu);
                if (ret == -ENODEV)
                        return ret;
        }

        /* Set bypass mode according to firmware probing result */
        bypass = !!ret;

        /* Probe the h/w */
        ret = arm_smmu_device_hw_probe(smmu);
        if (ret)
                return ret;

        /* Initialise in-memory data structures */
        ret = arm_smmu_init_structures(smmu);
        if (ret)
                return ret;

        /* Record our private device structure */
        platform_set_drvdata(pdev, smmu);

        /* Reset the device */
        ret = arm_smmu_device_reset(smmu, bypass);
        if (ret)
                return ret;

        /* And we're up. Go go go! */
        ret = iommu_device_sysfs_add(&smmu->iommu, dev, NULL,
                                     "smmu3.%pa", &ioaddr);
        if (ret)
                return ret;

        iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
        iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);

        ret = iommu_device_register(&smmu->iommu);
        if (ret) {
                dev_err(dev, "Failed to register iommu\n");
                return ret;
        }

#ifdef CONFIG_PCI
        if (pci_bus_type.iommu_ops != &arm_smmu_ops) {
                pci_request_acs();
                ret = bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
                if (ret)
                        return ret;
        }
#endif
#ifdef CONFIG_ARM_AMBA
        if (amba_bustype.iommu_ops != &arm_smmu_ops) {
                ret = bus_set_iommu(&amba_bustype, &arm_smmu_ops);
                if (ret)
                        return ret;
        }
#endif
        if (platform_bus_type.iommu_ops != &arm_smmu_ops) {
                ret = bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
                if (ret)
                        return ret;
        }
        return 0;
}
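/*
 * Once bus_set_iommu() has installed arm_smmu_ops on a bus type, every
 * device subsequently added to that bus is routed through this driver. A
 * master then references its SMMU by StreamID in the firmware tables, e.g.
 * (hypothetical value):
 *
 *      iommus = <&smmu 0x100>;
 */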
static int arm_smmu_device_remove(struct platform_device *pdev)
{
        struct arm_smmu_device *smmu = platform_get_drvdata(pdev);

        arm_smmu_device_disable(smmu);
        return 0;
}
static const struct of_device_id arm_smmu_of_match[] = {
        { .compatible = "arm,smmu-v3", },
        { },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
static struct platform_driver arm_smmu_driver = {
        .driver = {
                .name           = "arm-smmu-v3",
                .of_match_table = of_match_ptr(arm_smmu_of_match),
        },
        .probe  = arm_smmu_device_probe,
        .remove = arm_smmu_device_remove,
};
module_platform_driver(arm_smmu_driver);

IOMMU_OF_DECLARE(arm_smmuv3, "arm,smmu-v3", NULL);
MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");