/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 *	- Extended Stream ID (16 bit)
 */
#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"
/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))
/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))
/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif
/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_EXIDENABLE			(1 << 3)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_VMID16EN			(1 << 31)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3
/* Auxiliary Configuration register */
#define ARM_SMMU_GR0_sACR		0x10

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_PTFS_NO_AARCH32		(1 << 25)
#define ID0_PTFS_NO_AARCH32S		(1 << 24)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_EXIDS			(1 << 8)
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)
#define ID2_VMID16			(1 << 15)

#define ID7_MAJOR_SHIFT			4
#define ID7_MAJOR_MASK			0xf
/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */
/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_ID_SHIFT			0

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_EXIDVALID			(1 << 10)
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
enum arm_smmu_s2cr_type {
	S2CR_TYPE_TRANS,
	S2CR_TYPE_BYPASS,
	S2CR_TYPE_FAULT,
};
#define S2CR_PRIVCFG_SHIFT		24
#define S2CR_PRIVCFG_MASK		0x3
enum arm_smmu_s2cr_privcfg {
	S2CR_PRIVCFG_DEFAULT,
	S2CR_PRIVCFG_DIPAN,
	S2CR_PRIVCFG_UNPRIV,
	S2CR_PRIVCFG_PRIV,
};
/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)
#define CBA2R_VMID_SHIFT		16
#define CBA2R_VMID_MASK			0xffff
/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))
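/*
 * The context banks occupy the upper half of the mapped SMMU region,
 * one register page (1 << pgshift bytes) per bank: ARM_SMMU_CB() gives
 * the offset of bank n from ARM_SMMU_CB_BASE().
 */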
#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_ACTLR		0x4
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_CONTEXTIDR		0x34
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR			0x50
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FAR			0x60
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0
#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)
#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)
#define ARM_MMU500_ACR_SMTNMB_TLBEN	(1 << 8)

#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)

#define TTBRn_ASID_SHIFT		48

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)
static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};
enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
};
struct arm_smmu_s2cr {
	struct iommu_group		*group;
	int				count;
	enum arm_smmu_s2cr_type		type;
	enum arm_smmu_s2cr_privcfg	privcfg;
	u8				cbndx;
};
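/*
 * s2cr_init_val below is the reset state for a stream map entry: bypass
 * by default, or fault when the disable_bypass module parameter is set.
 * Both arm_smmu_free_sme() and arm_smmu_device_reset() rely on it.
 */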
#define s2cr_init_val (struct arm_smmu_s2cr){				\
	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
}
struct arm_smmu_smr {
	u16				mask;
	u16				id;
	bool				valid;
};
struct arm_smmu_master_cfg {
	struct arm_smmu_device		*smmu;
	s16				smendx[];
};
#define INVALID_SMENDX			-1
#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
#define fwspec_smmu(fw)  (__fwspec_cfg(fw)->smmu)
#define fwspec_smendx(fw, i) \
	(i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
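/*
 * for_each_cfg_sme() walks a master's stream IDs, yielding the fwspec
 * index in (i) and the associated stream map entry in (idx); the comma
 * expression refreshes idx before each bounds check on i.
 */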
#define for_each_cfg_sme(fw, i, idx) \
	for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)
struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	unsigned long			size;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
#define ARM_SMMU_FEAT_EXIDS		(1 << 12)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
	u32				options;
	enum arm_smmu_arch_version	version;
	enum arm_smmu_implementation	model;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t			irptndx;

	u32				num_mapping_groups;
	u16				streamid_mask;
	u16				smr_mask_mask;
	struct arm_smmu_smr		*smrs;
	struct arm_smmu_s2cr		*s2crs;
	struct mutex			stream_map_mutex;

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;
	unsigned long			pgsize_bitmap;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	u32				cavium_id_base; /* Specific to Cavium */
};
enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};
struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	u32				cbar;
	enum arm_smmu_context_fmt	fmt;
};
#define INVALID_IRPTNDX			0xff
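/*
 * Context bank indices are converted to ASIDs/VMIDs by adding
 * cavium_id_base, which is non-zero only on Cavium SMMUs so that the
 * values stay unique across instances (see the CN88xx erratum #27704
 * handling in arm_smmu_device_cfg_probe()).
 */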
#define ARM_SMMU_CB_ASID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx)
#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)
enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};
struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	spinlock_t			pgtbl_lock;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	struct iommu_domain		domain;
};
struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};
static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static bool using_legacy_binding, using_generic_binding;
static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ 0, NULL},
};
static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}
static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}
static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return of_node_get(bus->bridge->parent->of_node);
	}

	return of_node_get(dev->of_node);
}
static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((__be32 *)data) = cpu_to_be32(alias);
	return 0; /* Continue walking */
}
static int __find_legacy_master_phandle(struct device *dev, void *data)
{
	struct of_phandle_iterator *it = *(void **)data;
	struct device_node *np = it->node;
	int err;

	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
			    "#stream-id-cells", 0)
		if (it->node == np) {
			*(void **)data = dev;
			return 1;
		}
	it->node = np;
	return err == -ENOENT ? 0 : err;
}
static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops;
static int arm_smmu_register_legacy_master(struct device *dev,
					   struct arm_smmu_device **smmu)
{
	struct device *smmu_dev;
	struct device_node *np;
	struct of_phandle_iterator it;
	void *data = &it;
	u32 *sids;
	__be32 pci_sid;
	int err;

	np = dev_get_dev_node(dev);
	if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
		of_node_put(np);
		return -ENODEV;
	}

	it.node = np;
	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
				     __find_legacy_master_phandle);
	smmu_dev = data;
	of_node_put(np);
	if (err == 0)
		return -ENODEV;
	if (err < 0)
		return err;

	if (dev_is_pci(dev)) {
		/* "mmu-masters" assumes Stream ID == Requester ID */
		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
				       &pci_sid);
		it.cur = &pci_sid;
		it.cur_count = 1;
	}

	err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
				&arm_smmu_ops);
	if (err)
		return err;

	sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
	if (!sids)
		return -ENOMEM;

	*smmu = dev_get_drvdata(smmu_dev);
	of_phandle_iterator_args(&it, sids, it.cur_count);
	err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
	kfree(sids);
	return err;
}
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}
static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}
/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}
static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;

	__arm_smmu_tlb_sync(smmu_domain->smmu);
}
static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;

	if (stage1) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
	}

	__arm_smmu_tlb_sync(smmu);
}
static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg;

	if (stage1) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			iova &= ~12UL;
			iova |= ARM_SMMU_CB_ASID(smmu, cfg);
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
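			/*
			 * For the AArch64 format, TLBIVA(L) takes the
			 * virtual page number (VA >> 12) in the low bits
			 * and the ASID in bits [63:48], hence the
			 * pre-shifted address arithmetic below.
			 */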
			iova >>= 12;
			iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else if (smmu->version == ARM_SMMU_V2) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	} else {
		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
	}
}
static const struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
};
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	u32 fsr, fsynr;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);

	dev_err_ratelimited(smmu->dev,
	"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cb=%d\n",
			    fsr, iova, fsynr, cfg->cbndx);

	writel(fsr, cb_base + ARM_SMMU_CB_FSR);
	return IRQ_HANDLED;
}
static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}
static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	u32 reg, reg2;
	u64 reg64;
	bool stage1;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base, *gr1_base;

	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = CBA2R_RW64_64BIT;
		else
			reg = CBA2R_RW64_32BIT;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;

		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
	}
	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version < ARM_SMMU_V2)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));
	/* TTBRs */
	if (stage1) {
		u16 asid = ARM_SMMU_CB_ASID(smmu, cfg);

		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			reg = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0);
			reg = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1);
			writel_relaxed(asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
		} else {
			reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
			reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
			writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
			reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
			reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
			writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
		}
	} else {
		reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
	}
	/* TTBCR */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			reg = pgtbl_cfg->arm_v7s_cfg.tcr;
			reg2 = 0;
		} else {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
			reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			reg2 |= TTBCR2_SEP_UPSTREAM;
		}
		if (smmu->version > ARM_SMMU_V1)
			writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2);
	} else {
		reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
	}
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
	/* MAIRs (stage-1 only) */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			reg = pgtbl_cfg->arm_v7s_cfg.prrr;
			reg2 = pgtbl_cfg->arm_v7s_cfg.nmrr;
		} else {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
			reg2 = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		}
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
		writel_relaxed(reg2, cb_base + ARM_SMMU_CB_S1_MAIR1);
	}
	/* SCTLR */
	reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
	reg |= SCTLR_E;
#endif
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}
static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;
	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 *     S1               N              S1
	 *     S1             S1+S2            S1
	 *     S1               S2             S2
	 *     S1               S1             S1
	 *     N                N              N
	 *     N              S1+S2            S2
	 *     N                S2             S2
	 *     N                S1             S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
	    !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
	    (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
	    (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}
	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		} else {
			fmt = ARM_V7S;
			ias = min(ias, 32UL);
			oas = min(oas, 32UL);
		}
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}
	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (ret < 0)
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version < ARM_SMMU_V2) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}
	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= &arm_smmu_gather_ops,
		.iommu_dev	= smmu->dev,
	};

	smmu_domain->smmu = smmu;
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}
	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	domain->geometry.aperture_end = (1UL << ias) - 1;
	domain->geometry.force_aperture = true;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
			       IRQF_SHARED, "arm-smmu-context-fault", domain);
	if (ret < 0) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);
	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}
static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *cb_base;
	int irq;

	if (!smmu)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		devm_free_irq(smmu->dev, irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}
static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
	    iommu_get_dma_cookie(&smmu_domain->domain))) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->pgtbl_lock);

	return &smmu_domain->domain;
}
static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}
static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_smr *smr = smmu->smrs + idx;
	u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;

	if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
		reg |= SMR_VALID;
	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
}
static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
	u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
		  (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
		  (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;

	if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
	    smmu->smrs[idx].valid)
		reg |= S2CR_EXIDVALID;
	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
}
static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
{
	arm_smmu_write_s2cr(smmu, idx);
	if (smmu->smrs)
		arm_smmu_write_smr(smmu, idx);
}
/*
 * The width of SMR's mask field depends on sCR0_EXIDENABLE, so this function
 * should be called after sCR0 is written.
 */
static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	u32 smr;

	if (!smmu->smrs)
		return;

	/*
	 * SMR.ID bits may not be preserved if the corresponding MASK
	 * bits are set, so check each one separately. We can reject
	 * masters later if they try to claim IDs outside these masks.
	 */
	smr = smmu->streamid_mask << SMR_ID_SHIFT;
	writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
	smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
	smmu->streamid_mask = smr >> SMR_ID_SHIFT;

	smr = smmu->streamid_mask << SMR_MASK_SHIFT;
	writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
	smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
	smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
}
static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
{
	struct arm_smmu_smr *smrs = smmu->smrs;
	int i, free_idx = -ENOSPC;

	/* Stream indexing is blissfully easy */
	if (!smrs)
		return id;

	/* Validating SMRs is... less so */
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		if (!smrs[i].valid) {
			/*
			 * Note the first free entry we come across, which
			 * we'll claim in the end if nothing else matches.
			 */
			if (free_idx < 0)
				free_idx = i;
			continue;
		}
		/*
		 * If the new entry is _entirely_ matched by an existing entry,
		 * then reuse that, with the guarantee that there also cannot
		 * be any subsequent conflicting entries. In normal use we'd
		 * expect simply identical entries for this case, but there's
		 * no harm in accommodating the generalisation.
		 */
		if ((mask & smrs[i].mask) == mask &&
		    !((id ^ smrs[i].id) & ~smrs[i].mask))
			return i;
		/*
		 * If the new entry has any other overlap with an existing one,
		 * though, then there always exists at least one stream ID
		 * which would cause a conflict, and we can't allow that risk.
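		 * (E.g. an existing entry {id 0x0, mask 0x1} covers streams
		 * 0-1 and a new entry {id 0x1, mask 0x2} covers streams 1
		 * and 3: neither subsumes the other, but
		 * (0x0 ^ 0x1) & ~(0x1 | 0x2) == 0, so the shared stream ID 1
		 * is caught here.)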
		 */
		if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
			return -EINVAL;
	}

	return free_idx;
}
static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
{
	if (--smmu->s2crs[idx].count)
		return false;

	smmu->s2crs[idx] = s2cr_init_val;
	if (smmu->smrs)
		smmu->smrs[idx].valid = false;

	return true;
}
static int arm_smmu_master_alloc_smes(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
	struct arm_smmu_device *smmu = cfg->smmu;
	struct arm_smmu_smr *smrs = smmu->smrs;
	struct iommu_group *group;
	int i, idx, ret;

	mutex_lock(&smmu->stream_map_mutex);
	/* Figure out a viable stream map entry allocation */
	for_each_cfg_sme(fwspec, i, idx) {
		u16 sid = fwspec->ids[i];
		u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;

		if (idx != INVALID_SMENDX) {
			ret = -EEXIST;
			goto out_err;
		}

		ret = arm_smmu_find_sme(smmu, sid, mask);
		if (ret < 0)
			goto out_err;

		idx = ret;
		if (smrs && smmu->s2crs[idx].count == 0) {
			smrs[idx].id = sid;
			smrs[idx].mask = mask;
			smrs[idx].valid = true;
		}
		smmu->s2crs[idx].count++;
		cfg->smendx[i] = (s16)idx;
	}

	group = iommu_group_get_for_dev(dev);
	if (!group)
		group = ERR_PTR(-ENOMEM);
	if (IS_ERR(group)) {
		ret = PTR_ERR(group);
		goto out_err;
	}
	iommu_group_put(group);

	/* It worked! Now, poke the actual hardware */
	for_each_cfg_sme(fwspec, i, idx) {
		arm_smmu_write_sme(smmu, idx);
		smmu->s2crs[idx].group = group;
	}

	mutex_unlock(&smmu->stream_map_mutex);
	return 0;

out_err:
	while (i--) {
		arm_smmu_free_sme(smmu, cfg->smendx[i]);
		cfg->smendx[i] = INVALID_SMENDX;
	}
	mutex_unlock(&smmu->stream_map_mutex);
	return ret;
}
static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
{
	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
	struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
	int i, idx;

	mutex_lock(&smmu->stream_map_mutex);
	for_each_cfg_sme(fwspec, i, idx) {
		if (arm_smmu_free_sme(smmu, idx))
			arm_smmu_write_sme(smmu, idx);
		cfg->smendx[i] = INVALID_SMENDX;
	}
	mutex_unlock(&smmu->stream_map_mutex);
}
static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
				      struct iommu_fwspec *fwspec)
{
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_s2cr *s2cr = smmu->s2crs;
	enum arm_smmu_s2cr_type type = S2CR_TYPE_TRANS;
	u8 cbndx = smmu_domain->cfg.cbndx;
	int i, idx;

	for_each_cfg_sme(fwspec, i, idx) {
		if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
			continue;

		s2cr[idx].type = type;
		s2cr[idx].privcfg = S2CR_PRIVCFG_UNPRIV;
		s2cr[idx].cbndx = cbndx;
		arm_smmu_write_s2cr(smmu, idx);
	}
	return 0;
}
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret;
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_device *smmu;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	if (!fwspec || fwspec->ops != &arm_smmu_ops) {
		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	/*
	 * FIXME: The arch/arm DMA API code tries to attach devices to its own
	 * domains between of_xlate() and add_device() - we have no way to cope
	 * with that, so until ARM gets converted to rely on groups and default
	 * domains, just say no (but more politely than by dereferencing NULL).
	 * This should be at least a WARN_ON once that's sorted.
	 */
	if (!fwspec->iommu_priv)
		return -ENODEV;

	smmu = fwspec_smmu(fwspec);
	/* Ensure that the domain is finalised */
	ret = arm_smmu_init_domain_context(domain, smmu);
	if (ret < 0)
		return ret;

	/*
	 * Sanity check the domain. We don't support domains across
	 * different SMMUs.
	 */
	if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
			dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
		return -EINVAL;
	}

	/* Looks ok, so add the device to the domain */
	return arm_smmu_domain_add_master(smmu_domain, fwspec);
}
static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	int ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->map(ops, iova, paddr, size, prot);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}
static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			     size_t size)
{
	size_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->unmap(ops, iova, size);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
	struct device *dev = smmu->dev;
	void __iomem *cb_base;
	u32 tmp;
	u64 phys;
	unsigned long va;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	/* ATS1 registers can only be written atomically */
	va = iova & ~0xfffUL;
	if (smmu->version == ARM_SMMU_V2)
		smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
	else /* Register is only 32-bit in v1 */
		writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);

	if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
				      !(tmp & ATSR_ACTIVE), 5, 50)) {
		dev_err(dev,
			"iova to phys timed out on %pad. Falling back to software table walk.\n",
			&iova);
		return ops->iova_to_phys(ops, iova);
	}

	phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
	if (phys & CB_PAR_F) {
		dev_err(dev, "translation fault!\n");
		dev_err(dev, "PAR = 0x%llx\n", phys);
		return 0;
	}

	return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
}
static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	phys_addr_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
	    smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		ret = arm_smmu_iova_to_phys_hard(domain, iova);
	} else {
		ret = ops->iova_to_phys(ops, iova);
	}
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);

	return ret;
}
static bool arm_smmu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		/*
		 * Return true here as the SMMU can always send out coherent
		 * requests.
		 */
		return true;
	case IOMMU_CAP_INTR_REMAP:
		return true; /* MSIs are just memory writes */
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}
static int arm_smmu_match_node(struct device *dev, void *data)
{
	return dev->fwnode == data;
}
static
struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
{
	struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
						fwnode, arm_smmu_match_node);
	put_device(dev);
	return dev ? dev_get_drvdata(dev) : NULL;
}
static int arm_smmu_add_device(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct arm_smmu_master_cfg *cfg;
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	int i, ret;

	if (using_legacy_binding) {
		ret = arm_smmu_register_legacy_master(dev, &smmu);
		fwspec = dev->iommu_fwspec;
		if (ret)
			goto out_free;
	} else if (fwspec && fwspec->ops == &arm_smmu_ops) {
		smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
	} else {
		return -ENODEV;
	}

	ret = -EINVAL;
	for (i = 0; i < fwspec->num_ids; i++) {
		u16 sid = fwspec->ids[i];
		u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;

		if (sid & ~smmu->streamid_mask) {
			dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
				sid, smmu->streamid_mask);
			goto out_free;
		}
		if (mask & ~smmu->smr_mask_mask) {
			dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
				mask, smmu->smr_mask_mask);
			goto out_free;
		}
	}

	ret = -ENOMEM;
	cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
		      GFP_KERNEL);
	if (!cfg)
		goto out_free;

	cfg->smmu = smmu;
	fwspec->iommu_priv = cfg;
	while (i--)
		cfg->smendx[i] = INVALID_SMENDX;

	ret = arm_smmu_master_alloc_smes(dev);
	if (ret)
		goto out_free;

	return 0;

out_free:
	if (fwspec)
		kfree(fwspec->iommu_priv);
	iommu_fwspec_free(dev);
	return ret;
}
static void arm_smmu_remove_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;

	if (!fwspec || fwspec->ops != &arm_smmu_ops)
		return;

	arm_smmu_master_free_smes(fwspec);
	iommu_group_remove_device(dev);
	kfree(fwspec->iommu_priv);
	iommu_fwspec_free(dev);
}
static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
	struct iommu_group *group = NULL;
	int i, idx;

	for_each_cfg_sme(fwspec, i, idx) {
		if (group && smmu->s2crs[idx].group &&
		    group != smmu->s2crs[idx].group)
			return ERR_PTR(-EINVAL);

		group = smmu->s2crs[idx].group;
	}

	if (group)
		return iommu_group_ref_get(group);

	if (dev_is_pci(dev))
		group = pci_device_group(dev);
	else
		group = generic_device_group(dev);

	return group;
}
static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
		return 0;
	default:
		return -ENODEV;
	}
}
static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	mutex_lock(&smmu_domain->init_mutex);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		if (smmu_domain->smmu) {
			ret = -EPERM;
			goto out_unlock;
		}

		if (*(int *)data)
			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
		else
			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

		break;
	default:
		ret = -ENODEV;
	}

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}
static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	u32 fwid = 0;

	if (args->args_count > 0)
		fwid |= (u16)args->args[0];

	if (args->args_count > 1)
		fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;

	return iommu_fwspec_add_ids(dev, &fwid, 1);
}
static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_alloc		= arm_smmu_domain_alloc,
	.domain_free		= arm_smmu_domain_free,
	.attach_dev		= arm_smmu_attach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.map_sg			= default_iommu_map_sg,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.device_group		= arm_smmu_device_group,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.of_xlate		= arm_smmu_of_xlate,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
};
static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	void __iomem *cb_base;
	int i;
	u32 reg, major;

	/* clear global FSR */
	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);

	/*
	 * Reset stream mapping groups: Initial values mark all SMRn as
	 * invalid and all S2CRn as bypass unless overridden.
	 */
	for (i = 0; i < smmu->num_mapping_groups; ++i)
		arm_smmu_write_sme(smmu, i);
	if (smmu->model == ARM_MMU500) {
		/*
		 * Before clearing ARM_MMU500_ACTLR_CPRE, need to
		 * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
		 * bit is only present in MMU-500r2 onwards.
		 */
		reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
		major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
		reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
		if (major >= 2)
			reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
		/*
		 * Allow unmatched Stream IDs to allocate bypass
		 * TLB entries for reduced latency.
		 */
		reg |= ARM_MMU500_ACR_SMTNMB_TLBEN;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
	}
	/* Make sure all context banks are disabled and clear CB_FSR  */
	for (i = 0; i < smmu->num_context_banks; ++i) {
		cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
		writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
		writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
		/*
		 * Disable MMU-500's not-particularly-beneficial next-page
		 * prefetcher for the sake of errata #841119 and #826419.
		 */
		if (smmu->model == ARM_MMU500) {
			reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
			reg &= ~ARM_MMU500_ACTLR_CPRE;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
		}
	}
	/* Invalidate the TLB, just in case */
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);

	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);

	/* Enable fault reporting */
	reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);

	/* Disable TLB broadcasting. */
	reg |= (sCR0_VMIDPNE | sCR0_PTM);

	/* Enable client access, handling unmatched streams as appropriate */
	reg &= ~sCR0_CLIENTPD;
	if (disable_bypass)
		reg |= sCR0_USFCFG;
	else
		reg &= ~sCR0_USFCFG;

	/* Disable forced broadcasting */
	reg &= ~sCR0_FB;

	/* Don't upgrade barriers */
	reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);

	if (smmu->features & ARM_SMMU_FEAT_VMID16)
		reg |= sCR0_VMID16EN;

	if (smmu->features & ARM_SMMU_FEAT_EXIDS)
		reg |= sCR0_EXIDENABLE;

	/* Push the button */
	__arm_smmu_tlb_sync(smmu);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
}
static int arm_smmu_id_size_to_bits(int size)
{
	switch (size) {
	case 0:
		return 32;
	case 1:
		return 36;
	case 2:
		return 40;
	case 3:
		return 42;
	case 4:
		return 44;
	case 5:
	default:
		return 48;
	}
}
static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
{
	unsigned long size;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	u32 id;
	bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
	int i;

	dev_notice(smmu->dev, "probing hardware configuration...\n");
	dev_notice(smmu->dev, "SMMUv%d with:\n",
			smmu->version == ARM_SMMU_V2 ? 2 : 1);
	/* ID0 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);

	/* Restrict available stages based on module parameter */
	if (force_stage == 1)
		id &= ~(ID0_S2TS | ID0_NTS);
	else if (force_stage == 2)
		id &= ~(ID0_S1TS | ID0_NTS);

	if (id & ID0_S1TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
		dev_notice(smmu->dev, "\tstage 1 translation\n");
	}

	if (id & ID0_S2TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
		dev_notice(smmu->dev, "\tstage 2 translation\n");
	}

	if (id & ID0_NTS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
		dev_notice(smmu->dev, "\tnested translation\n");
	}

	if (!(smmu->features &
		(ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
		dev_err(smmu->dev, "\tno translation support!\n");
		return -ENODEV;
	}

	if ((id & ID0_S1TS) &&
		((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
		dev_notice(smmu->dev, "\taddress translation ops\n");
	}
	/*
	 * In order for DMA API calls to work properly, we must defer to what
	 * the FW says about coherency, regardless of what the hardware claims.
	 * Fortunately, this also opens up a workaround for systems where the
	 * ID register value has ended up configured incorrectly.
	 */
	cttw_reg = !!(id & ID0_CTTW);
	if (cttw_fw || cttw_reg)
		dev_notice(smmu->dev, "\t%scoherent table walk\n",
			   cttw_fw ? "" : "non-");
	if (cttw_fw != cttw_reg)
		dev_notice(smmu->dev,
			   "\t(IDR0.CTTW overridden by FW configuration)\n");
	/* Max. number of entries we have for stream matching/indexing */
	if (smmu->version == ARM_SMMU_V2 && id & ID0_EXIDS) {
		smmu->features |= ARM_SMMU_FEAT_EXIDS;
		size = 1 << 16;
	} else {
		size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
	}
	smmu->streamid_mask = size - 1;
	if (id & ID0_SMS) {
		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
		size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
		if (size == 0) {
			dev_err(smmu->dev,
				"stream-matching supported, but no SMRs present!\n");
			return -ENODEV;
		}

		/* Zero-initialised to mark as invalid */
		smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
					  GFP_KERNEL);
		if (!smmu->smrs)
			return -ENOMEM;

		dev_notice(smmu->dev,
			   "\tstream matching with %lu register groups", size);
	}
	/* s2cr->type == 0 means translation, so initialise explicitly */
	smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
					 GFP_KERNEL);
	if (!smmu->s2crs)
		return -ENOMEM;
	for (i = 0; i < size; i++)
		smmu->s2crs[i] = s2cr_init_val;

	smmu->num_mapping_groups = size;
	mutex_init(&smmu->stream_map_mutex);
	if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
		smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
		if (!(id & ID0_PTFS_NO_AARCH32S))
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
	}
	/* ID1 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
	smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;

	/* Check for size mismatch of SMMU address space from mapped region */
	size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
	size *= 2 << smmu->pgshift;
	if (smmu->size != size)
		dev_warn(smmu->dev,
			"SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
			size, smmu->size);
	smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
	smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
	if (smmu->num_s2_context_banks > smmu->num_context_banks) {
		dev_err(smmu->dev, "impossible number of S2 context banks!\n");
		return -ENODEV;
	}
	dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
		   smmu->num_context_banks, smmu->num_s2_context_banks);
	/*
	 * Cavium CN88xx erratum #27704.
	 * Ensure ASID and VMID allocation is unique across all SMMUs in
	 * the system.
	 */
	if (smmu->model == CAVIUM_SMMUV2) {
		smmu->cavium_id_base =
			atomic_add_return(smmu->num_context_banks,
					  &cavium_smmu_context_count);
		smmu->cavium_id_base -= smmu->num_context_banks;
	}
	/* ID2 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
	size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
	smmu->ipa_size = size;

	/* The output mask is also applied for bypass */
	size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
	smmu->pa_size = size;

	if (id & ID2_VMID16)
		smmu->features |= ARM_SMMU_FEAT_VMID16;
	/*
	 * What the page table walker can address actually depends on which
	 * descriptor format is in use, but since a) we don't know that yet,
	 * and b) it can vary per context bank, this will have to do...
	 */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");
	if (smmu->version < ARM_SMMU_V2) {
		smmu->va_size = smmu->ipa_size;
		if (smmu->version == ARM_SMMU_V1_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	} else {
		size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
		smmu->va_size = arm_smmu_id_size_to_bits(size);
		if (id & ID2_PTFS_4K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
		if (id & ID2_PTFS_16K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
		if (id & ID2_PTFS_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	}
	/* Now we've corralled the various formats, what'll it do? */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
		smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
	if (smmu->features &
	    (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
		smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
		smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
		smmu->pgsize_bitmap |= SZ_64K | SZ_512M;

	if (arm_smmu_ops.pgsize_bitmap == -1UL)
		arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
	else
		arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
	dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
		   smmu->pgsize_bitmap);
	if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
		dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
			   smmu->va_size, smmu->ipa_size);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
		dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
			   smmu->ipa_size, smmu->pa_size);

	return 0;
}
struct arm_smmu_match_data {
	enum arm_smmu_arch_version version;
	enum arm_smmu_implementation model;
};

#define ARM_SMMU_MATCH_DATA(name, ver, imp)	\
static struct arm_smmu_match_data name = { .version = ver, .model = imp }
ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
	{ .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
	{ .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
	{ .compatible = "arm,mmu-401", .data = &arm_mmu401 },
	{ .compatible = "arm,mmu-500", .data = &arm_mmu500 },
	{ .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
#ifdef CONFIG_ACPI
static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
{
	int ret = 0;

	switch (model) {
	case ACPI_IORT_SMMU_V1:
	case ACPI_IORT_SMMU_CORELINK_MMU400:
		smmu->version = ARM_SMMU_V1;
		smmu->model = GENERIC_SMMU;
		break;
	case ACPI_IORT_SMMU_V2:
		smmu->version = ARM_SMMU_V2;
		smmu->model = GENERIC_SMMU;
		break;
	case ACPI_IORT_SMMU_CORELINK_MMU500:
		smmu->version = ARM_SMMU_V2;
		smmu->model = ARM_MMU500;
		break;
	default:
		ret = -ENODEV;
	}

	return ret;
}
static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
				      struct arm_smmu_device *smmu)
{
	struct device *dev = smmu->dev;
	struct acpi_iort_node *node =
		*(struct acpi_iort_node **)dev_get_platdata(dev);
	struct acpi_iort_smmu *iort_smmu;
	int ret;

	/* Retrieve SMMU1/2 specific data */
	iort_smmu = (struct acpi_iort_smmu *)node->node_data;

	ret = acpi_smmu_get_data(iort_smmu->model, smmu);
	if (ret < 0)
		return ret;

	/* Ignore the configuration access interrupt */
	smmu->num_global_irqs = 1;

	if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK)
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;

	return 0;
}
#else
static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
					     struct arm_smmu_device *smmu)
{
	return -ENODEV;
}
#endif
static int arm_smmu_device_dt_probe(struct platform_device *pdev,
				    struct arm_smmu_device *smmu)
{
	const struct arm_smmu_match_data *data;
	struct device *dev = &pdev->dev;
	bool legacy_binding;

	if (of_property_read_u32(dev->of_node, "#global-interrupts",
				 &smmu->num_global_irqs)) {
		dev_err(dev, "missing #global-interrupts property\n");
		return -ENODEV;
	}

	data = of_device_get_match_data(dev);
	smmu->version = data->version;
	smmu->model = data->model;

	parse_driver_options(smmu);
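	/*
	 * The legacy "mmu-masters" binding and the generic "iommus" binding
	 * cannot be mixed within one system: whichever style probes first
	 * wins, and any later SMMU describing its masters the other way is
	 * refused below.
	 */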
	legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
	if (legacy_binding && !using_generic_binding) {
		if (!using_legacy_binding)
			pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
		using_legacy_binding = true;
	} else if (!legacy_binding && !using_legacy_binding) {
		using_generic_binding = true;
	} else {
		dev_err(dev, "not probing due to mismatched DT properties\n");
		return -ENODEV;
	}

	if (of_dma_is_coherent(dev->of_node))
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;

	return 0;
}
static int arm_smmu_device_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	int num_irqs, i, err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	if (dev->of_node)
		err = arm_smmu_device_dt_probe(pdev, smmu);
	else
		err = arm_smmu_device_acpi_probe(pdev, smmu);

	if (err)
		return err;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);
	smmu->size = resource_size(res);

	num_irqs = 0;
	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
		num_irqs++;
		if (num_irqs > smmu->num_global_irqs)
			smmu->num_context_irqs++;
	}

	if (!smmu->num_context_irqs) {
		dev_err(dev, "found %d interrupts but expected at least %d\n",
			num_irqs, smmu->num_global_irqs + 1);
		return -ENODEV;
	}

	smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
				  GFP_KERNEL);
	if (!smmu->irqs) {
		dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
		return -ENOMEM;
	}

	for (i = 0; i < num_irqs; ++i) {
		int irq = platform_get_irq(pdev, i);

		if (irq < 0) {
			dev_err(dev, "failed to get irq index %d\n", i);
			return -ENODEV;
		}
		smmu->irqs[i] = irq;
	}
	err = arm_smmu_device_cfg_probe(smmu);
	if (err)
		return err;

	if (smmu->version == ARM_SMMU_V2 &&
	    smmu->num_context_banks != smmu->num_context_irqs) {
		dev_err(dev,
			"found only %d context interrupt(s) but %d required\n",
			smmu->num_context_irqs, smmu->num_context_banks);
		return -ENODEV;
	}
	for (i = 0; i < smmu->num_global_irqs; ++i) {
		err = devm_request_irq(smmu->dev, smmu->irqs[i],
				       arm_smmu_global_fault,
				       IRQF_SHARED,
				       "arm-smmu global fault",
				       smmu);
		if (err) {
			dev_err(dev, "failed to request global IRQ %d (%u)\n",
				i, smmu->irqs[i]);
			return err;
		}
	}

	iommu_register_instance(dev->fwnode, &arm_smmu_ops);
	platform_set_drvdata(pdev, smmu);
	arm_smmu_device_reset(smmu);
	arm_smmu_test_smr_masks(smmu);
	/* Oh, for a proper bus abstraction */
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
#ifdef CONFIG_ARM_AMBA
	if (!iommu_present(&amba_bustype))
		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
#endif
#ifdef CONFIG_PCI
	if (!iommu_present(&pci_bus_type)) {
		pci_request_acs();
		bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
	}
#endif
	return 0;
}
static int arm_smmu_device_remove(struct platform_device *pdev)
{
	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);

	if (!smmu)
		return -ENODEV;

	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
		dev_err(&pdev->dev, "removing device with active domains!\n");

	/* Turn the thing off */
	writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
	return 0;
}
static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name		= "arm-smmu",
		.of_match_table	= of_match_ptr(arm_smmu_of_match),
	},
	.probe	= arm_smmu_device_probe,
	.remove	= arm_smmu_device_remove,
};
static int __init arm_smmu_init(void)
{
	static bool registered;
	int ret = 0;

	if (!registered) {
		ret = platform_driver_register(&arm_smmu_driver);
		registered = !ret;
	}
	return ret;
}

static void __exit arm_smmu_exit(void)
{
	return platform_driver_unregister(&arm_smmu_driver);
}

subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);
static int __init arm_smmu_of_init(struct device_node *np)
{
	int ret = arm_smmu_init();

	if (ret)
		return ret;

	if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root))
		return -ENODEV;

	return 0;
}
IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", arm_smmu_of_init);
IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", arm_smmu_of_init);
IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", arm_smmu_of_init);
IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", arm_smmu_of_init);
IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", arm_smmu_of_init);
IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", arm_smmu_of_init);
#ifdef CONFIG_ACPI
static int __init arm_smmu_acpi_init(struct acpi_table_header *table)
{
	if (iort_node_match(ACPI_IORT_NODE_SMMU))
		return arm_smmu_init();

	return 0;
}
IORT_ACPI_DECLARE(arm_smmu, ACPI_SIG_IORT, arm_smmu_acpi_init);
#endif
MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");