/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 *	- Extended Stream ID (16 bit)
 */
30 #define pr_fmt(fmt) "arm-smmu: " fmt
32 #include <linux/acpi.h>
33 #include <linux/acpi_iort.h>
34 #include <linux/atomic.h>
35 #include <linux/delay.h>
36 #include <linux/dma-iommu.h>
37 #include <linux/dma-mapping.h>
38 #include <linux/err.h>
39 #include <linux/interrupt.h>
41 #include <linux/io-64-nonatomic-hi-lo.h>
42 #include <linux/iommu.h>
43 #include <linux/iopoll.h>
44 #include <linux/module.h>
46 #include <linux/of_address.h>
47 #include <linux/of_device.h>
48 #include <linux/of_iommu.h>
49 #include <linux/pci.h>
50 #include <linux/platform_device.h>
51 #include <linux/slab.h>
52 #include <linux/spinlock.h>
54 #include <linux/amba/bus.h>
56 #include "io-pgtable.h"
58 /* Maximum number of context banks per SMMU */
59 #define ARM_SMMU_MAX_CBS 128
61 /* SMMU global address space */
62 #define ARM_SMMU_GR0(smmu) ((smmu)->base)
63 #define ARM_SMMU_GR1(smmu) ((smmu)->base + (1 << (smmu)->pgshift))
/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * and so on).
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))
/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif
86 /* Configuration registers */
87 #define ARM_SMMU_GR0_sCR0 0x0
88 #define sCR0_CLIENTPD (1 << 0)
89 #define sCR0_GFRE (1 << 1)
90 #define sCR0_GFIE (1 << 2)
91 #define sCR0_EXIDENABLE (1 << 3)
92 #define sCR0_GCFGFRE (1 << 4)
93 #define sCR0_GCFGFIE (1 << 5)
94 #define sCR0_USFCFG (1 << 10)
95 #define sCR0_VMIDPNE (1 << 11)
96 #define sCR0_PTM (1 << 12)
97 #define sCR0_FB (1 << 13)
98 #define sCR0_VMID16EN (1 << 31)
99 #define sCR0_BSU_SHIFT 14
100 #define sCR0_BSU_MASK 0x3
102 /* Auxiliary Configuration register */
103 #define ARM_SMMU_GR0_sACR 0x10
105 /* Identification registers */
106 #define ARM_SMMU_GR0_ID0 0x20
107 #define ARM_SMMU_GR0_ID1 0x24
108 #define ARM_SMMU_GR0_ID2 0x28
109 #define ARM_SMMU_GR0_ID3 0x2c
110 #define ARM_SMMU_GR0_ID4 0x30
111 #define ARM_SMMU_GR0_ID5 0x34
112 #define ARM_SMMU_GR0_ID6 0x38
113 #define ARM_SMMU_GR0_ID7 0x3c
114 #define ARM_SMMU_GR0_sGFSR 0x48
115 #define ARM_SMMU_GR0_sGFSYNR0 0x50
116 #define ARM_SMMU_GR0_sGFSYNR1 0x54
117 #define ARM_SMMU_GR0_sGFSYNR2 0x58
119 #define ID0_S1TS (1 << 30)
120 #define ID0_S2TS (1 << 29)
121 #define ID0_NTS (1 << 28)
122 #define ID0_SMS (1 << 27)
123 #define ID0_ATOSNS (1 << 26)
124 #define ID0_PTFS_NO_AARCH32 (1 << 25)
125 #define ID0_PTFS_NO_AARCH32S (1 << 24)
126 #define ID0_CTTW (1 << 14)
127 #define ID0_NUMIRPT_SHIFT 16
128 #define ID0_NUMIRPT_MASK 0xff
129 #define ID0_NUMSIDB_SHIFT 9
130 #define ID0_NUMSIDB_MASK 0xf
131 #define ID0_EXIDS (1 << 8)
132 #define ID0_NUMSMRG_SHIFT 0
133 #define ID0_NUMSMRG_MASK 0xff
135 #define ID1_PAGESIZE (1 << 31)
136 #define ID1_NUMPAGENDXB_SHIFT 28
137 #define ID1_NUMPAGENDXB_MASK 7
138 #define ID1_NUMS2CB_SHIFT 16
139 #define ID1_NUMS2CB_MASK 0xff
140 #define ID1_NUMCB_SHIFT 0
141 #define ID1_NUMCB_MASK 0xff
143 #define ID2_OAS_SHIFT 4
144 #define ID2_OAS_MASK 0xf
145 #define ID2_IAS_SHIFT 0
146 #define ID2_IAS_MASK 0xf
147 #define ID2_UBS_SHIFT 8
148 #define ID2_UBS_MASK 0xf
149 #define ID2_PTFS_4K (1 << 12)
150 #define ID2_PTFS_16K (1 << 13)
151 #define ID2_PTFS_64K (1 << 14)
152 #define ID2_VMID16 (1 << 15)
154 #define ID7_MAJOR_SHIFT 4
155 #define ID7_MAJOR_MASK 0xf
157 /* Global TLB invalidation */
158 #define ARM_SMMU_GR0_TLBIVMID 0x64
159 #define ARM_SMMU_GR0_TLBIALLNSNH 0x68
160 #define ARM_SMMU_GR0_TLBIALLH 0x6c
161 #define ARM_SMMU_GR0_sTLBGSYNC 0x70
162 #define ARM_SMMU_GR0_sTLBGSTATUS 0x74
163 #define sTLBGSTATUS_GSACTIVE (1 << 0)
164 #define TLB_LOOP_TIMEOUT 1000000 /* 1s! */
165 #define TLB_SPIN_COUNT 10
167 /* Stream mapping registers */
168 #define ARM_SMMU_GR0_SMR(n) (0x800 + ((n) << 2))
169 #define SMR_VALID (1 << 31)
170 #define SMR_MASK_SHIFT 16
171 #define SMR_ID_SHIFT 0
173 #define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 2))
174 #define S2CR_CBNDX_SHIFT 0
175 #define S2CR_CBNDX_MASK 0xff
176 #define S2CR_EXIDVALID (1 << 10)
177 #define S2CR_TYPE_SHIFT 16
178 #define S2CR_TYPE_MASK 0x3
179 enum arm_smmu_s2cr_type {
185 #define S2CR_PRIVCFG_SHIFT 24
186 #define S2CR_PRIVCFG_MASK 0x3
187 enum arm_smmu_s2cr_privcfg {
188 S2CR_PRIVCFG_DEFAULT,
194 /* Context bank attribute registers */
195 #define ARM_SMMU_GR1_CBAR(n) (0x0 + ((n) << 2))
196 #define CBAR_VMID_SHIFT 0
197 #define CBAR_VMID_MASK 0xff
198 #define CBAR_S1_BPSHCFG_SHIFT 8
199 #define CBAR_S1_BPSHCFG_MASK 3
200 #define CBAR_S1_BPSHCFG_NSH 3
201 #define CBAR_S1_MEMATTR_SHIFT 12
202 #define CBAR_S1_MEMATTR_MASK 0xf
203 #define CBAR_S1_MEMATTR_WB 0xf
204 #define CBAR_TYPE_SHIFT 16
205 #define CBAR_TYPE_MASK 0x3
206 #define CBAR_TYPE_S2_TRANS (0 << CBAR_TYPE_SHIFT)
207 #define CBAR_TYPE_S1_TRANS_S2_BYPASS (1 << CBAR_TYPE_SHIFT)
208 #define CBAR_TYPE_S1_TRANS_S2_FAULT (2 << CBAR_TYPE_SHIFT)
209 #define CBAR_TYPE_S1_TRANS_S2_TRANS (3 << CBAR_TYPE_SHIFT)
210 #define CBAR_IRPTNDX_SHIFT 24
211 #define CBAR_IRPTNDX_MASK 0xff
213 #define ARM_SMMU_GR1_CBA2R(n) (0x800 + ((n) << 2))
214 #define CBA2R_RW64_32BIT (0 << 0)
215 #define CBA2R_RW64_64BIT (1 << 0)
216 #define CBA2R_VMID_SHIFT 16
217 #define CBA2R_VMID_MASK 0xffff
219 /* Translation context bank */
220 #define ARM_SMMU_CB(smmu, n) ((smmu)->cb_base + ((n) << (smmu)->pgshift))
222 #define ARM_SMMU_CB_SCTLR 0x0
223 #define ARM_SMMU_CB_ACTLR 0x4
224 #define ARM_SMMU_CB_RESUME 0x8
225 #define ARM_SMMU_CB_TTBCR2 0x10
226 #define ARM_SMMU_CB_TTBR0 0x20
227 #define ARM_SMMU_CB_TTBR1 0x28
228 #define ARM_SMMU_CB_TTBCR 0x30
229 #define ARM_SMMU_CB_CONTEXTIDR 0x34
230 #define ARM_SMMU_CB_S1_MAIR0 0x38
231 #define ARM_SMMU_CB_S1_MAIR1 0x3c
232 #define ARM_SMMU_CB_PAR 0x50
233 #define ARM_SMMU_CB_FSR 0x58
234 #define ARM_SMMU_CB_FAR 0x60
235 #define ARM_SMMU_CB_FSYNR0 0x68
236 #define ARM_SMMU_CB_S1_TLBIVA 0x600
237 #define ARM_SMMU_CB_S1_TLBIASID 0x610
238 #define ARM_SMMU_CB_S1_TLBIVAL 0x620
239 #define ARM_SMMU_CB_S2_TLBIIPAS2 0x630
240 #define ARM_SMMU_CB_S2_TLBIIPAS2L 0x638
241 #define ARM_SMMU_CB_TLBSYNC 0x7f0
242 #define ARM_SMMU_CB_TLBSTATUS 0x7f4
243 #define ARM_SMMU_CB_ATS1PR 0x800
244 #define ARM_SMMU_CB_ATSR 0x8f0
246 #define SCTLR_S1_ASIDPNE (1 << 12)
247 #define SCTLR_CFCFG (1 << 7)
248 #define SCTLR_CFIE (1 << 6)
249 #define SCTLR_CFRE (1 << 5)
250 #define SCTLR_E (1 << 4)
251 #define SCTLR_AFE (1 << 2)
252 #define SCTLR_TRE (1 << 1)
253 #define SCTLR_M (1 << 0)
255 #define ARM_MMU500_ACTLR_CPRE (1 << 1)
257 #define ARM_MMU500_ACR_CACHE_LOCK (1 << 26)
258 #define ARM_MMU500_ACR_SMTNMB_TLBEN (1 << 8)
260 #define CB_PAR_F (1 << 0)
262 #define ATSR_ACTIVE (1 << 0)
264 #define RESUME_RETRY (0 << 0)
265 #define RESUME_TERMINATE (1 << 0)
267 #define TTBCR2_SEP_SHIFT 15
268 #define TTBCR2_SEP_UPSTREAM (0x7 << TTBCR2_SEP_SHIFT)
269 #define TTBCR2_AS (1 << 4)
271 #define TTBRn_ASID_SHIFT 48
273 #define FSR_MULTI (1 << 31)
274 #define FSR_SS (1 << 30)
275 #define FSR_UUT (1 << 8)
276 #define FSR_ASF (1 << 7)
277 #define FSR_TLBLKF (1 << 6)
278 #define FSR_TLBMCF (1 << 5)
279 #define FSR_EF (1 << 4)
280 #define FSR_PF (1 << 3)
281 #define FSR_AFF (1 << 2)
282 #define FSR_TF (1 << 1)
284 #define FSR_IGN (FSR_AFF | FSR_ASF | \
285 FSR_TLBMCF | FSR_TLBLKF)
286 #define FSR_FAULT (FSR_MULTI | FSR_SS | FSR_UUT | \
287 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)
289 #define FSYNR0_WNR (1 << 4)
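/*
 * Software-managed MSI window: a fixed IOVA region advertised to the IOMMU
 * core via arm_smmu_get_resv_regions() so that MSI doorbells can be mapped
 * for devices sitting behind the SMMU.
 */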
291 #define MSI_IOVA_BASE 0x8000000
292 #define MSI_IOVA_LENGTH 0x100000
294 static int force_stage;
295 module_param(force_stage, int, S_IRUGO);
296 MODULE_PARM_DESC(force_stage,
297 "Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
298 static bool disable_bypass;
299 module_param(disable_bypass, bool, S_IRUGO);
300 MODULE_PARM_DESC(disable_bypass,
301 "Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
303 enum arm_smmu_arch_version {
309 enum arm_smmu_implementation {
/* Until ACPICA headers cover IORT rev. C */
#ifndef ACPI_IORT_SMMU_CORELINK_MMU401
#define ACPI_IORT_SMMU_CORELINK_MMU401	0x4
#endif

#ifndef ACPI_IORT_SMMU_CAVIUM_THUNDERX
#define ACPI_IORT_SMMU_CAVIUM_THUNDERX	0x5
#endif
323 struct arm_smmu_s2cr {
324 struct iommu_group *group;
326 enum arm_smmu_s2cr_type type;
327 enum arm_smmu_s2cr_privcfg privcfg;
331 #define s2cr_init_val (struct arm_smmu_s2cr){ \
332 .type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS, \
335 struct arm_smmu_smr {
341 struct arm_smmu_master_cfg {
342 struct arm_smmu_device *smmu;
345 #define INVALID_SMENDX -1
346 #define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
347 #define fwspec_smmu(fw) (__fwspec_cfg(fw)->smmu)
348 #define fwspec_smendx(fw, i) \
349 (i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
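/*
 * Walk every stream map entry (SMR/S2CR index) allocated to a master's
 * fwspec IDs. A minimal usage sketch, mirroring how the driver pokes the
 * hardware for each entry:
 *
 *	for_each_cfg_sme(fwspec, i, idx)
 *		arm_smmu_write_sme(smmu, idx);
 */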
350 #define for_each_cfg_sme(fw, i, idx) \
351 for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)
353 struct arm_smmu_device {
357 void __iomem *cb_base;
358 unsigned long pgshift;
360 #define ARM_SMMU_FEAT_COHERENT_WALK (1 << 0)
361 #define ARM_SMMU_FEAT_STREAM_MATCH (1 << 1)
362 #define ARM_SMMU_FEAT_TRANS_S1 (1 << 2)
363 #define ARM_SMMU_FEAT_TRANS_S2 (1 << 3)
364 #define ARM_SMMU_FEAT_TRANS_NESTED (1 << 4)
365 #define ARM_SMMU_FEAT_TRANS_OPS (1 << 5)
366 #define ARM_SMMU_FEAT_VMID16 (1 << 6)
367 #define ARM_SMMU_FEAT_FMT_AARCH64_4K (1 << 7)
368 #define ARM_SMMU_FEAT_FMT_AARCH64_16K (1 << 8)
369 #define ARM_SMMU_FEAT_FMT_AARCH64_64K (1 << 9)
370 #define ARM_SMMU_FEAT_FMT_AARCH32_L (1 << 10)
371 #define ARM_SMMU_FEAT_FMT_AARCH32_S (1 << 11)
372 #define ARM_SMMU_FEAT_EXIDS (1 << 12)
375 #define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
377 enum arm_smmu_arch_version version;
378 enum arm_smmu_implementation model;
380 u32 num_context_banks;
381 u32 num_s2_context_banks;
382 DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
385 u32 num_mapping_groups;
388 struct arm_smmu_smr *smrs;
389 struct arm_smmu_s2cr *s2crs;
390 struct mutex stream_map_mutex;
392 unsigned long va_size;
393 unsigned long ipa_size;
394 unsigned long pa_size;
395 unsigned long pgsize_bitmap;
398 u32 num_context_irqs;
401 u32 cavium_id_base; /* Specific to Cavium */
403 /* IOMMU core code handle */
404 struct iommu_device iommu;
407 enum arm_smmu_context_fmt {
408 ARM_SMMU_CTX_FMT_NONE,
409 ARM_SMMU_CTX_FMT_AARCH64,
410 ARM_SMMU_CTX_FMT_AARCH32_L,
411 ARM_SMMU_CTX_FMT_AARCH32_S,
414 struct arm_smmu_cfg {
422 enum arm_smmu_context_fmt fmt;
424 #define INVALID_IRPTNDX 0xff
426 enum arm_smmu_domain_stage {
427 ARM_SMMU_DOMAIN_S1 = 0,
429 ARM_SMMU_DOMAIN_NESTED,
430 ARM_SMMU_DOMAIN_BYPASS,
433 struct arm_smmu_domain {
434 struct arm_smmu_device *smmu;
435 struct io_pgtable_ops *pgtbl_ops;
436 spinlock_t pgtbl_lock;
437 struct arm_smmu_cfg cfg;
438 enum arm_smmu_domain_stage stage;
439 struct mutex init_mutex; /* Protects smmu pointer */
440 struct iommu_domain domain;
443 struct arm_smmu_option_prop {
448 static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);
450 static bool using_legacy_binding, using_generic_binding;
452 static struct arm_smmu_option_prop arm_smmu_options[] = {
453 { ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
457 static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
459 return container_of(dom, struct arm_smmu_domain, domain);
462 static void parse_driver_options(struct arm_smmu_device *smmu)
467 if (of_property_read_bool(smmu->dev->of_node,
468 arm_smmu_options[i].prop)) {
469 smmu->options |= arm_smmu_options[i].opt;
470 dev_notice(smmu->dev, "option %s\n",
471 arm_smmu_options[i].prop);
473 } while (arm_smmu_options[++i].opt);
476 static struct device_node *dev_get_dev_node(struct device *dev)
478 if (dev_is_pci(dev)) {
479 struct pci_bus *bus = to_pci_dev(dev)->bus;
481 while (!pci_is_root_bus(bus))
483 return of_node_get(bus->bridge->parent->of_node);
486 return of_node_get(dev->of_node);
489 static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
491 *((__be32 *)data) = cpu_to_be32(alias);
492 return 0; /* Continue walking */
495 static int __find_legacy_master_phandle(struct device *dev, void *data)
497 struct of_phandle_iterator *it = *(void **)data;
498 struct device_node *np = it->node;
501 of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
502 "#stream-id-cells", 0)
503 if (it->node == np) {
504 *(void **)data = dev;
508 return err == -ENOENT ? 0 : err;
511 static struct platform_driver arm_smmu_driver;
512 static struct iommu_ops arm_smmu_ops;
514 static int arm_smmu_register_legacy_master(struct device *dev,
515 struct arm_smmu_device **smmu)
517 struct device *smmu_dev;
518 struct device_node *np;
519 struct of_phandle_iterator it;
525 np = dev_get_dev_node(dev);
526 if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
532 err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
533 __find_legacy_master_phandle);
541 if (dev_is_pci(dev)) {
542 /* "mmu-masters" assumes Stream ID == Requester ID */
543 pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
549 err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
554 sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
558 *smmu = dev_get_drvdata(smmu_dev);
559 of_phandle_iterator_args(&it, sids, it.cur_count);
560 err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
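/*
 * Claim the first free index in [start, end) from an allocation bitmap
 * (e.g. smmu->context_map), retrying if we race with another caller.
 */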
565 static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
570 idx = find_next_zero_bit(map, end, start);
573 } while (test_and_set_bit(idx, map));
578 static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
583 /* Wait for any pending TLB invalidations to complete */
584 static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu,
585 void __iomem *sync, void __iomem *status)
587 unsigned int spin_cnt, delay;
589 writel_relaxed(0, sync);
590 for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
591 for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
592 if (!(readl_relaxed(status) & sTLBGSTATUS_GSACTIVE))
598 dev_err_ratelimited(smmu->dev,
599 "TLB sync timed out -- SMMU may be deadlocked\n");
602 static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
604 void __iomem *base = ARM_SMMU_GR0(smmu);
606 __arm_smmu_tlb_sync(smmu, base + ARM_SMMU_GR0_sTLBGSYNC,
607 base + ARM_SMMU_GR0_sTLBGSTATUS);
610 static void arm_smmu_tlb_sync_context(void *cookie)
612 struct arm_smmu_domain *smmu_domain = cookie;
613 struct arm_smmu_device *smmu = smmu_domain->smmu;
614 void __iomem *base = ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
616 __arm_smmu_tlb_sync(smmu, base + ARM_SMMU_CB_TLBSYNC,
617 base + ARM_SMMU_CB_TLBSTATUS);
620 static void arm_smmu_tlb_sync_vmid(void *cookie)
622 struct arm_smmu_domain *smmu_domain = cookie;
624 arm_smmu_tlb_sync_global(smmu_domain->smmu);
627 static void arm_smmu_tlb_inv_context_s1(void *cookie)
629 struct arm_smmu_domain *smmu_domain = cookie;
630 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
631 void __iomem *base = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);
633 writel_relaxed(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID);
634 arm_smmu_tlb_sync_context(cookie);
637 static void arm_smmu_tlb_inv_context_s2(void *cookie)
639 struct arm_smmu_domain *smmu_domain = cookie;
640 struct arm_smmu_device *smmu = smmu_domain->smmu;
641 void __iomem *base = ARM_SMMU_GR0(smmu);
643 writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
644 arm_smmu_tlb_sync_global(smmu);
647 static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
648 size_t granule, bool leaf, void *cookie)
650 struct arm_smmu_domain *smmu_domain = cookie;
651 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
652 bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
653 void __iomem *reg = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);
656 reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
658 if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
662 writel_relaxed(iova, reg);
664 } while (size -= granule);
667 iova |= (u64)cfg->asid << 48;
669 writeq_relaxed(iova, reg);
670 iova += granule >> 12;
671 } while (size -= granule);
674 reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
675 ARM_SMMU_CB_S2_TLBIIPAS2;
678 smmu_write_atomic_lq(iova, reg);
679 iova += granule >> 12;
680 } while (size -= granule);
685 * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
686 * almost negligible, but the benefit of getting the first one in as far ahead
687 * of the sync as possible is significant, hence we don't just make this a
* no-op and set .tlb_sync to arm_smmu_tlb_inv_context_s2() as you might think.
690 static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
691 size_t granule, bool leaf, void *cookie)
693 struct arm_smmu_domain *smmu_domain = cookie;
694 void __iomem *base = ARM_SMMU_GR0(smmu_domain->smmu);
696 writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
699 static const struct iommu_gather_ops arm_smmu_s1_tlb_ops = {
700 .tlb_flush_all = arm_smmu_tlb_inv_context_s1,
701 .tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
702 .tlb_sync = arm_smmu_tlb_sync_context,
705 static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v2 = {
706 .tlb_flush_all = arm_smmu_tlb_inv_context_s2,
707 .tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
708 .tlb_sync = arm_smmu_tlb_sync_context,
711 static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v1 = {
712 .tlb_flush_all = arm_smmu_tlb_inv_context_s2,
713 .tlb_add_flush = arm_smmu_tlb_inv_vmid_nosync,
714 .tlb_sync = arm_smmu_tlb_sync_vmid,
717 static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
721 struct iommu_domain *domain = dev;
722 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
723 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
724 struct arm_smmu_device *smmu = smmu_domain->smmu;
725 void __iomem *cb_base;
727 cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
728 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
730 if (!(fsr & FSR_FAULT))
733 fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
734 iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
736 dev_err_ratelimited(smmu->dev,
737 "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cb=%d\n",
738 fsr, iova, fsynr, cfg->cbndx);
740 writel(fsr, cb_base + ARM_SMMU_CB_FSR);
744 static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
746 u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
747 struct arm_smmu_device *smmu = dev;
748 void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);
750 gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
751 gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
752 gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
753 gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
758 dev_err_ratelimited(smmu->dev,
759 "Unexpected global fault, this could be serious\n");
760 dev_err_ratelimited(smmu->dev,
761 "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
762 gfsr, gfsynr0, gfsynr1, gfsynr2);
764 writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
768 static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
769 struct io_pgtable_cfg *pgtbl_cfg)
774 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
775 struct arm_smmu_device *smmu = smmu_domain->smmu;
776 void __iomem *cb_base, *gr1_base;
778 gr1_base = ARM_SMMU_GR1(smmu);
779 stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
780 cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
782 if (smmu->version > ARM_SMMU_V1) {
783 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
784 reg = CBA2R_RW64_64BIT;
786 reg = CBA2R_RW64_32BIT;
787 /* 16-bit VMIDs live in CBA2R */
788 if (smmu->features & ARM_SMMU_FEAT_VMID16)
789 reg |= cfg->vmid << CBA2R_VMID_SHIFT;
791 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
796 if (smmu->version < ARM_SMMU_V2)
797 reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;
800 * Use the weakest shareability/memory types, so they are
801 * overridden by the ttbcr/pte.
804 reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
805 (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
806 } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
807 /* 8-bit VMIDs live in CBAR */
808 reg |= cfg->vmid << CBAR_VMID_SHIFT;
810 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));
814 * We must write this before the TTBRs, since it determines the
815 * access behaviour of some fields (in particular, ASID[15:8]).
818 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
819 reg = pgtbl_cfg->arm_v7s_cfg.tcr;
822 reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
823 reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
824 reg2 |= TTBCR2_SEP_UPSTREAM;
825 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
828 if (smmu->version > ARM_SMMU_V1)
829 writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2);
831 reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
833 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
837 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
838 reg = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
839 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0);
840 reg = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
841 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1);
842 writel_relaxed(cfg->asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
844 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
845 reg64 |= (u64)cfg->asid << TTBRn_ASID_SHIFT;
846 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
847 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
848 reg64 |= (u64)cfg->asid << TTBRn_ASID_SHIFT;
849 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
852 reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
853 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
856 /* MAIRs (stage-1 only) */
858 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
859 reg = pgtbl_cfg->arm_v7s_cfg.prrr;
860 reg2 = pgtbl_cfg->arm_v7s_cfg.nmrr;
862 reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
863 reg2 = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
865 writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
866 writel_relaxed(reg2, cb_base + ARM_SMMU_CB_S1_MAIR1);
870 reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M;
872 reg |= SCTLR_S1_ASIDPNE;
876 writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
879 static int arm_smmu_init_domain_context(struct iommu_domain *domain,
880 struct arm_smmu_device *smmu)
882 int irq, start, ret = 0;
883 unsigned long ias, oas;
884 struct io_pgtable_ops *pgtbl_ops;
885 struct io_pgtable_cfg pgtbl_cfg;
886 enum io_pgtable_fmt fmt;
887 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
888 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
889 const struct iommu_gather_ops *tlb_ops;
891 mutex_lock(&smmu_domain->init_mutex);
892 if (smmu_domain->smmu)
895 if (domain->type == IOMMU_DOMAIN_IDENTITY) {
896 smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
897 smmu_domain->smmu = smmu;
902 * Mapping the requested stage onto what we support is surprisingly
903 * complicated, mainly because the spec allows S1+S2 SMMUs without
904 * support for nested translation. That means we end up with the
907 * Requested Supported Actual
917 * Note that you can't actually request stage-2 mappings.
919 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
920 smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
921 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
922 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
925 * Choosing a suitable context format is even more fiddly. Until we
926 * grow some way for the caller to express a preference, and/or move
927 * the decision into the io-pgtable code where it arguably belongs,
928 * just aim for the closest thing to the rest of the system, and hope
929 * that the hardware isn't esoteric enough that we can't assume AArch64
930 * support to be a superset of AArch32 support...
932 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
933 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
934 if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
935 !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
936 (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
937 (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
938 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
939 if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
940 (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
941 ARM_SMMU_FEAT_FMT_AARCH64_16K |
942 ARM_SMMU_FEAT_FMT_AARCH64_4K)))
943 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;
945 if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
950 switch (smmu_domain->stage) {
951 case ARM_SMMU_DOMAIN_S1:
952 cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
953 start = smmu->num_s2_context_banks;
955 oas = smmu->ipa_size;
956 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
957 fmt = ARM_64_LPAE_S1;
958 } else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
959 fmt = ARM_32_LPAE_S1;
960 ias = min(ias, 32UL);
961 oas = min(oas, 40UL);
964 ias = min(ias, 32UL);
965 oas = min(oas, 32UL);
967 tlb_ops = &arm_smmu_s1_tlb_ops;
969 case ARM_SMMU_DOMAIN_NESTED:
971 * We will likely want to change this if/when KVM gets
974 case ARM_SMMU_DOMAIN_S2:
975 cfg->cbar = CBAR_TYPE_S2_TRANS;
977 ias = smmu->ipa_size;
979 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
980 fmt = ARM_64_LPAE_S2;
982 fmt = ARM_32_LPAE_S2;
983 ias = min(ias, 40UL);
984 oas = min(oas, 40UL);
986 if (smmu->version == ARM_SMMU_V2)
987 tlb_ops = &arm_smmu_s2_tlb_ops_v2;
989 tlb_ops = &arm_smmu_s2_tlb_ops_v1;
995 ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
996 smmu->num_context_banks);
1001 if (smmu->version < ARM_SMMU_V2) {
1002 cfg->irptndx = atomic_inc_return(&smmu->irptndx);
1003 cfg->irptndx %= smmu->num_context_irqs;
1005 cfg->irptndx = cfg->cbndx;
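/*
 * ASIDs and VMIDs are offset by cavium_id_base so that they stay unique
 * across SMMUs affected by Cavium erratum 27704; the base is zero
 * everywhere else.
 */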
1008 if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
1009 cfg->vmid = cfg->cbndx + 1 + smmu->cavium_id_base;
1011 cfg->asid = cfg->cbndx + smmu->cavium_id_base;
1013 pgtbl_cfg = (struct io_pgtable_cfg) {
1014 .pgsize_bitmap = smmu->pgsize_bitmap,
1018 .iommu_dev = smmu->dev,
1021 if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
1022 pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_NO_DMA;
1024 smmu_domain->smmu = smmu;
1025 pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
1028 goto out_clear_smmu;
1031 /* Update the domain's page sizes to reflect the page table format */
1032 domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
1033 domain->geometry.aperture_end = (1UL << ias) - 1;
1034 domain->geometry.force_aperture = true;
1036 /* Initialise the context bank with our page table cfg */
1037 arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
1040 * Request context fault interrupt. Do this last to avoid the
1041 * handler seeing a half-initialised domain state.
1043 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
1044 ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
1045 IRQF_SHARED, "arm-smmu-context-fault", domain);
1047 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
1049 cfg->irptndx = INVALID_IRPTNDX;
1052 mutex_unlock(&smmu_domain->init_mutex);
1054 /* Publish page table ops for map/unmap */
1055 smmu_domain->pgtbl_ops = pgtbl_ops;
1059 smmu_domain->smmu = NULL;
1061 mutex_unlock(&smmu_domain->init_mutex);
1065 static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
1067 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1068 struct arm_smmu_device *smmu = smmu_domain->smmu;
1069 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1070 void __iomem *cb_base;
1073 if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY)
1077 * Disable the context bank and free the page tables before freeing
1080 cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
1081 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
1083 if (cfg->irptndx != INVALID_IRPTNDX) {
1084 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
1085 devm_free_irq(smmu->dev, irq, domain);
1088 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
1089 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
1092 static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
1094 struct arm_smmu_domain *smmu_domain;
1096 if (type != IOMMU_DOMAIN_UNMANAGED &&
1097 type != IOMMU_DOMAIN_DMA &&
1098 type != IOMMU_DOMAIN_IDENTITY)
1101 * Allocate the domain and initialise some of its data structures.
1102 * We can't really do anything meaningful until we've added a
1105 smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
1109 if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
1110 iommu_get_dma_cookie(&smmu_domain->domain))) {
1115 mutex_init(&smmu_domain->init_mutex);
1116 spin_lock_init(&smmu_domain->pgtbl_lock);
1118 return &smmu_domain->domain;
1121 static void arm_smmu_domain_free(struct iommu_domain *domain)
1123 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1126 * Free the domain resources. We assume that all devices have
1127 * already been detached.
1129 iommu_put_dma_cookie(domain);
1130 arm_smmu_destroy_domain_context(domain);
1134 static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
1136 struct arm_smmu_smr *smr = smmu->smrs + idx;
1137 u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;
1139 if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
1141 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
1144 static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
1146 struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
1147 u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
1148 (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
1149 (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;
1151 if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
1152 smmu->smrs[idx].valid)
1153 reg |= S2CR_EXIDVALID;
1154 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
1157 static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
1159 arm_smmu_write_s2cr(smmu, idx);
1161 arm_smmu_write_smr(smmu, idx);
1165 * The width of SMR's mask field depends on sCR0_EXIDENABLE, so this function
1166 * should be called after sCR0 is written.
1168 static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
1170 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1177 * SMR.ID bits may not be preserved if the corresponding MASK
1178 * bits are set, so check each one separately. We can reject
1179 * masters later if they try to claim IDs outside these masks.
1181 smr = smmu->streamid_mask << SMR_ID_SHIFT;
1182 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
1183 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
1184 smmu->streamid_mask = smr >> SMR_ID_SHIFT;
1186 smr = smmu->streamid_mask << SMR_MASK_SHIFT;
1187 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
1188 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
1189 smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
1192 static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
1194 struct arm_smmu_smr *smrs = smmu->smrs;
1195 int i, free_idx = -ENOSPC;
1197 /* Stream indexing is blissfully easy */
1201 /* Validating SMRs is... less so */
1202 for (i = 0; i < smmu->num_mapping_groups; ++i) {
1203 if (!smrs[i].valid) {
1205 * Note the first free entry we come across, which
1206 * we'll claim in the end if nothing else matches.
1213 * If the new entry is _entirely_ matched by an existing entry,
1214 * then reuse that, with the guarantee that there also cannot
1215 * be any subsequent conflicting entries. In normal use we'd
1216 * expect simply identical entries for this case, but there's
1217 * no harm in accommodating the generalisation.
1219 if ((mask & smrs[i].mask) == mask &&
1220 !((id ^ smrs[i].id) & ~smrs[i].mask))
1223 * If the new entry has any other overlap with an existing one,
1224 * though, then there always exists at least one stream ID
1225 * which would cause a conflict, and we can't allow that risk.
1227 if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
1234 static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
1236 if (--smmu->s2crs[idx].count)
1239 smmu->s2crs[idx] = s2cr_init_val;
1241 smmu->smrs[idx].valid = false;
1246 static int arm_smmu_master_alloc_smes(struct device *dev)
1248 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
1249 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
1250 struct arm_smmu_device *smmu = cfg->smmu;
1251 struct arm_smmu_smr *smrs = smmu->smrs;
1252 struct iommu_group *group;
1255 mutex_lock(&smmu->stream_map_mutex);
1256 /* Figure out a viable stream map entry allocation */
1257 for_each_cfg_sme(fwspec, i, idx) {
1258 u16 sid = fwspec->ids[i];
1259 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
1261 if (idx != INVALID_SMENDX) {
1266 ret = arm_smmu_find_sme(smmu, sid, mask);
1271 if (smrs && smmu->s2crs[idx].count == 0) {
1273 smrs[idx].mask = mask;
1274 smrs[idx].valid = true;
1276 smmu->s2crs[idx].count++;
1277 cfg->smendx[i] = (s16)idx;
1280 group = iommu_group_get_for_dev(dev);
1282 group = ERR_PTR(-ENOMEM);
1283 if (IS_ERR(group)) {
1284 ret = PTR_ERR(group);
1287 iommu_group_put(group);
1289 /* It worked! Now, poke the actual hardware */
1290 for_each_cfg_sme(fwspec, i, idx) {
1291 arm_smmu_write_sme(smmu, idx);
1292 smmu->s2crs[idx].group = group;
1295 mutex_unlock(&smmu->stream_map_mutex);
1300 arm_smmu_free_sme(smmu, cfg->smendx[i]);
1301 cfg->smendx[i] = INVALID_SMENDX;
1303 mutex_unlock(&smmu->stream_map_mutex);
1307 static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
1309 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
1310 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
1313 mutex_lock(&smmu->stream_map_mutex);
1314 for_each_cfg_sme(fwspec, i, idx) {
1315 if (arm_smmu_free_sme(smmu, idx))
1316 arm_smmu_write_sme(smmu, idx);
1317 cfg->smendx[i] = INVALID_SMENDX;
1319 mutex_unlock(&smmu->stream_map_mutex);
1322 static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
1323 struct iommu_fwspec *fwspec)
1325 struct arm_smmu_device *smmu = smmu_domain->smmu;
1326 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
1327 u8 cbndx = smmu_domain->cfg.cbndx;
1328 enum arm_smmu_s2cr_type type;
1331 if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS)
1332 type = S2CR_TYPE_BYPASS;
1334 type = S2CR_TYPE_TRANS;
1336 for_each_cfg_sme(fwspec, i, idx) {
1337 if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
1340 s2cr[idx].type = type;
1341 s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
1342 s2cr[idx].cbndx = cbndx;
1343 arm_smmu_write_s2cr(smmu, idx);
1348 static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1351 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
1352 struct arm_smmu_device *smmu;
1353 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1355 if (!fwspec || fwspec->ops != &arm_smmu_ops) {
1356 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
1361 * FIXME: The arch/arm DMA API code tries to attach devices to its own
1362 * domains between of_xlate() and add_device() - we have no way to cope
1363 * with that, so until ARM gets converted to rely on groups and default
1364 * domains, just say no (but more politely than by dereferencing NULL).
1365 * This should be at least a WARN_ON once that's sorted.
1367 if (!fwspec->iommu_priv)
1370 smmu = fwspec_smmu(fwspec);
1371 /* Ensure that the domain is finalised */
1372 ret = arm_smmu_init_domain_context(domain, smmu);
1377 * Sanity check the domain. We don't support domains across
1380 if (smmu_domain->smmu != smmu) {
1382 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
1383 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
1387 /* Looks ok, so add the device to the domain */
1388 return arm_smmu_domain_add_master(smmu_domain, fwspec);
1391 static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
1392 phys_addr_t paddr, size_t size, int prot)
1395 unsigned long flags;
1396 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1402 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
1403 ret = ops->map(ops, iova, paddr, size, prot);
1404 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
1408 static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
1412 unsigned long flags;
1413 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1419 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
1420 ret = ops->unmap(ops, iova, size);
1421 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
1425 static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
1428 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1429 struct arm_smmu_device *smmu = smmu_domain->smmu;
1430 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1432 struct device *dev = smmu->dev;
1433 void __iomem *cb_base;
1438 cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
1440 /* ATS1 registers can only be written atomically */
1441 va = iova & ~0xfffUL;
1442 if (smmu->version == ARM_SMMU_V2)
1443 smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
1444 else /* Register is only 32-bit in v1 */
1445 writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
1447 if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
1448 !(tmp & ATSR_ACTIVE), 5, 50)) {
1450 "iova to phys timed out on %pad. Falling back to software table walk.\n",
1452 return ops->iova_to_phys(ops, iova);
1455 phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
1456 if (phys & CB_PAR_F) {
1457 dev_err(dev, "translation fault!\n");
1458 dev_err(dev, "PAR = 0x%llx\n", phys);
1462 return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
1465 static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
1469 unsigned long flags;
1470 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1473 if (domain->type == IOMMU_DOMAIN_IDENTITY)
1479 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
1480 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
1481 smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
1482 ret = arm_smmu_iova_to_phys_hard(domain, iova);
1484 ret = ops->iova_to_phys(ops, iova);
1487 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
1492 static bool arm_smmu_capable(enum iommu_cap cap)
1495 case IOMMU_CAP_CACHE_COHERENCY:
1497 * Return true here as the SMMU can always send out coherent
1501 case IOMMU_CAP_NOEXEC:
1508 static int arm_smmu_match_node(struct device *dev, void *data)
1510 return dev->fwnode == data;
1514 struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
1516 struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
1517 fwnode, arm_smmu_match_node);
1519 return dev ? dev_get_drvdata(dev) : NULL;
1522 static int arm_smmu_add_device(struct device *dev)
1524 struct arm_smmu_device *smmu;
1525 struct arm_smmu_master_cfg *cfg;
1526 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
1529 if (using_legacy_binding) {
1530 ret = arm_smmu_register_legacy_master(dev, &smmu);
1531 fwspec = dev->iommu_fwspec;
1534 } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
1535 smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
1541 for (i = 0; i < fwspec->num_ids; i++) {
1542 u16 sid = fwspec->ids[i];
1543 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
1545 if (sid & ~smmu->streamid_mask) {
1546 dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
1547 sid, smmu->streamid_mask);
1550 if (mask & ~smmu->smr_mask_mask) {
1551 dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
1552 mask, smmu->smr_mask_mask);
1558 cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
1564 fwspec->iommu_priv = cfg;
1566 cfg->smendx[i] = INVALID_SMENDX;
1568 ret = arm_smmu_master_alloc_smes(dev);
1572 iommu_device_link(&smmu->iommu, dev);
1578 kfree(fwspec->iommu_priv);
1579 iommu_fwspec_free(dev);
1583 static void arm_smmu_remove_device(struct device *dev)
1585 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
1586 struct arm_smmu_master_cfg *cfg;
1587 struct arm_smmu_device *smmu;
1590 if (!fwspec || fwspec->ops != &arm_smmu_ops)
1593 cfg = fwspec->iommu_priv;
1596 iommu_device_unlink(&smmu->iommu, dev);
1597 arm_smmu_master_free_smes(fwspec);
1598 iommu_group_remove_device(dev);
1599 kfree(fwspec->iommu_priv);
1600 iommu_fwspec_free(dev);
1603 static struct iommu_group *arm_smmu_device_group(struct device *dev)
1605 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
1606 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
1607 struct iommu_group *group = NULL;
1610 for_each_cfg_sme(fwspec, i, idx) {
1611 if (group && smmu->s2crs[idx].group &&
1612 group != smmu->s2crs[idx].group)
1613 return ERR_PTR(-EINVAL);
1615 group = smmu->s2crs[idx].group;
1619 return iommu_group_ref_get(group);
1621 if (dev_is_pci(dev))
1622 group = pci_device_group(dev);
1624 group = generic_device_group(dev);
1629 static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
1630 enum iommu_attr attr, void *data)
1632 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1634 if (domain->type != IOMMU_DOMAIN_UNMANAGED)
1638 case DOMAIN_ATTR_NESTING:
1639 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
1646 static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
1647 enum iommu_attr attr, void *data)
1650 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1652 if (domain->type != IOMMU_DOMAIN_UNMANAGED)
1655 mutex_lock(&smmu_domain->init_mutex);
1658 case DOMAIN_ATTR_NESTING:
1659 if (smmu_domain->smmu) {
1665 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
1667 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1675 mutex_unlock(&smmu_domain->init_mutex);
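/*
 * Generic DT binding: args[0] carries the stream ID and an optional args[1]
 * (or the "stream-match-mask" property) carries an SMR mask; both are packed
 * into a single fwspec ID with the mask in the upper 16 bits.
 */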
1679 static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
1683 if (args->args_count > 0)
1684 fwid |= (u16)args->args[0];
1686 if (args->args_count > 1)
1687 fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;
1688 else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
1689 fwid |= (u16)mask << SMR_MASK_SHIFT;
1691 return iommu_fwspec_add_ids(dev, &fwid, 1);
1694 static void arm_smmu_get_resv_regions(struct device *dev,
1695 struct list_head *head)
1697 struct iommu_resv_region *region;
1698 int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
1700 region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
1701 prot, IOMMU_RESV_SW_MSI);
list_add_tail(&region->list, head);
1707 iommu_dma_get_resv_regions(dev, head);
1710 static void arm_smmu_put_resv_regions(struct device *dev,
1711 struct list_head *head)
1713 struct iommu_resv_region *entry, *next;
1715 list_for_each_entry_safe(entry, next, head, list)
1719 static struct iommu_ops arm_smmu_ops = {
1720 .capable = arm_smmu_capable,
1721 .domain_alloc = arm_smmu_domain_alloc,
1722 .domain_free = arm_smmu_domain_free,
1723 .attach_dev = arm_smmu_attach_dev,
1724 .map = arm_smmu_map,
1725 .unmap = arm_smmu_unmap,
1726 .map_sg = default_iommu_map_sg,
1727 .iova_to_phys = arm_smmu_iova_to_phys,
1728 .add_device = arm_smmu_add_device,
1729 .remove_device = arm_smmu_remove_device,
1730 .device_group = arm_smmu_device_group,
1731 .domain_get_attr = arm_smmu_domain_get_attr,
1732 .domain_set_attr = arm_smmu_domain_set_attr,
1733 .of_xlate = arm_smmu_of_xlate,
1734 .get_resv_regions = arm_smmu_get_resv_regions,
1735 .put_resv_regions = arm_smmu_put_resv_regions,
1736 .pgsize_bitmap = -1UL, /* Restricted during device attach */
1739 static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
1741 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1742 void __iomem *cb_base;
1746 /* clear global FSR */
1747 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
1748 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
1751 * Reset stream mapping groups: Initial values mark all SMRn as
1752 * invalid and all S2CRn as bypass unless overridden.
1754 for (i = 0; i < smmu->num_mapping_groups; ++i)
1755 arm_smmu_write_sme(smmu, i);
1757 if (smmu->model == ARM_MMU500) {
1759 * Before clearing ARM_MMU500_ACTLR_CPRE, need to
1760 * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
1761 * bit is only present in MMU-500r2 onwards.
1763 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
1764 major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
1765 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
1767 reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
1769 * Allow unmatched Stream IDs to allocate bypass
1770 * TLB entries for reduced latency.
1772 reg |= ARM_MMU500_ACR_SMTNMB_TLBEN;
1773 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
1776 /* Make sure all context banks are disabled and clear CB_FSR */
1777 for (i = 0; i < smmu->num_context_banks; ++i) {
1778 cb_base = ARM_SMMU_CB(smmu, i);
1779 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
1780 writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
1782 * Disable MMU-500's not-particularly-beneficial next-page
1783 * prefetcher for the sake of errata #841119 and #826419.
1785 if (smmu->model == ARM_MMU500) {
1786 reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
1787 reg &= ~ARM_MMU500_ACTLR_CPRE;
1788 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
1792 /* Invalidate the TLB, just in case */
1793 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
1794 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
1796 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
1798 /* Enable fault reporting */
1799 reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
1801 /* Disable TLB broadcasting. */
1802 reg |= (sCR0_VMIDPNE | sCR0_PTM);
1804 /* Enable client access, handling unmatched streams as appropriate */
1805 reg &= ~sCR0_CLIENTPD;
1809 reg &= ~sCR0_USFCFG;
1811 /* Disable forced broadcasting */
1814 /* Don't upgrade barriers */
1815 reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
1817 if (smmu->features & ARM_SMMU_FEAT_VMID16)
1818 reg |= sCR0_VMID16EN;
1820 if (smmu->features & ARM_SMMU_FEAT_EXIDS)
1821 reg |= sCR0_EXIDENABLE;
1823 /* Push the button */
1824 arm_smmu_tlb_sync_global(smmu);
1825 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
1828 static int arm_smmu_id_size_to_bits(int size)
1847 static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
1850 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1852 bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
1855 dev_notice(smmu->dev, "probing hardware configuration...\n");
1856 dev_notice(smmu->dev, "SMMUv%d with:\n",
1857 smmu->version == ARM_SMMU_V2 ? 2 : 1);
1860 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
1862 /* Restrict available stages based on module parameter */
1863 if (force_stage == 1)
1864 id &= ~(ID0_S2TS | ID0_NTS);
1865 else if (force_stage == 2)
1866 id &= ~(ID0_S1TS | ID0_NTS);
1868 if (id & ID0_S1TS) {
1869 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
1870 dev_notice(smmu->dev, "\tstage 1 translation\n");
1873 if (id & ID0_S2TS) {
1874 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
1875 dev_notice(smmu->dev, "\tstage 2 translation\n");
1879 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
1880 dev_notice(smmu->dev, "\tnested translation\n");
1883 if (!(smmu->features &
1884 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
1885 dev_err(smmu->dev, "\tno translation support!\n");
1889 if ((id & ID0_S1TS) &&
1890 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
1891 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
1892 dev_notice(smmu->dev, "\taddress translation ops\n");
1896 * In order for DMA API calls to work properly, we must defer to what
1897 * the FW says about coherency, regardless of what the hardware claims.
1898 * Fortunately, this also opens up a workaround for systems where the
1899 * ID register value has ended up configured incorrectly.
1901 cttw_reg = !!(id & ID0_CTTW);
1902 if (cttw_fw || cttw_reg)
1903 dev_notice(smmu->dev, "\t%scoherent table walk\n",
1904 cttw_fw ? "" : "non-");
1905 if (cttw_fw != cttw_reg)
1906 dev_notice(smmu->dev,
1907 "\t(IDR0.CTTW overridden by FW configuration)\n");
1909 /* Max. number of entries we have for stream matching/indexing */
1910 if (smmu->version == ARM_SMMU_V2 && id & ID0_EXIDS) {
1911 smmu->features |= ARM_SMMU_FEAT_EXIDS;
1914 size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
1916 smmu->streamid_mask = size - 1;
1918 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
1919 size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
1922 "stream-matching supported, but no SMRs present!\n");
1926 /* Zero-initialised to mark as invalid */
1927 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
1932 dev_notice(smmu->dev,
1933 "\tstream matching with %lu register groups", size);
1935 /* s2cr->type == 0 means translation, so initialise explicitly */
1936 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
1940 for (i = 0; i < size; i++)
1941 smmu->s2crs[i] = s2cr_init_val;
1943 smmu->num_mapping_groups = size;
1944 mutex_init(&smmu->stream_map_mutex);
1946 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
1947 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
1948 if (!(id & ID0_PTFS_NO_AARCH32S))
1949 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
1953 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
1954 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
1956 /* Check for size mismatch of SMMU address space from mapped region */
1957 size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
1958 size <<= smmu->pgshift;
1959 if (smmu->cb_base != gr0_base + size)
1961 "SMMU address space size (0x%lx) differs from mapped region size (0x%tx)!\n",
1962 size * 2, (smmu->cb_base - gr0_base) * 2);
1964 smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
1965 smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
1966 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
1967 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
1970 dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
1971 smmu->num_context_banks, smmu->num_s2_context_banks);
1973 * Cavium CN88xx erratum #27704.
1974 * Ensure ASID and VMID allocation is unique across all SMMUs in
1977 if (smmu->model == CAVIUM_SMMUV2) {
1978 smmu->cavium_id_base =
1979 atomic_add_return(smmu->num_context_banks,
1980 &cavium_smmu_context_count);
1981 smmu->cavium_id_base -= smmu->num_context_banks;
1982 dev_notice(smmu->dev, "\tenabling workaround for Cavium erratum 27704\n");
1986 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
1987 size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
1988 smmu->ipa_size = size;
1990 /* The output mask is also applied for bypass */
1991 size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
1992 smmu->pa_size = size;
1994 if (id & ID2_VMID16)
1995 smmu->features |= ARM_SMMU_FEAT_VMID16;
1998 * What the page table walker can address actually depends on which
1999 * descriptor format is in use, but since a) we don't know that yet,
2000 * and b) it can vary per context bank, this will have to do...
2002 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
2004 "failed to set DMA mask for table walker\n");
2006 if (smmu->version < ARM_SMMU_V2) {
2007 smmu->va_size = smmu->ipa_size;
2008 if (smmu->version == ARM_SMMU_V1_64K)
2009 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
2011 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
2012 smmu->va_size = arm_smmu_id_size_to_bits(size);
2013 if (id & ID2_PTFS_4K)
2014 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
2015 if (id & ID2_PTFS_16K)
2016 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
2017 if (id & ID2_PTFS_64K)
2018 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
2021 /* Now we've corralled the various formats, what'll it do? */
2022 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
2023 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
2024 if (smmu->features &
2025 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
2026 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
2027 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
2028 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
2029 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
2030 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
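/*
 * The first SMMU probed replaces the default all-ones pgsize_bitmap in
 * arm_smmu_ops; any further SMMUs OR their supported sizes into it.
 */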
2032 if (arm_smmu_ops.pgsize_bitmap == -1UL)
2033 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
2035 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
2036 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
2037 smmu->pgsize_bitmap);
2040 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
2041 dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
2042 smmu->va_size, smmu->ipa_size);
2044 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
2045 dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
2046 smmu->ipa_size, smmu->pa_size);
2051 struct arm_smmu_match_data {
2052 enum arm_smmu_arch_version version;
2053 enum arm_smmu_implementation model;
2056 #define ARM_SMMU_MATCH_DATA(name, ver, imp) \
2057 static struct arm_smmu_match_data name = { .version = ver, .model = imp }
2059 ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
2060 ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
2061 ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
2062 ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
2063 ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
2065 static const struct of_device_id arm_smmu_of_match[] = {
2066 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
2067 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
2068 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
2069 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
2070 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
2071 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
2074 MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
2077 static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
2082 case ACPI_IORT_SMMU_V1:
2083 case ACPI_IORT_SMMU_CORELINK_MMU400:
2084 smmu->version = ARM_SMMU_V1;
2085 smmu->model = GENERIC_SMMU;
2087 case ACPI_IORT_SMMU_CORELINK_MMU401:
2088 smmu->version = ARM_SMMU_V1_64K;
2089 smmu->model = GENERIC_SMMU;
2091 case ACPI_IORT_SMMU_V2:
2092 smmu->version = ARM_SMMU_V2;
2093 smmu->model = GENERIC_SMMU;
2095 case ACPI_IORT_SMMU_CORELINK_MMU500:
2096 smmu->version = ARM_SMMU_V2;
2097 smmu->model = ARM_MMU500;
2099 case ACPI_IORT_SMMU_CAVIUM_THUNDERX:
2100 smmu->version = ARM_SMMU_V2;
2101 smmu->model = CAVIUM_SMMUV2;
2110 static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
2111 struct arm_smmu_device *smmu)
2113 struct device *dev = smmu->dev;
2114 struct acpi_iort_node *node =
2115 *(struct acpi_iort_node **)dev_get_platdata(dev);
2116 struct acpi_iort_smmu *iort_smmu;
2119 /* Retrieve SMMU1/2 specific data */
2120 iort_smmu = (struct acpi_iort_smmu *)node->node_data;
2122 ret = acpi_smmu_get_data(iort_smmu->model, smmu);
2126 /* Ignore the configuration access interrupt */
2127 smmu->num_global_irqs = 1;
2129 if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK)
2130 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
2135 static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
2136 struct arm_smmu_device *smmu)
2142 static int arm_smmu_device_dt_probe(struct platform_device *pdev,
2143 struct arm_smmu_device *smmu)
2145 const struct arm_smmu_match_data *data;
2146 struct device *dev = &pdev->dev;
2147 bool legacy_binding;
2149 if (of_property_read_u32(dev->of_node, "#global-interrupts",
2150 &smmu->num_global_irqs)) {
2151 dev_err(dev, "missing #global-interrupts property\n");
2155 data = of_device_get_match_data(dev);
2156 smmu->version = data->version;
2157 smmu->model = data->model;
2159 parse_driver_options(smmu);
2161 legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
2162 if (legacy_binding && !using_generic_binding) {
2163 if (!using_legacy_binding)
2164 pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
2165 using_legacy_binding = true;
2166 } else if (!legacy_binding && !using_legacy_binding) {
2167 using_generic_binding = true;
2169 dev_err(dev, "not probing due to mismatched DT properties\n");
2173 if (of_dma_is_coherent(dev->of_node))
2174 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
2179 static void arm_smmu_bus_init(void)
2181 /* Oh, for a proper bus abstraction */
2182 if (!iommu_present(&platform_bus_type))
2183 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
2184 #ifdef CONFIG_ARM_AMBA
2185 if (!iommu_present(&amba_bustype))
2186 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
2189 if (!iommu_present(&pci_bus_type)) {
2191 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
2196 static int arm_smmu_device_probe(struct platform_device *pdev)
2198 struct resource *res;
2199 resource_size_t ioaddr;
2200 struct arm_smmu_device *smmu;
2201 struct device *dev = &pdev->dev;
2202 int num_irqs, i, err;
2204 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
2206 dev_err(dev, "failed to allocate arm_smmu_device\n");
2212 err = arm_smmu_device_dt_probe(pdev, smmu);
2214 err = arm_smmu_device_acpi_probe(pdev, smmu);
2219 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2220 ioaddr = res->start;
2221 smmu->base = devm_ioremap_resource(dev, res);
2222 if (IS_ERR(smmu->base))
2223 return PTR_ERR(smmu->base);
2224 smmu->cb_base = smmu->base + resource_size(res) / 2;
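/*
 * Assume the context bank address space occupies the upper half of the
 * mapped region; arm_smmu_device_cfg_probe() warns if the ID registers
 * disagree with this split.
 */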
2227 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
2229 if (num_irqs > smmu->num_global_irqs)
2230 smmu->num_context_irqs++;
2233 if (!smmu->num_context_irqs) {
2234 dev_err(dev, "found %d interrupts but expected at least %d\n",
2235 num_irqs, smmu->num_global_irqs + 1);
2239 smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
2242 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
2246 for (i = 0; i < num_irqs; ++i) {
2247 int irq = platform_get_irq(pdev, i);
2250 dev_err(dev, "failed to get irq index %d\n", i);
2253 smmu->irqs[i] = irq;
2256 err = arm_smmu_device_cfg_probe(smmu);
2260 if (smmu->version == ARM_SMMU_V2 &&
2261 smmu->num_context_banks != smmu->num_context_irqs) {
2263 "found only %d context interrupt(s) but %d required\n",
2264 smmu->num_context_irqs, smmu->num_context_banks);
2268 for (i = 0; i < smmu->num_global_irqs; ++i) {
2269 err = devm_request_irq(smmu->dev, smmu->irqs[i],
2270 arm_smmu_global_fault,
2272 "arm-smmu global fault",
2275 dev_err(dev, "failed to request global IRQ %d (%u)\n",
2281 err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
2282 "smmu.%pa", &ioaddr);
2284 dev_err(dev, "Failed to register iommu in sysfs\n");
2288 iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
2289 iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
2291 err = iommu_device_register(&smmu->iommu);
2293 dev_err(dev, "Failed to register iommu\n");
2297 platform_set_drvdata(pdev, smmu);
2298 arm_smmu_device_reset(smmu);
2299 arm_smmu_test_smr_masks(smmu);
2302 * For ACPI and generic DT bindings, an SMMU will be probed before
2303 * any device which might need it, so we want the bus ops in place
2304 * ready to handle default domain setup as soon as any SMMU exists.
2306 if (!using_legacy_binding)
2307 arm_smmu_bus_init();
2313 * With the legacy DT binding in play, though, we have no guarantees about
2314 * probe order, but then we're also not doing default domains, so we can
2315 * delay setting bus ops until we're sure every possible SMMU is ready,
2316 * and that way ensure that no add_device() calls get missed.
2318 static int arm_smmu_legacy_bus_init(void)
2320 if (using_legacy_binding)
2321 arm_smmu_bus_init();
2324 device_initcall_sync(arm_smmu_legacy_bus_init);
2326 static int arm_smmu_device_remove(struct platform_device *pdev)
2328 struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
2333 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
2334 dev_err(&pdev->dev, "removing device with active domains!\n");
2336 /* Turn the thing off */
2337 writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
2341 static struct platform_driver arm_smmu_driver = {
2344 .of_match_table = of_match_ptr(arm_smmu_of_match),
2346 .probe = arm_smmu_device_probe,
2347 .remove = arm_smmu_device_remove,
2349 module_platform_driver(arm_smmu_driver);
2351 IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", NULL);
2352 IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", NULL);
2353 IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", NULL);
2354 IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", NULL);
2355 IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", NULL);
2356 IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", NULL);
2358 MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
2359 MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
2360 MODULE_LICENSE("GPL v2");