/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))
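
/*
 * For example, with ARM_SMMU_OPT_SECURE_CFG_ACCESS set, a read of sGFSR
 * (offset 0x48) through ARM_SMMU_GR0_NS() lands on its secure alias at
 * 0x448 instead, matching the register list in the comment above.
 */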

/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif

/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_VMID16EN			(1 << 31)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3

/* Auxiliary Configuration register */
#define ARM_SMMU_GR0_sACR		0x10

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_PTFS_NO_AARCH32		(1 << 25)
#define ID0_PTFS_NO_AARCH32S		(1 << 24)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)
#define ID2_VMID16			(1 << 15)

#define ID7_MAJOR_SHIFT			4
#define ID7_MAJOR_MASK			0xf

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_ID_SHIFT			0

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
enum arm_smmu_s2cr_type {
	S2CR_TYPE_TRANS,
	S2CR_TYPE_BYPASS,
	S2CR_TYPE_FAULT,
};

#define S2CR_PRIVCFG_SHIFT		24
#define S2CR_PRIVCFG_MASK		0x3
enum arm_smmu_s2cr_privcfg {
	S2CR_PRIVCFG_DEFAULT,
	S2CR_PRIVCFG_DIPAN,
	S2CR_PRIVCFG_UNPRIV,
	S2CR_PRIVCFG_PRIV,
};

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)
#define CBA2R_VMID_SHIFT		16
#define CBA2R_VMID_MASK			0xffff

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))

#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_ACTLR		0x4
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_CONTEXTIDR		0x34
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR			0x50
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FAR			0x60
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0

#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)
#define ARM_MMU500_ACR_SMTNMB_TLBEN	(1 << 8)

#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)

#define TTBRn_ASID_SHIFT		48

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)

#define MSI_IOVA_BASE			0x8000000
#define MSI_IOVA_LENGTH			0x100000
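
/*
 * Note: this IOVA window is purely a software reservation, advertised
 * through arm_smmu_get_resv_regions() below so that callers have somewhere
 * to map MSI doorbells; the hardware attaches no meaning to these values.
 */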

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
};

struct arm_smmu_s2cr {
	struct iommu_group		*group;
	int				count;
	enum arm_smmu_s2cr_type		type;
	enum arm_smmu_s2cr_privcfg	privcfg;
	u8				cbndx;
};

#define s2cr_init_val (struct arm_smmu_s2cr){				\
	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
}

struct arm_smmu_smr {
	u16				mask;
	u16				id;
	bool				valid;
};

struct arm_smmu_master_cfg {
	struct arm_smmu_device		*smmu;
	s16				smendx[];
};
#define INVALID_SMENDX			-1
#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
#define fwspec_smmu(fw)  (__fwspec_cfg(fw)->smmu)
#define fwspec_smendx(fw, i) \
	(i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
#define for_each_cfg_sme(fw, i, idx) \
	for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)
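
/*
 * Note the comma operator in for_each_cfg_sme(): idx is reloaded from
 * fwspec_smendx() before every bounds check, so the loop body always sees
 * the SME index (or INVALID_SMENDX) corresponding to the current stream
 * ID index i.
 */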

struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	unsigned long			size;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS	(1 << 0)
	u32				options;
	enum arm_smmu_arch_version	version;
	enum arm_smmu_implementation	model;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t			irptndx;

	u32				num_mapping_groups;
	u16				streamid_mask;
	u16				smr_mask_mask;
	struct arm_smmu_smr		*smrs;
	struct arm_smmu_s2cr		*s2crs;
	struct mutex			stream_map_mutex;

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;
	unsigned long			pgsize_bitmap;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	u32				cavium_id_base; /* Specific to Cavium */
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	u32				cbar;
	enum arm_smmu_context_fmt	fmt;
};
#define INVALID_IRPTNDX			0xff

#define ARM_SMMU_CB_ASID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx)
#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)
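
/*
 * Worked example (hypothetical numbers): a Cavium SMMU that was assigned
 * cavium_id_base = 128 gives context bank 3 an ASID of 131 and a VMID of
 * 132; on other implementations cavium_id_base stays 0, so ASIDs and
 * VMIDs derive directly from the context bank index.
 */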

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	spinlock_t			pgtbl_lock;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	struct iommu_domain		domain;
};

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static bool using_legacy_binding, using_generic_binding;

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ 0, NULL},
};

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return of_node_get(bus->bridge->parent->of_node);
	}

	return of_node_get(dev->of_node);
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((__be32 *)data) = cpu_to_be32(alias);
	return 0; /* Continue walking */
}

static int __find_legacy_master_phandle(struct device *dev, void *data)
{
	struct of_phandle_iterator *it = *(void **)data;
	struct device_node *np = it->node;
	int err;

	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
			    "#stream-id-cells", 0)
		if (it->node == np) {
			*(void **)data = dev;
			return 1;
		}
	it->node = np;
	return err == -ENOENT ? 0 : err;
}

static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops;

static int arm_smmu_register_legacy_master(struct device *dev,
					   struct arm_smmu_device **smmu)
{
	struct device *smmu_dev;
	struct device_node *np;
	struct of_phandle_iterator it;
	void *data = &it;
	u32 *sids;
	__be32 pci_sid;
	int err;

	np = dev_get_dev_node(dev);
	if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
		of_node_put(np);
		return -ENODEV;
	}

	it.node = np;
	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
				     __find_legacy_master_phandle);
	smmu_dev = data;
	of_node_put(np);
	if (err == 0)
		return -ENODEV;
	if (err < 0)
		return err;

	if (dev_is_pci(dev)) {
		/* "mmu-masters" assumes Stream ID == Requester ID */
		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
				       &pci_sid);
		it.cur = &pci_sid;
		it.cur_count = 1;
	}

	err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
				&arm_smmu_ops);
	if (err)
		return err;

	sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
	if (!sids)
		return -ENOMEM;

	*smmu = dev_get_drvdata(smmu_dev);
	of_phandle_iterator_args(&it, sids, it.cur_count);
	err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
	kfree(sids);
	return err;
}

static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	__arm_smmu_tlb_sync(smmu_domain->smmu);
}
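
/*
 * The sync above is the only point at which invalidations are guaranteed
 * complete: the *_relaxed register writes used throughout this file are
 * deliberately unordered for speed, and a timeout in the sync loop usually
 * indicates wedged hardware rather than a slow TLB.
 */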

static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;

	if (stage1) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
	}

	__arm_smmu_tlb_sync(smmu);
}

static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg;

	if (stage1) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			iova |= ARM_SMMU_CB_ASID(smmu, cfg);
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			iova >>= 12;
			iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else if (smmu->version == ARM_SMMU_V2) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	} else {
		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
	}
}

static const struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
};
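
/*
 * These callbacks are driven by the io-pgtable library (io-pgtable.h):
 * tlb_flush_all invalidates a whole context, tlb_add_flush posts a range
 * invalidation, and tlb_sync waits for outstanding invalidations to finish.
 */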

static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	u32 fsr, fsynr;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);

	dev_err_ratelimited(smmu->dev,
	"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cb=%d\n",
			    fsr, iova, fsynr, cfg->cbndx);

	writel(fsr, cb_base + ARM_SMMU_CB_FSR);
	return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}

static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	u32 reg, reg2;
	u64 reg64;
	bool stage1;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base, *gr1_base;

	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = CBA2R_RW64_64BIT;
		else
			reg = CBA2R_RW64_32BIT;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;

		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
	}

	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version < ARM_SMMU_V2)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));

	/* TTBRs */
	if (stage1) {
		u16 asid = ARM_SMMU_CB_ASID(smmu, cfg);

		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			reg = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0);
			reg = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1);
			writel_relaxed(asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
		} else {
			reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
			reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
			writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
			reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
			reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
			writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
		}
	} else {
		reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
	}

	/* TTBCR */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			reg = pgtbl_cfg->arm_v7s_cfg.tcr;
			reg2 = 0;
		} else {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
			reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			reg2 |= TTBCR2_SEP_UPSTREAM;
		}
		if (smmu->version > ARM_SMMU_V1)
			writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2);
	} else {
		reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
	}
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);

	/* MAIRs (stage-1 only) */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			reg = pgtbl_cfg->arm_v7s_cfg.prrr;
			reg2 = pgtbl_cfg->arm_v7s_cfg.nmrr;
		} else {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
			reg2 = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		}
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
		writel_relaxed(reg2, cb_base + ARM_SMMU_CB_S1_MAIR1);
	}

	/* SCTLR */
	reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
	reg |= SCTLR_E;
#endif
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}

static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 *     S1               N              S1
	 *     S1             S1+S2            S1
	 *     S1               S2             S2
	 *     S1               S1             S1
	 *     N                N              N
	 *     N              S1+S2            S2
	 *     N                S2             S2
	 *     N                S1             S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
	    !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
	    (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
	    (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		} else {
			fmt = ARM_V7S;
			ias = min(ias, 32UL);
			oas = min(oas, 32UL);
		}
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (ret < 0)
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version < ARM_SMMU_V2) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}
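
	/*
	 * Note: on SMMUv1 the (typically scarce) context IRQs are handed
	 * out round-robin and may end up shared between banks, whereas
	 * SMMUv2 requires one IRQ per context bank (checked at probe
	 * time), so the bank index can double as the interrupt index.
	 */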

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= &arm_smmu_gather_ops,
		.iommu_dev	= smmu->dev,
	};

	smmu_domain->smmu = smmu;
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	domain->geometry.aperture_end = (1UL << ias) - 1;
	domain->geometry.force_aperture = true;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
			       IRQF_SHARED, "arm-smmu-context-fault", domain);
	if (ret < 0) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *cb_base;
	int irq;

	if (!smmu)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		devm_free_irq(smmu->dev, irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
	    iommu_get_dma_cookie(&smmu_domain->domain))) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->pgtbl_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}

static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_smr *smr = smmu->smrs + idx;
	u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;

	if (smr->valid)
		reg |= SMR_VALID;
	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
}
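
/*
 * e.g. a valid SMR with id = 0x400 and mask = 0xff is written as
 * 0x80ff0400 (SMR_VALID | mask << 16 | id).
 */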

static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
	u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
		  (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
		  (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;

	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
}

static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
{
	arm_smmu_write_s2cr(smmu, idx);
	if (smmu->smrs)
		arm_smmu_write_smr(smmu, idx);
}

static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
{
	struct arm_smmu_smr *smrs = smmu->smrs;
	int i, free_idx = -ENOSPC;

	/* Stream indexing is blissfully easy */
	if (!smrs)
		return id;

	/* Validating SMRs is... less so */
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		if (!smrs[i].valid) {
			/*
			 * Note the first free entry we come across, which
			 * we'll claim in the end if nothing else matches.
			 */
			if (free_idx < 0)
				free_idx = i;
			continue;
		}
		/*
		 * If the new entry is _entirely_ matched by an existing entry,
		 * then reuse that, with the guarantee that there also cannot
		 * be any subsequent conflicting entries. In normal use we'd
		 * expect simply identical entries for this case, but there's
		 * no harm in accommodating the generalisation.
		 */
		if ((mask & smrs[i].mask) == mask &&
		    !((id ^ smrs[i].id) & ~smrs[i].mask))
			return i;
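		/*
		 * Worked example (hypothetical values): an existing entry
		 * {id = 0x400, mask = 0xff} already matches stream IDs
		 * 0x400-0x4ff, so a new request for {id = 0x420, mask = 0xf}
		 * (covering 0x420-0x42f) reuses it via the check above.
		 */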
		/*
		 * If the new entry has any other overlap with an existing one,
		 * though, then there always exists at least one stream ID
		 * which would cause a conflict, and we can't allow that risk.
		 */
		if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
			return -EINVAL;
	}

	return free_idx;
}

static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
{
	if (--smmu->s2crs[idx].count)
		return false;

	smmu->s2crs[idx] = s2cr_init_val;
	if (smmu->smrs)
		smmu->smrs[idx].valid = false;

	return true;
}

static int arm_smmu_master_alloc_smes(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
	struct arm_smmu_device *smmu = cfg->smmu;
	struct arm_smmu_smr *smrs = smmu->smrs;
	struct iommu_group *group;
	int i, idx, ret;

	mutex_lock(&smmu->stream_map_mutex);
	/* Figure out a viable stream map entry allocation */
	for_each_cfg_sme(fwspec, i, idx) {
		u16 sid = fwspec->ids[i];
		u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;

		if (idx != INVALID_SMENDX) {
			ret = -EEXIST;
			goto out_err;
		}

		ret = arm_smmu_find_sme(smmu, sid, mask);
		if (ret < 0)
			goto out_err;

		idx = ret;
		if (smrs && smmu->s2crs[idx].count == 0) {
			smrs[idx].id = sid;
			smrs[idx].mask = mask;
			smrs[idx].valid = true;
		}
		smmu->s2crs[idx].count++;
		cfg->smendx[i] = (s16)idx;
	}

	group = iommu_group_get_for_dev(dev);
	if (!group)
		group = ERR_PTR(-ENOMEM);
	if (IS_ERR(group)) {
		ret = PTR_ERR(group);
		goto out_err;
	}
	iommu_group_put(group);

	/* It worked! Now, poke the actual hardware */
	for_each_cfg_sme(fwspec, i, idx) {
		arm_smmu_write_sme(smmu, idx);
		smmu->s2crs[idx].group = group;
	}

	mutex_unlock(&smmu->stream_map_mutex);
	return 0;

out_err:
	while (i--) {
		arm_smmu_free_sme(smmu, cfg->smendx[i]);
		cfg->smendx[i] = INVALID_SMENDX;
	}
	mutex_unlock(&smmu->stream_map_mutex);
	return ret;
}

static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
{
	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
	struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
	int i, idx;

	mutex_lock(&smmu->stream_map_mutex);
	for_each_cfg_sme(fwspec, i, idx) {
		if (arm_smmu_free_sme(smmu, idx))
			arm_smmu_write_sme(smmu, idx);
		cfg->smendx[i] = INVALID_SMENDX;
	}
	mutex_unlock(&smmu->stream_map_mutex);
}

static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
				      struct iommu_fwspec *fwspec)
{
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_s2cr *s2cr = smmu->s2crs;
	enum arm_smmu_s2cr_type type = S2CR_TYPE_TRANS;
	u8 cbndx = smmu_domain->cfg.cbndx;
	int i, idx;

	for_each_cfg_sme(fwspec, i, idx) {
		if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
			continue;

		s2cr[idx].type = type;
		s2cr[idx].privcfg = S2CR_PRIVCFG_UNPRIV;
		s2cr[idx].cbndx = cbndx;
		arm_smmu_write_s2cr(smmu, idx);
	}
	return 0;
}

static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret;
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_device *smmu;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	if (!fwspec || fwspec->ops != &arm_smmu_ops) {
		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	/*
	 * FIXME: The arch/arm DMA API code tries to attach devices to its own
	 * domains between of_xlate() and add_device() - we have no way to cope
	 * with that, so until ARM gets converted to rely on groups and default
	 * domains, just say no (but more politely than by dereferencing NULL).
	 * This should be at least a WARN_ON once that's sorted.
	 */
	if (!fwspec->iommu_priv)
		return -ENODEV;

	smmu = fwspec_smmu(fwspec);
	/* Ensure that the domain is finalised */
	ret = arm_smmu_init_domain_context(domain, smmu);
	if (ret < 0)
		return ret;

	/*
	 * Sanity check the domain. We don't support domains across
	 * different SMMUs.
	 */
	if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
			dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
		return -EINVAL;
	}

	/* Looks ok, so add the device to the domain */
	return arm_smmu_domain_add_master(smmu_domain, fwspec);
}

static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	int ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->map(ops, iova, paddr, size, prot);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			     size_t size)
{
	size_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->unmap(ops, iova, size);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
	struct device *dev = smmu->dev;
	void __iomem *cb_base;
	u32 tmp;
	u64 phys;
	unsigned long va;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	/* ATS1 registers can only be written atomically */
	va = iova & ~0xfffUL;
	if (smmu->version == ARM_SMMU_V2)
		smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
	else /* Register is only 32-bit in v1 */
		writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);

	if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
				      !(tmp & ATSR_ACTIVE), 5, 50)) {
		dev_err(dev,
			"iova to phys timed out on %pad. Falling back to software table walk.\n",
			&iova);
		return ops->iova_to_phys(ops, iova);
	}

	phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
	if (phys & CB_PAR_F) {
		dev_err(dev, "translation fault!\n");
		dev_err(dev, "PAR = 0x%llx\n", phys);
		return 0;
	}

	return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
}
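
/*
 * Note on the hardware-assisted path above: PAR bits [39:12] hold the
 * translated page frame, so combining them with the low 12 bits of the
 * original iova reconstructs the full physical address.
 */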

static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	phys_addr_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
			smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		ret = arm_smmu_iova_to_phys_hard(domain, iova);
	} else {
		ret = ops->iova_to_phys(ops, iova);
	}
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);

	return ret;
}

static bool arm_smmu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		/*
		 * Return true here as the SMMU can always send out coherent
		 * requests.
		 */
		return true;
	case IOMMU_CAP_INTR_REMAP:
		return true; /* MSIs are just memory writes */
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}

static int arm_smmu_match_node(struct device *dev, void *data)
{
	return dev->fwnode == data;
}

static
struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
{
	struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
						fwnode, arm_smmu_match_node);
	put_device(dev);
	return dev ? dev_get_drvdata(dev) : NULL;
}

static int arm_smmu_add_device(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct arm_smmu_master_cfg *cfg;
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	int i, ret;

	if (using_legacy_binding) {
		ret = arm_smmu_register_legacy_master(dev, &smmu);
		fwspec = dev->iommu_fwspec;
		if (ret)
			goto out_free;
	} else if (fwspec && fwspec->ops == &arm_smmu_ops) {
		smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
	} else {
		return -ENODEV;
	}

	ret = -EINVAL;
	for (i = 0; i < fwspec->num_ids; i++) {
		u16 sid = fwspec->ids[i];
		u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;

		if (sid & ~smmu->streamid_mask) {
			dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
				sid, smmu->streamid_mask);
			goto out_free;
		}
		if (mask & ~smmu->smr_mask_mask) {
			dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
				mask, smmu->smr_mask_mask);
			goto out_free;
		}
	}

	ret = -ENOMEM;
	cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
		      GFP_KERNEL);
	if (!cfg)
		goto out_free;

	cfg->smmu = smmu;
	fwspec->iommu_priv = cfg;
	while (i--)
		cfg->smendx[i] = INVALID_SMENDX;

	ret = arm_smmu_master_alloc_smes(dev);
	if (ret)
		goto out_free;

	return 0;

out_free:
	if (fwspec)
		kfree(fwspec->iommu_priv);
	iommu_fwspec_free(dev);
	return ret;
}

static void arm_smmu_remove_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;

	if (!fwspec || fwspec->ops != &arm_smmu_ops)
		return;

	arm_smmu_master_free_smes(fwspec);
	iommu_group_remove_device(dev);
	kfree(fwspec->iommu_priv);
	iommu_fwspec_free(dev);
}

static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
	struct iommu_group *group = NULL;
	int i, idx;

	for_each_cfg_sme(fwspec, i, idx) {
		if (group && smmu->s2crs[idx].group &&
		    group != smmu->s2crs[idx].group)
			return ERR_PTR(-EINVAL);

		group = smmu->s2crs[idx].group;
	}

	if (group)
		return iommu_group_ref_get(group);

	if (dev_is_pci(dev))
		group = pci_device_group(dev);
	else
		group = generic_device_group(dev);

	return group;
}

static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
		return 0;
	default:
		return -ENODEV;
	}
}

static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	mutex_lock(&smmu_domain->init_mutex);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		if (smmu_domain->smmu) {
			ret = -EPERM;
			goto out_unlock;
		}

		if (*(int *)data)
			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
		else
			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
		break;
	default:
		ret = -ENODEV;
	}

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	u32 fwid = 0;

	if (args->args_count > 0)
		fwid |= (u16)args->args[0];

	if (args->args_count > 1)
		fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;

	return iommu_fwspec_add_ids(dev, &fwid, 1);
}
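
/*
 * For example (hypothetical DT snippet), "iommus = <&smmu 0x400 0xff>;"
 * arrives here as args[0] = 0x400 and args[1] = 0xff, and is packed into
 * a single fwid of 0x00ff0400: SMR mask in bits [31:16], stream ID below.
 */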

static void arm_smmu_get_resv_regions(struct device *dev,
				      struct list_head *head)
{
	struct iommu_resv_region *region;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;

	region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
					 prot, IOMMU_RESV_MSI);
	if (!region)
		return;

	list_add_tail(&region->list, head);
}

static void arm_smmu_put_resv_regions(struct device *dev,
				      struct list_head *head)
{
	struct iommu_resv_region *entry, *next;

	list_for_each_entry_safe(entry, next, head, list)
		kfree(entry);
}

static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_alloc		= arm_smmu_domain_alloc,
	.domain_free		= arm_smmu_domain_free,
	.attach_dev		= arm_smmu_attach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.map_sg			= default_iommu_map_sg,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.device_group		= arm_smmu_device_group,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.of_xlate		= arm_smmu_of_xlate,
	.get_resv_regions	= arm_smmu_get_resv_regions,
	.put_resv_regions	= arm_smmu_put_resv_regions,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
};

static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	void __iomem *cb_base;
	int i;
	u32 reg, major;

	/* clear global FSR */
	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);

	/*
	 * Reset stream mapping groups: Initial values mark all SMRn as
	 * invalid and all S2CRn as bypass unless overridden.
	 */
	for (i = 0; i < smmu->num_mapping_groups; ++i)
		arm_smmu_write_sme(smmu, i);

	if (smmu->model == ARM_MMU500) {
		/*
		 * Before clearing ARM_MMU500_ACTLR_CPRE, need to
		 * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
		 * bit is only present in MMU-500r2 onwards.
		 */
		reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
		major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
		reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
		if (major >= 2)
			reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
		/*
		 * Allow unmatched Stream IDs to allocate bypass
		 * TLB entries for reduced latency.
		 */
		reg |= ARM_MMU500_ACR_SMTNMB_TLBEN;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
	}

	/* Make sure all context banks are disabled and clear CB_FSR  */
	for (i = 0; i < smmu->num_context_banks; ++i) {
		cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
		writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
		writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
		/*
		 * Disable MMU-500's not-particularly-beneficial next-page
		 * prefetcher for the sake of errata #841119 and #826419.
		 */
		if (smmu->model == ARM_MMU500) {
			reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
			reg &= ~ARM_MMU500_ACTLR_CPRE;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
		}
	}

	/* Invalidate the TLB, just in case */
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);

	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);

	/* Enable fault reporting */
	reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);

	/* Disable TLB broadcasting. */
	reg |= (sCR0_VMIDPNE | sCR0_PTM);

	/* Enable client access, handling unmatched streams as appropriate */
	reg &= ~sCR0_CLIENTPD;
	if (disable_bypass)
		reg |= sCR0_USFCFG;
	else
		reg &= ~sCR0_USFCFG;

	/* Disable forced broadcasting */
	reg &= ~sCR0_FB;

	/* Don't upgrade barriers */
	reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);

	if (smmu->features & ARM_SMMU_FEAT_VMID16)
		reg |= sCR0_VMID16EN;

	/* Push the button */
	__arm_smmu_tlb_sync(smmu);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
}

static int arm_smmu_id_size_to_bits(int size)
{
	switch (size) {
	case 0:
		return 32;
	case 1:
		return 36;
	case 2:
		return 40;
	case 3:
		return 42;
	case 4:
		return 44;
	case 5:
	default:
		return 48;
	}
}

static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
{
	unsigned long size;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	u32 id;
	bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
	int i;

	dev_notice(smmu->dev, "probing hardware configuration...\n");
	dev_notice(smmu->dev, "SMMUv%d with:\n",
			smmu->version == ARM_SMMU_V2 ? 2 : 1);

	/* ID0 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);

	/* Restrict available stages based on module parameter */
	if (force_stage == 1)
		id &= ~(ID0_S2TS | ID0_NTS);
	else if (force_stage == 2)
		id &= ~(ID0_S1TS | ID0_NTS);

	if (id & ID0_S1TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
		dev_notice(smmu->dev, "\tstage 1 translation\n");
	}

	if (id & ID0_S2TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
		dev_notice(smmu->dev, "\tstage 2 translation\n");
	}

	if (id & ID0_NTS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
		dev_notice(smmu->dev, "\tnested translation\n");
	}

	if (!(smmu->features &
		(ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
		dev_err(smmu->dev, "\tno translation support!\n");
		return -ENODEV;
	}

	if ((id & ID0_S1TS) &&
		((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
		dev_notice(smmu->dev, "\taddress translation ops\n");
	}

	/*
	 * In order for DMA API calls to work properly, we must defer to what
	 * the FW says about coherency, regardless of what the hardware claims.
	 * Fortunately, this also opens up a workaround for systems where the
	 * ID register value has ended up configured incorrectly.
	 */
	cttw_reg = !!(id & ID0_CTTW);
	if (cttw_fw || cttw_reg)
		dev_notice(smmu->dev, "\t%scoherent table walk\n",
			   cttw_fw ? "" : "non-");
	if (cttw_fw != cttw_reg)
		dev_notice(smmu->dev,
			   "\t(IDR0.CTTW overridden by FW configuration)\n");

	/* Max. number of entries we have for stream matching/indexing */
	size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
	smmu->streamid_mask = size - 1;
	if (id & ID0_SMS) {
		u32 smr;

		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
		size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
		if (size == 0) {
			dev_err(smmu->dev,
				"stream-matching supported, but no SMRs present!\n");
			return -ENODEV;
		}

		/*
		 * SMR.ID bits may not be preserved if the corresponding MASK
		 * bits are set, so check each one separately. We can reject
		 * masters later if they try to claim IDs outside these masks.
		 */
		smr = smmu->streamid_mask << SMR_ID_SHIFT;
		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
		smmu->streamid_mask = smr >> SMR_ID_SHIFT;

		smr = smmu->streamid_mask << SMR_MASK_SHIFT;
		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
		smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
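
		/*
		 * This write/read-back probe works because unimplemented
		 * SMR bits read back as zero: e.g. hardware with a 10-bit
		 * stream ID space leaves streamid_mask at 0x3ff here.
		 */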

		/* Zero-initialised to mark as invalid */
		smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
					  GFP_KERNEL);
		if (!smmu->smrs)
			return -ENOMEM;

		dev_notice(smmu->dev,
			   "\tstream matching with %lu register groups, mask 0x%x",
			   size, smmu->smr_mask_mask);
	}
	/* s2cr->type == 0 means translation, so initialise explicitly */
	smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
					 GFP_KERNEL);
	if (!smmu->s2crs)
		return -ENOMEM;
	for (i = 0; i < size; i++)
		smmu->s2crs[i] = s2cr_init_val;

	smmu->num_mapping_groups = size;
	mutex_init(&smmu->stream_map_mutex);

	if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
		smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
		if (!(id & ID0_PTFS_NO_AARCH32S))
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
	}

	/* ID1 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
	smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;

	/* Check for size mismatch of SMMU address space from mapped region */
	size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
	size *= 2 << smmu->pgshift;
	if (smmu->size != size)
		dev_warn(smmu->dev,
			"SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
			size, smmu->size);

	smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
	smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
	if (smmu->num_s2_context_banks > smmu->num_context_banks) {
		dev_err(smmu->dev, "impossible number of S2 context banks!\n");
		return -ENODEV;
	}
	dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
		   smmu->num_context_banks, smmu->num_s2_context_banks);
	/*
	 * Cavium CN88xx erratum #27704.
	 * Ensure ASID and VMID allocation is unique across all SMMUs in
	 * the system.
	 */
	if (smmu->model == CAVIUM_SMMUV2) {
		smmu->cavium_id_base =
			atomic_add_return(smmu->num_context_banks,
					  &cavium_smmu_context_count);
		smmu->cavium_id_base -= smmu->num_context_banks;
	}

	/* ID2 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
	size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
	smmu->ipa_size = size;

	/* The output mask is also applied for bypass */
	size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
	smmu->pa_size = size;

	if (id & ID2_VMID16)
		smmu->features |= ARM_SMMU_FEAT_VMID16;

	/*
	 * What the page table walker can address actually depends on which
	 * descriptor format is in use, but since a) we don't know that yet,
	 * and b) it can vary per context bank, this will have to do...
	 */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	if (smmu->version < ARM_SMMU_V2) {
		smmu->va_size = smmu->ipa_size;
		if (smmu->version == ARM_SMMU_V1_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	} else {
		size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
		smmu->va_size = arm_smmu_id_size_to_bits(size);
		if (id & ID2_PTFS_4K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
		if (id & ID2_PTFS_16K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
		if (id & ID2_PTFS_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	}

	/* Now we've corralled the various formats, what'll it do? */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
		smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
	if (smmu->features &
	    (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
		smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
		smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
		smmu->pgsize_bitmap |= SZ_64K | SZ_512M;

	if (arm_smmu_ops.pgsize_bitmap == -1UL)
		arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
	else
		arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
	dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
		   smmu->pgsize_bitmap);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
		dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
			   smmu->va_size, smmu->ipa_size);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
		dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
			   smmu->ipa_size, smmu->pa_size);

	return 0;
}

struct arm_smmu_match_data {
	enum arm_smmu_arch_version version;
	enum arm_smmu_implementation model;
};

#define ARM_SMMU_MATCH_DATA(name, ver, imp)	\
static struct arm_smmu_match_data name = { .version = ver, .model = imp }
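
/*
 * e.g. ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500) expands to
 * "static struct arm_smmu_match_data arm_mmu500 = { .version = ARM_SMMU_V2,
 * .model = ARM_MMU500 };", which the match tables below then point at.
 */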

ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);

static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
	{ .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
	{ .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
	{ .compatible = "arm,mmu-401", .data = &arm_mmu401 },
	{ .compatible = "arm,mmu-500", .data = &arm_mmu500 },
	{ .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);

#ifdef CONFIG_ACPI
static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
{
	int ret = 0;

	switch (model) {
	case ACPI_IORT_SMMU_V1:
	case ACPI_IORT_SMMU_CORELINK_MMU400:
		smmu->version = ARM_SMMU_V1;
		smmu->model = GENERIC_SMMU;
		break;
	case ACPI_IORT_SMMU_V2:
		smmu->version = ARM_SMMU_V2;
		smmu->model = GENERIC_SMMU;
		break;
	case ACPI_IORT_SMMU_CORELINK_MMU500:
		smmu->version = ARM_SMMU_V2;
		smmu->model = ARM_MMU500;
		break;
	default:
		ret = -ENODEV;
	}

	return ret;
}

static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
				      struct arm_smmu_device *smmu)
{
	struct device *dev = smmu->dev;
	struct acpi_iort_node *node =
		*(struct acpi_iort_node **)dev_get_platdata(dev);
	struct acpi_iort_smmu *iort_smmu;
	int ret;

	/* Retrieve SMMU1/2 specific data */
	iort_smmu = (struct acpi_iort_smmu *)node->node_data;

	ret = acpi_smmu_get_data(iort_smmu->model, smmu);
	if (ret < 0)
		return ret;

	/* Ignore the configuration access interrupt */
	smmu->num_global_irqs = 1;

	if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK)
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;

	return 0;
}
#else
static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
					     struct arm_smmu_device *smmu)
{
	return -ENODEV;
}
#endif

static int arm_smmu_device_dt_probe(struct platform_device *pdev,
				    struct arm_smmu_device *smmu)
{
	const struct arm_smmu_match_data *data;
	struct device *dev = &pdev->dev;
	bool legacy_binding;

	if (of_property_read_u32(dev->of_node, "#global-interrupts",
				 &smmu->num_global_irqs)) {
		dev_err(dev, "missing #global-interrupts property\n");
		return -ENODEV;
	}

	data = of_device_get_match_data(dev);
	smmu->version = data->version;
	smmu->model = data->model;

	parse_driver_options(smmu);

	legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
	if (legacy_binding && !using_generic_binding) {
		if (!using_legacy_binding)
			pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
		using_legacy_binding = true;
	} else if (!legacy_binding && !using_legacy_binding) {
		using_generic_binding = true;
	} else {
		dev_err(dev, "not probing due to mismatched DT properties\n");
		return -ENODEV;
	}

	if (of_dma_is_coherent(dev->of_node))
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;

	return 0;
}

static int arm_smmu_device_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	int num_irqs, i, err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	if (dev->of_node)
		err = arm_smmu_device_dt_probe(pdev, smmu);
	else
		err = arm_smmu_device_acpi_probe(pdev, smmu);

	if (err)
		return err;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);
	smmu->size = resource_size(res);

	num_irqs = 0;
	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
		num_irqs++;
		if (num_irqs > smmu->num_global_irqs)
			smmu->num_context_irqs++;
	}

	if (!smmu->num_context_irqs) {
		dev_err(dev, "found %d interrupts but expected at least %d\n",
			num_irqs, smmu->num_global_irqs + 1);
		return -ENODEV;
	}

	smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
				  GFP_KERNEL);
	if (!smmu->irqs) {
		dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
		return -ENOMEM;
	}

	for (i = 0; i < num_irqs; ++i) {
		int irq = platform_get_irq(pdev, i);

		if (irq < 0) {
			dev_err(dev, "failed to get irq index %d\n", i);
			return -ENODEV;
		}
		smmu->irqs[i] = irq;
	}

	err = arm_smmu_device_cfg_probe(smmu);
	if (err)
		return err;

	if (smmu->version == ARM_SMMU_V2 &&
	    smmu->num_context_banks != smmu->num_context_irqs) {
		dev_err(dev,
			"found only %d context interrupt(s) but %d required\n",
			smmu->num_context_irqs, smmu->num_context_banks);
		return -ENODEV;
	}

	for (i = 0; i < smmu->num_global_irqs; ++i) {
		err = devm_request_irq(smmu->dev, smmu->irqs[i],
				       arm_smmu_global_fault,
				       IRQF_SHARED,
				       "arm-smmu global fault",
				       smmu);
		if (err) {
			dev_err(dev, "failed to request global IRQ %d (%u)\n",
				i, smmu->irqs[i]);
			return err;
		}
	}

	iommu_register_instance(dev->fwnode, &arm_smmu_ops);
	platform_set_drvdata(pdev, smmu);
	arm_smmu_device_reset(smmu);

	/* Oh, for a proper bus abstraction */
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
#ifdef CONFIG_ARM_AMBA
	if (!iommu_present(&amba_bustype))
		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
#endif
#ifdef CONFIG_PCI
	if (!iommu_present(&pci_bus_type)) {
		pci_request_acs();
		bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
	}
#endif
	return 0;
}

static int arm_smmu_device_remove(struct platform_device *pdev)
{
	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);

	if (!smmu)
		return -ENODEV;

	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
		dev_err(&pdev->dev, "removing device with active domains!\n");

	/* Turn the thing off */
	writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
	return 0;
}

static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name		= "arm-smmu",
		.of_match_table	= of_match_ptr(arm_smmu_of_match),
	},
	.probe	= arm_smmu_device_probe,
	.remove	= arm_smmu_device_remove,
};

static int __init arm_smmu_init(void)
{
	static bool registered;
	int ret = 0;

	if (!registered) {
		ret = platform_driver_register(&arm_smmu_driver);
		registered = !ret;
	}
	return ret;
}

static void __exit arm_smmu_exit(void)
{
	return platform_driver_unregister(&arm_smmu_driver);
}

subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);

static int __init arm_smmu_of_init(struct device_node *np)
{
	int ret = arm_smmu_init();

	if (ret)
		return ret;

	if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root))
		return -ENODEV;

	return 0;
}
IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", arm_smmu_of_init);
IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", arm_smmu_of_init);
IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", arm_smmu_of_init);
IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", arm_smmu_of_init);
IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", arm_smmu_of_init);
IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", arm_smmu_of_init);

#ifdef CONFIG_ACPI
static int __init arm_smmu_acpi_init(struct acpi_table_header *table)
{
	if (iort_node_match(ACPI_IORT_NODE_SMMU))
		return arm_smmu_init();

	return 0;
}
IORT_ACPI_DECLARE(arm_smmu, ACPI_SIG_IORT, arm_smmu_acpi_init);
#endif

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");