// SPDX-License-Identifier: GPL-2.0
/*
 * IOMMU API for Renesas VMSA-compatible IPMMU
 * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *
 * Copyright (C) 2014 Renesas Electronics Corporation
 */

#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>

#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
#include <asm/dma-iommu.h>
#include <asm/pgalloc.h>
#else
#define arm_iommu_create_mapping(...)	NULL
#define arm_iommu_attach_device(...)	-ENODEV
#define arm_iommu_release_mapping(...)	do {} while (0)
#define arm_iommu_detach_device(...)	do {} while (0)
#endif

#define IPMMU_CTX_MAX		8U
#define IPMMU_CTX_INVALID	-1

#define IPMMU_UTLB_MAX		48U

struct ipmmu_features {
	bool use_ns_alias_offset;
	bool has_cache_leaf_nodes;
	unsigned int number_of_contexts;
	unsigned int num_utlbs;
	bool setup_imbuscr;
	bool twobit_imttbcr_sl0;
	bool reserved_context;
};

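/*
 * Feature flags, as consumed below: use_ns_alias_offset offsets the
 * register base to the non-secure alias space in ipmmu_probe(),
 * has_cache_leaf_nodes marks SoCs that split the IPMMU into a root and
 * cache (leaf) instances, number_of_contexts is clamped to IPMMU_CTX_MAX,
 * num_utlbs bounds utlb_ctx[], setup_imbuscr gates the IMBUSCR setup in
 * ipmmu_domain_setup_context(), twobit_imttbcr_sl0 selects the two-bit
 * IMTTBCR.SL0 encoding, and reserved_context keeps context 0 out of the
 * allocatable pool.
 */
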
struct ipmmu_vmsa_device {
	struct device *dev;
	void __iomem *base;
	struct iommu_device iommu;
	struct ipmmu_vmsa_device *root;
	const struct ipmmu_features *features;
	unsigned int num_ctx;
	spinlock_t lock;			/* Protects ctx and domains[] */
	DECLARE_BITMAP(ctx, IPMMU_CTX_MAX);
	struct ipmmu_vmsa_domain *domains[IPMMU_CTX_MAX];
	s8 utlb_ctx[IPMMU_UTLB_MAX];

	struct iommu_group *group;
	struct dma_iommu_mapping *mapping;
};

struct ipmmu_vmsa_domain {
	struct ipmmu_vmsa_device *mmu;
	struct iommu_domain io_domain;

	struct io_pgtable_cfg cfg;
	struct io_pgtable_ops *iop;

	unsigned int context_id;
	struct mutex mutex;			/* Protects mappings */
};

static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct ipmmu_vmsa_domain, io_domain);
}

static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	return fwspec ? fwspec->iommu_priv : NULL;
}

#define TLB_LOOP_TIMEOUT		100	/* 100us */

/* -----------------------------------------------------------------------------
 * Registers Definition
 */

#define IM_NS_ALIAS_OFFSET		0x800

#define IM_CTX_SIZE			0x40

#define IMCTR				0x0000
#define IMCTR_TRE			(1 << 17)
#define IMCTR_AFE			(1 << 16)
#define IMCTR_RTSEL_MASK		(3 << 4)
#define IMCTR_RTSEL_SHIFT		4
#define IMCTR_TREN			(1 << 3)
#define IMCTR_INTEN			(1 << 2)
#define IMCTR_FLUSH			(1 << 1)
#define IMCTR_MMUEN			(1 << 0)

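/*
 * Context registers are banked: register REG of context n lives at
 * n * IM_CTX_SIZE + REG, e.g. IMCTR of context 2 sits at
 * 2 * 0x40 + 0x0000 = 0x80 (see ipmmu_ctx_read_root() below).
 */
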
#define IMCAAR				0x0004

#define IMTTBCR				0x0008
#define IMTTBCR_EAE			(1 << 31)
#define IMTTBCR_PMB			(1 << 30)
#define IMTTBCR_SH1_NON_SHAREABLE	(0 << 28)
#define IMTTBCR_SH1_OUTER_SHAREABLE	(2 << 28)
#define IMTTBCR_SH1_INNER_SHAREABLE	(3 << 28)
#define IMTTBCR_SH1_MASK		(3 << 28)
#define IMTTBCR_ORGN1_NC		(0 << 26)
#define IMTTBCR_ORGN1_WB_WA		(1 << 26)
#define IMTTBCR_ORGN1_WT		(2 << 26)
#define IMTTBCR_ORGN1_WB		(3 << 26)
#define IMTTBCR_ORGN1_MASK		(3 << 26)
#define IMTTBCR_IRGN1_NC		(0 << 24)
#define IMTTBCR_IRGN1_WB_WA		(1 << 24)
#define IMTTBCR_IRGN1_WT		(2 << 24)
#define IMTTBCR_IRGN1_WB		(3 << 24)
#define IMTTBCR_IRGN1_MASK		(3 << 24)
#define IMTTBCR_TSZ1_MASK		(7 << 16)
#define IMTTBCR_TSZ1_SHIFT		16
#define IMTTBCR_SH0_NON_SHAREABLE	(0 << 12)
#define IMTTBCR_SH0_OUTER_SHAREABLE	(2 << 12)
#define IMTTBCR_SH0_INNER_SHAREABLE	(3 << 12)
#define IMTTBCR_SH0_MASK		(3 << 12)
#define IMTTBCR_ORGN0_NC		(0 << 10)
#define IMTTBCR_ORGN0_WB_WA		(1 << 10)
#define IMTTBCR_ORGN0_WT		(2 << 10)
#define IMTTBCR_ORGN0_WB		(3 << 10)
#define IMTTBCR_ORGN0_MASK		(3 << 10)
#define IMTTBCR_IRGN0_NC		(0 << 8)
#define IMTTBCR_IRGN0_WB_WA		(1 << 8)
#define IMTTBCR_IRGN0_WT		(2 << 8)
#define IMTTBCR_IRGN0_WB		(3 << 8)
#define IMTTBCR_IRGN0_MASK		(3 << 8)
#define IMTTBCR_SL0_LVL_2		(0 << 4)
#define IMTTBCR_SL0_LVL_1		(1 << 4)
#define IMTTBCR_TSZ0_MASK		(7 << 0)
#define IMTTBCR_TSZ0_SHIFT		0

#define IMTTBCR_SL0_TWOBIT_LVL_3	(0 << 6)
#define IMTTBCR_SL0_TWOBIT_LVL_2	(1 << 6)
#define IMTTBCR_SL0_TWOBIT_LVL_1	(2 << 6)

#define IMBUSCR				0x000c
#define IMBUSCR_DVM			(1 << 2)
#define IMBUSCR_BUSSEL_SYS		(0 << 0)
#define IMBUSCR_BUSSEL_CCI		(1 << 0)
#define IMBUSCR_BUSSEL_IMCAAR		(2 << 0)
#define IMBUSCR_BUSSEL_CCI_IMCAAR	(3 << 0)
#define IMBUSCR_BUSSEL_MASK		(3 << 0)

#define IMTTLBR0			0x0010
#define IMTTUBR0			0x0014
#define IMTTLBR1			0x0018
#define IMTTUBR1			0x001c

#define IMSTR				0x0020
#define IMSTR_ERRLVL_MASK		(3 << 12)
#define IMSTR_ERRLVL_SHIFT		12
#define IMSTR_ERRCODE_TLB_FORMAT	(1 << 8)
#define IMSTR_ERRCODE_ACCESS_PERM	(4 << 8)
#define IMSTR_ERRCODE_SECURE_ACCESS	(5 << 8)
#define IMSTR_ERRCODE_MASK		(7 << 8)
#define IMSTR_MHIT			(1 << 4)
#define IMSTR_ABORT			(1 << 2)
#define IMSTR_PF			(1 << 1)
#define IMSTR_TF			(1 << 0)

#define IMMAIR0				0x0028
#define IMMAIR1				0x002c
#define IMMAIR_ATTR_MASK		0xff
#define IMMAIR_ATTR_DEVICE		0x04
#define IMMAIR_ATTR_NC			0x44
#define IMMAIR_ATTR_WBRWA		0xff
#define IMMAIR_ATTR_SHIFT(n)		((n) << 3)
#define IMMAIR_ATTR_IDX_NC		0
#define IMMAIR_ATTR_IDX_WBRWA		1
#define IMMAIR_ATTR_IDX_DEV		2

#define IMELAR				0x0030	/* IMEAR on R-Car Gen2 */
#define IMEUAR				0x0034	/* R-Car Gen3 only */

#define IMPCTR				0x0200
#define IMPSTR				0x0208
#define IMPEAR				0x020c
#define IMPMBA(n)			(0x0280 + ((n) * 4))
#define IMPMBD(n)			(0x02c0 + ((n) * 4))

#define IMUCTR(n)			((n) < 32 ? IMUCTR0(n) : IMUCTR32(n))
#define IMUCTR0(n)			(0x0300 + ((n) * 16))
#define IMUCTR32(n)			(0x0600 + (((n) - 32) * 16))
#define IMUCTR_FIXADDEN			(1 << 31)
#define IMUCTR_FIXADD_MASK		(0xff << 16)
#define IMUCTR_FIXADD_SHIFT		16
#define IMUCTR_TTSEL_MMU(n)		((n) << 4)
#define IMUCTR_TTSEL_PMB		(8 << 4)
#define IMUCTR_TTSEL_MASK		(15 << 4)
#define IMUCTR_FLUSH			(1 << 1)
#define IMUCTR_MMUEN			(1 << 0)

#define IMUASID(n)			((n) < 32 ? IMUASID0(n) : IMUASID32(n))
#define IMUASID0(n)			(0x0308 + ((n) * 16))
#define IMUASID32(n)			(0x0608 + (((n) - 32) * 16))
#define IMUASID_ASID8_MASK		(0xff << 8)
#define IMUASID_ASID8_SHIFT		8
#define IMUASID_ASID0_MASK		(0xff << 0)
#define IMUASID_ASID0_SHIFT		0

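/*
 * microTLBs 0-31 and 32-47 live in two separate register banks, hence the
 * split in IMUCTR()/IMUASID(): IMUCTR(3) resolves to 0x0300 + 3 * 16 =
 * 0x0330 while IMUCTR(33) resolves to 0x0600 + 1 * 16 = 0x0610.
 */
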
/* -----------------------------------------------------------------------------
 * Root device handling
 */

static struct platform_driver ipmmu_driver;

static bool ipmmu_is_root(struct ipmmu_vmsa_device *mmu)
{
	return mmu->root == mmu;
}

static int __ipmmu_check_device(struct device *dev, void *data)
{
	struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
	struct ipmmu_vmsa_device **rootp = data;

	if (ipmmu_is_root(mmu))
		*rootp = mmu;

	return 0;
}

static struct ipmmu_vmsa_device *ipmmu_find_root(void)
{
	struct ipmmu_vmsa_device *root = NULL;

	return driver_for_each_device(&ipmmu_driver.driver, NULL, &root,
				      __ipmmu_check_device) == 0 ? root : NULL;
}

/* -----------------------------------------------------------------------------
 * Read/Write Access
 */

static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset)
{
	return ioread32(mmu->base + offset);
}

static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,
			u32 data)
{
	iowrite32(data, mmu->base + offset);
}

static u32 ipmmu_ctx_read_root(struct ipmmu_vmsa_domain *domain,
			       unsigned int reg)
{
	return ipmmu_read(domain->mmu->root,
			  domain->context_id * IM_CTX_SIZE + reg);
}

static void ipmmu_ctx_write_root(struct ipmmu_vmsa_domain *domain,
				 unsigned int reg, u32 data)
{
	ipmmu_write(domain->mmu->root,
		    domain->context_id * IM_CTX_SIZE + reg, data);
}

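/*
 * Write a context register on both the leaf IPMMU the domain is attached
 * to (when it isn't the root) and on the root IPMMU, keeping the cache
 * and root instances of the context in step.
 */
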
static void ipmmu_ctx_write_all(struct ipmmu_vmsa_domain *domain,
				unsigned int reg, u32 data)
{
	if (domain->mmu != domain->mmu->root)
		ipmmu_write(domain->mmu,
			    domain->context_id * IM_CTX_SIZE + reg, data);

	ipmmu_write(domain->mmu->root,
		    domain->context_id * IM_CTX_SIZE + reg, data);
}

/* -----------------------------------------------------------------------------
 * TLB and microTLB Management
 */

/* Wait for any pending TLB invalidations to complete */
static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain)
{
	unsigned int count = 0;

	while (ipmmu_ctx_read_root(domain, IMCTR) & IMCTR_FLUSH) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(domain->mmu->dev,
			"TLB sync timed out -- MMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain)
{
	u32 reg;

	reg = ipmmu_ctx_read_root(domain, IMCTR);
	reg |= IMCTR_FLUSH;
	ipmmu_ctx_write_all(domain, IMCTR, reg);

	ipmmu_tlb_sync(domain);
}

/*
 * Enable MMU translation for the microTLB.
 */
static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain,
			      unsigned int utlb)
{
	struct ipmmu_vmsa_device *mmu = domain->mmu;

	/*
	 * TODO: Reference-count the microTLB as several bus masters can be
	 * connected to the same microTLB.
	 */

	/* TODO: What should we set the ASID to ? */
	ipmmu_write(mmu, IMUASID(utlb), 0);
	/* TODO: Do we need to flush the microTLB ? */
	ipmmu_write(mmu, IMUCTR(utlb),
		    IMUCTR_TTSEL_MMU(domain->context_id) | IMUCTR_FLUSH |
		    IMUCTR_MMUEN);
	mmu->utlb_ctx[utlb] = domain->context_id;
}

/*
 * Disable MMU translation for the microTLB.
 */
static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
			       unsigned int utlb)
{
	struct ipmmu_vmsa_device *mmu = domain->mmu;

	ipmmu_write(mmu, IMUCTR(utlb), 0);
	mmu->utlb_ctx[utlb] = IPMMU_CTX_INVALID;
}

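/*
 * The utlb_ctx[] bookkeeping in the two helpers above is what allows
 * ipmmu_resume_noirq() to re-enable the active microTLBs after a system
 * suspend.
 */
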
static void ipmmu_tlb_flush_all(void *cookie)
{
	struct ipmmu_vmsa_domain *domain = cookie;

	ipmmu_tlb_invalidate(domain);
}

static void ipmmu_tlb_add_flush(unsigned long iova, size_t size,
				size_t granule, bool leaf, void *cookie)
{
	/* The hardware doesn't support selective TLB flush. */
}

static const struct iommu_gather_ops ipmmu_gather_ops = {
	.tlb_flush_all = ipmmu_tlb_flush_all,
	.tlb_add_flush = ipmmu_tlb_add_flush,
	.tlb_sync = ipmmu_tlb_flush_all,
};

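/*
 * Since the hardware can only invalidate a whole context's TLB,
 * .tlb_add_flush is a no-op and .tlb_sync falls back to a full flush
 * through IMCTR_FLUSH.
 */
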
/* -----------------------------------------------------------------------------
 * Domain/Context Management
 */

static int ipmmu_domain_allocate_context(struct ipmmu_vmsa_device *mmu,
					 struct ipmmu_vmsa_domain *domain)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mmu->lock, flags);

	ret = find_first_zero_bit(mmu->ctx, mmu->num_ctx);
	if (ret != mmu->num_ctx) {
		mmu->domains[ret] = domain;
		set_bit(ret, mmu->ctx);
	} else
		ret = -EBUSY;

	spin_unlock_irqrestore(&mmu->lock, flags);

	return ret;
}

static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu,
				      unsigned int context_id)
{
	unsigned long flags;

	spin_lock_irqsave(&mmu->lock, flags);

	clear_bit(context_id, mmu->ctx);
	mmu->domains[context_id] = NULL;

	spin_unlock_irqrestore(&mmu->lock, flags);
}

static void ipmmu_domain_setup_context(struct ipmmu_vmsa_domain *domain)
{
	u64 ttbr;
	u32 tmp;

	/* TTBR0 */
	ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0];
	ipmmu_ctx_write_root(domain, IMTTLBR0, ttbr);
	ipmmu_ctx_write_root(domain, IMTTUBR0, ttbr >> 32);

	/*
	 * TTBCR
	 * We use long descriptors with inner-shareable WBWA tables and allocate
	 * the whole 32-bit VA space to TTBR0.
	 */
	if (domain->mmu->features->twobit_imttbcr_sl0)
		tmp = IMTTBCR_SL0_TWOBIT_LVL_1;
	else
		tmp = IMTTBCR_SL0_LVL_1;

	ipmmu_ctx_write_root(domain, IMTTBCR, IMTTBCR_EAE |
			     IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
			     IMTTBCR_IRGN0_WB_WA | tmp);

	/* MAIR0 */
	ipmmu_ctx_write_root(domain, IMMAIR0,
			     domain->cfg.arm_lpae_s1_cfg.mair[0]);

	/* IMBUSCR */
	if (domain->mmu->features->setup_imbuscr)
		ipmmu_ctx_write_root(domain, IMBUSCR,
				     ipmmu_ctx_read_root(domain, IMBUSCR) &
				     ~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK));

	/*
	 * IMSTR
	 * Clear all interrupt flags.
	 */
	ipmmu_ctx_write_root(domain, IMSTR, ipmmu_ctx_read_root(domain, IMSTR));

	/*
	 * IMCTR
	 * Enable the MMU and interrupt generation. The long-descriptor
	 * translation table format doesn't use TEX remapping. Don't enable AF
	 * software management as we have no use for it. Flush the TLB as
	 * required when modifying the context registers.
	 */
	ipmmu_ctx_write_all(domain, IMCTR,
			    IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);
}

static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
{
	int ret;

	/*
	 * Allocate the page table operations.
	 *
	 * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory
	 * access, Long-descriptor format" that the NStable bit being set in a
	 * table descriptor will result in the NStable and NS bits of all child
	 * entries being ignored and considered as being set. The IPMMU seems
	 * not to comply with this, as it generates a secure access page fault
	 * if any of the NStable and NS bits isn't set when running in
	 * non-secure mode.
	 */
	domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
	domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
	domain->cfg.ias = 32;
	domain->cfg.oas = 40;
	domain->cfg.tlb = &ipmmu_gather_ops;
	domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
	domain->io_domain.geometry.force_aperture = true;
	/*
	 * TODO: Add support for coherent walk through CCI with DVM and remove
	 * cache handling. For now, delegate it to the io-pgtable code.
	 */
	domain->cfg.iommu_dev = domain->mmu->root->dev;

	/*
	 * Find an unused context.
	 */
	ret = ipmmu_domain_allocate_context(domain->mmu->root, domain);
	if (ret < 0)
		return ret;

	domain->context_id = ret;

	domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
					   domain);
	if (!domain->iop) {
		ipmmu_domain_free_context(domain->mmu->root,
					  domain->context_id);
		return -EINVAL;
	}

	ipmmu_domain_setup_context(domain);
	return 0;
}

static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
{
	if (!domain->mmu)
		return;

	/*
	 * Disable the context. Flush the TLB as required when modifying the
	 * context registers.
	 *
	 * TODO: Is TLB flush really needed ?
	 */
	ipmmu_ctx_write_all(domain, IMCTR, IMCTR_FLUSH);
	ipmmu_tlb_sync(domain);
	ipmmu_domain_free_context(domain->mmu->root, domain->context_id);
}

/* -----------------------------------------------------------------------------
 * Fault Handling
 */

static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
{
	const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF;
	struct ipmmu_vmsa_device *mmu = domain->mmu;
	unsigned long iova;
	u32 status;

	status = ipmmu_ctx_read_root(domain, IMSTR);
	if (!(status & err_mask))
		return IRQ_NONE;

	iova = ipmmu_ctx_read_root(domain, IMELAR);
	if (IS_ENABLED(CONFIG_64BIT))
		iova |= (u64)ipmmu_ctx_read_root(domain, IMEUAR) << 32;

	/*
	 * Clear the error status flags. Unlike traditional interrupt flag
	 * registers that must be cleared by writing 1, this status register
	 * seems to require 0. The error address register must be read before,
	 * otherwise its value will be 0.
	 */
	ipmmu_ctx_write_root(domain, IMSTR, 0);

	/* Log fatal errors. */
	if (status & IMSTR_MHIT)
		dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%lx\n",
				    iova);
	if (status & IMSTR_ABORT)
		dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%lx\n",
				    iova);

	if (!(status & (IMSTR_PF | IMSTR_TF)))
		return IRQ_NONE;

	/*
	 * Try to handle page faults and translation faults.
	 *
	 * TODO: We need to look up the faulty device based on the I/O VA. Use
	 * the IOMMU device for now.
	 */
	if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0))
		return IRQ_HANDLED;

	dev_err_ratelimited(mmu->dev,
			    "Unhandled fault: status 0x%08x iova 0x%lx\n",
			    status, iova);

	return IRQ_HANDLED;
}

static irqreturn_t ipmmu_irq(int irq, void *dev)
{
	struct ipmmu_vmsa_device *mmu = dev;
	irqreturn_t status = IRQ_NONE;
	unsigned int i;
	unsigned long flags;

	spin_lock_irqsave(&mmu->lock, flags);

	/*
	 * Check interrupts for all active contexts.
	 */
	for (i = 0; i < mmu->num_ctx; i++) {
		if (!mmu->domains[i])
			continue;
		if (ipmmu_domain_irq(mmu->domains[i]) == IRQ_HANDLED)
			status = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&mmu->lock, flags);

	return status;
}

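/*
 * The root IPMMU exposes a single IRQ line for all contexts; the handler
 * above demultiplexes it by checking IMSTR for every active context in
 * turn.
 */
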
/* -----------------------------------------------------------------------------
 * IOMMU Operations
 */

static struct iommu_domain *__ipmmu_domain_alloc(unsigned type)
{
	struct ipmmu_vmsa_domain *domain;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	mutex_init(&domain->mutex);

	return &domain->io_domain;
}

static struct iommu_domain *ipmmu_domain_alloc(unsigned type)
{
	struct iommu_domain *io_domain = NULL;

	switch (type) {
	case IOMMU_DOMAIN_UNMANAGED:
		io_domain = __ipmmu_domain_alloc(type);
		break;

	case IOMMU_DOMAIN_DMA:
		io_domain = __ipmmu_domain_alloc(type);
		if (io_domain && iommu_get_dma_cookie(io_domain)) {
			kfree(to_vmsa_domain(io_domain));
			io_domain = NULL;
		}
		break;
	}

	return io_domain;
}

static void ipmmu_domain_free(struct iommu_domain *io_domain)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	/*
	 * Free the domain resources. We assume that all devices have already
	 * been detached.
	 */
	iommu_put_dma_cookie(io_domain);
	ipmmu_domain_destroy_context(domain);
	free_io_pgtable_ops(domain->iop);
	kfree(domain);
}

static int ipmmu_attach_device(struct iommu_domain *io_domain,
			       struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
	unsigned int i;
	int ret = 0;

	if (!mmu) {
		dev_err(dev, "Cannot attach to IPMMU\n");
		return -ENXIO;
	}

	mutex_lock(&domain->mutex);

	if (!domain->mmu) {
		/* The domain hasn't been used yet, initialize it. */
		domain->mmu = mmu;
		ret = ipmmu_domain_init_context(domain);
		if (ret < 0) {
			dev_err(dev, "Unable to initialize IPMMU context\n");
			domain->mmu = NULL;
		} else {
			dev_info(dev, "Using IPMMU context %u\n",
				 domain->context_id);
		}
	} else if (domain->mmu != mmu) {
		/*
		 * Something is wrong, we can't attach two devices using
		 * different IOMMUs to the same domain.
		 */
		dev_err(dev, "Can't attach IPMMU %s to domain on IPMMU %s\n",
			dev_name(mmu->dev), dev_name(domain->mmu->dev));
		ret = -EINVAL;
	} else
		dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id);

	mutex_unlock(&domain->mutex);

	if (ret < 0)
		return ret;

	for (i = 0; i < fwspec->num_ids; ++i)
		ipmmu_utlb_enable(domain, fwspec->ids[i]);

	return 0;
}

static void ipmmu_detach_device(struct iommu_domain *io_domain,
				struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
	unsigned int i;

	for (i = 0; i < fwspec->num_ids; ++i)
		ipmmu_utlb_disable(domain, fwspec->ids[i]);

	/*
	 * TODO: Optimize by disabling the context when no device is attached.
	 */
}

static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	if (!domain)
		return -ENODEV;

	return domain->iop->map(domain->iop, iova, paddr, size, prot);
}

static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
			  size_t size)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	return domain->iop->unmap(domain->iop, iova, size);
}

static void ipmmu_iotlb_sync(struct iommu_domain *io_domain)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	if (domain->mmu)
		ipmmu_tlb_flush_all(domain);
}

static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
				      dma_addr_t iova)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	/* TODO: Is locking needed ? */

	return domain->iop->iova_to_phys(domain->iop, iova);
}

static int ipmmu_init_platform_device(struct device *dev,
				      struct of_phandle_args *args)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct platform_device *ipmmu_pdev;

	ipmmu_pdev = of_find_device_by_node(args->np);
	if (!ipmmu_pdev)
		return -ENODEV;

	fwspec->iommu_priv = platform_get_drvdata(ipmmu_pdev);

	return 0;
}

static const struct soc_device_attribute soc_rcar_gen3[] = {
	{ .soc_id = "r8a774a1", },
	{ .soc_id = "r8a774c0", },
	{ .soc_id = "r8a7795", },
	{ .soc_id = "r8a7796", },
	{ .soc_id = "r8a77965", },
	{ .soc_id = "r8a77970", },
	{ .soc_id = "r8a77990", },
	{ .soc_id = "r8a77995", },
	{ /* sentinel */ }
};

static const struct soc_device_attribute soc_rcar_gen3_whitelist[] = {
	{ .soc_id = "r8a774c0", },
	{ .soc_id = "r8a7795", .revision = "ES3.*" },
	{ .soc_id = "r8a77965", },
	{ .soc_id = "r8a77990", },
	{ .soc_id = "r8a77995", },
	{ /* sentinel */ }
};

static const char * const rcar_gen3_slave_whitelist[] = {
};

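/*
 * Note that rcar_gen3_slave_whitelist[] is empty here, so even on
 * whitelisted R-Car Gen3 SoCs every slave device is currently rejected
 * by ipmmu_slave_whitelist() until entries are added to the list.
 */
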
static bool ipmmu_slave_whitelist(struct device *dev)
{
	unsigned int i;

	/*
	 * For R-Car Gen3 use a white list to opt-in slave devices.
	 * For other SoCs, this returns true anyway.
	 */
	if (!soc_device_match(soc_rcar_gen3))
		return true;

	/* Check whether this R-Car Gen3 SoC can use the IPMMU correctly or not */
	if (!soc_device_match(soc_rcar_gen3_whitelist))
		return false;

	/* Check whether this slave device can work with the IPMMU */
	for (i = 0; i < ARRAY_SIZE(rcar_gen3_slave_whitelist); i++) {
		if (!strcmp(dev_name(dev), rcar_gen3_slave_whitelist[i]))
			return true;
	}

	/* Otherwise, do not allow use of IPMMU */
	return false;
}

static int ipmmu_of_xlate(struct device *dev,
			  struct of_phandle_args *spec)
{
	if (!ipmmu_slave_whitelist(dev))
		return -ENODEV;

	iommu_fwspec_add_ids(dev, spec->args, 1);

	/* Initialize once - xlate() will be called multiple times */
	if (to_ipmmu(dev))
		return 0;

	return ipmmu_init_platform_device(dev, spec);
}

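/*
 * For reference, a device tree master node references the IPMMU with a
 * one-cell specifier carrying the microTLB index, which of_xlate() adds
 * to the fwspec, along the lines of (node name illustrative only):
 *
 *	iommus = <&ipmmu_ds0 16>;
 */
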
static int ipmmu_init_arm_mapping(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	struct iommu_group *group;
	int ret;

	/* Create a device group and add the device to it. */
	group = iommu_group_alloc();
	if (IS_ERR(group)) {
		dev_err(dev, "Failed to allocate IOMMU group\n");
		return PTR_ERR(group);
	}

	ret = iommu_group_add_device(group, dev);
	iommu_group_put(group);

	if (ret < 0) {
		dev_err(dev, "Failed to add device to IPMMU group\n");
		return ret;
	}

	/*
	 * Create the ARM mapping, used by the ARM DMA mapping core to allocate
	 * VAs. This will allocate a corresponding IOMMU domain.
	 *
	 * TODO:
	 * - Create one mapping per context (TLB).
	 * - Make the mapping size configurable ? We currently use a 2GB mapping
	 *   at a 1GB offset to ensure that NULL VAs will fault.
	 */
	if (!mmu->mapping) {
		struct dma_iommu_mapping *mapping;

		mapping = arm_iommu_create_mapping(&platform_bus_type,
						   SZ_1G, SZ_2G);
		if (IS_ERR(mapping)) {
			dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n");
			ret = PTR_ERR(mapping);
			goto error;
		}

		mmu->mapping = mapping;
	}

	/* Attach the ARM VA mapping to the device. */
	ret = arm_iommu_attach_device(dev, mmu->mapping);
	if (ret < 0) {
		dev_err(dev, "Failed to attach device to VA mapping\n");
		goto error;
	}

	return 0;

error:
	iommu_group_remove_device(dev);
	if (mmu->mapping)
		arm_iommu_release_mapping(mmu->mapping);

	return ret;
}

static int ipmmu_add_device(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	struct iommu_group *group;
	int ret;

	/*
	 * Only let through devices that have been verified in xlate()
	 */
	if (!mmu)
		return -ENODEV;

	if (IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA)) {
		ret = ipmmu_init_arm_mapping(dev);
		if (ret)
			return ret;
	} else {
		group = iommu_group_get_for_dev(dev);
		if (IS_ERR(group))
			return PTR_ERR(group);

		iommu_group_put(group);
	}

	iommu_device_link(&mmu->iommu, dev);
	return 0;
}

static void ipmmu_remove_device(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);

	iommu_device_unlink(&mmu->iommu, dev);
	arm_iommu_detach_device(dev);
	iommu_group_remove_device(dev);
}

static struct iommu_group *ipmmu_find_group(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	struct iommu_group *group;

	if (mmu->group)
		return iommu_group_ref_get(mmu->group);

	group = iommu_group_alloc();
	if (!IS_ERR(group))
		mmu->group = group;

	return group;
}

static const struct iommu_ops ipmmu_ops = {
	.domain_alloc = ipmmu_domain_alloc,
	.domain_free = ipmmu_domain_free,
	.attach_dev = ipmmu_attach_device,
	.detach_dev = ipmmu_detach_device,
	.map = ipmmu_map,
	.unmap = ipmmu_unmap,
	.flush_iotlb_all = ipmmu_iotlb_sync,
	.iotlb_sync = ipmmu_iotlb_sync,
	.iova_to_phys = ipmmu_iova_to_phys,
	.add_device = ipmmu_add_device,
	.remove_device = ipmmu_remove_device,
	.device_group = ipmmu_find_group,
	.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
	.of_xlate = ipmmu_of_xlate,
};

/* -----------------------------------------------------------------------------
 * Probe/remove and init
 */

static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu)
{
	unsigned int i;

	/* Disable all contexts. */
	for (i = 0; i < mmu->num_ctx; ++i)
		ipmmu_write(mmu, i * IM_CTX_SIZE + IMCTR, 0);
}

static const struct ipmmu_features ipmmu_features_default = {
	.use_ns_alias_offset = true,
	.has_cache_leaf_nodes = false,
	.number_of_contexts = 1, /* software only tested with one context */
	.num_utlbs = 32,
	.setup_imbuscr = true,
	.twobit_imttbcr_sl0 = false,
	.reserved_context = false,
};

static const struct ipmmu_features ipmmu_features_rcar_gen3 = {
	.use_ns_alias_offset = false,
	.has_cache_leaf_nodes = true,
	.number_of_contexts = 8,
	.num_utlbs = 48,
	.setup_imbuscr = false,
	.twobit_imttbcr_sl0 = true,
	.reserved_context = true,
};

static const struct of_device_id ipmmu_of_ids[] = {
	{
		.compatible = "renesas,ipmmu-vmsa",
		.data = &ipmmu_features_default,
	}, {
		.compatible = "renesas,ipmmu-r8a774a1",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a774c0",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a7795",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a7796",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77965",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77970",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77990",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77995",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		/* Terminator */
	},
};

static int ipmmu_probe(struct platform_device *pdev)
{
	struct ipmmu_vmsa_device *mmu;
	struct resource *res;
	int irq;
	int ret;

	mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL);
	if (!mmu) {
		dev_err(&pdev->dev, "cannot allocate device data\n");
		return -ENOMEM;
	}

	mmu->dev = &pdev->dev;
	spin_lock_init(&mmu->lock);
	bitmap_zero(mmu->ctx, IPMMU_CTX_MAX);
	mmu->features = of_device_get_match_data(&pdev->dev);
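	/*
	 * utlb_ctx[] holds s8 entries, so memset()'s 0xff byte pattern reads
	 * back as IPMMU_CTX_INVALID (-1) for every microTLB.
	 */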
	memset(mmu->utlb_ctx, IPMMU_CTX_INVALID, mmu->features->num_utlbs);
	dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));

	/* Map I/O memory and request IRQ. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mmu->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mmu->base))
		return PTR_ERR(mmu->base);

	/*
	 * The IPMMU has two register banks, for secure and non-secure modes.
	 * The bank mapped at the beginning of the IPMMU address space
	 * corresponds to the running mode of the CPU. When running in secure
	 * mode the non-secure register bank is also available at an offset.
	 *
	 * Secure mode operation isn't clearly documented and is thus currently
	 * not implemented in the driver. Furthermore, preliminary tests of
	 * non-secure operation with the main register bank were not successful.
	 * Offset the registers base unconditionally to point to the non-secure
	 * alias space for now.
	 */
	if (mmu->features->use_ns_alias_offset)
		mmu->base += IM_NS_ALIAS_OFFSET;

	mmu->num_ctx = min(IPMMU_CTX_MAX, mmu->features->number_of_contexts);

	irq = platform_get_irq(pdev, 0);

	/*
	 * Determine if this IPMMU instance is a root device by checking for
	 * the lack of has_cache_leaf_nodes flag or renesas,ipmmu-main property.
	 */
	if (!mmu->features->has_cache_leaf_nodes ||
	    !of_find_property(pdev->dev.of_node, "renesas,ipmmu-main", NULL))
		mmu->root = mmu;
	else
		mmu->root = ipmmu_find_root();

	/*
	 * Wait until the root device has been registered for sure.
	 */
	if (!mmu->root)
		return -EPROBE_DEFER;

	/* Root devices have mandatory IRQs */
	if (ipmmu_is_root(mmu)) {
		if (irq < 0) {
			dev_err(&pdev->dev, "no IRQ found\n");
			return irq;
		}

		ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0,
				       dev_name(&pdev->dev), mmu);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to request IRQ %d\n", irq);
			return ret;
		}

		ipmmu_device_reset(mmu);

		if (mmu->features->reserved_context) {
			dev_info(&pdev->dev, "IPMMU context 0 is reserved\n");
			set_bit(0, mmu->ctx);
		}
	}

	/*
	 * Register the IPMMU to the IOMMU subsystem in the following cases:
	 * - R-Car Gen2 IPMMU (all devices registered)
	 * - R-Car Gen3 IPMMU (leaf devices only - skip root IPMMU-MM device)
	 */
	if (!mmu->features->has_cache_leaf_nodes || !ipmmu_is_root(mmu)) {
		ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL,
					     dev_name(&pdev->dev));
		if (ret)
			return ret;

		iommu_device_set_ops(&mmu->iommu, &ipmmu_ops);
		iommu_device_set_fwnode(&mmu->iommu,
					&pdev->dev.of_node->fwnode);

		ret = iommu_device_register(&mmu->iommu);
		if (ret)
			return ret;

#if defined(CONFIG_IOMMU_DMA)
		if (!iommu_present(&platform_bus_type))
			bus_set_iommu(&platform_bus_type, &ipmmu_ops);
#endif
	}

	/*
	 * We can't create the ARM mapping here as it requires the bus to have
	 * an IOMMU, which only happens when bus_set_iommu() is called in
	 * ipmmu_init() after the probe function returns.
	 */

	platform_set_drvdata(pdev, mmu);

	return 0;
}

static int ipmmu_remove(struct platform_device *pdev)
{
	struct ipmmu_vmsa_device *mmu = platform_get_drvdata(pdev);

	iommu_device_sysfs_remove(&mmu->iommu);
	iommu_device_unregister(&mmu->iommu);

	arm_iommu_release_mapping(mmu->mapping);

	ipmmu_device_reset(mmu);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int ipmmu_resume_noirq(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
	unsigned int i;

	/* Reset root MMU and restore contexts */
	if (ipmmu_is_root(mmu)) {
		ipmmu_device_reset(mmu);

		for (i = 0; i < mmu->num_ctx; i++) {
			if (!mmu->domains[i])
				continue;

			ipmmu_domain_setup_context(mmu->domains[i]);
		}
	}

	/* Re-enable active micro-TLBs */
	for (i = 0; i < mmu->features->num_utlbs; i++) {
		if (mmu->utlb_ctx[i] == IPMMU_CTX_INVALID)
			continue;

		ipmmu_utlb_enable(mmu->root->domains[mmu->utlb_ctx[i]], i);
	}

	return 0;
}

static const struct dev_pm_ops ipmmu_pm = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, ipmmu_resume_noirq)
};
#define DEV_PM_OPS	&ipmmu_pm
#else
#define DEV_PM_OPS	NULL
#endif /* CONFIG_PM_SLEEP */

static struct platform_driver ipmmu_driver = {
	.driver = {
		.name = "ipmmu-vmsa",
		.of_match_table = of_match_ptr(ipmmu_of_ids),
		.pm = DEV_PM_OPS,
	},
	.probe = ipmmu_probe,
	.remove	= ipmmu_remove,
};

static int __init ipmmu_init(void)
{
	struct device_node *np;
	static bool setup_done;
	int ret;

	if (setup_done)
		return 0;

	np = of_find_matching_node(NULL, ipmmu_of_ids);
	if (!np)
		return 0;

	of_node_put(np);

	ret = platform_driver_register(&ipmmu_driver);
	if (ret < 0)
		return ret;

#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &ipmmu_ops);
#endif

	setup_done = true;
	return 0;
}
subsys_initcall(ipmmu_init);