// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2016 MediaTek Inc.
 * Author: Yong Wu <yong.wu@mediatek.com>
 */
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/barrier.h>
#include <soc/mediatek/smi.h>

#include "mtk_iommu.h"
#define REG_MMU_PT_BASE_ADDR			0x000
#define MMU_PT_ADDR_MASK			GENMASK(31, 7)

#define REG_MMU_INVALIDATE			0x020
#define F_ALL_INVLD				0x2
#define F_MMU_INV_RANGE				0x1

#define REG_MMU_INVLD_START_A			0x024
#define REG_MMU_INVLD_END_A			0x028

#define REG_MMU_INV_SEL				0x038
#define F_INVLD_EN0				BIT(0)
#define F_INVLD_EN1				BIT(1)

#define REG_MMU_STANDARD_AXI_MODE		0x048
#define REG_MMU_DCM_DIS				0x050

#define REG_MMU_CTRL_REG			0x110
#define F_MMU_TF_PROT_TO_PROGRAM_ADDR		(2 << 4)
#define F_MMU_PREFETCH_RT_REPLACE_MOD		BIT(4)
#define F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173	(2 << 5)

#define REG_MMU_IVRP_PADDR			0x114

#define REG_MMU_VLD_PA_RNG			0x118
#define F_MMU_VLD_PA_RNG(EA, SA)		(((EA) << 8) | (SA))
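/*
 * EA/SA hold bits [32:30] of the end/start of the valid PA range.
 * Worked example from mtk_iommu_hw_init() below: F_MMU_VLD_PA_RNG(7, 4)
 * encodes the range 0x1_0000_0000 - 0x1_ffff_ffff used in 4GB mode.
 */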
#define REG_MMU_INT_CONTROL0			0x120
#define F_L2_MULTI_HIT_EN			BIT(0)
#define F_TABLE_WALK_FAULT_INT_EN		BIT(1)
#define F_PREFETCH_FIFO_OVERFLOW_INT_EN		BIT(2)
#define F_MISS_FIFO_OVERFLOW_INT_EN		BIT(3)
#define F_PREFETCH_FIFO_ERR_INT_EN		BIT(5)
#define F_MISS_FIFO_ERR_INT_EN			BIT(6)
#define F_INT_CLR_BIT				BIT(12)

#define REG_MMU_INT_MAIN_CONTROL		0x124
						/* mmu0 | mmu1 */
#define F_INT_TRANSLATION_FAULT			(BIT(0) | BIT(7))
#define F_INT_MAIN_MULTI_HIT_FAULT		(BIT(1) | BIT(8))
#define F_INT_INVALID_PA_FAULT			(BIT(2) | BIT(9))
#define F_INT_ENTRY_REPLACEMENT_FAULT		(BIT(3) | BIT(10))
#define F_INT_TLB_MISS_FAULT			(BIT(4) | BIT(11))
#define F_INT_MISS_TRANSACTION_FIFO_FAULT	(BIT(5) | BIT(12))
#define F_INT_PREFETCH_TRANSACTION_FIFO_FAULT	(BIT(6) | BIT(13))

#define REG_MMU_CPE_DONE			0x12C

#define REG_MMU_FAULT_ST1			0x134
#define F_REG_MMU0_FAULT_MASK			GENMASK(6, 0)
#define F_REG_MMU1_FAULT_MASK			GENMASK(13, 7)

#define REG_MMU0_FAULT_VA			0x13c
#define F_MMU_FAULT_VA_WRITE_BIT		BIT(1)
#define F_MMU_FAULT_VA_LAYER_BIT		BIT(0)

#define REG_MMU0_INVLD_PA			0x140
#define REG_MMU1_FAULT_VA			0x144
#define REG_MMU1_INVLD_PA			0x148
#define REG_MMU0_INT_ID				0x150
#define REG_MMU1_INT_ID				0x154
#define F_MMU_INT_ID_LARB_ID(a)			(((a) >> 7) & 0x7)
#define F_MMU_INT_ID_PORT_ID(a)			(((a) >> 2) & 0x1f)
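/*
 * Worked example (hypothetical register value): an INT_ID of 0x1a4,
 * i.e. (3 << 7) | (9 << 2), decodes via the two macros above to
 * larb 3, port 9.
 */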
#define MTK_PROTECT_PA_ALIGN			128

/*
 * Get the local arbiter ID and the portid within the larb arbiter
 * from mtk_m4u_id which is defined by MTK_M4U_ID.
 */
#define MTK_M4U_TO_LARB(id)		(((id) >> 5) & 0xf)
#define MTK_M4U_TO_PORT(id)		((id) & 0x1f)
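/*
 * Worked example (hypothetical id): MTK_M4U_ID(4, 5) packs the id as
 * (4 << 5) | 5 = 0x85, so MTK_M4U_TO_LARB(0x85) == 4 and
 * MTK_M4U_TO_PORT(0x85) == 5.
 */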
struct mtk_iommu_domain {
	spinlock_t			pgtlock; /* lock for page table */

	struct io_pgtable_cfg		cfg;
	struct io_pgtable_ops		*iop;

	struct iommu_domain		domain;
};
static const struct iommu_ops mtk_iommu_ops;
/*
 * In M4U 4GB mode, the physical address is remapped as below:
 *
 * CPU Physical address:
 * ====================
 *
 * 0      1G       2G     3G       4G     5G
 * |---A---|---B---|---C---|---D---|---E---|
 * +--I/O--+------------Memory-------------+
 *
 * IOMMU output physical address:
 * ==============================
 *
 * 4G      5G       6G      7G      8G
 * |---E---|---B---|---C---|---D---|
 * +------------Memory-------------+
 *
 * Region 'A' (I/O) can NOT be mapped by the M4U. For regions 'B'/'C'/'D',
 * bit 32 of the CPU physical address must always be set, while for region
 * 'E' the CPU physical address is kept as-is.
 * Additionally, the iommu consumers always use the CPU physical address.
 */
#define MTK_IOMMU_4GB_MODE_REMAP_BASE	 0x140000000UL
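/*
 * Worked example: with 4GB mode enabled, a CPU PA of 0x4000_0000
 * (region 'B') is installed in the page table with bit 32 set, so the
 * M4U outputs 0x1_4000_0000. Conversely, mtk_iommu_iova_to_phys() below
 * clears bit 32 of any output PA at or above MTK_IOMMU_4GB_MODE_REMAP_BASE,
 * folding 0x1_4000_0000 back to the CPU PA 0x4000_0000. Region 'E'
 * outputs (4G-5G) fall below the remap base and are returned unchanged.
 */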
static LIST_HEAD(m4ulist);	/* List all the M4U HWs */

#define for_each_m4u(data)	list_for_each_entry(data, &m4ulist, list)
/*
 * There may be 1 or 2 M4U HWs, but we always expect them to be in the same
 * domain for performance.
 *
 * This always returns the mtk_iommu_data of the first probed M4U, where the
 * iommu domain information is recorded.
 */
static struct mtk_iommu_data *mtk_iommu_get_m4u_data(void)
{
	struct mtk_iommu_data *data;

	for_each_m4u(data)
		return data;

	return NULL;
}
static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct mtk_iommu_domain, domain);
}
static void mtk_iommu_tlb_flush_all(void *cookie)
{
	struct mtk_iommu_data *data = cookie;

	for_each_m4u(data) {
		writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
			       data->base + REG_MMU_INV_SEL);
		writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
		wmb(); /* Make sure the tlb flush all done */
	}
}
static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size,
					   size_t granule, void *cookie)
{
	struct mtk_iommu_data *data = cookie;
	unsigned long flags;
	int ret;
	u32 tmp;

	for_each_m4u(data) {
		spin_lock_irqsave(&data->tlb_lock, flags);
		writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
			       data->base + REG_MMU_INV_SEL);

		writel_relaxed(iova, data->base + REG_MMU_INVLD_START_A);
		writel_relaxed(iova + size - 1,
			       data->base + REG_MMU_INVLD_END_A);
		writel_relaxed(F_MMU_INV_RANGE,
			       data->base + REG_MMU_INVALIDATE);

		/* tlb sync */
		ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE,
						tmp, tmp != 0, 10, 100000);
		if (ret) {
			dev_warn(data->dev,
				 "Partial TLB flush timed out, falling back to full flush\n");
			mtk_iommu_tlb_flush_all(cookie);
		}
		/* Clear the CPE status */
		writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
		spin_unlock_irqrestore(&data->tlb_lock, flags);
	}
}
static void mtk_iommu_tlb_flush_page_nosync(struct iommu_iotlb_gather *gather,
					    unsigned long iova, size_t granule,
					    void *cookie)
{
	struct mtk_iommu_data *data = cookie;
	struct iommu_domain *domain = &data->m4u_dom->domain;

	iommu_iotlb_gather_add_page(domain, gather, iova, granule);
}
static const struct iommu_flush_ops mtk_iommu_flush_ops = {
	.tlb_flush_all = mtk_iommu_tlb_flush_all,
	.tlb_flush_walk = mtk_iommu_tlb_flush_range_sync,
	.tlb_flush_leaf = mtk_iommu_tlb_flush_range_sync,
	.tlb_add_page = mtk_iommu_tlb_flush_page_nosync,
};
static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
{
	struct mtk_iommu_data *data = dev_id;
	struct mtk_iommu_domain *dom = data->m4u_dom;
	u32 int_state, regval, fault_iova, fault_pa;
	unsigned int fault_larb, fault_port;
	bool layer, write;

	/* Read error info from registers */
	int_state = readl_relaxed(data->base + REG_MMU_FAULT_ST1);
	if (int_state & F_REG_MMU0_FAULT_MASK) {
		regval = readl_relaxed(data->base + REG_MMU0_INT_ID);
		fault_iova = readl_relaxed(data->base + REG_MMU0_FAULT_VA);
		fault_pa = readl_relaxed(data->base + REG_MMU0_INVLD_PA);
	} else {
		regval = readl_relaxed(data->base + REG_MMU1_INT_ID);
		fault_iova = readl_relaxed(data->base + REG_MMU1_FAULT_VA);
		fault_pa = readl_relaxed(data->base + REG_MMU1_INVLD_PA);
	}
	layer = fault_iova & F_MMU_FAULT_VA_LAYER_BIT;
	write = fault_iova & F_MMU_FAULT_VA_WRITE_BIT;
	fault_larb = F_MMU_INT_ID_LARB_ID(regval);
	fault_port = F_MMU_INT_ID_PORT_ID(regval);

	fault_larb = data->plat_data->larbid_remap[fault_larb];

	if (report_iommu_fault(&dom->domain, data->dev, fault_iova,
			       write ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ)) {
		dev_err_ratelimited(
			data->dev,
			"fault type=0x%x iova=0x%x pa=0x%x larb=%d port=%d layer=%d %s\n",
			int_state, fault_iova, fault_pa, fault_larb, fault_port,
			layer, write ? "write" : "read");
	}

	/* Interrupt clear */
	regval = readl_relaxed(data->base + REG_MMU_INT_CONTROL0);
	regval |= F_INT_CLR_BIT;
	writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0);

	mtk_iommu_tlb_flush_all(data);

	return IRQ_HANDLED;
}
static void mtk_iommu_config(struct mtk_iommu_data *data,
			     struct device *dev, bool enable)
{
	struct mtk_smi_larb_iommu *larb_mmu;
	unsigned int larbid, portid;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	int i;

	for (i = 0; i < fwspec->num_ids; ++i) {
		larbid = MTK_M4U_TO_LARB(fwspec->ids[i]);
		portid = MTK_M4U_TO_PORT(fwspec->ids[i]);
		larb_mmu = &data->larb_imu[larbid];

		dev_dbg(dev, "%s iommu port: %d\n",
			enable ? "enable" : "disable", portid);

		if (enable)
			larb_mmu->mmu |= MTK_SMI_MMU_EN(portid);
		else
			larb_mmu->mmu &= ~MTK_SMI_MMU_EN(portid);
	}
}
static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom)
{
	struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();

	spin_lock_init(&dom->pgtlock);

	dom->cfg = (struct io_pgtable_cfg) {
		.quirks = IO_PGTABLE_QUIRK_ARM_NS |
			IO_PGTABLE_QUIRK_NO_PERMS |
			IO_PGTABLE_QUIRK_TLBI_ON_MAP |
			IO_PGTABLE_QUIRK_ARM_MTK_EXT,
		.pgsize_bitmap = mtk_iommu_ops.pgsize_bitmap,
		.ias = 32,
		.oas = 34,
		.tlb = &mtk_iommu_flush_ops,
		.iommu_dev = data->dev,
	};

	dom->iop = alloc_io_pgtable_ops(ARM_V7S, &dom->cfg, data);
	if (!dom->iop) {
		dev_err(data->dev, "Failed to alloc io pgtable\n");
		return -EINVAL;
	}

	/* Update our supported page sizes bitmap */
	dom->domain.pgsize_bitmap = dom->cfg.pgsize_bitmap;
	return 0;
}
static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type)
{
	struct mtk_iommu_domain *dom;

	if (type != IOMMU_DOMAIN_DMA)
		return NULL;
	dom = kzalloc(sizeof(*dom), GFP_KERNEL);
	if (!dom)
		return NULL;
	if (iommu_get_dma_cookie(&dom->domain))
		goto free_dom;
	if (mtk_iommu_domain_finalise(dom))
		goto put_dma_cookie;

	dom->domain.geometry.aperture_start = 0;
	dom->domain.geometry.aperture_end = DMA_BIT_MASK(32);
	dom->domain.geometry.force_aperture = true;
	return &dom->domain;

put_dma_cookie:
	iommu_put_dma_cookie(&dom->domain);
free_dom:
	kfree(dom);
	return NULL;
}
static void mtk_iommu_domain_free(struct iommu_domain *domain)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);

	free_io_pgtable_ops(dom->iop);
	iommu_put_dma_cookie(domain);
	kfree(dom);
}
static int mtk_iommu_attach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	struct mtk_iommu_data *data = dev_iommu_fwspec_get(dev)->iommu_priv;

	if (!data)
		return -ENODEV;

	/* Update the pgtable base address register of the M4U HW */
	if (!data->m4u_dom) {
		data->m4u_dom = dom;
		writel(dom->cfg.arm_v7s_cfg.ttbr[0] & MMU_PT_ADDR_MASK,
		       data->base + REG_MMU_PT_BASE_ADDR);
	}

	mtk_iommu_config(data, dev, true);
	return 0;
}
static void mtk_iommu_detach_device(struct iommu_domain *domain,
				    struct device *dev)
{
	struct mtk_iommu_data *data = dev_iommu_fwspec_get(dev)->iommu_priv;

	if (!data)
		return;

	mtk_iommu_config(data, dev, false);
}
static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t size, int prot)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
	unsigned long flags;
	int ret;

	/* The "4GB mode" M4U physically cannot use the lower remap of DRAM. */
	if (data->enable_4GB)
		paddr |= BIT_ULL(32);

	spin_lock_irqsave(&dom->pgtlock, flags);
	ret = dom->iop->map(dom->iop, iova, paddr, size, prot);
	spin_unlock_irqrestore(&dom->pgtlock, flags);

	return ret;
}
static size_t mtk_iommu_unmap(struct iommu_domain *domain,
			      unsigned long iova, size_t size,
			      struct iommu_iotlb_gather *gather)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	unsigned long flags;
	size_t unmapsz;

	spin_lock_irqsave(&dom->pgtlock, flags);
	unmapsz = dom->iop->unmap(dom->iop, iova, size, gather);
	spin_unlock_irqrestore(&dom->pgtlock, flags);

	return unmapsz;
}
static void mtk_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	mtk_iommu_tlb_flush_all(mtk_iommu_get_m4u_data());
}
static void mtk_iommu_iotlb_sync(struct iommu_domain *domain,
				 struct iommu_iotlb_gather *gather)
{
	struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
	size_t length = gather->end - gather->start;

	if (gather->start == ULONG_MAX)
		return;

	mtk_iommu_tlb_flush_range_sync(gather->start, length, gather->pgsize,
				       data);
}
static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
					  dma_addr_t iova)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
	unsigned long flags;
	phys_addr_t pa;

	spin_lock_irqsave(&dom->pgtlock, flags);
	pa = dom->iop->iova_to_phys(dom->iop, iova);
	spin_unlock_irqrestore(&dom->pgtlock, flags);

	if (data->enable_4GB && pa >= MTK_IOMMU_4GB_MODE_REMAP_BASE)
		pa &= ~BIT_ULL(32);

	return pa;
}
static int mtk_iommu_add_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct mtk_iommu_data *data;
	struct iommu_group *group;

	if (!fwspec || fwspec->ops != &mtk_iommu_ops)
		return -ENODEV; /* Not an iommu client device */

	data = fwspec->iommu_priv;
	iommu_device_link(&data->iommu, dev);

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}
static void mtk_iommu_remove_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct mtk_iommu_data *data;

	if (!fwspec || fwspec->ops != &mtk_iommu_ops)
		return;

	data = fwspec->iommu_priv;
	iommu_device_unlink(&data->iommu, dev);

	iommu_group_remove_device(dev);
	iommu_fwspec_free(dev);
}
static struct iommu_group *mtk_iommu_device_group(struct device *dev)
{
	struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();

	if (!data)
		return ERR_PTR(-ENODEV);

	/* All the client devices are in the same m4u iommu-group */
	if (!data->m4u_group) {
		data->m4u_group = iommu_group_alloc();
		if (IS_ERR(data->m4u_group))
			dev_err(dev, "Failed to allocate M4U IOMMU group\n");
	} else {
		iommu_group_ref_get(data->m4u_group);
	}
	return data->m4u_group;
}
static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct platform_device *m4updev;

	if (args->args_count != 1) {
		dev_err(dev, "invalid #iommu-cells(%d) property for IOMMU\n",
			args->args_count);
		return -EINVAL;
	}

	if (!fwspec->iommu_priv) {
		/* Get the m4u device */
		m4updev = of_find_device_by_node(args->np);
		if (WARN_ON(!m4updev))
			return -EINVAL;

		fwspec->iommu_priv = platform_get_drvdata(m4updev);
	}

	return iommu_fwspec_add_ids(dev, args->args, 1);
}
static const struct iommu_ops mtk_iommu_ops = {
	.domain_alloc	= mtk_iommu_domain_alloc,
	.domain_free	= mtk_iommu_domain_free,
	.attach_dev	= mtk_iommu_attach_device,
	.detach_dev	= mtk_iommu_detach_device,
	.map		= mtk_iommu_map,
	.unmap		= mtk_iommu_unmap,
	.flush_iotlb_all = mtk_iommu_flush_iotlb_all,
	.iotlb_sync	= mtk_iommu_iotlb_sync,
	.iova_to_phys	= mtk_iommu_iova_to_phys,
	.add_device	= mtk_iommu_add_device,
	.remove_device	= mtk_iommu_remove_device,
	.device_group	= mtk_iommu_device_group,
	.of_xlate	= mtk_iommu_of_xlate,
	.pgsize_bitmap	= SZ_4K | SZ_64K | SZ_1M | SZ_16M,
};
static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
{
	u32 regval;
	int ret;

	ret = clk_prepare_enable(data->bclk);
	if (ret) {
		dev_err(data->dev, "Failed to enable iommu bclk(%d)\n", ret);
		return ret;
	}

	if (data->plat_data->m4u_plat == M4U_MT8173)
		regval = F_MMU_PREFETCH_RT_REPLACE_MOD |
			 F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173;
	else
		regval = F_MMU_TF_PROT_TO_PROGRAM_ADDR;
	writel_relaxed(regval, data->base + REG_MMU_CTRL_REG);

	regval = F_L2_MULTI_HIT_EN |
		F_TABLE_WALK_FAULT_INT_EN |
		F_PREFETCH_FIFO_OVERFLOW_INT_EN |
		F_MISS_FIFO_OVERFLOW_INT_EN |
		F_PREFETCH_FIFO_ERR_INT_EN |
		F_MISS_FIFO_ERR_INT_EN;
	writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0);

	regval = F_INT_TRANSLATION_FAULT |
		F_INT_MAIN_MULTI_HIT_FAULT |
		F_INT_INVALID_PA_FAULT |
		F_INT_ENTRY_REPLACEMENT_FAULT |
		F_INT_TLB_MISS_FAULT |
		F_INT_MISS_TRANSACTION_FIFO_FAULT |
		F_INT_PREFETCH_TRANSACTION_FIFO_FAULT;
	writel_relaxed(regval, data->base + REG_MMU_INT_MAIN_CONTROL);

	if (data->plat_data->m4u_plat == M4U_MT8173)
		regval = (data->protect_base >> 1) | (data->enable_4GB << 31);
	else
		regval = lower_32_bits(data->protect_base) |
			 upper_32_bits(data->protect_base);
	writel_relaxed(regval, data->base + REG_MMU_IVRP_PADDR);

	if (data->enable_4GB && data->plat_data->has_vld_pa_rng) {
		/*
		 * If 4GB mode is enabled, the valid PA range is from
		 * 0x1_0000_0000 to 0x1_ffff_ffff. Here record bits[32:30].
		 */
		regval = F_MMU_VLD_PA_RNG(7, 4);
		writel_relaxed(regval, data->base + REG_MMU_VLD_PA_RNG);
	}
	writel_relaxed(0, data->base + REG_MMU_DCM_DIS);

	if (data->plat_data->reset_axi)
		writel_relaxed(0, data->base + REG_MMU_STANDARD_AXI_MODE);

	if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,
			     dev_name(data->dev), (void *)data)) {
		writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR);
		clk_disable_unprepare(data->bclk);
		dev_err(data->dev, "Failed @ IRQ-%d Request\n", data->irq);
		return -ENODEV;
	}

	return 0;
}
static const struct component_master_ops mtk_iommu_com_ops = {
	.bind		= mtk_iommu_bind,
	.unbind		= mtk_iommu_unbind,
};
static int mtk_iommu_probe(struct platform_device *pdev)
{
	struct mtk_iommu_data *data;
	struct device *dev = &pdev->dev;
	struct resource *res;
	resource_size_t ioaddr;
	struct component_match *match = NULL;
	void *protect;
	int i, larb_nr, ret;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	data->dev = dev;
	data->plat_data = of_device_get_match_data(dev);

	/* Protect memory. The HW will access this region on a translation fault. */
	protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2, GFP_KERNEL);
	if (!protect)
		return -ENOMEM;
	data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);

	/* Whether the current DRAM extends beyond 4GB */
	data->enable_4GB = !!(max_pfn > (BIT_ULL(32) >> PAGE_SHIFT));
	if (!data->plat_data->has_4gb_mode)
		data->enable_4GB = false;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->base))
		return PTR_ERR(data->base);
	ioaddr = res->start;

	data->irq = platform_get_irq(pdev, 0);
	if (data->irq < 0)
		return data->irq;

	if (data->plat_data->has_bclk) {
		data->bclk = devm_clk_get(dev, "bclk");
		if (IS_ERR(data->bclk))
			return PTR_ERR(data->bclk);
	}

	larb_nr = of_count_phandle_with_args(dev->of_node,
					     "mediatek,larbs", NULL);
	if (larb_nr < 0)
		return larb_nr;

	for (i = 0; i < larb_nr; i++) {
		struct device_node *larbnode;
		struct platform_device *plarbdev;
		u32 id;

		larbnode = of_parse_phandle(dev->of_node, "mediatek,larbs", i);
		if (!larbnode)
			return -EINVAL;

		if (!of_device_is_available(larbnode)) {
			of_node_put(larbnode);
			continue;
		}

		ret = of_property_read_u32(larbnode, "mediatek,larb-id", &id);
		if (ret) /* The id is consecutive if this property is absent */
			id = i;

		plarbdev = of_find_device_by_node(larbnode);
		if (!plarbdev) {
			of_node_put(larbnode);
			return -EPROBE_DEFER;
		}
		data->larb_imu[id].dev = &plarbdev->dev;

		component_match_add_release(dev, &match, release_of,
					    compare_of, larbnode);
	}

	platform_set_drvdata(pdev, data);

	ret = mtk_iommu_hw_init(data);
	if (ret)
		return ret;

	ret = iommu_device_sysfs_add(&data->iommu, dev, NULL,
				     "mtk-iommu.%pa", &ioaddr);
	if (ret)
		return ret;

	iommu_device_set_ops(&data->iommu, &mtk_iommu_ops);
	iommu_device_set_fwnode(&data->iommu, &pdev->dev.of_node->fwnode);

	ret = iommu_device_register(&data->iommu);
	if (ret)
		return ret;

	spin_lock_init(&data->tlb_lock);
	list_add_tail(&data->list, &m4ulist);

	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);

	return component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
}
static int mtk_iommu_remove(struct platform_device *pdev)
{
	struct mtk_iommu_data *data = platform_get_drvdata(pdev);

	iommu_device_sysfs_remove(&data->iommu);
	iommu_device_unregister(&data->iommu);

	if (iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, NULL);

	clk_disable_unprepare(data->bclk);
	devm_free_irq(&pdev->dev, data->irq, data);
	component_master_del(&pdev->dev, &mtk_iommu_com_ops);
	return 0;
}
static int __maybe_unused mtk_iommu_suspend(struct device *dev)
{
	struct mtk_iommu_data *data = dev_get_drvdata(dev);
	struct mtk_iommu_suspend_reg *reg = &data->reg;
	void __iomem *base = data->base;

	reg->standard_axi_mode = readl_relaxed(base +
					       REG_MMU_STANDARD_AXI_MODE);
	reg->dcm_dis = readl_relaxed(base + REG_MMU_DCM_DIS);
	reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG);
	reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL0);
	reg->int_main_control = readl_relaxed(base + REG_MMU_INT_MAIN_CONTROL);
	reg->ivrp_paddr = readl_relaxed(base + REG_MMU_IVRP_PADDR);
	reg->vld_pa_rng = readl_relaxed(base + REG_MMU_VLD_PA_RNG);
	clk_disable_unprepare(data->bclk);
	return 0;
}
static int __maybe_unused mtk_iommu_resume(struct device *dev)
{
	struct mtk_iommu_data *data = dev_get_drvdata(dev);
	struct mtk_iommu_suspend_reg *reg = &data->reg;
	struct mtk_iommu_domain *m4u_dom = data->m4u_dom;
	void __iomem *base = data->base;
	int ret;

	ret = clk_prepare_enable(data->bclk);
	if (ret) {
		dev_err(data->dev, "Failed to enable clk(%d) in resume\n", ret);
		return ret;
	}
	writel_relaxed(reg->standard_axi_mode,
		       base + REG_MMU_STANDARD_AXI_MODE);
	writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM_DIS);
	writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG);
	writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL0);
	writel_relaxed(reg->int_main_control, base + REG_MMU_INT_MAIN_CONTROL);
	writel_relaxed(reg->ivrp_paddr, base + REG_MMU_IVRP_PADDR);
	writel_relaxed(reg->vld_pa_rng, base + REG_MMU_VLD_PA_RNG);
	if (m4u_dom)
		writel(m4u_dom->cfg.arm_v7s_cfg.ttbr[0] & MMU_PT_ADDR_MASK,
		       base + REG_MMU_PT_BASE_ADDR);
	return 0;
}
static const struct dev_pm_ops mtk_iommu_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_iommu_suspend, mtk_iommu_resume)
};
static const struct mtk_iommu_plat_data mt2712_data = {
	.m4u_plat     = M4U_MT2712,
	.has_4gb_mode = true,
	.has_bclk     = true,
	.has_vld_pa_rng   = true,
	.larbid_remap = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
};
static const struct mtk_iommu_plat_data mt8173_data = {
	.m4u_plat     = M4U_MT8173,
	.has_4gb_mode = true,
	.has_bclk     = true,
	.reset_axi    = true,
	.larbid_remap = {0, 1, 2, 3, 4, 5}, /* Linear mapping. */
};
static const struct mtk_iommu_plat_data mt8183_data = {
	.m4u_plat     = M4U_MT8183,
	.reset_axi    = true,
	.larbid_remap = {0, 4, 5, 6, 7, 2, 3, 1},
};
static const struct of_device_id mtk_iommu_of_ids[] = {
	{ .compatible = "mediatek,mt2712-m4u", .data = &mt2712_data},
	{ .compatible = "mediatek,mt8173-m4u", .data = &mt8173_data},
	{ .compatible = "mediatek,mt8183-m4u", .data = &mt8183_data},
	{}
};
static struct platform_driver mtk_iommu_driver = {
	.probe	= mtk_iommu_probe,
	.remove	= mtk_iommu_remove,
	.driver	= {
		.name = "mtk-iommu",
		.of_match_table = of_match_ptr(mtk_iommu_of_ids),
		.pm = &mtk_iommu_pm_ops,
	}
};
static int __init mtk_iommu_init(void)
{
	int ret;

	ret = platform_driver_register(&mtk_iommu_driver);
	if (ret != 0)
		pr_err("Failed to register MTK IOMMU driver\n");

	return ret;
}

subsys_initcall(mtk_iommu_init)