// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPU-agnostic ARM page table allocator.
 *
 * Copyright (C) 2014 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/io-pgtable.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>

#include <asm/barrier.h>

#define ARM_LPAE_MAX_ADDR_BITS		52
#define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
#define ARM_LPAE_MAX_LEVELS		4

/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct arm_lpae_io_pgtable, iop)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

/*
 * Calculate the right shift amount to get to the portion describing level l
 * in a virtual address mapped by the pagetable in d.
 */
#define ARM_LPAE_LVL_SHIFT(l,d)						\
	(((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level) +		\
	ilog2(sizeof(arm_lpae_iopte)))

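/*
 * Illustrative only: with a 4K granule, arm_lpae_iopte is 8 bytes, so
 * bits_per_level = 12 - 3 = 9 and the shifts come out as 39/30/21/12
 * for levels 0-3, matching the VMSAv8-64 4K translation regime.
 */
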
#define ARM_LPAE_GRANULE(d)						\
	(sizeof(arm_lpae_iopte) << (d)->bits_per_level)
#define ARM_LPAE_PGD_SIZE(d)						\
	(sizeof(arm_lpae_iopte) << (d)->pgd_bits)

/*
 * Calculate the index at level l used to map virtual address a using the
 * pagetable in d.
 */
#define ARM_LPAE_PGD_IDX(l,d)						\
	((l) == (d)->start_level ? (d)->pgd_bits - (d)->bits_per_level : 0)

#define ARM_LPAE_LVL_IDX(a,l,d)						\
	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))

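/*
 * Illustrative only: for a 4K granule at the last level,
 * ARM_LPAE_LVL_IDX(a,3,d) == ((a) >> 12) & 0x1ff, i.e. VA bits [20:12].
 * At the start level the PGD may be wider than one granule, so
 * ARM_LPAE_PGD_IDX() widens the index by the extra pgd_bits.
 */
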
/* Calculate the block/page mapping size at level l for pagetable in d. */
#define ARM_LPAE_BLOCK_SIZE(l,d)	(1ULL << ARM_LPAE_LVL_SHIFT(l,d))

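/*
 * Illustrative only: with a 4K granule this yields 4K at level 3,
 * 2M at level 2 and 1G at level 1.
 */
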
/* Page table bits */
#define ARM_LPAE_PTE_TYPE_SHIFT		0
#define ARM_LPAE_PTE_TYPE_MASK		0x3

#define ARM_LPAE_PTE_TYPE_BLOCK		1
#define ARM_LPAE_PTE_TYPE_TABLE		3
#define ARM_LPAE_PTE_TYPE_PAGE		3

#define ARM_LPAE_PTE_ADDR_MASK		GENMASK_ULL(47,12)

#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
#define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
#define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
#define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)

#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
#define ARM_LPAE_PTE_ATTR_HI_MASK	(((arm_lpae_iopte)6) << 52)
#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
					 ARM_LPAE_PTE_ATTR_HI_MASK)
/* Software bit for solving coherency races */
#define ARM_LPAE_PTE_SW_SYNC		(((arm_lpae_iopte)1) << 55)

/* Stage-1 PTE */
#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)

/* Stage-2 PTE */
#define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
#define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
#define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
#define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)

/* Register bits */
#define ARM_32_LPAE_TCR_EAE		(1 << 31)
#define ARM_64_LPAE_S2_TCR_RES1		(1 << 31)

#define ARM_LPAE_TCR_EPD1		(1 << 23)

#define ARM_LPAE_TCR_TG0_4K		(0 << 14)
#define ARM_LPAE_TCR_TG0_64K		(1 << 14)
#define ARM_LPAE_TCR_TG0_16K		(2 << 14)

#define ARM_LPAE_TCR_SH0_SHIFT		12
#define ARM_LPAE_TCR_SH0_MASK		0x3
#define ARM_LPAE_TCR_SH_NS		0
#define ARM_LPAE_TCR_SH_OS		2
#define ARM_LPAE_TCR_SH_IS		3

#define ARM_LPAE_TCR_ORGN0_SHIFT	10
#define ARM_LPAE_TCR_IRGN0_SHIFT	8
#define ARM_LPAE_TCR_RGN_MASK		0x3
#define ARM_LPAE_TCR_RGN_NC		0
#define ARM_LPAE_TCR_RGN_WBWA		1
#define ARM_LPAE_TCR_RGN_WT		2
#define ARM_LPAE_TCR_RGN_WB		3

#define ARM_LPAE_TCR_SL0_SHIFT		6
#define ARM_LPAE_TCR_SL0_MASK		0x3

#define ARM_LPAE_TCR_T0SZ_SHIFT		0
#define ARM_LPAE_TCR_SZ_MASK		0xf

#define ARM_LPAE_TCR_PS_SHIFT		16
#define ARM_LPAE_TCR_PS_MASK		0x7

#define ARM_LPAE_TCR_IPS_SHIFT		32
#define ARM_LPAE_TCR_IPS_MASK		0x7

#define ARM_LPAE_TCR_PS_32_BIT		0x0ULL
#define ARM_LPAE_TCR_PS_36_BIT		0x1ULL
#define ARM_LPAE_TCR_PS_40_BIT		0x2ULL
#define ARM_LPAE_TCR_PS_42_BIT		0x3ULL
#define ARM_LPAE_TCR_PS_44_BIT		0x4ULL
#define ARM_LPAE_TCR_PS_48_BIT		0x5ULL
#define ARM_LPAE_TCR_PS_52_BIT		0x6ULL

#define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK		0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
#define ARM_LPAE_MAIR_ATTR_NC		0x44
#define ARM_LPAE_MAIR_ATTR_INC_OWBRWA	0xf4
#define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
#define ARM_LPAE_MAIR_ATTR_IDX_NC	0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV	2
#define ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE	3

#define ARM_MALI_LPAE_TTBR_ADRMODE_TABLE (3u << 0)
#define ARM_MALI_LPAE_TTBR_READ_INNER	BIT(2)
#define ARM_MALI_LPAE_TTBR_SHARE_OUTER	BIT(4)

#define ARM_MALI_LPAE_MEMATTR_IMP_DEF	0x88ULL
#define ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC 0x8DULL

/* IOPTE accessors */
#define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d))

#define iopte_type(pte,l)					\
	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)

#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)

struct arm_lpae_io_pgtable {
	struct io_pgtable	iop;

	int			pgd_bits;
	int			start_level;
	int			bits_per_level;

	void			*pgd;
};

typedef u64 arm_lpae_iopte;

static inline bool iopte_leaf(arm_lpae_iopte pte, int lvl,
			      enum io_pgtable_fmt fmt)
{
	if (lvl == (ARM_LPAE_MAX_LEVELS - 1) && fmt != ARM_MALI_LPAE)
		return iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_PAGE;

	return iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_BLOCK;
}

static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr,
				     struct arm_lpae_io_pgtable *data)
{
	arm_lpae_iopte pte = paddr;

	/* Of the bits which overlap, either 51:48 or 15:12 are always RES0 */
	return (pte | (pte >> (48 - 12))) & ARM_LPAE_PTE_ADDR_MASK;
}

static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte,
				  struct arm_lpae_io_pgtable *data)
{
	u64 paddr = pte & ARM_LPAE_PTE_ADDR_MASK;

	if (ARM_LPAE_GRANULE(data) < SZ_64K)
		return paddr;

	/* Rotate the packed high-order bits back to the top */
	return (paddr | (paddr << (48 - 12))) & (ARM_LPAE_PTE_ADDR_MASK << 4);
}

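/*
 * Illustrative only: with a 64K granule and 52-bit output addresses, PA
 * bits 51:48 live in PTE bits 15:12, which a 64K-aligned address never
 * uses itself. paddr_to_iopte() packs them down with a 36-bit shift and
 * iopte_to_paddr() rotates them back up, masking with
 * ARM_LPAE_PTE_ADDR_MASK << 4, i.e. bits 51:16.
 */
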
static bool selftest_running = false;

static dma_addr_t __arm_lpae_dma_addr(void *pages)
{
	return (dma_addr_t)virt_to_phys(pages);
}

static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
				    struct io_pgtable_cfg *cfg)
{
	struct device *dev = cfg->iommu_dev;
	int order = get_order(size);
	struct page *p;
	dma_addr_t dma;
	void *pages;

	VM_BUG_ON((gfp & __GFP_HIGHMEM));
	p = alloc_pages_node(dev ? dev_to_node(dev) : NUMA_NO_NODE,
			     gfp | __GFP_ZERO, order);
	if (!p)
		return NULL;

	pages = page_address(p);
	if (!cfg->coherent_walk) {
		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
		/*
		 * We depend on the IOMMU being able to work with any physical
		 * address directly, so if the DMA layer suggests otherwise by
		 * translating or truncating them, that bodes very badly...
		 */
		if (dma != virt_to_phys(pages))
			goto out_unmap;
	}

	return pages;

out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);

out_free:
	__free_pages(p, order);
	return NULL;
}

static void __arm_lpae_free_pages(void *pages, size_t size,
				  struct io_pgtable_cfg *cfg)
{
	if (!cfg->coherent_walk)
		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
				 size, DMA_TO_DEVICE);
	free_pages((unsigned long)pages, get_order(size));
}

static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep,
				struct io_pgtable_cfg *cfg)
{
	dma_sync_single_for_device(cfg->iommu_dev, __arm_lpae_dma_addr(ptep),
				   sizeof(*ptep), DMA_TO_DEVICE);
}

static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
			       struct io_pgtable_cfg *cfg)
{
	*ptep = pte;

	if (!cfg->coherent_walk)
		__arm_lpae_sync_pte(ptep, cfg);
}

static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t size, int lvl,
			       arm_lpae_iopte *ptep);

static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
				phys_addr_t paddr, arm_lpae_iopte prot,
				int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = prot;

	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
		pte |= ARM_LPAE_PTE_NS;

	if (data->iop.fmt != ARM_MALI_LPAE && lvl == ARM_LPAE_MAX_LEVELS - 1)
		pte |= ARM_LPAE_PTE_TYPE_PAGE;
	else
		pte |= ARM_LPAE_PTE_TYPE_BLOCK;

	if (data->iop.fmt != ARM_MALI_LPAE)
		pte |= ARM_LPAE_PTE_AF;
	pte |= ARM_LPAE_PTE_SH_IS;
	pte |= paddr_to_iopte(paddr, data);

	__arm_lpae_set_pte(ptep, pte, &data->iop.cfg);
}

static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
			     unsigned long iova, phys_addr_t paddr,
			     arm_lpae_iopte prot, int lvl,
			     arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = *ptep;

	if (iopte_leaf(pte, lvl, data->iop.fmt)) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	} else if (iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_TABLE) {
		/*
		 * We need to unmap and free the old table before
		 * overwriting it with a block entry.
		 */
		arm_lpae_iopte *tblp;
		size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);

		tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
		if (__arm_lpae_unmap(data, NULL, iova, sz, lvl, tblp) != sz) {
			WARN_ON(1);
			return -EINVAL;
		}
	}

	__arm_lpae_init_pte(data, paddr, prot, lvl, ptep);
	return 0;
}

static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
					     arm_lpae_iopte *ptep,
					     arm_lpae_iopte curr,
					     struct io_pgtable_cfg *cfg)
{
	arm_lpae_iopte old, new;

	new = __pa(table) | ARM_LPAE_PTE_TYPE_TABLE;
	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
		new |= ARM_LPAE_PTE_NSTABLE;

	/*
	 * Ensure the table itself is visible before its PTE can be.
	 * Whilst we could get away with cmpxchg64_release below, this
	 * doesn't have any ordering semantics when !CONFIG_SMP.
	 */
	dma_wmb();

	old = cmpxchg64_relaxed(ptep, curr, new);

	if (cfg->coherent_walk || (old & ARM_LPAE_PTE_SW_SYNC))
		return old;

	/* Even if it's not ours, there's no point waiting; just kick it */
	__arm_lpae_sync_pte(ptep, cfg);
	if (old == curr)
		WRITE_ONCE(*ptep, new | ARM_LPAE_PTE_SW_SYNC);

	return old;
}

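/*
 * Illustrative note: on a non-coherent walk the loser of the cmpxchg
 * above still kicks a sync of the winning PTE, while only the winner
 * rewrites it with ARM_LPAE_PTE_SW_SYNC set; __arm_lpae_map() uses that
 * bit to avoid syncing entries already known to be visible.
 */
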
static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
			  phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
			  int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *cptep, pte;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
	size_t tblsz = ARM_LPAE_GRANULE(data);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	/* Find our entry at the current level */
	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);

	/* If we can install a leaf entry at this level, then do so */
	if (size == block_size)
		return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = READ_ONCE(*ptep);
	if (!pte) {
		cptep = __arm_lpae_alloc_pages(tblsz, GFP_ATOMIC, cfg);
		if (!cptep)
			return -ENOMEM;

		pte = arm_lpae_install_table(cptep, ptep, 0, cfg);
		if (pte)
			__arm_lpae_free_pages(cptep, tblsz, cfg);
	} else if (!cfg->coherent_walk && !(pte & ARM_LPAE_PTE_SW_SYNC)) {
		__arm_lpae_sync_pte(ptep, cfg);
	}

	if (pte && !iopte_leaf(pte, lvl, data->iop.fmt)) {
		cptep = iopte_deref(pte, data);
	} else if (pte) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	}

	/* Rinse, repeat */
	return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep);
}

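/*
 * Illustrative only: mapping a 2M region with a 4K granule recurses from
 * the start level until ARM_LPAE_BLOCK_SIZE() matches at level 2, where
 * a single block entry is installed instead of a level-3 table.
 */
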
static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
					   int prot)
{
	arm_lpae_iopte pte;

	if (data->iop.fmt == ARM_64_LPAE_S1 ||
	    data->iop.fmt == ARM_32_LPAE_S1) {
		pte = ARM_LPAE_PTE_nG;
		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
			pte |= ARM_LPAE_PTE_AP_RDONLY;
		if (!(prot & IOMMU_PRIV))
			pte |= ARM_LPAE_PTE_AP_UNPRIV;
	} else {
		pte = ARM_LPAE_PTE_HAP_FAULT;
		if (prot & IOMMU_READ)
			pte |= ARM_LPAE_PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pte |= ARM_LPAE_PTE_HAP_WRITE;
	}

	/*
	 * Note that this logic is structured to accommodate Mali LPAE
	 * having stage-1-like attributes but stage-2-like permissions.
	 */
	if (data->iop.fmt == ARM_64_LPAE_S2 ||
	    data->iop.fmt == ARM_32_LPAE_S2) {
		if (prot & IOMMU_MMIO)
			pte |= ARM_LPAE_PTE_MEMATTR_DEV;
		else if (prot & IOMMU_CACHE)
			pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
		else
			pte |= ARM_LPAE_PTE_MEMATTR_NC;
	} else {
		if (prot & IOMMU_MMIO)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
		else if (prot & IOMMU_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
		else if (prot & IOMMU_SYS_CACHE_ONLY)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
	}

	if (prot & IOMMU_NOEXEC)
		pte |= ARM_LPAE_PTE_XN;

	return pte;
}

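/*
 * Illustrative only: at stage 1, IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE
 * yields nG | AP_UNPRIV | (MAIR_ATTR_IDX_CACHE << ATTRINDX_SHIFT), with
 * AP_RDONLY left clear and XN only added for IOMMU_NOEXEC.
 */
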
static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
			phys_addr_t paddr, size_t size, int iommu_prot)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte *ptep = data->pgd;
	int ret, lvl = data->start_level;
	arm_lpae_iopte prot;

	/* If no access, then nothing to do */
	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

	if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size))
		return -EINVAL;

	if (WARN_ON(iova >> data->iop.cfg.ias || paddr >> data->iop.cfg.oas))
		return -ERANGE;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);
	ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
	/*
	 * Synchronise all PTE updates for the new mapping before there's
	 * a chance for anything to kick off a table walk for the new iova.
	 */
	wmb();

	return ret;
}

static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
				    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *start, *end;
	unsigned long table_size;

	if (lvl == data->start_level)
		table_size = ARM_LPAE_PGD_SIZE(data);
	else
		table_size = ARM_LPAE_GRANULE(data);

	start = ptep;

	/* Only leaf entries at the last level */
	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		end = ptep;
	else
		end = (void *)ptep + table_size;

	while (ptep != end) {
		arm_lpae_iopte pte = *ptep++;

		if (!pte || iopte_leaf(pte, lvl, data->iop.fmt))
			continue;

		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
	}

	__arm_lpae_free_pages(start, table_size, &data->iop.cfg);
}

static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);

	__arm_lpae_free_pgtable(data, data->start_level, data->pgd);
	kfree(data);
}

static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
				       struct iommu_iotlb_gather *gather,
				       unsigned long iova, size_t size,
				       arm_lpae_iopte blk_pte, int lvl,
				       arm_lpae_iopte *ptep)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte pte, *tablep;
	phys_addr_t blk_paddr;
	size_t tablesz = ARM_LPAE_GRANULE(data);
	size_t split_sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
	int i, unmap_idx = -1;

	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg);
	if (!tablep)
		return 0; /* Bytes unmapped */

	if (size == split_sz)
		unmap_idx = ARM_LPAE_LVL_IDX(iova, lvl, data);

	blk_paddr = iopte_to_paddr(blk_pte, data);
	pte = iopte_prot(blk_pte);

	for (i = 0; i < tablesz / sizeof(pte); i++, blk_paddr += split_sz) {
		/* Unmap! */
		if (i == unmap_idx)
			continue;

		__arm_lpae_init_pte(data, blk_paddr, pte, lvl, &tablep[i]);
	}

	pte = arm_lpae_install_table(tablep, ptep, blk_pte, cfg);
	if (pte != blk_pte) {
		__arm_lpae_free_pages(tablep, tablesz, cfg);
		/*
		 * We may race against someone unmapping another part of this
		 * block, but anything else is invalid. We can't misinterpret
		 * a page entry here since we're never at the last level.
		 */
		if (iopte_type(pte, lvl - 1) != ARM_LPAE_PTE_TYPE_TABLE)
			return 0;

		tablep = iopte_deref(pte, data);
	} else if (unmap_idx >= 0) {
		io_pgtable_tlb_add_page(&data->iop, gather, iova, size);
		return size;
	}

	return __arm_lpae_unmap(data, gather, iova, size, lvl, tablep);
}

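/*
 * Illustrative only: unmapping 4K out of a 2M block with a 4K granule
 * allocates a level-3 table, repopulates the 511 surviving entries with
 * the block's attributes, leaves the target slot empty, and swaps the
 * table in over the old block entry with arm_lpae_install_table().
 */
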
static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t size, int lvl,
			       arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte;
	struct io_pgtable *iop = &data->iop;

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
	pte = READ_ONCE(*ptep);
	if (WARN_ON(!pte))
		return 0;

	/* If the size matches this level, we're in the right place */
	if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
		__arm_lpae_set_pte(ptep, 0, &iop->cfg);

		if (!iopte_leaf(pte, lvl, iop->fmt)) {
			/* Also flush any partial walks */
			io_pgtable_tlb_flush_walk(iop, iova, size,
						  ARM_LPAE_GRANULE(data));
			ptep = iopte_deref(pte, data);
			__arm_lpae_free_pgtable(data, lvl + 1, ptep);
		} else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) {
			/*
			 * Order the PTE update against queueing the IOVA, to
			 * guarantee that a flush callback from a different CPU
			 * has observed it before the TLBIALL can be issued.
			 */
			smp_wmb();
		} else {
			io_pgtable_tlb_add_page(iop, gather, iova, size);
		}

		return size;
	} else if (iopte_leaf(pte, lvl, iop->fmt)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_lpae_split_blk_unmap(data, gather, iova, size, pte,
						lvl + 1, ptep);
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte, data);
	return __arm_lpae_unmap(data, gather, iova, size, lvl + 1, ptep);
}

static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
			     size_t size, struct iommu_iotlb_gather *gather)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte *ptep = data->pgd;

	if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size))
		return 0;

	if (WARN_ON(iova >> data->iop.cfg.ias))
		return 0;

	return __arm_lpae_unmap(data, gather, iova, size, data->start_level, ptep);
}

static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
					 unsigned long iova)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte pte, *ptep = data->pgd;
	int lvl = data->start_level;

	do {
		/* Valid IOPTE pointer? */
		if (!ptep)
			return 0;

		/* Grab the IOPTE we're interested in */
		ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
		pte = READ_ONCE(*ptep);

		/* Valid entry? */
		if (!pte)
			return 0;

		/* Leaf entry? */
		if (iopte_leaf(pte, lvl, data->iop.fmt))
			goto found_translation;

		/* Take it to the next level */
		ptep = iopte_deref(pte, data);
	} while (++lvl < ARM_LPAE_MAX_LEVELS);

	/* Ran out of page tables to walk */
	return 0;

found_translation:
	iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1);
	return iopte_to_paddr(pte, data) | iova;
}

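/*
 * Illustrative only: if the walk stops at a 2M block in a 4K-granule
 * table, the result is iopte_to_paddr(pte, data) plus the low 21 bits
 * of the iova, i.e. the offset within the block.
 */
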
static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
	unsigned long granule, page_sizes;
	unsigned int max_addr_bits = 48;

	/*
	 * We need to restrict the supported page sizes to match the
	 * translation regime for a particular granule. Aim to match
	 * the CPU page size if possible, otherwise prefer smaller sizes.
	 * While we're at it, restrict the block sizes to match the
	 * chosen granule.
	 */
	if (cfg->pgsize_bitmap & PAGE_SIZE)
		granule = PAGE_SIZE;
	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
	else if (cfg->pgsize_bitmap & PAGE_MASK)
		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
	else
		granule = 0;

	switch (granule) {
	case SZ_4K:
		page_sizes = (SZ_4K | SZ_2M | SZ_1G);
		break;
	case SZ_16K:
		page_sizes = (SZ_16K | SZ_32M);
		break;
	case SZ_64K:
		max_addr_bits = 52;
		page_sizes = (SZ_64K | SZ_512M);
		if (cfg->oas > 48)
			page_sizes |= 1ULL << 42; /* 4TB */
		break;
	default:
		page_sizes = 0;
	}

	cfg->pgsize_bitmap &= page_sizes;
	cfg->ias = min(cfg->ias, max_addr_bits);
	cfg->oas = min(cfg->oas, max_addr_bits);
}

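/*
 * Illustrative only: with a 4K CPU PAGE_SIZE and an incoming
 * pgsize_bitmap of SZ_4K | SZ_64K, the 4K granule wins and the bitmap
 * is masked against SZ_4K | SZ_2M | SZ_1G, leaving just SZ_4K.
 */
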
static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
	struct arm_lpae_io_pgtable *data;
	int levels, va_bits, pg_shift;

	arm_lpae_restrict_pgsizes(cfg);

	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
		return NULL;

	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (!selftest_running && cfg->iommu_dev->dma_pfn_offset) {
		dev_err(cfg->iommu_dev, "Cannot accommodate DMA offset for IOMMU page tables\n");
		return NULL;
	}

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	pg_shift = __ffs(cfg->pgsize_bitmap);
	data->bits_per_level = pg_shift - ilog2(sizeof(arm_lpae_iopte));

	va_bits = cfg->ias - pg_shift;
	levels = DIV_ROUND_UP(va_bits, data->bits_per_level);
	data->start_level = ARM_LPAE_MAX_LEVELS - levels;

	/* Calculate the actual size of our pgd (without concatenation) */
	data->pgd_bits = va_bits - (data->bits_per_level * (levels - 1));

	data->iop.ops = (struct io_pgtable_ops) {
		.map		= arm_lpae_map,
		.unmap		= arm_lpae_unmap,
		.iova_to_phys	= arm_lpae_iova_to_phys,
	};

	return data;
}

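/*
 * Illustrative only: for a 4K granule and a 48-bit IAS, pg_shift = 12,
 * bits_per_level = 9, va_bits = 36, levels = 4, start_level = 0 and
 * pgd_bits = 9, i.e. a single 4K level-0 table.
 */
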
static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct arm_lpae_io_pgtable *data;

	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
			    IO_PGTABLE_QUIRK_NON_STRICT))
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* TCR */
	if (cfg->coherent_walk) {
		reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
		      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
		      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
	} else {
		reg = (ARM_LPAE_TCR_SH_OS << ARM_LPAE_TCR_SH0_SHIFT) |
		      (ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_IRGN0_SHIFT) |
		      (ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_ORGN0_SHIFT);
	}

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 52:
		reg |= (ARM_LPAE_TCR_PS_52_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;

	/* Disable speculative walks through TTBR1 */
	reg |= ARM_LPAE_TCR_EPD1;
	cfg->arm_lpae_s1_cfg.tcr = reg;

	/* MAIRs */
	reg = (ARM_LPAE_MAIR_ATTR_NC
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
	      (ARM_LPAE_MAIR_ATTR_WBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
	      (ARM_LPAE_MAIR_ATTR_DEVICE
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV)) |
	      (ARM_LPAE_MAIR_ATTR_INC_OWBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE));

	cfg->arm_lpae_s1_cfg.mair = reg;

	/* Looking good; allocate a pgd */
	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
					   GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* TTBR */
	cfg->arm_lpae_s1_cfg.ttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg, sl;
	struct arm_lpae_io_pgtable *data;

	/* The NS quirk doesn't apply at stage 2 */
	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_NON_STRICT))
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/*
	 * Concatenate PGDs at level 1 if possible in order to reduce
	 * the depth of the stage-2 walk.
	 */
	if (data->start_level == 0) {
		unsigned long pgd_pages;

		pgd_pages = ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte);
		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
			data->pgd_bits += data->bits_per_level;
			data->start_level++;
		}
	}

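	/*
	 * Illustrative only: with a 4K granule and a 40-bit IAS, the
	 * level-0 table would hold two entries, so the walk is shortened
	 * by concatenating two level-1 pages (pgd_bits 1 -> 10) and
	 * starting the walk at level 1 instead.
	 */
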
	/* VTCR */
	reg = ARM_64_LPAE_S2_TCR_RES1 |
	     (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	sl = data->start_level;

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		sl++; /* SL0 format is different for 4K granule size */
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 52:
		reg |= (ARM_LPAE_TCR_PS_52_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
	reg |= (~sl & ARM_LPAE_TCR_SL0_MASK) << ARM_LPAE_TCR_SL0_SHIFT;
	cfg->arm_lpae_s2_cfg.vtcr = reg;

	/* Allocate pgd pages */
	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
					   GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* VTTBR */
	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 32 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
	if (iop) {
		cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE;
		cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff;
	}

	return iop;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 40 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
	if (iop)
		cfg->arm_lpae_s2_cfg.vtcr &= 0xffffffff;

	return iop;
}

static struct io_pgtable *
arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct arm_lpae_io_pgtable *data;

	/* No quirks for Mali (hopefully) */
	if (cfg->quirks)
		return NULL;

	if (cfg->ias > 48 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* Mali seems to need a full 4-level table regardless of IAS */
	if (data->start_level > 0) {
		data->start_level = 0;
		data->pgd_bits = 0;
	}
	/*
	 * MEMATTR: Mali has no actual notion of a non-cacheable type, so the
	 * best we can do is mimic the out-of-tree driver and hope that the
	 * "implementation-defined caching policy" is good enough. Similarly,
	 * we'll use it for the sake of a valid attribute for our 'device'
	 * index, although callers should never request that in practice.
	 */
	cfg->arm_mali_lpae_cfg.memattr =
		(ARM_MALI_LPAE_MEMATTR_IMP_DEF
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
		(ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
		(ARM_MALI_LPAE_MEMATTR_IMP_DEF
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));

	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), GFP_KERNEL,
					   cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before TRANSTAB can be written */
	wmb();

	cfg->arm_mali_lpae_cfg.transtab = virt_to_phys(data->pgd) |
					  ARM_MALI_LPAE_TTBR_READ_INNER |
					  ARM_MALI_LPAE_TTBR_ADRMODE_TABLE;
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns = {
	.alloc	= arm_mali_lpae_alloc_pgtable,
	.free	= arm_lpae_free_pgtable,
};

#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST

static struct io_pgtable_cfg *cfg_cookie __initdata;

static void __init dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void __init dummy_tlb_flush(unsigned long iova, size_t size,
				   size_t granule, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void __init dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
				      unsigned long iova, size_t granule,
				      void *cookie)
{
	dummy_tlb_flush(iova, granule, granule, cookie);
}

static const struct iommu_flush_ops dummy_tlb_ops __initconst = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_flush_walk	= dummy_tlb_flush,
	.tlb_flush_leaf	= dummy_tlb_flush,
	.tlb_add_page	= dummy_tlb_add_page,
};

static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
		cfg->pgsize_bitmap, cfg->ias);
	pr_err("data: %d levels, 0x%zx pgd_size, %u pg_shift, %u bits_per_level, pgd @ %p\n",
		ARM_LPAE_MAX_LEVELS - data->start_level, ARM_LPAE_PGD_SIZE(data),
		ilog2(ARM_LPAE_GRANULE(data)), data->bits_per_level, data->pgd);
}

#define __FAIL(ops, i)	({						\
		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
		arm_lpae_dump_ops(ops);					\
		selftest_running = false;				\
		-EFAULT;						\
})

static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
	static const enum io_pgtable_fmt fmts[] __initconst = {
		ARM_64_LPAE_S1,
		ARM_64_LPAE_S2,
	};

	int i, j;
	unsigned long iova;
	size_t size;
	struct io_pgtable_ops *ops;

	selftest_running = true;

	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
		cfg_cookie = cfg;
		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
		if (!ops) {
			pr_err("selftest: failed to allocate io pgtable ops\n");
			return -ENOMEM;
		}

		/*
		 * Initial sanity checks.
		 * Empty page tables shouldn't provide any translations.
		 */
		if (ops->iova_to_phys(ops, 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_2G + 42))
			return __FAIL(ops, i);

		/*
		 * Distinct mappings of different granule sizes.
		 */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->map(ops, iova, iova, size, IOMMU_READ |
							    IOMMU_WRITE |
							    IOMMU_NOEXEC |
							    IOMMU_CACHE))
				return __FAIL(ops, i);

			/* Overlapping mappings */
			if (!ops->map(ops, iova, iova + size, size,
				      IOMMU_READ | IOMMU_NOEXEC))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		/* Partial unmap */
		size = 1UL << __ffs(cfg->pgsize_bitmap);
		if (ops->unmap(ops, SZ_1G + size, size, NULL) != size)
			return __FAIL(ops, i);

		/* Remap of partial unmap */
		if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
			return __FAIL(ops, i);

		/* Full unmap */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->unmap(ops, iova, size, NULL) != size)
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42))
				return __FAIL(ops, i);

			/* Remap full block */
			if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		free_io_pgtable_ops(ops);
	}

	selftest_running = false;
	return 0;
}

static int __init arm_lpae_do_selftests(void)
{
	static const unsigned long pgsize[] __initconst = {
		SZ_4K | SZ_2M | SZ_1G,
		SZ_16K | SZ_32M,
		SZ_64K | SZ_512M,
	};

	static const unsigned int ias[] __initconst = {
		32, 36, 40, 42, 44, 48,
	};

	int i, j, pass = 0, fail = 0;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.oas = 48,
		.coherent_walk = true,
	};

	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
			cfg.pgsize_bitmap = pgsize[i];
			cfg.ias = ias[j];
			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
				pgsize[i], ias[j]);
			if (arm_lpae_run_tests(&cfg))
				fail++;
			else
				pass++;
		}
	}

	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
	return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);

#endif