/*
 * CPU-agnostic ARM page table allocator.
 *
 * ARMv7 Short-descriptor format, supporting
 * - Basic memory attributes
 * - Simplified access permissions (AP[2:1] model)
 * - Backwards-compatible TEX remap
 * - Large pages/supersections (if indicated by the caller)
 *
 * Not supporting:
 * - Legacy access permissions (AP[2:0] model)
 *
 * Almost certainly never supporting:
 * - PXN
 * - Domains
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) 2014-2015 ARM Limited
 * Copyright (c) 2014-2015 MediaTek Inc.
 */
#define pr_fmt(fmt)	"arm-v7s io-pgtable: " fmt
#include <linux/atomic.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include <asm/barrier.h>
/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct arm_v7s_io_pgtable, iop)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
/*
 * We have 32 bits total; 12 bits resolved at level 1, 8 bits at level 2,
 * and 12 bits in a page. With some carefully-chosen coefficients we can
 * hide the ugly inconsistencies behind these macros and at least let the
 * rest of the code pretend to be somewhat sane.
 */
#define ARM_V7S_ADDR_BITS		32
#define _ARM_V7S_LVL_BITS(lvl)		(16 - (lvl) * 4)
#define ARM_V7S_LVL_SHIFT(lvl)		(ARM_V7S_ADDR_BITS - (4 + 8 * (lvl)))
#define ARM_V7S_TABLE_SHIFT		10

#define ARM_V7S_PTES_PER_LVL(lvl)	(1 << _ARM_V7S_LVL_BITS(lvl))
#define ARM_V7S_TABLE_SIZE(lvl)						\
	(ARM_V7S_PTES_PER_LVL(lvl) * sizeof(arm_v7s_iopte))

#define ARM_V7S_BLOCK_SIZE(lvl)		(1UL << ARM_V7S_LVL_SHIFT(lvl))
#define ARM_V7S_LVL_MASK(lvl)		((u32)(~0U << ARM_V7S_LVL_SHIFT(lvl)))
#define ARM_V7S_TABLE_MASK		((u32)(~0U << ARM_V7S_TABLE_SHIFT))
#define _ARM_V7S_IDX_MASK(lvl)		(ARM_V7S_PTES_PER_LVL(lvl) - 1)
#define ARM_V7S_LVL_IDX(addr, lvl)	({				\
	int _l = lvl;							\
	((u32)(addr) >> ARM_V7S_LVL_SHIFT(_l)) & _ARM_V7S_IDX_MASK(_l); \
})
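
/*
 * A worked example of the coefficients above (added for illustration;
 * these values follow directly from the macros):
 *
 *	ARM_V7S_LVL_SHIFT(1)	= 20	ARM_V7S_LVL_SHIFT(2)	= 12
 *	_ARM_V7S_LVL_BITS(1)	= 12	_ARM_V7S_LVL_BITS(2)	= 8
 *	ARM_V7S_PTES_PER_LVL(1)	= 4096	ARM_V7S_PTES_PER_LVL(2)	= 256
 *	ARM_V7S_TABLE_SIZE(1)	= 16KB	ARM_V7S_TABLE_SIZE(2)	= 1KB
 *	ARM_V7S_BLOCK_SIZE(1)	= 1MB	ARM_V7S_BLOCK_SIZE(2)	= 4KB
 *
 * i.e. one 16KB L1 table of 4096 section entries covering 1MB each, and
 * 1KB L2 tables of 256 small-page entries covering 4KB each.
 */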
/*
 * Large page/supersection entries are effectively a block of 16 page/section
 * entries, along the lines of the LPAE contiguous hint, but all with the
 * same output address. For want of a better common name we'll call them
 * "contiguous" versions of their respective page/section entries here, but
 * noting the distinction (WRT TLB maintenance) that they represent *one*
 * entry repeated 16 times, not 16 separate entries (as in the LPAE case).
 */
#define ARM_V7S_CONT_PAGES		16
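
/*
 * Concretely (a worked example, not from the original source): 16 small
 * pages of 4KB make one 64KB large page, and 16 sections of 1MB make one
 * 16MB supersection.
 */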
/* PTE type bits: these are all mixed up with XN/PXN bits in most cases */
#define ARM_V7S_PTE_TYPE_TABLE		0x1
#define ARM_V7S_PTE_TYPE_PAGE		0x2
#define ARM_V7S_PTE_TYPE_CONT_PAGE	0x1

#define ARM_V7S_PTE_IS_VALID(pte)	(((pte) & 0x3) != 0)
#define ARM_V7S_PTE_IS_TABLE(pte, lvl) \
	((lvl) == 1 && (((pte) & 0x3) == ARM_V7S_PTE_TYPE_TABLE))
/* Page table bits */
#define ARM_V7S_ATTR_XN(lvl)		BIT(4 * (2 - (lvl)))
#define ARM_V7S_ATTR_B			BIT(2)
#define ARM_V7S_ATTR_C			BIT(3)
#define ARM_V7S_ATTR_NS_TABLE		BIT(3)
#define ARM_V7S_ATTR_NS_SECTION		BIT(19)

#define ARM_V7S_CONT_SECTION		BIT(18)
#define ARM_V7S_CONT_PAGE_XN_SHIFT	15
/*
 * The attribute bits are consistently ordered*, but occupy bits [17:10] of
 * a level 1 PTE vs. bits [11:4] at level 2. Thus we define the individual
 * fields relative to that 8-bit block, plus a total shift relative to the PTE.
 */
#define ARM_V7S_ATTR_SHIFT(lvl)		(16 - (lvl) * 6)

#define ARM_V7S_ATTR_MASK		0xff
#define ARM_V7S_ATTR_AP0		BIT(0)
#define ARM_V7S_ATTR_AP1		BIT(1)
#define ARM_V7S_ATTR_AP2		BIT(5)
#define ARM_V7S_ATTR_S			BIT(6)
#define ARM_V7S_ATTR_NG			BIT(7)
#define ARM_V7S_TEX_SHIFT		2
#define ARM_V7S_TEX_MASK		0x7
#define ARM_V7S_ATTR_TEX(val)		(((val) & ARM_V7S_TEX_MASK) << ARM_V7S_TEX_SHIFT)

#define ARM_V7S_ATTR_MTK_4GB		BIT(9) /* MTK extends this for 4GB mode */

/* *well, except for TEX on level 2 large pages, of course :( */
#define ARM_V7S_CONT_PAGE_TEX_SHIFT	6
#define ARM_V7S_CONT_PAGE_TEX_MASK	(ARM_V7S_TEX_MASK << ARM_V7S_CONT_PAGE_TEX_SHIFT)
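
/*
 * Worked example (added for illustration): ARM_V7S_ATTR_SHIFT(1) = 10 and
 * ARM_V7S_ATTR_SHIFT(2) = 4, so e.g. AP2 (bit 5 of the 8-bit attribute
 * block covered by ARM_V7S_ATTR_MASK) lands at bit 15 of a level 1 section
 * PTE but bit 9 of a level 2 page PTE, matching the ARMv7 short-descriptor
 * layouts.
 */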
/* Simplified access permissions */
#define ARM_V7S_PTE_AF			ARM_V7S_ATTR_AP0
#define ARM_V7S_PTE_AP_UNPRIV		ARM_V7S_ATTR_AP1
#define ARM_V7S_PTE_AP_RDONLY		ARM_V7S_ATTR_AP2
/* Register bits */
#define ARM_V7S_RGN_NC			0
#define ARM_V7S_RGN_WBWA		1
#define ARM_V7S_RGN_WT			2
#define ARM_V7S_RGN_WB			3

#define ARM_V7S_PRRR_TYPE_DEVICE	1
#define ARM_V7S_PRRR_TYPE_NORMAL	2
#define ARM_V7S_PRRR_TR(n, type)	(((type) & 0x3) << ((n) * 2))
#define ARM_V7S_PRRR_DS0		BIT(16)
#define ARM_V7S_PRRR_DS1		BIT(17)
#define ARM_V7S_PRRR_NS0		BIT(18)
#define ARM_V7S_PRRR_NS1		BIT(19)
#define ARM_V7S_PRRR_NOS(n)		BIT((n) + 24)

#define ARM_V7S_NMRR_IR(n, attr)	(((attr) & 0x3) << ((n) * 2))
#define ARM_V7S_NMRR_OR(n, attr)	(((attr) & 0x3) << ((n) * 2 + 16))

#define ARM_V7S_TTBR_S			BIT(1)
#define ARM_V7S_TTBR_NOS		BIT(5)
#define ARM_V7S_TTBR_ORGN_ATTR(attr)	(((attr) & 0x3) << 3)
#define ARM_V7S_TTBR_IRGN_ATTR(attr)					\
	((((attr) & 0x1) << 6) | (((attr) & 0x2) >> 1))

#define ARM_V7S_TCR_PD1			BIT(5)
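
/*
 * Worked example (added for illustration): the TTBR IRGN field is split
 * across non-contiguous register bits (attr bit 0 goes to register bit 6,
 * attr bit 1 to register bit 0), so ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_WBWA)
 * = ((1 & 0x1) << 6) | ((1 & 0x2) >> 1) = 0x40, whereas the contiguous ORGN
 * field gives ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_WBWA) = 1 << 3 = 0x8.
 */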
typedef u32 arm_v7s_iopte;

static bool selftest_running;

struct arm_v7s_io_pgtable {
	struct io_pgtable	iop;

	arm_v7s_iopte		*pgd;
	struct kmem_cache	*l2_tables;
	spinlock_t		split_lock;
};
static dma_addr_t __arm_v7s_dma_addr(void *pages)
{
	return (dma_addr_t)virt_to_phys(pages);
}
static arm_v7s_iopte *iopte_deref(arm_v7s_iopte pte, int lvl)
{
	if (ARM_V7S_PTE_IS_TABLE(pte, lvl))
		pte &= ARM_V7S_TABLE_MASK;
	else
		pte &= ARM_V7S_LVL_MASK(lvl);
	return phys_to_virt(pte);
}
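
/*
 * e.g. (illustrative, not in the original): a level 1 table entry keeps
 * address bits [31:10], since L2 tables are 1KB-sized and 1KB-aligned,
 * whereas a section entry keeps bits [31:20] of its 1MB-aligned output
 * address.
 */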
static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
				   struct arm_v7s_io_pgtable *data)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	struct device *dev = cfg->iommu_dev;
	phys_addr_t phys;
	dma_addr_t dma;
	size_t size = ARM_V7S_TABLE_SIZE(lvl);
	void *table = NULL;

	if (lvl == 1)
		table = (void *)__get_dma_pages(__GFP_ZERO, get_order(size));
	else if (lvl == 2)
		table = kmem_cache_zalloc(data->l2_tables, gfp | GFP_DMA);
	phys = virt_to_phys(table);
	if (phys != (arm_v7s_iopte)phys)
		/* Doesn't fit in PTE */
		goto out_free;
	if (table && !(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) {
		dma = dma_map_single(dev, table, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
		/*
		 * We depend on the IOMMU being able to work with any physical
		 * address directly, so if the DMA layer suggests otherwise by
		 * translating or truncating them, that bodes very badly...
		 */
		if (dma != phys)
			goto out_unmap;
	}
	kmemleak_ignore(table);
	return table;

out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
out_free:
	if (lvl == 1)
		free_pages((unsigned long)table, get_order(size));
	else
		kmem_cache_free(data->l2_tables, table);
	return NULL;
}
static void __arm_v7s_free_table(void *table, int lvl,
				 struct arm_v7s_io_pgtable *data)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	struct device *dev = cfg->iommu_dev;
	size_t size = ARM_V7S_TABLE_SIZE(lvl);

	if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA))
		dma_unmap_single(dev, __arm_v7s_dma_addr(table), size,
				 DMA_TO_DEVICE);
	if (lvl == 1)
		free_pages((unsigned long)table, get_order(size));
	else
		kmem_cache_free(data->l2_tables, table);
}
static void __arm_v7s_pte_sync(arm_v7s_iopte *ptep, int num_entries,
			       struct io_pgtable_cfg *cfg)
{
	if (cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)
		return;

	dma_sync_single_for_device(cfg->iommu_dev, __arm_v7s_dma_addr(ptep),
				   num_entries * sizeof(*ptep), DMA_TO_DEVICE);
}
static void __arm_v7s_set_pte(arm_v7s_iopte *ptep, arm_v7s_iopte pte,
			      int num_entries, struct io_pgtable_cfg *cfg)
{
	int i;

	for (i = 0; i < num_entries; i++)
		ptep[i] = pte;

	__arm_v7s_pte_sync(ptep, num_entries, cfg);
}
static arm_v7s_iopte arm_v7s_prot_to_pte(int prot, int lvl,
					 struct io_pgtable_cfg *cfg)
{
	bool ap = !(cfg->quirks & IO_PGTABLE_QUIRK_NO_PERMS);
	arm_v7s_iopte pte = ARM_V7S_ATTR_NG | ARM_V7S_ATTR_S;

	if (!(prot & IOMMU_MMIO))
		pte |= ARM_V7S_ATTR_TEX(1);
	if (ap) {
		pte |= ARM_V7S_PTE_AF;
		if (!(prot & IOMMU_PRIV))
			pte |= ARM_V7S_PTE_AP_UNPRIV;
		if (!(prot & IOMMU_WRITE))
			pte |= ARM_V7S_PTE_AP_RDONLY;
	}
	pte <<= ARM_V7S_ATTR_SHIFT(lvl);

	if ((prot & IOMMU_NOEXEC) && ap)
		pte |= ARM_V7S_ATTR_XN(lvl);
	if (prot & IOMMU_MMIO)
		pte |= ARM_V7S_ATTR_B;
	else if (prot & IOMMU_CACHE)
		pte |= ARM_V7S_ATTR_B | ARM_V7S_ATTR_C;

	pte |= ARM_V7S_PTE_TYPE_PAGE;
	if (lvl == 1 && (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS))
		pte |= ARM_V7S_ATTR_NS_SECTION;

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_4GB)
		pte |= ARM_V7S_ATTR_MTK_4GB;

	return pte;
}
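
/*
 * Worked example (added for illustration, not in the original source):
 * with no quirks set, prot = IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE at
 * level 2 gives pre-shift attrs NG|S|TEX(1)|AF|AP_UNPRIV = 0xc7; shifted
 * by ARM_V7S_ATTR_SHIFT(2) = 4 that becomes 0xc70, and adding B|C plus
 * the small-page type bit yields 0xc7e, ready to be ORed with the
 * physical address in arm_v7s_init_pte().
 */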
static int arm_v7s_pte_to_prot(arm_v7s_iopte pte, int lvl)
{
	int prot = IOMMU_READ;
	arm_v7s_iopte attr = pte >> ARM_V7S_ATTR_SHIFT(lvl);

	if (!(attr & ARM_V7S_PTE_AP_RDONLY))
		prot |= IOMMU_WRITE;
	if (!(attr & ARM_V7S_PTE_AP_UNPRIV))
		prot |= IOMMU_PRIV;
	if ((attr & (ARM_V7S_TEX_MASK << ARM_V7S_TEX_SHIFT)) == 0)
		prot |= IOMMU_MMIO;
	else if (pte & ARM_V7S_ATTR_C)
		prot |= IOMMU_CACHE;
	if (pte & ARM_V7S_ATTR_XN(lvl))
		prot |= IOMMU_NOEXEC;

	return prot;
}
static arm_v7s_iopte arm_v7s_pte_to_cont(arm_v7s_iopte pte, int lvl)
{
	if (lvl == 1) {
		pte |= ARM_V7S_CONT_SECTION;
	} else if (lvl == 2) {
		arm_v7s_iopte xn = pte & ARM_V7S_ATTR_XN(lvl);
		arm_v7s_iopte tex = pte & ARM_V7S_CONT_PAGE_TEX_MASK;

		pte ^= xn | tex | ARM_V7S_PTE_TYPE_PAGE;
		pte |= (xn << ARM_V7S_CONT_PAGE_XN_SHIFT) |
		       (tex << ARM_V7S_CONT_PAGE_TEX_SHIFT) |
		       ARM_V7S_PTE_TYPE_CONT_PAGE;
	}
	return pte;
}
static arm_v7s_iopte arm_v7s_cont_to_pte(arm_v7s_iopte pte, int lvl)
{
	if (lvl == 1) {
		pte &= ~ARM_V7S_CONT_SECTION;
	} else if (lvl == 2) {
		arm_v7s_iopte xn = pte & BIT(ARM_V7S_CONT_PAGE_XN_SHIFT);
		arm_v7s_iopte tex = pte & (ARM_V7S_CONT_PAGE_TEX_MASK <<
					   ARM_V7S_CONT_PAGE_TEX_SHIFT);

		pte ^= xn | tex | ARM_V7S_PTE_TYPE_CONT_PAGE;
		pte |= (xn >> ARM_V7S_CONT_PAGE_XN_SHIFT) |
		       (tex >> ARM_V7S_CONT_PAGE_TEX_SHIFT) |
		       ARM_V7S_PTE_TYPE_PAGE;
	}
	return pte;
}
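
/*
 * Worked round trip at level 2 (added for illustration): the 0xc7e
 * small-page attrs from the example above become 0x1c3d as a large page -
 * the type bits change from 0b10 to 0b01 and TEX moves from bits [8:6] up
 * to bits [14:12]. arm_v7s_cont_to_pte() performs the exact inverse.
 */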
static bool arm_v7s_pte_is_cont(arm_v7s_iopte pte, int lvl)
{
	if (lvl == 1 && !ARM_V7S_PTE_IS_TABLE(pte, lvl))
		return pte & ARM_V7S_CONT_SECTION;
	else if (lvl == 2)
		return !(pte & ARM_V7S_PTE_TYPE_PAGE);
	return false;
}
static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *, unsigned long,
			      size_t, int, arm_v7s_iopte *);
static int arm_v7s_init_pte(struct arm_v7s_io_pgtable *data,
			    unsigned long iova, phys_addr_t paddr, int prot,
			    int lvl, int num_entries, arm_v7s_iopte *ptep)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_v7s_iopte pte;
	int i;

	for (i = 0; i < num_entries; i++)
		if (ARM_V7S_PTE_IS_TABLE(ptep[i], lvl)) {
			/*
			 * We need to unmap and free the old table before
			 * overwriting it with a block entry.
			 */
			arm_v7s_iopte *tblp;
			size_t sz = ARM_V7S_BLOCK_SIZE(lvl);

			tblp = ptep - ARM_V7S_LVL_IDX(iova, lvl);
			if (WARN_ON(__arm_v7s_unmap(data, iova + i * sz,
						    sz, lvl, tblp) != sz))
				return -EINVAL;
		} else if (ptep[i]) {
			/* We require an unmap first */
			WARN_ON(!selftest_running);
			return -EEXIST;
		}

	pte = arm_v7s_prot_to_pte(prot, lvl, cfg);
	if (num_entries > 1)
		pte = arm_v7s_pte_to_cont(pte, lvl);

	pte |= paddr & ARM_V7S_LVL_MASK(lvl);

	__arm_v7s_set_pte(ptep, pte, num_entries, cfg);
	return 0;
}
static arm_v7s_iopte arm_v7s_install_table(arm_v7s_iopte *table,
					   arm_v7s_iopte *ptep,
					   arm_v7s_iopte curr,
					   struct io_pgtable_cfg *cfg)
{
	arm_v7s_iopte old, new;

	new = virt_to_phys(table) | ARM_V7S_PTE_TYPE_TABLE;
	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
		new |= ARM_V7S_ATTR_NS_TABLE;

	/*
	 * Ensure the table itself is visible before its PTE can be.
	 * Whilst we could get away with cmpxchg64_release below, this
	 * doesn't have any ordering semantics when !CONFIG_SMP.
	 */
	dma_wmb();

	old = cmpxchg_relaxed(ptep, curr, new);
	__arm_v7s_pte_sync(ptep, 1, cfg);

	return old;
}
static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova,
			 phys_addr_t paddr, size_t size, int prot,
			 int lvl, arm_v7s_iopte *ptep)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_v7s_iopte pte, *cptep;
	int num_entries = size >> ARM_V7S_LVL_SHIFT(lvl);

	/* Find our entry at the current level */
	ptep += ARM_V7S_LVL_IDX(iova, lvl);

	/* If we can install a leaf entry at this level, then do so */
	if (num_entries)
		return arm_v7s_init_pte(data, iova, paddr, prot,
					lvl, num_entries, ptep);

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl == 2))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = READ_ONCE(*ptep);
	if (!pte) {
		cptep = __arm_v7s_alloc_table(lvl + 1, GFP_ATOMIC, data);
		if (!cptep)
			return -ENOMEM;

		pte = arm_v7s_install_table(cptep, ptep, 0, cfg);
		if (pte)
			__arm_v7s_free_table(cptep, lvl + 1, data);
	} else {
		/* We've no easy way of knowing if it's synced yet, so... */
		__arm_v7s_pte_sync(ptep, 1, cfg);
	}

	if (ARM_V7S_PTE_IS_TABLE(pte, lvl)) {
		cptep = iopte_deref(pte, lvl);
	} else if (pte) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	}

	/* Rinse, repeat */
	return __arm_v7s_map(data, iova, paddr, size, prot, lvl + 1, cptep);
}
static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova,
		       phys_addr_t paddr, size_t size, int prot)
{
	struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable *iop = &data->iop;
	int ret;

	/* If no access, then nothing to do */
	if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

	if (WARN_ON(upper_32_bits(iova) || upper_32_bits(paddr)))
		return -ERANGE;

	ret = __arm_v7s_map(data, iova, paddr, size, prot, 1, data->pgd);
	/*
	 * Synchronise all PTE updates for the new mapping before there's
	 * a chance for anything to kick off a table walk for the new iova.
	 */
	if (iop->cfg.quirks & IO_PGTABLE_QUIRK_TLBI_ON_MAP) {
		io_pgtable_tlb_add_flush(iop, iova, size,
					 ARM_V7S_BLOCK_SIZE(2), false);
		io_pgtable_tlb_sync(iop);
	} else {
		wmb();
	}

	return ret;
}
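
/*
 * A minimal usage sketch (added for illustration; not part of the driver,
 * and deliberately kept out of the build): this is roughly how an IOMMU
 * driver would obtain and drive these ops, assuming a gather-ops
 * implementation along the lines of the dummy one in the selftest below.
 * example_v7s_usage() and its parameters are hypothetical names.
 */
#if 0
static int example_v7s_usage(struct device *iommu_dev,
			     const struct iommu_gather_ops *tlb_ops)
{
	struct io_pgtable_cfg cfg = {
		.quirks		= IO_PGTABLE_QUIRK_ARM_NS,
		.pgsize_bitmap	= SZ_4K | SZ_64K | SZ_1M | SZ_16M,
		.ias		= 32,
		.oas		= 32,
		.tlb		= tlb_ops,
		.iommu_dev	= iommu_dev,
	};
	struct io_pgtable_ops *ops;

	ops = alloc_io_pgtable_ops(ARM_V7S, &cfg, NULL);
	if (!ops)
		return -ENOMEM;

	/* Map 1MB at IOVA 0x40000000 to PA 0x80000000 as a section */
	if (ops->map(ops, 0x40000000, 0x80000000, SZ_1M,
		     IOMMU_READ | IOMMU_WRITE))
		return -EINVAL;

	WARN_ON(ops->iova_to_phys(ops, 0x40000042) != 0x80000042);

	/* ...program cfg.arm_v7s_cfg.ttbr[0]/tcr/prrr/nmrr into the IOMMU,
	 * then tear the mapping back down when done:
	 */
	WARN_ON(ops->unmap(ops, 0x40000000, SZ_1M) != SZ_1M);
	free_io_pgtable_ops(ops);
	return 0;
}
#endif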
static void arm_v7s_free_pgtable(struct io_pgtable *iop)
{
	struct arm_v7s_io_pgtable *data = io_pgtable_to_data(iop);
	int i;

	for (i = 0; i < ARM_V7S_PTES_PER_LVL(1); i++) {
		arm_v7s_iopte pte = data->pgd[i];

		if (ARM_V7S_PTE_IS_TABLE(pte, 1))
			__arm_v7s_free_table(iopte_deref(pte, 1), 2, data);
	}
	__arm_v7s_free_table(data->pgd, 1, data);
	kmem_cache_destroy(data->l2_tables);
	kfree(data);
}
static arm_v7s_iopte arm_v7s_split_cont(struct arm_v7s_io_pgtable *data,
					unsigned long iova, int idx, int lvl,
					arm_v7s_iopte *ptep)
{
	struct io_pgtable *iop = &data->iop;
	arm_v7s_iopte pte;
	size_t size = ARM_V7S_BLOCK_SIZE(lvl);
	int i;

	/* Check that we didn't lose a race to get the lock */
	pte = *ptep;
	if (!arm_v7s_pte_is_cont(pte, lvl))
		return pte;

	ptep -= idx & (ARM_V7S_CONT_PAGES - 1);
	pte = arm_v7s_cont_to_pte(pte, lvl);
	for (i = 0; i < ARM_V7S_CONT_PAGES; i++)
		ptep[i] = pte + i * size;

	__arm_v7s_pte_sync(ptep, ARM_V7S_CONT_PAGES, &iop->cfg);

	size *= ARM_V7S_CONT_PAGES;
	io_pgtable_tlb_add_flush(iop, iova, size, size, true);
	io_pgtable_tlb_sync(iop);
	return pte;
}
static size_t arm_v7s_split_blk_unmap(struct arm_v7s_io_pgtable *data,
				      unsigned long iova, size_t size,
				      arm_v7s_iopte blk_pte,
				      arm_v7s_iopte *ptep)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_v7s_iopte pte, *tablep;
	int i, unmap_idx, num_entries, num_ptes;

	tablep = __arm_v7s_alloc_table(2, GFP_ATOMIC, data);
	if (!tablep)
		return 0; /* Bytes unmapped */

	num_ptes = ARM_V7S_PTES_PER_LVL(2);
	num_entries = size >> ARM_V7S_LVL_SHIFT(2);
	unmap_idx = ARM_V7S_LVL_IDX(iova, 2);

	pte = arm_v7s_prot_to_pte(arm_v7s_pte_to_prot(blk_pte, 1), 2, cfg);
	if (num_entries > 1)
		pte = arm_v7s_pte_to_cont(pte, 2);

	for (i = 0; i < num_ptes; i += num_entries, pte += size) {
		/* Unmap! */
		if (i == unmap_idx)
			continue;

		__arm_v7s_set_pte(&tablep[i], pte, num_entries, cfg);
	}

	pte = arm_v7s_install_table(tablep, ptep, blk_pte, cfg);
	if (pte != blk_pte) {
		__arm_v7s_free_table(tablep, 2, data);

		if (!ARM_V7S_PTE_IS_TABLE(pte, 1))
			return 0;

		tablep = iopte_deref(pte, 1);
		return __arm_v7s_unmap(data, iova, size, 2, tablep);
	}

	io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true);
	io_pgtable_tlb_sync(&data->iop);
	return size;
}
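
/*
 * Worked example (added for illustration): unmapping 4KB from the middle
 * of a 1MB section leaves 255 small-page PTEs populated in the new L2
 * table, with only unmap_idx = ARM_V7S_LVL_IDX(iova, 2) left empty; the
 * cmpxchg in arm_v7s_install_table() then swaps the table in for the old
 * section entry, backing off gracefully if a concurrent update got there
 * first.
 */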
static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data,
			      unsigned long iova, size_t size, int lvl,
			      arm_v7s_iopte *ptep)
{
	arm_v7s_iopte pte[ARM_V7S_CONT_PAGES];
	struct io_pgtable *iop = &data->iop;
	int idx, i = 0, num_entries = size >> ARM_V7S_LVL_SHIFT(lvl);

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(lvl > 2))
		return 0;

	idx = ARM_V7S_LVL_IDX(iova, lvl);
	ptep += idx;
	do {
		pte[i] = READ_ONCE(ptep[i]);
		if (WARN_ON(!ARM_V7S_PTE_IS_VALID(pte[i])))
			return 0;
	} while (++i < num_entries);

	/*
	 * If we've hit a contiguous 'large page' entry at this level, it
	 * needs splitting first, unless we're unmapping the whole lot.
	 *
	 * For splitting, we can't rewrite 16 PTEs atomically, and since we
	 * can't necessarily assume TEX remap we don't have a software bit to
	 * mark live entries being split. In practice (i.e. DMA API code), we
	 * will never be splitting large pages anyway, so just wrap this edge
	 * case in a lock for the sake of correctness and be done with it.
	 */
	if (num_entries <= 1 && arm_v7s_pte_is_cont(pte[0], lvl)) {
		unsigned long flags;

		spin_lock_irqsave(&data->split_lock, flags);
		pte[0] = arm_v7s_split_cont(data, iova, idx, lvl, ptep);
		spin_unlock_irqrestore(&data->split_lock, flags);
	}

	/* If the size matches this level, we're in the right place */
	if (num_entries) {
		size_t blk_size = ARM_V7S_BLOCK_SIZE(lvl);

		__arm_v7s_set_pte(ptep, 0, num_entries, &iop->cfg);

		for (i = 0; i < num_entries; i++) {
			if (ARM_V7S_PTE_IS_TABLE(pte[i], lvl)) {
				/* Also flush any partial walks */
				io_pgtable_tlb_add_flush(iop, iova, blk_size,
					ARM_V7S_BLOCK_SIZE(lvl + 1), false);
				io_pgtable_tlb_sync(iop);
				ptep = iopte_deref(pte[i], lvl);
				__arm_v7s_free_table(ptep, lvl + 1, data);
			} else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) {
				/*
				 * Order the PTE update against queueing the IOVA, to
				 * guarantee that a flush callback from a different CPU
				 * has observed it before the TLBIALL can be issued.
				 */
				smp_wmb();
			} else {
				io_pgtable_tlb_add_flush(iop, iova, blk_size,
							 blk_size, true);
			}
			iova += blk_size;
		}
		return size;
	} else if (lvl == 1 && !ARM_V7S_PTE_IS_TABLE(pte[0], lvl)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_v7s_split_blk_unmap(data, iova, size, pte[0], ptep);
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte[0], lvl);
	return __arm_v7s_unmap(data, iova, size, lvl + 1, ptep);
}
static size_t arm_v7s_unmap(struct io_pgtable_ops *ops, unsigned long iova,
			    size_t size)
{
	struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);

	if (WARN_ON(upper_32_bits(iova)))
		return 0;

	return __arm_v7s_unmap(data, iova, size, 1, data->pgd);
}
static phys_addr_t arm_v7s_iova_to_phys(struct io_pgtable_ops *ops,
					unsigned long iova)
{
	struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_v7s_iopte *ptep = data->pgd, pte;
	int lvl = 0;
	u32 mask;

	do {
		ptep += ARM_V7S_LVL_IDX(iova, ++lvl);
		pte = READ_ONCE(*ptep);
		ptep = iopte_deref(pte, lvl);
	} while (ARM_V7S_PTE_IS_TABLE(pte, lvl));

	if (!ARM_V7S_PTE_IS_VALID(pte))
		return 0;

	mask = ARM_V7S_LVL_MASK(lvl);
	if (arm_v7s_pte_is_cont(pte, lvl))
		mask *= ARM_V7S_CONT_PAGES;
	return (pte & mask) | (iova & ~mask);
}
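
/*
 * Worked example (added for illustration): for a 64KB large page the walk
 * stops at lvl = 2, where mask = ARM_V7S_LVL_MASK(2) = 0xfffff000; the
 * cont case multiplies that by 16 to 0xffff0000, so the low 16 bits of
 * the IOVA pass straight through to the returned physical address.
 */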
static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
						void *cookie)
{
	struct arm_v7s_io_pgtable *data;

	if (cfg->ias > ARM_V7S_ADDR_BITS || cfg->oas > ARM_V7S_ADDR_BITS)
		return NULL;

	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
			    IO_PGTABLE_QUIRK_NO_PERMS |
			    IO_PGTABLE_QUIRK_TLBI_ON_MAP |
			    IO_PGTABLE_QUIRK_ARM_MTK_4GB |
			    IO_PGTABLE_QUIRK_NO_DMA |
			    IO_PGTABLE_QUIRK_NON_STRICT))
		return NULL;

	/* If ARM_MTK_4GB is enabled, the NO_PERMS is also expected. */
	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_4GB &&
	    !(cfg->quirks & IO_PGTABLE_QUIRK_NO_PERMS))
		return NULL;

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	spin_lock_init(&data->split_lock);
	data->l2_tables = kmem_cache_create("io-pgtable_armv7s_l2",
					    ARM_V7S_TABLE_SIZE(2),
					    ARM_V7S_TABLE_SIZE(2),
					    SLAB_CACHE_DMA, NULL);
	if (!data->l2_tables)
		goto out_free_data;

	data->iop.ops = (struct io_pgtable_ops) {
		.map		= arm_v7s_map,
		.unmap		= arm_v7s_unmap,
		.iova_to_phys	= arm_v7s_iova_to_phys,
	};

	/* We have to do this early for __arm_v7s_alloc_table to work... */
	data->iop.cfg = *cfg;

	/*
	 * Unless the IOMMU driver indicates supersection support by
	 * having SZ_16M set in the initial bitmap, they won't be used.
	 */
	cfg->pgsize_bitmap &= SZ_4K | SZ_64K | SZ_1M | SZ_16M;

	/* TCR: T0SZ=0, disable TTBR1 */
	cfg->arm_v7s_cfg.tcr = ARM_V7S_TCR_PD1;

	/*
	 * TEX remap: the indices used map to the closest equivalent types
	 * under the non-TEX-remap interpretation of those attribute bits,
	 * excepting various implementation-defined aspects of shareability.
	 */
	cfg->arm_v7s_cfg.prrr = ARM_V7S_PRRR_TR(1, ARM_V7S_PRRR_TYPE_DEVICE) |
				ARM_V7S_PRRR_TR(4, ARM_V7S_PRRR_TYPE_NORMAL) |
				ARM_V7S_PRRR_TR(7, ARM_V7S_PRRR_TYPE_NORMAL) |
				ARM_V7S_PRRR_DS0 | ARM_V7S_PRRR_DS1 |
				ARM_V7S_PRRR_NS1 | ARM_V7S_PRRR_NOS(7);
	cfg->arm_v7s_cfg.nmrr = ARM_V7S_NMRR_IR(7, ARM_V7S_RGN_WBWA) |
				ARM_V7S_NMRR_OR(7, ARM_V7S_RGN_WBWA);

	/* Looking good; allocate a pgd */
	data->pgd = __arm_v7s_alloc_table(1, GFP_KERNEL, data);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* TTBRs */
	cfg->arm_v7s_cfg.ttbr[0] = virt_to_phys(data->pgd) |
				   ARM_V7S_TTBR_S | ARM_V7S_TTBR_NOS |
				   ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_WBWA) |
				   ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_WBWA);
	cfg->arm_v7s_cfg.ttbr[1] = 0;
	return &data->iop;

out_free_data:
	kmem_cache_destroy(data->l2_tables);
	kfree(data);
	return NULL;
}
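
/*
 * Worked example (added for illustration): with WBWA = 1 for both walk
 * attributes, the TTBR0 low bits above come to ARM_V7S_TTBR_S |
 * ARM_V7S_TTBR_NOS | ARM_V7S_TTBR_IRGN_ATTR(1) | ARM_V7S_TTBR_ORGN_ATTR(1)
 * = 0x2 | 0x20 | 0x40 | 0x8 = 0x6a, ORed into the 16KB-aligned pgd
 * physical address.
 */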
struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns = {
	.alloc	= arm_v7s_alloc_pgtable,
	.free	= arm_v7s_free_pgtable,
};
#ifdef CONFIG_IOMMU_IO_PGTABLE_ARMV7S_SELFTEST

static struct io_pgtable_cfg *cfg_cookie;

static void dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void dummy_tlb_add_flush(unsigned long iova, size_t size,
				size_t granule, bool leaf, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void dummy_tlb_sync(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static const struct iommu_gather_ops dummy_tlb_ops = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_add_flush	= dummy_tlb_add_flush,
	.tlb_sync	= dummy_tlb_sync,
};

#define __FAIL(ops)	({				\
		WARN(1, "selftest: test failed\n");	\
		selftest_running = false;		\
		-EFAULT;				\
})
static int __init arm_v7s_do_selftests(void)
{
	struct io_pgtable_ops *ops;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.oas = 32,
		.ias = 32,
		.quirks = IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_DMA,
		.pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
	};
	unsigned int iova, size, iova_start;
	unsigned int i, loopnr = 0;

	selftest_running = true;

	cfg_cookie = &cfg;

	ops = alloc_io_pgtable_ops(ARM_V7S, &cfg, &cfg);
	if (!ops) {
		pr_err("selftest: failed to allocate io pgtable ops\n");
		return -EINVAL;
	}

	/*
	 * Initial sanity checks.
	 * Empty page tables shouldn't provide any translations.
	 */
	if (ops->iova_to_phys(ops, 42))
		return __FAIL(ops);

	if (ops->iova_to_phys(ops, SZ_1G + 42))
		return __FAIL(ops);

	if (ops->iova_to_phys(ops, SZ_2G + 42))
		return __FAIL(ops);
	/*
	 * Distinct mappings of different granule sizes.
	 */
	iova = 0;
	for_each_set_bit(i, &cfg.pgsize_bitmap, BITS_PER_LONG) {
		size = 1UL << i;

		if (ops->map(ops, iova, iova, size, IOMMU_READ |
						    IOMMU_WRITE |
						    IOMMU_NOEXEC |
						    IOMMU_CACHE))
			return __FAIL(ops);

		/* Overlapping mappings */
		if (!ops->map(ops, iova, iova + size, size,
			      IOMMU_READ | IOMMU_NOEXEC))
			return __FAIL(ops);

		if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
			return __FAIL(ops);

		iova += SZ_16M;
		loopnr++;
	}

	/* Partial unmap */
	i = 1;
	size = 1UL << __ffs(cfg.pgsize_bitmap);
	while (i < loopnr) {
		iova_start = i * SZ_16M;
		if (ops->unmap(ops, iova_start + size, size) != size)
			return __FAIL(ops);

		/* Remap of partial unmap */
		if (ops->map(ops, iova_start + size, size, size, IOMMU_READ))
			return __FAIL(ops);

		if (ops->iova_to_phys(ops, iova_start + size + 42)
		    != (iova_start + size + 42))
			return __FAIL(ops);
		i++;
	}

	/* Full unmap */
	iova = 0;
	for_each_set_bit(i, &cfg.pgsize_bitmap, BITS_PER_LONG) {
		size = 1UL << i;

		if (ops->unmap(ops, iova, size) != size)
			return __FAIL(ops);

		if (ops->iova_to_phys(ops, iova + 42))
			return __FAIL(ops);

		/* Remap full block */
		if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
			return __FAIL(ops);

		if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
			return __FAIL(ops);

		iova += SZ_16M;
	}

	free_io_pgtable_ops(ops);

	selftest_running = false;

	pr_info("self test ok\n");
	return 0;
}
subsys_initcall(arm_v7s_do_selftests);
#endif