diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index ca51036aa53c7140470fbc0fb3abbc975e7a7a45..bdf47f74526879d1316f640e2308a0a3871931f1 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
 #define io_pgtable_ops_to_data(x)                                      \
        io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
 
-/*
- * For consistency with the architecture, we always consider
- * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >=0
- */
-#define ARM_LPAE_START_LVL(d)          (ARM_LPAE_MAX_LEVELS - (d)->levels)
-
 /*
  * Calculate the right shift amount to get to the portion describing level l
  * in a virtual address mapped by the pagetable in d.
  */
 #define ARM_LPAE_LVL_SHIFT(l,d)                                                \
-       ((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1))             \
-         * (d)->bits_per_level) + (d)->pg_shift)
+       (((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level) +          \
+       ilog2(sizeof(arm_lpae_iopte)))
 
-#define ARM_LPAE_GRANULE(d)            (1UL << (d)->pg_shift)
-
-#define ARM_LPAE_PAGES_PER_PGD(d)                                      \
-       DIV_ROUND_UP((d)->pgd_size, ARM_LPAE_GRANULE(d))
+#define ARM_LPAE_GRANULE(d)                                            \
+       (sizeof(arm_lpae_iopte) << (d)->bits_per_level)
+#define ARM_LPAE_PGD_SIZE(d)                                           \
+       (sizeof(arm_lpae_iopte) << (d)->pgd_bits)
 
 /*
  * Calculate the index at level l used to map virtual address a using the
  * pagetable in d.
  */
 #define ARM_LPAE_PGD_IDX(l,d)                                          \
-       ((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)
+       ((l) == (d)->start_level ? (d)->pgd_bits - (d)->bits_per_level : 0)
 
 #define ARM_LPAE_LVL_IDX(a,l,d)                                                \
        (((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &                        \
         ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))
 
 /* Calculate the block/page mapping size at level l for pagetable in d. */
-#define ARM_LPAE_BLOCK_SIZE(l,d)                                       \
-       (1ULL << (ilog2(sizeof(arm_lpae_iopte)) +                       \
-               ((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level)))
+#define ARM_LPAE_BLOCK_SIZE(l,d)       (1ULL << ARM_LPAE_LVL_SHIFT(l,d))
 
 /* Page table bits */
 #define ARM_LPAE_PTE_TYPE_SHIFT                0
 struct arm_lpae_io_pgtable {
        struct io_pgtable       iop;
 
-       int                     levels;
-       size_t                  pgd_size;
-       unsigned long           pg_shift;
-       unsigned long           bits_per_level;
+       int                     pgd_bits;
+       int                     start_level;
+       int                     bits_per_level;
 
        void                    *pgd;
 };
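
[Annotation, not part of the patch] As a reference for the macro rework above, here is a minimal userspace sketch that recomputes the new geometry fields for an assumed stage-1 table with a 4KiB granule and a 48-bit IAS, following the same arithmetic as ARM_LPAE_LVL_SHIFT, ARM_LPAE_GRANULE and ARM_LPAE_PGD_SIZE. All identifiers here are illustrative stand-ins:

	#include <stdint.h>
	#include <stdio.h>

	#define MAX_LEVELS	4
	typedef uint64_t iopte_ex;	/* stands in for arm_lpae_iopte (8 bytes) */

	int main(void)
	{
		int pg_shift = 12, ias = 48;		/* assumed 4KiB granule, 48-bit IAS */
		int bits_per_level = pg_shift - 3;	/* ilog2(sizeof(iopte_ex)) == 3, so 9 */
		int va_bits = ias - pg_shift;		/* 36 */
		int levels = (va_bits + bits_per_level - 1) / bits_per_level;	/* 4 */
		int start_level = MAX_LEVELS - levels;	/* 0 */
		int pgd_bits = va_bits - bits_per_level * (levels - 1);	/* 9 */

		printf("start_level=%d pgd_bits=%d\n", start_level, pgd_bits);
		printf("granule=%zu pgd_size=%zu\n",	/* both 4096 in this case */
		       sizeof(iopte_ex) << bits_per_level, sizeof(iopte_ex) << pgd_bits);
		for (int l = start_level; l < MAX_LEVELS; l++)	/* shifts 39, 30, 21, 12 */
			printf("lvl %d shift %d\n", l, (MAX_LEVELS - l) * bits_per_level + 3);
		return 0;
	}

The old and new shift formulas agree because pg_shift equals bits_per_level plus ilog2(sizeof(arm_lpae_iopte)), which is exactly why pg_shift no longer needs to be stored in the table data.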
@@ -213,7 +204,7 @@ static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte,
 {
        u64 paddr = pte & ARM_LPAE_PTE_ADDR_MASK;
 
-       if (data->pg_shift < 16)
+       if (ARM_LPAE_GRANULE(data) < SZ_64K)
                return paddr;
 
        /* Rotate the packed high-order bits back to the top */
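
[Annotation, not part of the patch] The granule-based test is equivalent to the old pg_shift < 16 check: on this version of the architecture only the 64KiB granule can carry output addresses above 48 bits, with PA[51:48] packed into pte bits [15:12]. A hedged standalone illustration of the unpacking, where the address-mask constant is assumed rather than quoted from the file:

	#include <assert.h>
	#include <stdint.h>

	#define ADDR_MASK_EX	0x0000fffffffff000ULL	/* assumed pte address field, bits [47:12] */

	static uint64_t unpack_paddr_64k(uint64_t pte)
	{
		uint64_t paddr = pte & ADDR_MASK_EX;

		/* Rotate the packed bits [15:12] back up to PA[51:48] */
		return (paddr | (paddr << (48 - 12))) & (ADDR_MASK_EX << 4);
	}

	int main(void)
	{
		/* pte bits [15:12] = 0xf come back as PA bits [51:48] */
		assert(unpack_paddr_64k(0xf000) == 0x000f000000000000ULL);
		return 0;
	}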
@@ -392,7 +383,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
        ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
 
        /* If we can install a leaf entry at this level, then do so */
-       if (size == block_size && (size & cfg->pgsize_bitmap))
+       if (size == block_size)
                return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);
 
        /* We can't allocate tables at the final level */
@@ -464,7 +455,7 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
                else if (prot & IOMMU_CACHE)
                        pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
                                << ARM_LPAE_PTE_ATTRINDX_SHIFT);
-               else if (prot & IOMMU_QCOM_SYS_CACHE)
+               else if (prot & IOMMU_SYS_CACHE_ONLY)
                        pte |= (ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE
                                << ARM_LPAE_PTE_ATTRINDX_SHIFT);
        }
@@ -479,16 +470,19 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
                        phys_addr_t paddr, size_t size, int iommu_prot)
 {
        struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
+       struct io_pgtable_cfg *cfg = &data->iop.cfg;
        arm_lpae_iopte *ptep = data->pgd;
-       int ret, lvl = ARM_LPAE_START_LVL(data);
+       int ret, lvl = data->start_level;
        arm_lpae_iopte prot;
 
        /* If no access, then nothing to do */
        if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
                return 0;
 
-       if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) ||
-                   paddr >= (1ULL << data->iop.cfg.oas)))
+       if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size))
+               return -EINVAL;
+
+       if (WARN_ON(iova >> data->iop.cfg.ias || paddr >> data->iop.cfg.oas))
                return -ERANGE;
 
        prot = arm_lpae_prot_to_pte(data, iommu_prot);
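
[Annotation, not part of the patch] The new up-front check rejects any size containing a bit that is not a supported page or block size, which is what allows the per-level size & cfg->pgsize_bitmap test in __arm_lpae_map() to go away. A small sketch with made-up constants:

	#include <assert.h>
	#include <stddef.h>

	#define SZ_4K_EX	0x00001000UL
	#define SZ_2M_EX	0x00200000UL
	#define SZ_1G_EX	0x40000000UL

	int main(void)
	{
		unsigned long pgsize_bitmap = SZ_4K_EX | SZ_2M_EX | SZ_1G_EX;
		size_t ok = SZ_2M_EX;		/* a supported block size */
		size_t bad = 2 * SZ_4K_EX;	/* 8KiB: contains an unsupported bit */

		assert((ok & pgsize_bitmap) == ok);	/* would proceed to map */
		assert((bad & pgsize_bitmap) != bad);	/* would WARN and return -EINVAL */
		return 0;
	}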
@@ -508,8 +502,8 @@ static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
        arm_lpae_iopte *start, *end;
        unsigned long table_size;
 
-       if (lvl == ARM_LPAE_START_LVL(data))
-               table_size = data->pgd_size;
+       if (lvl == data->start_level)
+               table_size = ARM_LPAE_PGD_SIZE(data);
        else
                table_size = ARM_LPAE_GRANULE(data);
 
@@ -537,7 +531,7 @@ static void arm_lpae_free_pgtable(struct io_pgtable *iop)
 {
        struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
 
-       __arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd);
+       __arm_lpae_free_pgtable(data, data->start_level, data->pgd);
        kfree(data);
 }
 
@@ -652,13 +646,16 @@ static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
                             size_t size, struct iommu_iotlb_gather *gather)
 {
        struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
+       struct io_pgtable_cfg *cfg = &data->iop.cfg;
        arm_lpae_iopte *ptep = data->pgd;
-       int lvl = ARM_LPAE_START_LVL(data);
 
-       if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias)))
+       if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size))
+               return 0;
+
+       if (WARN_ON(iova >> data->iop.cfg.ias))
                return 0;
 
-       return __arm_lpae_unmap(data, gather, iova, size, lvl, ptep);
+       return __arm_lpae_unmap(data, gather, iova, size, data->start_level, ptep);
 }
 
 static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
@@ -666,7 +663,7 @@ static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
 {
        struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
        arm_lpae_iopte pte, *ptep = data->pgd;
-       int lvl = ARM_LPAE_START_LVL(data);
+       int lvl = data->start_level;
 
        do {
                /* Valid IOPTE pointer? */
@@ -743,8 +740,8 @@ static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
 static struct arm_lpae_io_pgtable *
 arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
 {
-       unsigned long va_bits, pgd_bits;
        struct arm_lpae_io_pgtable *data;
+       int levels, va_bits, pg_shift;
 
        arm_lpae_restrict_pgsizes(cfg);
 
@@ -766,15 +763,15 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
        if (!data)
                return NULL;
 
-       data->pg_shift = __ffs(cfg->pgsize_bitmap);
-       data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte));
+       pg_shift = __ffs(cfg->pgsize_bitmap);
+       data->bits_per_level = pg_shift - ilog2(sizeof(arm_lpae_iopte));
 
-       va_bits = cfg->ias - data->pg_shift;
-       data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level);
+       va_bits = cfg->ias - pg_shift;
+       levels = DIV_ROUND_UP(va_bits, data->bits_per_level);
+       data->start_level = ARM_LPAE_MAX_LEVELS - levels;
 
        /* Calculate the actual size of our pgd (without concatenation) */
-       pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
-       data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));
+       data->pgd_bits = va_bits - (data->bits_per_level * (levels - 1));
 
        data->iop.ops = (struct io_pgtable_ops) {
                .map            = arm_lpae_map,
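
[Annotation, not part of the patch] To show the case where the walk does not begin at level 0, here is another assumed configuration (16KiB granule, 36-bit IAS) worked through the same arithmetic as above, written as a compile-time sketch rather than kernel code:

	#include <assert.h>

	/* Hypothetical 16KiB-granule, 36-bit IAS table */
	enum {
		PG_SHIFT_EX	= 14,
		VA_BITS_EX	= 36 - PG_SHIFT_EX,			/* 22 */
		BPL_EX		= PG_SHIFT_EX - 3,			/* 11 bits per level */
		LEVELS_EX	= (VA_BITS_EX + BPL_EX - 1) / BPL_EX,	/* 2 */
		START_LVL_EX	= 4 - LEVELS_EX,			/* 2 */
		PGD_BITS_EX	= VA_BITS_EX - BPL_EX * (LEVELS_EX - 1),/* 11 */
	};

	static_assert(START_LVL_EX == 2, "walk starts at level 2");
	static_assert((8 << PGD_BITS_EX) == 16384, "pgd is one 16KiB granule");
	static_assert(1ULL << ((4 - START_LVL_EX) * BPL_EX + 3) == 32 << 20,
		      "level-2 block size is 32MiB");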
@@ -864,11 +861,11 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
              (ARM_LPAE_MAIR_ATTR_INC_OWBRWA
               << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE));
 
-       cfg->arm_lpae_s1_cfg.mair[0] = reg;
-       cfg->arm_lpae_s1_cfg.mair[1] = 0;
+       cfg->arm_lpae_s1_cfg.mair = reg;
 
        /* Looking good; allocate a pgd */
-       data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
+       data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
+                                          GFP_KERNEL, cfg);
        if (!data->pgd)
                goto out_free_data;
 
@@ -903,13 +900,13 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
         * Concatenate PGDs at level 1 if possible in order to reduce
         * the depth of the stage-2 walk.
         */
-       if (data->levels == ARM_LPAE_MAX_LEVELS) {
+       if (data->start_level == 0) {
                unsigned long pgd_pages;
 
-               pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte));
+               pgd_pages = ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte);
                if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
-                       data->pgd_size = pgd_pages << data->pg_shift;
-                       data->levels--;
+                       data->pgd_bits += data->bits_per_level;
+                       data->start_level++;
                }
        }
 
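[Annotation, not part of the patch] The concatenation rework is easiest to check with numbers. Under an assumed stage-2 configuration with a 4KiB granule and a 40-bit IPA space, the initial pgd would hold only two entries, so the code above folds one level's worth of bits into the pgd and starts the walk at level 1 instead; the concatenation limit is taken to be 16 pages here, matching what ARM_LPAE_S2_MAX_CONCAT_PAGES is assumed to be elsewhere in the file. A compile-time sketch:

	#include <assert.h>

	/* Assumed stage-2 geometry: 4KiB granule, ias = 40 */
	enum {
		BPL_S2		= 12 - 3,				/* 9 */
		VA_BITS_S2	= 40 - 12,				/* 28 */
		LEVELS_S2	= (VA_BITS_S2 + BPL_S2 - 1) / BPL_S2,	/* 4, so start_level = 0 */
		PGD_BITS_S2	= VA_BITS_S2 - BPL_S2 * (LEVELS_S2 - 1),/* 1 */
		PGD_PAGES_S2	= (8 << PGD_BITS_S2) / 8,		/* 2 */
		MAX_CONCAT_EX	= 16,	/* assumed value of ARM_LPAE_S2_MAX_CONCAT_PAGES */
	};

	static_assert(PGD_PAGES_S2 <= MAX_CONCAT_EX, "eligible for concatenation");
	static_assert((8 << (PGD_BITS_S2 + BPL_S2)) == 2 * 4096,
		      "concatenated pgd is two 4KiB level-1 tables; walk starts at level 1");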
@@ -919,7 +916,7 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
             (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
             (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
 
-       sl = ARM_LPAE_START_LVL(data);
+       sl = data->start_level;
 
        switch (ARM_LPAE_GRANULE(data)) {
        case SZ_4K:
@@ -965,7 +962,8 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
        cfg->arm_lpae_s2_cfg.vtcr = reg;
 
        /* Allocate pgd pages */
-       data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
+       data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
+                                          GFP_KERNEL, cfg);
        if (!data->pgd)
                goto out_free_data;
 
@@ -1034,9 +1032,9 @@ arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
                return NULL;
 
        /* Mali seems to need a full 4-level table regardless of IAS */
-       if (data->levels < ARM_LPAE_MAX_LEVELS) {
-               data->levels = ARM_LPAE_MAX_LEVELS;
-               data->pgd_size = sizeof(arm_lpae_iopte);
+       if (data->start_level > 0) {
+               data->start_level = 0;
+               data->pgd_bits = 0;
        }
        /*
         * MEMATTR: Mali has no actual notion of a non-cacheable type, so the
@@ -1053,7 +1051,8 @@ arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
                (ARM_MALI_LPAE_MEMATTR_IMP_DEF
                 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));
 
-       data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
+       data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), GFP_KERNEL,
+                                          cfg);
        if (!data->pgd)
                goto out_free_data;
 
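[Annotation, not part of the patch] For Mali, forcing pgd_bits to 0 is the direct translation of the old data->pgd_size = sizeof(arm_lpae_iopte): ARM_LPAE_PGD_SIZE(d) then degenerates to a single 8-byte entry while the walk still starts at level 0. A trivial compile-time check of that equivalence, with iopte_ex as a stand-in type:

	#include <assert.h>
	#include <stdint.h>

	typedef uint64_t iopte_ex;	/* stands in for arm_lpae_iopte */

	/* sizeof(iopte_ex) << 0 == sizeof(iopte_ex): a one-entry pgd, as before */
	static_assert((sizeof(iopte_ex) << 0) == sizeof(iopte_ex),
		      "pgd_bits == 0 keeps the Mali pgd at one table entry");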
@@ -1097,22 +1096,23 @@ struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns = {
 
 #ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST
 
-static struct io_pgtable_cfg *cfg_cookie;
+static struct io_pgtable_cfg *cfg_cookie __initdata;
 
-static void dummy_tlb_flush_all(void *cookie)
+static void __init dummy_tlb_flush_all(void *cookie)
 {
        WARN_ON(cookie != cfg_cookie);
 }
 
-static void dummy_tlb_flush(unsigned long iova, size_t size, size_t granule,
-                           void *cookie)
+static void __init dummy_tlb_flush(unsigned long iova, size_t size,
+                                  size_t granule, void *cookie)
 {
        WARN_ON(cookie != cfg_cookie);
        WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
 }
 
-static void dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
-                              unsigned long iova, size_t granule, void *cookie)
+static void __init dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
+                                     unsigned long iova, size_t granule,
+                                     void *cookie)
 {
        dummy_tlb_flush(iova, granule, granule, cookie);
 }
@@ -1131,9 +1131,9 @@ static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
 
        pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
                cfg->pgsize_bitmap, cfg->ias);
-       pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n",
-               data->levels, data->pgd_size, data->pg_shift,
-               data->bits_per_level, data->pgd);
+       pr_err("data: %d levels, 0x%zx pgd_size, %u pg_shift, %u bits_per_level, pgd @ %p\n",
+               ARM_LPAE_MAX_LEVELS - data->start_level, ARM_LPAE_PGD_SIZE(data),
+               ilog2(ARM_LPAE_GRANULE(data)), data->bits_per_level, data->pgd);
 }
 
 #define __FAIL(ops, i) ({                                              \
@@ -1145,7 +1145,7 @@ static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
 
 static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
 {
-       static const enum io_pgtable_fmt fmts[] = {
+       static const enum io_pgtable_fmt fmts[] __initconst = {
                ARM_64_LPAE_S1,
                ARM_64_LPAE_S2,
        };
@@ -1244,13 +1244,13 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
 
 static int __init arm_lpae_do_selftests(void)
 {
-       static const unsigned long pgsize[] = {
+       static const unsigned long pgsize[] __initconst = {
                SZ_4K | SZ_2M | SZ_1G,
                SZ_16K | SZ_32M,
                SZ_64K | SZ_512M,
        };
 
-       static const unsigned int ias[] = {
+       static const unsigned int ias[] __initconst = {
                32, 36, 40, 42, 44, 48,
        };