asedeno.scripts.mit.edu Git - linux.git/blobdiff - drivers/iommu/arm-smmu.c
iommu/arm-smmu: Add implementation infrastructure
[linux.git] / drivers / iommu / arm-smmu.c
index e72554f334eeb20ebca7e9e2f95cb1b138fe2d2d..1e81531828301dc7813b3f40895f021fb076ae2c 100644 (file)
@@ -19,7 +19,6 @@
 
 #include <linux/acpi.h>
 #include <linux/acpi_iort.h>
-#include <linux/atomic.h>
 #include <linux/bitfield.h>
 #include <linux/delay.h>
 #include <linux/dma-iommu.h>
@@ -29,7 +28,6 @@
 #include <linux/io.h>
 #include <linux/io-64-nonatomic-hi-lo.h>
 #include <linux/io-pgtable.h>
-#include <linux/iommu.h>
 #include <linux/iopoll.h>
 #include <linux/init.h>
 #include <linux/moduleparam.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/slab.h>
-#include <linux/spinlock.h>
 
 #include <linux/amba/bus.h>
 #include <linux/fsl/mc.h>
 
-#include "arm-smmu-regs.h"
+#include "arm-smmu.h"
 
 /*
  * Apparently, some Qualcomm arm64 platforms which appear to expose their SMMU
 #define TLB_LOOP_TIMEOUT               1000000 /* 1s! */
 #define TLB_SPIN_COUNT                 10
 
-/* Maximum number of context banks per SMMU */
-#define ARM_SMMU_MAX_CBS               128
-
-/* SMMU global address space */
-#define ARM_SMMU_GR0(smmu)             ((smmu)->base)
-
-/*
- * SMMU global address space with conditional offset to access secure
- * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
- * nsGFSYNR0: 0x450)
- */
-#define ARM_SMMU_GR0_NS(smmu)                                          \
-       ((smmu)->base +                                                 \
-               ((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)       \
-                       ? 0x400 : 0))
-
 #define MSI_IOVA_BASE                  0x8000000
 #define MSI_IOVA_LENGTH                        0x100000
 
@@ -99,19 +80,6 @@ module_param(disable_bypass, bool, S_IRUGO);
 MODULE_PARM_DESC(disable_bypass,
        "Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
 
-enum arm_smmu_arch_version {
-       ARM_SMMU_V1,
-       ARM_SMMU_V1_64K,
-       ARM_SMMU_V2,
-};
-
-enum arm_smmu_implementation {
-       GENERIC_SMMU,
-       ARM_MMU500,
-       CAVIUM_SMMUV2,
-       QCOM_SMMUV2,
-};
-
 struct arm_smmu_s2cr {
        struct iommu_group              *group;
        int                             count;
@@ -149,65 +117,6 @@ struct arm_smmu_master_cfg {
 #define for_each_cfg_sme(fw, i, idx) \
        for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)
 
-struct arm_smmu_device {
-       struct device                   *dev;
-
-       void __iomem                    *base;
-       unsigned int                    numpage;
-       unsigned int                    pgshift;
-
-#define ARM_SMMU_FEAT_COHERENT_WALK    (1 << 0)
-#define ARM_SMMU_FEAT_STREAM_MATCH     (1 << 1)
-#define ARM_SMMU_FEAT_TRANS_S1         (1 << 2)
-#define ARM_SMMU_FEAT_TRANS_S2         (1 << 3)
-#define ARM_SMMU_FEAT_TRANS_NESTED     (1 << 4)
-#define ARM_SMMU_FEAT_TRANS_OPS                (1 << 5)
-#define ARM_SMMU_FEAT_VMID16           (1 << 6)
-#define ARM_SMMU_FEAT_FMT_AARCH64_4K   (1 << 7)
-#define ARM_SMMU_FEAT_FMT_AARCH64_16K  (1 << 8)
-#define ARM_SMMU_FEAT_FMT_AARCH64_64K  (1 << 9)
-#define ARM_SMMU_FEAT_FMT_AARCH32_L    (1 << 10)
-#define ARM_SMMU_FEAT_FMT_AARCH32_S    (1 << 11)
-#define ARM_SMMU_FEAT_EXIDS            (1 << 12)
-       u32                             features;
-
-#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
-       u32                             options;
-       enum arm_smmu_arch_version      version;
-       enum arm_smmu_implementation    model;
-
-       u32                             num_context_banks;
-       u32                             num_s2_context_banks;
-       DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
-       struct arm_smmu_cb              *cbs;
-       atomic_t                        irptndx;
-
-       u32                             num_mapping_groups;
-       u16                             streamid_mask;
-       u16                             smr_mask_mask;
-       struct arm_smmu_smr             *smrs;
-       struct arm_smmu_s2cr            *s2crs;
-       struct mutex                    stream_map_mutex;
-
-       unsigned long                   va_size;
-       unsigned long                   ipa_size;
-       unsigned long                   pa_size;
-       unsigned long                   pgsize_bitmap;
-
-       u32                             num_global_irqs;
-       u32                             num_context_irqs;
-       unsigned int                    *irqs;
-       struct clk_bulk_data            *clks;
-       int                             num_clks;
-
-       u32                             cavium_id_base; /* Specific to Cavium */
-
-       spinlock_t                      global_sync_lock;
-
-       /* IOMMU core code handle */
-       struct iommu_device             iommu;
-};
-
 enum arm_smmu_context_fmt {
        ARM_SMMU_CTX_FMT_NONE,
        ARM_SMMU_CTX_FMT_AARCH64,
@@ -246,6 +155,21 @@ struct arm_smmu_domain {
        struct iommu_domain             domain;
 };
 
+static int arm_smmu_gr0_ns(int offset)
+{
+       switch(offset) {
+       case ARM_SMMU_GR0_sCR0:
+       case ARM_SMMU_GR0_sACR:
+       case ARM_SMMU_GR0_sGFSR:
+       case ARM_SMMU_GR0_sGFSYNR0:
+       case ARM_SMMU_GR0_sGFSYNR1:
+       case ARM_SMMU_GR0_sGFSYNR2:
+               return offset + 0x400;
+       default:
+               return offset;
+       }
+}
+
/* Return the mapped address of register page @n of the SMMU */
static void __iomem *arm_smmu_page(struct arm_smmu_device *smmu, int n)
{
	/* Pages are (1 << pgshift) bytes apart, starting at the base mapping */
	unsigned int offset = n << smmu->pgshift;

	return smmu->base + offset;
}
@@ -253,12 +177,18 @@ static void __iomem *arm_smmu_page(struct arm_smmu_device *smmu, int n)
 
 static u32 arm_smmu_readl(struct arm_smmu_device *smmu, int page, int offset)
 {
+       if ((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS) && page == 0)
+               offset = arm_smmu_gr0_ns(offset);
+
        return readl_relaxed(arm_smmu_page(smmu, page) + offset);
 }
 
/*
 * Write a 32-bit register, redirecting GR0 (page 0) accesses to the
 * non-secure register aliases when the SMMU requires secure-alias
 * configuration accesses.
 */
static void arm_smmu_writel(struct arm_smmu_device *smmu, int page, int offset,
			    u32 val)
{
	void __iomem *reg;

	if (page == 0 && (smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS))
		offset = arm_smmu_gr0_ns(offset);

	reg = arm_smmu_page(smmu, page) + offset;
	writel_relaxed(val, reg);
}
 
@@ -273,9 +203,15 @@ static void arm_smmu_writeq(struct arm_smmu_device *smmu, int page, int offset,
        writeq_relaxed(val, arm_smmu_page(smmu, page) + offset);
 }
 
+#define ARM_SMMU_GR0           0
 #define ARM_SMMU_GR1           1
 #define ARM_SMMU_CB(s, n)      ((s)->numpage + (n))
 
+#define arm_smmu_gr0_read(s, o)                \
+       arm_smmu_readl((s), ARM_SMMU_GR0, (o))
+#define arm_smmu_gr0_write(s, o, v)    \
+       arm_smmu_writel((s), ARM_SMMU_GR0, (o), (v))
+
 #define arm_smmu_gr1_read(s, o)                \
        arm_smmu_readl((s), ARM_SMMU_GR1, (o))
 #define arm_smmu_gr1_write(s, o, v)    \
@@ -470,7 +406,7 @@ static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
        unsigned long flags;
 
        spin_lock_irqsave(&smmu->global_sync_lock, flags);
-       __arm_smmu_tlb_sync(smmu, 0, ARM_SMMU_GR0_sTLBGSYNC,
+       __arm_smmu_tlb_sync(smmu, ARM_SMMU_GR0, ARM_SMMU_GR0_sTLBGSYNC,
                            ARM_SMMU_GR0_sTLBGSTATUS);
        spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
 }
@@ -511,10 +447,10 @@ static void arm_smmu_tlb_inv_context_s2(void *cookie)
 {
        struct arm_smmu_domain *smmu_domain = cookie;
        struct arm_smmu_device *smmu = smmu_domain->smmu;
-       void __iomem *base = ARM_SMMU_GR0(smmu);
 
-       /* NOTE: see above */
-       writel(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
+       /* See above */
+       wmb();
+       arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
        arm_smmu_tlb_sync_global(smmu);
 }
 
@@ -579,12 +515,12 @@ static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
                                         size_t granule, bool leaf, void *cookie)
 {
        struct arm_smmu_domain *smmu_domain = cookie;
-       void __iomem *base = ARM_SMMU_GR0(smmu_domain->smmu);
+       struct arm_smmu_device *smmu = smmu_domain->smmu;
 
-       if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
+       if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
                wmb();
 
-       writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
+       arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
 }
 
 static const struct iommu_gather_ops arm_smmu_s1_tlb_ops = {
@@ -634,12 +570,11 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
 {
        u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
        struct arm_smmu_device *smmu = dev;
-       void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);
 
-       gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
-       gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
-       gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
-       gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
+       gfsr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
+       gfsynr0 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR0);
+       gfsynr1 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR1);
+       gfsynr2 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR2);
 
        if (!gfsr)
                return IRQ_NONE;
@@ -650,7 +585,7 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
                "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
                gfsr, gfsynr0, gfsynr1, gfsynr2);
 
-       writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
+       arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, gfsr);
        return IRQ_HANDLED;
 }
 
@@ -1056,7 +991,7 @@ static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
 
        if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
                reg |= SMR_VALID;
-       writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
+       arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(idx), reg);
 }
 
 static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
@@ -1069,7 +1004,7 @@ static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
        if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
            smmu->smrs[idx].valid)
                reg |= S2CR_EXIDVALID;
-       writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
+       arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), reg);
 }
 
 static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
@@ -1085,7 +1020,6 @@ static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
  */
 static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
 {
-       void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
        u32 smr;
 
        if (!smmu->smrs)
@@ -1097,13 +1031,13 @@ static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
         * masters later if they try to claim IDs outside these masks.
         */
        smr = FIELD_PREP(SMR_ID, smmu->streamid_mask);
-       writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
-       smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
+       arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(0), smr);
+       smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(0));
        smmu->streamid_mask = FIELD_GET(SMR_ID, smr);
 
        smr = FIELD_PREP(SMR_MASK, smmu->streamid_mask);
-       writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
-       smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
+       arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(0), smr);
+       smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(0));
        smmu->smr_mask_mask = FIELD_GET(SMR_MASK, smr);
 }
 
@@ -1736,13 +1670,12 @@ static struct iommu_ops arm_smmu_ops = {
 
 static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
 {
-       void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
        int i;
        u32 reg, major;
 
        /* clear global FSR */
-       reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
-       writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
+       reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
+       arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, reg);
 
        /*
         * Reset stream mapping groups: Initial values mark all SMRn as
@@ -1757,9 +1690,9 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
                 * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
                 * bit is only present in MMU-500r2 onwards.
                 */
-               reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
+               reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID7);
                major = FIELD_GET(ID7_MAJOR, reg);
-               reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
+               reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sACR);
                if (major >= 2)
                        reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
                /*
@@ -1767,7 +1700,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
                 * TLB entries for reduced latency.
                 */
                reg |= ARM_MMU500_ACR_SMTNMB_TLBEN | ARM_MMU500_ACR_S2CRB_TLBEN;
-               writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
+               arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sACR, reg);
        }
 
        /* Make sure all context banks are disabled and clear CB_FSR  */
@@ -1786,10 +1719,10 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
        }
 
        /* Invalidate the TLB, just in case */
-       writel_relaxed(QCOM_DUMMY_VAL, gr0_base + ARM_SMMU_GR0_TLBIALLH);
-       writel_relaxed(QCOM_DUMMY_VAL, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
+       arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLH, QCOM_DUMMY_VAL);
+       arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLNSNH, QCOM_DUMMY_VAL);
 
-       reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
+       reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sCR0);
 
        /* Enable fault reporting */
        reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
@@ -1818,7 +1751,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
 
        /* Push the button */
        arm_smmu_tlb_sync_global(smmu);
-       writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
+       arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, reg);
 }
 
 static int arm_smmu_id_size_to_bits(int size)
@@ -1843,7 +1776,6 @@ static int arm_smmu_id_size_to_bits(int size)
 static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 {
        unsigned int size;
-       void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
        u32 id;
        bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
        int i;
@@ -1853,7 +1785,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
                        smmu->version == ARM_SMMU_V2 ? 2 : 1);
 
        /* ID0 */
-       id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
+       id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID0);
 
        /* Restrict available stages based on module parameter */
        if (force_stage == 1)
@@ -1947,7 +1879,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
        }
 
        /* ID1 */
-       id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
+       id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID1);
        smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
 
        /* Check for size mismatch of SMMU address space from mapped region */
@@ -1985,7 +1917,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
                return -ENOMEM;
 
        /* ID2 */
-       id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
+       id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID2);
        size = arm_smmu_id_size_to_bits(FIELD_GET(ID2_IAS, id));
        smmu->ipa_size = size;
 
@@ -2223,6 +2155,10 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
        if (err)
                return err;
 
+       smmu = arm_smmu_impl_init(smmu);
+       if (IS_ERR(smmu))
+               return PTR_ERR(smmu);
+
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        ioaddr = res->start;
        smmu->base = devm_ioremap_resource(dev, res);
@@ -2372,7 +2308,7 @@ static void arm_smmu_device_shutdown(struct platform_device *pdev)
 
        arm_smmu_rpm_get(smmu);
        /* Turn the thing off */
-       writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
+       arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, sCR0_CLIENTPD);
        arm_smmu_rpm_put(smmu);
 
        if (pm_runtime_enabled(smmu->dev))