diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index fd1b80ef9490d2f4044f0704e9ebc5843d21b2bf..5a28ae892504ae9e89b265abb94711fa01e5afdd 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -52,6 +52,7 @@
 #include <linux/spinlock.h>
 
 #include <linux/amba/bus.h>
+#include <linux/fsl/mc.h>
 
 #include "io-pgtable.h"
 #include "arm-smmu-regs.h"
@@ -246,6 +247,7 @@ struct arm_smmu_domain {
        const struct iommu_gather_ops   *tlb_ops;
        struct arm_smmu_cfg             cfg;
        enum arm_smmu_domain_stage      stage;
+       bool                            non_strict;
        struct mutex                    init_mutex; /* Protects smmu pointer */
        spinlock_t                      cb_lock; /* Serialises ATS1* ops and TLB syncs */
        struct iommu_domain             domain;
@@ -447,7 +449,11 @@ static void arm_smmu_tlb_inv_context_s1(void *cookie)
        struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
        void __iomem *base = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);
 
-       writel_relaxed(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID);
+       /*
+        * NOTE: this is not a relaxed write; it needs to guarantee that PTEs
+        * cleared by the current CPU are visible to the SMMU before the TLBI.
+        */
+       writel(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID);
        arm_smmu_tlb_sync_context(cookie);
 }
 
@@ -457,7 +463,8 @@ static void arm_smmu_tlb_inv_context_s2(void *cookie)
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        void __iomem *base = ARM_SMMU_GR0(smmu);
 
-       writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
+       /* NOTE: see above */
+       writel(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
        arm_smmu_tlb_sync_global(smmu);
 }
 
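These two hunks are the ordering fix itself: the TLBI register write must not be reordered ahead of the PTE stores it is meant to invalidate, so the relaxed accessor is dropped. On arm64 the difference is just a preceding write barrier, roughly as follows (paraphrasing the asm/io.h definitions; a sketch, not driver code):

        /*
         * writel_relaxed(v, c)  -- MMIO store, unordered w.r.t. normal memory
         * writel(v, c)          -- ({ __iowmb(); writel_relaxed((v), (c)); })
         *                          where __iowmb() expands to wmb()
         */

The barrier publishes the cleared PTEs before the SMMU can act on the invalidation command.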
@@ -469,6 +476,9 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
        bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
        void __iomem *reg = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);
 
+       if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
+               wmb();
+
        if (stage1) {
                reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
 
@@ -510,6 +520,9 @@ static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
        struct arm_smmu_domain *smmu_domain = cookie;
        void __iomem *base = ARM_SMMU_GR0(smmu_domain->smmu);
 
+       if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
+               wmb();
+
        writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
 }
 
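In the _nosync paths the barrier stays explicit and conditional: with ARM_SMMU_FEAT_COHERENT_WALK the page tables sit in normal cacheable memory with no DMA API syncs to provide ordering, so a wmb() must publish the PTE updates before the relaxed TLBI write. A minimal sketch of the pattern being enforced, where ptep and tlbi_reg are illustrative names only:

        *ptep = 0;                       /* PTE cleared by a normal store */
        wmb();                           /* visible to the SMMU... */
        writel_relaxed(asid, tlbi_reg);  /* ...before the TLBI is issued */

On non-coherent SMMUs the cache maintenance done by io-pgtable's DMA API calls already orders the two, which is why the barrier is skipped there.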
@@ -863,6 +876,9 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
        if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
                pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_NO_DMA;
 
+       if (smmu_domain->non_strict)
+               pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
+
        smmu_domain->smmu = smmu;
        pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
        if (!pgtbl_ops) {
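Because the quirk is latched here, at io-pgtable allocation time, DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE only takes effect if set before the first attach initialises the domain; flipping it later changes smmu_domain->non_strict but not the live page-table configuration. A sketch of what the non-strict case hands to io-pgtable (the quirk bits simply accumulate):

        /* coherent-walk SMMU in non-strict mode: both quirks end up set */
        pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_NO_DMA | IO_PGTABLE_QUIRK_NON_STRICT;
        pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);

With IO_PGTABLE_QUIRK_NON_STRICT set, io-pgtable skips the synchronous leaf TLBI on unmap and leaves invalidation to the flush queue's eventual flush_iotlb_all.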
@@ -1252,6 +1268,14 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
        return ops->unmap(ops, iova, size);
 }
 
+static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
+{
+       struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+
+       if (smmu_domain->tlb_ops)
+               smmu_domain->tlb_ops->tlb_flush_all(smmu_domain);
+}
+
 static void arm_smmu_iotlb_sync(struct iommu_domain *domain)
 {
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
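Until now .flush_iotlb_all was wired to arm_smmu_iotlb_sync(), which only waits for previously issued invalidations to complete; non-strict mode needs a genuine full invalidation when the flush queue drains, hence the dedicated callback built on tlb_flush_all(). For reference, the core-side helper that lands here looks roughly like this (include/linux/iommu.h):

        static inline void iommu_flush_tlb_all(struct iommu_domain *domain)
        {
                if (domain->ops->flush_iotlb_all)
                        domain->ops->flush_iotlb_all(domain);
        }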
@@ -1459,6 +1483,8 @@ static struct iommu_group *arm_smmu_device_group(struct device *dev)
 
        if (dev_is_pci(dev))
                group = pci_device_group(dev);
+       else if (dev_is_fsl_mc(dev))
+               group = fsl_mc_device_group(dev);
        else
                group = generic_device_group(dev);
 
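dev_is_fsl_mc() mirrors dev_is_pci() as a plain bus-type test, roughly (per include/linux/fsl/mc.h):

        #define dev_is_fsl_mc(_dev) ((_dev)->bus == &fsl_mc_bus_type)

and fsl_mc_device_group() exists for the same reason pci_device_group() does: all objects in one DPRC container share an ICID (the fsl-mc equivalent of a stream ID), so they must land in a single iommu_group rather than each getting a generic one.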
@@ -1470,15 +1496,27 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
 {
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 
-       if (domain->type != IOMMU_DOMAIN_UNMANAGED)
-               return -EINVAL;
-
-       switch (attr) {
-       case DOMAIN_ATTR_NESTING:
-               *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
-               return 0;
+       switch (domain->type) {
+       case IOMMU_DOMAIN_UNMANAGED:
+               switch (attr) {
+               case DOMAIN_ATTR_NESTING:
+                       *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
+                       return 0;
+               default:
+                       return -ENODEV;
+               }
+               break;
+       case IOMMU_DOMAIN_DMA:
+               switch (attr) {
+               case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
+                       *(int *)data = smmu_domain->non_strict;
+                       return 0;
+               default:
+                       return -ENODEV;
+               }
+               break;
        default:
-               return -ENODEV;
+               return -EINVAL;
        }
 }
 
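The outer switch on domain->type keeps the attribute namespaces disjoint and the errnos meaningful: -EINVAL for an unsupported domain type, -ENODEV for an unknown attribute on a supported one. An illustrative caller-side probe of the new attribute (a sketch only, not taken from this patch):

        int attr = 0;

        if (!iommu_domain_get_attr(domain, DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr)
                pr_info("domain uses deferred (non-strict) TLB invalidation\n");

This is the shape of the check dma-iommu makes before deciding it may batch unmaps behind an IOVA flush queue.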
@@ -1488,28 +1526,38 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
        int ret = 0;
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 
-       if (domain->type != IOMMU_DOMAIN_UNMANAGED)
-               return -EINVAL;
-
        mutex_lock(&smmu_domain->init_mutex);
 
-       switch (attr) {
-       case DOMAIN_ATTR_NESTING:
-               if (smmu_domain->smmu) {
-                       ret = -EPERM;
-                       goto out_unlock;
+       switch (domain->type) {
+       case IOMMU_DOMAIN_UNMANAGED:
+               switch (attr) {
+               case DOMAIN_ATTR_NESTING:
+                       if (smmu_domain->smmu) {
+                               ret = -EPERM;
+                               goto out_unlock;
+                       }
+
+                       if (*(int *)data)
+                               smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
+                       else
+                               smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
+                       break;
+               default:
+                       ret = -ENODEV;
+               }
+               break;
+       case IOMMU_DOMAIN_DMA:
+               switch (attr) {
+               case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
+                       smmu_domain->non_strict = *(int *)data;
+                       break;
+               default:
+                       ret = -ENODEV;
                }
-
-               if (*(int *)data)
-                       smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
-               else
-                       smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
-
                break;
        default:
-               ret = -ENODEV;
+               ret = -EINVAL;
        }
-
 out_unlock:
        mutex_unlock(&smmu_domain->init_mutex);
        return ret;
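The setter gains the matching branch. Unlike NESTING, it deliberately omits the smmu_domain->smmu check: flipping non_strict under init_mutex is harmless, it simply has no effect once the page tables exist (see the quirk latching above). An illustrative enable, modelled on what the core does for default DMA domains when booted with iommu.strict=0:

        int attr = 1;

        /* request lazy invalidation before any device is attached */
        if (iommu_domain_set_attr(domain, DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr))
                pr_warn("non-strict mode rejected, staying strict\n");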
@@ -1562,7 +1610,7 @@ static struct iommu_ops arm_smmu_ops = {
        .attach_dev             = arm_smmu_attach_dev,
        .map                    = arm_smmu_map,
        .unmap                  = arm_smmu_unmap,
-       .flush_iotlb_all        = arm_smmu_iotlb_sync,
+       .flush_iotlb_all        = arm_smmu_flush_iotlb_all,
        .iotlb_sync             = arm_smmu_iotlb_sync,
        .iova_to_phys           = arm_smmu_iova_to_phys,
        .add_device             = arm_smmu_add_device,
@@ -2036,6 +2084,10 @@ static void arm_smmu_bus_init(void)
                bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
        }
 #endif
+#ifdef CONFIG_FSL_MC_BUS
+       if (!iommu_present(&fsl_mc_bus_type))
+               bus_set_iommu(&fsl_mc_bus_type, &arm_smmu_ops);
+#endif
 }
 
 static int arm_smmu_device_probe(struct platform_device *pdev)
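As with the amba and pci cases earlier in arm_smmu_bus_init(), the iommu_present() guard leaves any already-registered IOMMU in place, and the #ifdef matches fsl_mc_bus_type only being defined under CONFIG_FSL_MC_BUS. One non-obvious point, sketched here as a comment because it happens in the core rather than in this driver:

        /*
         * bus_set_iommu() is not just bookkeeping: it registers a bus
         * notifier for future hotplug and walks the devices already on
         * the bus, calling arm_smmu_ops.add_device() for each, so by
         * the time it returns, existing fsl-mc devices have been claimed.
         */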