asedeno.scripts.mit.edu Git - linux.git/blobdiff - drivers/iommu/arm-smmu.c
Merge branches 'iommu/fixes', 'arm/renesas', 'arm/mediatek', 'arm/tegra', 'arm/omap...
[linux.git] / drivers / iommu / arm-smmu.c
index 44bff7de5fe2ecb48e62efb21a8d7989a397edaf..af18a7e7f91724d62e7b042d14ecc602418713d9 100644 (file)
@@ -49,6 +49,7 @@
 #include <linux/of_iommu.h>
 #include <linux/pci.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 
@@ -124,6 +125,7 @@ enum arm_smmu_implementation {
        GENERIC_SMMU,
        ARM_MMU500,
        CAVIUM_SMMUV2,
+       QCOM_SMMUV2,
 };
 
 struct arm_smmu_s2cr {
@@ -211,6 +213,8 @@ struct arm_smmu_device {
        u32                             num_global_irqs;
        u32                             num_context_irqs;
        unsigned int                    *irqs;
+       struct clk_bulk_data            *clks;
+       int                             num_clks;
 
        u32                             cavium_id_base; /* Specific to Cavium */
 
@@ -272,6 +276,20 @@ static struct arm_smmu_option_prop arm_smmu_options[] = {
        { 0, NULL},
 };
 
+static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu)
+{
+       if (pm_runtime_enabled(smmu->dev))
+               return pm_runtime_get_sync(smmu->dev);
+
+       return 0;
+}
+
+static inline void arm_smmu_rpm_put(struct arm_smmu_device *smmu)
+{
+       if (pm_runtime_enabled(smmu->dev))
+               pm_runtime_put(smmu->dev);
+}
+
/* Convert the core iommu_domain into the driver's wrapper structure. */
static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	struct arm_smmu_domain *smmu_domain;

	smmu_domain = container_of(dom, struct arm_smmu_domain, domain);
	return smmu_domain;
}
@@ -931,11 +949,15 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
-       int irq;
+       int ret, irq;
 
        if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY)
                return;
 
+       ret = arm_smmu_rpm_get(smmu);
+       if (ret < 0)
+               return;
+
        /*
         * Disable the context bank and free the page tables before freeing
         * it.
@@ -950,6 +972,8 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
 
        free_io_pgtable_ops(smmu_domain->pgtbl_ops);
        __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
+
+       arm_smmu_rpm_put(smmu);
 }
 
 static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
@@ -1231,10 +1255,15 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
                return -ENODEV;
 
        smmu = fwspec_smmu(fwspec);
+
+       ret = arm_smmu_rpm_get(smmu);
+       if (ret < 0)
+               return ret;
+
        /* Ensure that the domain is finalised */
        ret = arm_smmu_init_domain_context(domain, smmu);
        if (ret < 0)
-               return ret;
+               goto rpm_put;
 
        /*
         * Sanity check the domain. We don't support domains across
@@ -1244,49 +1273,74 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
                dev_err(dev,
                        "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
                        dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
-               return -EINVAL;
+               ret = -EINVAL;
+               goto rpm_put;
        }
 
        /* Looks ok, so add the device to the domain */
-       return arm_smmu_domain_add_master(smmu_domain, fwspec);
+       ret = arm_smmu_domain_add_master(smmu_domain, fwspec);
+
+rpm_put:
+       arm_smmu_rpm_put(smmu);
+       return ret;
 }
 
/*
 * Map a physical range into the domain's page tables.
 *
 * Page-table updates may require the SMMU to be powered (clock access
 * on some implementations), so bracket the operation with a runtime PM
 * reference — and actually check that the power-up succeeded, instead
 * of poking a potentially unpowered device.
 */
static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
	struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
	int ret;

	if (!ops)
		return -ENODEV;

	ret = arm_smmu_rpm_get(smmu);
	if (ret < 0)
		return ret;

	ret = ops->map(ops, iova, paddr, size, prot);
	arm_smmu_rpm_put(smmu);

	return ret;
}
 
/*
 * Unmap a range from the domain's page tables; returns the number of
 * bytes actually unmapped (0 on failure, as the IOMMU API expects).
 */
static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			     size_t size)
{
	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
	struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
	size_t ret;

	if (!ops)
		return 0;

	/* Bail out (nothing unmapped) if we can't power the SMMU up. */
	if (arm_smmu_rpm_get(smmu) < 0)
		return 0;

	ret = ops->unmap(ops, iova, size);
	arm_smmu_rpm_put(smmu);

	return ret;
}
 
 static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
 {
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+       struct arm_smmu_device *smmu = smmu_domain->smmu;
 
-       if (smmu_domain->tlb_ops)
+       if (smmu_domain->tlb_ops) {
+               arm_smmu_rpm_get(smmu);
                smmu_domain->tlb_ops->tlb_flush_all(smmu_domain);
+               arm_smmu_rpm_put(smmu);
+       }
 }
 
 static void arm_smmu_iotlb_sync(struct iommu_domain *domain)
 {
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+       struct arm_smmu_device *smmu = smmu_domain->smmu;
 
-       if (smmu_domain->tlb_ops)
+       if (smmu_domain->tlb_ops) {
+               arm_smmu_rpm_get(smmu);
                smmu_domain->tlb_ops->tlb_sync(smmu_domain);
+               arm_smmu_rpm_put(smmu);
+       }
 }
 
 static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
@@ -1301,6 +1355,11 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
        u32 tmp;
        u64 phys;
        unsigned long va, flags;
+       int ret;
+
+       ret = arm_smmu_rpm_get(smmu);
+       if (ret < 0)
+               return 0;
 
        cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
 
@@ -1329,6 +1388,8 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
                return 0;
        }
 
+       arm_smmu_rpm_put(smmu);
+
        return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
 }
 
@@ -1433,12 +1494,21 @@ static int arm_smmu_add_device(struct device *dev)
        while (i--)
                cfg->smendx[i] = INVALID_SMENDX;
 
+       ret = arm_smmu_rpm_get(smmu);
+       if (ret < 0)
+               goto out_cfg_free;
+
        ret = arm_smmu_master_alloc_smes(dev);
+       arm_smmu_rpm_put(smmu);
+
        if (ret)
                goto out_cfg_free;
 
        iommu_device_link(&smmu->iommu, dev);
 
+       device_link_add(dev, smmu->dev,
+                       DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_SUPPLIER);
+
        return 0;
 
 out_cfg_free:
@@ -1453,7 +1523,7 @@ static void arm_smmu_remove_device(struct device *dev)
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
        struct arm_smmu_master_cfg *cfg;
        struct arm_smmu_device *smmu;
-
+       int ret;
 
        if (!fwspec || fwspec->ops != &arm_smmu_ops)
                return;
@@ -1461,8 +1531,15 @@ static void arm_smmu_remove_device(struct device *dev)
        cfg  = fwspec->iommu_priv;
        smmu = cfg->smmu;
 
+       ret = arm_smmu_rpm_get(smmu);
+       if (ret < 0)
+               return;
+
        iommu_device_unlink(&smmu->iommu, dev);
        arm_smmu_master_free_smes(fwspec);
+
+       arm_smmu_rpm_put(smmu);
+
        iommu_group_remove_device(dev);
        kfree(fwspec->iommu_priv);
        iommu_fwspec_free(dev);
@@ -1952,13 +2029,14 @@ struct arm_smmu_match_data {
 };
 
 #define ARM_SMMU_MATCH_DATA(name, ver, imp)    \
-static struct arm_smmu_match_data name = { .version = ver, .model = imp }
+static const struct arm_smmu_match_data name = { .version = ver, .model = imp }
 
 ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
 ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
 ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
 ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
 ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
+ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2);
 
 static const struct of_device_id arm_smmu_of_match[] = {
        { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
@@ -1967,6 +2045,7 @@ static const struct of_device_id arm_smmu_of_match[] = {
        { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
        { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
        { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
+       { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
        { },
 };
 
@@ -2154,6 +2233,17 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
                smmu->irqs[i] = irq;
        }
 
+       err = devm_clk_bulk_get_all(dev, &smmu->clks);
+       if (err < 0) {
+               dev_err(dev, "failed to get clocks %d\n", err);
+               return err;
+       }
+       smmu->num_clks = err;
+
+       err = clk_bulk_prepare_enable(smmu->num_clks, smmu->clks);
+       if (err)
+               return err;
+
        err = arm_smmu_device_cfg_probe(smmu);
        if (err)
                return err;
@@ -2203,6 +2293,17 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
        arm_smmu_device_reset(smmu);
        arm_smmu_test_smr_masks(smmu);
 
+       /*
+        * We want to avoid touching dev->power.lock in fastpaths unless
+        * it's really going to do something useful - pm_runtime_enabled()
+        * can serve as an ideal proxy for that decision. So, conditionally
+        * enable pm_runtime.
+        */
+       if (dev->pm_domain) {
+               pm_runtime_set_active(dev);
+               pm_runtime_enable(dev);
+       }
+
        /*
         * For ACPI and generic DT bindings, an SMMU will be probed before
         * any device which might need it, so we want the bus ops in place
@@ -2238,19 +2339,63 @@ static void arm_smmu_device_shutdown(struct platform_device *pdev)
        if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
                dev_err(&pdev->dev, "removing device with active domains!\n");
 
+       arm_smmu_rpm_get(smmu);
        /* Turn the thing off */
        writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
+       arm_smmu_rpm_put(smmu);
+
+       if (pm_runtime_enabled(smmu->dev))
+               pm_runtime_force_suspend(smmu->dev);
+       else
+               clk_bulk_disable(smmu->num_clks, smmu->clks);
+
+       clk_bulk_unprepare(smmu->num_clks, smmu->clks);
 }
 
-static int __maybe_unused arm_smmu_pm_resume(struct device *dev)
+static int __maybe_unused arm_smmu_runtime_resume(struct device *dev)
 {
        struct arm_smmu_device *smmu = dev_get_drvdata(dev);
+       int ret;
+
+       ret = clk_bulk_enable(smmu->num_clks, smmu->clks);
+       if (ret)
+               return ret;
 
        arm_smmu_device_reset(smmu);
+
        return 0;
 }
 
-static SIMPLE_DEV_PM_OPS(arm_smmu_pm_ops, NULL, arm_smmu_pm_resume);
+static int __maybe_unused arm_smmu_runtime_suspend(struct device *dev)
+{
+       struct arm_smmu_device *smmu = dev_get_drvdata(dev);
+
+       clk_bulk_disable(smmu->num_clks, smmu->clks);
+
+       return 0;
+}
+
+static int __maybe_unused arm_smmu_pm_resume(struct device *dev)
+{
+       if (pm_runtime_suspended(dev))
+               return 0;
+
+       return arm_smmu_runtime_resume(dev);
+}
+
+static int __maybe_unused arm_smmu_pm_suspend(struct device *dev)
+{
+       if (pm_runtime_suspended(dev))
+               return 0;
+
+       return arm_smmu_runtime_suspend(dev);
+}
+
/*
 * System sleep reuses the runtime PM callbacks via the arm_smmu_pm_*
 * wrappers above, which skip the work when the device is already
 * runtime-suspended.
 */
static const struct dev_pm_ops arm_smmu_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(arm_smmu_pm_suspend, arm_smmu_pm_resume)
	SET_RUNTIME_PM_OPS(arm_smmu_runtime_suspend,
			   arm_smmu_runtime_resume, NULL)
};
 
 static struct platform_driver arm_smmu_driver = {
        .driver = {