Merge branches 'iommu/fixes', 'arm/smmu', 'x86/amd', 'x86/vt-d' and 'core' into next
author Joerg Roedel <jroedel@suse.de>
Fri, 24 Jan 2020 14:39:39 +0000 (15:39 +0100)
committer Joerg Roedel <jroedel@suse.de>
Fri, 24 Jan 2020 14:39:39 +0000 (15:39 +0100)
drivers/iommu/Kconfig
drivers/iommu/amd_iommu.c
drivers/iommu/amd_iommu_init.c
drivers/iommu/arm-smmu-v3.c
drivers/iommu/arm-smmu.c
drivers/iommu/intel-iommu.c
drivers/iommu/iommu.c
include/linux/iommu.h

drivers/iommu/Kconfig: Simple merge
drivers/iommu/amd_iommu.c: Simple merge
drivers/iommu/amd_iommu_init.c: Simple merge
drivers/iommu/arm-smmu-v3.c
index effe72eb89e7f36c1f8afbd72840b6fff7f959e9,5e04c1f3992a33a38b55aace049d3235581355d6,effe72eb89e7f36c1f8afbd72840b6fff7f959e9,effe72eb89e7f36c1f8afbd72840b6fff7f959e9,7f5b74a418de62d5ac65afa0d4765e5a8ddb8973..aa3ac2a03807f6db12f9546a3eb65a90547d962b
@@@@@@ -1443,50 -1483,238 -1443,50 -1443,50 -1443,50 +1483,238 @@@@@@ static int arm_smmu_cmdq_issue_sync(str
     }
     
     /* Context descriptor manipulation functions */
- ---static u64 arm_smmu_cpu_tcr_to_cd(u64 tcr)
+ +++static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain,
+ +++                        int ssid, bool leaf)
     {
- ---   u64 val = 0;
+ +++   size_t i;
+ +++   unsigned long flags;
+ +++   struct arm_smmu_master *master;
+ +++   struct arm_smmu_device *smmu = smmu_domain->smmu;
+ +++   struct arm_smmu_cmdq_ent cmd = {
+ +++           .opcode = CMDQ_OP_CFGI_CD,
+ +++           .cfgi   = {
+ +++                   .ssid   = ssid,
+ +++                   .leaf   = leaf,
+ +++           },
+ +++   };
+ +++
+ +++   spin_lock_irqsave(&smmu_domain->devices_lock, flags);
+ +++   list_for_each_entry(master, &smmu_domain->devices, domain_head) {
+ +++           for (i = 0; i < master->num_sids; i++) {
+ +++                   cmd.cfgi.sid = master->sids[i];
+ +++                   arm_smmu_cmdq_issue_cmd(smmu, &cmd);
+ +++           }
+ +++   }
+ +++   spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
+ +++
+ +++   arm_smmu_cmdq_issue_sync(smmu);
+ +++}
     
- ---   /* Repack the TCR. Just care about TTBR0 for now */
- ---   val |= ARM_SMMU_TCR2CD(tcr, T0SZ);
- ---   val |= ARM_SMMU_TCR2CD(tcr, TG0);
- ---   val |= ARM_SMMU_TCR2CD(tcr, IRGN0);
- ---   val |= ARM_SMMU_TCR2CD(tcr, ORGN0);
- ---   val |= ARM_SMMU_TCR2CD(tcr, SH0);
- ---   val |= ARM_SMMU_TCR2CD(tcr, EPD0);
- ---   val |= ARM_SMMU_TCR2CD(tcr, EPD1);
- ---   val |= ARM_SMMU_TCR2CD(tcr, IPS);
+ +++static int arm_smmu_alloc_cd_leaf_table(struct arm_smmu_device *smmu,
+ +++                                   struct arm_smmu_l1_ctx_desc *l1_desc)
+ +++{
+ +++   size_t size = CTXDESC_L2_ENTRIES * (CTXDESC_CD_DWORDS << 3);
     
- ---   return val;
+ +++   l1_desc->l2ptr = dmam_alloc_coherent(smmu->dev, size,
+ +++                                        &l1_desc->l2ptr_dma, GFP_KERNEL);
+ +++   if (!l1_desc->l2ptr) {
+ +++           dev_warn(smmu->dev,
+ +++                    "failed to allocate context descriptor table\n");
+ +++           return -ENOMEM;
+ +++   }
+ +++   return 0;
     }
     
- ---static void arm_smmu_write_ctx_desc(struct arm_smmu_device *smmu,
- ---                               struct arm_smmu_s1_cfg *cfg)
+ +++static void arm_smmu_write_cd_l1_desc(__le64 *dst,
+ +++                                 struct arm_smmu_l1_ctx_desc *l1_desc)
     {
- ---   u64 val;
+ +++   u64 val = (l1_desc->l2ptr_dma & CTXDESC_L1_DESC_L2PTR_MASK) |
+ +++             CTXDESC_L1_DESC_V;
+ ++ 
+ +++   WRITE_ONCE(*dst, cpu_to_le64(val));
+ +++}
+ +++
+ +++static __le64 *arm_smmu_get_cd_ptr(struct arm_smmu_domain *smmu_domain,
+ +++                              u32 ssid)
+ +++{
+ +++   __le64 *l1ptr;
+ +++   unsigned int idx;
+ +++   struct arm_smmu_l1_ctx_desc *l1_desc;
+ +++   struct arm_smmu_device *smmu = smmu_domain->smmu;
+ +++   struct arm_smmu_ctx_desc_cfg *cdcfg = &smmu_domain->s1_cfg.cdcfg;
    +
+ +++   if (smmu_domain->s1_cfg.s1fmt == STRTAB_STE_0_S1FMT_LINEAR)
+ +++           return cdcfg->cdtab + ssid * CTXDESC_CD_DWORDS;
+ +++
+ +++   idx = ssid >> CTXDESC_SPLIT;
+ +++   l1_desc = &cdcfg->l1_desc[idx];
+ +++   if (!l1_desc->l2ptr) {
+ +++           if (arm_smmu_alloc_cd_leaf_table(smmu, l1_desc))
+ +++                   return NULL;
+ +++
+ +++           l1ptr = cdcfg->cdtab + idx * CTXDESC_L1_DESC_DWORDS;
+ +++           arm_smmu_write_cd_l1_desc(l1ptr, l1_desc);
+ +++           /* An invalid L1CD can be cached */
+ +++           arm_smmu_sync_cd(smmu_domain, ssid, false);
+ +++   }
+ +++   idx = ssid & (CTXDESC_L2_ENTRIES - 1);
+ +++   return l1_desc->l2ptr + idx * CTXDESC_CD_DWORDS;
+ +++}
+ +++
+ +++static int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain,
+ +++                              int ssid, struct arm_smmu_ctx_desc *cd)
+ +++{
        /*
- ---    * We don't need to issue any invalidation here, as we'll invalidate
- ---    * the STE when installing the new entry anyway.
+ +++    * This function handles the following cases:
+ +++    *
+ +++    * (1) Install primary CD, for normal DMA traffic (SSID = 0).
+ +++    * (2) Install a secondary CD, for SID+SSID traffic.
+ +++    * (3) Update ASID of a CD. Atomically write the first 64 bits of the
+ +++    *     CD, then invalidate the old entry and mappings.
+ +++    * (4) Remove a secondary CD.
         */
- ---   val = arm_smmu_cpu_tcr_to_cd(cfg->cd.tcr) |
+ +++   u64 val;
+ +++   bool cd_live;
+ +++   __le64 *cdptr;
+ +++   struct arm_smmu_device *smmu = smmu_domain->smmu;
+ +++
+ +++   if (WARN_ON(ssid >= (1 << smmu_domain->s1_cfg.s1cdmax)))
+ +++           return -E2BIG;
+ +++
+ +++   cdptr = arm_smmu_get_cd_ptr(smmu_domain, ssid);
+ +++   if (!cdptr)
+ +++           return -ENOMEM;
+ +++
+ +++   val = le64_to_cpu(cdptr[0]);
+ +++   cd_live = !!(val & CTXDESC_CD_0_V);
+ +++
+ +++   if (!cd) { /* (4) */
+ +++           val = 0;
+ +++   } else if (cd_live) { /* (3) */
+ +++           val &= ~CTXDESC_CD_0_ASID;
+ +++           val |= FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid);
+ +++           /*
+ +++            * Until CD+TLB invalidation, both ASIDs may be used for tagging
+ +++            * this substream's traffic
+ +++            */
+ +++   } else { /* (1) and (2) */
+ +++           cdptr[1] = cpu_to_le64(cd->ttbr & CTXDESC_CD_1_TTB0_MASK);
+ +++           cdptr[2] = 0;
+ +++           cdptr[3] = cpu_to_le64(cd->mair);
+ +++
+ +++           /*
+ +++            * STE is live, and the SMMU might read dwords of this CD in any
+ +++            * order. Ensure that it observes valid values before reading
+ +++            * V=1.
+ +++            */
+ +++           arm_smmu_sync_cd(smmu_domain, ssid, true);
+ +++
+ +++           val = cd->tcr |
     #ifdef __BIG_ENDIAN
- ---         CTXDESC_CD_0_ENDI |
+ +++                   CTXDESC_CD_0_ENDI |
     #endif
- ---         CTXDESC_CD_0_R | CTXDESC_CD_0_A | CTXDESC_CD_0_ASET |
- ---         CTXDESC_CD_0_AA64 | FIELD_PREP(CTXDESC_CD_0_ASID, cfg->cd.asid) |
- ---         CTXDESC_CD_0_V;
+ +++                   CTXDESC_CD_0_R | CTXDESC_CD_0_A | CTXDESC_CD_0_ASET |
+ +++                   CTXDESC_CD_0_AA64 |
+ +++                   FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid) |
+ +++                   CTXDESC_CD_0_V;
    +
- --    /* STALL_MODEL==0b10 && CD.S==0 is ILLEGAL */
- --    if (smmu->features & ARM_SMMU_FEAT_STALL_FORCE)
- --            val |= CTXDESC_CD_0_S;
+ +++           /* STALL_MODEL==0b10 && CD.S==0 is ILLEGAL */
+ +++           if (smmu->features & ARM_SMMU_FEAT_STALL_FORCE)
+ +++                   val |= CTXDESC_CD_0_S;
+ +++   }
    +
- --    cfg->cdptr[0] = cpu_to_le64(val);
+ +++   /*
+ +++    * The SMMU accesses 64-bit values atomically. See IHI0070Ca 3.21.3
+ +++    * "Configuration structures and configuration invalidation completion"
+ +++    *
+ +++    *   The size of single-copy atomic reads made by the SMMU is
+ +++    *   IMPLEMENTATION DEFINED but must be at least 64 bits. Any single
+ +++    *   field within an aligned 64-bit span of a structure can be altered
+ +++    *   without first making the structure invalid.
+ +++    */
+ +++   WRITE_ONCE(cdptr[0], cpu_to_le64(val));
+ +++   arm_smmu_sync_cd(smmu_domain, ssid, true);
+ +++   return 0;
+ +++}
+ +++
+ +++static int arm_smmu_alloc_cd_tables(struct arm_smmu_domain *smmu_domain)
+ +++{
+ +++   int ret;
+ +++   size_t l1size;
+ +++   size_t max_contexts;
+ +++   struct arm_smmu_device *smmu = smmu_domain->smmu;
+ +++   struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
+ +++   struct arm_smmu_ctx_desc_cfg *cdcfg = &cfg->cdcfg;
+ +++
+ +++   max_contexts = 1 << cfg->s1cdmax;
+ +++
+ +++   if (!(smmu->features & ARM_SMMU_FEAT_2_LVL_CDTAB) ||
+ +++       max_contexts <= CTXDESC_L2_ENTRIES) {
+ +++           cfg->s1fmt = STRTAB_STE_0_S1FMT_LINEAR;
+ +++           cdcfg->num_l1_ents = max_contexts;
    +
- --    val = cfg->cd.ttbr & CTXDESC_CD_1_TTB0_MASK;
- --    cfg->cdptr[1] = cpu_to_le64(val);
+ +++           l1size = max_contexts * (CTXDESC_CD_DWORDS << 3);
+ +++   } else {
+ +++           cfg->s1fmt = STRTAB_STE_0_S1FMT_64K_L2;
+ +++           cdcfg->num_l1_ents = DIV_ROUND_UP(max_contexts,
+ +++                                             CTXDESC_L2_ENTRIES);
+ +++
+ +++           cdcfg->l1_desc = devm_kcalloc(smmu->dev, cdcfg->num_l1_ents,
+ +++                                         sizeof(*cdcfg->l1_desc),
+ +++                                         GFP_KERNEL);
+ +++           if (!cdcfg->l1_desc)
+ +++                   return -ENOMEM;
+ +++
+ +++           l1size = cdcfg->num_l1_ents * (CTXDESC_L1_DESC_DWORDS << 3);
+ +++   }
+ ++ 
    -   /* STALL_MODEL==0b10 && CD.S==0 is ILLEGAL */
    -   if (smmu->features & ARM_SMMU_FEAT_STALL_FORCE)
    -           val |= CTXDESC_CD_0_S;
+ +++   cdcfg->cdtab = dmam_alloc_coherent(smmu->dev, l1size, &cdcfg->cdtab_dma,
+ +++                                      GFP_KERNEL);
+ +++   if (!cdcfg->cdtab) {
+ +++           dev_warn(smmu->dev, "failed to allocate context descriptor\n");
+ +++           ret = -ENOMEM;
+ +++           goto err_free_l1;
+ +++   }
+ +++
+ +++   return 0;
    +
- --    cfg->cdptr[3] = cpu_to_le64(cfg->cd.mair);
+ +++err_free_l1:
+ +++   if (cdcfg->l1_desc) {
+ +++           devm_kfree(smmu->dev, cdcfg->l1_desc);
+ +++           cdcfg->l1_desc = NULL;
+ +++   }
+ +++   return ret;
+ +++}
+ +++
+ +++static void arm_smmu_free_cd_tables(struct arm_smmu_domain *smmu_domain)
+ +++{
+ +++   int i;
+ +++   size_t size, l1size;
+ +++   struct arm_smmu_device *smmu = smmu_domain->smmu;
+ +++   struct arm_smmu_ctx_desc_cfg *cdcfg = &smmu_domain->s1_cfg.cdcfg;
+ +++
+ +++   if (cdcfg->l1_desc) {
+ +++           size = CTXDESC_L2_ENTRIES * (CTXDESC_CD_DWORDS << 3);
+ +++
+ +++           for (i = 0; i < cdcfg->num_l1_ents; i++) {
+ +++                   if (!cdcfg->l1_desc[i].l2ptr)
+ +++                           continue;
+ ++ 
    -   cfg->cdptr[0] = cpu_to_le64(val);
+ +++                   dmam_free_coherent(smmu->dev, size,
+ +++                                      cdcfg->l1_desc[i].l2ptr,
+ +++                                      cdcfg->l1_desc[i].l2ptr_dma);
+ +++           }
+ +++           devm_kfree(smmu->dev, cdcfg->l1_desc);
+ +++           cdcfg->l1_desc = NULL;
+ ++ 
    -   val = cfg->cd.ttbr & CTXDESC_CD_1_TTB0_MASK;
    -   cfg->cdptr[1] = cpu_to_le64(val);
+ +++           l1size = cdcfg->num_l1_ents * (CTXDESC_L1_DESC_DWORDS << 3);
+ +++   } else {
+ +++           l1size = cdcfg->num_l1_ents * (CTXDESC_CD_DWORDS << 3);
+ +++   }
+ ++ 
    -   cfg->cdptr[3] = cpu_to_le64(cfg->cd.mair);
+ +++   dmam_free_coherent(smmu->dev, l1size, cdcfg->cdtab, cdcfg->cdtab_dma);
+ +++   cdcfg->cdtab_dma = 0;
+ +++   cdcfg->cdtab = NULL;
     }
     
     /* Stream table manipulation functions */
drivers/iommu/arm-smmu.c
index 4f1a350d9529b68fad40c2b66efe1c562282a711,cca94c30b301bc4809a62064d5903924817789eb,4f1a350d9529b68fad40c2b66efe1c562282a711,4f1a350d9529b68fad40c2b66efe1c562282a711,7da03eba6f89e0ae32e284a9d2761a965525047b..16c4b87af42bb42b519e529556e3f804c97b5e98
@@@@@@ -2029,9 -2082,25 -2029,9 -2029,9 -2020,9 +2073,25 @@@@@@ static int arm_smmu_bus_init(struct iom
        }
     #endif
     #ifdef CONFIG_FSL_MC_BUS
- ---   if (!iommu_present(&fsl_mc_bus_type))
- ---           bus_set_iommu(&fsl_mc_bus_type, &arm_smmu_ops);
+ +++   if (!iommu_present(&fsl_mc_bus_type)) {
+ +++           err = bus_set_iommu(&fsl_mc_bus_type, ops);
+ +++           if (err)
+ +++                   goto err_reset_pci_ops;
+ +++   }
    +#endif
+ +++   return 0;
+ +++
+ +++err_reset_pci_ops: __maybe_unused;
+ +++#ifdef CONFIG_PCI
+ +++   bus_set_iommu(&pci_bus_type, NULL);
+ ++ #endif
+ +++err_reset_amba_ops: __maybe_unused;
+ +++#ifdef CONFIG_ARM_AMBA
+ +++   bus_set_iommu(&amba_bustype, NULL);
+ +++#endif
+ +++err_reset_platform_ops: __maybe_unused;
+ +++   bus_set_iommu(&platform_bus_type, NULL);
+ +++   return err;
     }
     
     static int arm_smmu_device_probe(struct platform_device *pdev)
         * ready to handle default domain setup as soon as any SMMU exists.
         */
        if (!using_legacy_binding)
- ---           arm_smmu_bus_init();
    -
    -   return 0;
    -}
+ +++           return arm_smmu_bus_init(&arm_smmu_ops);
     
    -/*
    - * With the legacy DT binding in play, though, we have no guarantees about
    - * probe order, but then we're also not doing default domains, so we can
    - * delay setting bus ops until we're sure every possible SMMU is ready,
    - * and that way ensure that no add_device() calls get missed.
    - */
    -static int arm_smmu_legacy_bus_init(void)
    -{
    -   if (using_legacy_binding)
    -           arm_smmu_bus_init();
        return 0;
     }
    -device_initcall_sync(arm_smmu_legacy_bus_init);
     
- -- /*
- --  * With the legacy DT binding in play, though, we have no guarantees about
- --  * probe order, but then we're also not doing default domains, so we can
- --  * delay setting bus ops until we're sure every possible SMMU is ready,
- --  * and that way ensure that no add_device() calls get missed.
- --  */
- -- static int arm_smmu_legacy_bus_init(void)
- -- {
- --    if (using_legacy_binding)
- --            arm_smmu_bus_init();
- --    return 0;
- -- }
- -- device_initcall_sync(arm_smmu_legacy_bus_init);
- -- 
- ---static void arm_smmu_device_shutdown(struct platform_device *pdev)
+ +++static int arm_smmu_device_remove(struct platform_device *pdev)
     {
        struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
     
drivers/iommu/intel-iommu.c
index 932267f49f9a8c6529c120c938262a1d15f1f11f,42966611a192647f572e059e3c6965792fe6f2fd,42966611a192647f572e059e3c6965792fe6f2fd,c0a983c4e84a64872baa67ebadf9f0844cc29c29,b2526a4fc6c4e49151f56319a723b8daebb5056f..64ddccf1d5fe3be70f3a0199aab4a87d4d06ea17
@@@@@@ -6002,9 -5987,9 -5987,9 -6179,9 -5979,9 +6186,9 @@@@@@ const struct iommu_ops intel_iommu_ops 
        .add_device             = intel_iommu_add_device,
        .remove_device          = intel_iommu_remove_device,
        .get_resv_regions       = intel_iommu_get_resv_regions,
----    .put_resv_regions       = intel_iommu_put_resv_regions,
++++    .put_resv_regions       = generic_iommu_put_resv_regions,
        .apply_resv_region      = intel_iommu_apply_resv_region,
 ----   .device_group           = pci_device_group,
 ++++   .device_group           = intel_iommu_device_group,
        .dev_has_feat           = intel_iommu_dev_has_feat,
        .dev_feat_enabled       = intel_iommu_dev_feat_enabled,
        .dev_enable_feat        = intel_iommu_dev_enable_feat,
drivers/iommu/iommu.c
index 3ead597e1c5703c31feb5d3080efb69a33a65cee,ffe6f685ceae4ae9104df389d2a9fb9c4536d586,fdd40756dbc1578da825e71ab21781be311e7137,fdd40756dbc1578da825e71ab21781be311e7137,101f2d68eb6ea735ad7be5f251297f3de33d999b..3e3528436e0b220b8714470675bf9b1eefb673bb
@@@@@@ -183,10 -186,21 -183,10 -183,10 -183,10 +186,21 @@@@@@ int iommu_probe_device(struct device *d
        if (!iommu_get_dev_param(dev))
                return -ENOMEM;
     
+ +++   if (!try_module_get(ops->owner)) {
+ +++           ret = -EINVAL;
+ +++           goto err_free_dev_param;
+ +++   }
+ +++
        ret = ops->add_device(dev);
        if (ret)
- ---           iommu_free_dev_param(dev);
+ +++           goto err_module_put;
+ ++ 
+ +++   return 0;
    +
+ +++err_module_put:
+ +++   module_put(ops->owner);
+ +++err_free_dev_param:
+ +++   iommu_free_dev_param(dev);
        return ret;
     }
     
include/linux/iommu.h: Simple merge