From 8af46c784ecfe8929f66b5eaae987f6874953226 Mon Sep 17 00:00:00 2001
From: Lu Baolu
Date: Sat, 25 May 2019 13:41:32 +0800
Subject: [PATCH] iommu/vt-d: Implement is_attach_deferred iommu ops entry

As a domain is now attached to a device earlier, we should implement
the is_attach_deferred call-back and use it to defer the domain attach
from iommu driver init to device driver init when iommu is pre-enabled
in kdump kernel.

Suggested-by: Tom Murphy
Signed-off-by: Lu Baolu
Signed-off-by: Joerg Roedel
---
 drivers/iommu/intel-iommu.c | 23 +++++++++++++++++++++++
 1 file changed, 23 insertions(+)

diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 0bae0b73076c..c8b73802f0e0 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -353,6 +353,8 @@ static void domain_context_clear(struct intel_iommu *iommu,
 static int domain_detach_iommu(struct dmar_domain *domain,
 			       struct intel_iommu *iommu);
 static bool device_is_rmrr_locked(struct device *dev);
+static int intel_iommu_attach_device(struct iommu_domain *domain,
+				     struct device *dev);
 
 #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
 int dmar_disabled = 0;
@@ -378,6 +380,7 @@ int intel_iommu_gfx_mapped;
 EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
 
 #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
+#define DEFER_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-2))
 static DEFINE_SPINLOCK(device_domain_lock);
 static LIST_HEAD(device_domain_list);
 
@@ -2408,8 +2411,18 @@ static struct dmar_domain *find_domain(struct device *dev)
 {
 	struct device_domain_info *info;
 
+	if (unlikely(dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO)) {
+		struct iommu_domain *domain;
+
+		dev->archdata.iommu = NULL;
+		domain = iommu_get_domain_for_dev(dev);
+		if (domain)
+			intel_iommu_attach_device(domain, dev);
+	}
+
 	/* No lock here, assumes no domain exit in normal case */
 	info = dev->archdata.iommu;
+
 	if (likely(info))
 		return info->domain;
 	return NULL;
@@ -5504,6 +5517,9 @@ static int intel_iommu_add_device(struct device *dev)
 
 	iommu_device_link(&iommu->iommu, dev);
 
+	if (translation_pre_enabled(iommu))
+		dev->archdata.iommu = DEFER_DEVICE_DOMAIN_INFO;
+
 	group = iommu_group_get_for_dev(dev);
 
 	if (IS_ERR(group))
@@ -5828,6 +5844,12 @@ intel_iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
 			dmar_domain->default_pasid : -EINVAL;
 }
 
+static bool intel_iommu_is_attach_deferred(struct iommu_domain *domain,
+					   struct device *dev)
+{
+	return dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO;
+}
+
 const struct iommu_ops intel_iommu_ops = {
 	.capable		= intel_iommu_capable,
 	.domain_alloc		= intel_iommu_domain_alloc,
@@ -5850,6 +5872,7 @@ const struct iommu_ops intel_iommu_ops = {
 	.dev_feat_enabled	= intel_iommu_dev_feat_enabled,
 	.dev_enable_feat	= intel_iommu_dev_enable_feat,
 	.dev_disable_feat	= intel_iommu_dev_disable_feat,
+	.is_attach_deferred	= intel_iommu_is_attach_deferred,
 	.pgsize_bitmap		= INTEL_IOMMU_PGSIZES,
 };
 
-- 
2.45.2
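
The mechanism in the patch: when translation is found pre-enabled at
add-device time, dev->archdata.iommu is parked on the DEFER sentinel
pointer; is_attach_deferred() reports that state to the IOMMU core, and
the real attach happens lazily the first time find_domain() is called.
Below is a small, self-contained userspace sketch (not kernel code)
that models that state machine; every type, macro, and function name in
it is an illustrative stand-in for the kernel structures, chosen only
to show the sentinel-plus-lazy-attach idea.

/*
 * Userspace model of the deferred-attach scheme: the per-device IOMMU
 * pointer starts as a DEFER sentinel and the domain is attached on the
 * first lookup. All names are hypothetical stand-ins, not kernel APIs.
 */
#include <stdbool.h>
#include <stdio.h>

struct domain_info { int id; };		/* stands in for device_domain_info */
struct device      { void *iommu; };	/* stands in for dev->archdata.iommu */

/* Sentinel pointer values, mirroring DUMMY/DEFER_DEVICE_DOMAIN_INFO */
#define DUMMY_DOMAIN_INFO ((struct domain_info *)(-1))
#define DEFER_DOMAIN_INFO ((struct domain_info *)(-2))

static struct domain_info default_domain = { .id = 1 };

/* Models intel_iommu_is_attach_deferred(): a pure state check */
static bool is_attach_deferred(struct device *dev)
{
	return dev->iommu == DEFER_DOMAIN_INFO;
}

/* Models the real domain attach that the patch postpones */
static void attach_device(struct device *dev)
{
	dev->iommu = &default_domain;
	printf("attached domain %d\n", default_domain.id);
}

/* Models find_domain(): resolve the deferred attach on first use */
static struct domain_info *find_domain(struct device *dev)
{
	if (is_attach_deferred(dev)) {
		dev->iommu = NULL;	/* clear the sentinel before attaching */
		attach_device(dev);
	}
	if (dev->iommu && dev->iommu != DUMMY_DOMAIN_INFO)
		return dev->iommu;
	return NULL;
}

int main(void)
{
	/* add_device with translation pre-enabled: mark attach as deferred */
	struct device dev = { .iommu = DEFER_DOMAIN_INFO };

	printf("deferred? %d\n", is_attach_deferred(&dev));	/* prints 1 */
	find_domain(&dev);	/* first lookup: the attach happens here */
	printf("deferred? %d\n", is_attach_deferred(&dev));	/* prints 0 */
	return 0;
}

Encoding the deferred state as a second sentinel value in the existing
per-device pointer (alongside the DUMMY value) avoids adding a separate
flag, and makes the state visible to both find_domain() and the new
is_attach_deferred op without extra locking.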