2 * Copyright © 2006-2014 Intel Corporation.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * Authors: David Woodhouse <dwmw2@infradead.org>,
14 * Ashok Raj <ashok.raj@intel.com>,
15 * Shaohua Li <shaohua.li@intel.com>,
16 * Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
17 * Fenghua Yu <fenghua.yu@intel.com>
18 * Joerg Roedel <jroedel@suse.de>
21 #define pr_fmt(fmt) "DMAR: " fmt
22 #define dev_fmt(fmt) pr_fmt(fmt)
24 #include <linux/init.h>
25 #include <linux/bitmap.h>
26 #include <linux/debugfs.h>
27 #include <linux/export.h>
28 #include <linux/slab.h>
29 #include <linux/irq.h>
30 #include <linux/interrupt.h>
31 #include <linux/spinlock.h>
32 #include <linux/pci.h>
33 #include <linux/dmar.h>
34 #include <linux/dma-mapping.h>
35 #include <linux/mempool.h>
36 #include <linux/memory.h>
37 #include <linux/cpu.h>
38 #include <linux/timer.h>
40 #include <linux/iova.h>
41 #include <linux/iommu.h>
42 #include <linux/intel-iommu.h>
43 #include <linux/syscore_ops.h>
44 #include <linux/tboot.h>
45 #include <linux/dmi.h>
46 #include <linux/pci-ats.h>
47 #include <linux/memblock.h>
48 #include <linux/dma-contiguous.h>
49 #include <linux/dma-direct.h>
50 #include <linux/crash_dump.h>
51 #include <linux/numa.h>
52 #include <asm/irq_remapping.h>
53 #include <asm/cacheflush.h>
54 #include <asm/iommu.h>
56 #include "irq_remapping.h"
57 #include "intel-pasid.h"
59 #define ROOT_SIZE VTD_PAGE_SIZE
60 #define CONTEXT_SIZE VTD_PAGE_SIZE
62 #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
63 #define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
64 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
65 #define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
67 #define IOAPIC_RANGE_START (0xfee00000)
68 #define IOAPIC_RANGE_END (0xfeefffff)
69 #define IOVA_START_ADDR (0x1000)
71 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 57
73 #define MAX_AGAW_WIDTH 64
74 #define MAX_AGAW_PFN_WIDTH (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
76 #define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
77 #define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
79 /* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
80 to match. That way, we can use 'unsigned long' for PFNs with impunity. */
81 #define DOMAIN_MAX_PFN(gaw) ((unsigned long) min_t(uint64_t, \
82 __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
83 #define DOMAIN_MAX_ADDR(gaw) (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
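/*
 * Illustrative sketch (not part of this driver): how DOMAIN_MAX_PFN() above
 * clamps the largest page frame number so it always fits in an unsigned long.
 * With the default 57-bit guest address width and a 4KiB VT-d page (shift 12),
 * __DOMAIN_MAX_PFN(57) is 2^45 - 1, which fits on a 64-bit build; on a 32-bit
 * build the min_t() would clamp the value to ULONG_MAX instead.  The
 * demo_max_pfn() name is made up for illustration only.
 */
static unsigned long demo_max_pfn(int gaw)
{
        unsigned long long max_pfn = (1ULL << (gaw - 12)) - 1; /* __DOMAIN_MAX_PFN */
        unsigned long clamp = (unsigned long)-1;               /* ULONG_MAX */

        /* e.g. demo_max_pfn(57) == 0x1fffffffffff on a 64-bit build */
        return max_pfn < clamp ? (unsigned long)max_pfn : clamp;
}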
85 /* IO virtual address start page frame number */
86 #define IOVA_START_PFN (1)
88 #define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
90 /* page table handling */
91 #define LEVEL_STRIDE (9)
92 #define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
95 * This bitmap is used to advertise the page sizes our hardware supports
96 * to the IOMMU core, which will then use this information to split
97 * physically contiguous memory regions it is mapping into page sizes
100 * Traditionally the IOMMU core just handed us the mappings directly,
101 * after making sure the size is an order of a 4KiB page and that the
102 * mapping has natural alignment.
104 * To retain this behavior, we currently advertise that we support
105 * all page sizes that are an order of 4KiB.
107 * If at some point we'd like to utilize the IOMMU core's new behavior,
108 * we could change this to advertise the real page sizes we support.
110 #define INTEL_IOMMU_PGSIZES (~0xFFFUL)
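/*
 * Illustrative sketch (not part of this driver): what the pgsize bitmap above
 * encodes.  Bit n set means "a page of 2^n bytes is supported", so ~0xFFFUL
 * advertises every power-of-two size from 4KiB (bit 12) upwards.
 * demo_pgsize_advertised() is a made-up helper for illustration.
 */
static int demo_pgsize_advertised(unsigned long pgsize_bitmap, unsigned long size)
{
        /* e.g. demo_pgsize_advertised(~0xFFFUL, 1UL << 21) != 0  (2MiB)
         *      demo_pgsize_advertised(~0xFFFUL, 1UL << 10) == 0  (1KiB)  */
        return (pgsize_bitmap & size) != 0;
}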
112 static inline int agaw_to_level(int agaw)
117 static inline int agaw_to_width(int agaw)
119 return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
122 static inline int width_to_agaw(int width)
124 return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
127 static inline unsigned int level_to_offset_bits(int level)
129 return (level - 1) * LEVEL_STRIDE;
132 static inline int pfn_level_offset(unsigned long pfn, int level)
134 return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
137 static inline unsigned long level_mask(int level)
139 return -1UL << level_to_offset_bits(level);
142 static inline unsigned long level_size(int level)
144 return 1UL << level_to_offset_bits(level);
147 static inline unsigned long align_to_level(unsigned long pfn, int level)
149 return (pfn + level_size(level) - 1) & level_mask(level);
152 static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
154 return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
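/*
 * Illustrative sketch (not part of this driver): how the helpers above break
 * an IOVA page frame number into per-level table indices.  With a 9-bit
 * stride, level 1 covers PFN bits 0-8, level 2 bits 9-17, and so on; for a
 * 48-bit address width (agaw 2) the walk uses 4 levels.  demo_pte_index()
 * mirrors pfn_level_offset() and is made up for illustration.
 */
static unsigned int demo_pte_index(unsigned long pfn, int level)
{
        /* e.g. for pfn 0x12345678:
         *   level 1 -> 0x078, level 2 -> 0x02b, level 3 -> 0x08d, level 4 -> 0x002 */
        return (pfn >> ((level - 1) * 9)) & ((1UL << 9) - 1);
}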
157 /* VT-d pages must always be _smaller_ than MM pages. Otherwise things
158 are never going to work. */
159 static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
161 return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
164 static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
166 return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
168 static inline unsigned long page_to_dma_pfn(struct page *pg)
170 return mm_to_dma_pfn(page_to_pfn(pg));
172 static inline unsigned long virt_to_dma_pfn(void *p)
174 return page_to_dma_pfn(virt_to_page(p));
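/*
 * Illustrative sketch (not part of this driver): the mm <-> VT-d pfn
 * conversions above.  On x86 PAGE_SHIFT and VTD_PAGE_SHIFT are both 12, so
 * the shift is zero and the conversion is an identity; with 64KiB MM pages
 * (shift 16) one MM pfn covers 16 VT-d pfns.  The demo_* name and the
 * explicit mm_page_shift parameter are made up for illustration.
 */
static unsigned long demo_mm_to_dma_pfn(unsigned long mm_pfn, int mm_page_shift)
{
        /* e.g. demo_mm_to_dma_pfn(0x100, 16) == 0x1000 (16 VT-d pages per MM page) */
        return mm_pfn << (mm_page_shift - 12);
}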
177 /* global iommu list, set NULL for ignored DMAR units */
178 static struct intel_iommu **g_iommus;
180 static void __init check_tylersburg_isoch(void);
181 static int rwbf_quirk;
184 * set to 1 to panic the kernel if VT-d can't be successfully enabled
185 * (used when kernel is launched w/ TXT)
187 static int force_on = 0;
188 int intel_iommu_tboot_noforce;
189 static int no_platform_optin;
191 #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
194 * Take a root_entry and return the Lower Context Table Pointer (LCTP)
197 static phys_addr_t root_entry_lctp(struct root_entry *re)
202 return re->lo & VTD_PAGE_MASK;
206 * Take a root_entry and return the Upper Context Table Pointer (UCTP)
209 static phys_addr_t root_entry_uctp(struct root_entry *re)
214 return re->hi & VTD_PAGE_MASK;
217 static inline void context_clear_pasid_enable(struct context_entry *context)
219 context->lo &= ~(1ULL << 11);
222 static inline bool context_pasid_enabled(struct context_entry *context)
224 return !!(context->lo & (1ULL << 11));
227 static inline void context_set_copied(struct context_entry *context)
229 context->hi |= (1ull << 3);
232 static inline bool context_copied(struct context_entry *context)
234 return !!(context->hi & (1ULL << 3));
237 static inline bool __context_present(struct context_entry *context)
239 return (context->lo & 1);
242 bool context_present(struct context_entry *context)
244 return context_pasid_enabled(context) ?
245 __context_present(context) :
246 __context_present(context) && !context_copied(context);
249 static inline void context_set_present(struct context_entry *context)
254 static inline void context_set_fault_enable(struct context_entry *context)
256 context->lo &= (((u64)-1) << 2) | 1;
259 static inline void context_set_translation_type(struct context_entry *context,
262 context->lo &= (((u64)-1) << 4) | 3;
263 context->lo |= (value & 3) << 2;
266 static inline void context_set_address_root(struct context_entry *context,
269 context->lo &= ~VTD_PAGE_MASK;
270 context->lo |= value & VTD_PAGE_MASK;
273 static inline void context_set_address_width(struct context_entry *context,
276 context->hi |= value & 7;
279 static inline void context_set_domain_id(struct context_entry *context,
282 context->hi |= (value & ((1 << 16) - 1)) << 8;
285 static inline int context_domain_id(struct context_entry *c)
287 return((c->hi >> 8) & 0xffff);
290 static inline void context_clear_entry(struct context_entry *context)
297 * This domain is a static identity mapping domain.
298 * 1. This domain creates a static 1:1 mapping to all usable memory.
299 * 2. It maps to each iommu if successful.
300 * 3. Each iommu maps to this domain if successful.
302 static struct dmar_domain *si_domain;
303 static int hw_pass_through = 1;
305 /* si_domain contains multiple devices */
306 #define DOMAIN_FLAG_STATIC_IDENTITY BIT(0)
309 * This is a DMA domain allocated through the iommu domain allocation
310 * interface. But one or more devices belonging to this domain have
311 * been chosen to use a private domain. We should avoid using the
312 * map/unmap/iova_to_phys APIs on it.
314 #define DOMAIN_FLAG_LOSE_CHILDREN BIT(1)
316 #define for_each_domain_iommu(idx, domain) \
317 for (idx = 0; idx < g_num_of_iommus; idx++) \
318 if (domain->iommu_refcnt[idx])
320 struct dmar_rmrr_unit {
321 struct list_head list; /* list of rmrr units */
322 struct acpi_dmar_header *hdr; /* ACPI header */
323 u64 base_address; /* reserved base address*/
324 u64 end_address; /* reserved end address */
325 struct dmar_dev_scope *devices; /* target devices */
326 int devices_cnt; /* target device count */
329 struct dmar_atsr_unit {
330 struct list_head list; /* list of ATSR units */
331 struct acpi_dmar_header *hdr; /* ACPI header */
332 struct dmar_dev_scope *devices; /* target devices */
333 int devices_cnt; /* target device count */
334 u8 include_all:1; /* include all ports */
337 static LIST_HEAD(dmar_atsr_units);
338 static LIST_HEAD(dmar_rmrr_units);
340 #define for_each_rmrr_units(rmrr) \
341 list_for_each_entry(rmrr, &dmar_rmrr_units, list)
343 /* bitmap for indexing intel_iommus */
344 static int g_num_of_iommus;
346 static void domain_exit(struct dmar_domain *domain);
347 static void domain_remove_dev_info(struct dmar_domain *domain);
348 static void dmar_remove_one_dev_info(struct device *dev);
349 static void __dmar_remove_one_dev_info(struct device_domain_info *info);
350 static void domain_context_clear(struct intel_iommu *iommu,
352 static int domain_detach_iommu(struct dmar_domain *domain,
353 struct intel_iommu *iommu);
354 static bool device_is_rmrr_locked(struct device *dev);
355 static int intel_iommu_attach_device(struct iommu_domain *domain,
358 #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
359 int dmar_disabled = 0;
361 int dmar_disabled = 1;
362 #endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
365 int intel_iommu_enabled = 0;
366 EXPORT_SYMBOL_GPL(intel_iommu_enabled);
368 static int dmar_map_gfx = 1;
369 static int dmar_forcedac;
370 static int intel_iommu_strict;
371 static int intel_iommu_superpage = 1;
372 static int iommu_identity_mapping;
374 #define IDENTMAP_ALL 1
375 #define IDENTMAP_GFX 2
376 #define IDENTMAP_AZALIA 4
378 int intel_iommu_gfx_mapped;
379 EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
381 #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
382 #define DEFER_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-2))
383 static DEFINE_SPINLOCK(device_domain_lock);
384 static LIST_HEAD(device_domain_list);
387 * Iterate over elements in device_domain_list and call the specified
388 * callback @fn against each element.
390 int for_each_device_domain(int (*fn)(struct device_domain_info *info,
391 void *data), void *data)
395 struct device_domain_info *info;
397 spin_lock_irqsave(&device_domain_lock, flags);
398 list_for_each_entry(info, &device_domain_list, global) {
399 ret = fn(info, data);
401 spin_unlock_irqrestore(&device_domain_lock, flags);
405 spin_unlock_irqrestore(&device_domain_lock, flags);
410 const struct iommu_ops intel_iommu_ops;
412 static bool translation_pre_enabled(struct intel_iommu *iommu)
414 return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED);
417 static void clear_translation_pre_enabled(struct intel_iommu *iommu)
419 iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED;
422 static void init_translation_status(struct intel_iommu *iommu)
426 gsts = readl(iommu->reg + DMAR_GSTS_REG);
427 if (gsts & DMA_GSTS_TES)
428 iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
431 /* Convert generic 'struct iommu_domain' to private struct dmar_domain */
432 static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
434 return container_of(dom, struct dmar_domain, domain);
437 static int __init intel_iommu_setup(char *str)
442 if (!strncmp(str, "on", 2)) {
444 pr_info("IOMMU enabled\n");
445 } else if (!strncmp(str, "off", 3)) {
447 no_platform_optin = 1;
448 pr_info("IOMMU disabled\n");
449 } else if (!strncmp(str, "igfx_off", 8)) {
451 pr_info("Disable GFX device mapping\n");
452 } else if (!strncmp(str, "forcedac", 8)) {
453 pr_info("Forcing DAC for PCI devices\n");
455 } else if (!strncmp(str, "strict", 6)) {
456 pr_info("Disable batched IOTLB flush\n");
457 intel_iommu_strict = 1;
458 } else if (!strncmp(str, "sp_off", 6)) {
459 pr_info("Disable supported super page\n");
460 intel_iommu_superpage = 0;
461 } else if (!strncmp(str, "sm_on", 5)) {
462 pr_info("Intel-IOMMU: scalable mode supported\n");
464 } else if (!strncmp(str, "tboot_noforce", 13)) {
466 "Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
467 intel_iommu_tboot_noforce = 1;
470 str += strcspn(str, ",");
476 __setup("intel_iommu=", intel_iommu_setup);
478 static struct kmem_cache *iommu_domain_cache;
479 static struct kmem_cache *iommu_devinfo_cache;
481 static struct dmar_domain* get_iommu_domain(struct intel_iommu *iommu, u16 did)
483 struct dmar_domain **domains;
486 domains = iommu->domains[idx];
490 return domains[did & 0xff];
493 static void set_iommu_domain(struct intel_iommu *iommu, u16 did,
494 struct dmar_domain *domain)
496 struct dmar_domain **domains;
499 if (!iommu->domains[idx]) {
500 size_t size = 256 * sizeof(struct dmar_domain *);
501 iommu->domains[idx] = kzalloc(size, GFP_ATOMIC);
504 domains = iommu->domains[idx];
505 if (WARN_ON(!domains))
508 domains[did & 0xff] = domain;
511 void *alloc_pgtable_page(int node)
516 page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
518 vaddr = page_address(page);
522 void free_pgtable_page(void *vaddr)
524 free_page((unsigned long)vaddr);
527 static inline void *alloc_domain_mem(void)
529 return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
532 static void free_domain_mem(void *vaddr)
534 kmem_cache_free(iommu_domain_cache, vaddr);
537 static inline void * alloc_devinfo_mem(void)
539 return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
542 static inline void free_devinfo_mem(void *vaddr)
544 kmem_cache_free(iommu_devinfo_cache, vaddr);
547 static inline int domain_type_is_si(struct dmar_domain *domain)
549 return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY;
552 static inline int domain_pfn_supported(struct dmar_domain *domain,
555 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
557 return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
560 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
565 sagaw = cap_sagaw(iommu->cap);
566 for (agaw = width_to_agaw(max_gaw);
568 if (test_bit(agaw, &sagaw))
576 * Calculate max SAGAW for each iommu.
578 int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
580 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
584 * Calculate agaw for each iommu.
585 * "SAGAW" may be different across iommus; use a default agaw, and
586 * fall back to a smaller supported agaw for iommus that don't support the default agaw.
588 int iommu_calculate_agaw(struct intel_iommu *iommu)
590 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
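/*
 * Illustrative sketch (not part of this driver): how an adjusted guest
 * address width is picked from the SAGAW capability bits, as done by
 * __iommu_calculate_agaw() above.  Each set bit in sagaw means "this agaw is
 * supported"; starting from the agaw corresponding to the requested width, we
 * walk down until we hit a supported value.  E.g. with sagaw 0x4 (only agaw 2,
 * i.e. 48-bit, supported) and a requested width of 57 bits, the result is
 * agaw 2.  demo_pick_agaw() is a made-up helper for illustration.
 */
static int demo_pick_agaw(unsigned long sagaw, int max_gaw)
{
        int agaw;

        /* width_to_agaw(): DIV_ROUND_UP(width - 30, 9) */
        for (agaw = (max_gaw - 30 + 8) / 9; agaw >= 0; agaw--)
                if (sagaw & (1UL << agaw))
                        return agaw;
        return -1;      /* no supported agaw at or below the requested width */
}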
593 /* This function only returns the single iommu in a domain */
594 struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
598 /* si_domain and vm domain should not get here. */
599 if (WARN_ON(domain->domain.type != IOMMU_DOMAIN_DMA))
602 for_each_domain_iommu(iommu_id, domain)
605 if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
608 return g_iommus[iommu_id];
611 static void domain_update_iommu_coherency(struct dmar_domain *domain)
613 struct dmar_drhd_unit *drhd;
614 struct intel_iommu *iommu;
618 domain->iommu_coherency = 1;
620 for_each_domain_iommu(i, domain) {
622 if (!ecap_coherent(g_iommus[i]->ecap)) {
623 domain->iommu_coherency = 0;
630 /* No hardware attached; use lowest common denominator */
632 for_each_active_iommu(iommu, drhd) {
633 if (!ecap_coherent(iommu->ecap)) {
634 domain->iommu_coherency = 0;
641 static int domain_update_iommu_snooping(struct intel_iommu *skip)
643 struct dmar_drhd_unit *drhd;
644 struct intel_iommu *iommu;
648 for_each_active_iommu(iommu, drhd) {
650 if (!ecap_sc_support(iommu->ecap)) {
661 static int domain_update_iommu_superpage(struct intel_iommu *skip)
663 struct dmar_drhd_unit *drhd;
664 struct intel_iommu *iommu;
667 if (!intel_iommu_superpage) {
671 /* set iommu_superpage to the smallest common denominator */
673 for_each_active_iommu(iommu, drhd) {
675 mask &= cap_super_page_val(iommu->cap);
685 /* Some capabilities may be different across iommus */
686 static void domain_update_iommu_cap(struct dmar_domain *domain)
688 domain_update_iommu_coherency(domain);
689 domain->iommu_snooping = domain_update_iommu_snooping(NULL);
690 domain->iommu_superpage = domain_update_iommu_superpage(NULL);
693 struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
696 struct root_entry *root = &iommu->root_entry[bus];
697 struct context_entry *context;
701 if (sm_supported(iommu)) {
709 context = phys_to_virt(*entry & VTD_PAGE_MASK);
711 unsigned long phy_addr;
715 context = alloc_pgtable_page(iommu->node);
719 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
720 phy_addr = virt_to_phys((void *)context);
721 *entry = phy_addr | 1;
722 __iommu_flush_cache(iommu, entry, sizeof(*entry));
724 return &context[devfn];
727 static int iommu_dummy(struct device *dev)
729 return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
733 * is_downstream_to_pci_bridge - test if a device belongs to the PCI
734 * sub-hierarchy of a candidate PCI-PCI bridge
735 * @dev: candidate PCI device belonging to @bridge PCI sub-hierarchy
736 * @bridge: the candidate PCI-PCI bridge
738 * Return: true if @dev belongs to @bridge PCI sub-hierarchy, else false.
741 is_downstream_to_pci_bridge(struct device *dev, struct device *bridge)
743 struct pci_dev *pdev, *pbridge;
745 if (!dev_is_pci(dev) || !dev_is_pci(bridge))
748 pdev = to_pci_dev(dev);
749 pbridge = to_pci_dev(bridge);
751 if (pbridge->subordinate &&
752 pbridge->subordinate->number <= pdev->bus->number &&
753 pbridge->subordinate->busn_res.end >= pdev->bus->number)
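/*
 * Illustrative sketch (not part of this driver): the bus-number test used by
 * is_downstream_to_pci_bridge() above, with deliberately simplified, made-up
 * types.  A device sits downstream of a bridge when its bus number lies
 * inside the secondary..subordinate bus range the bridge forwards to.
 */
struct demo_bridge_range {
        unsigned char secondary;        /* first bus behind the bridge */
        unsigned char subordinate;      /* last bus behind the bridge */
};

static int demo_is_downstream(unsigned char dev_bus, const struct demo_bridge_range *br)
{
        /* e.g. a bridge forwarding buses 0x02-0x05 claims a device on bus 0x03 */
        return dev_bus >= br->secondary && dev_bus <= br->subordinate;
}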
759 static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
761 struct dmar_drhd_unit *drhd = NULL;
762 struct intel_iommu *iommu;
764 struct pci_dev *pdev = NULL;
768 if (iommu_dummy(dev))
771 if (dev_is_pci(dev)) {
772 struct pci_dev *pf_pdev;
774 pdev = to_pci_dev(dev);
777 /* VMD child devices currently cannot be handled individually */
778 if (is_vmd(pdev->bus))
782 /* VFs aren't listed in scope tables; we need to look up
783 * the PF instead to find the IOMMU. */
784 pf_pdev = pci_physfn(pdev);
786 segment = pci_domain_nr(pdev->bus);
787 } else if (has_acpi_companion(dev))
788 dev = &ACPI_COMPANION(dev)->dev;
791 for_each_active_iommu(iommu, drhd) {
792 if (pdev && segment != drhd->segment)
795 for_each_active_dev_scope(drhd->devices,
796 drhd->devices_cnt, i, tmp) {
798 /* For a VF use its original BDF# not that of the PF
799 * which we used for the IOMMU lookup. Strictly speaking
800 * we could do this for all PCI devices; we only need to
801 * get the BDF# from the scope table for ACPI matches. */
802 if (pdev && pdev->is_virtfn)
805 *bus = drhd->devices[i].bus;
806 *devfn = drhd->devices[i].devfn;
810 if (is_downstream_to_pci_bridge(dev, tmp))
814 if (pdev && drhd->include_all) {
816 *bus = pdev->bus->number;
817 *devfn = pdev->devfn;
828 static void domain_flush_cache(struct dmar_domain *domain,
829 void *addr, int size)
831 if (!domain->iommu_coherency)
832 clflush_cache_range(addr, size);
835 static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
837 struct context_entry *context;
841 spin_lock_irqsave(&iommu->lock, flags);
842 context = iommu_context_addr(iommu, bus, devfn, 0);
844 ret = context_present(context);
845 spin_unlock_irqrestore(&iommu->lock, flags);
849 static void free_context_table(struct intel_iommu *iommu)
853 struct context_entry *context;
855 spin_lock_irqsave(&iommu->lock, flags);
856 if (!iommu->root_entry) {
859 for (i = 0; i < ROOT_ENTRY_NR; i++) {
860 context = iommu_context_addr(iommu, i, 0, 0);
862 free_pgtable_page(context);
864 if (!sm_supported(iommu))
867 context = iommu_context_addr(iommu, i, 0x80, 0);
869 free_pgtable_page(context);
872 free_pgtable_page(iommu->root_entry);
873 iommu->root_entry = NULL;
875 spin_unlock_irqrestore(&iommu->lock, flags);
878 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
879 unsigned long pfn, int *target_level)
881 struct dma_pte *parent, *pte;
882 int level = agaw_to_level(domain->agaw);
885 BUG_ON(!domain->pgd);
887 if (!domain_pfn_supported(domain, pfn))
888 /* Address beyond IOMMU's addressing capabilities. */
891 parent = domain->pgd;
896 offset = pfn_level_offset(pfn, level);
897 pte = &parent[offset];
898 if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
900 if (level == *target_level)
903 if (!dma_pte_present(pte)) {
906 tmp_page = alloc_pgtable_page(domain->nid);
911 domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
912 pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
913 if (cmpxchg64(&pte->val, 0ULL, pteval))
914 /* Someone else set it while we were thinking; use theirs. */
915 free_pgtable_page(tmp_page);
917 domain_flush_cache(domain, pte, sizeof(*pte));
922 parent = phys_to_virt(dma_pte_addr(pte));
927 *target_level = level;
932 /* return address's pte at specific level */
933 static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
935 int level, int *large_page)
937 struct dma_pte *parent, *pte;
938 int total = agaw_to_level(domain->agaw);
941 parent = domain->pgd;
942 while (level <= total) {
943 offset = pfn_level_offset(pfn, total);
944 pte = &parent[offset];
948 if (!dma_pte_present(pte)) {
953 if (dma_pte_superpage(pte)) {
958 parent = phys_to_virt(dma_pte_addr(pte));
964 /* clear last level pte; a tlb flush should follow */
965 static void dma_pte_clear_range(struct dmar_domain *domain,
966 unsigned long start_pfn,
967 unsigned long last_pfn)
969 unsigned int large_page;
970 struct dma_pte *first_pte, *pte;
972 BUG_ON(!domain_pfn_supported(domain, start_pfn));
973 BUG_ON(!domain_pfn_supported(domain, last_pfn));
974 BUG_ON(start_pfn > last_pfn);
976 /* we don't need lock here; nobody else touches the iova range */
979 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
981 start_pfn = align_to_level(start_pfn + 1, large_page + 1);
986 start_pfn += lvl_to_nr_pages(large_page);
988 } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
990 domain_flush_cache(domain, first_pte,
991 (void *)pte - (void *)first_pte);
993 } while (start_pfn && start_pfn <= last_pfn);
996 static void dma_pte_free_level(struct dmar_domain *domain, int level,
997 int retain_level, struct dma_pte *pte,
998 unsigned long pfn, unsigned long start_pfn,
999 unsigned long last_pfn)
1001 pfn = max(start_pfn, pfn);
1002 pte = &pte[pfn_level_offset(pfn, level)];
1005 unsigned long level_pfn;
1006 struct dma_pte *level_pte;
1008 if (!dma_pte_present(pte) || dma_pte_superpage(pte))
1011 level_pfn = pfn & level_mask(level);
1012 level_pte = phys_to_virt(dma_pte_addr(pte));
1015 dma_pte_free_level(domain, level - 1, retain_level,
1016 level_pte, level_pfn, start_pfn,
1021 * Free the page table if we're below the level we want to
1022 * retain and the range covers the entire table.
1024 if (level < retain_level && !(start_pfn > level_pfn ||
1025 last_pfn < level_pfn + level_size(level) - 1)) {
1027 domain_flush_cache(domain, pte, sizeof(*pte));
1028 free_pgtable_page(level_pte);
1031 pfn += level_size(level);
1032 } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1036 * clear last level (leaf) ptes and free page table pages below the
1037 * level we wish to keep intact.
1039 static void dma_pte_free_pagetable(struct dmar_domain *domain,
1040 unsigned long start_pfn,
1041 unsigned long last_pfn,
1044 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1045 BUG_ON(!domain_pfn_supported(domain, last_pfn));
1046 BUG_ON(start_pfn > last_pfn);
1048 dma_pte_clear_range(domain, start_pfn, last_pfn);
1050 /* We don't need lock here; nobody else touches the iova range */
1051 dma_pte_free_level(domain, agaw_to_level(domain->agaw), retain_level,
1052 domain->pgd, 0, start_pfn, last_pfn);
1055 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1056 free_pgtable_page(domain->pgd);
1061 /* When a page at a given level is being unlinked from its parent, we don't
1062 need to *modify* it at all. All we need to do is make a list of all the
1063 pages which can be freed just as soon as we've flushed the IOTLB and we
1064 know the hardware page-walk will no longer touch them.
1065 The 'pte' argument is the *parent* PTE, pointing to the page that is to be freed. */
1067 static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
1068 int level, struct dma_pte *pte,
1069 struct page *freelist)
1073 pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
1074 pg->freelist = freelist;
1080 pte = page_address(pg);
1082 if (dma_pte_present(pte) && !dma_pte_superpage(pte))
1083 freelist = dma_pte_list_pagetables(domain, level - 1,
1086 } while (!first_pte_in_page(pte));
1091 static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
1092 struct dma_pte *pte, unsigned long pfn,
1093 unsigned long start_pfn,
1094 unsigned long last_pfn,
1095 struct page *freelist)
1097 struct dma_pte *first_pte = NULL, *last_pte = NULL;
1099 pfn = max(start_pfn, pfn);
1100 pte = &pte[pfn_level_offset(pfn, level)];
1103 unsigned long level_pfn;
1105 if (!dma_pte_present(pte))
1108 level_pfn = pfn & level_mask(level);
1110 /* If range covers entire pagetable, free it */
1111 if (start_pfn <= level_pfn &&
1112 last_pfn >= level_pfn + level_size(level) - 1) {
1113 /* These subordinate page tables are going away entirely. Don't
1114 bother to clear them; we're just going to *free* them. */
1115 if (level > 1 && !dma_pte_superpage(pte))
1116 freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);
1122 } else if (level > 1) {
1123 /* Recurse down into a level that isn't *entirely* obsolete */
1124 freelist = dma_pte_clear_level(domain, level - 1,
1125 phys_to_virt(dma_pte_addr(pte)),
1126 level_pfn, start_pfn, last_pfn,
1130 pfn += level_size(level);
1131 } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1134 domain_flush_cache(domain, first_pte,
1135 (void *)++last_pte - (void *)first_pte);
1140 /* We can't just free the pages because the IOMMU may still be walking
1141 the page tables, and may have cached the intermediate levels. The
1142 pages can only be freed after the IOTLB flush has been done. */
1143 static struct page *domain_unmap(struct dmar_domain *domain,
1144 unsigned long start_pfn,
1145 unsigned long last_pfn)
1147 struct page *freelist;
1149 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1150 BUG_ON(!domain_pfn_supported(domain, last_pfn));
1151 BUG_ON(start_pfn > last_pfn);
1153 /* we don't need lock here; nobody else touches the iova range */
1154 freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
1155 domain->pgd, 0, start_pfn, last_pfn, NULL);
1158 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1159 struct page *pgd_page = virt_to_page(domain->pgd);
1160 pgd_page->freelist = freelist;
1161 freelist = pgd_page;
1169 static void dma_free_pagelist(struct page *freelist)
1173 while ((pg = freelist)) {
1174 freelist = pg->freelist;
1175 free_pgtable_page(page_address(pg));
1179 static void iova_entry_free(unsigned long data)
1181 struct page *freelist = (struct page *)data;
1183 dma_free_pagelist(freelist);
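/*
 * Illustrative sketch (not part of this driver): the defer-then-free pattern
 * used by domain_unmap()/dma_free_pagelist() above.  Page-table pages are
 * collected on a list while the tables are torn down and are only released
 * after the IOTLB flush guarantees the hardware can no longer walk them.  The
 * demo_* names and the plain linked list stand in for the struct page
 * ->freelist chaining used by the driver.
 */
struct demo_freelist {
        struct demo_freelist *next;
        void *page;                     /* page-table page awaiting release */
};

static void demo_flush_then_free(struct demo_freelist *head,
                                 void (*flush_iotlb)(void),
                                 void (*free_page)(void *page))
{
        flush_iotlb();                  /* hardware stops walking the old tables */
        while (head) {
                struct demo_freelist *next = head->next;

                free_page(head->page);  /* only now is it safe to release them */
                head = next;
        }
}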
1186 /* iommu handling */
1187 static int iommu_alloc_root_entry(struct intel_iommu *iommu)
1189 struct root_entry *root;
1190 unsigned long flags;
1192 root = (struct root_entry *)alloc_pgtable_page(iommu->node);
1194 pr_err("Allocating root entry for %s failed\n",
1199 __iommu_flush_cache(iommu, root, ROOT_SIZE);
1201 spin_lock_irqsave(&iommu->lock, flags);
1202 iommu->root_entry = root;
1203 spin_unlock_irqrestore(&iommu->lock, flags);
1208 static void iommu_set_root_entry(struct intel_iommu *iommu)
1214 addr = virt_to_phys(iommu->root_entry);
1215 if (sm_supported(iommu))
1216 addr |= DMA_RTADDR_SMT;
1218 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1219 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);
1221 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
1223 /* Make sure hardware completes it */
1224 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1225 readl, (sts & DMA_GSTS_RTPS), sts);
1227 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1230 void iommu_flush_write_buffer(struct intel_iommu *iommu)
1235 if (!rwbf_quirk && !cap_rwbf(iommu->cap))
1238 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1239 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
1241 /* Make sure hardware completes it */
1242 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1243 readl, (!(val & DMA_GSTS_WBFS)), val);
1245 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1248 /* return value determines whether we need a write buffer flush */
1249 static void __iommu_flush_context(struct intel_iommu *iommu,
1250 u16 did, u16 source_id, u8 function_mask,
1257 case DMA_CCMD_GLOBAL_INVL:
1258 val = DMA_CCMD_GLOBAL_INVL;
1260 case DMA_CCMD_DOMAIN_INVL:
1261 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
1263 case DMA_CCMD_DEVICE_INVL:
1264 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
1265 | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
1270 val |= DMA_CCMD_ICC;
1272 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1273 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
1275 /* Make sure hardware completes it */
1276 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1277 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
1279 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1282 /* return value determines whether we need a write buffer flush */
1283 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1284 u64 addr, unsigned int size_order, u64 type)
1286 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1287 u64 val = 0, val_iva = 0;
1291 case DMA_TLB_GLOBAL_FLUSH:
1292 /* global flush doesn't need to set IVA_REG */
1293 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
1295 case DMA_TLB_DSI_FLUSH:
1296 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1298 case DMA_TLB_PSI_FLUSH:
1299 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1300 /* IH bit is passed in as part of address */
1301 val_iva = size_order | addr;
1306 /* Note: set drain read/write */
1309 * This is probably meant to be extra secure. Looks like we can
1310 * ignore it without any impact.
1312 if (cap_read_drain(iommu->cap))
1313 val |= DMA_TLB_READ_DRAIN;
1315 if (cap_write_drain(iommu->cap))
1316 val |= DMA_TLB_WRITE_DRAIN;
1318 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1319 /* Note: Only uses first TLB reg currently */
1321 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1322 dmar_writeq(iommu->reg + tlb_offset + 8, val);
1324 /* Make sure hardware completes it */
1325 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1326 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1328 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1330 /* check IOTLB invalidation granularity */
1331 if (DMA_TLB_IAIG(val) == 0)
1332 pr_err("Flush IOTLB failed\n");
1333 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
1334 pr_debug("TLB flush request %Lx, actual %Lx\n",
1335 (unsigned long long)DMA_TLB_IIRG(type),
1336 (unsigned long long)DMA_TLB_IAIG(val));
1339 static struct device_domain_info *
1340 iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
1343 struct device_domain_info *info;
1345 assert_spin_locked(&device_domain_lock);
1350 list_for_each_entry(info, &domain->devices, link)
1351 if (info->iommu == iommu && info->bus == bus &&
1352 info->devfn == devfn) {
1353 if (info->ats_supported && info->dev)
1361 static void domain_update_iotlb(struct dmar_domain *domain)
1363 struct device_domain_info *info;
1364 bool has_iotlb_device = false;
1366 assert_spin_locked(&device_domain_lock);
1368 list_for_each_entry(info, &domain->devices, link) {
1369 struct pci_dev *pdev;
1371 if (!info->dev || !dev_is_pci(info->dev))
1374 pdev = to_pci_dev(info->dev);
1375 if (pdev->ats_enabled) {
1376 has_iotlb_device = true;
1381 domain->has_iotlb_device = has_iotlb_device;
1384 static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1386 struct pci_dev *pdev;
1388 assert_spin_locked(&device_domain_lock);
1390 if (!info || !dev_is_pci(info->dev))
1393 pdev = to_pci_dev(info->dev);
1394 /* For IOMMU that supports device IOTLB throttling (DIT), we assign
1395 * PFSID to the invalidation desc of a VF such that IOMMU HW can gauge
1396 * queue depth at PF level. If DIT is not set, PFSID will be treated as
1397 * reserved, which should be set to 0.
1399 if (!ecap_dit(info->iommu->ecap))
1402 struct pci_dev *pf_pdev;
1404 /* pdev will be returned if device is not a vf */
1405 pf_pdev = pci_physfn(pdev);
1406 info->pfsid = pci_dev_id(pf_pdev);
1409 #ifdef CONFIG_INTEL_IOMMU_SVM
1410 /* The PCIe spec, in its wisdom, declares that the behaviour of
1411 the device if you enable PASID support after ATS support is
1412 undefined. So always enable PASID support on devices which
1413 have it, even if we can't yet know if we're ever going to
1415 if (info->pasid_supported && !pci_enable_pasid(pdev, info->pasid_supported & ~1))
1416 info->pasid_enabled = 1;
1418 if (info->pri_supported &&
1419 (info->pasid_enabled ? pci_prg_resp_pasid_required(pdev) : 1) &&
1420 !pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32))
1421 info->pri_enabled = 1;
1423 if (!pdev->untrusted && info->ats_supported &&
1424 pci_ats_page_aligned(pdev) &&
1425 !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
1426 info->ats_enabled = 1;
1427 domain_update_iotlb(info->domain);
1428 info->ats_qdep = pci_ats_queue_depth(pdev);
1432 static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1434 struct pci_dev *pdev;
1436 assert_spin_locked(&device_domain_lock);
1438 if (!dev_is_pci(info->dev))
1441 pdev = to_pci_dev(info->dev);
1443 if (info->ats_enabled) {
1444 pci_disable_ats(pdev);
1445 info->ats_enabled = 0;
1446 domain_update_iotlb(info->domain);
1448 #ifdef CONFIG_INTEL_IOMMU_SVM
1449 if (info->pri_enabled) {
1450 pci_disable_pri(pdev);
1451 info->pri_enabled = 0;
1453 if (info->pasid_enabled) {
1454 pci_disable_pasid(pdev);
1455 info->pasid_enabled = 0;
1460 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1461 u64 addr, unsigned mask)
1464 unsigned long flags;
1465 struct device_domain_info *info;
1467 if (!domain->has_iotlb_device)
1470 spin_lock_irqsave(&device_domain_lock, flags);
1471 list_for_each_entry(info, &domain->devices, link) {
1472 if (!info->ats_enabled)
1475 sid = info->bus << 8 | info->devfn;
1476 qdep = info->ats_qdep;
1477 qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
1480 spin_unlock_irqrestore(&device_domain_lock, flags);
1483 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
1484 struct dmar_domain *domain,
1485 unsigned long pfn, unsigned int pages,
1488 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
1489 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
1490 u16 did = domain->iommu_did[iommu->seq_id];
1497 * Fall back to domain-selective flush if there is no PSI support or the size is too big.
1499 * PSI requires the page size to be 2 ^ x, and the base address to be naturally
1500 * aligned to the size.
1502 if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1503 iommu->flush.flush_iotlb(iommu, did, 0, 0,
1506 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
1510 * In caching mode, changes of pages from non-present to present require
1511 * flush. However, device IOTLB doesn't need to be flushed in this case.
1513 if (!cap_caching_mode(iommu->cap) || !map)
1514 iommu_flush_dev_iotlb(domain, addr, mask);
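/*
 * Illustrative sketch (not part of this driver): the address/mask encoding
 * used for page-selective invalidation above.  "pages" is rounded up to a
 * power of two and the mask is its log2, so a request for 3 pages becomes
 * mask 2 (4 pages) with the base address naturally aligned to that size; if
 * the mask exceeds cap_max_amask_val() the code falls back to a
 * domain-selective flush.  demo_psi_mask() is a made-up helper; the driver
 * uses ilog2() and __roundup_pow_of_two() instead.
 */
static unsigned int demo_psi_mask(unsigned int pages)
{
        unsigned int mask = 0;

        while ((1U << mask) < pages)    /* round up to the next power of two */
                mask++;
        /* e.g. demo_psi_mask(1) == 0, demo_psi_mask(3) == 2, demo_psi_mask(4) == 2 */
        return mask;
}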
1517 /* Notification for newly created mappings */
1518 static inline void __mapping_notify_one(struct intel_iommu *iommu,
1519 struct dmar_domain *domain,
1520 unsigned long pfn, unsigned int pages)
1522 /* It's a non-present to present mapping. Only flush if caching mode */
1523 if (cap_caching_mode(iommu->cap))
1524 iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1);
1526 iommu_flush_write_buffer(iommu);
1529 static void iommu_flush_iova(struct iova_domain *iovad)
1531 struct dmar_domain *domain;
1534 domain = container_of(iovad, struct dmar_domain, iovad);
1536 for_each_domain_iommu(idx, domain) {
1537 struct intel_iommu *iommu = g_iommus[idx];
1538 u16 did = domain->iommu_did[iommu->seq_id];
1540 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
1542 if (!cap_caching_mode(iommu->cap))
1543 iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
1544 0, MAX_AGAW_PFN_WIDTH);
1548 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1551 unsigned long flags;
1553 if (!cap_plmr(iommu->cap) && !cap_phmr(iommu->cap))
1556 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1557 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1558 pmen &= ~DMA_PMEN_EPM;
1559 writel(pmen, iommu->reg + DMAR_PMEN_REG);
1561 /* wait for the protected region status bit to clear */
1562 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1563 readl, !(pmen & DMA_PMEN_PRS), pmen);
1565 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1568 static void iommu_enable_translation(struct intel_iommu *iommu)
1571 unsigned long flags;
1573 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1574 iommu->gcmd |= DMA_GCMD_TE;
1575 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1577 /* Make sure hardware completes it */
1578 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1579 readl, (sts & DMA_GSTS_TES), sts);
1581 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1584 static void iommu_disable_translation(struct intel_iommu *iommu)
1589 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1590 iommu->gcmd &= ~DMA_GCMD_TE;
1591 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1593 /* Make sure hardware completes it */
1594 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1595 readl, (!(sts & DMA_GSTS_TES)), sts);
1597 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1600 static int iommu_init_domains(struct intel_iommu *iommu)
1602 u32 ndomains, nlongs;
1605 ndomains = cap_ndoms(iommu->cap);
1606 pr_debug("%s: Number of Domains supported <%d>\n",
1607 iommu->name, ndomains);
1608 nlongs = BITS_TO_LONGS(ndomains);
1610 spin_lock_init(&iommu->lock);
1612 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1613 if (!iommu->domain_ids) {
1614 pr_err("%s: Allocating domain id array failed\n",
1619 size = (ALIGN(ndomains, 256) >> 8) * sizeof(struct dmar_domain **);
1620 iommu->domains = kzalloc(size, GFP_KERNEL);
1622 if (iommu->domains) {
1623 size = 256 * sizeof(struct dmar_domain *);
1624 iommu->domains[0] = kzalloc(size, GFP_KERNEL);
1627 if (!iommu->domains || !iommu->domains[0]) {
1628 pr_err("%s: Allocating domain array failed\n",
1630 kfree(iommu->domain_ids);
1631 kfree(iommu->domains);
1632 iommu->domain_ids = NULL;
1633 iommu->domains = NULL;
1638 * If Caching mode is set, then invalid translations are tagged
1639 * with domain-id 0, hence we need to pre-allocate it. We also
1640 * use domain-id 0 as a marker for non-allocated domain-id, so
1641 * make sure it is not used for a real domain.
1643 set_bit(0, iommu->domain_ids);
1646 * Vt-d spec rev3.0 (section 6.2.3.1) requires that each pasid
1647 * entry for first-level or pass-through translation modes should
1648 * be programmed with a domain id different from those used for
1649 * second-level or nested translation. We reserve a domain id for this purpose.
1652 if (sm_supported(iommu))
1653 set_bit(FLPT_DEFAULT_DID, iommu->domain_ids);
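/*
 * Illustrative sketch (not part of this driver): handing out a domain id from
 * a bitmap with the reserved ids pre-set, as done above.  Bit 0 is set up
 * front so id 0 is never handed out (caching-mode hardware tags invalid
 * translations with it, and it doubles as the "not allocated" marker), and
 * the allocator simply takes the first clear bit.  demo_alloc_domain_id() is
 * made up; the driver uses find_first_zero_bit() over iommu->domain_ids.
 */
static int demo_alloc_domain_id(unsigned long *ids, unsigned int ndomains)
{
        unsigned int id;

        for (id = 0; id < ndomains; id++) {
                unsigned int word = id / (8 * sizeof(long));
                unsigned long bit = 1UL << (id % (8 * sizeof(long)));

                if (!(ids[word] & bit)) {
                        ids[word] |= bit;
                        return id;      /* first free id; 0 was reserved beforehand */
                }
        }
        return -1;      /* no free domain ids */
}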
1658 static void disable_dmar_iommu(struct intel_iommu *iommu)
1660 struct device_domain_info *info, *tmp;
1661 unsigned long flags;
1663 if (!iommu->domains || !iommu->domain_ids)
1666 spin_lock_irqsave(&device_domain_lock, flags);
1667 list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
1668 if (info->iommu != iommu)
1671 if (!info->dev || !info->domain)
1674 __dmar_remove_one_dev_info(info);
1676 spin_unlock_irqrestore(&device_domain_lock, flags);
1678 if (iommu->gcmd & DMA_GCMD_TE)
1679 iommu_disable_translation(iommu);
1682 static void free_dmar_iommu(struct intel_iommu *iommu)
1684 if ((iommu->domains) && (iommu->domain_ids)) {
1685 int elems = ALIGN(cap_ndoms(iommu->cap), 256) >> 8;
1688 for (i = 0; i < elems; i++)
1689 kfree(iommu->domains[i]);
1690 kfree(iommu->domains);
1691 kfree(iommu->domain_ids);
1692 iommu->domains = NULL;
1693 iommu->domain_ids = NULL;
1696 g_iommus[iommu->seq_id] = NULL;
1698 /* free context mapping */
1699 free_context_table(iommu);
1701 #ifdef CONFIG_INTEL_IOMMU_SVM
1702 if (pasid_supported(iommu)) {
1703 if (ecap_prs(iommu->ecap))
1704 intel_svm_finish_prq(iommu);
1709 static struct dmar_domain *alloc_domain(int flags)
1711 struct dmar_domain *domain;
1713 domain = alloc_domain_mem();
1717 memset(domain, 0, sizeof(*domain));
1718 domain->nid = NUMA_NO_NODE;
1719 domain->flags = flags;
1720 domain->has_iotlb_device = false;
1721 INIT_LIST_HEAD(&domain->devices);
1726 /* Must be called with iommu->lock */
1727 static int domain_attach_iommu(struct dmar_domain *domain,
1728 struct intel_iommu *iommu)
1730 unsigned long ndomains;
1733 assert_spin_locked(&device_domain_lock);
1734 assert_spin_locked(&iommu->lock);
1736 domain->iommu_refcnt[iommu->seq_id] += 1;
1737 domain->iommu_count += 1;
1738 if (domain->iommu_refcnt[iommu->seq_id] == 1) {
1739 ndomains = cap_ndoms(iommu->cap);
1740 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1742 if (num >= ndomains) {
1743 pr_err("%s: No free domain ids\n", iommu->name);
1744 domain->iommu_refcnt[iommu->seq_id] -= 1;
1745 domain->iommu_count -= 1;
1749 set_bit(num, iommu->domain_ids);
1750 set_iommu_domain(iommu, num, domain);
1752 domain->iommu_did[iommu->seq_id] = num;
1753 domain->nid = iommu->node;
1755 domain_update_iommu_cap(domain);
1761 static int domain_detach_iommu(struct dmar_domain *domain,
1762 struct intel_iommu *iommu)
1766 assert_spin_locked(&device_domain_lock);
1767 assert_spin_locked(&iommu->lock);
1769 domain->iommu_refcnt[iommu->seq_id] -= 1;
1770 count = --domain->iommu_count;
1771 if (domain->iommu_refcnt[iommu->seq_id] == 0) {
1772 num = domain->iommu_did[iommu->seq_id];
1773 clear_bit(num, iommu->domain_ids);
1774 set_iommu_domain(iommu, num, NULL);
1776 domain_update_iommu_cap(domain);
1777 domain->iommu_did[iommu->seq_id] = 0;
1783 static struct iova_domain reserved_iova_list;
1784 static struct lock_class_key reserved_rbtree_key;
1786 static int dmar_init_reserved_ranges(void)
1788 struct pci_dev *pdev = NULL;
1792 init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN);
1794 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1795 &reserved_rbtree_key);
1797 /* IOAPIC ranges shouldn't be accessed by DMA */
1798 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1799 IOVA_PFN(IOAPIC_RANGE_END));
1801 pr_err("Reserve IOAPIC range failed\n");
1805 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1806 for_each_pci_dev(pdev) {
1809 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1810 r = &pdev->resource[i];
1811 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1813 iova = reserve_iova(&reserved_iova_list,
1817 pci_err(pdev, "Reserve iova for %pR failed\n", r);
1825 static void domain_reserve_special_ranges(struct dmar_domain *domain)
1827 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1830 static inline int guestwidth_to_adjustwidth(int gaw)
1833 int r = (gaw - 12) % 9;
1844 static void domain_exit(struct dmar_domain *domain)
1846 struct page *freelist;
1848 /* Remove associated devices and clear attached or cached domains */
1849 domain_remove_dev_info(domain);
1852 put_iova_domain(&domain->iovad);
1854 freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
1856 dma_free_pagelist(freelist);
1858 free_domain_mem(domain);
1862 * Get the PASID directory size for scalable mode context entry.
1863 * Value of X in the PDTS field of a scalable mode context entry
1864 * indicates PASID directory with 2^(X + 7) entries.
1866 static inline unsigned long context_get_sm_pds(struct pasid_table *table)
1870 max_pde = table->max_pasid >> PASID_PDE_SHIFT;
1871 pds = find_first_bit((unsigned long *)&max_pde, MAX_NR_PASID_BITS);
1879 * Set the RID_PASID field of a scalable mode context entry. The
1880 * IOMMU hardware will use the PASID value set in this field for
1881 * DMA translations of DMA requests without PASID.
1884 context_set_sm_rid2pasid(struct context_entry *context, unsigned long pasid)
1886 context->hi |= pasid & ((1 << 20) - 1);
1887 context->hi |= (1 << 20);
1891 * Set the DTE(Device-TLB Enable) field of a scalable mode context
1894 static inline void context_set_sm_dte(struct context_entry *context)
1896 context->lo |= (1 << 2);
1900 * Set the PRE(Page Request Enable) field of a scalable mode context
1903 static inline void context_set_sm_pre(struct context_entry *context)
1905 context->lo |= (1 << 4);
1908 /* Convert value to context PASID directory size field coding. */
1909 #define context_pdts(pds) (((pds) & 0x7) << 9)
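/*
 * Illustrative sketch (not part of this driver): how the scalable-mode
 * context entry helpers above combine into the two 64-bit words.  The low
 * word carries the (page-aligned) PASID directory pointer, the PDTS size
 * field in bits 9-11 (2^(X + 7) directory entries), the DTE bit (2) and the
 * PRE bit (4); the high word carries the PASID used for requests-without-
 * PASID in bits 0-19.  The demo_* names and the simple struct are made up
 * for illustration.
 */
struct demo_sm_context {
        unsigned long long lo;
        unsigned long long hi;
};

static void demo_build_sm_context(struct demo_sm_context *ce,
                                  unsigned long long pasid_dir_pa,
                                  unsigned int pds, unsigned int rid_pasid,
                                  int dte, int pre)
{
        ce->lo = (pasid_dir_pa & ~0xfffULL)     /* directory pointer, 4KiB aligned */
               | ((pds & 0x7) << 9)             /* 2^(pds + 7) directory entries   */
               | (dte ? (1 << 2) : 0)           /* Device-TLB enable               */
               | (pre ? (1 << 4) : 0);          /* Page request enable             */
        ce->hi = rid_pasid & ((1 << 20) - 1);   /* RID_PASID field                 */
}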
1911 static int domain_context_mapping_one(struct dmar_domain *domain,
1912 struct intel_iommu *iommu,
1913 struct pasid_table *table,
1916 u16 did = domain->iommu_did[iommu->seq_id];
1917 int translation = CONTEXT_TT_MULTI_LEVEL;
1918 struct device_domain_info *info = NULL;
1919 struct context_entry *context;
1920 unsigned long flags;
1925 if (hw_pass_through && domain_type_is_si(domain))
1926 translation = CONTEXT_TT_PASS_THROUGH;
1928 pr_debug("Set context mapping for %02x:%02x.%d\n",
1929 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
1931 BUG_ON(!domain->pgd);
1933 spin_lock_irqsave(&device_domain_lock, flags);
1934 spin_lock(&iommu->lock);
1937 context = iommu_context_addr(iommu, bus, devfn, 1);
1942 if (context_present(context))
1946 * For kdump cases, old valid entries may be cached due to the
1947 * in-flight DMA and copied pgtable, but there is no unmapping
1948 * behaviour for them, thus we need an explicit cache flush for
1949 * the newly-mapped device. For kdump, at this point, the device
1950 * is supposed to finish reset at its driver probe stage, so no
1951 * in-flight DMA will exist, and we don't need to worry anymore
1954 if (context_copied(context)) {
1955 u16 did_old = context_domain_id(context);
1957 if (did_old < cap_ndoms(iommu->cap)) {
1958 iommu->flush.flush_context(iommu, did_old,
1959 (((u16)bus) << 8) | devfn,
1960 DMA_CCMD_MASK_NOBIT,
1961 DMA_CCMD_DEVICE_INVL);
1962 iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
1967 context_clear_entry(context);
1969 if (sm_supported(iommu)) {
1974 /* Setup the PASID DIR pointer: */
1975 pds = context_get_sm_pds(table);
1976 context->lo = (u64)virt_to_phys(table->table) |
1979 /* Setup the RID_PASID field: */
1980 context_set_sm_rid2pasid(context, PASID_RID2PASID);
1983 * Setup the Device-TLB enable bit and Page request
1986 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
1987 if (info && info->ats_supported)
1988 context_set_sm_dte(context);
1989 if (info && info->pri_supported)
1990 context_set_sm_pre(context);
1992 struct dma_pte *pgd = domain->pgd;
1995 context_set_domain_id(context, did);
1997 if (translation != CONTEXT_TT_PASS_THROUGH) {
1999 * Skip top levels of page tables for iommu which has
2000 * less agaw than default. Unnecessary for PT mode.
2002 for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
2004 pgd = phys_to_virt(dma_pte_addr(pgd));
2005 if (!dma_pte_present(pgd))
2009 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
2010 if (info && info->ats_supported)
2011 translation = CONTEXT_TT_DEV_IOTLB;
2013 translation = CONTEXT_TT_MULTI_LEVEL;
2015 context_set_address_root(context, virt_to_phys(pgd));
2016 context_set_address_width(context, agaw);
2019 * In pass through mode, AW must be programmed to
2020 * indicate the largest AGAW value supported by
2021 * hardware. And ASR is ignored by hardware.
2023 context_set_address_width(context, iommu->msagaw);
2026 context_set_translation_type(context, translation);
2029 context_set_fault_enable(context);
2030 context_set_present(context);
2031 domain_flush_cache(domain, context, sizeof(*context));
2034 * It's a non-present to present mapping. If hardware doesn't cache
2035 * non-present entries we only need to flush the write-buffer. If it
2036 * _does_ cache non-present entries, then it does so in the special
2037 * domain #0, which we have to flush:
2039 if (cap_caching_mode(iommu->cap)) {
2040 iommu->flush.flush_context(iommu, 0,
2041 (((u16)bus) << 8) | devfn,
2042 DMA_CCMD_MASK_NOBIT,
2043 DMA_CCMD_DEVICE_INVL);
2044 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
2046 iommu_flush_write_buffer(iommu);
2048 iommu_enable_dev_iotlb(info);
2053 spin_unlock(&iommu->lock);
2054 spin_unlock_irqrestore(&device_domain_lock, flags);
2059 struct domain_context_mapping_data {
2060 struct dmar_domain *domain;
2061 struct intel_iommu *iommu;
2062 struct pasid_table *table;
2065 static int domain_context_mapping_cb(struct pci_dev *pdev,
2066 u16 alias, void *opaque)
2068 struct domain_context_mapping_data *data = opaque;
2070 return domain_context_mapping_one(data->domain, data->iommu,
2071 data->table, PCI_BUS_NUM(alias),
2076 domain_context_mapping(struct dmar_domain *domain, struct device *dev)
2078 struct domain_context_mapping_data data;
2079 struct pasid_table *table;
2080 struct intel_iommu *iommu;
2083 iommu = device_to_iommu(dev, &bus, &devfn);
2087 table = intel_pasid_get_table(dev);
2089 if (!dev_is_pci(dev))
2090 return domain_context_mapping_one(domain, iommu, table,
2093 data.domain = domain;
2097 return pci_for_each_dma_alias(to_pci_dev(dev),
2098 &domain_context_mapping_cb, &data);
2101 static int domain_context_mapped_cb(struct pci_dev *pdev,
2102 u16 alias, void *opaque)
2104 struct intel_iommu *iommu = opaque;
2106 return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
2109 static int domain_context_mapped(struct device *dev)
2111 struct intel_iommu *iommu;
2114 iommu = device_to_iommu(dev, &bus, &devfn);
2118 if (!dev_is_pci(dev))
2119 return device_context_mapped(iommu, bus, devfn);
2121 return !pci_for_each_dma_alias(to_pci_dev(dev),
2122 domain_context_mapped_cb, iommu);
2125 /* Returns a number of VTD pages, but aligned to MM page size */
2126 static inline unsigned long aligned_nrpages(unsigned long host_addr,
2129 host_addr &= ~PAGE_MASK;
2130 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
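/*
 * Illustrative sketch (not part of this driver): the arithmetic done by
 * aligned_nrpages() above, with an explicit 4KiB MM page size instead of
 * PAGE_MASK/PAGE_ALIGN.  The offset within the MM page plus the length is
 * rounded up to a whole number of MM pages and then expressed in 4KiB VT-d
 * pages.  demo_aligned_nrpages() is a made-up name.
 */
static unsigned long demo_aligned_nrpages(unsigned long host_addr, unsigned long size)
{
        unsigned long off = host_addr & (4096 - 1);     /* offset inside the MM page */

        /* e.g. demo_aligned_nrpages(0x600, 0x1200) == 2:
         *      0x600 + 0x1200 = 0x1800, rounded up to 0x2000, i.e. two 4KiB pages */
        return ((off + size + 4096 - 1) & ~(unsigned long)(4096 - 1)) >> 12;
}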
2133 /* Return largest possible superpage level for a given mapping */
2134 static inline int hardware_largepage_caps(struct dmar_domain *domain,
2135 unsigned long iov_pfn,
2136 unsigned long phy_pfn,
2137 unsigned long pages)
2139 int support, level = 1;
2140 unsigned long pfnmerge;
2142 support = domain->iommu_superpage;
2144 /* To use a large page, the virtual *and* physical addresses
2145 must be aligned to 2MiB/1GiB/etc. Lower bits set in either
2146 of them will mean we have to use smaller pages. So just
2147 merge them and check both at once. */
2148 pfnmerge = iov_pfn | phy_pfn;
2150 while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
2151 pages >>= VTD_STRIDE_SHIFT;
2154 pfnmerge >>= VTD_STRIDE_SHIFT;
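/*
 * Illustrative sketch (not part of this driver): the alignment test behind
 * hardware_largepage_caps() above.  A 2MiB mapping needs both the IOVA pfn
 * and the physical pfn to be 512-page aligned, with at least 512 pages left
 * to map; OR-ing the two pfns lets a single mask check cover both addresses.
 * demo_can_use_2m() is a made-up helper for illustration.
 */
static int demo_can_use_2m(unsigned long iov_pfn, unsigned long phys_pfn,
                           unsigned long pages)
{
        unsigned long pfnmerge = iov_pfn | phys_pfn;

        /* e.g. demo_can_use_2m(0x200, 0x4400, 0x300) != 0,
         *      demo_can_use_2m(0x201, 0x4400, 0x300) == 0 (IOVA not aligned) */
        return !(pfnmerge & ((1UL << 9) - 1)) && pages >= (1UL << 9);
}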
2161 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2162 struct scatterlist *sg, unsigned long phys_pfn,
2163 unsigned long nr_pages, int prot)
2165 struct dma_pte *first_pte = NULL, *pte = NULL;
2166 phys_addr_t uninitialized_var(pteval);
2167 unsigned long sg_res = 0;
2168 unsigned int largepage_lvl = 0;
2169 unsigned long lvl_pages = 0;
2171 BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));
2173 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
2176 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
2180 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
2183 while (nr_pages > 0) {
2187 unsigned int pgoff = sg->offset & ~PAGE_MASK;
2189 sg_res = aligned_nrpages(sg->offset, sg->length);
2190 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + pgoff;
2191 sg->dma_length = sg->length;
2192 pteval = (sg_phys(sg) - pgoff) | prot;
2193 phys_pfn = pteval >> VTD_PAGE_SHIFT;
2197 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
2199 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
2202 /* It is a large page */
2203 if (largepage_lvl > 1) {
2204 unsigned long nr_superpages, end_pfn;
2206 pteval |= DMA_PTE_LARGE_PAGE;
2207 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2209 nr_superpages = sg_res / lvl_pages;
2210 end_pfn = iov_pfn + nr_superpages * lvl_pages - 1;
2213 * Ensure that old small page tables are
2214 * removed to make room for superpage(s).
2215 * We're adding new large pages, so make sure
2216 * we don't remove their parent tables.
2218 dma_pte_free_pagetable(domain, iov_pfn, end_pfn,
2221 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
2225 /* We don't need lock here, nobody else
2226 * touches the iova range
2228 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
2230 static int dumps = 5;
2231 pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
2232 iov_pfn, tmp, (unsigned long long)pteval);
2235 debug_dma_dump_mappings(NULL);
2240 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2242 BUG_ON(nr_pages < lvl_pages);
2243 BUG_ON(sg_res < lvl_pages);
2245 nr_pages -= lvl_pages;
2246 iov_pfn += lvl_pages;
2247 phys_pfn += lvl_pages;
2248 pteval += lvl_pages * VTD_PAGE_SIZE;
2249 sg_res -= lvl_pages;
2251 /* If the next PTE would be the first in a new page, then we
2252 need to flush the cache on the entries we've just written.
2253 And then we'll need to recalculate 'pte', so clear it and
2254 let it get set again in the if (!pte) block above.
2256 If we're done (!nr_pages) we need to flush the cache too.
2258 Also if we've been setting superpages, we may need to
2259 recalculate 'pte' and switch back to smaller pages for the
2260 end of the mapping, if the trailing size is not enough to
2261 use another superpage (i.e. sg_res < lvl_pages). */
2263 if (!nr_pages || first_pte_in_page(pte) ||
2264 (largepage_lvl > 1 && sg_res < lvl_pages)) {
2265 domain_flush_cache(domain, first_pte,
2266 (void *)pte - (void *)first_pte);
2270 if (!sg_res && nr_pages)
2276 static int domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2277 struct scatterlist *sg, unsigned long phys_pfn,
2278 unsigned long nr_pages, int prot)
2281 struct intel_iommu *iommu;
2283 /* Do the real mapping first */
2284 ret = __domain_mapping(domain, iov_pfn, sg, phys_pfn, nr_pages, prot);
2288 for_each_domain_iommu(iommu_id, domain) {
2289 iommu = g_iommus[iommu_id];
2290 __mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
2296 static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2297 struct scatterlist *sg, unsigned long nr_pages,
2300 return domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
2303 static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2304 unsigned long phys_pfn, unsigned long nr_pages,
2307 return domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
2310 static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
2312 unsigned long flags;
2313 struct context_entry *context;
2319 spin_lock_irqsave(&iommu->lock, flags);
2320 context = iommu_context_addr(iommu, bus, devfn, 0);
2322 spin_unlock_irqrestore(&iommu->lock, flags);
2325 did_old = context_domain_id(context);
2326 context_clear_entry(context);
2327 __iommu_flush_cache(iommu, context, sizeof(*context));
2328 spin_unlock_irqrestore(&iommu->lock, flags);
2329 iommu->flush.flush_context(iommu,
2331 (((u16)bus) << 8) | devfn,
2332 DMA_CCMD_MASK_NOBIT,
2333 DMA_CCMD_DEVICE_INVL);
2334 iommu->flush.flush_iotlb(iommu,
2341 static inline void unlink_domain_info(struct device_domain_info *info)
2343 assert_spin_locked(&device_domain_lock);
2344 list_del(&info->link);
2345 list_del(&info->global);
2347 info->dev->archdata.iommu = NULL;
2350 static void domain_remove_dev_info(struct dmar_domain *domain)
2352 struct device_domain_info *info, *tmp;
2353 unsigned long flags;
2355 spin_lock_irqsave(&device_domain_lock, flags);
2356 list_for_each_entry_safe(info, tmp, &domain->devices, link)
2357 __dmar_remove_one_dev_info(info);
2358 spin_unlock_irqrestore(&device_domain_lock, flags);
2363 * Note: struct device->archdata.iommu stores the info
2365 static struct dmar_domain *find_domain(struct device *dev)
2367 struct device_domain_info *info;
2369 if (unlikely(dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO)) {
2370 struct iommu_domain *domain;
2372 dev->archdata.iommu = NULL;
2373 domain = iommu_get_domain_for_dev(dev);
2375 intel_iommu_attach_device(domain, dev);
2378 /* No lock here, assumes no domain exit in normal case */
2379 info = dev->archdata.iommu;
2382 return info->domain;
2386 static inline struct device_domain_info *
2387 dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
2389 struct device_domain_info *info;
2391 list_for_each_entry(info, &device_domain_list, global)
2392 if (info->iommu->segment == segment && info->bus == bus &&
2393 info->devfn == devfn)
2399 static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
2402 struct dmar_domain *domain)
2404 struct dmar_domain *found = NULL;
2405 struct device_domain_info *info;
2406 unsigned long flags;
2409 info = alloc_devinfo_mem();
2414 info->devfn = devfn;
2415 info->ats_supported = info->pasid_supported = info->pri_supported = 0;
2416 info->ats_enabled = info->pasid_enabled = info->pri_enabled = 0;
2419 info->domain = domain;
2420 info->iommu = iommu;
2421 info->pasid_table = NULL;
2422 info->auxd_enabled = 0;
2423 INIT_LIST_HEAD(&info->auxiliary_domains);
2425 if (dev && dev_is_pci(dev)) {
2426 struct pci_dev *pdev = to_pci_dev(info->dev);
2428 if (!pdev->untrusted &&
2429 !pci_ats_disabled() &&
2430 ecap_dev_iotlb_support(iommu->ecap) &&
2431 pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS) &&
2432 dmar_find_matched_atsr_unit(pdev))
2433 info->ats_supported = 1;
2435 if (sm_supported(iommu)) {
2436 if (pasid_supported(iommu)) {
2437 int features = pci_pasid_features(pdev);
2439 info->pasid_supported = features | 1;
2442 if (info->ats_supported && ecap_prs(iommu->ecap) &&
2443 pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI))
2444 info->pri_supported = 1;
2448 spin_lock_irqsave(&device_domain_lock, flags);
2450 found = find_domain(dev);
2453 struct device_domain_info *info2;
2454 info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
2456 found = info2->domain;
2462 spin_unlock_irqrestore(&device_domain_lock, flags);
2463 free_devinfo_mem(info);
2464 /* Caller must free the original domain */
2468 spin_lock(&iommu->lock);
2469 ret = domain_attach_iommu(domain, iommu);
2470 spin_unlock(&iommu->lock);
2473 spin_unlock_irqrestore(&device_domain_lock, flags);
2474 free_devinfo_mem(info);
2478 list_add(&info->link, &domain->devices);
2479 list_add(&info->global, &device_domain_list);
2481 dev->archdata.iommu = info;
2482 spin_unlock_irqrestore(&device_domain_lock, flags);
2484 /* PASID table is mandatory for a PCI device in scalable mode. */
2485 if (dev && dev_is_pci(dev) && sm_supported(iommu)) {
2486 ret = intel_pasid_alloc_table(dev);
2488 dev_err(dev, "PASID table allocation failed\n");
2489 dmar_remove_one_dev_info(dev);
2493 /* Set up the PASID entry for requests without PASID: */
2494 spin_lock(&iommu->lock);
2495 if (hw_pass_through && domain_type_is_si(domain))
2496 ret = intel_pasid_setup_pass_through(iommu, domain,
2497 dev, PASID_RID2PASID);
2499 ret = intel_pasid_setup_second_level(iommu, domain,
2500 dev, PASID_RID2PASID);
2501 spin_unlock(&iommu->lock);
2503 dev_err(dev, "Setup RID2PASID failed\n");
2504 dmar_remove_one_dev_info(dev);
2509 if (dev && domain_context_mapping(domain, dev)) {
2510 dev_err(dev, "Domain context map failed\n");
2511 dmar_remove_one_dev_info(dev);
2518 static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
2520 *(u16 *)opaque = alias;
2524 static int domain_init(struct dmar_domain *domain, int guest_width)
2528 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
2529 domain_reserve_special_ranges(domain);
2531 /* calculate AGAW */
2532 domain->gaw = guest_width;
2533 adjust_width = guestwidth_to_adjustwidth(guest_width);
2534 domain->agaw = width_to_agaw(adjust_width);
2536 domain->iommu_coherency = 0;
2537 domain->iommu_snooping = 0;
2538 domain->iommu_superpage = 0;
2539 domain->max_addr = 0;
2541 /* always allocate the top pgd */
2542 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
2545 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
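/*
 * Worked example (illustrative only): for a 48-bit guest address width,
 * (48 - 12) is an exact multiple of the 9-bit level stride, so the
 * adjusted width stays 48 and the domain gets a 4-level page table; a
 * 39-bit width needs 3 levels. A minimal sketch of that width-to-level
 * calculation, assuming 4KiB pages and a 9-bit stride (helper name is
 * hypothetical):
 */
static inline int example_width_to_levels(int width)
{
	return DIV_ROUND_UP(width - 12, 9);	/* 48 -> 4, 39 -> 3 */
}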
2549 static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
2551 struct device_domain_info *info;
2552 struct dmar_domain *domain = NULL;
2553 struct intel_iommu *iommu;
2555 unsigned long flags;
2558 iommu = device_to_iommu(dev, &bus, &devfn);
2562 if (dev_is_pci(dev)) {
2563 struct pci_dev *pdev = to_pci_dev(dev);
2565 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2567 spin_lock_irqsave(&device_domain_lock, flags);
2568 info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
2569 PCI_BUS_NUM(dma_alias),
2572 iommu = info->iommu;
2573 domain = info->domain;
2575 spin_unlock_irqrestore(&device_domain_lock, flags);
2577 /* DMA alias already has a domain, use it */
2582 /* Allocate and initialize new domain for the device */
2583 domain = alloc_domain(0);
2587 if (domain_init(domain, gaw)) {
2588 domain_exit(domain);
2592 if (init_iova_flush_queue(&domain->iovad,
2595 pr_warn("iova flush queue initialization failed\n");
2596 intel_iommu_strict = 1;
2603 static struct dmar_domain *set_domain_for_dev(struct device *dev,
2604 struct dmar_domain *domain)
2606 struct intel_iommu *iommu;
2607 struct dmar_domain *tmp;
2608 u16 req_id, dma_alias;
2611 iommu = device_to_iommu(dev, &bus, &devfn);
2615 req_id = ((u16)bus << 8) | devfn;
2617 if (dev_is_pci(dev)) {
2618 struct pci_dev *pdev = to_pci_dev(dev);
2620 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2622 /* register PCI DMA alias device */
2623 if (req_id != dma_alias) {
2624 tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias),
2625 dma_alias & 0xff, NULL, domain);
2627 if (!tmp || tmp != domain)
2632 tmp = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
2633 if (!tmp || tmp != domain)
2639 static int iommu_domain_identity_map(struct dmar_domain *domain,
2640 unsigned long long start,
2641 unsigned long long end)
2643 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2644 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
2646 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2647 dma_to_mm_pfn(last_vpfn))) {
2648 pr_err("Reserving iova failed\n");
2652 pr_debug("Mapping reserved region %llx-%llx\n", start, end);
2654 * RMRR range might have overlap with physical memory range,
2657 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
2659 return __domain_mapping(domain, first_vpfn, NULL,
2660 first_vpfn, last_vpfn - first_vpfn + 1,
2661 DMA_PTE_READ|DMA_PTE_WRITE);
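/*
 * Worked example (illustrative): an RMRR spanning 0xe0000..0xfffff is
 * identity-mapped above as VT-d pfns 0xe0..0xff, i.e. 32 contiguous 4KiB
 * pages whose IOVA equals their physical address. Hypothetical helper:
 */
static inline unsigned long example_identity_nr_pages(u64 start, u64 end)
{
	return (end >> VTD_PAGE_SHIFT) - (start >> VTD_PAGE_SHIFT) + 1;
}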
2664 static int domain_prepare_identity_map(struct device *dev,
2665 struct dmar_domain *domain,
2666 unsigned long long start,
2667 unsigned long long end)
2669 /* For _hardware_ passthrough, don't bother. But for software
2670 passthrough, we do it anyway -- it may indicate a memory
2671 range which is reserved in E820, and so didn't get set
2672 up to start with in si_domain */
2673 if (domain == si_domain && hw_pass_through) {
2674 dev_warn(dev, "Ignoring identity map for HW passthrough [0x%Lx - 0x%Lx]\n",
2679 dev_info(dev, "Setting identity map [0x%Lx - 0x%Lx]\n", start, end);
2682 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2683 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2684 dmi_get_system_info(DMI_BIOS_VENDOR),
2685 dmi_get_system_info(DMI_BIOS_VERSION),
2686 dmi_get_system_info(DMI_PRODUCT_VERSION));
2690 if (end >> agaw_to_width(domain->agaw)) {
2691 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2692 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2693 agaw_to_width(domain->agaw),
2694 dmi_get_system_info(DMI_BIOS_VENDOR),
2695 dmi_get_system_info(DMI_BIOS_VERSION),
2696 dmi_get_system_info(DMI_PRODUCT_VERSION));
2700 return iommu_domain_identity_map(domain, start, end);
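/*
 * Illustrative note: the width check above rejects an RMRR whose end
 * address needs more bits than the domain supports, e.g. with a 48-bit
 * AGAW anything at or above 1ULL << 48 makes "end >> 48" non-zero and
 * triggers the WARN.
 */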
2703 static int __init si_domain_init(int hw)
2705 struct dmar_rmrr_unit *rmrr;
2709 si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
2713 if (domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2714 domain_exit(si_domain);
2721 for_each_online_node(nid) {
2722 unsigned long start_pfn, end_pfn;
2725 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2726 ret = iommu_domain_identity_map(si_domain,
2727 PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2734 * Normally we use DMA domains for devices which have RMRRs. But we
2735 * lose this requirement for graphics and USB devices. Identity map
2736 * the RMRRs for graphics and USB devices so that they can use the
2739 for_each_rmrr_units(rmrr) {
2740 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
2742 unsigned long long start = rmrr->base_address;
2743 unsigned long long end = rmrr->end_address;
2745 if (device_is_rmrr_locked(dev))
2748 if (WARN_ON(end < start ||
2749 end >> agaw_to_width(si_domain->agaw)))
2752 ret = iommu_domain_identity_map(si_domain, start, end);
2761 static int identity_mapping(struct device *dev)
2763 struct device_domain_info *info;
2765 info = dev->archdata.iommu;
2766 if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2767 return (info->domain == si_domain);
2772 static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
2774 struct dmar_domain *ndomain;
2775 struct intel_iommu *iommu;
2778 iommu = device_to_iommu(dev, &bus, &devfn);
2782 ndomain = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
2783 if (ndomain != domain)
2789 static bool device_has_rmrr(struct device *dev)
2791 struct dmar_rmrr_unit *rmrr;
2796 for_each_rmrr_units(rmrr) {
2798 * Return TRUE if this RMRR contains the device that
2801 for_each_active_dev_scope(rmrr->devices,
2802 rmrr->devices_cnt, i, tmp)
2804 is_downstream_to_pci_bridge(dev, tmp)) {
2814 * device_rmrr_is_relaxable - Test whether the RMRR of this device
2815 * is relaxable (i.e. is allowed to go unenforced under some conditions)
2816 * @dev: device handle
2818 * We assume that PCI USB devices with RMRRs have them largely
2819 * for historical reasons and that the RMRR space is not actively used post
2820 * boot. This exclusion may change if vendors begin to abuse it.
2822 * The same exception is made for graphics devices, with the requirement that
2823 * any use of the RMRR regions will be torn down before assigning the device
2826 * Return: true if the RMRR is relaxable, false otherwise
2828 static bool device_rmrr_is_relaxable(struct device *dev)
2830 struct pci_dev *pdev;
2832 if (!dev_is_pci(dev))
2835 pdev = to_pci_dev(dev);
2836 if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
2843 * There are a couple cases where we need to restrict the functionality of
2844 * devices associated with RMRRs. The first is when evaluating a device for
2845 * identity mapping because problems exist when devices are moved in and out
2846 * of domains and their respective RMRR information is lost. This means that
2847 * a device with associated RMRRs will never be in a "passthrough" domain.
2848 * The second is use of the device through the IOMMU API. This interface
2849 * expects to have full control of the IOVA space for the device. We cannot
2850 * satisfy both the requirement that RMRR access is maintained and have an
2851 * unencumbered IOVA space. We also have no ability to quiesce the device's
2852 * use of the RMRR space or even inform the IOMMU API user of the restriction.
2853 * We therefore prevent devices associated with an RMRR from participating in
2854 * the IOMMU API, which eliminates them from device assignment.
2856 * In both cases, devices which have relaxable RMRRs are not affected by this
2857 * restriction. See device_rmrr_is_relaxable comment.
2859 static bool device_is_rmrr_locked(struct device *dev)
2861 if (!device_has_rmrr(dev))
2864 if (device_rmrr_is_relaxable(dev))
2871 * Return the required default domain type for a specific device.
2873 * @dev: the device being queried
2877 * - IOMMU_DOMAIN_DMA: device requires a dynamic mapping domain
2878 * - IOMMU_DOMAIN_IDENTITY: device requires an identity mapping domain
2879 * - 0: both identity and dynamic domains work for this device
2881 static int device_def_domain_type(struct device *dev)
2883 if (dev_is_pci(dev)) {
2884 struct pci_dev *pdev = to_pci_dev(dev);
2886 if (device_is_rmrr_locked(dev))
2887 return IOMMU_DOMAIN_DMA;
2890 * Prevent any device marked as untrusted from getting
2891 * placed into the static identity mapping domain.
2893 if (pdev->untrusted)
2894 return IOMMU_DOMAIN_DMA;
2896 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2897 return IOMMU_DOMAIN_IDENTITY;
2899 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2900 return IOMMU_DOMAIN_IDENTITY;
2903 * We want to start off with all devices in the 1:1 domain, and
2904 * take them out later if we find they can't access all of memory.
2906 * However, we can't do this for PCI devices behind bridges,
2907 * because all PCI devices behind the same bridge will end up
2908 * with the same source-id on their transactions.
2910 * Practically speaking, we can't change things around for these
2911 * devices at run-time, because we can't be sure there'll be no
2912 * DMA transactions in flight for any of their siblings.
2914 * So PCI devices (unless they're on the root bus) as well as
2915 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2916 * the 1:1 domain, just in _case_ one of their siblings turns out
2917 * not to be able to map all of memory.
2919 if (!pci_is_pcie(pdev)) {
2920 if (!pci_is_root_bus(pdev->bus))
2921 return IOMMU_DOMAIN_DMA;
2922 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2923 return IOMMU_DOMAIN_DMA;
2924 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
2925 return IOMMU_DOMAIN_DMA;
2927 if (device_has_rmrr(dev))
2928 return IOMMU_DOMAIN_DMA;
2931 return (iommu_identity_mapping & IDENTMAP_ALL) ?
2932 IOMMU_DOMAIN_IDENTITY : 0;
2935 static void intel_iommu_init_qi(struct intel_iommu *iommu)
2938 * Start from a sane iommu hardware state.
2939 * If queued invalidation was already initialized by us
2940 * (for example, while enabling interrupt remapping), then
2941 * things are already rolling from a sane state.
2945 * Clear any previous faults.
2947 dmar_fault(-1, iommu);
2949 * Disable queued invalidation if supported and already enabled
2950 * before OS handover.
2952 dmar_disable_qi(iommu);
2955 if (dmar_enable_qi(iommu)) {
2957 * Queued Invalidate not enabled, use Register Based Invalidate
2959 iommu->flush.flush_context = __iommu_flush_context;
2960 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2961 pr_info("%s: Using Register based invalidation\n",
2964 iommu->flush.flush_context = qi_flush_context;
2965 iommu->flush.flush_iotlb = qi_flush_iotlb;
2966 pr_info("%s: Using Queued invalidation\n", iommu->name);
2970 static int copy_context_table(struct intel_iommu *iommu,
2971 struct root_entry *old_re,
2972 struct context_entry **tbl,
2975 int tbl_idx, pos = 0, idx, devfn, ret = 0, did;
2976 struct context_entry *new_ce = NULL, ce;
2977 struct context_entry *old_ce = NULL;
2978 struct root_entry re;
2979 phys_addr_t old_ce_phys;
2981 tbl_idx = ext ? bus * 2 : bus;
2982 memcpy(&re, old_re, sizeof(re));
2984 for (devfn = 0; devfn < 256; devfn++) {
2985 /* First calculate the correct index */
2986 idx = (ext ? devfn * 2 : devfn) % 256;
2989 /* First save what we may have and clean up */
2991 tbl[tbl_idx] = new_ce;
2992 __iommu_flush_cache(iommu, new_ce,
3002 old_ce_phys = root_entry_lctp(&re);
3004 old_ce_phys = root_entry_uctp(&re);
3007 if (ext && devfn == 0) {
3008 /* No LCTP, try UCTP */
3017 old_ce = memremap(old_ce_phys, PAGE_SIZE,
3022 new_ce = alloc_pgtable_page(iommu->node);
3029 /* Now copy the context entry */
3030 memcpy(&ce, old_ce + idx, sizeof(ce));
3032 if (!__context_present(&ce))
3035 did = context_domain_id(&ce);
3036 if (did >= 0 && did < cap_ndoms(iommu->cap))
3037 set_bit(did, iommu->domain_ids);
3040 * We need a marker for copied context entries. This
3041 * marker needs to work for the old format as well as
3042 * for extended context entries.
3044 * Bit 67 of the context entry is used. In the old
3045 * format this bit is available to software, in the
3046 * extended format it is the PGE bit, but PGE is ignored
3047 * by HW if PASIDs are disabled (and thus still
3050 * So disable PASIDs first and then mark the entry
3051 * copied. This means that we don't copy PASID
3052 * translations from the old kernel, but this is fine as
3053 * faults there are not fatal.
3055 context_clear_pasid_enable(&ce);
3056 context_set_copied(&ce);
3061 tbl[tbl_idx + pos] = new_ce;
3063 __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE);
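/*
 * Illustrative sketch of the "copied" marker described above: a context
 * entry is 128 bits wide (two u64 words), so bit 67 of the entry is bit 3
 * of the high word. Assuming a lo/hi layout like struct context_entry,
 * marking an entry as copied amounts to (hypothetical helper):
 */
static inline void example_mark_context_copied(struct context_entry *ce)
{
	ce->hi |= 1ULL << (67 - 64);	/* bit 67 of the 128-bit entry */
}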
3072 static int copy_translation_tables(struct intel_iommu *iommu)
3074 struct context_entry **ctxt_tbls;
3075 struct root_entry *old_rt;
3076 phys_addr_t old_rt_phys;
3077 int ctxt_table_entries;
3078 unsigned long flags;
3083 rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
3084 ext = !!(rtaddr_reg & DMA_RTADDR_RTT);
3085 new_ext = !!ecap_ecs(iommu->ecap);
3088 * The RTT bit can only be changed when translation is disabled,
3089 * but disabling translation would open a window for data
3090 * corruption. So bail out and don't copy anything if we would
3091 * have to change the bit.
3096 old_rt_phys = rtaddr_reg & VTD_PAGE_MASK;
3100 old_rt = memremap(old_rt_phys, PAGE_SIZE, MEMREMAP_WB);
3104 /* This is too big for the stack - allocate it from slab */
3105 ctxt_table_entries = ext ? 512 : 256;
3107 ctxt_tbls = kcalloc(ctxt_table_entries, sizeof(void *), GFP_KERNEL);
3111 for (bus = 0; bus < 256; bus++) {
3112 ret = copy_context_table(iommu, &old_rt[bus],
3113 ctxt_tbls, bus, ext);
3115 pr_err("%s: Failed to copy context table for bus %d\n",
3121 spin_lock_irqsave(&iommu->lock, flags);
3123 /* Context tables are copied, now write them to the root_entry table */
3124 for (bus = 0; bus < 256; bus++) {
3125 int idx = ext ? bus * 2 : bus;
3128 if (ctxt_tbls[idx]) {
3129 val = virt_to_phys(ctxt_tbls[idx]) | 1;
3130 iommu->root_entry[bus].lo = val;
3133 if (!ext || !ctxt_tbls[idx + 1])
3136 val = virt_to_phys(ctxt_tbls[idx + 1]) | 1;
3137 iommu->root_entry[bus].hi = val;
3140 spin_unlock_irqrestore(&iommu->lock, flags);
3144 __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE);
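/*
 * Illustrative note: the "| 1" used when writing the root entries above
 * sets the Present bit (bit 0); the remaining low-word bits hold the
 * 4KiB-aligned physical address of the context table. A hypothetical
 * helper making that explicit:
 */
static inline u64 example_root_entry_val(void *ctx_table)
{
	return virt_to_phys(ctx_table) | 1ULL;	/* context table | Present */
}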
3154 static int __init init_dmars(void)
3156 struct dmar_drhd_unit *drhd;
3157 struct intel_iommu *iommu;
3163 * initialize and program root entry to not present
3166 for_each_drhd_unit(drhd) {
3168 * lock not needed as this is only incremented in the single
3169 * threaded kernel __init code path; all other accesses are reads
3172 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
3176 pr_err_once("Exceeded %d IOMMUs\n", DMAR_UNITS_SUPPORTED);
3179 /* Preallocate enough resources for IOMMU hot-addition */
3180 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
3181 g_num_of_iommus = DMAR_UNITS_SUPPORTED;
3183 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
3186 pr_err("Allocating global iommu array failed\n");
3191 for_each_iommu(iommu, drhd) {
3192 if (drhd->ignored) {
3193 iommu_disable_translation(iommu);
3198 * Find the max pasid size of all IOMMUs in the system.
3199 * We need to ensure the system pasid table is no bigger
3200 * than the smallest supported.
3202 if (pasid_supported(iommu)) {
3203 u32 temp = 2 << ecap_pss(iommu->ecap);
3205 intel_pasid_max_id = min_t(u32, temp,
3206 intel_pasid_max_id);
3209 g_iommus[iommu->seq_id] = iommu;
3211 intel_iommu_init_qi(iommu);
3213 ret = iommu_init_domains(iommu);
3217 init_translation_status(iommu);
3219 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
3220 iommu_disable_translation(iommu);
3221 clear_translation_pre_enabled(iommu);
3222 pr_warn("Translation was enabled for %s but we are not in kdump mode\n",
3228 * we could share the same root & context tables
3229 * among all IOMMUs. This needs to be split up later.
3231 ret = iommu_alloc_root_entry(iommu);
3235 if (translation_pre_enabled(iommu)) {
3236 pr_info("Translation already enabled - trying to copy translation structures\n");
3238 ret = copy_translation_tables(iommu);
3241 * We found the IOMMU with translation
3242 * enabled - but failed to copy over the
3243 * old root-entry table. Try to proceed
3244 * by disabling translation now and
3245 * allocating a clean root-entry table.
3246 * This might cause DMAR faults, but
3247 * probably the dump will still succeed.
3249 pr_err("Failed to copy translation tables from previous kernel for %s\n",
3251 iommu_disable_translation(iommu);
3252 clear_translation_pre_enabled(iommu);
3254 pr_info("Copied translation tables from previous kernel for %s\n",
3259 if (!ecap_pass_through(iommu->ecap))
3260 hw_pass_through = 0;
3261 #ifdef CONFIG_INTEL_IOMMU_SVM
3262 if (pasid_supported(iommu))
3263 intel_svm_init(iommu);
3268 * Now that qi is enabled on all iommus, set the root entry and flush
3269 * caches. This is required on some Intel X58 chipsets, otherwise the
3270 * flush_context function will loop forever and the boot hangs.
3272 for_each_active_iommu(iommu, drhd) {
3273 iommu_flush_write_buffer(iommu);
3274 iommu_set_root_entry(iommu);
3275 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
3276 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3279 if (iommu_pass_through)
3280 iommu_identity_mapping |= IDENTMAP_ALL;
3282 #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
3287 iommu_identity_mapping |= IDENTMAP_GFX;
3289 check_tylersburg_isoch();
3291 ret = si_domain_init(hw_pass_through);
3298 * global invalidate context cache
3299 * global invalidate iotlb
3300 * enable translation
3302 for_each_iommu(iommu, drhd) {
3303 if (drhd->ignored) {
3305 * we always have to disable PMRs or DMA may fail on
3309 iommu_disable_protect_mem_regions(iommu);
3313 iommu_flush_write_buffer(iommu);
3315 #ifdef CONFIG_INTEL_IOMMU_SVM
3316 if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) {
3318 * Calling dmar_alloc_hwirq() with dmar_global_lock held
3319 * could cause a lock race, so drop the lock around the call.
3321 up_write(&dmar_global_lock);
3322 ret = intel_svm_enable_prq(iommu);
3323 down_write(&dmar_global_lock);
3328 ret = dmar_set_interrupt(iommu);
3336 for_each_active_iommu(iommu, drhd) {
3337 disable_dmar_iommu(iommu);
3338 free_dmar_iommu(iommu);
3347 /* This takes a number of _MM_ pages, not VTD pages */
3348 static unsigned long intel_alloc_iova(struct device *dev,
3349 struct dmar_domain *domain,
3350 unsigned long nrpages, uint64_t dma_mask)
3352 unsigned long iova_pfn;
3354 /* Restrict dma_mask to the width that the iommu can handle */
3355 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
3356 /* Ensure we reserve the whole size-aligned region */
3357 nrpages = __roundup_pow_of_two(nrpages);
3359 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
3361 * First try to allocate an io virtual address in
3362 * DMA_BIT_MASK(32) and if that fails then try allocating
3365 iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
3366 IOVA_PFN(DMA_BIT_MASK(32)), false);
3370 iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
3371 IOVA_PFN(dma_mask), true);
3372 if (unlikely(!iova_pfn)) {
3373 dev_err(dev, "Allocating %ld-page iova failed", nrpages);
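/*
 * Worked example (illustrative): because the allocator rounds the request
 * up to a power of two to keep the reservation size-aligned, a 5-page
 * request actually reserves 8 IOVA pages and a 3-page request reserves 4
 * (hypothetical helper):
 */
static inline unsigned long example_iova_reservation(unsigned long nrpages)
{
	return __roundup_pow_of_two(nrpages);	/* 5 -> 8, 3 -> 4 */
}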
3380 static struct dmar_domain *get_private_domain_for_dev(struct device *dev)
3382 struct dmar_domain *domain, *tmp;
3383 struct dmar_rmrr_unit *rmrr;
3384 struct device *i_dev;
3387 /* The device shouldn't be attached to any domain yet. */
3388 domain = find_domain(dev);
3392 domain = find_or_alloc_domain(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
3396 /* We have a new domain - set up possible RMRRs for the device */
3398 for_each_rmrr_units(rmrr) {
3399 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
3404 ret = domain_prepare_identity_map(dev, domain,
3408 dev_err(dev, "Mapping reserved region failed\n");
3413 tmp = set_domain_for_dev(dev, domain);
3414 if (!tmp || domain != tmp) {
3415 domain_exit(domain);
3421 dev_err(dev, "Allocating domain failed\n");
3423 domain->domain.type = IOMMU_DOMAIN_DMA;
3428 /* Check if the dev needs to go through the non-identity map and unmap process. */
3429 static bool iommu_need_mapping(struct device *dev)
3433 if (iommu_dummy(dev))
3436 ret = identity_mapping(dev);
3438 u64 dma_mask = *dev->dma_mask;
3440 if (dev->coherent_dma_mask && dev->coherent_dma_mask < dma_mask)
3441 dma_mask = dev->coherent_dma_mask;
3443 if (dma_mask >= dma_get_required_mask(dev))
3447 * The 32-bit DMA device is removed from si_domain and falls
3448 * back to non-identity mapping.
3450 dmar_remove_one_dev_info(dev);
3451 ret = iommu_request_dma_domain_for_dev(dev);
3453 struct iommu_domain *domain;
3454 struct dmar_domain *dmar_domain;
3456 domain = iommu_get_domain_for_dev(dev);
3458 dmar_domain = to_dmar_domain(domain);
3459 dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
3461 get_private_domain_for_dev(dev);
3464 dev_info(dev, "32bit DMA uses non-identity mapping\n");
3470 static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
3471 size_t size, int dir, u64 dma_mask)
3473 struct dmar_domain *domain;
3474 phys_addr_t start_paddr;
3475 unsigned long iova_pfn;
3478 struct intel_iommu *iommu;
3479 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
3481 BUG_ON(dir == DMA_NONE);
3483 domain = find_domain(dev);
3485 return DMA_MAPPING_ERROR;
3487 iommu = domain_get_iommu(domain);
3488 size = aligned_nrpages(paddr, size);
3490 iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
3495 * Check if DMAR supports zero-length reads on write only
3498 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
3499 !cap_zlr(iommu->cap))
3500 prot |= DMA_PTE_READ;
3501 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3502 prot |= DMA_PTE_WRITE;
3504 * paddr - (paddr + size) might be a partial page; we should map the whole
3505 * page. Note: if two parts of one page are separately mapped, we
3506 * might have two guest addresses mapping to the same host paddr, but this
3507 * is not a big problem
3509 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn),
3510 mm_to_dma_pfn(paddr_pfn), size, prot);
3514 start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT;
3515 start_paddr += paddr & ~PAGE_MASK;
3520 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
3521 dev_err(dev, "Device request: %zx@%llx dir %d --- failed\n",
3522 size, (unsigned long long)paddr, dir);
3523 return DMA_MAPPING_ERROR;
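/*
 * Worked example (illustrative): mapping 0x10 bytes at physical address
 * 0x12345678 covers one 4KiB page; the whole page is mapped and the
 * returned handle is the allocated IOVA page plus the intra-page offset
 * 0x678, so unaligned buffers keep their offset within the page
 * (hypothetical helper):
 */
static inline dma_addr_t example_dma_handle(unsigned long iova_pfn,
					    phys_addr_t paddr)
{
	return ((dma_addr_t)iova_pfn << PAGE_SHIFT) + (paddr & ~PAGE_MASK);
}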
3526 static dma_addr_t intel_map_page(struct device *dev, struct page *page,
3527 unsigned long offset, size_t size,
3528 enum dma_data_direction dir,
3529 unsigned long attrs)
3531 if (iommu_need_mapping(dev))
3532 return __intel_map_single(dev, page_to_phys(page) + offset,
3533 size, dir, *dev->dma_mask);
3534 return dma_direct_map_page(dev, page, offset, size, dir, attrs);
3537 static dma_addr_t intel_map_resource(struct device *dev, phys_addr_t phys_addr,
3538 size_t size, enum dma_data_direction dir,
3539 unsigned long attrs)
3541 if (iommu_need_mapping(dev))
3542 return __intel_map_single(dev, phys_addr, size, dir,
3544 return dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
3547 static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
3549 struct dmar_domain *domain;
3550 unsigned long start_pfn, last_pfn;
3551 unsigned long nrpages;
3552 unsigned long iova_pfn;
3553 struct intel_iommu *iommu;
3554 struct page *freelist;
3555 struct pci_dev *pdev = NULL;
3557 domain = find_domain(dev);
3560 iommu = domain_get_iommu(domain);
3562 iova_pfn = IOVA_PFN(dev_addr);
3564 nrpages = aligned_nrpages(dev_addr, size);
3565 start_pfn = mm_to_dma_pfn(iova_pfn);
3566 last_pfn = start_pfn + nrpages - 1;
3568 if (dev_is_pci(dev))
3569 pdev = to_pci_dev(dev);
3571 dev_dbg(dev, "Device unmapping: pfn %lx-%lx\n", start_pfn, last_pfn);
3573 freelist = domain_unmap(domain, start_pfn, last_pfn);
3575 if (intel_iommu_strict || (pdev && pdev->untrusted)) {
3576 iommu_flush_iotlb_psi(iommu, domain, start_pfn,
3577 nrpages, !freelist, 0);
3579 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages));
3580 dma_free_pagelist(freelist);
3582 queue_iova(&domain->iovad, iova_pfn, nrpages,
3583 (unsigned long)freelist);
3585 * queue up the release of the unmap to save the roughly 1/6th of
3586 * the cpu time used up by the iotlb flush operation...
3591 static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
3592 size_t size, enum dma_data_direction dir,
3593 unsigned long attrs)
3595 if (iommu_need_mapping(dev))
3596 intel_unmap(dev, dev_addr, size);
3598 dma_direct_unmap_page(dev, dev_addr, size, dir, attrs);
3601 static void intel_unmap_resource(struct device *dev, dma_addr_t dev_addr,
3602 size_t size, enum dma_data_direction dir, unsigned long attrs)
3604 if (iommu_need_mapping(dev))
3605 intel_unmap(dev, dev_addr, size);
3608 static void *intel_alloc_coherent(struct device *dev, size_t size,
3609 dma_addr_t *dma_handle, gfp_t flags,
3610 unsigned long attrs)
3612 struct page *page = NULL;
3615 if (!iommu_need_mapping(dev))
3616 return dma_direct_alloc(dev, size, dma_handle, flags, attrs);
3618 size = PAGE_ALIGN(size);
3619 order = get_order(size);
3621 if (gfpflags_allow_blocking(flags)) {
3622 unsigned int count = size >> PAGE_SHIFT;
3624 page = dma_alloc_from_contiguous(dev, count, order,
3625 flags & __GFP_NOWARN);
3629 page = alloc_pages(flags, order);
3632 memset(page_address(page), 0, size);
3634 *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
3636 dev->coherent_dma_mask);
3637 if (*dma_handle != DMA_MAPPING_ERROR)
3638 return page_address(page);
3639 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3640 __free_pages(page, order);
3645 static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
3646 dma_addr_t dma_handle, unsigned long attrs)
3649 struct page *page = virt_to_page(vaddr);
3651 if (!iommu_need_mapping(dev))
3652 return dma_direct_free(dev, size, vaddr, dma_handle, attrs);
3654 size = PAGE_ALIGN(size);
3655 order = get_order(size);
3657 intel_unmap(dev, dma_handle, size);
3658 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3659 __free_pages(page, order);
3662 static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
3663 int nelems, enum dma_data_direction dir,
3664 unsigned long attrs)
3666 dma_addr_t startaddr = sg_dma_address(sglist) & PAGE_MASK;
3667 unsigned long nrpages = 0;
3668 struct scatterlist *sg;
3671 if (!iommu_need_mapping(dev))
3672 return dma_direct_unmap_sg(dev, sglist, nelems, dir, attrs);
3674 for_each_sg(sglist, sg, nelems, i) {
3675 nrpages += aligned_nrpages(sg_dma_address(sg), sg_dma_len(sg));
3678 intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
3681 static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
3682 enum dma_data_direction dir, unsigned long attrs)
3685 struct dmar_domain *domain;
3688 unsigned long iova_pfn;
3690 struct scatterlist *sg;
3691 unsigned long start_vpfn;
3692 struct intel_iommu *iommu;
3694 BUG_ON(dir == DMA_NONE);
3695 if (!iommu_need_mapping(dev))
3696 return dma_direct_map_sg(dev, sglist, nelems, dir, attrs);
3698 domain = find_domain(dev);
3702 iommu = domain_get_iommu(domain);
3704 for_each_sg(sglist, sg, nelems, i)
3705 size += aligned_nrpages(sg->offset, sg->length);
3707 iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
3710 sglist->dma_length = 0;
3715 * Check if DMAR supports zero-length reads on write only
3718 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
3719 !cap_zlr(iommu->cap))
3720 prot |= DMA_PTE_READ;
3721 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3722 prot |= DMA_PTE_WRITE;
3724 start_vpfn = mm_to_dma_pfn(iova_pfn);
3726 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
3727 if (unlikely(ret)) {
3728 dma_pte_free_pagetable(domain, start_vpfn,
3729 start_vpfn + size - 1,
3730 agaw_to_level(domain->agaw) + 1);
3731 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
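/*
 * Worked example (illustrative): an sg element with a 0x800 byte offset
 * into its page and a 0x1000 byte length straddles two 4KiB pages, so it
 * contributes 2 to the 'size' accumulated above (0x800 + 0x1000 rounds up
 * to two whole pages).
 */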
3738 static const struct dma_map_ops intel_dma_ops = {
3739 .alloc = intel_alloc_coherent,
3740 .free = intel_free_coherent,
3741 .map_sg = intel_map_sg,
3742 .unmap_sg = intel_unmap_sg,
3743 .map_page = intel_map_page,
3744 .unmap_page = intel_unmap_page,
3745 .map_resource = intel_map_resource,
3746 .unmap_resource = intel_unmap_resource,
3747 .dma_supported = dma_direct_supported,
3750 static inline int iommu_domain_cache_init(void)
3754 iommu_domain_cache = kmem_cache_create("iommu_domain",
3755 sizeof(struct dmar_domain),
3760 if (!iommu_domain_cache) {
3761 pr_err("Couldn't create iommu_domain cache\n");
3768 static inline int iommu_devinfo_cache_init(void)
3772 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3773 sizeof(struct device_domain_info),
3777 if (!iommu_devinfo_cache) {
3778 pr_err("Couldn't create devinfo cache\n");
3785 static int __init iommu_init_mempool(void)
3788 ret = iova_cache_get();
3792 ret = iommu_domain_cache_init();
3796 ret = iommu_devinfo_cache_init();
3800 kmem_cache_destroy(iommu_domain_cache);
3807 static void __init iommu_exit_mempool(void)
3809 kmem_cache_destroy(iommu_devinfo_cache);
3810 kmem_cache_destroy(iommu_domain_cache);
3814 static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3816 struct dmar_drhd_unit *drhd;
3820 /* We know that this device on this chipset has its own IOMMU.
3821 * If we find it under a different IOMMU, then the BIOS is lying
3822 * to us. Hope that the IOMMU for this device is actually
3823 * disabled, and it needs no translation...
3825 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3827 /* "can't" happen */
3828 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3831 vtbar &= 0xffff0000;
3833 /* we know that this iommu should be at offset 0xa000 from vtbar */
3834 drhd = dmar_find_matched_drhd_unit(pdev);
3835 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3836 TAINT_FIRMWARE_WORKAROUND,
3837 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3838 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3840 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
3842 static void __init init_no_remapping_devices(void)
3844 struct dmar_drhd_unit *drhd;
3848 for_each_drhd_unit(drhd) {
3849 if (!drhd->include_all) {
3850 for_each_active_dev_scope(drhd->devices,
3851 drhd->devices_cnt, i, dev)
3853 /* ignore DMAR unit if no devices exist */
3854 if (i == drhd->devices_cnt)
3859 for_each_active_drhd_unit(drhd) {
3860 if (drhd->include_all)
3863 for_each_active_dev_scope(drhd->devices,
3864 drhd->devices_cnt, i, dev)
3865 if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
3867 if (i < drhd->devices_cnt)
3870 /* This IOMMU has *only* gfx devices. Either bypass it or
3871 set the gfx_mapped flag, as appropriate */
3872 if (!dmar_map_gfx) {
3874 for_each_active_dev_scope(drhd->devices,
3875 drhd->devices_cnt, i, dev)
3876 dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3881 #ifdef CONFIG_SUSPEND
3882 static int init_iommu_hw(void)
3884 struct dmar_drhd_unit *drhd;
3885 struct intel_iommu *iommu = NULL;
3887 for_each_active_iommu(iommu, drhd)
3889 dmar_reenable_qi(iommu);
3891 for_each_iommu(iommu, drhd) {
3892 if (drhd->ignored) {
3894 * we always have to disable PMRs or DMA may fail on
3898 iommu_disable_protect_mem_regions(iommu);
3902 iommu_flush_write_buffer(iommu);
3904 iommu_set_root_entry(iommu);
3906 iommu->flush.flush_context(iommu, 0, 0, 0,
3907 DMA_CCMD_GLOBAL_INVL);
3908 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3909 iommu_enable_translation(iommu);
3910 iommu_disable_protect_mem_regions(iommu);
3916 static void iommu_flush_all(void)
3918 struct dmar_drhd_unit *drhd;
3919 struct intel_iommu *iommu;
3921 for_each_active_iommu(iommu, drhd) {
3922 iommu->flush.flush_context(iommu, 0, 0, 0,
3923 DMA_CCMD_GLOBAL_INVL);
3924 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3925 DMA_TLB_GLOBAL_FLUSH);
3929 static int iommu_suspend(void)
3931 struct dmar_drhd_unit *drhd;
3932 struct intel_iommu *iommu = NULL;
3935 for_each_active_iommu(iommu, drhd) {
3936 iommu->iommu_state = kcalloc(MAX_SR_DMAR_REGS, sizeof(u32),
3938 if (!iommu->iommu_state)
3944 for_each_active_iommu(iommu, drhd) {
3945 iommu_disable_translation(iommu);
3947 raw_spin_lock_irqsave(&iommu->register_lock, flag);
3949 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3950 readl(iommu->reg + DMAR_FECTL_REG);
3951 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3952 readl(iommu->reg + DMAR_FEDATA_REG);
3953 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3954 readl(iommu->reg + DMAR_FEADDR_REG);
3955 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3956 readl(iommu->reg + DMAR_FEUADDR_REG);
3958 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
3963 for_each_active_iommu(iommu, drhd)
3964 kfree(iommu->iommu_state);
3969 static void iommu_resume(void)
3971 struct dmar_drhd_unit *drhd;
3972 struct intel_iommu *iommu = NULL;
3975 if (init_iommu_hw()) {
3977 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3979 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
3983 for_each_active_iommu(iommu, drhd) {
3985 raw_spin_lock_irqsave(&iommu->register_lock, flag);
3987 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3988 iommu->reg + DMAR_FECTL_REG);
3989 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3990 iommu->reg + DMAR_FEDATA_REG);
3991 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3992 iommu->reg + DMAR_FEADDR_REG);
3993 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3994 iommu->reg + DMAR_FEUADDR_REG);
3996 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
3999 for_each_active_iommu(iommu, drhd)
4000 kfree(iommu->iommu_state);
4003 static struct syscore_ops iommu_syscore_ops = {
4004 .resume = iommu_resume,
4005 .suspend = iommu_suspend,
4008 static void __init init_iommu_pm_ops(void)
4010 register_syscore_ops(&iommu_syscore_ops);
4014 static inline void init_iommu_pm_ops(void) {}
4015 #endif /* CONFIG_PM */
4017 int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
4019 struct acpi_dmar_reserved_memory *rmrr;
4020 struct dmar_rmrr_unit *rmrru;
4022 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
4026 rmrru->hdr = header;
4027 rmrr = (struct acpi_dmar_reserved_memory *)header;
4028 rmrru->base_address = rmrr->base_address;
4029 rmrru->end_address = rmrr->end_address;
4031 rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
4032 ((void *)rmrr) + rmrr->header.length,
4033 &rmrru->devices_cnt);
4034 if (rmrru->devices_cnt && rmrru->devices == NULL)
4037 list_add(&rmrru->list, &dmar_rmrr_units);
4046 static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
4048 struct dmar_atsr_unit *atsru;
4049 struct acpi_dmar_atsr *tmp;
4051 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
4052 tmp = (struct acpi_dmar_atsr *)atsru->hdr;
4053 if (atsr->segment != tmp->segment)
4055 if (atsr->header.length != tmp->header.length)
4057 if (memcmp(atsr, tmp, atsr->header.length) == 0)
4064 int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4066 struct acpi_dmar_atsr *atsr;
4067 struct dmar_atsr_unit *atsru;
4069 if (system_state >= SYSTEM_RUNNING && !intel_iommu_enabled)
4072 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4073 atsru = dmar_find_atsr(atsr);
4077 atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
4082 * If memory is allocated from slab by ACPI _DSM method, we need to
4083 * copy the memory content because the memory buffer will be freed
4086 atsru->hdr = (void *)(atsru + 1);
4087 memcpy(atsru->hdr, hdr, hdr->length);
4088 atsru->include_all = atsr->flags & 0x1;
4089 if (!atsru->include_all) {
4090 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
4091 (void *)atsr + atsr->header.length,
4092 &atsru->devices_cnt);
4093 if (atsru->devices_cnt && atsru->devices == NULL) {
4099 list_add_rcu(&atsru->list, &dmar_atsr_units);
4104 static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
4106 dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
4110 int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4112 struct acpi_dmar_atsr *atsr;
4113 struct dmar_atsr_unit *atsru;
4115 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4116 atsru = dmar_find_atsr(atsr);
4118 list_del_rcu(&atsru->list);
4120 intel_iommu_free_atsr(atsru);
4126 int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4130 struct acpi_dmar_atsr *atsr;
4131 struct dmar_atsr_unit *atsru;
4133 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4134 atsru = dmar_find_atsr(atsr);
4138 if (!atsru->include_all && atsru->devices && atsru->devices_cnt) {
4139 for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
4147 static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
4150 struct intel_iommu *iommu = dmaru->iommu;
4152 if (g_iommus[iommu->seq_id])
4155 if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
4156 pr_warn("%s: Doesn't support hardware pass through.\n",
4160 if (!ecap_sc_support(iommu->ecap) &&
4161 domain_update_iommu_snooping(iommu)) {
4162 pr_warn("%s: Doesn't support snooping.\n",
4166 sp = domain_update_iommu_superpage(iommu) - 1;
4167 if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
4168 pr_warn("%s: Doesn't support large page.\n",
4174 * Disable translation if already enabled prior to OS handover.
4176 if (iommu->gcmd & DMA_GCMD_TE)
4177 iommu_disable_translation(iommu);
4179 g_iommus[iommu->seq_id] = iommu;
4180 ret = iommu_init_domains(iommu);
4182 ret = iommu_alloc_root_entry(iommu);
4186 #ifdef CONFIG_INTEL_IOMMU_SVM
4187 if (pasid_supported(iommu))
4188 intel_svm_init(iommu);
4191 if (dmaru->ignored) {
4193 * we always have to disable PMRs or DMA may fail on this device
4196 iommu_disable_protect_mem_regions(iommu);
4200 intel_iommu_init_qi(iommu);
4201 iommu_flush_write_buffer(iommu);
4203 #ifdef CONFIG_INTEL_IOMMU_SVM
4204 if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) {
4205 ret = intel_svm_enable_prq(iommu);
4210 ret = dmar_set_interrupt(iommu);
4214 iommu_set_root_entry(iommu);
4215 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
4216 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
4217 iommu_enable_translation(iommu);
4219 iommu_disable_protect_mem_regions(iommu);
4223 disable_dmar_iommu(iommu);
4225 free_dmar_iommu(iommu);
4229 int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
4232 struct intel_iommu *iommu = dmaru->iommu;
4234 if (!intel_iommu_enabled)
4240 ret = intel_iommu_add(dmaru);
4242 disable_dmar_iommu(iommu);
4243 free_dmar_iommu(iommu);
4249 static void intel_iommu_free_dmars(void)
4251 struct dmar_rmrr_unit *rmrru, *rmrr_n;
4252 struct dmar_atsr_unit *atsru, *atsr_n;
4254 list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
4255 list_del(&rmrru->list);
4256 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
4260 list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
4261 list_del(&atsru->list);
4262 intel_iommu_free_atsr(atsru);
4266 int dmar_find_matched_atsr_unit(struct pci_dev *dev)
4269 struct pci_bus *bus;
4270 struct pci_dev *bridge = NULL;
4272 struct acpi_dmar_atsr *atsr;
4273 struct dmar_atsr_unit *atsru;
4275 dev = pci_physfn(dev);
4276 for (bus = dev->bus; bus; bus = bus->parent) {
4278 /* If it's an integrated device, allow ATS */
4281 /* Connected via non-PCIe: no ATS */
4282 if (!pci_is_pcie(bridge) ||
4283 pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
4285 /* If we found the root port, look it up in the ATSR */
4286 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
4291 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
4292 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4293 if (atsr->segment != pci_domain_nr(dev->bus))
4296 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
4297 if (tmp == &bridge->dev)
4300 if (atsru->include_all)
4310 int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
4313 struct dmar_rmrr_unit *rmrru;
4314 struct dmar_atsr_unit *atsru;
4315 struct acpi_dmar_atsr *atsr;
4316 struct acpi_dmar_reserved_memory *rmrr;
4318 if (!intel_iommu_enabled && system_state >= SYSTEM_RUNNING)
4321 list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
4322 rmrr = container_of(rmrru->hdr,
4323 struct acpi_dmar_reserved_memory, header);
4324 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4325 ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
4326 ((void *)rmrr) + rmrr->header.length,
4327 rmrr->segment, rmrru->devices,
4328 rmrru->devices_cnt);
4331 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
4332 dmar_remove_dev_scope(info, rmrr->segment,
4333 rmrru->devices, rmrru->devices_cnt);
4337 list_for_each_entry(atsru, &dmar_atsr_units, list) {
4338 if (atsru->include_all)
4341 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4342 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4343 ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
4344 (void *)atsr + atsr->header.length,
4345 atsr->segment, atsru->devices,
4346 atsru->devices_cnt);
4351 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
4352 if (dmar_remove_dev_scope(info, atsr->segment,
4353 atsru->devices, atsru->devices_cnt))
4361 static int intel_iommu_memory_notifier(struct notifier_block *nb,
4362 unsigned long val, void *v)
4364 struct memory_notify *mhp = v;
4365 unsigned long long start, end;
4366 unsigned long start_vpfn, last_vpfn;
4369 case MEM_GOING_ONLINE:
4370 start = mhp->start_pfn << PAGE_SHIFT;
4371 end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
4372 if (iommu_domain_identity_map(si_domain, start, end)) {
4373 pr_warn("Failed to build identity map for [%llx-%llx]\n",
4380 case MEM_CANCEL_ONLINE:
4381 start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
4382 last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
4383 while (start_vpfn <= last_vpfn) {
4385 struct dmar_drhd_unit *drhd;
4386 struct intel_iommu *iommu;
4387 struct page *freelist;
4389 iova = find_iova(&si_domain->iovad, start_vpfn);
4391 pr_debug("Failed get IOVA for PFN %lx\n",
4396 iova = split_and_remove_iova(&si_domain->iovad, iova,
4397 start_vpfn, last_vpfn);
4399 pr_warn("Failed to split IOVA PFN [%lx-%lx]\n",
4400 start_vpfn, last_vpfn);
4404 freelist = domain_unmap(si_domain, iova->pfn_lo,
4408 for_each_active_iommu(iommu, drhd)
4409 iommu_flush_iotlb_psi(iommu, si_domain,
4410 iova->pfn_lo, iova_size(iova),
4413 dma_free_pagelist(freelist);
4415 start_vpfn = iova->pfn_hi + 1;
4416 free_iova_mem(iova);
4424 static struct notifier_block intel_iommu_memory_nb = {
4425 .notifier_call = intel_iommu_memory_notifier,
4429 static void free_all_cpu_cached_iovas(unsigned int cpu)
4433 for (i = 0; i < g_num_of_iommus; i++) {
4434 struct intel_iommu *iommu = g_iommus[i];
4435 struct dmar_domain *domain;
4441 for (did = 0; did < cap_ndoms(iommu->cap); did++) {
4442 domain = get_iommu_domain(iommu, (u16)did);
4446 free_cpu_cached_iovas(cpu, &domain->iovad);
4451 static int intel_iommu_cpu_dead(unsigned int cpu)
4453 free_all_cpu_cached_iovas(cpu);
4457 static void intel_disable_iommus(void)
4459 struct intel_iommu *iommu = NULL;
4460 struct dmar_drhd_unit *drhd;
4462 for_each_iommu(iommu, drhd)
4463 iommu_disable_translation(iommu);
4466 static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev)
4468 struct iommu_device *iommu_dev = dev_to_iommu_device(dev);
4470 return container_of(iommu_dev, struct intel_iommu, iommu);
4473 static ssize_t intel_iommu_show_version(struct device *dev,
4474 struct device_attribute *attr,
4477 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4478 u32 ver = readl(iommu->reg + DMAR_VER_REG);
4479 return sprintf(buf, "%d:%d\n",
4480 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
4482 static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);
4484 static ssize_t intel_iommu_show_address(struct device *dev,
4485 struct device_attribute *attr,
4488 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4489 return sprintf(buf, "%llx\n", iommu->reg_phys);
4491 static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);
4493 static ssize_t intel_iommu_show_cap(struct device *dev,
4494 struct device_attribute *attr,
4497 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4498 return sprintf(buf, "%llx\n", iommu->cap);
4500 static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);
4502 static ssize_t intel_iommu_show_ecap(struct device *dev,
4503 struct device_attribute *attr,
4506 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4507 return sprintf(buf, "%llx\n", iommu->ecap);
4509 static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
4511 static ssize_t intel_iommu_show_ndoms(struct device *dev,
4512 struct device_attribute *attr,
4515 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4516 return sprintf(buf, "%ld\n", cap_ndoms(iommu->cap));
4518 static DEVICE_ATTR(domains_supported, S_IRUGO, intel_iommu_show_ndoms, NULL);
4520 static ssize_t intel_iommu_show_ndoms_used(struct device *dev,
4521 struct device_attribute *attr,
4524 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4525 return sprintf(buf, "%d\n", bitmap_weight(iommu->domain_ids,
4526 cap_ndoms(iommu->cap)));
4528 static DEVICE_ATTR(domains_used, S_IRUGO, intel_iommu_show_ndoms_used, NULL);
4530 static struct attribute *intel_iommu_attrs[] = {
4531 &dev_attr_version.attr,
4532 &dev_attr_address.attr,
4534 &dev_attr_ecap.attr,
4535 &dev_attr_domains_supported.attr,
4536 &dev_attr_domains_used.attr,
4540 static struct attribute_group intel_iommu_group = {
4541 .name = "intel-iommu",
4542 .attrs = intel_iommu_attrs,
4545 const struct attribute_group *intel_iommu_groups[] = {
4550 static int __init platform_optin_force_iommu(void)
4552 struct pci_dev *pdev = NULL;
4553 bool has_untrusted_dev = false;
4555 if (!dmar_platform_optin() || no_platform_optin)
4558 for_each_pci_dev(pdev) {
4559 if (pdev->untrusted) {
4560 has_untrusted_dev = true;
4565 if (!has_untrusted_dev)
4568 if (no_iommu || dmar_disabled)
4569 pr_info("Intel-IOMMU force enabled due to platform opt in\n");
4572 * If Intel-IOMMU is disabled by default, we will apply identity
4573 * map for all devices except those marked as being untrusted.
4576 iommu_identity_mapping |= IDENTMAP_ALL;
4579 #if defined(CONFIG_X86) && defined(CONFIG_SWIOTLB)
4587 static int __init probe_acpi_namespace_devices(void)
4589 struct dmar_drhd_unit *drhd;
4590 /* To avoid a -Wunused-but-set-variable warning. */
4591 struct intel_iommu *iommu __maybe_unused;
4595 for_each_active_iommu(iommu, drhd) {
4596 for_each_active_dev_scope(drhd->devices,
4597 drhd->devices_cnt, i, dev) {
4598 struct acpi_device_physical_node *pn;
4599 struct iommu_group *group;
4600 struct acpi_device *adev;
4602 if (dev->bus != &acpi_bus_type)
4605 adev = to_acpi_device(dev);
4606 mutex_lock(&adev->physical_node_lock);
4607 list_for_each_entry(pn,
4608 &adev->physical_node_list, node) {
4609 group = iommu_group_get(pn->dev);
4611 iommu_group_put(group);
4615 pn->dev->bus->iommu_ops = &intel_iommu_ops;
4616 ret = iommu_probe_device(pn->dev);
4620 mutex_unlock(&adev->physical_node_lock);
4630 int __init intel_iommu_init(void)
4633 struct dmar_drhd_unit *drhd;
4634 struct intel_iommu *iommu;
4637 * Intel IOMMU is required for a TXT/tboot launch or platform
4638 * opt in, so enforce that.
4640 force_on = tboot_force_iommu() || platform_optin_force_iommu();
4642 if (iommu_init_mempool()) {
4644 panic("tboot: Failed to initialize iommu memory\n");
4648 down_write(&dmar_global_lock);
4649 if (dmar_table_init()) {
4651 panic("tboot: Failed to initialize DMAR table\n");
4655 if (dmar_dev_scope_init() < 0) {
4657 panic("tboot: Failed to initialize DMAR device scope\n");
4661 up_write(&dmar_global_lock);
4664 * The bus notifier takes the dmar_global_lock, so lockdep will
4665 * complain later when we register it under the lock.
4667 dmar_register_bus_notifier();
4669 down_write(&dmar_global_lock);
4671 if (no_iommu || dmar_disabled) {
4673 * We exit the function here to ensure the IOMMU's remapping and
4674 * mempool aren't set up, which means that the IOMMU's PMRs
4675 * won't be disabled via the call to init_dmars(). So disable
4676 * them explicitly here. The PMRs were set up by tboot prior to
4677 * calling SENTER, but the kernel is expected to reset/tear
4680 if (intel_iommu_tboot_noforce) {
4681 for_each_iommu(iommu, drhd)
4682 iommu_disable_protect_mem_regions(iommu);
4686 * Make sure the IOMMUs are switched off, even when we
4687 * boot into a kexec kernel and the previous kernel left
4690 intel_disable_iommus();
4694 if (list_empty(&dmar_rmrr_units))
4695 pr_info("No RMRR found\n");
4697 if (list_empty(&dmar_atsr_units))
4698 pr_info("No ATSR found\n");
4700 if (dmar_init_reserved_ranges()) {
4702 panic("tboot: Failed to reserve iommu ranges\n");
4703 goto out_free_reserved_range;
4707 intel_iommu_gfx_mapped = 1;
4709 init_no_remapping_devices();
4714 panic("tboot: Failed to initialize DMARs\n");
4715 pr_err("Initialization failed\n");
4716 goto out_free_reserved_range;
4718 up_write(&dmar_global_lock);
4720 #if defined(CONFIG_X86) && defined(CONFIG_SWIOTLB)
4723 dma_ops = &intel_dma_ops;
4725 init_iommu_pm_ops();
4727 for_each_active_iommu(iommu, drhd) {
4728 iommu_device_sysfs_add(&iommu->iommu, NULL,
4731 iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);
4732 iommu_device_register(&iommu->iommu);
4735 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
4736 if (si_domain && !hw_pass_through)
4737 register_memory_notifier(&intel_iommu_memory_nb);
4738 cpuhp_setup_state(CPUHP_IOMMU_INTEL_DEAD, "iommu/intel:dead", NULL,
4739 intel_iommu_cpu_dead);
4741 down_read(&dmar_global_lock);
4742 if (probe_acpi_namespace_devices())
4743 pr_warn("ACPI name space devices didn't probe correctly\n");
4744 up_read(&dmar_global_lock);
4746 /* Finally, we enable the DMA remapping hardware. */
4747 for_each_iommu(iommu, drhd) {
4748 if (!drhd->ignored && !translation_pre_enabled(iommu))
4749 iommu_enable_translation(iommu);
4751 iommu_disable_protect_mem_regions(iommu);
4753 pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
4755 intel_iommu_enabled = 1;
4756 intel_iommu_debugfs_init();
4760 out_free_reserved_range:
4761 put_iova_domain(&reserved_iova_list);
4763 intel_iommu_free_dmars();
4764 up_write(&dmar_global_lock);
4765 iommu_exit_mempool();
4769 static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
4771 struct intel_iommu *iommu = opaque;
4773 domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
4778 * NB - intel-iommu lacks any sort of reference counting for the users of
4779 * dependent devices. If multiple endpoints have intersecting dependent
4780 * devices, unbinding the driver from any one of them will possibly leave
4781 * the others unable to operate.
4783 static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
4785 if (!iommu || !dev || !dev_is_pci(dev))
4788 pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
4791 static void __dmar_remove_one_dev_info(struct device_domain_info *info)
4793 struct dmar_domain *domain;
4794 struct intel_iommu *iommu;
4795 unsigned long flags;
4797 assert_spin_locked(&device_domain_lock);
4802 iommu = info->iommu;
4803 domain = info->domain;
4806 if (dev_is_pci(info->dev) && sm_supported(iommu))
4807 intel_pasid_tear_down_entry(iommu, info->dev,
4810 iommu_disable_dev_iotlb(info);
4811 domain_context_clear(iommu, info->dev);
4812 intel_pasid_free_table(info->dev);
4815 unlink_domain_info(info);
4817 spin_lock_irqsave(&iommu->lock, flags);
4818 domain_detach_iommu(domain, iommu);
4819 spin_unlock_irqrestore(&iommu->lock, flags);
4821 /* free the private domain */
4822 if (domain->flags & DOMAIN_FLAG_LOSE_CHILDREN &&
4823 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY))
4824 domain_exit(info->domain);
4826 free_devinfo_mem(info);
4829 static void dmar_remove_one_dev_info(struct device *dev)
4831 struct device_domain_info *info;
4832 unsigned long flags;
4834 spin_lock_irqsave(&device_domain_lock, flags);
4835 info = dev->archdata.iommu;
4836 __dmar_remove_one_dev_info(info);
4837 spin_unlock_irqrestore(&device_domain_lock, flags);
4840 static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
4842 struct dmar_domain *dmar_domain;
4843 struct iommu_domain *domain;
4846 case IOMMU_DOMAIN_DMA:
4848 case IOMMU_DOMAIN_UNMANAGED:
4849 dmar_domain = alloc_domain(0);
4851 pr_err("Can't allocate dmar_domain\n");
4854 if (domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
4855 pr_err("Domain initialization failed\n");
4856 domain_exit(dmar_domain);
4860 if (type == IOMMU_DOMAIN_DMA &&
4861 init_iova_flush_queue(&dmar_domain->iovad,
4862 iommu_flush_iova, iova_entry_free)) {
4863 pr_warn("iova flush queue initialization failed\n");
4864 intel_iommu_strict = 1;
4867 domain_update_iommu_cap(dmar_domain);
4869 domain = &dmar_domain->domain;
4870 domain->geometry.aperture_start = 0;
4871 domain->geometry.aperture_end =
4872 __DOMAIN_MAX_ADDR(dmar_domain->gaw);
4873 domain->geometry.force_aperture = true;
4876 case IOMMU_DOMAIN_IDENTITY:
4877 return &si_domain->domain;
4885 static void intel_iommu_domain_free(struct iommu_domain *domain)
4887 if (domain != &si_domain->domain)
4888 domain_exit(to_dmar_domain(domain));
4892 * Check whether a @domain could be attached to the @dev through the
4893 * aux-domain attach/detach APIs.
4896 is_aux_domain(struct device *dev, struct iommu_domain *domain)
4898 struct device_domain_info *info = dev->archdata.iommu;
4900 return info && info->auxd_enabled &&
4901 domain->type == IOMMU_DOMAIN_UNMANAGED;
4904 static void auxiliary_link_device(struct dmar_domain *domain,
4907 struct device_domain_info *info = dev->archdata.iommu;
4909 assert_spin_locked(&device_domain_lock);
4913 domain->auxd_refcnt++;
4914 list_add(&domain->auxd, &info->auxiliary_domains);
4917 static void auxiliary_unlink_device(struct dmar_domain *domain,
4920 struct device_domain_info *info = dev->archdata.iommu;
4922 assert_spin_locked(&device_domain_lock);
4926 list_del(&domain->auxd);
4927 domain->auxd_refcnt--;
4929 if (!domain->auxd_refcnt && domain->default_pasid > 0)
4930 intel_pasid_free_id(domain->default_pasid);
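/*
 * Note (added for clarity): aux_domain_add_dev() allocates the domain's
 * default PASID on first use (bounded by pci_max_pasids()), attaches the
 * domain to the device's IOMMU and programs a second-level PASID entry
 * under iommu->lock, then records the link via auxiliary_link_device().
 * The error path detaches the domain again and frees the PASID if nothing
 * else is still using it.
 */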
4933 static int aux_domain_add_dev(struct dmar_domain *domain,
4938 unsigned long flags;
4939 struct intel_iommu *iommu;
4941 iommu = device_to_iommu(dev, &bus, &devfn);
4945 if (domain->default_pasid <= 0) {
4948 pasid = intel_pasid_alloc_id(domain, PASID_MIN,
4949 pci_max_pasids(to_pci_dev(dev)),
4952 pr_err("Can't allocate default pasid\n");
4955 domain->default_pasid = pasid;
4958 spin_lock_irqsave(&device_domain_lock, flags);
4960 * iommu->lock must be held to attach domain to iommu and setup the
4961 * pasid entry for second level translation.
4963 spin_lock(&iommu->lock);
4964 ret = domain_attach_iommu(domain, iommu);
4968 /* Setup the PASID entry for mediated devices: */
4969 ret = intel_pasid_setup_second_level(iommu, domain, dev,
4970 domain->default_pasid);
4973 spin_unlock(&iommu->lock);
4975 auxiliary_link_device(domain, dev);
4977 spin_unlock_irqrestore(&device_domain_lock, flags);
4982 domain_detach_iommu(domain, iommu);
4984 spin_unlock(&iommu->lock);
4985 spin_unlock_irqrestore(&device_domain_lock, flags);
4986 if (!domain->auxd_refcnt && domain->default_pasid > 0)
4987 intel_pasid_free_id(domain->default_pasid);
4992 static void aux_domain_remove_dev(struct dmar_domain *domain,
4995 struct device_domain_info *info;
4996 struct intel_iommu *iommu;
4997 unsigned long flags;
4999 if (!is_aux_domain(dev, &domain->domain))
5002 spin_lock_irqsave(&device_domain_lock, flags);
5003 info = dev->archdata.iommu;
5004 iommu = info->iommu;
5006 auxiliary_unlink_device(domain, dev);
5008 spin_lock(&iommu->lock);
5009 intel_pasid_tear_down_entry(iommu, dev, domain->default_pasid);
5010 domain_detach_iommu(domain, iommu);
5011 spin_unlock(&iommu->lock);
5013 spin_unlock_irqrestore(&device_domain_lock, flags);
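/*
 * Note (added for clarity): prepare_domain_attach_device() clamps the usable
 * address width to min(iommu agaw width, cap_mgaw) and rejects the attach if
 * the domain already has mappings above that limit.  It then drops page-table
 * levels until the domain's depth matches the IOMMU; for example (assuming
 * typical widths), a 57-bit/5-level domain attached to a 48-bit/4-level IOMMU
 * loses one top-level table here.
 */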
5016 static int prepare_domain_attach_device(struct iommu_domain *domain,
5019 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5020 struct intel_iommu *iommu;
5024 iommu = device_to_iommu(dev, &bus, &devfn);
5028 /* check if this iommu agaw is sufficient for max mapped address */
5029 addr_width = agaw_to_width(iommu->agaw);
5030 if (addr_width > cap_mgaw(iommu->cap))
5031 addr_width = cap_mgaw(iommu->cap);
5033 if (dmar_domain->max_addr > (1LL << addr_width)) {
5034 dev_err(dev, "%s: iommu width (%d) is not "
5035 "sufficient for the mapped address (%llx)\n",
5036 __func__, addr_width, dmar_domain->max_addr);
5039 dmar_domain->gaw = addr_width;
5042 * Knock out extra levels of page tables if necessary
5044 while (iommu->agaw < dmar_domain->agaw) {
5045 struct dma_pte *pte;
5047 pte = dmar_domain->pgd;
5048 if (dma_pte_present(pte)) {
5049 dmar_domain->pgd = (struct dma_pte *)
5050 phys_to_virt(dma_pte_addr(pte));
5051 free_pgtable_page(pte);
5053 dmar_domain->agaw--;
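/*
 * Note (added for clarity): the regular attach path below refuses unmanaged
 * domains for devices locked to their RMRR identity mapping, leaves the
 * aux-domain case to intel_iommu_aux_attach_device() further down, and tears
 * down any existing context mapping (dmar_remove_one_dev_info()) before
 * preparing and adding the device to the new domain.
 */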
5059 static int intel_iommu_attach_device(struct iommu_domain *domain,
5064 if (domain->type == IOMMU_DOMAIN_UNMANAGED &&
5065 device_is_rmrr_locked(dev)) {
5066 dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
5070 if (is_aux_domain(dev, domain))
5073 /* normally dev is not mapped */
5074 if (unlikely(domain_context_mapped(dev))) {
5075 struct dmar_domain *old_domain;
5077 old_domain = find_domain(dev);
5079 dmar_remove_one_dev_info(dev);
5082 ret = prepare_domain_attach_device(domain, dev);
5086 return domain_add_dev_info(to_dmar_domain(domain), dev);
5089 static int intel_iommu_aux_attach_device(struct iommu_domain *domain,
5094 if (!is_aux_domain(dev, domain))
5097 ret = prepare_domain_attach_device(domain, dev);
5101 return aux_domain_add_dev(to_dmar_domain(domain), dev);
5104 static void intel_iommu_detach_device(struct iommu_domain *domain,
5107 dmar_remove_one_dev_info(dev);
5110 static void intel_iommu_aux_detach_device(struct iommu_domain *domain,
5113 aux_domain_remove_dev(to_dmar_domain(domain), dev);
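/*
 * Note (added for clarity): .map translates IOMMU_READ/WRITE/CACHE into
 * DMA_PTE_READ/WRITE/SNP (snooping only when the hardware supports it),
 * refuses mappings beyond the domain's address width, rounds the size up to
 * whole VTD pages and installs the PTEs via domain_pfn_mapping().  A typical
 * caller is the IOMMU core, e.g. iommu_map(domain, iova, paddr, SZ_4K,
 * IOMMU_READ | IOMMU_WRITE) ends up here with those prot bits.
 */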
5116 static int intel_iommu_map(struct iommu_domain *domain,
5117 unsigned long iova, phys_addr_t hpa,
5118 size_t size, int iommu_prot)
5120 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5125 if (dmar_domain->flags & DOMAIN_FLAG_LOSE_CHILDREN)
5128 if (iommu_prot & IOMMU_READ)
5129 prot |= DMA_PTE_READ;
5130 if (iommu_prot & IOMMU_WRITE)
5131 prot |= DMA_PTE_WRITE;
5132 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
5133 prot |= DMA_PTE_SNP;
5135 max_addr = iova + size;
5136 if (dmar_domain->max_addr < max_addr) {
5139 /* check if minimum agaw is sufficient for mapped address */
5140 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
5141 if (end < max_addr) {
5142 pr_err("%s: iommu width (%d) is not "
5143 "sufficient for the mapped address (%llx)\n",
5144 __func__, dmar_domain->gaw, max_addr);
5147 dmar_domain->max_addr = max_addr;
5149 /* Round up size to next multiple of PAGE_SIZE, if it and
5150 the low bits of hpa would take us onto the next page */
5151 size = aligned_nrpages(hpa, size);
5152 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
5153 hpa >> VTD_PAGE_SHIFT, size, prot);
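/*
 * Note (added for clarity): .unmap may have to cover more than the requested
 * size when the IOVA falls inside a superpage mapping (see the "horrid API"
 * comment below), so the size is first rounded up to the mapped page level.
 * Page-table pages that become free are collected on a freelist, a
 * page-selective IOTLB flush is issued on every IOMMU in the domain, and
 * only then is the freelist released.
 */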
5157 static size_t intel_iommu_unmap(struct iommu_domain *domain,
5158 unsigned long iova, size_t size)
5160 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5161 struct page *freelist = NULL;
5162 unsigned long start_pfn, last_pfn;
5163 unsigned int npages;
5164 int iommu_id, level = 0;
5166 /* Cope with horrid API which requires us to unmap more than the
5167 size argument if it happens to be a large-page mapping. */
5168 BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level));
5169 if (dmar_domain->flags & DOMAIN_FLAG_LOSE_CHILDREN)
5172 if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
5173 size = VTD_PAGE_SIZE << level_to_offset_bits(level);
5175 start_pfn = iova >> VTD_PAGE_SHIFT;
5176 last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
5178 freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);
5180 npages = last_pfn - start_pfn + 1;
5182 for_each_domain_iommu(iommu_id, dmar_domain)
5183 iommu_flush_iotlb_psi(g_iommus[iommu_id], dmar_domain,
5184 start_pfn, npages, !freelist, 0);
5186 dma_free_pagelist(freelist);
5188 if (dmar_domain->max_addr == iova + size)
5189 dmar_domain->max_addr = iova;
5194 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
5197 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5198 struct dma_pte *pte;
5202 if (dmar_domain->flags & DOMAIN_FLAG_LOSE_CHILDREN)
5205 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
5207 phys = dma_pte_addr(pte);
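/*
 * Note (added for clarity): the two helpers below report system-wide
 * capabilities; they walk every active IOMMU and return false as soon as one
 * of them lacks scalable mode or PASID support respectively.
 */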
5212 static inline bool scalable_mode_support(void)
5214 struct dmar_drhd_unit *drhd;
5215 struct intel_iommu *iommu;
5219 for_each_active_iommu(iommu, drhd) {
5220 if (!sm_supported(iommu)) {
5230 static inline bool iommu_pasid_support(void)
5232 struct dmar_drhd_unit *drhd;
5233 struct intel_iommu *iommu;
5237 for_each_active_iommu(iommu, drhd) {
5238 if (!pasid_supported(iommu)) {
5248 static bool intel_iommu_capable(enum iommu_cap cap)
5250 if (cap == IOMMU_CAP_CACHE_COHERENCY)
5251 return domain_update_iommu_snooping(NULL) == 1;
5252 if (cap == IOMMU_CAP_INTR_REMAP)
5253 return irq_remapping_enabled == 1;
5258 static int intel_iommu_add_device(struct device *dev)
5260 struct dmar_domain *dmar_domain;
5261 struct iommu_domain *domain;
5262 struct intel_iommu *iommu;
5263 struct iommu_group *group;
5267 iommu = device_to_iommu(dev, &bus, &devfn);
5271 iommu_device_link(&iommu->iommu, dev);
5273 if (translation_pre_enabled(iommu))
5274 dev->archdata.iommu = DEFER_DEVICE_DOMAIN_INFO;
5276 group = iommu_group_get_for_dev(dev);
5279 return PTR_ERR(group);
5281 iommu_group_put(group);
5283 domain = iommu_get_domain_for_dev(dev);
5284 dmar_domain = to_dmar_domain(domain);
5285 if (domain->type == IOMMU_DOMAIN_DMA) {
5286 if (device_def_domain_type(dev) == IOMMU_DOMAIN_IDENTITY) {
5287 ret = iommu_request_dm_for_dev(dev);
5289 dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
5290 domain_add_dev_info(si_domain, dev);
5292 "Device uses a private identity domain.\n");
5296 if (device_def_domain_type(dev) == IOMMU_DOMAIN_DMA) {
5297 ret = iommu_request_dma_domain_for_dev(dev);
5299 dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
5300 if (!get_private_domain_for_dev(dev)) {
5302 "Failed to get a private domain.\n");
5307 "Device uses a private dma domain.\n");
5315 static void intel_iommu_remove_device(struct device *dev)
5317 struct intel_iommu *iommu;
5320 iommu = device_to_iommu(dev, &bus, &devfn);
5324 iommu_group_remove_device(dev);
5326 iommu_device_unlink(&iommu->iommu, dev);
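/*
 * Note (added for clarity): the reserved-region reporting below exposes every
 * RMRR that names this device (or a device it sits behind) as a direct-mapped
 * reserved region, adds the 0-16MiB ISA range for ISA bridges when
 * CONFIG_INTEL_IOMMU_FLOPPY_WA is set, and always reserves the IOAPIC/MSI
 * window so nothing is ever mapped on top of it.
 */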
5329 static void intel_iommu_get_resv_regions(struct device *device,
5330 struct list_head *head)
5332 int prot = DMA_PTE_READ | DMA_PTE_WRITE;
5333 struct iommu_resv_region *reg;
5334 struct dmar_rmrr_unit *rmrr;
5335 struct device *i_dev;
5338 down_read(&dmar_global_lock);
5339 for_each_rmrr_units(rmrr) {
5340 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
5342 struct iommu_resv_region *resv;
5343 enum iommu_resv_type type;
5346 if (i_dev != device &&
5347 !is_downstream_to_pci_bridge(device, i_dev))
5350 length = rmrr->end_address - rmrr->base_address + 1;
5352 type = device_rmrr_is_relaxable(device) ?
5353 IOMMU_RESV_DIRECT_RELAXABLE : IOMMU_RESV_DIRECT;
5355 resv = iommu_alloc_resv_region(rmrr->base_address,
5356 length, prot, type);
5360 list_add_tail(&resv->list, head);
5363 up_read(&dmar_global_lock);
5365 #ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
5366 if (dev_is_pci(device)) {
5367 struct pci_dev *pdev = to_pci_dev(device);
5369 if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA) {
5370 reg = iommu_alloc_resv_region(0, 1UL << 24, 0,
5373 list_add_tail(&reg->list, head);
5376 #endif /* CONFIG_INTEL_IOMMU_FLOPPY_WA */
5378 reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
5379 IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
5383 list_add_tail(&reg->list, head);
5386 static void intel_iommu_put_resv_regions(struct device *dev,
5387 struct list_head *head)
5389 struct iommu_resv_region *entry, *next;
5391 list_for_each_entry_safe(entry, next, head, list)
5395 int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
5397 struct device_domain_info *info;
5398 struct context_entry *context;
5399 struct dmar_domain *domain;
5400 unsigned long flags;
5404 domain = find_domain(dev);
5408 spin_lock_irqsave(&device_domain_lock, flags);
5409 spin_lock(&iommu->lock);
5412 info = dev->archdata.iommu;
5413 if (!info || !info->pasid_supported)
5416 context = iommu_context_addr(iommu, info->bus, info->devfn, 0);
5417 if (WARN_ON(!context))
5420 ctx_lo = context[0].lo;
5422 if (!(ctx_lo & CONTEXT_PASIDE)) {
5423 ctx_lo |= CONTEXT_PASIDE;
5424 context[0].lo = ctx_lo;
5426 iommu->flush.flush_context(iommu,
5427 domain->iommu_did[iommu->seq_id],
5428 PCI_DEVID(info->bus, info->devfn),
5429 DMA_CCMD_MASK_NOBIT,
5430 DMA_CCMD_DEVICE_INVL);
5433 /* Enable PASID support in the device, if it wasn't already */
5434 if (!info->pasid_enabled)
5435 iommu_enable_dev_iotlb(info);
5440 spin_unlock(&iommu->lock);
5441 spin_unlock_irqrestore(&device_domain_lock, flags);
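/*
 * Note (added for clarity): apply_resv_region() below pins the reserved range
 * in the domain's IOVA allocator via reserve_iova(), so the DMA API never
 * hands out addresses inside it; a failed reservation only triggers a
 * one-time WARN.
 */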
5446 static void intel_iommu_apply_resv_region(struct device *dev,
5447 struct iommu_domain *domain,
5448 struct iommu_resv_region *region)
5450 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5451 unsigned long start, end;
5453 start = IOVA_PFN(region->start);
5454 end = IOVA_PFN(region->start + region->length - 1);
5456 WARN_ON_ONCE(!reserve_iova(&dmar_domain->iovad, start, end));
5459 #ifdef CONFIG_INTEL_IOMMU_SVM
5460 struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
5462 struct intel_iommu *iommu;
5465 if (iommu_dummy(dev)) {
5467 "No IOMMU translation for device; cannot enable SVM\n");
5471 iommu = device_to_iommu(dev, &bus, &devfn);
5473 dev_err(dev, "No IOMMU for device; cannot enable SVM\n");
5479 #endif /* CONFIG_INTEL_IOMMU_SVM */
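/*
 * Note (added for clarity): the two helpers below toggle per-device
 * aux-domain support.  Enabling requires a non-disabled IOMMU with scalable
 * mode and PASID support and first enables PASID for the device via
 * intel_iommu_enable_pasid(); the flag itself is just info->auxd_enabled,
 * flipped under device_domain_lock.  They are reached from the
 * dev_enable_feat/dev_disable_feat ops (IOMMU_DEV_FEAT_AUX) further down.
 */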
5481 static int intel_iommu_enable_auxd(struct device *dev)
5483 struct device_domain_info *info;
5484 struct intel_iommu *iommu;
5485 unsigned long flags;
5489 iommu = device_to_iommu(dev, &bus, &devfn);
5490 if (!iommu || dmar_disabled)
5493 if (!sm_supported(iommu) || !pasid_supported(iommu))
5496 ret = intel_iommu_enable_pasid(iommu, dev);
5500 spin_lock_irqsave(&device_domain_lock, flags);
5501 info = dev->archdata.iommu;
5502 info->auxd_enabled = 1;
5503 spin_unlock_irqrestore(&device_domain_lock, flags);
5508 static int intel_iommu_disable_auxd(struct device *dev)
5510 struct device_domain_info *info;
5511 unsigned long flags;
5513 spin_lock_irqsave(&device_domain_lock, flags);
5514 info = dev->archdata.iommu;
5515 if (!WARN_ON(!info))
5516 info->auxd_enabled = 0;
5517 spin_unlock_irqrestore(&device_domain_lock, flags);
5523 * A PCI Express Designated Vendor-Specific Extended Capability (DVSEC) is
5524 * defined in section 3.7 of the Intel Scalable I/O Virtualization technical
5525 * spec so that system software and tools can detect endpoint devices that
5526 * support Intel Scalable I/O Virtualization without a host driver dependency.
5528 * Returns the offset of the matching extended capability structure within
5529 * the device's PCI configuration space, or 0 if the device does not support it.
5532 static int siov_find_pci_dvsec(struct pci_dev *pdev)
5537 pos = pci_find_next_ext_capability(pdev, 0, 0x23);
5539 pci_read_config_word(pdev, pos + 4, &vendor);
5540 pci_read_config_word(pdev, pos + 8, &id);
5541 if (vendor == PCI_VENDOR_ID_INTEL && id == 5)
5544 pos = pci_find_next_ext_capability(pdev, pos, 0x23);
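/*
 * Note (added for clarity): for IOMMU_DEV_FEAT_AUX the probe below only
 * succeeds when DMAR is enabled, every IOMMU supports scalable mode and
 * PASIDs, the PCI device itself advertises PASID support, and the Intel
 * Scalable IOV DVSEC located by siov_find_pci_dvsec() is present.
 */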
5551 intel_iommu_dev_has_feat(struct device *dev, enum iommu_dev_features feat)
5553 if (feat == IOMMU_DEV_FEAT_AUX) {
5556 if (!dev_is_pci(dev) || dmar_disabled ||
5557 !scalable_mode_support() || !iommu_pasid_support())
5560 ret = pci_pasid_features(to_pci_dev(dev));
5564 return !!siov_find_pci_dvsec(to_pci_dev(dev));
5571 intel_iommu_dev_enable_feat(struct device *dev, enum iommu_dev_features feat)
5573 if (feat == IOMMU_DEV_FEAT_AUX)
5574 return intel_iommu_enable_auxd(dev);
5580 intel_iommu_dev_disable_feat(struct device *dev, enum iommu_dev_features feat)
5582 if (feat == IOMMU_DEV_FEAT_AUX)
5583 return intel_iommu_disable_auxd(dev);
5589 intel_iommu_dev_feat_enabled(struct device *dev, enum iommu_dev_features feat)
5591 struct device_domain_info *info = dev->archdata.iommu;
5593 if (feat == IOMMU_DEV_FEAT_AUX)
5594 return scalable_mode_support() && info && info->auxd_enabled;
5600 intel_iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
5602 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5604 return dmar_domain->default_pasid > 0 ?
5605 dmar_domain->default_pasid : -EINVAL;
5608 static bool intel_iommu_is_attach_deferred(struct iommu_domain *domain,
5611 return dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO;
5614 const struct iommu_ops intel_iommu_ops = {
5615 .capable = intel_iommu_capable,
5616 .domain_alloc = intel_iommu_domain_alloc,
5617 .domain_free = intel_iommu_domain_free,
5618 .attach_dev = intel_iommu_attach_device,
5619 .detach_dev = intel_iommu_detach_device,
5620 .aux_attach_dev = intel_iommu_aux_attach_device,
5621 .aux_detach_dev = intel_iommu_aux_detach_device,
5622 .aux_get_pasid = intel_iommu_aux_get_pasid,
5623 .map = intel_iommu_map,
5624 .unmap = intel_iommu_unmap,
5625 .iova_to_phys = intel_iommu_iova_to_phys,
5626 .add_device = intel_iommu_add_device,
5627 .remove_device = intel_iommu_remove_device,
5628 .get_resv_regions = intel_iommu_get_resv_regions,
5629 .put_resv_regions = intel_iommu_put_resv_regions,
5630 .apply_resv_region = intel_iommu_apply_resv_region,
5631 .device_group = pci_device_group,
5632 .dev_has_feat = intel_iommu_dev_has_feat,
5633 .dev_feat_enabled = intel_iommu_dev_feat_enabled,
5634 .dev_enable_feat = intel_iommu_dev_enable_feat,
5635 .dev_disable_feat = intel_iommu_dev_disable_feat,
5636 .is_attach_deferred = intel_iommu_is_attach_deferred,
5637 .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
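/*
 * Note (added for clarity): everything from here on is workarounds for
 * known-broken platforms.  The G4x/GM45 fixups disable the graphics IOMMU
 * outright, the Mobile 4 Series fixups force the write-buffer-flush
 * workaround, the Calpella/Ironlake fixup disables the graphics IOMMU or
 * forces strict IOTLB flushing depending on how the BIOS set up the GTT,
 * and check_tylersburg_isoch() catches BIOSes that misconfigure the Azalia
 * ISOCH DMAR unit.
 */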
5640 static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
5642 /* G4x/GM45 integrated gfx dmar support is totally busted. */
5643 pci_info(dev, "Disabling IOMMU for graphics on this chipset\n");
5647 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
5648 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
5649 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
5650 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
5651 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
5652 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
5653 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
5655 static void quirk_iommu_rwbf(struct pci_dev *dev)
5658 * Mobile 4 Series Chipset neglects to set RWBF capability,
5659 * but needs it. Same seems to hold for the desktop versions.
5661 pci_info(dev, "Forcing write-buffer flush capability\n");
5665 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
5666 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
5667 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
5668 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
5669 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
5670 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
5671 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
5674 #define GGC_MEMORY_SIZE_MASK (0xf << 8)
5675 #define GGC_MEMORY_SIZE_NONE (0x0 << 8)
5676 #define GGC_MEMORY_SIZE_1M (0x1 << 8)
5677 #define GGC_MEMORY_SIZE_2M (0x3 << 8)
5678 #define GGC_MEMORY_VT_ENABLED (0x8 << 8)
5679 #define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
5680 #define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
5681 #define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
5683 static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
5687 if (pci_read_config_word(dev, GGC, &ggc))
5690 if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
5691 pci_info(dev, "BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
5693 } else if (dmar_map_gfx) {
5694 /* we have to ensure the gfx device is idle before we flush */
5695 pci_info(dev, "Disabling batched IOTLB flush on Ironlake\n");
5696 intel_iommu_strict = 1;
5699 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
5700 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
5701 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
5702 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
5704 /* On Tylersburg chipsets, some BIOSes have been known to enable the
5705 ISOCH DMAR unit for the Azalia sound device, but not give it any
5706 TLB entries, which causes it to deadlock. Check for that. We do
5707 this in a function called from init_dmars(), instead of in a PCI
5708 quirk, because we don't want to print the obnoxious "BIOS broken"
5709 message if VT-d is actually disabled.
5711 static void __init check_tylersburg_isoch(void)
5713 struct pci_dev *pdev;
5714 uint32_t vtisochctrl;
5716 /* If there's no Azalia in the system anyway, forget it. */
5717 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
5722 /* System Management Registers. Might be hidden, in which case
5723 we can't do the sanity check. But that's OK, because the
5724 known-broken BIOSes _don't_ actually hide it, so far. */
5725 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
5729 if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
5736 /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
5737 if (vtisochctrl & 1)
5740 /* Drop all bits other than the number of TLB entries */
5741 vtisochctrl &= 0x1c;
5743 /* If we have the recommended number of TLB entries (16), fine. */
5744 if (vtisochctrl == 0x10)
5747 /* Zero TLB entries? You get to ride the short bus to school. */
5749 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
5750 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
5751 dmi_get_system_info(DMI_BIOS_VENDOR),
5752 dmi_get_system_info(DMI_BIOS_VERSION),
5753 dmi_get_system_info(DMI_PRODUCT_VERSION));
5754 iommu_identity_mapping |= IDENTMAP_AZALIA;
5758 pr_warn("Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",