2 * Copyright © 2006-2014 Intel Corporation.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * Authors: David Woodhouse <dwmw2@infradead.org>,
14 * Ashok Raj <ashok.raj@intel.com>,
15 * Shaohua Li <shaohua.li@intel.com>,
16 * Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
17 * Fenghua Yu <fenghua.yu@intel.com>
18 * Joerg Roedel <jroedel@suse.de>
21 #define pr_fmt(fmt) "DMAR: " fmt
23 #include <linux/init.h>
24 #include <linux/bitmap.h>
25 #include <linux/debugfs.h>
26 #include <linux/export.h>
27 #include <linux/slab.h>
28 #include <linux/irq.h>
29 #include <linux/interrupt.h>
30 #include <linux/spinlock.h>
31 #include <linux/pci.h>
32 #include <linux/dmar.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/dma-direct.h>
35 #include <linux/mempool.h>
36 #include <linux/memory.h>
37 #include <linux/cpu.h>
38 #include <linux/timer.h>
40 #include <linux/iova.h>
41 #include <linux/iommu.h>
42 #include <linux/intel-iommu.h>
43 #include <linux/syscore_ops.h>
44 #include <linux/tboot.h>
45 #include <linux/dmi.h>
46 #include <linux/pci-ats.h>
47 #include <linux/memblock.h>
48 #include <linux/dma-contiguous.h>
49 #include <linux/dma-direct.h>
50 #include <linux/crash_dump.h>
51 #include <asm/irq_remapping.h>
52 #include <asm/cacheflush.h>
53 #include <asm/iommu.h>
55 #include "irq_remapping.h"
57 #define ROOT_SIZE VTD_PAGE_SIZE
58 #define CONTEXT_SIZE VTD_PAGE_SIZE
60 #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
61 #define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
62 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
63 #define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
65 #define IOAPIC_RANGE_START (0xfee00000)
66 #define IOAPIC_RANGE_END (0xfeefffff)
67 #define IOVA_START_ADDR (0x1000)
69 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 57
71 #define MAX_AGAW_WIDTH 64
72 #define MAX_AGAW_PFN_WIDTH (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
74 #define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
75 #define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
77 /* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
78 to match. That way, we can use 'unsigned long' for PFNs with impunity. */
79 #define DOMAIN_MAX_PFN(gaw) ((unsigned long) min_t(uint64_t, \
80 __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
81 #define DOMAIN_MAX_ADDR(gaw) (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
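/*
 * For example, with a 48-bit guest address width:
 *   __DOMAIN_MAX_PFN(48) == (1ULL << 36) - 1 == 0xfffffffff
 *   DOMAIN_MAX_ADDR(48)  == 0xfffffffff000
 * On 32-bit kernels DOMAIN_MAX_PFN() additionally clamps the result to
 * ULONG_MAX, so PFN arithmetic stays within 'unsigned long' as noted above.
 */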
83 /* IO virtual address start page frame number */
84 #define IOVA_START_PFN (1)
86 #define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
88 /* page table handling */
89 #define LEVEL_STRIDE (9)
90 #define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
93 * This bitmap is used to advertise the page sizes our hardware supports
94 * to the IOMMU core, which will then use this information to split
95 * physically contiguous memory regions it is mapping into page sizes
98 * Traditionally the IOMMU core just handed us the mappings directly,
99 * after making sure the size is an order of a 4KiB page and that the
100 * mapping has natural alignment.
102 * To retain this behavior, we currently advertise that we support
103 * all page sizes that are an order of 4KiB.
105 * If at some point we'd like to utilize the IOMMU core's new behavior,
106 * we could change this to advertise the real page sizes we support.
108 #define INTEL_IOMMU_PGSIZES (~0xFFFUL)
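/*
 * ~0xFFFUL sets every bit from bit 12 upwards, i.e. it advertises all
 * power-of-two page sizes of 4KiB and larger (bit 12 == 4KiB,
 * bit 21 == 2MiB, bit 30 == 1GiB, and so on).
 */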
110 static inline int agaw_to_level(int agaw)
115 static inline int agaw_to_width(int agaw)
117 return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
120 static inline int width_to_agaw(int width)
122 return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
125 static inline unsigned int level_to_offset_bits(int level)
127 return (level - 1) * LEVEL_STRIDE;
130 static inline int pfn_level_offset(unsigned long pfn, int level)
132 return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
135 static inline unsigned long level_mask(int level)
137 return -1UL << level_to_offset_bits(level);
140 static inline unsigned long level_size(int level)
142 return 1UL << level_to_offset_bits(level);
145 static inline unsigned long align_to_level(unsigned long pfn, int level)
147 return (pfn + level_size(level) - 1) & level_mask(level);
150 static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
152 return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
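/*
 * Worked example for the level helpers above: a level 1 PTE maps a single
 * 4KiB page (level_to_offset_bits(1) == 0, lvl_to_nr_pages(1) == 1); a
 * level 2 superpage spans lvl_to_nr_pages(2) == 512 pages == 2MiB; a
 * level 3 superpage spans 262144 pages == 1GiB.
 */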
155 /* VT-d pages must always be _smaller_ than MM pages. Otherwise things
156 are never going to work. */
157 static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
159 return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
162 static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
164 return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
166 static inline unsigned long page_to_dma_pfn(struct page *pg)
168 return mm_to_dma_pfn(page_to_pfn(pg));
170 static inline unsigned long virt_to_dma_pfn(void *p)
172 return page_to_dma_pfn(virt_to_page(p));
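/*
 * With 4KiB kernel pages, PAGE_SHIFT == VTD_PAGE_SHIFT and the two
 * conversions above are identities; with larger kernel pages each MM PFN
 * corresponds to 2^(PAGE_SHIFT - VTD_PAGE_SHIFT) VT-d PFNs.
 */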
175 /* global iommu list, set NULL for ignored DMAR units */
176 static struct intel_iommu **g_iommus;
178 static void __init check_tylersburg_isoch(void);
179 static int rwbf_quirk;
182 * set to 1 to panic the kernel if we can't successfully enable VT-d
183 * (used when kernel is launched w/ TXT)
185 static int force_on = 0;
186 int intel_iommu_tboot_noforce;
191 * 12-63: Context Ptr (12 - (haw-1))
198 #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
201 * Take a root_entry and return the Lower Context Table Pointer (LCTP)
204 static phys_addr_t root_entry_lctp(struct root_entry *re)
209 return re->lo & VTD_PAGE_MASK;
213 * Take a root_entry and return the Upper Context Table Pointer (UCTP)
216 static phys_addr_t root_entry_uctp(struct root_entry *re)
221 return re->hi & VTD_PAGE_MASK;
226 * 1: fault processing disable
227 * 2-3: translation type
228 * 12-63: address space root
234 struct context_entry {
239 static inline void context_clear_pasid_enable(struct context_entry *context)
241 context->lo &= ~(1ULL << 11);
244 static inline bool context_pasid_enabled(struct context_entry *context)
246 return !!(context->lo & (1ULL << 11));
249 static inline void context_set_copied(struct context_entry *context)
251 context->hi |= (1ull << 3);
254 static inline bool context_copied(struct context_entry *context)
256 return !!(context->hi & (1ULL << 3));
259 static inline bool __context_present(struct context_entry *context)
261 return (context->lo & 1);
264 static inline bool context_present(struct context_entry *context)
266 return context_pasid_enabled(context) ?
267 __context_present(context) :
268 __context_present(context) && !context_copied(context);
271 static inline void context_set_present(struct context_entry *context)
276 static inline void context_set_fault_enable(struct context_entry *context)
278 context->lo &= (((u64)-1) << 2) | 1;
281 static inline void context_set_translation_type(struct context_entry *context,
284 context->lo &= (((u64)-1) << 4) | 3;
285 context->lo |= (value & 3) << 2;
288 static inline void context_set_address_root(struct context_entry *context,
291 context->lo &= ~VTD_PAGE_MASK;
292 context->lo |= value & VTD_PAGE_MASK;
295 static inline void context_set_address_width(struct context_entry *context,
298 context->hi |= value & 7;
301 static inline void context_set_domain_id(struct context_entry *context,
304 context->hi |= (value & ((1 << 16) - 1)) << 8;
307 static inline int context_domain_id(struct context_entry *c)
309 return((c->hi >> 8) & 0xffff);
312 static inline void context_clear_entry(struct context_entry *context)
325 * 12-63: Host physical address
331 static inline void dma_clear_pte(struct dma_pte *pte)
336 static inline u64 dma_pte_addr(struct dma_pte *pte)
339 return pte->val & VTD_PAGE_MASK;
341 /* Must have a full atomic 64-bit read */
342 return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
346 static inline bool dma_pte_present(struct dma_pte *pte)
348 return (pte->val & 3) != 0;
351 static inline bool dma_pte_superpage(struct dma_pte *pte)
353 return (pte->val & DMA_PTE_LARGE_PAGE);
356 static inline int first_pte_in_page(struct dma_pte *pte)
358 return !((unsigned long)pte & ~VTD_PAGE_MASK);
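/*
 * A dma_pte is a single u64, so one 4KiB page-table page holds 512 entries;
 * first_pte_in_page() is therefore true exactly at the start of a table
 * page, which the clearing/freeing loops below use to detect that they
 * have stepped off the end of the current table.
 */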
362 * This domain is a static identity-mapping domain.
363 * 1. This domain creates a static 1:1 mapping to all usable memory.
364 * 2. It maps to each iommu if successful.
365 * 3. Each iommu maps to this domain if successful.
367 static struct dmar_domain *si_domain;
368 static int hw_pass_through = 1;
371 * Domain represents a virtual machine; more than one device
372 * across iommus may be owned by one domain, e.g. a kvm guest.
374 #define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 0)
376 /* si_domain contains multiple devices */
377 #define DOMAIN_FLAG_STATIC_IDENTITY (1 << 1)
379 #define for_each_domain_iommu(idx, domain) \
380 for (idx = 0; idx < g_num_of_iommus; idx++) \
381 if (domain->iommu_refcnt[idx])
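/*
 * Typical use, as in the flush paths further down:
 *
 *	for_each_domain_iommu(i, domain) {
 *		struct intel_iommu *iommu = g_iommus[i];
 *		u16 did = domain->iommu_did[iommu->seq_id];
 *		...
 *	}
 */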
384 int nid; /* node id */
386 unsigned iommu_refcnt[DMAR_UNITS_SUPPORTED];
387 /* Refcount of devices per iommu */
390 u16 iommu_did[DMAR_UNITS_SUPPORTED];
391 /* Domain ids per IOMMU. Use u16 since
392 * domain ids are 16 bit wide according
393 * to VT-d spec, section 9.3 */
395 bool has_iotlb_device;
396 struct list_head devices; /* all devices' list */
397 struct iova_domain iovad; /* iova's that belong to this domain */
399 struct dma_pte *pgd; /* virtual address */
400 int gaw; /* max guest address width */
402 /* adjusted guest address width, 0 is level 2 30-bit */
405 int flags; /* flags to find out type of domain */
407 int iommu_coherency;/* indicate coherency of iommu access */
408 int iommu_snooping; /* indicate snooping control feature*/
409 int iommu_count; /* reference count of iommu */
410 int iommu_superpage;/* Level of superpages supported:
411 0 == 4KiB (no superpages), 1 == 2MiB,
412 2 == 1GiB, 3 == 512GiB, 4 == 256TiB */
413 u64 max_addr; /* maximum mapped address */
415 struct iommu_domain domain; /* generic domain data structure for
419 /* PCI domain-device relationship */
420 struct device_domain_info {
421 struct list_head link; /* link to domain siblings */
422 struct list_head global; /* link to global list */
423 u8 bus; /* PCI bus number */
424 u8 devfn; /* PCI devfn number */
425 u16 pfsid; /* SRIOV physical function source ID */
426 u8 pasid_supported:3;
433 struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
434 struct intel_iommu *iommu; /* IOMMU used by this device */
435 struct dmar_domain *domain; /* pointer to domain */
438 struct dmar_rmrr_unit {
439 struct list_head list; /* list of rmrr units */
440 struct acpi_dmar_header *hdr; /* ACPI header */
441 u64 base_address; /* reserved base address*/
442 u64 end_address; /* reserved end address */
443 struct dmar_dev_scope *devices; /* target devices */
444 int devices_cnt; /* target device count */
445 struct iommu_resv_region *resv; /* reserved region handle */
448 struct dmar_atsr_unit {
449 struct list_head list; /* list of ATSR units */
450 struct acpi_dmar_header *hdr; /* ACPI header */
451 struct dmar_dev_scope *devices; /* target devices */
452 int devices_cnt; /* target device count */
453 u8 include_all:1; /* include all ports */
456 static LIST_HEAD(dmar_atsr_units);
457 static LIST_HEAD(dmar_rmrr_units);
459 #define for_each_rmrr_units(rmrr) \
460 list_for_each_entry(rmrr, &dmar_rmrr_units, list)
462 /* bitmap for indexing intel_iommus */
463 static int g_num_of_iommus;
465 static void domain_exit(struct dmar_domain *domain);
466 static void domain_remove_dev_info(struct dmar_domain *domain);
467 static void dmar_remove_one_dev_info(struct dmar_domain *domain,
469 static void __dmar_remove_one_dev_info(struct device_domain_info *info);
470 static void domain_context_clear(struct intel_iommu *iommu,
472 static int domain_detach_iommu(struct dmar_domain *domain,
473 struct intel_iommu *iommu);
475 #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
476 int dmar_disabled = 0;
478 int dmar_disabled = 1;
479 #endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
481 int intel_iommu_enabled = 0;
482 EXPORT_SYMBOL_GPL(intel_iommu_enabled);
484 static int dmar_map_gfx = 1;
485 static int dmar_forcedac;
486 static int intel_iommu_strict;
487 static int intel_iommu_superpage = 1;
488 static int intel_iommu_ecs = 1;
489 static int iommu_identity_mapping;
491 #define IDENTMAP_ALL 1
492 #define IDENTMAP_GFX 2
493 #define IDENTMAP_AZALIA 4
495 #define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap))
496 #define pasid_enabled(iommu) (ecs_enabled(iommu) && ecap_pasid(iommu->ecap))
498 int intel_iommu_gfx_mapped;
499 EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
501 #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
502 static DEFINE_SPINLOCK(device_domain_lock);
503 static LIST_HEAD(device_domain_list);
505 const struct iommu_ops intel_iommu_ops;
507 static bool translation_pre_enabled(struct intel_iommu *iommu)
509 return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED);
512 static void clear_translation_pre_enabled(struct intel_iommu *iommu)
514 iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED;
517 static void init_translation_status(struct intel_iommu *iommu)
521 gsts = readl(iommu->reg + DMAR_GSTS_REG);
522 if (gsts & DMA_GSTS_TES)
523 iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
526 /* Convert a generic 'struct iommu_domain' to the private 'struct dmar_domain' */
527 static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
529 return container_of(dom, struct dmar_domain, domain);
532 static int __init intel_iommu_setup(char *str)
537 if (!strncmp(str, "on", 2)) {
539 pr_info("IOMMU enabled\n");
540 } else if (!strncmp(str, "off", 3)) {
542 pr_info("IOMMU disabled\n");
543 } else if (!strncmp(str, "igfx_off", 8)) {
545 pr_info("Disable GFX device mapping\n");
546 } else if (!strncmp(str, "forcedac", 8)) {
547 pr_info("Forcing DAC for PCI devices\n");
549 } else if (!strncmp(str, "strict", 6)) {
550 pr_info("Disable batched IOTLB flush\n");
551 intel_iommu_strict = 1;
552 } else if (!strncmp(str, "sp_off", 6)) {
553 pr_info("Disable supported super page\n");
554 intel_iommu_superpage = 0;
555 } else if (!strncmp(str, "ecs_off", 7)) {
557 "Intel-IOMMU: disable extended context table support\n");
559 } else if (!strncmp(str, "tboot_noforce", 13)) {
561 "Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
562 intel_iommu_tboot_noforce = 1;
565 str += strcspn(str, ",");
571 __setup("intel_iommu=", intel_iommu_setup);
573 static struct kmem_cache *iommu_domain_cache;
574 static struct kmem_cache *iommu_devinfo_cache;
576 static struct dmar_domain* get_iommu_domain(struct intel_iommu *iommu, u16 did)
578 struct dmar_domain **domains;
581 domains = iommu->domains[idx];
585 return domains[did & 0xff];
588 static void set_iommu_domain(struct intel_iommu *iommu, u16 did,
589 struct dmar_domain *domain)
591 struct dmar_domain **domains;
594 if (!iommu->domains[idx]) {
595 size_t size = 256 * sizeof(struct dmar_domain *);
596 iommu->domains[idx] = kzalloc(size, GFP_ATOMIC);
599 domains = iommu->domains[idx];
600 if (WARN_ON(!domains))
603 domains[did & 0xff] = domain;
606 static inline void *alloc_pgtable_page(int node)
611 page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
613 vaddr = page_address(page);
617 static inline void free_pgtable_page(void *vaddr)
619 free_page((unsigned long)vaddr);
622 static inline void *alloc_domain_mem(void)
624 return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
627 static void free_domain_mem(void *vaddr)
629 kmem_cache_free(iommu_domain_cache, vaddr);
632 static inline void * alloc_devinfo_mem(void)
634 return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
637 static inline void free_devinfo_mem(void *vaddr)
639 kmem_cache_free(iommu_devinfo_cache, vaddr);
642 static inline int domain_type_is_vm(struct dmar_domain *domain)
644 return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
647 static inline int domain_type_is_si(struct dmar_domain *domain)
649 return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY;
652 static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
654 return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
655 DOMAIN_FLAG_STATIC_IDENTITY);
658 static inline int domain_pfn_supported(struct dmar_domain *domain,
661 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
663 return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
666 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
671 sagaw = cap_sagaw(iommu->cap);
672 for (agaw = width_to_agaw(max_gaw);
674 if (test_bit(agaw, &sagaw))
682 * Calculate max SAGAW for each iommu.
684 int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
686 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
690 * Calculate agaw for each iommu.
691 * "SAGAW" may be different across iommus; use a default agaw, and
692 * fall back to a smaller supported agaw for iommus that don't support the default.
694 int iommu_calculate_agaw(struct intel_iommu *iommu)
696 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
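/*
 * Example: DEFAULT_DOMAIN_ADDRESS_WIDTH is 57, so the search starts at
 * width_to_agaw(57) == 3 (a 5-level page table).  If cap_sagaw() only
 * advertises 4-level support, the loop in __iommu_calculate_agaw() falls
 * back to agaw 2, which agaw_to_width() maps back to a 48-bit width.
 */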
699 /* This function only returns the single iommu in a domain */
700 static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
704 /* si_domain and vm domain should not get here. */
705 BUG_ON(domain_type_is_vm_or_si(domain));
706 for_each_domain_iommu(iommu_id, domain)
709 if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
712 return g_iommus[iommu_id];
715 static void domain_update_iommu_coherency(struct dmar_domain *domain)
717 struct dmar_drhd_unit *drhd;
718 struct intel_iommu *iommu;
722 domain->iommu_coherency = 1;
724 for_each_domain_iommu(i, domain) {
726 if (!ecap_coherent(g_iommus[i]->ecap)) {
727 domain->iommu_coherency = 0;
734 /* No hardware attached; use lowest common denominator */
736 for_each_active_iommu(iommu, drhd) {
737 if (!ecap_coherent(iommu->ecap)) {
738 domain->iommu_coherency = 0;
745 static int domain_update_iommu_snooping(struct intel_iommu *skip)
747 struct dmar_drhd_unit *drhd;
748 struct intel_iommu *iommu;
752 for_each_active_iommu(iommu, drhd) {
754 if (!ecap_sc_support(iommu->ecap)) {
765 static int domain_update_iommu_superpage(struct intel_iommu *skip)
767 struct dmar_drhd_unit *drhd;
768 struct intel_iommu *iommu;
771 if (!intel_iommu_superpage) {
775 /* set iommu_superpage to the smallest common denominator */
777 for_each_active_iommu(iommu, drhd) {
779 mask &= cap_super_page_val(iommu->cap);
789 /* Some capabilities may be different across iommus */
790 static void domain_update_iommu_cap(struct dmar_domain *domain)
792 domain_update_iommu_coherency(domain);
793 domain->iommu_snooping = domain_update_iommu_snooping(NULL);
794 domain->iommu_superpage = domain_update_iommu_superpage(NULL);
797 static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu,
798 u8 bus, u8 devfn, int alloc)
800 struct root_entry *root = &iommu->root_entry[bus];
801 struct context_entry *context;
805 if (ecs_enabled(iommu)) {
813 context = phys_to_virt(*entry & VTD_PAGE_MASK);
815 unsigned long phy_addr;
819 context = alloc_pgtable_page(iommu->node);
823 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
824 phy_addr = virt_to_phys((void *)context);
825 *entry = phy_addr | 1;
826 __iommu_flush_cache(iommu, entry, sizeof(*entry));
828 return &context[devfn];
831 static int iommu_dummy(struct device *dev)
833 return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
836 static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
838 struct dmar_drhd_unit *drhd = NULL;
839 struct intel_iommu *iommu;
841 struct pci_dev *ptmp, *pdev = NULL;
845 if (iommu_dummy(dev))
848 if (dev_is_pci(dev)) {
849 struct pci_dev *pf_pdev;
851 pdev = to_pci_dev(dev);
854 /* VMD child devices currently cannot be handled individually */
855 if (is_vmd(pdev->bus))
859 /* VFs aren't listed in scope tables; we need to look up
860 * the PF instead to find the IOMMU. */
861 pf_pdev = pci_physfn(pdev);
863 segment = pci_domain_nr(pdev->bus);
864 } else if (has_acpi_companion(dev))
865 dev = &ACPI_COMPANION(dev)->dev;
868 for_each_active_iommu(iommu, drhd) {
869 if (pdev && segment != drhd->segment)
872 for_each_active_dev_scope(drhd->devices,
873 drhd->devices_cnt, i, tmp) {
875 /* For a VF use its original BDF# not that of the PF
876 * which we used for the IOMMU lookup. Strictly speaking
877 * we could do this for all PCI devices; we only need to
878 * get the BDF# from the scope table for ACPI matches. */
879 if (pdev && pdev->is_virtfn)
882 *bus = drhd->devices[i].bus;
883 *devfn = drhd->devices[i].devfn;
887 if (!pdev || !dev_is_pci(tmp))
890 ptmp = to_pci_dev(tmp);
891 if (ptmp->subordinate &&
892 ptmp->subordinate->number <= pdev->bus->number &&
893 ptmp->subordinate->busn_res.end >= pdev->bus->number)
897 if (pdev && drhd->include_all) {
899 *bus = pdev->bus->number;
900 *devfn = pdev->devfn;
911 static void domain_flush_cache(struct dmar_domain *domain,
912 void *addr, int size)
914 if (!domain->iommu_coherency)
915 clflush_cache_range(addr, size);
918 static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
920 struct context_entry *context;
924 spin_lock_irqsave(&iommu->lock, flags);
925 context = iommu_context_addr(iommu, bus, devfn, 0);
927 ret = context_present(context);
928 spin_unlock_irqrestore(&iommu->lock, flags);
932 static void free_context_table(struct intel_iommu *iommu)
936 struct context_entry *context;
938 spin_lock_irqsave(&iommu->lock, flags);
939 if (!iommu->root_entry) {
942 for (i = 0; i < ROOT_ENTRY_NR; i++) {
943 context = iommu_context_addr(iommu, i, 0, 0);
945 free_pgtable_page(context);
947 if (!ecs_enabled(iommu))
950 context = iommu_context_addr(iommu, i, 0x80, 0);
952 free_pgtable_page(context);
955 free_pgtable_page(iommu->root_entry);
956 iommu->root_entry = NULL;
958 spin_unlock_irqrestore(&iommu->lock, flags);
961 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
962 unsigned long pfn, int *target_level)
964 struct dma_pte *parent, *pte = NULL;
965 int level = agaw_to_level(domain->agaw);
968 BUG_ON(!domain->pgd);
970 if (!domain_pfn_supported(domain, pfn))
971 /* Address beyond IOMMU's addressing capabilities. */
974 parent = domain->pgd;
979 offset = pfn_level_offset(pfn, level);
980 pte = &parent[offset];
981 if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
983 if (level == *target_level)
986 if (!dma_pte_present(pte)) {
989 tmp_page = alloc_pgtable_page(domain->nid);
994 domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
995 pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
996 if (cmpxchg64(&pte->val, 0ULL, pteval))
997 /* Someone else set it while we were thinking; use theirs. */
998 free_pgtable_page(tmp_page);
1000 domain_flush_cache(domain, pte, sizeof(*pte));
1005 parent = phys_to_virt(dma_pte_addr(pte));
1010 *target_level = level;
1016 /* return address's pte at specific level */
1017 static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
1019 int level, int *large_page)
1021 struct dma_pte *parent, *pte = NULL;
1022 int total = agaw_to_level(domain->agaw);
1025 parent = domain->pgd;
1026 while (level <= total) {
1027 offset = pfn_level_offset(pfn, total);
1028 pte = &parent[offset];
1032 if (!dma_pte_present(pte)) {
1033 *large_page = total;
1037 if (dma_pte_superpage(pte)) {
1038 *large_page = total;
1042 parent = phys_to_virt(dma_pte_addr(pte));
1048 /* clear last level pte; a tlb flush should follow */
1049 static void dma_pte_clear_range(struct dmar_domain *domain,
1050 unsigned long start_pfn,
1051 unsigned long last_pfn)
1053 unsigned int large_page = 1;
1054 struct dma_pte *first_pte, *pte;
1056 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1057 BUG_ON(!domain_pfn_supported(domain, last_pfn));
1058 BUG_ON(start_pfn > last_pfn);
1060 /* we don't need lock here; nobody else touches the iova range */
1063 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
1065 start_pfn = align_to_level(start_pfn + 1, large_page + 1);
1070 start_pfn += lvl_to_nr_pages(large_page);
1072 } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
1074 domain_flush_cache(domain, first_pte,
1075 (void *)pte - (void *)first_pte);
1077 } while (start_pfn && start_pfn <= last_pfn);
1080 static void dma_pte_free_level(struct dmar_domain *domain, int level,
1081 int retain_level, struct dma_pte *pte,
1082 unsigned long pfn, unsigned long start_pfn,
1083 unsigned long last_pfn)
1085 pfn = max(start_pfn, pfn);
1086 pte = &pte[pfn_level_offset(pfn, level)];
1089 unsigned long level_pfn;
1090 struct dma_pte *level_pte;
1092 if (!dma_pte_present(pte) || dma_pte_superpage(pte))
1095 level_pfn = pfn & level_mask(level);
1096 level_pte = phys_to_virt(dma_pte_addr(pte));
1099 dma_pte_free_level(domain, level - 1, retain_level,
1100 level_pte, level_pfn, start_pfn,
1105 * Free the page table if we're below the level we want to
1106 * retain and the range covers the entire table.
1108 if (level < retain_level && !(start_pfn > level_pfn ||
1109 last_pfn < level_pfn + level_size(level) - 1)) {
1111 domain_flush_cache(domain, pte, sizeof(*pte));
1112 free_pgtable_page(level_pte);
1115 pfn += level_size(level);
1116 } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1120 * clear last level (leaf) ptes and free page table pages below the
1121 * level we wish to keep intact.
1123 static void dma_pte_free_pagetable(struct dmar_domain *domain,
1124 unsigned long start_pfn,
1125 unsigned long last_pfn,
1128 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1129 BUG_ON(!domain_pfn_supported(domain, last_pfn));
1130 BUG_ON(start_pfn > last_pfn);
1132 dma_pte_clear_range(domain, start_pfn, last_pfn);
1134 /* We don't need lock here; nobody else touches the iova range */
1135 dma_pte_free_level(domain, agaw_to_level(domain->agaw), retain_level,
1136 domain->pgd, 0, start_pfn, last_pfn);
1139 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1140 free_pgtable_page(domain->pgd);
1145 /* When a page at a given level is being unlinked from its parent, we don't
1146 need to *modify* it at all. All we need to do is make a list of all the
1147 pages which can be freed just as soon as we've flushed the IOTLB and we
1148 know the hardware page-walk will no longer touch them.
1149 The 'pte' argument is the *parent* PTE, pointing to the page that is to be freed. */
1151 static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
1152 int level, struct dma_pte *pte,
1153 struct page *freelist)
1157 pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
1158 pg->freelist = freelist;
1164 pte = page_address(pg);
1166 if (dma_pte_present(pte) && !dma_pte_superpage(pte))
1167 freelist = dma_pte_list_pagetables(domain, level - 1,
1170 } while (!first_pte_in_page(pte));
1175 static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
1176 struct dma_pte *pte, unsigned long pfn,
1177 unsigned long start_pfn,
1178 unsigned long last_pfn,
1179 struct page *freelist)
1181 struct dma_pte *first_pte = NULL, *last_pte = NULL;
1183 pfn = max(start_pfn, pfn);
1184 pte = &pte[pfn_level_offset(pfn, level)];
1187 unsigned long level_pfn;
1189 if (!dma_pte_present(pte))
1192 level_pfn = pfn & level_mask(level);
1194 /* If range covers entire pagetable, free it */
1195 if (start_pfn <= level_pfn &&
1196 last_pfn >= level_pfn + level_size(level) - 1) {
1197 /* These subordinate page tables are going away entirely. Don't
1198 bother to clear them; we're just going to *free* them. */
1199 if (level > 1 && !dma_pte_superpage(pte))
1200 freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);
1206 } else if (level > 1) {
1207 /* Recurse down into a level that isn't *entirely* obsolete */
1208 freelist = dma_pte_clear_level(domain, level - 1,
1209 phys_to_virt(dma_pte_addr(pte)),
1210 level_pfn, start_pfn, last_pfn,
1214 pfn += level_size(level);
1215 } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1218 domain_flush_cache(domain, first_pte,
1219 (void *)++last_pte - (void *)first_pte);
1224 /* We can't just free the pages because the IOMMU may still be walking
1225 the page tables, and may have cached the intermediate levels. The
1226 pages can only be freed after the IOTLB flush has been done. */
1227 static struct page *domain_unmap(struct dmar_domain *domain,
1228 unsigned long start_pfn,
1229 unsigned long last_pfn)
1231 struct page *freelist = NULL;
1233 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1234 BUG_ON(!domain_pfn_supported(domain, last_pfn));
1235 BUG_ON(start_pfn > last_pfn);
1237 /* we don't need lock here; nobody else touches the iova range */
1238 freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
1239 domain->pgd, 0, start_pfn, last_pfn, NULL);
1242 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1243 struct page *pgd_page = virt_to_page(domain->pgd);
1244 pgd_page->freelist = freelist;
1245 freelist = pgd_page;
1253 static void dma_free_pagelist(struct page *freelist)
1257 while ((pg = freelist)) {
1258 freelist = pg->freelist;
1259 free_pgtable_page(page_address(pg));
1263 static void iova_entry_free(unsigned long data)
1265 struct page *freelist = (struct page *)data;
1267 dma_free_pagelist(freelist);
1270 /* iommu handling */
1271 static int iommu_alloc_root_entry(struct intel_iommu *iommu)
1273 struct root_entry *root;
1274 unsigned long flags;
1276 root = (struct root_entry *)alloc_pgtable_page(iommu->node);
1278 pr_err("Allocating root entry for %s failed\n",
1283 __iommu_flush_cache(iommu, root, ROOT_SIZE);
1285 spin_lock_irqsave(&iommu->lock, flags);
1286 iommu->root_entry = root;
1287 spin_unlock_irqrestore(&iommu->lock, flags);
1292 static void iommu_set_root_entry(struct intel_iommu *iommu)
1298 addr = virt_to_phys(iommu->root_entry);
1299 if (ecs_enabled(iommu))
1300 addr |= DMA_RTADDR_RTT;
1302 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1303 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);
1305 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
1307 /* Make sure hardware completes it */
1308 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1309 readl, (sts & DMA_GSTS_RTPS), sts);
1311 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1314 static void iommu_flush_write_buffer(struct intel_iommu *iommu)
1319 if (!rwbf_quirk && !cap_rwbf(iommu->cap))
1322 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1323 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
1325 /* Make sure hardware completes it */
1326 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1327 readl, (!(val & DMA_GSTS_WBFS)), val);
1329 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1332 /* return value determines whether we need a write buffer flush */
1333 static void __iommu_flush_context(struct intel_iommu *iommu,
1334 u16 did, u16 source_id, u8 function_mask,
1341 case DMA_CCMD_GLOBAL_INVL:
1342 val = DMA_CCMD_GLOBAL_INVL;
1344 case DMA_CCMD_DOMAIN_INVL:
1345 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
1347 case DMA_CCMD_DEVICE_INVL:
1348 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
1349 | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
1354 val |= DMA_CCMD_ICC;
1356 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1357 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
1359 /* Make sure hardware completes it */
1360 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1361 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
1363 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1366 /* return value determines whether we need a write buffer flush */
1367 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1368 u64 addr, unsigned int size_order, u64 type)
1370 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1371 u64 val = 0, val_iva = 0;
1375 case DMA_TLB_GLOBAL_FLUSH:
1376 /* global flush doesn't need to set IVA_REG */
1377 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
1379 case DMA_TLB_DSI_FLUSH:
1380 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1382 case DMA_TLB_PSI_FLUSH:
1383 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1384 /* IH bit is passed in as part of address */
1385 val_iva = size_order | addr;
1390 /* Note: set drain read/write */
1393 * This is probably just to be extra safe.  It looks like we can
1394 * ignore it without any impact.
1396 if (cap_read_drain(iommu->cap))
1397 val |= DMA_TLB_READ_DRAIN;
1399 if (cap_write_drain(iommu->cap))
1400 val |= DMA_TLB_WRITE_DRAIN;
1402 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1403 /* Note: Only uses first TLB reg currently */
1405 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1406 dmar_writeq(iommu->reg + tlb_offset + 8, val);
1408 /* Make sure hardware completes it */
1409 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1410 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1412 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1414 /* check IOTLB invalidation granularity */
1415 if (DMA_TLB_IAIG(val) == 0)
1416 pr_err("Flush IOTLB failed\n");
1417 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
1418 pr_debug("TLB flush request %Lx, actual %Lx\n",
1419 (unsigned long long)DMA_TLB_IIRG(type),
1420 (unsigned long long)DMA_TLB_IAIG(val));
1423 static struct device_domain_info *
1424 iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
1427 struct device_domain_info *info;
1429 assert_spin_locked(&device_domain_lock);
1434 list_for_each_entry(info, &domain->devices, link)
1435 if (info->iommu == iommu && info->bus == bus &&
1436 info->devfn == devfn) {
1437 if (info->ats_supported && info->dev)
1445 static void domain_update_iotlb(struct dmar_domain *domain)
1447 struct device_domain_info *info;
1448 bool has_iotlb_device = false;
1450 assert_spin_locked(&device_domain_lock);
1452 list_for_each_entry(info, &domain->devices, link) {
1453 struct pci_dev *pdev;
1455 if (!info->dev || !dev_is_pci(info->dev))
1458 pdev = to_pci_dev(info->dev);
1459 if (pdev->ats_enabled) {
1460 has_iotlb_device = true;
1465 domain->has_iotlb_device = has_iotlb_device;
1468 static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1470 struct pci_dev *pdev;
1472 assert_spin_locked(&device_domain_lock);
1474 if (!info || !dev_is_pci(info->dev))
1477 pdev = to_pci_dev(info->dev);
1478 /* For IOMMU that supports device IOTLB throttling (DIT), we assign
1479 * PFSID to the invalidation desc of a VF such that IOMMU HW can gauge
1480 * queue depth at PF level. If DIT is not set, PFSID will be treated as
1481 * reserved, which should be set to 0.
1483 if (!ecap_dit(info->iommu->ecap))
1486 struct pci_dev *pf_pdev;
1488 /* pdev will be returned if device is not a vf */
1489 pf_pdev = pci_physfn(pdev);
1490 info->pfsid = PCI_DEVID(pf_pdev->bus->number, pf_pdev->devfn);
1493 #ifdef CONFIG_INTEL_IOMMU_SVM
1494 /* The PCIe spec, in its wisdom, declares that the behaviour of
1495 the device if you enable PASID support after ATS support is
1496 undefined. So always enable PASID support on devices which
1497 have it, even if we can't yet know if we're ever going to
1499 if (info->pasid_supported && !pci_enable_pasid(pdev, info->pasid_supported & ~1))
1500 info->pasid_enabled = 1;
1502 if (info->pri_supported && !pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32))
1503 info->pri_enabled = 1;
1505 if (info->ats_supported && !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
1506 info->ats_enabled = 1;
1507 domain_update_iotlb(info->domain);
1508 info->ats_qdep = pci_ats_queue_depth(pdev);
1512 static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1514 struct pci_dev *pdev;
1516 assert_spin_locked(&device_domain_lock);
1518 if (!dev_is_pci(info->dev))
1521 pdev = to_pci_dev(info->dev);
1523 if (info->ats_enabled) {
1524 pci_disable_ats(pdev);
1525 info->ats_enabled = 0;
1526 domain_update_iotlb(info->domain);
1528 #ifdef CONFIG_INTEL_IOMMU_SVM
1529 if (info->pri_enabled) {
1530 pci_disable_pri(pdev);
1531 info->pri_enabled = 0;
1533 if (info->pasid_enabled) {
1534 pci_disable_pasid(pdev);
1535 info->pasid_enabled = 0;
1540 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1541 u64 addr, unsigned mask)
1544 unsigned long flags;
1545 struct device_domain_info *info;
1547 if (!domain->has_iotlb_device)
1550 spin_lock_irqsave(&device_domain_lock, flags);
1551 list_for_each_entry(info, &domain->devices, link) {
1552 if (!info->ats_enabled)
1555 sid = info->bus << 8 | info->devfn;
1556 qdep = info->ats_qdep;
1557 qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
1560 spin_unlock_irqrestore(&device_domain_lock, flags);
1563 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
1564 struct dmar_domain *domain,
1565 unsigned long pfn, unsigned int pages,
1568 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
1569 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
1570 u16 did = domain->iommu_did[iommu->seq_id];
1577 * Fallback to domain-selective flush if there is no PSI support or the size is too big.
1579 * PSI requires page size to be 2 ^ x, and the base address is naturally
1580 * aligned to the size
1582 if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1583 iommu->flush.flush_iotlb(iommu, did, 0, 0,
1586 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
1590 * In caching mode, changes of pages from non-present to present require
1591 * flush. However, device IOTLB doesn't need to be flushed in this case.
1593 if (!cap_caching_mode(iommu->cap) || !map)
1594 iommu_flush_dev_iotlb(domain, addr, mask);
1597 /* Notification for newly created mappings */
1598 static inline void __mapping_notify_one(struct intel_iommu *iommu,
1599 struct dmar_domain *domain,
1600 unsigned long pfn, unsigned int pages)
1602 /* It's a non-present to present mapping. Only flush if caching mode */
1603 if (cap_caching_mode(iommu->cap))
1604 iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1);
1606 iommu_flush_write_buffer(iommu);
1609 static void iommu_flush_iova(struct iova_domain *iovad)
1611 struct dmar_domain *domain;
1614 domain = container_of(iovad, struct dmar_domain, iovad);
1616 for_each_domain_iommu(idx, domain) {
1617 struct intel_iommu *iommu = g_iommus[idx];
1618 u16 did = domain->iommu_did[iommu->seq_id];
1620 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
1622 if (!cap_caching_mode(iommu->cap))
1623 iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
1624 0, MAX_AGAW_PFN_WIDTH);
1628 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1631 unsigned long flags;
1633 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1634 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1635 pmen &= ~DMA_PMEN_EPM;
1636 writel(pmen, iommu->reg + DMAR_PMEN_REG);
1638 /* wait for the protected region status bit to clear */
1639 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1640 readl, !(pmen & DMA_PMEN_PRS), pmen);
1642 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1645 static void iommu_enable_translation(struct intel_iommu *iommu)
1648 unsigned long flags;
1650 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1651 iommu->gcmd |= DMA_GCMD_TE;
1652 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1654 /* Make sure hardware completes it */
1655 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1656 readl, (sts & DMA_GSTS_TES), sts);
1658 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1661 static void iommu_disable_translation(struct intel_iommu *iommu)
1666 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1667 iommu->gcmd &= ~DMA_GCMD_TE;
1668 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1670 /* Make sure hardware completes it */
1671 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1672 readl, (!(sts & DMA_GSTS_TES)), sts);
1674 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1678 static int iommu_init_domains(struct intel_iommu *iommu)
1680 u32 ndomains, nlongs;
1683 ndomains = cap_ndoms(iommu->cap);
1684 pr_debug("%s: Number of Domains supported <%d>\n",
1685 iommu->name, ndomains);
1686 nlongs = BITS_TO_LONGS(ndomains);
1688 spin_lock_init(&iommu->lock);
1690 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1691 if (!iommu->domain_ids) {
1692 pr_err("%s: Allocating domain id array failed\n",
1697 size = (ALIGN(ndomains, 256) >> 8) * sizeof(struct dmar_domain **);
1698 iommu->domains = kzalloc(size, GFP_KERNEL);
1700 if (iommu->domains) {
1701 size = 256 * sizeof(struct dmar_domain *);
1702 iommu->domains[0] = kzalloc(size, GFP_KERNEL);
1705 if (!iommu->domains || !iommu->domains[0]) {
1706 pr_err("%s: Allocating domain array failed\n",
1708 kfree(iommu->domain_ids);
1709 kfree(iommu->domains);
1710 iommu->domain_ids = NULL;
1711 iommu->domains = NULL;
1718 * If Caching mode is set, then invalid translations are tagged
1719 * with domain-id 0, hence we need to pre-allocate it. We also
1720 * use domain-id 0 as a marker for non-allocated domain-id, so
1721 * make sure it is not used for a real domain.
1723 set_bit(0, iommu->domain_ids);
1728 static void disable_dmar_iommu(struct intel_iommu *iommu)
1730 struct device_domain_info *info, *tmp;
1731 unsigned long flags;
1733 if (!iommu->domains || !iommu->domain_ids)
1737 spin_lock_irqsave(&device_domain_lock, flags);
1738 list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
1739 struct dmar_domain *domain;
1741 if (info->iommu != iommu)
1744 if (!info->dev || !info->domain)
1747 domain = info->domain;
1749 __dmar_remove_one_dev_info(info);
1751 if (!domain_type_is_vm_or_si(domain)) {
1753 * The domain_exit() function can't be called under
1754 * device_domain_lock, as it takes this lock itself.
1755 * So release the lock here and re-run the loop
1758 spin_unlock_irqrestore(&device_domain_lock, flags);
1759 domain_exit(domain);
1763 spin_unlock_irqrestore(&device_domain_lock, flags);
1765 if (iommu->gcmd & DMA_GCMD_TE)
1766 iommu_disable_translation(iommu);
1769 static void free_dmar_iommu(struct intel_iommu *iommu)
1771 if ((iommu->domains) && (iommu->domain_ids)) {
1772 int elems = ALIGN(cap_ndoms(iommu->cap), 256) >> 8;
1775 for (i = 0; i < elems; i++)
1776 kfree(iommu->domains[i]);
1777 kfree(iommu->domains);
1778 kfree(iommu->domain_ids);
1779 iommu->domains = NULL;
1780 iommu->domain_ids = NULL;
1783 g_iommus[iommu->seq_id] = NULL;
1785 /* free context mapping */
1786 free_context_table(iommu);
1788 #ifdef CONFIG_INTEL_IOMMU_SVM
1789 if (pasid_enabled(iommu)) {
1790 if (ecap_prs(iommu->ecap))
1791 intel_svm_finish_prq(iommu);
1792 intel_svm_free_pasid_tables(iommu);
1797 static struct dmar_domain *alloc_domain(int flags)
1799 struct dmar_domain *domain;
1801 domain = alloc_domain_mem();
1805 memset(domain, 0, sizeof(*domain));
1807 domain->flags = flags;
1808 domain->has_iotlb_device = false;
1809 INIT_LIST_HEAD(&domain->devices);
1814 /* Must be called with iommu->lock */
1815 static int domain_attach_iommu(struct dmar_domain *domain,
1816 struct intel_iommu *iommu)
1818 unsigned long ndomains;
1821 assert_spin_locked(&device_domain_lock);
1822 assert_spin_locked(&iommu->lock);
1824 domain->iommu_refcnt[iommu->seq_id] += 1;
1825 domain->iommu_count += 1;
1826 if (domain->iommu_refcnt[iommu->seq_id] == 1) {
1827 ndomains = cap_ndoms(iommu->cap);
1828 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1830 if (num >= ndomains) {
1831 pr_err("%s: No free domain ids\n", iommu->name);
1832 domain->iommu_refcnt[iommu->seq_id] -= 1;
1833 domain->iommu_count -= 1;
1837 set_bit(num, iommu->domain_ids);
1838 set_iommu_domain(iommu, num, domain);
1840 domain->iommu_did[iommu->seq_id] = num;
1841 domain->nid = iommu->node;
1843 domain_update_iommu_cap(domain);
1849 static int domain_detach_iommu(struct dmar_domain *domain,
1850 struct intel_iommu *iommu)
1852 int num, count = INT_MAX;
1854 assert_spin_locked(&device_domain_lock);
1855 assert_spin_locked(&iommu->lock);
1857 domain->iommu_refcnt[iommu->seq_id] -= 1;
1858 count = --domain->iommu_count;
1859 if (domain->iommu_refcnt[iommu->seq_id] == 0) {
1860 num = domain->iommu_did[iommu->seq_id];
1861 clear_bit(num, iommu->domain_ids);
1862 set_iommu_domain(iommu, num, NULL);
1864 domain_update_iommu_cap(domain);
1865 domain->iommu_did[iommu->seq_id] = 0;
1871 static struct iova_domain reserved_iova_list;
1872 static struct lock_class_key reserved_rbtree_key;
1874 static int dmar_init_reserved_ranges(void)
1876 struct pci_dev *pdev = NULL;
1880 init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN);
1882 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1883 &reserved_rbtree_key);
1885 /* IOAPIC ranges shouldn't be accessed by DMA */
1886 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1887 IOVA_PFN(IOAPIC_RANGE_END));
1889 pr_err("Reserve IOAPIC range failed\n");
1893 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1894 for_each_pci_dev(pdev) {
1897 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1898 r = &pdev->resource[i];
1899 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1901 iova = reserve_iova(&reserved_iova_list,
1905 pr_err("Reserve iova failed\n");
1913 static void domain_reserve_special_ranges(struct dmar_domain *domain)
1915 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1918 static inline int guestwidth_to_adjustwidth(int gaw)
1921 int r = (gaw - 12) % 9;
1932 static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
1935 int adjust_width, agaw;
1936 unsigned long sagaw;
1939 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
1941 err = init_iova_flush_queue(&domain->iovad,
1942 iommu_flush_iova, iova_entry_free);
1946 domain_reserve_special_ranges(domain);
1948 /* calculate AGAW */
1949 if (guest_width > cap_mgaw(iommu->cap))
1950 guest_width = cap_mgaw(iommu->cap);
1951 domain->gaw = guest_width;
1952 adjust_width = guestwidth_to_adjustwidth(guest_width);
1953 agaw = width_to_agaw(adjust_width);
1954 sagaw = cap_sagaw(iommu->cap);
1955 if (!test_bit(agaw, &sagaw)) {
1956 /* hardware doesn't support it, choose a bigger one */
1957 pr_debug("Hardware doesn't support agaw %d\n", agaw);
1958 agaw = find_next_bit(&sagaw, 5, agaw);
1962 domain->agaw = agaw;
1964 if (ecap_coherent(iommu->ecap))
1965 domain->iommu_coherency = 1;
1967 domain->iommu_coherency = 0;
1969 if (ecap_sc_support(iommu->ecap))
1970 domain->iommu_snooping = 1;
1972 domain->iommu_snooping = 0;
1974 if (intel_iommu_superpage)
1975 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
1977 domain->iommu_superpage = 0;
1979 domain->nid = iommu->node;
1981 /* always allocate the top pgd */
1982 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
1985 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
1989 static void domain_exit(struct dmar_domain *domain)
1991 struct page *freelist = NULL;
1993 /* Domain 0 is reserved, so don't process it */
1997 /* Remove associated devices and clear attached or cached domains */
1999 domain_remove_dev_info(domain);
2003 put_iova_domain(&domain->iovad);
2005 freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
2007 dma_free_pagelist(freelist);
2009 free_domain_mem(domain);
2012 static int domain_context_mapping_one(struct dmar_domain *domain,
2013 struct intel_iommu *iommu,
2016 u16 did = domain->iommu_did[iommu->seq_id];
2017 int translation = CONTEXT_TT_MULTI_LEVEL;
2018 struct device_domain_info *info = NULL;
2019 struct context_entry *context;
2020 unsigned long flags;
2021 struct dma_pte *pgd;
2026 if (hw_pass_through && domain_type_is_si(domain))
2027 translation = CONTEXT_TT_PASS_THROUGH;
2029 pr_debug("Set context mapping for %02x:%02x.%d\n",
2030 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
2032 BUG_ON(!domain->pgd);
2034 spin_lock_irqsave(&device_domain_lock, flags);
2035 spin_lock(&iommu->lock);
2038 context = iommu_context_addr(iommu, bus, devfn, 1);
2043 if (context_present(context))
2047 * For kdump cases, old valid entries may be cached due to the
2048 * in-flight DMA and copied pgtable, but there is no unmapping
2049 * behaviour for them, thus we need an explicit cache flush for
2050 * the newly-mapped device. For kdump, at this point, the device
2051 * is supposed to finish reset at its driver probe stage, so no
2052 * in-flight DMA will exist, and we don't need to worry anymore
2055 if (context_copied(context)) {
2056 u16 did_old = context_domain_id(context);
2058 if (did_old < cap_ndoms(iommu->cap)) {
2059 iommu->flush.flush_context(iommu, did_old,
2060 (((u16)bus) << 8) | devfn,
2061 DMA_CCMD_MASK_NOBIT,
2062 DMA_CCMD_DEVICE_INVL);
2063 iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
2070 context_clear_entry(context);
2071 context_set_domain_id(context, did);
2074 * Skip top levels of page tables for iommu which has less agaw
2075 * than default. Unnecessary for PT mode.
2077 if (translation != CONTEXT_TT_PASS_THROUGH) {
2078 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
2080 pgd = phys_to_virt(dma_pte_addr(pgd));
2081 if (!dma_pte_present(pgd))
2085 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
2086 if (info && info->ats_supported)
2087 translation = CONTEXT_TT_DEV_IOTLB;
2089 translation = CONTEXT_TT_MULTI_LEVEL;
2091 context_set_address_root(context, virt_to_phys(pgd));
2092 context_set_address_width(context, iommu->agaw);
2095 * In pass through mode, AW must be programmed to
2096 * indicate the largest AGAW value supported by
2097 * hardware. And ASR is ignored by hardware.
2099 context_set_address_width(context, iommu->msagaw);
2102 context_set_translation_type(context, translation);
2103 context_set_fault_enable(context);
2104 context_set_present(context);
2105 domain_flush_cache(domain, context, sizeof(*context));
2108 * It's a non-present to present mapping. If hardware doesn't cache
2109 * non-present entries we only need to flush the write-buffer. If it
2110 * _does_ cache non-present entries, then it does so in the special
2111 * domain #0, which we have to flush:
2113 if (cap_caching_mode(iommu->cap)) {
2114 iommu->flush.flush_context(iommu, 0,
2115 (((u16)bus) << 8) | devfn,
2116 DMA_CCMD_MASK_NOBIT,
2117 DMA_CCMD_DEVICE_INVL);
2118 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
2120 iommu_flush_write_buffer(iommu);
2122 iommu_enable_dev_iotlb(info);
2127 spin_unlock(&iommu->lock);
2128 spin_unlock_irqrestore(&device_domain_lock, flags);
2133 struct domain_context_mapping_data {
2134 struct dmar_domain *domain;
2135 struct intel_iommu *iommu;
2138 static int domain_context_mapping_cb(struct pci_dev *pdev,
2139 u16 alias, void *opaque)
2141 struct domain_context_mapping_data *data = opaque;
2143 return domain_context_mapping_one(data->domain, data->iommu,
2144 PCI_BUS_NUM(alias), alias & 0xff);
2148 domain_context_mapping(struct dmar_domain *domain, struct device *dev)
2150 struct intel_iommu *iommu;
2152 struct domain_context_mapping_data data;
2154 iommu = device_to_iommu(dev, &bus, &devfn);
2158 if (!dev_is_pci(dev))
2159 return domain_context_mapping_one(domain, iommu, bus, devfn);
2161 data.domain = domain;
2164 return pci_for_each_dma_alias(to_pci_dev(dev),
2165 &domain_context_mapping_cb, &data);
2168 static int domain_context_mapped_cb(struct pci_dev *pdev,
2169 u16 alias, void *opaque)
2171 struct intel_iommu *iommu = opaque;
2173 return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
2176 static int domain_context_mapped(struct device *dev)
2178 struct intel_iommu *iommu;
2181 iommu = device_to_iommu(dev, &bus, &devfn);
2185 if (!dev_is_pci(dev))
2186 return device_context_mapped(iommu, bus, devfn);
2188 return !pci_for_each_dma_alias(to_pci_dev(dev),
2189 domain_context_mapped_cb, iommu);
2192 /* Returns a number of VTD pages, but aligned to MM page size */
2193 static inline unsigned long aligned_nrpages(unsigned long host_addr,
2196 host_addr &= ~PAGE_MASK;
2197 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
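/*
 * Example (assuming 4KiB kernel pages): a 0x1000-byte buffer starting at
 * page offset 0x800 yields PAGE_ALIGN(0x800 + 0x1000) >> VTD_PAGE_SHIFT
 * == 2, i.e. a buffer that straddles a page boundary needs two IOVA pages
 * even though it is only one page long.
 */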
2200 /* Return largest possible superpage level for a given mapping */
2201 static inline int hardware_largepage_caps(struct dmar_domain *domain,
2202 unsigned long iov_pfn,
2203 unsigned long phy_pfn,
2204 unsigned long pages)
2206 int support, level = 1;
2207 unsigned long pfnmerge;
2209 support = domain->iommu_superpage;
2211 /* To use a large page, the virtual *and* physical addresses
2212 must be aligned to 2MiB/1GiB/etc. Lower bits set in either
2213 of them will mean we have to use smaller pages. So just
2214 merge them and check both at once. */
2215 pfnmerge = iov_pfn | phy_pfn;
2217 while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
2218 pages >>= VTD_STRIDE_SHIFT;
2221 pfnmerge >>= VTD_STRIDE_SHIFT;
2228 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2229 struct scatterlist *sg, unsigned long phys_pfn,
2230 unsigned long nr_pages, int prot)
2232 struct dma_pte *first_pte = NULL, *pte = NULL;
2233 phys_addr_t uninitialized_var(pteval);
2234 unsigned long sg_res = 0;
2235 unsigned int largepage_lvl = 0;
2236 unsigned long lvl_pages = 0;
2238 BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));
2240 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
2243 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
2247 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
2250 while (nr_pages > 0) {
2254 unsigned int pgoff = sg->offset & ~PAGE_MASK;
2256 sg_res = aligned_nrpages(sg->offset, sg->length);
2257 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + pgoff;
2258 sg->dma_length = sg->length;
2259 pteval = (sg_phys(sg) - pgoff) | prot;
2260 phys_pfn = pteval >> VTD_PAGE_SHIFT;
2264 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
2266 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
2269 /* It is a large page */
2270 if (largepage_lvl > 1) {
2271 unsigned long nr_superpages, end_pfn;
2273 pteval |= DMA_PTE_LARGE_PAGE;
2274 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2276 nr_superpages = sg_res / lvl_pages;
2277 end_pfn = iov_pfn + nr_superpages * lvl_pages - 1;
2280 * Ensure that old small page tables are
2281 * removed to make room for superpage(s).
2282 * We're adding new large pages, so make sure
2283 * we don't remove their parent tables.
2285 dma_pte_free_pagetable(domain, iov_pfn, end_pfn,
2288 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
2292 /* We don't need lock here, nobody else
2293 * touches the iova range
2295 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
2297 static int dumps = 5;
2298 pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
2299 iov_pfn, tmp, (unsigned long long)pteval);
2302 debug_dma_dump_mappings(NULL);
2307 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2309 BUG_ON(nr_pages < lvl_pages);
2310 BUG_ON(sg_res < lvl_pages);
2312 nr_pages -= lvl_pages;
2313 iov_pfn += lvl_pages;
2314 phys_pfn += lvl_pages;
2315 pteval += lvl_pages * VTD_PAGE_SIZE;
2316 sg_res -= lvl_pages;
2318 /* If the next PTE would be the first in a new page, then we
2319 need to flush the cache on the entries we've just written.
2320 And then we'll need to recalculate 'pte', so clear it and
2321 let it get set again in the if (!pte) block above.
2323 If we're done (!nr_pages) we need to flush the cache too.
2325 Also if we've been setting superpages, we may need to
2326 recalculate 'pte' and switch back to smaller pages for the
2327 end of the mapping, if the trailing size is not enough to
2328 use another superpage (i.e. sg_res < lvl_pages). */
2330 if (!nr_pages || first_pte_in_page(pte) ||
2331 (largepage_lvl > 1 && sg_res < lvl_pages)) {
2332 domain_flush_cache(domain, first_pte,
2333 (void *)pte - (void *)first_pte);
2337 if (!sg_res && nr_pages)
2343 static int domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2344 struct scatterlist *sg, unsigned long phys_pfn,
2345 unsigned long nr_pages, int prot)
2348 struct intel_iommu *iommu;
2350 /* Do the real mapping first */
2351 ret = __domain_mapping(domain, iov_pfn, sg, phys_pfn, nr_pages, prot);
2355 /* Notify about the new mapping */
2356 if (domain_type_is_vm(domain)) {
2357 /* VM typed domains can have more than one IOMMUs */
2359 for_each_domain_iommu(iommu_id, domain) {
2360 iommu = g_iommus[iommu_id];
2361 __mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
2364 /* General domains only have one IOMMU */
2365 iommu = domain_get_iommu(domain);
2366 __mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
2372 static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2373 struct scatterlist *sg, unsigned long nr_pages,
2376 return domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
2379 static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2380 unsigned long phys_pfn, unsigned long nr_pages,
2383 return domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
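/*
 * Callers pass DMA_PTE_READ/DMA_PTE_WRITE (and optionally DMA_PTE_SNP) in
 * 'prot'.  For instance, identity-mapping a physically contiguous range
 * could look like:
 *
 *	domain_pfn_mapping(domain, pfn, pfn, nr_pages,
 *			   DMA_PTE_READ | DMA_PTE_WRITE);
 */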
2386 static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
2388 unsigned long flags;
2389 struct context_entry *context;
2395 spin_lock_irqsave(&iommu->lock, flags);
2396 context = iommu_context_addr(iommu, bus, devfn, 0);
2398 spin_unlock_irqrestore(&iommu->lock, flags);
2401 did_old = context_domain_id(context);
2402 context_clear_entry(context);
2403 __iommu_flush_cache(iommu, context, sizeof(*context));
2404 spin_unlock_irqrestore(&iommu->lock, flags);
2405 iommu->flush.flush_context(iommu,
2407 (((u16)bus) << 8) | devfn,
2408 DMA_CCMD_MASK_NOBIT,
2409 DMA_CCMD_DEVICE_INVL);
2410 iommu->flush.flush_iotlb(iommu,
2417 static inline void unlink_domain_info(struct device_domain_info *info)
2419 assert_spin_locked(&device_domain_lock);
2420 list_del(&info->link);
2421 list_del(&info->global);
2423 info->dev->archdata.iommu = NULL;
2426 static void domain_remove_dev_info(struct dmar_domain *domain)
2428 struct device_domain_info *info, *tmp;
2429 unsigned long flags;
2431 spin_lock_irqsave(&device_domain_lock, flags);
2432 list_for_each_entry_safe(info, tmp, &domain->devices, link)
2433 __dmar_remove_one_dev_info(info);
2434 spin_unlock_irqrestore(&device_domain_lock, flags);
2439 * Note: we use struct device->archdata.iommu to store the info
2441 static struct dmar_domain *find_domain(struct device *dev)
2443 struct device_domain_info *info;
2445 /* No lock here, assumes no domain exit in normal case */
2446 info = dev->archdata.iommu;
2448 return info->domain;
2452 static inline struct device_domain_info *
2453 dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
2455 struct device_domain_info *info;
2457 list_for_each_entry(info, &device_domain_list, global)
2458 if (info->iommu->segment == segment && info->bus == bus &&
2459 info->devfn == devfn)
2465 static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
2468 struct dmar_domain *domain)
2470 struct dmar_domain *found = NULL;
2471 struct device_domain_info *info;
2472 unsigned long flags;
2475 info = alloc_devinfo_mem();
2480 info->devfn = devfn;
2481 info->ats_supported = info->pasid_supported = info->pri_supported = 0;
2482 info->ats_enabled = info->pasid_enabled = info->pri_enabled = 0;
2485 info->domain = domain;
2486 info->iommu = iommu;
2488 if (dev && dev_is_pci(dev)) {
2489 struct pci_dev *pdev = to_pci_dev(info->dev);
2491 if (!pci_ats_disabled() &&
2492 ecap_dev_iotlb_support(iommu->ecap) &&
2493 pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS) &&
2494 dmar_find_matched_atsr_unit(pdev))
2495 info->ats_supported = 1;
2497 if (ecs_enabled(iommu)) {
2498 if (pasid_enabled(iommu)) {
2499 int features = pci_pasid_features(pdev);
2501 info->pasid_supported = features | 1;
2504 if (info->ats_supported && ecap_prs(iommu->ecap) &&
2505 pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI))
2506 info->pri_supported = 1;
2510 spin_lock_irqsave(&device_domain_lock, flags);
2512 found = find_domain(dev);
2515 struct device_domain_info *info2;
2516 info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
2518 found = info2->domain;
2524 spin_unlock_irqrestore(&device_domain_lock, flags);
2525 free_devinfo_mem(info);
2526 /* Caller must free the original domain */
2530 spin_lock(&iommu->lock);
2531 ret = domain_attach_iommu(domain, iommu);
2532 spin_unlock(&iommu->lock);
2535 spin_unlock_irqrestore(&device_domain_lock, flags);
2536 free_devinfo_mem(info);
2540 list_add(&info->link, &domain->devices);
2541 list_add(&info->global, &device_domain_list);
2543 dev->archdata.iommu = info;
2544 spin_unlock_irqrestore(&device_domain_lock, flags);
2546 if (dev && domain_context_mapping(domain, dev)) {
2547 pr_err("Domain context map for %s failed\n", dev_name(dev));
2548 dmar_remove_one_dev_info(domain, dev);
2555 static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
2557 *(u16 *)opaque = alias;
2561 static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
2563 struct device_domain_info *info = NULL;
2564 struct dmar_domain *domain = NULL;
2565 struct intel_iommu *iommu;
2567 unsigned long flags;
2570 iommu = device_to_iommu(dev, &bus, &devfn);
2574 if (dev_is_pci(dev)) {
2575 struct pci_dev *pdev = to_pci_dev(dev);
2577 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2579 spin_lock_irqsave(&device_domain_lock, flags);
2580 info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
2581 PCI_BUS_NUM(dma_alias),
2584 iommu = info->iommu;
2585 domain = info->domain;
2587 spin_unlock_irqrestore(&device_domain_lock, flags);
2589 /* DMA alias already has a domain, use it */
2594 /* Allocate and initialize new domain for the device */
2595 domain = alloc_domain(0);
2598 if (domain_init(domain, iommu, gaw)) {
2599 domain_exit(domain);
2608 static struct dmar_domain *set_domain_for_dev(struct device *dev,
2609 struct dmar_domain *domain)
2611 struct intel_iommu *iommu;
2612 struct dmar_domain *tmp;
2613 u16 req_id, dma_alias;
2616 iommu = device_to_iommu(dev, &bus, &devfn);
2620 req_id = ((u16)bus << 8) | devfn;
2622 if (dev_is_pci(dev)) {
2623 struct pci_dev *pdev = to_pci_dev(dev);
2625 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2627 /* register PCI DMA alias device */
2628 if (req_id != dma_alias) {
2629 tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias),
2630 dma_alias & 0xff, NULL, domain);
2632 if (!tmp || tmp != domain)
2637 tmp = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
2638 if (!tmp || tmp != domain)
2644 static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
2646 struct dmar_domain *domain, *tmp;
2648 domain = find_domain(dev);
2652 domain = find_or_alloc_domain(dev, gaw);
2656 tmp = set_domain_for_dev(dev, domain);
2657 if (!tmp || domain != tmp) {
2658 domain_exit(domain);
2667 static int iommu_domain_identity_map(struct dmar_domain *domain,
2668 unsigned long long start,
2669 unsigned long long end)
2671 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2672 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
2674 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2675 dma_to_mm_pfn(last_vpfn))) {
2676 pr_err("Reserving iova failed\n");
2680 pr_debug("Mapping reserved region %llx-%llx\n", start, end);
2682 * RMRR range might overlap with a physical memory range; clear it first.
2685 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
2687 return __domain_mapping(domain, first_vpfn, NULL,
2688 first_vpfn, last_vpfn - first_vpfn + 1,
2689 DMA_PTE_READ|DMA_PTE_WRITE);
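/*
 * Illustrative example (assumed RMRR range, not driver code): an
 * identity mapping simply means IOVA == physical address.  For a
 * reserved region [0x000e8000, 0x000e8fff]:
 *
 *	first_vpfn = 0x000e8000 >> VTD_PAGE_SHIFT = 0xe8;
 *	last_vpfn  = 0x000e8fff >> VTD_PAGE_SHIFT = 0xe8;
 *
 * so __domain_mapping() is asked to map one page at IOVA PFN 0xe8 onto
 * physical PFN 0xe8 with read/write permission.
 */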
2692 static int domain_prepare_identity_map(struct device *dev,
2693 struct dmar_domain *domain,
2694 unsigned long long start,
2695 unsigned long long end)
2697 /* For _hardware_ passthrough, don't bother. But for software
2698 passthrough, we do it anyway -- it may indicate a memory
2699 range which is reserved in E820 and so didn't get set
2700 up in si_domain to start with */
2701 if (domain == si_domain && hw_pass_through) {
2702 pr_warn("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
2703 dev_name(dev), start, end);
2707 pr_info("Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
2708 dev_name(dev), start, end);
2711 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2712 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2713 dmi_get_system_info(DMI_BIOS_VENDOR),
2714 dmi_get_system_info(DMI_BIOS_VERSION),
2715 dmi_get_system_info(DMI_PRODUCT_VERSION));
2719 if (end >> agaw_to_width(domain->agaw)) {
2720 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2721 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2722 agaw_to_width(domain->agaw),
2723 dmi_get_system_info(DMI_BIOS_VENDOR),
2724 dmi_get_system_info(DMI_BIOS_VERSION),
2725 dmi_get_system_info(DMI_PRODUCT_VERSION));
2729 return iommu_domain_identity_map(domain, start, end);
2732 static int iommu_prepare_identity_map(struct device *dev,
2733 unsigned long long start,
2734 unsigned long long end)
2736 struct dmar_domain *domain;
2739 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2743 ret = domain_prepare_identity_map(dev, domain, start, end);
2745 domain_exit(domain);
2750 static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
2753 if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
2755 return iommu_prepare_identity_map(dev, rmrr->base_address,
2759 #ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
2760 static inline void iommu_prepare_isa(void)
2762 struct pci_dev *pdev;
2765 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2769 pr_info("Prepare 0-16MiB unity mapping for LPC\n");
2770 ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);
2773 pr_err("Failed to create 0-16MiB identity map - floppy might not work\n");
2778 static inline void iommu_prepare_isa(void)
2782 #endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */
2784 static int md_domain_init(struct dmar_domain *domain, int guest_width);
2786 static int __init si_domain_init(int hw)
2790 si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
2794 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2795 domain_exit(si_domain);
2799 pr_debug("Identity mapping domain allocated\n");
2804 for_each_online_node(nid) {
2805 unsigned long start_pfn, end_pfn;
2808 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2809 ret = iommu_domain_identity_map(si_domain,
2810 PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2819 static int identity_mapping(struct device *dev)
2821 struct device_domain_info *info;
2823 if (likely(!iommu_identity_mapping))
2826 info = dev->archdata.iommu;
2827 if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2828 return (info->domain == si_domain);
2833 static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
2835 struct dmar_domain *ndomain;
2836 struct intel_iommu *iommu;
2839 iommu = device_to_iommu(dev, &bus, &devfn);
2843 ndomain = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
2844 if (ndomain != domain)
2850 static bool device_has_rmrr(struct device *dev)
2852 struct dmar_rmrr_unit *rmrr;
2857 for_each_rmrr_units(rmrr) {
2859 * Return TRUE if this RMRR contains the device that we are looking for.
2862 for_each_active_dev_scope(rmrr->devices,
2863 rmrr->devices_cnt, i, tmp)
2874 * There are a couple of cases where we need to restrict the functionality of
2875 * devices associated with RMRRs. The first is when evaluating a device for
2876 * identity mapping because problems exist when devices are moved in and out
2877 * of domains and their respective RMRR information is lost. This means that
2878 * a device with associated RMRRs will never be in a "passthrough" domain.
2879 * The second is use of the device through the IOMMU API. This interface
2880 * expects to have full control of the IOVA space for the device. We cannot
2881 * satisfy both the requirement that RMRR access is maintained and have an
2882 * unencumbered IOVA space. We also have no ability to quiesce the device's
2883 * use of the RMRR space or even inform the IOMMU API user of the restriction.
2884 * We therefore prevent devices associated with an RMRR from participating in
2885 * the IOMMU API, which eliminates them from device assignment.
2887 * In both cases we assume that PCI USB devices with RMRRs have them largely
2888 * for historical reasons and that the RMRR space is not actively used post
2889 * boot. This exclusion may change if vendors begin to abuse it.
2891 * The same exception is made for graphics devices, with the requirement that
2892 * any use of the RMRR regions will be torn down before assigning the device to a guest.
2895 static bool device_is_rmrr_locked(struct device *dev)
2897 if (!device_has_rmrr(dev))
2900 if (dev_is_pci(dev)) {
2901 struct pci_dev *pdev = to_pci_dev(dev);
2903 if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
2910 static int iommu_should_identity_map(struct device *dev, int startup)
2913 if (dev_is_pci(dev)) {
2914 struct pci_dev *pdev = to_pci_dev(dev);
2916 if (device_is_rmrr_locked(dev))
2919 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2922 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2925 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2929 * We want to start off with all devices in the 1:1 domain, and
2930 * take them out later if we find they can't access all of memory.
2932 * However, we can't do this for PCI devices behind bridges,
2933 * because all PCI devices behind the same bridge will end up
2934 * with the same source-id on their transactions.
2936 * Practically speaking, we can't change things around for these
2937 * devices at run-time, because we can't be sure there'll be no
2938 * DMA transactions in flight for any of their siblings.
2940 * So PCI devices (unless they're on the root bus) as well as
2941 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2942 * the 1:1 domain, just in _case_ one of their siblings turns out
2943 * not to be able to map all of memory.
2945 if (!pci_is_pcie(pdev)) {
2946 if (!pci_is_root_bus(pdev->bus))
2948 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2950 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
2953 if (device_has_rmrr(dev))
2958 * At boot time, we don't yet know if devices will be 64-bit capable.
2959 * Assume that they will -- if they turn out not to be, then we can
2960 * take them out of the 1:1 domain later.
2964 * If the device's dma_mask is less than the system's memory
2965 * size then this is not a candidate for identity mapping.
2967 u64 dma_mask = *dev->dma_mask;
2969 if (dev->coherent_dma_mask &&
2970 dev->coherent_dma_mask < dma_mask)
2971 dma_mask = dev->coherent_dma_mask;
2973 return dma_mask >= dma_get_required_mask(dev);
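/*
 * Illustrative note (assumed configuration, not driver code): on a
 * machine with 8GiB of RAM, dma_get_required_mask() reports a mask of
 * roughly DMA_BIT_MASK(33).  A device whose dma_mask is only
 * DMA_BIT_MASK(32) then fails the comparison above and stays out of
 * the 1:1 domain, since an identity mapping could hand it DMA
 * addresses it cannot reach.
 */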
2979 static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
2983 if (!iommu_should_identity_map(dev, 1))
2986 ret = domain_add_dev_info(si_domain, dev);
2988 pr_info("%s identity mapping for device %s\n",
2989 hw ? "Hardware" : "Software", dev_name(dev));
2990 else if (ret == -ENODEV)
2991 /* device not associated with an iommu */
2998 static int __init iommu_prepare_static_identity_mapping(int hw)
3000 struct pci_dev *pdev = NULL;
3001 struct dmar_drhd_unit *drhd;
3002 struct intel_iommu *iommu;
3007 for_each_pci_dev(pdev) {
3008 ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
3013 for_each_active_iommu(iommu, drhd)
3014 for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
3015 struct acpi_device_physical_node *pn;
3016 struct acpi_device *adev;
3018 if (dev->bus != &acpi_bus_type)
3021 adev = to_acpi_device(dev);
3022 mutex_lock(&adev->physical_node_lock);
3023 list_for_each_entry(pn, &adev->physical_node_list, node) {
3024 ret = dev_prepare_static_identity_mapping(pn->dev, hw);
3028 mutex_unlock(&adev->physical_node_lock);
3036 static void intel_iommu_init_qi(struct intel_iommu *iommu)
3039 * Start from the sane iommu hardware state.
3040 * If the queued invalidation is already initialized by us
3041 * (for example, while enabling interrupt-remapping) then
3042 * things are already rolling from a sane state.
3046 * Clear any previous faults.
3048 dmar_fault(-1, iommu);
3050 * Disable queued invalidation if supported and already enabled
3051 * before OS handover.
3053 dmar_disable_qi(iommu);
3056 if (dmar_enable_qi(iommu)) {
3058 * Queued Invalidate not enabled, use Register Based Invalidate
3060 iommu->flush.flush_context = __iommu_flush_context;
3061 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
3062 pr_info("%s: Using Register based invalidation\n",
3065 iommu->flush.flush_context = qi_flush_context;
3066 iommu->flush.flush_iotlb = qi_flush_iotlb;
3067 pr_info("%s: Using Queued invalidation\n", iommu->name);
3071 static int copy_context_table(struct intel_iommu *iommu,
3072 struct root_entry *old_re,
3073 struct context_entry **tbl,
3076 int tbl_idx, pos = 0, idx, devfn, ret = 0, did;
3077 struct context_entry *new_ce = NULL, ce;
3078 struct context_entry *old_ce = NULL;
3079 struct root_entry re;
3080 phys_addr_t old_ce_phys;
3082 tbl_idx = ext ? bus * 2 : bus;
3083 memcpy(&re, old_re, sizeof(re));
3085 for (devfn = 0; devfn < 256; devfn++) {
3086 /* First calculate the correct index */
3087 idx = (ext ? devfn * 2 : devfn) % 256;
3090 /* First save what we may have and clean up */
3092 tbl[tbl_idx] = new_ce;
3093 __iommu_flush_cache(iommu, new_ce,
3103 old_ce_phys = root_entry_lctp(&re);
3105 old_ce_phys = root_entry_uctp(&re);
3108 if (ext && devfn == 0) {
3109 /* No LCTP, try UCTP */
3118 old_ce = memremap(old_ce_phys, PAGE_SIZE,
3123 new_ce = alloc_pgtable_page(iommu->node);
3130 /* Now copy the context entry */
3131 memcpy(&ce, old_ce + idx, sizeof(ce));
3133 if (!__context_present(&ce))
3136 did = context_domain_id(&ce);
3137 if (did >= 0 && did < cap_ndoms(iommu->cap))
3138 set_bit(did, iommu->domain_ids);
3141 * We need a marker for copied context entries. This
3142 * marker needs to work for the old format as well as
3143 * for extended context entries.
3145 * Bit 67 of the context entry is used. In the old
3146 * format this bit is available to software, in the
3147 * extended format it is the PGE bit, but PGE is ignored
3148 * by HW if PASIDs are disabled (and thus still
3151 * So disable PASIDs first and then mark the entry
3152 * copied. This means that we don't copy PASID
3153 * translations from the old kernel, but this is fine as
3154 * faults there are not fatal.
3156 context_clear_pasid_enable(&ce);
3157 context_set_copied(&ce);
3162 tbl[tbl_idx + pos] = new_ce;
3164 __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE);
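/*
 * Illustrative note on the index math above (not driver code): an
 * extended context entry is twice the size of a legacy one, so a bus
 * needs two 4KiB context tables (tbl_idx = bus * 2).  Roughly:
 *
 *	devfn 0x00..0x7f: idx = devfn * 2, entry lands in tbl[tbl_idx]
 *	devfn 0x80..0xff: idx wraps at 256, entry lands in tbl[tbl_idx + pos]
 *
 * which is why 'pos' is bumped and a fresh page is allocated when idx
 * wraps back to 0 halfway through the devfn loop.
 */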
3173 static int copy_translation_tables(struct intel_iommu *iommu)
3175 struct context_entry **ctxt_tbls;
3176 struct root_entry *old_rt;
3177 phys_addr_t old_rt_phys;
3178 int ctxt_table_entries;
3179 unsigned long flags;
3184 rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
3185 ext = !!(rtaddr_reg & DMA_RTADDR_RTT);
3186 new_ext = !!ecap_ecs(iommu->ecap);
3189 * The RTT bit can only be changed when translation is disabled,
3190 * but disabling translation would open a window for data
3191 * corruption. So bail out and don't copy anything if we would
3192 * have to change the bit.
3197 old_rt_phys = rtaddr_reg & VTD_PAGE_MASK;
3201 old_rt = memremap(old_rt_phys, PAGE_SIZE, MEMREMAP_WB);
3205 /* This is too big for the stack - allocate it from slab */
3206 ctxt_table_entries = ext ? 512 : 256;
3208 ctxt_tbls = kcalloc(ctxt_table_entries, sizeof(void *), GFP_KERNEL);
3212 for (bus = 0; bus < 256; bus++) {
3213 ret = copy_context_table(iommu, &old_rt[bus],
3214 ctxt_tbls, bus, ext);
3216 pr_err("%s: Failed to copy context table for bus %d\n",
3222 spin_lock_irqsave(&iommu->lock, flags);
3224 /* Context tables are copied, now write them to the root_entry table */
3225 for (bus = 0; bus < 256; bus++) {
3226 int idx = ext ? bus * 2 : bus;
3229 if (ctxt_tbls[idx]) {
3230 val = virt_to_phys(ctxt_tbls[idx]) | 1;
3231 iommu->root_entry[bus].lo = val;
3234 if (!ext || !ctxt_tbls[idx + 1])
3237 val = virt_to_phys(ctxt_tbls[idx + 1]) | 1;
3238 iommu->root_entry[bus].hi = val;
3241 spin_unlock_irqrestore(&iommu->lock, flags);
3245 __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE);
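/*
 * Illustrative note (not driver code): a root entry is marked present
 * by setting bit 0 of the quadword, which is what the '| 1' above does
 * to the physical address of each copied context table.  In extended
 * mode the second table of the per-bus pair goes into the high
 * quadword, again with its own present bit.
 */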
3255 static int __init init_dmars(void)
3257 struct dmar_drhd_unit *drhd;
3258 struct dmar_rmrr_unit *rmrr;
3259 bool copied_tables = false;
3261 struct intel_iommu *iommu;
3267 * initialize and program root entry to not present
3270 for_each_drhd_unit(drhd) {
3272 * Lock not needed: this is only incremented in the single
3273 * threaded kernel __init code path; all other accesses are reads.
3276 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
3280 pr_err_once("Exceeded %d IOMMUs\n", DMAR_UNITS_SUPPORTED);
3283 /* Preallocate enough resources for IOMMU hot-addition */
3284 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
3285 g_num_of_iommus = DMAR_UNITS_SUPPORTED;
3287 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
3290 pr_err("Allocating global iommu array failed\n");
3295 for_each_active_iommu(iommu, drhd) {
3296 g_iommus[iommu->seq_id] = iommu;
3298 intel_iommu_init_qi(iommu);
3300 ret = iommu_init_domains(iommu);
3304 init_translation_status(iommu);
3306 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
3307 iommu_disable_translation(iommu);
3308 clear_translation_pre_enabled(iommu);
3309 pr_warn("Translation was enabled for %s but we are not in kdump mode\n",
3315 * we could share the same root & context tables
3316 * among all IOMMUs; we need to split it later.
3318 ret = iommu_alloc_root_entry(iommu);
3322 if (translation_pre_enabled(iommu)) {
3323 pr_info("Translation already enabled - trying to copy translation structures\n");
3325 ret = copy_translation_tables(iommu);
3328 * We found the IOMMU with translation
3329 * enabled - but failed to copy over the
3330 * old root-entry table. Try to proceed
3331 * by disabling translation now and
3332 * allocating a clean root-entry table.
3333 * This might cause DMAR faults, but
3334 * probably the dump will still succeed.
3336 pr_err("Failed to copy translation tables from previous kernel for %s\n",
3338 iommu_disable_translation(iommu);
3339 clear_translation_pre_enabled(iommu);
3341 pr_info("Copied translation tables from previous kernel for %s\n",
3343 copied_tables = true;
3347 if (!ecap_pass_through(iommu->ecap))
3348 hw_pass_through = 0;
3349 #ifdef CONFIG_INTEL_IOMMU_SVM
3350 if (pasid_enabled(iommu))
3351 intel_svm_alloc_pasid_tables(iommu);
3356 * Now that qi is enabled on all iommus, set the root entry and flush
3357 * caches. This is required on some Intel X58 chipsets, otherwise the
3358 * flush_context function will loop forever and the boot hangs.
3360 for_each_active_iommu(iommu, drhd) {
3361 iommu_flush_write_buffer(iommu);
3362 iommu_set_root_entry(iommu);
3363 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
3364 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3367 if (iommu_pass_through)
3368 iommu_identity_mapping |= IDENTMAP_ALL;
3370 #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
3371 iommu_identity_mapping |= IDENTMAP_GFX;
3374 check_tylersburg_isoch();
3376 if (iommu_identity_mapping) {
3377 ret = si_domain_init(hw_pass_through);
3384 * If we copied translations from a previous kernel in the kdump
3385 * case, we can not assign the devices to domains now, as that
3386 * would eliminate the old mappings. So skip this part and defer
3387 * the assignment to device driver initialization time.
3393 * If pass-through is not set or not enabled, set up context entries for
3394 * identity mappings for RMRR, GFX, and ISA, and possibly fall back to static
3395 * identity mapping if iommu_identity_mapping is set.
3397 if (iommu_identity_mapping) {
3398 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
3400 pr_crit("Failed to setup IOMMU pass-through\n");
3406 * for each dev attached to rmrr
3408 * locate drhd for dev, alloc domain for dev
3409 * allocate free domain
3410 * allocate page table entries for rmrr
3411 * if context not allocated for bus
3412 * allocate and init context
3413 * set present in root table for this bus
3414 * init context with domain, translation etc
3418 pr_info("Setting RMRR:\n");
3419 for_each_rmrr_units(rmrr) {
3420 /* some BIOSes list non-existent devices in the DMAR table. */
3421 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
3423 ret = iommu_prepare_rmrr_dev(rmrr, dev);
3425 pr_err("Mapping reserved region failed\n");
3429 iommu_prepare_isa();
3436 * global invalidate context cache
3437 * global invalidate iotlb
3438 * enable translation
3440 for_each_iommu(iommu, drhd) {
3441 if (drhd->ignored) {
3443 * we always have to disable PMRs or DMA may fail on this device
3447 iommu_disable_protect_mem_regions(iommu);
3451 iommu_flush_write_buffer(iommu);
3453 #ifdef CONFIG_INTEL_IOMMU_SVM
3454 if (pasid_enabled(iommu) && ecap_prs(iommu->ecap)) {
3455 ret = intel_svm_enable_prq(iommu);
3460 ret = dmar_set_interrupt(iommu);
3464 if (!translation_pre_enabled(iommu))
3465 iommu_enable_translation(iommu);
3467 iommu_disable_protect_mem_regions(iommu);
3473 for_each_active_iommu(iommu, drhd) {
3474 disable_dmar_iommu(iommu);
3475 free_dmar_iommu(iommu);
3484 /* This takes a number of _MM_ pages, not VTD pages */
3485 static unsigned long intel_alloc_iova(struct device *dev,
3486 struct dmar_domain *domain,
3487 unsigned long nrpages, uint64_t dma_mask)
3489 unsigned long iova_pfn = 0;
3491 /* Restrict dma_mask to the width that the iommu can handle */
3492 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
3493 /* Ensure we reserve the whole size-aligned region */
3494 nrpages = __roundup_pow_of_two(nrpages);
3496 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
3498 * First try to allocate an io virtual address in
3499 * DMA_BIT_MASK(32) and if that fails then try allocating
3502 iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
3503 IOVA_PFN(DMA_BIT_MASK(32)), false);
3507 iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
3508 IOVA_PFN(dma_mask), true);
3509 if (unlikely(!iova_pfn)) {
3510 pr_err("Allocating %ld-page iova for %s failed",
3511 nrpages, dev_name(dev));
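/*
 * Illustrative sketch (assumed numbers, not driver code): the
 * power-of-two round-up above means a request for 3 pages reserves 4:
 *
 *	nrpages = __roundup_pow_of_two(3) = 4;
 *
 * and for a device with a >32-bit dma_mask (and !dmar_forcedac) the
 * allocator first tries to place those pages below 4GiB, falling back
 * to the full dma_mask range only if that fails.
 */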
3518 static struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
3520 struct dmar_domain *domain, *tmp;
3521 struct dmar_rmrr_unit *rmrr;
3522 struct device *i_dev;
3525 domain = find_domain(dev);
3529 domain = find_or_alloc_domain(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
3533 /* We have a new domain - set up possible RMRRs for the device */
3535 for_each_rmrr_units(rmrr) {
3536 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
3541 ret = domain_prepare_identity_map(dev, domain,
3545 dev_err(dev, "Mapping reserved region failed\n");
3550 tmp = set_domain_for_dev(dev, domain);
3551 if (!tmp || domain != tmp) {
3552 domain_exit(domain);
3559 pr_err("Allocating domain for %s failed\n", dev_name(dev));
3565 /* Check if the device needs to go through the non-identity map and unmap process. */
3566 static int iommu_no_mapping(struct device *dev)
3570 if (iommu_dummy(dev))
3573 if (!iommu_identity_mapping)
3576 found = identity_mapping(dev);
3578 if (iommu_should_identity_map(dev, 0))
3582 * The 32-bit DMA device is removed from si_domain and falls back
3583 * to non-identity mapping.
3585 dmar_remove_one_dev_info(si_domain, dev);
3586 pr_info("32bit %s uses non-identity mapping\n",
3592 * In case a 64-bit DMA device is detached from a VM, the device
3593 * is put into si_domain for identity mapping.
3595 if (iommu_should_identity_map(dev, 0)) {
3597 ret = domain_add_dev_info(si_domain, dev);
3599 pr_info("64bit %s uses identity mapping\n",
3609 static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
3610 size_t size, int dir, u64 dma_mask)
3612 struct dmar_domain *domain;
3613 phys_addr_t start_paddr;
3614 unsigned long iova_pfn;
3617 struct intel_iommu *iommu;
3618 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
3620 BUG_ON(dir == DMA_NONE);
3622 if (iommu_no_mapping(dev))
3625 domain = get_valid_domain_for_dev(dev);
3629 iommu = domain_get_iommu(domain);
3630 size = aligned_nrpages(paddr, size);
3632 iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
3637 * Check if DMAR supports zero-length reads on write-only mappings.
3640 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
3641 !cap_zlr(iommu->cap))
3642 prot |= DMA_PTE_READ;
3643 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3644 prot |= DMA_PTE_WRITE;
3646 * The range paddr .. paddr + size might span a partial page; we should map the whole
3647 * page. Note: if two parts of one page are mapped separately, we
3648 * might have two guest addresses mapping to the same host paddr, but this
3649 * is not a big problem
3651 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn),
3652 mm_to_dma_pfn(paddr_pfn), size, prot);
3656 start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT;
3657 start_paddr += paddr & ~PAGE_MASK;
3662 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
3663 pr_err("Device %s request: %zx@%llx dir %d --- failed\n",
3664 dev_name(dev), size, (unsigned long long)paddr, dir);
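/*
 * Illustrative sketch of the returned handle (assumed numbers, not
 * driver code): for paddr = 0x12345678 and size = 0x100,
 * aligned_nrpages() yields one page, and if the allocator hands back
 * iova_pfn = 0xfff00 the caller sees
 *
 *	start_paddr = (0xfff00UL << PAGE_SHIFT) + (0x12345678 & ~PAGE_MASK)
 *	            = 0xfff00000 + 0x678 = 0xfff00678;
 *
 * i.e. the buffer's offset within its page is preserved inside the
 * IOVA page.
 */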
3668 static dma_addr_t intel_map_page(struct device *dev, struct page *page,
3669 unsigned long offset, size_t size,
3670 enum dma_data_direction dir,
3671 unsigned long attrs)
3673 return __intel_map_single(dev, page_to_phys(page) + offset, size,
3674 dir, *dev->dma_mask);
3677 static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
3679 struct dmar_domain *domain;
3680 unsigned long start_pfn, last_pfn;
3681 unsigned long nrpages;
3682 unsigned long iova_pfn;
3683 struct intel_iommu *iommu;
3684 struct page *freelist;
3686 if (iommu_no_mapping(dev))
3689 domain = find_domain(dev);
3692 iommu = domain_get_iommu(domain);
3694 iova_pfn = IOVA_PFN(dev_addr);
3696 nrpages = aligned_nrpages(dev_addr, size);
3697 start_pfn = mm_to_dma_pfn(iova_pfn);
3698 last_pfn = start_pfn + nrpages - 1;
3700 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
3701 dev_name(dev), start_pfn, last_pfn);
3703 freelist = domain_unmap(domain, start_pfn, last_pfn);
3705 if (intel_iommu_strict) {
3706 iommu_flush_iotlb_psi(iommu, domain, start_pfn,
3707 nrpages, !freelist, 0);
3709 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages));
3710 dma_free_pagelist(freelist);
3712 queue_iova(&domain->iovad, iova_pfn, nrpages,
3713 (unsigned long)freelist);
3715 * queue up the release of the unmap to save the 1/6th of the
3716 * cpu used up by the iotlb flush operation...
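/*
 * Illustrative note (not driver code): with intel_iommu_strict the
 * IOTLB is invalidated synchronously above, so the IOVA and the freed
 * page-table pages can be released immediately.  In the default lazy
 * mode the IOVA and its freelist are queued instead and flushed later
 * in batches, trading a short window in which the stale translation
 * may still be cached for much lower per-unmap overhead.
 */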
3721 static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
3722 size_t size, enum dma_data_direction dir,
3723 unsigned long attrs)
3725 intel_unmap(dev, dev_addr, size);
3728 static void *intel_alloc_coherent(struct device *dev, size_t size,
3729 dma_addr_t *dma_handle, gfp_t flags,
3730 unsigned long attrs)
3734 vaddr = dma_direct_alloc(dev, size, dma_handle, flags, attrs);
3735 if (iommu_no_mapping(dev) || !vaddr)
3738 *dma_handle = __intel_map_single(dev, virt_to_phys(vaddr),
3739 PAGE_ALIGN(size), DMA_BIDIRECTIONAL,
3740 dev->coherent_dma_mask);
3742 goto out_free_pages;
3746 dma_direct_free(dev, size, vaddr, *dma_handle, attrs);
3750 static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
3751 dma_addr_t dma_handle, unsigned long attrs)
3753 if (!iommu_no_mapping(dev))
3754 intel_unmap(dev, dma_handle, PAGE_ALIGN(size));
3755 dma_direct_free(dev, size, vaddr, dma_handle, attrs);
3758 static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
3759 int nelems, enum dma_data_direction dir,
3760 unsigned long attrs)
3762 dma_addr_t startaddr = sg_dma_address(sglist) & PAGE_MASK;
3763 unsigned long nrpages = 0;
3764 struct scatterlist *sg;
3767 for_each_sg(sglist, sg, nelems, i) {
3768 nrpages += aligned_nrpages(sg_dma_address(sg), sg_dma_len(sg));
3771 intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
3774 static int intel_nontranslate_map_sg(struct device *hddev,
3775 struct scatterlist *sglist, int nelems, int dir)
3778 struct scatterlist *sg;
3780 for_each_sg(sglist, sg, nelems, i) {
3781 BUG_ON(!sg_page(sg));
3782 sg->dma_address = sg_phys(sg);
3783 sg->dma_length = sg->length;
3788 static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
3789 enum dma_data_direction dir, unsigned long attrs)
3792 struct dmar_domain *domain;
3795 unsigned long iova_pfn;
3797 struct scatterlist *sg;
3798 unsigned long start_vpfn;
3799 struct intel_iommu *iommu;
3801 BUG_ON(dir == DMA_NONE);
3802 if (iommu_no_mapping(dev))
3803 return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
3805 domain = get_valid_domain_for_dev(dev);
3809 iommu = domain_get_iommu(domain);
3811 for_each_sg(sglist, sg, nelems, i)
3812 size += aligned_nrpages(sg->offset, sg->length);
3814 iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
3817 sglist->dma_length = 0;
3822 * Check if DMAR supports zero-length reads on write-only mappings.
3825 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
3826 !cap_zlr(iommu->cap))
3827 prot |= DMA_PTE_READ;
3828 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3829 prot |= DMA_PTE_WRITE;
3831 start_vpfn = mm_to_dma_pfn(iova_pfn);
3833 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
3834 if (unlikely(ret)) {
3835 dma_pte_free_pagetable(domain, start_vpfn,
3836 start_vpfn + size - 1,
3837 agaw_to_level(domain->agaw) + 1);
3838 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
3845 static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3850 const struct dma_map_ops intel_dma_ops = {
3851 .alloc = intel_alloc_coherent,
3852 .free = intel_free_coherent,
3853 .map_sg = intel_map_sg,
3854 .unmap_sg = intel_unmap_sg,
3855 .map_page = intel_map_page,
3856 .unmap_page = intel_unmap_page,
3857 .mapping_error = intel_mapping_error,
3859 .dma_supported = dma_direct_supported,
3863 static inline int iommu_domain_cache_init(void)
3867 iommu_domain_cache = kmem_cache_create("iommu_domain",
3868 sizeof(struct dmar_domain),
3873 if (!iommu_domain_cache) {
3874 pr_err("Couldn't create iommu_domain cache\n");
3881 static inline int iommu_devinfo_cache_init(void)
3885 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3886 sizeof(struct device_domain_info),
3890 if (!iommu_devinfo_cache) {
3891 pr_err("Couldn't create devinfo cache\n");
3898 static int __init iommu_init_mempool(void)
3901 ret = iova_cache_get();
3905 ret = iommu_domain_cache_init();
3909 ret = iommu_devinfo_cache_init();
3913 kmem_cache_destroy(iommu_domain_cache);
3920 static void __init iommu_exit_mempool(void)
3922 kmem_cache_destroy(iommu_devinfo_cache);
3923 kmem_cache_destroy(iommu_domain_cache);
3927 static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3929 struct dmar_drhd_unit *drhd;
3933 /* We know that this device on this chipset has its own IOMMU.
3934 * If we find it under a different IOMMU, then the BIOS is lying
3935 * to us. Hope that the IOMMU for this device is actually
3936 * disabled, and it needs no translation...
3938 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3940 /* "can't" happen */
3941 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3944 vtbar &= 0xffff0000;
3946 /* we know that this iommu should be at offset 0xa000 from vtbar */
3947 drhd = dmar_find_matched_drhd_unit(pdev);
3948 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3949 TAINT_FIRMWARE_WORKAROUND,
3950 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3951 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3953 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
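/*
 * Illustrative note (not driver code): the quirk above reads the
 * chipset register at config offset 0xb0 of device 00.0 on the IOAT
 * device's bus, keeps the upper 16 bits as 'vtbar', and expects the
 * DRHD that the DMAR table assigns to this device to live at
 * vtbar + 0xa000.  If it does not, the table is lying and the device
 * is marked DUMMY_DEVICE_DOMAIN_INFO so it bypasses translation.
 */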
3955 static void __init init_no_remapping_devices(void)
3957 struct dmar_drhd_unit *drhd;
3961 for_each_drhd_unit(drhd) {
3962 if (!drhd->include_all) {
3963 for_each_active_dev_scope(drhd->devices,
3964 drhd->devices_cnt, i, dev)
3966 /* ignore DMAR unit if no devices exist */
3967 if (i == drhd->devices_cnt)
3972 for_each_active_drhd_unit(drhd) {
3973 if (drhd->include_all)
3976 for_each_active_dev_scope(drhd->devices,
3977 drhd->devices_cnt, i, dev)
3978 if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
3980 if (i < drhd->devices_cnt)
3983 /* This IOMMU has *only* gfx devices. Either bypass it or
3984 set the gfx_mapped flag, as appropriate */
3986 intel_iommu_gfx_mapped = 1;
3989 for_each_active_dev_scope(drhd->devices,
3990 drhd->devices_cnt, i, dev)
3991 dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3996 #ifdef CONFIG_SUSPEND
3997 static int init_iommu_hw(void)
3999 struct dmar_drhd_unit *drhd;
4000 struct intel_iommu *iommu = NULL;
4002 for_each_active_iommu(iommu, drhd)
4004 dmar_reenable_qi(iommu);
4006 for_each_iommu(iommu, drhd) {
4007 if (drhd->ignored) {
4009 * we always have to disable PMRs or DMA may fail on this device
4013 iommu_disable_protect_mem_regions(iommu);
4017 iommu_flush_write_buffer(iommu);
4019 iommu_set_root_entry(iommu);
4021 iommu->flush.flush_context(iommu, 0, 0, 0,
4022 DMA_CCMD_GLOBAL_INVL);
4023 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
4024 iommu_enable_translation(iommu);
4025 iommu_disable_protect_mem_regions(iommu);
4031 static void iommu_flush_all(void)
4033 struct dmar_drhd_unit *drhd;
4034 struct intel_iommu *iommu;
4036 for_each_active_iommu(iommu, drhd) {
4037 iommu->flush.flush_context(iommu, 0, 0, 0,
4038 DMA_CCMD_GLOBAL_INVL);
4039 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
4040 DMA_TLB_GLOBAL_FLUSH);
4044 static int iommu_suspend(void)
4046 struct dmar_drhd_unit *drhd;
4047 struct intel_iommu *iommu = NULL;
4050 for_each_active_iommu(iommu, drhd) {
4051 iommu->iommu_state = kcalloc(MAX_SR_DMAR_REGS, sizeof(u32),
4053 if (!iommu->iommu_state)
4059 for_each_active_iommu(iommu, drhd) {
4060 iommu_disable_translation(iommu);
4062 raw_spin_lock_irqsave(&iommu->register_lock, flag);
4064 iommu->iommu_state[SR_DMAR_FECTL_REG] =
4065 readl(iommu->reg + DMAR_FECTL_REG);
4066 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
4067 readl(iommu->reg + DMAR_FEDATA_REG);
4068 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
4069 readl(iommu->reg + DMAR_FEADDR_REG);
4070 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
4071 readl(iommu->reg + DMAR_FEUADDR_REG);
4073 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
4078 for_each_active_iommu(iommu, drhd)
4079 kfree(iommu->iommu_state);
4084 static void iommu_resume(void)
4086 struct dmar_drhd_unit *drhd;
4087 struct intel_iommu *iommu = NULL;
4090 if (init_iommu_hw()) {
4092 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
4094 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
4098 for_each_active_iommu(iommu, drhd) {
4100 raw_spin_lock_irqsave(&iommu->register_lock, flag);
4102 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
4103 iommu->reg + DMAR_FECTL_REG);
4104 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
4105 iommu->reg + DMAR_FEDATA_REG);
4106 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
4107 iommu->reg + DMAR_FEADDR_REG);
4108 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
4109 iommu->reg + DMAR_FEUADDR_REG);
4111 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
4114 for_each_active_iommu(iommu, drhd)
4115 kfree(iommu->iommu_state);
4118 static struct syscore_ops iommu_syscore_ops = {
4119 .resume = iommu_resume,
4120 .suspend = iommu_suspend,
4123 static void __init init_iommu_pm_ops(void)
4125 register_syscore_ops(&iommu_syscore_ops);
4129 static inline void init_iommu_pm_ops(void) {}
4130 #endif /* CONFIG_PM */
4133 int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
4135 struct acpi_dmar_reserved_memory *rmrr;
4136 int prot = DMA_PTE_READ|DMA_PTE_WRITE;
4137 struct dmar_rmrr_unit *rmrru;
4140 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
4144 rmrru->hdr = header;
4145 rmrr = (struct acpi_dmar_reserved_memory *)header;
4146 rmrru->base_address = rmrr->base_address;
4147 rmrru->end_address = rmrr->end_address;
4149 length = rmrr->end_address - rmrr->base_address + 1;
4150 rmrru->resv = iommu_alloc_resv_region(rmrr->base_address, length, prot,
4155 rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
4156 ((void *)rmrr) + rmrr->header.length,
4157 &rmrru->devices_cnt);
4158 if (rmrru->devices_cnt && rmrru->devices == NULL)
4161 list_add(&rmrru->list, &dmar_rmrr_units);
4172 static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
4174 struct dmar_atsr_unit *atsru;
4175 struct acpi_dmar_atsr *tmp;
4177 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
4178 tmp = (struct acpi_dmar_atsr *)atsru->hdr;
4179 if (atsr->segment != tmp->segment)
4181 if (atsr->header.length != tmp->header.length)
4183 if (memcmp(atsr, tmp, atsr->header.length) == 0)
4190 int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4192 struct acpi_dmar_atsr *atsr;
4193 struct dmar_atsr_unit *atsru;
4195 if (system_state >= SYSTEM_RUNNING && !intel_iommu_enabled)
4198 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4199 atsru = dmar_find_atsr(atsr);
4203 atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
4208 * If memory is allocated from slab by ACPI _DSM method, we need to
4209 * copy the memory content because the memory buffer will be freed on exit.
4212 atsru->hdr = (void *)(atsru + 1);
4213 memcpy(atsru->hdr, hdr, hdr->length);
4214 atsru->include_all = atsr->flags & 0x1;
4215 if (!atsru->include_all) {
4216 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
4217 (void *)atsr + atsr->header.length,
4218 &atsru->devices_cnt);
4219 if (atsru->devices_cnt && atsru->devices == NULL) {
4225 list_add_rcu(&atsru->list, &dmar_atsr_units);
4230 static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
4232 dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
4236 int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4238 struct acpi_dmar_atsr *atsr;
4239 struct dmar_atsr_unit *atsru;
4241 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4242 atsru = dmar_find_atsr(atsr);
4244 list_del_rcu(&atsru->list);
4246 intel_iommu_free_atsr(atsru);
4252 int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4256 struct acpi_dmar_atsr *atsr;
4257 struct dmar_atsr_unit *atsru;
4259 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4260 atsru = dmar_find_atsr(atsr);
4264 if (!atsru->include_all && atsru->devices && atsru->devices_cnt) {
4265 for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
4273 static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
4276 struct intel_iommu *iommu = dmaru->iommu;
4278 if (g_iommus[iommu->seq_id])
4281 if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
4282 pr_warn("%s: Doesn't support hardware pass through.\n",
4286 if (!ecap_sc_support(iommu->ecap) &&
4287 domain_update_iommu_snooping(iommu)) {
4288 pr_warn("%s: Doesn't support snooping.\n",
4292 sp = domain_update_iommu_superpage(iommu) - 1;
4293 if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
4294 pr_warn("%s: Doesn't support large page.\n",
4300 * Disable translation if already enabled prior to OS handover.
4302 if (iommu->gcmd & DMA_GCMD_TE)
4303 iommu_disable_translation(iommu);
4305 g_iommus[iommu->seq_id] = iommu;
4306 ret = iommu_init_domains(iommu);
4308 ret = iommu_alloc_root_entry(iommu);
4312 #ifdef CONFIG_INTEL_IOMMU_SVM
4313 if (pasid_enabled(iommu))
4314 intel_svm_alloc_pasid_tables(iommu);
4317 if (dmaru->ignored) {
4319 * we always have to disable PMRs or DMA may fail on this device
4322 iommu_disable_protect_mem_regions(iommu);
4326 intel_iommu_init_qi(iommu);
4327 iommu_flush_write_buffer(iommu);
4329 #ifdef CONFIG_INTEL_IOMMU_SVM
4330 if (pasid_enabled(iommu) && ecap_prs(iommu->ecap)) {
4331 ret = intel_svm_enable_prq(iommu);
4336 ret = dmar_set_interrupt(iommu);
4340 iommu_set_root_entry(iommu);
4341 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
4342 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
4343 iommu_enable_translation(iommu);
4345 iommu_disable_protect_mem_regions(iommu);
4349 disable_dmar_iommu(iommu);
4351 free_dmar_iommu(iommu);
4355 int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
4358 struct intel_iommu *iommu = dmaru->iommu;
4360 if (!intel_iommu_enabled)
4366 ret = intel_iommu_add(dmaru);
4368 disable_dmar_iommu(iommu);
4369 free_dmar_iommu(iommu);
4375 static void intel_iommu_free_dmars(void)
4377 struct dmar_rmrr_unit *rmrru, *rmrr_n;
4378 struct dmar_atsr_unit *atsru, *atsr_n;
4380 list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
4381 list_del(&rmrru->list);
4382 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
4387 list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
4388 list_del(&atsru->list);
4389 intel_iommu_free_atsr(atsru);
4393 int dmar_find_matched_atsr_unit(struct pci_dev *dev)
4396 struct pci_bus *bus;
4397 struct pci_dev *bridge = NULL;
4399 struct acpi_dmar_atsr *atsr;
4400 struct dmar_atsr_unit *atsru;
4402 dev = pci_physfn(dev);
4403 for (bus = dev->bus; bus; bus = bus->parent) {
4405 /* If it's an integrated device, allow ATS */
4408 /* Connected via non-PCIe: no ATS */
4409 if (!pci_is_pcie(bridge) ||
4410 pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
4412 /* If we found the root port, look it up in the ATSR */
4413 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
4418 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
4419 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4420 if (atsr->segment != pci_domain_nr(dev->bus))
4423 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
4424 if (tmp == &bridge->dev)
4427 if (atsru->include_all)
4437 int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
4440 struct dmar_rmrr_unit *rmrru;
4441 struct dmar_atsr_unit *atsru;
4442 struct acpi_dmar_atsr *atsr;
4443 struct acpi_dmar_reserved_memory *rmrr;
4445 if (!intel_iommu_enabled && system_state >= SYSTEM_RUNNING)
4448 list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
4449 rmrr = container_of(rmrru->hdr,
4450 struct acpi_dmar_reserved_memory, header);
4451 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4452 ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
4453 ((void *)rmrr) + rmrr->header.length,
4454 rmrr->segment, rmrru->devices,
4455 rmrru->devices_cnt);
4458 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
4459 dmar_remove_dev_scope(info, rmrr->segment,
4460 rmrru->devices, rmrru->devices_cnt);
4464 list_for_each_entry(atsru, &dmar_atsr_units, list) {
4465 if (atsru->include_all)
4468 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4469 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4470 ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
4471 (void *)atsr + atsr->header.length,
4472 atsr->segment, atsru->devices,
4473 atsru->devices_cnt);
4478 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
4479 if (dmar_remove_dev_scope(info, atsr->segment,
4480 atsru->devices, atsru->devices_cnt))
4489 * Here we only respond to the action of a device being unbound from its driver.
4491 * A newly added device is not attached to its DMAR domain here yet; that will happen
4492 * when the device is mapped to an IOVA.
4494 static int device_notifier(struct notifier_block *nb,
4495 unsigned long action, void *data)
4497 struct device *dev = data;
4498 struct dmar_domain *domain;
4500 if (iommu_dummy(dev))
4503 if (action != BUS_NOTIFY_REMOVED_DEVICE)
4506 domain = find_domain(dev);
4510 dmar_remove_one_dev_info(domain, dev);
4511 if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
4512 domain_exit(domain);
4517 static struct notifier_block device_nb = {
4518 .notifier_call = device_notifier,
4521 static int intel_iommu_memory_notifier(struct notifier_block *nb,
4522 unsigned long val, void *v)
4524 struct memory_notify *mhp = v;
4525 unsigned long long start, end;
4526 unsigned long start_vpfn, last_vpfn;
4529 case MEM_GOING_ONLINE:
4530 start = mhp->start_pfn << PAGE_SHIFT;
4531 end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
4532 if (iommu_domain_identity_map(si_domain, start, end)) {
4533 pr_warn("Failed to build identity map for [%llx-%llx]\n",
4540 case MEM_CANCEL_ONLINE:
4541 start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
4542 last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
4543 while (start_vpfn <= last_vpfn) {
4545 struct dmar_drhd_unit *drhd;
4546 struct intel_iommu *iommu;
4547 struct page *freelist;
4549 iova = find_iova(&si_domain->iovad, start_vpfn);
4551 pr_debug("Failed get IOVA for PFN %lx\n",
4556 iova = split_and_remove_iova(&si_domain->iovad, iova,
4557 start_vpfn, last_vpfn);
4559 pr_warn("Failed to split IOVA PFN [%lx-%lx]\n",
4560 start_vpfn, last_vpfn);
4564 freelist = domain_unmap(si_domain, iova->pfn_lo,
4568 for_each_active_iommu(iommu, drhd)
4569 iommu_flush_iotlb_psi(iommu, si_domain,
4570 iova->pfn_lo, iova_size(iova),
4573 dma_free_pagelist(freelist);
4575 start_vpfn = iova->pfn_hi + 1;
4576 free_iova_mem(iova);
4584 static struct notifier_block intel_iommu_memory_nb = {
4585 .notifier_call = intel_iommu_memory_notifier,
4589 static void free_all_cpu_cached_iovas(unsigned int cpu)
4593 for (i = 0; i < g_num_of_iommus; i++) {
4594 struct intel_iommu *iommu = g_iommus[i];
4595 struct dmar_domain *domain;
4601 for (did = 0; did < cap_ndoms(iommu->cap); did++) {
4602 domain = get_iommu_domain(iommu, (u16)did);
4606 free_cpu_cached_iovas(cpu, &domain->iovad);
4611 static int intel_iommu_cpu_dead(unsigned int cpu)
4613 free_all_cpu_cached_iovas(cpu);
4617 static void intel_disable_iommus(void)
4619 struct intel_iommu *iommu = NULL;
4620 struct dmar_drhd_unit *drhd;
4622 for_each_iommu(iommu, drhd)
4623 iommu_disable_translation(iommu);
4626 static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev)
4628 struct iommu_device *iommu_dev = dev_to_iommu_device(dev);
4630 return container_of(iommu_dev, struct intel_iommu, iommu);
4633 static ssize_t intel_iommu_show_version(struct device *dev,
4634 struct device_attribute *attr,
4637 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4638 u32 ver = readl(iommu->reg + DMAR_VER_REG);
4639 return sprintf(buf, "%d:%d\n",
4640 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
4642 static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);
4644 static ssize_t intel_iommu_show_address(struct device *dev,
4645 struct device_attribute *attr,
4648 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4649 return sprintf(buf, "%llx\n", iommu->reg_phys);
4651 static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);
4653 static ssize_t intel_iommu_show_cap(struct device *dev,
4654 struct device_attribute *attr,
4657 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4658 return sprintf(buf, "%llx\n", iommu->cap);
4660 static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);
4662 static ssize_t intel_iommu_show_ecap(struct device *dev,
4663 struct device_attribute *attr,
4666 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4667 return sprintf(buf, "%llx\n", iommu->ecap);
4669 static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
4671 static ssize_t intel_iommu_show_ndoms(struct device *dev,
4672 struct device_attribute *attr,
4675 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4676 return sprintf(buf, "%ld\n", cap_ndoms(iommu->cap));
4678 static DEVICE_ATTR(domains_supported, S_IRUGO, intel_iommu_show_ndoms, NULL);
4680 static ssize_t intel_iommu_show_ndoms_used(struct device *dev,
4681 struct device_attribute *attr,
4684 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4685 return sprintf(buf, "%d\n", bitmap_weight(iommu->domain_ids,
4686 cap_ndoms(iommu->cap)));
4688 static DEVICE_ATTR(domains_used, S_IRUGO, intel_iommu_show_ndoms_used, NULL);
4690 static struct attribute *intel_iommu_attrs[] = {
4691 &dev_attr_version.attr,
4692 &dev_attr_address.attr,
4694 &dev_attr_ecap.attr,
4695 &dev_attr_domains_supported.attr,
4696 &dev_attr_domains_used.attr,
4700 static struct attribute_group intel_iommu_group = {
4701 .name = "intel-iommu",
4702 .attrs = intel_iommu_attrs,
4705 const struct attribute_group *intel_iommu_groups[] = {
4710 int __init intel_iommu_init(void)
4713 struct dmar_drhd_unit *drhd;
4714 struct intel_iommu *iommu;
4716 /* VT-d is required for a TXT/tboot launch, so enforce that */
4717 force_on = tboot_force_iommu();
4719 if (iommu_init_mempool()) {
4721 panic("tboot: Failed to initialize iommu memory\n");
4725 down_write(&dmar_global_lock);
4726 if (dmar_table_init()) {
4728 panic("tboot: Failed to initialize DMAR table\n");
4732 if (dmar_dev_scope_init() < 0) {
4734 panic("tboot: Failed to initialize DMAR device scope\n");
4738 up_write(&dmar_global_lock);
4741 * The bus notifier takes the dmar_global_lock, so lockdep will
4742 * complain later when we register it under the lock.
4744 dmar_register_bus_notifier();
4746 down_write(&dmar_global_lock);
4748 if (no_iommu || dmar_disabled) {
4750 * We exit the function here to ensure the IOMMU's remapping and
4751 * mempool aren't set up, which means that the IOMMU's PMRs
4752 * won't be disabled via the call to init_dmars(). So disable
4753 * them explicitly here. The PMRs were set up by tboot prior to
4754 * calling SENTER, but the kernel is expected to reset/tear them down.
4757 if (intel_iommu_tboot_noforce) {
4758 for_each_iommu(iommu, drhd)
4759 iommu_disable_protect_mem_regions(iommu);
4763 * Make sure the IOMMUs are switched off, even when we
4764 * boot into a kexec kernel and the previous kernel left
4767 intel_disable_iommus();
4771 if (list_empty(&dmar_rmrr_units))
4772 pr_info("No RMRR found\n");
4774 if (list_empty(&dmar_atsr_units))
4775 pr_info("No ATSR found\n");
4777 if (dmar_init_reserved_ranges()) {
4779 panic("tboot: Failed to reserve iommu ranges\n");
4780 goto out_free_reserved_range;
4783 init_no_remapping_devices();
4788 panic("tboot: Failed to initialize DMARs\n");
4789 pr_err("Initialization failed\n");
4790 goto out_free_reserved_range;
4792 up_write(&dmar_global_lock);
4793 pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
4795 #if defined(CONFIG_X86) && defined(CONFIG_SWIOTLB)
4798 dma_ops = &intel_dma_ops;
4800 init_iommu_pm_ops();
4802 for_each_active_iommu(iommu, drhd) {
4803 iommu_device_sysfs_add(&iommu->iommu, NULL,
4806 iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);
4807 iommu_device_register(&iommu->iommu);
4810 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
4811 bus_register_notifier(&pci_bus_type, &device_nb);
4812 if (si_domain && !hw_pass_through)
4813 register_memory_notifier(&intel_iommu_memory_nb);
4814 cpuhp_setup_state(CPUHP_IOMMU_INTEL_DEAD, "iommu/intel:dead", NULL,
4815 intel_iommu_cpu_dead);
4816 intel_iommu_enabled = 1;
4820 out_free_reserved_range:
4821 put_iova_domain(&reserved_iova_list);
4823 intel_iommu_free_dmars();
4824 up_write(&dmar_global_lock);
4825 iommu_exit_mempool();
4829 static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
4831 struct intel_iommu *iommu = opaque;
4833 domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
4838 * NB - intel-iommu lacks any sort of reference counting for the users of
4839 * dependent devices. If multiple endpoints have intersecting dependent
4840 * devices, unbinding the driver from any one of them will possibly leave
4841 * the others unable to operate.
4843 static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
4845 if (!iommu || !dev || !dev_is_pci(dev))
4848 pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
4851 static void __dmar_remove_one_dev_info(struct device_domain_info *info)
4853 struct intel_iommu *iommu;
4854 unsigned long flags;
4856 assert_spin_locked(&device_domain_lock);
4861 iommu = info->iommu;
4864 iommu_disable_dev_iotlb(info);
4865 domain_context_clear(iommu, info->dev);
4868 unlink_domain_info(info);
4870 spin_lock_irqsave(&iommu->lock, flags);
4871 domain_detach_iommu(info->domain, iommu);
4872 spin_unlock_irqrestore(&iommu->lock, flags);
4874 free_devinfo_mem(info);
4877 static void dmar_remove_one_dev_info(struct dmar_domain *domain,
4880 struct device_domain_info *info;
4881 unsigned long flags;
4883 spin_lock_irqsave(&device_domain_lock, flags);
4884 info = dev->archdata.iommu;
4885 __dmar_remove_one_dev_info(info);
4886 spin_unlock_irqrestore(&device_domain_lock, flags);
4889 static int md_domain_init(struct dmar_domain *domain, int guest_width)
4893 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
4894 domain_reserve_special_ranges(domain);
4896 /* calculate AGAW */
4897 domain->gaw = guest_width;
4898 adjust_width = guestwidth_to_adjustwidth(guest_width);
4899 domain->agaw = width_to_agaw(adjust_width);
4901 domain->iommu_coherency = 0;
4902 domain->iommu_snooping = 0;
4903 domain->iommu_superpage = 0;
4904 domain->max_addr = 0;
4906 /* always allocate the top pgd */
4907 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
4910 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
4914 static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
4916 struct dmar_domain *dmar_domain;
4917 struct iommu_domain *domain;
4919 if (type != IOMMU_DOMAIN_UNMANAGED)
4922 dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
4924 pr_err("Can't allocate dmar_domain\n");
4927 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
4928 pr_err("Domain initialization failed\n");
4929 domain_exit(dmar_domain);
4932 domain_update_iommu_cap(dmar_domain);
4934 domain = &dmar_domain->domain;
4935 domain->geometry.aperture_start = 0;
4936 domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
4937 domain->geometry.force_aperture = true;
4942 static void intel_iommu_domain_free(struct iommu_domain *domain)
4944 domain_exit(to_dmar_domain(domain));
4947 static int intel_iommu_attach_device(struct iommu_domain *domain,
4950 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
4951 struct intel_iommu *iommu;
4955 if (device_is_rmrr_locked(dev)) {
4956 dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
4960 /* normally dev is not mapped */
4961 if (unlikely(domain_context_mapped(dev))) {
4962 struct dmar_domain *old_domain;
4964 old_domain = find_domain(dev);
4967 dmar_remove_one_dev_info(old_domain, dev);
4970 if (!domain_type_is_vm_or_si(old_domain) &&
4971 list_empty(&old_domain->devices))
4972 domain_exit(old_domain);
4976 iommu = device_to_iommu(dev, &bus, &devfn);
4980 /* check if this iommu agaw is sufficient for max mapped address */
4981 addr_width = agaw_to_width(iommu->agaw);
4982 if (addr_width > cap_mgaw(iommu->cap))
4983 addr_width = cap_mgaw(iommu->cap);
4985 if (dmar_domain->max_addr > (1LL << addr_width)) {
4986 pr_err("%s: iommu width (%d) is not "
4987 "sufficient for the mapped address (%llx)\n",
4988 __func__, addr_width, dmar_domain->max_addr);
4991 dmar_domain->gaw = addr_width;
4994 * Knock out extra levels of page tables if necessary
4996 while (iommu->agaw < dmar_domain->agaw) {
4997 struct dma_pte *pte;
4999 pte = dmar_domain->pgd;
5000 if (dma_pte_present(pte)) {
5001 dmar_domain->pgd = (struct dma_pte *)
5002 phys_to_virt(dma_pte_addr(pte));
5003 free_pgtable_page(pte);
5005 dmar_domain->agaw--;
5008 return domain_add_dev_info(dmar_domain, dev);
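/*
 * Note on the level trimming above: a domain built wider than this IOMMU
 * supports only ever populates the first entry of its top-level table, so
 * that entry's target page can simply become the new pgd and the old top
 * level can be freed, one level at a time.
 */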
5011 static void intel_iommu_detach_device(struct iommu_domain *domain,
5014 dmar_remove_one_dev_info(to_dmar_domain(domain), dev);
5017 static int intel_iommu_map(struct iommu_domain *domain,
5018 unsigned long iova, phys_addr_t hpa,
5019 size_t size, int iommu_prot)
5021 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5026 if (iommu_prot & IOMMU_READ)
5027 prot |= DMA_PTE_READ;
5028 if (iommu_prot & IOMMU_WRITE)
5029 prot |= DMA_PTE_WRITE;
5030 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
5031 prot |= DMA_PTE_SNP;
5033 max_addr = iova + size;
5034 if (dmar_domain->max_addr < max_addr) {
5037 /* check if minimum agaw is sufficient for mapped address */
5038 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
5039 if (end < max_addr) {
5040 pr_err("%s: iommu width (%d) is not "
5041 "sufficient for the mapped address (%llx)\n",
5042 __func__, dmar_domain->gaw, max_addr);
5045 dmar_domain->max_addr = max_addr;
5047 /* Round up size to next multiple of PAGE_SIZE, if it and
5048 the low bits of hpa would take us onto the next page */
5049 size = aligned_nrpages(hpa, size);
5050 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
5051 hpa >> VTD_PAGE_SHIFT, size, prot);
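/*
 * Example of the rounding above: mapping size 0x1000 at hpa 0x2800 straddles
 * the pages at 0x2000 and 0x3000, so aligned_nrpages() yields two pages even
 * though the size alone would suggest one.
 */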
5055 static size_t intel_iommu_unmap(struct iommu_domain *domain,
5056 unsigned long iova, size_t size)
5058 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5059 struct page *freelist = NULL;
5060 unsigned long start_pfn, last_pfn;
5061 unsigned int npages;
5062 int iommu_id, level = 0;
5064 /* Cope with horrid API which requires us to unmap more than the
5065 size argument if it happens to be a large-page mapping. */
5066 BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level));
5068 if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
5069 size = VTD_PAGE_SIZE << level_to_offset_bits(level);
5071 start_pfn = iova >> VTD_PAGE_SHIFT;
5072 last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
5074 freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);
5076 npages = last_pfn - start_pfn + 1;
5078 for_each_domain_iommu(iommu_id, dmar_domain)
5079 iommu_flush_iotlb_psi(g_iommus[iommu_id], dmar_domain,
5080 start_pfn, npages, !freelist, 0);
5082 dma_free_pagelist(freelist);
5084 if (dmar_domain->max_addr == iova + size)
5085 dmar_domain->max_addr = iova;
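	/*
	 * Net effect of the size adjustment above: if the IOVA falls inside
	 * a superpage mapping (e.g. 2MiB) and the caller asked for only
	 * 4KiB, the whole superpage is unmapped and flushed, and the larger
	 * size is what the caller gets back.
	 */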
5090 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
5093 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5094 struct dma_pte *pte;
5098 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
5100 phys = dma_pte_addr(pte);
5105 static bool intel_iommu_capable(enum iommu_cap cap)
5107 if (cap == IOMMU_CAP_CACHE_COHERENCY)
5108 return domain_update_iommu_snooping(NULL) == 1;
5109 if (cap == IOMMU_CAP_INTR_REMAP)
5110 return irq_remapping_enabled == 1;
5115 static int intel_iommu_add_device(struct device *dev)
5117 struct intel_iommu *iommu;
5118 struct iommu_group *group;
5121 iommu = device_to_iommu(dev, &bus, &devfn);
5125 iommu_device_link(&iommu->iommu, dev);
5127 group = iommu_group_get_for_dev(dev);
5130 return PTR_ERR(group);
5132 iommu_group_put(group);
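/*
 * Group assignment goes through pci_device_group() (see intel_iommu_ops
 * below), so PCI functions that can alias one another's requester IDs end
 * up sharing a single iommu_group.
 */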
5136 static void intel_iommu_remove_device(struct device *dev)
5138 struct intel_iommu *iommu;
5141 iommu = device_to_iommu(dev, &bus, &devfn);
5145 iommu_group_remove_device(dev);
5147 iommu_device_unlink(&iommu->iommu, dev);
5150 static void intel_iommu_get_resv_regions(struct device *device,
5151 struct list_head *head)
5153 struct iommu_resv_region *reg;
5154 struct dmar_rmrr_unit *rmrr;
5155 struct device *i_dev;
5159 for_each_rmrr_units(rmrr) {
5160 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
5162 if (i_dev != device)
5165 list_add_tail(&rmrr->resv->list, head);
	reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
				      IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
				      0, IOMMU_RESV_MSI);
	if (!reg)
		return;
	list_add_tail(&reg->list, head);
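	/*
	 * Two kinds of regions end up on the list: the firmware RMRR ranges
	 * that must remain accessible to this device, and the IOAPIC MSI
	 * window (IOAPIC_RANGE_START..IOAPIC_RANGE_END), which must never be
	 * handed out as ordinary IOVA space.
	 */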
5178 static void intel_iommu_put_resv_regions(struct device *dev,
5179 struct list_head *head)
5181 struct iommu_resv_region *entry, *next;
	list_for_each_entry_safe(entry, next, head, list) {
		/* only the per-call MSI window is freed; RMRR entries are shared */
		if (entry->type == IOMMU_RESV_MSI)
			kfree(entry);
	}
5189 #ifdef CONFIG_INTEL_IOMMU_SVM
5190 #define MAX_NR_PASID_BITS (20)
5191 static inline unsigned long intel_iommu_get_pts(struct intel_iommu *iommu)
5194 * Convert ecap_pss to extend context entry pts encoding, also
5195 * respect the soft pasid_max value set by the iommu.
5196 * - number of PASID bits = ecap_pss + 1
5197 * - number of PASID table entries = 2^(pts + 5)
5198 * Therefore, pts = ecap_pss - 4
5199 * e.g. KBL ecap_pss = 0x13, PASID has 20 bits, pts = 15
5201 if (ecap_pss(iommu->ecap) < 5)
	/* pasid_max holds the actual number of table entries, not a bit count */
5205 return find_first_bit((unsigned long *)&iommu->pasid_max,
5206 MAX_NR_PASID_BITS) - 5;
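/*
 * Example with the soft limit (assuming pasid_max is a power of two): if
 * pasid_max was clamped to 0x8000 (32768 entries), find_first_bit() returns
 * 15 and the function yields pts = 10, i.e. 2^(10 + 5) table entries, which
 * matches the clamp.
 */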
5209 int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev)
5211 struct device_domain_info *info;
5212 struct context_entry *context;
5213 struct dmar_domain *domain;
5214 unsigned long flags;
5218 domain = get_valid_domain_for_dev(sdev->dev);
5222 spin_lock_irqsave(&device_domain_lock, flags);
5223 spin_lock(&iommu->lock);
5226 info = sdev->dev->archdata.iommu;
5227 if (!info || !info->pasid_supported)
5230 context = iommu_context_addr(iommu, info->bus, info->devfn, 0);
5231 if (WARN_ON(!context))
5234 ctx_lo = context[0].lo;
5236 sdev->did = domain->iommu_did[iommu->seq_id];
5237 sdev->sid = PCI_DEVID(info->bus, info->devfn);
5239 if (!(ctx_lo & CONTEXT_PASIDE)) {
5240 if (iommu->pasid_state_table)
5241 context[1].hi = (u64)virt_to_phys(iommu->pasid_state_table);
5242 context[1].lo = (u64)virt_to_phys(iommu->pasid_table) |
5243 intel_iommu_get_pts(iommu);
		/* CONTEXT_TT_MULTI_LEVEL and CONTEXT_TT_DEV_IOTLB are both
		 * extended to permit requests-with-PASID if the PASIDE bit
		 * is set, which makes sense.  For CONTEXT_TT_PASS_THROUGH,
		 * however, the PASIDE bit is ignored and requests-with-PASID
		 * are unconditionally blocked, which makes less sense.
		 * So convert CONTEXT_TT_PASS_THROUGH to one of the new
		 * "guest mode" translation types, depending on whether ATS
		 * is available.  Annoyingly, the new modes can't be used
		 * *unless* PASIDE is set. */
5255 if ((ctx_lo & CONTEXT_TT_MASK) == (CONTEXT_TT_PASS_THROUGH << 2)) {
5256 ctx_lo &= ~CONTEXT_TT_MASK;
5257 if (info->ats_supported)
5258 ctx_lo |= CONTEXT_TT_PT_PASID_DEV_IOTLB << 2;
5260 ctx_lo |= CONTEXT_TT_PT_PASID << 2;
5262 ctx_lo |= CONTEXT_PASIDE;
5263 if (iommu->pasid_state_table)
5264 ctx_lo |= CONTEXT_DINVE;
5265 if (info->pri_supported)
5266 ctx_lo |= CONTEXT_PRS;
5267 context[0].lo = ctx_lo;
5269 iommu->flush.flush_context(iommu, sdev->did, sdev->sid,
5270 DMA_CCMD_MASK_NOBIT,
5271 DMA_CCMD_DEVICE_INVL);
5274 /* Enable PASID support in the device, if it wasn't already */
5275 if (!info->pasid_enabled)
5276 iommu_enable_dev_iotlb(info);
5278 if (info->ats_enabled) {
5279 sdev->dev_iotlb = 1;
5280 sdev->qdep = info->ats_qdep;
5281 if (sdev->qdep >= QI_DEV_EIOTLB_MAX_INVS)
5287 spin_unlock(&iommu->lock);
5288 spin_unlock_irqrestore(&device_domain_lock, flags);
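	/*
	 * When the context entry was rewritten above, the device-selective
	 * context-cache flush ensures the hardware refetches the updated,
	 * PASID-enabled entry rather than serving requests-with-PASID from
	 * a stale cached copy.
	 */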
5293 struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
5295 struct intel_iommu *iommu;
	if (iommu_dummy(dev)) {
		dev_warn(dev, "No IOMMU translation for device; cannot enable SVM\n");
		return NULL;
	}
5304 iommu = device_to_iommu(dev, &bus, &devfn);
5306 dev_err(dev, "No IOMMU for device; cannot enable SVM\n");
5310 if (!iommu->pasid_table) {
5311 dev_err(dev, "PASID not enabled on IOMMU; cannot enable SVM\n");
5317 #endif /* CONFIG_INTEL_IOMMU_SVM */
5319 const struct iommu_ops intel_iommu_ops = {
5320 .capable = intel_iommu_capable,
5321 .domain_alloc = intel_iommu_domain_alloc,
5322 .domain_free = intel_iommu_domain_free,
5323 .attach_dev = intel_iommu_attach_device,
5324 .detach_dev = intel_iommu_detach_device,
5325 .map = intel_iommu_map,
5326 .unmap = intel_iommu_unmap,
5327 .map_sg = default_iommu_map_sg,
5328 .iova_to_phys = intel_iommu_iova_to_phys,
5329 .add_device = intel_iommu_add_device,
5330 .remove_device = intel_iommu_remove_device,
5331 .get_resv_regions = intel_iommu_get_resv_regions,
5332 .put_resv_regions = intel_iommu_put_resv_regions,
5333 .device_group = pci_device_group,
	.pgsize_bitmap		= INTEL_IOMMU_PGSIZES,
};
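/*
 * Illustrative sketch, not part of the driver: how a kernel consumer (e.g.
 * VFIO) would exercise the ops above through the generic IOMMU API from
 * <linux/iommu.h>.  The device pointer, IOVA and (page-aligned) physical
 * address are made-up placeholders, and error handling is kept minimal.
 */
static int __maybe_unused example_map_one_page(struct device *dev,
					       phys_addr_t pa)
{
	struct iommu_domain *dom;
	const unsigned long iova = 0x100000;	/* arbitrary example IOVA */
	int ret;

	/* ends up in intel_iommu_domain_alloc(IOMMU_DOMAIN_UNMANAGED) */
	dom = iommu_domain_alloc(&pci_bus_type);
	if (!dom)
		return -ENOMEM;

	/* ends up in intel_iommu_attach_device() */
	ret = iommu_attach_device(dom, dev);
	if (ret)
		goto out_free;

	/* ends up in intel_iommu_map(): one 4KiB page, cacheable, R/W */
	ret = iommu_map(dom, iova, pa, PAGE_SIZE,
			IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
	if (ret)
		goto out_detach;

	/* intel_iommu_iova_to_phys() should hand back pa for this IOVA */
	WARN_ON(iommu_iova_to_phys(dom, iova) != pa);

	iommu_unmap(dom, iova, PAGE_SIZE);	/* intel_iommu_unmap() */
out_detach:
	iommu_detach_device(dom, dev);		/* intel_iommu_detach_device() */
out_free:
	iommu_domain_free(dom);			/* intel_iommu_domain_free() */
	return ret;
}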
5337 static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
5339 /* G4x/GM45 integrated gfx dmar support is totally busted. */
5340 pr_info("Disabling IOMMU for graphics on this chipset\n");
5344 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
5345 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
5346 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
5347 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
5348 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
5349 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
5350 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
5352 static void quirk_iommu_rwbf(struct pci_dev *dev)
5355 * Mobile 4 Series Chipset neglects to set RWBF capability,
5356 * but needs it. Same seems to hold for the desktop versions.
5358 pr_info("Forcing write-buffer flush capability\n");
5362 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
5363 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
5364 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
5365 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
5366 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
5367 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
5368 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
5371 #define GGC_MEMORY_SIZE_MASK (0xf << 8)
5372 #define GGC_MEMORY_SIZE_NONE (0x0 << 8)
5373 #define GGC_MEMORY_SIZE_1M (0x1 << 8)
5374 #define GGC_MEMORY_SIZE_2M (0x3 << 8)
5375 #define GGC_MEMORY_VT_ENABLED (0x8 << 8)
5376 #define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
5377 #define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
5378 #define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
5380 static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
5384 if (pci_read_config_word(dev, GGC, &ggc))
5387 if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
5388 pr_info("BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
5390 } else if (dmar_map_gfx) {
5391 /* we have to ensure the gfx device is idle before we flush */
5392 pr_info("Disabling batched IOTLB flush on Ironlake\n");
5393 intel_iommu_strict = 1;
5396 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
5397 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
5398 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
5399 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
5401 /* On Tylersburg chipsets, some BIOSes have been known to enable the
5402 ISOCH DMAR unit for the Azalia sound device, but not give it any
5403 TLB entries, which causes it to deadlock. Check for that. We do
5404 this in a function called from init_dmars(), instead of in a PCI
5405 quirk, because we don't want to print the obnoxious "BIOS broken"
5406 message if VT-d is actually disabled.
5408 static void __init check_tylersburg_isoch(void)
5410 struct pci_dev *pdev;
5411 uint32_t vtisochctrl;
5413 /* If there's no Azalia in the system anyway, forget it. */
5414 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
5419 /* System Management Registers. Might be hidden, in which case
5420 we can't do the sanity check. But that's OK, because the
5421 known-broken BIOSes _don't_ actually hide it, so far. */
5422 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
5426 if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
5433 /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
5434 if (vtisochctrl & 1)
5437 /* Drop all bits other than the number of TLB entries */
5438 vtisochctrl &= 0x1c;
5440 /* If we have the recommended number of TLB entries (16), fine. */
5441 if (vtisochctrl == 0x10)
5444 /* Zero TLB entries? You get to ride the short bus to school. */
5446 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
5447 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
5448 dmi_get_system_info(DMI_BIOS_VENDOR),
5449 dmi_get_system_info(DMI_BIOS_VERSION),
5450 dmi_get_system_info(DMI_PRODUCT_VERSION));
5451 iommu_identity_mapping |= IDENTMAP_AZALIA;
5455 pr_warn("Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",