// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *         Leo Duran <leo.duran@amd.com>
 */

#define pr_fmt(fmt)	"AMD-Vi: " fmt
#define dev_fmt(fmt)	pr_fmt(fmt)

#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/list.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/amd-iommu.h>
#include <linux/export.h>
#include <linux/iommu.h>
#include <linux/kmemleak.h>
#include <linux/mem_encrypt.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>
#include <asm/io_apic.h>
#include <asm/irq_remapping.h>
#include <linux/crash_dump.h>

#include "amd_iommu_proto.h"
#include "amd_iommu_types.h"
#include "irq_remapping.h"
/*
 * definitions for the ACPI scanning code
 */
#define IVRS_HEADER_LENGTH		48

#define ACPI_IVHD_TYPE_MAX_SUPPORTED	0x40
#define ACPI_IVMD_TYPE_ALL		0x20
#define ACPI_IVMD_TYPE			0x21
#define ACPI_IVMD_TYPE_RANGE		0x22

#define IVHD_DEV_ALL			0x01
#define IVHD_DEV_SELECT			0x02
#define IVHD_DEV_SELECT_RANGE_START	0x03
#define IVHD_DEV_RANGE_END		0x04
#define IVHD_DEV_ALIAS			0x42
#define IVHD_DEV_ALIAS_RANGE		0x43
#define IVHD_DEV_EXT_SELECT		0x46
#define IVHD_DEV_EXT_SELECT_RANGE	0x47
#define IVHD_DEV_SPECIAL		0x48
#define IVHD_DEV_ACPI_HID		0xf0

#define UID_NOT_PRESENT			0
#define UID_IS_INTEGER			1
#define UID_IS_CHARACTER		2

#define IVHD_SPECIAL_IOAPIC		1
#define IVHD_SPECIAL_HPET		2

#define IVHD_FLAG_HT_TUN_EN_MASK	0x01
#define IVHD_FLAG_PASSPW_EN_MASK	0x02
#define IVHD_FLAG_RESPASSPW_EN_MASK	0x04
#define IVHD_FLAG_ISOC_EN_MASK		0x08

#define IVMD_FLAG_EXCL_RANGE		0x08
#define IVMD_FLAG_UNITY_MAP		0x01

#define ACPI_DEVFLAG_INITPASS		0x01
#define ACPI_DEVFLAG_EXTINT		0x02
#define ACPI_DEVFLAG_NMI		0x04
#define ACPI_DEVFLAG_SYSMGT1		0x10
#define ACPI_DEVFLAG_SYSMGT2		0x20
#define ACPI_DEVFLAG_LINT0		0x40
#define ACPI_DEVFLAG_LINT1		0x80
#define ACPI_DEVFLAG_ATSDIS		0x10000000

#define LOOP_TIMEOUT	100000
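/*
 * LOOP_TIMEOUT bounds the busy-wait iterations used when polling MMIO
 * status bits (e.g. waiting for GALogRun in iommu_ga_log_enable()).
 */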
/*
 * ACPI table definitions
 *
 * These data structures are laid over the table to parse the important values
 * out of it.
 */

extern const struct iommu_ops amd_iommu_ops;

/*
 * structure describing one IOMMU in the ACPI table. Typically followed by one
 * or more ivhd_entry structures.
 */
	/* Following only valid on IVHD type 11h and 40h */
	u64 efr_reg; /* Exact copy of MMIO_EXT_FEATURES */
} __attribute__((packed));

/*
 * A device entry describing which devices a specific IOMMU translates and
 * which requestor ids they use.
 */
} __attribute__((packed));

/*
 * An AMD IOMMU memory definition structure. It defines things like exclusion
 * ranges for devices and regions that should be unity mapped.
 */
} __attribute__((packed));
bool amd_iommu_irq_remap __read_mostly;

int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
static int amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;

static bool amd_iommu_detected;
static bool __initdata amd_iommu_disabled;
static int amd_iommu_target_ivhd_type;

u16 amd_iommu_last_bdf;			/* largest PCI device id we have
					   to handle */
LIST_HEAD(amd_iommu_unity_map);		/* a list of required unity mappings
					   we find in ACPI */
bool amd_iommu_unmap_flush;		/* if true, flush on every unmap */

LIST_HEAD(amd_iommu_list);		/* list of all AMD IOMMUs in the
					   system */

/* Array to assign indices to IOMMUs */
struct amd_iommu *amd_iommus[MAX_IOMMUS];

/* Number of IOMMUs present in the system */
static int amd_iommus_present;

/* IOMMUs have a non-present cache? */
bool amd_iommu_np_cache __read_mostly;
bool amd_iommu_iotlb_sup __read_mostly = true;

u32 amd_iommu_max_pasid __read_mostly = ~0;

bool amd_iommu_v2_present __read_mostly;
static bool amd_iommu_pc_present __read_mostly;

bool amd_iommu_force_isolation __read_mostly;

/*
 * Pointer to the device table which is shared by all AMD IOMMUs.
 * It is indexed by the PCI device id or the HT unit id and contains
 * information about the domain the device belongs to as well as the
 * page table root pointer.
 */
struct dev_table_entry *amd_iommu_dev_table;

/*
 * Pointer to a device table to which the content of the old device
 * table will be copied. It is only used by the kdump kernel.
 */
static struct dev_table_entry *old_dev_tbl_cpy;

/*
 * The alias table is a driver specific data structure which contains the
 * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
 * More than one device can share the same requestor id.
 */
u16 *amd_iommu_alias_table;

/*
 * The rlookup table is used to find the IOMMU which is responsible
 * for a specific device. It is also indexed by the PCI device id.
 */
struct amd_iommu **amd_iommu_rlookup_table;
EXPORT_SYMBOL(amd_iommu_rlookup_table);

/*
 * This table is used to find the irq remapping table for a given device id
 * quickly.
 */
struct irq_remap_table **irq_lookup_table;

/*
 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
 * to know which ones are already in use.
 */
unsigned long *amd_iommu_pd_alloc_bitmap;

static u32 dev_table_size;	/* size of the device table */
static u32 alias_table_size;	/* size of the alias table */
static u32 rlookup_table_size;	/* size of the rlookup table */
enum iommu_init_state {
	IOMMU_CMDLINE_DISABLED,

/* Early ioapic and hpet maps from kernel command line */
#define EARLY_MAP_SIZE		4
static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE];
static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE];
static struct acpihid_map_entry __initdata early_acpihid_map[EARLY_MAP_SIZE];

static int __initdata early_ioapic_map_size;
static int __initdata early_hpet_map_size;
static int __initdata early_acpihid_map_size;

static bool __initdata cmdline_maps;

static enum iommu_init_state init_state = IOMMU_START_STATE;

static int amd_iommu_enable_interrupts(void);
static int __init iommu_go_to_state(enum iommu_init_state state);
static void init_device_table_dma(void);

static bool amd_iommu_pre_enabled = true;

bool translation_pre_enabled(struct amd_iommu *iommu)
	return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED);
EXPORT_SYMBOL(translation_pre_enabled);

static void clear_translation_pre_enabled(struct amd_iommu *iommu)
	iommu->flags &= ~AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;

static void init_translation_status(struct amd_iommu *iommu)
	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	if (ctrl & (1<<CONTROL_IOMMU_EN))
		iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;

static inline void update_last_devid(u16 devid)
	if (devid > amd_iommu_last_bdf)
		amd_iommu_last_bdf = devid;
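/*
 * tbl_size() below rounds a per-device-id table up to a power-of-two
 * number of pages. As an illustrative example: with amd_iommu_last_bdf
 * of 0xffff and 32-byte device table entries, (0xffff + 1) * 32 is
 * 2 MiB, i.e. a single order-9 allocation with 4K pages.
 */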
static inline unsigned long tbl_size(int entry_size)
	unsigned shift = PAGE_SHIFT +
			 get_order(((int)amd_iommu_last_bdf + 1) * entry_size);

int amd_iommu_get_num_iommus(void)
	return amd_iommus_present;

/* Access to l1 and l2 indexed register spaces */

static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
	pci_read_config_dword(iommu->dev, 0xfc, &val);

static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
	pci_write_config_dword(iommu->dev, 0xfc, val);
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));

static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
	pci_write_config_dword(iommu->dev, 0xf0, address);
	pci_read_config_dword(iommu->dev, 0xf4, &val);

static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
	pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
	pci_write_config_dword(iommu->dev, 0xf4, val);
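/*
 * The four helpers above use the IOMMU's indirect register access scheme:
 * the target register address goes into an index port in PCI config space
 * (0xf0 for the L2 space, 0xf8 for the L1 space with the L1 bank number in
 * bits 16-31), and the data is then transferred through the matching data
 * port (0xf4 or 0xfc). Setting the write-enable bit in the index value
 * (bit 8 for L2, bit 31 for L1) makes the following data access a write.
 */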
/****************************************************************************
 *
 * AMD IOMMU MMIO register space handling functions
 *
 * These functions are used to program the IOMMU device registers in
 * MMIO space required for that driver.
 *
 ****************************************************************************/

/*
 * This function sets the exclusion range in the IOMMU. DMA accesses to the
 * exclusion range are passed through untranslated.
 */
static void iommu_set_exclusion_range(struct amd_iommu *iommu)
	u64 start = iommu->exclusion_start & PAGE_MASK;
	u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK;

	if (!iommu->exclusion_start)

	entry = start | MMIO_EXCL_ENABLE_MASK;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
		    &entry, sizeof(entry));

	memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
		    &entry, sizeof(entry));

/* Programs the physical address of the device table into the IOMMU hardware */
static void iommu_set_device_table(struct amd_iommu *iommu)
	BUG_ON(iommu->mmio_base == NULL);

	entry = iommu_virt_to_phys(amd_iommu_dev_table);
	entry |= (dev_table_size >> 12) - 1;
	memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
		    &entry, sizeof(entry));

/* Generic functions to enable/disable certain features of the IOMMU. */
static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl |= (1ULL << bit);
	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);

static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= ~(1ULL << bit);
	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);

static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= ~CTRL_INV_TO_MASK;
	ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK;
	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);

/* Function to enable the hardware */
static void iommu_enable(struct amd_iommu *iommu)
	iommu_feature_enable(iommu, CONTROL_IOMMU_EN);

static void iommu_disable(struct amd_iommu *iommu)
	if (!iommu->mmio_base)

	/* Disable command buffer */
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	/* Disable event logging and event interrupts */
	iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);

	/* Disable IOMMU GA_LOG */
	iommu_feature_disable(iommu, CONTROL_GALOG_EN);
	iommu_feature_disable(iommu, CONTROL_GAINT_EN);

	/* Disable IOMMU hardware itself */
	iommu_feature_disable(iommu, CONTROL_IOMMU_EN);

/*
 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
 * the system has one.
 */
static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end)
	if (!request_mem_region(address, end, "amd_iommu")) {
		pr_err("Can not reserve memory region %llx-%llx for mmio\n",
			address, end);
		pr_err("This is a BIOS bug. Please contact your hardware vendor\n");

	return (u8 __iomem *)ioremap_nocache(address, end);

static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
	if (iommu->mmio_base)
		iounmap(iommu->mmio_base);
	release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end);

static inline u32 get_ivhd_header_size(struct ivhd_header *h)

/****************************************************************************
 *
 * The functions below belong to the first pass of AMD IOMMU ACPI table
 * parsing. In this pass we try to find out the highest device id this
 * code has to handle. Upon this information the size of the shared data
 * structures is determined later.
 *
 ****************************************************************************/

/*
 * This function calculates the length of a given IVHD entry
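 *
 * For standard entries the length is encoded in the two most significant
 * bits of the type byte (hence the 0x04 << (type >> 6) below, giving 4,
 * 8, 16 or 32 bytes); ACPI_HID entries carry a variable-length UID, so
 * their length is instead derived from the UID length byte at offset 21.
 */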
static inline int ivhd_entry_length(u8 *ivhd)
	u32 type = ((struct ivhd_entry *)ivhd)->type;

	if (type < 0x80) {
		return 0x04 << (*ivhd >> 6);
	} else if (type == IVHD_DEV_ACPI_HID) {
		/* For ACPI_HID, offset 21 is uid len */
		return *((u8 *)ivhd + 21) + 22;

/*
 * After reading the highest device id from the IOMMU PCI capability header
 * this function looks if there is a higher device id defined in the ACPI table
 */
static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
	u8 *p = (void *)h, *end = (void *)h;
	struct ivhd_entry *dev;

	u32 ivhd_size = get_ivhd_header_size(h);
		pr_err("Unsupported IVHD type %#x\n", h->type);

		dev = (struct ivhd_entry *)p;
			/* Use maximum BDF value for DEV_ALL */
			update_last_devid(0xffff);
		case IVHD_DEV_SELECT:
		case IVHD_DEV_RANGE_END:
		case IVHD_DEV_EXT_SELECT:
			/* all the above subfield types refer to device ids */
			update_last_devid(dev->devid);
		p += ivhd_entry_length(p);

static int __init check_ivrs_checksum(struct acpi_table_header *table)
	u8 checksum = 0, *p = (u8 *)table;

	for (i = 0; i < table->length; ++i)
		/* ACPI table corrupt */
		pr_err(FW_BUG "IVRS invalid checksum\n");

/*
 * Iterate over all IVHD entries in the ACPI table and find the highest device
 * id which we need to handle. This is the first of three functions which parse
 * the ACPI table. So we check the checksum here.
 */
static int __init find_last_devid_acpi(struct acpi_table_header *table)
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;

	p += IVRS_HEADER_LENGTH;
	end += table->length;
		h = (struct ivhd_header *)p;
		if (h->type == amd_iommu_target_ivhd_type) {
			int ret = find_last_devid_from_ivhd(h);

/****************************************************************************
 *
 * The following functions belong to the code path which parses the ACPI table
 * the second time. In this ACPI parsing iteration we allocate IOMMU specific
 * data structures, initialize the device/alias/rlookup table and also
 * basically initialize the hardware.
 *
 ****************************************************************************/

/*
 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
 * write commands to that buffer later and the IOMMU will execute them
 * asynchronously.
 */
static int __init alloc_command_buffer(struct amd_iommu *iommu)
	iommu->cmd_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						  get_order(CMD_BUFFER_SIZE));

	return iommu->cmd_buf ? 0 : -ENOMEM;

/*
 * This function resets the command buffer if the IOMMU stopped fetching
 * commands from it.
 */
void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
	iommu->cmd_buf_head = 0;
	iommu->cmd_buf_tail = 0;

	iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
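/*
 * Note: the command buffer is a ring; the IOMMU fetches commands at head
 * while the driver queues at tail, so zeroing both pointers above drops
 * any commands the hardware had not yet fetched.
 */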
/*
 * This function writes the command buffer address to the hardware and
 * enables it.
 */
static void iommu_enable_command_buffer(struct amd_iommu *iommu)
	BUG_ON(iommu->cmd_buf == NULL);

	entry = iommu_virt_to_phys(iommu->cmd_buf);
	entry |= MMIO_CMD_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
		    &entry, sizeof(entry));

	amd_iommu_reset_cmd_buffer(iommu);

/*
 * This function disables the command buffer
 */
static void iommu_disable_command_buffer(struct amd_iommu *iommu)
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

static void __init free_command_buffer(struct amd_iommu *iommu)
	free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));

/* allocates the memory where the IOMMU will log its events to */
static int __init alloc_event_buffer(struct amd_iommu *iommu)
	iommu->evt_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						  get_order(EVT_BUFFER_SIZE));

	return iommu->evt_buf ? 0 : -ENOMEM;

static void iommu_enable_event_buffer(struct amd_iommu *iommu)
	BUG_ON(iommu->evt_buf == NULL);

	entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;

	memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);

/*
 * This function disables the event log buffer
 */
static void iommu_disable_event_buffer(struct amd_iommu *iommu)
	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);

static void __init free_event_buffer(struct amd_iommu *iommu)
	free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));

/* allocates the memory where the IOMMU will log PPR requests to */
static int __init alloc_ppr_log(struct amd_iommu *iommu)
	iommu->ppr_log = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						  get_order(PPR_LOG_SIZE));

	return iommu->ppr_log ? 0 : -ENOMEM;

static void iommu_enable_ppr_log(struct amd_iommu *iommu)
	if (iommu->ppr_log == NULL)

	entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_PPFLOG_EN);
	iommu_feature_enable(iommu, CONTROL_PPR_EN);

static void __init free_ppr_log(struct amd_iommu *iommu)
	if (iommu->ppr_log == NULL)

	free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));

static void free_ga_log(struct amd_iommu *iommu)
#ifdef CONFIG_IRQ_REMAP
		free_pages((unsigned long)iommu->ga_log,
			   get_order(GA_LOG_SIZE));
	if (iommu->ga_log_tail)
		free_pages((unsigned long)iommu->ga_log_tail,

static int iommu_ga_log_enable(struct amd_iommu *iommu)
#ifdef CONFIG_IRQ_REMAP
	status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);

	/* Check if already running */
	if (status & (MMIO_STATUS_GALOG_RUN_MASK))

	iommu_feature_enable(iommu, CONTROL_GAINT_EN);
	iommu_feature_enable(iommu, CONTROL_GALOG_EN);

	for (i = 0; i < LOOP_TIMEOUT; ++i) {
		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
		if (status & (MMIO_STATUS_GALOG_RUN_MASK))

	if (i >= LOOP_TIMEOUT)
#endif /* CONFIG_IRQ_REMAP */

#ifdef CONFIG_IRQ_REMAP
static int iommu_init_ga_log(struct amd_iommu *iommu)
	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))

	iommu->ga_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					       get_order(GA_LOG_SIZE));

	iommu->ga_log_tail = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
	if (!iommu->ga_log_tail)

	entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
		    &entry, sizeof(entry));
	entry = (iommu_virt_to_phys(iommu->ga_log_tail) &
		 (BIT_ULL(52)-1)) & ~7ULL;
	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
		    &entry, sizeof(entry));
	writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET);
#endif /* CONFIG_IRQ_REMAP */

static int iommu_init_ga(struct amd_iommu *iommu)
#ifdef CONFIG_IRQ_REMAP
	/* Note: We have already checked GASup from IVRS table.
	 * Now, we need to make sure that GAMSup is set.
	 */
	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
	    !iommu_feature(iommu, FEATURE_GAM_VAPIC))
		amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;

	ret = iommu_init_ga_log(iommu);
#endif /* CONFIG_IRQ_REMAP */

static void iommu_enable_xt(struct amd_iommu *iommu)
#ifdef CONFIG_IRQ_REMAP
	/*
	 * XT mode (32-bit APIC destination ID) requires
	 * GA mode (128-bit IRTE support) as a prerequisite.
	 */
	if (AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir) &&
	    amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
		iommu_feature_enable(iommu, CONTROL_XT_EN);
#endif /* CONFIG_IRQ_REMAP */

static void iommu_enable_gt(struct amd_iommu *iommu)
	if (!iommu_feature(iommu, FEATURE_GT))

	iommu_feature_enable(iommu, CONTROL_GT_EN);

/* sets a specific bit in the device table entry. */
static void set_dev_entry_bit(u16 devid, u8 bit)
	int i = (bit >> 6) & 0x03;
	int _bit = bit & 0x3f;

	amd_iommu_dev_table[devid].data[i] |= (1UL << _bit);

static int get_dev_entry_bit(u16 devid, u8 bit)
	int i = (bit >> 6) & 0x03;
	int _bit = bit & 0x3f;

	return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
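/*
 * A device table entry is four 64-bit words, so the two helpers above
 * split a flat bit number as bits 7:6 -> word index and bits 5:0 -> bit
 * position within the word (e.g. bit 96 lands in data[1] at position 32).
 */

/*
 * In a kdump kernel the IOMMUs may still be live and translating with the
 * old kernel's device table. copy_device_table() below preserves those
 * entries (domain ids, interrupt remapping settings) so in-flight DMA and
 * remapped interrupts keep working until the new table takes over.
 */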
static bool copy_device_table(void)
	u64 int_ctl, int_tab_len, entry = 0, last_entry = 0;
	struct dev_table_entry *old_devtb = NULL;
	u32 lo, hi, devid, old_devtb_size;
	phys_addr_t old_devtb_phys;
	struct amd_iommu *iommu;
	u16 dom_id, dte_v, irq_v;

	if (!amd_iommu_pre_enabled)

	pr_warn("Translation is already enabled - trying to copy translation structures\n");
	for_each_iommu(iommu) {
		/* All IOMMUs should use the same device table with the same size */
		lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET);
		hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4);
		entry = (((u64) hi) << 32) + lo;
		if (last_entry && last_entry != entry) {
			pr_err("IOMMU:%d should use the same dev table as others!\n",

		old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12;
		if (old_devtb_size != dev_table_size) {
			pr_err("The device table size of IOMMU:%d is not expected!\n",

	/*
	 * When SME is enabled in the first kernel, the entry includes the
	 * memory encryption mask (sme_me_mask); we must remove the memory
	 * encryption mask to obtain the true physical address in the kdump
	 * kernel.
	 */
	old_devtb_phys = __sme_clr(entry) & PAGE_MASK;

	if (old_devtb_phys >= 0x100000000ULL) {
		pr_err("The address of old device table is above 4G, not trustworthy!\n");
	old_devtb = (sme_active() && is_kdump_kernel())
		    ? (__force void *)ioremap_encrypted(old_devtb_phys,
							dev_table_size)
		    : memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB);

	gfp_flag = GFP_KERNEL | __GFP_ZERO | GFP_DMA32;
	old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,
						   get_order(dev_table_size));
	if (old_dev_tbl_cpy == NULL) {
		pr_err("Failed to allocate memory for copying old device table!\n");

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
		old_dev_tbl_cpy[devid] = old_devtb[devid];
		dom_id = old_devtb[devid].data[1] & DEV_DOMID_MASK;
		dte_v = old_devtb[devid].data[0] & DTE_FLAG_V;

		if (dte_v && dom_id) {
			old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0];
			old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1];
			__set_bit(dom_id, amd_iommu_pd_alloc_bitmap);
			/* If gcr3 table existed, mask it out */
			if (old_devtb[devid].data[0] & DTE_FLAG_GV) {
				tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
				tmp |= DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
				old_dev_tbl_cpy[devid].data[1] &= ~tmp;
				tmp = DTE_GCR3_VAL_A(~0ULL) << DTE_GCR3_SHIFT_A;
				old_dev_tbl_cpy[devid].data[0] &= ~tmp;

		irq_v = old_devtb[devid].data[2] & DTE_IRQ_REMAP_ENABLE;
		int_ctl = old_devtb[devid].data[2] & DTE_IRQ_REMAP_INTCTL_MASK;
		int_tab_len = old_devtb[devid].data[2] & DTE_IRQ_TABLE_LEN_MASK;
		if (irq_v && (int_ctl || int_tab_len)) {
			if ((int_ctl != DTE_IRQ_REMAP_INTCTL) ||
			    (int_tab_len != DTE_IRQ_TABLE_LEN)) {
				pr_err("Wrong old irq remapping flag: %#x\n", devid);

			old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2];
void amd_iommu_apply_erratum_63(u16 devid)
	sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
		 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);

	if (sysmgt == 0x01)
		set_dev_entry_bit(devid, DEV_ENTRY_IW);
/* Writes the specific IOMMU for a device into the rlookup table */
static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
	amd_iommu_rlookup_table[devid] = iommu;

/*
 * This function takes the device specific flags read from the ACPI
 * table and sets up the device table entry with that information
 */
static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
					   u16 devid, u32 flags, u32 ext_flags)
	if (flags & ACPI_DEVFLAG_INITPASS)
		set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
	if (flags & ACPI_DEVFLAG_EXTINT)
		set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
	if (flags & ACPI_DEVFLAG_NMI)
		set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
	if (flags & ACPI_DEVFLAG_SYSMGT1)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
	if (flags & ACPI_DEVFLAG_SYSMGT2)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
	if (flags & ACPI_DEVFLAG_LINT0)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
	if (flags & ACPI_DEVFLAG_LINT1)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);

	amd_iommu_apply_erratum_63(devid);

	set_iommu_for_device(iommu, devid);

static int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
	struct devid_map *entry;
	struct list_head *list;

	if (type == IVHD_SPECIAL_IOAPIC)
	else if (type == IVHD_SPECIAL_HPET)

	list_for_each_entry(entry, list, list) {
		if (!(entry->id == id && entry->cmd_line))

		pr_info("Command-line override present for %s id %d - ignoring\n",
			type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id);

		*devid = entry->devid;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);

	entry->devid	= *devid;
	entry->cmd_line	= cmd_line;

	list_add_tail(&entry->list, list);

static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u16 *devid,
	struct acpihid_map_entry *entry;
	struct list_head *list = &acpihid_map;

	list_for_each_entry(entry, list, list) {
		if (strcmp(entry->hid, hid) ||
		    (*uid && *entry->uid && strcmp(entry->uid, uid)) ||

		pr_info("Command-line override for hid:%s uid:%s\n",
		*devid = entry->devid;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);

	memcpy(entry->uid, uid, strlen(uid));
	memcpy(entry->hid, hid, strlen(hid));
	entry->devid = *devid;
	entry->cmd_line	= cmd_line;
	entry->root_devid = (entry->devid & (~0x7));

	pr_info("%s, add hid:%s, uid:%s, rdevid:%d\n",
		entry->cmd_line ? "cmd" : "ivrs",
		entry->hid, entry->uid, entry->root_devid);

	list_add_tail(&entry->list, list);

static int __init add_early_maps(void)
	for (i = 0; i < early_ioapic_map_size; ++i) {
		ret = add_special_device(IVHD_SPECIAL_IOAPIC,
					 early_ioapic_map[i].id,
					 &early_ioapic_map[i].devid,
					 early_ioapic_map[i].cmd_line);

	for (i = 0; i < early_hpet_map_size; ++i) {
		ret = add_special_device(IVHD_SPECIAL_HPET,
					 early_hpet_map[i].id,
					 &early_hpet_map[i].devid,
					 early_hpet_map[i].cmd_line);

	for (i = 0; i < early_acpihid_map_size; ++i) {
		ret = add_acpi_hid_device(early_acpihid_map[i].hid,
					  early_acpihid_map[i].uid,
					  &early_acpihid_map[i].devid,
					  early_acpihid_map[i].cmd_line);

/*
 * Reads the device exclusion range from ACPI and initializes the IOMMU with
 * it.
 */
static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];

	if (!(m->flags & IVMD_FLAG_EXCL_RANGE))

	/*
	 * Exclusion ranges can only be configured per IOMMU, not per
	 * device, but the exclusion range can be enabled per device.
	 * That is done here.
	 */
	set_dev_entry_bit(devid, DEV_ENTRY_EX);
	iommu->exclusion_start = m->range_start;
	iommu->exclusion_length = m->range_length;
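	/*
	 * The start/length recorded here are programmed into the hardware
	 * later, by iommu_set_exclusion_range(), when the IOMMU is enabled.
	 */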
/*
 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
 * initializes the hardware and our data structures with it.
 */
static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
				       struct ivhd_header *h)
	u8 *end = p, flags = 0;
	u16 devid = 0, devid_start = 0, devid_to = 0;
	u32 dev_i, ext_flags = 0;
	struct ivhd_entry *e;

	ret = add_early_maps();

	/*
	 * First save the recommended feature enable bits from ACPI
	 */
	iommu->acpi_flags = h->flags;

	/*
	 * Done. Now parse the device entries
	 */
	ivhd_size = get_ivhd_header_size(h);
		pr_err("Unsupported IVHD type %#x\n", h->type);

		e = (struct ivhd_entry *)p;

			DUMP_printk(" DEV_ALL\t\t\tflags: %02x\n", e->flags);

			for (dev_i = 0; dev_i <= amd_iommu_last_bdf; ++dev_i)
				set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0);
		case IVHD_DEV_SELECT:

			DUMP_printk(" DEV_SELECT\t\t\t devid: %02x:%02x.%x "
				    PCI_BUS_NUM(e->devid),

			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
		case IVHD_DEV_SELECT_RANGE_START:

			DUMP_printk(" DEV_SELECT_RANGE_START\t "
				    "devid: %02x:%02x.%x flags: %02x\n",
				    PCI_BUS_NUM(e->devid),

			devid_start = e->devid;
		case IVHD_DEV_ALIAS:

			DUMP_printk(" DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
				    "flags: %02x devid_to: %02x:%02x.%x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_BUS_NUM(e->ext >> 8),
				    PCI_SLOT(e->ext >> 8),
				    PCI_FUNC(e->ext >> 8));

			devid_to = e->ext >> 8;
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
			set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
			amd_iommu_alias_table[devid] = devid_to;
		case IVHD_DEV_ALIAS_RANGE:

			DUMP_printk(" DEV_ALIAS_RANGE\t\t "
				    "devid: %02x:%02x.%x flags: %02x "
				    "devid_to: %02x:%02x.%x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_BUS_NUM(e->ext >> 8),
				    PCI_SLOT(e->ext >> 8),
				    PCI_FUNC(e->ext >> 8));

			devid_start = e->devid;
			devid_to = e->ext >> 8;
		case IVHD_DEV_EXT_SELECT:

			DUMP_printk(" DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
				    "flags: %02x ext: %08x\n",
				    PCI_BUS_NUM(e->devid),

			set_dev_entry_from_acpi(iommu, devid, e->flags,
		case IVHD_DEV_EXT_SELECT_RANGE:

			DUMP_printk(" DEV_EXT_SELECT_RANGE\t devid: "
				    "%02x:%02x.%x flags: %02x ext: %08x\n",
				    PCI_BUS_NUM(e->devid),

			devid_start = e->devid;
		case IVHD_DEV_RANGE_END:

			DUMP_printk(" DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_FUNC(e->devid));

			for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
					amd_iommu_alias_table[dev_i] = devid_to;
					set_dev_entry_from_acpi(iommu,
						devid_to, flags, ext_flags);
				set_dev_entry_from_acpi(iommu, dev_i,
		case IVHD_DEV_SPECIAL: {

			handle = e->ext & 0xff;
			devid  = (e->ext >> 8) & 0xffff;
			type   = (e->ext >> 24) & 0xff;

			if (type == IVHD_SPECIAL_IOAPIC)
			else if (type == IVHD_SPECIAL_HPET)

			DUMP_printk(" DEV_SPECIAL(%s[%d])\t\tdevid: %02x:%02x.%x\n",

			ret = add_special_device(type, handle, &devid, false);

			/*
			 * add_special_device might update the devid in case a
			 * command-line override is present. So call
			 * set_dev_entry_from_acpi after add_special_device.
			 */
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
		case IVHD_DEV_ACPI_HID: {
			u8 hid[ACPIHID_HID_LEN] = {0};
			u8 uid[ACPIHID_UID_LEN] = {0};

			if (h->type != 0x40) {
				pr_err(FW_BUG "Invalid IVHD device type %#x\n",

			memcpy(hid, (u8 *)(&e->ext), ACPIHID_HID_LEN - 1);
			hid[ACPIHID_HID_LEN - 1] = '\0';

				pr_err(FW_BUG "Invalid HID.\n");

			case UID_NOT_PRESENT:

					pr_warn(FW_BUG "Invalid UID length.\n");
			case UID_IS_INTEGER:

				sprintf(uid, "%d", e->uid);
			case UID_IS_CHARACTER:

				memcpy(uid, (u8 *)(&e->uid), ACPIHID_UID_LEN - 1);
				uid[ACPIHID_UID_LEN - 1] = '\0';

			DUMP_printk(" DEV_ACPI_HID(%s[%s])\t\tdevid: %02x:%02x.%x\n",

			ret = add_acpi_hid_device(hid, uid, &devid, false);

			/*
			 * add_special_device might update the devid in case a
			 * command-line override is present. So call
			 * set_dev_entry_from_acpi after add_special_device.
			 */
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);

		p += ivhd_entry_length(p);
static void __init free_iommu_one(struct amd_iommu *iommu)
	free_command_buffer(iommu);
	free_event_buffer(iommu);
	free_ppr_log(iommu);
	iommu_unmap_mmio_space(iommu);

static void __init free_iommu_all(void)
	struct amd_iommu *iommu, *next;

	for_each_iommu_safe(iommu, next) {
		list_del(&iommu->list);
		free_iommu_one(iommu);

/*
 * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
 *
 * BIOS should disable L2B miscellaneous clock gating by setting
 * L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
 */
static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
	if ((boot_cpu_data.x86 != 0x15) ||
	    (boot_cpu_data.x86_model < 0x10) ||
	    (boot_cpu_data.x86_model > 0x1f))

	pci_write_config_dword(iommu->dev, 0xf0, 0x90);
	pci_read_config_dword(iommu->dev, 0xf4, &value);

	/* Select NB indirect register 0x90 and enable writing */
	pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8));

	pci_write_config_dword(iommu->dev, 0xf4, value | 0x4);
	pci_info(iommu->dev, "Applying erratum 746 workaround\n");

	/* Clear the enable writing bit */
	pci_write_config_dword(iommu->dev, 0xf0, 0x90);

/*
 * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
 *
 * BIOS should enable ATS write permission check by setting
 * L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b
 */
static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
	if ((boot_cpu_data.x86 != 0x15) ||
	    (boot_cpu_data.x86_model < 0x30) ||
	    (boot_cpu_data.x86_model > 0x3f))

	/* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */
	value = iommu_read_l2(iommu, 0x47);

	/* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */
	iommu_write_l2(iommu, 0x47, value | BIT(0));

	pci_info(iommu->dev, "Applying ATS write check workaround\n");
/*
 * This function glues the initialization function for one IOMMU
 * together and also allocates the command buffer and programs the
 * hardware. It does NOT enable the IOMMU. This is done afterwards.
 */
static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
	raw_spin_lock_init(&iommu->lock);

	/* Add IOMMU to internal data structures */
	list_add_tail(&iommu->list, &amd_iommu_list);
	iommu->index = amd_iommus_present++;

	if (unlikely(iommu->index >= MAX_IOMMUS)) {
		WARN(1, "System has more IOMMUs than supported by this driver\n");

	/* Index is fine - add IOMMU to the array */
	amd_iommus[iommu->index] = iommu;

	/*
	 * Copy data from ACPI table entry to the iommu struct
	 */
	iommu->devid   = h->devid;
	iommu->cap_ptr = h->cap_ptr;
	iommu->pci_seg = h->pci_seg;
	iommu->mmio_phys = h->mmio_phys;

		/* Check if IVHD EFR contains proper max banks/counters */
		if ((h->efr_attr != 0) &&
		    ((h->efr_attr & (0xF << 13)) != 0) &&
		    ((h->efr_attr & (0x3F << 17)) != 0))
			iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
			iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
		if (((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
		if (((h->efr_attr & (0x1 << IOMMU_FEAT_XTSUP_SHIFT)) == 0))
			amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;

		if (h->efr_reg & (1 << 9))
			iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
			iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
		if (((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0))
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
		if (((h->efr_reg & (0x1 << IOMMU_EFR_XTSUP_SHIFT)) == 0))
			amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;

	iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys,
						iommu->mmio_phys_end);
	if (!iommu->mmio_base)

	if (alloc_command_buffer(iommu))

	if (alloc_event_buffer(iommu))

	iommu->int_enabled = false;

	init_translation_status(iommu);
	if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
		iommu_disable(iommu);
		clear_translation_pre_enabled(iommu);
		pr_warn("Translation was enabled for IOMMU:%d but we are not in kdump mode\n",
	if (amd_iommu_pre_enabled)
		amd_iommu_pre_enabled = translation_pre_enabled(iommu);

	ret = init_iommu_from_acpi(iommu, h);

	ret = amd_iommu_create_irq_domain(iommu);

	/*
	 * Make sure IOMMU is not considered to translate itself. The IVRS
	 * table tells us so, but this is a lie!
	 */
	amd_iommu_rlookup_table[iommu->devid] = NULL;
/*
 * get_highest_supported_ivhd_type - Look up the appropriate IVHD type
 * @ivrs: Pointer to the IVRS header
 *
 * This function searches through all IVHD blocks up to the maximum
 * supported IVHD type and returns the last (highest) type found.
 */
static u8 get_highest_supported_ivhd_type(struct acpi_table_header *ivrs)
	u8 *base = (u8 *)ivrs;
	struct ivhd_header *ivhd = (struct ivhd_header *)
				   (base + IVRS_HEADER_LENGTH);
	u8 last_type = ivhd->type;
	u16 devid = ivhd->devid;

	while (((u8 *)ivhd - base < ivrs->length) &&
	       (ivhd->type <= ACPI_IVHD_TYPE_MAX_SUPPORTED)) {
		u8 *p = (u8 *) ivhd;

		if (ivhd->devid == devid)
			last_type = ivhd->type;
		ivhd = (struct ivhd_header *)(p + ivhd->length);

/*
 * Iterates over all IOMMU entries in the ACPI table, allocates the
 * IOMMU structure and initializes it with init_iommu_one()
 */
static int __init init_iommu_all(struct acpi_table_header *table)
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;
	struct amd_iommu *iommu;

	end += table->length;
	p += IVRS_HEADER_LENGTH;

		h = (struct ivhd_header *)p;
		if (*p == amd_iommu_target_ivhd_type) {

			DUMP_printk("device: %02x:%02x.%01x cap: %04x "
				    "seg: %d flags: %01x info %04x\n",
				    PCI_BUS_NUM(h->devid), PCI_SLOT(h->devid),
				    PCI_FUNC(h->devid), h->cap_ptr,
				    h->pci_seg, h->flags, h->info);
			DUMP_printk(" mmio-addr: %016llx\n",

			iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);

			ret = init_iommu_one(iommu, h);

static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
				u8 fxn, u64 *value, bool is_write);

static void init_iommu_perf_ctr(struct amd_iommu *iommu)
	struct pci_dev *pdev = iommu->dev;
	u64 val = 0xabcd, val2 = 0;

	if (!iommu_feature(iommu, FEATURE_PC))

	amd_iommu_pc_present = true;

	/* Check if the performance counters can be written to */
	if ((iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true)) ||
	    (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false)) ||
		pci_err(pdev, "Unable to write to IOMMU perf counter.\n");
		amd_iommu_pc_present = false;

	pci_info(pdev, "IOMMU performance counters supported\n");

	val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
	iommu->max_banks = (u8) ((val >> 12) & 0x3f);
	iommu->max_counters = (u8) ((val >> 7) & 0xf);

static ssize_t amd_iommu_show_cap(struct device *dev,
				  struct device_attribute *attr,
	struct amd_iommu *iommu = dev_to_amd_iommu(dev);
	return sprintf(buf, "%x\n", iommu->cap);
static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL);

static ssize_t amd_iommu_show_features(struct device *dev,
				       struct device_attribute *attr,
	struct amd_iommu *iommu = dev_to_amd_iommu(dev);
	return sprintf(buf, "%llx\n", iommu->features);
static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL);

static struct attribute *amd_iommu_attrs[] = {
	&dev_attr_features.attr,

static struct attribute_group amd_iommu_group = {
	.name = "amd-iommu",
	.attrs = amd_iommu_attrs,

static const struct attribute_group *amd_iommu_groups[] = {

static int __init iommu_init_pci(struct amd_iommu *iommu)
	int cap_ptr = iommu->cap_ptr;
	u32 range, misc, low, high;

	iommu->dev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(iommu->devid),
						 iommu->devid & 0xff);

	/* Prevent binding other PCI device drivers to IOMMU devices */
	iommu->dev->match_driver = false;

	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET,

	if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
		amd_iommu_iotlb_sup = false;

	/* read extended feature bits */
	low  = readl(iommu->mmio_base + MMIO_EXT_FEATURES);
	high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4);

	iommu->features = ((u64)high << 32) | low;

	if (iommu_feature(iommu, FEATURE_GT)) {
		pasmax = iommu->features & FEATURE_PASID_MASK;
		pasmax >>= FEATURE_PASID_SHIFT;
		max_pasid = (1 << (pasmax + 1)) - 1;
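		/* e.g. a pasmax of 15 yields a 16-bit PASID space, max 0xffff */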
		amd_iommu_max_pasid = min(amd_iommu_max_pasid, max_pasid);

		BUG_ON(amd_iommu_max_pasid & ~PASID_MASK);

		glxval   = iommu->features & FEATURE_GLXVAL_MASK;
		glxval >>= FEATURE_GLXVAL_SHIFT;

		if (amd_iommu_max_glx_val == -1)
			amd_iommu_max_glx_val = glxval;
			amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);

	if (iommu_feature(iommu, FEATURE_GT) &&
	    iommu_feature(iommu, FEATURE_PPR)) {
		iommu->is_iommu_v2   = true;
		amd_iommu_v2_present = true;

	if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu))

	ret = iommu_init_ga(iommu);

	if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
		amd_iommu_np_cache = true;

	init_iommu_perf_ctr(iommu);

	if (is_rd890_iommu(iommu->dev)) {
			pci_get_domain_bus_and_slot(0, iommu->dev->bus->number,

		/*
		 * Some rd890 systems may not be fully reconfigured by the
		 * BIOS, so it's necessary for us to store this information so
		 * it can be reprogrammed on resume
		 */
		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
				      &iommu->stored_addr_lo);
		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
				      &iommu->stored_addr_hi);

		/* Low bit locks writes to configuration space */
		iommu->stored_addr_lo &= ~1;

		for (i = 0; i < 6; i++)
			for (j = 0; j < 0x12; j++)
				iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);

		for (i = 0; i < 0x83; i++)
			iommu->stored_l2[i] = iommu_read_l2(iommu, i);

	amd_iommu_erratum_746_workaround(iommu);
	amd_iommu_ats_write_check_workaround(iommu);

	iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev,
			       amd_iommu_groups, "ivhd%d", iommu->index);
	iommu_device_set_ops(&iommu->iommu, &amd_iommu_ops);
	iommu_device_register(&iommu->iommu);

	return pci_enable_device(iommu->dev);

static void print_iommu_info(void)
	static const char * const feat_str[] = {
		"PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
		"IA", "GA", "HE", "PC"
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		struct pci_dev *pdev = iommu->dev;

		pci_info(pdev, "Found IOMMU cap 0x%hx\n", iommu->cap_ptr);

		if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
			pci_info(pdev, "Extended features (%#llx):\n",
			for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
				if (iommu_feature(iommu, (1ULL << i)))
					pr_cont(" %s", feat_str[i]);

			if (iommu->features & FEATURE_GAM_VAPIC)
				pr_cont(" GA_vAPIC");

	if (irq_remapping_enabled) {
		pr_info("Interrupt remapping enabled\n");
		if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
			pr_info("Virtual APIC enabled\n");
		if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
			pr_info("X2APIC enabled\n");

static int __init amd_iommu_init_pci(void)
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		ret = iommu_init_pci(iommu);

	/*
	 * Order is important here to make sure any unity map requirements are
	 * fulfilled. The unity mappings are created and written to the device
	 * table during the amd_iommu_init_api() call.
	 *
	 * After that we call init_device_table_dma() to make sure any
	 * uninitialized DTE will block DMA, and in the end we flush the caches
	 * of all IOMMUs to make sure the changes to the device table are
	 * active.
	 */
	ret = amd_iommu_init_api();

	init_device_table_dma();

	for_each_iommu(iommu)
		iommu_flush_all_caches(iommu);
/****************************************************************************
 *
 * The following functions initialize the MSI interrupts for all IOMMUs
 * in the system. It's a bit challenging because there could be multiple
 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
 * pci_dev.
 *
 ****************************************************************************/

static int iommu_setup_msi(struct amd_iommu *iommu)
	r = pci_enable_msi(iommu->dev);

	r = request_threaded_irq(iommu->dev->irq,
				 amd_iommu_int_handler,
				 amd_iommu_int_thread,

		pci_disable_msi(iommu->dev);

	iommu->int_enabled = true;

static int iommu_init_msi(struct amd_iommu *iommu)
	if (iommu->int_enabled)

	if (iommu->dev->msi_cap)
		ret = iommu_setup_msi(iommu);

	iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);

	if (iommu->ppr_log != NULL)
		iommu_feature_enable(iommu, CONTROL_PPFINT_EN);

	iommu_ga_log_enable(iommu);

/****************************************************************************
 *
 * The next functions belong to the third pass of parsing the ACPI
 * table. In this last pass the memory mapping requirements are
 * gathered (like exclusion and unity mapping ranges).
 *
 ****************************************************************************/

static void __init free_unity_maps(void)
	struct unity_map_entry *entry, *next;

	list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
		list_del(&entry->list);

/* called when we find an exclusion range definition in ACPI */
static int __init init_exclusion_range(struct ivmd_header *m)
	case ACPI_IVMD_TYPE:
		set_device_exclusion_range(m->devid, m);
	case ACPI_IVMD_TYPE_ALL:
		for (i = 0; i <= amd_iommu_last_bdf; ++i)
			set_device_exclusion_range(i, m);
	case ACPI_IVMD_TYPE_RANGE:
		for (i = m->devid; i <= m->aux; ++i)
			set_device_exclusion_range(i, m);

/* called for unity map ACPI definition */
static int __init init_unity_map_range(struct ivmd_header *m)
	struct unity_map_entry *e = NULL;

	e = kzalloc(sizeof(*e), GFP_KERNEL);

	if (m->flags & IVMD_FLAG_EXCL_RANGE)
		init_exclusion_range(m);

	case ACPI_IVMD_TYPE:
		s = "IVMD_TYPE\t\t\t";
		e->devid_start = e->devid_end = m->devid;
	case ACPI_IVMD_TYPE_ALL:
		s = "IVMD_TYPE_ALL\t\t";
		e->devid_end = amd_iommu_last_bdf;
	case ACPI_IVMD_TYPE_RANGE:
		s = "IVMD_TYPE_RANGE\t\t";
		e->devid_start = m->devid;
		e->devid_end = m->aux;

	e->address_start = PAGE_ALIGN(m->range_start);
	e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
	e->prot = m->flags >> 1;
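	/*
	 * The shift drops the unity-map flag (bit 0, IVMD_FLAG_UNITY_MAP),
	 * leaving the IVMD permission bits as the entry's protection value.
	 */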
	DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
		    " range_start: %016llx range_end: %016llx flags: %x\n", s,
		    PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start),
		    PCI_FUNC(e->devid_start), PCI_BUS_NUM(e->devid_end),
		    PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
		    e->address_start, e->address_end, m->flags);

	list_add_tail(&e->list, &amd_iommu_unity_map);

/* iterates over all memory definitions we find in the ACPI table */
static int __init init_memory_definitions(struct acpi_table_header *table)
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivmd_header *m;

	end += table->length;
	p += IVRS_HEADER_LENGTH;

		m = (struct ivmd_header *)p;
		if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE))
			init_unity_map_range(m);

/*
 * Init the device table to not allow DMA access for devices
 */
static void init_device_table_dma(void)
	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
		set_dev_entry_bit(devid, DEV_ENTRY_VALID);
		set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);

static void __init uninit_device_table_dma(void)
	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
		amd_iommu_dev_table[devid].data[0] = 0ULL;
		amd_iommu_dev_table[devid].data[1] = 0ULL;

static void init_device_table(void)
	if (!amd_iommu_irq_remap)

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
		set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN);

static void iommu_init_flags(struct amd_iommu *iommu)
	iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
		iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);

	iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
		iommu_feature_disable(iommu, CONTROL_PASSPW_EN);

	iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
		iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);

	iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
		iommu_feature_disable(iommu, CONTROL_ISOC_EN);

	/*
	 * make IOMMU memory accesses cache coherent
	 */
	iommu_feature_enable(iommu, CONTROL_COHERENT_EN);

	/* Set IOTLB invalidation timeout to 1s */
	iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
	u32 ioc_feature_control;
	struct pci_dev *pdev = iommu->root_pdev;

	/* RD890 BIOSes may not have completely reconfigured the iommu */
	if (!is_rd890_iommu(iommu->dev) || !pdev)

	/*
	 * First, we need to ensure that the iommu is enabled. This is
	 * controlled by a register in the northbridge
	 */

	/* Select Northbridge indirect register 0x75 and enable writing */
	pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
	pci_read_config_dword(pdev, 0x64, &ioc_feature_control);

	/* Enable the iommu */
	if (!(ioc_feature_control & 0x1))
		pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);

	/* Restore the iommu BAR */
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
			       iommu->stored_addr_lo);
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
			       iommu->stored_addr_hi);

	/* Restore the l1 indirect regs for each of the 6 l1s */
	for (i = 0; i < 6; i++)
		for (j = 0; j < 0x12; j++)
			iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);

	/* Restore the l2 indirect regs */
	for (i = 0; i < 0x83; i++)
		iommu_write_l2(iommu, i, iommu->stored_l2[i]);

	/* Lock PCI setup registers */
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
			       iommu->stored_addr_lo | 1);

static void iommu_enable_ga(struct amd_iommu *iommu)
#ifdef CONFIG_IRQ_REMAP
	switch (amd_iommu_guest_ir) {
	case AMD_IOMMU_GUEST_IR_VAPIC:
		iommu_feature_enable(iommu, CONTROL_GAM_EN);
	case AMD_IOMMU_GUEST_IR_LEGACY_GA:
		iommu_feature_enable(iommu, CONTROL_GA_EN);
		iommu->irte_ops = &irte_128_ops;
		iommu->irte_ops = &irte_32_ops;

static void early_enable_iommu(struct amd_iommu *iommu)
	iommu_disable(iommu);
	iommu_init_flags(iommu);
	iommu_set_device_table(iommu);
	iommu_enable_command_buffer(iommu);
	iommu_enable_event_buffer(iommu);
	iommu_set_exclusion_range(iommu);
	iommu_enable_ga(iommu);
	iommu_enable_xt(iommu);
	iommu_enable(iommu);
	iommu_flush_all_caches(iommu);

/*
 * This function finally enables all IOMMUs found in the system after
 * they have been initialized.
 *
 * Or, if this is a kdump kernel and the IOMMUs are all pre-enabled, it
 * tries to copy the old content of the device table entries. If that is
 * not the case or the copy fails, it just continues as a normal kernel
 * would.
 */
static void early_enable_iommus(void)
	struct amd_iommu *iommu;

	if (!copy_device_table()) {
		/*
		 * If we come here because of a failure to copy the device
		 * table from the old kernel with all IOMMUs enabled, print
		 * an error message and try to free the allocated
		 * old_dev_tbl_cpy.
		 */
		if (amd_iommu_pre_enabled)
			pr_err("Failed to copy DEV table from previous kernel.\n");
		if (old_dev_tbl_cpy != NULL)
			free_pages((unsigned long)old_dev_tbl_cpy,
				   get_order(dev_table_size));

		for_each_iommu(iommu) {
			clear_translation_pre_enabled(iommu);
			early_enable_iommu(iommu);

		pr_info("Copied DEV table from previous kernel.\n");
		free_pages((unsigned long)amd_iommu_dev_table,
			   get_order(dev_table_size));
		amd_iommu_dev_table = old_dev_tbl_cpy;
		for_each_iommu(iommu) {
			iommu_disable_command_buffer(iommu);
			iommu_disable_event_buffer(iommu);
			iommu_enable_command_buffer(iommu);
			iommu_enable_event_buffer(iommu);
			iommu_enable_ga(iommu);
			iommu_enable_xt(iommu);
			iommu_set_device_table(iommu);
			iommu_flush_all_caches(iommu);

#ifdef CONFIG_IRQ_REMAP
	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
		amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP);

static void enable_iommus_v2(void)
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		iommu_enable_ppr_log(iommu);
		iommu_enable_gt(iommu);

static void enable_iommus(void)
	early_enable_iommus();

static void disable_iommus(void)
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		iommu_disable(iommu);

#ifdef CONFIG_IRQ_REMAP
	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
		amd_iommu_irq_ops.capability &= ~(1 << IRQ_POSTING_CAP);

/*
 * Suspend/Resume support
 * disable suspend until real resume implemented
 */

static void amd_iommu_resume(void)
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		iommu_apply_resume_quirks(iommu);

	/* re-load the hardware */

	amd_iommu_enable_interrupts();

static int amd_iommu_suspend(void)
	/* disable IOMMUs to go out of the way for BIOS */

static struct syscore_ops amd_iommu_syscore_ops = {
	.suspend = amd_iommu_suspend,
	.resume = amd_iommu_resume,

static void __init free_iommu_resources(void)
	kmemleak_free(irq_lookup_table);
	free_pages((unsigned long)irq_lookup_table,
		   get_order(rlookup_table_size));
	irq_lookup_table = NULL;

	kmem_cache_destroy(amd_iommu_irq_cache);
	amd_iommu_irq_cache = NULL;

	free_pages((unsigned long)amd_iommu_rlookup_table,
		   get_order(rlookup_table_size));
	amd_iommu_rlookup_table = NULL;

	free_pages((unsigned long)amd_iommu_alias_table,
		   get_order(alias_table_size));
	amd_iommu_alias_table = NULL;

	free_pages((unsigned long)amd_iommu_dev_table,
		   get_order(dev_table_size));
	amd_iommu_dev_table = NULL;

/* SB IOAPIC is always on this device in AMD systems */
#define IOAPIC_SB_DEVID		((0x00 << 8) | PCI_DEVFN(0x14, 0))
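/* i.e. bus 0x00, device 0x14, function 0 -> device id 0x00a0 */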
static bool __init check_ioapic_information(void)
	const char *fw_bug = FW_BUG;
	bool ret, has_sb_ioapic;

	has_sb_ioapic = false;

	/*
	 * If we have map overrides on the kernel command line the
	 * messages in this function might not describe firmware bugs
	 * anymore - so be careful
	 */

	for (idx = 0; idx < nr_ioapics; idx++) {
		int devid, id = mpc_ioapic_id(idx);

		devid = get_ioapic_devid(id);
			pr_err("%s: IOAPIC[%d] not in IVRS table\n",
		} else if (devid == IOAPIC_SB_DEVID) {
			has_sb_ioapic = true;

	if (!has_sb_ioapic) {
		/*
		 * We expect the SB IOAPIC to be listed in the IVRS
		 * table. The system timer is connected to the SB IOAPIC
		 * and if we don't have it in the list the system will
		 * panic at boot time. This situation usually happens
		 * when the BIOS is buggy and provides us the wrong
		 * device id for the IOAPIC in the system.
		 */
		pr_err("%s: No southbridge IOAPIC found\n", fw_bug);

	pr_err("Disabling interrupt remapping\n");

static void __init free_dma_resources(void)
	free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
		   get_order(MAX_DOMAIN_ID/8));
	amd_iommu_pd_alloc_bitmap = NULL;
2395 * This is the hardware init function for AMD IOMMU in the system.
2396 * This function is called either from amd_iommu_init or from the interrupt
2397 * remapping setup code.
2399 * This function basically parses the ACPI table for AMD IOMMU (IVRS)
2402 * 1 pass) Discover the most comprehensive IVHD type to use.
2404 * 2 pass) Find the highest PCI device id the driver has to handle.
2405 * Upon this information the size of the data structures is
2406 * determined that needs to be allocated.
2408 * 3 pass) Initialize the data structures just allocated with the
2409 * information in the ACPI table about available AMD IOMMUs
2410 * in the system. It also maps the PCI devices in the
2411 * system to specific IOMMUs
2413 * 4 pass) After the basic data structures are allocated and
2414 * initialized we update them with information about memory
2415 * remapping requirements parsed out of the ACPI table in
2418 * After everything is set up the IOMMUs are enabled and the necessary
2419 * hotplug and suspend notifiers are registered.
static int __init early_amd_iommu_init(void)
{
	struct acpi_table_header *ivrs_base;
	acpi_status status;
	int i, remap_cache_sz, ret = 0;

	if (!amd_iommu_detected)
		return -ENODEV;

	status = acpi_get_table("IVRS", 0, &ivrs_base);
	if (status == AE_NOT_FOUND)
		return -ENODEV;
	else if (ACPI_FAILURE(status)) {
		const char *err = acpi_format_exception(status);
		pr_err("IVRS table error: %s\n", err);
		return -EINVAL;
	}

	/*
	 * Validate checksum here so we don't need to do it when
	 * we actually parse the table
	 */
	ret = check_ivrs_checksum(ivrs_base);
	if (ret)
		goto out;

	amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
	DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);

	/*
	 * First parse ACPI tables to find the largest Bus/Dev/Func
	 * we need to handle. Upon this information the shared data
	 * structures for the IOMMUs in the system will be allocated
	 */
	ret = find_last_devid_acpi(ivrs_base);
	if (ret)
		goto out;

	dev_table_size     = tbl_size(DEV_TABLE_ENTRY_SIZE);
	alias_table_size   = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
	rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);
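	/*
	 * Sizing example: tbl_size() rounds (amd_iommu_last_bdf + 1) *
	 * entry_size up to a power-of-two allocation, so with a
	 * worst-case last bdf of 0xffff the device table (32-byte
	 * entries) comes to 2MB; the alias (u16) and rlookup
	 * (pointer-sized) tables scale down accordingly.
	 */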
	/* Device table - directly used by all IOMMUs */
	ret = -ENOMEM;
	amd_iommu_dev_table = (void *)__get_free_pages(
				      GFP_KERNEL | __GFP_ZERO | GFP_DMA32,
				      get_order(dev_table_size));
	if (amd_iommu_dev_table == NULL)
		goto out;

	/*
	 * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the
	 * IOMMU see for that device
	 */
	amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
						  get_order(alias_table_size));
	if (amd_iommu_alias_table == NULL)
		goto out;

	/* IOMMU rlookup table - find the IOMMU for a specific device */
	amd_iommu_rlookup_table = (void *)__get_free_pages(
				      GFP_KERNEL | __GFP_ZERO,
				      get_order(rlookup_table_size));
	if (amd_iommu_rlookup_table == NULL)
		goto out;

	amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
					    GFP_KERNEL | __GFP_ZERO,
					    get_order(MAX_DOMAIN_ID/8));
	if (amd_iommu_pd_alloc_bitmap == NULL)
		goto out;

	/*
	 * let all alias entries point to itself
	 */
	for (i = 0; i <= amd_iommu_last_bdf; ++i)
		amd_iommu_alias_table[i] = i;

	/*
	 * never allocate domain 0 because its used as the non-allocated and
	 * error value placeholder
	 */
	__set_bit(0, amd_iommu_pd_alloc_bitmap);
	/*
	 * now the data structures are allocated and basically initialized
	 * start the real acpi table scan
	 */
	ret = init_iommu_all(ivrs_base);
	if (ret)
		goto out;

	/* Disable any previously enabled IOMMUs */
	if (!is_kdump_kernel() || amd_iommu_disabled)
		disable_iommus();

	if (amd_iommu_irq_remap)
		amd_iommu_irq_remap = check_ioapic_information();
	if (amd_iommu_irq_remap) {
		/*
		 * Interrupt remapping enabled, create kmem_cache for the
		 * remapping tables.
		 */
		ret = -ENOMEM;
		if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
			remap_cache_sz = MAX_IRQS_PER_TABLE * sizeof(u32);
		else
			remap_cache_sz = MAX_IRQS_PER_TABLE * (sizeof(u64) * 2);
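		/*
		 * For example, with MAX_IRQS_PER_TABLE == 256 this is a
		 * 1KB object for the 32-bit legacy IRTEs and a 4KB
		 * object for the 128-bit IRTEs used in GA mode.
		 */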
		amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
							remap_cache_sz,
							IRQ_TABLE_ALIGNMENT,
							0, NULL);
		if (!amd_iommu_irq_cache)
			goto out;

		irq_lookup_table = (void *)__get_free_pages(
				GFP_KERNEL | __GFP_ZERO,
				get_order(rlookup_table_size));
		kmemleak_alloc(irq_lookup_table, rlookup_table_size,
			       1, GFP_KERNEL);
		if (!irq_lookup_table)
			goto out;
	}
	ret = init_memory_definitions(ivrs_base);
	if (ret)
		goto out;

	/* init the device table */
	init_device_table();

out:
	/* Don't leak any ACPI memory */
	acpi_put_table(ivrs_base);
	ivrs_base = NULL;

	return ret;
}
static int amd_iommu_enable_interrupts(void)
{
	struct amd_iommu *iommu;
	int ret = 0;

	for_each_iommu(iommu) {
		ret = iommu_init_msi(iommu);
		if (ret)
			goto out;
	}

out:
	return ret;
}
static bool detect_ivrs(void)
{
	struct acpi_table_header *ivrs_base;
	acpi_status status;

	status = acpi_get_table("IVRS", 0, &ivrs_base);
	if (status == AE_NOT_FOUND)
		return false;
	else if (ACPI_FAILURE(status)) {
		const char *err = acpi_format_exception(status);
		pr_err("IVRS table error: %s\n", err);
		return false;
	}

	acpi_put_table(ivrs_base);

	/* Make sure ACS will be enabled during PCI probe */
	pci_request_acs();

	return true;
}
/****************************************************************************
 *
 * AMD IOMMU Initialization State Machine
 *
 ****************************************************************************/
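/*
 * On the success path the state machine below advances through:
 *
 *   IOMMU_START_STATE -> IOMMU_IVRS_DETECTED -> IOMMU_ACPI_FINISHED ->
 *   IOMMU_ENABLED -> IOMMU_PCI_INIT -> IOMMU_INTERRUPTS_EN ->
 *   IOMMU_DMA_OPS -> IOMMU_INITIALIZED
 *
 * Any failure parks it in one of the terminal states IOMMU_NOT_FOUND,
 * IOMMU_INIT_ERROR or IOMMU_CMDLINE_DISABLED.
 */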
static int __init state_next(void)
{
	int ret = 0;

	switch (init_state) {
	case IOMMU_START_STATE:
		if (!detect_ivrs()) {
			init_state	= IOMMU_NOT_FOUND;
			ret		= -ENODEV;
		} else {
			init_state	= IOMMU_IVRS_DETECTED;
		}
		break;
	case IOMMU_IVRS_DETECTED:
		ret = early_amd_iommu_init();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
		if (init_state == IOMMU_ACPI_FINISHED && amd_iommu_disabled) {
			pr_info("AMD IOMMU disabled on kernel command-line\n");
			init_state = IOMMU_CMDLINE_DISABLED;
			ret = -EINVAL;
		}
		break;
	case IOMMU_ACPI_FINISHED:
		early_enable_iommus();
		x86_platform.iommu_shutdown = disable_iommus;
		init_state = IOMMU_ENABLED;
		break;
	case IOMMU_ENABLED:
		register_syscore_ops(&amd_iommu_syscore_ops);
		ret = amd_iommu_init_pci();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
		break;
	case IOMMU_PCI_INIT:
		ret = amd_iommu_enable_interrupts();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
		break;
	case IOMMU_INTERRUPTS_EN:
		ret = amd_iommu_init_dma_ops();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS;
		break;
	case IOMMU_DMA_OPS:
		init_state = IOMMU_INITIALIZED;
		break;
	case IOMMU_INITIALIZED:
		/* Nothing to do */
		break;
	case IOMMU_NOT_FOUND:
	case IOMMU_INIT_ERROR:
	case IOMMU_CMDLINE_DISABLED:
		/* Error states => do nothing */
		ret = -EINVAL;
		break;
	default:
		/* Unknown state */
		BUG();
	}

	if (ret) {
		free_dma_resources();
		if (!irq_remapping_enabled) {
			disable_iommus();
			free_iommu_resources();
		} else {
			struct amd_iommu *iommu;

			uninit_device_table_dma();
			for_each_iommu(iommu)
				iommu_flush_all_caches(iommu);
		}
	}
	return ret;
}
static int __init iommu_go_to_state(enum iommu_init_state state)
{
	int ret = -EINVAL;

	while (init_state != state) {
		if (init_state == IOMMU_NOT_FOUND         ||
		    init_state == IOMMU_INIT_ERROR        ||
		    init_state == IOMMU_CMDLINE_DISABLED)
			break;
		ret = state_next();
	}

	return ret;
}
#ifdef CONFIG_IRQ_REMAP
int __init amd_iommu_prepare(void)
{
	int ret;

	amd_iommu_irq_remap = true;

	ret = iommu_go_to_state(IOMMU_ACPI_FINISHED);
	if (ret)
		return ret;
	return amd_iommu_irq_remap ? 0 : -ENODEV;
}
int __init amd_iommu_enable(void)
{
	int ret;

	ret = iommu_go_to_state(IOMMU_ENABLED);
	if (ret)
		return ret;

	irq_remapping_enabled = 1;
	return amd_iommu_xt_mode;
}
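/*
 * Note: the mode returned by amd_iommu_enable() above
 * (amd_iommu_xt_mode, IRQ_REMAP_X2APIC_MODE by default) is presumably
 * what the generic irq-remapping layer uses to decide whether x2APIC
 * destination mode can be enabled; it may have been downgraded to
 * xAPIC mode during hardware feature detection.
 */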
void amd_iommu_disable(void)
{
	amd_iommu_suspend();
}

int amd_iommu_reenable(int mode)
{
	amd_iommu_resume();

	return 0;
}

int __init amd_iommu_enable_faulting(void)
{
	/* We enable MSI later when PCI is initialized */
	return 0;
}
#endif
/*
 * This is the core init function for AMD IOMMU hardware in the system.
 * This function is called from the generic x86 DMA layer initialization
 * code.
 */
static int __init amd_iommu_init(void)
{
	struct amd_iommu *iommu;
	int ret;

	ret = iommu_go_to_state(IOMMU_INITIALIZED);
#ifdef CONFIG_GART_IOMMU
	if (ret && list_empty(&amd_iommu_list)) {
		/*
		 * We failed to initialize the AMD IOMMU - try fallback
		 * to GART if possible.
		 */
		gart_iommu_init();
	}
#endif

	for_each_iommu(iommu)
		amd_iommu_debugfs_setup(iommu);

	return ret;
}
static bool amd_iommu_sme_check(void)
{
	if (!sme_active() || (boot_cpu_data.x86 != 0x17))
		return true;

	/* For Fam17h, a specific level of support is required */
	if (boot_cpu_data.microcode >= 0x08001205)
		return true;

	if ((boot_cpu_data.microcode >= 0x08001126) &&
	    (boot_cpu_data.microcode <= 0x080011ff))
		return true;

	pr_notice("IOMMU not currently supported when SME is active\n");

	return false;
}
/****************************************************************************
 *
 * Early detect code. This code runs at IOMMU detection time in the DMA
 * layer. It just looks if there is an IVRS ACPI table to detect AMD
 * IOMMUs
 *
 ****************************************************************************/
int __init amd_iommu_detect(void)
{
	int ret;

	if (no_iommu || (iommu_detected && !gart_iommu_aperture))
		return -ENODEV;

	if (!amd_iommu_sme_check())
		return -ENODEV;

	ret = iommu_go_to_state(IOMMU_IVRS_DETECTED);
	if (ret)
		return ret;

	amd_iommu_detected = true;
	iommu_detected = 1;
	x86_init.iommu.iommu_init = amd_iommu_init;

	return 1;
}
/****************************************************************************
 *
 * Parsing functions for the AMD IOMMU specific kernel command line
 * options.
 *
 ****************************************************************************/
static int __init parse_amd_iommu_dump(char *str)
{
	amd_iommu_dump = true;

	return 1;
}
static int __init parse_amd_iommu_intr(char *str)
{
	for (; *str; ++str) {
		if (strncmp(str, "legacy", 6) == 0) {
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
			break;
		}
		if (strncmp(str, "vapic", 5) == 0) {
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
			break;
		}
	}
	return 1;
}
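/*
 * Usage, e.g. on the kernel command line: "amd_iommu_intr=vapic"
 * selects guest virtual APIC interrupt routing, while
 * "amd_iommu_intr=legacy" falls back to classic interrupt remapping.
 */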
static int __init parse_amd_iommu_options(char *str)
{
	for (; *str; ++str) {
		if (strncmp(str, "fullflush", 9) == 0)
			amd_iommu_unmap_flush = true;
		if (strncmp(str, "off", 3) == 0)
			amd_iommu_disabled = true;
		if (strncmp(str, "force_isolation", 15) == 0)
			amd_iommu_force_isolation = true;
	}

	return 1;
}
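/*
 * Usage, e.g.: "amd_iommu=off" disables the driver entirely,
 * "amd_iommu=fullflush" flushes the IO/TLB on every unmap (see
 * amd_iommu_unmap_flush), and "amd_iommu=force_isolation" forces
 * per-device isolation.
 */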
static int __init parse_ivrs_ioapic(char *str)
{
	unsigned int bus, dev, fn;
	int ret, id, i;
	u16 devid;

	ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);

	if (ret != 4) {
		pr_err("Invalid command line: ivrs_ioapic%s\n", str);
		return 1;
	}

	if (early_ioapic_map_size == EARLY_MAP_SIZE) {
		pr_err("Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n",
			str);
		return 1;
	}

	devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);

	cmdline_maps			= true;
	i				= early_ioapic_map_size++;
	early_ioapic_map[i].id		= id;
	early_ioapic_map[i].devid	= devid;
	early_ioapic_map[i].cmd_line	= true;

	return 1;
}
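/*
 * For example, booting with "ivrs_ioapic[10]=00:14.0" overrides the
 * IVRS table and maps IOAPIC id 10 to the device at PCI 00:14.0.
 */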
static int __init parse_ivrs_hpet(char *str)
{
	unsigned int bus, dev, fn;
	int ret, id, i;
	u16 devid;

	ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);

	if (ret != 4) {
		pr_err("Invalid command line: ivrs_hpet%s\n", str);
		return 1;
	}

	if (early_hpet_map_size == EARLY_MAP_SIZE) {
		pr_err("Early HPET map overflow - ignoring ivrs_hpet%s\n",
			str);
		return 1;
	}

	devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);

	cmdline_maps			= true;
	i				= early_hpet_map_size++;
	early_hpet_map[i].id		= id;
	early_hpet_map[i].devid		= devid;
	early_hpet_map[i].cmd_line	= true;

	return 1;
}
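/*
 * For example, "ivrs_hpet[0]=00:14.0" maps HPET id 0 to the device at
 * PCI 00:14.0.
 */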
static int __init parse_ivrs_acpihid(char *str)
{
	u32 bus, dev, fn;
	char *hid, *uid, *p;
	char acpiid[ACPIHID_UID_LEN + ACPIHID_HID_LEN] = {0};
	int ret, i;

	ret = sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid);
	if (ret != 4) {
		pr_err("Invalid command line: ivrs_acpihid(%s)\n", str);
		return 1;
	}

	p = acpiid;
	hid = strsep(&p, ":");
	uid = p;

	if (!hid || !(*hid) || !uid) {
		pr_err("Invalid command line: hid or uid\n");
		return 1;
	}

	i = early_acpihid_map_size++;
	memcpy(early_acpihid_map[i].hid, hid, strlen(hid));
	memcpy(early_acpihid_map[i].uid, uid, strlen(uid));
	early_acpihid_map[i].devid =
		((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
	early_acpihid_map[i].cmd_line = true;

	return 1;
}
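/*
 * For example, "ivrs_acpihid[00:14.5]=AMD0020:0" maps the ACPI device
 * with HID "AMD0020" and UID "0" to the device at PCI 00:14.5.
 */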
__setup("amd_iommu_dump",	parse_amd_iommu_dump);
__setup("amd_iommu=",		parse_amd_iommu_options);
__setup("amd_iommu_intr=",	parse_amd_iommu_intr);
__setup("ivrs_ioapic",		parse_ivrs_ioapic);
__setup("ivrs_hpet",		parse_ivrs_hpet);
__setup("ivrs_acpihid",		parse_ivrs_acpihid);
IOMMU_INIT_FINISH(amd_iommu_detect,
		  gart_iommu_hole_init,
		  NULL,
		  NULL);
bool amd_iommu_v2_supported(void)
{
	return amd_iommu_v2_present;
}
EXPORT_SYMBOL(amd_iommu_v2_supported);
struct amd_iommu *get_amd_iommu(unsigned int idx)
{
	unsigned int i = 0;
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		if (i++ == idx)
			return iommu;
	return NULL;
}
EXPORT_SYMBOL(get_amd_iommu);
/****************************************************************************
 *
 * IOMMU EFR Performance Counter support functionality. This code allows
 * access to the IOMMU PC functionality.
 *
 ****************************************************************************/
u8 amd_iommu_pc_get_max_banks(unsigned int idx)
{
	struct amd_iommu *iommu = get_amd_iommu(idx);

	if (iommu)
		return iommu->max_banks;

	return 0;
}
EXPORT_SYMBOL(amd_iommu_pc_get_max_banks);
bool amd_iommu_pc_supported(void)
{
	return amd_iommu_pc_present;
}
EXPORT_SYMBOL(amd_iommu_pc_supported);
u8 amd_iommu_pc_get_max_counters(unsigned int idx)
{
	struct amd_iommu *iommu = get_amd_iommu(idx);

	if (iommu)
		return iommu->max_counters;

	return 0;
}
EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);
static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
				u8 fxn, u64 *value, bool is_write)
{
	u32 offset;
	u32 max_offset_lim;

	/* Make sure the IOMMU PC resource is available */
	if (!amd_iommu_pc_present)
		return -ENODEV;

	/* Check for valid iommu and pc register indexing */
	if (WARN_ON(!iommu || (fxn > 0x28) || (fxn & 7)))
		return -ENODEV;

	offset = (u32)(((0x40 | bank) << 12) | (cntr << 8) | fxn);
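	/*
	 * For example, bank 0 / counter 0 / function 0 yields offset
	 * (0x40 << 12) == 0x40000, i.e. MMIO_CNTR_REG_OFFSET, the base
	 * of the counter register aperture.
	 */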
	/* Limit the offset to the hw defined mmio region aperture */
	max_offset_lim = (u32)(((0x40 | iommu->max_banks) << 12) |
			       (iommu->max_counters << 8) | 0x28);
	if ((offset < MMIO_CNTR_REG_OFFSET) ||
	    (offset > max_offset_lim))
		return -EINVAL;

	if (is_write) {
		u64 val = *value & GENMASK_ULL(47, 0);

		writel((u32)val, iommu->mmio_base + offset);
		writel((val >> 32), iommu->mmio_base + offset + 4);
	} else {
		*value = (u64)readl(iommu->mmio_base + offset + 4) << 32;
		*value |= readl(iommu->mmio_base + offset);
		*value &= GENMASK_ULL(47, 0);
	}

	return 0;
}
int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
{
	if (!iommu)
		return -EINVAL;

	return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, false);
}
EXPORT_SYMBOL(amd_iommu_pc_get_reg);
int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
{
	if (!iommu)
		return -EINVAL;

	return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true);
}
EXPORT_SYMBOL(amd_iommu_pc_set_reg);
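/*
 * Illustrative sketch (not part of the driver): a caller such as a
 * perf PMU backend could read the raw 48-bit counter 0 of bank 0 on
 * the first IOMMU roughly like this, assuming the PC feature was
 * detected at init time:
 *
 *	u64 val;
 *	struct amd_iommu *iommu = get_amd_iommu(0);
 *
 *	if (iommu && !amd_iommu_pc_get_reg(iommu, 0, 0, 0, &val))
 *		pr_info("IOMMU PC bank0/cntr0: 0x%016llx\n", val);
 */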