Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
author Linus Torvalds <torvalds@linux-foundation.org>
Sat, 20 Jan 2018 19:41:09 +0000 (11:41 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sat, 20 Jan 2018 19:41:09 +0000 (11:41 -0800)
Pull KVM fixes from Radim Krčmář:
 "ARM:
   - fix incorrect huge page mappings on systems using the contiguous
     hint for hugetlbfs
   - support alternative GICv4 init sequence
   - correctly implement the ARM SMCCC for HVC and SMC handling

  PPC:
   - add KVM IOCTL for reporting vulnerability and workaround status

  s390:
   - provide userspace interface for branch prediction changes in
     firmware

  x86:
   - use correct macros for bits"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: s390: wire up bpb feature
  KVM: PPC: Book3S: Provide information about hardware/firmware CVE workarounds
  KVM/x86: Fix wrong macro references of X86_CR0_PG_BIT and X86_CR4_PAE_BIT in kvm_valid_sregs()
  arm64: KVM: Fix SMCCC handling of unimplemented SMC/HVC calls
  KVM: arm64: Fix GICv4 init when called from vgic_its_create
  KVM: arm/arm64: Check pagesize when allocating a hugepage at Stage 2

13 files changed:
Documentation/virtual/kvm/api.txt
arch/arm64/kvm/handle_exit.c
arch/powerpc/include/uapi/asm/kvm.h
arch/powerpc/kvm/powerpc.c
arch/s390/include/asm/kvm_host.h
arch/s390/include/uapi/asm/kvm.h
arch/s390/kvm/kvm-s390.c
arch/s390/kvm/vsie.c
arch/x86/kvm/x86.c
include/uapi/linux/kvm.h
virt/kvm/arm/mmu.c
virt/kvm/arm/vgic/vgic-init.c
virt/kvm/arm/vgic/vgic-v4.c

diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 57d3ee9e4bde2a799715ca75871fd61b27858b0a..fc3ae951bc07e3d9f28553ece2bb1e62549b0155 100644
@@ -3403,6 +3403,52 @@ invalid, if invalid pages are written to (e.g. after the end of memory)
 or if no page table is present for the addresses (e.g. when using
 hugepages).
 
+4.108 KVM_PPC_GET_CPU_CHAR
+
+Capability: KVM_CAP_PPC_GET_CPU_CHAR
+Architectures: powerpc
+Type: vm ioctl
+Parameters: struct kvm_ppc_cpu_char (out)
+Returns: 0 on successful completion
+        -EFAULT if struct kvm_ppc_cpu_char cannot be written
+
+This ioctl gives userspace information about certain characteristics
+of the CPU relating to speculative execution of instructions and
+possible information leakage resulting from speculative execution (see
+CVE-2017-5715, CVE-2017-5753 and CVE-2017-5754).  The information is
+returned in struct kvm_ppc_cpu_char, which looks like this:
+
+struct kvm_ppc_cpu_char {
+       __u64   character;              /* characteristics of the CPU */
+       __u64   behaviour;              /* recommended software behaviour */
+       __u64   character_mask;         /* valid bits in character */
+       __u64   behaviour_mask;         /* valid bits in behaviour */
+};
+
+For extensibility, the character_mask and behaviour_mask fields
+indicate which bits of character and behaviour have been filled in by
+the kernel.  If the set of defined bits is extended in future then
+userspace will be able to tell whether it is running on a kernel that
+knows about the new bits.
+
+The character field describes attributes of the CPU which can help
+with preventing inadvertent information disclosure - specifically,
+whether there is an instruction to flash-invalidate the L1 data cache
+(ori 30,30,0 or mtspr SPRN_TRIG2,rN), whether the L1 data cache is set
+to a mode where entries can only be used by the thread that created
+them, whether the bcctr[l] instruction prevents speculation, and
+whether a speculation barrier instruction (ori 31,31,0) is provided.
+
+The behaviour field describes actions that software should take to
+prevent inadvertent information disclosure, and thus describes which
+vulnerabilities the hardware is subject to; specifically whether the
+L1 data cache should be flushed when returning to user mode from the
+kernel, and whether a speculation barrier should be placed between an
+array bounds check and the array access.
+
+These fields use the same bit definitions as the new
+H_GET_CPU_CHARACTERISTICS hypercall.
+
 5. The kvm_run structure
 ------------------------
 
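As a usage sketch (not part of this commit), userspace could exercise the new ioctl on a VM file descriptor roughly as follows; vm_fd is assumed to come from the usual KVM_CREATE_VM setup, and the capability should first be probed with KVM_CHECK_EXTENSION(KVM_CAP_PPC_GET_CPU_CHAR):

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: query speculation-related CPU characteristics for a VM.
 * Only bits set in character_mask/behaviour_mask are meaningful. */
static int query_cpu_char(int vm_fd)
{
	struct kvm_ppc_cpu_char cc;

	memset(&cc, 0, sizeof(cc));
	if (ioctl(vm_fd, KVM_PPC_GET_CPU_CHAR, &cc) < 0)
		return -1;

	if (cc.character_mask & cc.character & KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31)
		printf("ori 31,31,0 speculation barrier available\n");

	if (cc.behaviour_mask & cc.behaviour & KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR)
		printf("flush L1 D-cache when returning to user mode\n");

	return 0;
}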
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 304203fa9e3307583748de10c6c1f7425a231c7b..e60494f1eef9d5970e1ee5bda1daef2c58321670 100644
@@ -45,7 +45,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
 
        ret = kvm_psci_call(vcpu);
        if (ret < 0) {
-               kvm_inject_undefined(vcpu);
+               vcpu_set_reg(vcpu, 0, ~0UL);
                return 1;
        }
 
@@ -54,7 +54,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
 
 static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
-       kvm_inject_undefined(vcpu);
+       vcpu_set_reg(vcpu, 0, ~0UL);
        return 1;
 }
 
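The SMC Calling Convention reserves -1 as the "not supported" return value, so with this change a guest that issues an unimplemented HVC or SMC sees the call complete with -1 in x0 instead of taking an undefined-instruction exception. A hypothetical guest-side probe (a sketch, not part of this commit) would observe:

/* Sketch: issue an HVC with an unimplemented function ID and return
 * the SMCCC result register; -1 means "not supported". */
static inline long hvc_probe(unsigned int fn_id)
{
	register unsigned long x0 asm("x0") = fn_id;

	asm volatile("hvc #0"
		     : "+r" (x0)
		     :
		     : "x1", "x2", "x3", "memory");
	return (long)x0;
}
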
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index 61d6049f4c1eced70ad3fef32f99b3a438da3eab..637b7263cb867f09618cc2a5e7b525686a0ea267 100644
@@ -443,6 +443,31 @@ struct kvm_ppc_rmmu_info {
        __u32   ap_encodings[8];
 };
 
+/* For KVM_PPC_GET_CPU_CHAR */
+struct kvm_ppc_cpu_char {
+       __u64   character;              /* characteristics of the CPU */
+       __u64   behaviour;              /* recommended software behaviour */
+       __u64   character_mask;         /* valid bits in character */
+       __u64   behaviour_mask;         /* valid bits in behaviour */
+};
+
+/*
+ * Values for character and character_mask.
+ * These are identical to the values used by H_GET_CPU_CHARACTERISTICS.
+ */
+#define KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31                (1ULL << 63)
+#define KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED     (1ULL << 62)
+#define KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30       (1ULL << 61)
+#define KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2       (1ULL << 60)
+#define KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV       (1ULL << 59)
+#define KVM_PPC_CPU_CHAR_BR_HINT_HONOURED      (1ULL << 58)
+#define KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF     (1ULL << 57)
+#define KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS       (1ULL << 56)
+
+#define KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY      (1ULL << 63)
+#define KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR         (1ULL << 62)
+#define KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR    (1ULL << 61)
+
 /* Per-vcpu XICS interrupt controller state */
 #define KVM_REG_PPC_ICP_STATE  (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x8c)
 
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 1915e86cef6f8fc2e05852ddc7a0867eca1c560b..0a7c88786ec0e5d0a1ca07f2e9dc07502ea344b0 100644
 #include <asm/iommu.h>
 #include <asm/switch_to.h>
 #include <asm/xive.h>
+#ifdef CONFIG_PPC_PSERIES
+#include <asm/hvcall.h>
+#include <asm/plpar_wrappers.h>
+#endif
 
 #include "timing.h"
 #include "irq.h"
@@ -548,6 +552,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 #ifdef CONFIG_KVM_XICS
        case KVM_CAP_IRQ_XICS:
 #endif
+       case KVM_CAP_PPC_GET_CPU_CHAR:
                r = 1;
                break;
 
@@ -1759,6 +1764,124 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
        return r;
 }
 
+#ifdef CONFIG_PPC_BOOK3S_64
+/*
+ * These functions check whether the underlying hardware is safe
+ * against attacks based on observing the effects of speculatively
+ * executed instructions, and whether it supplies instructions for
+ * use in workarounds.  The information comes from firmware, either
+ * via the device tree on powernv platforms or from an hcall on
+ * pseries platforms.
+ */
+#ifdef CONFIG_PPC_PSERIES
+static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
+{
+       struct h_cpu_char_result c;
+       unsigned long rc;
+
+       if (!machine_is(pseries))
+               return -ENOTTY;
+
+       rc = plpar_get_cpu_characteristics(&c);
+       if (rc == H_SUCCESS) {
+               cp->character = c.character;
+               cp->behaviour = c.behaviour;
+               cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
+                       KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
+                       KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
+                       KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
+                       KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
+                       KVM_PPC_CPU_CHAR_BR_HINT_HONOURED |
+                       KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF |
+                       KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
+               cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
+                       KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
+                       KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
+       }
+       return 0;
+}
+#else
+static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
+{
+       return -ENOTTY;
+}
+#endif
+
+static inline bool have_fw_feat(struct device_node *fw_features,
+                               const char *state, const char *name)
+{
+       struct device_node *np;
+       bool r = false;
+
+       np = of_get_child_by_name(fw_features, name);
+       if (np) {
+               r = of_property_read_bool(np, state);
+               of_node_put(np);
+       }
+       return r;
+}
+
+static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp)
+{
+       struct device_node *np, *fw_features;
+       int r;
+
+       memset(cp, 0, sizeof(*cp));
+       r = pseries_get_cpu_char(cp);
+       if (r != -ENOTTY)
+               return r;
+
+       np = of_find_node_by_name(NULL, "ibm,opal");
+       if (np) {
+               fw_features = of_get_child_by_name(np, "fw-features");
+               of_node_put(np);
+               if (!fw_features)
+                       return 0;
+               if (have_fw_feat(fw_features, "enabled",
+                                "inst-spec-barrier-ori31,31,0"))
+                       cp->character |= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31;
+               if (have_fw_feat(fw_features, "enabled",
+                                "fw-bcctrl-serialized"))
+                       cp->character |= KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED;
+               if (have_fw_feat(fw_features, "enabled",
+                                "inst-l1d-flush-ori30,30,0"))
+                       cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30;
+               if (have_fw_feat(fw_features, "enabled",
+                                "inst-l1d-flush-trig2"))
+                       cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2;
+               if (have_fw_feat(fw_features, "enabled",
+                                "fw-l1d-thread-split"))
+                       cp->character |= KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV;
+               if (have_fw_feat(fw_features, "enabled",
+                                "fw-count-cache-disabled"))
+                       cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
+               cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
+                       KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
+                       KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
+                       KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
+                       KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
+                       KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
+
+               if (have_fw_feat(fw_features, "enabled",
+                                "speculation-policy-favor-security"))
+                       cp->behaviour |= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY;
+               if (!have_fw_feat(fw_features, "disabled",
+                                 "needs-l1d-flush-msr-pr-0-to-1"))
+                       cp->behaviour |= KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR;
+               if (!have_fw_feat(fw_features, "disabled",
+                                 "needs-spec-barrier-for-bound-checks"))
+                       cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
+               cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
+                       KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
+                       KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
+
+               of_node_put(fw_features);
+       }
+
+       return 0;
+}
+#endif
+
 long kvm_arch_vm_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
 {
@@ -1861,6 +1984,14 @@ long kvm_arch_vm_ioctl(struct file *filp,
                        r = -EFAULT;
                break;
        }
+       case KVM_PPC_GET_CPU_CHAR: {
+               struct kvm_ppc_cpu_char cpuchar;
+
+               r = kvmppc_get_cpu_char(&cpuchar);
+               if (r >= 0 && copy_to_user(argp, &cpuchar, sizeof(cpuchar)))
+                       r = -EFAULT;
+               break;
+       }
        default: {
                struct kvm *kvm = filp->private_data;
                r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index e14f381757f67b6c0111c78c491c2c1078a7f177..c1b0a9ac1dc81d2f83ba3747ebf44a0e06df260f 100644
@@ -207,7 +207,8 @@ struct kvm_s390_sie_block {
        __u16   ipa;                    /* 0x0056 */
        __u32   ipb;                    /* 0x0058 */
        __u32   scaoh;                  /* 0x005c */
-       __u8    reserved60;             /* 0x0060 */
+#define FPF_BPBC       0x20
+       __u8    fpf;                    /* 0x0060 */
 #define ECB_GS         0x40
 #define ECB_TE         0x10
 #define ECB_SRSI       0x04
diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h
index 38535a57fef8327c3b08bf20e1f8621fd93c776a..4cdaa55fabfe2658e054b5aaf1455b6f0556e584 100644
@@ -224,6 +224,7 @@ struct kvm_guest_debug_arch {
 #define KVM_SYNC_RICCB  (1UL << 7)
 #define KVM_SYNC_FPRS   (1UL << 8)
 #define KVM_SYNC_GSCB   (1UL << 9)
+#define KVM_SYNC_BPBC   (1UL << 10)
 /* length and alignment of the sdnx as a power of two */
 #define SDNXC 8
 #define SDNXL (1UL << SDNXC)
@@ -247,7 +248,9 @@ struct kvm_sync_regs {
        };
        __u8  reserved[512];    /* for future vector expansion */
        __u32 fpc;              /* valid on KVM_SYNC_VRS or KVM_SYNC_FPRS */
-       __u8 padding1[52];      /* riccb needs to be 64byte aligned */
+       __u8 bpbc : 1;          /* bp mode */
+       __u8 reserved2 : 7;
+       __u8 padding1[51];      /* riccb needs to be 64byte aligned */
        __u8 riccb[64];         /* runtime instrumentation controls block */
        __u8 padding2[192];     /* sdnx needs to be 256byte aligned */
        union {
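
A userspace sketch (not part of this commit) of how the new sync-reg bit would be used: after KVM_CHECK_EXTENSION reports KVM_CAP_S390_BPB, set bpbc in the vcpu's mmap'ed kvm_run area and mark it dirty before the next KVM_RUN; "run" below is assumed to be that mapping.

#include <linux/kvm.h>

/* Sketch: request a branch-prediction behaviour change for a vcpu.
 * The kernel applies the bit on the next KVM_RUN when facility 82
 * is available. */
static void set_guest_bpbc(struct kvm_run *run, int enable)
{
	run->s.regs.bpbc = enable ? 1 : 0;
	run->kvm_dirty_regs |= KVM_SYNC_BPBC;
}
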
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 2c93cbbcd15e35a389078643874bf424160f25db..2598cf243b86e08f21ccb6f9ae6ea2e28465d680 100644
@@ -421,6 +421,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
        case KVM_CAP_S390_GS:
                r = test_facility(133);
                break;
+       case KVM_CAP_S390_BPB:
+               r = test_facility(82);
+               break;
        default:
                r = 0;
        }
@@ -2198,6 +2201,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
        kvm_s390_set_prefix(vcpu, 0);
        if (test_kvm_facility(vcpu->kvm, 64))
                vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
+       if (test_kvm_facility(vcpu->kvm, 82))
+               vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
        if (test_kvm_facility(vcpu->kvm, 133))
                vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
        /* fprs can be synchronized via vrs, even if the guest has no vx. With
@@ -2339,6 +2344,7 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
        current->thread.fpu.fpc = 0;
        vcpu->arch.sie_block->gbea = 1;
        vcpu->arch.sie_block->pp = 0;
+       vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
        vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
        kvm_clear_async_pf_completion_queue(vcpu);
        if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
@@ -3298,6 +3304,11 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
                vcpu->arch.gs_enabled = 1;
        }
+       if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
+           test_kvm_facility(vcpu->kvm, 82)) {
+               vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
+               vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
+       }
        save_access_regs(vcpu->arch.host_acrs);
        restore_access_regs(vcpu->run->s.regs.acrs);
        /* save host (userspace) fprs/vrs */
@@ -3344,6 +3355,7 @@ static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        kvm_run->s.regs.pft = vcpu->arch.pfault_token;
        kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
        kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
+       kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
        save_access_regs(vcpu->run->s.regs.acrs);
        restore_access_regs(vcpu->arch.host_acrs);
        /* Save guest register state */
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index 5d6ae0326d9e8fa2707e8d066488c8313dfef879..751348348477834d874e322bd2e36b19bb2ec301 100644
@@ -223,6 +223,12 @@ static void unshadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
        memcpy(scb_o->gcr, scb_s->gcr, 128);
        scb_o->pp = scb_s->pp;
 
+       /* branch prediction */
+       if (test_kvm_facility(vcpu->kvm, 82)) {
+               scb_o->fpf &= ~FPF_BPBC;
+               scb_o->fpf |= scb_s->fpf & FPF_BPBC;
+       }
+
        /* interrupt intercept */
        switch (scb_s->icptcode) {
        case ICPT_PROGI:
@@ -265,6 +271,7 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
        scb_s->ecb3 = 0;
        scb_s->ecd = 0;
        scb_s->fac = 0;
+       scb_s->fpf = 0;
 
        rc = prepare_cpuflags(vcpu, vsie_page);
        if (rc)
@@ -324,6 +331,9 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
                        prefix_unmapped(vsie_page);
                scb_s->ecb |= scb_o->ecb & ECB_TE;
        }
+       /* branch prediction */
+       if (test_kvm_facility(vcpu->kvm, 82))
+               scb_s->fpf |= scb_o->fpf & FPF_BPBC;
        /* SIMD */
        if (test_kvm_facility(vcpu->kvm, 129)) {
                scb_s->eca |= scb_o->eca & ECA_VX;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1cec2c62a0b08405d2bd7c8908d6b7f33de3b63c..c53298dfbf50a78c8f1e209e7f52614ec6c4ea36 100644
@@ -7496,13 +7496,13 @@ EXPORT_SYMBOL_GPL(kvm_task_switch);
 
 int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
 {
-       if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG_BIT)) {
+       if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG)) {
                /*
                 * When EFER.LME and CR0.PG are set, the processor is in
                 * 64-bit mode (though maybe in a 32-bit code segment).
                 * CR4.PAE and EFER.LMA must be set.
                 */
-               if (!(sregs->cr4 & X86_CR4_PAE_BIT)
+               if (!(sregs->cr4 & X86_CR4_PAE)
                    || !(sregs->efer & EFER_LMA))
                        return -EINVAL;
        } else {
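
The _BIT macros are bit positions (X86_CR0_PG_BIT is 31, X86_CR4_PAE_BIT is 5), while X86_CR0_PG and X86_CR4_PAE are the corresponding masks, so the old tests masked CR0 and CR4 against small integers rather than the intended bits. A standalone illustration, with the macro values copied from arch/x86/include/uapi/asm/processor-flags.h:

#include <assert.h>

#define X86_CR0_PG_BIT	31			/* bit position */
#define X86_CR0_PG	(1UL << X86_CR0_PG_BIT)	/* mask */

int main(void)
{
	unsigned long cr0 = X86_CR0_PG;		/* only CR0.PG set */

	assert(cr0 & X86_CR0_PG);		/* mask test: true */
	assert(!(cr0 & X86_CR0_PG_BIT));	/* old test: (1UL << 31) & 31 == 0 */
	return 0;
}
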
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 496e59a2738ba99308f438e1f0509e66e17086cb..8fb90a0819c3939eb2a74c6053c9dc5d586d9eab 100644
@@ -932,6 +932,8 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_HYPERV_SYNIC2 148
 #define KVM_CAP_HYPERV_VP_INDEX 149
 #define KVM_CAP_S390_AIS_MIGRATION 150
+#define KVM_CAP_PPC_GET_CPU_CHAR 151
+#define KVM_CAP_S390_BPB 152
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
@@ -1261,6 +1263,8 @@ struct kvm_s390_ucas_mapping {
 #define KVM_PPC_CONFIGURE_V3_MMU  _IOW(KVMIO,  0xaf, struct kvm_ppc_mmuv3_cfg)
 /* Available with KVM_CAP_PPC_RADIX_MMU */
 #define KVM_PPC_GET_RMMU_INFO    _IOW(KVMIO,  0xb0, struct kvm_ppc_rmmu_info)
+/* Available with KVM_CAP_PPC_GET_CPU_CHAR */
+#define KVM_PPC_GET_CPU_CHAR     _IOR(KVMIO,  0xb1, struct kvm_ppc_cpu_char)
 
 /* ioctl for vm fd */
 #define KVM_CREATE_DEVICE        _IOWR(KVMIO,  0xe0, struct kvm_create_device)
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index b4b69c2d10120237e12bc6524243071bf645f1d9..9dea96380339fd9e6e5889b5ee3684d4300664d7 100644
@@ -1310,7 +1310,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                return -EFAULT;
        }
 
-       if (is_vm_hugetlb_page(vma) && !logging_active) {
+       if (vma_kernel_pagesize(vma) == PMD_SIZE && !logging_active) {
                hugetlb = true;
                gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
        } else {
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index 62310122ee78eff828eb7f4ae6f8cfdb4ed18f0c..743ca5cb05ef6ec4a2e16ab09802aaa760fff219 100644
@@ -285,9 +285,11 @@ int vgic_init(struct kvm *kvm)
        if (ret)
                goto out;
 
-       ret = vgic_v4_init(kvm);
-       if (ret)
-               goto out;
+       if (vgic_has_its(kvm)) {
+               ret = vgic_v4_init(kvm);
+               if (ret)
+                       goto out;
+       }
 
        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_vgic_vcpu_enable(vcpu);
diff --git a/virt/kvm/arm/vgic/vgic-v4.c b/virt/kvm/arm/vgic/vgic-v4.c
index 4a37292855bc7f8bed299b99407d68882b98c84e..bc4265154bacf0665e2cf450641ab8839e35a1f6 100644
@@ -118,7 +118,7 @@ int vgic_v4_init(struct kvm *kvm)
        struct kvm_vcpu *vcpu;
        int i, nr_vcpus, ret;
 
-       if (!vgic_supports_direct_msis(kvm))
+       if (!kvm_vgic_global_state.has_gicv4)
                return 0; /* Nothing to see here... move along. */
 
        if (dist->its_vm.vpes)