KVM: X86: Emulate MSR_IA32_MISC_ENABLE MWAIT bit
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 83aefd759846e1942066e53b016e420fd39d8a7d..10feed6a01eb8fd83ba6a4f89cbde5073d31d565 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1212,11 +1212,12 @@ static u32 msr_based_features[] = {
 
 static unsigned int num_msr_based_features;
 
-u64 kvm_get_arch_capabilities(void)
+static u64 kvm_get_arch_capabilities(void)
 {
-       u64 data;
+       u64 data = 0;
 
-       rdmsrl_safe(MSR_IA32_ARCH_CAPABILITIES, &data);
+       if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
+               rdmsrl(MSR_IA32_ARCH_CAPABILITIES, data);
 
        /*
         * If we're doing cache flushes (either "always" or "cond")
@@ -1232,7 +1233,6 @@ u64 kvm_get_arch_capabilities(void)
 
        return data;
 }
-EXPORT_SYMBOL_GPL(kvm_get_arch_capabilities);
 
 static int kvm_get_msr_feature(struct kvm_msr_entry *msr)
 {
@@ -2547,7 +2547,15 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                }
                break;
        case MSR_IA32_MISC_ENABLE:
-               vcpu->arch.ia32_misc_enable_msr = data;
+               if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT) &&
+                   ((vcpu->arch.ia32_misc_enable_msr ^ data) & MSR_IA32_MISC_ENABLE_MWAIT)) {
+                       if (!guest_cpuid_has(vcpu, X86_FEATURE_XMM3))
+                               return 1;
+                       vcpu->arch.ia32_misc_enable_msr = data;
+                       kvm_update_cpuid(vcpu);
+               } else {
+                       vcpu->arch.ia32_misc_enable_msr = data;
+               }
                break;
        case MSR_IA32_SMBASE:
                if (!msr_info->host_initiated)
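The hunk above only takes effect once userspace turns off the default-on
KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT quirk: with the quirk disabled, a write
that flips MSR_IA32_MISC_ENABLE_MWAIT is rejected unless guest CPUID
advertises SSE3 (X86_FEATURE_XMM3), and a successful flip refreshes guest
CPUID so the MONITOR/MWAIT feature bit (CPUID.01H:ECX[3]) tracks the MSR.
A minimal, hypothetical VMM-side sketch of opting out of the quirk
(the helper name is illustrative; assumes an open VM fd):

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Sketch: disable the quirk so KVM emulates the MWAIT enable bit
     * instead of treating MSR_IA32_MISC_ENABLE writes as opaque. */
    static int disable_mwait_quirk(int vm_fd)
    {
            struct kvm_enable_cap cap = {
                    .cap = KVM_CAP_DISABLE_QUIRKS,
                    .args[0] = KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT,
            };

            return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
    }

After this, a guest wrmsr to MSR_IA32_MISC_ENABLE can legitimately toggle
the bit, and the guest sees CPUID change accordingly.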
@@ -3098,7 +3106,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
                r = KVM_CLOCK_TSC_STABLE;
                break;
        case KVM_CAP_X86_DISABLE_EXITS:
-               r |=  KVM_X86_DISABLE_EXITS_HLT | KVM_X86_DISABLE_EXITS_PAUSE;
+               r |=  KVM_X86_DISABLE_EXITS_HLT | KVM_X86_DISABLE_EXITS_PAUSE |
+                     KVM_X86_DISABLE_EXITS_CSTATE;
                if(kvm_can_mwait_in_guest())
                        r |= KVM_X86_DISABLE_EXITS_MWAIT;
                break;
@@ -4615,6 +4624,8 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
                        kvm->arch.hlt_in_guest = true;
                if (cap->args[0] & KVM_X86_DISABLE_EXITS_PAUSE)
                        kvm->arch.pause_in_guest = true;
+               if (cap->args[0] & KVM_X86_DISABLE_EXITS_CSTATE)
+                       kvm->arch.cstate_in_guest = true;
                r = 0;
                break;
        case KVM_CAP_MSR_PLATFORM_INFO:
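These two hunks pair up: KVM_CHECK_EXTENSION(KVM_CAP_X86_DISABLE_EXITS) now
advertises KVM_X86_DISABLE_EXITS_CSTATE, and KVM_ENABLE_CAP lets a VMM set
cstate_in_guest so C-state transitions stay inside the guest rather than
causing a VM exit. A hypothetical userspace sketch (fd names are
illustrative; kvm_fd is the /dev/kvm fd, vm_fd the VM fd):

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Sketch: request only the exit-disabling bits this host reports. */
    static int disable_idle_exits(int kvm_fd, int vm_fd)
    {
            struct kvm_enable_cap cap = { .cap = KVM_CAP_X86_DISABLE_EXITS };
            int supported = ioctl(kvm_fd, KVM_CHECK_EXTENSION,
                                  KVM_CAP_X86_DISABLE_EXITS);

            if (supported < 0)
                    return supported;
            cap.args[0] = supported & (KVM_X86_DISABLE_EXITS_HLT |
                                       KVM_X86_DISABLE_EXITS_CSTATE);
            return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
    }

The usual trade-off applies: an idling vCPU keeps occupying its host CPU,
so this suits setups that dedicate physical CPUs to vCPUs.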
@@ -6910,35 +6921,6 @@ static struct perf_guest_info_callbacks kvm_guest_cbs = {
        .handle_intel_pt_intr   = kvm_handle_intel_pt_intr,
 };
 
-static void kvm_set_mmio_spte_mask(void)
-{
-       u64 mask;
-       int maxphyaddr = boot_cpu_data.x86_phys_bits;
-
-       /*
-        * Set the reserved bits and the present bit of an paging-structure
-        * entry to generate page fault with PFER.RSV = 1.
-        */
-
-       /*
-        * Mask the uppermost physical address bit, which would be reserved as
-        * long as the supported physical address width is less than 52.
-        */
-       mask = 1ull << 51;
-
-       /* Set the present bit. */
-       mask |= 1ull;
-
-       /*
-        * If reserved bit is not supported, clear the present bit to disable
-        * mmio page fault.
-        */
-       if (IS_ENABLED(CONFIG_X86_64) && maxphyaddr == 52)
-               mask &= ~1ull;
-
-       kvm_mmu_set_mmio_spte_mask(mask, mask);
-}
-
 #ifdef CONFIG_X86_64
 static void pvclock_gtod_update_fn(struct work_struct *work)
 {
@@ -7035,8 +7017,6 @@ int kvm_arch_init(void *opaque)
        if (r)
                goto out_free_percpu;
 
-       kvm_set_mmio_spte_mask();
-
        kvm_x86_ops = ops;
 
        kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
@@ -7953,9 +7933,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
        }
 
        trace_kvm_entry(vcpu->vcpu_id);
-       if (lapic_in_kernel(vcpu) &&
-           vcpu->arch.apic->lapic_timer.timer_advance_ns)
-               wait_lapic_expire(vcpu);
        guest_enter_irqoff();
 
        fpregs_assert_state_consistent();
@@ -8011,6 +7988,13 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
        ++vcpu->stat.exits;
 
        guest_exit_irqoff();
+       if (lapic_in_kernel(vcpu)) {
+               s64 delta = vcpu->arch.apic->lapic_timer.advance_expire_delta;
+               if (delta != S64_MIN) {
+                       trace_kvm_wait_lapic_expire(vcpu->vcpu_id, delta);
+                       vcpu->arch.apic->lapic_timer.advance_expire_delta = S64_MIN;
+               }
+       }
 
        local_irq_enable();
        preempt_enable();
@@ -9109,9 +9093,9 @@ void kvm_arch_hardware_unsetup(void)
        kvm_x86_ops->hardware_unsetup();
 }
 
-void kvm_arch_check_processor_compat(void *rtn)
+int kvm_arch_check_processor_compat(void)
 {
-       kvm_x86_ops->check_processor_compatibility(rtn);
+       return kvm_x86_ops->check_processor_compatibility();
 }
 
 bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu)
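The final hunk follows a cross-architecture API change: the processor
compatibility check now returns its result directly instead of writing it
through a void pointer. A hypothetical caller-side sketch of the effect:

    /* Sketch: the result is a plain return value, not an out-parameter. */
    int r = kvm_arch_check_processor_compat();

    if (r)
            return r;       /* this CPU cannot run KVM; abort init */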