KVM: x86: Fix perf timer mode IP reporting
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index d0b95b7a90b4ecbed10eaf076ad19059b702a60b..35efd567a6765f6079ee06f9f9083460381a5b01 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -2,8 +2,6 @@
 #ifndef ARCH_X86_KVM_X86_H
 #define ARCH_X86_KVM_X86_H
 
-#include <asm/processor.h>
-#include <asm/mwait.h>
 #include <linux/kvm_host.h>
 #include <asm/pvclock.h>
 #include "kvm_cache_regs.h"
@@ -12,6 +10,7 @@
 
 static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
 {
+       vcpu->arch.exception.pending = false;
        vcpu->arch.exception.injected = false;
 }
 
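Clearing the queue now resets both halves of the exception state. A hedged sketch of the queueing side this pairs with (sketch_queue_exception() is a hypothetical helper, modeled on the pending/injected split: a freshly raised exception is pending, and only becomes injected once written into the VMCS/VMCB):

	/* Hypothetical illustration, not part of this diff. */
	static void sketch_queue_exception(struct kvm_vcpu *vcpu, unsigned int nr)
	{
		vcpu->arch.exception.pending = true;	/* raised, awaiting delivery */
		vcpu->arch.exception.injected = false;	/* not yet entered into the guest */
		vcpu->arch.exception.nr = nr;
	}
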
@@ -204,8 +203,6 @@ static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
        return !(kvm->arch.disabled_quirks & quirk);
 }
 
-void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
-void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
 void kvm_set_pending_timer(struct kvm_vcpu *vcpu);
 int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);
 
@@ -241,6 +238,8 @@ extern unsigned int min_timer_period_us;
 
 extern unsigned int lapic_timer_advance_ns;
 
+extern bool enable_vmware_backdoor;
+
 extern struct static_key kvm_no_apic_vcpu;
 
 static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
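The new extern gates emulation of the VMware I/O-port backdoor. A minimal sketch of the expected definition site in arch/x86/kvm/x86.c (the __read_mostly annotation and read-only permission are assumptions):

	bool __read_mostly enable_vmware_backdoor = false;
	module_param(enable_vmware_backdoor, bool, S_IRUGO);
	EXPORT_SYMBOL_GPL(enable_vmware_backdoor);
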
@@ -263,38 +262,38 @@ static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
            __rem;                                              \
         })
 
-static inline bool kvm_mwait_in_guest(void)
-{
-       unsigned int eax, ebx, ecx, edx;
+#define KVM_X86_DISABLE_EXITS_MWAIT          (1 << 0)
+#define KVM_X86_DISABLE_EXITS_HTL            (1 << 1)
+#define KVM_X86_DISABLE_EXITS_PAUSE          (1 << 2)
+#define KVM_X86_DISABLE_VALID_EXITS          (KVM_X86_DISABLE_EXITS_MWAIT | \
+                                              KVM_X86_DISABLE_EXITS_HTL | \
+                                              KVM_X86_DISABLE_EXITS_PAUSE)
 
-       if (!cpu_has(&boot_cpu_data, X86_FEATURE_MWAIT))
-               return false;
+static inline bool kvm_mwait_in_guest(struct kvm *kvm)
+{
+       return kvm->arch.mwait_in_guest;
+}
 
-       switch (boot_cpu_data.x86_vendor) {
-       case X86_VENDOR_AMD:
-               /* All AMD CPUs have a working MWAIT implementation */
-               return true;
-       case X86_VENDOR_INTEL:
-               /* Handle Intel below */
-               break;
-       default:
-               return false;
-       }
+static inline bool kvm_hlt_in_guest(struct kvm *kvm)
+{
+       return kvm->arch.hlt_in_guest;
+}
 
-       /*
-        * Intel CPUs without CPUID5_ECX_INTERRUPT_BREAK are problematic as
-        * they would allow guest to stop the CPU completely by disabling
-        * interrupts then invoking MWAIT.
-        */
-       if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
-               return false;
+static inline bool kvm_pause_in_guest(struct kvm *kvm)
+{
+       return kvm->arch.pause_in_guest;
+}
 
-       cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);
+DECLARE_PER_CPU(struct kvm_vcpu *, current_vcpu);
 
-       if (!(ecx & CPUID5_ECX_INTERRUPT_BREAK))
-               return false;
+static inline void kvm_before_interrupt(struct kvm_vcpu *vcpu)
+{
+       __this_cpu_write(current_vcpu, vcpu);
+}
 
-       return true;
+static inline void kvm_after_interrupt(struct kvm_vcpu *vcpu)
+{
+       __this_cpu_write(current_vcpu, NULL);
 }
 
 #endif
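
For context on how the new flags are consumed: userspace opts in per VM through the KVM_CAP_X86_DISABLE_EXITS capability introduced in the same series (note that _HTL above is a typo for _HLT, later corrected upstream). A hedged userspace sketch, assuming the VM fd already exists and the kernel advertises KVM_CAP_ENABLE_CAP_VM:

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Ask KVM to stop intercepting MWAIT/HLT/PAUSE for this VM. */
	static int disable_exit_intercepts(int vm_fd)
	{
		struct kvm_enable_cap cap;

		memset(&cap, 0, sizeof(cap));
		cap.cap = KVM_CAP_X86_DISABLE_EXITS;
		cap.args[0] = KVM_X86_DISABLE_EXITS_MWAIT |
			      KVM_X86_DISABLE_EXITS_HLT |	/* spelled _HTL in this header version */
			      KVM_X86_DISABLE_EXITS_PAUSE;

		/* Must be issued before any vCPU is created. */
		return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
	}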