asedeno.scripts.mit.edu Git - linux.git/commitdiff
KVM: vmx: Emulate MSR IA32_UMWAIT_CONTROL
authorTao Xu <tao3.xu@intel.com>
Tue, 16 Jul 2019 06:55:50 +0000 (14:55 +0800)
committerPaolo Bonzini <pbonzini@redhat.com>
Tue, 24 Sep 2019 12:34:36 +0000 (14:34 +0200)
UMWAIT and TPAUSE instructions use the 32-bit IA32_UMWAIT_CONTROL MSR at index
E1H to determine the maximum time in TSC-quanta that the processor can
reside in either C0.1 or C0.2.

This patch emulates MSR IA32_UMWAIT_CONTROL in the guest and differentiates
IA32_UMWAIT_CONTROL between host and guest. The variable
umwait_control_cached in arch/x86/kernel/cpu/umwait.c caches the MSR value,
so this patch uses it to avoid frequent rdmsr of IA32_UMWAIT_CONTROL.

Co-developed-by: Jingqi Liu <jingqi.liu@intel.com>
Signed-off-by: Jingqi Liu <jingqi.liu@intel.com>
Signed-off-by: Tao Xu <tao3.xu@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kernel/cpu/umwait.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/vmx/vmx.h
arch/x86/kvm/x86.c

index 32b4dc9030aa9fbcd482b39ef427b5804ec6499c..c222f283b456089c45dfc14702fea12aeb3e0cd2 100644 (file)
  */
 static u32 umwait_control_cached = UMWAIT_CTRL_VAL(100000, UMWAIT_C02_ENABLE);
 
+u32 get_umwait_control_msr(void)
+{
+       return umwait_control_cached;
+}
+EXPORT_SYMBOL_GPL(get_umwait_control_msr);
+
 /*
  * Cache the original IA32_UMWAIT_CONTROL MSR value which is configured by
  * hardware or BIOS before kernel boot.
index bb55f54e29b1509a70c31e1f0a75fb69f5d4b17f..52dd2241b749b392617ec1162abbbd88cfca0717 100644 (file)
@@ -1733,6 +1733,12 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 #endif
        case MSR_EFER:
                return kvm_get_msr_common(vcpu, msr_info);
+       case MSR_IA32_UMWAIT_CONTROL:
+               if (!msr_info->host_initiated && !vmx_has_waitpkg(vmx))
+                       return 1;
+
+               msr_info->data = vmx->msr_ia32_umwait_control;
+               break;
        case MSR_IA32_SPEC_CTRL:
                if (!msr_info->host_initiated &&
                    !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
@@ -1906,6 +1912,16 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                        return 1;
                vmcs_write64(GUEST_BNDCFGS, data);
                break;
+       case MSR_IA32_UMWAIT_CONTROL:
+               if (!msr_info->host_initiated && !vmx_has_waitpkg(vmx))
+                       return 1;
+
+               /* The reserved bit 1 and non-32 bit [63:32] should be zero */
+               if (data & (BIT_ULL(1) | GENMASK_ULL(63, 32)))
+                       return 1;
+
+               vmx->msr_ia32_umwait_control = data;
+               break;
        case MSR_IA32_SPEC_CTRL:
                if (!msr_info->host_initiated &&
                    !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
@@ -4211,6 +4227,8 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
        vmx->rmode.vm86_active = 0;
        vmx->spec_ctrl = 0;
 
+       vmx->msr_ia32_umwait_control = 0;
+
        vcpu->arch.microcode_version = 0x100000000ULL;
        vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
        vmx->hv_deadline_tsc = -1;
@@ -6384,6 +6402,23 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
                                        msrs[i].host, false);
 }
 
+static void atomic_switch_umwait_control_msr(struct vcpu_vmx *vmx)
+{
+       u32 host_umwait_control;
+
+       if (!vmx_has_waitpkg(vmx))
+               return;
+
+       host_umwait_control = get_umwait_control_msr();
+
+       if (vmx->msr_ia32_umwait_control != host_umwait_control)
+               add_atomic_switch_msr(vmx, MSR_IA32_UMWAIT_CONTROL,
+                       vmx->msr_ia32_umwait_control,
+                       host_umwait_control, false);
+       else
+               clear_atomic_switch_msr(vmx, MSR_IA32_UMWAIT_CONTROL);
+}
+
 static void vmx_update_hv_timer(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -6478,6 +6513,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
        pt_guest_enter(vmx);
 
        atomic_switch_perf_msrs(vmx);
+       atomic_switch_umwait_control_msr(vmx);
 
        if (enable_preemption_timer)
                vmx_update_hv_timer(vcpu);
index 64d5a4890aa9e3e5f88343a6f0aec2c63e21d36e..bee16687dc0bf054957273d520306cfcbbd178dc 100644 (file)
@@ -14,6 +14,8 @@
 extern const u32 vmx_msr_index[];
 extern u64 host_efer;
 
+extern u32 get_umwait_control_msr(void);
+
 #define MSR_TYPE_R     1
 #define MSR_TYPE_W     2
 #define MSR_TYPE_RW    3
@@ -211,6 +213,7 @@ struct vcpu_vmx {
 #endif
 
        u64                   spec_ctrl;
+       u32                   msr_ia32_umwait_control;
 
        u32 secondary_exec_control;
 
@@ -497,6 +500,12 @@ static inline void decache_tsc_multiplier(struct vcpu_vmx *vmx)
        vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
 }
 
+static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx)
+{
+       return vmx->secondary_exec_control &
+               SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE;
+}
+
 void dump_vmcs(void);
 
 #endif /* __KVM_X86_VMX_H */
index c38d247dbffbe4d8b799554bea926a32aedb2b5d..977b36348bedceef965d1a5ef5fd9d98f6ba3c7a 100644 (file)
@@ -1145,6 +1145,8 @@ static u32 msrs_to_save[] = {
        MSR_IA32_RTIT_ADDR1_A, MSR_IA32_RTIT_ADDR1_B,
        MSR_IA32_RTIT_ADDR2_A, MSR_IA32_RTIT_ADDR2_B,
        MSR_IA32_RTIT_ADDR3_A, MSR_IA32_RTIT_ADDR3_B,
+       MSR_IA32_UMWAIT_CONTROL,
+
        MSR_ARCH_PERFMON_FIXED_CTR0, MSR_ARCH_PERFMON_FIXED_CTR1,
        MSR_ARCH_PERFMON_FIXED_CTR0 + 2, MSR_ARCH_PERFMON_FIXED_CTR0 + 3,
        MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_CORE_PERF_GLOBAL_STATUS,