asedeno.scripts.mit.edu Git - linux.git/commitdiff
KVM: VMX: Update VMCS.HOST_RSP via helper C function
authorSean Christopherson <sean.j.christopherson@intel.com>
Fri, 25 Jan 2019 15:41:04 +0000 (07:41 -0800)
committerPaolo Bonzini <pbonzini@redhat.com>
Tue, 12 Feb 2019 12:12:24 +0000 (13:12 +0100)
Providing a helper function to update HOST_RSP is visibly easier to
read, and more importantly (for the future) eliminates two arguments to
the VM-Enter assembly blob.  Reducing the number of arguments to the asm
blob is for all intents and purposes a prerequisite to moving the code
to a proper assembly routine.  It's not truly mandatory, but it greatly
simplifies the future code, and the cost of the extra CALL+RET is
negligible in the grand scheme.

Note that although _ASM_ARG[1-3] can be used in the inline asm itself,
the input/output constraints need to be manually defined.  gcc will
actually compile with _ASM_ARG[1-3] specified as constraints, but what
it actually ends up doing with the bogus constraint is unknown.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/vmx/vmx.c

index 5e43999ece1ddc72f4a08e1e83da40f15d345d19..4b8a94fedb769a2746f1c9347ee21252297a13bb 100644 (file)
@@ -6363,15 +6363,18 @@ static void vmx_update_hv_timer(struct kvm_vcpu *vcpu)
        vmx->loaded_vmcs->hv_timer_armed = false;
 }
 
-static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
+void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp)
 {
-       unsigned long evmcs_rsp;
+       if (unlikely(host_rsp != vmx->loaded_vmcs->host_state.rsp)) {
+               vmx->loaded_vmcs->host_state.rsp = host_rsp;
+               vmcs_writel(HOST_RSP, host_rsp);
+       }
+}
 
+static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
+{
        vmx->__launched = vmx->loaded_vmcs->launched;
 
-       evmcs_rsp = static_branch_unlikely(&enable_evmcs) ?
-               (unsigned long)&current_evmcs->host_rsp : 0;
-
        if (static_branch_unlikely(&vmx_l1d_should_flush))
                vmx_l1d_flush(vcpu);
 
@@ -6382,21 +6385,14 @@ static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
                /* Store host registers */
                "push %%" _ASM_BP " \n\t"
                "sub $%c[wordsize], %%" _ASM_SP "\n\t" /* placeholder for guest RCX */
-               "push %%" _ASM_CX " \n\t"
-               "sub $%c[wordsize], %%" _ASM_SP "\n\t" /* temporarily adjust RSP for CALL */
-               "cmp %%" _ASM_SP ", (%%" _ASM_DI ") \n\t"
-               "je 1f \n\t"
-               "mov %%" _ASM_SP ", (%%" _ASM_DI ") \n\t"
-               /* Avoid VMWRITE when Enlightened VMCS is in use */
-               "test %%" _ASM_SI ", %%" _ASM_SI " \n\t"
-               "jz 2f \n\t"
-               "mov %%" _ASM_SP ", (%%" _ASM_SI ") \n\t"
-               "jmp 1f \n\t"
-               "2: \n\t"
-               "mov $%c[HOST_RSP], %%" _ASM_DX " \n\t"
-               __ex("vmwrite %%" _ASM_SP ", %%" _ASM_DX) "\n\t"
-               "1: \n\t"
-               "add $%c[wordsize], %%" _ASM_SP "\n\t" /* un-adjust RSP */
+               "push %%" _ASM_ARG1 " \n\t"
+
+               /* Adjust RSP to account for the CALL to vmx_vmenter(). */
+               "lea -%c[wordsize](%%" _ASM_SP "), %%" _ASM_ARG2 " \n\t"
+               "call vmx_update_host_rsp \n\t"
+
+               /* Load the vcpu_vmx pointer to RCX. */
+               "mov (%%" _ASM_SP "), %%" _ASM_CX " \n\t"
 
                /* Check if vmlaunch or vmresume is needed */
                "cmpb $0, %c[launched](%%" _ASM_CX ") \n\t"
@@ -6475,11 +6471,16 @@ static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
                "xor %%edi, %%edi \n\t"
                "xor %%ebp, %%ebp \n\t"
                "pop  %%" _ASM_BP " \n\t"
-             : ASM_CALL_CONSTRAINT, "=D"((int){0}), "=S"((int){0})
-             : "c"(vmx), "D"(&vmx->loaded_vmcs->host_state.rsp), "S"(evmcs_rsp),
+             : ASM_CALL_CONSTRAINT,
+#ifdef CONFIG_X86_64
+               "=D"((int){0})
+             : "D"(vmx),
+#else
+               "=a"((int){0})
+             : "a"(vmx),
+#endif
                [launched]"i"(offsetof(struct vcpu_vmx, __launched)),
                [fail]"i"(offsetof(struct vcpu_vmx, fail)),
-               [HOST_RSP]"i"(HOST_RSP),
                [rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])),
                [rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])),
                [rcx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RCX])),
@@ -6500,10 +6501,10 @@ static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
                [wordsize]"i"(sizeof(ulong))
              : "cc", "memory"
 #ifdef CONFIG_X86_64
-               , "rax", "rbx", "rdx"
+               , "rax", "rbx", "rcx", "rdx", "rsi"
                , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
 #else
-               , "eax", "ebx", "edx"
+               , "ebx", "ecx", "edx", "edi", "esi"
 #endif
              );