asedeno.scripts.mit.edu Git - linux.git/commitdiff
KVM: nVMX: Capture VM-Fail to a local var in nested_vmx_check_vmentry_hw()
author: Sean Christopherson <sean.j.christopherson@intel.com>
Fri, 25 Jan 2019 15:40:58 +0000 (07:40 -0800)
committer: Paolo Bonzini <pbonzini@redhat.com>
Tue, 12 Feb 2019 12:12:19 +0000 (13:12 +0100)
Unlike the primary vCPU-run flow, the nested early checks code doesn't
actually want to propagate VM-Fail back to 'vmx'.  Yay copy+paste.

In addition to eliminating the need to clear vmx->fail before
returning, using a local boolean also drops a reference to 'vmx' in the
asm blob.  Dropping the reference to 'vmx' will save a register in the
long run as future patches will shift all pointer references from 'vmx'
to 'vmx->loaded_vmcs'.

Fixes: 52017608da33 ("KVM: nVMX: add option to perform early consistency checks via H/W")
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Reviewed-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/vmx/nested.c

index a562ecabc1184ac010e73c9414c9caa58acc88c9..bfacf90294669f723970554e5050f391952b1bdf 100644 (file)
@@ -2718,6 +2718,7 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        unsigned long cr3, cr4;
+       bool vm_fail;
 
        if (!nested_early_check)
                return 0;
@@ -2763,14 +2764,18 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
                /* Check if vmlaunch or vmresume is needed */
                "cmpb $0, %c[launched](%% " _ASM_CX")\n\t"
 
+               /*
+                * VMLAUNCH and VMRESUME clear RFLAGS.{CF,ZF} on VM-Exit, set
+                * RFLAGS.CF on VM-Fail Invalid and set RFLAGS.ZF on VM-Fail
+                * Valid.  vmx_vmenter() directly "returns" RFLAGS, and so the
+                * results of VM-Enter is captured via SETBE to vm_fail.
+                */
                "call vmx_vmenter\n\t"
 
-               /* Set vmx->fail accordingly */
-               "setbe %c[fail](%% " _ASM_CX")\n\t"
-             : ASM_CALL_CONSTRAINT
+               "setbe %[fail]\n\t"
+             : ASM_CALL_CONSTRAINT, [fail]"=qm"(vm_fail)
              : "c"(vmx), "d"((unsigned long)HOST_RSP),
                [launched]"i"(offsetof(struct vcpu_vmx, __launched)),
-               [fail]"i"(offsetof(struct vcpu_vmx, fail)),
                [host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp)),
                [wordsize]"i"(sizeof(ulong))
              : "cc", "memory"
@@ -2783,10 +2788,9 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
        if (vmx->msr_autoload.guest.nr)
                vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
 
-       if (vmx->fail) {
+       if (vm_fail) {
                WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) !=
                             VMXERR_ENTRY_INVALID_CONTROL_FIELD);
-               vmx->fail = 0;
                return 1;
        }