KVM: async_pf: Force a nested vmexit if the injected #PF is async_pf
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 6dcc4873e435c7357892e9dcd200c3c4c9b3688a..5a3bb1a697a226e68874c5a2a49a57c946822be3 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -913,8 +913,9 @@ static void nested_release_page_clean(struct page *page)
        kvm_release_page_clean(page);
 }
 
+static bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu);
 static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu);
-static u64 construct_eptp(unsigned long root_hpa);
+static u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa);
 static bool vmx_xsaves_supported(void);
 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
 static void vmx_set_segment(struct kvm_vcpu *vcpu,
@@ -2421,28 +2422,41 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
  * KVM wants to inject page-faults which it got to the guest. This function
  * checks whether in a nested guest, we need to inject them to L1 or L2.
  */
-static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned nr)
+static int nested_vmx_check_exception(struct kvm_vcpu *vcpu)
 {
        struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+       unsigned int nr = vcpu->arch.exception.nr;
 
-       if (!(vmcs12->exception_bitmap & (1u << nr)))
+       if (!((vmcs12->exception_bitmap & (1u << nr)) ||
+               (nr == PF_VECTOR && vcpu->arch.exception.nested_apf)))
                return 0;
 
+       if (vcpu->arch.exception.nested_apf) {
+               vmcs_write32(VM_EXIT_INTR_ERROR_CODE, vcpu->arch.exception.error_code);
+               nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
+                       PF_VECTOR | INTR_TYPE_HARD_EXCEPTION |
+                       INTR_INFO_DELIVER_CODE_MASK | INTR_INFO_VALID_MASK,
+                       vcpu->arch.apf.nested_apf_token);
+               return 1;
+       }
+
        nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
                          vmcs_read32(VM_EXIT_INTR_INFO),
                          vmcs_readl(EXIT_QUALIFICATION));
        return 1;
 }
 
-static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
-                               bool has_error_code, u32 error_code,
-                               bool reinject)
+static void vmx_queue_exception(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
+       unsigned nr = vcpu->arch.exception.nr;
+       bool has_error_code = vcpu->arch.exception.has_error_code;
+       bool reinject = vcpu->arch.exception.reinject;
+       u32 error_code = vcpu->arch.exception.error_code;
        u32 intr_info = nr | INTR_INFO_VALID_MASK;
 
        if (!reinject && is_guest_mode(vcpu) &&
-           nested_vmx_check_exception(vcpu, nr))
+           nested_vmx_check_exception(vcpu))
                return;
 
        if (has_error_code) {
@@ -2772,7 +2786,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
                if (enable_ept_ad_bits) {
                        vmx->nested.nested_vmx_secondary_ctls_high |=
                                SECONDARY_EXEC_ENABLE_PML;
-                      vmx->nested.nested_vmx_ept_caps |= VMX_EPT_AD_BIT;
+                       vmx->nested.nested_vmx_ept_caps |= VMX_EPT_AD_BIT;
                }
        } else
                vmx->nested.nested_vmx_ept_caps = 0;
@@ -3198,7 +3212,8 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
                break;
        case MSR_IA32_BNDCFGS:
-               if (!kvm_mpx_supported())
+               if (!kvm_mpx_supported() ||
+                   (!msr_info->host_initiated && !guest_cpuid_has_mpx(vcpu)))
                        return 1;
                msr_info->data = vmcs_read64(GUEST_BNDCFGS);
                break;
@@ -3280,7 +3295,11 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                vmcs_writel(GUEST_SYSENTER_ESP, data);
                break;
        case MSR_IA32_BNDCFGS:
-               if (!kvm_mpx_supported())
+               if (!kvm_mpx_supported() ||
+                   (!msr_info->host_initiated && !guest_cpuid_has_mpx(vcpu)))
+                       return 1;
+               if (is_noncanonical_address(data & PAGE_MASK) ||
+                   (data & MSR_IA32_BNDCFGS_RSVD))
                        return 1;
                vmcs_write64(GUEST_BNDCFGS, data);
                break;
@@ -3758,6 +3777,25 @@ static void free_kvm_area(void)
        }
 }
 
+enum vmcs_field_type {
+       VMCS_FIELD_TYPE_U16 = 0,
+       VMCS_FIELD_TYPE_U64 = 1,
+       VMCS_FIELD_TYPE_U32 = 2,
+       VMCS_FIELD_TYPE_NATURAL_WIDTH = 3
+};
+
+static inline int vmcs_field_type(unsigned long field)
+{
+       if (0x1 & field)        /* the *_HIGH fields are all 32 bit */
+               return VMCS_FIELD_TYPE_U32;
+       return (field >> 13) & 0x3 ;
+}
+
+static inline int vmcs_field_readonly(unsigned long field)
+{
+       return (((field >> 10) & 0x3) == 1);
+}
+
 static void init_vmcs_shadow_fields(void)
 {
        int i, j;
@@ -3783,14 +3821,22 @@ static void init_vmcs_shadow_fields(void)
 
        /* shadowed fields guest access without vmexit */
        for (i = 0; i < max_shadow_read_write_fields; i++) {
-               clear_bit(shadow_read_write_fields[i],
-                         vmx_vmwrite_bitmap);
-               clear_bit(shadow_read_write_fields[i],
-                         vmx_vmread_bitmap);
+               unsigned long field = shadow_read_write_fields[i];
+
+               clear_bit(field, vmx_vmwrite_bitmap);
+               clear_bit(field, vmx_vmread_bitmap);
+               if (vmcs_field_type(field) == VMCS_FIELD_TYPE_U64) {
+                       clear_bit(field + 1, vmx_vmwrite_bitmap);
+                       clear_bit(field + 1, vmx_vmread_bitmap);
+               }
+       }
+       for (i = 0; i < max_shadow_read_only_fields; i++) {
+               unsigned long field = shadow_read_only_fields[i];
+
+               clear_bit(field, vmx_vmread_bitmap);
+               if (vmcs_field_type(field) == VMCS_FIELD_TYPE_U64)
+                       clear_bit(field + 1, vmx_vmread_bitmap);
        }
-       for (i = 0; i < max_shadow_read_only_fields; i++)
-               clear_bit(shadow_read_only_fields[i],
-                         vmx_vmread_bitmap);
 }
 
 static __init int alloc_kvm_area(void)
@@ -4013,7 +4059,7 @@ static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid)
        if (enable_ept) {
                if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
                        return;
-               ept_sync_context(construct_eptp(vcpu->arch.mmu.root_hpa));
+               ept_sync_context(construct_eptp(vcpu, vcpu->arch.mmu.root_hpa));
        } else {
                vpid_sync_context(vpid);
        }
@@ -4188,14 +4234,15 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
        vmx->emulation_required = emulation_required(vcpu);
 }
 
-static u64 construct_eptp(unsigned long root_hpa)
+static u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa)
 {
        u64 eptp;
 
        /* TODO write the value reading from MSR */
        eptp = VMX_EPT_DEFAULT_MT |
                VMX_EPT_DEFAULT_GAW << VMX_EPT_GAW_EPTP_SHIFT;
-       if (enable_ept_ad_bits)
+       if (enable_ept_ad_bits &&
+           (!is_guest_mode(vcpu) || nested_ept_ad_enabled(vcpu)))
                eptp |= VMX_EPT_AD_ENABLE_BIT;
        eptp |= (root_hpa & PAGE_MASK);
 
@@ -4209,7 +4256,7 @@ static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 
        guest_cr3 = cr3;
        if (enable_ept) {
-               eptp = construct_eptp(cr3);
+               eptp = construct_eptp(vcpu, cr3);
                vmcs_write64(EPT_POINTER, eptp);
                if (is_paging(vcpu) || is_guest_mode(vcpu))
                        guest_cr3 = kvm_read_cr3(vcpu);
@@ -4627,6 +4674,11 @@ static bool guest_state_valid(struct kvm_vcpu *vcpu)
        return true;
 }
 
+static bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
+{
+       return PAGE_ALIGNED(gpa) && !(gpa >> cpuid_maxphyaddr(vcpu));
+}
+
 static int init_rmode_tss(struct kvm *kvm)
 {
        gfn_t fn;
@@ -5170,7 +5222,8 @@ static void ept_set_mmio_spte_mask(void)
         * EPT Misconfigurations can be generated if the value of bits 2:0
         * of an EPT paging-structure entry is 110b (write/execute).
         */
-       kvm_mmu_set_mmio_spte_mask(VMX_EPT_MISCONFIG_WX_VALUE);
+       kvm_mmu_set_mmio_spte_mask(VMX_EPT_RWX_MASK,
+                                  VMX_EPT_MISCONFIG_WX_VALUE);
 }
 
 #define VMX_XSS_EXIT_BITMAP 0
@@ -5656,14 +5709,11 @@ static int handle_exception(struct kvm_vcpu *vcpu)
        }
 
        if (is_page_fault(intr_info)) {
-               /* EPT won't cause page fault directly */
-               BUG_ON(enable_ept);
                cr2 = vmcs_readl(EXIT_QUALIFICATION);
-               trace_kvm_page_fault(cr2, error_code);
-
-               if (kvm_event_needs_reinjection(vcpu))
-                       kvm_mmu_unprotect_page_virt(vcpu, cr2);
-               return kvm_mmu_page_fault(vcpu, cr2, error_code, NULL, 0);
+               /* EPT won't cause page fault directly */
+               WARN_ON_ONCE(!vcpu->arch.apf.host_apf_reason && enable_ept);
+               return kvm_handle_page_fault(vcpu, error_code, cr2, NULL, 0,
+                               true);
        }
 
        ex_no = intr_info & INTR_INFO_VECTOR_MASK;
@@ -6220,17 +6270,6 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
 
        exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
 
-       if (is_guest_mode(vcpu)
-           && !(exit_qualification & EPT_VIOLATION_GVA_TRANSLATED)) {
-               /*
-                * Fix up exit_qualification according to whether guest
-                * page table accesses are reads or writes.
-                */
-               u64 eptp = nested_ept_get_cr3(vcpu);
-               if (!(eptp & VMX_EPT_AD_ENABLE_BIT))
-                       exit_qualification &= ~EPT_VIOLATION_ACC_WRITE;
-       }
-
        /*
         * EPT violation happened while executing iret from NMI,
         * "blocked by NMI" bit has to be set before next VM entry.
@@ -6453,7 +6492,7 @@ void vmx_enable_tdp(void)
                enable_ept_ad_bits ? VMX_EPT_DIRTY_BIT : 0ull,
                0ull, VMX_EPT_EXECUTABLE_MASK,
                cpu_has_vmx_ept_execute_only() ? 0ull : VMX_EPT_READABLE_MASK,
-               enable_ept_ad_bits ? 0ull : VMX_EPT_RWX_MASK);
+               VMX_EPT_RWX_MASK);
 
        ept_set_mmio_spte_mask();
        kvm_enable_tdp();
@@ -6557,7 +6596,6 @@ static __init int hardware_setup(void)
        vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
        vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
        vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
-       vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true);
 
        memcpy(vmx_msr_bitmap_legacy_x2apic_apicv,
                        vmx_msr_bitmap_legacy, PAGE_SIZE);
@@ -7218,25 +7256,6 @@ static int handle_vmresume(struct kvm_vcpu *vcpu)
        return nested_vmx_run(vcpu, false);
 }
 
-enum vmcs_field_type {
-       VMCS_FIELD_TYPE_U16 = 0,
-       VMCS_FIELD_TYPE_U64 = 1,
-       VMCS_FIELD_TYPE_U32 = 2,
-       VMCS_FIELD_TYPE_NATURAL_WIDTH = 3
-};
-
-static inline int vmcs_field_type(unsigned long field)
-{
-       if (0x1 & field)        /* the *_HIGH fields are all 32 bit */
-               return VMCS_FIELD_TYPE_U32;
-       return (field >> 13) & 0x3 ;
-}
-
-static inline int vmcs_field_readonly(unsigned long field)
-{
-       return (((field >> 10) & 0x3) == 1);
-}
-
 /*
  * Read a vmcs12 field. Since these can have varying lengths and we return
  * one type, we chose the biggest type (u64) and zero-extend the return value
@@ -7661,7 +7680,10 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
        unsigned long type, types;
        gva_t gva;
        struct x86_exception e;
-       int vpid;
+       struct {
+               u64 vpid;
+               u64 gla;
+       } operand;
 
        if (!(vmx->nested.nested_vmx_secondary_ctls_high &
              SECONDARY_EXEC_ENABLE_VPID) ||
@@ -7691,17 +7713,28 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
        if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
                        vmx_instruction_info, false, &gva))
                return 1;
-       if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vpid,
-                               sizeof(u32), &e)) {
+       if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand,
+                               sizeof(operand), &e)) {
                kvm_inject_page_fault(vcpu, &e);
                return 1;
        }
+       if (operand.vpid >> 16) {
+               nested_vmx_failValid(vcpu,
+                       VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
+               return kvm_skip_emulated_instruction(vcpu);
+       }
 
        switch (type) {
        case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:
+               if (is_noncanonical_address(operand.gla)) {
+                       nested_vmx_failValid(vcpu,
+                               VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
+                       return kvm_skip_emulated_instruction(vcpu);
+               }
+               /* fall through */
        case VMX_VPID_EXTENT_SINGLE_CONTEXT:
        case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL:
-               if (!vpid) {
+               if (!operand.vpid) {
                        nested_vmx_failValid(vcpu,
                                VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
                        return kvm_skip_emulated_instruction(vcpu);
@@ -8408,9 +8441,15 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
                        exit_reason != EXIT_REASON_TASK_SWITCH)) {
                vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
-               vcpu->run->internal.ndata = 2;
+               vcpu->run->internal.ndata = 3;
                vcpu->run->internal.data[0] = vectoring_info;
                vcpu->run->internal.data[1] = exit_reason;
+               vcpu->run->internal.data[2] = vcpu->arch.exit_qualification;
+               if (exit_reason == EXIT_REASON_EPT_MISCONFIG) {
+                       vcpu->run->internal.ndata++;
+                       vcpu->run->internal.data[3] =
+                               vmcs_read64(GUEST_PHYSICAL_ADDRESS);
+               }
                return 0;
        }
 
@@ -8601,17 +8640,24 @@ static void vmx_apicv_post_state_restore(struct kvm_vcpu *vcpu)
 
 static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
 {
-       u32 exit_intr_info;
+       u32 exit_intr_info = 0;
+       u16 basic_exit_reason = (u16)vmx->exit_reason;
 
-       if (!(vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY
-             || vmx->exit_reason == EXIT_REASON_EXCEPTION_NMI))
+       if (!(basic_exit_reason == EXIT_REASON_MCE_DURING_VMENTRY
+             || basic_exit_reason == EXIT_REASON_EXCEPTION_NMI))
                return;
 
-       vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
-       exit_intr_info = vmx->exit_intr_info;
+       if (!(vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY))
+               exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
+       vmx->exit_intr_info = exit_intr_info;
+
+       /* if exit due to PF check for async PF */
+       if (is_page_fault(exit_intr_info))
+               vmx->vcpu.arch.apf.host_apf_reason = kvm_read_and_reset_pf_reason();
 
        /* Handle machine checks before interrupts are enabled */
-       if (is_machine_check(exit_intr_info))
+       if (basic_exit_reason == EXIT_REASON_MCE_DURING_VMENTRY ||
+           is_machine_check(exit_intr_info))
                kvm_machine_check();
 
        /* We need to handle NMIs before interrupts are enabled */
@@ -9394,6 +9440,11 @@ static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
        vmcs12->guest_physical_address = fault->address;
 }
 
+static bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu)
+{
+       return nested_ept_get_cr3(vcpu) & VMX_EPT_AD_ENABLE_BIT;
+}
+
 /* Callbacks for nested_ept_init_mmu_context: */
 
 static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu)
@@ -9404,18 +9455,18 @@ static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu)
 
 static int nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
 {
-       u64 eptp;
+       bool wants_ad;
 
        WARN_ON(mmu_is_nested(vcpu));
-       eptp = nested_ept_get_cr3(vcpu);
-       if ((eptp & VMX_EPT_AD_ENABLE_BIT) && !enable_ept_ad_bits)
+       wants_ad = nested_ept_ad_enabled(vcpu);
+       if (wants_ad && !enable_ept_ad_bits)
                return 1;
 
        kvm_mmu_unload(vcpu);
        kvm_init_shadow_ept_mmu(vcpu,
                        to_vmx(vcpu)->nested.nested_vmx_ept_caps &
                        VMX_EPT_EXECUTE_ONLY_BIT,
-                       eptp & VMX_EPT_AD_ENABLE_BIT);
+                       wants_ad);
        vcpu->arch.mmu.set_cr3           = vmx_set_cr3;
        vcpu->arch.mmu.get_cr3           = nested_ept_get_cr3;
        vcpu->arch.mmu.inject_page_fault = nested_ept_inject_page_fault;
@@ -9574,23 +9625,26 @@ static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu)
                      ns_to_ktime(preemption_timeout), HRTIMER_MODE_REL);
 }
 
+static int nested_vmx_check_io_bitmap_controls(struct kvm_vcpu *vcpu,
+                                              struct vmcs12 *vmcs12)
+{
+       if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
+               return 0;
+
+       if (!page_address_valid(vcpu, vmcs12->io_bitmap_a) ||
+           !page_address_valid(vcpu, vmcs12->io_bitmap_b))
+               return -EINVAL;
+
+       return 0;
+}
+
 static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu,
                                                struct vmcs12 *vmcs12)
 {
-       int maxphyaddr;
-       u64 addr;
-
        if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
                return 0;
 
-       if (vmcs12_read_any(vcpu, MSR_BITMAP, &addr)) {
-               WARN_ON(1);
-               return -EINVAL;
-       }
-       maxphyaddr = cpuid_maxphyaddr(vcpu);
-
-       if (!PAGE_ALIGNED(vmcs12->msr_bitmap) ||
-          ((addr + PAGE_SIZE) >> maxphyaddr))
+       if (!page_address_valid(vcpu, vmcs12->msr_bitmap))
                return -EINVAL;
 
        return 0;
@@ -10278,6 +10332,9 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
            vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT)
                return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
 
+       if (nested_vmx_check_io_bitmap_controls(vcpu, vmcs12))
+               return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
        if (nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12))
                return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
 
@@ -10414,8 +10471,6 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
                return 1;
        }
 
-       vmcs12->launch_state = 1;
-
        /*
         * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
         * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet
@@ -10728,8 +10783,7 @@ static void sync_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
                vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3);
        }
 
-       if (nested_cpu_has_ept(vmcs12))
-               vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS);
+       vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS);
 
        if (nested_cpu_has_vid(vmcs12))
                vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS);
@@ -10754,8 +10808,6 @@ static void sync_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
        vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP);
        if (kvm_mpx_supported())
                vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
-       if (nested_cpu_has_xsaves(vmcs12))
-               vmcs12->xss_exit_bitmap = vmcs_read64(XSS_EXIT_BITMAP);
 }
 
 /*
@@ -10792,6 +10844,8 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
        vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
 
        if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) {
+               vmcs12->launch_state = 1;
+
                /* vm_entry_intr_info_field is cleared on exit. Emulate this
                 * instead of reading the real value. */
                vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK;
@@ -11152,7 +11206,8 @@ static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc)
        vmx->hv_deadline_tsc = tscl + delta_tsc;
        vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
                        PIN_BASED_VMX_PREEMPTION_TIMER);
-       return 0;
+
+       return delta_tsc == 0;
 }
 
 static void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu)