kvm: nVMX: Introduce KVM_CAP_NESTED_STATE

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index e30da9a2430cad425c56decdb5dd284c381fd9bc..4be6486173b7d4195e1681e91e14c4bfdf305c85 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -802,7 +802,6 @@ struct vcpu_vmx {
 #endif
                int           gs_ldt_reload_needed;
                int           fs_reload_needed;
-               u64           msr_host_bndcfgs;
        } host_state;
        struct {
                int vm86_active;
@@ -2630,8 +2629,6 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
        vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel));
        vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel));
 #endif
-       if (boot_cpu_has(X86_FEATURE_MPX))
-               rdmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs);
        for (i = 0; i < vmx->save_nmsrs; ++i)
                kvm_set_shared_msr(vmx->guest_msrs[i].index,
                                   vmx->guest_msrs[i].data,
@@ -2669,8 +2666,6 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
 #ifdef CONFIG_X86_64
        wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
 #endif
-       if (vmx->host_state.msr_host_bndcfgs)
-               wrmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs);
        load_fixmap_gdt(raw_smp_processor_id());
 }
 
@@ -7502,6 +7497,7 @@ static void vmx_enable_tdp(void)
 
 static __init int hardware_setup(void)
 {
+       unsigned long host_bndcfgs;
        int r = -ENOMEM, i;
 
        rdmsrl_safe(MSR_EFER, &host_efer);
@@ -7526,6 +7522,11 @@ static __init int hardware_setup(void)
        if (boot_cpu_has(X86_FEATURE_NX))
                kvm_enable_efer_bits(EFER_NX);
 
+       if (boot_cpu_has(X86_FEATURE_MPX)) {
+               rdmsrl(MSR_IA32_BNDCFGS, host_bndcfgs);
+               WARN_ONCE(host_bndcfgs, "KVM: BNDCFGS in host will be lost");
+       }
+
        if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() ||
                !(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global()))
                enable_vpid = 0;
@@ -7588,6 +7589,11 @@ static __init int hardware_setup(void)
        else
                kvm_disable_tdp();
 
+       if (!nested) {
+               kvm_x86_ops->get_nested_state = NULL;
+               kvm_x86_ops->set_nested_state = NULL;
+       }
+
        /*
         * Only enable PML when hardware supports PML feature, and both EPT
         * and EPT A/D bit features are enabled -- PML depends on them to work.
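
The `if (!nested)` block above is what keeps the new ioctls from being advertised when nesting is disabled: with both callbacks cleared, the capability check on the common x86 side reports 0. A sketch of how that check is presumably wired up in kvm_vm_ioctl_check_extension() (arch/x86/kvm/x86.c); the wrapper is not part of this file, which only supplies the callback it calls (note the !vcpu fast path in vmx_get_nested_state() further down, which merely reports the maximum state size):

	/*
	 * Assumed shape of the capability check in arch/x86/kvm/x86.c (not
	 * part of this diff).  A zero return means KVM_CAP_NESTED_STATE and
	 * the two ioctls are unavailable, which is what clearing the
	 * callbacks above achieves when the "nested" module parameter is off.
	 */
	case KVM_CAP_NESTED_STATE:
		r = kvm_x86_ops->get_nested_state ?
			kvm_x86_ops->get_nested_state(NULL, NULL, 0) : 0;
		break;
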
@@ -10659,9 +10665,9 @@ static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
 static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
                                                 struct vmcs12 *vmcs12);
 
-static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu,
-                                       struct vmcs12 *vmcs12)
+static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
 {
+       struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        struct page *page;
        u64 hpa;
@@ -11773,12 +11779,17 @@ static int check_vmentry_postreqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
        return 0;
 }
 
-static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu)
+/*
+ * If exit_qual is NULL, this is being called from state restore (either RSM
+ * or KVM_SET_NESTED_STATE).  Otherwise it's called from vmlaunch/vmresume.
+ */
+static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
-       u32 exit_qual;
-       int r;
+       bool from_vmentry = !!exit_qual;
+       u32 dummy_exit_qual;
+       int r = 0;
 
        enter_guest_mode(vcpu);
 
@@ -11792,17 +11803,28 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu)
                vcpu->arch.tsc_offset += vmcs12->tsc_offset;
 
        r = EXIT_REASON_INVALID_STATE;
-       if (prepare_vmcs02(vcpu, vmcs12, &exit_qual))
+       if (prepare_vmcs02(vcpu, vmcs12, from_vmentry ? exit_qual : &dummy_exit_qual))
                goto fail;
 
-       nested_get_vmcs12_pages(vcpu, vmcs12);
+       if (from_vmentry) {
+               nested_get_vmcs12_pages(vcpu);
 
-       r = EXIT_REASON_MSR_LOAD_FAIL;
-       exit_qual = nested_vmx_load_msr(vcpu,
-                                       vmcs12->vm_entry_msr_load_addr,
-                                       vmcs12->vm_entry_msr_load_count);
-       if (exit_qual)
-               goto fail;
+               r = EXIT_REASON_MSR_LOAD_FAIL;
+               *exit_qual = nested_vmx_load_msr(vcpu,
+                                                vmcs12->vm_entry_msr_load_addr,
+                                                vmcs12->vm_entry_msr_load_count);
+               if (*exit_qual)
+                       goto fail;
+       } else {
+               /*
+                * The MMU is not initialized to point at the right entities yet and
+                * "get pages" would need to read data from the guest (i.e. we will
+                * need to perform gpa to hpa translation). Request a call
+                * to nested_get_vmcs12_pages before the next VM-entry.  The MSRs
+                * have already been set at vmentry time and should not be reset.
+                */
+               kvm_make_request(KVM_REQ_GET_VMCS12_PAGES, vcpu);
+       }
 
        /*
         * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
@@ -11817,8 +11839,7 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu)
                vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
        leave_guest_mode(vcpu);
        vmx_switch_vmcs(vcpu, &vmx->vmcs01);
-       nested_vmx_entry_failure(vcpu, vmcs12, r, exit_qual);
-       return 1;
+       return r;
 }
 
 /*
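
The deferral via KVM_REQ_GET_VMCS12_PAGES above only raises a request bit; the consumer lives on the common x86 side. A minimal sketch of how the request is presumably handled in vcpu_enter_guest() (arch/x86/kvm/x86.c), using the ->get_vmcs12_pages callback installed at the bottom of this patch; its exact placement in the request-processing loop is an assumption, not something this diff shows:

	/* Assumed consumer in vcpu_enter_guest(), arch/x86/kvm/x86.c. */
	if (kvm_check_request(KVM_REQ_GET_VMCS12_PAGES, vcpu))
		kvm_x86_ops->get_vmcs12_pages(vcpu);
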
@@ -11895,10 +11916,11 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
         */
 
        vmx->nested.nested_run_pending = 1;
-       ret = enter_vmx_non_root_mode(vcpu);
+       ret = enter_vmx_non_root_mode(vcpu, &exit_qual);
        if (ret) {
+               nested_vmx_entry_failure(vcpu, vmcs12, ret, exit_qual);
                vmx->nested.nested_run_pending = 0;
-               return ret;
+               return 1;
        }
 
        /*
@@ -12984,7 +13006,7 @@ static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
 
        if (vmx->nested.smm.guest_mode) {
                vcpu->arch.hflags &= ~HF_SMM_MASK;
-               ret = enter_vmx_non_root_mode(vcpu);
+               ret = enter_vmx_non_root_mode(vcpu, NULL);
                vcpu->arch.hflags |= HF_SMM_MASK;
                if (ret)
                        return ret;
@@ -12999,6 +13021,170 @@ static int enable_smi_window(struct kvm_vcpu *vcpu)
        return 0;
 }
 
+static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
+                               struct kvm_nested_state __user *user_kvm_nested_state,
+                               u32 user_data_size)
+{
+       struct vcpu_vmx *vmx;
+       struct vmcs12 *vmcs12;
+       struct kvm_nested_state kvm_state = {
+               .flags = 0,
+               .format = 0,
+               .size = sizeof(kvm_state),
+               .vmx.vmxon_pa = -1ull,
+               .vmx.vmcs_pa = -1ull,
+       };
+
+       if (!vcpu)
+               return kvm_state.size + 2 * VMCS12_SIZE;
+
+       vmx = to_vmx(vcpu);
+       vmcs12 = get_vmcs12(vcpu);
+       if (nested_vmx_allowed(vcpu) &&
+           (vmx->nested.vmxon || vmx->nested.smm.vmxon)) {
+               kvm_state.vmx.vmxon_pa = vmx->nested.vmxon_ptr;
+               kvm_state.vmx.vmcs_pa = vmx->nested.current_vmptr;
+
+               if (vmx->nested.current_vmptr != -1ull)
+                       kvm_state.size += VMCS12_SIZE;
+
+               if (vmx->nested.smm.vmxon)
+                       kvm_state.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON;
+
+               if (vmx->nested.smm.guest_mode)
+                       kvm_state.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE;
+
+               if (is_guest_mode(vcpu)) {
+                       kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;
+
+                       if (vmx->nested.nested_run_pending)
+                               kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
+               }
+       }
+
+       if (user_data_size < kvm_state.size)
+               goto out;
+
+       if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
+               return -EFAULT;
+
+       if (vmx->nested.current_vmptr == -1ull)
+               goto out;
+
+       /*
+        * When running L2, the authoritative vmcs12 state is in the
+        * vmcs02. When running L1, the authoritative vmcs12 state is
+        * in the shadow vmcs linked to vmcs01, unless
+        * sync_shadow_vmcs is set, in which case, the authoritative
+        * vmcs12 state is in the vmcs12 already.
+        */
+       if (is_guest_mode(vcpu))
+               sync_vmcs12(vcpu, vmcs12);
+       else if (enable_shadow_vmcs && !vmx->nested.sync_shadow_vmcs)
+               copy_shadow_to_vmcs12(vmx);
+
+       if (copy_to_user(user_kvm_nested_state->data, vmcs12, sizeof(*vmcs12)))
+               return -EFAULT;
+
+out:
+       return kvm_state.size;
+}
+
+static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
+                               struct kvm_nested_state __user *user_kvm_nested_state,
+                               struct kvm_nested_state *kvm_state)
+{
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+       struct vmcs12 *vmcs12;
+       u32 exit_qual;
+       int ret;
+
+       if (kvm_state->format != 0)
+               return -EINVAL;
+
+       if (!nested_vmx_allowed(vcpu))
+               return kvm_state->vmx.vmxon_pa == -1ull ? 0 : -EINVAL;
+
+       if (kvm_state->vmx.vmxon_pa == -1ull) {
+               if (kvm_state->vmx.smm.flags)
+                       return -EINVAL;
+
+               if (kvm_state->vmx.vmcs_pa != -1ull)
+                       return -EINVAL;
+
+               vmx_leave_nested(vcpu);
+               return 0;
+       }
+
+       if (!page_address_valid(vcpu, kvm_state->vmx.vmxon_pa))
+               return -EINVAL;
+
+       if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12))
+               return -EINVAL;
+
+       if (kvm_state->vmx.vmcs_pa == kvm_state->vmx.vmxon_pa ||
+           !page_address_valid(vcpu, kvm_state->vmx.vmcs_pa))
+               return -EINVAL;
+
+       if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
+           (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
+               return -EINVAL;
+
+       if (kvm_state->vmx.smm.flags &
+           ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON))
+               return -EINVAL;
+
+       if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
+           !(kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON))
+               return -EINVAL;
+
+       vmx_leave_nested(vcpu);
+       if (kvm_state->vmx.vmxon_pa == -1ull)
+               return 0;
+
+       vmx->nested.vmxon_ptr = kvm_state->vmx.vmxon_pa;
+       ret = enter_vmx_operation(vcpu);
+       if (ret)
+               return ret;
+
+       set_current_vmptr(vmx, kvm_state->vmx.vmcs_pa);
+
+       if (kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) {
+               vmx->nested.smm.vmxon = true;
+               vmx->nested.vmxon = false;
+
+               if (kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE)
+                       vmx->nested.smm.guest_mode = true;
+       }
+
+       vmcs12 = get_vmcs12(vcpu);
+       if (copy_from_user(vmcs12, user_kvm_nested_state->data, sizeof(*vmcs12)))
+               return -EFAULT;
+
+       if (vmcs12->revision_id != VMCS12_REVISION)
+               return -EINVAL;
+
+       if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
+               return 0;
+
+       vmx->nested.nested_run_pending =
+               !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);
+
+       if (check_vmentry_prereqs(vcpu, vmcs12) ||
+           check_vmentry_postreqs(vcpu, vmcs12, &exit_qual))
+               return -EINVAL;
+
+       if (kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING)
+               vmx->nested.nested_run_pending = 1;
+
+       vmx->nested.dirty_vmcs12 = true;
+       ret = enter_vmx_non_root_mode(vcpu, NULL);
+       if (ret)
+               return -EINVAL;
+
+       return 0;
+}
+
 static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
        .cpu_has_kvm_support = cpu_has_kvm_support,
        .disabled_by_bios = vmx_disabled_by_bios,
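
Taken together, vmx_get_nested_state() and vmx_set_nested_state() let userspace save and restore the L1/L2 nesting state, e.g. across live migration. A hypothetical userspace flow, sketched from the semantics visible above; the buffer-size negotiation (setting .size before the GET, sizing the buffer from KVM_CHECK_EXTENSION(KVM_CAP_NESTED_STATE)) and the error behaviour of the ioctl wrappers follow the KVM API conventions rather than anything in this file:

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdlib.h>

/* max_size would come from ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_NESTED_STATE). */
static struct kvm_nested_state *save_nested_state(int vcpu_fd, int max_size)
{
	struct kvm_nested_state *state = calloc(1, max_size);

	if (!state)
		return NULL;
	state->size = max_size;		/* tell KVM how much room we have */
	if (ioctl(vcpu_fd, KVM_GET_NESTED_STATE, state) < 0) {
		free(state);		/* e.g. the buffer was too small */
		return NULL;
	}
	return state;			/* header plus vmcs12, if a VMCS was loaded */
}

static int restore_nested_state(int vcpu_fd, struct kvm_nested_state *state)
{
	/* Must run after CPUID/MSR setup so that nested VMX is allowed. */
	return ioctl(vcpu_fd, KVM_SET_NESTED_STATE, state);
}
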
@@ -13133,6 +13319,10 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 
        .setup_mce = vmx_setup_mce,
 
+       .get_nested_state = vmx_get_nested_state,
+       .set_nested_state = vmx_set_nested_state,
+       .get_vmcs12_pages = nested_get_vmcs12_pages,
+
        .smi_allowed = vmx_smi_allowed,
        .pre_enter_smm = vmx_pre_enter_smm,
        .pre_leave_smm = vmx_pre_leave_smm,
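
The three new vmx_x86_ops entries imply matching members in struct kvm_x86_ops. Their assumed declarations in arch/x86/include/asm/kvm_host.h, inferred from the function definitions earlier in this patch (the companion header change is not part of this diff):

	int (*get_nested_state)(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				u32 user_data_size);
	int (*set_nested_state)(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				struct kvm_nested_state *kvm_state);
	void (*get_vmcs12_pages)(struct kvm_vcpu *vcpu);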