kvm: nVMX: Introduce KVM_CAP_NESTED_STATE
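
Nested-virtualization state (for VMX, chiefly the cached vmcs12 and whether a
nested run is pending) lives only inside the kernel, so it is lost across a
userspace save/restore or live-migration cycle. This change exposes it through
a new capability, KVM_CAP_NESTED_STATE, and a pair of vcpu ioctls,
KVM_GET_NESTED_STATE and KVM_SET_NESTED_STATE. Along the way, host-initiated
MSR accesses are allowed to succeed where a guest access would fault, so that
blanket MSR save/restore by the VMM keeps working, and
MSR_IA32_ARCH_CAPABILITIES joins the feature-MSR list.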
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 0046aa70205aa2dfbc0577065250be717ca25b4e..1b14c4a654c32baf770f13307e698407bb0c2989 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1097,6 +1097,7 @@ static u32 msr_based_features[] = {
 
        MSR_F10H_DECFG,
        MSR_IA32_UCODE_REV,
+       MSR_IA32_ARCH_CAPABILITIES,
 };
 
 static unsigned int num_msr_based_features;
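
With this, MSR_IA32_ARCH_CAPABILITIES becomes enumerable through KVM's
feature-MSR interface. A minimal userspace sketch of reading it via
KVM_GET_MSRS on the /dev/kvm system fd (assuming a kernel with the feature-MSR
ioctls; the MSR index constant is defined locally and error handling is pared
down):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/kvm.h>

#define MSR_IA32_ARCH_CAPABILITIES 0x10a	/* as in asm/msr-index.h */

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	struct {
		struct kvm_msrs hdr;
		struct kvm_msr_entry entry;	/* lands at hdr.entries[0] */
	} req;

	if (kvm < 0)
		return 1;
	memset(&req, 0, sizeof(req));
	req.hdr.nmsrs = 1;
	req.entry.index = MSR_IA32_ARCH_CAPABILITIES;

	/* On the system fd, KVM_GET_MSRS reads feature MSRs; the return
	 * value is the number of MSRs successfully read. */
	if (ioctl(kvm, KVM_GET_MSRS, &req) == 1)
		printf("ARCH_CAPABILITIES = 0x%llx\n",
		       (unsigned long long)req.entry.data);
	close(kvm);
	return 0;
}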
@@ -1105,7 +1106,8 @@ static int kvm_get_msr_feature(struct kvm_msr_entry *msr)
 {
        switch (msr->index) {
        case MSR_IA32_UCODE_REV:
-               rdmsrl(msr->index, msr->data);
+       case MSR_IA32_ARCH_CAPABILITIES:
+               rdmsrl_safe(msr->index, &msr->data);
                break;
        default:
                if (kvm_x86_ops->get_msr_feature(msr))
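
The switch to rdmsrl_safe() matters here: MSR_IA32_ARCH_CAPABILITIES only
exists on hosts with the corresponding hardware/microcode support, and
rdmsrl_safe() recovers from the #GP that a plain rdmsrl() would trigger on
hosts that lack it.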
@@ -2158,10 +2160,11 @@ static int set_msr_mce(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                vcpu->arch.mcg_status = data;
                break;
        case MSR_IA32_MCG_CTL:
-               if (!(mcg_cap & MCG_CTL_P))
+               if (!(mcg_cap & MCG_CTL_P) &&
+                   (data || !msr_info->host_initiated))
                        return 1;
                if (data != 0 && data != ~(u64)0)
-                       return -1;
+                       return 1;
                vcpu->arch.mcg_ctl = data;
                break;
        default:
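
Two fixes in one hunk: a host-initiated write of 0 to MSR_IA32_MCG_CTL is now
accepted even when MCG_CAP does not advertise MCG_CTL_P, so a VMM that blindly
restores its full MSR list no longer fails, while guest writes still fault;
and the bogus -1 return for an invalid value becomes 1, the convention in the
MSR emulation paths for "inject #GP into the guest".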
@@ -2549,7 +2552,7 @@ int kvm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 }
 EXPORT_SYMBOL_GPL(kvm_get_msr);
 
-static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
+static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
 {
        u64 data;
        u64 mcg_cap = vcpu->arch.mcg_cap;
@@ -2564,7 +2567,7 @@ static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
                data = vcpu->arch.mcg_cap;
                break;
        case MSR_IA32_MCG_CTL:
-               if (!(mcg_cap & MCG_CTL_P))
+               if (!(mcg_cap & MCG_CTL_P) && !host)
                        return 1;
                data = vcpu->arch.mcg_ctl;
                break;
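
get_msr_mce() grows a host flag for the symmetric read case: a host-initiated
read of MSR_IA32_MCG_CTL succeeds even without MCG_CTL_P, while a guest read
still faults. The callers below pass msr_info->host_initiated through, and the
Hyper-V MSR path gains the same plumbing.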
@@ -2697,7 +2700,8 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        case MSR_IA32_MCG_CTL:
        case MSR_IA32_MCG_STATUS:
        case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
-               return get_msr_mce(vcpu, msr_info->index, &msr_info->data);
+               return get_msr_mce(vcpu, msr_info->index, &msr_info->data,
+                                  msr_info->host_initiated);
        case MSR_K7_CLK_CTL:
                /*
                 * Provide expected ramp-up count for K7. All other
@@ -2718,7 +2722,8 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        case HV_X64_MSR_TSC_EMULATION_CONTROL:
        case HV_X64_MSR_TSC_EMULATION_STATUS:
                return kvm_hv_get_msr_common(vcpu,
-                                            msr_info->index, &msr_info->data);
+                                            msr_info->index, &msr_info->data,
+                                            msr_info->host_initiated);
                break;
        case MSR_IA32_BBL_CR_CTL3:
                /* This legacy MSR exists but isn't fully documented in current
@@ -2942,6 +2947,10 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
        case KVM_CAP_X2APIC_API:
                r = KVM_X2APIC_API_VALID_FLAGS;
                break;
+       case KVM_CAP_NESTED_STATE:
+               r = kvm_x86_ops->get_nested_state ?
+                       kvm_x86_ops->get_nested_state(NULL, 0, 0) : 0;
+               break;
        default:
                break;
        }
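
Userspace probes for the feature with KVM_CHECK_EXTENSION; note that a
non-zero result is not just a boolean but the maximum buffer size that
KVM_GET_NESTED_STATE may need. A sketch, with a hypothetical helper name:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/kvm.h>

/* Returns the maximum nested-state size in bytes, or 0 if unsupported. */
int nested_state_max_size(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	int r;

	if (kvm < 0)
		return 0;
	r = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_NESTED_STATE);
	close(kvm);
	return r > 0 ? r : 0;
}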
@@ -3958,6 +3967,56 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
                break;
        }
+       case KVM_GET_NESTED_STATE: {
+               struct kvm_nested_state __user *user_kvm_nested_state = argp;
+               u32 user_data_size;
+
+               r = -EINVAL;
+               if (!kvm_x86_ops->get_nested_state)
+                       break;
+
+               BUILD_BUG_ON(sizeof(user_data_size) != sizeof(user_kvm_nested_state->size));
+               if (get_user(user_data_size, &user_kvm_nested_state->size))
+                       return -EFAULT;
+
+               r = kvm_x86_ops->get_nested_state(vcpu, user_kvm_nested_state,
+                                                 user_data_size);
+               if (r < 0)
+                       return r;
+
+               if (r > user_data_size) {
+                       if (put_user(r, &user_kvm_nested_state->size))
+                               return -EFAULT;
+                       return -E2BIG;
+               }
+               r = 0;
+               break;
+       }
+       case KVM_SET_NESTED_STATE: {
+               struct kvm_nested_state __user *user_kvm_nested_state = argp;
+               struct kvm_nested_state kvm_state;
+
+               r = -EINVAL;
+               if (!kvm_x86_ops->set_nested_state)
+                       break;
+
+               if (copy_from_user(&kvm_state, user_kvm_nested_state, sizeof(kvm_state)))
+                       return -EFAULT;
+
+               if (kvm_state.size < sizeof(kvm_state))
+                       return -EINVAL;
+
+               if (kvm_state.flags &
+                   ~(KVM_STATE_NESTED_RUN_PENDING | KVM_STATE_NESTED_GUEST_MODE))
+                       return -EINVAL;
+
+               /* nested_run_pending implies guest_mode.  */
+               if (kvm_state.flags == KVM_STATE_NESTED_RUN_PENDING)
+                       return -EINVAL;
+
+               r = kvm_x86_ops->set_nested_state(vcpu, user_kvm_nested_state, &kvm_state);
+               break;
+       }
        default:
                r = -EINVAL;
        }
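
Seen from userspace, the two ioctls pair into a save/restore cycle: the VMM
sets kvm_nested_state.size to its buffer capacity before KVM_GET_NESTED_STATE
(on -E2BIG the kernel has written back the size it actually needs), then feeds
the saved blob to KVM_SET_NESTED_STATE on the destination vcpu. A sketch with
hypothetical helper names, error handling abbreviated:

#include <errno.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* max_size comes from KVM_CHECK_EXTENSION(KVM_CAP_NESTED_STATE). */
struct kvm_nested_state *save_nested_state(int vcpu_fd, int max_size)
{
	struct kvm_nested_state *state = calloc(1, max_size);

	if (!state)
		return NULL;
	state->size = max_size;		/* capacity in, actual size out */
	if (ioctl(vcpu_fd, KVM_GET_NESTED_STATE, state) < 0) {
		/* On E2BIG, state->size now holds the required size. */
		free(state);
		return NULL;
	}
	return state;
}

int restore_nested_state(int vcpu_fd, struct kvm_nested_state *state)
{
	return ioctl(vcpu_fd, KVM_SET_NESTED_STATE, state);
}

Restore must reject unknown flags and undersized headers, as the
KVM_SET_NESTED_STATE case above does, since the blob may come from a
different (e.g. newer) kernel.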
@@ -7255,6 +7314,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
        bool req_immediate_exit = false;
 
        if (kvm_request_pending(vcpu)) {
+               if (kvm_check_request(KVM_REQ_GET_VMCS12_PAGES, vcpu))
+                       kvm_x86_ops->get_vmcs12_pages(vcpu);
                if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
                        kvm_mmu_unload(vcpu);
                if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
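
The new KVM_REQ_GET_VMCS12_PAGES request lets the mapping of guest pages
referenced by vmcs12 be deferred to the next vcpu entry rather than done
inside KVM_SET_NESTED_STATE itself, presumably so that restore does not
depend on the guest memory map already being final at ioctl time.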