x86/kvm/hyper-v: add reenlightenment MSRs support

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c8a0b545ac20..36ef3d8aad18 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -102,6 +102,8 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu);
 static void process_nmi(struct kvm_vcpu *vcpu);
 static void enter_smm(struct kvm_vcpu *vcpu);
 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
+static void store_regs(struct kvm_vcpu *vcpu);
+static int sync_regs(struct kvm_vcpu *vcpu);
 
 struct kvm_x86_ops *kvm_x86_ops __read_mostly;
 EXPORT_SYMBOL_GPL(kvm_x86_ops);
@@ -1032,7 +1034,11 @@ static u32 emulated_msrs[] = {
        HV_X64_MSR_VP_RUNTIME,
        HV_X64_MSR_SCONTROL,
        HV_X64_MSR_STIMER0_CONFIG,
-       HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
+       HV_X64_MSR_APIC_ASSIST_PAGE,
+       HV_X64_MSR_REENLIGHTENMENT_CONTROL, HV_X64_MSR_TSC_EMULATION_CONTROL,
+       HV_X64_MSR_TSC_EMULATION_STATUS,
+
+       MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
        MSR_KVM_PV_EOI_EN,
 
        MSR_IA32_TSC_ADJUST,
@@ -1049,6 +1055,64 @@ static u32 emulated_msrs[] = {
 
 static unsigned num_emulated_msrs;
 
+/*
+ * List of msr numbers which are used to expose MSR-based features that
+ * can be used by a hypervisor to validate requested CPU features.
+ */
+static u32 msr_based_features[] = {
+       MSR_IA32_VMX_BASIC,
+       MSR_IA32_VMX_TRUE_PINBASED_CTLS,
+       MSR_IA32_VMX_PINBASED_CTLS,
+       MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
+       MSR_IA32_VMX_PROCBASED_CTLS,
+       MSR_IA32_VMX_TRUE_EXIT_CTLS,
+       MSR_IA32_VMX_EXIT_CTLS,
+       MSR_IA32_VMX_TRUE_ENTRY_CTLS,
+       MSR_IA32_VMX_ENTRY_CTLS,
+       MSR_IA32_VMX_MISC,
+       MSR_IA32_VMX_CR0_FIXED0,
+       MSR_IA32_VMX_CR0_FIXED1,
+       MSR_IA32_VMX_CR4_FIXED0,
+       MSR_IA32_VMX_CR4_FIXED1,
+       MSR_IA32_VMX_VMCS_ENUM,
+       MSR_IA32_VMX_PROCBASED_CTLS2,
+       MSR_IA32_VMX_EPT_VPID_CAP,
+       MSR_IA32_VMX_VMFUNC,
+
+       MSR_F10H_DECFG,
+       MSR_IA32_UCODE_REV,
+};
+
+static unsigned int num_msr_based_features;
+
+static int kvm_get_msr_feature(struct kvm_msr_entry *msr)
+{
+       switch (msr->index) {
+       case MSR_IA32_UCODE_REV:
+               rdmsrl(msr->index, msr->data);
+               break;
+       default:
+               if (kvm_x86_ops->get_msr_feature(msr))
+                       return 1;
+       }
+       return 0;
+}
+
+static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
+{
+       struct kvm_msr_entry msr;
+       int r;
+
+       msr.index = index;
+       r = kvm_get_msr_feature(&msr);
+       if (r)
+               return r;
+
+       *data = msr.data;
+
+       return 0;
+}
+
 bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
        if (efer & efer_reserved_bits)
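
kvm_get_msr_feature() answers MSR_IA32_UCODE_REV itself by reading the host
MSR, and hands every other index to the vendor module through the new
kvm_x86_ops->get_msr_feature hook. As a rough sketch (illustrative only, not
the vmx/svm callbacks from this tree), a vendor hook is expected to fill in
msr->data for the feature MSRs it supports and return non-zero for the rest:

    /* Hypothetical vendor hook, for illustration. Returning non-zero
     * makes kvm_get_msr_feature() fail the query, which the filter
     * loop added to kvm_init_msr_list() later in this diff uses to
     * drop unsupported entries from msr_based_features[]. */
    static int example_get_msr_feature(struct kvm_msr_entry *msr)
    {
        switch (msr->index) {
        case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
            /* Report the host's VMX capability MSRs verbatim. */
            rdmsrl(msr->index, msr->data);
            return 0;
        default:
            return 1;   /* not a supported feature MSR */
        }
    }
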
@@ -2222,7 +2286,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 
        switch (msr) {
        case MSR_AMD64_NB_CFG:
-       case MSR_IA32_UCODE_REV:
        case MSR_IA32_UCODE_WRITE:
        case MSR_VM_HSAVE_PA:
        case MSR_AMD64_PATCH_LOADER:
@@ -2230,6 +2293,10 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        case MSR_AMD64_DC_CFG:
                break;
 
+       case MSR_IA32_UCODE_REV:
+               if (msr_info->host_initiated)
+                       vcpu->arch.microcode_version = data;
+               break;
        case MSR_EFER:
                return set_efer(vcpu, data);
        case MSR_K7_HWCR:
@@ -2390,6 +2457,9 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
        case HV_X64_MSR_CRASH_CTL:
        case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT:
+       case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
+       case HV_X64_MSR_TSC_EMULATION_CONTROL:
+       case HV_X64_MSR_TSC_EMULATION_STATUS:
                return kvm_hv_set_msr_common(vcpu, msr, data,
                                             msr_info->host_initiated);
        case MSR_IA32_BBL_CR_CTL3:
@@ -2516,6 +2586,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        case MSR_AMD64_DC_CFG:
                msr_info->data = 0;
                break;
+       case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
        case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
        case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
        case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1:
@@ -2525,7 +2596,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                msr_info->data = 0;
                break;
        case MSR_IA32_UCODE_REV:
-               msr_info->data = 0x100000000ULL;
+               msr_info->data = vcpu->arch.microcode_version;
                break;
        case MSR_MTRRcap:
        case 0x200 ... 0x2ff:
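
Together with the earlier set_msr hunk, MSR_IA32_UCODE_REV stops being a
hardcoded 0x100000000ULL: reads now return vcpu->arch.microcode_version, and
only host-initiated writes (the KVM_SET_MSRS ioctl, not a guest WRMSR) change
it. A minimal userspace sketch, assuming vcpu_fd is an existing vCPU fd:

    #define MSR_IA32_UCODE_REV 0x0000008b   /* value from asm/msr-index.h */

    /* Present a chosen microcode revision to the guest, e.g. so a
     * migrated guest keeps seeing the revision of its source host. */
    static int set_guest_ucode_rev(int vcpu_fd, __u64 rev)
    {
        struct {
            struct kvm_msrs hdr;
            struct kvm_msr_entry entry;
        } msrs = {
            .hdr.nmsrs   = 1,
            .entry.index = MSR_IA32_UCODE_REV,
            .entry.data  = rev,
        };

        /* KVM_SET_MSRS returns the number of MSRs actually set. */
        return ioctl(vcpu_fd, KVM_SET_MSRS, &msrs.hdr) == 1 ? 0 : -1;
    }
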
@@ -2619,6 +2690,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
        case HV_X64_MSR_CRASH_CTL:
        case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT:
+       case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
+       case HV_X64_MSR_TSC_EMULATION_CONTROL:
+       case HV_X64_MSR_TSC_EMULATION_STATUS:
                return kvm_hv_get_msr_common(vcpu,
                                             msr_info->index, &msr_info->data);
                break;
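
These are the MSRs the commit subject refers to: the three partition-wide
reenlightenment MSRs from the Hyper-V TLFS now route to
kvm_hv_set_msr_common()/kvm_hv_get_msr_common(), so userspace can save and
restore them like any other MSR when migrating a guest that runs Hyper-V on
top of KVM. A sketch of the save side, reading all three through one vCPU fd
(MSR indices as in the kernel's hyperv-tlfs.h):

    #define HV_X64_MSR_REENLIGHTENMENT_CONTROL 0x40000106
    #define HV_X64_MSR_TSC_EMULATION_CONTROL   0x40000107
    #define HV_X64_MSR_TSC_EMULATION_STATUS    0x40000108

    static int save_reenlightenment(int vcpu_fd, __u64 out[3])
    {
        static const __u32 idx[3] = {
            HV_X64_MSR_REENLIGHTENMENT_CONTROL,
            HV_X64_MSR_TSC_EMULATION_CONTROL,
            HV_X64_MSR_TSC_EMULATION_STATUS,
        };
        struct {
            struct kvm_msrs hdr;
            struct kvm_msr_entry entries[3];
        } msrs = { .hdr.nmsrs = 3 };
        int i;

        for (i = 0; i < 3; i++)
            msrs.entries[i].index = idx[i];
        if (ioctl(vcpu_fd, KVM_GET_MSRS, &msrs.hdr) != 3)
            return -1;
        for (i = 0; i < 3; i++)
            out[i] = msrs.entries[i].data;
        return 0;
    }
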
@@ -2680,13 +2754,11 @@ static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
                    int (*do_msr)(struct kvm_vcpu *vcpu,
                                  unsigned index, u64 *data))
 {
-       int i, idx;
+       int i;
 
-       idx = srcu_read_lock(&vcpu->kvm->srcu);
        for (i = 0; i < msrs->nmsrs; ++i)
                if (do_msr(vcpu, entries[i].index, &entries[i].data))
                        break;
-       srcu_read_unlock(&vcpu->kvm->srcu, idx);
 
        return i;
 }
@@ -2769,6 +2841,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
        case KVM_CAP_HYPERV_SYNIC:
        case KVM_CAP_HYPERV_SYNIC2:
        case KVM_CAP_HYPERV_VP_INDEX:
+       case KVM_CAP_HYPERV_EVENTFD:
        case KVM_CAP_PCI_SEGMENT:
        case KVM_CAP_DEBUGREGS:
        case KVM_CAP_X86_ROBUST_SINGLESTEP:
@@ -2785,8 +2858,12 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
        case KVM_CAP_SET_BOOT_CPU_ID:
        case KVM_CAP_SPLIT_IRQCHIP:
        case KVM_CAP_IMMEDIATE_EXIT:
+       case KVM_CAP_GET_MSR_FEATURES:
                r = 1;
                break;
+       case KVM_CAP_SYNC_REGS:
+               r = KVM_SYNC_X86_VALID_FIELDS;
+               break;
        case KVM_CAP_ADJUST_CLOCK:
                r = KVM_CLOCK_TSC_STABLE;
                break;
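
Note the asymmetry: most capabilities in this switch report 0/1, but
KVM_CAP_SYNC_REGS returns the bitmask of register classes that can be
exchanged through the kvm_run structure, so userspace should test individual
bits rather than truthiness. A small sketch, assuming kvm_fd is an open
/dev/kvm:

    int valid = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_SYNC_REGS);

    if (valid > 0 && (valid & KVM_SYNC_X86_REGS)) {
        /* GPRs can be mirrored via kvm_run->s.regs.regs; see the
         * store_regs()/sync_regs() helpers later in this diff. */
    }
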
@@ -2899,6 +2976,31 @@ long kvm_arch_dev_ioctl(struct file *filp,
                        goto out;
                r = 0;
                break;
+       case KVM_GET_MSR_FEATURE_INDEX_LIST: {
+               struct kvm_msr_list __user *user_msr_list = argp;
+               struct kvm_msr_list msr_list;
+               unsigned int n;
+
+               r = -EFAULT;
+               if (copy_from_user(&msr_list, user_msr_list, sizeof(msr_list)))
+                       goto out;
+               n = msr_list.nmsrs;
+               msr_list.nmsrs = num_msr_based_features;
+               if (copy_to_user(user_msr_list, &msr_list, sizeof(msr_list)))
+                       goto out;
+               r = -E2BIG;
+               if (n < msr_list.nmsrs)
+                       goto out;
+               r = -EFAULT;
+               if (copy_to_user(user_msr_list->indices, &msr_based_features,
+                                num_msr_based_features * sizeof(u32)))
+                       goto out;
+               r = 0;
+               break;
+       }
+       case KVM_GET_MSRS:
+               r = msr_io(NULL, argp, do_get_msr_feature, 1);
+               break;
        }
        default:
                r = -EINVAL;
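
The new dev ioctls make __msr_io() reachable with vcpu == NULL, which is why
the SRCU read lock moved out of __msr_io() and into the per-vCPU
KVM_GET_MSRS/KVM_SET_MSRS paths in the next hunk. From userspace, feature
MSRs are queried on the system fd with the usual two-call pattern: probe the
count, allocate, fetch the indices, then read values with KVM_GET_MSRS. A
minimal sketch:

    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int main(void)
    {
        int kvm = open("/dev/kvm", O_RDWR);
        struct kvm_msr_list probe = { .nmsrs = 0 };
        struct kvm_msr_list *list;
        unsigned int i;

        if (kvm < 0 ||
            ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_GET_MSR_FEATURES) <= 0)
            return 1;

        /* Undersized on purpose: the call fails with E2BIG but writes
         * the real count back into probe.nmsrs (see the handler above). */
        ioctl(kvm, KVM_GET_MSR_FEATURE_INDEX_LIST, &probe);

        list = calloc(1, sizeof(*list) + probe.nmsrs * sizeof(__u32));
        if (!list)
            return 1;
        list->nmsrs = probe.nmsrs;
        if (ioctl(kvm, KVM_GET_MSR_FEATURE_INDEX_LIST, list) < 0)
            return 1;

        for (i = 0; i < list->nmsrs; i++) {
            struct {
                struct kvm_msrs hdr;
                struct kvm_msr_entry entry;
            } msrs = { .hdr.nmsrs = 1, .entry.index = list->indices[i] };

            /* vcpu == NULL path: reports host-supported feature values. */
            if (ioctl(kvm, KVM_GET_MSRS, &msrs.hdr) == 1)
                printf("0x%08x = 0x%016llx\n", list->indices[i],
                       (unsigned long long)msrs.entry.data);
        }
        return 0;
    }
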
@@ -3636,12 +3738,18 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                r = 0;
                break;
        }
-       case KVM_GET_MSRS:
+       case KVM_GET_MSRS: {
+               int idx = srcu_read_lock(&vcpu->kvm->srcu);
                r = msr_io(vcpu, argp, do_get_msr, 1);
+               srcu_read_unlock(&vcpu->kvm->srcu, idx);
                break;
-       case KVM_SET_MSRS:
+       }
+       case KVM_SET_MSRS: {
+               int idx = srcu_read_lock(&vcpu->kvm->srcu);
                r = msr_io(vcpu, argp, do_set_msr, 0);
+               srcu_read_unlock(&vcpu->kvm->srcu, idx);
                break;
+       }
        case KVM_TPR_ACCESS_REPORTING: {
                struct kvm_tpr_access_ctl tac;
 
@@ -4410,6 +4518,15 @@ long kvm_arch_vm_ioctl(struct file *filp,
                        r = kvm_x86_ops->mem_enc_unreg_region(kvm, &region);
                break;
        }
+       case KVM_HYPERV_EVENTFD: {
+               struct kvm_hyperv_eventfd hvevfd;
+
+               r = -EFAULT;
+               if (copy_from_user(&hvevfd, argp, sizeof(hvevfd)))
+                       goto out;
+               r = kvm_vm_ioctl_hv_eventfd(kvm, &hvevfd);
+               break;
+       }
        default:
                r = -ENOTTY;
        }
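
KVM_HYPERV_EVENTFD wires a Hyper-V connection id to an eventfd, so a
guest-issued HvSignalEvent hypercall wakes the VMM without a round trip
through a heavyweight exit. A sketch using the uapi struct added with this
ioctl, assuming vm_fd is an existing VM fd:

    #include <sys/eventfd.h>

    int efd = eventfd(0, EFD_CLOEXEC);
    struct kvm_hyperv_eventfd hvevfd = {
        .conn_id = 0x1234,  /* arbitrary example connection id */
        .fd      = efd,
        .flags   = 0,       /* KVM_HYPERV_EVENTFD_DEASSIGN tears it down */
    };

    if (ioctl(vm_fd, KVM_HYPERV_EVENTFD, &hvevfd))
        perror("KVM_HYPERV_EVENTFD");
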
@@ -4464,6 +4581,19 @@ static void kvm_init_msr_list(void)
                j++;
        }
        num_emulated_msrs = j;
+
+       for (i = j = 0; i < ARRAY_SIZE(msr_based_features); i++) {
+               struct kvm_msr_entry msr;
+
+               msr.index = msr_based_features[i];
+               if (kvm_get_msr_feature(&msr))
+                       continue;
+
+               if (j < i)
+                       msr_based_features[j] = msr_based_features[i];
+               j++;
+       }
+       num_msr_based_features = j;
 }
 
 static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
@@ -7415,7 +7545,6 @@ static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
        return 0;
 }
 
-
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
        int r;
@@ -7441,6 +7570,17 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                goto out;
        }
 
+       if (vcpu->run->kvm_valid_regs & ~KVM_SYNC_X86_VALID_FIELDS) {
+               r = -EINVAL;
+               goto out;
+       }
+
+       if (vcpu->run->kvm_dirty_regs) {
+               r = sync_regs(vcpu);
+               if (r != 0)
+                       goto out;
+       }
+
        /* re-sync apic's tpr */
        if (!lapic_in_kernel(vcpu)) {
                if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) {
@@ -7465,6 +7605,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 out:
        kvm_put_guest_fpu(vcpu);
+       if (vcpu->run->kvm_valid_regs)
+               store_regs(vcpu);
        post_kvm_run_save(vcpu);
        kvm_sigset_deactivate(vcpu);
 
@@ -7472,10 +7614,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        return r;
 }
 
-int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+static void __get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
-       vcpu_load(vcpu);
-
        if (vcpu->arch.emulate_regs_need_sync_to_vcpu) {
                /*
                 * We are here if userspace calls get_regs() in the middle of
@@ -7508,15 +7648,18 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 
        regs->rip = kvm_rip_read(vcpu);
        regs->rflags = kvm_get_rflags(vcpu);
+}
 
+int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+       vcpu_load(vcpu);
+       __get_regs(vcpu, regs);
        vcpu_put(vcpu);
        return 0;
 }
 
-int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+static void __set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
-       vcpu_load(vcpu);
-
        vcpu->arch.emulate_regs_need_sync_from_vcpu = true;
        vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
 
@@ -7545,7 +7688,12 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
        vcpu->arch.exception.pending = false;
 
        kvm_make_request(KVM_REQ_EVENT, vcpu);
+}
 
+int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+       vcpu_load(vcpu);
+       __set_regs(vcpu, regs);
        vcpu_put(vcpu);
        return 0;
 }
@@ -7560,13 +7708,10 @@ void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
 }
 EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
 
-int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
-                                 struct kvm_sregs *sregs)
+static void __get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
 {
        struct desc_ptr dt;
 
-       vcpu_load(vcpu);
-
        kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
        kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
        kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
@@ -7597,7 +7742,13 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
        if (vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft)
                set_bit(vcpu->arch.interrupt.nr,
                        (unsigned long *)sregs->interrupt_bitmap);
+}
 
+int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+                                 struct kvm_sregs *sregs)
+{
+       vcpu_load(vcpu);
+       __get_sregs(vcpu, sregs);
        vcpu_put(vcpu);
        return 0;
 }
@@ -7692,8 +7843,7 @@ int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
        return 0;
 }
 
-int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
-                                 struct kvm_sregs *sregs)
+static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
 {
        struct msr_data apic_base_msr;
        int mmu_reset_needed = 0;
@@ -7701,8 +7851,6 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
        struct desc_ptr dt;
        int ret = -EINVAL;
 
-       vcpu_load(vcpu);
-
        if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
                        (sregs->cr4 & X86_CR4_OSXSAVE))
                goto out;
@@ -7781,6 +7929,16 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 
        ret = 0;
 out:
+       return ret;
+}
+
+int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+                                 struct kvm_sregs *sregs)
+{
+       int ret;
+
+       vcpu_load(vcpu);
+       ret = __set_sregs(vcpu, sregs);
        vcpu_put(vcpu);
        return ret;
 }
@@ -7907,6 +8065,45 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
        return 0;
 }
 
+static void store_regs(struct kvm_vcpu *vcpu)
+{
+       BUILD_BUG_ON(sizeof(struct kvm_sync_regs) > SYNC_REGS_SIZE_BYTES);
+
+       if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_REGS)
+               __get_regs(vcpu, &vcpu->run->s.regs.regs);
+
+       if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_SREGS)
+               __get_sregs(vcpu, &vcpu->run->s.regs.sregs);
+
+       if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_EVENTS)
+               kvm_vcpu_ioctl_x86_get_vcpu_events(
+                               vcpu, &vcpu->run->s.regs.events);
+}
+
+static int sync_regs(struct kvm_vcpu *vcpu)
+{
+       if (vcpu->run->kvm_dirty_regs & ~KVM_SYNC_X86_VALID_FIELDS)
+               return -EINVAL;
+
+       if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_REGS) {
+               __set_regs(vcpu, &vcpu->run->s.regs.regs);
+               vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_REGS;
+       }
+       if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_SREGS) {
+               if (__set_sregs(vcpu, &vcpu->run->s.regs.sregs))
+                       return -EINVAL;
+               vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_SREGS;
+       }
+       if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_EVENTS) {
+               if (kvm_vcpu_ioctl_x86_set_vcpu_events(
+                               vcpu, &vcpu->run->s.regs.events))
+                       return -EINVAL;
+               vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_EVENTS;
+       }
+
+       return 0;
+}
+
 static void fx_init(struct kvm_vcpu *vcpu)
 {
        fpstate_init(&vcpu->arch.guest_fpu.state);
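
store_regs() and sync_regs() above are the two halves of KVM_CAP_SYNC_REGS,
built on the __get_regs()/__set_regs()/__get_sregs()/__set_sregs() helpers
split out earlier in this diff: on every return from KVM_RUN the classes
requested in kvm_valid_regs are mirrored into kvm_run->s.regs, and classes
flagged in kvm_dirty_regs are pushed back into the vCPU before the next
entry, saving a KVM_GET_REGS/KVM_SET_REGS ioctl pair per exit. A hedged usage
sketch; run is the mmap()ed kvm_run area and TRAP_RIP is a hypothetical
address the VMM wants to step over:

    #define TRAP_RIP 0x100000ULL    /* hypothetical, for illustration */

    void run_loop(int vcpu_fd, struct kvm_run *run)
    {
        /* Ask KVM to publish GPRs in run->s.regs on every exit. */
        run->kvm_valid_regs = KVM_SYNC_X86_REGS;

        while (ioctl(vcpu_fd, KVM_RUN, 0) == 0) {
            /* store_regs() filled run->s.regs.regs before returning. */
            if (run->s.regs.regs.rip == TRAP_RIP) {
                run->s.regs.regs.rip += 2;  /* skip a 2-byte insn */
                /* sync_regs() pushes this back on the next entry
                 * and clears the dirty bit. */
                run->kvm_dirty_regs = KVM_SYNC_X86_REGS;
            }
            if (run->exit_reason == KVM_EXIT_HLT)
                break;
        }
    }
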
@@ -8017,6 +8214,8 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 
 void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 {
+       kvm_lapic_reset(vcpu, init_event);
+
        vcpu->arch.hflags = 0;
 
        vcpu->arch.smi_pending = 0;
@@ -8360,7 +8559,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 
        raw_spin_lock_init(&kvm->arch.tsc_write_lock);
        mutex_init(&kvm->arch.apic_map_lock);
-       mutex_init(&kvm->arch.hyperv.hv_lock);
        spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock);
 
        kvm->arch.kvmclock_offset = -ktime_get_boot_ns();
@@ -8369,6 +8567,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
        INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn);
        INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn);
 
+       kvm_hv_init_vm(kvm);
        kvm_page_track_init(kvm);
        kvm_mmu_init_vm(kvm);
 
@@ -8460,10 +8659,8 @@ int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
                        return r;
        }
 
-       if (!size) {
-               r = vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE);
-               WARN_ON(r < 0);
-       }
+       if (!size)
+               vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE);
 
        return 0;
 }
@@ -8501,6 +8698,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
        kvfree(rcu_dereference_check(kvm->arch.apic_map, 1));
        kvm_mmu_uninit_vm(kvm);
        kvm_page_track_cleanup(kvm);
+       kvm_hv_destroy_vm(kvm);
 }
 
 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,