KVM: X86: Emulate MSR_IA32_MISC_ENABLE MWAIT bit
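
The MISC_ENABLE hunk below only refreshes guest CPUID when the KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT quirk has been disabled for the VM, the MWAIT-enable bit actually toggles, and the vCPU advertises SSE3. As a rough illustration, here is a minimal, hypothetical userspace sketch of how a VMM could exercise that path, assuming the standard KVM_ENABLE_CAP and KVM_SET_MSRS ioctls and omitting all VM/vCPU setup and error handling:

/*
 * Hypothetical userspace sketch, not part of the patch: with the
 * KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT quirk disabled for the VM, a
 * host-initiated IA32_MISC_ENABLE write that flips the MWAIT-enable
 * bit should make KVM refresh the vCPU's CPUID MONITOR/MWAIT bit.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Values mirror arch/x86/include/asm/msr-index.h. */
#define MSR_IA32_MISC_ENABLE            0x000001a0
#define MSR_IA32_MISC_ENABLE_MWAIT      (1ULL << 18)    /* ENABLE MONITOR FSM */

static void let_guest_see_mwait(int vm_fd, int vcpu_fd)
{
        struct kvm_enable_cap cap = {
                .cap  = KVM_CAP_DISABLE_QUIRKS,
                .args = { KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT },
        };
        struct {
                struct kvm_msrs hdr;
                struct kvm_msr_entry entry;
        } msrs = {
                .hdr.nmsrs   = 1,
                .entry.index = MSR_IA32_MISC_ENABLE,
                .entry.data  = MSR_IA32_MISC_ENABLE_MWAIT,
        };

        /* Opt out of the legacy behaviour guarded by the quirk. */
        ioctl(vm_fd, KVM_ENABLE_CAP, &cap);

        /* Toggling the bit takes the new branch in kvm_set_msr_common(). */
        ioctl(vcpu_fd, KVM_SET_MSRS, &msrs);
}

Whether the guest then sees MONITOR/MWAIT still depends on the vCPU advertising SSE3, per the guest_cpuid_has(vcpu, X86_FEATURE_XMM3) check in the hunk.
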
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index b9591abde62a1bcf22c9bba74d7f5eb3f019cbe0..10feed6a01eb8fd83ba6a4f89cbde5073d31d565 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -143,7 +143,7 @@ module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR);
  * tuning, i.e. allows priveleged userspace to set an exact advancement time.
  */
 static int __read_mostly lapic_timer_advance_ns = -1;
-module_param(lapic_timer_advance_ns, uint, S_IRUGO | S_IWUSR);
+module_param(lapic_timer_advance_ns, int, S_IRUGO | S_IWUSR);
 
 static bool __read_mostly vector_hashing = true;
 module_param(vector_hashing, bool, S_IRUGO);
@@ -1100,15 +1100,15 @@ EXPORT_SYMBOL_GPL(kvm_get_dr);
 
 bool kvm_rdpmc(struct kvm_vcpu *vcpu)
 {
-       u32 ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
+       u32 ecx = kvm_rcx_read(vcpu);
        u64 data;
        int err;
 
        err = kvm_pmu_rdpmc(vcpu, ecx, &data);
        if (err)
                return err;
-       kvm_register_write(vcpu, VCPU_REGS_RAX, (u32)data);
-       kvm_register_write(vcpu, VCPU_REGS_RDX, data >> 32);
+       kvm_rax_write(vcpu, (u32)data);
+       kvm_rdx_write(vcpu, data >> 32);
        return err;
 }
 EXPORT_SYMBOL_GPL(kvm_rdpmc);
@@ -1174,6 +1174,9 @@ static u32 emulated_msrs[] = {
        MSR_PLATFORM_INFO,
        MSR_MISC_FEATURES_ENABLES,
        MSR_AMD64_VIRT_SPEC_CTRL,
+       MSR_IA32_POWER_CTL,
+
+       MSR_K7_HWCR,
 };
 
 static unsigned num_emulated_msrs;
@@ -1209,11 +1212,12 @@ static u32 msr_based_features[] = {
 
 static unsigned int num_msr_based_features;
 
-u64 kvm_get_arch_capabilities(void)
+static u64 kvm_get_arch_capabilities(void)
 {
-       u64 data;
+       u64 data = 0;
 
-       rdmsrl_safe(MSR_IA32_ARCH_CAPABILITIES, &data);
+       if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
+               rdmsrl(MSR_IA32_ARCH_CAPABILITIES, data);
 
        /*
         * If we're doing cache flushes (either "always" or "cond")
@@ -1229,7 +1233,6 @@ u64 kvm_get_arch_capabilities(void)
 
        return data;
 }
-EXPORT_SYMBOL_GPL(kvm_get_arch_capabilities);
 
 static int kvm_get_msr_feature(struct kvm_msr_entry *msr)
 {
@@ -1262,31 +1265,49 @@ static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
        return 0;
 }
 
-bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
+static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
-       if (efer & efer_reserved_bits)
-               return false;
-
        if (efer & EFER_FFXSR && !guest_cpuid_has(vcpu, X86_FEATURE_FXSR_OPT))
-                       return false;
+               return false;
 
        if (efer & EFER_SVME && !guest_cpuid_has(vcpu, X86_FEATURE_SVM))
-                       return false;
+               return false;
+
+       if (efer & (EFER_LME | EFER_LMA) &&
+           !guest_cpuid_has(vcpu, X86_FEATURE_LM))
+               return false;
+
+       if (efer & EFER_NX && !guest_cpuid_has(vcpu, X86_FEATURE_NX))
+               return false;
 
        return true;
+
+}
+bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
+{
+       if (efer & efer_reserved_bits)
+               return false;
+
+       return __kvm_valid_efer(vcpu, efer);
 }
 EXPORT_SYMBOL_GPL(kvm_valid_efer);
 
-static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
+static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
        u64 old_efer = vcpu->arch.efer;
+       u64 efer = msr_info->data;
 
-       if (!kvm_valid_efer(vcpu, efer))
+       if (efer & efer_reserved_bits)
                return 1;
 
-       if (is_paging(vcpu)
-           && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
-               return 1;
+       if (!msr_info->host_initiated) {
+               if (!__kvm_valid_efer(vcpu, efer))
+                       return 1;
+
+               if (is_paging(vcpu) &&
+                   (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
+                       return 1;
+       }
 
        efer &= ~EFER_LMA;
        efer |= vcpu->arch.efer & EFER_LMA;
@@ -2279,6 +2300,18 @@ static void kvmclock_sync_fn(struct work_struct *work)
                                        KVMCLOCK_SYNC_PERIOD);
 }
 
+/*
+ * On AMD, HWCR[McStatusWrEn] controls whether setting MCi_STATUS results in #GP.
+ */
+static bool can_set_mci_status(struct kvm_vcpu *vcpu)
+{
+       /* McStatusWrEn enabled? */
+       if (guest_cpuid_is_amd(vcpu))
+               return !!(vcpu->arch.msr_hwcr & BIT_ULL(18));
+
+       return false;
+}
+
 static int set_msr_mce(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
        u64 mcg_cap = vcpu->arch.mcg_cap;
@@ -2310,9 +2343,14 @@ static int set_msr_mce(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                        if ((offset & 0x3) == 0 &&
                            data != 0 && (data | (1 << 10)) != ~(u64)0)
                                return -1;
+
+                       /* MCi_STATUS */
                        if (!msr_info->host_initiated &&
-                               (offset & 0x3) == 1 && data != 0)
-                               return -1;
+                           (offset & 0x3) == 1 && data != 0) {
+                               if (!can_set_mci_status(vcpu))
+                                       return -1;
+                       }
+
                        vcpu->arch.mce_banks[offset] = data;
                        break;
                }
@@ -2456,13 +2494,16 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                vcpu->arch.arch_capabilities = data;
                break;
        case MSR_EFER:
-               return set_efer(vcpu, data);
+               return set_efer(vcpu, msr_info);
        case MSR_K7_HWCR:
                data &= ~(u64)0x40;     /* ignore flush filter disable */
                data &= ~(u64)0x100;    /* ignore ignne emulation enable */
                data &= ~(u64)0x8;      /* ignore TLB cache disable */
-               data &= ~(u64)0x40000;  /* ignore Mc status write enable */
-               if (data != 0) {
+
+               /* Handle McStatusWrEn */
+               if (data == BIT_ULL(18)) {
+                       vcpu->arch.msr_hwcr = data;
+               } else if (data != 0) {
                        vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
                                    data);
                        return 1;
@@ -2506,7 +2547,15 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                }
                break;
        case MSR_IA32_MISC_ENABLE:
-               vcpu->arch.ia32_misc_enable_msr = data;
+               if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT) &&
+                   ((vcpu->arch.ia32_misc_enable_msr ^ data) & MSR_IA32_MISC_ENABLE_MWAIT)) {
+                       if (!guest_cpuid_has(vcpu, X86_FEATURE_XMM3))
+                               return 1;
+                       vcpu->arch.ia32_misc_enable_msr = data;
+                       kvm_update_cpuid(vcpu);
+               } else {
+                       vcpu->arch.ia32_misc_enable_msr = data;
+               }
                break;
        case MSR_IA32_SMBASE:
                if (!msr_info->host_initiated)
@@ -2736,7 +2785,6 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        case MSR_K8_SYSCFG:
        case MSR_K8_TSEG_ADDR:
        case MSR_K8_TSEG_MASK:
-       case MSR_K7_HWCR:
        case MSR_VM_HSAVE_PA:
        case MSR_K8_INT_PENDING_MSG:
        case MSR_AMD64_NB_CFG:
@@ -2900,6 +2948,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        case MSR_MISC_FEATURES_ENABLES:
                msr_info->data = vcpu->arch.msr_misc_features_enables;
                break;
+       case MSR_K7_HWCR:
+               msr_info->data = vcpu->arch.msr_hwcr;
+               break;
        default:
                if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
                        return kvm_pmu_get_msr(vcpu, msr_info->index, &msr_info->data);
@@ -3055,7 +3106,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
                r = KVM_CLOCK_TSC_STABLE;
                break;
        case KVM_CAP_X86_DISABLE_EXITS:
-               r |=  KVM_X86_DISABLE_EXITS_HLT | KVM_X86_DISABLE_EXITS_PAUSE;
+               r |=  KVM_X86_DISABLE_EXITS_HLT | KVM_X86_DISABLE_EXITS_PAUSE |
+                     KVM_X86_DISABLE_EXITS_CSTATE;
                if(kvm_can_mwait_in_guest())
                        r |= KVM_X86_DISABLE_EXITS_MWAIT;
                break;
@@ -3079,8 +3131,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
        case KVM_CAP_MAX_VCPUS:
                r = KVM_MAX_VCPUS;
                break;
-       case KVM_CAP_NR_MEMSLOTS:
-               r = KVM_USER_MEM_SLOTS;
+       case KVM_CAP_MAX_VCPU_ID:
+               r = KVM_MAX_VCPU_ID;
                break;
        case KVM_CAP_PV_MMU:    /* obsolete */
                r = 0;
@@ -4572,6 +4624,8 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
                        kvm->arch.hlt_in_guest = true;
                if (cap->args[0] & KVM_X86_DISABLE_EXITS_PAUSE)
                        kvm->arch.pause_in_guest = true;
+               if (cap->args[0] & KVM_X86_DISABLE_EXITS_CSTATE)
+                       kvm->arch.cstate_in_guest = true;
                r = 0;
                break;
        case KVM_CAP_MSR_PLATFORM_INFO:
@@ -5521,9 +5575,9 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
                                     unsigned int bytes,
                                     struct x86_exception *exception)
 {
+       struct kvm_host_map map;
        struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
        gpa_t gpa;
-       struct page *page;
        char *kaddr;
        bool exchanged;
 
@@ -5540,12 +5594,11 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
        if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
                goto emul_write;
 
-       page = kvm_vcpu_gfn_to_page(vcpu, gpa >> PAGE_SHIFT);
-       if (is_error_page(page))
+       if (kvm_vcpu_map(vcpu, gpa_to_gfn(gpa), &map))
                goto emul_write;
 
-       kaddr = kmap_atomic(page);
-       kaddr += offset_in_page(gpa);
+       kaddr = map.hva + offset_in_page(gpa);
+
        switch (bytes) {
        case 1:
                exchanged = CMPXCHG_TYPE(u8, kaddr, old, new);
@@ -5562,13 +5615,12 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
        default:
                BUG();
        }
-       kunmap_atomic(kaddr);
-       kvm_release_page_dirty(page);
+
+       kvm_vcpu_unmap(vcpu, &map, true);
 
        if (!exchanged)
                return X86EMUL_CMPXCHG_FAILED;
 
-       kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
        kvm_page_track_write(vcpu, gpa, new, bytes);
 
        return X86EMUL_CONTINUE;
@@ -6558,7 +6610,7 @@ static int complete_fast_pio_out(struct kvm_vcpu *vcpu)
 static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size,
                            unsigned short port)
 {
-       unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX);
+       unsigned long val = kvm_rax_read(vcpu);
        int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt,
                                            size, port, &val, 1);
        if (ret)
@@ -6593,8 +6645,7 @@ static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
        }
 
        /* For size less than 4 we merge, else we zero extend */
-       val = (vcpu->arch.pio.size < 4) ? kvm_register_read(vcpu, VCPU_REGS_RAX)
-                                       : 0;
+       val = (vcpu->arch.pio.size < 4) ? kvm_rax_read(vcpu) : 0;
 
        /*
         * Since vcpu->arch.pio.count == 1 let emulator_pio_in_emulated perform
@@ -6602,7 +6653,7 @@ static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
         */
        emulator_pio_in_emulated(&vcpu->arch.emulate_ctxt, vcpu->arch.pio.size,
                                 vcpu->arch.pio.port, &val, 1);
-       kvm_register_write(vcpu, VCPU_REGS_RAX, val);
+       kvm_rax_write(vcpu, val);
 
        return kvm_skip_emulated_instruction(vcpu);
 }
@@ -6614,12 +6665,12 @@ static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size,
        int ret;
 
        /* For size less than 4 we merge, else we zero extend */
-       val = (size < 4) ? kvm_register_read(vcpu, VCPU_REGS_RAX) : 0;
+       val = (size < 4) ? kvm_rax_read(vcpu) : 0;
 
        ret = emulator_pio_in_emulated(&vcpu->arch.emulate_ctxt, size, port,
                                       &val, 1);
        if (ret) {
-               kvm_register_write(vcpu, VCPU_REGS_RAX, val);
+               kvm_rax_write(vcpu, val);
                return ret;
        }
 
@@ -6854,41 +6905,22 @@ static unsigned long kvm_get_guest_ip(void)
        return ip;
 }
 
+static void kvm_handle_intel_pt_intr(void)
+{
+       struct kvm_vcpu *vcpu = __this_cpu_read(current_vcpu);
+
+       kvm_make_request(KVM_REQ_PMI, vcpu);
+       __set_bit(MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI_BIT,
+                       (unsigned long *)&vcpu->arch.pmu.global_status);
+}
+
 static struct perf_guest_info_callbacks kvm_guest_cbs = {
        .is_in_guest            = kvm_is_in_guest,
        .is_user_mode           = kvm_is_user_mode,
        .get_guest_ip           = kvm_get_guest_ip,
+       .handle_intel_pt_intr   = kvm_handle_intel_pt_intr,
 };
 
-static void kvm_set_mmio_spte_mask(void)
-{
-       u64 mask;
-       int maxphyaddr = boot_cpu_data.x86_phys_bits;
-
-       /*
-        * Set the reserved bits and the present bit of an paging-structure
-        * entry to generate page fault with PFER.RSV = 1.
-        */
-
-       /*
-        * Mask the uppermost physical address bit, which would be reserved as
-        * long as the supported physical address width is less than 52.
-        */
-       mask = 1ull << 51;
-
-       /* Set the present bit. */
-       mask |= 1ull;
-
-       /*
-        * If reserved bit is not supported, clear the present bit to disable
-        * mmio page fault.
-        */
-       if (IS_ENABLED(CONFIG_X86_64) && maxphyaddr == 52)
-               mask &= ~1ull;
-
-       kvm_mmu_set_mmio_spte_mask(mask, mask);
-}
-
 #ifdef CONFIG_X86_64
 static void pvclock_gtod_update_fn(struct work_struct *work)
 {
@@ -6985,8 +7017,6 @@ int kvm_arch_init(void *opaque)
        if (r)
                goto out_free_percpu;
 
-       kvm_set_mmio_spte_mask();
-
        kvm_x86_ops = ops;
 
        kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
@@ -7133,11 +7163,11 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
        if (kvm_hv_hypercall_enabled(vcpu->kvm))
                return kvm_hv_hypercall(vcpu);
 
-       nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
-       a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
-       a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
-       a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
-       a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);
+       nr = kvm_rax_read(vcpu);
+       a0 = kvm_rbx_read(vcpu);
+       a1 = kvm_rcx_read(vcpu);
+       a2 = kvm_rdx_read(vcpu);
+       a3 = kvm_rsi_read(vcpu);
 
        trace_kvm_hypercall(nr, a0, a1, a2, a3);
 
@@ -7178,7 +7208,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 out:
        if (!op_64_bit)
                ret = (u32)ret;
-       kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
+       kvm_rax_write(vcpu, ret);
 
        ++vcpu->stat.hypercalls;
        return kvm_skip_emulated_instruction(vcpu);
@@ -7903,9 +7933,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
        }
 
        trace_kvm_entry(vcpu->vcpu_id);
-       if (lapic_in_kernel(vcpu) &&
-           vcpu->arch.apic->lapic_timer.timer_advance_ns)
-               wait_lapic_expire(vcpu);
        guest_enter_irqoff();
 
        fpregs_assert_state_consistent();
@@ -7961,6 +7988,13 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
        ++vcpu->stat.exits;
 
        guest_exit_irqoff();
+       if (lapic_in_kernel(vcpu)) {
+               s64 delta = vcpu->arch.apic->lapic_timer.advance_expire_delta;
+               if (delta != S64_MIN) {
+                       trace_kvm_wait_lapic_expire(vcpu->vcpu_id, delta);
+                       vcpu->arch.apic->lapic_timer.advance_expire_delta = S64_MIN;
+               }
+       }
 
        local_irq_enable();
        preempt_enable();
@@ -8280,23 +8314,23 @@ static void __get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
                emulator_writeback_register_cache(&vcpu->arch.emulate_ctxt);
                vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
        }
-       regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
-       regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
-       regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
-       regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
-       regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
-       regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
-       regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
-       regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
+       regs->rax = kvm_rax_read(vcpu);
+       regs->rbx = kvm_rbx_read(vcpu);
+       regs->rcx = kvm_rcx_read(vcpu);
+       regs->rdx = kvm_rdx_read(vcpu);
+       regs->rsi = kvm_rsi_read(vcpu);
+       regs->rdi = kvm_rdi_read(vcpu);
+       regs->rsp = kvm_rsp_read(vcpu);
+       regs->rbp = kvm_rbp_read(vcpu);
 #ifdef CONFIG_X86_64
-       regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
-       regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
-       regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
-       regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
-       regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
-       regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
-       regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
-       regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
+       regs->r8 = kvm_r8_read(vcpu);
+       regs->r9 = kvm_r9_read(vcpu);
+       regs->r10 = kvm_r10_read(vcpu);
+       regs->r11 = kvm_r11_read(vcpu);
+       regs->r12 = kvm_r12_read(vcpu);
+       regs->r13 = kvm_r13_read(vcpu);
+       regs->r14 = kvm_r14_read(vcpu);
+       regs->r15 = kvm_r15_read(vcpu);
 #endif
 
        regs->rip = kvm_rip_read(vcpu);
@@ -8316,23 +8350,23 @@ static void __set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
        vcpu->arch.emulate_regs_need_sync_from_vcpu = true;
        vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
 
-       kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
-       kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
-       kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
-       kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
-       kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
-       kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
-       kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
-       kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
+       kvm_rax_write(vcpu, regs->rax);
+       kvm_rbx_write(vcpu, regs->rbx);
+       kvm_rcx_write(vcpu, regs->rcx);
+       kvm_rdx_write(vcpu, regs->rdx);
+       kvm_rsi_write(vcpu, regs->rsi);
+       kvm_rdi_write(vcpu, regs->rdi);
+       kvm_rsp_write(vcpu, regs->rsp);
+       kvm_rbp_write(vcpu, regs->rbp);
 #ifdef CONFIG_X86_64
-       kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
-       kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
-       kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
-       kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
-       kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
-       kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
-       kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
-       kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
+       kvm_r8_write(vcpu, regs->r8);
+       kvm_r9_write(vcpu, regs->r9);
+       kvm_r10_write(vcpu, regs->r10);
+       kvm_r11_write(vcpu, regs->r11);
+       kvm_r12_write(vcpu, regs->r12);
+       kvm_r13_write(vcpu, regs->r13);
+       kvm_r14_write(vcpu, regs->r14);
+       kvm_r15_write(vcpu, regs->r15);
 #endif
 
        kvm_rip_write(vcpu, regs->rip);
@@ -9059,9 +9093,9 @@ void kvm_arch_hardware_unsetup(void)
        kvm_x86_ops->hardware_unsetup();
 }
 
-void kvm_arch_check_processor_compat(void *rtn)
+int kvm_arch_check_processor_compat(void)
 {
-       kvm_x86_ops->check_processor_compatibility(rtn);
+       return kvm_x86_ops->check_processor_compatibility();
 }
 
 bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu)
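
Separately from the MWAIT change named in the subject line, the hunks touching kvm_vm_ioctl_check_extension() and kvm_vm_ioctl_enable_cap() above advertise and accept a new KVM_X86_DISABLE_EXITS_CSTATE flag. A minimal, hypothetical VMM-side sketch of how that flag might be used, assuming the usual KVM_CHECK_EXTENSION and KVM_ENABLE_CAP ioctls on the VM file descriptor and ignoring errors:

/*
 * Hypothetical sketch, not part of the patch: query the
 * KVM_CAP_X86_DISABLE_EXITS bitmap and, if the new CSTATE bit is
 * advertised, ask KVM to leave C-state handling to the guest.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static void maybe_disable_cstate_exits(int vm_fd)
{
        int supported = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_X86_DISABLE_EXITS);

        if (supported & KVM_X86_DISABLE_EXITS_CSTATE) {
                struct kvm_enable_cap cap = {
                        .cap  = KVM_CAP_X86_DISABLE_EXITS,
                        .args = { KVM_X86_DISABLE_EXITS_CSTATE },
                };

                ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
        }
}

Like the other KVM_CAP_X86_DISABLE_EXITS bits, this is typically requested before any vCPUs are created so that per-vCPU intercept setup picks it up.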