Merge tag 'kvm-ppc-next-5.2-2' of git://git.kernel.org/pub/scm/linux/kernel/git/paulu...
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 099b851dabafd7e2980f96472209777f9cc8f77b..dc621f73e96b3b0aad91a1ec0948cd22af13bfd5 100644
@@ -800,7 +800,7 @@ void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
 }
 EXPORT_SYMBOL_GPL(kvm_lmsw);
 
-static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
+void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
 {
        if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
                        !vcpu->guest_xcr0_loaded) {
@@ -810,8 +810,9 @@ static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
                vcpu->guest_xcr0_loaded = 1;
        }
 }
+EXPORT_SYMBOL_GPL(kvm_load_guest_xcr0);
 
-static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
+void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
 {
        if (vcpu->guest_xcr0_loaded) {
                if (vcpu->arch.xcr0 != host_xcr0)
@@ -819,6 +820,7 @@ static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
                vcpu->guest_xcr0_loaded = 0;
        }
 }
+EXPORT_SYMBOL_GPL(kvm_put_guest_xcr0);
 
 static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 {
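kvm_load_guest_xcr0() and kvm_put_guest_xcr0() lose "static" and gain exports so the vendor modules (vmx.ko/svm.ko) can switch XCR0 in atomic context right around VM-entry; the matching calls are removed from vcpu_enter_guest() later in this diff. A minimal sketch of the intended call site, assuming a vendor-side run function of roughly this shape:

	/* Sketch (assumption): vendor-module call site for the exported
	 * helpers, run with interrupts disabled around VM-entry. */
	static void vendor_vcpu_run(struct kvm_vcpu *vcpu)
	{
		kvm_load_guest_xcr0(vcpu);	/* XCR0 := guest value */

		/* ... low-level VM-enter/VM-exit ... */

		kvm_put_guest_xcr0(vcpu);	/* restore host XCR0 */
	}
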
@@ -1094,15 +1096,15 @@ EXPORT_SYMBOL_GPL(kvm_get_dr);
 
 bool kvm_rdpmc(struct kvm_vcpu *vcpu)
 {
-       u32 ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
+       u32 ecx = kvm_rcx_read(vcpu);
        u64 data;
        int err;
 
        err = kvm_pmu_rdpmc(vcpu, ecx, &data);
        if (err)
                return err;
-       kvm_register_write(vcpu, VCPU_REGS_RAX, (u32)data);
-       kvm_register_write(vcpu, VCPU_REGS_RDX, data >> 32);
+       kvm_rax_write(vcpu, (u32)data);
+       kvm_rdx_write(vcpu, data >> 32);
        return err;
 }
 EXPORT_SYMBOL_GPL(kvm_rdpmc);
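kvm_rcx_read(), kvm_rax_write() and friends replace the generic kvm_register_read()/kvm_register_write() calls throughout this file. A sketch of the shape these accessors presumably take (assumption: one pair per GPR, likely macro-generated in kvm_cache_regs.h):

	/* Sketch (assumption): thin per-register wrappers. */
	static inline unsigned long kvm_rcx_read(struct kvm_vcpu *vcpu)
	{
		return kvm_register_read(vcpu, VCPU_REGS_RCX);
	}

	static inline void kvm_rax_write(struct kvm_vcpu *vcpu, unsigned long val)
	{
		kvm_register_write(vcpu, VCPU_REGS_RAX, val);
	}
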
@@ -1168,6 +1170,9 @@ static u32 emulated_msrs[] = {
        MSR_PLATFORM_INFO,
        MSR_MISC_FEATURES_ENABLES,
        MSR_AMD64_VIRT_SPEC_CTRL,
+       MSR_IA32_POWER_CTL,
+
+       MSR_K7_HWCR,
 };
 
 static unsigned num_emulated_msrs;
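MSR_IA32_POWER_CTL and MSR_K7_HWCR join the emulated_msrs list, so both are now reported through KVM_GET_MSR_INDEX_LIST and picked up by VMMs that save and restore every listed MSR across migration. A hedged userspace sketch of the two-call pattern for that system ioctl, assuming kvm_fd is an open /dev/kvm descriptor:

	/* Sketch: query the (now longer) MSR index list. The probe call
	 * with nmsrs == 0 fails with E2BIG but reports the needed size. */
	struct kvm_msr_list probe = { .nmsrs = 0 }, *list;

	ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, &probe);	/* expect -1 / E2BIG */
	list = malloc(sizeof(*list) + probe.nmsrs * sizeof(__u32));
	list->nmsrs = probe.nmsrs;
	if (ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, list) < 0)
		err(1, "KVM_GET_MSR_INDEX_LIST");
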
@@ -1256,31 +1261,49 @@ static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
        return 0;
 }
 
-bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
+static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
-       if (efer & efer_reserved_bits)
-               return false;
-
        if (efer & EFER_FFXSR && !guest_cpuid_has(vcpu, X86_FEATURE_FXSR_OPT))
-                       return false;
+               return false;
 
        if (efer & EFER_SVME && !guest_cpuid_has(vcpu, X86_FEATURE_SVM))
-                       return false;
+               return false;
+
+       if (efer & (EFER_LME | EFER_LMA) &&
+           !guest_cpuid_has(vcpu, X86_FEATURE_LM))
+               return false;
+
+       if (efer & EFER_NX && !guest_cpuid_has(vcpu, X86_FEATURE_NX))
+               return false;
 
        return true;
+}
+
+bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
+{
+       if (efer & efer_reserved_bits)
+               return false;
+
+       return __kvm_valid_efer(vcpu, efer);
 }
 EXPORT_SYMBOL_GPL(kvm_valid_efer);
 
-static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
+static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
        u64 old_efer = vcpu->arch.efer;
+       u64 efer = msr_info->data;
 
-       if (!kvm_valid_efer(vcpu, efer))
-               return 1;
+       if (efer & efer_reserved_bits)
+               return 1;
 
-       if (is_paging(vcpu)
-           && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
-               return 1;
+       if (!msr_info->host_initiated) {
+               if (!__kvm_valid_efer(vcpu, efer))
+                       return 1;
+
+               if (is_paging(vcpu) &&
+                   (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
+                       return 1;
+       }
 
        efer &= ~EFER_LMA;
        efer |= vcpu->arch.efer & EFER_LMA;
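The EFER validation split has two effects: reserved-bit writes are rejected for everyone, while the CPUID-dependent checks (and the no-LME-toggle-while-paging rule) now apply only to guest-initiated writes. That lets a VMM restore EFER before it has configured the guest's CPUID. A userspace sketch of the case this unblocks, with vcpu_fd a vCPU fd that has not yet seen KVM_SET_CPUID2:

	/* Sketch: host-initiated EFER restore via KVM_SET_MSRS. MSR_EFER
	 * is 0xC0000080; LME = bit 8, LMA = bit 10, NXE = bit 11. */
	struct {
		struct kvm_msrs hdr;
		struct kvm_msr_entry entry;
	} msrs = {
		.hdr.nmsrs   = 1,
		.entry.index = 0xC0000080,
		.entry.data  = (1ULL << 8) | (1ULL << 10) | (1ULL << 11),
	};

	if (ioctl(vcpu_fd, KVM_SET_MSRS, &msrs) != 1)	/* returns #set */
		err(1, "KVM_SET_MSRS(EFER)");
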
@@ -2273,6 +2296,18 @@ static void kvmclock_sync_fn(struct work_struct *work)
                                        KVMCLOCK_SYNC_PERIOD);
 }
 
+/*
+ * On AMD, HWCR[McStatusWrEn] controls whether a non-zero write to an
+ * MCi_STATUS MSR is allowed (bit set) or raises #GP (bit clear).
+ */
+static bool can_set_mci_status(struct kvm_vcpu *vcpu)
+{
+       /* McStatusWrEn enabled? */
+       if (guest_cpuid_is_amd(vcpu))
+               return !!(vcpu->arch.msr_hwcr & BIT_ULL(18));
+
+       return false;
+}
+
 static int set_msr_mce(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
        u64 mcg_cap = vcpu->arch.mcg_cap;
@@ -2304,9 +2339,14 @@ static int set_msr_mce(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                        if ((offset & 0x3) == 0 &&
                            data != 0 && (data | (1 << 10)) != ~(u64)0)
                                return -1;
+
+                       /* MCi_STATUS */
                        if (!msr_info->host_initiated &&
-                               (offset & 0x3) == 1 && data != 0)
-                               return -1;
+                           (offset & 0x3) == 1 && data != 0) {
+                               if (!can_set_mci_status(vcpu))
+                                       return -1;
+                       }
+
                        vcpu->arch.mce_banks[offset] = data;
                        break;
                }
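The net effect: a guest write of a non-zero value to an MCi_STATUS MSR is now accepted when the guest has set HWCR[18] (McStatusWrEn), matching AMD hardware and allowing tools such as mce-inject to run inside the guest. A guest-kernel sketch of the sequence (wrmsrl() as in <asm/msr.h>):

	/* Sketch (guest kernel context): enable McStatusWrEn, then write
	 * bank 0's status; before this change the second WRMSR took a #GP. */
	#define MSR_K7_HWCR		0xC0010015
	#define MSR_IA32_MC0_STATUS	0x00000401

	wrmsrl(MSR_K7_HWCR, 1ULL << 18);	/* McStatusWrEn = 1 */
	wrmsrl(MSR_IA32_MC0_STATUS,		/* e.g. a VAL|UC pattern */
	       0xB200000000000000ULL);
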
@@ -2450,13 +2490,16 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                vcpu->arch.arch_capabilities = data;
                break;
        case MSR_EFER:
-               return set_efer(vcpu, data);
+               return set_efer(vcpu, msr_info);
        case MSR_K7_HWCR:
                data &= ~(u64)0x40;     /* ignore flush filter disable */
                data &= ~(u64)0x100;    /* ignore ignne emulation enable */
                data &= ~(u64)0x8;      /* ignore TLB cache disable */
-               data &= ~(u64)0x40000;  /* ignore Mc status write enable */
-               if (data != 0) {
+
+               /* Handle McStatusWrEn */
+               if (data == BIT_ULL(18)) {
+                       vcpu->arch.msr_hwcr = data;
+               } else if (data != 0) {
                        vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
                                    data);
                        return 1;
@@ -2730,7 +2773,6 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        case MSR_K8_SYSCFG:
        case MSR_K8_TSEG_ADDR:
        case MSR_K8_TSEG_MASK:
-       case MSR_K7_HWCR:
        case MSR_VM_HSAVE_PA:
        case MSR_K8_INT_PENDING_MSG:
        case MSR_AMD64_NB_CFG:
@@ -2894,6 +2936,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        case MSR_MISC_FEATURES_ENABLES:
                msr_info->data = vcpu->arch.msr_misc_features_enables;
                break;
+       case MSR_K7_HWCR:
+               msr_info->data = vcpu->arch.msr_hwcr;
+               break;
        default:
                if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
                        return kvm_pmu_get_msr(vcpu, msr_info->index, &msr_info->data);
@@ -3073,9 +3118,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
        case KVM_CAP_MAX_VCPUS:
                r = KVM_MAX_VCPUS;
                break;
-       case KVM_CAP_NR_MEMSLOTS:
-               r = KVM_USER_MEM_SLOTS;
-               break;
        case KVM_CAP_PV_MMU:    /* obsolete */
                r = 0;
                break;
@@ -3093,7 +3135,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
                break;
        case KVM_CAP_NESTED_STATE:
                r = kvm_x86_ops->get_nested_state ?
-                       kvm_x86_ops->get_nested_state(NULL, 0, 0) : 0;
+                       kvm_x86_ops->get_nested_state(NULL, NULL, 0) : 0;
                break;
        default:
                break;
@@ -3528,7 +3570,7 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
        memset(&events->reserved, 0, sizeof(events->reserved));
 }
 
-static void kvm_set_hflags(struct kvm_vcpu *vcpu, unsigned emul_flags);
+static void kvm_smm_changed(struct kvm_vcpu *vcpu);
 
 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
                                              struct kvm_vcpu_events *events)
@@ -3588,12 +3630,13 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
                vcpu->arch.apic->sipi_vector = events->sipi_vector;
 
        if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
-               u32 hflags = vcpu->arch.hflags;
-               if (events->smi.smm)
-                       hflags |= HF_SMM_MASK;
-               else
-                       hflags &= ~HF_SMM_MASK;
-               kvm_set_hflags(vcpu, hflags);
+               if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) {
+                       if (events->smi.smm)
+                               vcpu->arch.hflags |= HF_SMM_MASK;
+                       else
+                               vcpu->arch.hflags &= ~HF_SMM_MASK;
+                       kvm_smm_changed(vcpu);
+               }
 
                vcpu->arch.smi_pending = events->smi.pending;
 
@@ -4270,7 +4313,7 @@ static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
 }
 
 static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
-                                         u32 kvm_nr_mmu_pages)
+                                        unsigned long kvm_nr_mmu_pages)
 {
        if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
                return -EINVAL;
@@ -4284,7 +4327,7 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
        return 0;
 }
 
-static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
+static unsigned long kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
 {
        return kvm->arch.n_max_mmu_pages;
 }
@@ -5514,9 +5557,9 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
                                     unsigned int bytes,
                                     struct x86_exception *exception)
 {
+       struct kvm_host_map map;
        struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
        gpa_t gpa;
-       struct page *page;
        char *kaddr;
        bool exchanged;
 
@@ -5533,12 +5576,11 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
        if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
                goto emul_write;
 
-       page = kvm_vcpu_gfn_to_page(vcpu, gpa >> PAGE_SHIFT);
-       if (is_error_page(page))
+       if (kvm_vcpu_map(vcpu, gpa_to_gfn(gpa), &map))
                goto emul_write;
 
-       kaddr = kmap_atomic(page);
-       kaddr += offset_in_page(gpa);
+       kaddr = map.hva + offset_in_page(gpa);
+
        switch (bytes) {
        case 1:
                exchanged = CMPXCHG_TYPE(u8, kaddr, old, new);
@@ -5555,13 +5597,12 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
        default:
                BUG();
        }
-       kunmap_atomic(kaddr);
-       kvm_release_page_dirty(page);
+
+       kvm_vcpu_unmap(vcpu, &map, true);
 
        if (!exchanged)
                return X86EMUL_CMPXCHG_FAILED;
 
-       kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
        kvm_page_track_write(vcpu, gpa, new, bytes);
 
        return X86EMUL_CONTINUE;
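emulator_cmpxchg_emulated() switches from kvm_vcpu_gfn_to_page() + kmap_atomic() to the kvm_vcpu_map() API, which also handles guest memory that is not backed by a refcounted struct page (e.g. PFNMAP regions). The pattern, as adopted in this hunk:

	/* The kvm_vcpu_map() pattern used above: map, access via map.hva,
	 * then unmap with dirty == true so the page is marked dirty. */
	struct kvm_host_map map;

	if (kvm_vcpu_map(vcpu, gpa_to_gfn(gpa), &map))
		goto emul_write;	/* mapping failed, fall back */

	/* ... read/modify memory at map.hva + offset_in_page(gpa) ... */

	kvm_vcpu_unmap(vcpu, &map, true);
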
@@ -5958,12 +5999,18 @@ static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt)
 
 static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_flags)
 {
-       kvm_set_hflags(emul_to_vcpu(ctxt), emul_flags);
+       emul_to_vcpu(ctxt)->arch.hflags = emul_flags;
 }
 
-static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt, u64 smbase)
+static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt,
+                                 const char *smstate)
 {
-       return kvm_x86_ops->pre_leave_smm(emul_to_vcpu(ctxt), smbase);
+       return kvm_x86_ops->pre_leave_smm(emul_to_vcpu(ctxt), smstate);
+}
+
+static void emulator_post_leave_smm(struct x86_emulate_ctxt *ctxt)
+{
+       kvm_smm_changed(emul_to_vcpu(ctxt));
 }
 
 static const struct x86_emulate_ops emulate_ops = {
@@ -6006,6 +6053,7 @@ static const struct x86_emulate_ops emulate_ops = {
        .get_hflags          = emulator_get_hflags,
        .set_hflags          = emulator_set_hflags,
        .pre_leave_smm       = emulator_pre_leave_smm,
+       .post_leave_smm      = emulator_post_leave_smm,
 };
 
 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
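Together with the removal of kvm_set_hflags() below, this re-plumbs RSM emulation: set_hflags() becomes a plain store, and the new post_leave_smm() callback is what finally invokes kvm_smm_changed() once the emulator has restored the saved state. A sketch of the resulting order in the emulator's RSM path (assumption: names as in emulate.c's em_rsm()):

	/* Sketch (assumption): rough em_rsm() sequence after this change. */
	ctxt->ops->set_hflags(ctxt, hflags & ~X86EMUL_SMM_MASK); /* plain store */
	if (ctxt->ops->pre_leave_smm(ctxt, smstate))	/* vendor (VMX/SVM) fixups */
		return X86EMUL_UNHANDLEABLE;
	/* ... rsm_load_state_32()/64() restores registers from smstate ... */
	ctxt->ops->post_leave_smm(ctxt);		/* -> kvm_smm_changed() */
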
@@ -6247,16 +6295,6 @@ static void kvm_smm_changed(struct kvm_vcpu *vcpu)
        kvm_mmu_reset_context(vcpu);
 }
 
-static void kvm_set_hflags(struct kvm_vcpu *vcpu, unsigned emul_flags)
-{
-       unsigned changed = vcpu->arch.hflags ^ emul_flags;
-
-       vcpu->arch.hflags = emul_flags;
-
-       if (changed & HF_SMM_MASK)
-               kvm_smm_changed(vcpu);
-}
-
 static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7,
                                unsigned long *db)
 {
@@ -6548,7 +6586,7 @@ static int complete_fast_pio_out(struct kvm_vcpu *vcpu)
 static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size,
                            unsigned short port)
 {
-       unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX);
+       unsigned long val = kvm_rax_read(vcpu);
        int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt,
                                            size, port, &val, 1);
 
@@ -6572,8 +6610,7 @@ static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
        }
 
        /* For size less than 4 we merge, else we zero extend */
-       val = (vcpu->arch.pio.size < 4) ? kvm_register_read(vcpu, VCPU_REGS_RAX)
-                                       : 0;
+       val = (vcpu->arch.pio.size < 4) ? kvm_rax_read(vcpu) : 0;
 
        /*
         * Since vcpu->arch.pio.count == 1 let emulator_pio_in_emulated perform
@@ -6581,7 +6618,7 @@ static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
         */
        emulator_pio_in_emulated(&vcpu->arch.emulate_ctxt, vcpu->arch.pio.size,
                                 vcpu->arch.pio.port, &val, 1);
-       kvm_register_write(vcpu, VCPU_REGS_RAX, val);
+       kvm_rax_write(vcpu, val);
 
        return kvm_skip_emulated_instruction(vcpu);
 }
@@ -6593,12 +6630,12 @@ static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size,
        int ret;
 
        /* For size less than 4 we merge, else we zero extend */
-       val = (size < 4) ? kvm_register_read(vcpu, VCPU_REGS_RAX) : 0;
+       val = (size < 4) ? kvm_rax_read(vcpu) : 0;
 
        ret = emulator_pio_in_emulated(&vcpu->arch.emulate_ctxt, size, port,
                                       &val, 1);
        if (ret) {
-               kvm_register_write(vcpu, VCPU_REGS_RAX, val);
+               kvm_rax_write(vcpu, val);
                return ret;
        }
 
@@ -6824,10 +6861,20 @@ static unsigned long kvm_get_guest_ip(void)
        return ip;
 }
 
+static void kvm_handle_intel_pt_intr(void)
+{
+       struct kvm_vcpu *vcpu = __this_cpu_read(current_vcpu);
+
+       kvm_make_request(KVM_REQ_PMI, vcpu);
+       __set_bit(MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI_BIT,
+                       (unsigned long *)&vcpu->arch.pmu.global_status);
+}
+
 static struct perf_guest_info_callbacks kvm_guest_cbs = {
        .is_in_guest            = kvm_is_in_guest,
        .is_user_mode           = kvm_is_user_mode,
        .get_guest_ip           = kvm_get_guest_ip,
+       .handle_intel_pt_intr   = kvm_handle_intel_pt_intr,
 };
 
 static void kvm_set_mmio_spte_mask(void)
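The new handle_intel_pt_intr callback lets the host PMI handler hand an Intel PT ToPA PMI to the vCPU that currently owns PT: it raises KVM_REQ_PMI and latches the ToPA overflow bit in the guest's GLOBAL_STATUS. On the perf side the dispatch is expected to look like the other guest callbacks (sketch; the real caller lives in the Intel PMU driver):

	/* Sketch (assumption): host PMI dispatch for a PT interrupt. */
	if (perf_guest_cbs && perf_guest_cbs->is_in_guest() &&
	    perf_guest_cbs->handle_intel_pt_intr)
		perf_guest_cbs->handle_intel_pt_intr();	/* guest owns PT */
	else
		intel_pt_interrupt();			/* normal host path */
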
@@ -7103,11 +7150,11 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
        if (kvm_hv_hypercall_enabled(vcpu->kvm))
                return kvm_hv_hypercall(vcpu);
 
-       nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
-       a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
-       a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
-       a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
-       a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);
+       nr = kvm_rax_read(vcpu);
+       a0 = kvm_rbx_read(vcpu);
+       a1 = kvm_rcx_read(vcpu);
+       a2 = kvm_rdx_read(vcpu);
+       a3 = kvm_rsi_read(vcpu);
 
        trace_kvm_hypercall(nr, a0, a1, a2, a3);
 
@@ -7148,7 +7195,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 out:
        if (!op_64_bit)
                ret = (u32)ret;
-       kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
+       kvm_rax_write(vcpu, ret);
 
        ++vcpu->stat.hypercalls;
        return kvm_skip_emulated_instruction(vcpu);
@@ -7441,9 +7488,9 @@ static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, char *buf)
        put_smstate(u32, buf, 0x7ef8, vcpu->arch.smbase);
 }
 
+#ifdef CONFIG_X86_64
 static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf)
 {
-#ifdef CONFIG_X86_64
        struct desc_ptr dt;
        struct kvm_segment seg;
        unsigned long val;
@@ -7493,10 +7540,8 @@ static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf)
 
        for (i = 0; i < 6; i++)
                enter_smm_save_seg_64(vcpu, buf, i);
-#else
-       WARN_ON_ONCE(1);
-#endif
 }
+#endif
 
 static void enter_smm(struct kvm_vcpu *vcpu)
 {
@@ -7507,9 +7552,11 @@ static void enter_smm(struct kvm_vcpu *vcpu)
 
        trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true);
        memset(buf, 0, 512);
+#ifdef CONFIG_X86_64
        if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
                enter_smm_save_state_64(vcpu, buf);
        else
+#endif
                enter_smm_save_state_32(vcpu, buf);
 
        /*
@@ -7567,8 +7614,10 @@ static void enter_smm(struct kvm_vcpu *vcpu)
        kvm_set_segment(vcpu, &ds, VCPU_SREG_GS);
        kvm_set_segment(vcpu, &ds, VCPU_SREG_SS);
 
+#ifdef CONFIG_X86_64
        if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
                kvm_x86_ops->set_efer(vcpu, 0);
+#endif
 
        kvm_update_cpuid(vcpu);
        kvm_mmu_reset_context(vcpu);
@@ -7865,8 +7914,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                goto cancel_injection;
        }
 
-       kvm_load_guest_xcr0(vcpu);
-
        if (req_immediate_exit) {
                kvm_make_request(KVM_REQ_EVENT, vcpu);
                kvm_x86_ops->request_immediate_exit(vcpu);
@@ -7919,8 +7966,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
        vcpu->mode = OUTSIDE_GUEST_MODE;
        smp_wmb();
 
-       kvm_put_guest_xcr0(vcpu);
-
        kvm_before_interrupt(vcpu);
        kvm_x86_ops->handle_external_intr(vcpu);
        kvm_after_interrupt(vcpu);
@@ -8239,23 +8284,23 @@ static void __get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
                emulator_writeback_register_cache(&vcpu->arch.emulate_ctxt);
                vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
        }
-       regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
-       regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
-       regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
-       regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
-       regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
-       regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
-       regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
-       regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
+       regs->rax = kvm_rax_read(vcpu);
+       regs->rbx = kvm_rbx_read(vcpu);
+       regs->rcx = kvm_rcx_read(vcpu);
+       regs->rdx = kvm_rdx_read(vcpu);
+       regs->rsi = kvm_rsi_read(vcpu);
+       regs->rdi = kvm_rdi_read(vcpu);
+       regs->rsp = kvm_rsp_read(vcpu);
+       regs->rbp = kvm_rbp_read(vcpu);
 #ifdef CONFIG_X86_64
-       regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
-       regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
-       regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
-       regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
-       regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
-       regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
-       regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
-       regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
+       regs->r8 = kvm_r8_read(vcpu);
+       regs->r9 = kvm_r9_read(vcpu);
+       regs->r10 = kvm_r10_read(vcpu);
+       regs->r11 = kvm_r11_read(vcpu);
+       regs->r12 = kvm_r12_read(vcpu);
+       regs->r13 = kvm_r13_read(vcpu);
+       regs->r14 = kvm_r14_read(vcpu);
+       regs->r15 = kvm_r15_read(vcpu);
 #endif
 
        regs->rip = kvm_rip_read(vcpu);
@@ -8275,23 +8320,23 @@ static void __set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
        vcpu->arch.emulate_regs_need_sync_from_vcpu = true;
        vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
 
-       kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
-       kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
-       kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
-       kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
-       kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
-       kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
-       kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
-       kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
+       kvm_rax_write(vcpu, regs->rax);
+       kvm_rbx_write(vcpu, regs->rbx);
+       kvm_rcx_write(vcpu, regs->rcx);
+       kvm_rdx_write(vcpu, regs->rdx);
+       kvm_rsi_write(vcpu, regs->rsi);
+       kvm_rdi_write(vcpu, regs->rdi);
+       kvm_rsp_write(vcpu, regs->rsp);
+       kvm_rbp_write(vcpu, regs->rbp);
 #ifdef CONFIG_X86_64
-       kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
-       kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
-       kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
-       kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
-       kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
-       kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
-       kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
-       kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
+       kvm_r8_write(vcpu, regs->r8);
+       kvm_r9_write(vcpu, regs->r9);
+       kvm_r10_write(vcpu, regs->r10);
+       kvm_r11_write(vcpu, regs->r11);
+       kvm_r12_write(vcpu, regs->r12);
+       kvm_r13_write(vcpu, regs->r13);
+       kvm_r14_write(vcpu, regs->r14);
+       kvm_r15_write(vcpu, regs->r15);
 #endif
 
        kvm_rip_write(vcpu, regs->rip);