KVM: SVM: add struct kvm_svm to hold SVM specific KVM vars

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 4e3c7953052634a040bd4aff6d881d03336bf18a..cb46e985e29a2eb42746fb50d94efc551851314e 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
 #include <linux/amd-iommu.h>
 #include <linux/hashtable.h>
 #include <linux/frame.h>
+#include <linux/psp-sev.h>
+#include <linux/file.h>
+#include <linux/pagemap.h>
+#include <linux/swap.h>
 
 #include <asm/apic.h>
 #include <asm/perf_event.h>
@@ -45,6 +49,7 @@
 #include <asm/debugreg.h>
 #include <asm/kvm_para.h>
 #include <asm/irq_remapping.h>
+#include <asm/microcode.h>
 #include <asm/nospec-branch.h>
 
 #include <asm/virtext.h>
@@ -127,6 +132,28 @@ static const u32 host_save_user_msrs[] = {
 
 #define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)
 
+struct kvm_sev_info {
+       bool active;            /* SEV enabled guest */
+       unsigned int asid;      /* ASID used for this guest */
+       unsigned int handle;    /* SEV firmware handle */
+       int fd;                 /* SEV device fd */
+       unsigned long pages_locked; /* Number of pages locked */
+       struct list_head regions_list;  /* List of registered regions */
+};
+
+struct kvm_svm {
+       struct kvm kvm;
+
+       /* Struct members for AVIC */
+       u32 avic_vm_id;
+       u32 ldr_mode;
+       struct page *avic_logical_id_table_page;
+       struct page *avic_physical_id_table_page;
+       struct hlist_node hnode;
+
+       struct kvm_sev_info sev_info;
+};
+
 struct kvm_vcpu;
 
 struct nested_state {
@@ -174,6 +201,8 @@ struct vcpu_svm {
        uint64_t sysenter_eip;
        uint64_t tsc_aux;
 
+       u64 msr_decfg;
+
        u64 next_rip;
 
        u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
@@ -214,6 +243,9 @@ struct vcpu_svm {
         */
        struct list_head ir_list;
        spinlock_t ir_list_lock;
+
+       /* which host CPU was used for running this vcpu */
+       unsigned int last_cpu;
 };
 
 /*
@@ -289,8 +321,14 @@ module_param(vls, int, 0444);
 static int vgif = true;
 module_param(vgif, int, 0444);
 
+/* enable/disable SEV support */
+static int sev = IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT);
+module_param(sev, int, 0444);
+
+static u8 rsm_ins_bytes[] = "\x0f\xaa";
+
 static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
-static void svm_flush_tlb(struct kvm_vcpu *vcpu);
+static void svm_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa);
 static void svm_complete_interrupts(struct vcpu_svm *svm);
 
 static int nested_svm_exit_handled(struct vcpu_svm *svm);
@@ -324,6 +362,44 @@ enum {
 
 #define VMCB_AVIC_APIC_BAR_MASK                0xFFFFFFFFFF000ULL
 
+static unsigned int max_sev_asid;
+static unsigned int min_sev_asid;
+static unsigned long *sev_asid_bitmap;
+#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)
+
+struct enc_region {
+       struct list_head list;
+       unsigned long npages;
+       struct page **pages;
+       unsigned long uaddr;
+       unsigned long size;
+};
+
+
+static inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
+{
+       return container_of(kvm, struct kvm_svm, kvm);
+}
+
+static inline bool svm_sev_enabled(void)
+{
+       return max_sev_asid;
+}
+
+static inline bool sev_guest(struct kvm *kvm)
+{
+       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+
+       return sev->active;
+}
+
+static inline int sev_get_asid(struct kvm *kvm)
+{
+       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+
+       return sev->asid;
+}
+
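For readers unfamiliar with the container_of() idiom that to_kvm_svm() is built on, here is a minimal standalone sketch (illustrative struct names, not the kernel definitions): the SVM-specific wrapper embeds the generic struct kvm, and a pointer to the embedded member is converted back to the wrapper by subtracting the member's offset.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct kvm { int id; };

struct kvm_svm_demo {
	struct kvm kvm;			/* embedded generic struct */
	unsigned int avic_vm_id;
};

int main(void)
{
	struct kvm_svm_demo vm = { .kvm = { .id = 7 }, .avic_vm_id = 42 };
	struct kvm *generic = &vm.kvm;	/* what most of KVM passes around */

	/* Recover the SVM-specific wrapper from the generic pointer. */
	struct kvm_svm_demo *svm =
		container_of(generic, struct kvm_svm_demo, kvm);

	printf("avic_vm_id=%u\n", svm->avic_vm_id);	/* prints 42 */
	return 0;
}

svm_vm_alloc()/svm_vm_free() further down rely on the same layout: KVM allocates and frees the wrapper while the rest of the code keeps passing struct kvm pointers around.
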
 static inline void mark_all_dirty(struct vmcb *vmcb)
 {
        vmcb->control.clean = 0;
@@ -530,10 +606,14 @@ struct svm_cpu_data {
        u64 asid_generation;
        u32 max_asid;
        u32 next_asid;
+       u32 min_asid;
        struct kvm_ldttss_desc *tss_desc;
 
        struct page *save_area;
        struct vmcb *current_vmcb;
+
+       /* index = sev_asid, value = vmcb pointer */
+       struct vmcb **sev_vmcbs;
 };
 
 static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
@@ -788,6 +868,7 @@ static int svm_hardware_enable(void)
        sd->asid_generation = 1;
        sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
        sd->next_asid = sd->max_asid + 1;
+       sd->min_asid = max_sev_asid + 1;
 
        gdt = get_current_gdt_rw();
        sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
@@ -846,6 +927,7 @@ static void svm_cpu_uninit(int cpu)
                return;
 
        per_cpu(svm_data, raw_smp_processor_id()) = NULL;
+       kfree(sd->sev_vmcbs);
        __free_page(sd->save_area);
        kfree(sd);
 }
@@ -859,11 +941,18 @@ static int svm_cpu_init(int cpu)
        if (!sd)
                return -ENOMEM;
        sd->cpu = cpu;
-       sd->save_area = alloc_page(GFP_KERNEL);
        r = -ENOMEM;
+       sd->save_area = alloc_page(GFP_KERNEL);
        if (!sd->save_area)
                goto err_1;
 
+       if (svm_sev_enabled()) {
+               r = -ENOMEM;
+               /* zeroed so that pre_sev_run() never sees a stale pointer */
+               sd->sev_vmcbs = kcalloc(max_sev_asid + 1, sizeof(void *), GFP_KERNEL);
+               if (!sd->sev_vmcbs)
+                       goto err_1;
+       }
+
        per_cpu(svm_data, cpu) = sd;
 
        return 0;
@@ -1023,7 +1112,7 @@ static void disable_nmi_singlestep(struct vcpu_svm *svm)
 }
 
 /* Note:
- * This hash table is used to map VM_ID to a struct kvm_arch,
+ * This hash table is used to map VM_ID to a struct kvm_svm,
  * when handling AMD IOMMU GALOG notification to schedule in
  * a particular vCPU.
  */
@@ -1040,7 +1129,7 @@ static DEFINE_SPINLOCK(svm_vm_data_hash_lock);
 static int avic_ga_log_notifier(u32 ga_tag)
 {
        unsigned long flags;
-       struct kvm_arch *ka = NULL;
+       struct kvm_svm *kvm_svm;
        struct kvm_vcpu *vcpu = NULL;
        u32 vm_id = AVIC_GATAG_TO_VMID(ga_tag);
        u32 vcpu_id = AVIC_GATAG_TO_VCPUID(ga_tag);
@@ -1048,13 +1137,10 @@ static int avic_ga_log_notifier(u32 ga_tag)
        pr_debug("SVM: %s: vm_id=%#x, vcpu_id=%#x\n", __func__, vm_id, vcpu_id);
 
        spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
-       hash_for_each_possible(svm_vm_data_hash, ka, hnode, vm_id) {
-               struct kvm *kvm = container_of(ka, struct kvm, arch);
-               struct kvm_arch *vm_data = &kvm->arch;
-
-               if (vm_data->avic_vm_id != vm_id)
+       hash_for_each_possible(svm_vm_data_hash, kvm_svm, hnode, vm_id) {
+               if (kvm_svm->avic_vm_id != vm_id)
                        continue;
-               vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
+               vcpu = kvm_get_vcpu_by_id(&kvm_svm->kvm, vcpu_id);
                break;
        }
        spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
@@ -1070,6 +1156,48 @@ static int avic_ga_log_notifier(u32 ga_tag)
        return 0;
 }
 
+static __init int sev_hardware_setup(void)
+{
+       struct sev_user_data_status *status;
+       int rc;
+
+       /* Maximum number of encrypted guests supported simultaneously */
+       max_sev_asid = cpuid_ecx(0x8000001F);
+
+       if (!max_sev_asid)
+               return 1;
+
+       /* Minimum ASID value that should be used for an SEV guest */
+       min_sev_asid = cpuid_edx(0x8000001F);
+
+       /* Initialize SEV ASID bitmap */
+       sev_asid_bitmap = kcalloc(BITS_TO_LONGS(max_sev_asid),
+                               sizeof(unsigned long), GFP_KERNEL);
+       if (!sev_asid_bitmap)
+               return 1;
+
+       status = kmalloc(sizeof(*status), GFP_KERNEL);
+       if (!status)
+               return 1;
+
+       /*
+        * Check SEV platform status.
+        *
+        * PLATFORM_STATUS can be called in any state. If we fail to query
+        * the platform status, then either the PSP firmware does not support
+        * the SEV feature or the SEV firmware is dead.
+        */
+       rc = sev_platform_status(status, NULL);
+       if (rc)
+               goto err;
+
+       pr_info("SEV supported\n");
+
+err:
+       kfree(status);
+       return rc;
+}
+
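As a rough userspace illustration of what sev_hardware_setup() reads: CPUID leaf 0x8000001F reports the maximum SEV ASID in ECX and the minimum SEV ASID in EDX. A sketch using the GCC/Clang cpuid.h helper, only meaningful on an AMD host that exposes the leaf:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(0x8000001f, &eax, &ebx, &ecx, &edx)) {
		fprintf(stderr, "CPUID leaf 0x8000001F not available\n");
		return 1;
	}

	/* Mirrors max_sev_asid (ECX) and min_sev_asid (EDX) in the patch. */
	printf("max_sev_asid=%u min_sev_asid=%u\n", ecx, edx);
	return 0;
}
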
 static __init int svm_hardware_setup(void)
 {
        int cpu;
@@ -1105,6 +1233,17 @@ static __init int svm_hardware_setup(void)
                kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
        }
 
+       if (sev) {
+               if (boot_cpu_has(X86_FEATURE_SEV) &&
+                   IS_ENABLED(CONFIG_KVM_AMD_SEV)) {
+                       r = sev_hardware_setup();
+                       if (r)
+                               sev = false;
+               } else {
+                       sev = false;
+               }
+       }
+
        for_each_possible_cpu(cpu) {
                r = svm_cpu_init(cpu);
                if (r)
@@ -1166,6 +1305,9 @@ static __exit void svm_hardware_unsetup(void)
 {
        int cpu;
 
+       if (svm_sev_enabled())
+               kfree(sev_asid_bitmap);
+
        for_each_possible_cpu(cpu)
                svm_cpu_uninit(cpu);
 
@@ -1212,10 +1354,10 @@ static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 static void avic_init_vmcb(struct vcpu_svm *svm)
 {
        struct vmcb *vmcb = svm->vmcb;
-       struct kvm_arch *vm_data = &svm->vcpu.kvm->arch;
+       struct kvm_svm *kvm_svm = to_kvm_svm(svm->vcpu.kvm);
        phys_addr_t bpa = __sme_set(page_to_phys(svm->avic_backing_page));
-       phys_addr_t lpa = __sme_set(page_to_phys(vm_data->avic_logical_id_table_page));
-       phys_addr_t ppa = __sme_set(page_to_phys(vm_data->avic_physical_id_table_page));
+       phys_addr_t lpa = __sme_set(page_to_phys(kvm_svm->avic_logical_id_table_page));
+       phys_addr_t ppa = __sme_set(page_to_phys(kvm_svm->avic_physical_id_table_page));
 
        vmcb->control.avic_backing_page = bpa & AVIC_HPA_MASK;
        vmcb->control.avic_logical_id = lpa & AVIC_HPA_MASK;
@@ -1247,6 +1389,14 @@ static void init_vmcb(struct vcpu_svm *svm)
        set_exception_intercept(svm, MC_VECTOR);
        set_exception_intercept(svm, AC_VECTOR);
        set_exception_intercept(svm, DB_VECTOR);
+       /*
+        * Guest access to VMware backdoor ports could legitimately
+        * trigger #GP because of TSS I/O permission bitmap.
+        * We intercept those #GP and allow access to them anyway
+        * as VMware does.
+        */
+       if (enable_vmware_backdoor)
+               set_exception_intercept(svm, GP_VECTOR);
 
        set_intercept(svm, INTERCEPT_INTR);
        set_intercept(svm, INTERCEPT_NMI);
@@ -1255,7 +1405,6 @@ static void init_vmcb(struct vcpu_svm *svm)
        set_intercept(svm, INTERCEPT_RDPMC);
        set_intercept(svm, INTERCEPT_CPUID);
        set_intercept(svm, INTERCEPT_INVD);
-       set_intercept(svm, INTERCEPT_HLT);
        set_intercept(svm, INTERCEPT_INVLPG);
        set_intercept(svm, INTERCEPT_INVLPGA);
        set_intercept(svm, INTERCEPT_IOIO_PROT);
@@ -1271,12 +1420,16 @@ static void init_vmcb(struct vcpu_svm *svm)
        set_intercept(svm, INTERCEPT_SKINIT);
        set_intercept(svm, INTERCEPT_WBINVD);
        set_intercept(svm, INTERCEPT_XSETBV);
+       set_intercept(svm, INTERCEPT_RSM);
 
-       if (!kvm_mwait_in_guest()) {
+       if (!kvm_mwait_in_guest(svm->vcpu.kvm)) {
                set_intercept(svm, INTERCEPT_MONITOR);
                set_intercept(svm, INTERCEPT_MWAIT);
        }
 
+       if (!kvm_hlt_in_guest(svm->vcpu.kvm))
+               set_intercept(svm, INTERCEPT_HLT);
+
        control->iopm_base_pa = __sme_set(iopm_base);
        control->msrpm_base_pa = __sme_set(__pa(svm->msrpm));
        control->int_ctl = V_INTR_MASKING_MASK;
@@ -1318,7 +1471,7 @@ static void init_vmcb(struct vcpu_svm *svm)
 
        if (npt_enabled) {
                /* Setup VMCB for Nested Paging */
-               control->nested_ctl = 1;
+               control->nested_ctl |= SVM_NESTED_CTL_NP_ENABLE;
                clr_intercept(svm, INTERCEPT_INVLPG);
                clr_exception_intercept(svm, PF_VECTOR);
                clr_cr_intercept(svm, INTERCEPT_CR3_READ);
@@ -1332,7 +1485,8 @@ static void init_vmcb(struct vcpu_svm *svm)
        svm->nested.vmcb = 0;
        svm->vcpu.arch.hflags = 0;
 
-       if (boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
+       if (boot_cpu_has(X86_FEATURE_PAUSEFILTER) &&
+           !kvm_pause_in_guest(svm->vcpu.kvm)) {
                control->pause_filter_count = 3000;
                set_intercept(svm, INTERCEPT_PAUSE);
        }
@@ -1356,6 +1510,11 @@ static void init_vmcb(struct vcpu_svm *svm)
                svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK;
        }
 
+       if (sev_guest(svm->vcpu.kvm)) {
+               svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE;
+               clr_exception_intercept(svm, UD_VECTOR);
+       }
+
        mark_all_dirty(svm->vmcb);
 
        enable_gif(svm);
@@ -1366,12 +1525,12 @@ static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu,
                                       unsigned int index)
 {
        u64 *avic_physical_id_table;
-       struct kvm_arch *vm_data = &vcpu->kvm->arch;
+       struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
 
        if (index >= AVIC_MAX_PHYSICAL_ID_COUNT)
                return NULL;
 
-       avic_physical_id_table = page_address(vm_data->avic_physical_id_table_page);
+       avic_physical_id_table = page_address(kvm_svm->avic_physical_id_table_page);
 
        return &avic_physical_id_table[index];
 }
@@ -1438,32 +1597,222 @@ static int avic_init_backing_page(struct kvm_vcpu *vcpu)
        return 0;
 }
 
+static void __sev_asid_free(int asid)
+{
+       struct svm_cpu_data *sd;
+       int cpu, pos;
+
+       pos = asid - 1;
+       clear_bit(pos, sev_asid_bitmap);
+
+       for_each_possible_cpu(cpu) {
+               sd = per_cpu(svm_data, cpu);
+               sd->sev_vmcbs[pos] = NULL;
+       }
+}
+
+static void sev_asid_free(struct kvm *kvm)
+{
+       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+
+       __sev_asid_free(sev->asid);
+}
+
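A small standalone sketch of the ASID bookkeeping used by __sev_asid_free() above and by sev_asid_new() further down: bit (asid - 1) in the bitmap tracks ASID asid, and allocation scans from min_sev_asid so that lower ASIDs stay available for non-SEV guests. The limits here are made-up stand-ins for the CPUID-reported values:

#include <stdio.h>

#define MIN_SEV_ASID 3
#define MAX_SEV_ASID 8

static unsigned char asid_bitmap[MAX_SEV_ASID];	/* one byte per bit, for clarity */

static int sev_asid_new_demo(void)
{
	for (int pos = MIN_SEV_ASID - 1; pos < MAX_SEV_ASID; pos++) {
		if (!asid_bitmap[pos]) {
			asid_bitmap[pos] = 1;
			return pos + 1;	/* bit index -> ASID */
		}
	}
	return -1;	/* -EBUSY in the kernel */
}

static void sev_asid_free_demo(int asid)
{
	asid_bitmap[asid - 1] = 0;
}

int main(void)
{
	int a = sev_asid_new_demo();	/* 3 */
	int b = sev_asid_new_demo();	/* 4 */

	sev_asid_free_demo(a);
	printf("a=%d b=%d reused=%d\n", a, b, sev_asid_new_demo()); /* reused=3 */
	return 0;
}
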
+static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
+{
+       struct sev_data_decommission *decommission;
+       struct sev_data_deactivate *data;
+
+       if (!handle)
+               return;
+
+       data = kzalloc(sizeof(*data), GFP_KERNEL);
+       if (!data)
+               return;
+
+       /* deactivate handle */
+       data->handle = handle;
+       sev_guest_deactivate(data, NULL);
+
+       wbinvd_on_all_cpus();
+       sev_guest_df_flush(NULL);
+       kfree(data);
+
+       decommission = kzalloc(sizeof(*decommission), GFP_KERNEL);
+       if (!decommission)
+               return;
+
+       /* decommission handle */
+       decommission->handle = handle;
+       sev_guest_decommission(decommission, NULL);
+
+       kfree(decommission);
+}
+
+static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
+                                   unsigned long ulen, unsigned long *n,
+                                   int write)
+{
+       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+       unsigned long npages, npinned, size;
+       unsigned long locked, lock_limit;
+       struct page **pages;
+       int first, last;
+
+       /* Calculate number of pages. */
+       first = (uaddr & PAGE_MASK) >> PAGE_SHIFT;
+       last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT;
+       npages = (last - first + 1);
+
+       locked = sev->pages_locked + npages;
+       lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+       if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
+               pr_err("SEV: %lu locked pages exceed the lock limit of %lu.\n", locked, lock_limit);
+               return NULL;
+       }
+
+       /* Avoid using vmalloc for smaller buffers. */
+       size = npages * sizeof(struct page *);
+       if (size > PAGE_SIZE)
+               pages = vmalloc(size);
+       else
+               pages = kmalloc(size, GFP_KERNEL);
+
+       if (!pages)
+               return NULL;
+
+       /* Pin the user virtual address. */
+       npinned = get_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
+       if (npinned != npages) {
+               pr_err("SEV: Failure locking %lu pages.\n", npages);
+               goto err;
+       }
+
+       *n = npages;
+       sev->pages_locked = locked;
+
+       return pages;
+
+err:
+       if (npinned > 0)
+               release_pages(pages, npinned);
+
+       kvfree(pages);
+       return NULL;
+}
+
+static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
+                            unsigned long npages)
+{
+       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+
+       release_pages(pages, npages);
+       kvfree(pages);
+       sev->pages_locked -= npages;
+}
+
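The page-span and RLIMIT_MEMLOCK arithmetic in sev_pin_memory() can be exercised in plain userspace. A sketch with an assumed 4 KiB page size and no actual pinning:

#include <stdio.h>
#include <sys/resource.h>

#define PAGE_SHIFT 12UL			/* assumed 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long uaddr = 0x7f0000001800UL;	/* not page aligned */
	unsigned long ulen  = 2 * PAGE_SIZE;

	unsigned long first  = (uaddr & PAGE_MASK) >> PAGE_SHIFT;
	unsigned long last   = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT;
	unsigned long npages = last - first + 1; /* 3: the range straddles pages */

	struct rlimit rl;
	if (getrlimit(RLIMIT_MEMLOCK, &rl))
		return 1;

	/* Same unit as sev->pages_locked: pages, not bytes. */
	unsigned long lock_limit = rl.rlim_cur >> PAGE_SHIFT;

	printf("npages=%lu lock_limit=%lu\n", npages, lock_limit);
	return 0;
}
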
+static void sev_clflush_pages(struct page *pages[], unsigned long npages)
+{
+       uint8_t *page_virtual;
+       unsigned long i;
+
+       if (npages == 0 || pages == NULL)
+               return;
+
+       for (i = 0; i < npages; i++) {
+               page_virtual = kmap_atomic(pages[i]);
+               clflush_cache_range(page_virtual, PAGE_SIZE);
+               kunmap_atomic(page_virtual);
+       }
+}
+
+static void __unregister_enc_region_locked(struct kvm *kvm,
+                                          struct enc_region *region)
+{
+       /*
+        * The guest may change the memory encryption attribute from C=0 -> C=1
+        * or vice versa for this memory range. Let's make sure caches are
+        * flushed so that guest data gets written into memory with the
+        * correct C-bit.
+        */
+       sev_clflush_pages(region->pages, region->npages);
+
+       sev_unpin_memory(kvm, region->pages, region->npages);
+       list_del(&region->list);
+       kfree(region);
+}
+
+static struct kvm *svm_vm_alloc(void)
+{
+       struct kvm_svm *kvm_svm = kzalloc(sizeof(struct kvm_svm), GFP_KERNEL);
+       return &kvm_svm->kvm;
+}
+
+static void svm_vm_free(struct kvm *kvm)
+{
+       kfree(to_kvm_svm(kvm));
+}
+
+static void sev_vm_destroy(struct kvm *kvm)
+{
+       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+       struct list_head *head = &sev->regions_list;
+       struct list_head *pos, *q;
+
+       if (!sev_guest(kvm))
+               return;
+
+       mutex_lock(&kvm->lock);
+
+       /*
+        * If userspace was terminated before unregistering the memory regions,
+        * then let's unpin all the registered memory.
+        */
+       if (!list_empty(head)) {
+               list_for_each_safe(pos, q, head) {
+                       __unregister_enc_region_locked(kvm,
+                               list_entry(pos, struct enc_region, list));
+               }
+       }
+
+       mutex_unlock(&kvm->lock);
+
+       sev_unbind_asid(kvm, sev->handle);
+       sev_asid_free(kvm);
+}
+
 static void avic_vm_destroy(struct kvm *kvm)
 {
        unsigned long flags;
-       struct kvm_arch *vm_data = &kvm->arch;
+       struct kvm_svm *kvm_svm = to_kvm_svm(kvm);
 
        if (!avic)
                return;
 
-       if (vm_data->avic_logical_id_table_page)
-               __free_page(vm_data->avic_logical_id_table_page);
-       if (vm_data->avic_physical_id_table_page)
-               __free_page(vm_data->avic_physical_id_table_page);
+       if (kvm_svm->avic_logical_id_table_page)
+               __free_page(kvm_svm->avic_logical_id_table_page);
+       if (kvm_svm->avic_physical_id_table_page)
+               __free_page(kvm_svm->avic_physical_id_table_page);
 
        spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
-       hash_del(&vm_data->hnode);
+       hash_del(&kvm_svm->hnode);
        spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
 }
 
+static void svm_vm_destroy(struct kvm *kvm)
+{
+       avic_vm_destroy(kvm);
+       sev_vm_destroy(kvm);
+}
+
 static int avic_vm_init(struct kvm *kvm)
 {
        unsigned long flags;
        int err = -ENOMEM;
-       struct kvm_arch *vm_data = &kvm->arch;
+       struct kvm_svm *kvm_svm = to_kvm_svm(kvm);
+       struct kvm_svm *k2;
        struct page *p_page;
        struct page *l_page;
-       struct kvm_arch *ka;
        u32 vm_id;
 
        if (!avic)
@@ -1474,7 +1823,7 @@ static int avic_vm_init(struct kvm *kvm)
        if (!p_page)
                goto free_avic;
 
-       vm_data->avic_physical_id_table_page = p_page;
+       kvm_svm->avic_physical_id_table_page = p_page;
        clear_page(page_address(p_page));
 
        /* Allocating logical APIC ID table (4KB) */
@@ -1482,7 +1831,7 @@ static int avic_vm_init(struct kvm *kvm)
        if (!l_page)
                goto free_avic;
 
-       vm_data->avic_logical_id_table_page = l_page;
+       kvm_svm->avic_logical_id_table_page = l_page;
        clear_page(page_address(l_page));
 
        spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
@@ -1494,15 +1843,13 @@ static int avic_vm_init(struct kvm *kvm)
        }
        /* Is it still in use? Only possible if wrapped at least once */
        if (next_vm_id_wrapped) {
-               hash_for_each_possible(svm_vm_data_hash, ka, hnode, vm_id) {
-                       struct kvm *k2 = container_of(ka, struct kvm, arch);
-                       struct kvm_arch *vd2 = &k2->arch;
-                       if (vd2->avic_vm_id == vm_id)
+               hash_for_each_possible(svm_vm_data_hash, k2, hnode, vm_id) {
+                       if (k2->avic_vm_id == vm_id)
                                goto again;
                }
        }
-       vm_data->avic_vm_id = vm_id;
-       hash_add(svm_vm_data_hash, &vm_data->hnode, vm_data->avic_vm_id);
+       kvm_svm->avic_vm_id = vm_id;
+       hash_add(svm_vm_data_hash, &kvm_svm->hnode, kvm_svm->avic_vm_id);
        spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
 
        return 0;
@@ -1606,6 +1953,7 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
        u32 dummy;
        u32 eax = 1;
 
+       vcpu->arch.microcode_version = 0x01000065;
        svm->spec_ctrl = 0;
 
        if (!init_event) {
@@ -2066,7 +2414,7 @@ static int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
                return 1;
 
        if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
-               svm_flush_tlb(vcpu);
+               svm_flush_tlb(vcpu, true);
 
        vcpu->arch.cr4 = cr4;
        if (!npt_enabled)
@@ -2125,7 +2473,7 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
 {
        if (sd->next_asid > sd->max_asid) {
                ++sd->asid_generation;
-               sd->next_asid = 1;
+               sd->next_asid = sd->min_asid;
                svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
        }
 
@@ -2173,22 +2521,24 @@ static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
 
 static int pf_interception(struct vcpu_svm *svm)
 {
-       u64 fault_address = svm->vmcb->control.exit_info_2;
+       u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2);
        u64 error_code = svm->vmcb->control.exit_info_1;
 
        return kvm_handle_page_fault(&svm->vcpu, error_code, fault_address,
-                       svm->vmcb->control.insn_bytes,
+                       static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
+                       svm->vmcb->control.insn_bytes : NULL,
                        svm->vmcb->control.insn_len);
 }
 
 static int npf_interception(struct vcpu_svm *svm)
 {
-       u64 fault_address = svm->vmcb->control.exit_info_2;
+       u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2);
        u64 error_code = svm->vmcb->control.exit_info_1;
 
        trace_kvm_page_fault(fault_address, error_code);
        return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code,
-                       svm->vmcb->control.insn_bytes,
+                       static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
+                       svm->vmcb->control.insn_bytes : NULL,
                        svm->vmcb->control.insn_len);
 }
 
@@ -2247,6 +2597,23 @@ static int ac_interception(struct vcpu_svm *svm)
        return 1;
 }
 
+static int gp_interception(struct vcpu_svm *svm)
+{
+       struct kvm_vcpu *vcpu = &svm->vcpu;
+       u32 error_code = svm->vmcb->control.exit_info_1;
+       int er;
+
+       WARN_ON_ONCE(!enable_vmware_backdoor);
+
+       er = emulate_instruction(vcpu,
+               EMULTYPE_VMWARE | EMULTYPE_NO_UD_ON_FAIL);
+       if (er == EMULATE_USER_EXIT)
+               return 0;
+       else if (er != EMULATE_DONE)
+               kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
+       return 1;
+}
+
 static bool is_erratum_383(void)
 {
        int err, i;
@@ -2335,7 +2702,7 @@ static int io_interception(struct vcpu_svm *svm)
 {
        struct kvm_vcpu *vcpu = &svm->vcpu;
        u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
-       int size, in, string, ret;
+       int size, in, string;
        unsigned port;
 
        ++svm->vcpu.stat.io_exits;
@@ -2347,16 +2714,8 @@ static int io_interception(struct vcpu_svm *svm)
        port = io_info >> 16;
        size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
        svm->next_rip = svm->vmcb->control.exit_info_2;
-       ret = kvm_skip_emulated_instruction(&svm->vcpu);
 
-       /*
-        * TODO: we might be squashing a KVM_GUESTDBG_SINGLESTEP-triggered
-        * KVM_EXIT_DEBUG here.
-        */
-       if (in)
-               return kvm_fast_pio_in(vcpu, size, port) && ret;
-       else
-               return kvm_fast_pio_out(vcpu, size, port) && ret;
+       return kvm_fast_pio(&svm->vcpu, size, port, in);
 }
 
 static int nmi_interception(struct vcpu_svm *svm)
@@ -2415,7 +2774,7 @@ static void nested_svm_set_tdp_cr3(struct kvm_vcpu *vcpu,
 
        svm->vmcb->control.nested_cr3 = __sme_set(root);
        mark_dirty(svm->vmcb, VMCB_NPT);
-       svm_flush_tlb(vcpu);
+       svm_flush_tlb(vcpu, true);
 }
 
 static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
@@ -2957,7 +3316,8 @@ static bool nested_vmcb_checks(struct vmcb *vmcb)
        if (vmcb->control.asid == 0)
                return false;
 
-       if (vmcb->control.nested_ctl && !npt_enabled)
+       if ((vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE) &&
+           !npt_enabled)
                return false;
 
        return true;
@@ -2971,7 +3331,7 @@ static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
        else
                svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
 
-       if (nested_vmcb->control.nested_ctl) {
+       if (nested_vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE) {
                kvm_mmu_unload(&svm->vcpu);
                svm->nested.nested_cr3 = nested_vmcb->control.nested_cr3;
                nested_svm_init_mmu_context(&svm->vcpu);
@@ -3019,7 +3379,7 @@ static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
        svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
        svm->nested.intercept            = nested_vmcb->control.intercept;
 
-       svm_flush_tlb(&svm->vcpu);
+       svm_flush_tlb(&svm->vcpu, true);
        svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
        if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
                svm->vcpu.arch.hflags |= HF_VINTR_MASK;
@@ -3400,6 +3760,12 @@ static int emulate_on_interception(struct vcpu_svm *svm)
        return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
 }
 
+static int rsm_interception(struct vcpu_svm *svm)
+{
+       return x86_emulate_instruction(&svm->vcpu, 0, 0,
+                                      rsm_ins_bytes, 2) == EMULATE_DONE;
+}
+
 static int rdpmc_interception(struct vcpu_svm *svm)
 {
        int err;
@@ -3561,6 +3927,22 @@ static int cr8_write_interception(struct vcpu_svm *svm)
        return 0;
 }
 
+static int svm_get_msr_feature(struct kvm_msr_entry *msr)
+{
+       msr->data = 0;
+
+       switch (msr->index) {
+       case MSR_F10H_DECFG:
+               if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC))
+                       msr->data |= MSR_F10H_DECFG_LFENCE_SERIALIZE;
+               break;
+       default:
+               return 1;
+       }
+
+       return 0;
+}
+
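The MSR_F10H_DECFG handling added to svm_set_msr() below boils down to two mask checks against the feature value that svm_get_msr_feature() reports. A standalone sketch of just that validation, with plain integers standing in for the MSR plumbing:

#include <stdint.h>
#include <stdio.h>

#define DECFG_LFENCE_SERIALIZE 0x2ULL	/* MSR_F10H_DECFG_LFENCE_SERIALIZE */

static int check_decfg_write(uint64_t data, uint64_t supported,
			     int host_initiated)
{
	if (data & ~supported)		/* unsupported bits -> reject (#GP) */
		return 1;
	if (!host_initiated && (data ^ supported))
		return 1;		/* guest may not change any bit */
	return 0;			/* accept the write */
}

int main(void)
{
	uint64_t supported = DECFG_LFENCE_SERIALIZE;

	printf("%d\n", check_decfg_write(DECFG_LFENCE_SERIALIZE, supported, 0)); /* 0 */
	printf("%d\n", check_decfg_write(0, supported, 0)); /* 1: guest clears a bit */
	printf("%d\n", check_decfg_write(0, supported, 1)); /* 0: host may change it */
	return 0;
}
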
 static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
@@ -3636,9 +4018,6 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 
                msr_info->data = svm->spec_ctrl;
                break;
-       case MSR_IA32_UCODE_REV:
-               msr_info->data = 0x01000065;
-               break;
        case MSR_F15H_IC_CFG: {
 
                int family, model;
@@ -3656,6 +4035,9 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                        msr_info->data = 0x1E;
                }
                break;
+       case MSR_F10H_DECFG:
+               msr_info->data = svm->msr_decfg;
+               break;
        default:
                return kvm_get_msr_common(vcpu, msr_info);
        }
@@ -3834,6 +4216,24 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
        case MSR_VM_IGNNE:
                vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
                break;
+       case MSR_F10H_DECFG: {
+               struct kvm_msr_entry msr_entry;
+
+               msr_entry.index = msr->index;
+               if (svm_get_msr_feature(&msr_entry))
+                       return 1;
+
+               /* Check the supported bits */
+               if (data & ~msr_entry.data)
+                       return 1;
+
+               /* Don't allow the guest to change a bit, #GP */
+               if (!msr->host_initiated && (data ^ msr_entry.data))
+                       return 1;
+
+               svm->msr_decfg = data;
+               break;
+       }
        case MSR_IA32_APICBASE:
                if (kvm_vcpu_apicv_active(vcpu))
                        avic_update_vapic_bar(to_svm(vcpu), data);
@@ -3978,7 +4378,7 @@ static int avic_incomplete_ipi_interception(struct vcpu_svm *svm)
 
 static u32 *avic_get_logical_id_entry(struct kvm_vcpu *vcpu, u32 ldr, bool flat)
 {
-       struct kvm_arch *vm_data = &vcpu->kvm->arch;
+       struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
        int index;
        u32 *logical_apic_id_table;
        int dlid = GET_APIC_LOGICAL_ID(ldr);
@@ -4000,7 +4400,7 @@ static u32 *avic_get_logical_id_entry(struct kvm_vcpu *vcpu, u32 ldr, bool flat)
                index = (cluster << 2) + apic;
        }
 
-       logical_apic_id_table = (u32 *) page_address(vm_data->avic_logical_id_table_page);
+       logical_apic_id_table = (u32 *) page_address(kvm_svm->avic_logical_id_table_page);
 
        return &logical_apic_id_table[index];
 }
@@ -4080,7 +4480,7 @@ static int avic_handle_apic_id_update(struct kvm_vcpu *vcpu)
 static int avic_handle_dfr_update(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
-       struct kvm_arch *vm_data = &vcpu->kvm->arch;
+       struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
        u32 dfr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR);
        u32 mod = (dfr >> 28) & 0xf;
 
@@ -4089,11 +4489,11 @@ static int avic_handle_dfr_update(struct kvm_vcpu *vcpu)
         * If this changes, we need to flush the AVIC logical
         * APID id table.
         */
-       if (vm_data->ldr_mode == mod)
+       if (kvm_svm->ldr_mode == mod)
                return 0;
 
-       clear_page(page_address(vm_data->avic_logical_id_table_page));
-       vm_data->ldr_mode = mod;
+       clear_page(page_address(kvm_svm->avic_logical_id_table_page));
+       kvm_svm->ldr_mode = mod;
 
        if (svm->ldr_reg)
                avic_handle_ldr_update(vcpu);
@@ -4213,6 +4613,7 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
        [SVM_EXIT_EXCP_BASE + PF_VECTOR]        = pf_interception,
        [SVM_EXIT_EXCP_BASE + MC_VECTOR]        = mc_interception,
        [SVM_EXIT_EXCP_BASE + AC_VECTOR]        = ac_interception,
+       [SVM_EXIT_EXCP_BASE + GP_VECTOR]        = gp_interception,
        [SVM_EXIT_INTR]                         = intr_interception,
        [SVM_EXIT_NMI]                          = nmi_interception,
        [SVM_EXIT_SMI]                          = nop_on_interception,
@@ -4242,7 +4643,7 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
        [SVM_EXIT_MWAIT]                        = mwait_interception,
        [SVM_EXIT_XSETBV]                       = xsetbv_interception,
        [SVM_EXIT_NPF]                          = npf_interception,
-       [SVM_EXIT_RSM]                          = emulate_on_interception,
+       [SVM_EXIT_RSM]                          = rsm_interception,
        [SVM_EXIT_AVIC_INCOMPLETE_IPI]          = avic_incomplete_ipi_interception,
        [SVM_EXIT_AVIC_UNACCELERATED_ACCESS]    = avic_unaccelerated_access_interception,
 };
@@ -4442,12 +4843,39 @@ static void reload_tss(struct kvm_vcpu *vcpu)
        load_TR_desc();
 }
 
+static void pre_sev_run(struct vcpu_svm *svm, int cpu)
+{
+       struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
+       int asid = sev_get_asid(svm->vcpu.kvm);
+
+       /* Assign the ASID allocated for this SEV guest */
+       svm->vmcb->control.asid = asid;
+
+       /*
+        * Flush guest TLB:
+        *
+        * 1) when a different VMCB for the same ASID is to be run on the
+        *    same host CPU, or
+        * 2) when this VMCB was executed on a different host CPU in previous
+        *    VMRUNs.
+        */
+       if (sd->sev_vmcbs[asid] == svm->vmcb &&
+           svm->last_cpu == cpu)
+               return;
+
+       svm->last_cpu = cpu;
+       sd->sev_vmcbs[asid] = svm->vmcb;
+       svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
+       mark_dirty(svm->vmcb, VMCB_ASID);
+}
+
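The flush policy in pre_sev_run() can be captured in a few lines: skip the ASID flush only when this exact VMCB was the last one to run with this ASID on this CPU. A standalone sketch, with a single array standing in for the per-CPU sev_vmcbs tables:

#include <stdio.h>

#define MAX_ASID 8

struct vmcb_demo { int tlb_flush_pending; };

static struct vmcb_demo *sev_vmcbs[MAX_ASID + 1];	/* per-CPU in the kernel */

static void pre_sev_run_demo(struct vmcb_demo *vmcb, int asid,
			     int cpu, int *last_cpu)
{
	if (sev_vmcbs[asid] == vmcb && *last_cpu == cpu)
		return;		/* same VMCB, same CPU: no flush needed */

	*last_cpu = cpu;
	sev_vmcbs[asid] = vmcb;
	vmcb->tlb_flush_pending = 1;	/* TLB_CONTROL_FLUSH_ASID */
}

int main(void)
{
	struct vmcb_demo a = {0}, b = {0};
	int last_cpu = -1;

	pre_sev_run_demo(&a, 1, 0, &last_cpu);	/* first run: flush */
	printf("a flush=%d\n", a.tlb_flush_pending);
	a.tlb_flush_pending = 0;
	pre_sev_run_demo(&a, 1, 0, &last_cpu);	/* same VMCB+CPU: no flush */
	printf("a flush=%d\n", a.tlb_flush_pending);
	pre_sev_run_demo(&b, 1, 0, &last_cpu);	/* other VMCB, same ASID: flush */
	printf("b flush=%d\n", b.tlb_flush_pending);
	return 0;
}
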
 static void pre_svm_run(struct vcpu_svm *svm)
 {
        int cpu = raw_smp_processor_id();
 
        struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
 
+       if (sev_guest(svm->vcpu.kvm))
+               return pre_sev_run(svm, cpu);
+
        /* FIXME: handle wraparound of asid_generation */
        if (svm->asid_generation != sd->asid_generation)
                new_asid(svm, sd);
@@ -4701,7 +5129,7 @@ static int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
                        /* Try to enable guest_mode in IRTE */
                        pi.base = __sme_set(page_to_phys(svm->avic_backing_page) &
                                            AVIC_HPA_MASK);
-                       pi.ga_tag = AVIC_GATAG(kvm->arch.avic_vm_id,
+                       pi.ga_tag = AVIC_GATAG(to_kvm_svm(kvm)->avic_vm_id,
                                                     svm->vcpu.vcpu_id);
                        pi.is_guest_mode = true;
                        pi.vcpu_data = &vcpu_info;
@@ -4865,7 +5293,12 @@ static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
        return 0;
 }
 
-static void svm_flush_tlb(struct kvm_vcpu *vcpu)
+static int svm_set_identity_map_addr(struct kvm *kvm, u64 ident_addr)
+{
+       return 0;
+}
+
+static void svm_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
 
@@ -5029,7 +5462,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
         * being speculatively taken.
         */
        if (svm->spec_ctrl)
-               wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+               native_wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
 
        asm volatile (
                "push %%" _ASM_BP "; \n\t"
@@ -5138,11 +5571,11 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
         * If the L02 MSR bitmap does not intercept the MSR, then we need to
         * save it.
         */
-       if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
-               rdmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+       if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
+               svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 
        if (svm->spec_ctrl)
-               wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+               native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
 
        /* Eliminate branch target predictions from guest mode */
        vmexit_fill_RSB();
@@ -5208,7 +5641,7 @@ static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
 
        svm->vmcb->save.cr3 = __sme_set(root);
        mark_dirty(svm->vmcb, VMCB_CR);
-       svm_flush_tlb(vcpu);
+       svm_flush_tlb(vcpu, true);
 }
 
 static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root)
@@ -5222,7 +5655,7 @@ static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root)
        svm->vmcb->save.cr3 = kvm_read_cr3(vcpu);
        mark_dirty(svm->vmcb, VMCB_CR);
 
-       svm_flush_tlb(vcpu);
+       svm_flush_tlb(vcpu, true);
 }
 
 static int is_disabled(void)
@@ -5308,6 +5741,12 @@ static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
                        entry->edx |= SVM_FEATURE_NPT;
 
                break;
+       case 0x8000001F:
+               /* Support memory encryption cpuid if host supports it */
+               if (boot_cpu_has(X86_FEATURE_SEV))
+                       cpuid(0x8000001f, &entry->eax, &entry->ebx,
+                               &entry->ecx, &entry->edx);
+
        }
 }
 
@@ -5336,6 +5775,11 @@ static bool svm_xsaves_supported(void)
        return false;
 }
 
+static bool svm_umip_emulated(void)
+{
+       return false;
+}
+
 static bool svm_has_wbinvd_exit(void)
 {
        return true;
@@ -5637,56 +6081,883 @@ static int enable_smi_window(struct kvm_vcpu *vcpu)
        return 0;
 }
 
-static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
-       .cpu_has_kvm_support = has_svm,
-       .disabled_by_bios = is_disabled,
-       .hardware_setup = svm_hardware_setup,
-       .hardware_unsetup = svm_hardware_unsetup,
-       .check_processor_compatibility = svm_check_processor_compat,
-       .hardware_enable = svm_hardware_enable,
-       .hardware_disable = svm_hardware_disable,
-       .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
-       .cpu_has_high_real_mode_segbase = svm_has_high_real_mode_segbase,
+static int sev_asid_new(void)
+{
+       int pos;
 
-       .vcpu_create = svm_create_vcpu,
-       .vcpu_free = svm_free_vcpu,
-       .vcpu_reset = svm_vcpu_reset,
+       /*
+        * An SEV-enabled guest must use an ASID from min_sev_asid to max_sev_asid.
+        */
+       pos = find_next_zero_bit(sev_asid_bitmap, max_sev_asid, min_sev_asid - 1);
+       if (pos >= max_sev_asid)
+               return -EBUSY;
 
-       .vm_init = avic_vm_init,
-       .vm_destroy = avic_vm_destroy,
+       set_bit(pos, sev_asid_bitmap);
+       return pos + 1;
+}
 
-       .prepare_guest_switch = svm_prepare_guest_switch,
-       .vcpu_load = svm_vcpu_load,
-       .vcpu_put = svm_vcpu_put,
-       .vcpu_blocking = svm_vcpu_blocking,
-       .vcpu_unblocking = svm_vcpu_unblocking,
+static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
+{
+       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+       int asid, ret;
 
-       .update_bp_intercept = update_bp_intercept,
-       .get_msr = svm_get_msr,
-       .set_msr = svm_set_msr,
-       .get_segment_base = svm_get_segment_base,
-       .get_segment = svm_get_segment,
-       .set_segment = svm_set_segment,
-       .get_cpl = svm_get_cpl,
-       .get_cs_db_l_bits = kvm_get_cs_db_l_bits,
-       .decache_cr0_guest_bits = svm_decache_cr0_guest_bits,
-       .decache_cr3 = svm_decache_cr3,
-       .decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
-       .set_cr0 = svm_set_cr0,
-       .set_cr3 = svm_set_cr3,
-       .set_cr4 = svm_set_cr4,
-       .set_efer = svm_set_efer,
-       .get_idt = svm_get_idt,
-       .set_idt = svm_set_idt,
-       .get_gdt = svm_get_gdt,
-       .set_gdt = svm_set_gdt,
-       .get_dr6 = svm_get_dr6,
-       .set_dr6 = svm_set_dr6,
-       .set_dr7 = svm_set_dr7,
-       .sync_dirty_debug_regs = svm_sync_dirty_debug_regs,
-       .cache_reg = svm_cache_reg,
-       .get_rflags = svm_get_rflags,
-       .set_rflags = svm_set_rflags,
+       ret = -EBUSY;
+       asid = sev_asid_new();
+       if (asid < 0)
+               return ret;
+
+       ret = sev_platform_init(&argp->error);
+       if (ret)
+               goto e_free;
+
+       sev->active = true;
+       sev->asid = asid;
+       INIT_LIST_HEAD(&sev->regions_list);
+
+       return 0;
+
+e_free:
+       __sev_asid_free(asid);
+       return ret;
+}
+
+static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
+{
+       struct sev_data_activate *data;
+       int asid = sev_get_asid(kvm);
+       int ret;
+
+       wbinvd_on_all_cpus();
+
+       ret = sev_guest_df_flush(error);
+       if (ret)
+               return ret;
+
+       data = kzalloc(sizeof(*data), GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       /* activate ASID on the given handle */
+       data->handle = handle;
+       data->asid   = asid;
+       ret = sev_guest_activate(data, error);
+       kfree(data);
+
+       return ret;
+}
+
+static int __sev_issue_cmd(int fd, int id, void *data, int *error)
+{
+       struct fd f;
+       int ret;
+
+       f = fdget(fd);
+       if (!f.file)
+               return -EBADF;
+
+       ret = sev_issue_cmd_external_user(f.file, id, data, error);
+
+       fdput(f);
+       return ret;
+}
+
+static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error)
+{
+       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+
+       return __sev_issue_cmd(sev->fd, id, data, error);
+}
+
+static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
+{
+       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+       struct sev_data_launch_start *start;
+       struct kvm_sev_launch_start params;
+       void *dh_blob, *session_blob;
+       int *error = &argp->error;
+       int ret;
+
+       if (!sev_guest(kvm))
+               return -ENOTTY;
+
+       if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
+               return -EFAULT;
+
+       start = kzalloc(sizeof(*start), GFP_KERNEL);
+       if (!start)
+               return -ENOMEM;
+
+       dh_blob = NULL;
+       if (params.dh_uaddr) {
+               dh_blob = psp_copy_user_blob(params.dh_uaddr, params.dh_len);
+               if (IS_ERR(dh_blob)) {
+                       ret = PTR_ERR(dh_blob);
+                       goto e_free;
+               }
+
+               start->dh_cert_address = __sme_set(__pa(dh_blob));
+               start->dh_cert_len = params.dh_len;
+       }
+
+       session_blob = NULL;
+       if (params.session_uaddr) {
+               session_blob = psp_copy_user_blob(params.session_uaddr, params.session_len);
+               if (IS_ERR(session_blob)) {
+                       ret = PTR_ERR(session_blob);
+                       goto e_free_dh;
+               }
+
+               start->session_address = __sme_set(__pa(session_blob));
+               start->session_len = params.session_len;
+       }
+
+       start->handle = params.handle;
+       start->policy = params.policy;
+
+       /* create memory encryption context */
+       ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, start, error);
+       if (ret)
+               goto e_free_session;
+
+       /* Bind ASID to this guest */
+       ret = sev_bind_asid(kvm, start->handle, error);
+       if (ret)
+               goto e_free_session;
+
+       /* return handle to userspace */
+       params.handle = start->handle;
+       if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params))) {
+               sev_unbind_asid(kvm, start->handle);
+               ret = -EFAULT;
+               goto e_free_session;
+       }
+
+       sev->handle = start->handle;
+       sev->fd = argp->sev_fd;
+
+e_free_session:
+       kfree(session_blob);
+e_free_dh:
+       kfree(dh_blob);
+e_free:
+       kfree(start);
+       return ret;
+}
+
+static int get_num_contig_pages(int idx, struct page **inpages,
+                               unsigned long npages)
+{
+       unsigned long paddr, next_paddr;
+       int i = idx + 1, pages = 1;
+
+       /* find the number of contiguous pages starting from idx */
+       paddr = __sme_page_pa(inpages[idx]);
+       while (i < npages) {
+               next_paddr = __sme_page_pa(inpages[i++]);
+               if ((paddr + PAGE_SIZE) == next_paddr) {
+                       pages++;
+                       paddr = next_paddr;
+                       continue;
+               }
+               break;
+       }
+
+       return pages;
+}
+
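A standalone sketch of the get_num_contig_pages() walk, with raw physical addresses standing in for struct page pointers:

#include <stdio.h>

#define PAGE_SIZE 4096UL

static int num_contig_demo(int idx, const unsigned long *paddrs, int n)
{
	int pages = 1;

	/* Count entries after idx that continue the physical run. */
	for (int i = idx + 1; i < n; i++) {
		if (paddrs[i - 1] + PAGE_SIZE != paddrs[i])
			break;
		pages++;
	}
	return pages;
}

int main(void)
{
	/* Two contiguous pages, a gap, then one more page. */
	unsigned long paddrs[] = { 0x10000, 0x11000, 0x40000 };

	printf("%d\n", num_contig_demo(0, paddrs, 3));	/* 2 */
	printf("%d\n", num_contig_demo(2, paddrs, 3));	/* 1 */
	return 0;
}
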
+static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
+{
+       unsigned long vaddr, vaddr_end, next_vaddr, npages, size;
+       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+       struct kvm_sev_launch_update_data params;
+       struct sev_data_launch_update_data *data;
+       struct page **inpages;
+       int i, ret, pages;
+
+       if (!sev_guest(kvm))
+               return -ENOTTY;
+
+       if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
+               return -EFAULT;
+
+       data = kzalloc(sizeof(*data), GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       vaddr = params.uaddr;
+       size = params.len;
+       vaddr_end = vaddr + size;
+
+       /* Lock the user memory. */
+       inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1);
+       if (!inpages) {
+               ret = -ENOMEM;
+               goto e_free;
+       }
+
+       /*
+        * The LAUNCH_UPDATE command will perform in-place encryption of the
+        * memory content (i.e. it will write the same memory region with C=1).
+        * It's possible that the cache may contain the data with C=0, i.e.,
+        * unencrypted, so invalidate it first.
+        */
+       sev_clflush_pages(inpages, npages);
+
+       for (i = 0; vaddr < vaddr_end; vaddr = next_vaddr, i += pages) {
+               int offset, len;
+
+               /*
+                * If the user buffer is not page-aligned, calculate the offset
+                * within the page.
+                */
+               offset = vaddr & (PAGE_SIZE - 1);
+
+               /* Calculate the number of pages that can be encrypted in one go. */
+               pages = get_num_contig_pages(i, inpages, npages);
+
+               len = min_t(size_t, ((pages * PAGE_SIZE) - offset), size);
+
+               data->handle = sev->handle;
+               data->len = len;
+               data->address = __sme_page_pa(inpages[i]) + offset;
+               ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_DATA, data, &argp->error);
+               if (ret)
+                       goto e_unpin;
+
+               size -= len;
+               next_vaddr = vaddr + len;
+       }
+
+e_unpin:
+       /* content of memory is updated, mark pages dirty */
+       for (i = 0; i < npages; i++) {
+               set_page_dirty_lock(inpages[i]);
+               mark_page_accessed(inpages[i]);
+       }
+       /* unlock the user pages */
+       sev_unpin_memory(kvm, inpages, npages);
+e_free:
+       kfree(data);
+       return ret;
+}
+
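The chunking in sev_launch_update_data() trims each LAUNCH_UPDATE_DATA command by the offset into the first page and by the bytes remaining. A sketch of just that arithmetic, with a hard-coded contiguous-run length where the kernel would call get_num_contig_pages():

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long vaddr = 0x1800;		/* not page aligned */
	unsigned long size  = 3 * PAGE_SIZE;

	while (size) {
		unsigned long offset = vaddr & (PAGE_SIZE - 1);
		unsigned long contig_pages = 2;	/* from get_num_contig_pages() */
		unsigned long len = contig_pages * PAGE_SIZE - offset;

		if (len > size)
			len = size;	/* min_t(size_t, ...) in the patch */
		printf("encrypt %lu bytes, page offset %lu\n", len, offset);
		size -= len;
		vaddr += len;
	}
	return 0;
}
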
+static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
+{
+       void __user *measure = (void __user *)(uintptr_t)argp->data;
+       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+       struct sev_data_launch_measure *data;
+       struct kvm_sev_launch_measure params;
+       void __user *p = NULL;
+       void *blob = NULL;
+       int ret;
+
+       if (!sev_guest(kvm))
+               return -ENOTTY;
+
+       if (copy_from_user(&params, measure, sizeof(params)))
+               return -EFAULT;
+
+       data = kzalloc(sizeof(*data), GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       /* User wants to query the blob length */
+       if (!params.len)
+               goto cmd;
+
+       p = (void __user *)(uintptr_t)params.uaddr;
+       if (p) {
+               if (params.len > SEV_FW_BLOB_MAX_SIZE) {
+                       ret = -EINVAL;
+                       goto e_free;
+               }
+
+               ret = -ENOMEM;
+               blob = kmalloc(params.len, GFP_KERNEL);
+               if (!blob)
+                       goto e_free;
+
+               data->address = __psp_pa(blob);
+               data->len = params.len;
+       }
+
+cmd:
+       data->handle = sev->handle;
+       ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, data, &argp->error);
+
+       /*
+        * If we were only querying the blob length, the firmware has
+        * already responded with the expected data.
+        */
+       if (!params.len)
+               goto done;
+
+       if (ret)
+               goto e_free_blob;
+
+       if (blob) {
+               if (copy_to_user(p, blob, params.len))
+                       ret = -EFAULT;
+       }
+
+done:
+       params.len = data->len;
+       if (copy_to_user(measure, &params, sizeof(params)))
+               ret = -EFAULT;
+e_free_blob:
+       kfree(blob);
+e_free:
+       kfree(data);
+       return ret;
+}
+
+static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
+{
+       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+       struct sev_data_launch_finish *data;
+       int ret;
+
+       if (!sev_guest(kvm))
+               return -ENOTTY;
+
+       data = kzalloc(sizeof(*data), GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       data->handle = sev->handle;
+       ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, data, &argp->error);
+
+       kfree(data);
+       return ret;
+}
+
+static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
+{
+       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+       struct kvm_sev_guest_status params;
+       struct sev_data_guest_status *data;
+       int ret;
+
+       if (!sev_guest(kvm))
+               return -ENOTTY;
+
+       data = kzalloc(sizeof(*data), GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       data->handle = sev->handle;
+       ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, data, &argp->error);
+       if (ret)
+               goto e_free;
+
+       params.policy = data->policy;
+       params.state = data->state;
+       params.handle = data->handle;
+
+       if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params)))
+               ret = -EFAULT;
+e_free:
+       kfree(data);
+       return ret;
+}
+
+static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
+                              unsigned long dst, int size,
+                              int *error, bool enc)
+{
+       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+       struct sev_data_dbg *data;
+       int ret;
+
+       data = kzalloc(sizeof(*data), GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       data->handle = sev->handle;
+       data->dst_addr = dst;
+       data->src_addr = src;
+       data->len = size;
+
+       ret = sev_issue_cmd(kvm,
+                           enc ? SEV_CMD_DBG_ENCRYPT : SEV_CMD_DBG_DECRYPT,
+                           data, error);
+       kfree(data);
+       return ret;
+}
+
+static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
+                            unsigned long dst_paddr, int sz, int *err)
+{
+       int offset;
+
+       /*
+        * It's safe to read more than we are asked for; the caller should
+        * ensure that the destination has enough space.
+        */
+       offset = src_paddr & 15;	/* compute before rounding down */
+       src_paddr = round_down(src_paddr, 16);
+       sz = round_up(sz + offset, 16);
+
+       return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false);
+}
+
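A worked example of the 16-byte fixup in __sev_dbg_decrypt(): the source address is rounded down and the length rounded up (by the displaced offset) so the firmware command still covers every requested byte:

#include <stdio.h>

static unsigned long round_down16(unsigned long x) { return x & ~15UL; }
static unsigned long round_up16(unsigned long x)   { return (x + 15) & ~15UL; }

int main(void)
{
	unsigned long src_paddr = 0x100a;	/* unaligned source */
	int sz = 20;				/* unaligned length */

	int offset = src_paddr & 15;		/* 10 */
	src_paddr = round_down16(src_paddr);	/* 0x1000 */
	sz = round_up16(sz + offset);		/* 32, covers bytes [10, 30) */

	printf("src=0x%lx offset=%d sz=%d\n", src_paddr, offset, sz);
	return 0;
}
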
+static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
+                                 unsigned long __user dst_uaddr,
+                                 unsigned long dst_paddr,
+                                 int size, int *err)
+{
+       struct page *tpage = NULL;
+       int ret, offset;
+
+       /* If the inputs are not 16-byte aligned, use an intermediate buffer. */
+       if (!IS_ALIGNED(dst_paddr, 16) ||
+           !IS_ALIGNED(paddr,     16) ||
+           !IS_ALIGNED(size,      16)) {
+               tpage = (void *)alloc_page(GFP_KERNEL);
+               if (!tpage)
+                       return -ENOMEM;
+
+               dst_paddr = __sme_page_pa(tpage);
+       }
+
+       ret = __sev_dbg_decrypt(kvm, paddr, dst_paddr, size, err);
+       if (ret)
+               goto e_free;
+
+       if (tpage) {
+               offset = paddr & 15;
+               if (copy_to_user((void __user *)(uintptr_t)dst_uaddr,
+                                page_address(tpage) + offset, size))
+                       ret = -EFAULT;
+       }
+
+e_free:
+       if (tpage)
+               __free_page(tpage);
+
+       return ret;
+}
+
+static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
+                                 unsigned long __user vaddr,
+                                 unsigned long dst_paddr,
+                                 unsigned long __user dst_vaddr,
+                                 int size, int *error)
+{
+       struct page *src_tpage = NULL;
+       struct page *dst_tpage = NULL;
+       int ret, len = size;
+
+       /* If the source buffer is not aligned, use an intermediate buffer. */
+       if (!IS_ALIGNED(vaddr, 16)) {
+               src_tpage = alloc_page(GFP_KERNEL);
+               if (!src_tpage)
+                       return -ENOMEM;
+
+               if (copy_from_user(page_address(src_tpage),
+                               (void __user *)(uintptr_t)vaddr, size)) {
+                       __free_page(src_tpage);
+                       return -EFAULT;
+               }
+
+               paddr = __sme_page_pa(src_tpage);
+       }
+
+       /*
+        *  If the destination buffer or length is not aligned, then do a
+        *  read-modify-write:
+        *   - decrypt the destination into an intermediate buffer
+        *   - copy the source buffer into an intermediate buffer
+        *   - use the intermediate buffer as the source buffer
+        */
+       if (!IS_ALIGNED(dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
+               int dst_offset;
+
+               dst_tpage = alloc_page(GFP_KERNEL);
+               if (!dst_tpage) {
+                       ret = -ENOMEM;
+                       goto e_free;
+               }
+
+               ret = __sev_dbg_decrypt(kvm, dst_paddr,
+                                       __sme_page_pa(dst_tpage), size, error);
+               if (ret)
+                       goto e_free;
+
+               /*
+                *  If the source is a kernel buffer, use memcpy(); otherwise
+                *  use copy_from_user().
+                */
+               dst_offset = dst_paddr & 15;
+
+               if (src_tpage)
+                       memcpy(page_address(dst_tpage) + dst_offset,
+                              page_address(src_tpage), size);
+               else {
+                       if (copy_from_user(page_address(dst_tpage) + dst_offset,
+                                          (void __user *)(uintptr_t)vaddr, size)) {
+                               ret = -EFAULT;
+                               goto e_free;
+                       }
+               }
+
+               paddr = __sme_page_pa(dst_tpage);
+               dst_paddr = round_down(dst_paddr, 16);
+               len = round_up(size, 16);
+       }
+
+       ret = __sev_issue_dbg_cmd(kvm, paddr, dst_paddr, len, error, true);
+
+e_free:
+       if (src_tpage)
+               __free_page(src_tpage);
+       if (dst_tpage)
+               __free_page(dst_tpage);
+       return ret;
+}
+
+static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
+{
+       unsigned long vaddr, vaddr_end, next_vaddr;
+       unsigned long dst_vaddr, dst_vaddr_end;
+       struct page **src_p, **dst_p;
+       struct kvm_sev_dbg debug;
+       unsigned long n;
+       int ret, size;
+
+       if (!sev_guest(kvm))
+               return -ENOTTY;
+
+       if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug)))
+               return -EFAULT;
+
+       vaddr = debug.src_uaddr;
+       size = debug.len;
+       vaddr_end = vaddr + size;
+       dst_vaddr = debug.dst_uaddr;
+       dst_vaddr_end = dst_vaddr + size;
+
+       for (; vaddr < vaddr_end; vaddr = next_vaddr) {
+               int len, s_off, d_off;
+
+               /* lock userspace source and destination page */
+               src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0);
+               if (!src_p)
+                       return -EFAULT;
+
+               dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, 1);
+               if (!dst_p) {
+                       sev_unpin_memory(kvm, src_p, n);
+                       return -EFAULT;
+               }
+
+               /*
+                * The DBG_{DE,EN}CRYPT commands will perform {dec,en}cryption of the
+                * memory content (i.e. they will write the same memory region with C=1).
+                * It's possible that the cache may contain the data with C=0, i.e.,
+                * unencrypted, so invalidate it first.
+                */
+               sev_clflush_pages(src_p, 1);
+               sev_clflush_pages(dst_p, 1);
+
+               /*
+                * Since the user buffers may not be page-aligned, calculate the
+                * offset within the page for both the source and destination.
+                */
+               s_off = vaddr & ~PAGE_MASK;
+               d_off = dst_vaddr & ~PAGE_MASK;
+               len = min_t(size_t, (PAGE_SIZE - s_off), size);
+
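+               /*
+                * The firmware commands take system physical addresses;
+                * __sme_page_pa() yields the pinned page's PA with the C-bit
+                * set, to which the in-page offset is added.
+                */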
+               if (dec)
+                       ret = __sev_dbg_decrypt_user(kvm,
+                                                    __sme_page_pa(src_p[0]) + s_off,
+                                                    dst_vaddr,
+                                                    __sme_page_pa(dst_p[0]) + d_off,
+                                                    len, &argp->error);
+               else
+                       ret = __sev_dbg_encrypt_user(kvm,
+                                                    __sme_page_pa(src_p[0]) + s_off,
+                                                    vaddr,
+                                                    __sme_page_pa(dst_p[0]) + d_off,
+                                                    dst_vaddr,
+                                                    len, &argp->error);
+
+               sev_unpin_memory(kvm, src_p, 1);
+               sev_unpin_memory(kvm, dst_p, 1);
+
+               if (ret)
+                       goto err;
+
+               next_vaddr = vaddr + len;
+               dst_vaddr = dst_vaddr + len;
+               size -= len;
+       }
+err:
+       return ret;
+}
+
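+/*
+ * Handle KVM_SEV_LAUNCH_SECRET: inject a secret provided by the guest owner
+ * into the guest's memory while the guest is still being launched.
+ */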
+static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
+{
+       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+       struct sev_data_launch_secret *data;
+       struct kvm_sev_launch_secret params;
+       struct page **pages;
+       void *blob, *hdr;
+       unsigned long n;
+       int ret, offset;
+
+       if (!sev_guest(kvm))
+               return -ENOTTY;
+
+       if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
+               return -EFAULT;
+
+       pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1);
+       if (!pages)
+               return -ENOMEM;
+
+       /*
+        * The secret must be copied into a contiguous memory region. Verify
+        * that the userspace memory pages are contiguous before issuing the
+        * command.
+        */
+       if (get_num_contig_pages(0, pages, n) != n) {
+               ret = -EINVAL;
+               goto e_unpin_memory;
+       }
+
+       ret = -ENOMEM;
+       data = kzalloc(sizeof(*data), GFP_KERNEL);
+       if (!data)
+               goto e_unpin_memory;
+
+       offset = params.guest_uaddr & (PAGE_SIZE - 1);
+       data->guest_address = __sme_page_pa(pages[0]) + offset;
+       data->guest_len = params.guest_len;
+
+       blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
+       if (IS_ERR(blob)) {
+               ret = PTR_ERR(blob);
+               goto e_free;
+       }
+
+       data->trans_address = __psp_pa(blob);
+       data->trans_len = params.trans_len;
+
+       hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
+       if (IS_ERR(hdr)) {
+               ret = PTR_ERR(hdr);
+               goto e_free_blob;
+       }
+       data->hdr_address = __psp_pa(hdr);
+       data->hdr_len = params.hdr_len;
+
+       data->handle = sev->handle;
+       ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, data, &argp->error);
+
+       kfree(hdr);
+
+e_free_blob:
+       kfree(blob);
+e_free:
+       kfree(data);
+e_unpin_memory:
+       sev_unpin_memory(kvm, pages, n);
+       return ret;
+}
+
+static int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
+{
+       struct kvm_sev_cmd sev_cmd;
+       int r;
+
+       if (!svm_sev_enabled())
+               return -ENOTTY;
+
+       if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd)))
+               return -EFAULT;
+
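+       /* All SEV command handling for this VM is serialized by kvm->lock. */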
+       mutex_lock(&kvm->lock);
+
+       switch (sev_cmd.id) {
+       case KVM_SEV_INIT:
+               r = sev_guest_init(kvm, &sev_cmd);
+               break;
+       case KVM_SEV_LAUNCH_START:
+               r = sev_launch_start(kvm, &sev_cmd);
+               break;
+       case KVM_SEV_LAUNCH_UPDATE_DATA:
+               r = sev_launch_update_data(kvm, &sev_cmd);
+               break;
+       case KVM_SEV_LAUNCH_MEASURE:
+               r = sev_launch_measure(kvm, &sev_cmd);
+               break;
+       case KVM_SEV_LAUNCH_FINISH:
+               r = sev_launch_finish(kvm, &sev_cmd);
+               break;
+       case KVM_SEV_GUEST_STATUS:
+               r = sev_guest_status(kvm, &sev_cmd);
+               break;
+       case KVM_SEV_DBG_DECRYPT:
+               r = sev_dbg_crypt(kvm, &sev_cmd, true);
+               break;
+       case KVM_SEV_DBG_ENCRYPT:
+               r = sev_dbg_crypt(kvm, &sev_cmd, false);
+               break;
+       case KVM_SEV_LAUNCH_SECRET:
+               r = sev_launch_secret(kvm, &sev_cmd);
+               break;
+       default:
+               r = -EINVAL;
+               goto out;
+       }
+
+       if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd)))
+               r = -EFAULT;
+
+out:
+       mutex_unlock(&kvm->lock);
+       return r;
+}
+
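+/*
+ * Userspace registers a memory region that may contain encrypted guest pages.
+ * The pages are pinned for the lifetime of the region because SEV ties the
+ * encryption to the host physical address, so they must not be migrated or
+ * swapped out.
+ */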
+static int svm_register_enc_region(struct kvm *kvm,
+                                  struct kvm_enc_region *range)
+{
+       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+       struct enc_region *region;
+       int ret = 0;
+
+       if (!sev_guest(kvm))
+               return -ENOTTY;
+
+       region = kzalloc(sizeof(*region), GFP_KERNEL);
+       if (!region)
+               return -ENOMEM;
+
+       region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
+       if (!region->pages) {
+               ret = -ENOMEM;
+               goto e_free;
+       }
+
+       /*
+        * The guest may change the memory encryption attribute from C=0 -> C=1
+        * or vice versa for this memory range. Make sure caches are flushed so
+        * that guest data gets written into memory with the correct C-bit.
+        */
+       sev_clflush_pages(region->pages, region->npages);
+
+       region->uaddr = range->addr;
+       region->size = range->size;
+
+       mutex_lock(&kvm->lock);
+       list_add_tail(&region->list, &sev->regions_list);
+       mutex_unlock(&kvm->lock);
+
+       return ret;
+
+e_free:
+       kfree(region);
+       return ret;
+}
+
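+/* Look up a previously registered region by exact (addr, size) match. */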
+static struct enc_region *
+find_enc_region(struct kvm *kvm, struct kvm_enc_region *range)
+{
+       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+       struct list_head *head = &sev->regions_list;
+       struct enc_region *i;
+
+       list_for_each_entry(i, head, list) {
+               if (i->uaddr == range->addr &&
+                   i->size == range->size)
+                       return i;
+       }
+
+       return NULL;
+}
+
+static int svm_unregister_enc_region(struct kvm *kvm,
+                                    struct kvm_enc_region *range)
+{
+       struct enc_region *region;
+       int ret;
+
+       mutex_lock(&kvm->lock);
+
+       if (!sev_guest(kvm)) {
+               ret = -ENOTTY;
+               goto failed;
+       }
+
+       region = find_enc_region(kvm, range);
+       if (!region) {
+               ret = -EINVAL;
+               goto failed;
+       }
+
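+       /*
+        * Unpin the pages and free the region; kvm->lock is held across both
+        * the lookup above and the removal.
+        */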
+       __unregister_enc_region_locked(kvm, region);
+
+       mutex_unlock(&kvm->lock);
+       return 0;
+
+failed:
+       mutex_unlock(&kvm->lock);
+       return ret;
+}
+
+static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
+       .cpu_has_kvm_support = has_svm,
+       .disabled_by_bios = is_disabled,
+       .hardware_setup = svm_hardware_setup,
+       .hardware_unsetup = svm_hardware_unsetup,
+       .check_processor_compatibility = svm_check_processor_compat,
+       .hardware_enable = svm_hardware_enable,
+       .hardware_disable = svm_hardware_disable,
+       .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
+       .cpu_has_high_real_mode_segbase = svm_has_high_real_mode_segbase,
+
+       .vcpu_create = svm_create_vcpu,
+       .vcpu_free = svm_free_vcpu,
+       .vcpu_reset = svm_vcpu_reset,
+
+       .vm_alloc = svm_vm_alloc,
+       .vm_free = svm_vm_free,
+       .vm_init = avic_vm_init,
+       .vm_destroy = svm_vm_destroy,
+
+       .prepare_guest_switch = svm_prepare_guest_switch,
+       .vcpu_load = svm_vcpu_load,
+       .vcpu_put = svm_vcpu_put,
+       .vcpu_blocking = svm_vcpu_blocking,
+       .vcpu_unblocking = svm_vcpu_unblocking,
+
+       .update_bp_intercept = update_bp_intercept,
+       .get_msr_feature = svm_get_msr_feature,
+       .get_msr = svm_get_msr,
+       .set_msr = svm_set_msr,
+       .get_segment_base = svm_get_segment_base,
+       .get_segment = svm_get_segment,
+       .set_segment = svm_set_segment,
+       .get_cpl = svm_get_cpl,
+       .get_cs_db_l_bits = kvm_get_cs_db_l_bits,
+       .decache_cr0_guest_bits = svm_decache_cr0_guest_bits,
+       .decache_cr3 = svm_decache_cr3,
+       .decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
+       .set_cr0 = svm_set_cr0,
+       .set_cr3 = svm_set_cr3,
+       .set_cr4 = svm_set_cr4,
+       .set_efer = svm_set_efer,
+       .get_idt = svm_get_idt,
+       .set_idt = svm_set_idt,
+       .get_gdt = svm_get_gdt,
+       .set_gdt = svm_set_gdt,
+       .get_dr6 = svm_get_dr6,
+       .set_dr6 = svm_set_dr6,
+       .set_dr7 = svm_set_dr7,
+       .sync_dirty_debug_regs = svm_sync_dirty_debug_regs,
+       .cache_reg = svm_cache_reg,
+       .get_rflags = svm_get_rflags,
+       .set_rflags = svm_set_rflags,
 
        .tlb_flush = svm_flush_tlb,
 
@@ -5713,9 +6984,11 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
        .load_eoi_exitmap = svm_load_eoi_exitmap,
        .hwapic_irr_update = svm_hwapic_irr_update,
        .hwapic_isr_update = svm_hwapic_isr_update,
+       .sync_pir_to_irr = kvm_lapic_find_highest_irr,
        .apicv_post_state_restore = avic_post_state_restore,
 
        .set_tss_addr = svm_set_tss_addr,
+       .set_identity_map_addr = svm_set_identity_map_addr,
        .get_tdp_level = get_npt_level,
        .get_mt_mask = svm_get_mt_mask,
 
@@ -5729,6 +7002,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
        .invpcid_supported = svm_invpcid_supported,
        .mpx_supported = svm_mpx_supported,
        .xsaves_supported = svm_xsaves_supported,
+       .umip_emulated = svm_umip_emulated,
 
        .set_supported_cpuid = svm_set_supported_cpuid,
 
@@ -5752,6 +7026,10 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
        .pre_enter_smm = svm_pre_enter_smm,
        .pre_leave_smm = svm_pre_leave_smm,
        .enable_smi_window = enable_smi_window,
+
+       .mem_enc_op = svm_mem_enc_op,
+       .mem_enc_reg_region = svm_register_enc_region,
+       .mem_enc_unreg_region = svm_unregister_enc_region,
 };
 
 static int __init svm_init(void)