kvm: x86: Dynamically allocate guest_fpu
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 0e21ccc46792f6bcc6665ff63979f23979aa1829..e4f18a305ef6118198024258dd78acdf5998ebbf 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -675,11 +675,6 @@ struct svm_cpu_data {
 
 static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
 
-struct svm_init_data {
-       int cpu;
-       int r;
-};
-
 static const u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};
 
 #define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
@@ -1446,7 +1441,7 @@ static u64 svm_read_l1_tsc_offset(struct kvm_vcpu *vcpu)
        return vcpu->arch.tsc_offset;
 }
 
-static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
+static u64 svm_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
        u64 g_tsc_offset = 0;
@@ -1456,14 +1451,16 @@ static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
                g_tsc_offset = svm->vmcb->control.tsc_offset -
                               svm->nested.hsave->control.tsc_offset;
                svm->nested.hsave->control.tsc_offset = offset;
-       } else
-               trace_kvm_write_tsc_offset(vcpu->vcpu_id,
-                                          svm->vmcb->control.tsc_offset,
-                                          offset);
+       }
+
+       trace_kvm_write_tsc_offset(vcpu->vcpu_id,
+                                  svm->vmcb->control.tsc_offset - g_tsc_offset,
+                                  offset);
 
        svm->vmcb->control.tsc_offset = offset + g_tsc_offset;
 
        mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
+       return svm->vmcb->control.tsc_offset;
 }
 
 static void avic_init_vmcb(struct vcpu_svm *svm)
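Note on the hunk above: svm_write_l1_tsc_offset() now returns the offset that is actually programmed into the VMCB (the L1 offset plus the nested L1-to-L2 delta), and the tracepoint reports the previous and new L1 offsets in both the nested and non-nested cases. A minimal sketch of the expected caller in the common x86 code, assuming the kvm_x86_ops hook table and the vcpu->arch.tsc_offset field used elsewhere in this tree:

static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
        /* Cache whichever offset ended up in the active VMCB/VMCS. */
        vcpu->arch.tsc_offset = kvm_x86_ops->write_l1_tsc_offset(vcpu, offset);
}
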
@@ -1664,20 +1661,23 @@ static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu,
 static int avic_init_access_page(struct kvm_vcpu *vcpu)
 {
        struct kvm *kvm = vcpu->kvm;
-       int ret;
+       int ret = 0;
 
+       mutex_lock(&kvm->slots_lock);
        if (kvm->arch.apic_access_page_done)
-               return 0;
+               goto out;
 
-       ret = x86_set_memory_region(kvm,
-                                   APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
-                                   APIC_DEFAULT_PHYS_BASE,
-                                   PAGE_SIZE);
+       ret = __x86_set_memory_region(kvm,
+                                     APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
+                                     APIC_DEFAULT_PHYS_BASE,
+                                     PAGE_SIZE);
        if (ret)
-               return ret;
+               goto out;
 
        kvm->arch.apic_access_page_done = true;
-       return 0;
+out:
+       mutex_unlock(&kvm->slots_lock);
+       return ret;
 }
 
 static int avic_init_backing_page(struct kvm_vcpu *vcpu)
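The hunk above switches avic_init_access_page() to the double-underscore variant because the function now takes kvm->slots_lock itself, making the apic_access_page_done check and the memslot update atomic; calling the plain x86_set_memory_region() here would deadlock on the same lock. For reference, the locked wrapper in x86.c looks roughly like this (sketched from memory, not part of this diff):

int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
{
        int r;

        /* __x86_set_memory_region() expects slots_lock to be held. */
        mutex_lock(&kvm->slots_lock);
        r = __x86_set_memory_region(kvm, id, gpa, size);
        mutex_unlock(&kvm->slots_lock);

        return r;
}
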
@@ -2125,6 +2125,13 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
                goto out;
        }
 
+       svm->vcpu.arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache, GFP_KERNEL);
+       if (!svm->vcpu.arch.guest_fpu) {
+               printk(KERN_ERR "kvm: failed to allocate vcpu's fpu\n");
+               err = -ENOMEM;
+               goto free_partial_svm;
+       }
+
        err = kvm_vcpu_init(&svm->vcpu, kvm, id);
        if (err)
                goto free_svm;
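The guest FPU allocation above draws from x86_fpu_cache, a kmem_cache assumed to be set up once in the common x86 code before any vCPU can be created; the name and flags below are a sketch of that setup, not something taken from this diff:

/* Sketch: cache creation done once, e.g. in kvm_arch_init(),
 * paired with kmem_cache_destroy() on module exit. */
x86_fpu_cache = kmem_cache_create("x86_fpu", sizeof(struct fpu),
                                  __alignof__(struct fpu),
                                  SLAB_ACCOUNT, NULL);
if (!x86_fpu_cache)
        return -ENOMEM;         /* no cache, no vCPUs */
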
@@ -2184,26 +2191,39 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 uninit:
        kvm_vcpu_uninit(&svm->vcpu);
 free_svm:
+       kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.guest_fpu);
+free_partial_svm:
        kmem_cache_free(kvm_vcpu_cache, svm);
 out:
        return ERR_PTR(err);
 }
 
+static void svm_clear_current_vmcb(struct vmcb *vmcb)
+{
+       int i;
+
+       for_each_online_cpu(i)
+               cmpxchg(&per_cpu(svm_data, i)->current_vmcb, vmcb, NULL);
+}
+
 static void svm_free_vcpu(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
 
+       /*
+        * The vmcb page can be recycled, causing a false negative in
+        * svm_vcpu_load(). So, ensure that no logical CPU has this
+        * vmcb page recorded as its current vmcb.
+        */
+       svm_clear_current_vmcb(svm->vmcb);
+
        __free_page(pfn_to_page(__sme_clr(svm->vmcb_pa) >> PAGE_SHIFT));
        __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
        __free_page(virt_to_page(svm->nested.hsave));
        __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
        kvm_vcpu_uninit(vcpu);
+       kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.guest_fpu);
        kmem_cache_free(kvm_vcpu_cache, svm);
-       /*
-        * The vmcb page can be recycled, causing a false negative in
-        * svm_vcpu_load(). So do a full IBPB now.
-        */
-       indirect_branch_prediction_barrier();
 }
 
 static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
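The comment in the hunk above refers to the pointer check in svm_vcpu_load(): an IBPB is issued only when this CPU's cached current_vmcb changes, so a freed vmcb page that gets recycled for a new vCPU could compare equal and skip the barrier. Clearing the stale per-CPU pointers in svm_free_vcpu() keeps that check conservative and replaces the unconditional IBPB that used to run on every vCPU free. The check in question, reproduced from memory as a sketch:

/* In svm_vcpu_load(); sd is this CPU's struct svm_cpu_data. */
if (sd->current_vmcb != svm->vmcb) {
        sd->current_vmcb = svm->vmcb;
        indirect_branch_prediction_barrier();
}
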
@@ -7037,6 +7057,12 @@ static int svm_unregister_enc_region(struct kvm *kvm,
        return ret;
 }
 
+static uint16_t nested_get_evmcs_version(struct kvm_vcpu *vcpu)
+{
+       /* Not supported */
+       return 0;
+}
+
 static int nested_enable_evmcs(struct kvm_vcpu *vcpu,
                                   uint16_t *vmcs_version)
 {
@@ -7149,7 +7175,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
        .has_wbinvd_exit = svm_has_wbinvd_exit,
 
        .read_l1_tsc_offset = svm_read_l1_tsc_offset,
-       .write_tsc_offset = svm_write_tsc_offset,
+       .write_l1_tsc_offset = svm_write_l1_tsc_offset,
 
        .set_tdp_cr3 = set_tdp_cr3,
 
@@ -7175,6 +7201,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
        .mem_enc_unreg_region = svm_unregister_enc_region,
 
        .nested_enable_evmcs = nested_enable_evmcs,
+       .nested_get_evmcs_version = nested_get_evmcs_version,
 };
 
 static int __init svm_init(void)