asedeno.scripts.mit.edu Git - linux.git/commitdiff
KVM: PPC: Book3S HV: Handle memory plug/unplug to secure VM
authorBharata B Rao <bharata@linux.ibm.com>
Mon, 25 Nov 2019 03:06:29 +0000 (08:36 +0530)
committerPaul Mackerras <paulus@ozlabs.org>
Thu, 28 Nov 2019 06:02:26 +0000 (17:02 +1100)
Register the new memslot with UV during plug and unregister
the memslot during unplug. In addition, release all the
device pages during unplug.

Signed-off-by: Bharata B Rao <bharata@linux.ibm.com>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
arch/powerpc/include/asm/kvm_book3s_uvmem.h
arch/powerpc/include/asm/ultravisor-api.h
arch/powerpc/include/asm/ultravisor.h
arch/powerpc/kvm/book3s_64_mmu_radix.c
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_hv_uvmem.c

index 3033a9585b434ea4ede63a9343aaeadeff4596eb..50204e228f1671ed938d6e5ce51ddad4024787cb 100644 (file)
@@ -19,6 +19,8 @@ unsigned long kvmppc_h_svm_page_out(struct kvm *kvm,
 unsigned long kvmppc_h_svm_init_start(struct kvm *kvm);
 unsigned long kvmppc_h_svm_init_done(struct kvm *kvm);
 int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn);
+void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free,
+                            struct kvm *kvm);
 #else
 static inline int kvmppc_uvmem_init(void)
 {
@@ -64,5 +66,9 @@ static inline int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn)
 {
        return -EFAULT;
 }
+
+static inline void
+kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free,
+                       struct kvm *kvm) { }
 #endif /* CONFIG_PPC_UV */
 #endif /* __ASM_KVM_BOOK3S_UVMEM_H__ */
index e774274ab30ed4de86a17498878a4428d181ec95..4b0d044caa2ad24e98766ef4cc04d8e6947c3959 100644 (file)
@@ -27,6 +27,7 @@
 #define UV_RETURN                      0xF11C
 #define UV_ESM                         0xF110
 #define UV_REGISTER_MEM_SLOT           0xF120
+#define UV_UNREGISTER_MEM_SLOT         0xF124
 #define UV_PAGE_IN                     0xF128
 #define UV_PAGE_OUT                    0xF12C
 #define UV_SHARE_PAGE                  0xF130
index 40cc8bace6545cc698e1246d80c47542870a561a..b8e59b7b4ac84f59a4ae350f0463d7bfc0f3ebe6 100644 (file)
@@ -67,6 +67,11 @@ static inline int uv_register_mem_slot(u64 lpid, u64 start_gpa, u64 size,
                            size, flags, slotid);
 }
 
+static inline int uv_unregister_mem_slot(u64 lpid, u64 slotid)
+{
+       return ucall_norets(UV_UNREGISTER_MEM_SLOT, lpid, slotid);
+}
+
 static inline int uv_page_inval(u64 lpid, u64 gpa, u64 page_shift)
 {
        return ucall_norets(UV_PAGE_INVAL, lpid, gpa, page_shift);
index 9f6ba113ffe327bdabc389c02874312fc27b44f5..da857c8ba6e40fc53df5455dc003d1e702751ef2 100644 (file)
@@ -1101,6 +1101,9 @@ void kvmppc_radix_flush_memslot(struct kvm *kvm,
        unsigned long gpa;
        unsigned int shift;
 
+       if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)
+               kvmppc_uvmem_drop_pages(memslot, kvm);
+
        if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
                return;
 
index 03d56aeec714ece5505fc48f8a84981859eecaae..a8e815648b0a0105683a24914f75657b4273bee5 100644 (file)
@@ -74,6 +74,7 @@
 #include <asm/hw_breakpoint.h>
 #include <asm/kvm_host.h>
 #include <asm/kvm_book3s_uvmem.h>
+#include <asm/ultravisor.h>
 
 #include "book3s.h"
 
@@ -4515,6 +4516,29 @@ static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm,
        if (change == KVM_MR_FLAGS_ONLY && kvm_is_radix(kvm) &&
            ((new->flags ^ old->flags) & KVM_MEM_LOG_DIRTY_PAGES))
                kvmppc_radix_flush_memslot(kvm, old);
+       /*
+        * If UV hasn't yet called H_SVM_INIT_START, don't register memslots.
+        */
+       if (!kvm->arch.secure_guest)
+               return;
+
+       switch (change) {
+       case KVM_MR_CREATE:
+               if (kvmppc_uvmem_slot_init(kvm, new))
+                       return;
+               uv_register_mem_slot(kvm->arch.lpid,
+                                    new->base_gfn << PAGE_SHIFT,
+                                    new->npages * PAGE_SIZE,
+                                    0, new->id);
+               break;
+       case KVM_MR_DELETE:
+               uv_unregister_mem_slot(kvm->arch.lpid, old->id);
+               kvmppc_uvmem_slot_free(kvm, old);
+               break;
+       default:
+               /* TODO: Handle KVM_MR_MOVE */
+               break;
+       }
 }
 
 /*
index ed51498b20eeb5ce3f4cd0bd78f55fd44aabdf59..2de264fc31563867a76c89d96cdefc96344c12a1 100644 (file)
@@ -249,6 +249,43 @@ unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
        return H_SUCCESS;
 }
 
+/*
+ * Drop device pages that we maintain for the secure guest
+ *
+ * We first mark the pages to be skipped from UV_PAGE_OUT when there
+ * is HV side fault on these pages. Next we *get* these pages, forcing
+ * fault on them, do fault time migration to replace the device PTEs in
+ * QEMU page table with normal PTEs from newly allocated pages.
+ */
+void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free,
+                            struct kvm *kvm)
+{
+       int i;
+       struct kvmppc_uvmem_page_pvt *pvt;
+       unsigned long pfn, uvmem_pfn;
+       unsigned long gfn = free->base_gfn;
+
+       for (i = free->npages; i; --i, ++gfn) {
+               struct page *uvmem_page;
+
+               mutex_lock(&kvm->arch.uvmem_lock);
+               if (!kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
+                       mutex_unlock(&kvm->arch.uvmem_lock);
+                       continue;
+               }
+
+               uvmem_page = pfn_to_page(uvmem_pfn);
+               pvt = uvmem_page->zone_device_data;
+               pvt->skip_page_out = true;
+               mutex_unlock(&kvm->arch.uvmem_lock);
+
+               pfn = gfn_to_pfn(kvm, gfn);
+               if (is_error_noslot_pfn(pfn))
+                       continue;
+               kvm_release_pfn_clean(pfn);
+       }
+}
+
 /*
  * Get a free device PFN from the pool
  *