asedeno.scripts.mit.edu Git - linux.git/commitdiff
KVM: s390: Add FAULT_FLAG_RETRY_NOWAIT for guest fault
author Dominik Dingel <dingel@linux.vnet.ibm.com>
Mon, 17 Jun 2013 14:25:18 +0000 (16:25 +0200)
committer Christian Borntraeger <borntraeger@de.ibm.com>
Thu, 30 Jan 2014 11:50:39 +0000 (12:50 +0100)
In the case of a fault, we retry the sie64 exit, but with the gmap fault
indication set for this thread. This makes it possible to handle async
page faults.

Based on a patch from Martin Schwidefsky.

Signed-off-by: Dominik Dingel <dingel@linux.vnet.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
arch/s390/include/asm/pgtable.h
arch/s390/include/asm/processor.h
arch/s390/kvm/kvm-s390.c
arch/s390/mm/fault.c

index 2204400d0bd58d4a1e45c82394ff5cbd100aa2cc..66101f6c6d819354d2e04ef33a275cec253c2edd 100644 (file)
@@ -767,6 +767,7 @@ static inline void pgste_set_pte(pte_t *ptep, pte_t entry)
  * @table: pointer to the page directory
  * @asce: address space control element for gmap page table
  * @crst_list: list of all crst tables used in the guest address space
+ * @pfault_enabled: defines if pfaults are applicable for the guest
  */
 struct gmap {
        struct list_head list;
@@ -775,6 +776,7 @@ struct gmap {
        unsigned long asce;
        void *private;
        struct list_head crst_list;
+       bool pfault_enabled;
 };
 
 /**
index 0a876bc543d36f3c121a39f4acf1a351233c1667..dc5fc4f90e52b86a3152d434bbd7425d3f644337 100644 (file)
@@ -79,6 +79,7 @@ struct thread_struct {
         unsigned long ksp;              /* kernel stack pointer             */
        mm_segment_t mm_segment;
        unsigned long gmap_addr;        /* address of last gmap fault. */
+       unsigned int gmap_pfault;       /* signal of a pending guest pfault */
        struct per_regs per_user;       /* User specified PER registers */
        struct per_event per_event;     /* Cause of the last PER trap */
        unsigned long per_flags;        /* Flags to control debug behavior */
index 782420f3c4d5294f2c589445e2d98a847d01d227..9eec794caa7f0b07d3c5d8e7e25539c9a20228e3 100644 (file)
@@ -255,6 +255,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
                if (!kvm->arch.gmap)
                        goto out_nogmap;
                kvm->arch.gmap->private = kvm;
+               kvm->arch.gmap->pfault_enabled = 0;
        }
 
        kvm->arch.css_support = 0;
@@ -701,6 +702,17 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
        return 0;
 }
 
+static long kvm_arch_fault_in_sync(struct kvm_vcpu *vcpu)
+{
+       long rc;
+       hva_t fault = gmap_fault(current->thread.gmap_addr, vcpu->arch.gmap);
+       struct mm_struct *mm = current->mm;
+       down_read(&mm->mmap_sem);
+       rc = get_user_pages(current, mm, fault, 1, 1, 0, NULL, NULL);
+       up_read(&mm->mmap_sem);
+       return rc;
+}
+
 static int vcpu_pre_run(struct kvm_vcpu *vcpu)
 {
        int rc, cpuflags;
@@ -730,7 +742,7 @@ static int vcpu_pre_run(struct kvm_vcpu *vcpu)
 
 static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
 {
-       int rc;
+       int rc = -1;
 
        VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
                   vcpu->arch.sie_block->icptcode);
@@ -744,7 +756,14 @@ static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
                                                current->thread.gmap_addr;
                vcpu->run->s390_ucontrol.pgm_code = 0x10;
                rc = -EREMOTE;
-       } else {
+
+       } else if (current->thread.gmap_pfault) {
+               current->thread.gmap_pfault = 0;
+               if (kvm_arch_fault_in_sync(vcpu) >= 0)
+                       rc = 0;
+       }
+
+       if (rc == -1) {
                VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
                trace_kvm_s390_sie_fault(vcpu);
                rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
index d95265b2719f8e7fb0b15305b49a52130069aebd..88cef505453bfb14aa21fabb745dcd781906ee5e 100644 (file)
@@ -50,6 +50,7 @@
 #define VM_FAULT_BADMAP                0x020000
 #define VM_FAULT_BADACCESS     0x040000
 #define VM_FAULT_SIGNAL                0x080000
+#define VM_FAULT_PFAULT                0x100000
 
 static unsigned long store_indication __read_mostly;
 
@@ -227,6 +228,7 @@ static noinline void do_fault_error(struct pt_regs *regs, int fault)
                        return;
                }
        case VM_FAULT_BADCONTEXT:
+       case VM_FAULT_PFAULT:
                do_no_context(regs);
                break;
        case VM_FAULT_SIGNAL:
@@ -264,6 +266,9 @@ static noinline void do_fault_error(struct pt_regs *regs, int fault)
  */
 static inline int do_exception(struct pt_regs *regs, int access)
 {
+#ifdef CONFIG_PGSTE
+       struct gmap *gmap;
+#endif
        struct task_struct *tsk;
        struct mm_struct *mm;
        struct vm_area_struct *vma;
@@ -304,9 +309,10 @@ static inline int do_exception(struct pt_regs *regs, int access)
        down_read(&mm->mmap_sem);
 
 #ifdef CONFIG_PGSTE
-       if ((current->flags & PF_VCPU) && S390_lowcore.gmap) {
-               address = __gmap_fault(address,
-                                    (struct gmap *) S390_lowcore.gmap);
+       gmap = (struct gmap *)
+               ((current->flags & PF_VCPU) ? S390_lowcore.gmap : 0);
+       if (gmap) {
+               address = __gmap_fault(address, gmap);
                if (address == -EFAULT) {
                        fault = VM_FAULT_BADMAP;
                        goto out_up;
@@ -315,6 +321,8 @@ static inline int do_exception(struct pt_regs *regs, int access)
                        fault = VM_FAULT_OOM;
                        goto out_up;
                }
+               if (gmap->pfault_enabled)
+                       flags |= FAULT_FLAG_RETRY_NOWAIT;
        }
 #endif
 
@@ -371,9 +379,19 @@ static inline int do_exception(struct pt_regs *regs, int access)
                                      regs, address);
                }
                if (fault & VM_FAULT_RETRY) {
+#ifdef CONFIG_PGSTE
+                       if (gmap && (flags & FAULT_FLAG_RETRY_NOWAIT)) {
+                               /* FAULT_FLAG_RETRY_NOWAIT has been set,
+                                * mmap_sem has not been released */
+                               current->thread.gmap_pfault = 1;
+                               fault = VM_FAULT_PFAULT;
+                               goto out_up;
+                       }
+#endif
                        /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
                         * of starvation. */
-                       flags &= ~FAULT_FLAG_ALLOW_RETRY;
+                       flags &= ~(FAULT_FLAG_ALLOW_RETRY |
+                                  FAULT_FLAG_RETRY_NOWAIT);
                        flags |= FAULT_FLAG_TRIED;
                        down_read(&mm->mmap_sem);
                        goto retry;