asedeno.scripts.mit.edu Git - linux.git/commitdiff
powerpc/64s/hash: Simplify slb_flush_and_rebolt()
author Nicholas Piggin <npiggin@gmail.com>
Tue, 2 Oct 2018 14:27:58 +0000 (00:27 +1000)
committer Michael Ellerman <mpe@ellerman.id.au>
Sun, 14 Oct 2018 07:04:09 +0000 (18:04 +1100)
slb_flush_and_rebolt() is misleading, it is called in virtual mode, so
it can not possibly change the stack, so it should not be touching the
shadow area. And since vmalloc is no longer bolted, it should not
change any bolted mappings at all.

Change the name to slb_flush_and_restore_bolted(), and have it just
load the kernel stack from what's currently in the shadow SLB area.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
arch/powerpc/include/asm/book3s/64/mmu-hash.h
arch/powerpc/kernel/swsusp_asm64.S
arch/powerpc/mm/hash_utils_64.c
arch/powerpc/mm/slb.c
arch/powerpc/mm/slice.c

index 14e552ea5e5200ea670d37694972ccd778566f97..60cda8fb0677ea1c2239f9f9c1c7534096bd6adb 100644 (file)
@@ -503,7 +503,7 @@ struct slb_entry {
 };
 
 extern void slb_initialize(void);
-extern void slb_flush_and_rebolt(void);
+void slb_flush_and_restore_bolted(void);
 void slb_flush_all_realmode(void);
 void __slb_restore_bolted_realmode(void);
 void slb_restore_bolted_realmode(void);
index f83bf6f72cb0e461af4780cba1825bf186b610a1..185216becb8b3672f874b6e1fce19338bf4c7544 100644 (file)
@@ -262,7 +262,7 @@ END_FW_FTR_SECTION_IFCLR(FW_FEATURE_LPAR)
 
        addi    r1,r1,-128
 #ifdef CONFIG_PPC_BOOK3S_64
-       bl      slb_flush_and_rebolt
+       bl      slb_flush_and_restore_bolted
 #endif
        bl      do_after_copyback
        addi    r1,r1,128
index 854edc3722e03d95c9bc1994edbfbc84cb87f2a2..0cc7fbc3bd1c76d3b786979332a059d303726555 100644 (file)
@@ -1125,7 +1125,7 @@ void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
        if ((get_paca_psize(addr) != MMU_PAGE_4K) && (current->mm == mm)) {
 
                copy_mm_to_paca(mm);
-               slb_flush_and_rebolt();
+               slb_flush_and_restore_bolted();
        }
 }
 #endif /* CONFIG_PPC_64K_PAGES */
@@ -1197,7 +1197,7 @@ static void check_paca_psize(unsigned long ea, struct mm_struct *mm,
        if (user_region) {
                if (psize != get_paca_psize(ea)) {
                        copy_mm_to_paca(mm);
-                       slb_flush_and_rebolt();
+                       slb_flush_and_restore_bolted();
                }
        } else if (get_paca()->vmalloc_sllp !=
                   mmu_psize_defs[mmu_vmalloc_psize].sllp) {
index 3b7d8af097247760e66cac842b037dcb6572fb3f..d8d9c9bd15d3156aafd27fa6f1d94f752c32303f 100644 (file)
@@ -115,8 +115,6 @@ void __slb_restore_bolted_realmode(void)
 
 /*
  * Insert the bolted entries into an empty SLB.
- * This is not the same as rebolt because the bolted segments are not
- * changed, just loaded from the shadow area.
  */
 void slb_restore_bolted_realmode(void)
 {
@@ -135,12 +133,15 @@ void slb_flush_all_realmode(void)
        asm volatile("slbmte %0,%0; slbia" : : "r" (0));
 }
 
-void slb_flush_and_rebolt(void)
+/*
+ * This flushes non-bolted entries, it can be run in virtual mode. Must
+ * be called with interrupts disabled.
+ */
+void slb_flush_and_restore_bolted(void)
 {
-       /* If you change this make sure you change SLB_NUM_BOLTED
-        * and PR KVM appropriately too. */
-       unsigned long linear_llp, lflags;
-       unsigned long ksp_esid_data, ksp_vsid_data;
+       struct slb_shadow *p = get_slb_shadow();
+
+       BUILD_BUG_ON(SLB_NUM_BOLTED != 2);
 
        WARN_ON(!irqs_disabled());
 
@@ -150,30 +151,12 @@ void slb_flush_and_rebolt(void)
         */
        hard_irq_disable();
 
-       linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
-       lflags = SLB_VSID_KERNEL | linear_llp;
-
-       ksp_esid_data = mk_esid_data(get_paca()->kstack, mmu_kernel_ssize, KSTACK_INDEX);
-       if ((ksp_esid_data & ~0xfffffffUL) <= PAGE_OFFSET) {
-               ksp_esid_data &= ~SLB_ESID_V;
-               ksp_vsid_data = 0;
-               slb_shadow_clear(KSTACK_INDEX);
-       } else {
-               /* Update stack entry; others don't change */
-               slb_shadow_update(get_paca()->kstack, mmu_kernel_ssize, lflags, KSTACK_INDEX);
-               ksp_vsid_data =
-                       be64_to_cpu(get_slb_shadow()->save_area[KSTACK_INDEX].vsid);
-       }
-
-       /* We need to do this all in asm, so we're sure we don't touch
-        * the stack between the slbia and rebolting it. */
        asm volatile("isync\n"
                     "slbia\n"
-                    /* Slot 1 - kernel stack */
-                    "slbmte    %0,%1\n"
-                    "isync"
-                    :: "r"(ksp_vsid_data),
-                       "r"(ksp_esid_data)
+                    "slbmte  %0, %1\n"
+                    "isync\n"
+                    :: "r" (be64_to_cpu(p->save_area[KSTACK_INDEX].vsid)),
+                       "r" (be64_to_cpu(p->save_area[KSTACK_INDEX].esid))
                     : "memory");
 
        get_paca()->slb_cache_ptr = 0;
@@ -254,7 +237,10 @@ void slb_dump_contents(struct slb_entry *slb_ptr)
 
 void slb_vmalloc_update(void)
 {
-       slb_flush_and_rebolt();
+       /*
+        * vmalloc is not bolted, so just have to flush non-bolted.
+        */
+       slb_flush_and_restore_bolted();
 }
 
 static bool preload_hit(struct thread_info *ti, unsigned long esid)
index 546dd07c8083994c02a4ccacd3e4a00d23a0ccdf..50ba3d0456a535ae601b97ba89c7e2ee115f3a69 100644 (file)
@@ -219,7 +219,7 @@ static void slice_flush_segments(void *parm)
        copy_mm_to_paca(current->active_mm);
 
        local_irq_save(flags);
-       slb_flush_and_rebolt();
+       slb_flush_and_restore_bolted();
        local_irq_restore(flags);
 #endif
 }