powerpc/64: Move set_soft_enabled() and rename
author     Madhavan Srinivasan <maddy@linux.vnet.ibm.com>
           Wed, 20 Dec 2017 03:55:45 +0000 (09:25 +0530)
committer  Michael Ellerman <mpe@ellerman.id.au>
           Fri, 19 Jan 2018 11:36:58 +0000 (22:36 +1100)
Move set_soft_enabled() from powerpc/kernel/irq.c to asm/hw_irq.h, to
encourage updates to paca->soft_enabled to be done via this access
function. Add a "memory" clobber to hint to the compiler that
paca->soft_enabled memory is the target here.

Renaming it to soft_enabled_set() makes the namespace work better, with
soft_enabled as a prefix rather than a postfix, when new soft_enabled
manipulation functions are introduced.

Reviewed-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Madhavan Srinivasan <maddy@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
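
For illustration only, a minimal user-space sketch (not part of the commit) of the barrier behaviour the new accessor relies on. The mock_paca struct, the mock_soft_enabled_set() name, the build line and the placeholder values are invented for this example; the real soft_enabled_set() in the hw_irq.h hunk below stores through r13 into the PACA and combines the store and the barrier in a single asm statement, whereas this portable stand-in splits them.

/* mock_soft_enabled.c - illustrative stand-in only; build with: gcc -O2 -o mock mock_soft_enabled.c */
#include <stdio.h>

/* Stand-in for struct paca_struct, reduced to the one field this commit touches. */
struct mock_paca {
	unsigned char soft_enabled;
};

static struct mock_paca paca;

/*
 * Stand-in for the kernel's soft_enabled_set(). The store here is plain C
 * and the empty asm carries the "memory" clobber, so the compiler treats
 * this point as a barrier: it will not cache paca.soft_enabled in a
 * register across it, nor move surrounding loads/stores past it. In the
 * kernel version the store and the barrier are one asm statement
 * ("stb %0,%1(13)" with a "memory" clobber).
 */
static inline void mock_soft_enabled_set(unsigned long enable)
{
	paca.soft_enabled = (unsigned char)enable;
	__asm__ __volatile__("" : : : "memory");
}

int main(void)
{
	mock_soft_enabled_set(0);	/* 0 = disabled; compare the setup_64.c hunk below */
	/* ... a section the compiler must not move across the store ... */
	mock_soft_enabled_set(1);	/* placeholder "enabled" value for the demo */
	printf("soft_enabled = %u\n", paca.soft_enabled);
	return 0;
}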
arch/powerpc/include/asm/hw_irq.h
arch/powerpc/include/asm/kvm_ppc.h
arch/powerpc/kernel/irq.c
arch/powerpc/kernel/setup_64.c
arch/powerpc/kernel/time.c

index 4c54db29104f6e1fc65fd4c1c5b29e301c44eb8b..d046d9f3b77774dc12740719b861e61b52848e70 100644 (file)
@@ -49,6 +49,21 @@ extern void unknown_exception(struct pt_regs *regs);
 #ifdef CONFIG_PPC64
 #include <asm/paca.h>
 
+/*
+ * The "memory" clobber acts as both a compiler barrier
+ * for the critical section and as a clobber because
+ * we changed paca->soft_enabled
+ */
+static inline notrace void soft_enabled_set(unsigned long enable)
+{
+       asm volatile(
+               "stb %0,%1(13)"
+               :
+               : "r" (enable),
+                 "i" (offsetof(struct paca_struct, soft_enabled))
+               : "memory");
+}
+
 static inline unsigned long arch_local_save_flags(void)
 {
        unsigned long flags;
@@ -63,12 +78,7 @@ static inline unsigned long arch_local_save_flags(void)
 
 static inline void arch_local_irq_disable(void)
 {
-       asm volatile(
-               "stb %0,%1(13)"
-               :
-               : "r" (IRQS_DISABLED),
-                 "i" (offsetof(struct paca_struct, soft_enabled))
-               : "memory");
+       soft_enabled_set(IRQS_DISABLED);
 }
 
 extern void arch_local_irq_restore(unsigned long);
index 68484d77a3cbd3e646b9a5c7ae23bddde0808d36..369f0640826c363e350cec7e7d08ced10b8ce78f 100644 (file)
@@ -873,7 +873,7 @@ static inline void kvmppc_fix_ee_before_entry(void)
 
        /* Only need to enable IRQs by hard enabling them after this */
        local_paca->irq_happened = 0;
-       local_paca->soft_enabled = IRQS_ENABLED;
+       soft_enabled_set(IRQS_ENABLED);
 #endif
 }
 
index 483a9206554f5d0a366d37dcf29bd33aad6ffa2a..6c04e465caf51dfc508b299039edda1cfb4e55a8 100644 (file)
@@ -107,12 +107,6 @@ static inline notrace unsigned long get_irq_happened(void)
        return happened;
 }
 
-static inline notrace void set_soft_enabled(unsigned long enable)
-{
-       __asm__ __volatile__("stb %0,%1(13)"
-       : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
-}
-
 static inline notrace int decrementer_check_overflow(void)
 {
        u64 now = get_tb_or_rtc();
@@ -231,7 +225,7 @@ notrace void arch_local_irq_restore(unsigned long en)
        unsigned int replay;
 
        /* Write the new soft-enabled value */
-       set_soft_enabled(en);
+       soft_enabled_set(en);
        if (en == IRQS_DISABLED)
                return;
        /*
@@ -277,7 +271,7 @@ notrace void arch_local_irq_restore(unsigned long en)
        }
 #endif /* CONFIG_TRACE_IRQFLAGS */
 
-       set_soft_enabled(IRQS_DISABLED);
+       soft_enabled_set(IRQS_DISABLED);
        trace_hardirqs_off();
 
        /*
@@ -289,7 +283,7 @@ notrace void arch_local_irq_restore(unsigned long en)
 
        /* We can soft-enable now */
        trace_hardirqs_on();
-       set_soft_enabled(IRQS_ENABLED);
+       soft_enabled_set(IRQS_ENABLED);
 
        /*
         * And replay if we have to. This will return with interrupts
@@ -364,7 +358,7 @@ bool prep_irq_for_idle(void)
         * of entering the low power state.
         */
        local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
-       local_paca->soft_enabled = IRQS_ENABLED;
+       soft_enabled_set(IRQS_ENABLED);
 
        /* Tell the caller to enter the low power state */
        return true;
index 2fd4e167ef096c0e6605cbd77f6db187eef14538..f6bedebda90a6203f06ca0d47fa0925938f235c9 100644 (file)
@@ -189,7 +189,7 @@ static void __init fixup_boot_paca(void)
        /* Allow percpu accesses to work until we setup percpu data */
        get_paca()->data_offset = 0;
        /* Mark interrupts disabled in PACA */
-       get_paca()->soft_enabled = IRQS_DISABLED;
+       soft_enabled_set(IRQS_DISABLED);
 }
 
 static void __init configure_exceptions(void)
@@ -352,7 +352,7 @@ void __init early_setup(unsigned long dt_ptr)
 void early_setup_secondary(void)
 {
        /* Mark interrupts disabled in PACA */
-       get_paca()->soft_enabled = 0;
+       soft_enabled_set(IRQS_DISABLED);
 
        /* Initialize the hash table or TLB handling */
        early_init_mmu_secondary();
index 070092b1ba8ab925225ce6cf33870d75d0d51f9d..320b8459c74ee7606c4f6ea820ee551ca1d534cb 100644 (file)
@@ -253,7 +253,7 @@ void accumulate_stolen_time(void)
         * needs to reflect that so various debug stuff doesn't
         * complain
         */
-       local_paca->soft_enabled = IRQS_DISABLED;
+       soft_enabled_set(IRQS_DISABLED);
 
        sst = scan_dispatch_log(acct->starttime_user);
        ust = scan_dispatch_log(acct->starttime);
@@ -261,7 +261,7 @@ void accumulate_stolen_time(void)
        acct->utime -= ust;
        acct->steal_time += ust + sst;
 
-       local_paca->soft_enabled = save_soft_enabled;
+       soft_enabled_set(save_soft_enabled);
 }
 
 static inline u64 calculate_stolen_time(u64 stop_tb)