Merge tag 'mfd-next-5.3' of git://git.kernel.org/pub/scm/linux/kernel/git/lee/mfd
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index ee834d4fb8140c49a7d9e17b4e5edfaf2c3b118e..e51778c312f1c51e722a3aa914be71ca94c42885 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -36,10 +36,16 @@ static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
 static void tick_broadcast_setup_oneshot(struct clock_event_device *bc);
 static void tick_broadcast_clear_oneshot(int cpu);
 static void tick_resume_broadcast_oneshot(struct clock_event_device *bc);
+# ifdef CONFIG_HOTPLUG_CPU
+static void tick_broadcast_oneshot_offline(unsigned int cpu);
+# endif
 #else
 static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) { BUG(); }
 static inline void tick_broadcast_clear_oneshot(int cpu) { }
 static inline void tick_resume_broadcast_oneshot(struct clock_event_device *bc) { }
+# ifdef CONFIG_HOTPLUG_CPU
+static inline void tick_broadcast_oneshot_offline(unsigned int cpu) { }
+# endif
 #endif
 
 /*
@@ -433,27 +439,29 @@ void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-/*
- * Remove a CPU from broadcasting
- */
-void tick_shutdown_broadcast(unsigned int cpu)
+static void tick_shutdown_broadcast(void)
 {
-       struct clock_event_device *bc;
-       unsigned long flags;
-
-       raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
-
-       bc = tick_broadcast_device.evtdev;
-       cpumask_clear_cpu(cpu, tick_broadcast_mask);
-       cpumask_clear_cpu(cpu, tick_broadcast_on);
+       struct clock_event_device *bc = tick_broadcast_device.evtdev;
 
        if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
                if (bc && cpumask_empty(tick_broadcast_mask))
                        clockevents_shutdown(bc);
        }
+}
 
-       raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
+/*
+ * Remove a CPU from broadcasting
+ */
+void tick_broadcast_offline(unsigned int cpu)
+{
+       raw_spin_lock(&tick_broadcast_lock);
+       cpumask_clear_cpu(cpu, tick_broadcast_mask);
+       cpumask_clear_cpu(cpu, tick_broadcast_on);
+       tick_broadcast_oneshot_offline(cpu);
+       tick_shutdown_broadcast();
+       raw_spin_unlock(&tick_broadcast_lock);
 }
+
 #endif
 
 void tick_suspend_broadcast(void)
@@ -801,13 +809,13 @@ int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
                         * either the CPU handling the broadcast
                         * interrupt or we got woken by something else.
                         *
-                        * We are not longer in the broadcast mask, so
+                        * We are no longer in the broadcast mask, so
                         * if the cpu local expiry time is already
                         * reached, we would reprogram the cpu local
                         * timer with an already expired event.
                         *
                         * This can lead to a ping-pong when we return
-                        * to idle and therefor rearm the broadcast
+                        * to idle and therefore rearm the broadcast
                         * timer before the cpu local timer was able
                         * to fire. This happens because the forced
                         * reprogramming makes sure that the event
@@ -950,14 +958,10 @@ void hotplug_cpu__broadcast_tick_pull(int deadcpu)
 }
 
 /*
- * Remove a dead CPU from broadcasting
+ * Remove a dying CPU from broadcasting
  */
-void tick_shutdown_broadcast_oneshot(unsigned int cpu)
+static void tick_broadcast_oneshot_offline(unsigned int cpu)
 {
-       unsigned long flags;
-
-       raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
-
        /*
         * Clear the broadcast masks for the dead cpu, but do not stop
         * the broadcast device!
@@ -965,8 +969,6 @@ void tick_shutdown_broadcast_oneshot(unsigned int cpu)
        cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
        cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
        cpumask_clear_cpu(cpu, tick_broadcast_force_mask);
-
-       raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
 }
 #endif
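
The net effect of the hunks above is that the two exported teardown helpers, tick_shutdown_broadcast() and tick_shutdown_broadcast_oneshot(), are folded into a single tick_broadcast_offline() entry point which takes tick_broadcast_lock with a plain raw_spin_lock() rather than the irqsave variant. That is only safe if the caller runs with interrupts disabled on the CPU hotplug teardown path. The sketch below is illustrative only and not part of this blobdiff: the caller name is hypothetical, and the accompanying tick-common.c / hotplug changes that actually wire this up live outside this file.

/*
 * Illustrative sketch, not part of this diff: a hypothetical hotplug
 * teardown callback showing the context tick_broadcast_offline() is
 * written for.
 */
#ifdef CONFIG_HOTPLUG_CPU
static int hypothetical_tick_dying_cpu(unsigned int cpu)
{
        /* Interrupts are off here; the outgoing CPU is being torn down. */
        lockdep_assert_irqs_disabled();

        /*
         * Single entry point replacing the removed tick_shutdown_broadcast()
         * and tick_shutdown_broadcast_oneshot(); it clears the broadcast
         * masks for @cpu and shuts the broadcast device down if no user is
         * left, all under the plain (non-irqsave) tick_broadcast_lock.
         */
        tick_broadcast_offline(cpu);
        return 0;
}
#endif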