cpuhotplug: Link lock stacks for hotplug callbacks
author     Thomas Gleixner <tglx@linutronix.de>
           Wed, 24 May 2017 08:15:43 +0000 (10:15 +0200)
committer  Thomas Gleixner <tglx@linutronix.de>
           Fri, 26 May 2017 08:10:48 +0000 (10:10 +0200)
The CPU hotplug callbacks are not covered by lockdep versus the cpu hotplug
rwsem.

CPU0                                    CPU1
cpuhp_setup_state(STATE, startup, teardown);
 cpus_read_lock();
  invoke_callback_on_ap();
    kick_hotplug_thread(ap);
    wait_for_completion();              hotplug_thread_fn()
                                          lock(m);
                                          do_stuff();
                                          unlock(m);

Lockdep does not know about this dependency and will not trigger on the
following code sequence:

  lock(m);
  cpus_read_lock();

Add a lockdep map and connect the initiator's lock chain with the hotplug
thread's lock chain, so potential deadlocks can be detected.
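
To make the pattern concrete, here is a minimal sketch of the same annotation
scheme applied to a generic hand-off mechanism; the names my_work_map,
my_dispatch(), my_worker_fn() and the mutex m are illustrative only and not
part of this patch:

  #include <linux/lockdep.h>
  #include <linux/mutex.h>

  static struct lock_class_key my_work_key;
  static struct lockdep_map my_work_map =
          STATIC_LOCKDEP_MAP_INIT("my_work", &my_work_key);

  static DEFINE_MUTEX(m);

  /* Worker side: hold the map across the callback so every lock the
   * callback takes (e.g. m) is chained after my_work_map. */
  static void my_worker_fn(void (*cb)(void))
  {
          lock_map_acquire(&my_work_map);
          cb();                           /* may do mutex_lock(&m) */
          lock_map_release(&my_work_map);
  }

  /* Initiator side: an empty acquire/release pair records
   * "locks already held by the caller" -> my_work_map. */
  static void my_dispatch(void (*cb)(void))
  {
          lock_map_acquire(&my_work_map);
          lock_map_release(&my_work_map);
          /* kick the worker thread, then wait_for_completion() */
  }

With both sides annotated, a caller that holds m around my_dispatch() while a
callback also takes m produces the edges m -> my_work_map and my_work_map -> m,
which lockdep reports as a potential deadlock even if the inversion never
triggers at runtime.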

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Acked-by: Ingo Molnar <mingo@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sebastian Siewior <bigeasy@linutronix.de>
Cc: Steven Rostedt <rostedt@goodmis.org>
Link: http://lkml.kernel.org/r/20170524081549.709375845@linutronix.de
kernel/cpu.c

index 66836216ebae596adfb2f20aa697e59ed76267be..7435ffc6163b83f8dc9bd56f9f1446445b535dde 100644
@@ -66,6 +66,12 @@ struct cpuhp_cpu_state {
 
 static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state);
 
+#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
+static struct lock_class_key cpuhp_state_key;
+static struct lockdep_map cpuhp_state_lock_map =
+       STATIC_LOCKDEP_MAP_INIT("cpuhp_state", &cpuhp_state_key);
+#endif
+
 /**
  * cpuhp_step - Hotplug state machine step
  * @name:      Name of the step
@@ -403,6 +409,7 @@ static void cpuhp_thread_fun(unsigned int cpu)
 
        st->should_run = false;
 
+       lock_map_acquire(&cpuhp_state_lock_map);
        /* Single callback invocation for [un]install ? */
        if (st->single) {
                if (st->cb_state < CPUHP_AP_ONLINE) {
@@ -429,6 +436,7 @@ static void cpuhp_thread_fun(unsigned int cpu)
                else if (st->state > st->target)
                        ret = cpuhp_ap_offline(cpu, st);
        }
+       lock_map_release(&cpuhp_state_lock_map);
        st->result = ret;
        complete(&st->done);
 }
@@ -443,6 +451,9 @@ cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
        if (!cpu_online(cpu))
                return 0;
 
+       lock_map_acquire(&cpuhp_state_lock_map);
+       lock_map_release(&cpuhp_state_lock_map);
+
        /*
         * If we are up and running, use the hotplug thread. For early calls
         * we invoke the thread function directly.
@@ -486,6 +497,8 @@ static int cpuhp_kick_ap_work(unsigned int cpu)
        enum cpuhp_state state = st->state;
 
        trace_cpuhp_enter(cpu, st->target, state, cpuhp_kick_ap_work);
+       lock_map_acquire(&cpuhp_state_lock_map);
+       lock_map_release(&cpuhp_state_lock_map);
        __cpuhp_kick_ap_work(st);
        wait_for_completion(&st->done);
        trace_cpuhp_exit(cpu, st->state, state, st->result);
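
Taken together, the hunks above record two sides of the chain:
cpuhp_thread_fun() holds cpuhp_state_lock_map across the invoked callbacks,
while cpuhp_invoke_ap_callback() and cpuhp_kick_ap_work() issue an empty
acquire/release so the initiator's already-held locks are ordered before the
map. A rough sketch of the scenario lockdep can now connect, assuming a
hotplug callback that takes a mutex m (m is illustrative):

  /* hotplug thread (cpuhp_thread_fun):     cpuhp_state -> m */
  lock_map_acquire(&cpuhp_state_lock_map);
  mutex_lock(&m);                    /* taken inside the callback */
  mutex_unlock(&m);
  lock_map_release(&cpuhp_state_lock_map);

  /* initiator, e.g. cpuhp_setup_state():   hotplug lock -> cpuhp_state */
  cpus_read_lock();
  lock_map_acquire(&cpuhp_state_lock_map);
  lock_map_release(&cpuhp_state_lock_map);
  cpus_read_unlock();

  /* unrelated code path:                   m -> hotplug lock */
  mutex_lock(&m);
  cpus_read_lock();

Once the hotplug rwsem itself is visible to lockdep, these edges close the
cycle m -> hotplug lock -> cpuhp_state -> m, so lockdep can warn without the
deadlock ever having to occur.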