asedeno.scripts.mit.edu Git - linux.git/blobdiff - kernel/sched/core.c
sched/smt: Make sched_smt_present track topology
[linux.git] / kernel / sched / core.c
index 091e089063be1dc25ab1180fec3f99a4408fb024..6fedf3a98581b34b388a014d9ddb671ab8281a1f 100644 (file)
@@ -5738,15 +5738,10 @@ int sched_cpu_activate(unsigned int cpu)
 
 #ifdef CONFIG_SCHED_SMT
        /*
-        * The sched_smt_present static key needs to be evaluated on every
-        * hotplug event because at boot time SMT might be disabled when
-        * the number of booted CPUs is limited.
-        *
-        * If then later a sibling gets hotplugged, then the key would stay
-        * off and SMT scheduling would never be functional.
+        * When going up, increment the number of cores with SMT present.
         */
-       if (cpumask_weight(cpu_smt_mask(cpu)) > 1)
-               static_branch_enable_cpuslocked(&sched_smt_present);
+       if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+               static_branch_inc_cpuslocked(&sched_smt_present);
 #endif
        set_cpu_active(cpu, true);
 
@@ -5790,6 +5785,14 @@ int sched_cpu_deactivate(unsigned int cpu)
         */
        synchronize_rcu_mult(call_rcu, call_rcu_sched);
 
+#ifdef CONFIG_SCHED_SMT
+       /*
+        * When going down, decrement the number of cores with SMT present.
+        */
+       if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+               static_branch_dec_cpuslocked(&sched_smt_present);
+#endif
+
        if (!sched_smp_initialized)
                return 0;