diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index f751ce0b783e57eff53f4d5feb73be1859d05cd6..b5667a273bf67e0718371a783b259ffb4c91cbc7 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -1284,6 +1284,7 @@ static int                        sched_domains_curr_level;
 int                            sched_max_numa_distance;
 static int                     *sched_domains_numa_distance;
 static struct cpumask          ***sched_domains_numa_masks;
+int __read_mostly              node_reclaim_distance = RECLAIM_DISTANCE;
 #endif
 
 /*
@@ -1402,7 +1403,7 @@ sd_init(struct sched_domain_topology_level *tl,
 
                sd->flags &= ~SD_PREFER_SIBLING;
                sd->flags |= SD_SERIALIZE;
-               if (sched_domains_numa_distance[tl->numa_level] > RECLAIM_DISTANCE) {
+               if (sched_domains_numa_distance[tl->numa_level] > node_reclaim_distance) {
                        sd->flags &= ~(SD_BALANCE_EXEC |
                                       SD_BALANCE_FORK |
                                       SD_WAKE_AFFINE);
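
The new knob replaces the hard-coded RECLAIM_DISTANCE cut-off in sd_init(), so a platform whose SLIT reports slightly larger inter-node distances can keep SD_BALANCE_EXEC, SD_BALANCE_FORK and SD_WAKE_AFFINE across nodes. Below is a minimal sketch of how arch or platform code might raise the threshold at boot; only node_reclaim_distance comes from this patch, the quirk function and its predicate are hypothetical.

static void __init numa_distance_quirk(void)
{
	/*
	 * Hypothetical platform check: if firmware reports inter-node
	 * distances just past RECLAIM_DISTANCE (30 by default), raise
	 * the threshold so sd_init() above still sets the cross-node
	 * balancing flags instead of clearing them.
	 */
	if (platform_has_inflated_slit())
		node_reclaim_distance = 32;
}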
@@ -1724,6 +1725,26 @@ void sched_domains_numa_masks_clear(unsigned int cpu)
        }
 }
 
+/*
+ * sched_numa_find_closest() - given the NUMA topology, find the cpu
+ *                             closest to @cpu from @cpus.
+ * @cpus: cpumask to find a cpu from
+ * @cpu: cpu to be close to
+ *
+ * returns: cpu, or nr_cpu_ids when nothing found.
+ */
+int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
+{
+       int i, j = cpu_to_node(cpu);
+
+       for (i = 0; i < sched_domains_numa_levels; i++) {
+               cpu = cpumask_any_and(cpus, sched_domains_numa_masks[i][j]);
+               if (cpu < nr_cpu_ids)
+                       return cpu;
+       }
+       return nr_cpu_ids;
+}
+
 #endif /* CONFIG_NUMA */
 
 static int __sdt_alloc(const struct cpumask *cpu_map)
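
For reference, a minimal usage sketch of the helper added above, assuming a CONFIG_NUMA kernel and a caller with access to the scheduler-internal declaration; the wrapper name is made up. It picks a CPU from an allowed mask that is topologically as close as possible to a reference CPU, falling back to any allowed CPU when the NUMA masks yield no candidate.

static int pick_cpu_near(const struct cpumask *allowed, int ref_cpu)
{
	int cpu = sched_numa_find_closest(allowed, ref_cpu);

	/* nr_cpu_ids means no allowed CPU shares a NUMA level with ref_cpu */
	if (cpu >= nr_cpu_ids)
		cpu = cpumask_any(allowed);

	return cpu;
}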
@@ -2149,16 +2170,16 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
  * ndoms_new == 0 is a special case for destroying existing domains,
  * and it will not create the default domain.
  *
- * Call with hotplug lock held
+ * Call with hotplug lock and sched_domains_mutex held
  */
-void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
-                            struct sched_domain_attr *dattr_new)
+void partition_sched_domains_locked(int ndoms_new, cpumask_var_t doms_new[],
+                                   struct sched_domain_attr *dattr_new)
 {
        bool __maybe_unused has_eas = false;
        int i, j, n;
        int new_topology;
 
-       mutex_lock(&sched_domains_mutex);
+       lockdep_assert_held(&sched_domains_mutex);
 
        /* Always unregister in case we don't destroy any domains: */
        unregister_sched_domain_sysctl();
@@ -2183,8 +2204,19 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
        for (i = 0; i < ndoms_cur; i++) {
                for (j = 0; j < n && !new_topology; j++) {
                        if (cpumask_equal(doms_cur[i], doms_new[j]) &&
-                           dattrs_equal(dattr_cur, i, dattr_new, j))
+                           dattrs_equal(dattr_cur, i, dattr_new, j)) {
+                               struct root_domain *rd;
+
+                               /*
+                                * This domain won't be destroyed and as such
+                                * its dl_bw->total_bw needs to be cleared.  It
+                                * will be recomputed in function
+                                * update_tasks_root_domain().
+                                */
+                               rd = cpu_rq(cpumask_any(doms_cur[i]))->rd;
+                               dl_clear_root_domain(rd);
                                goto match1;
+                       }
                }
                /* No match - a current sched domain not in new doms_new[] */
                detach_destroy_domains(doms_cur[i]);
@@ -2241,6 +2273,15 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
        ndoms_cur = ndoms_new;
 
        register_sched_domain_sysctl();
+}
 
+/*
+ * Call with hotplug lock held
+ */
+void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
+                            struct sched_domain_attr *dattr_new)
+{
+       mutex_lock(&sched_domains_mutex);
+       partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
        mutex_unlock(&sched_domains_mutex);
 }
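
A usage sketch for the new entry point (illustrative only; the caller below is hypothetical): code that already serializes domain rebuilds on sched_domains_mutex can call the _locked variant directly, and lockdep will flag any path that reaches it without the lock.

static void rebuild_domains_locked(int ndoms, cpumask_var_t doms[],
				   struct sched_domain_attr *attrs)
{
	/* Caller must already hold the hotplug lock and sched_domains_mutex. */
	lockdep_assert_held(&sched_domains_mutex);
	partition_sched_domains_locked(ndoms, doms, attrs);
}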