sched/topology: Reference the Energy Model of CPUs when available
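
When CONFIG_ENERGY_MODEL is set, attach a list of performance domains to each root domain: one perf_domain per Energy Model (EM) domain spanned by the root domain's CPUs. The list is published under RCU, so a stale list is freed only after a grace period has elapsed.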
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 7364e0b427b75ad36370fd1ab77276ea6e46e660..169d25cafab595602221b75427cd08263c7a8059 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -201,6 +201,116 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
        return 1;
 }
 
+#ifdef CONFIG_ENERGY_MODEL
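+/* Free an entire list of perf_domain structures. */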
+static void free_pd(struct perf_domain *pd)
+{
+       struct perf_domain *tmp;
+
+       while (pd) {
+               tmp = pd->next;
+               kfree(pd);
+               pd = tmp;
+       }
+}
+
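+/* Return the perf domain whose span contains @cpu, or NULL if none does. */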
+static struct perf_domain *find_pd(struct perf_domain *pd, int cpu)
+{
+       while (pd) {
+               if (cpumask_test_cpu(cpu, perf_domain_span(pd)))
+                       return pd;
+               pd = pd->next;
+       }
+
+       return NULL;
+}
+
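+/*
+ * Allocate a perf_domain for @cpu and point it at the CPU's Energy Model.
+ * Returns NULL if no EM is registered for @cpu or if the allocation fails.
+ */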
+static struct perf_domain *pd_init(int cpu)
+{
+       struct em_perf_domain *obj = em_cpu_get(cpu);
+       struct perf_domain *pd;
+
+       if (!obj) {
+               if (sched_debug())
+                       pr_info("%s: no EM found for CPU%d\n", __func__, cpu);
+               return NULL;
+       }
+
+       pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+       if (!pd)
+               return NULL;
+       pd->em_pd = obj;
+
+       return pd;
+}
+
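+/* Dump the span and number of capacity states of each perf domain. */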
+static void perf_domain_debug(const struct cpumask *cpu_map,
+                                               struct perf_domain *pd)
+{
+       if (!sched_debug() || !pd)
+               return;
+
+       printk(KERN_DEBUG "root_domain %*pbl:", cpumask_pr_args(cpu_map));
+
+       while (pd) {
+               printk(KERN_CONT " pd%d:{ cpus=%*pbl nr_cstate=%d }",
+                               cpumask_first(perf_domain_span(pd)),
+                               cpumask_pr_args(perf_domain_span(pd)),
+                               em_pd_nr_cap_states(pd->em_pd));
+               pd = pd->next;
+       }
+
+       printk(KERN_CONT "\n");
+}
+
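+/* RCU callback: free a stale perf_domain list after the grace period. */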
+static void destroy_perf_domain_rcu(struct rcu_head *rp)
+{
+       struct perf_domain *pd;
+
+       pd = container_of(rp, struct perf_domain, rcu);
+       free_pd(pd);
+}
+
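+/*
+ * Build the list of perf domains covering @cpu_map and publish it, under
+ * RCU, as the pd list of the corresponding root domain. A stale list is
+ * freed once a grace period has elapsed.
+ */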
+static void build_perf_domains(const struct cpumask *cpu_map)
+{
+       struct perf_domain *pd = NULL, *tmp;
+       int cpu = cpumask_first(cpu_map);
+       struct root_domain *rd = cpu_rq(cpu)->rd;
+       int i;
+
+       for_each_cpu(i, cpu_map) {
+               /* Skip already covered CPUs. */
+               if (find_pd(pd, i))
+                       continue;
+
+               /* Create the new pd and add it to the local list. */
+               tmp = pd_init(i);
+               if (!tmp)
+                       goto free;
+               tmp->next = pd;
+               pd = tmp;
+       }
+
+       perf_domain_debug(cpu_map, pd);
+
+       /* Attach the new list of performance domains to the root domain. */
+       tmp = rd->pd;
+       rcu_assign_pointer(rd->pd, pd);
+       if (tmp)
+               call_rcu(&tmp->rcu, destroy_perf_domain_rcu);
+
+       return;
+
+free:
+       free_pd(pd);
+       tmp = rd->pd;
+       rcu_assign_pointer(rd->pd, NULL);
+       if (tmp)
+               call_rcu(&tmp->rcu, destroy_perf_domain_rcu);
+}
+#else
+static void free_pd(struct perf_domain *pd) { }
+#endif /* CONFIG_ENERGY_MODEL */
+
 static void free_rootdomain(struct rcu_head *rcu)
 {
        struct root_domain *rd = container_of(rcu, struct root_domain, rcu);
@@ -211,6 +321,7 @@ static void free_rootdomain(struct rcu_head *rcu)
        free_cpumask_var(rd->rto_mask);
        free_cpumask_var(rd->online);
        free_cpumask_var(rd->span);
+       free_pd(rd->pd);
        kfree(rd);
 }
 
@@ -1959,8 +2070,8 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
        /* Destroy deleted domains: */
        for (i = 0; i < ndoms_cur; i++) {
                for (j = 0; j < n && !new_topology; j++) {
-                       if (cpumask_equal(doms_cur[i], doms_new[j])
-                           && dattrs_equal(dattr_cur, i, dattr_new, j))
+                       if (cpumask_equal(doms_cur[i], doms_new[j]) &&
+                           dattrs_equal(dattr_cur, i, dattr_new, j))
                                goto match1;
                }
                /* No match - a current sched domain not in new doms_new[] */
@@ -1980,8 +2091,8 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
        /* Build new domains: */
        for (i = 0; i < ndoms_new; i++) {
                for (j = 0; j < n && !new_topology; j++) {
-                       if (cpumask_equal(doms_new[i], doms_cur[j])
-                           && dattrs_equal(dattr_new, i, dattr_cur, j))
+                       if (cpumask_equal(doms_new[i], doms_cur[j]) &&
+                           dattrs_equal(dattr_new, i, dattr_cur, j))
                                goto match2;
                }
                /* No match - add a new doms_new */
@@ -1990,6 +2101,21 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
                ;
        }
 
+#ifdef CONFIG_ENERGY_MODEL
+       /* Build perf. domains: */
+       for (i = 0; i < ndoms_new; i++) {
+               for (j = 0; j < n; j++) {
+                       if (cpumask_equal(doms_new[i], doms_cur[j]) &&
+                           cpu_rq(cpumask_first(doms_cur[j]))->rd->pd)
+                               goto match3;
+               }
+               /* No match - add perf. domains for a new rd */
+               build_perf_domains(doms_new[i]);
+match3:
+               ;
+       }
+#endif
+
        /* Remember the new sched domains: */
        if (doms_cur != &fallback_doms)
                free_sched_domains(doms_cur, ndoms_cur);
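
Because rd->pd is published with rcu_assign_pointer() and torn down via call_rcu(), readers must walk the list inside an RCU read-side critical section. A minimal reader sketch, reusing the helpers added above; the function name eas_pd_nr_cap_states() is hypothetical and not part of this patch:

static int eas_pd_nr_cap_states(int cpu)
{
        struct perf_domain *pd;
        int nr = 0;

        rcu_read_lock();
        /* Pairs with rcu_assign_pointer() in build_perf_domains(). */
        pd = rcu_dereference(cpu_rq(cpu)->rd->pd);
        pd = find_pd(pd, cpu);
        if (pd)
                nr = em_pd_nr_cap_states(pd->em_pd);
        rcu_read_unlock();

        return nr;
}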