/* Protected by sched_domains_mutex: */
cpumask_var_t sched_domains_tmpmask;
+cpumask_var_t sched_domains_tmpmask2;
#ifdef CONFIG_SCHED_DEBUG
* Only CPUs that can arrive at this group should be considered to continue
* balancing.
*/
-static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)
+static void
+build_group_mask(struct sched_domain *sd, struct sched_group *sg, struct cpumask *mask)
{
const struct cpumask *sg_span = sched_group_cpus(sg);
struct sd_data *sdd = sd->private;
struct sched_domain *sibling;
int i;
+ cpumask_clear(mask);
+
for_each_cpu(i, sg_span) {
sibling = *per_cpu_ptr(sdd->sd, i);
if (!cpumask_equal(sg_span, sched_domain_span(sibling->child)))
continue;
- cpumask_set_cpu(i, sched_group_mask(sg));
+ cpumask_set_cpu(i, mask);
}
/* We must not have empty masks here */
- WARN_ON_ONCE(cpumask_empty(sched_group_mask(sg)));
+ WARN_ON_ONCE(cpumask_empty(mask));
}
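
The point of this hunk is that build_group_mask() now fills a caller-supplied scratch cpumask (sched_domains_tmpmask2, serialized by sched_domains_mutex per the comment above its declaration) instead of writing straight into sched_group_mask(sg); because the scratch is reused across calls it has to be cleared on entry, and an empty result still triggers the WARN. As a rough, self-contained userspace analogue of that calling convention (a plain 64-bit word stands in for a cpumask; every name here is invented for illustration, not kernel API):

#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for a cpumask: one bit per CPU (illustration only). */
typedef uint64_t toy_mask_t;

/* Shared scratch mask; the kernel serializes its use with sched_domains_mutex. */
static toy_mask_t toy_tmpmask2;

/*
 * Fill the caller-supplied 'mask' with the CPUs of 'span' that are also in
 * 'eligible'.  The mask is cleared first because it is a reused scratch buffer.
 */
static void toy_build_mask(toy_mask_t span, toy_mask_t eligible, toy_mask_t *mask)
{
	int cpu;

	*mask = 0;				/* cpumask_clear(): scratch is reused */

	for (cpu = 0; cpu < 64; cpu++) {
		if (!(span & (1ULL << cpu)))
			continue;		/* CPU not in the group span */
		if (!(eligible & (1ULL << cpu)))
			continue;		/* CPU cannot balance in this group */
		*mask |= 1ULL << cpu;		/* cpumask_set_cpu(cpu, mask) */
	}

	if (*mask == 0)				/* WARN_ON_ONCE(cpumask_empty(mask)) */
		fprintf(stderr, "toy_build_mask: empty mask\n");
}

int main(void)
{
	toy_build_mask(0x0f, 0x06, &toy_tmpmask2);
	printf("mask = %#llx\n", (unsigned long long)toy_tmpmask2);
	return 0;
}
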
/*
}
static void init_overlap_sched_group(struct sched_domain *sd,
- struct sched_group *sg, int cpu)
+ struct sched_group *sg)
{
+ struct cpumask *mask = sched_domains_tmpmask2;
struct sd_data *sdd = sd->private;
struct cpumask *sg_span;
+ int cpu;
+
+ build_group_mask(sd, sg, mask);
+ cpu = cpumask_first_and(sched_group_cpus(sg), mask);
sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
if (atomic_inc_return(&sg->sgc->ref) == 1)
- build_group_mask(sd, sg);
+ cpumask_copy(sched_group_mask(sg), mask);
/*
* Initialize sgc->capacity such that even if we mess up the
sg_span = sched_group_cpus(sg);
cpumask_or(covered, covered, sg_span);
- init_overlap_sched_group(sd, sg, i);
+ init_overlap_sched_group(sd, sg);
if (!first)
first = sg;
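
The companion hunks above change which per-CPU sgc a group ends up using: init_overlap_sched_group() no longer takes a cpu argument from its caller, but builds the mask first and then picks the reference CPU as the first CPU present in both the group's span and that mask, and it copies the mask into the group's own storage only when it takes the first sgc reference. A minimal, self-contained C sketch of that selection and first-reference copy (toy types and helper names, not kernel API):

#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for sched_group_capacity: refcounted, owns the group mask. */
struct toy_sgc {
	int refcount;
	uint64_t group_mask;
};

/* cpumask_first_and() analogue: lowest set bit common to both masks, -1 if none. */
static int toy_first_and(uint64_t a, uint64_t b)
{
	uint64_t both = a & b;
	int cpu;

	for (cpu = 0; cpu < 64; cpu++)
		if (both & (1ULL << cpu))
			return cpu;
	return -1;
}

int main(void)
{
	struct toy_sgc sgc = { 0, 0 };
	uint64_t span = 0x0f;		/* group spans CPUs 0-3 */
	uint64_t scratch = 0x0c;	/* freshly built mask: CPUs 2-3 may balance */
	int cpu;

	/* The reference CPU comes from the mask, not from whoever built the group. */
	cpu = toy_first_and(span, scratch);

	/* Only the first reference copies the scratch mask into the shared sgc. */
	if (++sgc.refcount == 1)
		sgc.group_mask = scratch;

	printf("reference cpu = %d, group_mask = %#llx\n",
	       cpu, (unsigned long long)sgc.group_mask);
	return 0;
}
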
int err;
zalloc_cpumask_var(&sched_domains_tmpmask, GFP_KERNEL);
+ zalloc_cpumask_var(&sched_domains_tmpmask2, GFP_KERNEL);
zalloc_cpumask_var(&fallback_doms, GFP_KERNEL);
arch_update_cpu_topology();