Merge branch 'sched/urgent' into sched/core, to pick up fixes
author    Ingo Molnar <mingo@kernel.org>
          Tue, 2 Oct 2018 07:43:39 +0000 (09:43 +0200)
committer Ingo Molnar <mingo@kernel.org>
          Tue, 2 Oct 2018 07:43:39 +0000 (09:43 +0200)
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/arm/include/asm/topology.h
arch/arm64/include/asm/topology.h
drivers/base/arch_topology.c
include/linux/arch_topology.h
include/linux/sched/topology.h
include/trace/events/sched.h
kernel/sched/fair.c
kernel/sched/sched.h
kernel/sched/topology.c

diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h
index 5d88d2f22b2cc5d62ccc883cded7693605134084..2a786f54d8b8b26af5b17f00c3373e09da3e30f8 100644
@@ -33,6 +33,9 @@ const struct cpumask *cpu_coregroup_mask(int cpu);
 /* Replace task scheduler's default cpu-invariant accounting */
 #define arch_scale_cpu_capacity topology_get_cpu_scale
 
+/* Enable topology flag updates */
+#define arch_update_cpu_topology topology_update_cpu_topology
+
 #else
 
 static inline void init_cpu_topology(void) { }
diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h
index 49a0fee4f89b58ba232508f9f04e21b97fd5455f..0524f243864931087c1227fc3b8cf970d471ce5b 100644
@@ -45,6 +45,9 @@ int pcibus_to_node(struct pci_bus *bus);
 /* Replace task scheduler's default cpu-invariant accounting */
 #define arch_scale_cpu_capacity topology_get_cpu_scale
 
+/* Enable topology flag updates */
+#define arch_update_cpu_topology topology_update_cpu_topology
+
 #include <asm-generic/topology.h>
 
 #endif /* _ASM_ARM_TOPOLOGY_H */
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index e7cb0c6ade81ec7171a0a55fdb0cf0f5ab4cd199..edfcf8d982e4186a80a3bf76ab626976bb86d200 100644
@@ -15,6 +15,7 @@
 #include <linux/slab.h>
 #include <linux/string.h>
 #include <linux/sched/topology.h>
+#include <linux/cpuset.h>
 
 DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE;
 
@@ -47,6 +48,9 @@ static ssize_t cpu_capacity_show(struct device *dev,
        return sprintf(buf, "%lu\n", topology_get_cpu_scale(NULL, cpu->dev.id));
 }
 
+static void update_topology_flags_workfn(struct work_struct *work);
+static DECLARE_WORK(update_topology_flags_work, update_topology_flags_workfn);
+
 static ssize_t cpu_capacity_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf,
@@ -72,6 +76,8 @@ static ssize_t cpu_capacity_store(struct device *dev,
                topology_set_cpu_scale(i, new_capacity);
        mutex_unlock(&cpu_scale_mutex);
 
+       schedule_work(&update_topology_flags_work);
+
        return count;
 }
 
@@ -96,6 +102,25 @@ static int register_cpu_capacity_sysctl(void)
 }
 subsys_initcall(register_cpu_capacity_sysctl);
 
+static int update_topology;
+
+int topology_update_cpu_topology(void)
+{
+       return update_topology;
+}
+
+/*
+ * Updating the sched_domains can't be done directly from cpufreq callbacks
+ * due to locking, so queue the work for later.
+ */
+static void update_topology_flags_workfn(struct work_struct *work)
+{
+       update_topology = 1;
+       rebuild_sched_domains();
+       pr_debug("sched_domain hierarchy rebuilt, flags updated\n");
+       update_topology = 0;
+}
+
 static u32 capacity_scale;
 static u32 *raw_capacity;
 
@@ -201,6 +226,7 @@ init_cpu_capacity_callback(struct notifier_block *nb,
 
        if (cpumask_empty(cpus_to_visit)) {
                topology_normalize_cpu_scale();
+               schedule_work(&update_topology_flags_work);
                free_raw_capacity();
                pr_debug("cpu_capacity: parsing done\n");
                schedule_work(&parsing_done_work);
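
The flag/work pair added above is a small handshake: the work item sets update_topology, triggers a sched-domain rebuild (during which arch_update_cpu_topology(), wired to topology_update_cpu_topology() in the arm/arm64 headers earlier in this commit, reports 1), then clears the flag again. A minimal userspace sketch of that pattern, with names mirroring the patch purely for illustration, not kernel code:

/*
 * Illustrative sketch: the "work function" sets the flag, runs the rebuild
 * (which polls the flag), then clears it, so the rebuild sees fresh
 * topology flags exactly once per requested update.
 */
#include <stdio.h>

static int update_topology;

static int topology_update_cpu_topology(void)
{
        return update_topology;
}

/* Stand-in for rebuild_sched_domains(): only reacts when flagged. */
static void rebuild_sched_domains(void)
{
        if (topology_update_cpu_topology())
                printf("rebuilding sched domains with updated topology flags\n");
        else
                printf("topology unchanged, nothing to update\n");
}

static void update_topology_flags_workfn(void)
{
        update_topology = 1;
        rebuild_sched_domains();
        update_topology = 0;
}

int main(void)
{
        rebuild_sched_domains();        /* flag clear: no update */
        update_topology_flags_workfn(); /* flag set around the rebuild */
        return 0;
}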
diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h
index 2b709416de051989c56b916f1c8e6a3b02b4b3c7..d9bdc1a7f4e7aa9874e60f3dd29ef3175f11ed6c 100644
@@ -9,6 +9,7 @@
 #include <linux/percpu.h>
 
 void topology_normalize_cpu_scale(void);
+int topology_update_cpu_topology(void);
 
 struct device_node;
 bool topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu);
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index 26347741ba502af17f909debf4af756d7154d327..6b9976180c1ebab037c31032999fbe59a991893b 100644
 #define SD_BALANCE_FORK                0x0008  /* Balance on fork, clone */
 #define SD_BALANCE_WAKE                0x0010  /* Balance on wakeup */
 #define SD_WAKE_AFFINE         0x0020  /* Wake task to waking CPU */
-#define SD_ASYM_CPUCAPACITY    0x0040  /* Groups have different max cpu capacities */
-#define SD_SHARE_CPUCAPACITY   0x0080  /* Domain members share cpu capacity */
+#define SD_ASYM_CPUCAPACITY    0x0040  /* Domain members have different CPU capacities */
+#define SD_SHARE_CPUCAPACITY   0x0080  /* Domain members share CPU capacity */
 #define SD_SHARE_POWERDOMAIN   0x0100  /* Domain members share power domain */
-#define SD_SHARE_PKG_RESOURCES 0x0200  /* Domain members share cpu pkg resources */
+#define SD_SHARE_PKG_RESOURCES 0x0200  /* Domain members share CPU pkg resources */
 #define SD_SERIALIZE           0x0400  /* Only a single load balancing instance */
 #define SD_ASYM_PACKING                0x0800  /* Place busy groups earlier in the domain */
 #define SD_PREFER_SIBLING      0x1000  /* Prefer to place tasks in a sibling domain */
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 0be866c91f62d055f5a6a3ab25a263a11f111916..f07b270d4fc4febeb1d3c4d0413ba4bf14e05daf 100644
@@ -159,9 +159,14 @@ TRACE_EVENT(sched_switch,
 
                (__entry->prev_state & (TASK_REPORT_MAX - 1)) ?
                  __print_flags(__entry->prev_state & (TASK_REPORT_MAX - 1), "|",
-                               { 0x01, "S" }, { 0x02, "D" }, { 0x04, "T" },
-                               { 0x08, "t" }, { 0x10, "X" }, { 0x20, "Z" },
-                               { 0x40, "P" }, { 0x80, "I" }) :
+                               { TASK_INTERRUPTIBLE, "S" },
+                               { TASK_UNINTERRUPTIBLE, "D" },
+                               { __TASK_STOPPED, "T" },
+                               { __TASK_TRACED, "t" },
+                               { EXIT_DEAD, "X" },
+                               { EXIT_ZOMBIE, "Z" },
+                               { TASK_PARKED, "P" },
+                               { TASK_DEAD, "I" }) :
                  "R",
 
                __entry->prev_state & TASK_REPORT_MAX ? "+" : "",
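
The sched_switch change above only swaps raw hex literals for the task-state symbols that already carry those values, so the emitted trace strings are unchanged. A standalone decoder, assuming the same bit values, reproduces the letter mapping (the real event joins multiple set flags with "|"; this sketch returns the first match only):

/* Sketch: map a sched_switch prev_state bit to its single-letter code.
 * The numeric values are the ones the old hex literals encoded. */
#include <stdio.h>

static const struct {
        unsigned int bit;
        const char *name;
} state_map[] = {
        { 0x01, "S" },  /* TASK_INTERRUPTIBLE   */
        { 0x02, "D" },  /* TASK_UNINTERRUPTIBLE */
        { 0x04, "T" },  /* __TASK_STOPPED       */
        { 0x08, "t" },  /* __TASK_TRACED        */
        { 0x10, "X" },  /* EXIT_DEAD            */
        { 0x20, "Z" },  /* EXIT_ZOMBIE          */
        { 0x40, "P" },  /* TASK_PARKED          */
        { 0x80, "I" },  /* TASK_DEAD            */
};

static const char *decode_prev_state(unsigned int state)
{
        for (size_t i = 0; i < sizeof(state_map) / sizeof(state_map[0]); i++)
                if (state & state_map[i].bit)
                        return state_map[i].name;
        return "R";     /* no bit set: the task was still runnable */
}

int main(void)
{
        printf("%s %s\n", decode_prev_state(0x02), decode_prev_state(0)); /* D R */
        return 0;
}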
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 25c7c7e09cbdd7af90c35a9c4887fa60208dbfdb..1d92ed2eca8b1deb587652c206d9345810cd4e23 100644
@@ -693,6 +693,7 @@ static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 
 static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);
 static unsigned long task_h_load(struct task_struct *p);
+static unsigned long capacity_of(int cpu);
 
 /* Give new sched_entity start runnable values to heavy its load in infant time */
 void init_entity_runnable_average(struct sched_entity *se)
@@ -1446,7 +1447,6 @@ bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
 static unsigned long weighted_cpuload(struct rq *rq);
 static unsigned long source_load(int cpu, int type);
 static unsigned long target_load(int cpu, int type);
-static unsigned long capacity_of(int cpu);
 
 /* Cached statistics for all CPUs within a node */
 struct numa_stats {
@@ -1454,8 +1454,6 @@ struct numa_stats {
 
        /* Total compute capacity of CPUs on a node */
        unsigned long compute_capacity;
-
-       unsigned int nr_running;
 };
 
 /*
@@ -1463,36 +1461,16 @@ struct numa_stats {
  */
 static void update_numa_stats(struct numa_stats *ns, int nid)
 {
-       int smt, cpu, cpus = 0;
-       unsigned long capacity;
+       int cpu;
 
        memset(ns, 0, sizeof(*ns));
        for_each_cpu(cpu, cpumask_of_node(nid)) {
                struct rq *rq = cpu_rq(cpu);
 
-               ns->nr_running += rq->nr_running;
                ns->load += weighted_cpuload(rq);
                ns->compute_capacity += capacity_of(cpu);
-
-               cpus++;
        }
 
-       /*
-        * If we raced with hotplug and there are no CPUs left in our mask
-        * the @ns structure is NULL'ed and task_numa_compare() will
-        * not find this node attractive.
-        *
-        * We'll detect a huge imbalance and bail there.
-        */
-       if (!cpus)
-               return;
-
-       /* smt := ceil(cpus / capacity), assumes: 1 < smt_power < 2 */
-       smt = DIV_ROUND_UP(SCHED_CAPACITY_SCALE * cpus, ns->compute_capacity);
-       capacity = cpus / smt; /* cores */
-
-       capacity = min_t(unsigned, capacity,
-               DIV_ROUND_CLOSEST(ns->compute_capacity, SCHED_CAPACITY_SCALE));
 }
 
 struct task_numa_env {
@@ -3713,6 +3691,29 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
        WRITE_ONCE(p->se.avg.util_est, ue);
 }
 
+static inline int task_fits_capacity(struct task_struct *p, long capacity)
+{
+       return capacity * 1024 > task_util_est(p) * capacity_margin;
+}
+
+static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
+{
+       if (!static_branch_unlikely(&sched_asym_cpucapacity))
+               return;
+
+       if (!p) {
+               rq->misfit_task_load = 0;
+               return;
+       }
+
+       if (task_fits_capacity(p, capacity_of(cpu_of(rq)))) {
+               rq->misfit_task_load = 0;
+               return;
+       }
+
+       rq->misfit_task_load = task_h_load(p);
+}
+
 #else /* CONFIG_SMP */
 
 #define UPDATE_TG      0x0
@@ -3742,6 +3743,7 @@ util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) {}
 static inline void
 util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p,
                 bool task_sleep) {}
+static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
 
 #endif /* CONFIG_SMP */
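
task_fits_capacity() is a fixed-point margin test: the task fits if util * capacity_margin < capacity * 1024 (1024 being SCHED_CAPACITY_SCALE). Assuming capacity_margin is 1280, the value fair.c uses around this series (~20% headroom), a task only "fits" while its utilization stays below roughly 80% of the CPU's capacity; otherwise update_misfit_status() records task_h_load() in rq->misfit_task_load. A quick standalone check of the arithmetic, with made-up capacity values:

/*
 * Sketch of the task_fits_capacity() arithmetic. capacity_margin = 1280
 * is an assumption taken from fair.c of this era; capacities 430/1024 are
 * just typical big.LITTLE example values.
 */
#include <stdio.h>

static unsigned int capacity_margin = 1280;     /* ~20% headroom */

static int task_fits_capacity(unsigned long util, long capacity)
{
        return capacity * 1024 > util * capacity_margin;
}

int main(void)
{
        unsigned long util = 400;

        printf("util=%lu fits little(430)?  %d\n", util, task_fits_capacity(util, 430));
        printf("util=%lu fits big(1024)?    %d\n", util, task_fits_capacity(util, 1024));
        /* Prints 0 then 1: the task is a "misfit" on the little CPU, so the
         * load balancer is allowed to migrate it to a bigger one. */
        return 0;
}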
 
@@ -6254,6 +6256,9 @@ static int wake_cap(struct task_struct *p, int cpu, int prev_cpu)
 {
        long min_cap, max_cap;
 
+       if (!static_branch_unlikely(&sched_asym_cpucapacity))
+               return 0;
+
        min_cap = min(capacity_orig_of(prev_cpu), capacity_orig_of(cpu));
        max_cap = cpu_rq(cpu)->rd->max_cpu_capacity;
 
@@ -6264,7 +6269,7 @@ static int wake_cap(struct task_struct *p, int cpu, int prev_cpu)
        /* Bring task utilization in sync with prev_cpu */
        sync_entity_load_avg(&p->se);
 
-       return min_cap * 1024 < task_util(p) * capacity_margin;
+       return !task_fits_capacity(p, min_cap);
 }
 
 /*
@@ -6683,9 +6688,12 @@ done: __maybe_unused;
        if (hrtick_enabled(rq))
                hrtick_start_fair(rq, p);
 
+       update_misfit_status(p, rq);
+
        return p;
 
 idle:
+       update_misfit_status(NULL, rq);
        new_tasks = idle_balance(rq, rf);
 
        /*
@@ -6891,6 +6899,13 @@ static unsigned long __read_mostly max_load_balance_interval = HZ/10;
 
 enum fbq_type { regular, remote, all };
 
+enum group_type {
+       group_other = 0,
+       group_misfit_task,
+       group_imbalanced,
+       group_overloaded,
+};
+
 #define LBF_ALL_PINNED 0x01
 #define LBF_NEED_BREAK 0x02
 #define LBF_DST_PINNED  0x04
@@ -6921,6 +6936,7 @@ struct lb_env {
        unsigned int            loop_max;
 
        enum fbq_type           fbq_type;
+       enum group_type         src_grp_type;
        struct list_head        tasks;
 };
 
@@ -7464,12 +7480,6 @@ static unsigned long task_h_load(struct task_struct *p)
 
 /********** Helpers for find_busiest_group ************************/
 
-enum group_type {
-       group_other = 0,
-       group_imbalanced,
-       group_overloaded,
-};
-
 /*
  * sg_lb_stats - stats of a sched_group required for load_balancing
  */
@@ -7485,6 +7495,7 @@ struct sg_lb_stats {
        unsigned int group_weight;
        enum group_type group_type;
        int group_no_capacity;
+       unsigned long group_misfit_task_load; /* A CPU has a task too big for its capacity */
 #ifdef CONFIG_NUMA_BALANCING
        unsigned int nr_numa_running;
        unsigned int nr_preferred_running;
@@ -7593,13 +7604,14 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
        cpu_rq(cpu)->cpu_capacity = capacity;
        sdg->sgc->capacity = capacity;
        sdg->sgc->min_capacity = capacity;
+       sdg->sgc->max_capacity = capacity;
 }
 
 void update_group_capacity(struct sched_domain *sd, int cpu)
 {
        struct sched_domain *child = sd->child;
        struct sched_group *group, *sdg = sd->groups;
-       unsigned long capacity, min_capacity;
+       unsigned long capacity, min_capacity, max_capacity;
        unsigned long interval;
 
        interval = msecs_to_jiffies(sd->balance_interval);
@@ -7613,6 +7625,7 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
 
        capacity = 0;
        min_capacity = ULONG_MAX;
+       max_capacity = 0;
 
        if (child->flags & SD_OVERLAP) {
                /*
@@ -7643,6 +7656,7 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
                        }
 
                        min_capacity = min(capacity, min_capacity);
+                       max_capacity = max(capacity, max_capacity);
                }
        } else  {
                /*
@@ -7656,12 +7670,14 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
 
                        capacity += sgc->capacity;
                        min_capacity = min(sgc->min_capacity, min_capacity);
+                       max_capacity = max(sgc->max_capacity, max_capacity);
                        group = group->next;
                } while (group != child->groups);
        }
 
        sdg->sgc->capacity = capacity;
        sdg->sgc->min_capacity = min_capacity;
+       sdg->sgc->max_capacity = max_capacity;
 }
 
 /*
@@ -7757,16 +7773,27 @@ group_is_overloaded(struct lb_env *env, struct sg_lb_stats *sgs)
 }
 
 /*
- * group_smaller_cpu_capacity: Returns true if sched_group sg has smaller
+ * group_smaller_min_cpu_capacity: Returns true if sched_group sg has smaller
  * per-CPU capacity than sched_group ref.
  */
 static inline bool
-group_smaller_cpu_capacity(struct sched_group *sg, struct sched_group *ref)
+group_smaller_min_cpu_capacity(struct sched_group *sg, struct sched_group *ref)
 {
        return sg->sgc->min_capacity * capacity_margin <
                                                ref->sgc->min_capacity * 1024;
 }
 
+/*
+ * group_smaller_max_cpu_capacity: Returns true if sched_group sg has smaller
+ * per-CPU capacity_orig than sched_group ref.
+ */
+static inline bool
+group_smaller_max_cpu_capacity(struct sched_group *sg, struct sched_group *ref)
+{
+       return sg->sgc->max_capacity * capacity_margin <
+                                               ref->sgc->max_capacity * 1024;
+}
+
 static inline enum
 group_type group_classify(struct sched_group *group,
                          struct sg_lb_stats *sgs)
@@ -7777,6 +7804,9 @@ group_type group_classify(struct sched_group *group,
        if (sg_imbalanced(group))
                return group_imbalanced;
 
+       if (sgs->group_misfit_task_load)
+               return group_misfit_task;
+
        return group_other;
 }
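
group_misfit_task is deliberately slotted between group_other and group_imbalanced: group_type is compared numerically (e.g. sgs->group_type > busiest->group_type in update_sd_pick_busiest() further down), so a misfit group outranks an ordinary group but still yields to imbalanced and overloaded ones. A toy sketch of that ordering, for illustration only:

/* Sketch: the enum order doubles as a busiest-group priority. */
#include <stdio.h>

enum group_type {
        group_other = 0,
        group_misfit_task,
        group_imbalanced,
        group_overloaded,
};

int main(void)
{
        enum group_type busiest = group_misfit_task;
        enum group_type candidate = group_overloaded;

        if (candidate > busiest)        /* overloaded outranks misfit...     */
                busiest = candidate;    /* ...which itself outranks "other". */
        printf("picked group_type %d\n", busiest);      /* prints 3 */
        return 0;
}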
 
@@ -7809,7 +7839,7 @@ static bool update_nohz_stats(struct rq *rq, bool force)
  * @load_idx: Load index of sched_domain of this_cpu for load calc.
  * @local_group: Does group contain this_cpu.
  * @sgs: variable to hold the statistics for this group.
- * @overload: Indicate more than one runnable task for any CPU.
+ * @overload: Indicate pullable load (e.g. >1 runnable task).
  */
 static inline void update_sg_lb_stats(struct lb_env *env,
                        struct sched_group *group, int load_idx,
@@ -7851,6 +7881,12 @@ static inline void update_sg_lb_stats(struct lb_env *env,
                 */
                if (!nr_running && idle_cpu(i))
                        sgs->idle_cpus++;
+
+               if (env->sd->flags & SD_ASYM_CPUCAPACITY &&
+                   sgs->group_misfit_task_load < rq->misfit_task_load) {
+                       sgs->group_misfit_task_load = rq->misfit_task_load;
+                       *overload = 1;
+               }
        }
 
        /* Adjust by relative CPU capacity of the group */
@@ -7886,6 +7922,17 @@ static bool update_sd_pick_busiest(struct lb_env *env,
 {
        struct sg_lb_stats *busiest = &sds->busiest_stat;
 
+       /*
+        * Don't try to pull misfit tasks we can't help.
+        * We can use max_capacity here as reduction in capacity on some
+        * CPUs in the group should either be possible to resolve
+        * internally or be covered by avg_load imbalance (eventually).
+        */
+       if (sgs->group_type == group_misfit_task &&
+           (!group_smaller_max_cpu_capacity(sg, sds->local) ||
+            !group_has_capacity(env, &sds->local_stat)))
+               return false;
+
        if (sgs->group_type > busiest->group_type)
                return true;
 
@@ -7905,7 +7952,14 @@ static bool update_sd_pick_busiest(struct lb_env *env,
         * power/energy consequences are not considered.
         */
        if (sgs->sum_nr_running <= sgs->group_weight &&
-           group_smaller_cpu_capacity(sds->local, sg))
+           group_smaller_min_cpu_capacity(sds->local, sg))
+               return false;
+
+       /*
+        * If we have more than one misfit sg go with the biggest misfit.
+        */
+       if (sgs->group_type == group_misfit_task &&
+           sgs->group_misfit_task_load < busiest->group_misfit_task_load)
                return false;
 
 asym_packing:
@@ -7976,11 +8030,9 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
        struct sched_group *sg = env->sd->groups;
        struct sg_lb_stats *local = &sds->local_stat;
        struct sg_lb_stats tmp_sgs;
-       int load_idx, prefer_sibling = 0;
+       int load_idx;
        bool overload = false;
-
-       if (child && child->flags & SD_PREFER_SIBLING)
-               prefer_sibling = 1;
+       bool prefer_sibling = child && child->flags & SD_PREFER_SIBLING;
 
 #ifdef CONFIG_NO_HZ_COMMON
        if (env->idle == CPU_NEWLY_IDLE && READ_ONCE(nohz.has_blocked))
@@ -8054,8 +8106,8 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
 
        if (!env->sd->parent) {
                /* update overload indicator if we are at root domain */
-               if (env->dst_rq->rd->overload != overload)
-                       env->dst_rq->rd->overload = overload;
+               if (READ_ONCE(env->dst_rq->rd->overload) != overload)
+                       WRITE_ONCE(env->dst_rq->rd->overload, overload);
        }
 }
 
@@ -8205,8 +8257,9 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
         * factors in sg capacity and sgs with smaller group_type are
         * skipped when updating the busiest sg:
         */
-       if (busiest->avg_load <= sds->avg_load ||
-           local->avg_load >= sds->avg_load) {
+       if (busiest->group_type != group_misfit_task &&
+           (busiest->avg_load <= sds->avg_load ||
+            local->avg_load >= sds->avg_load)) {
                env->imbalance = 0;
                return fix_small_imbalance(env, sds);
        }
@@ -8240,6 +8293,12 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
                (sds->avg_load - local->avg_load) * local->group_capacity
        ) / SCHED_CAPACITY_SCALE;
 
+       /* Boost imbalance to allow misfit task to be balanced. */
+       if (busiest->group_type == group_misfit_task) {
+               env->imbalance = max_t(long, env->imbalance,
+                                      busiest->group_misfit_task_load);
+       }
+
        /*
         * if *imbalance is less than the average load per runnable task
         * there is no guarantee that any tasks will be moved so we'll have
@@ -8306,6 +8365,10 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
            busiest->group_no_capacity)
                goto force_balance;
 
+       /* Misfit tasks should be dealt with regardless of the avg load */
+       if (busiest->group_type == group_misfit_task)
+               goto force_balance;
+
        /*
         * If the local group is busier than the selected busiest group
         * don't try and pull any tasks.
@@ -8343,6 +8406,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
 
 force_balance:
        /* Looks like there is an imbalance. Compute it */
+       env->src_grp_type = busiest->group_type;
        calculate_imbalance(env, &sds);
        return env->imbalance ? sds.busiest : NULL;
 
@@ -8390,8 +8454,32 @@ static struct rq *find_busiest_queue(struct lb_env *env,
                if (rt > env->fbq_type)
                        continue;
 
+               /*
+                * For ASYM_CPUCAPACITY domains with misfit tasks we simply
+                * seek the "biggest" misfit task.
+                */
+               if (env->src_grp_type == group_misfit_task) {
+                       if (rq->misfit_task_load > busiest_load) {
+                               busiest_load = rq->misfit_task_load;
+                               busiest = rq;
+                       }
+
+                       continue;
+               }
+
                capacity = capacity_of(i);
 
+               /*
+                * For ASYM_CPUCAPACITY domains, don't pick a CPU that could
+                * eventually lead to active_balancing high->low capacity.
+                * Higher per-CPU capacity is considered better than balancing
+                * average load.
+                */
+               if (env->sd->flags & SD_ASYM_CPUCAPACITY &&
+                   capacity_of(env->dst_cpu) < capacity &&
+                   rq->nr_running == 1)
+                       continue;
+
                wl = weighted_cpuload(rq);
 
                /*
@@ -8459,6 +8547,9 @@ static int need_active_balance(struct lb_env *env)
                        return 1;
        }
 
+       if (env->src_grp_type == group_misfit_task)
+               return 1;
+
        return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
 }
 
@@ -9101,7 +9192,7 @@ static void nohz_balancer_kick(struct rq *rq)
        if (time_before(now, nohz.next_balance))
                goto out;
 
-       if (rq->nr_running >= 2) {
+       if (rq->nr_running >= 2 || rq->misfit_task_load) {
                flags = NOHZ_KICK_MASK;
                goto out;
        }
@@ -9470,7 +9561,7 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
        rq_unpin_lock(this_rq, rf);
 
        if (this_rq->avg_idle < sysctl_sched_migration_cost ||
-           !this_rq->rd->overload) {
+           !READ_ONCE(this_rq->rd->overload)) {
 
                rcu_read_lock();
                sd = rcu_dereference_check_sched_domain(this_rq->sd);
@@ -9632,6 +9723,8 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
 
        if (static_branch_unlikely(&sched_numa_balancing))
                task_tick_numa(rq, curr);
+
+       update_misfit_status(curr, rq);
 }
 
 /*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 455fa330de0462db774f827a726478aa66abad3b..632804fa0b125e83fd62e9e4056386c052f697ac 100644
@@ -715,8 +715,12 @@ struct root_domain {
        cpumask_var_t           span;
        cpumask_var_t           online;
 
-       /* Indicate more than one runnable task for any CPU */
-       bool                    overload;
+       /*
+        * Indicate pullable load on at least one CPU, e.g:
+        * - More than one runnable task
+        * - Running task is misfit
+        */
+       int                     overload;
 
        /*
         * The bit corresponding to a CPU gets set here if such CPU has more
@@ -843,6 +847,8 @@ struct rq {
 
        unsigned char           idle_balance;
 
+       unsigned long           misfit_task_load;
+
        /* For active balancing */
        int                     active_balance;
        int                     push_cpu;
@@ -1186,6 +1192,7 @@ DECLARE_PER_CPU(int, sd_llc_id);
 DECLARE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
 DECLARE_PER_CPU(struct sched_domain *, sd_numa);
 DECLARE_PER_CPU(struct sched_domain *, sd_asym);
+extern struct static_key_false sched_asym_cpucapacity;
 
 struct sched_group_capacity {
        atomic_t                ref;
@@ -1195,6 +1202,7 @@ struct sched_group_capacity {
         */
        unsigned long           capacity;
        unsigned long           min_capacity;           /* Min per-CPU capacity in group */
+       unsigned long           max_capacity;           /* Max per-CPU capacity in group */
        unsigned long           next_update;
        int                     imbalance;              /* XXX unrelated to capacity but shared group state */
 
@@ -1394,7 +1402,7 @@ static const_debug __maybe_unused unsigned int sysctl_sched_features =
        0;
 #undef SCHED_FEAT
 
-#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
+#define sched_feat(x) !!(sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
 
 #endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */
 
@@ -1694,8 +1702,8 @@ static inline void add_nr_running(struct rq *rq, unsigned count)
 
        if (prev_nr < 2 && rq->nr_running >= 2) {
 #ifdef CONFIG_SMP
-               if (!rq->rd->overload)
-                       rq->rd->overload = true;
+               if (!READ_ONCE(rq->rd->overload))
+                       WRITE_ONCE(rq->rd->overload, 1);
 #endif
        }
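
rd->overload becomes a plain int that is only touched through READ_ONCE()/WRITE_ONCE(), since it is read and written locklessly from several CPUs. A rough userspace analogue of the access pattern, using relaxed C11 atomics in place of the kernel macros (an approximation for illustration, not an exact equivalent):

/*
 * Sketch of the lockless rd->overload flag: set once pullable load appears,
 * checked cheaply before an expensive idle-balance pass.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int overload;     /* 0: nothing to pull, 1: pullable load */

/* Mirrors add_nr_running(): flag the root domain once >1 task is runnable. */
static void add_nr_running_path(int nr_running)
{
        if (nr_running >= 2 && !atomic_load_explicit(&overload, memory_order_relaxed))
                atomic_store_explicit(&overload, 1, memory_order_relaxed);
}

/* Mirrors idle_balance(): bail out early when there is nothing to pull. */
static int idle_balance_should_bail(void)
{
        return !atomic_load_explicit(&overload, memory_order_relaxed);
}

int main(void)
{
        printf("bail? %d\n", idle_balance_should_bail());       /* 1 */
        add_nr_running_path(2);
        printf("bail? %d\n", idle_balance_should_bail());       /* 0 */
        return 0;
}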
 
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 505a41c42b96107247e8b4a1f7259d4d883e4ec2..9d74371e4aad86a436c549045be08c2a1ed30853 100644
@@ -7,8 +7,8 @@
 DEFINE_MUTEX(sched_domains_mutex);
 
 /* Protected by sched_domains_mutex: */
-cpumask_var_t sched_domains_tmpmask;
-cpumask_var_t sched_domains_tmpmask2;
+static cpumask_var_t sched_domains_tmpmask;
+static cpumask_var_t sched_domains_tmpmask2;
 
 #ifdef CONFIG_SCHED_DEBUG
 
@@ -398,6 +398,7 @@ DEFINE_PER_CPU(int, sd_llc_id);
 DEFINE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
 DEFINE_PER_CPU(struct sched_domain *, sd_numa);
 DEFINE_PER_CPU(struct sched_domain *, sd_asym);
+DEFINE_STATIC_KEY_FALSE(sched_asym_cpucapacity);
 
 static void update_top_cache_domain(int cpu)
 {
@@ -692,6 +693,7 @@ static void init_overlap_sched_group(struct sched_domain *sd,
        sg_span = sched_group_span(sg);
        sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
        sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
+       sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;
 }
 
 static int
@@ -851,6 +853,7 @@ static struct sched_group *get_group(int cpu, struct sd_data *sdd)
 
        sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sched_group_span(sg));
        sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
+       sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;
 
        return sg;
 }
@@ -1061,7 +1064,6 @@ static struct cpumask             ***sched_domains_numa_masks;
  *   SD_SHARE_PKG_RESOURCES - describes shared caches
  *   SD_NUMA                - describes NUMA topologies
  *   SD_SHARE_POWERDOMAIN   - describes shared power domain
- *   SD_ASYM_CPUCAPACITY    - describes mixed capacity topologies
  *
  * Odd one out, which beside describing the topology has a quirk also
  * prescribes the desired behaviour that goes along with it:
@@ -1073,13 +1075,12 @@ static struct cpumask           ***sched_domains_numa_masks;
         SD_SHARE_PKG_RESOURCES |       \
         SD_NUMA                |       \
         SD_ASYM_PACKING        |       \
-        SD_ASYM_CPUCAPACITY    |       \
         SD_SHARE_POWERDOMAIN)
 
 static struct sched_domain *
 sd_init(struct sched_domain_topology_level *tl,
        const struct cpumask *cpu_map,
-       struct sched_domain *child, int cpu)
+       struct sched_domain *child, int dflags, int cpu)
 {
        struct sd_data *sdd = &tl->data;
        struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
@@ -1100,6 +1101,9 @@ sd_init(struct sched_domain_topology_level *tl,
                        "wrong sd_flags in topology description\n"))
                sd_flags &= ~TOPOLOGY_SD_FLAGS;
 
+       /* Apply detected topology flags */
+       sd_flags |= dflags;
+
        *sd = (struct sched_domain){
                .min_interval           = sd_weight,
                .max_interval           = 2*sd_weight,
@@ -1122,7 +1126,7 @@ sd_init(struct sched_domain_topology_level *tl,
                                        | 0*SD_SHARE_CPUCAPACITY
                                        | 0*SD_SHARE_PKG_RESOURCES
                                        | 0*SD_SERIALIZE
-                                       | 0*SD_PREFER_SIBLING
+                                       | 1*SD_PREFER_SIBLING
                                        | 0*SD_NUMA
                                        | sd_flags
                                        ,
@@ -1148,17 +1152,21 @@ sd_init(struct sched_domain_topology_level *tl,
        if (sd->flags & SD_ASYM_CPUCAPACITY) {
                struct sched_domain *t = sd;
 
+               /*
+                * Don't attempt to spread across CPUs of different capacities.
+                */
+               if (sd->child)
+                       sd->child->flags &= ~SD_PREFER_SIBLING;
+
                for_each_lower_domain(t)
                        t->flags |= SD_BALANCE_WAKE;
        }
 
        if (sd->flags & SD_SHARE_CPUCAPACITY) {
-               sd->flags |= SD_PREFER_SIBLING;
                sd->imbalance_pct = 110;
                sd->smt_gain = 1178; /* ~15% */
 
        } else if (sd->flags & SD_SHARE_PKG_RESOURCES) {
-               sd->flags |= SD_PREFER_SIBLING;
                sd->imbalance_pct = 117;
                sd->cache_nice_tries = 1;
                sd->busy_idx = 2;
@@ -1169,6 +1177,7 @@ sd_init(struct sched_domain_topology_level *tl,
                sd->busy_idx = 3;
                sd->idle_idx = 2;
 
+               sd->flags &= ~SD_PREFER_SIBLING;
                sd->flags |= SD_SERIALIZE;
                if (sched_domains_numa_distance[tl->numa_level] > RECLAIM_DISTANCE) {
                        sd->flags &= ~(SD_BALANCE_EXEC |
@@ -1178,7 +1187,6 @@ sd_init(struct sched_domain_topology_level *tl,
 
 #endif
        } else {
-               sd->flags |= SD_PREFER_SIBLING;
                sd->cache_nice_tries = 1;
                sd->busy_idx = 2;
                sd->idle_idx = 1;
@@ -1604,9 +1612,9 @@ static void __sdt_free(const struct cpumask *cpu_map)
 
 static struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
                const struct cpumask *cpu_map, struct sched_domain_attr *attr,
-               struct sched_domain *child, int cpu)
+               struct sched_domain *child, int dflags, int cpu)
 {
-       struct sched_domain *sd = sd_init(tl, cpu_map, child, cpu);
+       struct sched_domain *sd = sd_init(tl, cpu_map, child, dflags, cpu);
 
        if (child) {
                sd->level = child->level + 1;
@@ -1632,6 +1640,65 @@ static struct sched_domain *build_sched_domain(struct sched_domain_topology_leve
        return sd;
 }
 
+/*
+ * Find the sched_domain_topology_level where all CPU capacities are visible
+ * for all CPUs.
+ */
+static struct sched_domain_topology_level
+*asym_cpu_capacity_level(const struct cpumask *cpu_map)
+{
+       int i, j, asym_level = 0;
+       bool asym = false;
+       struct sched_domain_topology_level *tl, *asym_tl = NULL;
+       unsigned long cap;
+
+       /* Is there any asymmetry? */
+       cap = arch_scale_cpu_capacity(NULL, cpumask_first(cpu_map));
+
+       for_each_cpu(i, cpu_map) {
+               if (arch_scale_cpu_capacity(NULL, i) != cap) {
+                       asym = true;
+                       break;
+               }
+       }
+
+       if (!asym)
+               return NULL;
+
+       /*
+        * Examine topology from all CPU's point of views to detect the lowest
+        * sched_domain_topology_level where a highest capacity CPU is visible
+        * to everyone.
+        */
+       for_each_cpu(i, cpu_map) {
+               unsigned long max_capacity = arch_scale_cpu_capacity(NULL, i);
+               int tl_id = 0;
+
+               for_each_sd_topology(tl) {
+                       if (tl_id < asym_level)
+                               goto next_level;
+
+                       for_each_cpu_and(j, tl->mask(i), cpu_map) {
+                               unsigned long capacity;
+
+                               capacity = arch_scale_cpu_capacity(NULL, j);
+
+                               if (capacity <= max_capacity)
+                                       continue;
+
+                               max_capacity = capacity;
+                               asym_level = tl_id;
+                               asym_tl = tl;
+                       }
+next_level:
+                       tl_id++;
+               }
+       }
+
+       return asym_tl;
+}
+
+
 /*
  * Build sched domains for a given set of CPUs and attach the sched domains
  * to the individual CPUs
@@ -1644,18 +1711,30 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
        struct s_data d;
        struct rq *rq = NULL;
        int i, ret = -ENOMEM;
+       struct sched_domain_topology_level *tl_asym;
+       bool has_asym = false;
 
        alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
        if (alloc_state != sa_rootdomain)
                goto error;
 
+       tl_asym = asym_cpu_capacity_level(cpu_map);
+
        /* Set up domains for CPUs specified by the cpu_map: */
        for_each_cpu(i, cpu_map) {
                struct sched_domain_topology_level *tl;
 
                sd = NULL;
                for_each_sd_topology(tl) {
-                       sd = build_sched_domain(tl, cpu_map, attr, sd, i);
+                       int dflags = 0;
+
+                       if (tl == tl_asym) {
+                               dflags |= SD_ASYM_CPUCAPACITY;
+                               has_asym = true;
+                       }
+
+                       sd = build_sched_domain(tl, cpu_map, attr, sd, dflags, i);
+
                        if (tl == sched_domain_topology)
                                *per_cpu_ptr(d.sd, i) = sd;
                        if (tl->flags & SDTL_OVERLAP)
@@ -1704,6 +1783,9 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
        }
        rcu_read_unlock();
 
+       if (has_asym)
+               static_branch_enable_cpuslocked(&sched_asym_cpucapacity);
+
        if (rq && sched_debug_enabled) {
                pr_info("root domain span: %*pbl (max cpu_capacity = %lu)\n",
                        cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity);
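
For reference, asym_cpu_capacity_level() above examines the topology from every CPU's point of view and returns the lowest level at which a highest-capacity CPU is visible to all of them; build_sched_domains() then feeds SD_ASYM_CPUCAPACITY in through dflags at exactly that level and enables the sched_asym_cpucapacity static key. A standalone sketch of the same walk on a made-up 4+4 big.LITTLE layout (capacities and masks are illustrative values, not read from real hardware):

/*
 * Sketch of the asymmetry-level detection: 4 little CPUs (capacity 430)
 * and 4 big CPUs (1024), two levels (0 = MC/cluster, 1 = DIE/package).
 * The little CPUs only see a bigger CPU at the DIE level, so level 1 is
 * where SD_ASYM_CPUCAPACITY would be set.
 */
#include <stdio.h>

#define NR_CPUS 8
#define NR_TLS  2       /* 0 = MC (own cluster), 1 = DIE (all CPUs) */

static const unsigned long cpu_capacity[NR_CPUS] = {
        430, 430, 430, 430, 1024, 1024, 1024, 1024
};

/* Non-zero iff CPU j is in cpu's span at the given topology level. */
static int tl_mask(int level, int cpu, int j)
{
        if (level == 0)
                return (cpu / 4) == (j / 4);    /* same 4-CPU cluster */
        return 1;                               /* DIE spans everyone */
}

int main(void)
{
        int asym_level = -1;

        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
                unsigned long max_cap = cpu_capacity[cpu];

                for (int level = 0; level < NR_TLS; level++) {
                        if (level < asym_level)
                                continue;       /* mirrors the "goto next_level" skip */
                        for (int j = 0; j < NR_CPUS; j++) {
                                if (!tl_mask(level, cpu, j))
                                        continue;
                                if (cpu_capacity[j] > max_cap) {
                                        max_cap = cpu_capacity[j];
                                        asym_level = level;
                                }
                        }
                }
        }

        printf("SD_ASYM_CPUCAPACITY level: %d\n", asym_level);  /* prints 1 */
        return 0;
}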