sched/core: Kill sched_class::task_waking to clean up the migration logic
author    Peter Zijlstra <peterz@infradead.org>
          Tue, 10 May 2016 16:24:37 +0000 (18:24 +0200)
committer Ingo Molnar <mingo@kernel.org>
          Thu, 12 May 2016 07:55:31 +0000 (09:55 +0200)
With sched_class::task_waking being called only when we do
set_task_cpu(), we can make sched_class::migrate_task_rq() do the work
and eliminate sched_class::task_waking entirely.
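
In other words, the wakeup-time work moves out of try_to_wake_up() and into the set_task_cpu() -> sched_class::migrate_task_rq() path. For orientation, a minimal userspace model of the resulting control flow (this is not kernel code: the names mirror the real functions, but the bodies and flag values are drastically simplified):

#include <stdio.h>

#define WF_MIGRATED      0x01   /* illustrative values, not the kernel's */
#define ENQUEUE_WAKEUP   0x01
#define ENQUEUE_MIGRATED 0x20

struct task { int cpu; };

/* stand-in for sched_class::migrate_task_rq(), now the only migration hook */
static void migrate_task_rq(struct task *p)
{
	printf("migrate_task_rq: fair class adjusts vruntime here\n");
}

/* set_task_cpu() already invokes ->migrate_task_rq() on a cross-CPU move */
static void set_task_cpu(struct task *p, int cpu)
{
	if (p->cpu != cpu)
		migrate_task_rq(p);   /* replaces the removed ->task_waking() call */
	p->cpu = cpu;
}

static void try_to_wake_up(struct task *p, int target_cpu)
{
	int wake_flags = 0, en_flags = ENQUEUE_WAKEUP;

	if (p->cpu != target_cpu) {
		wake_flags |= WF_MIGRATED;
		set_task_cpu(p, target_cpu);
	}
	if (wake_flags & WF_MIGRATED)     /* as in ttwu_do_activate() */
		en_flags |= ENQUEUE_MIGRATED;
	printf("enqueue flags: %#x\n", en_flags);
}

int main(void)
{
	struct task t = { .cpu = 0 };
	try_to_wake_up(&t, 1);            /* cross-CPU wakeup */
	return 0;
}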

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Hunter <ahh@google.com>
Cc: Ben Segall <bsegall@google.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Matt Fleming <matt@codeblueprint.co.uk>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Mike Galbraith <umgwanakikbuti@gmail.com>
Cc: Morten Rasmussen <morten.rasmussen@arm.com>
Cc: Paul Turner <pjt@google.com>
Cc: Pavan Kondeti <pkondeti@codeaurora.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: byungchul.park@lge.com
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/sched/core.c
kernel/sched/fair.c
kernel/sched/sched.h

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 1f73e2554bc109683c210fb24b75d77a2a72e581..636c4b9cac38037a08407f7bf96faac2427825af 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1717,11 +1717,8 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
        if (p->sched_contributes_to_load)
                rq->nr_uninterruptible--;
 
-       /*
-        * If we migrated; we must have called sched_class::task_waking().
-        */
        if (wake_flags & WF_MIGRATED)
-               en_flags |= ENQUEUE_WAKING;
+               en_flags |= ENQUEUE_MIGRATED;
 #endif
 
        ttwu_activate(rq, p, en_flags);
@@ -2049,10 +2046,6 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
        cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags);
        if (task_cpu(p) != cpu) {
                wake_flags |= WF_MIGRATED;
-
-               if (p->sched_class->task_waking)
-                       p->sched_class->task_waking(p);
-
                set_task_cpu(p, cpu);
        }
 #endif /* CONFIG_SMP */
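
Note the ordering change in try_to_wake_up(): ->task_waking() used to be called here, just before set_task_cpu(). After this patch, set_task_cpu() itself invokes ->migrate_task_rq(), and that hook fires for every cross-CPU move (load balancing included), not only wakeups. That is why the fair-class code below gates the vruntime adjustment on p->state == TASK_WAKING. A minimal userspace model of the gate (simplified types; the state constants are illustrative, not the kernel's):

#include <stdio.h>

#define TASK_RUNNING 0   /* illustrative values */
#define TASK_WAKING  1

struct task { int state; long long vruntime; };

/* model of migrate_task_rq_fair(): only a waking task still carries an
 * absolute vruntime that must be made relative to the source queue */
static void migrate_task_rq_fair(struct task *p, long long src_min_vruntime)
{
	if (p->state == TASK_WAKING)
		p->vruntime -= src_min_vruntime;
}

int main(void)
{
	struct task waking  = { TASK_WAKING,  1000 };
	struct task running = { TASK_RUNNING, 1000 };

	migrate_task_rq_fair(&waking, 900);   /* wakeup migration: adjusted */
	migrate_task_rq_fair(&running, 900);  /* e.g. load balance: untouched */

	printf("waking %lld, running %lld\n", waking.vruntime, running.vruntime);
	return 0;
}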
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 445bcd2d7ee133d24bb8639ec2963ee41b76b194..24ce01b73906a29f2a3c4be17bdeac9f4a51e5f0 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3273,7 +3273,7 @@ static inline void check_schedstat_required(void)
  *
  * WAKEUP (remote)
  *
- *     ->task_waking_fair()
+ *     ->migrate_task_rq_fair() (p->state == TASK_WAKING)
  *       vruntime -= min_vruntime
  *
  *     enqueue
@@ -3292,7 +3292,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
         * Update the normalized vruntime before updating min_vruntime
         * through calling update_curr().
         */
-       if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
+       if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATED))
                se->vruntime += cfs_rq->min_vruntime;
 
        /*
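
The rewritten condition re-adds min_vruntime exactly when the entity currently holds a relative vruntime: either this is not a wakeup (the value was made relative at dequeue time), or it is a wakeup that migrated (migrate_task_rq_fair() has just subtracted the old queue's min_vruntime). A non-migrated wakeup retains its absolute vruntime and is left alone. A worked example of the arithmetic, with made-up numbers:

/* A task sleeps on CPU0 with absolute vruntime 1000 while CPU0's
 * min_vruntime is 900; it then wakes on CPU1 whose min_vruntime is 5000. */
#include <stdio.h>

int main(void)
{
	long long vruntime = 1000;  /* absolute, retained across sleep */
	long long src_min  = 900;   /* CPU0 cfs_rq->min_vruntime */
	long long dst_min  = 5000;  /* CPU1 cfs_rq->min_vruntime */

	vruntime -= src_min;        /* migrate_task_rq_fair(): now relative, 100 */
	vruntime += dst_min;        /* enqueue_entity() with ENQUEUE_MIGRATED */

	/* the task keeps its 100 units of lag relative to the new queue */
	printf("vruntime on CPU1: %lld\n", vruntime);  /* prints 5100 */
	return 0;
}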
@@ -4841,33 +4841,6 @@ static unsigned long cpu_avg_load_per_task(int cpu)
        return 0;
 }
 
-/*
- * Called to migrate a waking task; as blocked tasks retain absolute vruntime
- * the migration needs to deal with this by subtracting the old and adding the
- * new min_vruntime -- the latter is done by enqueue_entity() when placing
- * the task on the new runqueue.
- */
-static void task_waking_fair(struct task_struct *p)
-{
-       struct sched_entity *se = &p->se;
-       struct cfs_rq *cfs_rq = cfs_rq_of(se);
-       u64 min_vruntime;
-
-#ifndef CONFIG_64BIT
-       u64 min_vruntime_copy;
-
-       do {
-               min_vruntime_copy = cfs_rq->min_vruntime_copy;
-               smp_rmb();
-               min_vruntime = cfs_rq->min_vruntime;
-       } while (min_vruntime != min_vruntime_copy);
-#else
-       min_vruntime = cfs_rq->min_vruntime;
-#endif
-
-       se->vruntime -= min_vruntime;
-}
-
 #ifdef CONFIG_FAIR_GROUP_SCHED
 /*
  * effective_load() calculates the load change as seen from the root_task_group
@@ -5402,6 +5375,32 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
  */
 static void migrate_task_rq_fair(struct task_struct *p)
 {
+       /*
+        * As blocked tasks retain absolute vruntime the migration needs to
+        * deal with this by subtracting the old and adding the new
+        * min_vruntime -- the latter is done by enqueue_entity() when placing
+        * the task on the new runqueue.
+        */
+       if (p->state == TASK_WAKING) {
+               struct sched_entity *se = &p->se;
+               struct cfs_rq *cfs_rq = cfs_rq_of(se);
+               u64 min_vruntime;
+
+#ifndef CONFIG_64BIT
+               u64 min_vruntime_copy;
+
+               do {
+                       min_vruntime_copy = cfs_rq->min_vruntime_copy;
+                       smp_rmb();
+                       min_vruntime = cfs_rq->min_vruntime;
+               } while (min_vruntime != min_vruntime_copy);
+#else
+               min_vruntime = cfs_rq->min_vruntime;
+#endif
+
+               se->vruntime -= min_vruntime;
+       }
+
        /*
         * We are supposed to update the task to "current" time, then its up to date
         * and ready to go to new CPU/cfs_rq. But we have difficulty in getting
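
The #ifndef CONFIG_64BIT loop moved here verbatim from task_waking_fair(): on 32-bit, a 64-bit load can tear, so the writer publishes min_vruntime and then, after an smp_wmb(), min_vruntime_copy, and the reader spins until both reads agree. A userspace sketch of the same protocol, using C11 fences as stand-ins for the kernel barriers (a model, not the kernel's code):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static volatile uint64_t min_vruntime;
static volatile uint64_t min_vruntime_copy;

static void writer_update(uint64_t v)         /* update_min_vruntime() side */
{
	min_vruntime = v;
	atomic_thread_fence(memory_order_release);   /* kernel: smp_wmb() */
	min_vruntime_copy = v;
}

static uint64_t reader_read(void)             /* migrate_task_rq_fair() side */
{
	uint64_t copy, val;

	do {
		copy = min_vruntime_copy;
		atomic_thread_fence(memory_order_acquire); /* kernel: smp_rmb() */
		val = min_vruntime;
	} while (val != copy);                    /* retry if an update raced */

	return val;
}

int main(void)
{
	writer_update(123456789ULL);
	printf("read back: %llu\n", (unsigned long long)reader_read());
	return 0;
}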
@@ -8672,7 +8671,6 @@ const struct sched_class fair_sched_class = {
        .rq_online              = rq_online_fair,
        .rq_offline             = rq_offline_fair,
 
-       .task_waking            = task_waking_fair,
        .task_dead              = task_dead_fair,
        .set_cpus_allowed       = set_cpus_allowed_common,
 #endif
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index ab6adb159e234ab490b630bedab38b8e917979d8..e51145e76807565f5ea5f4658bddddd40816c55c 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1168,7 +1168,7 @@ extern const u32 sched_prio_to_wmult[40];
  *
  * ENQUEUE_HEAD      - place at front of runqueue (tail if not specified)
  * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline)
- * ENQUEUE_WAKING    - sched_class::task_waking was called
+ * ENQUEUE_MIGRATED  - the task was migrated during wakeup
  *
  */
 
@@ -1183,9 +1183,9 @@ extern const u32 sched_prio_to_wmult[40];
 #define ENQUEUE_HEAD           0x08
 #define ENQUEUE_REPLENISH      0x10
 #ifdef CONFIG_SMP
-#define ENQUEUE_WAKING         0x20
+#define ENQUEUE_MIGRATED       0x20
 #else
-#define ENQUEUE_WAKING         0x00
+#define ENQUEUE_MIGRATED       0x00
 #endif
 
 #define RETRY_TASK             ((void *)-1UL)
@@ -1217,7 +1217,6 @@ struct sched_class {
        int  (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
        void (*migrate_task_rq)(struct task_struct *p);
 
-       void (*task_waking) (struct task_struct *task);
        void (*task_woken) (struct rq *this_rq, struct task_struct *task);
 
        void (*set_cpus_allowed)(struct task_struct *p,