diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 46122edd8552c9abd7acb3cf665332d91746ed7d..39dc9f74f2898f13b56837f8073f49043275a5d2 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -529,6 +529,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
 static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
 {
        struct rq *later_rq = NULL;
+       struct dl_bw *dl_b;
 
        later_rq = find_lock_later_rq(p, rq);
        if (!later_rq) {
@@ -557,6 +558,38 @@ static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p
                double_lock_balance(rq, later_rq);
        }
 
+       if (p->dl.dl_non_contending || p->dl.dl_throttled) {
+               /*
+                * Inactive timer is armed (or its callback is running, but
+                * waiting for us to release rq locks). Either way, when it
+                * fires (or continues), it will see the running_bw of this
+                * task migrated to later_rq (and handle it correctly).
+                */
+               sub_running_bw(&p->dl, &rq->dl);
+               sub_rq_bw(&p->dl, &rq->dl);
+
+               add_rq_bw(&p->dl, &later_rq->dl);
+               add_running_bw(&p->dl, &later_rq->dl);
+       } else {
+               sub_rq_bw(&p->dl, &rq->dl);
+               add_rq_bw(&p->dl, &later_rq->dl);
+       }
+
+       /*
+        * And we finally need to fixup root_domain(s) bandwidth accounting,
+        * since p is still hanging out in the old (now moved to default) root
+        * domain.
+        */
+       dl_b = &rq->rd->dl_bw;
+       raw_spin_lock(&dl_b->lock);
+       __dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
+       raw_spin_unlock(&dl_b->lock);
+
+       dl_b = &later_rq->rd->dl_bw;
+       raw_spin_lock(&dl_b->lock);
+       __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span));
+       raw_spin_unlock(&dl_b->lock);
+
        set_task_cpu(p, later_rq->cpu);
        double_unlock_balance(later_rq, rq);
 
@@ -1694,12 +1727,20 @@ static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
 }
 #endif
 
-static inline void set_next_task(struct rq *rq, struct task_struct *p)
+static void set_next_task_dl(struct rq *rq, struct task_struct *p)
 {
        p->se.exec_start = rq_clock_task(rq);
 
        /* You can't push away the running task */
        dequeue_pushable_dl_task(rq, p);
+
+       if (hrtick_enabled(rq))
+               start_hrtick_dl(rq, p);
+
+       if (rq->curr->sched_class != &dl_sched_class)
+               update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
+
+       deadline_queue_push_tasks(rq);
 }
 
 static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
@@ -1720,64 +1761,42 @@ pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
        struct task_struct *p;
        struct dl_rq *dl_rq;
 
-       dl_rq = &rq->dl;
+       WARN_ON_ONCE(prev || rf);
 
-       if (need_pull_dl_task(rq, prev)) {
-               /*
-                * This is OK, because current is on_cpu, which avoids it being
-                * picked for load-balance and preemption/IRQs are still
-                * disabled avoiding further scheduler activity on it and we're
-                * being very careful to re-start the picking loop.
-                */
-               rq_unpin_lock(rq, rf);
-               pull_dl_task(rq);
-               rq_repin_lock(rq, rf);
-               /*
-                * pull_dl_task() can drop (and re-acquire) rq->lock; this
-                * means a stop task can slip in, in which case we need to
-                * re-start task selection.
-                */
-               if (rq->stop && task_on_rq_queued(rq->stop))
-                       return RETRY_TASK;
-       }
-
-       /*
-        * When prev is DL, we may throttle it in put_prev_task().
-        * So, we update time before we check for dl_nr_running.
-        */
-       if (prev->sched_class == &dl_sched_class)
-               update_curr_dl(rq);
+       dl_rq = &rq->dl;
 
        if (unlikely(!dl_rq->dl_nr_running))
                return NULL;
 
-       put_prev_task(rq, prev);
-
        dl_se = pick_next_dl_entity(rq, dl_rq);
        BUG_ON(!dl_se);
 
        p = dl_task_of(dl_se);
 
-       set_next_task(rq, p);
-
-       if (hrtick_enabled(rq))
-               start_hrtick_dl(rq, p);
-
-       deadline_queue_push_tasks(rq);
-
-       if (rq->curr->sched_class != &dl_sched_class)
-               update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
+       set_next_task_dl(rq, p);
 
        return p;
 }
 
-static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
+static void put_prev_task_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
 {
        update_curr_dl(rq);
 
        update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
        if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
                enqueue_pushable_dl_task(rq, p);
+
+       if (rf && !on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) {
+               /*
+                * This is OK, because current is on_cpu, which avoids it being
+                * picked for load-balance and preemption/IRQs are still
+                * disabled avoiding further scheduler activity on it and we've
+                * not yet started the picking loop.
+                */
+               rq_unpin_lock(rq, rf);
+               pull_dl_task(rq);
+               rq_repin_lock(rq, rf);
+       }
 }
 
 /*
@@ -1811,11 +1830,6 @@ static void task_fork_dl(struct task_struct *p)
         */
 }
 
-static void set_curr_task_dl(struct rq *rq)
-{
-       set_next_task(rq, rq->curr);
-}
-
 #ifdef CONFIG_SMP
 
 /* Only try algorithms three times */
@@ -2275,6 +2289,36 @@ void __init init_sched_dl_class(void)
                                        GFP_KERNEL, cpu_to_node(i));
 }
 
+void dl_add_task_root_domain(struct task_struct *p)
+{
+       struct rq_flags rf;
+       struct rq *rq;
+       struct dl_bw *dl_b;
+
+       rq = task_rq_lock(p, &rf);
+       if (!dl_task(p))
+               goto unlock;
+
+       dl_b = &rq->rd->dl_bw;
+       raw_spin_lock(&dl_b->lock);
+
+       __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
+
+       raw_spin_unlock(&dl_b->lock);
+
+unlock:
+       task_rq_unlock(rq, p, &rf);
+}
+
+void dl_clear_root_domain(struct root_domain *rd)
+{
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&rd->dl_bw.lock, flags);
+       rd->dl_bw.total_bw = 0;
+       raw_spin_unlock_irqrestore(&rd->dl_bw.lock, flags);
+}
+
 #endif /* CONFIG_SMP */
 
 static void switched_from_dl(struct rq *rq, struct task_struct *p)
@@ -2395,6 +2439,7 @@ const struct sched_class dl_sched_class = {
 
        .pick_next_task         = pick_next_task_dl,
        .put_prev_task          = put_prev_task_dl,
+       .set_next_task          = set_next_task_dl,
 
 #ifdef CONFIG_SMP
        .select_task_rq         = select_task_rq_dl,
@@ -2405,7 +2450,6 @@ const struct sched_class dl_sched_class = {
        .task_woken             = task_woken_dl,
 #endif
 
-       .set_curr_task          = set_curr_task_dl,
        .task_tick              = task_tick_dl,
        .task_fork              = task_fork_dl,