sched: Prevent buddy interactions with throttled entities
author     Paul Turner <pjt@google.com>
           Thu, 21 Jul 2011 16:43:37 +0000 (09:43 -0700)
committer  Ingo Molnar <mingo@elte.hu>
           Sun, 14 Aug 2011 10:03:42 +0000 (12:03 +0200)
Buddies allow us to select "on-rq" entities without actually selecting them
from a cfs_rq's rb_tree.  As a result, we must ensure that throttled
entities are not falsely nominated as buddies.  The fact that entities are
dequeued within throttle_entity is not sufficient to clear buddy status,
since the nomination may occur after throttling.
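
For context: the "buddies" are the cfs_rq next/last hint pointers that
pick_next_entity() consults before falling back to the leftmost entity in
the rb_tree.  A condensed sketch of that preference logic, simplified from
the fair-class code of this era (the real function also handles the skip
buddy and an empty tree):

static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
{
	struct sched_entity *left = __pick_first_entity(cfs_rq);
	struct sched_entity *se = left;

	/*
	 * Prefer the last buddy: try to return the CPU to a preempted
	 * task, provided that is not too unfair to the leftmost entity.
	 */
	if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
		se = cfs_rq->last;

	/*
	 * A next buddy means someone really wants this entity to run;
	 * honor it unless doing so would be too unfair.
	 */
	if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
		se = cfs_rq->next;

	clear_buddies(cfs_rq, se);
	return se;
}

The buddy pointers are followed without consulting the rb_tree, so a buddy
left pointing at an entity that throttling has dequeued would still be
returned here; that is the window the hunks below close.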

Signed-off-by: Paul Turner <pjt@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20110721184757.886850167@google.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
kernel/sched_fair.c

index 5a2089492a983c5a1b8f7bb2b526f9ededab85c8..1d4acbea9e6001d8e5110f4046ab698a8f8ee258 100644 (file)
@@ -2348,6 +2348,15 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
        if (unlikely(se == pse))
                return;
 
+       /*
+        * This is possible from callers such as pull_task(), in which we
+        * unconditionally check_preempt_curr() after an enqueue (which may have
+        * led to a throttle).  This both saves work and prevents false
+        * next-buddy nomination below.
+        */
+       if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
+               return;
+
        if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
                set_next_buddy(pse);
                next_buddy_marked = 1;
@@ -2356,6 +2365,12 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
        /*
         * We can come here with TIF_NEED_RESCHED already set from new task
         * wake up path.
+        *
+        * Note: this also catches the edge case of curr being in a throttled
+        * group (e.g. via set_curr_task), since update_curr() (in the
+        * enqueue of curr) will have resulted in resched being set.  This
+        * prevents us from potentially nominating it as a false LAST_BUDDY
+        * below.
         */
        if (test_tsk_need_resched(curr))
                return;
@@ -2474,7 +2489,8 @@ static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preemp
 {
        struct sched_entity *se = &p->se;
 
-       if (!se->on_rq)
+       /* throttled hierarchies are not runnable */
+       if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
                return false;
 
        /* Tell the scheduler that we'd really like pse to run next. */
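
The throttled_hierarchy() predicate used in both hunks is introduced by
earlier patches in this series; a minimal sketch of its shape, assuming (as
that series arranges) that throttle_count is incremented on every cfs_rq
inside a throttled subtree:

/* check whether cfs_rq, or any parent, is throttled */
static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
{
	/*
	 * throttle_count is raised on every cfs_rq within a throttled
	 * subtree, including the throttled cfs_rq itself, so any
	 * non-zero value means this entity is not runnable and must
	 * not be nominated as a buddy.
	 */
	return cfs_rq->throttle_count;
}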