diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 47556b0c9a95faff3e827f6ffd690646cff38224..a21ea60219293a0be6cc65ee63918f650b2606e1 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -5,6 +5,8 @@
  */
 #include "sched.h"
 
+#include "pelt.h"
+
 int sched_rr_timeslice = RR_TIMESLICE;
 int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE;
 
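The new include pulls in the RT PELT helper used by the hunks further down. Going by those call sites, the declaration in kernel/sched/pelt.h should look roughly like the following (inferred from usage, so treat the exact prototype as an assumption):

/* Inferred from the call sites below:
 *   now     - task-clock timestamp, e.g. rq_clock_task(rq)
 *   running - 1 if an RT task has been running since the last update,
 *             0 if the signal should only decay
 */
int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
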
@@ -508,8 +510,11 @@ static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 
        rt_se = rt_rq->tg->rt_se[cpu];
 
-       if (!rt_se)
+       if (!rt_se) {
                dequeue_top_rt_rq(rt_rq);
+               /* Kick cpufreq (see the comment in kernel/sched/sched.h). */
+               cpufreq_update_util(rq_of_rt_rq(rt_rq), 0);
+       }
        else if (on_rt_rq(rt_se))
                dequeue_rt_entity(rt_se, 0);
 }
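This hunk, like the enqueue/dequeue rework below, ends with a "kick" of cpufreq. For reference, a simplified sketch of the helper it calls, as defined in kernel/sched/sched.h around this version (paraphrased; details such as the per-CPU variable may differ slightly):

static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
{
        struct update_util_data *data;

        /* Invoke whatever callback the cpufreq governor (e.g. schedutil)
         * registered for this CPU so it can re-evaluate the frequency. */
        data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
                                                  cpu_of(rq)));
        if (data)
                data->func(data, rq_clock(rq), flags);
}
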
@@ -833,6 +838,8 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
                 * can be time-consuming. Try to avoid it when possible.
                 */
                raw_spin_lock(&rt_rq->rt_runtime_lock);
+               if (!sched_feat(RT_RUNTIME_SHARE) && rt_rq->rt_runtime != RUNTIME_INF)
+                       rt_rq->rt_runtime = rt_b->rt_runtime;
                skip = !rt_rq->rt_time && !rt_rq->rt_nr_running;
                raw_spin_unlock(&rt_rq->rt_runtime_lock);
                if (skip)
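The added check resets a CPU's local RT runtime to the full per-period budget whenever the RT_RUNTIME_SHARE feature is disabled, so runtime borrowed from (or lent to) other CPUs while sharing was on does not linger. A stand-alone toy model of that refill rule (plain C, illustration only; the names are made up and this is not kernel code):

#include <stdint.h>

#define TOY_RUNTIME_INF (~0ULL)         /* stands in for RUNTIME_INF */

/* Per-period refill with runtime sharing off: earlier borrowing or
 * lending is forgotten and the rt_rq goes back to the group budget;
 * groups with unlimited runtime are left untouched. */
static uint64_t toy_refill_runtime(uint64_t local_runtime,
                                   uint64_t group_budget,
                                   int runtime_share)
{
        if (!runtime_share && local_runtime != TOY_RUNTIME_INF)
                return group_budget;    /* mirrors rt_b->rt_runtime */
        return local_runtime;
}
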
@@ -968,8 +975,6 @@ static void update_curr_rt(struct rq *rq)
        curr->se.exec_start = now;
        cgroup_account_cputime(curr, delta_exec);
 
-       sched_rt_avg_update(rq, delta_exec);
-
        if (!rt_bandwidth_enabled())
                return;
 
@@ -1001,8 +1006,6 @@ dequeue_top_rt_rq(struct rt_rq *rt_rq)
        sub_nr_running(rq, rt_rq->rt_nr_running);
        rt_rq->rt_queued = 0;
 
-       /* Kick cpufreq (see the comment in kernel/sched/sched.h). */
-       cpufreq_update_util(rq, 0);
 }
 
 static void
@@ -1014,11 +1017,14 @@ enqueue_top_rt_rq(struct rt_rq *rt_rq)
 
        if (rt_rq->rt_queued)
                return;
-       if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running)
+
+       if (rt_rq_throttled(rt_rq))
                return;
 
-       add_nr_running(rq, rt_rq->rt_nr_running);
-       rt_rq->rt_queued = 1;
+       if (rt_rq->rt_nr_running) {
+               add_nr_running(rq, rt_rq->rt_nr_running);
+               rt_rq->rt_queued = 1;
+       }
 
        /* Kick cpufreq (see the comment in kernel/sched/sched.h). */
        cpufreq_update_util(rq, 0);
@@ -1555,7 +1561,7 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 
        /*
         * We may dequeue prev's rt_rq in put_prev_task().
-        * So, we update time before rt_nr_running check.
+        * So, we update time before rt_queued check.
         */
        if (prev->sched_class == &rt_sched_class)
                update_curr_rt(rq);
@@ -1572,6 +1578,14 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 
        rt_queue_push_tasks(rq);
 
+       /*
+        * If the previous task was an RT task, put_prev_task() has already
+        * updated the utilization. We only care about the case where we
+        * start to schedule an RT task.
+        */
+       if (rq->curr->sched_class != &rt_sched_class)
+               update_rt_rq_load_avg(rq_clock_task(rq), rq, 0);
+
        return p;
 }
 
@@ -1579,6 +1593,8 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 {
        update_curr_rt(rq);
 
+       update_rt_rq_load_avg(rq_clock_task(rq), rq, 1);
+
        /*
         * The previous task needs to be made eligible for pushing
         * if it is still active
@@ -2308,6 +2324,7 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
        struct sched_rt_entity *rt_se = &p->rt;
 
        update_curr_rt(rq);
+       update_rt_rq_load_avg(rq_clock_task(rq), rq, 1);
 
        watchdog(rq, p);
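
Taken together, the new calls pass running=0 from pick_next_task_rt() (no RT task ran during the window that just ended) and running=1 from put_prev_task_rt() and task_tick_rt() (an RT task did run). A minimal, self-contained model of the geometric decay such a PELT-style signal follows; the constants are made up for illustration and this is not the kernel's implementation:

#include <stdio.h>

/* Toy PELT-style utilization signal: each fixed window the old value is
 * decayed by a constant factor and, if the class was running, a new
 * contribution is added so the signal converges towards 1024. */
#define TOY_DECAY_NUM   978
#define TOY_DECAY_DEN   1024

static unsigned long toy_update(unsigned long util_avg, int running)
{
        util_avg = util_avg * TOY_DECAY_NUM / TOY_DECAY_DEN;
        if (running)
                util_avg += 1024 - 1024 * TOY_DECAY_NUM / TOY_DECAY_DEN;
        return util_avg;
}

int main(void)
{
        unsigned long util = 0;
        int i;

        /* 100 busy windows ramp the signal up, 100 idle windows decay it. */
        for (i = 0; i < 100; i++)
                util = toy_update(util, 1);
        printf("after running: %lu\n", util);
        for (i = 0; i < 100; i++)
                util = toy_update(util, 0);
        printf("after idle:    %lu\n", util);
        return 0;
}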