asedeno.scripts.mit.edu Git - linux.git/commitdiff
sched/deadline: Make bandwidth enforcement scale-invariant
author: Juri Lelli <juri.lelli@arm.com>
Mon, 4 Dec 2017 10:23:25 +0000 (11:23 +0100)
committer: Ingo Molnar <mingo@kernel.org>
Wed, 10 Jan 2018 11:53:35 +0000 (12:53 +0100)
Apply frequency and CPU scale-invariance correction factor to bandwidth
enforcement (similar to what we already do to fair utilization tracking).

Each delta_exec gets scaled considering the current frequency and maximum
CPU capacity, which means that the reservation runtime parameter (which
needs to be specified by profiling the task's execution at maximum
frequency on the biggest-capacity core) is thus scaled accordingly.

Signed-off-by: Juri Lelli <juri.lelli@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Claudio Scordino <claudio@evidence.eu.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Luca Abeni <luca.abeni@santannapisa.it>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rafael J . Wysocki <rafael.j.wysocki@intel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Viresh Kumar <viresh.kumar@linaro.org>
Cc: alessio.balsini@arm.com
Cc: bristot@redhat.com
Cc: dietmar.eggemann@arm.com
Cc: joelaf@google.com
Cc: juri.lelli@redhat.com
Cc: mathieu.poirier@linaro.org
Cc: morten.rasmussen@arm.com
Cc: patrick.bellasi@arm.com
Cc: rjw@rjwysocki.net
Cc: rostedt@goodmis.org
Cc: tkjos@android.com
Cc: tommaso.cucinotta@santannapisa.it
Cc: vincent.guittot@linaro.org
Link: http://lkml.kernel.org/r/20171204102325.5110-9-juri.lelli@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/sched/deadline.c
kernel/sched/fair.c
kernel/sched/sched.h

index 54a0dc1424a9fb085b867254de51bb156020f5b1..9bb0e0c412ec6617c5ef6cfa0b4f703a6a536a23 100644 (file)
@@ -1151,7 +1151,8 @@ static void update_curr_dl(struct rq *rq)
 {
        struct task_struct *curr = rq->curr;
        struct sched_dl_entity *dl_se = &curr->dl;
-       u64 delta_exec;
+       u64 delta_exec, scaled_delta_exec;
+       int cpu = cpu_of(rq);
 
        if (!dl_task(curr) || !on_dl_rq(dl_se))
                return;
@@ -1185,9 +1186,26 @@ static void update_curr_dl(struct rq *rq)
        if (dl_entity_is_special(dl_se))
                return;
 
-       if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM))
-               delta_exec = grub_reclaim(delta_exec, rq, &curr->dl);
-       dl_se->runtime -= delta_exec;
+       /*
+        * For tasks that participate in GRUB, we implement GRUB-PA: the
+        * spare reclaimed bandwidth is used to clock down frequency.
+        *
+        * For the others, we still need to scale reservation parameters
+        * according to current frequency and CPU maximum capacity.
+        */
+       if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM)) {
+               scaled_delta_exec = grub_reclaim(delta_exec,
+                                                rq,
+                                                &curr->dl);
+       } else {
+               unsigned long scale_freq = arch_scale_freq_capacity(cpu);
+               unsigned long scale_cpu = arch_scale_cpu_capacity(NULL, cpu);
+
+               scaled_delta_exec = cap_scale(delta_exec, scale_freq);
+               scaled_delta_exec = cap_scale(scaled_delta_exec, scale_cpu);
+       }
+
+       dl_se->runtime -= scaled_delta_exec;
 
 throttle:
        if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
index 14859757bff09300f2f4c7f7e0e081774622a3e6..1070803cb4237d29b76ccd81326b6fd4085170af 100644 (file)
@@ -3089,8 +3089,6 @@ static u32 __accumulate_pelt_segments(u64 periods, u32 d1, u32 d3)
        return c1 + c2 + c3;
 }
 
-#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
-
 /*
  * Accumulate the three separate parts of the sum; d1 the remainder
  * of the last (incomplete) period, d2 the span of full periods and d3
index e122c89bdbdd8ecc5c59542efb2187635e2ed18d..2e95505e23c692a6c0ecd25b752f3bc6f72fe2d0 100644 (file)
@@ -156,6 +156,8 @@ static inline int task_has_dl_policy(struct task_struct *p)
        return dl_policy(p->policy);
 }
 
+#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
+
 /*
  * !! For sched_setattr_nocheck() (kernel) only !!
  *