sched/core: Annotate curr pointer in rq with __rcu
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index e591d40fd645e451e79946fc9801ee24e72ef68e..4043abe45459df664d96351030966fb7104384e2 100644
@@ -437,6 +437,45 @@ static inline int on_rt_rq(struct sched_rt_entity *rt_se)
        return rt_se->on_rq;
 }
 
+#ifdef CONFIG_UCLAMP_TASK
+/*
+ * Verify the fitness of task @p to run on @cpu taking into account the uclamp
+ * settings.
+ *
+ * This check is only important for heterogeneous systems where the uclamp_min
+ * value is higher than the capacity of a @cpu. For non-heterogeneous systems
+ * this function will always return true.
+ *
+ * The function will return true if the capacity of the @cpu is >= the
+ * uclamp_min and false otherwise.
+ *
+ * Note that uclamp_min will be clamped to uclamp_max if uclamp_min
+ * > uclamp_max.
+ */
+static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
+{
+       unsigned int min_cap;
+       unsigned int max_cap;
+       unsigned int cpu_cap;
+
+       /* Only heterogeneous systems can benefit from this check */
+       if (!static_branch_unlikely(&sched_asym_cpucapacity))
+               return true;
+
+       min_cap = uclamp_eff_value(p, UCLAMP_MIN);
+       max_cap = uclamp_eff_value(p, UCLAMP_MAX);
+
+       cpu_cap = capacity_orig_of(cpu);
+
+       return cpu_cap >= min(min_cap, max_cap);
+}
+#else
+static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
+{
+       return true;
+}
+#endif
+
 #ifdef CONFIG_RT_GROUP_SCHED
 
 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
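
For readers unfamiliar with util clamp, the min(min_cap, max_cap) in the return
statement is what the last note in the comment refers to: a uclamp_min above
uclamp_max is judged against uclamp_max instead. Below is a stand-alone
user-space sketch of the same arithmetic (not kernel code; the helper name and
the per-CPU capacities are invented for a big.LITTLE-like system):

/* Illustrative user-space model of the fitness check above; not kernel code. */
#include <stdbool.h>
#include <stdio.h>

/* Invented capacities: two little CPUs (512) and two big CPUs (1024). */
static const unsigned int cpu_capacity[] = { 512, 512, 1024, 1024 };

static bool example_rt_fits(unsigned int uclamp_min, unsigned int uclamp_max, int cpu)
{
        /* uclamp_min is clamped to uclamp_max, exactly as in the patch. */
        unsigned int required = uclamp_min < uclamp_max ? uclamp_min : uclamp_max;

        return cpu_capacity[cpu] >= required;
}

int main(void)
{
        /* A task asking for at least 700 units of capacity fits only the big CPUs. */
        for (int cpu = 0; cpu < 4; cpu++)
                printf("cpu%d fits: %s\n", cpu,
                       example_rt_fits(700, 1024, cpu) ? "yes" : "no");

        return 0;
}
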
@@ -1391,6 +1430,7 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
 {
        struct task_struct *curr;
        struct rq *rq;
+       bool test;
 
        /* For anything but wake ups, just return the task_cpu */
        if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
@@ -1422,10 +1462,16 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
         *
         * This test is optimistic, if we get it wrong the load-balancer
         * will have to sort it out.
+        *
+        * We take into account the capacity of the CPU to ensure it fits the
+        * requirement of the task - which is only important on heterogeneous
+        * systems like big.LITTLE.
         */
-       if (curr && unlikely(rt_task(curr)) &&
-           (curr->nr_cpus_allowed < 2 ||
-            curr->prio <= p->prio)) {
+       test = curr &&
+              unlikely(rt_task(curr)) &&
+              (curr->nr_cpus_allowed < 2 || curr->prio <= p->prio);
+
+       if (test || !rt_task_fits_capacity(p, cpu)) {
                int target = find_lowest_rq(p);
 
                /*
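
Distilled, the wakeup-time decision now reads: look for a better CPU either when
the currently running task is an RT task that p should not disturb, or when p
itself no longer fits the capacity of this CPU. A stand-alone restatement
(illustrative only; the helper name and parameters below are mine, not the
kernel's):

/* Illustrative only: mirrors the "test || !fits" decision above. */
#include <stdbool.h>

bool should_search_lowest_rq(bool curr_is_rt, int curr_nr_cpus_allowed,
                             int curr_prio, int p_prio, bool p_fits_cpu)
{
        /*
         * Lower prio value means higher priority, so curr_prio <= p_prio
         * means curr is at least as important as the waking task p.
         */
        bool test = curr_is_rt &&
                    (curr_nr_cpus_allowed < 2 || curr_prio <= p_prio);

        return test || !p_fits_cpu;
}
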
@@ -1449,15 +1495,15 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
         * let's hope p can move out.
         */
        if (rq->curr->nr_cpus_allowed == 1 ||
-           !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
+           !cpupri_find(&rq->rd->cpupri, rq->curr, NULL, NULL))
                return;
 
        /*
         * p is migratable, so let's not schedule it and
         * see if it is pushed or pulled somewhere else.
         */
-       if (p->nr_cpus_allowed != 1
-           && cpupri_find(&rq->rd->cpupri, p, NULL))
+       if (p->nr_cpus_allowed != 1 &&
+           cpupri_find(&rq->rd->cpupri, p, NULL, NULL))
                return;
 
        /*
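
Both call sites above gain a second NULL: the new fourth argument to
cpupri_find() is a fitness callback, left unused here, while find_lowest_rq()
further down passes rt_task_fits_capacity() instead. For reference, the extended
prototype looks roughly like the following (paraphrased from kernel/sched/cpupri.h
as of this series; treat the exact spelling as my recollection rather than a
quote):

/* Paraphrased prototype; struct cpupri, task_struct and cpumask come from kernel headers. */
int cpupri_find(struct cpupri *cp, struct task_struct *p,
                struct cpumask *lowest_mask,
                bool (*fitness_fn)(struct task_struct *p, int cpu));
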
@@ -1601,7 +1647,8 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
 {
        if (!task_running(rq, p) &&
-           cpumask_test_cpu(cpu, p->cpus_ptr))
+           cpumask_test_cpu(cpu, p->cpus_ptr) &&
+           rt_task_fits_capacity(p, cpu))
                return 1;
 
        return 0;
@@ -1643,7 +1690,8 @@ static int find_lowest_rq(struct task_struct *task)
        if (task->nr_cpus_allowed == 1)
                return -1; /* No other targets possible */
 
-       if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
+       if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask,
+                        rt_task_fits_capacity))
                return -1; /* No targets found */
 
        /*
@@ -2147,12 +2195,14 @@ static void pull_rt_task(struct rq *this_rq)
  */
 static void task_woken_rt(struct rq *rq, struct task_struct *p)
 {
-       if (!task_running(rq, p) &&
-           !test_tsk_need_resched(rq->curr) &&
-           p->nr_cpus_allowed > 1 &&
-           (dl_task(rq->curr) || rt_task(rq->curr)) &&
-           (rq->curr->nr_cpus_allowed < 2 ||
-            rq->curr->prio <= p->prio))
+       bool need_to_push = !task_running(rq, p) &&
+                           !test_tsk_need_resched(rq->curr) &&
+                           p->nr_cpus_allowed > 1 &&
+                           (dl_task(rq->curr) || rt_task(rq->curr)) &&
+                           (rq->curr->nr_cpus_allowed < 2 ||
+                            rq->curr->prio <= p->prio);
+
+       if (need_to_push || !rt_task_fits_capacity(p, cpu_of(rq)))
                push_rt_tasks(rq);
 }
 
@@ -2224,7 +2274,10 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
         */
        if (task_on_rq_queued(p) && rq->curr != p) {
 #ifdef CONFIG_SMP
-               if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
+               bool need_to_push = rq->rt.overloaded ||
+                                   !rt_task_fits_capacity(p, cpu_of(rq));
+
+               if (p->nr_cpus_allowed > 1 && need_to_push)
                        rt_queue_push_tasks(rq);
 #endif /* CONFIG_SMP */
                if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq)))