sched/core: Streamline calls to task_rq_unlock()
author Mathieu Poirier <mathieu.poirier@linaro.org>
Fri, 19 Jul 2019 13:59:54 +0000 (15:59 +0200)
committer Ingo Molnar <mingo@kernel.org>
Thu, 25 Jul 2019 13:51:57 +0000 (15:51 +0200)
task_rq_unlock() is called from several places in the
__sched_setscheduler() function.  This is fine when only the rq lock needs
to be handled, but not so much when other locks come into play.

This patch streamlines the release of the rq lock so that only one
location needs to be modified when dealing with more than one lock.
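
For illustration, here is the same single-exit unlock pattern in a
self-contained user-space sketch (a pthread mutex stands in for the rq
lock; the function and variable names are hypothetical, not kernel code):

  #include <errno.h>
  #include <pthread.h>

  static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

  /* Hypothetical setter mirroring the structure of __sched_setscheduler()
   * after this patch: every error path shares one unlock site via goto. */
  static int set_value(int *slot, int val)
  {
          int retval;

          pthread_mutex_lock(&lock);

          if (!slot) {
                  retval = -EINVAL;
                  goto unlock;
          }
          if (val < 0) {
                  retval = -EPERM;
                  goto unlock;
          }

          *slot = val;
          pthread_mutex_unlock(&lock);    /* success path */
          return 0;

  unlock:
          pthread_mutex_unlock(&lock);    /* single error-path release */
          return retval;
  }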

No change of functionality is introduced by this patch.

Tested-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Acked-by: Tejun Heo <tj@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: bristot@redhat.com
Cc: claudio@evidence.eu.com
Cc: lizefan@huawei.com
Cc: longman@redhat.com
Cc: luca.abeni@santannapisa.it
Cc: tommaso.cucinotta@santannapisa.it
Link: https://lkml.kernel.org/r/20190719140000.31694-3-juri.lelli@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/sched/core.c

index 0b22e55cebe8f012a92a6ba26338b545c1a5d0ed..1af3d2dc6b294ae0437661d2663b7bcfa265895a 100644
@@ -4712,8 +4712,8 @@ static int __sched_setscheduler(struct task_struct *p,
         * Changing the policy of the stop threads its a very bad idea:
         */
        if (p == rq->stop) {
-               task_rq_unlock(rq, p, &rf);
-               return -EINVAL;
+               retval = -EINVAL;
+               goto unlock;
        }
 
        /*
@@ -4731,8 +4731,8 @@ static int __sched_setscheduler(struct task_struct *p,
                        goto change;
 
                p->sched_reset_on_fork = reset_on_fork;
-               task_rq_unlock(rq, p, &rf);
-               return 0;
+               retval = 0;
+               goto unlock;
        }
 change:
 
@@ -4745,8 +4745,8 @@ static int __sched_setscheduler(struct task_struct *p,
                if (rt_bandwidth_enabled() && rt_policy(policy) &&
                                task_group(p)->rt_bandwidth.rt_runtime == 0 &&
                                !task_group_is_autogroup(task_group(p))) {
-                       task_rq_unlock(rq, p, &rf);
-                       return -EPERM;
+                       retval = -EPERM;
+                       goto unlock;
                }
 #endif
 #ifdef CONFIG_SMP
@@ -4761,8 +4761,8 @@ static int __sched_setscheduler(struct task_struct *p,
                         */
                        if (!cpumask_subset(span, p->cpus_ptr) ||
                            rq->rd->dl_bw.bw == 0) {
-                               task_rq_unlock(rq, p, &rf);
-                               return -EPERM;
+                               retval = -EPERM;
+                               goto unlock;
                        }
                }
 #endif
@@ -4781,8 +4781,8 @@ static int __sched_setscheduler(struct task_struct *p,
         * is available.
         */
        if ((dl_policy(policy) || dl_task(p)) && sched_dl_overflow(p, policy, attr)) {
-               task_rq_unlock(rq, p, &rf);
-               return -EBUSY;
+               retval = -EBUSY;
+               goto unlock;
        }
 
        p->sched_reset_on_fork = reset_on_fork;
@@ -4840,6 +4840,10 @@ static int __sched_setscheduler(struct task_struct *p,
        preempt_enable();
 
        return 0;
+
+unlock:
+       task_rq_unlock(rq, p, &rf);
+       return retval;
 }
 
 static int _sched_setscheduler(struct task_struct *p, int policy,