From: Ingo Molnar
Date: Tue, 23 Sep 2008 14:23:05 +0000 (+0200)
Subject: Merge branches 'sched/urgent' and 'sched/rt' into sched/devel
X-Git-Tag: v2.6.28-rc1~725^2~2^2~9
X-Git-Url: https://asedeno.scripts.mit.edu/gitweb/?a=commitdiff_plain;h=63e5c39859a41591662466028c4d1281c033c05a;p=linux.git

Merge branches 'sched/urgent' and 'sched/rt' into sched/devel
---

63e5c39859a41591662466028c4d1281c033c05a
diff --cc kernel/sched.c
index 927c9307cd00,13dd2db9fb2d,4de2bfb28c58..669c49aa57f0
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@@@ -1425,9 -1425,9 -1418,35 +1418,35 @@@@ up
   	parent = parent->parent;
   	if (parent)
   		goto up;
++ out_unlock:
   	rcu_read_unlock();
++ 
++ 	return ret;
 + }
 + 
++ static int tg_nop(struct task_group *tg, void *data)
++ {
++ 	return 0;
 + }
++ #endif
++ 
++ #ifdef CONFIG_SMP
++ static unsigned long source_load(int cpu, int type);
++ static unsigned long target_load(int cpu, int type);
++ static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
++ 
++ static unsigned long cpu_avg_load_per_task(int cpu)
++ {
++ 	struct rq *rq = cpu_rq(cpu);
++ 
++ 	if (rq->nr_running)
++ 		rq->avg_load_per_task = rq->load.weight / rq->nr_running;
++ 
++ 	return rq->avg_load_per_task;
++ }
++ 
++ #ifdef CONFIG_FAIR_GROUP_SCHED
 + static void __set_se_shares(struct sched_entity *se, unsigned long shares);
   /*
@@@@ -8808,73 -8753,73 -8706,77 +8827,77 @@@@ static DEFINE_MUTEX(rt_constraints_mute
   static unsigned long to_ratio(u64 period, u64 runtime)
   {
   	if (runtime == RUNTIME_INF)
-- 		return 1ULL << 16;
++ 		return 1ULL << 20;
-- 	return div64_u64(runtime << 16, period);
++ 	return div64_u64(runtime << 20, period);
   }
-- #ifdef CONFIG_CGROUP_SCHED
-- static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
++ /* Must be called with tasklist_lock held */
++ static inline int tg_has_rt_tasks(struct task_group *tg)
   {
-- 	struct task_group *tgi, *parent = tg->parent;
-- 	unsigned long total = 0;
++ 	struct task_struct *g, *p;
-- 	if (!parent) {
-- 		if (global_rt_period() < period)
-- 			return 0;
++ 	do_each_thread(g, p) {
++ 		if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg)
++ 			return 1;
++ 	} while_each_thread(g, p);
-- 		return to_ratio(period, runtime) <
-- 			to_ratio(global_rt_period(), global_rt_runtime());
-- 	}
++ 	return 0;
++ }
-- 	if (ktime_to_ns(parent->rt_bandwidth.rt_period) < period)
-- 		return 0;
++ struct rt_schedulable_data {
++ 	struct task_group *tg;
++ 	u64 rt_period;
++ 	u64 rt_runtime;
++ };
-- 	rcu_read_lock();
-- 	list_for_each_entry_rcu(tgi, &parent->children, siblings) {
-- 		if (tgi == tg)
-- 			continue;
++ static int tg_schedulable(struct task_group *tg, void *data)
++ {
++ 	struct rt_schedulable_data *d = data;
++ 	struct task_group *child;
++ 	unsigned long total, sum = 0;
++ 	u64 period, runtime;
++ 
++ 	period = ktime_to_ns(tg->rt_bandwidth.rt_period);
++ 	runtime = tg->rt_bandwidth.rt_runtime;
-- 		total += to_ratio(ktime_to_ns(tgi->rt_bandwidth.rt_period),
-- 				tgi->rt_bandwidth.rt_runtime);
++ 	if (tg == d->tg) {
++ 		period = d->rt_period;
++ 		runtime = d->rt_runtime;
   	}
-- 	rcu_read_unlock();
-- 	return total + to_ratio(period, runtime) <=
-- 		to_ratio(ktime_to_ns(parent->rt_bandwidth.rt_period),
-- 				parent->rt_bandwidth.rt_runtime);
-- }
-- #elif defined CONFIG_USER_SCHED
-- static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
-- {
-- 	struct task_group *tgi;
-- 	unsigned long total = 0;
-- 	unsigned long global_ratio =
-- 		to_ratio(global_rt_period(), global_rt_runtime());
++ 	if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
++ 		return -EBUSY;
-- 	rcu_read_lock();
-- 	list_for_each_entry_rcu(tgi, &task_groups, list) {
-- 		if (tgi == tg)
-- 			continue;
++ 	total = to_ratio(period, runtime);
 + 
 - 		total += to_ratio(ktime_to_ns(tgi->rt_bandwidth.rt_period),
 - 				tgi->rt_bandwidth.rt_runtime);
++ 	list_for_each_entry_rcu(child, &tg->children, siblings) {
++ 		period = ktime_to_ns(child->rt_bandwidth.rt_period);
++ 		runtime = child->rt_bandwidth.rt_runtime;
++ 
++ 		if (child == d->tg) {
++ 			period = d->rt_period;
++ 			runtime = d->rt_runtime;
++ 		}
 + 
 - 		total += to_ratio(ktime_to_ns(tgi->rt_bandwidth.rt_period),
 - 				tgi->rt_bandwidth.rt_runtime);
++ 		sum += to_ratio(period, runtime);
   	}
-- 	rcu_read_unlock();
-- 	return total + to_ratio(period, runtime) < global_ratio;
++ 	if (sum > total)
++ 		return -EINVAL;
++ 
++ 	return 0;
   }
-- #endif
-- /* Must be called with tasklist_lock held */
-- static inline int tg_has_rt_tasks(struct task_group *tg)
++ static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
   {
-- 	struct task_struct *g, *p;
-- 	do_each_thread(g, p) {
-- 		if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg)
-- 			return 1;
-- 	} while_each_thread(g, p);
-- 	return 0;
++ 	struct rt_schedulable_data data = {
++ 		.tg = tg,
++ 		.rt_period = period,
++ 		.rt_runtime = runtime,
++ 	};
++ 
++ 	return walk_tg_tree(tg_schedulable, tg_nop, &data);
   }
   static int tg_set_bandwidth(struct task_group *tg,
@@@@ -8884,14 -8829,14 -8786,9 +8907,9 @@@@
   	mutex_lock(&rt_constraints_mutex);
   	read_lock(&tasklist_lock);
-- 	if (rt_runtime == 0 && tg_has_rt_tasks(tg)) {
-- 		err = -EBUSY;
 - 		goto unlock;
 - 	}
 - 	if (!__rt_schedulable(tg, rt_period, rt_runtime)) {
 - 		err = -EINVAL;
++ 	err = __rt_schedulable(tg, rt_period, rt_runtime);
++ 	if (err)
   		goto unlock;
 - 	}
 - 	if (!__rt_schedulable(tg, rt_period, rt_runtime)) {
 - 		err = -EINVAL;
 - 		goto unlock;
-- 	}
   	spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
   	tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
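The sketch below is not part of the commit; it is a stand-alone, user-space model of the pattern the merged code relies on: __rt_schedulable() now packs the proposed values into an rt_schedulable_data descriptor and lets walk_tg_tree() run a "down" visitor (tg_schedulable) over every task group, rejecting a configuration whose children claim a larger bandwidth ratio than their parent. The names struct node, walk_tree(), node_schedulable() and node_nop() are invented for illustration only; the kernel's walk_tg_tree() is iterative rather than recursive, and the real check additionally substitutes the period/runtime being proposed for the group under reconfiguration.

/*
 * Illustrative sketch only (not kernel code): a user-space model of the
 * down/up visitor walk used by walk_tg_tree(), with a hypothetical
 * "struct node" standing in for struct task_group.
 */
#include <stdio.h>
#include <stdint.h>

struct node {
	uint64_t period;
	uint64_t runtime;
	struct node *children[4];
	int nr_children;
};

typedef int (*visitor)(struct node *n, void *data);

/* Fixed-point ratio, matching the 20-bit shift the diff switches to. */
static uint64_t to_ratio(uint64_t period, uint64_t runtime)
{
	return (runtime << 20) / period;
}

/*
 * Recursive stand-in for walk_tg_tree(): call "down" before visiting the
 * children and "up" afterwards, stopping at the first non-zero return.
 */
static int walk_tree(struct node *n, visitor down, visitor up, void *data)
{
	int i, ret;

	ret = down(n, data);
	if (ret)
		return ret;
	for (i = 0; i < n->nr_children; i++) {
		ret = walk_tree(n->children[i], down, up, data);
		if (ret)
			return ret;
	}
	return up(n, data);
}

/* "up" visitor that does nothing, like tg_nop() in the diff. */
static int node_nop(struct node *n, void *data)
{
	return 0;
}

/* "down" visitor: children may not claim more bandwidth than the parent. */
static int node_schedulable(struct node *n, void *data)
{
	uint64_t total = to_ratio(n->period, n->runtime);
	uint64_t sum = 0;
	int i;

	for (i = 0; i < n->nr_children; i++)
		sum += to_ratio(n->children[i]->period,
				n->children[i]->runtime);

	return sum > total ? -1 : 0;
}

int main(void)
{
	struct node child = { .period = 1000000, .runtime = 300000 };
	struct node root  = { .period = 1000000, .runtime = 950000,
			      .children = { &child }, .nr_children = 1 };

	printf("schedulable: %s\n",
	       walk_tree(&root, node_schedulable, node_nop, NULL) ?
	       "no" : "yes");
	return 0;
}

With the figures above (a child asking for 30% under a root allowed 95%), the walk reports the setup as schedulable; raising the child's runtime past the root's would make node_schedulable() fail at the root, mirroring how tg_schedulable() returns -EINVAL once sum exceeds total.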