From: Ingo Molnar
Date: Mon, 15 Oct 2007 15:00:04 +0000 (+0200)
Subject: sched: clean up calc_weighted()
X-Git-Tag: v2.6.24-rc1~1289^2~124
X-Git-Url: https://git.karo-electronics.de/?a=commitdiff_plain;h=08e2388aa1e40cb06f7d04ac621e2ae94e1d8fdc;p=karo-tx-linux.git

sched: clean up calc_weighted()

clean up calc_weighted() - we always use the normalized shift so
it's not needed to pass that in. Also, push the non-nice0 branch
into the function.

Signed-off-by: Ingo Molnar
Signed-off-by: Peter Zijlstra
Signed-off-by: Mike Galbraith
Reviewed-by: Thomas Gleixner
---

diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 91a227b436ee..b46f8078e78f 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -397,27 +397,16 @@ update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	schedstat_set(se->wait_start, rq_of(cfs_rq)->clock);
 }
 
-/*
- * We calculate fair deltas here, so protect against the random effects
- * of a multiplication overflow by capping it to the runtime limit:
- */
-#if BITS_PER_LONG == 32
 static inline unsigned long
-calc_weighted(unsigned long delta, unsigned long weight, int shift)
+calc_weighted(unsigned long delta, struct sched_entity *se)
 {
-	u64 tmp = (u64)delta * weight >> shift;
+	unsigned long weight = se->load.weight;
 
-	if (unlikely(tmp > sysctl_sched_runtime_limit*2))
-		return sysctl_sched_runtime_limit*2;
-	return tmp;
+	if (unlikely(weight != NICE_0_LOAD))
+		return (u64)delta * se->load.weight >> NICE_0_SHIFT;
+	else
+		return delta;
 }
-#else
-static inline unsigned long
-calc_weighted(unsigned long delta, unsigned long weight, int shift)
-{
-	return delta * weight >> shift;
-}
-#endif
 
 /*
  * Task is being enqueued - update stats:
@@ -469,9 +458,7 @@ __update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se,
 	schedstat_set(se->wait_max, max(se->wait_max,
 			rq_of(cfs_rq)->clock - se->wait_start));
 
-	if (unlikely(se->load.weight != NICE_0_LOAD))
-		delta_fair = calc_weighted(delta_fair, se->load.weight,
-						NICE_0_SHIFT);
+	delta_fair = calc_weighted(delta_fair, se);
 
 	add_wait_runtime(cfs_rq, se, delta_fair);
 }
@@ -554,9 +541,7 @@ static void __enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se,
 	delta_fair = div64_likely32((u64)delta_fair * load,
 				    load + se->load.weight);
 
-	if (unlikely(se->load.weight != NICE_0_LOAD))
-		delta_fair = calc_weighted(delta_fair, se->load.weight,
-						NICE_0_SHIFT);
+	delta_fair = calc_weighted(delta_fair, se);
 
 	prev_runtime = se->wait_runtime;
 	__add_wait_runtime(cfs_rq, se, delta_fair);