From 7cff8cf61cac15fa29a1ca802826d2bcbca66152 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Thu, 9 Aug 2007 11:16:52 +0200
Subject: [PATCH] sched: refine negative nice level granularity

refine the granularity of negative nice level tasks: let them
reschedule more often to offset the effect of them consuming
their wait_runtime proportionately slower. (This makes nice-0
task scheduling smoother in the presence of negatively
reniced tasks.)

Signed-off-by: Ingo Molnar
---
 kernel/sched_fair.c | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)

diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 7a632c534ce5..e91db32cadfd 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -222,21 +222,25 @@ niced_granularity(struct sched_entity *curr, unsigned long granularity)
 {
 	u64 tmp;
 
+	if (likely(curr->load.weight == NICE_0_LOAD))
+		return granularity;
 	/*
-	 * Negative nice levels get the same granularity as nice-0:
+	 * Positive nice levels get the same granularity as nice-0:
 	 */
-	if (likely(curr->load.weight >= NICE_0_LOAD))
-		return granularity;
+	if (likely(curr->load.weight < NICE_0_LOAD)) {
+		tmp = curr->load.weight * (u64)granularity;
+		return (long) (tmp >> NICE_0_SHIFT);
+	}
 	/*
-	 * Positive nice level tasks get linearly finer
+	 * Negative nice level tasks get linearly finer
 	 * granularity:
 	 */
-	tmp = curr->load.weight * (u64)granularity;
+	tmp = curr->load.inv_weight * (u64)granularity;
 
 	/*
 	 * It will always fit into 'long':
 	 */
-	return (long) (tmp >> NICE_0_SHIFT);
+	return (long) (tmp >> WMULT_SHIFT);
 }
 
 static inline void
--
2.39.5
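
For reference, below is a minimal user-space sketch of the patched
niced_granularity() behaviour. It is not kernel code: the signature is
simplified to take a load_weight directly, and NICE_0_LOAD = 1024
(NICE_0_SHIFT = 10), WMULT_SHIFT = 32 and inv_weight ~= 2^32 / weight are
assumptions modelled on the CFS constants of that era. Nice-0 tasks keep the
full granularity, positive nice levels keep the previous weight-based scaling,
and negative nice levels are now scaled by inv_weight, so heavier (negatively
reniced) tasks get a proportionately finer granularity and reschedule more
often.

/*
 * Minimal user-space sketch of the patched niced_granularity() logic.
 * Not kernel code: NICE_0_LOAD = 1024 (NICE_0_SHIFT = 10), WMULT_SHIFT = 32
 * and inv_weight ~= 2^32 / weight are assumptions modelled on the CFS
 * constants of that era.
 */
#include <stdio.h>
#include <stdint.h>

#define NICE_0_SHIFT	10
#define NICE_0_LOAD	(1UL << NICE_0_SHIFT)
#define WMULT_SHIFT	32

struct load_weight {
	unsigned long weight;		/* task load weight */
	unsigned long inv_weight;	/* precomputed ~2^32 / weight */
};

static long niced_granularity(const struct load_weight *load,
			      unsigned long granularity)
{
	uint64_t tmp;

	if (load->weight == NICE_0_LOAD)
		return granularity;
	/* Positive nice levels (weight < NICE_0_LOAD): unchanged scaling. */
	if (load->weight < NICE_0_LOAD) {
		tmp = (uint64_t)load->weight * granularity;
		return (long)(tmp >> NICE_0_SHIFT);
	}
	/* Negative nice levels: scale by inv_weight for finer granularity. */
	tmp = (uint64_t)load->inv_weight * granularity;
	return (long)(tmp >> WMULT_SHIFT);
}

int main(void)
{
	/* Hypothetical sample weights: one nice-0, one positive-nice and
	 * one negative-nice task. */
	struct load_weight w[] = {
		{ 1024, (unsigned long)((1ULL << 32) / 1024) },
		{  335, (unsigned long)((1ULL << 32) /  335) },
		{ 3121, (unsigned long)((1ULL << 32) / 3121) },
	};
	unsigned long gran = 2000000;	/* e.g. 2 ms expressed in ns */

	for (int i = 0; i < 3; i++)
		printf("weight %4lu -> granularity %ld\n",
		       w[i].weight, niced_granularity(&w[i], gran));
	return 0;
}

Built with a plain cc invocation, this prints one granularity value per sample
weight; the exact numbers depend on the assumed constants above.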