sched: speed up and simplify vslice calculations
author	Peter Zijlstra <a.p.zijlstra@chello.nl>
	Mon, 15 Oct 2007 15:00:12 +0000 (17:00 +0200)
committer	Ingo Molnar <mingo@elte.hu>
	Mon, 15 Oct 2007 15:00:12 +0000 (17:00 +0200)
speed up and simplify vslice calculations.

[ From: Mike Galbraith <efault@gmx.de>: build fix ]

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
include/linux/sched.h
kernel/sched_debug.c
kernel/sched_fair.c
kernel/sysctl.c

diff --git a/include/linux/sched.h b/include/linux/sched.h
index d5daca4bcc6b767bb59e1eac48a96d92019be944..97f736b749c291f3093319feea4fb0aae6908471 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1400,7 +1400,7 @@ extern void sched_idle_next(void);
 
 #ifdef CONFIG_SCHED_DEBUG
 extern unsigned int sysctl_sched_latency;
-extern unsigned int sysctl_sched_min_granularity;
+extern unsigned int sysctl_sched_nr_latency;
 extern unsigned int sysctl_sched_wakeup_granularity;
 extern unsigned int sysctl_sched_batch_wakeup_granularity;
 extern unsigned int sysctl_sched_child_runs_first;
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index be79cd6d9e8082d3e966683cde5abc3377b5e86f..995bbd384a97a7195238c6800bec8129ae43f296 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -210,7 +210,7 @@ static int sched_debug_show(struct seq_file *m, void *v)
 #define PN(x) \
        SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
        PN(sysctl_sched_latency);
-       PN(sysctl_sched_min_granularity);
+       PN(sysctl_sched_nr_latency);
        PN(sysctl_sched_wakeup_granularity);
        PN(sysctl_sched_batch_wakeup_granularity);
        PN(sysctl_sched_child_runs_first);
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index ec0569e59e24a7e2b21b38d26d2e64eb1e71ca75..ae2d4b08e782c4e6237208593865ceb93020cf13 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -46,7 +46,7 @@ const_debug unsigned int sysctl_sched_child_runs_first = 1;
  * Minimal preemption granularity for CPU-bound tasks:
  * (default: 2 msec, units: nanoseconds)
  */
-unsigned int sysctl_sched_min_granularity __read_mostly = 2000000ULL;
+const_debug unsigned int sysctl_sched_nr_latency = 20;
 
 /*
  * sys_sched_yield() compat mode
@@ -222,8 +222,7 @@ static inline struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
 static u64 __sched_period(unsigned long nr_running)
 {
        u64 period = sysctl_sched_latency;
-       unsigned long nr_latency =
-               sysctl_sched_latency / sysctl_sched_min_granularity;
+       unsigned long nr_latency = sysctl_sched_nr_latency;
 
        if (unlikely(nr_running > nr_latency)) {
                period *= nr_running;
@@ -245,11 +244,15 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 
 static u64 __sched_vslice(unsigned long nr_running)
 {
-       u64 period = __sched_period(nr_running);
+       unsigned long period = sysctl_sched_latency;
+       unsigned long nr_latency = sysctl_sched_nr_latency;
 
-       do_div(period, nr_running);
+       if (unlikely(nr_running > nr_latency))
+               nr_running = nr_latency;
 
-       return period;
+       period /= nr_running;
+
+       return (u64)period;
 }
 
 /*
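
For readers who want to see the arithmetic side by side, the following is a minimal userspace sketch (not part of the patch) that mirrors the old and new __sched_vslice() calculations from the hunks above. The 20 ms value for sysctl_sched_latency is an assumed default used only for illustration; the 2 ms minimum granularity and the nr_latency value of 20 are taken from the patch itself.

#include <stdio.h>

typedef unsigned long long u64;

static unsigned int sysctl_sched_latency = 20000000U;		/* 20 ms in ns (assumed default) */
static unsigned int sysctl_sched_min_granularity = 2000000U;	/* 2 ms in ns, from the removed line */
static unsigned int sysctl_sched_nr_latency = 20;		/* new tunable added by this patch */

/* Old path: derive nr_latency with a divide, then two 64-bit divides. */
static u64 vslice_old(unsigned long nr_running)
{
	u64 period = sysctl_sched_latency;
	unsigned long nr_latency =
		sysctl_sched_latency / sysctl_sched_min_granularity;

	if (nr_running > nr_latency) {
		period *= nr_running;
		period /= nr_latency;	/* do_div() in the kernel */
	}
	period /= nr_running;		/* second do_div() */
	return period;
}

/* New path: clamp nr_running and do a single unsigned long divide. */
static u64 vslice_new(unsigned long nr_running)
{
	unsigned long period = sysctl_sched_latency;
	unsigned long nr_latency = sysctl_sched_nr_latency;

	if (nr_running > nr_latency)
		nr_running = nr_latency;

	period /= nr_running;
	return (u64)period;
}

int main(void)
{
	unsigned long n;

	for (n = 1; n <= 40; n += 13)
		printf("nr_running=%2lu  old=%llu ns  new=%llu ns\n",
		       n, vslice_old(n), vslice_new(n));
	return 0;
}

With the tunables set consistently (nr_latency equal to the latency divided by the minimum granularity), both variants produce essentially the same slice, up to integer rounding; the point of the change is that the new form replaces the derived nr_latency and the two do_div() calls on a u64 with a single unsigned long division.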

diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 97b15c27407f4e2791436132ae448776ab5a5889..230ca4eb57fea7c14e584ceb295c1178420d2bdf 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -222,14 +222,11 @@ static ctl_table kern_table[] = {
 #ifdef CONFIG_SCHED_DEBUG
        {
                .ctl_name       = CTL_UNNUMBERED,
-               .procname       = "sched_min_granularity_ns",
-               .data           = &sysctl_sched_min_granularity,
+               .procname       = "sched_nr_latency",
+               .data           = &sysctl_sched_nr_latency,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
-               .proc_handler   = &proc_dointvec_minmax,
-               .strategy       = &sysctl_intvec,
-               .extra1         = &min_sched_granularity_ns,
-               .extra2         = &max_sched_granularity_ns,
+               .proc_handler   = &proc_dointvec,
        },
        {
                .ctl_name       = CTL_UNNUMBERED,
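
Since the sysctl.c hunk above replaces the bounded sched_min_granularity_ns entry with a plain proc_dointvec() handler, the new tunable shows up as an ordinary integer under the usual kern_table to /proc/sys/kernel/ mapping. A hypothetical userspace snippet (not part of the patch) to read it might look like this:

#include <stdio.h>

int main(void)
{
	/* Path follows the standard kern_table -> /proc/sys/kernel/ mapping. */
	FILE *f = fopen("/proc/sys/kernel/sched_nr_latency", "r");
	unsigned int nr_latency;

	if (!f) {
		perror("sched_nr_latency");
		return 1;
	}
	if (fscanf(f, "%u", &nr_latency) == 1)
		printf("sched_nr_latency = %u\n", nr_latency);
	fclose(f);
	return 0;
}

Writing a new value works the same way through the same proc file; note that with plain proc_dointvec() there is no longer any kernel-side min/max clamping of the value.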