diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 3dab1ff83c4fd280994c36dddf634370e56dd44f..6c091d6e159d01fb23c0dd786495c533cb005c5b 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -20,6 +20,8 @@
  *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
  */
 
+#include <linux/latencytop.h>
+
 /*
  * Targeted preemption latency for CPU-bound tasks:
  * (default: 20ms * (1 + ilog(ncpus)), units: nanoseconds)
@@ -383,6 +385,9 @@ update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
        schedstat_set(se->wait_max, max(se->wait_max,
                        rq_of(cfs_rq)->clock - se->wait_start));
+       schedstat_set(se->wait_count, se->wait_count + 1);
+       schedstat_set(se->wait_sum, se->wait_sum +
+                       rq_of(cfs_rq)->clock - se->wait_start);
        schedstat_set(se->wait_start, 0);
 }
 
@@ -434,6 +439,7 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 #ifdef CONFIG_SCHEDSTATS
        if (se->sleep_start) {
                u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;
+               struct task_struct *tsk = task_of(se);
 
                if ((s64)delta < 0)
                        delta = 0;
@@ -443,9 +449,12 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 
                se->sleep_start = 0;
                se->sum_sleep_runtime += delta;
+
+               account_scheduler_latency(tsk, delta >> 10, 1);
        }
        if (se->block_start) {
                u64 delta = rq_of(cfs_rq)->clock - se->block_start;
+               struct task_struct *tsk = task_of(se);
 
                if ((s64)delta < 0)
                        delta = 0;
@@ -462,11 +471,11 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
                 * time that the task spent sleeping:
                 */
                if (unlikely(prof_on == SLEEP_PROFILING)) {
-                       struct task_struct *tsk = task_of(se);
 
                        profile_hits(SLEEP_PROFILING, (void *)get_wchan(tsk),
                                     delta >> 20);
                }
+               account_scheduler_latency(tsk, delta >> 10, 0);
        }
 #endif
 }
@@ -511,7 +520,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 
        if (!initial) {
                /* sleeps upto a single latency don't count. */
-               if (sched_feat(NEW_FAIR_SLEEPERS) && entity_is_task(se))
+               if (sched_feat(NEW_FAIR_SLEEPERS))
                        vruntime -= sysctl_sched_latency;
 
                /* ensure we never gain time by being placed backwards. */
@@ -1097,7 +1106,11 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
        }
 
        gran = sysctl_sched_wakeup_granularity;
-       if (unlikely(se->load.weight != NICE_0_LOAD))
+       /*
+        * More easily preempt - nice tasks, while not making
+        * it harder for + nice tasks.
+        */
+       if (unlikely(se->load.weight > NICE_0_LOAD))
                gran = calc_delta_fair(gran, &se->load);
 
        if (pse->vruntime + gran < se->vruntime)
@@ -1428,9 +1441,9 @@ static void print_cfs_stats(struct seq_file *m, int cpu)
 #ifdef CONFIG_FAIR_GROUP_SCHED
        print_cfs_rq(m, cpu, &cpu_rq(cpu)->cfs);
 #endif
-       lock_task_group_list();
+       rcu_read_lock();
        for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
                print_cfs_rq(m, cpu, cfs_rq);
-       unlock_task_group_list();
+       rcu_read_unlock();
 }
 #endif
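
Note on the schedstats hunk above: wait_start and the deltas come from the rq clock, so the new wait_sum field accumulates nanoseconds and wait_count the number of completed wait periods, while account_scheduler_latency() is handed delta >> 10, i.e. the sleep/block time scaled down to roughly microseconds for latencytop. The sketch below shows how the two new counters could be combined into an average wait latency; it is illustrative only and not part of the diff, the helper name se_avg_wait_ns is hypothetical, and it assumes CONFIG_SCHEDSTATS is enabled so the sched_entity fields are present.

/*
 * Illustrative sketch, not part of the patch: average wait latency
 * derived from the wait_sum / wait_count schedstats added above.
 * wait_sum holds accumulated rq clock deltas, i.e. nanoseconds.
 */
static inline u64 se_avg_wait_ns(struct sched_entity *se)
{
	/* Guard against dividing by a zero wait_count. */
	return se->wait_count ? se->wait_sum / se->wait_count : 0;
}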