sched: Implement hierarchical task accounting for SCHED_OTHER
diff --git a/kernel/sched.c b/kernel/sched.c
index ccacdbdecf452bda8769878ca6e558d13ebb4e74..cd1a531ca8ff33ce527c3a22dc57f30a38fcb314 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -311,7 +311,7 @@ struct task_group root_task_group;
 /* CFS-related fields in a runqueue */
 struct cfs_rq {
        struct load_weight load;
-       unsigned long nr_running;
+       unsigned long nr_running, h_nr_running;
 
        u64 exec_clock;
        u64 min_vruntime;
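
The new h_nr_running field counts runnable SCHED_OTHER tasks across the whole group hierarchy rooted at this cfs_rq, whereas nr_running only counts the entities queued directly on it (tasks plus group entities). A minimal sketch of how the fair-class enqueue path could maintain it; the real update lives in kernel/sched_fair.c, which is not part of this blobdiff, and the on_rq handling of the actual enqueue_task_fair() walk is omitted here:

	/*
	 * Sketch only: bump the hierarchical count on every cfs_rq from the
	 * task's own group up to the root, then do the rq-wide accounting
	 * that activate_task() no longer performs (see the hunks below).
	 */
	static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
	{
		struct sched_entity *se = &p->se;
		struct cfs_rq *cfs_rq;

		for_each_sched_entity(se) {
			cfs_rq = cfs_rq_of(se);
			enqueue_entity(cfs_rq, se, flags);
			cfs_rq->h_nr_running++;
			flags = ENQUEUE_WAKEUP;
		}

		inc_nr_running(rq);
	}

The dequeue path would mirror this with h_nr_running-- on each level and a final dec_nr_running().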
@@ -520,8 +520,6 @@ struct rq {
        int cpu;
        int online;
 
-       unsigned long avg_load_per_task;
-
        u64 rt_avg;
        u64 age_stamp;
        u64 idle_stamp;
@@ -1569,11 +1567,9 @@ static unsigned long cpu_avg_load_per_task(int cpu)
        unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
 
        if (nr_running)
-               rq->avg_load_per_task = rq->load.weight / nr_running;
-       else
-               rq->avg_load_per_task = 0;
+               return rq->load.weight / nr_running;
 
-       return rq->avg_load_per_task;
+       return 0;
 }
 
 #ifdef CONFIG_PREEMPT
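
With the cached rq->avg_load_per_task field removed above, cpu_avg_load_per_task() computes the average on demand and simply returns 0 for an idle runqueue. A worked illustration of the arithmetic, assuming a hypothetical runqueue with three runnable nice-0 tasks, each contributing the nice-0 load weight of 1024:

	/* Illustration only: hypothetical runqueue state, not kernel code. */
	unsigned long weight     = 3 * 1024;	/* rq->load.weight */
	unsigned long nr_running = 3;		/* rq->nr_running  */
	unsigned long avg        = nr_running ? weight / nr_running : 0;	/* 1024 */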
@@ -1806,7 +1802,6 @@ static void activate_task(struct rq *rq, struct task_struct *p, int flags)
                rq->nr_uninterruptible--;
 
        enqueue_task(rq, p, flags);
-       inc_nr_running(rq);
 }
 
 /*
@@ -1818,7 +1813,6 @@ static void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
                rq->nr_uninterruptible++;
 
        dequeue_task(rq, p, flags);
-       dec_nr_running(rq);
 }
 
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
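
Dropping inc_nr_running()/dec_nr_running() from activate_task()/deactivate_task() moves the rq-wide task accounting into each scheduling class's own enqueue/dequeue path. A simplified sketch of what that implies for a class's enqueue hook; the actual changes live in kernel/sched_fair.c and kernel/sched_rt.c, not in this file, and the class-specific body is elided:

	static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
	{
		/* ... existing RT enqueue work elided ... */

		/*
		 * Each class now updates rq->nr_running itself instead of
		 * relying on activate_task() to do it.
		 */
		inc_nr_running(rq);
	}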
@@ -2847,20 +2841,24 @@ void sched_fork(struct task_struct *p)
         */
        p->state = TASK_RUNNING;
 
+       /*
+        * Make sure we do not leak PI boosting priority to the child.
+        */
+       p->prio = current->normal_prio;
+
        /*
         * Revert to default priority/policy on fork if requested.
         */
        if (unlikely(p->sched_reset_on_fork)) {
-               if (p->policy == SCHED_FIFO || p->policy == SCHED_RR) {
+               if (task_has_rt_policy(p)) {
                        p->policy = SCHED_NORMAL;
-                       p->normal_prio = p->static_prio;
-               }
-
-               if (PRIO_TO_NICE(p->static_prio) < 0) {
                        p->static_prio = NICE_TO_PRIO(0);
-                       p->normal_prio = p->static_prio;
-                       set_load_weight(p);
-               }
+                       p->rt_priority = 0;
+               } else if (PRIO_TO_NICE(p->static_prio) < 0)
+                       p->static_prio = NICE_TO_PRIO(0);
+
+               p->prio = p->normal_prio = __normal_prio(p);
+               set_load_weight(p);
 
                /*
                 * We don't need the reset flag anymore after the fork. It has
@@ -2869,11 +2867,6 @@ void sched_fork(struct task_struct *p)
                p->sched_reset_on_fork = 0;
        }
 
-       /*
-        * Make sure we do not leak PI boosting priority to the child.
-        */
-       p->prio = current->normal_prio;
-
        if (!rt_prio(p->prio))
                p->sched_class = &fair_sched_class;
 
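A worked example of the restructured reset path above, assuming a SCHED_FIFO parent that forks with sched_reset_on_fork set (priority values follow the standard maps, where MAX_RT_PRIO is 100 and nice 0 maps to priority 120):

	/* Illustration of the resulting child state, not kernel code. */
	p->policy      = SCHED_NORMAL;
	p->rt_priority = 0;
	p->static_prio = NICE_TO_PRIO(0);		/* 100 + 0 + 20 = 120 */
	p->prio = p->normal_prio = __normal_prio(p);	/* == p->static_prio  */
	set_load_weight(p);				/* nice-0 load weight */

Because the PI-boost guard now runs before this block, the reset path overrides any inherited boosted priority with the recomputed normal priority.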
@@ -4263,7 +4256,7 @@ pick_next_task(struct rq *rq)
         * Optimization: we know that if all tasks are in
         * the fair class we can call that function directly:
         */
-       if (likely(rq->nr_running == rq->cfs.nr_running)) {
+       if (likely(rq->nr_running == rq->cfs.h_nr_running)) {
                p = fair_sched_class.pick_next_task(rq);
                if (likely(p))
                        return p;
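
The fast-path test in pick_next_task() now compares against the hierarchical count, because with group scheduling rq->cfs.nr_running only reflects top-level entities. A hypothetical runqueue state showing why the old comparison missed the optimization:

	/*
	 * Hypothetical state: one task group holding three fair tasks,
	 * plus one fair task queued directly at the top level.
	 *
	 *   rq->nr_running       == 4    all runnable tasks
	 *   rq->cfs.nr_running   == 2    one task entity + one group entity
	 *   rq->cfs.h_nr_running == 4    tasks across the whole hierarchy
	 *
	 * The old test (4 == 2) fell through to the slow per-class loop even
	 * though every runnable task is fair-class; comparing against
	 * h_nr_running (4 == 4) keeps the direct call into the fair class.
	 */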