git.karo-electronics.de Git - karo-tx-linux.git/blobdiff - kernel/sched_rt.c
sched: Implement hierarchical task accounting for SCHED_OTHER
[karo-tx-linux.git] / kernel / sched_rt.c
index 8e189455ed1270eb1e980d9cefebb4474934cacb..a9d3c6bc684adcc7d68e598d878912ce6af686e2 100644 (file)
@@ -124,21 +124,33 @@ static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
        update_rt_migration(rt_rq);
 }
 
+static inline int has_pushable_tasks(struct rq *rq)
+{
+       return !plist_head_empty(&rq->rt.pushable_tasks);
+}
+
 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
 {
        plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
        plist_node_init(&p->pushable_tasks, p->prio);
        plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
+
+       /* Update the highest prio pushable task */
+       if (p->prio < rq->rt.highest_prio.next)
+               rq->rt.highest_prio.next = p->prio;
 }
 
 static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
 {
        plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
-}
 
-static inline int has_pushable_tasks(struct rq *rq)
-{
-       return !plist_head_empty(&rq->rt.pushable_tasks);
+       /* Update the new highest prio pushable task */
+       if (has_pushable_tasks(rq)) {
+               p = plist_first_entry(&rq->rt.pushable_tasks,
+                                     struct task_struct, pushable_tasks);
+               rq->rt.highest_prio.next = p->prio;
+       } else
+               rq->rt.highest_prio.next = MAX_RT_PRIO;
 }
 
 #else
@@ -698,47 +710,13 @@ static void update_curr_rt(struct rq *rq)
 
 #if defined CONFIG_SMP
 
-static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu);
-
-static inline int next_prio(struct rq *rq)
-{
-       struct task_struct *next = pick_next_highest_task_rt(rq, rq->cpu);
-
-       if (next)
-               return next->prio;
-       else
-               return MAX_RT_PRIO;
-}
-
 static void
 inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
 {
        struct rq *rq = rq_of_rt_rq(rt_rq);
 
-       if (prio < prev_prio) {
-
-               /*
-                * If the new task is higher in priority than anything on the
-                * run-queue, we know that the previous high becomes our
-                * next-highest.
-                */
-               rt_rq->highest_prio.next = prev_prio;
-
-               if (rq->online)
-                       cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
-
-       } else if (prio == rt_rq->highest_prio.curr)
-               /*
-                * If the next task is equal in priority to the highest on
-                * the run-queue, then we implicitly know that the next highest
-                * task cannot be any lower than current
-                */
-               rt_rq->highest_prio.next = prio;
-       else if (prio < rt_rq->highest_prio.next)
-               /*
-                * Otherwise, we need to recompute next-highest
-                */
-               rt_rq->highest_prio.next = next_prio(rq);
+       if (rq->online && prio < prev_prio)
+               cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
 }
 
 static void
@@ -746,9 +724,6 @@ dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
 {
        struct rq *rq = rq_of_rt_rq(rt_rq);
 
-       if (rt_rq->rt_nr_running && (prio <= rt_rq->highest_prio.next))
-               rt_rq->highest_prio.next = next_prio(rq);
-
        if (rq->online && rt_rq->highest_prio.curr != prev_prio)
                cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
 }
@@ -961,6 +936,8 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 
        if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
                enqueue_pushable_task(rq, p);
+
+       inc_nr_running(rq);
 }
 
 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
@@ -971,6 +948,8 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
        dequeue_rt_entity(rt_se);
 
        dequeue_pushable_task(rq, p);
+
+       dec_nr_running(rq);
 }
 
 /*
@@ -1017,10 +996,12 @@ select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
        struct rq *rq;
        int cpu;
 
-       if (sd_flag != SD_BALANCE_WAKE)
-               return smp_processor_id();
-
        cpu = task_cpu(p);
+
+       /* For anything but wake ups, just return the task_cpu */
+       if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
+               goto out;
+
        rq = cpu_rq(cpu);
 
        rcu_read_lock();
@@ -1059,6 +1040,7 @@ select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
        }
        rcu_read_unlock();
 
+out:
        return cpu;
 }
 
@@ -1178,7 +1160,6 @@ static struct task_struct *pick_next_task_rt(struct rq *rq)
 static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 {
        update_curr_rt(rq);
-       p->se.exec_start = 0;
 
        /*
         * The previous task needs to be made eligible for pushing
@@ -1864,4 +1845,3 @@ static void print_rt_stats(struct seq_file *m, int cpu)
        rcu_read_unlock();
 }
 #endif /* CONFIG_SCHED_DEBUG */
-