sched: min_vruntime fix
diff --git a/kernel/sched.c b/kernel/sched.c
index b8ee864c7481ac635c39525790755dec6c67efff..9df9ba73cb7a941ea7a8feb18a58b35e726b26a4 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4268,11 +4268,10 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
        oldprio = p->prio;
        on_rq = p->se.on_rq;
        running = task_current(rq, p);
-       if (on_rq) {
+       if (on_rq)
                dequeue_task(rq, p, 0);
-               if (running)
-                       p->sched_class->put_prev_task(rq, p);
-       }
+       if (running)
+               p->sched_class->put_prev_task(rq, p);
 
        if (rt_prio(prio))
                p->sched_class = &rt_sched_class;
@@ -4281,10 +4280,9 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 
        p->prio = prio;
 
+       if (running)
+               p->sched_class->set_curr_task(rq);
        if (on_rq) {
-               if (running)
-                       p->sched_class->set_curr_task(rq);
-
                enqueue_task(rq, p, 0);
 
                check_class_changed(rq, p, prev_class, oldprio, running);
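[ Note: the reshuffle above, and the matching ones in sched_setscheduler()
  and sched_move_task() further down, decouple two pieces of state that the
  old nesting conflated: on_rq says whether the task is queued on a
  runqueue, running says whether it is rq->curr.  put_prev_task() and
  set_curr_task() belong to the "running" condition alone, so they are
  hoisted out of the on_rq block.  A condensed sketch of the resulting
  shape, using only identifiers from the hunks (surrounding context
  abridged):

	on_rq   = p->se.on_rq;			/* queued on a runqueue?  */
	running = task_current(rq, p);		/* currently rq->curr?    */

	if (on_rq)
		dequeue_task(rq, p, 0);		/* off the old queue      */
	if (running)
		p->sched_class->put_prev_task(rq, p);	/* stop accounting */

	/* ... change p->prio, p->sched_class or the task group ... */

	if (running)
		p->sched_class->set_curr_task(rq);	/* restart accounting */
	if (on_rq)
		enqueue_task(rq, p, 0);		/* onto the new queue     */

  sched_setscheduler() uses deactivate_task()/activate_task() instead of
  the raw dequeue/enqueue pair, but follows the same ordering. ]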
@@ -4581,19 +4579,17 @@ recheck:
        update_rq_clock(rq);
        on_rq = p->se.on_rq;
        running = task_current(rq, p);
-       if (on_rq) {
+       if (on_rq)
                deactivate_task(rq, p, 0);
-               if (running)
-                       p->sched_class->put_prev_task(rq, p);
-       }
+       if (running)
+               p->sched_class->put_prev_task(rq, p);
 
        oldprio = p->prio;
        __setscheduler(rq, p, policy, param->sched_priority);
 
+       if (running)
+               p->sched_class->set_curr_task(rq);
        if (on_rq) {
-               if (running)
-                       p->sched_class->set_curr_task(rq);
-
                activate_task(rq, p, 0);
 
                check_class_changed(rq, p, prev_class, oldprio, running);
@@ -5881,7 +5877,8 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
                spin_unlock_irq(&rq->lock);
                break;
 
-       case CPU_DOWN_PREPARE:
+       case CPU_DYING:
+       case CPU_DYING_FROZEN:
                /* Update our root-domain */
                rq = cpu_rq(cpu);
                spin_lock_irqsave(&rq->lock, flags);
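[ Note: this moves the root-domain update to a later hotplug phase.
  CPU_DOWN_PREPARE notifiers run early, in process context, while the
  outgoing CPU is still scheduling; CPU_DYING notifiers run on the dying
  CPU itself, inside stop_machine with interrupts disabled, so nothing can
  race with the runqueue while it is detached from its root domain.  The
  _FROZEN variants are the same phases on the suspend/resume path.  A
  minimal sketch of a hotplug notifier of this era distinguishing the
  phases (my_cpu_callback is a placeholder name, not from the patch):

	static int __cpuinit my_cpu_callback(struct notifier_block *nfb,
					     unsigned long action, void *hcpu)
	{
		switch (action) {
		case CPU_DOWN_PREPARE:
		case CPU_DOWN_PREPARE_FROZEN:
			/* early: process context, CPU still runs tasks */
			break;
		case CPU_DYING:
		case CPU_DYING_FROZEN:
			/* late: on the dying CPU, IRQs off, stop_machine */
			break;
		case CPU_DEAD:
		case CPU_DEAD_FROZEN:
			/* afterwards: process context, CPU is gone */
			break;
		}
		return NOTIFY_OK;
	}
]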
@@ -7617,11 +7614,10 @@ void sched_move_task(struct task_struct *tsk)
        running = task_current(rq, tsk);
        on_rq = tsk->se.on_rq;
 
-       if (on_rq) {
+       if (on_rq)
                dequeue_task(rq, tsk, 0);
-               if (unlikely(running))
-                       tsk->sched_class->put_prev_task(rq, tsk);
-       }
+       if (unlikely(running))
+               tsk->sched_class->put_prev_task(rq, tsk);
 
        set_task_rq(tsk, task_cpu(tsk));
 
@@ -7630,11 +7626,10 @@ void sched_move_task(struct task_struct *tsk)
                tsk->sched_class->moved_group(tsk);
 #endif
 
-       if (on_rq) {
-               if (unlikely(running))
-                       tsk->sched_class->set_curr_task(rq);
+       if (unlikely(running))
+               tsk->sched_class->set_curr_task(rq);
+       if (on_rq)
                enqueue_task(rq, tsk, 0);
-       }
 
        task_rq_unlock(rq, &flags);
 }
@@ -7750,6 +7745,17 @@ static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
        return total + to_ratio(period, runtime) < global_ratio;
 }
 
+/* Must be called with tasklist_lock held */
+static inline int tg_has_rt_tasks(struct task_group *tg)
+{
+       struct task_struct *g, *p;
+       do_each_thread(g, p) {
+               if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg)
+                       return 1;
+       } while_each_thread(g, p);
+       return 0;
+}
+
 int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
 {
        u64 rt_runtime, rt_period;
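[ Note: tg_has_rt_tasks() walks every thread in the system via
  do_each_thread()/while_each_thread(), which is only safe against tasks
  exiting while tasklist_lock is held; a read_lock suffices for this
  read-only scan, and the caller below takes it.  The usage shape of the
  iterator (sketch; the body runs in atomic context and must not sleep):

	struct task_struct *g, *p;

	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		/* inspect p here; no sleeping under the rwlock */
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);
]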
@@ -7761,12 +7767,18 @@ int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
                rt_runtime = RUNTIME_INF;
 
        mutex_lock(&rt_constraints_mutex);
+       read_lock(&tasklist_lock);
+       if (rt_runtime_us == 0 && tg_has_rt_tasks(tg)) {
+               err = -EBUSY;
+               goto unlock;
+       }
        if (!__rt_schedulable(tg, rt_period, rt_runtime)) {
                err = -EINVAL;
                goto unlock;
        }
        tg->rt_runtime = rt_runtime;
  unlock:
+       read_unlock(&tasklist_lock);
        mutex_unlock(&rt_constraints_mutex);
 
        return err;
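[ Note: the resulting lock ordering in sched_group_set_rt_runtime() is
  rt_constraints_mutex first, tasklist_lock nested inside it, with both
  error paths leaving through the shared unlock label.  For userspace this
  means writing 0 to a group's rt_runtime control now fails with EBUSY
  while the group still contains realtime tasks, rather than leaving them
  with no budget at all.  Condensed post-patch control flow (identifiers
  from the hunk; the rest of the function abridged):

	mutex_lock(&rt_constraints_mutex);
	read_lock(&tasklist_lock);
	if (rt_runtime_us == 0 && tg_has_rt_tasks(tg)) {
		err = -EBUSY;		/* group still has RT tasks */
		goto unlock;
	}
	if (!__rt_schedulable(tg, rt_period, rt_runtime)) {
		err = -EINVAL;		/* would break the global ratio */
		goto unlock;
	}
	tg->rt_runtime = rt_runtime;
 unlock:
	read_unlock(&tasklist_lock);
	mutex_unlock(&rt_constraints_mutex);
]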