sched/core: Fix endless loop in pick_next_task()
author     Kirill Tkhai <ktkhai@parallels.com>
           Thu, 6 Mar 2014 09:32:01 +0000 (13:32 +0400)
committer  Ingo Molnar <mingo@kernel.org>
           Tue, 11 Mar 2014 11:05:39 +0000 (12:05 +0100)
1) Single-CPU machine case.

When the rq has only RT tasks, but none of them can be picked
because of throttling, we enter an endless loop.

pick_next_task_{dl,rt} return NULL.

In pick_next_task_fair() we permanently go to retry:

	if (rq->nr_running != rq->cfs.h_nr_running)
		return RETRY_TASK;

(rq->nr_running is not decremented when an rt_rq becomes
throttled).

There is no chance to unthrottle any rt_rq or to wake a fair
task here, because the rq is locked permanently and interrupts
are disabled (see the retry-loop sketch after case 2).

2) In the SMP case this can cause a hang too: although we
   unlock the rq in idle_balance(), interrupts are still
   disabled.
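
The spin is in the scheduling-class walk of pick_next_task()
(kernel/sched/core.c). A simplified paraphrase of that loop, not
the verbatim source:

	again:
		for_each_class(class) {
			p = class->pick_next_task(rq, prev);
			if (p) {
				if (unlikely(p == RETRY_TASK))
					goto again;
				return p;
			}
		}

In case 1, dl and rt return NULL, pick_next_task_fair() falls
into its idle path, idle_balance() sees nr_running !=
cfs.h_nr_running and reports -1, fair returns RETRY_TASK, and the
walk restarts, all with rq->lock held and interrupts off, so the
timer that would unthrottle the rt_rq can never fire.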

The solution is to check whether the DL and RT classes actually
have a pickable task, instead of only comparing the nr_running
sums; the annotated test below spells this out.
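
The repaired idle_balance() test reads as follows (the same
change as in the kernel/sched/fair.c hunk below, with editorial
comments added):

	if (this_rq->nr_running != this_rq->cfs.h_nr_running && /* non-CFS tasks exist */
	    (this_rq->dl.dl_nr_running ||                       /* and DL can supply one, */
	     (this_rq->rt.rt_nr_running &&                      /* or RT has tasks that */
	      !rt_rq_throttled(&this_rq->rt))))                 /* are not throttled */
		pulled_task = -1; /* pick_next_task_fair() then returns RETRY_TASK */

so RETRY_TASK is only requested when a higher-priority class
really has something to pick.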

Signed-off-by: Kirill Tkhai <ktkhai@parallels.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1394098321.19290.11.camel@tkhai
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/sched/fair.c
kernel/sched/rt.c
kernel/sched/sched.h

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index b956e70fc5033904870cbb6f5d3d347fef6c5ae2..10db4a87ad72186203d892bb96df66a6b28beee2 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6728,7 +6728,9 @@ static int idle_balance(struct rq *this_rq)
 
 out:
        /* Is there a task of a high priority class? */
-       if (this_rq->nr_running != this_rq->cfs.h_nr_running)
+       if (this_rq->nr_running != this_rq->cfs.h_nr_running &&
+           (this_rq->dl.dl_nr_running ||
+            (this_rq->rt.rt_nr_running && !rt_rq_throttled(&this_rq->rt))))
                pulled_task = -1;
 
        if (pulled_task) {
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index f3cee0a63b76667e356876a704eae3732b9d71b1..d8cdf1618551c80143e0f5fd38de556d089eb1f6 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -470,11 +470,6 @@ static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
                dequeue_rt_entity(rt_se);
 }
 
-static inline int rt_rq_throttled(struct rt_rq *rt_rq)
-{
-       return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
-}
-
 static int rt_se_boosted(struct sched_rt_entity *rt_se)
 {
        struct rt_rq *rt_rq = group_rt_rq(rt_se);
@@ -545,11 +540,6 @@ static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 {
 }
 
-static inline int rt_rq_throttled(struct rt_rq *rt_rq)
-{
-       return rt_rq->rt_throttled;
-}
-
 static inline const struct cpumask *sched_rt_period_mask(void)
 {
        return cpu_online_mask;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 378bff76267f7c11e5cabcef28c4c7b96a4aad3b..f2de7a17562053b30f9e72b298ae46752a0d5d72 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -423,6 +423,18 @@ struct rt_rq {
 #endif
 };
 
+#ifdef CONFIG_RT_GROUP_SCHED
+static inline int rt_rq_throttled(struct rt_rq *rt_rq)
+{
+       return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
+}
+#else
+static inline int rt_rq_throttled(struct rt_rq *rt_rq)
+{
+       return rt_rq->rt_throttled;
+}
+#endif
+
 /* Deadline class' related fields in a runqueue */
 struct dl_rq {
        /* runqueue is an rbtree, ordered by deadline */