Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
author    Linus Torvalds <torvalds@linux-foundation.org>
          Sat, 19 Apr 2014 17:40:51 +0000 (10:40 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Sat, 19 Apr 2014 17:40:51 +0000 (10:40 -0700)
Pull scheduler fixes from Ingo Molnar:
 "Two fixes:

   - a SCHED_DEADLINE task selection fix
   - a sched/numa related lockdep splat fix"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched: Check for stop task appearance when balancing happens
  sched/numa: Fix task_numa_free() lockdep splat

kernel/sched/deadline.c
kernel/sched/fair.c
kernel/sched/rt.c
kernel/sched/sched.h

kernel/sched/deadline.c
index 27ef409255253367d35fd598ddb01550612df040..b08095786cb8fff0c96773eb5f6a582da503bcb8 100644 (file)
@@ -1021,8 +1021,17 @@ struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
 
        dl_rq = &rq->dl;
 
-       if (need_pull_dl_task(rq, prev))
+       if (need_pull_dl_task(rq, prev)) {
                pull_dl_task(rq);
+               /*
+                * pull_dl_task() can drop (and re-acquire) rq->lock; this
+                * means a stop task can slip in, in which case we need to
+                * re-start task selection.
+                */
+               if (rq->stop && rq->stop->on_rq)
+                       return RETRY_TASK;
+       }
+
        /*
         * When prev is DL, we may throttle it in put_prev_task().
         * So, we update time before we check for dl_nr_running.
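
Note on the mechanism: RETRY_TASK is a sentinel value (not a real task pointer) that a scheduling class's pick_next_task() returns to make the core pick loop in kernel/sched/core.c restart class iteration from the highest-priority class; the idle_balance() hunk further down uses the same convention by returning -1, which pick_next_task_fair() in this era turns into RETRY_TASK. A minimal userspace sketch of the protocol (invented names, compilable with gcc; deliberately not kernel code):

#include <stdio.h>
#include <stdbool.h>

#define RETRY_TASK ((const char *)-1)   /* sentinel, as in the kernel */

static bool stop_task_runnable;

/*
 * Lower-priority class: pulling work forces it to drop the runqueue
 * lock, and a stop task may become runnable in that window.
 */
static const char *pick_next_task_dl_demo(void)
{
        stop_task_runnable = true;      /* simulate the race window */
        if (stop_task_runnable)
                return RETRY_TASK;      /* caller must restart selection */
        return "dl task";
}

static const char *pick_next_task_stop_demo(void)
{
        return stop_task_runnable ? "stop task" : NULL;
}

/* Highest-priority class first, like for_each_class() in the kernel. */
static const char *(*const classes[])(void) = {
        pick_next_task_stop_demo,
        pick_next_task_dl_demo,
};

int main(void)
{
        const char *p;
        unsigned int i;
again:
        for (i = 0; i < sizeof(classes) / sizeof(classes[0]); i++) {
                p = classes[i]();
                if (!p)
                        continue;
                if (p == RETRY_TASK)
                        goto again;     /* a higher class slipped in: restart */
                printf("picked: %s\n", p);
                return 0;
        }
        return 1;
}

Run it and the deadline class's first pick is abandoned in favour of the stop task that "appeared" while the lock was dropped, which is exactly the case the hunk above now detects.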
kernel/sched/fair.c
index 7e9bd0b1fa9ef1aa16880a5a10601374c7bb618b..7570dd969c2838e9aab12ba9cb2c24cb87e21855 100644 (file)
@@ -1497,7 +1497,7 @@ static void task_numa_placement(struct task_struct *p)
        /* If the task is part of a group prevent parallel updates to group stats */
        if (p->numa_group) {
                group_lock = &p->numa_group->lock;
-               spin_lock(group_lock);
+               spin_lock_irq(group_lock);
        }
 
        /* Find the node with the highest number of faults */
@@ -1572,7 +1572,7 @@ static void task_numa_placement(struct task_struct *p)
                        }
                }
 
-               spin_unlock(group_lock);
+               spin_unlock_irq(group_lock);
        }
 
        /* Preferred node as the node with the most faults */
@@ -1677,7 +1677,8 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags,
        if (!join)
                return;
 
-       double_lock(&my_grp->lock, &grp->lock);
+       BUG_ON(irqs_disabled());
+       double_lock_irq(&my_grp->lock, &grp->lock);
 
        for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) {
                my_grp->faults[i] -= p->numa_faults_memory[i];
@@ -1691,7 +1692,7 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags,
        grp->nr_tasks++;
 
        spin_unlock(&my_grp->lock);
-       spin_unlock(&grp->lock);
+       spin_unlock_irq(&grp->lock);
 
        rcu_assign_pointer(p->numa_group, grp);
 
@@ -1710,14 +1711,14 @@ void task_numa_free(struct task_struct *p)
        void *numa_faults = p->numa_faults_memory;
 
        if (grp) {
-               spin_lock(&grp->lock);
+               spin_lock_irq(&grp->lock);
                for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
                        grp->faults[i] -= p->numa_faults_memory[i];
                grp->total_faults -= p->total_numa_faults;
 
                list_del(&p->numa_entry);
                grp->nr_tasks--;
-               spin_unlock(&grp->lock);
+               spin_unlock_irq(&grp->lock);
                rcu_assign_pointer(p->numa_group, NULL);
                put_numa_group(grp);
        }
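
Note on why every acquisition site is converted: lockdep complains when a lock is taken sometimes with interrupts enabled and sometimes from an interrupt-disabled or interrupt context, because an interrupt arriving while the irq-enabled holder is inside the critical section could try to take the lock again and deadlock; the cure is to disable interrupts at every site. A kernel-style sketch of the discipline (invented names; it would need module boilerplate to actually build):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_group_lock);
static unsigned long demo_faults;

/* Runs in process context with interrupts enabled: _irq is enough. */
static void demo_stats_update(void)
{
        spin_lock_irq(&demo_group_lock);
        demo_faults++;
        spin_unlock_irq(&demo_group_lock);
}

/*
 * May be reached with the interrupt state unknown (or already
 * disabled): irqsave/irqrestore preserves it. Plain spin_lock() in
 * either function would recreate the inconsistency lockdep gripes
 * about in the hunks above.
 */
static void demo_stats_reset(void)
{
        unsigned long flags;

        spin_lock_irqsave(&demo_group_lock, flags);
        demo_faults = 0;
        spin_unlock_irqrestore(&demo_group_lock, flags);
}

In the hunks above the plain _irq variants suffice because those paths enter with interrupts enabled; note also that task_numa_group() releases my_grp->lock with a plain spin_unlock() and only the final spin_unlock_irq(&grp->lock) re-enables interrupts.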
@@ -6727,7 +6728,8 @@ static int idle_balance(struct rq *this_rq)
 out:
        /* Is there a task of a high priority class? */
        if (this_rq->nr_running != this_rq->cfs.h_nr_running &&
-           (this_rq->dl.dl_nr_running ||
+           ((this_rq->stop && this_rq->stop->on_rq) ||
+            this_rq->dl.dl_nr_running ||
             (this_rq->rt.rt_nr_running && !rt_rq_throttled(&this_rq->rt))))
                pulled_task = -1;
 
kernel/sched/rt.c
index d8cdf1618551c80143e0f5fd38de556d089eb1f6..bd2267ad404fa78de092b8bc8f228cfea2dca87b 100644 (file)
@@ -1362,10 +1362,11 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev)
                pull_rt_task(rq);
                /*
                 * pull_rt_task() can drop (and re-acquire) rq->lock; this
-                * means a dl task can slip in, in which case we need to
-                * re-start task selection.
+                * means a dl or stop task can slip in, in which case we need
+                * to re-start task selection.
                 */
-               if (unlikely(rq->dl.dl_nr_running))
+               if (unlikely((rq->stop && rq->stop->on_rq) ||
+                            rq->dl.dl_nr_running))
                        return RETRY_TASK;
        }
 
kernel/sched/sched.h
index c9007f28d3a222ca97b5fb98210b2ecc1e756b7a..456e492a3dca37c13d7cb7b57a51965bfa18d6b3 100644 (file)
@@ -1385,6 +1385,15 @@ static inline void double_lock(spinlock_t *l1, spinlock_t *l2)
        spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
 }
 
+static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2)
+{
+       if (l1 > l2)
+               swap(l1, l2);
+
+       spin_lock_irq(l1);
+       spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
+}
+
 static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2)
 {
        if (l1 > l2)
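
The new double_lock_irq() helper combines two idioms: interrupts are disabled once via spin_lock_irq() on the first lock only (they stay off until the matching unlock), and two locks of the same class are always taken in address order so concurrent callers that pass them in opposite order cannot ABBA-deadlock; spin_lock_nested(..., SINGLE_DEPTH_NESTING) tells lockdep the second acquisition of the same lock class is intentional. A runnable userspace sketch of the address-ordering idiom (pthreads, invented names; comparing pointers to distinct objects is implementation-defined, just as in the kernel helper):

#include <pthread.h>
#include <stdio.h>

/* Always take two same-class locks in address order. */
static void double_lock_demo(pthread_mutex_t *l1, pthread_mutex_t *l2)
{
        if (l1 > l2) {                  /* impose one global order */
                pthread_mutex_t *tmp = l1;
                l1 = l2;
                l2 = tmp;
        }
        pthread_mutex_lock(l1);
        pthread_mutex_lock(l2);
}

static pthread_mutex_t a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t b = PTHREAD_MUTEX_INITIALIZER;

/* Each thread names the locks in a different order; the helper makes
 * the actual acquisition order identical, so no ABBA deadlock. */
static void *worker(void *arg)
{
        pthread_mutex_t **l = arg;
        int i;

        for (i = 0; i < 100000; i++) {
                double_lock_demo(l[0], l[1]);
                pthread_mutex_unlock(l[0]);
                pthread_mutex_unlock(l[1]);
        }
        return NULL;
}

int main(void)
{
        pthread_mutex_t *ab[] = { &a, &b };
        pthread_mutex_t *ba[] = { &b, &a };
        pthread_t t1, t2;

        pthread_create(&t1, NULL, worker, ab);
        pthread_create(&t2, NULL, worker, ba);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        puts("done: no deadlock");
        return 0;
}

Build with gcc -pthread; without the address swap in double_lock_demo() the two workers would eventually deadlock holding one lock each.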