sched: Fix select_task_rq() vs hotplug issues
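
Factor the fallback-runqueue selection out of move_task_off_dead_cpu()
into a new helper, select_fallback_rq(), and use it from
select_task_rq() as well. The wakeup path used to pick any allowed,
active CPU and BUG_ON() when that intersection came up empty, which it
can when racing against hotplug updates of cpu_active_mask. The shared
helper instead degrades gracefully: same-node CPUs first, then any
allowed CPU, then widening the task's affinity to its cpuset.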
diff --git a/kernel/sched.c b/kernel/sched.c
index 279f1ef0c4d8b8694c0c226519ff3beab7ff4d91..7a93b709c8efa9c5c9134e491689b473b9d464f0 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2317,6 +2317,43 @@ void task_oncpu_function_call(struct task_struct *p,
 }
 
 #ifdef CONFIG_SMP
+static int select_fallback_rq(int cpu, struct task_struct *p)
+{
+       int dest_cpu;
+       const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu));
+
+       /* Look for allowed, online CPU in same node. */
+       for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
+               if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
+                       return dest_cpu;
+
+       /* Any allowed, online CPU? */
+       dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
+       if (dest_cpu < nr_cpu_ids)
+               return dest_cpu;
+
+       /* No more Mr. Nice Guy. */
+       if (dest_cpu >= nr_cpu_ids) {
+               rcu_read_lock();
+               cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
+               rcu_read_unlock();
+               dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
+
+               /*
+                * Don't tell them about moving exiting tasks or
+                * kernel threads (both mm NULL), since they never
+                * leave kernel.
+                */
+               if (p->mm && printk_ratelimit()) {
+                       printk(KERN_INFO "process %d (%s) no "
+                              "longer affine to cpu%d\n",
+                              task_pid_nr(p), p->comm, cpu);
+               }
+       }
+
+       return dest_cpu;
+}
+
 /*
  * Called from:
  *
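
The new helper tries progressively weaker placements: an allowed,
active CPU on the same node; any allowed, active CPU; finally any
active CPU after widening p->cpus_allowed to the task's cpuset. Below
is a minimal, compilable model of that cascade, assuming plain
bitmasks in place of struct cpumask; fallback_cpu(), pick_cpu(), the
NR_CPUS value and the fixed two-node layout are illustrative
assumptions, not kernel API.

    #include <stdio.h>

    #define NR_CPUS 8

    /* Assumed layout: CPUs 0-3 on node 0, CPUs 4-7 on node 1. */
    static unsigned node_mask(int cpu) { return cpu < 4 ? 0x0fu : 0xf0u; }

    /* Lowest set bit, like cpumask_any_and(); NR_CPUS means "none". */
    static int pick_cpu(unsigned mask)
    {
            for (int cpu = 0; cpu < NR_CPUS; cpu++)
                    if (mask & (1u << cpu))
                            return cpu;
            return NR_CPUS;
    }

    static int fallback_cpu(int cpu, unsigned allowed, unsigned active,
                            unsigned cpuset_allowed)
    {
            int dest;

            /* 1) Allowed, online CPU on the same node. */
            dest = pick_cpu(node_mask(cpu) & allowed & active);
            if (dest < NR_CPUS)
                    return dest;

            /* 2) Any allowed, online CPU. */
            dest = pick_cpu(allowed & active);
            if (dest < NR_CPUS)
                    return dest;

            /* 3) No more Mr. Nice Guy: widen to the cpuset and retry. */
            return pick_cpu(cpuset_allowed & active);
    }

    int main(void)
    {
            /* Task bound to CPU 2 while all of node 0 goes offline. */
            printf("fallback CPU: %d\n",
                   fallback_cpu(2, 1u << 2, 0xf0u, 0xffu)); /* prints 4 */
            return 0;
    }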
@@ -2343,14 +2380,8 @@ int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
         *   not worry about this generic constraint ]
         */
        if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) ||
-                    !cpu_active(cpu))) {
-
-               cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
-               /*
-                * XXX: race against hot-plug modifying cpu_active_mask
-                */
-               BUG_ON(cpu >= nr_cpu_ids);
-       }
+                    !cpu_active(cpu)))
+               cpu = select_fallback_rq(task_cpu(p), p);
 
        return cpu;
 }
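
The replaced BUG_ON() could fire whenever a CPU dropped out of
cpu_active_mask between the cpu_active() test and the
cpumask_any_and() pick, exactly the race the removed XXX comment
pointed at. Routing through select_fallback_rq() turns that crash into
a graceful fallback, at worst widening the task's affinity via its
cpuset.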
@@ -7352,37 +7383,10 @@ static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu)
 static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
 {
        int dest_cpu;
-       const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(dead_cpu));
 
 again:
-       /* Look for allowed, online CPU in same node. */
-       for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
-               if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
-                       goto move;
-
-       /* Any allowed, online CPU? */
-       dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
-       if (dest_cpu < nr_cpu_ids)
-               goto move;
-
-       /* No more Mr. Nice Guy. */
-       if (dest_cpu >= nr_cpu_ids) {
-               cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
-               dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
-
-               /*
-                * Don't tell them about moving exiting tasks or
-                * kernel threads (both mm NULL), since they never
-                * leave kernel.
-                */
-               if (p->mm && printk_ratelimit()) {
-                       printk(KERN_INFO "process %d (%s) no "
-                              "longer affine to cpu%d\n",
-                              task_pid_nr(p), p->comm, dead_cpu);
-               }
-       }
+       dest_cpu = select_fallback_rq(dead_cpu, p);
 
-move:
        /* It can have affinity changed while we were choosing. */
        if (unlikely(!__migrate_task_irq(p, dead_cpu, dest_cpu)))
                goto again;
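
move_task_off_dead_cpu() keeps its retry loop: the pick and the
migration are not atomic, so when __migrate_task_irq() fails because
the task's affinity changed in between, it simply chooses again. A
small self-contained model of that optimistic retry follows; pick(),
try_move() and the single simulated affinity change are illustrative
stand-ins, not kernel code.

    #include <stdbool.h>
    #include <stdio.h>

    /* The task is just a mutable allowed-CPU bitmask; in the kernel,
     * set_cpus_allowed_ptr() can rewrite it between pick and move. */
    static unsigned allowed = 1u << 2;

    /* Lowest allowed CPU among the active ones, -1 if none. */
    static int pick(unsigned active)
    {
            for (int cpu = 0; cpu < 8; cpu++)
                    if (allowed & active & (1u << cpu))
                            return cpu;
            return -1;
    }

    /* Mirrors __migrate_task_irq() failing once affinity changed. */
    static bool try_move(int dest)
    {
            return dest >= 0 && (allowed & (1u << dest));
    }

    int main(void)
    {
            int dest;

    again:
            dest = pick(0xffu);
            if (allowed == 1u << 2)         /* one simulated racing change */
                    allowed = 1u << 5;
            if (!try_move(dest))
                    goto again;             /* affinity changed: re-pick */

            printf("moved to CPU %d\n", dest);      /* prints 5 */
            return 0;
    }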