From: Paul E. McKenney
Date: Tue, 24 Jul 2012 21:57:17 +0000 (-0700)
Subject: sched: Make migration_call() safe for stop_machine()-free hotplug
X-Git-Tag: next-20120816~27^2^4~1
X-Git-Url: https://git.karo-electronics.de/?a=commitdiff_plain;h=c11f73e13c65ffda9bd211195706d6f11f7ebfce;p=karo-tx-linux.git

sched: Make migration_call() safe for stop_machine()-free hotplug

The CPU_DYING branch of migration_call() relies on the fact that
CPU-hotplug offline operations use stop_machine(), which guarantees
that no other CPU is running while the dying CPU's runqueue is being
emptied.  Once hotplug no longer uses stop_machine(), that guarantee
disappears, and the runqueue state must be protected explicitly.
This commit therefore remedies the situation by acquiring the
relevant runqueue locks.

Note that sched_ttwu_pending() remains outside the scope of these new
runqueue-lock critical sections because (1) sched_ttwu_pending() does
its own runqueue-lock acquisition and (2) sched_ttwu_pending() handles
pending wakeups, and no further wakeups can select this CPU because it
is already marked as offline.

Signed-off-by: Paul E. McKenney
---

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index eaead2df6aa8..76d0bf2c1c0d 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5175,10 +5175,8 @@ void idle_task_exit(void)
  * their home CPUs. So we just add the counter to another CPU's counter,
  * to keep the global sum constant after CPU-down:
  */
-static void migrate_nr_uninterruptible(struct rq *rq_src)
+static void migrate_nr_uninterruptible(struct rq *rq_src, struct rq *rq_dest)
 {
-	struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask));
-
 	rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible;
 	rq_src->nr_uninterruptible = 0;
 }
@@ -5200,7 +5198,7 @@ static void calc_global_load_remove(struct rq *rq)
  * there's no concurrency possible, we hold the required locks anyway
  * because of lock validation efforts.
  */
-static void migrate_tasks(unsigned int dead_cpu)
+static void migrate_tasks(unsigned int dead_cpu, struct rq *rq_dest)
 {
 	struct rq *rq = cpu_rq(dead_cpu);
 	struct task_struct *next, *stop = rq->stop;
@@ -5234,11 +5232,11 @@ static void migrate_tasks(unsigned int dead_cpu)
 		/* Find suitable destination for @next, with force if needed. */
 		dest_cpu = select_fallback_rq(dead_cpu, next);
 
-		raw_spin_unlock(&rq->lock);
+		double_rq_unlock(rq, rq_dest);
 
 		__migrate_task(next, dead_cpu, dest_cpu);
 
-		raw_spin_lock(&rq->lock);
+		double_rq_lock(rq, rq_dest);
 	}
 
 	rq->stop = stop;
@@ -5452,6 +5450,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 	int cpu = (long)hcpu;
 	unsigned long flags;
 	struct rq *rq = cpu_rq(cpu);
+	struct rq __maybe_unused *rq_dest = cpu_rq(cpumask_any(cpu_active_mask));
 
 	switch (action & ~CPU_TASKS_FROZEN) {
 
@@ -5474,17 +5473,19 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 	case CPU_DYING:
 		sched_ttwu_pending();
 		/* Update our root-domain */
-		raw_spin_lock_irqsave(&rq->lock, flags);
+		local_irq_save(flags);
+		double_rq_lock(rq, rq_dest);
 		if (rq->rd) {
 			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
 			set_rq_offline(rq);
 		}
-		migrate_tasks(cpu);
+		migrate_tasks(cpu, rq_dest);
 		BUG_ON(rq->nr_running != 1); /* the migration thread */
-		raw_spin_unlock_irqrestore(&rq->lock, flags);
-		migrate_nr_uninterruptible(rq);
+		migrate_nr_uninterruptible(rq, rq_dest);
 		calc_global_load_remove(rq);
+		double_rq_unlock(rq, rq_dest);
+		local_irq_restore(flags);
 		break;
 #endif
 	}
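
For readers unfamiliar with the helper this patch leans on: double_rq_lock()
must take the two runqueue locks in a globally consistent order, otherwise
two CPUs locking the same pair of runqueues with swapped arguments could
deadlock.  The following is a minimal sketch of that discipline, modeled on
the scheduler's internal helper (the in-tree version additionally carries
sparse and lockdep annotations, omitted here for brevity):

	/*
	 * Sketch: acquire two runqueue locks without risking AB-BA
	 * deadlock by always locking the lower-addressed runqueue
	 * first.  Interrupts must already be disabled, hence the
	 * local_irq_save() in the CPU_DYING path above.
	 */
	static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
	{
		BUG_ON(!irqs_disabled());
		if (rq1 == rq2) {
			/* Same runqueue: take the one lock only once. */
			raw_spin_lock(&rq1->lock);
		} else if (rq1 < rq2) {
			raw_spin_lock(&rq1->lock);
			raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
		} else {
			raw_spin_lock(&rq2->lock);
			raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
		}
	}

Because both the CPU_DYING branch and migrate_tasks() now take rq and
rq_dest through this one helper, the lock order is the same everywhere,
and dropping and re-acquiring the pair around __migrate_task() remains
deadlock-free.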