@@ ... @@
 DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
 EXPORT_PER_CPU_SYMBOL(cpu_info);

 atomic_t init_deasserted;

+static DEFINE_PER_CPU(struct completion, die_complete);
+
 /*
@@ ... @@ int native_cpu_disable(void)
                 return ret;

         clear_local_APIC();
-
+        init_completion(&per_cpu(die_complete, smp_processor_id()));
         cpu_disable_common();
+
         return 0;
 }

 void native_cpu_die(unsigned int cpu)
 {
         /* We don't do anything here: idle task is faking death itself. */
-        unsigned int i;
+        wait_for_completion_timeout(&per_cpu(die_complete, cpu), HZ);

-        for (i = 0; i < 10; i++) {
-                /* They ack this in play_dead by setting CPU_DEAD */
-                if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
-                        if (system_state == SYSTEM_RUNNING)
-                                pr_info("CPU %u is now offline\n", cpu);
-                        return;
-                }
-                msleep(100);
+        /* They ack this in play_dead() by setting CPU_DEAD */
+        if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
+                if (system_state == SYSTEM_RUNNING)
+                        pr_info("CPU %u is now offline\n", cpu);
+        } else {
+                pr_err("CPU %u didn't die...\n", cpu);
         }
-        pr_err("CPU %u didn't die...\n", cpu);
 }
@@ ... @@ void play_dead_common(void)
         mb();
         /* Ack it */
         __this_cpu_write(cpu_state, CPU_DEAD);
+        complete(&per_cpu(die_complete, smp_processor_id()));

         /*
          * With physical CPU hotplug, we should halt the cpu
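
What the patch does: instead of polling cpu_state every 100 ms (up to ten
times), native_cpu_die() now sleeps on a per-CPU completion that the dying
CPU signals from play_dead_common(), so the waiter wakes as soon as the
victim acks, with HZ jiffies (one second) as the upper bound. Re-initializing
the completion in native_cpu_disable() on every offline keeps a stale
complete() from a previous cycle from satisfying a new wait. Below is a
minimal, self-contained sketch of the same init/wait/complete pattern as a
throwaway module; the names die_demo, demo_done and demo_thread are invented
for illustration and are not part of the patch.

#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/module.h>

static struct completion demo_done;

static int demo_thread(void *unused)
{
        msleep(10);             /* stand-in for the victim CPU's offline work */
        complete(&demo_done);   /* like play_dead_common(): wake the waiter */
        return 0;
}

static int __init die_demo_init(void)
{
        struct task_struct *tsk;

        init_completion(&demo_done);    /* like native_cpu_disable() */

        tsk = kthread_run(demo_thread, NULL, "die_demo");
        if (IS_ERR(tsk))
                return PTR_ERR(tsk);

        /* like native_cpu_die(): returns 0 if HZ jiffies elapse first */
        if (wait_for_completion_timeout(&demo_done, HZ))
                pr_info("die_demo: signalled, no polling needed\n");
        else
                pr_err("die_demo: timed out after 1s\n");

        return 0;
}

static void __exit die_demo_exit(void)
{
}

module_init(die_demo_init);
module_exit(die_demo_exit);
MODULE_LICENSE("GPL");

wait_for_completion_timeout() returns the remaining jiffies when complete()
arrived in time and 0 on timeout, which is why native_cpu_die() can fall
through to the same cpu_state check either way: a timely complete() only
makes the wait return early.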