static struct smp_call_struct * volatile smp_call_function_data;
static DEFINE_SPINLOCK(smp_call_function_lock);
-int __init __cpu_up(unsigned int cpu)
+int __cpuinit __cpu_up(unsigned int cpu)
{
- struct task_struct *idle;
+ struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
+ struct task_struct *idle = ci->idle;
pgd_t *pgd;
pmd_t *pmd;
int ret;
/*
- * Spawn a new process manually. Grab a pointer to
- * its task struct so we can mess with it
+ * Spawn a new process manually, if not already done.
+ * Grab a pointer to its task struct so we can mess with it
*/
- idle = fork_idle(cpu);
- if (IS_ERR(idle)) {
- printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
- return PTR_ERR(idle);
+ if (!idle) {
+ idle = fork_idle(cpu);
+ if (IS_ERR(idle)) {
+ printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
+ return PTR_ERR(idle);
+ }
+ ci->idle = idle;
}
/*
* We need to tell the secondary core where to find
* its stack and the page tables.
*/
- secondary_data.stack = (void *)idle->thread_info + THREAD_SIZE - 8;
+ secondary_data.stack = (void *)idle->thread_info + THREAD_START_SP;
secondary_data.pgdir = virt_to_phys(pgd);
wmb();
ret = -EIO;
}
- secondary_data.stack = 0;
+ secondary_data.stack = NULL;
secondary_data.pgdir = 0;
*pmd_offset(pgd, PHYS_OFFSET) = __pmd(0);
return ret;
}
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * __cpu_disable runs on the processor to be shut down.
+ */
+int __cpuexit __cpu_disable(void)
+{
+ unsigned int cpu = smp_processor_id();
+ struct task_struct *p;
+ int ret;
+
+ ret = mach_cpu_disable(cpu);
+ if (ret)
+ return ret;
+
+ /*
+ * Take this CPU offline. Once we clear this, we can't return,
+ * and we must not schedule until we're ready to give up the cpu.
+ */
+ cpu_clear(cpu, cpu_online_map);
+
+ /*
+ * OK - migrate IRQs away from this CPU
+ */
+ migrate_irqs();
+
+ /*
+ * Flush user cache and TLB mappings, and then remove this CPU
+ * from the vm mask set of all processes.
+ */
+ flush_cache_all();
+ local_flush_tlb_all();
+
+ read_lock(&tasklist_lock);
+ for_each_process(p) {
+ if (p->mm)
+ cpu_clear(cpu, p->mm->cpu_vm_mask);
+ }
+ read_unlock(&tasklist_lock);
+
+ return 0;
+}
+
+/*
+ * called on the thread which is asking for a CPU to be shut down -
+ * waits until shutdown has completed, or times out.
+ */
+void __cpuexit __cpu_die(unsigned int cpu)
+{
+ if (!platform_cpu_kill(cpu))
+ printk(KERN_ERR "CPU%u: unable to kill\n", cpu);
+}
+
+/*
+ * Called from the idle thread for the CPU which has been shut down.
+ *
+ * Note that we disable IRQs here, but do not re-enable them
+ * before returning to the caller. This is also the behaviour
+ * of the other hotplug-cpu capable cores, so presumably coming
+ * out of idle fixes this.
+ */
+void __cpuexit cpu_die(void)
+{
+ unsigned int cpu = smp_processor_id();
+
+ local_irq_disable();
+ idle_task_exit();
+
+ /*
+ * actual CPU shutdown procedure is at least platform (if not
+ * CPU) specific
+ */
+ platform_cpu_die(cpu);
+
+ /*
+ * Do not return to the idle loop - jump back to the secondary
+ * cpu initialisation. There's some initialisation which needs
+ * to be repeated to undo the effects of taking the CPU offline.
+ */
+ __asm__("mov sp, %0\n"
+ " b secondary_start_kernel"
+ :
+ : "r" ((void *)current->thread_info + THREAD_START_SP));
+}
+#endif /* CONFIG_HOTPLUG_CPU */
+
/*
* This is the secondary CPU boot entry. We're using this CPUs
* idle thread stack, but a set of temporary page tables.
*/
-asmlinkage void __init secondary_start_kernel(void)
+asmlinkage void __cpuinit secondary_start_kernel(void)
{
struct mm_struct *mm = &init_mm;
unsigned int cpu = smp_processor_id();
cpu_set(cpu, mm->cpu_vm_mask);
cpu_switch_mm(mm->pgd, mm);
enter_lazy_tlb(mm, current);
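+ /*
+ * Flush out any stale TLB entries, e.g. those left by the
+ * temporary boot page tables or by a previous period online.
+ */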
+ local_flush_tlb_all();
cpu_init();
* Called by both boot and secondaries to move global data into
* per-processor storage.
*/
-void __init smp_store_cpu_info(unsigned int cpuid)
+void __cpuinit smp_store_cpu_info(unsigned int cpuid)
{
struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);
{
unsigned int cpu = smp_processor_id();
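+ /*
+ * Record the boot CPU's idle task; __cpu_up() reuses a
+ * previously recorded idle thread instead of forking a new one.
+ */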
+ per_cpu(cpu_data, cpu).idle = current;
+
cpu_set(cpu, cpu_possible_map);
cpu_set(cpu, cpu_present_map);
cpu_set(cpu, cpu_online_map);
* You must not call this function with disabled interrupts, from a
* hardware interrupt handler, nor from a bottom half handler.
*/
-int smp_call_function_on_cpu(void (*func)(void *info), void *info, int retry,
- int wait, cpumask_t callmap)
+static int smp_call_function_on_cpu(void (*func)(void *info), void *info,
+ int retry, int wait, cpumask_t callmap)
{
struct smp_call_struct data;
unsigned long timeout;
printk(KERN_CRIT
"CPU%u: smp_call_function timeout for %p(%p)\n"
" callmap %lx pending %lx, %swait\n",
- smp_processor_id(), func, info, callmap, data.pending,
- wait ? "" : "no ");
+ smp_processor_id(), func, info, *cpus_addr(callmap),
+ *cpus_addr(data.pending), wait ? "" : "no ");
/*
* TRACE