/*
 * arch/sh/kernel/smp.c
 *
 * SMP support for the SuperH processors.
 *
 * Copyright (C) 2002 - 2010 Paul Mundt
 * Copyright (C) 2006 - 2007 Akio Idehara
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/err.h>
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <asm/atomic.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>

int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */

struct plat_smp_ops *mp_ops = NULL;

/* State of each CPU */
DEFINE_PER_CPU(int, cpu_state) = { 0 };
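
/*
 * Note: cpu_state tracks the per-CPU hotplug lifecycle. The bring-up
 * path moves a secondary from CPU_UP_PREPARE (set in __cpu_up()) to
 * CPU_ONLINE (set by the secondary itself in start_secondary()); the
 * hotplug-down path parks a dying CPU at CPU_DEAD in play_dead_common().
 */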

void __cpuinit register_smp_ops(struct plat_smp_ops *ops)
{
	if (mp_ops)
		printk(KERN_WARNING "Overriding previously set SMP ops\n");

	mp_ops = ops;
}
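
/*
 * A platform registers its ops once from its early CPU probe code,
 * before the generic SMP bring-up calls back into mp_ops. As a rough
 * sketch (the SH-X3 support does something along these lines):
 *
 *	register_smp_ops(&shx3_smp_ops);
 */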

static inline void __cpuinit smp_store_cpu_info(unsigned int cpu)
{
	struct sh_cpuinfo *c = cpu_data + cpu;

	memcpy(c, &boot_cpu_data, sizeof(struct sh_cpuinfo));

	c->loops_per_jiffy = loops_per_jiffy;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu = smp_processor_id();

	init_new_context(current, &init_mm);
	current_thread_info()->cpu = cpu;
	mp_ops->prepare_cpus(max_cpus);

#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(&cpu_possible_map);
#endif
}

void __init smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	__cpu_number_map[0] = cpu;
	__cpu_logical_map[0] = cpu;

	set_cpu_online(cpu, true);
	set_cpu_possible(cpu, true);

	per_cpu(cpu_state, cpu) = CPU_ONLINE;
}
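
/*
 * CPU hotplug teardown handshake: the dying CPU marks itself CPU_DEAD
 * from play_dead_common() with interrupts off, while the CPU running
 * native_cpu_die() polls cpu_state (with a read barrier in between)
 * for up to a second before declaring the CPU stuck.
 */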
#ifdef CONFIG_HOTPLUG_CPU
void native_cpu_die(unsigned int cpu)
{
	unsigned int i;

	for (i = 0; i < 10; i++) {
		smp_rmb();
		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
			if (system_state == SYSTEM_RUNNING)
				pr_info("CPU %u is now offline\n", cpu);

			return;
		}

		msleep(100);
	}

	pr_err("CPU %u didn't die...\n", cpu);
}

int native_cpu_disable(unsigned int cpu)
{
	/* Refuse to take the boot CPU down; anything else may go. */
	return cpu == 0 ? -EPERM : 0;
}

void play_dead_common(void)
{
	idle_task_exit();
	irq_ctx_exit(raw_smp_processor_id());
	mb();

	__get_cpu_var(cpu_state) = CPU_DEAD;
	local_irq_disable();
}

void native_play_dead(void)
{
	play_dead_common();
}
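
/*
 * Teardown order in __cpu_disable() matters: the CPU is first marked
 * !online (so no new work is scheduled on it), then its IRQs are
 * migrated away and its tick is stopped, and finally it is dropped
 * from the cpumask of every mm so no further TLB shootdown IPIs are
 * aimed at it.
 */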
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	struct task_struct *p;
	int ret;

	ret = mp_ops->cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Stop the local timer for this CPU.
	 */
	local_timer_stop(cpu);

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 */
	flush_cache_all();
	local_flush_tlb_all();

	read_lock(&tasklist_lock);
	for_each_process(p)
		if (p->mm)
			cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
	read_unlock(&tasklist_lock);

	return 0;
}
#else /* ... !CONFIG_HOTPLUG_CPU */
int native_cpu_disable(unsigned int cpu)
{
	return -ENOSYS;
}

void native_cpu_die(unsigned int cpu)
{
	/* We said "no" in __cpu_disable */
	BUG();
}

void native_play_dead(void)
{
	BUG();
}
#endif

asmlinkage void __cpuinit start_secondary(void)
{
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm = &init_mm;

	enable_mmu();
	atomic_inc(&mm->mm_count);
	atomic_inc(&mm->mm_users);
	current->active_mm = mm;
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();

	per_cpu_trap_init();

	preempt_disable();

	notify_cpu_starting(cpu);

	local_irq_enable();

	/* Enable local timers */
	local_timer_setup(cpu);
	calibrate_delay();

	smp_store_cpu_info(cpu);

	set_cpu_online(cpu, true);
	per_cpu(cpu_state, cpu) = CPU_ONLINE;

	cpu_idle();
}
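
/*
 * The block below is the hand-off area consumed by the secondary CPU's
 * assembly entry code in head.S: the boot CPU fills in the stack
 * pointer, thread_info and C entry point before releasing the
 * secondary, which is why __cpu_up() runs flush_icache_range() over it
 * before calling mp_ops->start_cpu().
 */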
extern struct {
	unsigned long sp;
	unsigned long bss_start;
	unsigned long bss_end;
	void *start_kernel_fn;
	void *cpu_init_fn;
	void *thread_info;
} stack_start;

int __cpuinit __cpu_up(unsigned int cpu)
{
	struct task_struct *tsk;
	unsigned long timeout;

	tsk = cpu_data[cpu].idle;
	if (!tsk) {
		tsk = fork_idle(cpu);
		if (IS_ERR(tsk)) {
			pr_err("Failed forking idle task for cpu %d\n", cpu);
			return PTR_ERR(tsk);
		}

		cpu_data[cpu].idle = tsk;
	}

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

	/* Fill in data in head.S for secondary cpus */
	stack_start.sp = tsk->thread.sp;
	stack_start.thread_info = tsk->stack;
	stack_start.bss_start = 0; /* don't clear bss for secondary cpus */
	stack_start.start_kernel_fn = start_secondary;

	flush_icache_range((unsigned long)&stack_start,
			   (unsigned long)&stack_start + sizeof(stack_start));
	wmb();

	mp_ops->start_cpu(cpu, (unsigned long)_stext);

	timeout = jiffies + HZ;
	while (time_before(jiffies, timeout)) {
		if (cpu_online(cpu))
			break;

		udelay(10);
	}

	if (cpu_online(cpu))
		return 0;

	return -ENOENT;
}
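
/*
 * Bring-up handshake, in short: __cpu_up() primes stack_start and
 * kicks the secondary via mp_ops->start_cpu(), then busy-waits up to
 * one second (HZ jiffies) for start_secondary() on the new CPU to flip
 * its online bit; -ENOENT is returned if it never shows up.
 */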

void __init smp_cpus_done(unsigned int max_cpus)
{
	unsigned long bogosum = 0;
	int cpu;

	for_each_online_cpu(cpu)
		bogosum += cpu_data[cpu].loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n", num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);
}

void smp_send_reschedule(int cpu)
{
	mp_ops->send_ipi(cpu, SMP_MSG_RESCHEDULE);
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, 0, 0);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION);
}

void arch_send_call_function_single_ipi(int cpu)
{
	mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
}

void smp_timer_broadcast(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		mp_ops->send_ipi(cpu, SMP_MSG_TIMER);
}
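
/*
 * All of the senders above funnel into mp_ops->send_ipi() with one of
 * the SMP_MSG_* message types; the receive side is demultiplexed in
 * smp_message_recv() below. The arch_send_call_function_*() hooks are
 * what the generic kernel/smp.c cross-call code expects from the arch.
 */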

static void ipi_timer(void)
{
	irq_enter();
	local_timer_interrupt();
	irq_exit();
}

void smp_message_recv(unsigned int msg)
{
	switch (msg) {
	case SMP_MSG_FUNCTION:
		generic_smp_call_function_interrupt();
		break;
	case SMP_MSG_RESCHEDULE:
		break;
	case SMP_MSG_FUNCTION_SINGLE:
		generic_smp_call_function_single_interrupt();
		break;
	case SMP_MSG_TIMER:
		ipi_timer();
		break;
	default:
		printk(KERN_WARNING "SMP %d: %s(): unknown IPI %d\n",
		       smp_processor_id(), __func__, msg);
		break;
	}
}
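
/*
 * A platform's IPI interrupt handler is expected to decode which
 * message fired and hand it to smp_message_recv(). A minimal sketch,
 * assuming a hypothetical per-message IRQ range (actual demux schemes
 * vary per platform):
 *
 *	static irqreturn_t ipi_interrupt(int irq, void *dev_id)
 *	{
 *		smp_message_recv(irq - FIRST_IPI_IRQ);
 *		return IRQ_HANDLED;
 *	}
 */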

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, 0, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process etc).
 */
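
/*
 * (Context invalidation works by zeroing cpu_context(cpu, mm): zero is
 * the "no context" value, so the next switch_mm() on that cpu is forced
 * to allocate a fresh ASID for the mm instead of reusing stale TLB
 * entries.)
 */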

void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}

struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = start;
		fd.addr2 = end;
		smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd;

	fd.addr1 = start;
	fd.addr2 = end;
	on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
	    (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = page;
		smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, vma->vm_mm) = 0;
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
	local_flush_tlb_one(fd->addr1, fd->addr2);
}

void flush_tlb_one(unsigned long asid, unsigned long vaddr)
{
	struct flush_tlb_data fd;

	fd.addr1 = asid;
	fd.addr2 = vaddr;

	smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1);
	local_flush_tlb_one(asid, vaddr);
}