/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>

#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/localtimer.h>
#include <asm/smp_plat.h>

/*
 * as from 2.5, kernels no longer have an "init_tasks" structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;
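
/*
 * (The secondary's assembly boot path, head.S on this kernel
 * generation, reads this structure to load its initial stack pointer
 * and page table base before calling secondary_start_kernel().)
 */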

/*
 * structures for inter-processor calls
 * - A collection of single bit ipi messages.
 */
struct ipi_data {
	spinlock_t lock;
	unsigned long ipi_count;
	unsigned long bits;
};

static DEFINE_PER_CPU(struct ipi_data, ipi_data) = {
	.lock	= SPIN_LOCK_UNLOCKED,
};

enum ipi_msg_type {
	IPI_TIMER,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
	IPI_CPU_STOP,
};
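
/*
 * How this mailbox is used: a sender sets the bit for one
 * ipi_msg_type in the target CPU's ipi_data.bits while holding
 * ipi_data.lock, then triggers the hardware cross-call; the target's
 * do_IPI() handler swaps .bits to zero under the same lock and
 * services one message per set bit.  .ipi_count is statistics only,
 * reported via show_ipi_list().
 */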

static inline void identity_mapping_add(pgd_t *pgd, unsigned long start,
	unsigned long end)
{
	unsigned long addr, prot;
	pmd_t *pmd;

	prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE;
	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
		prot |= PMD_BIT4;

	for (addr = start & PGDIR_MASK; addr < end;) {
		pmd = pmd_offset(pgd + pgd_index(addr), addr);
		pmd[0] = __pmd(addr | prot);
		addr += SECTION_SIZE;
		pmd[1] = __pmd(addr | prot);
		addr += SECTION_SIZE;
		flush_pmd_entry(pmd);
		outer_clean_range(__pa(pmd), __pa(pmd + 1));
	}
}
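
/*
 * Why the 1:1 mapping: the secondary core starts with the MMU off,
 * executing from physical addresses.  At the moment it turns the MMU
 * on it is still running in the kernel's physical address range, so
 * that range must be mapped virtual == physical for the very next
 * instruction fetch to work; identity_mapping_del() below removes the
 * mapping again once the CPU is up.  Each loop iteration writes a
 * pair of section entries, hence addr advancing by SECTION_SIZE twice.
 */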

static inline void identity_mapping_del(pgd_t *pgd, unsigned long start,
	unsigned long end)
{
	unsigned long addr;
	pmd_t *pmd;

	for (addr = start & PGDIR_MASK; addr < end; addr += PGDIR_SIZE) {
		pmd = pmd_offset(pgd + pgd_index(addr), addr);
		pmd[0] = __pmd(0);
		pmd[1] = __pmd(0);
		clean_pmd_entry(pmd);
		outer_clean_range(__pa(pmd), __pa(pmd + 1));
	}
}

int __cpuinit __cpu_up(unsigned int cpu)
{
	struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
	struct task_struct *idle = ci->idle;
	pgd_t *pgd;
	int ret;

	/*
	 * Spawn a new process manually, if not already done.
	 * Grab a pointer to its task struct so we can mess with it
	 */
	if (!idle) {
		idle = fork_idle(cpu);
		if (IS_ERR(idle)) {
			printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
			return PTR_ERR(idle);
		}
		ci->idle = idle;
	} else {
		/*
		 * Since this idle thread is being re-used, call
		 * init_idle() to reinitialize the thread structure.
		 */
		init_idle(idle, cpu);
	}

	/*
	 * Allocate initial page tables to allow the new CPU to
	 * enable the MMU safely.  This essentially means a set
	 * of our "standard" page tables, with the addition of
	 * a 1:1 mapping for the physical address of the kernel.
	 */
	pgd = pgd_alloc(&init_mm);
	if (!pgd)	/* assumption: pgd_alloc() can fail; bail out cleanly */
		return -ENOMEM;

	if (PHYS_OFFSET != PAGE_OFFSET) {
#ifndef CONFIG_HOTPLUG_CPU
		identity_mapping_add(pgd, __pa(__init_begin), __pa(__init_end));
#endif
		identity_mapping_add(pgd, __pa(_stext), __pa(_etext));
		identity_mapping_add(pgd, __pa(_sdata), __pa(_edata));
	}

	/*
	 * We need to tell the secondary core where to find
	 * its stack and the page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
	secondary_data.pgdir = virt_to_phys(pgd);
	__cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
	outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));
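
	/*
	 * The flush and outer clean above are essential: the secondary
	 * core comes out of reset with MMU and caches off and reads
	 * secondary_data straight from RAM, so the values must be
	 * pushed out of the boot CPU's L1 and any outer (e.g. L2) cache.
	 */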

	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		unsigned long timeout;

		/*
		 * CPU was successfully started, wait for it
		 * to come online or time out.
		 */
		timeout = jiffies + HZ;
		while (time_before(jiffies, timeout)) {
			if (cpu_online(cpu))
				break;

			udelay(10);
			barrier();
		}

		if (!cpu_online(cpu))
			ret = -EIO;
	}

	secondary_data.stack = NULL;
	secondary_data.pgdir = 0;

	if (PHYS_OFFSET != PAGE_OFFSET) {
#ifndef CONFIG_HOTPLUG_CPU
		identity_mapping_del(pgd, __pa(__init_begin), __pa(__init_end));
#endif
		identity_mapping_del(pgd, __pa(_stext), __pa(_etext));
		identity_mapping_del(pgd, __pa(_sdata), __pa(_edata));
	}

	pgd_free(&init_mm, pgd);

	if (ret) {
		printk(KERN_CRIT "CPU%u: processor failed to boot\n", cpu);

		/*
		 * FIXME: We need to clean up the new idle thread. --rmk
		 */
	}

	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * __cpu_disable runs on the processor to be shutdown.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	struct task_struct *p;
	int ret;

	ret = platform_cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Stop the local timer for this CPU.
	 */
	local_timer_stop();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 */
	flush_cache_all();
	local_flush_tlb_all();

	read_lock(&tasklist_lock);
	for_each_process(p) {
		if (p->mm)
			cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
	}
	read_unlock(&tasklist_lock);

	return 0;
}
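
/*
 * Once __cpu_disable() has returned success, the generic hotplug code
 * finishes the takedown and the dying CPU ends up in its idle loop,
 * which calls cpu_die() below.
 */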

/*
 * called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpu_die(unsigned int cpu)
{
	if (!platform_cpu_kill(cpu))
		printk("CPU%u: unable to kill\n", cpu);
}

/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __ref cpu_die(void)
{
	unsigned int cpu = smp_processor_id();

	idle_task_exit();

	local_irq_disable();

	/*
	 * actual CPU shutdown procedure is at least platform (if not
	 * CPU) specific.
	 */
	platform_cpu_die(cpu);

	/*
	 * Do not return to the idle loop - jump back to the secondary
	 * cpu initialisation.  There's some initialisation which needs
	 * to be repeated to undo the effects of taking the CPU offline.
	 */
	__asm__("mov	sp, %0\n"
	"	b	secondary_start_kernel"
		:
		: "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * This is the secondary CPU boot entry.  We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void __cpuinit secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	printk("CPU%u: Booted secondary processor\n", cpu);

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	cpu_switch_mm(mm->pgd, mm);
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();
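
	/*
	 * Ordering note: the switch to init_mm's page tables above must
	 * come before anything else in this function, since we entered
	 * on the temporary 1:1 tables set up by __cpu_up(); the TLB
	 * flush discards any stale entries left over from them.
	 */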

	cpu_init();
	preempt_disable();

	/*
	 * Give the platform a chance to do its own initialisation.
	 */
	platform_secondary_init(cpu);

	/*
	 * Enable local interrupts.
	 */
	notify_cpu_starting(cpu);
	local_irq_enable();
	local_fiq_enable();

	/*
	 * Setup the percpu timer for this CPU.
	 */
	percpu_timer_setup();

	calibrate_delay();

	smp_store_cpu_info(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue
	 */
	set_cpu_online(cpu, true);

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_idle();
}

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
void __cpuinit smp_store_cpu_info(unsigned int cpuid)
{
	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

	cpu_info->loops_per_jiffy = loops_per_jiffy;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for_each_online_cpu(cpu)
		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);
}
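
/*
 * Worked example for the arithmetic above: BogoMIPS = lpj * HZ / 500000.
 * With HZ = 100 and two CPUs at lpj = 498688 each, bogosum = 997376,
 * so 997376 / (500000/100) = 199 and (997376 / (5000/100)) % 100 = 47,
 * printing "199.47 BogoMIPS".
 */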

void __init smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	per_cpu(cpu_data, cpu).idle = current;
}

static void send_ipi_message(const struct cpumask *mask, enum ipi_msg_type msg)
{
	unsigned long flags;
	unsigned int cpu;

	local_irq_save(flags);

	for_each_cpu(cpu, mask) {
		struct ipi_data *ipi = &per_cpu(ipi_data, cpu);

		spin_lock(&ipi->lock);
		ipi->bits |= 1 << msg;
		spin_unlock(&ipi->lock);
	}

	/*
	 * Call the platform specific cross-CPU call function.
	 */
	smp_cross_call(mask);

	local_irq_restore(flags);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

void show_ipi_list(struct seq_file *p)
{
	unsigned int cpu;

	seq_puts(p, "IPI:");

	for_each_present_cpu(cpu)
		seq_printf(p, " %10lu", per_cpu(ipi_data, cpu).ipi_count);

	seq_putc(p, '\n');
}

void show_local_irqs(struct seq_file *p)
{
	unsigned int cpu;

	seq_printf(p, "LOC: ");

	for_each_present_cpu(cpu)
		seq_printf(p, "%10u ", irq_stat[cpu].local_timer_irqs);

	seq_putc(p, '\n');
}

/*
 * Timer (local or broadcast) support
 */
static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);

static void ipi_timer(void)
{
	struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent);
	irq_enter();
	evt->event_handler(evt);
	irq_exit();
}

#ifdef CONFIG_LOCAL_TIMERS
asmlinkage void __exception do_local_timer(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	int cpu = smp_processor_id();

	if (local_timer_ack()) {
		irq_stat[cpu].local_timer_irqs++;
		ipi_timer();
	}

	set_irq_regs(old_regs);
}
#endif

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static void smp_timer_broadcast(const struct cpumask *mask)
{
	send_ipi_message(mask, IPI_TIMER);
}
#else
#define smp_timer_broadcast	NULL
#endif

#ifndef CONFIG_LOCAL_TIMERS
static void broadcast_timer_set_mode(enum clock_event_mode mode,
	struct clock_event_device *evt)
{
}

static void local_timer_setup(struct clock_event_device *evt)
{
	evt->name	= "dummy_timer";
	evt->features	= CLOCK_EVT_FEAT_ONESHOT |
			  CLOCK_EVT_FEAT_PERIODIC |
			  CLOCK_EVT_FEAT_DUMMY;
	evt->rating	= 400;
	evt->mult	= 1;
	evt->set_mode	= broadcast_timer_set_mode;

	clockevents_register_device(evt);
}
#endif

void __cpuinit percpu_timer_setup(void)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

	evt->cpumask = cpumask_of(cpu);
	evt->broadcast = smp_timer_broadcast;

	local_timer_setup(evt);
}
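
/*
 * Putting the timer pieces together: with CONFIG_LOCAL_TIMERS the real
 * per-CPU timer is programmed by the platform's local_timer_setup();
 * without it, the CLOCK_EVT_FEAT_DUMMY device above is registered and
 * the generic clockevents core keeps ticks running by broadcasting
 * IPI_TIMER through evt->broadcast (smp_timer_broadcast), which lands
 * in ipi_timer() on each target CPU.
 */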

static DEFINE_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	if (system_state == SYSTEM_BOOTING ||
	    system_state == SYSTEM_RUNNING) {
		spin_lock(&stop_lock);
		printk(KERN_CRIT "CPU%u: stopping\n", cpu);
		dump_stack();
		spin_unlock(&stop_lock);
	}

	set_cpu_online(cpu, false);

	local_fiq_disable();
	local_irq_disable();

	while (1)
		cpu_relax();
}

/*
 * Main handler for inter-processor interrupts
 *
 * For ARM, the ipimask now only identifies a single
 * category of IPI (Bit 1 IPIs have been replaced by a
 * different mechanism):
 *
 *  Bit 0 - Inter-processor function call
 */
asmlinkage void __exception do_IPI(struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
	struct pt_regs *old_regs = set_irq_regs(regs);

	ipi->ipi_count++;

	for (;;) {
		unsigned long msgs;

		spin_lock(&ipi->lock);
		msgs = ipi->bits;
		ipi->bits = 0;
		spin_unlock(&ipi->lock);

		if (!msgs)
			break;

		do {
			unsigned nextmsg;

			/*
			 * msgs & -msgs isolates the lowest set bit;
			 * ffz(~x) then converts that bit into its index,
			 * i.e. the ipi_msg_type number.
			 */
			nextmsg = msgs & -msgs;
			msgs &= ~nextmsg;
			nextmsg = ffz(~nextmsg);

			switch (nextmsg) {
			case IPI_TIMER:
				ipi_timer();
				break;

			case IPI_RESCHEDULE:
				/*
				 * nothing more to do - everything is
				 * done on the interrupt return path
				 */
				break;

			case IPI_CALL_FUNC:
				generic_smp_call_function_interrupt();
				break;

			case IPI_CALL_FUNC_SINGLE:
				generic_smp_call_function_single_interrupt();
				break;

			case IPI_CPU_STOP:
				ipi_cpu_stop(cpu);
				break;

			default:
				printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
				       cpu, nextmsg);
				break;
			}
		} while (msgs);
	}

	set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
	cpumask_t mask = cpu_online_map;
	cpu_clear(smp_processor_id(), mask);
	if (!cpus_empty(mask))
		send_ipi_message(&mask, IPI_CPU_STOP);
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

static void
on_each_cpu_mask(void (*func)(void *), void *info, int wait,
		const struct cpumask *mask)
{
	preempt_disable();

	smp_call_function_many(mask, func, info, wait);
	if (cpumask_test_cpu(smp_processor_id(), mask))
		func(info);

	preempt_enable();
}
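
/*
 * Usage note: unlike smp_call_function_many(), which only targets
 * *other* CPUs, on_each_cpu_mask() also invokes func on the calling
 * CPU when it is in the mask; preemption stays disabled so the CPU
 * test and the local call cannot race with migration.
 */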

/**********************************************************************/

/*
 * TLB operations
 */
struct tlb_args {
	struct vm_area_struct *ta_vma;
	unsigned long ta_start;
	unsigned long ta_end;
};

static inline void ipi_flush_tlb_all(void *ignored)
{
	local_flush_tlb_all();
}

static inline void ipi_flush_tlb_mm(void *arg)
{
	struct mm_struct *mm = (struct mm_struct *)arg;

	local_flush_tlb_mm(mm);
}

static inline void ipi_flush_tlb_page(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_page(ta->ta_vma, ta->ta_start);
}

static inline void ipi_flush_tlb_kernel_page(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_kernel_page(ta->ta_start);
}

static inline void ipi_flush_tlb_range(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}

static inline void ipi_flush_tlb_kernel_range(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
}
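
/*
 * The flush_tlb_* entry points below share one pattern:
 * tlb_ops_need_broadcast() is true on SMP cores whose TLB maintenance
 * operations are not broadcast in hardware (e.g. ARM11 MPCore), in
 * which case the local operation is shipped to the relevant CPUs via
 * the ipi_flush_* wrappers above; otherwise the local op suffices.
 */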

void flush_tlb_all(void)
{
	if (tlb_ops_need_broadcast())
		on_each_cpu(ipi_flush_tlb_all, NULL, 1);
	else
		local_flush_tlb_all();
}

void flush_tlb_mm(struct mm_struct *mm)
{
	if (tlb_ops_need_broadcast())
		on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mm_cpumask(mm));
	else
		local_flush_tlb_mm(mm);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	if (tlb_ops_need_broadcast()) {
		struct tlb_args ta;
		ta.ta_vma = vma;
		ta.ta_start = uaddr;
		on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mm_cpumask(vma->vm_mm));
	} else
		local_flush_tlb_page(vma, uaddr);
}

void flush_tlb_kernel_page(unsigned long kaddr)
{
	if (tlb_ops_need_broadcast()) {
		struct tlb_args ta;
		ta.ta_start = kaddr;
		on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
	} else
		local_flush_tlb_kernel_page(kaddr);
}

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	if (tlb_ops_need_broadcast()) {
		struct tlb_args ta;
		ta.ta_vma = vma;
		ta.ta_start = start;
		ta.ta_end = end;
		on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mm_cpumask(vma->vm_mm));
	} else
		local_flush_tlb_range(vma, start, end);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	if (tlb_ops_need_broadcast()) {
		struct tlb_args ta;
		ta.ta_start = start;
		ta.ta_end = end;
		on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
	} else
		local_flush_tlb_kernel_range(start, end);
}