/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/err.h>

#include <asm/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/r4k-timer.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/time.h>

#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#endif /* CONFIG_MIPS_MT_SMTC */
volatile cpumask_t cpu_callin_map;	/* Bitmask of started secondaries */
int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */

/* Number of TCs (or siblings in Intel speak) per CPU core */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* representing the TCs (or siblings in Intel speak) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;
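
/*
 * Context sketch (not from this file): asm/smp.h wraps the two arrays
 * above as cpu_number_map(cpu)/cpu_logical_map(cpu).  A platform's
 * smp_setup() fills them in as it discovers cores, roughly:
 *
 *	__cpu_number_map[physid] = nr;		physical -> logical
 *	__cpu_logical_map[nr] = physid;		logical -> physical
 *
 * letting boot_secondary() translate a logical cpu back to the hardware
 * thread it has to kick.  Per-platform details vary.
 */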

static inline void set_cpu_sibling_map(int cpu)
{
	int i;

	cpu_set(cpu, cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu_mask(i, cpu_sibling_setup_map) {
			if (cpu_data[cpu].core == cpu_data[i].core) {
				cpu_set(i, cpu_sibling_map[cpu]);
				cpu_set(cpu, cpu_sibling_map[i]);
			}
		}
	} else
		cpu_set(cpu, cpu_sibling_map[cpu]);
}

struct plat_smp_ops *mp_ops;

__cpuinit void register_smp_ops(struct plat_smp_ops *ops)
{
	if (mp_ops)
		printk(KERN_WARNING "Overriding previously set SMP ops\n");

	mp_ops = ops;
}
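
/*
 * A minimal sketch of the registration side (illustrative names, not
 * code from this file): a platform fills in a struct plat_smp_ops and
 * calls register_smp_ops() from its early setup code.
 */
#if 0	/* example only */
static struct plat_smp_ops myboard_smp_ops = {
	.send_ipi_single	= myboard_send_ipi_single,
	.send_ipi_mask		= myboard_send_ipi_mask,
	.init_secondary		= myboard_init_secondary,
	.smp_finish		= myboard_smp_finish,
	.cpus_done		= myboard_cpus_done,
	.boot_secondary		= myboard_boot_secondary,
	.smp_setup		= myboard_smp_setup,
	.prepare_cpus		= myboard_prepare_cpus,
};

void __init myboard_plat_setup(void)
{
	register_smp_ops(&myboard_smp_ops);
}
#endif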

/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage __cpuinit void start_secondary(void)
{
	unsigned int cpu;

#ifdef CONFIG_MIPS_MT_SMTC
	/* Only do cpu_probe for first TC of CPU */
	if ((read_c0_tcbind() & TCBIND_CURTC) == 0)
#endif /* CONFIG_MIPS_MT_SMTC */
	cpu_probe();
	cpu_report();
	per_cpu_trap_init();
	mips_clockevent_init();
	mp_ops->init_secondary();

	/*
	 * XXX parity protection should be folded in here when it's converted
	 * to an option instead of something based on .cputype
	 */

	calibrate_delay();
	preempt_disable();
	cpu = smp_processor_id();
	cpu_data[cpu].udelay_val = loops_per_jiffy;

	notify_cpu_starting(cpu);

	mp_ops->smp_finish();
	set_cpu_sibling_map(cpu);

	cpu_set(cpu, cpu_callin_map);

	synchronise_count_slave();

	cpu_idle();
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION);
}

/*
 * We reuse the same vector for the single IPI
 */
void arch_send_call_function_single_ipi(int cpu)
{
	mp_ops->send_ipi_mask(cpumask_of(cpu), SMP_CALL_FUNCTION);
}

/*
 * Call into both interrupt handlers, as we share the IPI for them
 */
void smp_call_function_interrupt(void)
{
	irq_enter();
	generic_smp_call_function_single_interrupt();
	generic_smp_call_function_interrupt();
	irq_exit();
}
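
/*
 * Receive-side sketch (illustrative, platform code is the real owner):
 * the IRQ wired to SMP_CALL_FUNCTION must dispatch here, while an
 * SMP_RESCHEDULE_YOURSELF IPI needs nothing beyond the interrupt itself.
 */
#if 0	/* example only */
static irqreturn_t myboard_ipi_interrupt(int irq, void *dev_id)
{
	unsigned int action = myboard_ack_ipi();	/* hypothetical helper */

	if (action & SMP_CALL_FUNCTION)
		smp_call_function_interrupt();
	/* SMP_RESCHEDULE_YOURSELF: the interrupt return path reschedules */

	return IRQ_HANDLED;
}
#endif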

static void stop_this_cpu(void *dummy)
{
	/*
	 * Remove this CPU:
	 */
	cpu_clear(smp_processor_id(), cpu_online_map);
	for (;;) {
		if (cpu_wait)
			(*cpu_wait)();	/* Wait if available. */
	}
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	mp_ops->cpus_done();
	synchronise_count_master();
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
	mp_ops->prepare_cpus(max_cpus);
	set_cpu_sibling_map(0);
#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(&cpu_possible_map);
#endif
}

/* preload SMP state for boot cpu */
void __devinit smp_prepare_boot_cpu(void)
{
	set_cpu_possible(0, true);
	set_cpu_online(0, true);
	cpu_set(0, cpu_callin_map);
}

/*
 * Called once for each "cpu_possible(cpu)".  Needs to spin up the cpu
 * and keep control until "cpu_online(cpu)" is set.  Note: cpu is
 * physical, not logical.
 */
static struct task_struct *cpu_idle_thread[NR_CPUS];

int __cpuinit __cpu_up(unsigned int cpu)
{
	struct task_struct *idle;

	/*
	 * Processor goes to start_secondary(), sets online flag
	 * The following code is purely to make sure
	 * Linux can schedule processes on this slave.
	 */
	if (!cpu_idle_thread[cpu]) {
		idle = fork_idle(cpu);
		cpu_idle_thread[cpu] = idle;

		if (IS_ERR(idle))
			panic(KERN_ERR "Fork failed for CPU %d", cpu);
	} else {
		idle = cpu_idle_thread[cpu];
		init_idle(idle, cpu);
	}

	mp_ops->boot_secondary(cpu, idle);

	/*
	 * Trust is futile.  We should really have timeouts ...
	 */
	while (!cpu_isset(cpu, cpu_callin_map))
		udelay(100);

	cpu_set(cpu, cpu_online_map);

	return 0;
}
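
/*
 * For context (generic kernel behaviour, not code from this file):
 * smp_init() in init/main.c reaches __cpu_up() via cpu_up(), roughly:
 *
 *	for_each_present_cpu(cpu) {
 *		if (num_online_cpus() >= setup_max_cpus)
 *			break;
 *		if (!cpu_online(cpu))
 *			cpu_up(cpu);
 *	}
 */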

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * Special Variant of smp_call_function for use by TLB functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 *  o CONFIG_MIPS_MT_SMTC currently implies there is only one physical core.
 */
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
#ifndef CONFIG_MIPS_MT_SMTC
	smp_call_function(func, info, 1);
#endif
}

static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
{
	preempt_disable();

	smp_on_other_tlbs(func, info);
	func(info);

	preempt_enable();
}
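
/*
 * Note the division of labour above: smp_on_other_tlbs() runs func on
 * every CPU except the caller's; smp_on_each_tlb() additionally invokes
 * func(info) locally, with preemption disabled so the caller cannot
 * migrate between the remote and the local invocation.
 */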

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debugees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
	} else {
		cpumask_t mask = cpu_online_map;
		unsigned int cpu;

		cpu_clear(smp_processor_id(), mask);
		for_each_cpu_mask(cpu, mask)
			if (cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}
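
/*
 * How the cpu_context() zeroing above takes effect (sketch of the
 * existing switch_mm() logic in asm/mmu_context.h, shown here only for
 * context): a CPU that later switches to this mm finds a stale context
 * and allocates a fresh ASID,
 *
 *	if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK)
 *		get_new_mmu_context(next, cpu);
 *
 * which is why a single-threaded mm can skip the cross-CPU IPI.
 */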

struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = start,
			.addr2 = end,
		};

		smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
	} else {
		cpumask_t mask = cpu_online_map;
		unsigned int cpu;

		cpu_clear(smp_processor_id(), mask);
		for_each_cpu_mask(cpu, mask)
			if (cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd = {
		.addr1 = start,
		.addr2 = end,
	};

	on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
}
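
/*
 * Unlike the user-space variants, kernel mappings have no owning mm to
 * scope the flush, so this path IPIs every online CPU unconditionally
 * (on_each_cpu() also runs the handler locally), e.g. when a vmalloc
 * region is torn down.
 */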

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = page,
		};

		smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
	} else {
		cpumask_t mask = cpu_online_map;
		unsigned int cpu;

		cpu_clear(smp_processor_id(), mask);
		for_each_cpu_mask(cpu, mask)
			if (cpu_context(cpu, vma->vm_mm))
				cpu_context(cpu, vma->vm_mm) = 0;
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
	unsigned long vaddr = (unsigned long) info;

	local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
	smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
}

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);