/*
 * IPI management based on arch/arm/kernel/smp.c (Copyright 2002 ARM Limited)
 *
 * Copyright 2007-2009 Analog Devices Inc.
 * Philippe Gerum <rpm@xenomai.org>
 *
 * Licensed under the GPL-2.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <linux/err.h>
/*
 * Anomaly 05000120 - we always define corelock as a 32-bit integer in L2.
 */
struct corelock_slot corelock __attribute__ ((__section__(".l2.bss")));
void __cpuinitdata *init_retx_coreb, *init_saved_retx_coreb,
	*init_saved_seqstat_coreb, *init_saved_icplb_fault_addr_coreb,
	*init_saved_dcplb_fault_addr_coreb;
cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);

cpumask_t cpu_online_map;
EXPORT_SYMBOL(cpu_online_map);
#define BFIN_IPI_RESCHEDULE   0
#define BFIN_IPI_CALL_FUNC    1
#define BFIN_IPI_CPU_STOP     2
struct blackfin_flush_data {
	unsigned long start;
	unsigned long end;
};

void *secondary_stack;
struct smp_call_struct {
	void (*func)(void *info);
	void *info;
	int wait;
	cpumask_t pending;
	cpumask_t waitmask;
};
static struct blackfin_flush_data smp_flush_data;

static DEFINE_SPINLOCK(stop_lock);
struct ipi_message {
	struct list_head list;
	unsigned long type;
	struct smp_call_struct call_struct;
};
struct ipi_message_queue {
	struct list_head head;
	spinlock_t lock;
	unsigned long count;
};

static DEFINE_PER_CPU(struct ipi_message_queue, ipi_msg_queue);
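/*
 * Message flow, as implemented below: a sender allocates a struct
 * ipi_message, queues it on each target CPU's ipi_msg_queue under that
 * queue's lock, and raises a hardware IPI via platform_send_ipi_cpu().
 * The target drains its queue in ipi_handler() and dispatches on
 * msg->type.
 */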
static void ipi_cpu_stop(unsigned int cpu)
{
	spin_lock(&stop_lock);
	printk(KERN_CRIT "CPU%u: stopping\n", cpu);
	dump_stack();
	spin_unlock(&stop_lock);

	cpu_clear(cpu, cpu_online_map);

	local_irq_disable();
	while (1)
		SSYNC();
}
static void ipi_flush_icache(void *info)
{
	struct blackfin_flush_data *fdata = info;

	/* Invalidate the memory holding the bounds of the flushed region. */
	blackfin_dcache_invalidate_range((unsigned long)fdata,
					 (unsigned long)fdata + sizeof(*fdata));

	blackfin_icache_flush_range(fdata->start, fdata->end);
}
static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
{
	int wait;
	void (*func)(void *info);
	void *info;

	func = msg->call_struct.func;
	info = msg->call_struct.info;
	wait = msg->call_struct.wait;
	cpu_clear(cpu, msg->call_struct.pending);
	func(info);
	if (wait) {
#ifdef __ARCH_SYNC_CORE_DCACHE
		/*
		 * 'wait' usually means synchronization between CPUs.
		 * Invalidate D cache in case shared data was changed
		 * by func() to ensure cache coherence.
		 */
		resync_core_dcache();
#endif
		cpu_clear(cpu, msg->call_struct.waitmask);
	} else
		kfree(msg);
}
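/*
 * Note on message ownership: for a waited call the sender spins on
 * call_struct.waitmask and frees the message once every target has
 * cleared its bit; for a fire-and-forget call the receiving CPU frees
 * it in the kfree() above.
 */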
static irqreturn_t ipi_handler(int irq, void *dev_instance)
{
	struct ipi_message *msg;
	struct ipi_message_queue *msg_queue;
	unsigned int cpu = smp_processor_id();

	platform_clear_ipi(cpu);

	msg_queue = &__get_cpu_var(ipi_msg_queue);
	msg_queue->count++;

	spin_lock(&msg_queue->lock);
	while (!list_empty(&msg_queue->head)) {
		msg = list_entry(msg_queue->head.next, typeof(*msg), list);
		list_del(&msg->list);
		switch (msg->type) {
		case BFIN_IPI_RESCHEDULE:
			/* That's the easiest one; leave it to
			 * return_from_int. */
			kfree(msg);
			break;
		case BFIN_IPI_CALL_FUNC:
			spin_unlock(&msg_queue->lock);
			ipi_call_function(cpu, msg);
			spin_lock(&msg_queue->lock);
			break;
		case BFIN_IPI_CPU_STOP:
			spin_unlock(&msg_queue->lock);
			ipi_cpu_stop(cpu);
			spin_lock(&msg_queue->lock);
			kfree(msg);
			break;
		default:
			printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%lx\n",
			       cpu, msg->type);
			kfree(msg);
			break;
		}
	}
	spin_unlock(&msg_queue->lock);
	return IRQ_HANDLED;
}
static void ipi_queue_init(void)
{
	unsigned int cpu;
	struct ipi_message_queue *msg_queue;

	for_each_possible_cpu(cpu) {
		msg_queue = &per_cpu(ipi_msg_queue, cpu);
		INIT_LIST_HEAD(&msg_queue->head);
		spin_lock_init(&msg_queue->lock);
		msg_queue->count = 0;
	}
}
int smp_call_function(void (*func)(void *info), void *info, int wait)
{
	unsigned int cpu;
	cpumask_t callmap;
	unsigned long flags;
	struct ipi_message_queue *msg_queue;
	struct ipi_message *msg;

	callmap = cpu_online_map;
	cpu_clear(smp_processor_id(), callmap);
	if (cpus_empty(callmap))
		return 0;

	msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
	if (!msg)
		return -ENOMEM;
	INIT_LIST_HEAD(&msg->list);
	msg->call_struct.func = func;
	msg->call_struct.info = info;
	msg->call_struct.wait = wait;
	msg->call_struct.pending = callmap;
	msg->call_struct.waitmask = callmap;
	msg->type = BFIN_IPI_CALL_FUNC;

	for_each_cpu_mask(cpu, callmap) {
		msg_queue = &per_cpu(ipi_msg_queue, cpu);
		spin_lock_irqsave(&msg_queue->lock, flags);
		list_add_tail(&msg->list, &msg_queue->head);
		spin_unlock_irqrestore(&msg_queue->lock, flags);
		platform_send_ipi_cpu(cpu);
	}

	if (wait) {
		/* Spin until every target clears its bit in waitmask,
		 * re-invalidating the cache line holding waitmask on
		 * each pass so remote updates become visible. */
		while (!cpus_empty(msg->call_struct.waitmask))
			blackfin_dcache_invalidate_range(
				(unsigned long)(&msg->call_struct.waitmask),
				(unsigned long)(&msg->call_struct.waitmask));
#ifdef __ARCH_SYNC_CORE_DCACHE
		/*
		 * Invalidate D cache in case shared data was changed by
		 * other processors to ensure cache coherence.
		 */
		resync_core_dcache();
#endif
		kfree(msg);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(smp_call_function);
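/*
 * Illustrative usage (not part of this file): run a callback on all
 * other online CPUs and wait for completion. 'set_flag' and 'flag' are
 * made-up example names.
 *
 *	static void set_flag(void *info)
 *	{
 *		*(int *)info = 1;
 *	}
 *
 *	int flag = 0;
 *	smp_call_function(set_flag, &flag, 1);
 *	// on return, every other online CPU has executed set_flag()
 */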
int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
				int wait)
{
	unsigned int cpu = cpuid;
	cpumask_t callmap;
	unsigned long flags;
	struct ipi_message_queue *msg_queue;
	struct ipi_message *msg;

	if (cpu_is_offline(cpu))
		return 0;
	cpus_clear(callmap);
	cpu_set(cpu, callmap);

	msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
	if (!msg)
		return -ENOMEM;
	INIT_LIST_HEAD(&msg->list);
	msg->call_struct.func = func;
	msg->call_struct.info = info;
	msg->call_struct.wait = wait;
	msg->call_struct.pending = callmap;
	msg->call_struct.waitmask = callmap;
	msg->type = BFIN_IPI_CALL_FUNC;

	msg_queue = &per_cpu(ipi_msg_queue, cpu);
	spin_lock_irqsave(&msg_queue->lock, flags);
	list_add_tail(&msg->list, &msg_queue->head);
	spin_unlock_irqrestore(&msg_queue->lock, flags);
	platform_send_ipi_cpu(cpu);

	if (wait) {
		while (!cpus_empty(msg->call_struct.waitmask))
			blackfin_dcache_invalidate_range(
				(unsigned long)(&msg->call_struct.waitmask),
				(unsigned long)(&msg->call_struct.waitmask));
#ifdef __ARCH_SYNC_CORE_DCACHE
		/*
		 * Invalidate D cache in case shared data was changed by
		 * other processors to ensure cache coherence.
		 */
		resync_core_dcache();
#endif
		kfree(msg);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(smp_call_function_single);
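/*
 * Illustrative usage (not part of this file): the single-CPU variant
 * targets one core, e.g. asking CPU 1 to run the hypothetical
 * set_flag() callback sketched above and waiting for it to finish:
 *
 *	smp_call_function_single(1, set_flag, &flag, 1);
 */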
void smp_send_reschedule(int cpu)
{
	unsigned long flags;
	struct ipi_message_queue *msg_queue;
	struct ipi_message *msg;

	if (cpu_is_offline(cpu))
		return;

	msg = kzalloc(sizeof(*msg), GFP_ATOMIC);
	if (!msg)
		return;
	INIT_LIST_HEAD(&msg->list);
	msg->type = BFIN_IPI_RESCHEDULE;

	msg_queue = &per_cpu(ipi_msg_queue, cpu);
	spin_lock_irqsave(&msg_queue->lock, flags);
	list_add_tail(&msg->list, &msg_queue->head);
	spin_unlock_irqrestore(&msg_queue->lock, flags);
	platform_send_ipi_cpu(cpu);
}
void smp_send_stop(void)
{
	unsigned int cpu;
	cpumask_t callmap;
	unsigned long flags;
	struct ipi_message_queue *msg_queue;
	struct ipi_message *msg;

	callmap = cpu_online_map;
	cpu_clear(smp_processor_id(), callmap);
	if (cpus_empty(callmap))
		return;

	msg = kzalloc(sizeof(*msg), GFP_ATOMIC);
	if (!msg)
		return;
	INIT_LIST_HEAD(&msg->list);
	msg->type = BFIN_IPI_CPU_STOP;

	for_each_cpu_mask(cpu, callmap) {
		msg_queue = &per_cpu(ipi_msg_queue, cpu);
		spin_lock_irqsave(&msg_queue->lock, flags);
		list_add_tail(&msg->list, &msg_queue->head);
		spin_unlock_irqrestore(&msg_queue->lock, flags);
		platform_send_ipi_cpu(cpu);
	}
}
int __cpuinit __cpu_up(unsigned int cpu)
{
	int ret;
	static struct task_struct *idle;

	/* Reclaim the idle task left over from an earlier (failed or
	 * unplugged) bring-up before forking a fresh one. */
	if (idle)
		free_task(idle);

	idle = fork_idle(cpu);
	if (IS_ERR(idle)) {
		printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
		return PTR_ERR(idle);
	}

	secondary_stack = task_stack_page(idle) + THREAD_SIZE;
	ret = platform_boot_secondary(cpu, idle);
	secondary_stack = NULL;

	return ret;
}
static void __cpuinit setup_secondary(unsigned int cpu)
{
	unsigned long ilat;

	/* Clear any latched interrupts (ILAT is write-one-to-clear). */
	ilat = bfin_read_ILAT();
	CSYNC();
	bfin_write_ILAT(ilat);
	CSYNC();

	/* Enable interrupt levels IVG7-15. IARs have been already
	 * programmed by the boot CPU. */
	bfin_irq_flags |= IMASK_IVG15 |
	    IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
	    IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
}
void __cpuinit secondary_start_kernel(void)
{
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm = &init_mm;

	if (_bfin_swrst & SWRST_DBL_FAULT_B) {
		printk(KERN_EMERG "CoreB Recovering from DOUBLE FAULT event\n");
#ifdef CONFIG_DEBUG_DOUBLEFAULT
		printk(KERN_EMERG " While handling exception (EXCAUSE = 0x%x) at %pF\n",
			(int)init_saved_seqstat_coreb & SEQSTAT_EXCAUSE,
			init_saved_retx_coreb);
		printk(KERN_NOTICE "   DCPLB_FAULT_ADDR: %pF\n",
			init_saved_dcplb_fault_addr_coreb);
		printk(KERN_NOTICE "   ICPLB_FAULT_ADDR: %pF\n",
			init_saved_icplb_fault_addr_coreb);
#endif
		printk(KERN_NOTICE " The instruction at %pF caused a double exception\n",
			init_retx_coreb);
	}

	/*
	 * We want the D-cache to be enabled early, in case the atomic
	 * support code emulates cache coherence (see
	 * __ARCH_SYNC_CORE_DCACHE).
	 */
	init_exception_vectors();

	bfin_setup_caches(cpu);

	local_irq_disable();

	/* Attach the new idle task to the global mm. */
	atomic_inc(&mm->mm_users);
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;

	preempt_disable();

	setup_secondary(cpu);

	platform_secondary_init(cpu);

	/* setup local core timer */
	bfin_local_timer_setup();

	local_irq_enable();

	/*
	 * Calibrate loops per jiffy value.
	 * IRQs need to be enabled here - D-cache can be invalidated
	 * in timer irq handler, so core B can read correct jiffies.
	 */
	calibrate_delay();

	cpu_idle();
}
void __init smp_prepare_boot_cpu(void)
{
}
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	platform_prepare_cpus(max_cpus);
	ipi_queue_init();
	platform_request_ipi(&ipi_handler);
}
void __init smp_cpus_done(unsigned int max_cpus)
{
	unsigned long bogosum = 0;
	unsigned int cpu;

	for_each_online_cpu(cpu)
		bogosum += loops_per_jiffy;

	/* BogoMIPS = loops_per_jiffy * HZ / 500000, printed as X.YY */
	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);
}
void smp_icache_flush_range_others(unsigned long start, unsigned long end)
{
	smp_flush_data.start = start;
	smp_flush_data.end = end;

	if (smp_call_function(&ipi_flush_icache, &smp_flush_data, 0))
		printk(KERN_WARNING "SMP: failed to run I-cache flush request on other CPUs\n");
}
EXPORT_SYMBOL_GPL(smp_icache_flush_range_others);
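/*
 * Illustrative usage (not part of this file): after patching kernel
 * text in place, flush the local I-cache for that range, then ask the
 * other core(s) to do the same; 'addr' and 'len' are example names:
 *
 *	blackfin_icache_flush_range(addr, addr + len);
 *	smp_icache_flush_range_others(addr, addr + len);
 */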
#ifdef __ARCH_SYNC_CORE_ICACHE
unsigned long icache_invld_count[NR_CPUS];
void resync_core_icache(void)
{
	unsigned int cpu = get_cpu();
	blackfin_invalidate_entire_icache();
	icache_invld_count[cpu]++;
	put_cpu();
}
EXPORT_SYMBOL(resync_core_icache);
#endif
#ifdef __ARCH_SYNC_CORE_DCACHE
unsigned long dcache_invld_count[NR_CPUS];
unsigned long barrier_mask __attribute__ ((__section__(".l2.bss")));

void resync_core_dcache(void)
{
	unsigned int cpu = get_cpu();
	blackfin_invalidate_entire_dcache();
	dcache_invld_count[cpu]++;
	put_cpu();
}
EXPORT_SYMBOL(resync_core_dcache);
#endif
#ifdef CONFIG_HOTPLUG_CPU
int __cpuexit __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == 0)
		return -EPERM;

	set_cpu_online(cpu, false);
	return 0;
}

static DECLARE_COMPLETION(cpu_killed);

int __cpuexit __cpu_die(unsigned int cpu)
{
	return wait_for_completion_timeout(&cpu_killed, 5000);
}

void cpu_die(void)
{
	complete(&cpu_killed);

	atomic_dec(&init_mm.mm_users);
	atomic_dec(&init_mm.mm_count);

	local_irq_disable();
	platform_cpu_die();
}
#endif