/*
 * Copyright (C) 2009,2010,2011 Imagination Technologies Ltd.
 *
 * Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/bootmem.h>

#include <asm/cacheflush.h>
#include <asm/cachepart.h>
#include <asm/core_reg.h>
#include <asm/cpu.h>
#include <asm/global_lock.h>
#include <asm/metag_mem.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/hwthread.h>
#include <asm/traps.h>
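
/*
 * Each hardware thread has its own cache partition control registers,
 * laid out at a fixed stride above the thread 0 registers.
 */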
#define SYSC_DCPART(n)	(SYSC_DCPART0 + SYSC_xCPARTn_STRIDE * (n))
#define SYSC_ICPART(n)	(SYSC_ICPART0 + SYSC_xCPARTn_STRIDE * (n))

DECLARE_PER_CPU(PTBI, pTBI);
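
/*
 * Written by __cpu_up() so the secondary boot path can find the new
 * CPU's idle thread stack.
 */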
void *secondary_data_stack;

/*
 * structures for inter-processor calls
 * - A collection of single bit ipi messages.
 */
struct ipi_data {
	spinlock_t lock;
	unsigned long ipi_count;
	unsigned long bits;
};

static DEFINE_PER_CPU(struct ipi_data, ipi_data) = {
	.lock	= __SPIN_LOCK_UNLOCKED(ipi_data.lock),
};
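
/* Serialises the secondary thread boot sequence in boot_secondary(). */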
static DEFINE_SPINLOCK(boot_lock);
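
/* Completed by secondary_start_kernel() once the new CPU is online. */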
static DECLARE_COMPLETION(cpu_running);
69 * "thread" is assumed to be a valid Meta hardware thread ID.
71 int __cpuinit boot_secondary(unsigned int thread, struct task_struct *idle)
76 * set synchronisation state between this boot processor
77 * and the secondary one
79 spin_lock(&boot_lock);
81 core_reg_write(TXUPC_ID, 0, thread, (unsigned int)secondary_startup);
82 core_reg_write(TXUPC_ID, 1, thread, 0);
85 * Give the thread privilege (PSTAT) and clear potentially problematic
86 * bits in the process (namely ISTAT, CBMarker, CBMarkerI, LSM_STEP).
88 core_reg_write(TXUCT_ID, TXSTATUS_REGNUM, thread, TXSTATUS_PSTAT_BIT);
90 /* Clear the minim enable bit. */
91 val = core_reg_read(TXUCT_ID, TXPRIVEXT_REGNUM, thread);
92 core_reg_write(TXUCT_ID, TXPRIVEXT_REGNUM, thread, val & ~0x80);
95 * set the ThreadEnable bit (0x1) in the TXENABLE register
96 * for the specified thread - off it goes!
98 val = core_reg_read(TXUCT_ID, TXENABLE_REGNUM, thread);
99 core_reg_write(TXUCT_ID, TXENABLE_REGNUM, thread, val | 0x1);
102 * now the secondary core is starting up let it run its
103 * calibrations, then wait for it to finish
105 spin_unlock(&boot_lock);
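
/*
 * In the *CPART* registers the OR field encodes a partition's start
 * offset and the AND field (plus one) its size, both in sixteenths of
 * the cache - hence the ">> 4" scaling in the log messages below.
 */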

/*
 * describe_cachepart_change: describe a change to cache partitions.
 * @thread:	Hardware thread number.
 * @label:	Label of cache type, e.g. "dcache" or "icache".
 * @sz:		Total size of the cache.
 * @old:	Old cache partition configuration (*CPART* register).
 * @new:	New cache partition configuration (*CPART* register).
 *
 * If the cache partition has changed, prints a message to the log describing
 * those changes.
 */
static __cpuinit void describe_cachepart_change(unsigned int thread,
						const char *label,
						unsigned int sz,
						unsigned int old,
						unsigned int new)
{
	unsigned int lor1, land1, gor1, gand1;
	unsigned int lor2, land2, gor2, gand2;
	unsigned int diff = old ^ new;

	if (!diff)
		return;

	pr_info("Thread %d: %s partition changed:", thread, label);
	if (diff & (SYSC_xCPARTL_OR_BITS | SYSC_xCPARTL_AND_BITS)) {
		lor1 = (old & SYSC_xCPARTL_OR_BITS) >> SYSC_xCPARTL_OR_S;
		lor2 = (new & SYSC_xCPARTL_OR_BITS) >> SYSC_xCPARTL_OR_S;
		land1 = (old & SYSC_xCPARTL_AND_BITS) >> SYSC_xCPARTL_AND_S;
		land2 = (new & SYSC_xCPARTL_AND_BITS) >> SYSC_xCPARTL_AND_S;
		pr_cont(" L:%#x+%#x->%#x+%#x",
			(lor1 * sz) >> 4,
			((land1 + 1) * sz) >> 4,
			(lor2 * sz) >> 4,
			((land2 + 1) * sz) >> 4);
	}
	if (diff & (SYSC_xCPARTG_OR_BITS | SYSC_xCPARTG_AND_BITS)) {
		gor1 = (old & SYSC_xCPARTG_OR_BITS) >> SYSC_xCPARTG_OR_S;
		gor2 = (new & SYSC_xCPARTG_OR_BITS) >> SYSC_xCPARTG_OR_S;
		gand1 = (old & SYSC_xCPARTG_AND_BITS) >> SYSC_xCPARTG_AND_S;
		gand2 = (new & SYSC_xCPARTG_AND_BITS) >> SYSC_xCPARTG_AND_S;
		pr_cont(" G:%#x+%#x->%#x+%#x",
			(gor1 * sz) >> 4,
			((gand1 + 1) * sz) >> 4,
			(gor2 * sz) >> 4,
			((gand2 + 1) * sz) >> 4);
	}
	if (diff & SYSC_CWRMODE_BIT)
		pr_cont(" %sWR",
			(new & SYSC_CWRMODE_BIT) ? "+" : "-");
	if (diff & SYSC_DCPART_GCON_BIT)
		pr_cont(" %sGCOn",
			(new & SYSC_DCPART_GCON_BIT) ? "+" : "-");
	pr_cont("\n");
}

/*
 * setup_smp_cache: ensure cache coherency for new SMP thread.
 * @thread:	New hardware thread number.
 *
 * Ensures that coherency is enabled and that the threads share the same cache
 * partitions.
 */
static __cpuinit void setup_smp_cache(unsigned int thread)
{
	unsigned int this_thread, lflags;
	unsigned int dcsz, dcpart_this, dcpart_old, dcpart_new;
	unsigned int icsz, icpart_old, icpart_new;

	/*
	 * Copy over the current thread's cache partition configuration to the
	 * new thread so that they share cache partitions.
	 */
	__global_lock2(lflags);
	this_thread = hard_processor_id();
	/* Share dcache partition */
	dcpart_this = metag_in32(SYSC_DCPART(this_thread));
	dcpart_old = metag_in32(SYSC_DCPART(thread));
	dcpart_new = dcpart_this;
#if PAGE_OFFSET < LINGLOBAL_BASE
	/*
	 * For the local data cache to be coherent the threads must also have
	 * GCOn enabled.
	 */
	dcpart_new |= SYSC_DCPART_GCON_BIT;
	metag_out32(dcpart_new, SYSC_DCPART(this_thread));
#endif
	metag_out32(dcpart_new, SYSC_DCPART(thread));
	/* Share icache partition too */
	icpart_new = metag_in32(SYSC_ICPART(this_thread));
	icpart_old = metag_in32(SYSC_ICPART(thread));
	metag_out32(icpart_new, SYSC_ICPART(thread));
	__global_unlock2(lflags);

	/*
	 * Log if the cache partitions were altered so the user is aware of any
	 * potential unintentional cache wastage.
	 */
	dcsz = get_dcache_size();
	icsz = get_icache_size();
	describe_cachepart_change(this_thread, "dcache", dcsz,
				  dcpart_this, dcpart_new);
	describe_cachepart_change(thread, "dcache", dcsz,
				  dcpart_old, dcpart_new);
	describe_cachepart_change(thread, "icache", icsz,
				  icpart_old, icpart_new);
}
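
/*
 * Boot the hardware thread behind Linux CPU "cpu": point it at the
 * kernel page tables, share this thread's cache partitions with it,
 * pass it the idle task's stack, then start it and wait for it to
 * come online.
 */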
int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	unsigned int thread = cpu_2_hwthread_id[cpu];
	int ret;

	load_pgd(swapper_pg_dir, thread);

	flush_tlb_all();

	setup_smp_cache(thread);

	/*
	 * Tell the secondary CPU where to find its idle thread's stack.
	 */
	secondary_data_stack = task_stack_page(idle);

	wmb();

	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(thread, idle);
	if (ret == 0) {
		/*
		 * CPU was successfully started, wait for it
		 * to come online or time out.
		 */
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));

		if (!cpu_online(cpu))
			ret = -EIO;
	}

	secondary_data_stack = NULL;

	if (ret) {
		pr_crit("CPU%u: processor failed to boot\n", cpu);

		/*
		 * FIXME: We need to clean up the new idle thread. --rmk
		 */
	}

	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
static DECLARE_COMPLETION(cpu_killed);

/*
 * __cpu_disable runs on the processor to be shutdown.
 */
int __cpuexit __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	/*
	 * Take this CPU offline. Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 */
	flush_cache_all();
	local_flush_tlb_all();

	clear_tasks_mm_cpumask(cpu);

	return 0;
}

/*
 * called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or times out.
 */
void __cpuexit __cpu_die(unsigned int cpu)
{
	if (!wait_for_completion_timeout(&cpu_killed, msecs_to_jiffies(1)))
		pr_err("CPU%u: unable to kill\n", cpu);
}

/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we do not return from this function. If this cpu is
 * brought online again it will need to run secondary_startup().
 */
void __cpuexit cpu_die(void)
{
	local_irq_disable();
	idle_task_exit();

	complete(&cpu_killed);

	/* Write 0 to TXENABLE (XOR of a register with itself) to clear
	 * ThreadEnable and halt this hardware thread. */
	asm ("XOR	TXENABLE, D0Re0,D0Re0\n");
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
void __cpuinit smp_store_cpu_info(unsigned int cpuid)
{
	struct cpuinfo_metag *cpu_info = &per_cpu(cpu_data, cpuid);

	cpu_info->loops_per_jiffy = loops_per_jiffy;
}

/*
 * This is the secondary CPU boot entry. We're using this CPU's
 * idle thread stack and the global page tables.
 */
asmlinkage void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	atomic_inc(&mm->mm_users);
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();

	/*
	 * TODO: Some day it might be useful for each Linux CPU to
	 * have its own TBI structure. That would allow each Linux CPU
	 * to run different interrupt handlers for the same IRQ
	 * number.
	 *
	 * For now, simply copying the pointer to the boot CPU's TBI
	 * structure is sufficient because we always want to run the
	 * same interrupt handler whatever CPU takes the interrupt.
	 */
	per_cpu(pTBI, cpu) = __TBI(TBID_ISTAT_BIT);

	if (!per_cpu(pTBI, cpu))
		panic("No TBI found!");

	per_cpu_trap_init(cpu);

	preempt_disable();

	notify_cpu_starting(cpu);

	pr_info("CPU%u (thread %u): Booted secondary processor\n",
		cpu, cpu_2_hwthread_id[cpu]);

	calibrate_delay();
	smp_store_cpu_info(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue
	 */
	set_cpu_online(cpu, true);
	complete(&cpu_running);

	/*
	 * Enable local interrupts.
	 */
	tbi_startup_interrupt(TBID_SIGNUM_TRT);
	local_irq_enable();

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_ONLINE);
}
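
/*
 * Sum loops_per_jiffy over all online CPUs and report the aggregate
 * BogoMIPS figure (integer part and two decimal places).
 */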
void __init smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for_each_online_cpu(cpu)
		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

	pr_info("SMP: Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
		num_online_cpus(),
		bogosum / (500000/HZ),
		(bogosum / (5000/HZ)) % 100);
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu = smp_processor_id();

	init_new_context(current, &init_mm);
	current_thread_info()->cpu = cpu;

	smp_store_cpu_info(cpu);
	init_cpu_present(cpu_possible_mask);
}

void __init smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	per_cpu(pTBI, cpu) = __TBI(TBID_ISTAT_BIT);

	if (!per_cpu(pTBI, cpu))
		panic("No TBI found!");
}
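
/*
 * The platform-specific cross-CPU call is implemented below in terms
 * of hardware KICK interrupts; see kick_raise_softirq().
 */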
static void smp_cross_call(cpumask_t callmap, enum ipi_msg_type msg);

static void send_ipi_message(const struct cpumask *mask, enum ipi_msg_type msg)
{
	unsigned long flags;
	unsigned int cpu;
	cpumask_t map;

	cpumask_clear(&map);
	local_irq_save(flags);

	for_each_cpu(cpu, mask) {
		struct ipi_data *ipi = &per_cpu(ipi_data, cpu);

		spin_lock(&ipi->lock);

		/*
		 * KICK interrupts are queued in hardware so we'll get
		 * multiple interrupts if we call smp_cross_call()
		 * multiple times for one msg. The problem is that we
		 * only have one bit for each message - we can't queue
		 * them.
		 *
		 * The first time through ipi_handler() we'll clear
		 * the msg bit, having done all the work. But when we
		 * return we'll get _another_ interrupt (and another,
		 * and another until we've handled all the queued
		 * KICKs). Running ipi_handler() when there's no work
		 * to do is bad because that's how kick handler
		 * chaining detects who the KICK was intended for.
		 * See arch/metag/kernel/kick.c for more details.
		 *
		 * So only add 'cpu' to 'map' if we haven't already
		 * queued a KICK interrupt for 'msg'.
		 */
		if (!(ipi->bits & (1 << msg))) {
			ipi->bits |= 1 << msg;
			cpumask_set_cpu(cpu, &map);
		}

		spin_unlock(&ipi->lock);
	}

	/*
	 * Call the platform specific cross-CPU call function.
	 */
	smp_cross_call(map, msg);

	local_irq_restore(flags);
}
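
/* Generic SMP entry points, implemented via send_ipi_message(). */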
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

void show_ipi_list(struct seq_file *p)
{
	unsigned int cpu;

	seq_puts(p, "IPI:");

	for_each_present_cpu(cpu)
		seq_printf(p, " %10lu", per_cpu(ipi_data, cpu).ipi_count);

	seq_putc(p, '\n');
}
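
/* Serialises the diagnostics printed by stop_this_cpu(). */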
static DEFINE_SPINLOCK(stop_lock);

/*
 * Main handler for inter-processor interrupts
 *
 * For Meta, the ipimask now only identifies a single
 * category of IPI (Bit 1 IPIs have been replaced by a
 * different mechanism):
 *
 *  Bit 0 - Inter-processor function call
 */
static int do_IPI(struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned long msgs, nextmsg;
	int handled = 0;

	ipi->ipi_count++;

	spin_lock(&ipi->lock);
	msgs = ipi->bits;
	/* Isolate the lowest set message bit. */
	nextmsg = msgs & -msgs;
	ipi->bits &= ~nextmsg;
	spin_unlock(&ipi->lock);

	if (nextmsg) {
		handled = 1;

		/* Convert the single-bit mask into its bit number. */
		nextmsg = ffz(~nextmsg);
		switch (nextmsg) {
		case IPI_RESCHEDULE:
			scheduler_ipi();
			break;

		case IPI_CALL_FUNC:
			generic_smp_call_function_interrupt();
			break;

		case IPI_CALL_FUNC_SINGLE:
			generic_smp_call_function_single_interrupt();
			break;

		default:
			pr_crit("CPU%u: Unknown IPI message 0x%lx\n",
				cpu, nextmsg);
			break;
		}
	}

	set_irq_regs(old_regs);

	return handled;
}

void smp_send_reschedule(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}

static void stop_this_cpu(void *data)
{
	unsigned int cpu = smp_processor_id();

	if (system_state == SYSTEM_BOOTING ||
	    system_state == SYSTEM_RUNNING) {
		spin_lock(&stop_lock);
		pr_crit("CPU%u: stopping\n", cpu);
		dump_stack();
		spin_unlock(&stop_lock);
	}

	set_cpu_online(cpu, false);

	local_irq_disable();

	hard_processor_halt(HALT_OK);
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

/*
 * We use KICKs for inter-processor interrupts.
 *
 * For every CPU in "callmap" the IPI data must already have been
 * stored in that CPU's "ipi_data" member prior to calling this
 * function.
 */
static void kick_raise_softirq(cpumask_t callmap, unsigned int irq)
{
	int cpu;

	for_each_cpu(cpu, &callmap) {
		int thread;

		thread = cpu_2_hwthread_id[cpu];

		BUG_ON(thread == BAD_HWTHREAD_ID);

		/* Raise a KICK interrupt on the target hardware thread. */
		metag_out32(1, T0KICKI + (thread * TnXKICK_STRIDE));
	}
}
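
/*
 * TBI kick handler: run do_IPI() and report via *handled whether this
 * KICK was for us, so kick handler chaining can pass unclaimed KICKs
 * on to the next handler.
 */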
static TBIRES ipi_handler(TBIRES State, int SigNum, int Triggers,
			  int Inst, PTBI pTBI, int *handled)
{
	*handled = do_IPI((struct pt_regs *)State.Sig.pCtx);

	return State;
}

static struct kick_irq_handler ipi_irq = {
	.func = ipi_handler,
};

static void smp_cross_call(cpumask_t callmap, enum ipi_msg_type msg)
{
	kick_raise_softirq(callmap, 1);
}

static inline unsigned int get_core_count(void)
{
	int i;
	unsigned int ret = 0;

	for (i = 0; i < CONFIG_NR_CPUS; i++) {
		/* Count hardware threads whose TXENABLE reads non-zero. */
		if (core_reg_read(TXUCT_ID, TXENABLE_REGNUM, i))
			ret++;
	}

	return ret;
}

/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */
void __init smp_init_cpus(void)
{
	unsigned int i, ncores = get_core_count();

	/* If no hwthread_map early param was set use default mapping */
	for (i = 0; i < NR_CPUS; i++)
		if (cpu_2_hwthread_id[i] == BAD_HWTHREAD_ID) {
			cpu_2_hwthread_id[i] = i;
			hwthread_id_2_cpu[i] = i;
		}

	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);

	kick_register_func(&ipi_irq);
}