#include <linux/types.h>
#include <asm/delay.h>

#include <hwregs/intr_vect.h>
#include <hwregs/intr_vect_defs.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <hwregs/asm/mmu_defs_asm.h>
#include <hwregs/supp_reg.h>
#include <asm/atomic.h>

#include <linux/err.h>
#include <linux/init.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/module.h>

#define IPI_SCHEDULE 1
#define IPI_CALL 2
#define IPI_FLUSH_TLB 4
#define IPI_BOOT 8

#define FLUSH_ALL (void*)0xffffffff

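/*
 * FLUSH_ALL is a sentinel pointer value, never dereferenced; it is passed
 * in place of a real mm or vma to request a coarser flush. The three call
 * shapes used further down in this file are:
 *
 *      flush_tlb_common(FLUSH_ALL, FLUSH_ALL, 0);      flush everything
 *      flush_tlb_common(mm, FLUSH_ALL, 0);             flush one mm
 *      flush_tlb_common(vma->vm_mm, vma, addr);        flush one page
 */
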
/* Vector of locks used for various atomic operations */
spinlock_t cris_atomic_locks[] = {
        [0 ... LOCK_COUNT - 1] = __SPIN_LOCK_UNLOCKED(cris_atomic_locks)
};

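/*
 * Illustrative sketch, not code from this file: CRIS lacks atomic
 * read-modify-write instructions, so the arch atomic operations are
 * expected to hash the atomic_t's address into this lock vector and hold
 * the chosen lock around a plain load-modify-store. The macro and function
 * names below are assumptions for illustration only; the real definitions
 * live in the arch's atomic.h.
 *
 *      #define EXAMPLE_ATOMIC_HASH(v) \
 *              (&cris_atomic_locks[(unsigned long)(v) % LOCK_COUNT])
 *
 *      static inline void example_atomic_add(int i, volatile atomic_t *v)
 *      {
 *              unsigned long flags;
 *              spin_lock_irqsave(EXAMPLE_ATOMIC_HASH(v), flags);
 *              v->counter += i;
 *              spin_unlock_irqrestore(EXAMPLE_ATOMIC_HASH(v), flags);
 *      }
 */
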
cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
EXPORT_SYMBOL(phys_cpu_present_map);

/* Variables used during SMP boot */
volatile int cpu_now_booting = 0;
volatile struct thread_info *smp_init_current_idle_thread;

/* Variables used during IPI */
static DEFINE_SPINLOCK(call_lock);
static DEFINE_SPINLOCK(tlbstate_lock);

struct call_data_struct {
        void (*func) (void *info);
        void *info;
        int wait;
};

static struct call_data_struct *call_data;

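/*
 * call_data points at the caller's on-stack call_data_struct while a
 * cross-CPU function call is in flight. call_lock (above) ensures only one
 * such call is active at a time, since crisv32_ipi_interrupt() reads
 * call_data without taking any lock.
 */
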
static struct mm_struct *flush_mm;
static struct vm_area_struct *flush_vma;
static unsigned long flush_addr;

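/*
 * flush_mm, flush_vma and flush_addr carry the arguments of a TLB flush
 * request over to the IPI handler on the other CPUs. flush_tlb_common()
 * holds tlbstate_lock and sends the IPI with wait set, so these stay
 * stable until every target CPU has done its flush.
 */
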
/* Interrupt controller register bases, one per CPU */
static unsigned long irq_regs[NR_CPUS] = {
        regi_irq,
        regi_irq2
};

static irqreturn_t crisv32_ipi_interrupt(int irq, void *dev_id);
static int send_ipi(int vector, int wait, cpumask_t cpu_mask);
static struct irqaction irq_ipi = {
        .handler = crisv32_ipi_interrupt,
        .flags = IRQF_DISABLED,
        .name = "ipi"
};

extern void cris_mmu_init(void);
extern void cris_timer_init(void);

/* SMP initialization */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
        int i;

        /* From now on we can expect IPIs so set them up */
        setup_irq(IPI_INTR_VECT, &irq_ipi);

        /* Mark all possible CPUs as present */
        for (i = 0; i < max_cpus; i++)
                cpu_set(i, phys_cpu_present_map);
}

void __devinit smp_prepare_boot_cpu(void)
{
        /* The PGD pointer has moved after the per_cpu initialization, so
         * update the MMU's view of it in both support register banks. */
        pgd_t **pgd;
        pgd = (pgd_t **)&per_cpu(current_pgd, smp_processor_id());

        SUPP_BANK_SEL(1);
        SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
        SUPP_BANK_SEL(2);
        SUPP_REG_WR(RW_MM_TLB_PGD, pgd);

        set_cpu_online(0, true);
        cpu_set(0, phys_cpu_present_map);
        set_cpu_possible(0, true);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

/* Bring one cpu online. */
static int __init
smp_boot_one_cpu(int cpuid)
{
        unsigned timeout;
        struct task_struct *idle;
        cpumask_t cpu_mask = CPU_MASK_NONE;

        idle = fork_idle(cpuid);
        if (IS_ERR(idle))
                panic("SMP: fork failed for CPU:%d", cpuid);

        task_thread_info(idle)->cpu = cpuid;

        /* Information to the CPU that is about to boot */
        smp_init_current_idle_thread = task_thread_info(idle);
        cpu_now_booting = cpuid;

        /* Kick it */
        cpu_set(cpuid, cpu_online_map);
        cpu_set(cpuid, cpu_mask);
        send_ipi(IPI_BOOT, 0, cpu_mask);
        cpu_clear(cpuid, cpu_online_map);

        /* Wait for CPU to come online */
        for (timeout = 0; timeout < 10000; timeout++) {
                if (cpu_online(cpuid)) {
                        cpu_now_booting = 0;
                        smp_init_current_idle_thread = NULL;
                        return 0; /* CPU online */
                }

                udelay(100);
                barrier();
        }

        /* Timed out; give up and release the idle task. */
        put_task_struct(idle);
        idle = NULL;

        printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid);
        return -1;
}

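/*
 * A note on the handshake above: cpu_now_booting and
 * smp_init_current_idle_thread are the hand-off to the new CPU.
 * smp_callin() below reads cpu_now_booting to learn its own id, and the
 * low-level boot code presumably picks up the idle thread_info before
 * entering C. The CPU is temporarily marked online before the boot IPI
 * only because send_ipi() masks its targets with cpu_online_map.
 */
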
/* Secondary CPUs start executing C code here. Here we need to set up CPU
 * specific stuff such as the local timer and the MMU. */
void __init smp_callin(void)
{
        extern void cpu_idle(void);

        int cpu = cpu_now_booting;
        reg_intr_vect_rw_mask vect_mask = {0};

        /* Initialise the idle task for this CPU */
        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;

        /* Set up the MMU. */
        cris_mmu_init();
        __flush_tlb_all();

        /* Setup local timer. */
        cris_timer_init();

        /* Enable IRQ and idle */
        REG_WR(intr_vect, irq_regs[cpu], rw_mask, vect_mask);
        unmask_irq(IPI_INTR_VECT);
        unmask_irq(TIMER0_INTR_VECT);
        preempt_disable();
        notify_cpu_starting(cpu);
        local_irq_enable();

        /* Go online last: the wait loop in smp_boot_one_cpu() polls this. */
        cpu_set(cpu, cpu_online_map);
        cpu_idle();
}

/* Stop execution on this CPU. */
void stop_this_cpu(void *dummy)
{
        local_irq_disable();
        asm volatile("halt");
}

/* Other calls */
void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, NULL, 0);
}

int setup_profiling_timer(unsigned int multiplier)
{
        /* Profiling timer rates cannot be changed on this platform. */
        return -EINVAL;
}

/* cache_decay_ticks is used by the scheduler to decide if a process
 * is "hot" on one CPU. A higher value means a higher penalty to move
 * a process to another CPU. Our cache is rather small so we report
 * 1 tick.
 */
unsigned long cache_decay_ticks = 1;

int __cpuinit __cpu_up(unsigned int cpu)
{
        smp_boot_one_cpu(cpu);
        return cpu_online(cpu) ? 0 : -ENOSYS;
}

void smp_send_reschedule(int cpu)
{
        cpumask_t cpu_mask = CPU_MASK_NONE;
        cpu_set(cpu, cpu_mask);
        send_ipi(IPI_SCHEDULE, 0, cpu_mask);
}

/* TLB flushing
 *
 * Flush needs to be done on the local CPU and on any other CPU that
 * may have the same mapping. The mm->cpu_vm_mask is used to keep track
 * of which CPUs a specific process has been executed on.
 */
void flush_tlb_common(struct mm_struct *mm, struct vm_area_struct *vma,
                      unsigned long addr)
{
        unsigned long flags;
        cpumask_t cpu_mask;

        spin_lock_irqsave(&tlbstate_lock, flags);
        cpu_mask = (mm == FLUSH_ALL ? cpu_all_mask : *mm_cpumask(mm));
        cpu_clear(smp_processor_id(), cpu_mask);
        flush_mm = mm;
        flush_vma = vma;
        flush_addr = addr;
        send_ipi(IPI_FLUSH_TLB, 1, cpu_mask);
        spin_unlock_irqrestore(&tlbstate_lock, flags);
}

void flush_tlb_all(void)
{
        __flush_tlb_all();
        flush_tlb_common(FLUSH_ALL, FLUSH_ALL, 0);
}

void flush_tlb_mm(struct mm_struct *mm)
{
        __flush_tlb_mm(mm);
        flush_tlb_common(mm, FLUSH_ALL, 0);
        /* No more mappings in other CPUs */
        cpumask_clear(mm_cpumask(mm));
        cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
}

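/*
 * flush_tlb_mm() can shrink mm_cpumask to just the local CPU because the
 * flush above waits for completion: no other CPU can still hold stale
 * entries for this mm, so later flushes skip the IPI until the mm is run
 * on another CPU again.
 */
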
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
        __flush_tlb_page(vma, addr);
        flush_tlb_common(vma->vm_mm, vma, addr);
}

/* Inter processor interrupts
 *
 * The IPIs are used for:
 *   * Forcing a schedule on a CPU
 *   * Flushing the TLB on other CPUs
 *   * Calling a function on other CPUs
 *
 * A wait for all CPUs to finish the operation can also be requested.
 */

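/*
 * Illustrative use of send_ipi(), modelled on smp_send_reschedule() below
 * (the CPU number is made up for the example):
 *
 *      cpumask_t mask = CPU_MASK_NONE;
 *      cpu_set(1, mask);
 *      send_ipi(IPI_SCHEDULE, 0, mask);        no wait for completion
 */
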
/*
 * Send the IPI vector(s) in 'vector' to the CPUs in cpu_mask.
 */
int send_ipi(int vector, int wait, cpumask_t cpu_mask)
{
        int i = 0;
        reg_intr_vect_rw_ipi ipi = REG_RD(intr_vect, irq_regs[i], rw_ipi);
        int ret = 0;

        /* Calculate CPUs to send to. */
        cpus_and(cpu_mask, cpu_mask, cpu_online_map);

        /* Send the IPI. */
        for_each_cpu_mask(i, cpu_mask) {
                ipi.vector |= vector;
                REG_WR(intr_vect, irq_regs[i], rw_ipi, ipi);
        }

        /* Wait for the IPI to finish on the other CPUs. */
        if (wait) {
                for_each_cpu_mask(i, cpu_mask) {
                        int j;
                        for (j = 0; j < 1000; j++) {
                                ipi = REG_RD(intr_vect, irq_regs[i], rw_ipi);
                                if (!ipi.vector)
                                        break;
                                udelay(100);
                        }

                        /* Timeout? */
                        if (ipi.vector) {
                                printk("SMP call timeout from %d to %d\n",
                                       smp_processor_id(), i);
                                ret = -ETIMEDOUT;
                                dump_stack();
                        }
                }
        }
        return ret;
}

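/*
 * The wait loop above relies on crisv32_ipi_interrupt() writing the IPI
 * vector back to zero once the target CPU has handled all pending bits;
 * the sender polls the target's rw_ipi register until it reads zero. With
 * 1000 polls and a udelay(100) between them this gives roughly 100 ms
 * before the timeout message is printed.
 */
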
/*
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(void (*func)(void *info), void *info, int wait)
{
        cpumask_t cpu_mask = CPU_MASK_ALL;
        struct call_data_struct data;
        int ret;

        cpu_clear(smp_processor_id(), cpu_mask);

        WARN_ON(irqs_disabled());

        data.func = func;
        data.info = info;
        data.wait = wait;

        spin_lock(&call_lock);
        call_data = &data;
        ret = send_ipi(IPI_CALL, wait, cpu_mask);
        spin_unlock(&call_lock);

        return ret;
}

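/*
 * Illustrative call (the only in-file user is smp_send_stop() above): run
 * a handler on every other CPU and wait for all of them to finish.
 * do_drain() is a made-up name for the example, not defined anywhere.
 *
 *      static void do_drain(void *unused)
 *      {
 *              ... per-CPU work; runs in interrupt context, must not sleep
 *      }
 *
 *      smp_call_function(do_drain, NULL, 1);
 */
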
irqreturn_t crisv32_ipi_interrupt(int irq, void *dev_id)
{
        void (*func) (void *info);
        void *info;
        reg_intr_vect_rw_ipi ipi;

        ipi = REG_RD(intr_vect, irq_regs[smp_processor_id()], rw_ipi);

        if (ipi.vector & IPI_CALL) {
                /* call_data is only valid for function-call IPIs. */
                func = call_data->func;
                info = call_data->info;
                func(info);
        }
        if (ipi.vector & IPI_FLUSH_TLB) {
                if (flush_mm == FLUSH_ALL)
                        __flush_tlb_all();
                else if (flush_vma == FLUSH_ALL)
                        __flush_tlb_mm(flush_mm);
                else
                        __flush_tlb_page(flush_vma, flush_addr);
        }

        /* Acknowledge the IPI. */
        ipi.vector = 0;
        REG_WR(intr_vect, irq_regs[smp_processor_id()], rw_ipi, ipi);

        return IRQ_HANDLED;
}