/* smp.c: Sparc64 SMP support.
 *
 * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/jiffies.h>
#include <linux/profile.h>
#include <linux/bootmem.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/slab.h>

#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cpudata.h>
#include <asm/hvtramp.h>
#include <asm/timer.h>
#include <asm/irq_regs.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/uaccess.h>
#include <asm/starfire.h>
#include <asm/sections.h>
#include <asm/mdesc.h>
#include <asm/hypervisor.h>
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
	{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_SYMBOL(cpu_core_map);

static cpumask_t smp_commenced_mask;
void smp_info(struct seq_file *m)
{
	int i;

	seq_printf(m, "State:\n");
	for_each_online_cpu(i)
		seq_printf(m, "CPU%d:\t\tonline\n", i);
}
void smp_bogo(struct seq_file *m)
{
	int i;

	for_each_online_cpu(i)
		seq_printf(m,
			   "Cpu%dClkTck\t: %016lx\n",
			   i, cpu_data(i).clock_tick);
}
extern void setup_sparc64_timer(void);

static volatile unsigned long callin_flag = 0;
void smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	__local_per_cpu_offset = __per_cpu_offset(cpuid);

	if (tlb_type == hypervisor)
		sun4v_ktsb_register();

	__flush_tlb_all();

	setup_sparc64_timer();

	if (cheetah_pcache_forced_on)
		cheetah_enable_pcache();

	callin_flag = 1;
	__asm__ __volatile__("membar #Sync\n\t"
			     "flush  %%g6" : : : "memory");

	/* Clear this or we will die instantly when we
	 * schedule back to this idler...
	 */
	current_thread_info()->new_child = 0;

	/* Attach to the address space of init_task. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	/* inform the notifiers about the new cpu */
	notify_cpu_starting(cpuid);

	while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
		rmb();

	set_cpu_online(cpuid, true);

	/* idle thread is expected to have preempt disabled */
	preempt_disable();

	local_irq_enable();

	cpu_startup_entry(CPUHP_ONLINE);
}

void cpu_panic(void)
{
	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
	panic("SMP bolixed\n");
}
/* This tick register synchronization scheme is taken entirely from
 * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
 *
 * The only change I've made is to rework it so that the master
 * initiates the synchronization instead of the slave.  -DaveM
 */

#define MASTER	0
#define SLAVE	(SMP_CACHE_BYTES/sizeof(unsigned long))

#define NUM_ROUNDS	64	/* magic value */
#define NUM_ITERS	5	/* likewise */

static DEFINE_RAW_SPINLOCK(itc_sync_lock);
static unsigned long go[SLAVE + 1];

#define DEBUG_TICK_SYNC	0
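/* The go[] array is the mailbox for the handshake between the two sides:
 * the slave announces itself by writing go[MASTER], and the master answers
 * by dropping its current tick value into go[SLAVE], which get_delta()
 * below then consumes.  SLAVE is an index one cache line away from MASTER,
 * presumably so the two flags never share a cache line.
 */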
static inline long get_delta (long *rt, long *master)
{
	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
	unsigned long tcenter, t0, t1, tm;
	unsigned long i;

	for (i = 0; i < NUM_ITERS; i++) {
		t0 = tick_ops->get_tick();
		go[MASTER] = 1;
		membar_safe("#StoreLoad");
		while (!(tm = go[SLAVE]))
			rmb();
		go[SLAVE] = 0;
		wmb();
		t1 = tick_ops->get_tick();

		if (t1 - t0 < best_t1 - best_t0)
			best_t0 = t0, best_t1 = t1, best_tm = tm;
	}

	*rt = best_t1 - best_t0;
	*master = best_tm - best_t0;

	/* average best_t0 and best_t1 without overflow: */
	tcenter = (best_t0/2 + best_t1/2);
	if (best_t0 % 2 + best_t1 % 2 == 2)
		tcenter++;
	return tcenter - best_tm;
}
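/* Illustrative numbers for the math above: with best_t0 = 1000,
 * best_t1 = 1010 and best_tm = 1003, the midpoint tcenter is 1005 and
 * get_delta() returns +2, i.e. this cpu's tick reads about 2 cycles
 * ahead of the master's.  The caller below compensates by adding the
 * negated delta to the local tick register.
 */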
void smp_synchronize_tick_client(void)
{
	long i, delta, adj, adjust_latency = 0, done = 0;
	unsigned long flags, rt, master_time_stamp;
#if DEBUG_TICK_SYNC
	struct {
		long rt;	/* roundtrip time */
		long master;	/* master's timestamp */
		long diff;	/* difference between midpoint and master's timestamp */
		long lat;	/* estimate of itc adjustment latency */
	} t[NUM_ROUNDS];
#endif

	go[MASTER] = 1;
	while (go[MASTER])
		rmb();

	local_irq_save(flags);
	for (i = 0; i < NUM_ROUNDS; i++) {
		delta = get_delta(&rt, &master_time_stamp);
		if (delta == 0)
			done = 1;	/* let's lock on to this... */

		if (!done) {
			if (i > 0) {
				adjust_latency += -delta;
				adj = -delta + adjust_latency/4;
			} else
				adj = -delta;

			tick_ops->add_tick(adj);
		}
#if DEBUG_TICK_SYNC
		t[i].rt = rt;
		t[i].master = master_time_stamp;
		t[i].diff = delta;
		t[i].lat = adjust_latency/4;
#endif
	}
	local_irq_restore(flags);

#if DEBUG_TICK_SYNC
	for (i = 0; i < NUM_ROUNDS; i++)
		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif

	printk(KERN_INFO "CPU %d: synchronized TICK with master CPU "
	       "(last diff %ld cycles, maxerr %lu cycles)\n",
	       smp_processor_id(), delta, rt);
}
static void smp_start_sync_tick_client(int cpu);

static void smp_synchronize_one_tick(int cpu)
{
	unsigned long flags, i;

	go[MASTER] = 0;
	smp_start_sync_tick_client(cpu);

	/* wait for client to be ready */
	while (!go[MASTER])
		rmb();

	/* now let the client proceed into his loop */
	go[MASTER] = 0;
	membar_safe("#StoreLoad");

	raw_spin_lock_irqsave(&itc_sync_lock, flags);
	for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
		while (!go[MASTER])
			rmb();
		go[MASTER] = 0;
		wmb();
		go[SLAVE] = tick_ops->get_tick();
		membar_safe("#StoreLoad");
	}
	raw_spin_unlock_irqrestore(&itc_sync_lock, flags);
}
#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
/* XXX Put this in some common place. XXX */
static unsigned long kimage_addr_to_ra(void *p)
{
	unsigned long val = (unsigned long) p;

	return kern_base + (val - KERNBASE);
}
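/* Kernel image virtual addresses are KERNBASE relative, so the helper
 * above rebases them onto kern_base to get the real address form that
 * the sun4v hypervisor calls below expect.
 */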
static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg,
				void **descrp)
{
	extern unsigned long sparc64_ttable_tl0;
	extern unsigned long kern_locked_tte_data;
	struct hvtramp_descr *hdesc;
	unsigned long trampoline_ra;
	struct trap_per_cpu *tb;
	u64 tte_vaddr, tte_data;
	unsigned long hv_err;
	int i;

	hdesc = kzalloc(sizeof(*hdesc) +
			(sizeof(struct hvtramp_mapping) *
			 num_kernel_image_mappings - 1),
			GFP_KERNEL);
	if (!hdesc) {
		printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
		       "hvtramp_descr.\n");
		return;
	}
	*descrp = hdesc;

	hdesc->cpu = cpu;
	hdesc->num_mappings = num_kernel_image_mappings;

	tb = &trap_block[cpu];

	hdesc->fault_info_va = (unsigned long) &tb->fault_info;
	hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info);

	hdesc->thread_reg = thread_reg;

	tte_vaddr = (unsigned long) KERNBASE;
	tte_data = kern_locked_tte_data;

	for (i = 0; i < hdesc->num_mappings; i++) {
		hdesc->maps[i].vaddr = tte_vaddr;
		hdesc->maps[i].tte   = tte_data;
		tte_vaddr += 0x400000;
		tte_data  += 0x400000;
	}

	trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);

	hv_err = sun4v_cpu_start(cpu, trampoline_ra,
				 kimage_addr_to_ra(&sparc64_ttable_tl0),
				 __pa(hdesc));
	if (hv_err)
		printk(KERN_ERR "ldom_startcpu_cpuid: sun4v_cpu_start() "
		       "gives error %lu\n", hv_err);
}
#endif
extern unsigned long sparc64_cpu_startup;

/* The OBP cpu startup callback truncates the 3rd arg cookie to
 * 32-bits (I think) so to be safe we have it read the pointer
 * contained here so we work on >4GB machines. -DaveM
 */
static struct thread_info *cpu_new_thread = NULL;
static int smp_boot_one_cpu(unsigned int cpu, struct task_struct *idle)
{
	unsigned long entry =
		(unsigned long)(&sparc64_cpu_startup);
	unsigned long cookie =
		(unsigned long)(&cpu_new_thread);
	void *descr = NULL;
	int timeout, ret;

	callin_flag = 0;
	cpu_new_thread = task_thread_info(idle);

	if (tlb_type == hypervisor) {
#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
		if (ldom_domaining_enabled)
			ldom_startcpu_cpuid(cpu,
					    (unsigned long) cpu_new_thread,
					    &descr);
		else
#endif
			prom_startcpu_cpuid(cpu, entry, cookie);
	} else {
		struct device_node *dp = of_find_node_by_cpuid(cpu);

		prom_startcpu(dp->phandle, entry, cookie);
	}

	for (timeout = 0; timeout < 50000; timeout++) {
		if (callin_flag)
			break;
		udelay(100);
	}

	if (callin_flag) {
		ret = 0;
	} else {
		printk("Processor %d is stuck.\n", cpu);
		ret = -ENODEV;
	}
	cpu_new_thread = NULL;

	kfree(descr);

	return ret;
}
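/* Boot handshake in brief: the boot cpu clears callin_flag, points
 * cpu_new_thread at the new cpu's idle thread, kicks the cpu via the
 * hypervisor or OBP, and then polls callin_flag for a bounded number of
 * short delays.  smp_callin() on the target cpu sets callin_flag once it
 * is up and running.
 */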
static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
{
	u64 result, target;
	int stuck, tmp;

	if (this_is_starfire) {
		/* map to real upaid */
		cpu = (((cpu & 0x3c) << 1) |
			((cpu & 0x40) >> 4) |
			(cpu & 0x3));
	}

	target = (cpu << 14) | 0x70;
again:
	/* Ok, this is the real Spitfire Errata #54.
	 * One must read back from a UDB internal register
	 * after writes to the UDB interrupt dispatch, but
	 * before the membar Sync for that write.
	 * So we use the high UDB control register (ASI 0x7f,
	 * ADDR 0x20) for the dummy read. -DaveM
	 */
	tmp = 0x40;
	__asm__ __volatile__(
	"wrpr	%1, %2, %%pstate\n\t"
	"stxa	%4, [%0] %3\n\t"
	"stxa	%5, [%0+%8] %3\n\t"
	"add	%0, %8, %0\n\t"
	"stxa	%6, [%0+%8] %3\n\t"
	"membar	#Sync\n\t"
	"stxa	%%g0, [%7] %3\n\t"
	"membar	#Sync\n\t"
	"mov	0x20, %%g1\n\t"
	"ldxa	[%%g1] 0x7f, %%g0\n\t"
	"membar	#Sync"
	: "=r" (tmp)
	: "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
	  "r" (data0), "r" (data1), "r" (data2), "r" (target),
	  "r" (0x10), "0" (tmp)
	: "g1");

	/* NOTE: PSTATE_IE is still clear. */
	stuck = 100000;
	do {
		__asm__ __volatile__("ldxa [%%g0] %1, %0"
			: "=r" (result)
			: "i" (ASI_INTR_DISPATCH_STAT));
		if (result == 0) {
			__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
					     : : "r" (pstate));
			return;
		}
		stuck -= 1;
		if (stuck == 0)
			break;
	} while (result & 0x1);

	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
			     : : "r" (pstate));
	if (stuck == 0) {
		printk("CPU[%d]: mondo stuckage result[%016llx]\n",
		       smp_processor_id(), result);
	} else {
		udelay(2);
		goto again;
	}
}
static void spitfire_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
	u64 *mondo, data0, data1, data2;
	u16 *cpu_list;
	u64 pstate;
	int i;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	cpu_list = __va(tb->cpu_list_pa);
	mondo = __va(tb->cpu_mondo_block_pa);
	data0 = mondo[0];
	data1 = mondo[1];
	data2 = mondo[2];
	for (i = 0; i < cnt; i++)
		spitfire_xcall_helper(data0, data1, data2, pstate, cpu_list[i]);
}
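/* The per-cpu trap block carries the cross call payload: cpu_mondo_block_pa
 * points at the three 64-bit mondo words (data0/data1/data2) and cpu_list_pa
 * at the u16 list of target cpu ids.  Spitfire has no multi-target dispatch,
 * so the helper above is simply invoked once per listed cpu.
 */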
/* Cheetah now allows to send the whole 64-bytes of data in the interrupt
 * packet, but we have no use for that.  However we do take advantage of
 * the new pipelining feature (ie. dispatch to multiple cpus simultaneously).
 */
static void cheetah_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
	int nack_busy_id, is_jbus, need_more;
	u64 *mondo, pstate, ver, busy_mask;
	u16 *cpu_list;

	cpu_list = __va(tb->cpu_list_pa);
	mondo = __va(tb->cpu_mondo_block_pa);

	/* Unfortunately, someone at Sun had the brilliant idea to make the
	 * busy/nack fields hard-coded by ITID number for this Ultra-III
	 * derivative processor.
	 */
	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
	is_jbus = ((ver >> 32) == __JALAPENO_ID ||
		   (ver >> 32) == __SERRANO_ID);

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));

retry:
	need_more = 0;
	__asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
			     : : "r" (pstate), "i" (PSTATE_IE));

	/* Setup the dispatch data registers. */
	__asm__ __volatile__("stxa %0, [%3] %6\n\t"
			     "stxa %1, [%4] %6\n\t"
			     "stxa %2, [%5] %6\n\t"
			     "membar #Sync\n\t"
			     : /* no outputs */
			     : "r" (mondo[0]), "r" (mondo[1]), "r" (mondo[2]),
			       "r" (0x40), "r" (0x50), "r" (0x60),
			       "i" (ASI_INTR_W));

	nack_busy_id = 0;
	busy_mask = 0;
	{
		int i;

		for (i = 0; i < cnt; i++) {
			u64 target, nr;

			nr = cpu_list[i];
			if (nr == 0xffff)
				continue;

			target = (nr << 14) | 0x70;
			if (is_jbus) {
				busy_mask |= (0x1UL << (nr * 2));
			} else {
				target |= (nack_busy_id << 24);
				busy_mask |= (0x1UL <<
					      (nack_busy_id * 2));
			}
			__asm__ __volatile__(
				"stxa	%%g0, [%0] %1\n\t"
				"membar	#Sync\n\t"
				: /* no outputs */
				: "r" (target), "i" (ASI_INTR_W));
			nack_busy_id++;
			if (nack_busy_id == 32) {
				need_more = 1;
				break;
			}
		}
	}

	/* Now, poll for completion. */
	{
		u64 dispatch_stat, nack_mask;
		long stuck;

		stuck = 100000 * nack_busy_id;
		nack_mask = busy_mask << 1;
		do {
			__asm__ __volatile__("ldxa [%%g0] %1, %0"
					     : "=r" (dispatch_stat)
					     : "i" (ASI_INTR_DISPATCH_STAT));
			if (!(dispatch_stat & (busy_mask | nack_mask))) {
				__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
						     : : "r" (pstate));
				if (unlikely(need_more)) {
					int i, n_sent = 0;

					for (i = 0; i < cnt; i++) {
						if (cpu_list[i] == 0xffff)
							continue;
						cpu_list[i] = 0xffff;
						if (++n_sent == 32)
							break;
					}
					goto retry;
				}
				return;
			}
			if (!--stuck)
				break;
		} while (dispatch_stat & busy_mask);

		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
				     : : "r" (pstate));

		if (dispatch_stat & busy_mask) {
			/* Busy bits will not clear, continue instead
			 * of freezing up on this cpu.
			 */
			printk("CPU[%d]: mondo stuckage result[%016llx]\n",
			       smp_processor_id(), dispatch_stat);
		} else {
			int i, this_busy_nack = 0;

			/* Delay some random time with interrupts enabled
			 * to prevent deadlock.
			 */
			udelay(2 * nack_busy_id);

			/* Clear out the mask bits for cpus which did not
			 * NACK us.
			 */
			for (i = 0; i < cnt; i++) {
				u64 check_mask, nr;

				nr = cpu_list[i];
				if (nr == 0xffff)
					continue;

				if (is_jbus)
					check_mask = (0x2UL << (2*nr));
				else
					check_mask = (0x2UL <<
						      this_busy_nack);
				if ((dispatch_stat & check_mask) == 0)
					cpu_list[i] = 0xffff;
				this_busy_nack += 2;
				if (this_busy_nack == 64)
					break;
			}

			goto retry;
		}
	}
}
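/* On Cheetah every dispatched target owns a busy/nack bit pair in the
 * interrupt dispatch status register: busy in the even bit and nack in the
 * odd bit of the pair, indexed by ITID on JBUS parts and by the local
 * nack_busy_id slot otherwise.  busy_mask and nack_mask above are built
 * from those pairs so one register read distinguishes "still busy" from
 * "nacked, needs a resend".
 */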
/* Multi-cpu list version.  */
static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
	int retries, this_cpu, prev_sent, i, saw_cpu_error;
	unsigned long status;
	u16 *cpu_list;

	this_cpu = smp_processor_id();

	cpu_list = __va(tb->cpu_list_pa);

	saw_cpu_error = 0;
	retries = 0;
	prev_sent = 0;
	do {
		int forward_progress, n_sent;

		status = sun4v_cpu_mondo_send(cnt,
					      tb->cpu_list_pa,
					      tb->cpu_mondo_block_pa);

		/* HV_EOK means all cpus received the xcall, we're done. */
		if (likely(status == HV_EOK))
			break;

		/* First, see if we made any forward progress.
		 *
		 * The hypervisor indicates successful sends by setting
		 * cpu list entries to the value 0xffff.
		 */
		n_sent = 0;
		for (i = 0; i < cnt; i++) {
			if (likely(cpu_list[i] == 0xffff))
				n_sent++;
		}

		forward_progress = 0;
		if (n_sent > prev_sent)
			forward_progress = 1;

		prev_sent = n_sent;

		/* If we get a HV_ECPUERROR, then one or more of the cpus
		 * in the list are in error state.  Use the cpu_state()
		 * hypervisor call to find out which cpus are in error state.
		 */
		if (unlikely(status == HV_ECPUERROR)) {
			for (i = 0; i < cnt; i++) {
				long err;
				u16 cpu;

				cpu = cpu_list[i];
				if (cpu == 0xffff)
					continue;

				err = sun4v_cpu_state(cpu);
				if (err == HV_CPU_STATE_ERROR) {
					saw_cpu_error = (cpu + 1);
					cpu_list[i] = 0xffff;
				}
			}
		} else if (unlikely(status != HV_EWOULDBLOCK))
			goto fatal_mondo_error;

		/* Don't bother rewriting the CPU list, just leave the
		 * 0xffff and non-0xffff entries in there and the
		 * hypervisor will do the right thing.
		 *
		 * Only advance timeout state if we didn't make any
		 * forward progress.
		 */
		if (unlikely(!forward_progress)) {
			if (unlikely(++retries > 10000))
				goto fatal_mondo_timeout;

			/* Delay a little bit to let other cpus catch up
			 * on their cpu mondo queue work.
			 */
			udelay(2 * cnt);
		}
	} while (1);

	if (unlikely(saw_cpu_error))
		goto fatal_mondo_cpu_error;

	return;

fatal_mondo_cpu_error:
	printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
	       "(including %d) were in error state\n",
	       this_cpu, saw_cpu_error - 1);
	return;

fatal_mondo_timeout:
	printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
	       " progress after %d retries.\n",
	       this_cpu, retries);
	goto dump_cpu_list_and_out;

fatal_mondo_error:
	printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
	       this_cpu, status);
	printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
	       "mondo_block_pa(%lx)\n",
	       this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);

dump_cpu_list_and_out:
	printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
	for (i = 0; i < cnt; i++)
		printk("%u ", cpu_list[i]);
	printk("]\n");
}
static void (*xcall_deliver_impl)(struct trap_per_cpu *, int);

static void xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask)
{
	struct trap_per_cpu *tb;
	int this_cpu, i, cnt;
	unsigned long flags;
	u16 *cpu_list;
	u64 *mondo;

	/* We have to do this whole thing with interrupts fully disabled.
	 * Otherwise if we send an xcall from interrupt context it will
	 * corrupt both our mondo block and cpu list state.
	 *
	 * One consequence of this is that we cannot use timeout mechanisms
	 * that depend upon interrupts being delivered locally.  So, for
	 * example, we cannot sample jiffies and expect it to advance.
	 *
	 * Fortunately, udelay() uses %stick/%tick so we can use that.
	 */
	local_irq_save(flags);

	this_cpu = smp_processor_id();
	tb = &trap_block[this_cpu];

	mondo = __va(tb->cpu_mondo_block_pa);
	mondo[0] = data0;
	mondo[1] = data1;
	mondo[2] = data2;
	wmb();

	cpu_list = __va(tb->cpu_list_pa);

	/* Setup the initial cpu list.  */
	cnt = 0;
	for_each_cpu(i, mask) {
		if (i == this_cpu || !cpu_online(i))
			continue;
		cpu_list[cnt++] = i;
	}

	if (cnt)
		xcall_deliver_impl(tb, cnt);

	local_irq_restore(flags);
}
/* Send cross call to all processors mentioned in MASK_P
 * except self.  Really, there are only two cases currently,
 * "cpu_online_mask" and "mm_cpumask(mm)".
 */
static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, const cpumask_t *mask)
{
	u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));

	xcall_deliver(data0, data1, data2, mask);
}

/* Send cross call to all processors except self. */
static void smp_cross_call(unsigned long *func, u32 ctx, u64 data1, u64 data2)
{
	smp_cross_call_masked(func, ctx, data1, data2, cpu_online_mask);
}
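/* Note how data0 is packed: the MMU context goes in the upper 32 bits and
 * the low 32 bits carry the address of the xcall trap handler (for example
 * xcall_flush_tlb_mm).  Only the low half of the handler address is sent;
 * the receiving trap code is evidently expected to reconstruct the full
 * kernel-image address from it.
 */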
extern unsigned long xcall_sync_tick;

static void smp_start_sync_tick_client(int cpu)
{
	xcall_deliver((u64) &xcall_sync_tick, 0, 0,
		      cpumask_of(cpu));
}

extern unsigned long xcall_call_function;

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	xcall_deliver((u64) &xcall_call_function, 0, 0, mask);
}

extern unsigned long xcall_call_function_single;

void arch_send_call_function_single_ipi(int cpu)
{
	xcall_deliver((u64) &xcall_call_function_single, 0, 0,
		      cpumask_of(cpu));
}

void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	generic_smp_call_function_interrupt();
}

void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	generic_smp_call_function_single_interrupt();
}
static void tsb_sync(void *info)
{
	struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()];
	struct mm_struct *mm = info;

	/* It is not valid to test "current->active_mm == mm" here.
	 *
	 * The value of "current" is not changed atomically with
	 * switch_mm().  But that's OK, we just need to check the
	 * current cpu's trap block PGD physical address.
	 */
	if (tp->pgd_paddr == __pa(mm->pgd))
		tsb_context_switch(mm);
}

void smp_tsb_sync(struct mm_struct *mm)
{
	smp_call_function_many(mm_cpumask(mm), tsb_sync, mm, 1);
}
extern unsigned long xcall_flush_tlb_mm;
extern unsigned long xcall_flush_tlb_page;
extern unsigned long xcall_flush_tlb_kernel_range;
extern unsigned long xcall_fetch_glob_regs;
extern unsigned long xcall_fetch_glob_pmu;
extern unsigned long xcall_fetch_glob_pmu_n4;
extern unsigned long xcall_receive_signal;
extern unsigned long xcall_new_mmu_context_version;
#ifdef CONFIG_KGDB
extern unsigned long xcall_kgdb_capture;
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
extern unsigned long xcall_flush_dcache_page_cheetah;
#endif
extern unsigned long xcall_flush_dcache_page_spitfire;

#ifdef CONFIG_DEBUG_DCFLUSH
extern atomic_t dcpage_flushes;
extern atomic_t dcpage_flushes_xcall;
#endif
static inline void __local_flush_dcache_page(struct page *page)
{
#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}
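/* Note that the flush routines below return early for tlb_type == hypervisor:
 * the sun4v paths rely on those chips not needing this software D-cache
 * maintenance at all.
 */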
void smp_flush_dcache_page_impl(struct page *page, int cpu)
{
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

	this_cpu = get_cpu();

	if (cpu == this_cpu) {
		__local_flush_dcache_page(page);
	} else if (cpu_online(cpu)) {
		void *pg_addr = page_address(page);
		u64 data0 = 0;

		if (tlb_type == spitfire) {
			data0 = ((u64)&xcall_flush_dcache_page_spitfire);
			if (page_mapping(page) != NULL)
				data0 |= ((u64)1 << 32);
		} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
			data0 = ((u64)&xcall_flush_dcache_page_cheetah);
#endif
		}
		if (data0) {
			xcall_deliver(data0, __pa(pg_addr),
				      (u64) pg_addr, cpumask_of(cpu));
#ifdef CONFIG_DEBUG_DCFLUSH
			atomic_inc(&dcpage_flushes_xcall);
#endif
		}
	}

	put_cpu();
}
void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
{
	void *pg_addr;
	u64 data0;

	if (tlb_type == hypervisor)
		return;

	preempt_disable();

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif
	data0 = 0;
	pg_addr = page_address(page);
	if (tlb_type == spitfire) {
		data0 = ((u64)&xcall_flush_dcache_page_spitfire);
		if (page_mapping(page) != NULL)
			data0 |= ((u64)1 << 32);
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
		data0 = ((u64)&xcall_flush_dcache_page_cheetah);
#endif
	}
	if (data0) {
		xcall_deliver(data0, __pa(pg_addr),
			      (u64) pg_addr, cpu_online_mask);
#ifdef CONFIG_DEBUG_DCFLUSH
		atomic_inc(&dcpage_flushes_xcall);
#endif
	}
	__local_flush_dcache_page(page);

	preempt_enable();
}
void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
{
	struct mm_struct *mm;
	unsigned long flags;

	clear_softint(1 << irq);

	/* See if we need to allocate a new TLB context because
	 * the version of the one we are using is now out of date.
	 */
	mm = current->active_mm;
	if (unlikely(!mm || (mm == &init_mm)))
		return;

	spin_lock_irqsave(&mm->context.lock, flags);

	if (unlikely(!CTX_VALID(mm->context)))
		get_new_mmu_context(mm);

	spin_unlock_irqrestore(&mm->context.lock, flags);

	load_secondary_context(mm);
	__flush_tlb_mm(CTX_HWBITS(mm->context),
		       SECONDARY_CONTEXT);
}

void smp_new_mmu_context_version(void)
{
	smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
}
#ifdef CONFIG_KGDB
void kgdb_roundup_cpus(unsigned long flags)
{
	smp_cross_call(&xcall_kgdb_capture, 0, 0, 0);
}
#endif

void smp_fetch_global_regs(void)
{
	smp_cross_call(&xcall_fetch_glob_regs, 0, 0, 0);
}

void smp_fetch_global_pmu(void)
{
	if (tlb_type == hypervisor &&
	    sun4v_chip_type >= SUN4V_CHIP_NIAGARA4)
		smp_cross_call(&xcall_fetch_glob_pmu_n4, 0, 0, 0);
	else
		smp_cross_call(&xcall_fetch_glob_pmu, 0, 0, 0);
}
/* We know that the window frames of the user have been flushed
 * to the stack before we get here because all callers of us
 * are flush_tlb_*() routines, and these run after flush_cache_*()
 * which performs the flushw.
 *
 * The SMP TLB coherency scheme we use works as follows:
 *
 * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
 *    space has (potentially) executed on, this is the heuristic
 *    we use to avoid doing cross calls.
 *
 *    Also, for flushing from kswapd and also for clones, we
 *    use cpu_vm_mask as the list of cpus to make run the TLB.
 *
 * 2) TLB context numbers are shared globally across all processors
 *    in the system, this allows us to play several games to avoid
 *    cross calls.
 *
 *    One invariant is that when a cpu switches to a process, and
 *    that processes tsk->active_mm->cpu_vm_mask does not have the
 *    current cpu's bit set, that tlb context is flushed locally.
 *
 *    If the address space is non-shared (ie. mm->count == 1) we avoid
 *    cross calls when we want to flush the currently running process's
 *    tlb state.  This is done by clearing all cpu bits except the current
 *    processor's in current->mm->cpu_vm_mask and performing the
 *    flush locally only.  This will force any subsequent cpus which run
 *    this task to flush the context from the local tlb if the process
 *    migrates to another cpu (again).
 *
 * 3) For shared address spaces (threads) and swapping we bite the
 *    bullet for most cases and perform the cross call (but only to
 *    the cpus listed in cpu_vm_mask).
 *
 * The performance gain from "optimizing" away the cross call for threads is
 * questionable (in theory the big win for threads is the massive sharing of
 * address space state across processors).
 */
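/* smp_flush_tlb_mm() below is the concrete instance of case 2): when
 * mm_users == 1 it shrinks mm_cpumask() down to just the local cpu and
 * performs only a local flush, leaving any other cpu to flush lazily if
 * the task ever migrates back to it.
 */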
/* This currently is only used by the hugetlb arch pre-fault
 * hook on UltraSPARC-III+ and later when changing the pagesize
 * bits of the context register for an address space.
 */
void smp_flush_tlb_mm(struct mm_struct *mm)
{
	u32 ctx = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (atomic_read(&mm->mm_users) == 1) {
		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
		goto local_flush_and_out;
	}

	smp_cross_call_masked(&xcall_flush_tlb_mm,
			      ctx, 0, 0,
			      mm_cpumask(mm));

local_flush_and_out:
	__flush_tlb_mm(ctx, SECONDARY_CONTEXT);

	put_cpu();
}
struct tlb_pending_info {
	unsigned long ctx;
	unsigned long nr;
	unsigned long *vaddrs;
};

static void tlb_pending_func(void *info)
{
	struct tlb_pending_info *t = info;

	__flush_tlb_pending(t->ctx, t->nr, t->vaddrs);
}

void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
{
	u32 ctx = CTX_HWBITS(mm->context);
	struct tlb_pending_info info;
	int cpu = get_cpu();

	info.ctx = ctx;
	info.nr = nr;
	info.vaddrs = vaddrs;

	if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
	else
		smp_call_function_many(mm_cpumask(mm), tlb_pending_func,
				       &info, 1);

	__flush_tlb_pending(ctx, nr, vaddrs);

	put_cpu();
}
void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
{
	unsigned long context = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
	else
		smp_cross_call_masked(&xcall_flush_tlb_page,
				      context, vaddr, 0,
				      mm_cpumask(mm));

	__flush_tlb_page(context, vaddr);

	put_cpu();
}

void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	start &= PAGE_MASK;
	end    = PAGE_ALIGN(end);
	if (start != end) {
		smp_cross_call(&xcall_flush_tlb_kernel_range,
			       0, start, end);

		__flush_tlb_kernel_range(start, end);
	}
}
/* #define CAPTURE_DEBUG */
extern unsigned long xcall_capture;

static atomic_t smp_capture_depth = ATOMIC_INIT(0);
static atomic_t smp_capture_registry = ATOMIC_INIT(0);
static unsigned long penguins_are_doing_time;
void smp_capture(void)
{
	int result = atomic_add_ret(1, &smp_capture_depth);

	if (result == 1) {
		int ncpus = num_online_cpus();

#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Sending penguins to jail...",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 1;
		atomic_inc(&smp_capture_registry);
		smp_cross_call(&xcall_capture, 0, 0, 0);
		while (atomic_read(&smp_capture_registry) != ncpus)
			rmb();
#ifdef CAPTURE_DEBUG
		printk("done\n");
#endif
	}
}

void smp_release(void)
{
	if (atomic_dec_and_test(&smp_capture_depth)) {
#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Giving pardon to "
		       "imprisoned penguins\n",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 0;
		membar_safe("#StoreLoad");
		atomic_dec(&smp_capture_registry);
	}
}
/* Imprisoned penguins run with %pil == PIL_NORMAL_MAX, but PSTATE_IE
 * set, so they can service tlb flush xcalls...
 */
extern void prom_world(int);

void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);

	preempt_disable();

	__asm__ __volatile__("flushw");
	prom_world(1);
	atomic_inc(&smp_capture_registry);
	membar_safe("#StoreLoad");
	while (penguins_are_doing_time)
		rmb();
	atomic_dec(&smp_capture_registry);
	prom_world(0);

	preempt_enable();
}
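/* Capture protocol summary: smp_capture() raises penguins_are_doing_time,
 * cross calls xcall_capture and waits until every online cpu has checked
 * into smp_capture_registry.  Each captured cpu sits in the jailcell above
 * until smp_release() drops the flag, then checks back out.
 */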
/* /proc/profile writes can call this, don't __init it please. */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
}

void smp_prepare_boot_cpu(void)
{
}

void __init smp_setup_processor_id(void)
{
	if (tlb_type == spitfire)
		xcall_deliver_impl = spitfire_xcall_deliver;
	else if (tlb_type == cheetah || tlb_type == cheetah_plus)
		xcall_deliver_impl = cheetah_xcall_deliver;
	else
		xcall_deliver_impl = hypervisor_xcall_deliver;
}
void smp_fill_in_sib_core_maps(void)
{
	unsigned int i;

	for_each_present_cpu(i) {
		unsigned int j;

		cpumask_clear(&cpu_core_map[i]);
		if (cpu_data(i).core_id == 0) {
			cpumask_set_cpu(i, &cpu_core_map[i]);
			continue;
		}

		for_each_present_cpu(j) {
			if (cpu_data(i).core_id ==
			    cpu_data(j).core_id)
				cpumask_set_cpu(j, &cpu_core_map[i]);
		}
	}

	for_each_present_cpu(i) {
		unsigned int j;

		cpumask_clear(&per_cpu(cpu_sibling_map, i));
		if (cpu_data(i).proc_id == -1) {
			cpumask_set_cpu(i, &per_cpu(cpu_sibling_map, i));
			continue;
		}

		for_each_present_cpu(j) {
			if (cpu_data(i).proc_id ==
			    cpu_data(j).proc_id)
				cpumask_set_cpu(j, &per_cpu(cpu_sibling_map, i));
		}
	}
}
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int ret = smp_boot_one_cpu(cpu, tidle);

	if (!ret) {
		cpumask_set_cpu(cpu, &smp_commenced_mask);
		while (!cpu_online(cpu))
			mb();
		if (!cpu_online(cpu)) {
			ret = -ENODEV;
		} else {
			/* On SUN4V, writes to %tick and %stick are
			 * not allowed.
			 */
			if (tlb_type != hypervisor)
				smp_synchronize_one_tick(cpu);
		}
	}
	return ret;
}
#ifdef CONFIG_HOTPLUG_CPU
void cpu_play_dead(void)
{
	int cpu = smp_processor_id();
	unsigned long pstate;

	idle_task_exit();

	if (tlb_type == hypervisor) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO,
				tb->cpu_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_DEVICE_MONDO,
				tb->dev_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_RES_ERROR,
				tb->resum_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_NONRES_ERROR,
				tb->nonresum_mondo_pa, 0);
	}

	cpumask_clear_cpu(cpu, &smp_commenced_mask);
	membar_safe("#Sync");

	local_irq_disable();

	__asm__ __volatile__(
		"rdpr	%%pstate, %0\n\t"
		"wrpr	%0, %1, %%pstate"
		: "=r" (pstate)
		: "i" (PSTATE_IE));

	while (1)
		barrier();
}
int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	cpuinfo_sparc *c;
	int i;

	for_each_cpu(i, &cpu_core_map[cpu])
		cpumask_clear_cpu(cpu, &cpu_core_map[i]);
	cpumask_clear(&cpu_core_map[cpu]);

	for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu))
		cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i));
	cpumask_clear(&per_cpu(cpu_sibling_map, cpu));

	c = &cpu_data(cpu);

	c->core_id = 0;
	c->proc_id = -1;

	smp_wmb();

	/* Make sure no interrupts point to this cpu. */
	fixup_irqs();

	local_irq_enable();
	mdelay(1);
	local_irq_disable();

	set_cpu_online(cpu, false);

	cpu_map_rebuild();

	return 0;
}
void __cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (!cpumask_test_cpu(cpu, &smp_commenced_mask))
			break;
		msleep(100);
	}
	if (cpumask_test_cpu(cpu, &smp_commenced_mask)) {
		printk(KERN_ERR "CPU %u didn't die...\n", cpu);
	} else {
#if defined(CONFIG_SUN_LDOMS)
		unsigned long hv_err;
		int limit = 100;

		do {
			hv_err = sun4v_cpu_stop(cpu);
			if (hv_err == HV_EOK) {
				set_cpu_present(cpu, false);
				break;
			}
		} while (--limit > 0);
		if (limit <= 0) {
			printk(KERN_ERR "sun4v_cpu_stop() fails err=%lu\n",
			       hv_err);
		}
#endif
	}
}
#endif
void __init smp_cpus_done(unsigned int max_cpus)
{
}

void smp_send_reschedule(int cpu)
{
	if (cpu == smp_processor_id()) {
		WARN_ON_ONCE(preemptible());
		set_softint(1 << PIL_SMP_RECEIVE_SIGNAL);
	} else {
		xcall_deliver((u64) &xcall_receive_signal,
			      0, 0, cpumask_of(cpu));
	}
}
void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	scheduler_ipi();
}

/* This is a nop because we capture all other cpus
 * anyways when making the PROM active.
 */
void smp_send_stop(void)
{
}
/**
 * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
 * @cpu: cpu to allocate for
 * @size: size allocation in bytes
 * @align: alignment
 *
 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
 * does the right thing for NUMA regardless of the current
 * configuration.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size,
					size_t align)
{
	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int node = cpu_to_node(cpu);
	void *ptr;

	if (!node_online(node) || !NODE_DATA(node)) {
		ptr = __alloc_bootmem(size, align, goal);
		pr_info("cpu %d has no node %d or node-local memory\n",
			cpu, node);
		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
			 cpu, size, __pa(ptr));
	} else {
		ptr = __alloc_bootmem_node(NODE_DATA(node),
					   size, align, goal);
		pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
			 "%016lx\n", cpu, size, node, __pa(ptr));
	}
	return ptr;
#else
	return __alloc_bootmem(size, align, goal);
#endif
}
static void __init pcpu_free_bootmem(void *ptr, size_t size)
{
	free_bootmem(__pa(ptr), size);
}

static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	if (cpu_to_node(from) == cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
}
static void __init pcpu_populate_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	pud_t *pud;
	pmd_t *pmd;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud)) {
		pmd_t *new;

		new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
		pud_populate(&init_mm, pud, new);
	}

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd)) {
		pte_t *new;

		new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
		pmd_populate_kernel(&init_mm, pmd, new);
	}
}
void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc = -EINVAL;

	if (pcpu_chosen_fc != PCPU_FC_PAGE) {
		rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
					    PERCPU_DYNAMIC_RESERVE, 4 << 20,
					    pcpu_cpu_distance,
					    pcpu_alloc_bootmem,
					    pcpu_free_bootmem);
		if (rc)
			pr_warning("PERCPU: %s allocator failed (%d), "
				   "falling back to page size\n",
				   pcpu_fc_names[pcpu_chosen_fc], rc);
	}
	if (rc < 0)
		rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
					   pcpu_alloc_bootmem,
					   pcpu_free_bootmem,
					   pcpu_populate_pte);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];

	/* Setup %g5 for the boot cpu.  */
	__local_per_cpu_offset = __per_cpu_offset(smp_processor_id());

	of_fill_in_cpu_data();
	if (tlb_type == hypervisor)
		mdesc_fill_in_cpu_data(cpu_all_mask);
}