/* smp.c: Sparc64 SMP support.
 *
 * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <linux/pagemap.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/jiffies.h>
#include <linux/profile.h>
#include <linux/bootmem.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/kgdb.h>

#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cpudata.h>
#include <asm/hvtramp.h>
#include <asm/timer.h>
#include <asm/setup.h>

#include <asm/irq_regs.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <linux/uaccess.h>
#include <asm/starfire.h>
#include <asm/sections.h>
#include <asm/mdesc.h>
#include <asm/hypervisor.h>
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
	{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };

cpumask_t cpu_core_sib_map[NR_CPUS] __read_mostly = {
	[0 ... NR_CPUS-1] = CPU_MASK_NONE };

cpumask_t cpu_core_sib_cache_map[NR_CPUS] __read_mostly = {
	[0 ... NR_CPUS - 1] = CPU_MASK_NONE };

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_SYMBOL(cpu_core_map);
EXPORT_SYMBOL(cpu_core_sib_map);
EXPORT_SYMBOL(cpu_core_sib_cache_map);
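
/* CPUs which the boot processor has released into the kernel proper.
 * A freshly started cpu spins in smp_callin() until its bit is set
 * here by __cpu_up().
 */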
static cpumask_t smp_commenced_mask;
void smp_info(struct seq_file *m)
{
	int i;

	seq_printf(m, "State:\n");
	for_each_online_cpu(i)
		seq_printf(m, "CPU%d:\t\tonline\n", i);
}
void smp_bogo(struct seq_file *m)
{
	int i;

	for_each_online_cpu(i)
		seq_printf(m,
			   "Cpu%dClkTck\t: %016lx\n",
			   i, cpu_data(i).clock_tick);
}
extern void setup_sparc64_timer(void);
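
/* Handshake with smp_boot_one_cpu(): the freshly started cpu sets this
 * in smp_callin(), and the boot processor polls it to see whether the
 * new cpu came up.
 */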
static volatile unsigned long callin_flag = 0;
void smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	__local_per_cpu_offset = __per_cpu_offset(cpuid);

	if (tlb_type == hypervisor)
		sun4v_ktsb_register();

	__flush_tlb_all();

	setup_sparc64_timer();

	if (cheetah_pcache_forced_on)
		cheetah_enable_pcache();

	callin_flag = 1;
	__asm__ __volatile__("membar #Sync\n\t"
			     "flush %%g6" : : : "memory");

	/* Clear this or we will die instantly when we
	 * schedule back to this idler...
	 */
	current_thread_info()->new_child = 0;

	/* Attach to the address space of init_task. */
	mmgrab(&init_mm);
	current->active_mm = &init_mm;

	/* inform the notifiers about the new cpu */
	notify_cpu_starting(cpuid);

	while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
		rmb();

	set_cpu_online(cpuid, true);

	/* idle thread is expected to have preempt disabled */
	preempt_disable();

	local_irq_enable();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

void cpu_panic(void)
{
	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
	panic("SMP bolixed\n");
}
/* This tick register synchronization scheme is taken entirely from
 * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
 *
 * The only change I've made is to rework it so that the master
 * initiates the synchronization instead of the slave.  -DaveM
 */

#define MASTER	0
#define SLAVE	(SMP_CACHE_BYTES/sizeof(unsigned long))

#define NUM_ROUNDS	64	/* magic value */
#define NUM_ITERS	5	/* likewise */

static DEFINE_RAW_SPINLOCK(itc_sync_lock);
static unsigned long go[SLAVE + 1];

#define DEBUG_TICK_SYNC	0
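
/* Measure the tick offset to the master over NUM_ITERS exchanges through
 * the go[] array.  Returns the estimated difference between this cpu's
 * tick midpoint and the master's timestamp; *rt gets the best roundtrip
 * time and *master the corresponding master timestamp.
 */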
static inline long get_delta (long *rt, long *master)
{
	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
	unsigned long tcenter, t0, t1, tm;
	unsigned long i;

	for (i = 0; i < NUM_ITERS; i++) {
		t0 = tick_ops->get_tick();
		go[MASTER] = 1;
		membar_safe("#StoreLoad");
		while (!(tm = go[SLAVE]))
			rmb();
		go[SLAVE] = 0;
		wmb();
		t1 = tick_ops->get_tick();

		if (t1 - t0 < best_t1 - best_t0)
			best_t0 = t0, best_t1 = t1, best_tm = tm;
	}

	*rt = best_t1 - best_t0;
	*master = best_tm - best_t0;

	/* average best_t0 and best_t1 without overflow: */
	tcenter = (best_t0/2 + best_t1/2);
	if (best_t0 % 2 + best_t1 % 2 == 2)
		tcenter++;
	return tcenter - best_tm;
}
void smp_synchronize_tick_client(void)
{
	long i, delta, adj, adjust_latency = 0, done = 0;
	unsigned long flags, rt, master_time_stamp;
#if DEBUG_TICK_SYNC
	struct {
		long rt;	/* roundtrip time */
		long master;	/* master's timestamp */
		long diff;	/* difference between midpoint and master's timestamp */
		long lat;	/* estimate of itc adjustment latency */
	} t[NUM_ROUNDS];
#endif

	go[MASTER] = 1;

	while (go[MASTER])
		rmb();

	local_irq_save(flags);
	{
		for (i = 0; i < NUM_ROUNDS; i++) {
			delta = get_delta(&rt, &master_time_stamp);
			if (delta == 0)
				done = 1;	/* let's lock on to this... */

			if (!done) {
				if (i > 0) {
					adjust_latency += -delta;
					adj = -delta + adjust_latency/4;
				} else
					adj = -delta;

				tick_ops->add_tick(adj);
			}
#if DEBUG_TICK_SYNC
			t[i].rt = rt;
			t[i].master = master_time_stamp;
			t[i].diff = delta;
			t[i].lat = adjust_latency/4;
#endif
		}
	}
	local_irq_restore(flags);

#if DEBUG_TICK_SYNC
	for (i = 0; i < NUM_ROUNDS; i++)
		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif

	printk(KERN_INFO "CPU %d: synchronized TICK with master CPU "
	       "(last diff %ld cycles, maxerr %lu cycles)\n",
	       smp_processor_id(), delta, rt);
}
static void smp_start_sync_tick_client(int cpu);

static void smp_synchronize_one_tick(int cpu)
{
	unsigned long flags, i;

	go[MASTER] = 0;

	smp_start_sync_tick_client(cpu);

	/* wait for client to be ready */
	while (!go[MASTER])
		rmb();

	/* now let the client proceed into his loop */
	go[MASTER] = 0;
	membar_safe("#StoreLoad");

	raw_spin_lock_irqsave(&itc_sync_lock, flags);
	{
		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
			while (!go[MASTER])
				rmb();
			go[MASTER] = 0;
			wmb();
			go[SLAVE] = tick_ops->get_tick();
			membar_safe("#StoreLoad");
		}
	}
	raw_spin_unlock_irqrestore(&itc_sync_lock, flags);
}
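
/* On LDOM machines a new cpu is started through the hypervisor: build an
 * hvtramp_descr describing the kernel image mappings and the cpu's trap
 * block, then hand it to sun4v_cpu_start() along with the real address of
 * the startup trampoline.
 */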
#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg,
				void **descrp)
{
	extern unsigned long sparc64_ttable_tl0;
	extern unsigned long kern_locked_tte_data;
	struct hvtramp_descr *hdesc;
	unsigned long trampoline_ra;
	struct trap_per_cpu *tb;
	u64 tte_vaddr, tte_data;
	unsigned long hv_err;
	int i;

	hdesc = kzalloc(sizeof(*hdesc) +
			(sizeof(struct hvtramp_mapping) *
			 num_kernel_image_mappings - 1),
			GFP_KERNEL);
	if (!hdesc) {
		printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
		       "hvtramp_descr.\n");
		return;
	}
	*descrp = hdesc;

	hdesc->cpu = cpu;
	hdesc->num_mappings = num_kernel_image_mappings;

	tb = &trap_block[cpu];

	hdesc->fault_info_va = (unsigned long) &tb->fault_info;
	hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info);

	hdesc->thread_reg = thread_reg;

	tte_vaddr = (unsigned long) KERNBASE;
	tte_data = kern_locked_tte_data;

	for (i = 0; i < hdesc->num_mappings; i++) {
		hdesc->maps[i].vaddr = tte_vaddr;
		hdesc->maps[i].tte = tte_data;
		tte_vaddr += 0x400000;
		tte_data += 0x400000;
	}

	trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);

	hv_err = sun4v_cpu_start(cpu, trampoline_ra,
				 kimage_addr_to_ra(&sparc64_ttable_tl0),
				 __pa(hdesc));
	if (hv_err)
		printk(KERN_ERR "ldom_startcpu_cpuid: sun4v_cpu_start() "
		       "gives error %lu\n", hv_err);
}
#endif
extern unsigned long sparc64_cpu_startup;

/* The OBP cpu startup callback truncates the 3rd arg cookie to
 * 32-bits (I think) so to be safe we have it read the pointer
 * contained here so we work on >4GB machines. -DaveM
 */
static struct thread_info *cpu_new_thread = NULL;
static int smp_boot_one_cpu(unsigned int cpu, struct task_struct *idle)
{
	unsigned long entry =
		(unsigned long)(&sparc64_cpu_startup);
	unsigned long cookie =
		(unsigned long)(&cpu_new_thread);
	void *descr = NULL;
	int timeout, ret;

	callin_flag = 0;
	cpu_new_thread = task_thread_info(idle);

	if (tlb_type == hypervisor) {
#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
		if (ldom_domaining_enabled)
			ldom_startcpu_cpuid(cpu,
					    (unsigned long) cpu_new_thread,
					    &descr);
		else
#endif
			prom_startcpu_cpuid(cpu, entry, cookie);
	} else {
		struct device_node *dp = of_find_node_by_cpuid(cpu);

		prom_startcpu(dp->phandle, entry, cookie);
	}

	for (timeout = 0; timeout < 50000; timeout++) {
		if (callin_flag)
			break;
		udelay(100);
	}

	if (callin_flag) {
		ret = 0;
	} else {
		printk("Processor %d is stuck.\n", cpu);
		ret = -ENODEV;
	}
	cpu_new_thread = NULL;

	kfree(descr);

	return ret;
}
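
/* Deliver one interrupt vector (data0-data2) to a single cpu by writing
 * the UDB interrupt dispatch registers, then spin on the dispatch status
 * register and retry if the target NACKed us.
 */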
static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
{
	u64 result, target;
	int stuck, tmp;

	if (this_is_starfire) {
		/* map to real upaid */
		cpu = (((cpu & 0x3c) << 1) |
			((cpu & 0x40) >> 4) |
			(cpu & 0x3));
	}

	target = (cpu << 14) | 0x70;
again:
	/* Ok, this is the real Spitfire Errata #54.
	 * One must read back from a UDB internal register
	 * after writes to the UDB interrupt dispatch, but
	 * before the membar Sync for that write.
	 * So we use the high UDB control register (ASI 0x7f,
	 * ADDR 0x20) for the dummy read. -DaveM
	 */
	tmp = 0x40;
	__asm__ __volatile__(
	"wrpr %1, %2, %%pstate\n\t"
	"stxa %4, [%0] %3\n\t"
	"stxa %5, [%0+%8] %3\n\t"
	"add %0, %8, %0\n\t"
	"stxa %6, [%0+%8] %3\n\t"
	"membar #Sync\n\t"
	"stxa %%g0, [%7] %3\n\t"
	"membar #Sync\n\t"
	"mov 0x20, %%g1\n\t"
	"ldxa [%%g1] 0x7f, %%g0\n\t"
	"membar #Sync"
	: "=r" (tmp)
	: "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
	  "r" (data0), "r" (data1), "r" (data2), "r" (target),
	  "r" (0x10), "0" (tmp)
	: "g1");

	/* NOTE: PSTATE_IE is still clear. */
	stuck = 100000;
	do {
		__asm__ __volatile__("ldxa [%%g0] %1, %0"
			: "=r" (result)
			: "i" (ASI_INTR_DISPATCH_STAT));
		if (result == 0) {
			__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
					     : : "r" (pstate));
			return;
		}
		stuck -= 1;
		if (stuck == 0)
			break;
	} while (result & 0x1);
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
			     : : "r" (pstate));
	if (stuck == 0) {
		printk("CPU[%d]: mondo stuckage result[%016llx]\n",
		       smp_processor_id(), result);
	} else {
		udelay(2);
		goto again;
	}
}
static void spitfire_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
	u64 *mondo, data0, data1, data2;
	u16 *cpu_list;
	u64 pstate;
	int i;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	cpu_list = __va(tb->cpu_list_pa);
	mondo = __va(tb->cpu_mondo_block_pa);
	data0 = mondo[0];
	data1 = mondo[1];
	data2 = mondo[2];
	for (i = 0; i < cnt; i++)
		spitfire_xcall_helper(data0, data1, data2, pstate, cpu_list[i]);
}
/* Cheetah now allows to send the whole 64-bytes of data in the interrupt
 * packet, but we have no use for that.  However we do take advantage of
 * the new pipelining feature (ie. dispatch to multiple cpus simultaneously).
 */
static void cheetah_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
	int nack_busy_id, is_jbus, need_more;
	u64 *mondo, pstate, ver, busy_mask;
	u16 *cpu_list;

	cpu_list = __va(tb->cpu_list_pa);
	mondo = __va(tb->cpu_mondo_block_pa);

	/* Unfortunately, someone at Sun had the brilliant idea to make the
	 * busy/nack fields hard-coded by ITID number for this Ultra-III
	 * derivative processor.
	 */
	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
	is_jbus = ((ver >> 32) == __JALAPENO_ID ||
		   (ver >> 32) == __SERRANO_ID);

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));

retry:
	need_more = 0;
	__asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
			     : : "r" (pstate), "i" (PSTATE_IE));

	/* Setup the dispatch data registers. */
	__asm__ __volatile__("stxa %0, [%3] %6\n\t"
			     "stxa %1, [%4] %6\n\t"
			     "stxa %2, [%5] %6\n\t"
			     "membar #Sync\n\t"
			     : /* no outputs */
			     : "r" (mondo[0]), "r" (mondo[1]), "r" (mondo[2]),
			       "r" (0x40), "r" (0x50), "r" (0x60),
			       "i" (ASI_INTR_W));

	nack_busy_id = 0;
	busy_mask = 0;
	{
		int i;

		for (i = 0; i < cnt; i++) {
			u64 target, nr;

			nr = cpu_list[i];
			if (nr == 0xffff)
				continue;

			target = (nr << 14) | 0x70;
			if (is_jbus) {
				busy_mask |= (0x1UL << (nr * 2));
			} else {
				target |= (nack_busy_id << 24);
				busy_mask |= (0x1UL <<
					      (nack_busy_id * 2));
			}
			__asm__ __volatile__(
				"stxa %%g0, [%0] %1\n\t"
				"membar #Sync\n\t"
				: /* no outputs */
				: "r" (target), "i" (ASI_INTR_W));
			nack_busy_id++;
			if (nack_busy_id == 32) {
				need_more = 1;
				break;
			}
		}
	}

	/* Now, poll for completion. */
	{
		u64 dispatch_stat, nack_mask;
		long stuck;

		stuck = 100000 * nack_busy_id;
		nack_mask = busy_mask << 1;
		do {
			__asm__ __volatile__("ldxa [%%g0] %1, %0"
					     : "=r" (dispatch_stat)
					     : "i" (ASI_INTR_DISPATCH_STAT));
			if (!(dispatch_stat & (busy_mask | nack_mask))) {
				__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
						     : : "r" (pstate));
				if (unlikely(need_more)) {
					int i, this_cnt = 0;
					for (i = 0; i < cnt; i++) {
						if (cpu_list[i] == 0xffff)
							continue;
						cpu_list[i] = 0xffff;
						this_cnt++;
						if (this_cnt == 32)
							break;
					}
					goto retry;
				}
				return;
			}
			if (!--stuck)
				break;
		} while (dispatch_stat & busy_mask);

		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
				     : : "r" (pstate));

		if (dispatch_stat & busy_mask) {
			/* Busy bits will not clear, continue instead
			 * of freezing up on this cpu.
			 */
			printk("CPU[%d]: mondo stuckage result[%016llx]\n",
			       smp_processor_id(), dispatch_stat);
		} else {
			int i, this_busy_nack = 0;

			/* Delay some random time with interrupts enabled
			 * to prevent deadlock.
			 */
			udelay(2 * nack_busy_id);

			/* Clear out the mask bits for cpus which did not
			 * NACK us.
			 */
			for (i = 0; i < cnt; i++) {
				u64 check_mask, nr;

				nr = cpu_list[i];
				if (nr == 0xffff)
					continue;

				if (is_jbus)
					check_mask = (0x2UL << (2*nr));
				else
					check_mask = (0x2UL <<
						      this_busy_nack);
				if ((dispatch_stat & check_mask) == 0)
					cpu_list[i] = 0xffff;
				this_busy_nack += 2;
				if (this_busy_nack == 64)
					break;
			}

			goto retry;
		}
	}
}
/* Multi-cpu list version. */
static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
	int retries, this_cpu, prev_sent, i, saw_cpu_error;
	unsigned long status;
	u16 *cpu_list;

	this_cpu = smp_processor_id();

	cpu_list = __va(tb->cpu_list_pa);

	saw_cpu_error = 0;
	retries = 0;
	prev_sent = 0;
	do {
		int forward_progress, n_sent;

		status = sun4v_cpu_mondo_send(cnt,
					      tb->cpu_list_pa,
					      tb->cpu_mondo_block_pa);

		/* HV_EOK means all cpus received the xcall, we're done. */
		if (likely(status == HV_EOK))
			break;

		/* First, see if we made any forward progress.
		 *
		 * The hypervisor indicates successful sends by setting
		 * cpu list entries to the value 0xffff.
		 */
		n_sent = 0;
		for (i = 0; i < cnt; i++) {
			if (likely(cpu_list[i] == 0xffff))
				n_sent++;
		}

		forward_progress = 0;
		if (n_sent > prev_sent)
			forward_progress = 1;

		prev_sent = n_sent;

		/* If we get a HV_ECPUERROR, then one or more of the cpus
		 * in the list are in error state.  Use the cpu_state()
		 * hypervisor call to find out which cpus are in error state.
		 */
		if (unlikely(status == HV_ECPUERROR)) {
			for (i = 0; i < cnt; i++) {
				long err;
				u16 cpu;

				cpu = cpu_list[i];
				if (cpu == 0xffff)
					continue;

				err = sun4v_cpu_state(cpu);
				if (err == HV_CPU_STATE_ERROR) {
					saw_cpu_error = (cpu + 1);
					cpu_list[i] = 0xffff;
				}
			}
		} else if (unlikely(status != HV_EWOULDBLOCK))
			goto fatal_mondo_error;

		/* Don't bother rewriting the CPU list, just leave the
		 * 0xffff and non-0xffff entries in there and the
		 * hypervisor will do the right thing.
		 *
		 * Only advance timeout state if we didn't make any
		 * forward progress.
		 */
		if (unlikely(!forward_progress)) {
			if (unlikely(++retries > 10000))
				goto fatal_mondo_timeout;

			/* Delay a little bit to let other cpus catch up
			 * on their cpu mondo queue work.
			 */
			udelay(2 * cnt);
		}
	} while (1);

	if (unlikely(saw_cpu_error))
		goto fatal_mondo_cpu_error;

	return;

fatal_mondo_cpu_error:
	printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
	       "(including %d) were in error state\n",
	       this_cpu, saw_cpu_error - 1);
	return;

fatal_mondo_timeout:
	printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
	       "progress after %d retries.\n",
	       this_cpu, retries);
	goto dump_cpu_list_and_out;

fatal_mondo_error:
	printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
	       this_cpu, status);
	printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
	       "mondo_block_pa(%lx)\n",
	       this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);

dump_cpu_list_and_out:
	printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
	for (i = 0; i < cnt; i++)
		printk("%u ", cpu_list[i]);
	printk("]\n");
}
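
/* Chosen once at boot in smp_setup_processor_id() based on tlb_type. */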
static void (*xcall_deliver_impl)(struct trap_per_cpu *, int);

static void xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask)
{
	struct trap_per_cpu *tb;
	int this_cpu, i, cnt;
	unsigned long flags;
	u16 *cpu_list;
	u64 *mondo;

	/* We have to do this whole thing with interrupts fully disabled.
	 * Otherwise if we send an xcall from interrupt context it will
	 * corrupt both our mondo block and cpu list state.
	 *
	 * One consequence of this is that we cannot use timeout mechanisms
	 * that depend upon interrupts being delivered locally.  So, for
	 * example, we cannot sample jiffies and expect it to advance.
	 *
	 * Fortunately, udelay() uses %stick/%tick so we can use that.
	 */
	local_irq_save(flags);

	this_cpu = smp_processor_id();
	tb = &trap_block[this_cpu];

	mondo = __va(tb->cpu_mondo_block_pa);
	mondo[0] = data0;
	mondo[1] = data1;
	mondo[2] = data2;
	wmb();

	cpu_list = __va(tb->cpu_list_pa);

	/* Setup the initial cpu list. */
	cnt = 0;
	for_each_cpu(i, mask) {
		if (i == this_cpu || !cpu_online(i))
			continue;
		cpu_list[cnt++] = i;
	}

	if (cnt)
		xcall_deliver_impl(tb, cnt);

	local_irq_restore(flags);
}
/* Send cross call to all processors mentioned in MASK_P
 * except self.  Really, there are only two cases currently,
 * "cpu_online_mask" and "mm_cpumask(mm)".
 */
static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, const cpumask_t *mask)
{
	u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));

	xcall_deliver(data0, data1, data2, mask);
}

/* Send cross call to all processors except self. */
static void smp_cross_call(unsigned long *func, u32 ctx, u64 data1, u64 data2)
{
	smp_cross_call_masked(func, ctx, data1, data2, cpu_online_mask);
}
extern unsigned long xcall_sync_tick;

static void smp_start_sync_tick_client(int cpu)
{
	xcall_deliver((u64) &xcall_sync_tick, 0, 0,
		      cpumask_of(cpu));
}

extern unsigned long xcall_call_function;

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	xcall_deliver((u64) &xcall_call_function, 0, 0, mask);
}

extern unsigned long xcall_call_function_single;

void arch_send_call_function_single_ipi(int cpu)
{
	xcall_deliver((u64) &xcall_call_function_single, 0, 0,
		      cpumask_of(cpu));
}
void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	irq_enter();
	generic_smp_call_function_interrupt();
	irq_exit();
}

void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	irq_enter();
	generic_smp_call_function_single_interrupt();
	irq_exit();
}
static void tsb_sync(void *info)
{
	struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()];
	struct mm_struct *mm = info;

	/* It is not valid to test "current->active_mm == mm" here.
	 *
	 * The value of "current" is not changed atomically with
	 * switch_mm().  But that's OK, we just need to check the
	 * current cpu's trap block PGD physical address.
	 */
	if (tp->pgd_paddr == __pa(mm->pgd))
		tsb_context_switch(mm);
}

void smp_tsb_sync(struct mm_struct *mm)
{
	smp_call_function_many(mm_cpumask(mm), tsb_sync, mm, 1);
}
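
/* Trap-level cross-call entry points, implemented in assembler. */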
extern unsigned long xcall_flush_tlb_mm;
extern unsigned long xcall_flush_tlb_page;
extern unsigned long xcall_flush_tlb_kernel_range;
extern unsigned long xcall_fetch_glob_regs;
extern unsigned long xcall_fetch_glob_pmu;
extern unsigned long xcall_fetch_glob_pmu_n4;
extern unsigned long xcall_receive_signal;
extern unsigned long xcall_new_mmu_context_version;
#ifdef CONFIG_KGDB
extern unsigned long xcall_kgdb_capture;
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
extern unsigned long xcall_flush_dcache_page_cheetah;
#endif
extern unsigned long xcall_flush_dcache_page_spitfire;

static inline void __local_flush_dcache_page(struct page *page)
{
#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}
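
/* Flush the D-cache copy of @page on @cpu.  On sun4v (tlb_type == hypervisor)
 * no software flush is needed, so this returns immediately.
 */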
void smp_flush_dcache_page_impl(struct page *page, int cpu)
{
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

	this_cpu = get_cpu();

	if (cpu == this_cpu) {
		__local_flush_dcache_page(page);
	} else if (cpu_online(cpu)) {
		void *pg_addr = page_address(page);
		u64 data0 = 0;

		if (tlb_type == spitfire) {
			data0 = ((u64)&xcall_flush_dcache_page_spitfire);
			if (page_mapping(page) != NULL)
				data0 |= ((u64)1 << 32);
		} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
			data0 = ((u64)&xcall_flush_dcache_page_cheetah);
#endif
		}
		if (data0) {
			xcall_deliver(data0, __pa(pg_addr),
				      (u64) pg_addr, cpumask_of(cpu));
#ifdef CONFIG_DEBUG_DCFLUSH
			atomic_inc(&dcpage_flushes_xcall);
#endif
		}
	}

	put_cpu();
}
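
/* Like smp_flush_dcache_page_impl(), but broadcast the flush to all online
 * cpus and always flush the local copy as well.
 */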
void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
{
	void *pg_addr;
	u64 data0;

	if (tlb_type == hypervisor)
		return;

	preempt_disable();

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

	data0 = 0;
	pg_addr = page_address(page);
	if (tlb_type == spitfire) {
		data0 = ((u64)&xcall_flush_dcache_page_spitfire);
		if (page_mapping(page) != NULL)
			data0 |= ((u64)1 << 32);
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
		data0 = ((u64)&xcall_flush_dcache_page_cheetah);
#endif
	}
	if (data0) {
		xcall_deliver(data0, __pa(pg_addr),
			      (u64) pg_addr, cpu_online_mask);
#ifdef CONFIG_DEBUG_DCFLUSH
		atomic_inc(&dcpage_flushes_xcall);
#endif
	}
	__local_flush_dcache_page(page);

	preempt_enable();
}
void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
{
	struct mm_struct *mm;
	unsigned long flags;

	clear_softint(1 << irq);

	/* See if we need to allocate a new TLB context because
	 * the version of the one we are using is now out of date.
	 */
	mm = current->active_mm;
	if (unlikely(!mm || (mm == &init_mm)))
		return;

	spin_lock_irqsave(&mm->context.lock, flags);

	if (unlikely(!CTX_VALID(mm->context)))
		get_new_mmu_context(mm);

	spin_unlock_irqrestore(&mm->context.lock, flags);

	load_secondary_context(mm);
	__flush_tlb_mm(CTX_HWBITS(mm->context),
		       SECONDARY_CONTEXT);
}

void smp_new_mmu_context_version(void)
{
	smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
}
#ifdef CONFIG_KGDB
void kgdb_roundup_cpus(unsigned long flags)
{
	smp_cross_call(&xcall_kgdb_capture, 0, 0, 0);
}
#endif

void smp_fetch_global_regs(void)
{
	smp_cross_call(&xcall_fetch_glob_regs, 0, 0, 0);
}

void smp_fetch_global_pmu(void)
{
	if (tlb_type == hypervisor &&
	    sun4v_chip_type >= SUN4V_CHIP_NIAGARA4)
		smp_cross_call(&xcall_fetch_glob_pmu_n4, 0, 0, 0);
	else
		smp_cross_call(&xcall_fetch_glob_pmu, 0, 0, 0);
}
/* We know that the window frames of the user have been flushed
 * to the stack before we get here because all callers of us
 * are flush_tlb_*() routines, and these run after flush_cache_*()
 * which performs the flushw.
 *
 * The SMP TLB coherency scheme we use works as follows:
 *
 * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
 *    space has (potentially) executed on, this is the heuristic
 *    we use to avoid doing cross calls.
 *
 *    Also, for flushing from kswapd and also for clones, we
 *    use cpu_vm_mask as the list of cpus to make run the TLB.
 *
 * 2) TLB context numbers are shared globally across all processors
 *    in the system, this allows us to play several games to avoid
 *    cross calls.
 *
 *    One invariant is that when a cpu switches to a process, and
 *    that process's tsk->active_mm->cpu_vm_mask does not have the
 *    current cpu's bit set, that tlb context is flushed locally.
 *
 *    If the address space is non-shared (ie. mm->count == 1) we avoid
 *    cross calls when we want to flush the currently running process's
 *    tlb state.  This is done by clearing all cpu bits except the current
 *    processor's in current->mm->cpu_vm_mask and performing the
 *    flush locally only.  This will force any subsequent cpus which run
 *    this task to flush the context from the local tlb if the process
 *    migrates to another cpu (again).
 *
 * 3) For shared address spaces (threads) and swapping we bite the
 *    bullet for most cases and perform the cross call (but only to
 *    the cpus listed in cpu_vm_mask).
 *
 * The performance gain from "optimizing" away the cross call for threads is
 * questionable (in theory the big win for threads is the massive sharing of
 * address space state across processors).
 */
/* This currently is only used by the hugetlb arch pre-fault
 * hook on UltraSPARC-III+ and later when changing the pagesize
 * bits of the context register for an address space.
 */
void smp_flush_tlb_mm(struct mm_struct *mm)
{
	u32 ctx = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (atomic_read(&mm->mm_users) == 1) {
		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
		goto local_flush_and_out;
	}

	smp_cross_call_masked(&xcall_flush_tlb_mm,
			      ctx, 0, 0,
			      mm_cpumask(mm));

local_flush_and_out:
	__flush_tlb_mm(ctx, SECONDARY_CONTEXT);

	put_cpu();
}
struct tlb_pending_info {
	unsigned long ctx;
	unsigned long nr;
	unsigned long *vaddrs;
};

static void tlb_pending_func(void *info)
{
	struct tlb_pending_info *t = info;

	__flush_tlb_pending(t->ctx, t->nr, t->vaddrs);
}

void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
{
	u32 ctx = CTX_HWBITS(mm->context);
	struct tlb_pending_info info;
	int cpu = get_cpu();

	info.ctx = ctx;
	info.nr = nr;
	info.vaddrs = vaddrs;

	if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
	else
		smp_call_function_many(mm_cpumask(mm), tlb_pending_func,
				       &info, 1);

	__flush_tlb_pending(ctx, nr, vaddrs);

	put_cpu();
}
void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
{
	unsigned long context = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
	else
		smp_cross_call_masked(&xcall_flush_tlb_page,
				      context, vaddr, 0,
				      mm_cpumask(mm));
	__flush_tlb_page(context, vaddr);

	put_cpu();
}

void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	start &= PAGE_MASK;
	end = PAGE_ALIGN(end);
	if (start != end) {
		smp_cross_call(&xcall_flush_tlb_kernel_range,
			       0, start, end);

		__flush_tlb_kernel_range(start, end);
	}
}
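
/* smp_capture() cross-calls every other online cpu into smp_penguin_jailcell(),
 * where they spin until smp_release() is called, letting the caller run with
 * the rest of the machine quiesced.
 */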
/* #define CAPTURE_DEBUG */
extern unsigned long xcall_capture;

static atomic_t smp_capture_depth = ATOMIC_INIT(0);
static atomic_t smp_capture_registry = ATOMIC_INIT(0);
static unsigned long penguins_are_doing_time;

void smp_capture(void)
{
	int result = atomic_add_return(1, &smp_capture_depth);

	if (result == 1) {
		int ncpus = num_online_cpus();

#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Sending penguins to jail...",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 1;
		atomic_inc(&smp_capture_registry);
		smp_cross_call(&xcall_capture, 0, 0, 0);
		while (atomic_read(&smp_capture_registry) != ncpus)
			rmb();
#ifdef CAPTURE_DEBUG
		printk("done\n");
#endif
	}
}

void smp_release(void)
{
	if (atomic_dec_and_test(&smp_capture_depth)) {
#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Giving pardon to "
		       "imprisoned penguins\n",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 0;
		membar_safe("#StoreLoad");
		atomic_dec(&smp_capture_registry);
	}
}
/* Imprisoned penguins run with %pil == PIL_NORMAL_MAX, but PSTATE_IE
 * set, so they can service tlb flush xcalls...
 */
extern void prom_world(int);

void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);

	preempt_disable();

	__asm__ __volatile__("flushw");
	prom_world(1);
	atomic_inc(&smp_capture_registry);
	membar_safe("#StoreLoad");
	while (penguins_are_doing_time)
		rmb();
	atomic_dec(&smp_capture_registry);
	prom_world(0);

	preempt_enable();
}
/* /proc/profile writes can call this, don't __init it please. */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
}

void smp_prepare_boot_cpu(void)
{
}
void __init smp_setup_processor_id(void)
{
	if (tlb_type == spitfire)
		xcall_deliver_impl = spitfire_xcall_deliver;
	else if (tlb_type == cheetah || tlb_type == cheetah_plus)
		xcall_deliver_impl = cheetah_xcall_deliver;
	else
		xcall_deliver_impl = hypervisor_xcall_deliver;
}
void __init smp_fill_in_cpu_possible_map(void)
{
	int possible_cpus = num_possible_cpus();
	int i;

	if (possible_cpus > nr_cpu_ids)
		possible_cpus = nr_cpu_ids;

	for (i = 0; i < possible_cpus; i++)
		set_cpu_possible(i, true);
	for (; i < NR_CPUS; i++)
		set_cpu_possible(i, false);
}
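
/* Rebuild the core, socket/cache and hw-thread sibling masks from the
 * per-cpu core_id, sock_id, max_cache_id and proc_id values.
 */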
void smp_fill_in_sib_core_maps(void)
{
	unsigned int i;

	for_each_present_cpu(i) {
		unsigned int j;

		cpumask_clear(&cpu_core_map[i]);
		if (cpu_data(i).core_id == 0) {
			cpumask_set_cpu(i, &cpu_core_map[i]);
			continue;
		}

		for_each_present_cpu(j) {
			if (cpu_data(i).core_id ==
			    cpu_data(j).core_id)
				cpumask_set_cpu(j, &cpu_core_map[i]);
		}
	}

	for_each_present_cpu(i) {
		unsigned int j;

		for_each_present_cpu(j) {
			if (cpu_data(i).max_cache_id ==
			    cpu_data(j).max_cache_id)
				cpumask_set_cpu(j, &cpu_core_sib_cache_map[i]);

			if (cpu_data(i).sock_id == cpu_data(j).sock_id)
				cpumask_set_cpu(j, &cpu_core_sib_map[i]);
		}
	}

	for_each_present_cpu(i) {
		unsigned int j;

		cpumask_clear(&per_cpu(cpu_sibling_map, i));
		if (cpu_data(i).proc_id == -1) {
			cpumask_set_cpu(i, &per_cpu(cpu_sibling_map, i));
			continue;
		}

		for_each_present_cpu(j) {
			if (cpu_data(i).proc_id ==
			    cpu_data(j).proc_id)
				cpumask_set_cpu(j, &per_cpu(cpu_sibling_map, i));
		}
	}
}
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int ret = smp_boot_one_cpu(cpu, tidle);

	if (!ret) {
		cpumask_set_cpu(cpu, &smp_commenced_mask);
		while (!cpu_online(cpu))
			mb();
		if (!cpu_online(cpu)) {
			ret = -ENODEV;
		} else {
			/* On SUN4V, writes to %tick and %stick are
			 * not allowed.
			 */
			if (tlb_type != hypervisor)
				smp_synchronize_one_tick(cpu);
		}
	}
	return ret;
}
#ifdef CONFIG_HOTPLUG_CPU
void cpu_play_dead(void)
{
	int cpu = smp_processor_id();
	unsigned long pstate;

	idle_task_exit();

	if (tlb_type == hypervisor) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO,
				tb->cpu_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_DEVICE_MONDO,
				tb->dev_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_RES_ERROR,
				tb->resum_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_NONRES_ERROR,
				tb->nonresum_mondo_pa, 0);
	}

	cpumask_clear_cpu(cpu, &smp_commenced_mask);
	membar_safe("#Sync");

	local_irq_disable();

	__asm__ __volatile__(
		"rdpr %%pstate, %0\n\t"
		"wrpr %0, %1, %%pstate"
		: "=r" (pstate)
		: "i" (PSTATE_IE));

	while (1)
		barrier();
}

int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	cpuinfo_sparc *c;
	int i;

	for_each_cpu(i, &cpu_core_map[cpu])
		cpumask_clear_cpu(cpu, &cpu_core_map[i]);
	cpumask_clear(&cpu_core_map[cpu]);

	for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu))
		cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i));
	cpumask_clear(&per_cpu(cpu_sibling_map, cpu));

	c = &cpu_data(cpu);

	c->core_id = 0;
	c->proc_id = -1;

	smp_wmb();

	/* Make sure no interrupts point to this cpu. */
	fixup_irqs();

	local_irq_enable();
	mdelay(1);
	local_irq_disable();

	set_cpu_online(cpu, false);

	cpu_map_rebuild();

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (!cpumask_test_cpu(cpu, &smp_commenced_mask))
			break;
		msleep(100);
	}
	if (cpumask_test_cpu(cpu, &smp_commenced_mask)) {
		printk(KERN_ERR "CPU %u didn't die...\n", cpu);
	} else {
#if defined(CONFIG_SUN_LDOMS)
		unsigned long hv_err;
		int limit = 100;

		do {
			hv_err = sun4v_cpu_stop(cpu);
			if (hv_err == HV_EOK) {
				set_cpu_present(cpu, false);
				break;
			}
		} while (--limit > 0);
		if (limit <= 0) {
			printk(KERN_ERR "sun4v_cpu_stop() fails err=%lu\n",
			       hv_err);
		}
#endif
	}
}
#endif

void __init smp_cpus_done(unsigned int max_cpus)
{
}

void smp_send_reschedule(int cpu)
{
	if (cpu == smp_processor_id()) {
		WARN_ON_ONCE(preemptible());
		set_softint(1 << PIL_SMP_RECEIVE_SIGNAL);
	} else {
		xcall_deliver((u64) &xcall_receive_signal,
			      0, 0, cpumask_of(cpu));
	}
}

void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	scheduler_ipi();
}

static void stop_this_cpu(void *dummy)
{
	set_cpu_online(smp_processor_id(), false);
	prom_stopself();
}

void smp_send_stop(void)
{
	int cpu;

	if (tlb_type == hypervisor) {
		int this_cpu = smp_processor_id();
#ifdef CONFIG_SERIAL_SUNHV
		sunhv_migrate_hvcons_irq(this_cpu);
#endif
		for_each_online_cpu(cpu) {
			if (cpu == this_cpu)
				continue;

			set_cpu_online(cpu, false);
#ifdef CONFIG_SUN_LDOMS
			if (ldom_domaining_enabled) {
				unsigned long hv_err;
				hv_err = sun4v_cpu_stop(cpu);
				if (hv_err)
					printk(KERN_ERR "sun4v_cpu_stop() "
					       "failed err=%lu\n", hv_err);
			} else
#endif
				prom_stopcpu_cpuid(cpu);
		}
	} else
		smp_call_function(stop_this_cpu, NULL, 0);
}
/**
 * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
 * @cpu: cpu to allocate for
 * @size: size allocation in bytes
 * @align: size alignment
 *
 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
 * does the right thing for NUMA regardless of the current
 * configuration.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size,
					size_t align)
{
	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int node = cpu_to_node(cpu);
	void *ptr;

	if (!node_online(node) || !NODE_DATA(node)) {
		ptr = __alloc_bootmem(size, align, goal);
		pr_info("cpu %d has no node %d or node-local memory\n",
			cpu, node);
		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
			 cpu, size, __pa(ptr));
	} else {
		ptr = __alloc_bootmem_node(NODE_DATA(node),
					   size, align, goal);
		pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
			 "%016lx\n", cpu, size, node, __pa(ptr));
	}
	return ptr;
#else
	return __alloc_bootmem(size, align, goal);
#endif
}

static void __init pcpu_free_bootmem(void *ptr, size_t size)
{
	free_bootmem(__pa(ptr), size);
}
static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	if (cpu_to_node(from) == cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
}
static void __init pcpu_populate_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd)) {
		pud_t *new;

		new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
		pgd_populate(&init_mm, pgd, new);
	}

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud)) {
		pmd_t *new;

		new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
		pud_populate(&init_mm, pud, new);
	}

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd)) {
		pte_t *new;

		new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
		pmd_populate_kernel(&init_mm, pmd, new);
	}
}
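
/* Set up the per-cpu areas: try the embedding first-chunk allocator and fall
 * back to page-sized units (using the bootmem helpers above), then record
 * each cpu's offset and prime %g5 for the boot cpu.
 */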
void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc = -EINVAL;

	if (pcpu_chosen_fc != PCPU_FC_PAGE) {
		rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
					    PERCPU_DYNAMIC_RESERVE, 4 << 20,
					    pcpu_cpu_distance,
					    pcpu_alloc_bootmem,
					    pcpu_free_bootmem);
		if (rc)
			pr_warning("PERCPU: %s allocator failed (%d), "
				   "falling back to page size\n",
				   pcpu_fc_names[pcpu_chosen_fc], rc);
	}
	if (rc < 0)
		rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
					   pcpu_alloc_bootmem,
					   pcpu_free_bootmem,
					   pcpu_populate_pte);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];

	/* Setup %g5 for the boot cpu. */
	__local_per_cpu_offset = __per_cpu_offset(smp_processor_id());

	of_fill_in_cpu_data();
	if (tlb_type == hypervisor)
		mdesc_fill_in_cpu_data(cpu_all_mask);
}