/*
 *	x86 SMP booting functions
 *
 *	(c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 *	(c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
 *	Copyright 2001 Andi Kleen, SuSE Labs.
 *
 *	Much of the core SMP work is based on previous work by Thomas Radke, to
 *	whom a great many thanks are extended.
 *
 *	Thanks to Intel for making available several different Pentium,
 *	Pentium Pro and Pentium-II/Xeon MP machines.
 *	Original development of Linux SMP code supported by Caldera.
 *
 *	This code is released under the GNU General Public License version 2
 *
 *	Felix Koop	:	NR_CPUS used properly
 *	Jose Renau	:	Handle single CPU case.
 *	Alan Cox	:	By repeated request 8) - Total BogoMIP report.
 *	Greg Wright	:	Fix for kernel stacks panic.
 *	Erich Boleyn	:	MP v1.4 and additional changes.
 *	Matthias Sattler:	Changes for 2.1 kernel map.
 *	Michel Lespinasse:	Changes for 2.1 kernel map.
 *	Michael Chastain:	Change trampoline.S to gnu as.
 *	Alan Cox	:	Dumb bug: 'B' step PPro's are fine
 *	Ingo Molnar	:	Added APIC timers, based on code
 *	Ingo Molnar	:	various cleanups and rewrites
 *	Tigran Aivazian	:	fixed "0.00 in /proc/uptime on SMP" bug.
 *	Maciej W. Rozycki:	Bits for genuine 82489DX APICs
 *	Andi Kleen	:	Changed for SMP boot into long mode.
 *	Rusty Russell	:	Hacked into shape for new "hotplug" boot process.
 *	Andi Kleen	:	Converted to new state machine.
 *				Probably mostly hotplug CPU ready now.
 *	Ashok Raj	:	CPU hotplug support
 */
#include <linux/config.h>
#include <linux/init.h>

#include <linux/kernel_stat.h>
#include <linux/smp_lock.h>
#include <linux/bootmem.h>
#include <linux/thread_info.h>
#include <linux/module.h>

#include <linux/delay.h>
#include <linux/mc146818rtc.h>
#include <asm/pgalloc.h>
#include <asm/kdebug.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>
#include <asm/hw_irq.h>
/* Number of siblings per CPU package */
int smp_num_siblings = 1;

/* Last level cache ID of each logical CPU */
u8 cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID};

/* Bitmask of currently online CPUs */
cpumask_t cpu_online_map __read_mostly;

EXPORT_SYMBOL(cpu_online_map);
/*
 * Private maps to synchronize booting between AP and BP.
 * Probably not needed anymore, but it makes for easier debugging. -AK
 */
cpumask_t cpu_callin_map;
cpumask_t cpu_callout_map;

cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);
/* Per CPU bogomips and other parameters */
struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;

/* Set when the idlers are all forked */
int smp_threads_ready;

/* representing HT siblings of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;

/* representing HT and core siblings of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);
/*
 * Trampoline 80x86 program as an array.
 */
extern unsigned char trampoline_data[];
extern unsigned char trampoline_end[];

/* State of each CPU */
DEFINE_PER_CPU(int, cpu_state) = { 0 };
/*
 * Store all idle threads, this can be reused instead of creating
 * a new thread. Also avoids complicated thread destroy functionality
 * for idle threads.
 */
struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata;

#define get_idle_for_cpu(x)	(idle_thread_array[(x)])
#define set_idle_for_cpu(x, p)	(idle_thread_array[(x)] = (p))
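
/*
 * Sketch of the intended reuse pattern; this just mirrors what
 * do_boot_cpu() below does with these helpers, it is not an extra API:
 *
 *	struct task_struct *idle = get_idle_for_cpu(cpu);
 *	if (!idle) {
 *		idle = fork_idle(cpu);		(first bringup only)
 *		set_idle_for_cpu(cpu, idle);
 *	}
 *	init_idle(idle, cpu);			(reused on CPU re-plug)
 */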
/*
 * Currently trivial. Write the real->protected mode
 * bootstrap into the page concerned. The caller
 * has made sure it's suitably aligned.
 */
static unsigned long __cpuinit setup_trampoline(void)
{
	void *tramp = __va(SMP_TRAMPOLINE_BASE);
	memcpy(tramp, trampoline_data, trampoline_end - trampoline_data);
	return virt_to_phys(tramp);
}
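
/*
 * Note: the physical address returned here ends up, shifted right by 12,
 * in the vector field of the STARTUP IPI (see wakeup_secondary_via_INIT()
 * below). The STARTUP vector encodes a real-mode 4K page number, so
 * SMP_TRAMPOLINE_BASE must be page aligned and below 1MB.
 */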
/*
 * The bootstrap kernel entry code has set these up. Save them for
 * a given CPU.
 */
static void __cpuinit smp_store_cpu_info(int id)
{
	struct cpuinfo_x86 *c = cpu_data + id;

	*c = boot_cpu_data;
	identify_cpu(c);
	print_cpu_info(c);
}
/*
 * New Funky TSC sync algorithm borrowed from IA64.
 * Main advantage is that it doesn't reset the TSCs fully and
 * in general looks more robust and it works better than my earlier
 * attempts. I believe it was written by David Mosberger. Some minor
 * adjustments for x86-64 by me -AK
 *
 * Original comment reproduced below.
 *
 * Synchronize TSC of the current (slave) CPU with the TSC of the
 * MASTER CPU (normally the time-keeper CPU). We use a closed loop to
 * eliminate the possibility of unaccounted-for errors (such as
 * getting a machine check in the middle of a calibration step). The
 * basic idea is for the slave to ask the master what itc value it has
 * and to read its own itc before and after the master responds. Each
 * iteration gives us three timestamps:
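 *
 *	slave		master
 *
 *	t0 ---\
 *	     ---\
 *	         --->
 *	         tm
 *	     /---
 *	 /---
 *	t1 <---
 *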
 * The goal is to adjust the slave's TSC such that tm falls exactly
 * half-way between t0 and t1. If we achieve this, the clocks are
 * synchronized provided the interconnect between the slave and the
 * master is symmetric. Even if the interconnect were asymmetric, we
 * would still know that the synchronization error is smaller than the
 * roundtrip latency (t1 - t0).
 *
 * When the interconnect is quiet and symmetric, this lets us
 * synchronize the TSC to within one or two cycles. However, we can
 * only *guarantee* that the synchronization is accurate to within a
 * round-trip time, which is typically in the range of several hundred
 * cycles (e.g., ~500 cycles). In practice, this means that the TSCs
 * are usually almost perfectly synchronized, but we shouldn't assume
 * that the accuracy is much better than half a microsecond or so.
 *
 * [there are other errors like the latency of RDTSC and of the
 * WRMSR. These can also amount to hundreds of cycles. So it's
 * probably worse. It claims 153 cycles error on a dual Opteron,
 * but I suspect the numbers are actually somewhat worse -AK]
 */
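
/*
 * Worked example with made-up numbers: if the slave reads t0 = 1000,
 * the master replies tm = 1020 and the slave then reads t1 = 1010,
 * the midpoint is tcenter = (1000 + 1010)/2 = 1005, so get_delta()
 * below returns tcenter - tm = -15: the slave's TSC is 15 cycles
 * behind and sync_tsc() will step it forward by roughly that amount.
 */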
#define MASTER	0
#define SLAVE	(SMP_CACHE_BYTES/8)

/*
 * Intentionally don't use cpu_relax() while TSC synchronization
 * because we don't want to go into funky power save modes or cause
 * hypervisors to schedule us away. Going to sleep would likely affect
 * latency and low latency is the primary objective here. -AK
 */
#define no_cpu_relax()	barrier()

static __cpuinitdata DEFINE_SPINLOCK(tsc_sync_lock);
static volatile __cpuinitdata unsigned long go[SLAVE + 1];
static int notscsync __cpuinitdata;

#undef DEBUG_TSC_SYNC

#define NUM_ROUNDS	64	/* magic value */
#define NUM_ITERS	5	/* likewise */
/* Callback on boot CPU */
static __cpuinit void sync_master(void *arg)
{
	unsigned long flags, i;

	go[MASTER] = 0;

	local_irq_save(flags);
	{
		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; ++i) {
			while (!go[MASTER])
				no_cpu_relax();
			go[MASTER] = 0;
			rdtscll(go[SLAVE]);
		}
	}
	local_irq_restore(flags);
}
/*
 * Return the number of cycles by which our tsc differs from the tsc
 * on the master (time-keeper) CPU. A positive number indicates our
 * tsc is ahead of the master, negative that it is behind.
 */
static inline long
__cpuinit get_delta(long *rt, long *master)
{
	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
	unsigned long tcenter, t0, t1, tm;
	int i;

	for (i = 0; i < NUM_ITERS; ++i) {
		rdtscll(t0);
		go[MASTER] = 1;
		while (!(tm = go[SLAVE]))
			no_cpu_relax();
		go[SLAVE] = 0;
		rdtscll(t1);

		if (t1 - t0 < best_t1 - best_t0)
			best_t0 = t0, best_t1 = t1, best_tm = tm;
	}

	*rt = best_t1 - best_t0;
	*master = best_tm - best_t0;

	/* average best_t0 and best_t1 without overflow: */
	tcenter = (best_t0/2 + best_t1/2);
	if (best_t0 % 2 + best_t1 % 2 == 2)
		++tcenter;
	return tcenter - best_tm;
}
static __cpuinit void sync_tsc(unsigned int master)
{
	int i, done = 0;
	long delta, adj, adjust_latency = 0;
	unsigned long flags, rt, master_time_stamp, bound;
#ifdef DEBUG_TSC_SYNC
	static struct syncdebug {
		long rt;	/* roundtrip time */
		long master;	/* master's timestamp */
		long diff;	/* difference between midpoint and master's timestamp */
		long lat;	/* estimate of tsc adjustment latency */
	} t[NUM_ROUNDS] __cpuinitdata;
#endif

	printk(KERN_INFO "CPU %d: Syncing TSC to CPU %u.\n",
		smp_processor_id(), master);

	go[MASTER] = 1;

	/* It is dangerous to broadcast IPI as cpus are coming up,
	 * as they may not be ready to accept them. So since
	 * we only need to send the ipi to the boot cpu direct
	 * the message, and avoid the race.
	 */
	smp_call_function_single(master, sync_master, NULL, 1, 0);

	while (go[MASTER])	/* wait for master to be ready */
		no_cpu_relax();

	spin_lock_irqsave(&tsc_sync_lock, flags);
	{
		for (i = 0; i < NUM_ROUNDS; ++i) {
			delta = get_delta(&rt, &master_time_stamp);
			if (delta == 0) {
				done = 1;	/* let's lock on to this... */
				bound = rt;
			}

			if (!done) {
				unsigned long t;
				if (i > 0) {
					adjust_latency += -delta;
					adj = -delta + adjust_latency/4;
				} else
					adj = -delta;

				rdtscll(t);
				wrmsrl(MSR_IA32_TSC, t + adj);
			}
#ifdef DEBUG_TSC_SYNC
			t[i].rt = rt;
			t[i].master = master_time_stamp;
			t[i].diff = delta;
			t[i].lat = adjust_latency/4;
#endif
		}
	}
	spin_unlock_irqrestore(&tsc_sync_lock, flags);

#ifdef DEBUG_TSC_SYNC
	for (i = 0; i < NUM_ROUNDS; ++i)
		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif

	printk(KERN_INFO
	       "CPU %d: synchronized TSC with CPU %u (last diff %ld cycles, "
	       "maxerr %lu cycles)\n",
	       smp_processor_id(), master, delta, rt);
}
static void __cpuinit tsc_sync_wait(void)
{
	/*
	 * When the CPU has synchronized TSCs assume the BIOS
	 * or the hardware already synced. Otherwise we could
	 * mess up a possible perfect synchronization with a
	 * not-quite-perfect algorithm.
	 */
	if (notscsync || !cpu_has_tsc || !unsynchronized_tsc())
		return;
	sync_tsc(0);
}

static __init int notscsync_setup(char *s)
{
	notscsync = 1;
	return 1;
}
__setup("notscsync", notscsync_setup);
static atomic_t init_deasserted __cpuinitdata;

/*
 * Report back to the Boot Processor.
 */
void __cpuinit smp_callin(void)
{
	int cpuid, phys_id;
	unsigned long timeout;

	/*
	 * If woken up by an INIT in an 82489DX configuration
	 * we may get here before an INIT-deassert IPI reaches
	 * our local APIC. We have to wait for the IPI or we'll
	 * lock up on an APIC access.
	 */
	while (!atomic_read(&init_deasserted))
		cpu_relax();

	/*
	 * (This works even if the APIC is not enabled.)
	 */
	phys_id = GET_APIC_ID(apic_read(APIC_ID));
	cpuid = smp_processor_id();
	if (cpu_isset(cpuid, cpu_callin_map)) {
		panic("smp_callin: phys CPU#%d, CPU#%d already present??\n",
			phys_id, cpuid);
	}
	Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);

	/*
	 * STARTUP IPIs are fragile beasts as they might sometimes
	 * trigger some glue motherboard logic. Complete APIC bus
	 * silence for 1 second, this overestimates the time the
	 * boot CPU is spending to send the up to 2 STARTUP IPIs
	 * by a factor of two. This should be enough.
	 */

	/*
	 * Waiting 2s total for startup (udelay is not yet working)
	 */
	timeout = jiffies + 2*HZ;
	while (time_before(jiffies, timeout)) {
		/*
		 * Has the boot CPU finished its STARTUP sequence?
		 */
		if (cpu_isset(cpuid, cpu_callout_map))
			break;
		cpu_relax();
	}

	if (!time_before(jiffies, timeout)) {
		panic("smp_callin: CPU%d started up but did not get a callout!\n",
			cpuid);
	}

	/*
	 * the boot CPU has finished the init stage and is spinning
	 * on callin_map until we finish. We are free to set up this
	 * CPU, first the APIC. (this is probably redundant on most
	 * boards)
	 */

	Dprintk("CALLIN, before setup_local_APIC().\n");
	setup_local_APIC();

	/*
	 * Need to enable IRQs because the delay calibration can take
	 * longer and then the NMI watchdog might kill us.
	 */
	local_irq_enable();
	calibrate_delay();
	local_irq_disable();
	Dprintk("Stack at about %p\n", &cpuid);

	disable_APIC_timer();

	/*
	 * Save our processor parameters
	 */
	smp_store_cpu_info(cpuid);

	/*
	 * Allow the master to continue.
	 */
	cpu_set(cpuid, cpu_callin_map);
}
/* maps the cpu to the sched domain representing multi-core */
cpumask_t cpu_coregroup_map(int cpu)
{
	struct cpuinfo_x86 *c = cpu_data + cpu;
	/*
	 * For perf, we return last level cache shared map.
	 * TBD: when power saving sched policy is added, we will return
	 *      cpu_core_map when power saving policy is enabled.
	 */
	return c->llc_shared_map;
}
/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

static inline void set_cpu_sibling_map(int cpu)
{
	int i;
	struct cpuinfo_x86 *c = cpu_data;

	cpu_set(cpu, cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu_mask(i, cpu_sibling_setup_map) {
			if (c[cpu].phys_proc_id == c[i].phys_proc_id &&
			    c[cpu].cpu_core_id == c[i].cpu_core_id) {
				cpu_set(i, cpu_sibling_map[cpu]);
				cpu_set(cpu, cpu_sibling_map[i]);
				cpu_set(i, cpu_core_map[cpu]);
				cpu_set(cpu, cpu_core_map[i]);
				cpu_set(i, c[cpu].llc_shared_map);
				cpu_set(cpu, c[i].llc_shared_map);
			}
		}
	} else {
		cpu_set(cpu, cpu_sibling_map[cpu]);
	}

	cpu_set(cpu, c[cpu].llc_shared_map);

	if (current_cpu_data.x86_max_cores == 1) {
		cpu_core_map[cpu] = cpu_sibling_map[cpu];
		c[cpu].booted_cores = 1;
		return;
	}

	for_each_cpu_mask(i, cpu_sibling_setup_map) {
		if (cpu_llc_id[cpu] != BAD_APICID &&
		    cpu_llc_id[cpu] == cpu_llc_id[i]) {
			cpu_set(i, c[cpu].llc_shared_map);
			cpu_set(cpu, c[i].llc_shared_map);
		}
		if (c[cpu].phys_proc_id == c[i].phys_proc_id) {
			cpu_set(i, cpu_core_map[cpu]);
			cpu_set(cpu, cpu_core_map[i]);
			/*
			 * Does this new cpu bring up a new core?
			 */
			if (cpus_weight(cpu_sibling_map[cpu]) == 1) {
				/*
				 * for each core in package, increment
				 * the booted_cores for this new cpu
				 */
				if (first_cpu(cpu_sibling_map[i]) == i)
					c[cpu].booted_cores++;
				/*
				 * increment the core count for all
				 * the other cpus in this package
				 */
				if (i != cpu)
					c[i].booted_cores++;
			} else if (i != cpu && !c[cpu].booted_cores)
				c[cpu].booted_cores = c[i].booted_cores;
		}
	}
}
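
/*
 * Example (hypothetical topology): on a two-socket system with two
 * cores per package and two HT siblings per core, once every CPU has
 * been through set_cpu_sibling_map(), cpu_sibling_map[0] holds the two
 * threads of core 0, cpu_core_map[0] holds all four logical CPUs of
 * package 0, and booted_cores is 2 for each CPU in that package.
 */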
/*
 * Setup code on secondary processor (after coming out of the trampoline)
 */
void __cpuinit start_secondary(void)
{
	/*
	 * Don't put anything before smp_callin(), SMP booting is so
	 * fragile that we want to limit the things done here to the
	 * most necessary things.
	 */
	cpu_init();
	smp_callin();

	/* otherwise gcc will move up the smp_processor_id before the cpu_init */
	barrier();

	Dprintk("cpu %d: setting up apic clock\n", smp_processor_id());
	setup_secondary_APIC_clock();

	Dprintk("cpu %d: enabling apic timer\n", smp_processor_id());

	if (nmi_watchdog == NMI_IO_APIC) {
		disable_8259A_irq(0);
		enable_NMI_through_LVT0(NULL);
		enable_8259A_irq(0);
	}

	enable_APIC_timer();

	/*
	 * The sibling maps must be set before turning the online map on for
	 * this cpu.
	 */
	set_cpu_sibling_map(smp_processor_id());

	/*
	 * Wait for TSC sync here so that nothing gets scheduled before it
	 * is done. We still process interrupts, which could see an
	 * inconsistent time in that window unfortunately.
	 * Do this here because TSC sync has global unprotected state.
	 */
	tsc_sync_wait();

	/*
	 * We need to hold call_lock, so there is no inconsistency
	 * between the time smp_call_function() determines the number of
	 * IPI recipients, and the time when the determination is made
	 * for which cpus receive the IPI in genapic_flat.c. Holding this
	 * lock helps us to not include this cpu in a currently in progress
	 * smp_call_function().
	 */
	lock_ipi_call_lock();

	/*
	 * Allow the master to continue.
	 */
	cpu_set(smp_processor_id(), cpu_online_map);
	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
	unlock_ipi_call_lock();

	cpu_idle();
}
extern volatile unsigned long init_rsp;
extern void (*initial_code)(void);
static void inquire_remote_apic(int apicid)
{
	unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
	char *names[] = { "ID", "VERSION", "SPIV" };
	int timeout, status;

	printk(KERN_INFO "Inquiring remote APIC #%d...\n", apicid);

	for (i = 0; i < sizeof(regs) / sizeof(*regs); i++) {
		printk("... APIC #%d %s: ", apicid, names[i]);

		/*
		 * Wait for idle.
		 */
		apic_wait_icr_idle();

		apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
		apic_write(APIC_ICR, APIC_DM_REMRD | regs[i]);

		timeout = 0;
		do {
			udelay(100);
			status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
		} while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);

		switch (status) {
		case APIC_ICR_RR_VALID:
			status = apic_read(APIC_RRR);
			printk("%08x\n", status);
			break;
		default:
			printk("failed\n");
		}
	}
}
/*
 * Kick the secondary to wake up.
 */
static int __cpuinit wakeup_secondary_via_INIT(int phys_apicid, unsigned int start_rip)
{
	unsigned long send_status = 0, accept_status = 0;
	int maxlvt, timeout, num_starts, j;

	Dprintk("Asserting INIT.\n");

	/*
	 * Turn INIT on target chip
	 */
	apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
	apic_write(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT
				| APIC_DM_INIT);

	Dprintk("Waiting for send to finish...\n");
	timeout = 0;
	do {
		udelay(100);
		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
	} while (send_status && (timeout++ < 1000));

	mdelay(10);

	Dprintk("Deasserting INIT.\n");

	apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
	apic_write(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);

	Dprintk("Waiting for send to finish...\n");
	timeout = 0;
	do {
		udelay(100);
		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
	} while (send_status && (timeout++ < 1000));

	atomic_set(&init_deasserted, 1);

	num_starts = 2;

	/*
	 * Run STARTUP IPI loop.
	 */
	Dprintk("#startup loops: %d.\n", num_starts);

	maxlvt = get_maxlvt();

	for (j = 1; j <= num_starts; j++) {
		Dprintk("Sending STARTUP #%d.\n", j);
		apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
		Dprintk("After apic_write.\n");

		/* Target chip */
		apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

		/* Boot on the stack */
		/* Kick the second */
		apic_write(APIC_ICR, APIC_DM_STARTUP | (start_rip >> 12));

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		udelay(300);

		Dprintk("Startup point 1.\n");

		Dprintk("Waiting for send to finish...\n");
		timeout = 0;
		do {
			udelay(100);
			send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
		} while (send_status && (timeout++ < 1000));

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		udelay(200);
		/*
		 * Due to the Pentium erratum 3AP.
		 */
		if (maxlvt > 3)
			apic_write(APIC_ESR, 0);
		accept_status = (apic_read(APIC_ESR) & 0xEF);
		if (send_status || accept_status)
			break;
	}
	Dprintk("After Startup.\n");

	if (send_status)
		printk(KERN_ERR "APIC never delivered???\n");
	if (accept_status)
		printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status);

	return (send_status | accept_status);
}
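
/*
 * For reference: the sequence above follows the classic MP startup
 * protocol: assert INIT, deassert INIT, then send up to two STARTUP
 * IPIs whose 8-bit vector field encodes the 4K page number of the
 * real-mode entry point (start_rip >> 12).
 */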
struct create_idle {
	struct task_struct *idle;
	struct completion done;
	int cpu;
};

void do_fork_idle(void *_c_idle)
{
	struct create_idle *c_idle = _c_idle;

	c_idle->idle = fork_idle(c_idle->cpu);
	complete(&c_idle->done);
}
/*
 * Boot one CPU.
 */
static int __cpuinit do_boot_cpu(int cpu, int apicid)
{
	unsigned long boot_error;
	int timeout;
	unsigned long start_rip;
	struct create_idle c_idle = {
		.cpu = cpu,
		.done = COMPLETION_INITIALIZER(c_idle.done),
	};
	DECLARE_WORK(work, do_fork_idle, &c_idle);

	/* allocate memory for gdts of secondary cpus. Hotplug is considered */
	if (!cpu_gdt_descr[cpu].address &&
	    !(cpu_gdt_descr[cpu].address = get_zeroed_page(GFP_KERNEL))) {
		printk(KERN_ERR "Failed to allocate GDT for CPU %d\n", cpu);
		return -1;
	}

	/* Allocate node local memory for AP pdas */
	if (cpu_pda(cpu) == &boot_cpu_pda[cpu]) {
		struct x8664_pda *newpda, *pda;
		int node = cpu_to_node(cpu);
		pda = cpu_pda(cpu);
		newpda = kmalloc_node(sizeof (struct x8664_pda), GFP_ATOMIC,
				      node);
		if (newpda) {
			memcpy(newpda, pda, sizeof (struct x8664_pda));
			cpu_pda(cpu) = newpda;
		} else
			printk(KERN_ERR
		"Could not allocate node local PDA for CPU %d on node %d\n",
				cpu, node);
	}

	alternatives_smp_switch(1);

	c_idle.idle = get_idle_for_cpu(cpu);

	if (c_idle.idle) {
		c_idle.idle->thread.rsp = (unsigned long) (((struct pt_regs *)
			(THREAD_SIZE + task_stack_page(c_idle.idle))) - 1);
		init_idle(c_idle.idle, cpu);
		goto do_rest;
	}

	/*
	 * During cold boot process, keventd thread is not spun up yet.
	 * When we do cpu hot-add, we create idle threads on the fly, we should
	 * not acquire any attributes from the calling context. Hence the clean
	 * way to create kernel_threads() is to do that from keventd().
	 * We do the current_is_keventd() due to the fact that ACPI notifier
	 * was also queuing to keventd() and when the caller is already running
	 * in context of keventd(), we would end up with locking up the keventd
	 * thread.
	 */
	if (!keventd_up() || current_is_keventd())
		work.func(work.data);
	else {
		schedule_work(&work);
		wait_for_completion(&c_idle.done);
	}

	if (IS_ERR(c_idle.idle)) {
		printk("failed fork for CPU %d\n", cpu);
		return PTR_ERR(c_idle.idle);
	}

	set_idle_for_cpu(cpu, c_idle.idle);

do_rest:

	cpu_pda(cpu)->pcurrent = c_idle.idle;

	start_rip = setup_trampoline();

	init_rsp = c_idle.idle->thread.rsp;
	per_cpu(init_tss, cpu).rsp0 = init_rsp;
	initial_code = start_secondary;
	clear_tsk_thread_flag(c_idle.idle, TIF_FORK);

	printk(KERN_INFO "Booting processor %d/%d APIC 0x%x\n", cpu,
		cpus_weight(cpu_present_map), apicid);

	/*
	 * This grunge runs the startup process for
	 * the targeted processor.
	 */

	atomic_set(&init_deasserted, 0);

	Dprintk("Setting warm reset code and vector.\n");

	CMOS_WRITE(0xa, 0xf);
	*((volatile unsigned short *) phys_to_virt(0x469)) = start_rip >> 4;
	*((volatile unsigned short *) phys_to_virt(0x467)) = start_rip & 0xf;
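
	/*
	 * Background (well-known PC behaviour, not specific to this file):
	 * shutdown code 0xA in CMOS register 0xF makes the BIOS resume a
	 * warm-reset CPU via the far-jump vector stored at 40:67, i.e. the
	 * segment and offset words at physical 0x469/0x467 written above.
	 */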
	/*
	 * Be paranoid about clearing APIC errors.
	 */
	apic_write(APIC_ESR, 0);
	apic_read(APIC_ESR);

	/*
	 * Status is now clean
	 */
	boot_error = 0;

	/*
	 * Starting actual IPI sequence...
	 */
	boot_error = wakeup_secondary_via_INIT(apicid, start_rip);

	if (!boot_error) {
		/*
		 * allow APs to start initializing.
		 */
		Dprintk("Before Callout %d.\n", cpu);
		cpu_set(cpu, cpu_callout_map);
		Dprintk("After Callout %d.\n", cpu);

		/*
		 * Wait 5s total for a response
		 */
		for (timeout = 0; timeout < 50000; timeout++) {
			if (cpu_isset(cpu, cpu_callin_map))
				break;	/* It has booted */
			udelay(100);
		}

		if (cpu_isset(cpu, cpu_callin_map)) {
			/* number CPUs logically, starting from 1 (BSP is 0) */
			Dprintk("CPU has booted.\n");
		} else {
			boot_error = 1;
			if (*((volatile unsigned char *)phys_to_virt(SMP_TRAMPOLINE_BASE))
					== 0xA5)
				/* trampoline started but...? */
				printk("Stuck ??\n");
			else
				/* trampoline code not run */
				printk("Not responding.\n");
			inquire_remote_apic(apicid);
		}
	}
	if (boot_error) {
		cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
		clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
		clear_node_cpumask(cpu); /* was set by numa_add_cpu */
		cpu_clear(cpu, cpu_present_map);
		cpu_clear(cpu, cpu_possible_map);
		x86_cpu_to_apicid[cpu] = BAD_APICID;
		x86_cpu_to_log_apicid[cpu] = BAD_APICID;
		return -EIO;
	}

	return 0;
}
cycles_t cacheflush_time;
unsigned long cache_decay_ticks;
/*
 * Cleanup possible dangling ends...
 */
static __cpuinit void smp_cleanup_boot(void)
{
	/*
	 * Paranoid: Set warm reset code and vector here back
	 * to default values.
	 */
	CMOS_WRITE(0, 0xf);

	/*
	 * Reset trampoline flag
	 */
	*((volatile int *) phys_to_virt(0x467)) = 0;
}
/*
 * Fall back to non-SMP mode after errors.
 *
 * RED-PEN audit/test this more. I bet there is more state messed up here.
 */
static __init void disable_smp(void)
{
	cpu_present_map = cpumask_of_cpu(0);
	cpu_possible_map = cpumask_of_cpu(0);
	if (smp_found_config)
		phys_cpu_present_map = physid_mask_of_physid(boot_cpu_id);
	else
		phys_cpu_present_map = physid_mask_of_physid(0);
	cpu_set(0, cpu_sibling_map[0]);
	cpu_set(0, cpu_core_map[0]);
}
#ifdef CONFIG_HOTPLUG_CPU

int additional_cpus __initdata = -1;

/*
 * cpu_possible_map should be static, it cannot change as CPUs
 * are onlined, or offlined. The reason is that per-cpu data-structures
 * are allocated by some modules at init time, and they don't expect
 * to do this dynamically on cpu arrival/departure.
 * cpu_present_map on the other hand can change dynamically.
 * When CPU hotplug is not compiled in, we resort to the current
 * behaviour, which is cpu_possible == cpu_present.
 *
 * Three ways to find out the number of additional hotplug CPUs:
 * - If the BIOS specified disabled CPUs in ACPI/mptables use that.
 * - The user can override it with additional_cpus=NUM
 * - Otherwise don't reserve additional CPUs.
 * We do this because additional CPUs waste a lot of memory.
 */
__init void prefill_possible_map(void)
{
	int i;
	int possible;

	if (additional_cpus == -1) {
		if (disabled_cpus > 0)
			additional_cpus = disabled_cpus;
		else
			additional_cpus = 0;
	}
	possible = num_processors + additional_cpus;
	if (possible > NR_CPUS)
		possible = NR_CPUS;

	printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
		possible,
		max_t(int, possible - num_processors, 0));

	for (i = 0; i < possible; i++)
		cpu_set(i, cpu_possible_map);
}
#endif
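
/*
 * Example: with 2 processors in the MP table, no BIOS-disabled CPUs
 * and additional_cpus=2 on the command line, possible = 2 + 2 = 4,
 * so CPUs 0-3 are marked possible and two hotplug slots are reserved.
 */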
/*
 * Various sanity checks.
 */
static int __init smp_sanity_check(unsigned max_cpus)
{
	if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
		printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
		       hard_smp_processor_id());
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}

	/*
	 * If we couldn't find an SMP configuration at boot time,
	 * get out of here now!
	 */
	if (!smp_found_config) {
		printk(KERN_NOTICE "SMP motherboard not detected.\n");
		disable_smp();
		if (APIC_init_uniprocessor())
			printk(KERN_NOTICE "Local APIC not detected."
					   " Using dummy APIC emulation.\n");
		return -1;
	}

	/*
	 * Should not be necessary because the MP table should list the boot
	 * CPU too, but we do it for the sake of robustness anyway.
	 */
	if (!physid_isset(boot_cpu_id, phys_cpu_present_map)) {
		printk(KERN_NOTICE "weird, boot CPU (#%d) not listed by the BIOS.\n",
			boot_cpu_id);
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}

	/*
	 * If we couldn't find a local APIC, then get out of here now!
	 */
	if (!cpu_has_apic) {
		printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
			boot_cpu_id);
		printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
		return -1;
	}

	/*
	 * If SMP should be disabled, then really disable it!
	 */
	if (!max_cpus) {
		printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
		return -1;
	}

	return 0;
}
/*
 * Prepare for SMP bootup. The MP table or ACPI has been read
 * earlier. Just do some sanity checking here and enable APIC mode.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	nmi_watchdog_default();
	current_cpu_data = boot_cpu_data;
	current_thread_info()->cpu = 0;  /* needed? */
	set_cpu_sibling_map(0);

	if (smp_sanity_check(max_cpus) < 0) {
		printk(KERN_INFO "SMP disabled\n");
		disable_smp();
		return;
	}

	/*
	 * Switch from PIC to APIC mode.
	 */
	setup_local_APIC();

	if (GET_APIC_ID(apic_read(APIC_ID)) != boot_cpu_id) {
		panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
		      GET_APIC_ID(apic_read(APIC_ID)), boot_cpu_id);
		/* Or can we switch back to PIC here? */
	}

	/*
	 * Now start the IO-APICs
	 */
	if (!skip_ioapic_setup && nr_ioapics)
		setup_IO_APIC();
	else
		nr_ioapics = 0;

	/*
	 * Set up local APIC timer on boot CPU.
	 */
	setup_boot_APIC_clock();
}
/*
 * Early setup to make printk work.
 */
void __init smp_prepare_boot_cpu(void)
{
	int me = smp_processor_id();
	cpu_set(me, cpu_online_map);
	cpu_set(me, cpu_callout_map);
	per_cpu(cpu_state, me) = CPU_ONLINE;
}
/*
 * Entry point to boot a CPU.
 */
int __cpuinit __cpu_up(unsigned int cpu)
{
	int err;
	int apicid = cpu_present_to_apicid(cpu);

	WARN_ON(irqs_disabled());

	Dprintk("++++++++++++++++++++=_---CPU UP %u\n", cpu);

	if (apicid == BAD_APICID || apicid == boot_cpu_id ||
	    !physid_isset(apicid, phys_cpu_present_map)) {
		printk("__cpu_up: bad cpu %d\n", cpu);
		return -EINVAL;
	}

	/*
	 * Already booted CPU?
	 */
	if (cpu_isset(cpu, cpu_callin_map)) {
		Dprintk("do_boot_cpu %d Already started\n", cpu);
		return -ENOSYS;
	}

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
	/* Boot it! */
	err = do_boot_cpu(cpu, apicid);
	if (err < 0) {
		Dprintk("do_boot_cpu failed %d\n", err);
		return err;
	}

	/* Unleash the CPU! */
	Dprintk("waiting for cpu %d\n", cpu);

	while (!cpu_isset(cpu, cpu_online_map))
		cpu_relax();
	return 0;
}
/*
 * Finish the SMP boot.
 */
void __init smp_cpus_done(unsigned int max_cpus)
{
	smp_cleanup_boot();

#ifdef CONFIG_X86_IO_APIC
	setup_ioapic_dest();
#endif

	check_nmi_watchdog();
}
#ifdef CONFIG_HOTPLUG_CPU

static void remove_siblinginfo(int cpu)
{
	int sibling;
	struct cpuinfo_x86 *c = cpu_data;

	for_each_cpu_mask(sibling, cpu_core_map[cpu]) {
		cpu_clear(cpu, cpu_core_map[sibling]);
		/*
		 * last thread sibling in this cpu core going down
		 */
		if (cpus_weight(cpu_sibling_map[cpu]) == 1)
			c[sibling].booted_cores--;
	}

	for_each_cpu_mask(sibling, cpu_sibling_map[cpu])
		cpu_clear(cpu, cpu_sibling_map[sibling]);
	cpus_clear(cpu_sibling_map[cpu]);
	cpus_clear(cpu_core_map[cpu]);
	c[cpu].phys_proc_id = 0;
	c[cpu].cpu_core_id = 0;
	cpu_clear(cpu, cpu_sibling_setup_map);
}
void remove_cpu_from_maps(void)
{
	int cpu = smp_processor_id();

	cpu_clear(cpu, cpu_callout_map);
	cpu_clear(cpu, cpu_callin_map);
	clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
	clear_node_cpumask(cpu);
}
int __cpu_disable(void)
{
	int cpu = smp_processor_id();

	/*
	 * Perhaps use cpufreq to drop frequency, but that could go
	 * into generic code.
	 *
	 * We won't take down the boot processor on i386 due to some
	 * interrupts only being able to be serviced by the BSP.
	 * Especially so if we're not using an IOAPIC	-zwane
	 */
	if (cpu == 0)
		return -EBUSY;

	clear_local_APIC();

	/*
	 * Allow any queued timer interrupts to get serviced
	 * This is only a temporary solution until we clean up
	 * fixup_irqs as we do for IA64.
	 */
	local_irq_enable();
	mdelay(1);

	local_irq_disable();
	remove_siblinginfo(cpu);

	/* It's now safe to remove this processor from the online map */
	cpu_clear(cpu, cpu_online_map);
	remove_cpu_from_maps();
	fixup_irqs(cpu_online_map);
	return 0;
}
void __cpu_die(unsigned int cpu)
{
	/* We don't do anything here: idle task is faking death itself. */
	unsigned int i;

	for (i = 0; i < 10; i++) {
		/* They ack this in play_dead by setting CPU_DEAD */
		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
			printk("CPU %d is now offline\n", cpu);
			if (1 == num_online_cpus())
				alternatives_smp_switch(0);
			return;
		}
		msleep(100);
	}
	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
}
__init int setup_additional_cpus(char *s)
{
	return get_option(&s, &additional_cpus);
}
__setup("additional_cpus=", setup_additional_cpus);
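
/*
 * Usage: booting with "additional_cpus=2" reserves two extra entries
 * in cpu_possible_map beyond the CPUs the BIOS reported, so that many
 * CPUs can be hot-added later (see prefill_possible_map() above).
 */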
#else /* ... !CONFIG_HOTPLUG_CPU */

int __cpu_disable(void)
{
	return -ENOSYS;
}

void __cpu_die(unsigned int cpu)
{
	/* We said "no" in __cpu_disable */
	BUG();
}

#endif /* CONFIG_HOTPLUG_CPU */