/*
 *	Intel SMP support routines.
 *
 *	(c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
 *	(c) 1998-99, 2000, 2009 Ingo Molnar <mingo@redhat.com>
 *	(c) 2002,2003 Andi Kleen, SuSE Labs.
 *
 *	i386 and x86_64 integration by Glauber Costa <gcosta@redhat.com>
 *
 *	This code is released under the GNU General Public License version 2 or
 *	later.
 */

#include <linux/init.h>

#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/gfp.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/apic.h>		/* apic, apic->send_IPI*(), disable_local_APIC() */
#include <asm/nmi.h>		/* register_nmi_handler() */
#include <asm/mce.h>		/* mcheck_cpu_clear() */
#include <asm/trace/irq_vectors.h>
#include <asm/kexec.h>

/*
 *	Some notes on x86 processor bugs affecting SMP operation:
 *
 *	Pentium, Pentium Pro, II, III (and all CPUs) have bugs.
 *	The Linux implications for SMP are handled as follows:
 *
 *	Pentium III / [Xeon]
 *		None of the E1AP-E3AP errata are visible to the user.
 *
 *	Pentium II / [Xeon]
 *		None of the A1AP-A3AP errata are visible to the user.
 *
 *	Pentium Pro
 *		None of the 1AP-9AP errata are visible to the normal user,
 *	except occasional delivery of a 'spurious interrupt' as trap #15.
 *	This is very rare and a non-problem.
 *
 *	1AP.	Linux maps the APIC as non-cacheable
 *	2AP.	worked around in hardware
 *	3AP.	fixed in C0 and above steppings microcode update.
 *		Linux does not use excessive STARTUP_IPIs.
 *	4AP.	worked around in hardware
 *	5AP.	symmetric IO mode (normal Linux operation) not affected.
 *		'noapic' mode has vector 0xf filled out properly.
 *	6AP.	'noapic' mode might be affected - fixed in later steppings
 *	7AP.	We do not assume writes to the LVT deasserting IRQs
 *	8AP.	We do not enable low power mode (deep sleep) during MP bootup
 *	9AP.	We do not use mixed mode
 *
 *	Pentium
 *		There is a marginal case where REP MOVS on 100MHz SMP
 *	machines with B stepping processors can fail. XXX should provide
 *	an L1cache=Writethrough or L1cache=off option.
 *
 *		B stepping CPUs may hang. There are hardware workarounds
 *	for this. We warn about it in case your board doesn't have the
 *	workarounds. Basically that's so I can tell anyone with a B stepping
 *	CPU and SMP problems "tough".
 *
 *	Specific items [From Pentium Processor Specification Update]
 *
 *	1AP.	Linux doesn't use remote read
 *	2AP.	Linux doesn't trust APIC errors
 *	3AP.	We work around this
 *	4AP.	Linux never generates 3 interrupts of the same priority
 *		to cause a lost local interrupt.
 *	5AP.	Remote read is never used
 *	6AP.	not affected - worked around in hardware
 *	7AP.	not affected - worked around in hardware
 *	8AP.	worked around in hardware - we get explicit CS errors if not
 *	9AP.	only 'noapic' mode affected. Might generate spurious
 *		interrupts, we log only the first one and count the
 *		rest silently.
 *	10AP.	not affected - worked around in hardware
 *	11AP.	Linux reads the APIC between writes to avoid this, as per
 *		the documentation. Make sure you preserve this as it affects
 *		the C stepping chips too.
 *	12AP.	not affected - worked around in hardware
 *	13AP.	not affected - worked around in hardware
 *	14AP.	we always deassert INIT during bootup
 *	15AP.	not affected - worked around in hardware
 *	16AP.	not affected - worked around in hardware
 *	17AP.	not affected - worked around in hardware
 *	18AP.	not affected - worked around in hardware
 *	19AP.	not affected - worked around in BIOS
 *
 *	If this sounds worrying, believe me: these bugs are either ___RARE___
 *	or are signal timing bugs worked around in hardware, and there is
 *	almost nothing of note from the C stepping upwards.
 */

static atomic_t stopping_cpu = ATOMIC_INIT(-1);
static bool smp_no_nmi_ipi = false;

/*
 * This function sends a 'reschedule' IPI to another CPU.
 * It goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
static void native_smp_send_reschedule(int cpu)
{
	if (unlikely(cpu_is_offline(cpu))) {
		WARN(1, "sched: Unexpected reschedule of offline CPU#%d!\n", cpu);
		return;
	}
	apic->send_IPI(cpu, RESCHEDULE_VECTOR);
}

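/*
 * Illustrative sketch, not part of this file: generic code does not
 * call the native_ routine above directly, it goes through the
 * smp_ops table installed at the bottom of this file. A hypothetical
 * caller kicking a remote CPU would look roughly like:
 *
 *	static void example_kick_cpu(int cpu)	// hypothetical helper
 *	{
 *		smp_send_reschedule(cpu);	// -> smp_ops.smp_send_reschedule()
 *	}
 */
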
void native_send_call_func_single_ipi(int cpu)
{
	apic->send_IPI(cpu, CALL_FUNCTION_SINGLE_VECTOR);
}

void native_send_call_func_ipi(const struct cpumask *mask)
{
	cpumask_var_t allbutself;

	/* Fall back to a plain multicast if we cannot allocate a mask */
	if (!alloc_cpumask_var(&allbutself, GFP_ATOMIC)) {
		apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
		return;
	}

	cpumask_copy(allbutself, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), allbutself);

	if (cpumask_equal(mask, allbutself) &&
	    cpumask_equal(cpu_online_mask, cpu_callout_mask))
		apic->send_IPI_allbutself(CALL_FUNCTION_VECTOR);
	else
		apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);

	free_cpumask_var(allbutself);
}

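/*
 * Illustrative sketch, not part of this file: the IPI senders above
 * back the generic cross-call API in kernel/smp.c, which queues the
 * callback before the IPI is sent. A hypothetical user running a
 * function on all other CPUs would do roughly:
 *
 *	static void example_func(void *info)	// hypothetical callback
 *	{
 *		pr_info("running on CPU %d\n", smp_processor_id());
 *	}
 *
 *	// wait != 0: return only after all CPUs have run example_func()
 *	smp_call_function(example_func, NULL, 1);
 */
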
static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs)
{
	/* We are registered on the stopping cpu too, avoid spurious NMI */
	if (raw_smp_processor_id() == atomic_read(&stopping_cpu))
		return NMI_HANDLED;

	stop_this_cpu(NULL);

	return NMI_HANDLED;
}

/*
 * This function calls the 'stop' function on all other CPUs in the system.
 */

asmlinkage __visible void smp_reboot_interrupt(void)
{
	ipi_entering_ack_irq();
	stop_this_cpu(NULL);
	irq_exit();
}

static void native_stop_other_cpus(int wait)
{
	unsigned long flags;
	unsigned long timeout;

	/*
	 * Use our own vector here because smp_call_function()
	 * does lots of things not suitable in a panic situation.
	 */

	/*
	 * We start by using the REBOOT_VECTOR irq.
	 * The irq is treated as a sync point to allow critical
	 * regions of code on other cpus to release their spin locks
	 * and re-enable irqs. Jumping straight to an NMI might
	 * accidentally cause deadlocks with further shutdown/panic
	 * code. By syncing, we give the cpus up to one second to
	 * finish their work before we force them off with the NMI.
	 */
	if (num_online_cpus() > 1) {
		/* did someone beat us here? */
		if (atomic_cmpxchg(&stopping_cpu, -1, safe_smp_processor_id()) != -1)
			return;

		/* sync above data before sending IRQ */
		wmb();

		apic->send_IPI_allbutself(REBOOT_VECTOR);

		/*
		 * Don't wait longer than a second if the caller
		 * didn't ask us to wait.
		 */
		timeout = USEC_PER_SEC;
		while (num_online_cpus() > 1 && (wait || timeout--))
			udelay(1);
	}

	/* if the REBOOT_VECTOR didn't work, try with the NMI */
	if ((num_online_cpus() > 1) && (!smp_no_nmi_ipi)) {
		/*
		 * Note: we ignore failures here, hoping the REBOOT_IRQ
		 * was good enough.
		 */
		if (register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback,
					 NMI_FLAG_FIRST, "smp_stop"))
			goto finish;

		/* sync above data before sending IRQ */
		wmb();

		pr_emerg("Shutting down cpus with NMI\n");

		apic->send_IPI_allbutself(NMI_VECTOR);

		/*
		 * Don't wait longer than 10 ms if the caller
		 * didn't ask us to wait.
		 */
		timeout = USEC_PER_MSEC * 10;
		while (num_online_cpus() > 1 && (wait || timeout--))
			udelay(1);
	}

finish:
	local_irq_save(flags);
	disable_local_APIC();
	mcheck_cpu_clear(this_cpu_ptr(&cpu_info));
	local_irq_restore(flags);
}

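/*
 * Illustrative sketch, not part of this file: the shutdown paths reach
 * native_stop_other_cpus() through the smp_ops hook, roughly:
 *
 *	static void example_shutdown(void)	// hypothetical helper
 *	{
 *		// Phase 1: REBOOT_VECTOR IRQ, up to 1 s grace period.
 *		// Phase 2: NMI fallback, up to 10 ms (skipped with nonmi_ipi).
 *		stop_other_cpus();		// -> smp_ops.stop_other_cpus(1)
 *	}
 */
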
/*
 * Reschedule call back.
 */
static inline void __smp_reschedule_interrupt(void)
{
	inc_irq_stat(irq_resched_count);
	scheduler_ipi();
}

__visible void smp_reschedule_interrupt(struct pt_regs *regs)
{
	ack_APIC_irq();
	__smp_reschedule_interrupt();
	/*
	 * KVM uses this interrupt to force a cpu out of guest mode
	 */
}

__visible void smp_trace_reschedule_interrupt(struct pt_regs *regs)
{
	/*
	 * Need to call irq_enter() before calling the trace point.
	 * __smp_reschedule_interrupt() calls irq_enter/exit() too (in
	 * scheduler_ipi()). This is OK, since those functions are allowed
	 * to nest.
	 */
	ipi_entering_ack_irq();
	trace_reschedule_entry(RESCHEDULE_VECTOR);
	__smp_reschedule_interrupt();
	trace_reschedule_exit(RESCHEDULE_VECTOR);
	exiting_irq();
	/*
	 * KVM uses this interrupt to force a cpu out of guest mode
	 */
}

static inline void __smp_call_function_interrupt(void)
{
	generic_smp_call_function_interrupt();
	inc_irq_stat(irq_call_count);
}

__visible void smp_call_function_interrupt(struct pt_regs *regs)
{
	ipi_entering_ack_irq();
	__smp_call_function_interrupt();
	exiting_irq();
}

__visible void smp_trace_call_function_interrupt(struct pt_regs *regs)
{
	ipi_entering_ack_irq();
	trace_call_function_entry(CALL_FUNCTION_VECTOR);
	__smp_call_function_interrupt();
	trace_call_function_exit(CALL_FUNCTION_VECTOR);
	exiting_irq();
}

static inline void __smp_call_function_single_interrupt(void)
{
	generic_smp_call_function_single_interrupt();
	inc_irq_stat(irq_call_count);
}

__visible void smp_call_function_single_interrupt(struct pt_regs *regs)
{
	ipi_entering_ack_irq();
	__smp_call_function_single_interrupt();
	exiting_irq();
}

__visible void smp_trace_call_function_single_interrupt(struct pt_regs *regs)
{
	ipi_entering_ack_irq();
	trace_call_function_single_entry(CALL_FUNCTION_SINGLE_VECTOR);
	__smp_call_function_single_interrupt();
	trace_call_function_single_exit(CALL_FUNCTION_SINGLE_VECTOR);
	exiting_irq();
}

static int __init nonmi_ipi_setup(char *str)
{
	smp_no_nmi_ipi = true;
	return 1;
}

__setup("nonmi_ipi", nonmi_ipi_setup);

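/*
 * Usage sketch: booting with the parameter on the kernel command line,
 * e.g.
 *
 *	vmlinuz ... nonmi_ipi
 *
 * sets smp_no_nmi_ipi and makes native_stop_other_cpus() skip the NMI
 * fallback phase entirely.
 */
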
struct smp_ops smp_ops = {
	.smp_prepare_boot_cpu	= native_smp_prepare_boot_cpu,
	.smp_prepare_cpus	= native_smp_prepare_cpus,
	.smp_cpus_done		= native_smp_cpus_done,

	.stop_other_cpus	= native_stop_other_cpus,
#if defined(CONFIG_KEXEC_CORE)
	.crash_stop_other_cpus	= kdump_nmi_shootdown_cpus,
#endif
	.smp_send_reschedule	= native_smp_send_reschedule,

	.cpu_up			= native_cpu_up,
	.cpu_die		= native_cpu_die,
	.cpu_disable		= native_cpu_disable,
	.play_dead		= native_play_dead,

	.send_call_func_ipi	= native_send_call_func_ipi,
	.send_call_func_single_ipi = native_send_call_func_single_ipi,
};
EXPORT_SYMBOL_GPL(smp_ops);

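/*
 * Illustrative sketch, not part of this file: since smp_ops is a plain
 * table of function pointers, alternative platforms (e.g. paravirtual
 * guests) can substitute their own hooks during early boot, roughly:
 *
 *	static void example_send_reschedule(int cpu)	// hypothetical
 *	{
 *		// deliver the IPI via a hypercall instead of the APIC
 *	}
 *
 *	smp_ops.smp_send_reschedule = example_send_reschedule;
 */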