/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *  Copyright (C) 2011	Don Zickus Red Hat, Inc.
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/nmi.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/ratelimit.h>
#include <linux/slab.h>
#include <linux/export.h>
#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif
#include <linux/atomic.h>
#include <asm/traps.h>
#include <asm/mach_traps.h>
#include <asm/nmi.h>
#include <asm/x86_init.h>
#include <asm/reboot.h>
#include <asm/cache.h>
#define CREATE_TRACE_POINTS
#include <trace/events/nmi.h>
struct nmi_desc {
	spinlock_t lock;
	struct list_head head;
};
static struct nmi_desc nmi_desc[NMI_MAX] =
{
	{
		.lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[0].lock),
		.head = LIST_HEAD_INIT(nmi_desc[0].head),
	},
	{
		.lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[1].lock),
		.head = LIST_HEAD_INIT(nmi_desc[1].head),
	},
	{
		.lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[2].lock),
		.head = LIST_HEAD_INIT(nmi_desc[2].head),
	},
	{
		.lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[3].lock),
		.head = LIST_HEAD_INIT(nmi_desc[3].head),
	},
};
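/*
 * The four descriptors above are indexed by NMI type; assuming the enum
 * from <asm/nmi.h> of this era, that is NMI_LOCAL = 0, NMI_UNKNOWN,
 * NMI_SERR and NMI_IO_CHECK, with NMI_MAX = 4. Each type keeps its own
 * lock and its own list of handlers.
 */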
struct nmi_stats {
	unsigned int normal;
	unsigned int unknown;
	unsigned int external;
	unsigned int swallow;
};
static DEFINE_PER_CPU(struct nmi_stats, nmi_stats);

static int ignore_nmis __read_mostly;

int unknown_nmi_panic;
/*
 * Prevent the NMI reason port (0x61) from being accessed simultaneously;
 * this lock may only be taken in the NMI handler.
 */
static DEFINE_RAW_SPINLOCK(nmi_reason_lock);
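/*
 * For reference (assuming the NMI_REASON_* definitions in
 * <asm/mach_traps.h>): in the reason port, bit 7 (0x80) reports SERR and
 * bit 6 (0x40) reports IOCHK, while the low control bits
 * NMI_REASON_CLEAR_SERR (0x04) and NMI_REASON_CLEAR_IOCHK (0x08) are
 * written below to clear and re-enable those lines.
 */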
static int __init setup_unknown_nmi_panic(char *str)
{
	unknown_nmi_panic = 1;
	return 1;
}
__setup("unknown_nmi_panic", setup_unknown_nmi_panic);
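/*
 * Usage: booting with "unknown_nmi_panic" on the kernel command line (the
 * flag is also exposed as the kernel.unknown_nmi_panic sysctl, where
 * available) makes an unhandled NMI panic the machine instead of printing
 * the "Dazed and confused" message below.
 */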
#define nmi_to_desc(type) (&nmi_desc[type])

static u64 nmi_longest_ns = 1 * NSEC_PER_MSEC;

static int __init nmi_warning_debugfs(void)
{
	debugfs_create_u64("nmi_longest_ns", 0644,
			   arch_debugfs_dir, &nmi_longest_ns);
	return 0;
}
fs_initcall(nmi_warning_debugfs);
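/*
 * The warning threshold is tunable at run time. A minimal sketch, assuming
 * debugfs is mounted at /sys/kernel/debug (arch_debugfs_dir is the "x86"
 * subdirectory there):
 *
 *	# raise the threshold from the 1ms default to 2ms
 *	echo 2000000 > /sys/kernel/debug/x86/nmi_longest_ns
 */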
static void nmi_max_handler(struct irq_work *w)
{
	struct nmiaction *a = container_of(w, struct nmiaction, irq_work);
	int remainder_ns, decimal_msecs;
	u64 whole_msecs = ACCESS_ONCE(a->max_duration);

	remainder_ns = do_div(whole_msecs, (1000 * 1000));
	decimal_msecs = remainder_ns / 1000;

	printk_ratelimited(KERN_INFO
		"INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
		a->handler, whole_msecs, decimal_msecs);
}
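/*
 * Worked example of the split above: for max_duration = 2345678 ns,
 * do_div() leaves whole_msecs = 2 and returns remainder_ns = 345678, so
 * decimal_msecs = 345 and the message reads "... 2.345 msecs".
 */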
static int nmi_handle(unsigned int type, struct pt_regs *regs)
{
	struct nmi_desc *desc = nmi_to_desc(type);
	struct nmiaction *a;
	int handled = 0;

	rcu_read_lock();

	/*
	 * NMIs are edge-triggered, which means if you have enough
	 * of them concurrently, you can lose some because only one
	 * can be latched at any given time.  Walk the whole list
	 * to handle those situations.
	 */
	list_for_each_entry_rcu(a, &desc->head, list) {
		int thishandled;
		u64 delta;

		delta = sched_clock();
		thishandled = a->handler(type, regs);
		handled += thishandled;
		delta = sched_clock() - delta;
		trace_nmi_handler(a->handler, (int)delta, thishandled);

		if (delta < nmi_longest_ns || delta < a->max_duration)
			continue;

		a->max_duration = delta;
		irq_work_queue(&a->irq_work);
	}

	rcu_read_unlock();

	/* return the total number of NMI events handled */
	return handled;
}
NOKPROBE_SYMBOL(nmi_handle);
int __register_nmi_handler(unsigned int type, struct nmiaction *action)
{
	struct nmi_desc *desc = nmi_to_desc(type);
	unsigned long flags;

	if (!action->handler)
		return -EINVAL;

	init_irq_work(&action->irq_work, nmi_max_handler);

	spin_lock_irqsave(&desc->lock, flags);

	/*
	 * Most handlers of type NMI_UNKNOWN never return because
	 * they just assume the NMI is theirs.  This is just a sanity
	 * check to manage expectations.
	 */
	WARN_ON_ONCE(type == NMI_UNKNOWN && !list_empty(&desc->head));
	WARN_ON_ONCE(type == NMI_SERR && !list_empty(&desc->head));
	WARN_ON_ONCE(type == NMI_IO_CHECK && !list_empty(&desc->head));

	/*
	 * Some handlers need to be executed first, otherwise a fake
	 * event confuses some handlers (kdump uses this flag).
	 */
	if (action->flags & NMI_FLAG_FIRST)
		list_add_rcu(&action->list, &desc->head);
	else
		list_add_tail_rcu(&action->list, &desc->head);

	spin_unlock_irqrestore(&desc->lock, flags);
	return 0;
}
EXPORT_SYMBOL(__register_nmi_handler);
void unregister_nmi_handler(unsigned int type, const char *name)
{
	struct nmi_desc *desc = nmi_to_desc(type);
	struct nmiaction *n;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);

	list_for_each_entry_rcu(n, &desc->head, list) {
		/*
		 * The name passed in to describe the nmi handler
		 * is used as the lookup key.
		 */
		if (!strcmp(n->name, name)) {
			WARN(in_nmi(),
			     "Trying to free NMI (%s) from NMI context!\n", n->name);
			list_del_rcu(&n->list);
			break;
		}
	}

	spin_unlock_irqrestore(&desc->lock, flags);
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(unregister_nmi_handler);
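/*
 * A minimal usage sketch (hypothetical device, not part of this file):
 * handlers are normally registered through the register_nmi_handler()
 * wrapper in <asm/nmi.h>, which builds the struct nmiaction, and they
 * return the number of events they handled (NMI_HANDLED or NMI_DONE):
 *
 *	static int mydev_nmi(unsigned int type, struct pt_regs *regs)
 *	{
 *		if (!mydev_nmi_pending())	// hypothetical status check
 *			return NMI_DONE;	// not ours; try other handlers
 *		mydev_ack_nmi();		// hypothetical acknowledge
 *		return NMI_HANDLED;		// counted into 'handled' above
 *	}
 *
 *	register_nmi_handler(NMI_LOCAL, mydev_nmi, 0, "mydev");
 *	...
 *	unregister_nmi_handler(NMI_LOCAL, "mydev");
 */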
static void
pci_serr_error(unsigned char reason, struct pt_regs *regs)
{
	/* check to see if anyone registered against these types of errors */
	if (nmi_handle(NMI_SERR, regs))
		return;

	pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n",
		 reason, smp_processor_id());

	/*
	 * On some machines, the PCI SERR line is used to report memory
	 * errors.  EDAC makes use of it.
	 */
#if defined(CONFIG_EDAC)
	if (edac_handler_set()) {
		edac_atomic_assert_error();
		return;
	}
#endif

	if (panic_on_unrecovered_nmi)
		nmi_panic(regs, "NMI: Not continuing");

	pr_emerg("Dazed and confused, but trying to continue\n");

	/* Clear and disable the PCI SERR error line. */
	reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR;
	outb(reason, NMI_REASON_PORT);
}
NOKPROBE_SYMBOL(pci_serr_error);
static void
io_check_error(unsigned char reason, struct pt_regs *regs)
{
	unsigned long i;

	/* check to see if anyone registered against these types of errors */
	if (nmi_handle(NMI_IO_CHECK, regs))
		return;

	pr_emerg(
	"NMI: IOCK error (debug interrupt?) for reason %02x on CPU %d.\n",
		 reason, smp_processor_id());
	show_regs(regs);

	if (panic_on_io_nmi) {
		nmi_panic(regs, "NMI IOCK error: Not continuing");
		/*
		 * If we end up here, it means we have received an NMI while
		 * processing panic().  Simply return without delaying and
		 * re-enabling NMIs.
		 */
		return;
	}

	/* Re-enable the IOCK line and wait for a few seconds */
	reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK;
	outb(reason, NMI_REASON_PORT);

	i = 20000;
	while (--i) {
		touch_nmi_watchdog();
		udelay(100);	/* 20000 * 100us = ~2 seconds total */
	}

	reason &= ~NMI_REASON_CLEAR_IOCHK;
	outb(reason, NMI_REASON_PORT);
}
NOKPROBE_SYMBOL(io_check_error);
static void
unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
{
	int handled;

	/*
	 * Back-to-back NMIs are detected and dealt with one level up.
	 * Of course this makes having multiple 'unknown' handlers useless,
	 * as only the first one is ever run (unless it can actually determine
	 * if it caused the NMI).
	 */
	handled = nmi_handle(NMI_UNKNOWN, regs);
	if (handled) {
		__this_cpu_add(nmi_stats.unknown, handled);
		return;
	}

	__this_cpu_add(nmi_stats.unknown, 1);

	pr_emerg("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
		 reason, smp_processor_id());
	pr_emerg("Do you have a strange power saving mode enabled?\n");

	if (unknown_nmi_panic || panic_on_unrecovered_nmi)
		nmi_panic(regs, "NMI: Not continuing");

	pr_emerg("Dazed and confused, but trying to continue\n");
}
NOKPROBE_SYMBOL(unknown_nmi_error);
static DEFINE_PER_CPU(bool, swallow_nmi);
static DEFINE_PER_CPU(unsigned long, last_nmi_rip);
static void default_do_nmi(struct pt_regs *regs)
{
	unsigned char reason = 0;
	int handled;
	bool b2b = false;

	/*
	 * CPU-specific NMI must be processed before non-CPU-specific
	 * NMI, otherwise we may lose it, because the CPU-specific
	 * NMI can not be detected/processed on other CPUs.
	 */

	/*
	 * Back-to-back NMIs are interesting because they can either
	 * be two NMIs or more than two NMIs (anything over two is dropped
	 * due to NMI being edge-triggered).  If this is the second half
	 * of a back-to-back NMI, assume we dropped things and process
	 * more handlers.  Otherwise, reset the 'swallow' NMI behaviour.
	 */
	if (regs->ip == __this_cpu_read(last_nmi_rip))
		b2b = true;
	else
		__this_cpu_write(swallow_nmi, false);

	__this_cpu_write(last_nmi_rip, regs->ip);

	handled = nmi_handle(NMI_LOCAL, regs);
	__this_cpu_add(nmi_stats.normal, handled);
	if (handled) {
		/*
		 * There are cases when an NMI handler handles multiple
		 * events in the current NMI.  One of these events may
		 * be queued for the next NMI.  Because the event is
		 * already handled, the next NMI will result in an unknown
		 * NMI.  Instead, let's flag this for a potential NMI to
		 * swallow.
		 */
		if (handled > 1)
			__this_cpu_write(swallow_nmi, true);
		return;
	}

	/*
	 * Non-CPU-specific NMI: NMI sources can be processed on any CPU.
	 *
	 * Another CPU may be processing panic routines while holding
	 * nmi_reason_lock. Check if the CPU issued the IPI for crash dumping,
	 * and if so, call its callback directly.  If there is no CPU preparing
	 * a crash dump, we simply loop here.
	 */
	while (!raw_spin_trylock(&nmi_reason_lock)) {
		run_crash_ipi_callback(regs);
		cpu_relax();
	}

	reason = x86_platform.get_nmi_reason();

	if (reason & NMI_REASON_MASK) {
		if (reason & NMI_REASON_SERR)
			pci_serr_error(reason, regs);
		else if (reason & NMI_REASON_IOCHK)
			io_check_error(reason, regs);
#ifdef CONFIG_X86_32
		/*
		 * Reassert NMI in case it became active
		 * meanwhile as it's edge-triggered:
		 */
		reassert_nmi();
#endif
		__this_cpu_add(nmi_stats.external, 1);
		raw_spin_unlock(&nmi_reason_lock);
		return;
	}
	raw_spin_unlock(&nmi_reason_lock);

	/*
	 * Only one NMI can be latched at a time.  To handle
	 * this we may process multiple nmi handlers at once to
	 * cover the case where an NMI is dropped.  The downside
	 * to this approach is we may process an NMI prematurely,
	 * while its real NMI is sitting latched.  This will cause
	 * an unknown NMI on the next run of the NMI processing.
	 *
	 * We tried to flag that condition above, by setting the
	 * swallow_nmi flag when we process more than one event.
	 * This condition is also only present on the second half
	 * of a back-to-back NMI, so we flag that condition too.
	 *
	 * If both are true, we assume we already processed this
	 * NMI previously and we swallow it.  Otherwise, we reset
	 * the logic.
	 *
	 * There are scenarios where we may accidentally swallow
	 * a 'real' unknown NMI.  For example, while processing
	 * a perf NMI another perf NMI comes in along with a
	 * 'real' unknown NMI.  These two NMIs get combined into
	 * one (as described above).  When the next NMI gets
	 * processed, it will be flagged by perf as handled, but
	 * no one will know that there was a 'real' unknown NMI sent
	 * also.  As a result it gets swallowed.  Or if the first
	 * perf NMI returns two events handled then the second
	 * NMI will get eaten by the logic below, again losing a
	 * 'real' unknown NMI.  But this is the best we can do
	 * for now.
	 */
	if (b2b && __this_cpu_read(swallow_nmi))
		__this_cpu_add(nmi_stats.swallow, 1);
	else
		unknown_nmi_error(reason, regs);
}
NOKPROBE_SYMBOL(default_do_nmi);
/*
 * NMIs can page fault or hit breakpoints, which will cause them to lose
 * their NMI context with the CPU when the breakpoint or page fault does an
 * IRET.
 *
 * As a result, NMIs can nest if NMIs get unmasked due to an IRET during
 * NMI processing.  On x86_64, the asm glue protects us from nested NMIs
 * if the outer NMI came from kernel mode, but we can still nest if the
 * outer NMI came from user mode.
 *
 * To handle these nested NMIs, we have three states:
 *
 *  1) not running
 *  2) executing
 *  3) latched
 *
 * When no NMI is in progress, it is in the "not running" state.
 * When an NMI comes in, it goes into the "executing" state.
 * Normally, if another NMI is triggered, it does not interrupt
 * the running NMI and the HW will simply latch it so that when
 * the first NMI finishes, it will restart the second NMI.
 * (Note, the latch is binary, thus multiple NMIs triggering,
 *  when one is running, are ignored. Only one NMI is restarted.)
 *
 * If an NMI executes an iret, another NMI can preempt it. We do not
 * want to allow this new NMI to run, but we want to execute it when the
 * first one finishes.  We set the state to "latched", and the exit of
 * the first NMI will perform a dec_return; if the result is zero
 * (NOT_RUNNING), then it will simply exit the NMI handler.  If not, the
 * dec_return would have set the state to NMI_EXECUTING (what we want it
 * to be when we are running).  In this case, we simply jump back to
 * rerun the NMI handler again, and restart the 'latched' NMI.
 *
 * No trap (breakpoint or page fault) should be hit before nmi_restart,
 * thus there is no race between the first check of state for NOT_RUNNING
 * and setting it to NMI_EXECUTING.  The HW will prevent nested NMIs
 * at this point.
 *
 * In case the NMI takes a page fault, we need to save off the CR2
 * because the NMI could have preempted another page fault and corrupt
 * the CR2 that is about to be read.  As nested NMIs must be restarted
 * and they can not take breakpoints or page faults, the update of the
 * CR2 must be done before converting the nmi state back to NOT_RUNNING.
 * Otherwise, there would be a race of another nested NMI coming in
 * after setting state to NOT_RUNNING but before updating the nmi_cr2.
 */
enum nmi_states {
	NMI_NOT_RUNNING = 0,
	NMI_EXECUTING,
	NMI_LATCHED,
};
static DEFINE_PER_CPU(enum nmi_states, nmi_state);
static DEFINE_PER_CPU(unsigned long, nmi_cr2);
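/*
 * State transitions, as a sketch of the description above:
 *
 *	NMI_NOT_RUNNING --new NMI--> NMI_EXECUTING
 *	NMI_EXECUTING --nested NMI (after an IRET)--> NMI_LATCHED
 *
 * On the exit path, this_cpu_dec_return(nmi_state) decides what happens:
 *
 *	NMI_EXECUTING (1) -> NMI_NOT_RUNNING (0): simply exit
 *	NMI_LATCHED (2)   -> NMI_EXECUTING (1):   goto nmi_restart
 */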
#ifdef CONFIG_X86_64
/*
 * In x86_64, we need to handle breakpoint -> NMI -> breakpoint.  Without
 * some care, the inner breakpoint will clobber the outer breakpoint's
 * stack.
 *
 * If a breakpoint is being processed, and the debug stack is being
 * used, if an NMI comes in and also hits a breakpoint, the stack
 * pointer will be set to the same fixed address as the breakpoint that
 * was interrupted, causing that stack to be corrupted.  To handle this
 * case, check if the stack that was interrupted is the debug stack, and
 * if so, change the IDT so that new breakpoints will use the current
 * stack and not switch to the fixed address.  On return of the NMI,
 * switch back to the original IDT.
 */
static DEFINE_PER_CPU(int, update_debug_stack);
#endif
dotraplinkage notrace void
do_nmi(struct pt_regs *regs, long error_code)
{
	if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) {
		this_cpu_write(nmi_state, NMI_LATCHED);
		return;
	}
	this_cpu_write(nmi_state, NMI_EXECUTING);
	this_cpu_write(nmi_cr2, read_cr2());
nmi_restart:

#ifdef CONFIG_X86_64
	/*
	 * If we interrupted a breakpoint, it is possible that
	 * the nmi handler will have breakpoints too.  We need to
	 * change the IDT such that breakpoints that happen here
	 * continue to use the NMI stack.
	 */
	if (unlikely(is_debug_stack(regs->sp))) {
		debug_stack_set_zero();
		this_cpu_write(update_debug_stack, 1);
	}
#endif

	nmi_enter();

	inc_irq_stat(__nmi_count);

	if (!ignore_nmis)
		default_do_nmi(regs);

	nmi_exit();

#ifdef CONFIG_X86_64
	if (unlikely(this_cpu_read(update_debug_stack))) {
		debug_stack_reset();
		this_cpu_write(update_debug_stack, 0);
	}
#endif

	if (unlikely(this_cpu_read(nmi_cr2) != read_cr2()))
		write_cr2(this_cpu_read(nmi_cr2));
	if (this_cpu_dec_return(nmi_state))
		goto nmi_restart;
}
NOKPROBE_SYMBOL(do_nmi);
void stop_nmi(void)
{
	ignore_nmis++;
}

void restart_nmi(void)
{
	ignore_nmis--;
}

/* reset the back-to-back NMI logic */
void local_touch_nmi(void)
{
	__this_cpu_write(last_nmi_rip, 0);
}
EXPORT_SYMBOL_GPL(local_touch_nmi);