/*
 * SGI NMI/TRACE support routines
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Copyright (c) 2009-2013 Silicon Graphics, Inc.  All Rights Reserved.
 * Copyright (c) Mike Travis
 */
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/kdb.h>
#include <linux/kexec.h>
#include <linux/kgdb.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/apic.h>
#include <asm/current.h>
#include <asm/kdebug.h>
#include <asm/local64.h>
#include <asm/nmi.h>
#include <asm/traps.h>
#include <asm/uv/uv.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_mmrs.h>
void (*uv_trace_func)(const char *f, const int l, const char *fmt, ...);
EXPORT_SYMBOL(uv_trace_func);

void (*uv_trace_nmi_func)(unsigned int reason, struct pt_regs *regs);
EXPORT_SYMBOL(uv_trace_nmi_func);
/*
 * Handle system-wide NMI events generated by the global 'power nmi' command.
 *
 * Basic operation is to field the NMI interrupt on each cpu and wait
 * until all cpus have arrived in the nmi handler.  If some cpus do not
 * make it into the handler, try to force them in with the IPI(NMI) signal.
 *
 * We also have to minimize UV Hub MMR accesses as much as possible, as
 * these accesses disrupt the UV Hub's primary mission of directing
 * NumaLink traffic and can cause system problems.
 *
 * To do this we register our primary NMI notifier on the NMI_UNKNOWN
 * chain.  This reduces the number of false NMI calls when the perf
 * tools are running, which generate an enormous number of NMIs per
 * second (~4M/s for 1024 cpu threads).  Our secondary NMI handler is
 * very short: it only checks whether it has been "pinged" with the
 * IPI(NMI) signal mentioned above, and it does not read the UV Hub's MMR.
 */
static struct uv_hub_nmi_s **uv_hub_nmi_list;

DEFINE_PER_CPU(struct uv_cpu_nmi_s, __uv_cpu_nmi);
EXPORT_PER_CPU_SYMBOL_GPL(__uv_cpu_nmi);
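
/*
 * Location of the NMI MMR and the "NMI pending" bit within it, selected
 * at boot by uv_nmi_setup_mmrs() according to which flavor the hub supports.
 */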
static unsigned long nmi_mmr;
static unsigned long nmi_mmr_clear;
static unsigned long nmi_mmr_pending;
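
/*
 * Fan-in bookkeeping: uv_in_nmi flags that a system NMI is in progress,
 * uv_nmi_cpu records the first ("master") cpu in, uv_nmi_cpus_in_nmi counts
 * the cpus that have checked in, and uv_nmi_slave_continue is the handshake
 * word the master uses to release the slave cpus.
 */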
static atomic_t uv_in_nmi;
static atomic_t uv_nmi_cpu = ATOMIC_INIT(-1);
static atomic_t uv_nmi_cpus_in_nmi = ATOMIC_INIT(-1);
static atomic_t uv_nmi_slave_continue;
static atomic_t uv_nmi_kexec_failed;
static cpumask_var_t uv_nmi_cpu_mask;
/* Values for uv_nmi_slave_continue */
#define SLAVE_CLEAR	0
#define SLAVE_CONTINUE	1
#define SLAVE_EXIT	2
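
/*
 * The slaves spin until this word goes non-zero.  SLAVE_CONTINUE tells them
 * to call into KGDB, SLAVE_EXIT releases them without doing so, and the
 * master resets the word to SLAVE_CLEAR on the way out.
 */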
/*
 * By default all stack dumps go to the console and the log buffer.
 * Lower the level to send them to the log buffer only.
 */
static int uv_nmi_loglevel = 7;
module_param_named(dump_loglevel, uv_nmi_loglevel, int, 0644);
/*
 * The following values show statistics on how perf events are affecting
 * this system.
 */
static int param_get_local64(char *buffer, const struct kernel_param *kp)
{
	return sprintf(buffer, "%lu\n", local64_read((local64_t *)kp->arg));
}
static int param_set_local64(const char *val, const struct kernel_param *kp)
{
	/* clear on any write */
	local64_set((local64_t *)kp->arg, 0);
	return 0;
}
static struct kernel_param_ops param_ops_local64 = {
	.get = param_get_local64,
	.set = param_set_local64,
};
#define param_check_local64(name, p) __param_check(name, p, local64_t)
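
/*
 * The statistics counters below are exported through the "local64" param
 * type defined above: reading the module parameter returns the current
 * count, and any write resets it to zero.
 */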
static local64_t uv_nmi_count;
module_param_named(nmi_count, uv_nmi_count, local64, 0644);

static local64_t uv_nmi_misses;
module_param_named(nmi_misses, uv_nmi_misses, local64, 0644);

static local64_t uv_nmi_ping_count;
module_param_named(ping_count, uv_nmi_ping_count, local64, 0644);

static local64_t uv_nmi_ping_misses;
module_param_named(ping_misses, uv_nmi_ping_misses, local64, 0644);
/*
 * Following values allow tuning for large systems under heavy loading.
 */
static int uv_nmi_initial_delay = 100;
module_param_named(initial_delay, uv_nmi_initial_delay, int, 0644);

static int uv_nmi_slave_delay = 100;
module_param_named(slave_delay, uv_nmi_slave_delay, int, 0644);

static int uv_nmi_loop_delay = 100;
module_param_named(loop_delay, uv_nmi_loop_delay, int, 0644);

static int uv_nmi_trigger_delay = 10000;
module_param_named(trigger_delay, uv_nmi_trigger_delay, int, 0644);

static int uv_nmi_wait_count = 100;
module_param_named(wait_count, uv_nmi_wait_count, int, 0644);

static int uv_nmi_retry_count = 500;
module_param_named(retry_count, uv_nmi_retry_count, int, 0644);
156 * "dump" - dump process stack for each cpu
157 * "ips" - dump IP info for each cpu
158 * "kdump" - do crash dump
159 * "kdb" - enter KDB/KGDB (default)
161 static char uv_nmi_action[8] = "kdb";
162 module_param_string(action, uv_nmi_action, sizeof(uv_nmi_action), 0644);
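
/*
 * Note that only the leading strlen(action) characters of uv_nmi_action
 * are compared, so an action setting that merely starts with one of the
 * valid strings still matches it.
 */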
static inline bool uv_nmi_action_is(const char *action)
{
	return (strncmp(uv_nmi_action, action, strlen(action)) == 0);
}
/* Setup which NMI support is present in system */
static void uv_nmi_setup_mmrs(void)
{
	if (uv_read_local_mmr(UVH_NMI_MMRX_SUPPORTED)) {
		uv_write_local_mmr(UVH_NMI_MMRX_REQ,
					1UL << UVH_NMI_MMRX_REQ_SHIFT);
		nmi_mmr = UVH_NMI_MMRX;
		nmi_mmr_clear = UVH_NMI_MMRX_CLEAR;
		nmi_mmr_pending = 1UL << UVH_NMI_MMRX_SHIFT;
		pr_info("UV: SMI NMI support: %s\n", UVH_NMI_MMRX_TYPE);
	} else {
		nmi_mmr = UVH_NMI_MMR;
		nmi_mmr_clear = UVH_NMI_MMR_CLEAR;
		nmi_mmr_pending = 1UL << UVH_NMI_MMR_SHIFT;
		pr_info("UV: SMI NMI support: %s\n", UVH_NMI_MMR_TYPE);
	}
}
/* Read NMI MMR and check if NMI flag was set by BMC. */
static inline int uv_nmi_test_mmr(struct uv_hub_nmi_s *hub_nmi)
{
	hub_nmi->nmi_value = uv_read_local_mmr(nmi_mmr);
	atomic_inc(&hub_nmi->read_mmr_count);
	return !!(hub_nmi->nmi_value & nmi_mmr_pending);
}

static inline void uv_local_mmr_clear_nmi(void)
{
	uv_write_local_mmr(nmi_mmr_clear, nmi_mmr_pending);
}
/*
 * If this is the first cpu in on this hub, set the hub_nmi "in_nmi" and
 * "owner" values and return true.  If it is also the first cpu in on the
 * whole system, set the global "in_nmi" flag as well.
 */
static int uv_set_in_nmi(int cpu, struct uv_hub_nmi_s *hub_nmi)
{
	int first = atomic_add_unless(&hub_nmi->in_nmi, 1, 1);

	if (first) {
		atomic_set(&hub_nmi->cpu_owner, cpu);
		if (atomic_add_unless(&uv_in_nmi, 1, 1))
			atomic_set(&uv_nmi_cpu, cpu);

		atomic_inc(&hub_nmi->nmi_count);
	}
	return first;
}
/* Check if this is a system NMI event */
static int uv_check_nmi(struct uv_hub_nmi_s *hub_nmi)
{
	int cpu = smp_processor_id();
	int nmi = 0;

	local64_inc(&uv_nmi_count);
	uv_cpu_nmi.queries++;

	do {
		nmi = atomic_read(&hub_nmi->in_nmi);
		if (nmi)
			break;

		if (raw_spin_trylock(&hub_nmi->nmi_lock)) {
			/* check hub MMR NMI flag */
			if (uv_nmi_test_mmr(hub_nmi)) {
				uv_set_in_nmi(cpu, hub_nmi);
				nmi = 1;
				break;
			}
			/* MMR NMI flag is clear */
			raw_spin_unlock(&hub_nmi->nmi_lock);
		} else {
			/* wait a moment for the hub nmi locker to set flag */
			udelay(uv_nmi_slave_delay);

			/* re-check hub in_nmi flag */
			nmi = atomic_read(&hub_nmi->in_nmi);
			if (nmi)
				break;
		}

		/* check if this BMC missed setting the MMR NMI flag */
		if (!nmi) {
			nmi = atomic_read(&uv_in_nmi);
			if (nmi)
				uv_set_in_nmi(cpu, hub_nmi);
		}
	} while (0);

	if (!nmi)
		local64_inc(&uv_nmi_misses);

	return nmi;
}
/* Need to reset the NMI MMR register, but only once per hub. */
static inline void uv_clear_nmi(int cpu)
{
	struct uv_hub_nmi_s *hub_nmi = uv_hub_nmi;

	if (cpu == atomic_read(&hub_nmi->cpu_owner)) {
		atomic_set(&hub_nmi->cpu_owner, -1);
		atomic_set(&hub_nmi->in_nmi, 0);
		uv_local_mmr_clear_nmi();
		raw_spin_unlock(&hub_nmi->nmi_lock);
	}
}
/* Print non-responding cpus */
static void uv_nmi_nr_cpus_pr(char *fmt)
{
	static char cpu_list[1024];
	int len = sizeof(cpu_list);
	int c = cpumask_weight(uv_nmi_cpu_mask);
	int n = cpulist_scnprintf(cpu_list, len, uv_nmi_cpu_mask);

	if (n >= len - 1)
		strcpy(&cpu_list[len - 6], "...\n");

	printk(fmt, c, cpu_list);
}
/* Ping non-responding cpus, attempting to force them into the NMI handler */
static void uv_nmi_nr_cpus_ping(void)
{
	int cpu;

	for_each_cpu(cpu, uv_nmi_cpu_mask)
		atomic_set(&uv_cpu_nmi_per(cpu).pinging, 1);

	apic->send_IPI_mask(uv_nmi_cpu_mask, APIC_DM_NMI);
}

/* Clean up flags for cpus that ignored both the NMI and the ping */
static void uv_nmi_cleanup_mask(void)
{
	int cpu;

	for_each_cpu(cpu, uv_nmi_cpu_mask) {
		atomic_set(&uv_cpu_nmi_per(cpu).pinging, 0);
		atomic_set(&uv_cpu_nmi_per(cpu).state, UV_NMI_STATE_OUT);
		cpumask_clear_cpu(cpu, uv_nmi_cpu_mask);
	}
}
/* Loop waiting as cpus enter the NMI handler */
static int uv_nmi_wait_cpus(int first)
{
	int i, j, k, n = num_online_cpus();
	int last_k = 0, waiting = 0;

	if (first) {
		cpumask_copy(uv_nmi_cpu_mask, cpu_online_mask);
		k = 0;
	} else {
		k = n - cpumask_weight(uv_nmi_cpu_mask);
	}

	udelay(uv_nmi_initial_delay);
	for (i = 0; i < uv_nmi_retry_count; i++) {
		int loop_delay = uv_nmi_loop_delay;

		for_each_cpu(j, uv_nmi_cpu_mask) {
			if (atomic_read(&uv_cpu_nmi_per(j).state)) {
				cpumask_clear_cpu(j, uv_nmi_cpu_mask);
				k++;
			}
		}
		if (k >= n) {		/* all in? */
			k = n;
			break;
		}
		if (last_k != k) {	/* abort if no new cpus coming in */
			last_k = k;
			waiting = 0;
		} else if (++waiting > uv_nmi_wait_count)
			break;

		/* extend delay if waiting only for cpu 0 */
		if (waiting && (n - k) == 1 &&
		    cpumask_test_cpu(0, uv_nmi_cpu_mask))
			loop_delay *= 100;

		udelay(loop_delay);
	}
	atomic_set(&uv_nmi_cpus_in_nmi, k);
	return n - k;
}
/* Wait until all slave cpus have entered the UV NMI handler */
static void uv_nmi_wait(int master)
{
	/* indicate this cpu is in */
	atomic_set(&uv_cpu_nmi.state, UV_NMI_STATE_IN);

	/* if not the first cpu in (the master), then we are a slave cpu */
	if (!master)
		return;

	do {
		/* wait for all other cpus to gather here */
		if (!uv_nmi_wait_cpus(1))
			break;

		/* if not all made it in, send IPI NMI to them */
		uv_nmi_nr_cpus_pr(KERN_ALERT
			"UV: Sending NMI IPI to %d non-responding CPUs: %s\n");
		uv_nmi_nr_cpus_ping();

		/* if all cpus are in, then done */
		if (!uv_nmi_wait_cpus(0))
			break;

		uv_nmi_nr_cpus_pr(KERN_ALERT
			"UV: %d CPUs not in NMI loop: %s\n");
	} while (0);

	pr_alert("UV: %d of %d CPUs in NMI\n",
		atomic_read(&uv_nmi_cpus_in_nmi), num_online_cpus());
}
static void uv_nmi_dump_cpu_ip_hdr(void)
{
	printk(KERN_DEFAULT
		"\nUV: %4s %6s %-32s %s   (Note: PID 0 not listed)\n",
		"CPU", "PID", "COMMAND", "IP");
}

static void uv_nmi_dump_cpu_ip(int cpu, struct pt_regs *regs)
{
	printk(KERN_DEFAULT "UV: %4d %6d %-32.32s ",
		cpu, current->pid, current->comm);

	printk_address(regs->ip, 1);
}
/* Dump this cpu's state */
static void uv_nmi_dump_state_cpu(int cpu, struct pt_regs *regs)
{
	const char *dots = " ................................. ";

	if (uv_nmi_action_is("ips")) {
		if (cpu == 0)
			uv_nmi_dump_cpu_ip_hdr();

		if (current->pid != 0)
			uv_nmi_dump_cpu_ip(cpu, regs);

	} else if (uv_nmi_action_is("dump")) {
		printk(KERN_DEFAULT
			"UV:%sNMI process trace for CPU %d\n", dots, cpu);
		show_regs(regs);
	}
	atomic_set(&uv_cpu_nmi.state, UV_NMI_STATE_DUMP_DONE);
}
/* Trigger a slave cpu to dump its state */
static void uv_nmi_trigger_dump(int cpu)
{
	int retry = uv_nmi_trigger_delay;

	if (atomic_read(&uv_cpu_nmi_per(cpu).state) != UV_NMI_STATE_IN)
		return;

	atomic_set(&uv_cpu_nmi_per(cpu).state, UV_NMI_STATE_DUMP);
	do {
		cpu_relax();
		udelay(10);
		if (atomic_read(&uv_cpu_nmi_per(cpu).state)
				!= UV_NMI_STATE_DUMP)
			return;
	} while (--retry > 0);

	pr_crit("UV: CPU %d stuck in process dump function\n", cpu);
	atomic_set(&uv_cpu_nmi_per(cpu).state, UV_NMI_STATE_DUMP_DONE);
}
/* Wait until all cpus are ready to exit */
static void uv_nmi_sync_exit(int master)
{
	atomic_dec(&uv_nmi_cpus_in_nmi);
	if (master) {
		while (atomic_read(&uv_nmi_cpus_in_nmi) > 0)
			cpu_relax();
		atomic_set(&uv_nmi_slave_continue, SLAVE_CLEAR);
	} else {
		while (atomic_read(&uv_nmi_slave_continue))
			cpu_relax();
	}
}
/* Walk through the cpu list and dump the state of each */
static void uv_nmi_dump_state(int cpu, struct pt_regs *regs, int master)
{
	if (master) {
		int tcpu;
		int ignored = 0;
		int saved_console_loglevel = console_loglevel;

		pr_alert("UV: tracing %s for %d CPUs from CPU %d\n",
			uv_nmi_action_is("ips") ? "IPs" : "processes",
			atomic_read(&uv_nmi_cpus_in_nmi), cpu);

		console_loglevel = uv_nmi_loglevel;
		atomic_set(&uv_nmi_slave_continue, SLAVE_EXIT);
		for_each_online_cpu(tcpu) {
			if (cpumask_test_cpu(tcpu, uv_nmi_cpu_mask))
				ignored++;
			else if (tcpu == cpu)
				uv_nmi_dump_state_cpu(tcpu, regs);
			else
				uv_nmi_trigger_dump(tcpu);
		}
		if (ignored)
			printk(KERN_DEFAULT "UV: %d CPUs ignored NMI\n",
				ignored);

		console_loglevel = saved_console_loglevel;
		pr_alert("UV: process trace complete\n");
	} else {
		while (!atomic_read(&uv_nmi_slave_continue))
			cpu_relax();
		while (atomic_read(&uv_cpu_nmi.state) != UV_NMI_STATE_DUMP)
			cpu_relax();
		uv_nmi_dump_state_cpu(cpu, regs);
	}
	uv_nmi_sync_exit(master);
}
static void uv_nmi_touch_watchdogs(void)
{
	touch_softlockup_watchdog_sync();
	clocksource_touch_watchdog();
	rcu_cpu_stall_reset();
	touch_nmi_watchdog();
}
#if defined(CONFIG_KEXEC)
static void uv_nmi_kdump(int cpu, int master, struct pt_regs *regs)
{
	/* Call crash to dump system state */
	if (master) {
		pr_emerg("UV: NMI executing crash_kexec on CPU%d\n", cpu);
		crash_kexec(regs);

		pr_emerg("UV: crash_kexec unexpectedly returned, ");
		if (!kexec_crash_image) {
			pr_cont("crash kernel not loaded\n");
			atomic_set(&uv_nmi_kexec_failed, 1);
			return;
		}
		pr_cont("kexec busy, stalling cpus while waiting\n");
	}

	/* If crash_kexec fails, the slaves should return; otherwise stall */
	while (atomic_read(&uv_nmi_kexec_failed) == 0)
		mdelay(10);

	/* Crash kernel most likely not loaded, return in an orderly fashion */
}

#else /* !CONFIG_KEXEC */
static inline void uv_nmi_kdump(int cpu, int master, struct pt_regs *regs)
{
	if (master)
		pr_err("UV: NMI kdump: KEXEC not supported in this kernel\n");
}
#endif /* !CONFIG_KEXEC */
#ifdef CONFIG_KGDB_KDB
/* Call KDB from NMI handler */
static void uv_call_kdb(int cpu, struct pt_regs *regs, int master)
{
	int ret;

	if (master) {
		/* call KGDB NMI handler as MASTER */
		ret = kgdb_nmicallin(cpu, X86_TRAP_NMI, regs,
					&uv_nmi_slave_continue);
		if (ret) {
			pr_alert("KDB returned error, is kgdboc set?\n");
			atomic_set(&uv_nmi_slave_continue, SLAVE_EXIT);
		}
	} else {
		/* wait for KGDB signal that it's ready for slaves to enter */
		int sig;

		do {
			cpu_relax();
			sig = atomic_read(&uv_nmi_slave_continue);
		} while (!sig);

		/* call KGDB as slave */
		if (sig == SLAVE_CONTINUE)
			kgdb_nmicallback(cpu, regs);
	}
	uv_nmi_sync_exit(master);
}

#else /* !CONFIG_KGDB_KDB */
static inline void uv_call_kdb(int cpu, struct pt_regs *regs, int master)
{
	pr_err("UV: NMI error: KGDB/KDB is not enabled in this kernel\n");
}
#endif /* !CONFIG_KGDB_KDB */
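
/*
 * Primary NMI handler: fields the NMI on every cpu, decides whether it is
 * a UV system NMI, then carries out the selected "action" once all cpus
 * have been gathered.
 */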
int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
{
	struct uv_hub_nmi_s *hub_nmi = uv_hub_nmi;
	int cpu = smp_processor_id();
	int master = 0;
	unsigned long flags;

	local_irq_save(flags);

	/* If not a UV System NMI, ignore */
	if (!atomic_read(&uv_cpu_nmi.pinging) && !uv_check_nmi(hub_nmi)) {
		local_irq_restore(flags);
		return NMI_DONE;
	}

	/* Call possible NMI trace function */
	if (unlikely(uv_trace_nmi_func))
		(uv_trace_nmi_func)(reason, regs);

	/* Check whether we are the first ("master") CPU into the NMI handler */
	master = (atomic_read(&uv_nmi_cpu) == cpu);

	/* If NMI action is "kdump", then attempt to do it */
	if (uv_nmi_action_is("kdump"))
		uv_nmi_kdump(cpu, master, regs);

	/* Pause as all cpus enter the NMI handler */
	uv_nmi_wait(master);

	/* Dump state of each cpu */
	if (uv_nmi_action_is("ips") || uv_nmi_action_is("dump"))
		uv_nmi_dump_state(cpu, regs, master);

	/* Call KDB if enabled */
	else if (uv_nmi_action_is("kdb"))
		uv_call_kdb(cpu, regs, master);

	/* Clear per_cpu "in nmi" flag */
	atomic_set(&uv_cpu_nmi.state, UV_NMI_STATE_OUT);

	/* Clear MMR NMI flag on each hub */
	uv_clear_nmi(cpu);

	/* Clear global flags */
	if (master) {
		if (cpumask_weight(uv_nmi_cpu_mask))
			uv_nmi_cleanup_mask();
		atomic_set(&uv_nmi_cpus_in_nmi, -1);
		atomic_set(&uv_nmi_cpu, -1);
		atomic_set(&uv_in_nmi, 0);
	}

	uv_nmi_touch_watchdogs();
	local_irq_restore(flags);

	return NMI_HANDLED;
}
/*
 * NMI handler for pulling in CPUs when perf events are grabbing our NMI
 */
int uv_handle_nmi_ping(unsigned int reason, struct pt_regs *regs)
{
	int ret;

	uv_cpu_nmi.queries++;
	if (!atomic_read(&uv_cpu_nmi.pinging)) {
		local64_inc(&uv_nmi_ping_misses);
		return NMI_DONE;
	}

	local64_inc(&uv_nmi_ping_count);
	ret = uv_handle_nmi(reason, regs);
	atomic_set(&uv_cpu_nmi.pinging, 0);
	return ret;
}
void uv_register_nmi_notifier(void)
{
	if (register_nmi_handler(NMI_UNKNOWN, uv_handle_nmi, 0, "uv"))
		pr_warn("UV: NMI handler failed to register\n");

	if (register_nmi_handler(NMI_LOCAL, uv_handle_nmi_ping, 0, "uvping"))
		pr_warn("UV: PING NMI handler failed to register\n");
}
void uv_nmi_init(void)
{
	unsigned int value;

	/*
	 * Unmask NMI on all cpus
	 */
	value = apic_read(APIC_LVT1) | APIC_DM_NMI;
	value &= ~APIC_LVT_MASKED;
	apic_write(APIC_LVT1, value);
}
void uv_nmi_setup(void)
{
	int size = sizeof(void *) * (1 << NODES_SHIFT);
	int cpu, nid;

	/* Setup hub nmi info */
	uv_nmi_setup_mmrs();
	uv_hub_nmi_list = kzalloc(size, GFP_KERNEL);
	pr_info("UV: NMI hub list @ 0x%p (%d)\n", uv_hub_nmi_list, size);
	BUG_ON(!uv_hub_nmi_list);
	size = sizeof(struct uv_hub_nmi_s);
	for_each_present_cpu(cpu) {
		nid = cpu_to_node(cpu);
		if (uv_hub_nmi_list[nid] == NULL) {
			uv_hub_nmi_list[nid] = kzalloc_node(size,
							    GFP_KERNEL, nid);
			BUG_ON(!uv_hub_nmi_list[nid]);
			raw_spin_lock_init(&(uv_hub_nmi_list[nid]->nmi_lock));
			atomic_set(&uv_hub_nmi_list[nid]->cpu_owner, -1);
		}
		uv_hub_nmi_per(cpu) = uv_hub_nmi_list[nid];
	}
	BUG_ON(!alloc_cpumask_var(&uv_nmi_cpu_mask, GFP_KERNEL));
}