Merge branch 'core-debug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
author    Linus Torvalds <torvalds@linux-foundation.org>
          Fri, 11 Sep 2009 20:15:55 +0000 (13:15 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Fri, 11 Sep 2009 20:15:55 +0000 (13:15 -0700)
* 'core-debug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  debug lockups: Improve lockup detection, fix generic arch fallback
  debug lockups: Improve lockup detection

arch/sparc/include/asm/irq_64.h
arch/sparc/kernel/process_64.c
arch/x86/include/asm/nmi.h
arch/x86/kernel/apic/nmi.c
drivers/char/sysrq.c
include/linux/nmi.h
kernel/rcutree.c

diff --git a/arch/sparc/include/asm/irq_64.h b/arch/sparc/include/asm/irq_64.h
index 1934f2cbf5132a7e7ba5e71e97dc1d1ada6f5589..a0b443cb3c1f9964513ace18d16252df00e4fae1 100644
@@ -89,8 +89,8 @@ static inline unsigned long get_softint(void)
        return retval;
 }
 
-void __trigger_all_cpu_backtrace(void);
-#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace()
+void arch_trigger_all_cpu_backtrace(void);
+#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
 
 extern void *hardirq_stack[NR_CPUS];
 extern void *softirq_stack[NR_CPUS];
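
The replacement hunk above uses a common kernel idiom: defining a macro to
expand to its own name leaves every call site untouched, but lets generic
code test "#ifdef arch_trigger_all_cpu_backtrace" to detect that this
architecture supplies an override. A minimal standalone sketch of the idiom,
using a hypothetical arch_flush_cache symbol (not a real kernel interface):

    /* idiom.c: the "#define X X" arch-override marker, in miniature */
    #include <stdio.h>

    void arch_flush_cache(void);
    #define arch_flush_cache arch_flush_cache /* marker: #ifdef now true */

    void arch_flush_cache(void) { puts("arch-specific implementation"); }

    int main(void)
    {
    #ifdef arch_flush_cache
            arch_flush_cache();         /* generic code picks the override */
    #else
            puts("generic fallback");
    #endif
            return 0;
    }
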
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index 4041f94e7724f89de745f2be540342c7ab3ade40..18d67854a1b8e9c9768a6eb11104ea3df0bfe797 100644
@@ -251,7 +251,7 @@ static void __global_reg_poll(struct global_reg_snapshot *gp)
        }
 }
 
-void __trigger_all_cpu_backtrace(void)
+void arch_trigger_all_cpu_backtrace(void)
 {
        struct thread_info *tp = current_thread_info();
        struct pt_regs *regs = get_irq_regs();
@@ -304,7 +304,7 @@ void __trigger_all_cpu_backtrace(void)
 
 static void sysrq_handle_globreg(int key, struct tty_struct *tty)
 {
-       __trigger_all_cpu_backtrace();
+       arch_trigger_all_cpu_backtrace();
 }
 
 static struct sysrq_key_op sparc_globalreg_op = {
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
index c86e5ed4af51dc0802291eb4b4af1af9793b938b..e63cf7d441e1351071997f4524315171c677e952 100644
@@ -45,8 +45,8 @@ extern int proc_nmi_enabled(struct ctl_table *, int , struct file *,
                        void __user *, size_t *, loff_t *);
 extern int unknown_nmi_panic;
 
-void __trigger_all_cpu_backtrace(void);
-#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace()
+void arch_trigger_all_cpu_backtrace(void);
+#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
 
 static inline void localise_nmi_watchdog(void)
 {
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c
index b3025b43b63a93053b0318574d06498b685c2607..db7220220d0927bb2756f5effd6da5aa539cd8b5 100644
@@ -39,7 +39,7 @@
 int unknown_nmi_panic;
 int nmi_watchdog_enabled;
 
-static cpumask_var_t backtrace_mask;
+static cpumask_t backtrace_mask __read_mostly;
 
 /* nmi_active:
  * >0: the lapic NMI watchdog is active, but can be disabled
@@ -138,7 +138,6 @@ int __init check_nmi_watchdog(void)
        if (!prev_nmi_count)
                goto error;
 
-       alloc_cpumask_var(&backtrace_mask, GFP_KERNEL|__GFP_ZERO);
        printk(KERN_INFO "Testing NMI watchdog ... ");
 
 #ifdef CONFIG_SMP
@@ -415,14 +414,17 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
        }
 
        /* We can be called before check_nmi_watchdog, hence NULL check. */
-       if (backtrace_mask != NULL && cpumask_test_cpu(cpu, backtrace_mask)) {
+       if (cpumask_test_cpu(cpu, &backtrace_mask)) {
                static DEFINE_SPINLOCK(lock);   /* Serialise the printks */
 
                spin_lock(&lock);
                printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
+               show_regs(regs);
                dump_stack();
                spin_unlock(&lock);
-               cpumask_clear_cpu(cpu, backtrace_mask);
+               cpumask_clear_cpu(cpu, &backtrace_mask);
+
+               rc = 1;
        }
 
        /* Could check oops_in_progress here too, but it's safer not to */
@@ -552,14 +554,18 @@ int do_nmi_callback(struct pt_regs *regs, int cpu)
        return 0;
 }
 
-void __trigger_all_cpu_backtrace(void)
+void arch_trigger_all_cpu_backtrace(void)
 {
        int i;
 
-       cpumask_copy(backtrace_mask, cpu_online_mask);
+       cpumask_copy(&backtrace_mask, cpu_online_mask);
+
+       printk(KERN_INFO "sending NMI to all CPUs:\n");
+       apic->send_IPI_all(NMI_VECTOR);
+
        /* Wait for up to 10 seconds for all CPUs to do the backtrace */
        for (i = 0; i < 10 * 1000; i++) {
-               if (cpumask_empty(backtrace_mask))
+               if (cpumask_empty(&backtrace_mask))
                        break;
                mdelay(1);
        }
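
Taken together, the two nmi.c hunks above form a simple rendezvous: the
initiator copies the online mask into backtrace_mask and sends an NMI to
every CPU via apic->send_IPI_all(NMI_VECTOR); each CPU's nmi_watchdog_tick()
finds its bit set, prints show_regs()/dump_stack() under a spinlock, clears
its own bit and returns 1 so the NMI counts as handled; the initiator polls
in mdelay(1) steps for up to 10 seconds. A runnable userspace analogue of
the same rendezvous, with threads standing in for CPUs (all names here are
illustrative, not kernel API; build with cc -pthread):

    /* rendezvous.c: userspace analogue of the NMI backtrace rendezvous.
     * Threads stand in for CPUs; polling stands in for the NMI tick. */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <unistd.h>

    #define NCPUS 4

    static atomic_ulong backtrace_mask;             /* one bit per "CPU" */
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static void *cpu_thread(void *arg)
    {
            unsigned long bit = 1UL << (long)arg;

            for (;;) {
                    if (atomic_load(&backtrace_mask) & bit) {
                            pthread_mutex_lock(&lock); /* serialise printks */
                            printf("backtrace for cpu %ld\n", (long)arg);
                            pthread_mutex_unlock(&lock);
                            atomic_fetch_and(&backtrace_mask, ~bit);
                    }
                    usleep(1000);
            }
            return NULL;
    }

    int main(void)
    {
            pthread_t t[NCPUS];
            long i;

            for (i = 0; i < NCPUS; i++)
                    pthread_create(&t[i], NULL, cpu_thread, (void *)i);

            /* "send the NMIs": mark every CPU as owing a backtrace */
            atomic_store(&backtrace_mask, (1UL << NCPUS) - 1);

            /* wait up to 10 seconds in 1 ms steps, like the mdelay(1) loop */
            for (i = 0; i < 10 * 1000; i++) {
                    if (!atomic_load(&backtrace_mask))
                            break;
                    usleep(1000);
            }
            printf("remaining mask: %lx\n",
                   (unsigned long)atomic_load(&backtrace_mask));
            return 0;
    }
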
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index 5d7a02f63e1c66e539702acc31eac13b4f9ef7fe..50eecfe1d72449fb8d5e8f4604e09dc88bd756b7 100644
@@ -24,6 +24,7 @@
 #include <linux/sysrq.h>
 #include <linux/kbd_kern.h>
 #include <linux/proc_fs.h>
+#include <linux/nmi.h>
 #include <linux/quotaops.h>
 #include <linux/perf_counter.h>
 #include <linux/kernel.h>
@@ -222,12 +223,20 @@ static DECLARE_WORK(sysrq_showallcpus, sysrq_showregs_othercpus);
 
 static void sysrq_handle_showallcpus(int key, struct tty_struct *tty)
 {
-       struct pt_regs *regs = get_irq_regs();
-       if (regs) {
-               printk(KERN_INFO "CPU%d:\n", smp_processor_id());
-               show_regs(regs);
+       /*
+        * Fall back to the workqueue based printing if the
+        * backtrace printing did not succeed or the
+        * architecture has no support for it:
+        */
+       if (!trigger_all_cpu_backtrace()) {
+               struct pt_regs *regs = get_irq_regs();
+
+               if (regs) {
+                       printk(KERN_INFO "CPU%d:\n", smp_processor_id());
+                       show_regs(regs);
+               }
+               schedule_work(&sysrq_showallcpus);
        }
-       schedule_work(&sysrq_showallcpus);
 }
 
 static struct sysrq_key_op sysrq_showallcpus_op = {
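
The rewritten sysrq-l handler tries the cross-CPU NMI backtrace first and
only falls back to the old behaviour (print this CPU's registers, defer the
rest to a workqueue) when trigger_all_cpu_backtrace() reports that the
architecture has no support. A standalone sketch of that control flow, with
stub stand-ins for the kernel pieces (all names illustrative):

    /* fallback.c: shape of the new sysrq-l handler */
    #include <stdbool.h>
    #include <stdio.h>

    static bool have_nmi_backtrace;     /* pretend arch capability flag */

    static bool try_nmi_backtrace(void)
    {
            if (!have_nmi_backtrace)
                    return false;       /* tell the caller to fall back */
            puts("NMI backtrace of all CPUs");
            return true;
    }

    static void show_this_cpu(void)    { puts("regs of this CPU"); }
    static void defer_other_cpus(void) { puts("schedule_work() for the rest"); }

    static void handle_showallcpus(void)
    {
            /* fall back only when the fast path is unavailable */
            if (!try_nmi_backtrace()) {
                    show_this_cpu();
                    defer_other_cpus();
            }
    }

    int main(void)
    {
            handle_showallcpus();       /* arch without support: fallback */
            have_nmi_backtrace = true;
            handle_showallcpus();       /* arch with support: NMI path */
            return 0;
    }
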
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index 29af2d5df0972cdac25fd2c73e42cd49976efa8a..b752e807addece22ec9210f0d109d2818505d34d 100644
@@ -28,8 +28,23 @@ static inline void acpi_nmi_disable(void) { }
 static inline void acpi_nmi_enable(void) { }
 #endif
 
-#ifndef trigger_all_cpu_backtrace
-#define trigger_all_cpu_backtrace() do { } while (0)
+/*
+ * Create trigger_all_cpu_backtrace() out of the arch-provided
+ * base function. Return whether such support was available,
+ * to allow calling code to fall back to some other mechanism:
+ */
+#ifdef arch_trigger_all_cpu_backtrace
+static inline bool trigger_all_cpu_backtrace(void)
+{
+       arch_trigger_all_cpu_backtrace();
+
+       return true;
+}
+#else
+static inline bool trigger_all_cpu_backtrace(void)
+{
+       return false;
+}
 #endif
 
 #endif
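
This hunk is the heart of the "fix generic arch fallback" change: the old
stub expanded trigger_all_cpu_backtrace() to an empty statement, so callers
could not tell whether anything happened; the new wrapper is always defined
and returns whether arch support exists. A compilable miniature of the same
construction (hypothetical names; the detection macro is the "#define X X"
idiom shown after the sparc hunk above):

    /* wrapper.c: always-defined capability wrapper reporting support */
    #include <stdbool.h>
    #include <stdio.h>

    /* A capable "arch header" would supply these two lines: */
    void arch_do_backtrace(void);
    #define arch_do_backtrace arch_do_backtrace

    void arch_do_backtrace(void) { puts("arch backtrace"); }

    /* The "generic header": one wrapper, two bodies, chosen at build time */
    #ifdef arch_do_backtrace
    static inline bool do_backtrace(void)
    {
            arch_do_backtrace();
            return true;
    }
    #else
    static inline bool do_backtrace(void)
    {
            return false;       /* caller falls back to another mechanism */
    }
    #endif

    int main(void)
    {
            if (!do_backtrace())
                    puts("no arch support, using fallback");
            return 0;
    }
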
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 7717b95c202782d2af38837fc09dc475a5f845bb..9c5fa9fc57ecb5ed0dda298e1581b285d4a87be3 100644
@@ -35,6 +35,7 @@
 #include <linux/rcupdate.h>
 #include <linux/interrupt.h>
 #include <linux/sched.h>
+#include <linux/nmi.h>
 #include <asm/atomic.h>
 #include <linux/bitops.h>
 #include <linux/module.h>
@@ -469,6 +470,8 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
        }
        printk(" (detected by %d, t=%ld jiffies)\n",
               smp_processor_id(), (long)(jiffies - rsp->gp_start));
+       trigger_all_cpu_backtrace();
+
        force_quiescent_state(rsp, 0);  /* Kick them all. */
 }
 
@@ -479,12 +482,14 @@ static void print_cpu_stall(struct rcu_state *rsp)
 
        printk(KERN_ERR "INFO: RCU detected CPU %d stall (t=%lu jiffies)\n",
                        smp_processor_id(), jiffies - rsp->gp_start);
-       dump_stack();
+       trigger_all_cpu_backtrace();
+
        spin_lock_irqsave(&rnp->lock, flags);
        if ((long)(jiffies - rsp->jiffies_stall) >= 0)
                rsp->jiffies_stall =
                        jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
        spin_unlock_irqrestore(&rnp->lock, flags);
+
        set_need_resched();  /* kick ourselves to get things going. */
 }