rcu: Suppress NMI backtraces when stall ends before dump
author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>
           Sat, 13 Aug 2011 20:31:47 +0000 (13:31 -0700)
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>
           Sun, 14 Aug 2011 22:56:28 +0000 (15:56 -0700)
It is possible for an RCU CPU stall to end just as it is detected, in
which case the current code will uselessly dump all CPUs' stacks.
This commit therefore checks for this condition and refrains from
sending needless NMIs.

And yes, the stall might also end just after we checked all CPUs and
tasks, but in that case we would at least have given some clue as
to which CPU/task was at fault.
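
For illustration only, here is a minimal user-space sketch of the pattern
this commit applies: count offenders while reporting them, and skip the
expensive all-CPU dump when the race resolved and none remain.  The names
cpu_stalled, expensive_backtrace_dump, and report_stall are hypothetical
stand-ins for the kernel's state and dump machinery, not kernel interfaces.

        #include <stdio.h>
        #include <stdbool.h>

        #define NCPUS 4

        /* Hypothetical per-CPU stall state; zero-initialized (no stalls). */
        static bool cpu_stalled[NCPUS];

        /* Stand-in for the costly NMI-driven backtrace of every CPU. */
        static void expensive_backtrace_dump(void)
        {
                printf("dumping stacks of all CPUs...\n");
        }

        /* Mirror of the commit's pattern: count offenders while printing
         * them, and only pay for the heavy dump when at least one was
         * still stalled at reporting time. */
        static void report_stall(void)
        {
                int cpu;
                int ndetected = 0;

                printf("INFO: detected stalls on CPUs: {");
                for (cpu = 0; cpu < NCPUS; cpu++)
                        if (cpu_stalled[cpu]) {
                                printf(" %d", cpu);
                                ndetected++;
                        }
                printf(" }\n");

                if (ndetected == 0)
                        printf("INFO: Stall ended before state dump start\n");
                else
                        expensive_backtrace_dump();
        }

        int main(void)
        {
                report_stall();          /* stall already ended: no dump */
                cpu_stalled[2] = true;
                report_stall();          /* CPU 2 still stalled: dump */
                return 0;
        }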

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
kernel/rcutree.c
kernel/rcutree.h
kernel/rcutree_plugin.h

diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 309c84da413a12b3c848a4bf1a413a1fd14a6329..daa2e62dcb39b5a26c4ebbdae5292e1ef0668a10 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -549,6 +549,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
        int cpu;
        long delta;
        unsigned long flags;
+       int ndetected;
        struct rcu_node *rnp = rcu_get_root(rsp);
 
        /* Only let one CPU complain about others per time interval. */
@@ -565,7 +566,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
         * Now rat on any tasks that got kicked up to the root rcu_node
         * due to CPU offlining.
         */
-       rcu_print_task_stall(rnp);
+       ndetected = rcu_print_task_stall(rnp);
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
 
        /*
@@ -577,17 +578,21 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
               rsp->name);
        rcu_for_each_leaf_node(rsp, rnp) {
                raw_spin_lock_irqsave(&rnp->lock, flags);
-               rcu_print_task_stall(rnp);
+               ndetected += rcu_print_task_stall(rnp);
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
                if (rnp->qsmask == 0)
                        continue;
                for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
-                       if (rnp->qsmask & (1UL << cpu))
+                       if (rnp->qsmask & (1UL << cpu)) {
                                printk(" %d", rnp->grplo + cpu);
+                               ndetected++;
+                       }
        }
        printk("} (detected by %d, t=%ld jiffies)\n",
               smp_processor_id(), (long)(jiffies - rsp->gp_start));
-       if (!trigger_all_cpu_backtrace())
+       if (ndetected == 0)
+               printk(KERN_ERR "INFO: Stall ended before state dump start\n");
+       else if (!trigger_all_cpu_backtrace())
                dump_stack();
 
        /* If so configured, complain about tasks blocking the grace period. */
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 51638b68b2dc78b58ceeeae308c3d558d30140b2..f509f728f9fbe970a2ecae3d5d6d278cccb88961 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -438,7 +438,7 @@ static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
 static void rcu_stop_cpu_kthread(int cpu);
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 static void rcu_print_detail_task_stall(struct rcu_state *rsp);
-static void rcu_print_task_stall(struct rcu_node *rnp);
+static int rcu_print_task_stall(struct rcu_node *rnp);
 static void rcu_preempt_stall_reset(void);
 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
 #ifdef CONFIG_HOTPLUG_CPU
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index fb1acde9bb58b8b927b8c49a435ceed91a1463c7..c44729f3a763dac3a1e4adc6ef9a3a03c171111f 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -483,16 +483,20 @@ static void rcu_print_detail_task_stall(struct rcu_state *rsp)
  * Scan the current list of tasks blocked within RCU read-side critical
  * sections, printing out the tid of each.
  */
-static void rcu_print_task_stall(struct rcu_node *rnp)
+static int rcu_print_task_stall(struct rcu_node *rnp)
 {
        struct task_struct *t;
+       int ndetected = 0;
 
        if (!rcu_preempt_blocked_readers_cgp(rnp))
-               return;
+               return 0;
        t = list_entry(rnp->gp_tasks,
                       struct task_struct, rcu_node_entry);
-       list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
+       list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
                printk(" P%d", t->pid);
+               ndetected++;
+       }
+       return ndetected;
 }
 
 /*
@@ -976,8 +980,9 @@ static void rcu_print_detail_task_stall(struct rcu_state *rsp)
  * Because preemptible RCU does not exist, we never have to check for
  * tasks blocked within RCU read-side critical sections.
  */
-static void rcu_print_task_stall(struct rcu_node *rnp)
+static int rcu_print_task_stall(struct rcu_node *rnp)
 {
+       return 0;
 }
 
 /*