rcu: Allow RCU grace-period initialization to be preempted
author Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Wed, 20 Jun 2012 00:18:20 +0000 (17:18 -0700)
committer Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Fri, 6 Jul 2012 13:01:49 +0000 (06:01 -0700)
RCU grace-period initialization is currently carried out with interrupts
disabled, which can result in 200-microsecond latency spikes on systems
on which RCU has been configured for 4096 CPUs.  This patch therefore
makes RCU grace-period initialization preemptible, which should
eliminate those latency spikes.  Later patches will apply the same
treatment to the similar spikes caused by grace-period cleanup and the
forcing of quiescent states.

Reported-by: Mike Galbraith <mgalbraith@suse.de>
Reported-by: Dimitri Sivanich <sivanich@sgi.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
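
For illustration (not part of the patch): the change boils down to taking each
rcu_node lock with irqsave/irqrestore, excluding CPU hotplug with
get_online_cpus()/put_online_cpus() instead of the irq-disabled onofflock, and
calling cond_resched() between nodes so the grace-period kthread can be
preempted.  A minimal sketch of that pattern follows; the my_node and
init_all_nodes names are illustrative stand-ins, not symbols from rcutree.c.

#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

struct my_node {			/* stand-in for struct rcu_node */
	raw_spinlock_t lock;
	unsigned long qsmask;
	unsigned long qsmaskinit;
};

static void init_all_nodes(struct my_node *nodes, int nr_nodes)
{
	unsigned long flags;
	int i;

	/* Exclude concurrent CPU-hotplug operations, preemptibly. */
	get_online_cpus();

	for (i = 0; i < nr_nodes; i++) {
		struct my_node *np = &nodes[i];

		/* Hold the per-node lock only while touching this node. */
		raw_spin_lock_irqsave(&np->lock, flags);
		np->qsmask = np->qsmaskinit;
		raw_spin_unlock_irqrestore(&np->lock, flags);

		/*
		 * Each critical section above is short; cond_resched()
		 * between nodes keeps the full pass over a 4096-CPU tree
		 * preemptible instead of one long irqs-off stretch.
		 */
		cond_resched();
	}

	put_online_cpus();
}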
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index e1c5868aade7d3e6bc7e51b891b4bf5f00bec816..ef56aa341490b15b3d8d04e2706dbb254c107b15 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1069,6 +1069,7 @@ static int rcu_gp_kthread(void *arg)
                         * don't start another one.
                         */
                        raw_spin_unlock_irqrestore(&rnp->lock, flags);
+                       cond_resched();
                        continue;
                }
 
@@ -1079,6 +1080,7 @@ static int rcu_gp_kthread(void *arg)
                         */
                        rsp->fqs_need_gp = 1;
                        raw_spin_unlock_irqrestore(&rnp->lock, flags);
+                       cond_resched();
                        continue;
                }
 
@@ -1089,10 +1091,10 @@ static int rcu_gp_kthread(void *arg)
                rsp->fqs_state = RCU_GP_INIT; /* Stop force_quiescent_state. */
                rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
                record_gp_stall_check_time(rsp);
-               raw_spin_unlock(&rnp->lock);  /* leave irqs disabled. */
+               raw_spin_unlock_irqrestore(&rnp->lock, flags);
 
                /* Exclude any concurrent CPU-hotplug operations. */
-               raw_spin_lock(&rsp->onofflock);  /* irqs already disabled. */
+               get_online_cpus();
 
                /*
                 * Set the quiescent-state-needed bits in all the rcu_node
@@ -1112,7 +1114,7 @@ static int rcu_gp_kthread(void *arg)
                 * due to the fact that we have irqs disabled.
                 */
                rcu_for_each_node_breadth_first(rsp, rnp) {
-                       raw_spin_lock(&rnp->lock); /* irqs already disabled. */
+                       raw_spin_lock_irqsave(&rnp->lock, flags);
                        rcu_preempt_check_blocked_tasks(rnp);
                        rnp->qsmask = rnp->qsmaskinit;
                        rnp->gpnum = rsp->gpnum;
@@ -1123,15 +1125,16 @@ static int rcu_gp_kthread(void *arg)
                        trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
                                                    rnp->level, rnp->grplo,
                                                    rnp->grphi, rnp->qsmask);
-                       raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
+                       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+                       cond_resched();
                }
 
                rnp = rcu_get_root(rsp);
-               raw_spin_lock(&rnp->lock); /* irqs already disabled. */
+               raw_spin_lock_irqsave(&rnp->lock, flags);
                /* force_quiescent_state() now OK. */
                rsp->fqs_state = RCU_SIGNAL_INIT;
-               raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
-               raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
+               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               put_online_cpus();
        }
        return 0;
 }