git.karo-electronics.de Git - linux-beck.git/commitdiff
nohz_full: Force RCU's grace-period kthreads onto timekeeping CPU
author    Paul E. McKenney <paulmck@linux.vnet.ibm.com>
          Sat, 22 Jun 2013 00:10:40 +0000 (17:10 -0700)
committer Paul E. McKenney <paulmck@linux.vnet.ibm.com>
          Sat, 31 Aug 2013 21:44:02 +0000 (14:44 -0700)
Because RCU's quiescent-state-forcing mechanism is used to drive the
full-system-idle state machine, and because this mechanism is executed
by RCU's grace-period kthreads, this commit forces these kthreads to
run on the timekeeping CPU (tick_do_timer_cpu).  To do otherwise would
mean that the RCU grace-period kthreads would force the system into
non-idle state every time they drove the state machine, which would
be just a bit on the futile side.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Lai Jiangshan <laijs@cn.fujitsu.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
kernel/rcutree.c
kernel/rcutree.h
kernel/rcutree_plugin.h
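
As the commit message above explains, the grace-period kthread now pins itself to the timekeeping CPU. The sketch below shows that pattern in isolation; it is illustrative only and not part of the patch. The thread function name example_gp_thread() is invented, and the direct use of tick_do_timer_cpu assumes the declaration that normally lives in kernel/time/tick-internal.h.

/*
 * Illustrative sketch only -- not from this patch.  A kthread that
 * re-binds itself to the timekeeping CPU on each pass of its loop,
 * much as rcu_bind_gp_kthread() does at the start of each grace period.
 */
#include <linux/compiler.h>
#include <linux/cpumask.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/smp.h>

/* Normally private to kernel/time/tick-internal.h. */
extern int tick_do_timer_cpu;

static int example_gp_thread(void *unused)
{
	while (!kthread_should_stop()) {
		/* Re-read each pass: the timekeeping duty can move. */
		int cpu = ACCESS_ONCE(tick_do_timer_cpu);

		if (cpu >= 0 && cpu < nr_cpu_ids &&
		    raw_smp_processor_id() != cpu)
			set_cpus_allowed_ptr(current, cpumask_of(cpu));

		/* ... grace-period-style work would go here ... */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}

In the patch itself the binding is done once per grace period, at the top of rcu_gp_init(), so the kthread follows tick_do_timer_cpu if timekeeping duty ever moves to another CPU.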

diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index eca70f4469c199187f40f11088bd89b91dc189eb..64eaafb6c8f70c9797841d81b40898598650f812 100644
@@ -1303,6 +1303,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
        struct rcu_data *rdp;
        struct rcu_node *rnp = rcu_get_root(rsp);
 
+       rcu_bind_gp_kthread();
        raw_spin_lock_irq(&rnp->lock);
        rsp->gp_flags = 0; /* Clear all flags: New grace period. */
 
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 6fd3659cf01acb4fe8ade89a862404c6d1b944af..5f97eab602cd831dec54baaf7bafd970920d0c60 100644
@@ -560,6 +560,7 @@ static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
 static bool is_sysidle_rcu_state(struct rcu_state *rsp);
 static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle,
                                  unsigned long maxj);
+static void rcu_bind_gp_kthread(void);
 static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp);
 
 #endif /* #ifndef RCU_TREE_NONCORE */
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 45ebba747af4c199848cebbd88c69ebf0bcdefe9..130c97b027f2e1f244cf2c23edfca913238e6dfa 100644
@@ -2531,7 +2531,8 @@ static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
        if (!*isidle || rdp->rsp != rcu_sysidle_state ||
            cpu_is_offline(rdp->cpu) || rdp->cpu == tick_do_timer_cpu)
                return;
-       /* WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu); */
+       if (rcu_gp_in_progress(rdp->rsp))
+               WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);
 
        /* Pick up current idle and NMI-nesting counter and check. */
        cur = atomic_read(&rdtp->dynticks_idle);
@@ -2556,6 +2557,20 @@ static bool is_sysidle_rcu_state(struct rcu_state *rsp)
        return rsp == rcu_sysidle_state;
 }
 
+/*
+ * Bind the grace-period kthread for the sysidle flavor of RCU to the
+ * timekeeping CPU.
+ */
+static void rcu_bind_gp_kthread(void)
+{
+       int cpu = ACCESS_ONCE(tick_do_timer_cpu);
+
+       if (cpu < 0 || cpu >= nr_cpu_ids)
+               return;
+       if (raw_smp_processor_id() != cpu)
+               set_cpus_allowed_ptr(current, cpumask_of(cpu));
+}
+
 /*
  * Return a delay in jiffies based on the number of CPUs, rcu_node
  * leaf fanout, and jiffies tick rate.  The idea is to allow larger
@@ -2766,6 +2781,10 @@ static bool is_sysidle_rcu_state(struct rcu_state *rsp)
        return false;
 }
 
+static void rcu_bind_gp_kthread(void)
+{
+}
+
 static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle,
                                  unsigned long maxj)
 {
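
The last hunk above supplies an empty rcu_bind_gp_kthread() for kernels built without the full-system-idle machinery (this series guards that code with CONFIG_NO_HZ_FULL_SYSIDLE). A hedged sketch of the stub pattern it follows, which keeps the caller in rcu_gp_init() free of conditional compilation:

/*
 * Sketch only: real implementation when the feature is configured in,
 * empty stub otherwise; the compiler discards the no-op call site.
 */
#ifdef CONFIG_NO_HZ_FULL_SYSIDLE

static void rcu_bind_gp_kthread(void)
{
	/* Real binding logic, as in the rcutree_plugin.h hunk above. */
}

#else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */

static void rcu_bind_gp_kthread(void)
{
	/* Nothing to bind: sysidle tracking is compiled out. */
}

#endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */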