rcu: Per-CPU operation cleanups to rcu_*_qs() functions
author    Paul E. McKenney <paulmck@linux.vnet.ibm.com>
          Thu, 14 Aug 2014 23:38:46 +0000 (16:38 -0700)
committer Paul E. McKenney <paulmck@linux.vnet.ibm.com>
          Sun, 7 Sep 2014 23:27:35 +0000 (16:27 -0700)
The rcu_bh_qs(), rcu_preempt_qs(), and rcu_sched_qs() functions use
old-style per-CPU variable access and write to ->passed_quiesce even
if it is already set.  This commit therefore updates these functions to use
the new-style per-CPU variable access functions and to avoid the spurious writes.
This commit also eliminates the "cpu" argument to these functions because
they are always invoked on the indicated CPU.

Reported-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
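
For readers less familiar with the two styles named above, here is a minimal
sketch of the pattern this commit applies.  The struct and variable names are
illustrative stand-ins, not code from the patch; in the patch, the role of
"flag" is played by ->passed_quiesce in the per-CPU struct rcu_data, and the
callers are required to have preemption disabled.

#include <linux/percpu.h>

/* Hypothetical per-CPU state standing in for struct rcu_data. */
struct demo_data {
        int flag;                       /* stands in for ->passed_quiesce */
};
static DEFINE_PER_CPU(struct demo_data, demo_data);

/* Old style: the caller passes cpu, and the flag is stored every time. */
static void demo_qs_old(int cpu)
{
        struct demo_data *dp = &per_cpu(demo_data, cpu);

        dp->flag = 1;                   /* dirties the cache line on each call */
}

/*
 * New style: no cpu argument, because the function always runs on the
 * CPU whose state it updates, and the store is skipped when the flag
 * is already set.
 */
static void demo_qs_new(void)
{
        if (!__this_cpu_read(demo_data.flag))
                __this_cpu_write(demo_data.flag, 1);
}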
include/linux/rcupdate.h
include/linux/rcutiny.h
kernel/rcu/tiny.c
kernel/rcu/tree.c
kernel/rcu/tree_plugin.h
kernel/softirq.c

diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 132e1e34cdcaba19d2bfe709adde5aafbccb1d43..2fab0e37afe05a4ba1f4077e68a89399e7d9114b 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -261,8 +261,8 @@ static inline int rcu_preempt_depth(void)
 
 /* Internal to kernel */
 void rcu_init(void);
-void rcu_sched_qs(int cpu);
-void rcu_bh_qs(int cpu);
+void rcu_sched_qs(void);
+void rcu_bh_qs(void);
 void rcu_check_callbacks(int cpu, int user);
 struct notifier_block;
 void rcu_idle_enter(void);
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index d40a6a45133040fbbb3eb74b0d879e3f013a70d4..38cc5b1e252d2f2ddcc9b2d9e0e1617321ced6d4 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -80,7 +80,7 @@ static inline void kfree_call_rcu(struct rcu_head *head,
 
 static inline void rcu_note_context_switch(int cpu)
 {
-       rcu_sched_qs(cpu);
+       rcu_sched_qs();
 }
 
 /*
diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
index 717f00854fc073fcb04bd9b3dde7c2e509db529e..61b8d2ccc2cb4f07e5da50c8c23217e720c100bc 100644
--- a/kernel/rcu/tiny.c
+++ b/kernel/rcu/tiny.c
@@ -72,7 +72,7 @@ static void rcu_idle_enter_common(long long newval)
                          current->pid, current->comm,
                          idle->pid, idle->comm); /* must be idle task! */
        }
-       rcu_sched_qs(0); /* implies rcu_bh_qsctr_inc(0) */
+       rcu_sched_qs(); /* implies rcu_bh_inc() */
        barrier();
        rcu_dynticks_nesting = newval;
 }
@@ -217,7 +217,7 @@ static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
  * are at it, given that any rcu quiescent state is also an rcu_bh
  * quiescent state.  Use "+" instead of "||" to defeat short circuiting.
  */
-void rcu_sched_qs(int cpu)
+void rcu_sched_qs(void)
 {
        unsigned long flags;
 
@@ -231,7 +231,7 @@ void rcu_sched_qs(int cpu)
 /*
  * Record an rcu_bh quiescent state.
  */
-void rcu_bh_qs(int cpu)
+void rcu_bh_qs(void)
 {
        unsigned long flags;
 
@@ -251,9 +251,9 @@ void rcu_check_callbacks(int cpu, int user)
 {
        RCU_TRACE(check_cpu_stalls());
        if (user || rcu_is_cpu_rrupt_from_idle())
-               rcu_sched_qs(cpu);
+               rcu_sched_qs();
        else if (!in_softirq())
-               rcu_bh_qs(cpu);
+               rcu_bh_qs();
        if (user)
                rcu_note_voluntary_context_switch(current);
 }
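
The comment in the second tiny.c hunk, about using "+" instead of "||" to
defeat short-circuiting, is worth unpacking: with "||", C stops evaluating as
soon as the first operand is nonzero, so the second quiescent-state helper
would be skipped exactly when the first one reports work to do.  A standalone
sketch of the difference, with hypothetical helper names (not kernel code):

#include <stdio.h>

/* Pretend the first helper reports a pending quiescent state. */
static int helper_a(void) { puts("helper_a ran"); return 1; }
static int helper_b(void) { puts("helper_b ran"); return 0; }

int main(void)
{
        /* "||" short-circuits: helper_b() is never called here. */
        if (helper_a() || helper_b())
                puts("softirq would be raised");

        /* "+" evaluates both operands: helper_b() always runs. */
        if (helper_a() + helper_b())
                puts("softirq would be raised");

        return 0;
}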
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index c880f5387b1fd3e43d4bc1ae752218abdb4081d0..4c340625ffd4bfa449b6285ac2e44f26f77efef2 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -188,22 +188,24 @@ static int rcu_gp_in_progress(struct rcu_state *rsp)
  * one since the start of the grace period, this just sets a flag.
  * The caller must have disabled preemption.
  */
-void rcu_sched_qs(int cpu)
+void rcu_sched_qs(void)
 {
-       struct rcu_data *rdp = &per_cpu(rcu_sched_data, cpu);
-
-       if (rdp->passed_quiesce == 0)
-               trace_rcu_grace_period(TPS("rcu_sched"), rdp->gpnum, TPS("cpuqs"));
-       rdp->passed_quiesce = 1;
+       if (!__this_cpu_read(rcu_sched_data.passed_quiesce)) {
+               trace_rcu_grace_period(TPS("rcu_sched"),
+                                      __this_cpu_read(rcu_sched_data.gpnum),
+                                      TPS("cpuqs"));
+               __this_cpu_write(rcu_sched_data.passed_quiesce, 1);
+       }
 }
 
-void rcu_bh_qs(int cpu)
+void rcu_bh_qs(void)
 {
-       struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
-
-       if (rdp->passed_quiesce == 0)
-               trace_rcu_grace_period(TPS("rcu_bh"), rdp->gpnum, TPS("cpuqs"));
-       rdp->passed_quiesce = 1;
+       if (!__this_cpu_read(rcu_bh_data.passed_quiesce)) {
+               trace_rcu_grace_period(TPS("rcu_bh"),
+                                      __this_cpu_read(rcu_bh_data.gpnum),
+                                      TPS("cpuqs"));
+               __this_cpu_write(rcu_bh_data.passed_quiesce, 1);
+       }
 }
 
 static DEFINE_PER_CPU(int, rcu_sched_qs_mask);
@@ -278,7 +280,7 @@ static void rcu_momentary_dyntick_idle(void)
 void rcu_note_context_switch(int cpu)
 {
        trace_rcu_utilization(TPS("Start context switch"));
-       rcu_sched_qs(cpu);
+       rcu_sched_qs();
        rcu_preempt_note_context_switch(cpu);
        if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
                rcu_momentary_dyntick_idle();
@@ -2395,8 +2397,8 @@ void rcu_check_callbacks(int cpu, int user)
                 * at least not while the corresponding CPU is online.
                 */
 
-               rcu_sched_qs(cpu);
-               rcu_bh_qs(cpu);
+               rcu_sched_qs();
+               rcu_bh_qs();
 
        } else if (!in_softirq()) {
 
@@ -2407,7 +2409,7 @@ void rcu_check_callbacks(int cpu, int user)
                 * critical section, so note it.
                 */
 
-               rcu_bh_qs(cpu);
+               rcu_bh_qs();
        }
        rcu_preempt_check_callbacks(cpu);
        if (rcu_pending(cpu))
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 0981c0cd70fe49c05529ca712a7ac477321bcf74..25e692a3628005ebaa754dfedd5cf4f0191e9856 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -158,14 +158,16 @@ EXPORT_SYMBOL_GPL(rcu_batches_completed);
  * As with the other rcu_*_qs() functions, callers to this function
  * must disable preemption.
  */
-static void rcu_preempt_qs(int cpu)
-{
-       struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);
-
-       if (rdp->passed_quiesce == 0)
-               trace_rcu_grace_period(TPS("rcu_preempt"), rdp->gpnum, TPS("cpuqs"));
-       rdp->passed_quiesce = 1;
-       current->rcu_read_unlock_special.b.need_qs = false;
+static void rcu_preempt_qs(void)
+{
+       if (!__this_cpu_read(rcu_preempt_data.passed_quiesce)) {
+               trace_rcu_grace_period(TPS("rcu_preempt"),
+                                      __this_cpu_read(rcu_preempt_data.gpnum),
+                                      TPS("cpuqs"));
+               __this_cpu_write(rcu_preempt_data.passed_quiesce, 1);
+               barrier(); /* Coordinate with rcu_preempt_check_callbacks(). */
+               current->rcu_read_unlock_special.b.need_qs = false;
+       }
 }
 
 /*
@@ -256,7 +258,7 @@ static void rcu_preempt_note_context_switch(int cpu)
         * grace period, then the fact that the task has been enqueued
         * means that we continue to block the current grace period.
         */
-       rcu_preempt_qs(cpu);
+       rcu_preempt_qs();
 }
 
 /*
@@ -352,7 +354,7 @@ void rcu_read_unlock_special(struct task_struct *t)
         */
        special = t->rcu_read_unlock_special;
        if (special.b.need_qs) {
-               rcu_preempt_qs(smp_processor_id());
+               rcu_preempt_qs();
                if (!t->rcu_read_unlock_special.s) {
                        local_irq_restore(flags);
                        return;
@@ -651,11 +653,12 @@ static void rcu_preempt_check_callbacks(int cpu)
        struct task_struct *t = current;
 
        if (t->rcu_read_lock_nesting == 0) {
-               rcu_preempt_qs(cpu);
+               rcu_preempt_qs();
                return;
        }
        if (t->rcu_read_lock_nesting > 0 &&
-           per_cpu(rcu_preempt_data, cpu).qs_pending)
+           per_cpu(rcu_preempt_data, cpu).qs_pending &&
+           !per_cpu(rcu_preempt_data, cpu).passed_quiesce)
                t->rcu_read_unlock_special.b.need_qs = true;
 }
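
One subtlety in the tree_plugin.h changes goes beyond the per-CPU cleanup:
rcu_preempt_qs() now clears ->need_qs only after ->passed_quiesce is set, with
a barrier() between the two stores, and the tick-time check above re-arms
->need_qs only while ->passed_quiesce is still clear.  The sketch below is one
reading of why, with hypothetical names; tick_check() models
rcu_preempt_check_callbacks() interrupting report_qs() on the same CPU.

/* Stand-in for the kernel's compiler barrier. */
#define barrier() __asm__ __volatile__("" ::: "memory")

static struct {
        int qs_pending;                 /* models ->qs_pending     */
        int passed_quiesce;             /* models ->passed_quiesce */
        int need_qs;                    /* models ...need_qs       */
} qs;

static void report_qs(void)             /* models rcu_preempt_qs() */
{
        if (!qs.passed_quiesce) {
                qs.passed_quiesce = 1;
                barrier();              /* keep this store ahead of the clear */
                qs.need_qs = 0;
        }
}

static void tick_check(void)            /* models rcu_preempt_check_callbacks() */
{
        /*
         * Because report_qs() sets passed_quiesce before clearing
         * need_qs, a tick that lands between the two stores sees
         * passed_quiesce == 1 and leaves need_qs alone.  Without the
         * barrier(), the compiler could emit the clear first, letting
         * this check re-arm need_qs for an already-reported quiescent
         * state.
         */
        if (qs.qs_pending && !qs.passed_quiesce)
                qs.need_qs = 1;
}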
 
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 5918d227730f33d7daec6dfab1e8ef30cd653abf..348ec763b1049a64f5867837b4c36f521724fa51 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -278,7 +278,7 @@ restart:
                pending >>= softirq_bit;
        }
 
-       rcu_bh_qs(smp_processor_id());
+       rcu_bh_qs();
        local_irq_disable();
 
        pending = local_softirq_pending();