rcu: Use rsp->expedited_wq instead of sync_rcu_preempt_exp_wq
author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>
           Thu, 30 Jul 2015 00:28:11 +0000 (17:28 -0700)
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>
           Mon, 21 Sep 2015 04:16:17 +0000 (21:16 -0700)
Now that there is an ->expedited_wq waitqueue in each rcu_state structure,
there is no need for the sync_rcu_preempt_exp_wq global variable.  This
commit therefore substitutes ->expedited_wq for sync_rcu_preempt_exp_wq.
It also initializes ->expedited_wq only once at boot instead of at the
start of each expedited grace period.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
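
As a minimal sketch of the pattern the commit applies (illustrative only; the
structure and function names below are hypothetical stand-ins, not the kernel
code itself), the refactoring replaces a file-scope waitqueue with a field of
the per-flavor state structure that is initialized once at boot:

#include <linux/init.h>
#include <linux/wait.h>

/* Hypothetical stand-in for struct rcu_state. */
struct rcu_state_sketch {
	wait_queue_head_t expedited_wq;	/* wakes waiters for expedited GPs */
};

static struct rcu_state_sketch sketch_state;

/* One-time boot initialization, as the commit does in rcu_init_one(). */
static void __init sketch_init(void)
{
	init_waitqueue_head(&sketch_state.expedited_wq);
}

One-time initialization also removes the per-grace-period
init_waitqueue_head() call from the synchronize_sched_expedited() path, as
the first hunk below shows.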
kernel/rcu/tree.c
kernel/rcu/tree_plugin.h

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 775d36cc00506620829bd2ca14b5da21e4de79e3..53d66ebb48113166ff76bf902f25c78a96409631 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3556,7 +3556,6 @@ void synchronize_sched_expedited(void)
        rcu_exp_gp_seq_start(rsp);
 
        /* Stop each CPU that is online, non-idle, and not us. */
-       init_waitqueue_head(&rsp->expedited_wq);
        atomic_set(&rsp->expedited_need_qs, 1); /* Extra count avoids race. */
        for_each_online_cpu(cpu) {
                struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
@@ -4179,6 +4178,7 @@ static void __init rcu_init_one(struct rcu_state *rsp,
        }
 
        init_waitqueue_head(&rsp->gp_wq);
+       init_waitqueue_head(&rsp->expedited_wq);
        rnp = rsp->level[rcu_num_lvls - 1];
        for_each_possible_cpu(i) {
                while (i > rnp->grphi)
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index b2bf3963a0aee328d0d3bfc0d9e997b3a5ca106f..72df006de7987e088d5f3d137631975664049c03 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -535,8 +535,6 @@ void synchronize_rcu(void)
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu);
 
-static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
-
 /*
  * Return non-zero if there are any tasks in RCU read-side critical
  * sections blocking the current preemptible-RCU expedited grace period.
@@ -590,7 +588,7 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
                        raw_spin_unlock_irqrestore(&rnp->lock, flags);
                        if (wake) {
                                smp_mb(); /* EGP done before wake_up(). */
-                               wake_up(&sync_rcu_preempt_exp_wq);
+                               wake_up(&rsp->expedited_wq);
                        }
                        break;
                }
@@ -729,7 +727,7 @@ void synchronize_rcu_expedited(void)
 
        /* Wait for snapshotted ->blkd_tasks lists to drain. */
        rnp = rcu_get_root(rsp);
-       wait_event(sync_rcu_preempt_exp_wq,
+       wait_event(rsp->expedited_wq,
                   sync_rcu_preempt_exp_done(rnp));
 
        /* Clean up and exit. */
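
The tree_plugin.h hunks pair wait_event() on ->expedited_wq with a wake_up()
preceded by a full memory barrier. A hedged sketch of that generic
waiter/waker pattern (sketch_wq and sketch_done are illustrative names, not
part of the commit):

#include <linux/compiler.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(sketch_wq);
static bool sketch_done;

static void sketch_waker(void)
{
	WRITE_ONCE(sketch_done, true);
	smp_mb();	/* Order the update before the wake_up(). */
	wake_up(&sketch_wq);
}

static void sketch_waiter(void)
{
	/* Sleeps until the condition is true; re-checked on each wakeup. */
	wait_event(sketch_wq, READ_ONCE(sketch_done));
}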