rcu: Move rcu_barrier_cpu_count to rcu_state structure
Author:     Paul E. McKenney <paul.mckenney@linaro.org>
AuthorDate: Tue, 29 May 2012 07:34:56 +0000 (00:34 -0700)
Commit:     Paul E. McKenney <paulmck@linux.vnet.ibm.com>
CommitDate: Mon, 2 Jul 2012 19:33:22 +0000 (12:33 -0700)
In order to allow each RCU flavor to concurrently execute its rcu_barrier()
function, it is necessary to move the relevant state to the rcu_state
structure.  This commit therefore moves the rcu_barrier_cpu_count global
variable to a new ->barrier_cpu_count field in the rcu_state structure.

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
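
The pattern generalizes: any file-scope counter that forces logically
independent instances to share state can be lifted into a per-instance
field.  Below is a minimal userspace sketch of the same move; the flavor
names and the barrier_cpu_done() helper are invented for illustration,
and C11 atomics stand in for the kernel's atomic_t:

#include <stdatomic.h>
#include <stdio.h>

/* Before: a single file-scope counter serialized every flavor's barrier. */
/* static atomic_int barrier_cpu_count; */

/* After: each flavor carries its own counter in its state structure. */
struct flavor_state {
	const char *name;
	atomic_int barrier_cpu_count;	/* # CPUs still outstanding. */
};

static struct flavor_state sched_state = { .name = "rcu_sched" };
static struct flavor_state bh_state    = { .name = "rcu_bh" };

/* Decrement one flavor's count; report completion when it hits zero. */
static void barrier_cpu_done(struct flavor_state *fsp)
{
	if (atomic_fetch_sub(&fsp->barrier_cpu_count, 1) == 1)
		printf("%s: barrier complete\n", fsp->name);
}

int main(void)
{
	/* Two flavors can now count down concurrently without interfering. */
	atomic_store(&sched_state.barrier_cpu_count, 1);
	atomic_store(&bh_state.barrier_cpu_count, 1);
	barrier_cpu_done(&sched_state);
	barrier_cpu_done(&bh_state);
	return 0;
}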
kernel/rcutree.c
kernel/rcutree.h

diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 1e552598b55d3a2b9fd8bf7d713c5dc9643d96a6..5929b021666ddab88bb84c24ea0b735cb989b68d 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -157,7 +157,6 @@ unsigned long rcutorture_vernum;
 
 /* State information for rcu_barrier() and friends. */
 
-static atomic_t rcu_barrier_cpu_count;
 static DEFINE_MUTEX(rcu_barrier_mutex);
 static struct completion rcu_barrier_completion;
 
@@ -2270,9 +2269,12 @@ static int rcu_cpu_has_callbacks(int cpu)
  * RCU callback function for _rcu_barrier().  If we are last, wake
  * up the task executing _rcu_barrier().
  */
-static void rcu_barrier_callback(struct rcu_head *notused)
+static void rcu_barrier_callback(struct rcu_head *rhp)
 {
-       if (atomic_dec_and_test(&rcu_barrier_cpu_count))
+       struct rcu_data *rdp = container_of(rhp, struct rcu_data, barrier_head);
+       struct rcu_state *rsp = rdp->rsp;
+
+       if (atomic_dec_and_test(&rsp->barrier_cpu_count))
                complete(&rcu_barrier_completion);
 }
 
@@ -2284,7 +2286,7 @@ static void rcu_barrier_func(void *type)
        struct rcu_state *rsp = type;
        struct rcu_data *rdp = __this_cpu_ptr(rsp->rda);
 
-       atomic_inc(&rcu_barrier_cpu_count);
+       atomic_inc(&rsp->barrier_cpu_count);
        rsp->call(&rdp->barrier_head, rcu_barrier_callback);
 }
 
@@ -2297,9 +2299,9 @@ static void _rcu_barrier(struct rcu_state *rsp)
        int cpu;
        unsigned long flags;
        struct rcu_data *rdp;
-       struct rcu_head rh;
+       struct rcu_data rd;
 
-       init_rcu_head_on_stack(&rh);
+       init_rcu_head_on_stack(&rd.barrier_head);
 
        /* Take mutex to serialize concurrent rcu_barrier() requests. */
        mutex_lock(&rcu_barrier_mutex);
@@ -2324,7 +2326,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
         *      us -- but before CPU 1's orphaned callbacks are invoked!!!
         */
        init_completion(&rcu_barrier_completion);
-       atomic_set(&rcu_barrier_cpu_count, 1);
+       atomic_set(&rsp->barrier_cpu_count, 1);
        raw_spin_lock_irqsave(&rsp->onofflock, flags);
        rsp->rcu_barrier_in_progress = current;
        raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
@@ -2363,15 +2365,16 @@ static void _rcu_barrier(struct rcu_state *rsp)
        rcu_adopt_orphan_cbs(rsp);
        rsp->rcu_barrier_in_progress = NULL;
        raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
-       atomic_inc(&rcu_barrier_cpu_count);
+       atomic_inc(&rsp->barrier_cpu_count);
        smp_mb__after_atomic_inc(); /* Ensure atomic_inc() before callback. */
-       rsp->call(&rh, rcu_barrier_callback);
+       rd.rsp = rsp;
+       rsp->call(&rd.barrier_head, rcu_barrier_callback);
 
        /*
         * Now that we have an rcu_barrier_callback() callback on each
         * CPU, and thus each counted, remove the initial count.
         */
-       if (atomic_dec_and_test(&rcu_barrier_cpu_count))
+       if (atomic_dec_and_test(&rsp->barrier_cpu_count))
                complete(&rcu_barrier_completion);
 
        /* Wait for all rcu_barrier_callback() callbacks to be invoked. */
@@ -2380,7 +2383,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
        /* Other rcu_barrier() invocations can now safely proceed. */
        mutex_unlock(&rcu_barrier_mutex);
 
-       destroy_rcu_head_on_stack(&rh);
+       destroy_rcu_head_on_stack(&rd.barrier_head);
 }
 
 /**
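
The rcu_barrier_callback() change above relies on a standard kernel idiom:
embed the callback head in a larger structure, then recover the enclosing
structure (and its back-pointer) with container_of().  A self-contained
userspace approximation follows; the structures are simplified stand-ins,
and the macro is defined locally only because the kernel's version lives
in <linux/kernel.h>:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rcu_head { void (*func)(struct rcu_head *); };
struct rcu_state { const char *name; };
struct rcu_data {
	struct rcu_state *rsp;		/* back-pointer to flavor state */
	struct rcu_head barrier_head;	/* embedded callback head */
};

/* Receives only the embedded head, yet can find the flavor's state. */
static void barrier_callback(struct rcu_head *rhp)
{
	struct rcu_data *rdp = container_of(rhp, struct rcu_data, barrier_head);

	printf("callback for flavor %s\n", rdp->rsp->name);
}

int main(void)
{
	struct rcu_state state = { .name = "rcu_sched" };
	struct rcu_data data = { .rsp = &state };

	barrier_callback(&data.barrier_head);	/* prints "rcu_sched" */
	return 0;
}
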
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 586d93c978f28c5aa673c0ebee8589a3bdcda7c6..c57ef0b7f0978ac61ff0743b91deb6288e14385a 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -400,6 +400,7 @@ struct rcu_state {
        struct task_struct *rcu_barrier_in_progress;
                                                /* Task doing rcu_barrier(), */
                                                /*  or NULL if no barrier. */
+       atomic_t barrier_cpu_count;             /* # CPUs waiting on. */
        raw_spinlock_t fqslock;                 /* Only one task forcing */
                                                /*  quiescent states. */
        unsigned long jiffies_force_qs;         /* Time at which to invoke */
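
One more detail worth noting from _rcu_barrier() above: the count starts at
1, not 0, so the completion cannot fire while callbacks are still being
posted, and the initiator drops that initial reference only after the last
post.  A single-threaded userspace sketch of the discipline, with an int
flag standing in for struct completion and all names invented:

#include <stdatomic.h>
#include <stdio.h>

struct barrier_state {
	atomic_int count;
	int completed;			/* stand-in for struct completion */
};

/* Mirrors rcu_barrier_callback(): the last decrementer signals completion. */
static void barrier_callback(struct barrier_state *bsp)
{
	if (atomic_fetch_sub(&bsp->count, 1) == 1)
		bsp->completed = 1;
}

int main(void)
{
	struct barrier_state bs = { .completed = 0 };
	int cpu;

	atomic_store(&bs.count, 1);	/* initial count holds completion off */

	for (cpu = 0; cpu < 4; cpu++)
		atomic_fetch_add(&bs.count, 1);	/* as rcu_barrier_func() per CPU */

	for (cpu = 0; cpu < 4; cpu++)
		barrier_callback(&bs);	/* callbacks fire; count stays >= 1 */

	barrier_callback(&bs);		/* drop the initial count last */
	printf("completed = %d\n", bs.completed);	/* prints: completed = 1 */
	return 0;
}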