/* State information for rcu_barrier() and friends. */
-static atomic_t rcu_barrier_cpu_count;
static DEFINE_MUTEX(rcu_barrier_mutex);
static struct completion rcu_barrier_completion;
/*
* RCU callback function for _rcu_barrier(). If we are last, wake
* up the task executing _rcu_barrier().
*/
-static void rcu_barrier_callback(struct rcu_head *notused)
+static void rcu_barrier_callback(struct rcu_head *rhp)
{
- if (atomic_dec_and_test(&rcu_barrier_cpu_count))
+ struct rcu_data *rdp = container_of(rhp, struct rcu_data, barrier_head);
+ struct rcu_state *rsp = rdp->rsp;
+
+ if (atomic_dec_and_test(&rsp->barrier_cpu_count))
complete(&rcu_barrier_completion);
}
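Because an RCU callback receives only the struct rcu_head pointer it was queued with, moving the counter into rcu_state forces rcu_barrier_callback() to recover its enclosing rcu_data with container_of() and then follow rdp->rsp. A minimal userspace sketch of that embedded-member pattern, assuming invented stand-in types (struct head, struct data) rather than the kernel's:

#include <stddef.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's container_of() macro. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct head {                           /* plays the role of struct rcu_head */
        void (*func)(struct head *);
};

struct data {                           /* plays the role of struct rcu_data */
        int cpu;
        struct head barrier_head;       /* embedded, like rdp->barrier_head */
};

static void callback(struct head *hp)
{
        /* Recover the enclosing structure from the embedded member. */
        struct data *dp = container_of(hp, struct data, barrier_head);

        printf("callback for cpu %d\n", dp->cpu);
}

int main(void)
{
        struct data d = { .cpu = 3 };

        d.barrier_head.func = callback;
        d.barrier_head.func(&d.barrier_head);   /* prints "callback for cpu 3" */
        return 0;
}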
static void rcu_barrier_func(void *type)
{
struct rcu_state *rsp = type;
struct rcu_data *rdp = __this_cpu_ptr(rsp->rda);
- atomic_inc(&rcu_barrier_cpu_count);
+ atomic_inc(&rsp->barrier_cpu_count);
rsp->call(&rdp->barrier_head, rcu_barrier_callback);
}
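Posting through rsp->call rather than a hard-coded call_rcu() is what lets this one helper serve every RCU flavor (rcu_barrier(), rcu_barrier_bh(), rcu_barrier_sched()): each flavor's rcu_state supplies its own enqueue function. A sketch of that dispatch shape, with all names (struct state, post_a(), post_b()) invented for illustration:

#include <stdio.h>

struct head;
typedef void (*cb_t)(struct head *);
struct head { cb_t func; };

/*
 * Per-flavor state: barrier code that takes a struct state * stays
 * flavor-agnostic by posting callbacks through state->call, just as
 * _rcu_barrier() uses rsp->call.
 */
struct state {
        const char *name;
        void (*call)(struct head *hp, cb_t fn);
};

static void post_a(struct head *hp, cb_t fn) { hp->func = fn; puts("posted via flavor A"); }
static void post_b(struct head *hp, cb_t fn) { hp->func = fn; puts("posted via flavor B"); }

static void barrier_cb(struct head *hp) { (void)hp; }

static void barrier_for(struct state *sp)
{
        struct head h;

        sp->call(&h, barrier_cb);       /* mirrors rsp->call(&rdp->barrier_head, ...) */
}

int main(void)
{
        struct state a = { "a", post_a }, b = { "b", post_b };

        barrier_for(&a);
        barrier_for(&b);
        return 0;
}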
static void _rcu_barrier(struct rcu_state *rsp)
{
int cpu;
unsigned long flags;
struct rcu_data *rdp;
- struct rcu_head rh;
+ struct rcu_data rd;
- init_rcu_head_on_stack(&rh);
+ init_rcu_head_on_stack(&rd.barrier_head);
/* Take mutex to serialize concurrent rcu_barrier() requests. */
mutex_lock(&rcu_barrier_mutex);
/*
* Initialize the count to one rather than to zero in order to
* avoid a too-soon return to zero in case of a short grace period
* (or preemption of this task).  Also flag this task as doing
* an rcu_barrier().  This prevents anyone else from adopting
* orphaned callbacks, which could otherwise cause a failure if a
* CPU went offline and quickly came back online.  To see this,
* consider the following sequence of events:
*
* 1.  We cause CPU 0 to post an rcu_barrier_callback() callback.
* 2.  CPU 1 goes offline, orphaning its callbacks.
* 3.  CPU 0 adopts CPU 1's orphaned callbacks.
* 4.  CPU 1 comes back online.
* 5.  We cause CPU 2 to post an rcu_barrier_callback() callback.
* 6.  Both rcu_barrier_callback() callbacks are invoked, awakening
* us -- but before CPU 1's orphaned callbacks are invoked!!!
*/
init_completion(&rcu_barrier_completion);
- atomic_set(&rcu_barrier_cpu_count, 1);
+ atomic_set(&rsp->barrier_cpu_count, 1);
raw_spin_lock_irqsave(&rsp->onofflock, flags);
rsp->rcu_barrier_in_progress = current;
raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
raw_spin_lock_irqsave(&rsp->onofflock, flags);
rcu_adopt_orphan_cbs(rsp);
rsp->rcu_barrier_in_progress = NULL;
raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
- atomic_inc(&rcu_barrier_cpu_count);
+ atomic_inc(&rsp->barrier_cpu_count);
smp_mb__after_atomic_inc(); /* Ensure atomic_inc() before callback. */
- rsp->call(&rh, rcu_barrier_callback);
+ rd.rsp = rsp;
+ rsp->call(&rd.barrier_head, rcu_barrier_callback);
/*
* Now that we have an rcu_barrier_callback() callback on each
* CPU, and thus each counted, remove the initial count.
*/
- if (atomic_dec_and_test(&rcu_barrier_cpu_count))
+ if (atomic_dec_and_test(&rsp->barrier_cpu_count))
complete(&rcu_barrier_completion);
/* Wait for all rcu_barrier_callback() callbacks to be invoked. */
wait_for_completion(&rcu_barrier_completion);
/* Other rcu_barrier() invocations can now safely proceed. */
mutex_unlock(&rcu_barrier_mutex);
- destroy_rcu_head_on_stack(&rh);
+ destroy_rcu_head_on_stack(&rd.barrier_head);
}
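Initializing the count to 1 rather than 0 means the completion cannot fire while callbacks are still being posted: the initial reference belongs to the task running _rcu_barrier() and is dropped only after the last callback has been queued, mirroring the atomic_dec_and_test() above. A userspace sketch of this count-starts-at-one completion pattern, using pthreads and invented names (a condition variable stands in for complete()/wait_for_completion()):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int barrier_count;        /* plays rsp->barrier_cpu_count */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t completion = PTHREAD_COND_INITIALIZER;

/* Each worker plays the role of one rcu_barrier_callback(). */
static void *worker(void *arg)
{
        (void)arg;
        if (atomic_fetch_sub(&barrier_count, 1) == 1) { /* we were last */
                pthread_mutex_lock(&lock);
                pthread_cond_signal(&completion);       /* complete() */
                pthread_mutex_unlock(&lock);
        }
        return NULL;
}

int main(void)
{
        pthread_t tids[4];

        /*
         * Start the count at 1, not 0: the extra reference is held by
         * this thread, so the count cannot hit zero while workers are
         * still being posted, no matter how quickly they finish.
         */
        atomic_init(&barrier_count, 1);
        for (int i = 0; i < 4; i++) {
                atomic_fetch_add(&barrier_count, 1);    /* like atomic_inc() */
                pthread_create(&tids[i], NULL, worker, NULL);
        }

        /* Drop the initial reference; we may ourselves be "last". */
        if (atomic_fetch_sub(&barrier_count, 1) > 1) {
                pthread_mutex_lock(&lock);
                while (atomic_load(&barrier_count) != 0)
                        pthread_cond_wait(&completion, &lock);
                pthread_mutex_unlock(&lock);
        }

        for (int i = 0; i < 4; i++)
                pthread_join(tids[i], NULL);
        puts("all workers finished");
        return 0;
}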
/**