*/
static int dyntick_save_progress_counter(struct rcu_data *rdp)
{
+ smp_mb(); /* Work around some architectures' weak memory ordering. */
rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
+ smp_mb(); /* Work around some architectures' weak memory ordering. */
return 0;
}
unsigned int curr;
unsigned int snap;
+ smp_mb(); /* Work around some architectures' weak memory ordering. */
curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
+ smp_mb(); /* Work around some architectures' weak memory ordering. */
snap = (unsigned int)rdp->dynticks_snap;
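These first two hunks bracket the dynticks snapshot and its later recheck. As a hedged aside, here is a userspace C11 sketch of the snapshot/compare protocol being instrumented (all names are illustrative stand-ins, not kernel symbols; atomic_fetch_add() with a zero addend returns the same value as atomic_add_return(0, ...) because old and new values coincide):

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Stand-ins for rdp->dynticks and rdp->dynticks_snap: the counter is
     * even while the CPU is in dyntick-idle mode, odd otherwise. */
    static atomic_int dynticks;
    static int dynticks_snap;

    /* Fully ordered snapshot, as in dyntick_save_progress_counter(). */
    static void save_snapshot(void)
    {
        dynticks_snap = atomic_fetch_add(&dynticks, 0);
    }

    /* The CPU passed through a quiescent state if it was idle when sampled
     * (even snapshot) or if its counter has moved since the snapshot. */
    static bool passed_quiescent_state(void)
    {
        unsigned int curr = (unsigned int)atomic_fetch_add(&dynticks, 0);
        unsigned int snap = (unsigned int)dynticks_snap;
        return ((snap & 0x1) == 0) || (curr != snap);
    }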
/*
rcu_wait(atomic_read(&rnp->wakemask) != 0);
rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
raw_spin_lock_irqsave(&rnp->lock, flags);
+ smp_mb(); /* Work around some architectures' weak memory ordering. */
mask = atomic_xchg(&rnp->wakemask, 0);
+ smp_mb(); /* Work around some architectures' weak memory ordering. */
rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
if ((mask & 0x1) == 0)
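The xchg above is a grab-and-clear handoff: the node kthread atomically takes ownership of all pending-wakeup bits, then walks them one CPU at a time. A minimal C11 sketch of that pattern (illustrative names only, not kernel symbols):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_uint wakemask; /* one pending-wakeup bit per CPU */

    /* Atomically take and clear the mask, then service each set bit.
     * C11 atomic_exchange() is fully ordered, like atomic_xchg(), so bit
     * setters that ran before the exchange are visible to the loop below. */
    static void drain_wakemask(int grplo, int grphi)
    {
        unsigned int mask = atomic_exchange(&wakemask, 0);
        for (int cpu = grplo; cpu <= grphi; cpu++, mask >>= 1) {
            if ((mask & 0x1) == 0)
                continue;
            printf("servicing wakeup for CPU %d\n", cpu);
        }
    }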
int firstsnap, s, snap, trycount = 0;
/* Note that atomic_inc_return() implies full memory barrier. */
+ smp_mb(); /* Work around some architectures' weak memory ordering. */
firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
+ smp_mb(); /* Work around some architectures' weak memory ordering. */
get_online_cpus();
/*
break;
}
} while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
+ smp_mb(); /* Work around some architectures' weak memory ordering. */
put_online_cpus();
}
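The started/done counters above form a ticket-style shortcut: each caller snapshots sync_sched_expedited_started on entry, and if sync_sched_expedited_done later passes that snapshot, another CPU's expedited grace period already covered this caller's needs. A hedged C11 sketch of the final cmpxchg loop (stand-in names, not the kernel implementation):

    #include <stdatomic.h>

    static atomic_int expedited_done; /* stand-in for sync_sched_expedited_done */

    /* Record that grace periods up to "snap" have completed, unless some
     * other CPU already advanced "done" past us. The strong compare-and-
     * exchange reloads "s" and retries on concurrent updates, mirroring
     * the atomic_cmpxchg() loop in the hunk above. */
    static void advance_done(int snap)
    {
        int s = atomic_load(&expedited_done);
        do {
            if (s - snap > 0) /* difference compare tolerates wraparound */
                return;       /* a later completion is already recorded */
        } while (!atomic_compare_exchange_strong(&expedited_done, &s, snap));
    }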
for_each_online_cpu(thatcpu) {
if (thatcpu == cpu)
continue;
+ smp_mb(); /* Work around some architectures' weak memory ordering. */
snap = atomic_add_return(0, &per_cpu(rcu_dynticks,
thatcpu).dynticks);
- smp_mb(); /* Order sampling of snap with end of grace period. */
+ smp_mb(); /* Work around some architectures' weak memory ordering. */
if ((snap & 0x1) != 0) {
per_cpu(rcu_dyntick_drain, cpu) = 0;
per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
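This last hunk samples every other online CPU before letting "cpu" enter dyntick-idle: an odd counter means "thatcpu" is still non-idle, so the drain state is reset and further attempts are held off until the next jiffy. A self-contained sketch of that check (NCPUS, jiffies_now, and the arrays are hypothetical stand-ins for the per-CPU variables):

    #include <stdatomic.h>
    #include <stdbool.h>

    #define NCPUS 4

    static atomic_int cpu_dynticks[NCPUS];     /* even = idle, odd = non-idle */
    static int dyntick_drain[NCPUS];
    static unsigned long dyntick_holdoff[NCPUS];

    /* Return true only if every CPU other than "cpu" is idle; otherwise
     * reset the drain counter and back off past the current jiffy. */
    static bool others_idle(int cpu, unsigned long jiffies_now)
    {
        for (int that = 0; that < NCPUS; that++) {
            if (that == cpu)
                continue;
            int snap = atomic_fetch_add(&cpu_dynticks[that], 0); /* ordered read */
            if ((snap & 0x1) != 0) {
                dyntick_drain[cpu] = 0;
                dyntick_holdoff[cpu] = jiffies_now - 1;
                return false;
            }
        }
        return true;
    }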