From: Paul E. McKenney
Date: Fri, 6 Jul 2012 12:59:20 +0000 (-0700)
Subject: Merge branches 'bigrtm.2012.07.04a', 'doctorture.2012.07.02a', 'fixes.2012.07.06a...
X-Git-Url: https://git.karo-electronics.de/?a=commitdiff_plain;h=02a0677b0be545a07ffb15a13419efd7c82881e0;p=linux-beck.git

Merge branches 'bigrtm.2012.07.04a', 'doctorture.2012.07.02a', 'fixes.2012.07.06a' and 'fnh.2012.07.02a' into HEAD

bigrtm: First steps towards getting RCU out of the way of
	tens-of-microseconds real-time response on systems compiled
	with NR_CPUS=4096.  Also cleanups for and increased concurrency
	of rcu_barrier() family of primitives.
doctorture: rcutorture and documentation improvements.
fixes: Miscellaneous fixes.
fnh: RCU_FAST_NO_HZ fixes and improvements.
---

02a0677b0be545a07ffb15a13419efd7c82881e0
diff --cc kernel/rcutree.c
index 967b4bed2cf3,4b97bba7396e,4154c9567a6d,6eb48f13eeeb..117218a43724
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@@@@ -2316,46 -2295,14 -2306,14 -2295,14 +2327,46 @@@@@ static void _rcu_barrier(struct rcu_sta
    	int cpu;
    	unsigned long flags;
    	struct rcu_data *rdp;
--- 	struct rcu_head rh;
+++ 	struct rcu_data rd;
+++ 	unsigned long snap = ACCESS_ONCE(rsp->n_barrier_done);
+++ 	unsigned long snap_done;

--- 	init_rcu_head_on_stack(&rh);
+++ 	init_rcu_head_on_stack(&rd.barrier_head);
+++ 	_rcu_barrier_trace(rsp, "Begin", -1, snap);

    	/* Take mutex to serialize concurrent rcu_barrier() requests. */
--- 	mutex_lock(&rcu_barrier_mutex);
+++ 	mutex_lock(&rsp->barrier_mutex);
+ +
  - 	smp_mb();  /* Prevent any prior operations from leaking in. */
+++ 	/*
+++ 	 * Ensure that all prior references, including to ->n_barrier_done,
+++ 	 * are ordered before the _rcu_barrier() machinery.
+++ 	 */
+++ 	smp_mb();  /* See above block comment. */
+++
+++ 	/*
+++ 	 * Recheck ->n_barrier_done to see if others did our work for us.
+++ 	 * This means checking ->n_barrier_done for an even-to-odd-to-even
+++ 	 * transition.  The "if" expression below therefore rounds the old
+++ 	 * value up to the next even number and adds two before comparing.
+++ 	 */
+++ 	snap_done = ACCESS_ONCE(rsp->n_barrier_done);
+++ 	_rcu_barrier_trace(rsp, "Check", -1, snap_done);
+++ 	if (ULONG_CMP_GE(snap_done, ((snap + 1) & ~0x1) + 2)) {
+++ 		_rcu_barrier_trace(rsp, "EarlyExit", -1, snap_done);
+++ 		smp_mb(); /* caller's subsequent code after above check. */
+++ 		mutex_unlock(&rsp->barrier_mutex);
+++ 		return;
+++ 	}
+
 - - 	smp_mb();  /* Prevent any prior operations from leaking in. */
+++ 	/*
+++ 	 * Increment ->n_barrier_done to avoid duplicate work.  Use
+++ 	 * ACCESS_ONCE() to prevent the compiler from speculating
+++ 	 * the increment to precede the early-exit check.
+++ 	 */
+++ 	ACCESS_ONCE(rsp->n_barrier_done)++;
+++ 	WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
+++ 	_rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
+++ 	smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */

    	/*
    	 * Initialize the count to one rather than to zero in order to
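
Editorial aside: the hunk above turns ->n_barrier_done into an even/odd counter (even when no rcu_barrier() is in flight, odd while one runs), so a caller whose snapshot has been overtaken by a full even-to-odd-to-even advance can return early because another caller already did its work. Below is a minimal stand-alone user-space sketch of just that arithmetic, not kernel code: barrier_already_done() and ulong_cmp_ge() are invented names for this example, with ulong_cmp_ge() mimicking the kernel's wrap-safe ULONG_CMP_GE() comparison.

/*
 * Stand-alone sketch of the ->n_barrier_done early-exit check.
 * The counter is even when idle and odd while a barrier runs, so a
 * complete even-to-odd-to-even advance since the snapshot means some
 * other caller's barrier already covered our callbacks.
 */
#include <assert.h>
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

static bool ulong_cmp_ge(unsigned long a, unsigned long b)
{
	return ULONG_MAX / 2 >= a - b;	/* wrap-safe "a >= b" */
}

static bool barrier_already_done(unsigned long snap, unsigned long snap_done)
{
	/* Round snap up to the next even value, then require a full cycle. */
	return ulong_cmp_ge(snap_done, ((snap + 1) & ~0x1UL) + 2);
}

int main(void)
{
	assert(!barrier_already_done(4, 4));	/* nothing happened since the snapshot */
	assert(!barrier_already_done(4, 5));	/* a barrier started but has not completed */
	assert(barrier_already_done(4, 6));	/* one full even-to-odd-to-even transition */
	assert(barrier_already_done(5, 8));	/* snapshot taken mid-barrier, later cycle done */
	printf("n_barrier_done early-exit check behaves as expected\n");
	return 0;
}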