From: Paul E. McKenney
Date: Thu, 8 Nov 2012 20:00:12 +0000 (-0800)
Subject: Merge branches 'urgent.2012.10.27a', 'doc.2012.11.08a', 'fixes.2012.11.08a', 'srcu...
X-Git-Tag: next-20121109~43^2~2
X-Git-Url: https://git.karo-electronics.de/?a=commitdiff_plain;h=b3ef3d51826397b11dd00efcb1dd79493c55838d;p=karo-tx-linux.git

Merge branches 'urgent.2012.10.27a', 'doc.2012.11.08a', 'fixes.2012.11.08a', 'srcu.2012.10.27a', 'stall.2012.10.24a', 'tracing.2012.11.08a' and 'idle.2012.10.24a' into HEAD

Resolved conflicts:
	kernel/rcu.h
	kernel/rcupdate.c
	kernel/rcutree.c
---

b3ef3d51826397b11dd00efcb1dd79493c55838d
diff --cc kernel/rcu.h
index 8ba99cdc6515,8ba99cdc6515,8ba99cdc6515,8ba99cdc6515,a88fa9f71b27,8ba99cdc6515,20dfba576c2b..7ff057d7063d
--- a/kernel/rcu.h
+++ b/kernel/rcu.h
@@@@@@@@ -109,4 -109,4 -109,4 -109,4 -109,11 -109,4 -109,6 +109,13 @@@@@@@@@ static inline bool __rcu_reclaim(char *
       	}
       }
       
++++++ extern int rcu_expedited;
++++++ 
++++ ++#if defined(CONFIG_SMP) || defined(CONFIG_RCU_TRACE)
++++ ++
++++ ++extern int rcu_cpu_stall_suppress;
++++ ++int rcu_jiffies_till_stall_check(void);
++++ ++
++++ ++#endif /* defined(CONFIG_SMP) || defined(CONFIG_RCU_TRACE) */
++++ ++
       #endif /* __LINUX_RCU_H */
diff --cc kernel/rcupdate.c
index 29ca1c6da594,29ca1c6da594,29ca1c6da594,29ca1c6da594,0b69443a06e7,29ca1c6da594,a2cf76177b44..06cec61be6f9
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@@@@@@@ -46,6 -46,6 -46,6 -46,6 -46,7 -46,6 -46,7 +46,7 @@@@@@@@@
       #include
       #include
       #include
    -  #include
++++++ #include
       
       #define CREATE_TRACE_POINTS
       #include
diff --cc kernel/rcutree.c
index 74df86bd9204,74df86bd9204,15a2beec320f,74df86bd9204,8ef81527a89f,8ed9c481db03,effd47a54b36..3edc761ace32
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@@@@@@@ -212,20 -212,20 -212,20 -212,20 -212,14 -212,20 -212,20 +212,14 @@@@@@@@@ DEFINE_PER_CPU(struct rcu_dynticks, rcu
       #endif
       };
       
-- ----static int blimit = 10;	/* Maximum callbacks per rcu_do_batch. */
-- ----static int qhimark = 10000;	/* If this many pending, ignore blimit. */
-- ----static int qlowmark = 100;	/* Once only this many pending, use blimit. */
++ ++++static long blimit = 10;	/* Maximum callbacks per rcu_do_batch. */
++ ++++static long qhimark = 10000;	/* If this many pending, ignore blimit. */
++ ++++static long qlowmark = 100;	/* Once only this many pending, use blimit. */
       
-- ----module_param(blimit, int, 0444);
-- ----module_param(qhimark, int, 0444);
-- ----module_param(qlowmark, int, 0444);
-- - --
-- - --int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
-- - --int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
-- - --
-- - --module_param(rcu_cpu_stall_suppress, int, 0644);
-- - --module_param(rcu_cpu_stall_timeout, int, 0644);
++ ++++module_param(blimit, long, 0444);
++ ++++module_param(qhimark, long, 0444);
++ ++++module_param(qlowmark, long, 0444);
  -    int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
  -    int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
  -    
  -    module_param(rcu_cpu_stall_suppress, int, 0644);
  -    module_param(rcu_cpu_stall_timeout, int, 0644);
  -    
       static ulong jiffies_till_first_fqs = RCU_JIFFIES_TILL_FORCE_QS;
       static ulong jiffies_till_next_fqs = RCU_JIFFIES_TILL_FORCE_QS;
@@@@@@@@ -2308,10 -2308,10 -2352,10 -2308,10 -2301,10 -2305,32 -2314,10 +2348,32 @@@@@@@@@ static int synchronize_sched_expedited_
        */
       void synchronize_sched_expedited(void)
       {
----- -	int firstsnap, s, snap, trycount = 0;
+++++ +	long firstsnap, s, snap;
+++++ +	int trycount = 0;
+++++ +	struct rcu_state *rsp = &rcu_sched_state;
++ + + 
  - -  	/* Note that atomic_inc_return() implies full memory barrier. */
  - -  	firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
+++++ +	/*
+++++ +	 * If we are in danger of counter wrap, just do synchronize_sched().
+++++ +	 * By allowing sync_sched_expedited_started to advance no more than
+++++ +	 * ULONG_MAX/8 ahead of sync_sched_expedited_done, we are ensuring
+++++ +	 * that more than 3.5 billion CPUs would be required to force a
+++++ +	 * counter wrap on a 32-bit system.  Quite a few more CPUs would of
+++++ +	 * course be required on a 64-bit system.
+++++ +	 */
+++++ +	if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
+++++ +			 (ulong)atomic_long_read(&rsp->expedited_done) +
+++++ +			 ULONG_MAX / 8)) {
+++++ +		synchronize_sched();
+++++ +		atomic_long_inc(&rsp->expedited_wrap);
+++++ +		return;
+++++ +	}
  + +  
-- -  -	/* Note that atomic_inc_return() implies full memory barrier. */
-- -  -	firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
+++++ +	/*
+++++ +	 * Take a ticket.  Note that atomic_inc_return() implies a
+++++ +	 * full memory barrier.
+++++ +	 */
+++++ +	snap = atomic_long_inc_return(&rsp->expedited_start);
+++++ +	firstsnap = snap;
       	get_online_cpus();
       	WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
@@@@@@@@ -2328,7 -2328,7 -2372,7 -2328,7 -2321,7 -2357,8 -2334,7 +2400,8 @@@@@@@@@
       		if (trycount++ < 10) {
       			udelay(trycount * num_online_cpus());
       		} else {
------ 			synchronize_sched();
++++++ 			wait_rcu_gp(call_rcu_sched);
+++++ +			atomic_long_inc(&rsp->expedited_normal);
       			return;
       		}
diff --cc lib/Kconfig.debug
index 28e9d6c98941,28e9d6c98941,41faf0b8df1d,28e9d6c98941,6a4b500e93d1,28e9d6c98941,28e9d6c98941..8820313edb23
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@@@@@@@ -970,9 -970,9 -970,9 -970,9 -970,9 -970,9 -970,9 +970,9 @@@@@@@@@ config RCU_TORTURE_TEST_RUNNABL
       
       config RCU_CPU_STALL_TIMEOUT
       	int "RCU CPU stall timeout in seconds"
---- --	depends on TREE_RCU || TREE_PREEMPT_RCU
++++ ++	depends on TREE_RCU || TREE_PREEMPT_RCU || RCU_TRACE
       	range 3 300
-- ----	default 60
++ ++++	default 21
       	help
       	  If a given RCU grace period extends more than the specified
       	  number of seconds, a CPU stall warning is printed.  If the
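
The wrap-avoidance check merged into synchronize_sched_expedited() above is
the subtle part of this merge: the expedited path hands out tickets from a
free-running counter, so comparisons between expedited_start and
expedited_done must stay correct even after the counters wrap.  The following
stand-alone sketch illustrates that technique under stated assumptions: the
ULONG_CMP_GE() macro matches the kernel's definition in
include/linux/rcupdate.h, while the two counters and the
expedited_wrap_danger() helper are hypothetical user-space stand-ins for
rsp->expedited_start, rsp->expedited_done, and the in-kernel check, not
kernel code.

	/*
	 * Stand-alone sketch of the wrap-safe ticket check used above.
	 * ULONG_CMP_GE() matches the kernel's definition; the counters and
	 * expedited_wrap_danger() are hypothetical stand-ins, not kernel code.
	 */
	#include <stdio.h>
	#include <limits.h>

	typedef unsigned long ulong;

	/* Wrap-safe "a >= b" for free-running unsigned counters. */
	#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))

	static ulong expedited_start;	/* tickets handed out (stand-in) */
	static ulong expedited_done;	/* tickets retired (stand-in) */

	/*
	 * Nonzero when expedited_start has run ULONG_MAX/8 or more ahead of
	 * expedited_done, i.e. when a caller should take the slow path
	 * (synchronize_sched() in the diff above) instead of risking a
	 * wrapped ticket being mistaken for a completed grace period.
	 */
	static int expedited_wrap_danger(void)
	{
		return ULONG_CMP_GE(expedited_start,
				    expedited_done + ULONG_MAX / 8);
	}

	int main(void)
	{
		/* Near the wrap point, modular arithmetic keeps the test sane. */
		expedited_done = ULONG_MAX - 5;
		expedited_start = expedited_done + 10;	/* wraps past zero */
		printf("%d\n", expedited_wrap_danger());	/* 0: fast path OK */

		/* A full ULONG_MAX/8 ahead: fall back to the slow path. */
		expedited_start = expedited_done + ULONG_MAX / 8;
		printf("%d\n", expedited_wrap_danger());	/* 1: slow path */
		return 0;
	}

The ULONG_MAX/8 headroom is what the merged comment quantifies: each caller
advances expedited_start by one ticket, so enormous numbers of concurrent
callers would be needed before the guard could fire spuriously, while a
counter genuinely in danger of wrapping is caught and diverted to the
ordinary synchronize_sched() path.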