From: Paul E. McKenney
Date: Tue, 23 Oct 2012 22:12:39 +0000 (-0700)
Subject: Merge branches 'doc.2012.10.23c', 'fixes.2012.10.23c', 'hotplug.2012.10.23c', 'srcu...
X-Git-Tag: next-20121024~29^2~2
X-Git-Url: https://git.karo-electronics.de/?a=commitdiff_plain;h=38be7d35ad5d465b8f5fb8aa63274f50fb3f2d14;p=karo-tx-linux.git

Merge branches 'doc.2012.10.23c', 'fixes.2012.10.23c', 'hotplug.2012.10.23c',
'srcu.2012.10.23c' and 'stall.2012.10.23c' into dev.3.8.2012.10.23d
---

38be7d35ad5d465b8f5fb8aa63274f50fb3f2d14
diff --cc kernel/rcutree.c
index 74df86bd9204,ca3221595ed5,4eafb89bd60f,74df86bd9204,8ef81527a89f..027153cfa1f4
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@@@@@ -212,20 -212,20 -212,20 -212,20 -212,14 +212,14 @@@@@@@ DEFINE_PER_CPU(struct rcu_dynticks, rcu
     #endif
     };
     
- ---static int blimit = 10;	/* Maximum callbacks per rcu_do_batch. */
- ---static int qhimark = 10000;	/* If this many pending, ignore blimit. */
- ---static int qlowmark = 100;	/* Once only this many pending, use blimit. */
+ +++static long blimit = 10;	/* Maximum callbacks per rcu_do_batch. */
+ +++static long qhimark = 10000;	/* If this many pending, ignore blimit. */
+ +++static long qlowmark = 100;	/* Once only this many pending, use blimit. */
     
- ---module_param(blimit, int, 0444);
- ---module_param(qhimark, int, 0444);
- ---module_param(qlowmark, int, 0444);
- --
- -- int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
- -- int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
- --
- -- module_param(rcu_cpu_stall_suppress, int, 0644);
- -- module_param(rcu_cpu_stall_timeout, int, 0644);
+ +++module_param(blimit, long, 0444);
+ +++module_param(qhimark, long, 0444);
+ +++module_param(qlowmark, long, 0444);
-    int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
-    int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
-    
-    module_param(rcu_cpu_stall_suppress, int, 0644);
-    module_param(rcu_cpu_stall_timeout, int, 0644);
-    
     static ulong jiffies_till_first_fqs = RCU_JIFFIES_TILL_FORCE_QS;
     static ulong jiffies_till_next_fqs = RCU_JIFFIES_TILL_FORCE_QS;
@@@@@@ -2308,10 -2347,10 -2305,32 -2308,10 -2301,10 +2337,32 @@@@@@@ static int synchronize_sched_expedited_
      */
     void synchronize_sched_expedited(void)
     {
-- --	int firstsnap, s, snap, trycount = 0;
++ ++	long firstsnap, s, snap;
++ ++	int trycount = 0;
++ ++	struct rcu_state *rsp = &rcu_sched_state;
+ +
- -	/* Note that atomic_inc_return() implies full memory barrier. */
- -	firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
++ ++	/*
++ ++	 * If we are in danger of counter wrap, just do synchronize_sched().
++ ++	 * By allowing sync_sched_expedited_started to advance no more than
++ ++	 * ULONG_MAX/8 ahead of sync_sched_expedited_done, we are ensuring
++ ++	 * that more than 3.5 billion CPUs would be required to force a
++ ++	 * counter wrap on a 32-bit system.  Quite a few more CPUs would of
++ ++	 * course be required on a 64-bit system.
++ ++	 */
++ ++	if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
++ ++			 (ulong)atomic_long_read(&rsp->expedited_done) +
++ ++			 ULONG_MAX / 8)) {
++ ++		synchronize_sched();
++ ++		atomic_long_inc(&rsp->expedited_wrap);
++ ++		return;
++ ++	}
+ +
- -	/* Note that atomic_inc_return() implies full memory barrier. */
- -	firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
++ ++	/*
++ ++	 * Take a ticket.  Note that atomic_inc_return() implies a
++ ++	 * full memory barrier.
++ ++	 */
++ ++	snap = atomic_long_inc_return(&rsp->expedited_start);
++ ++	firstsnap = snap;
     	get_online_cpus();
     	WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
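
The wrap-avoidance check above works because ULONG_CMP_GE() compares
unsigned longs using modular arithmetic, so it stays correct even after
the ticket counters wrap. Below is a minimal userspace sketch of that
comparison; the macro body mirrors the kernel's definition, while the
surrounding program is purely illustrative.

#include <limits.h>
#include <stdio.h>

/* Modular "greater than or equal": valid across counter wrap. */
#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))

int main(void)
{
	unsigned long start = ULONG_MAX - 1;	/* about to wrap */
	unsigned long done  = ULONG_MAX - 3;	/* trails start by 2 */

	/* Unsigned subtraction wraps, keeping both answers correct. */
	printf("%d\n", ULONG_CMP_GE(start, done));	/* prints 1 */
	printf("%d\n", ULONG_CMP_GE(done, start));	/* prints 0 */
	return 0;
}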
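The ticket scheme itself follows the same idea: expedited_start counts
tickets handed out, expedited_done counts grace periods completed, and
the ULONG_MAX/8 gap forces a fallback long before wrap is possible.
Here is a hypothetical userspace mock using C11 atomics in place of the
kernel's atomic_long_t; the field names follow the patch, but the
helper take_expedited_ticket() and everything else are assumptions made
for illustration only.

#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>

#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))

static atomic_ulong expedited_start;	/* tickets handed out */
static atomic_ulong expedited_done;	/* tickets retired */

/*
 * Take a ticket, or return false if expedited_start has pulled more
 * than ULONG_MAX/8 ahead of expedited_done, in which case the caller
 * should fall back to the slow path (synchronize_sched() above).
 */
static bool take_expedited_ticket(unsigned long *snap)
{
	if (ULONG_CMP_GE(atomic_load(&expedited_start),
			 atomic_load(&expedited_done) + ULONG_MAX / 8))
		return false;

	/* Sequentially consistent by default, analogous to the full
	 * memory barrier implied by atomic_long_inc_return(). */
	*snap = atomic_fetch_add(&expedited_start, 1) + 1;
	return true;
}

int main(void)
{
	unsigned long snap;

	return take_expedited_ticket(&snap) ? 0 : 1;
}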