#endif
};
-static int blimit = 10; /* Maximum callbacks per rcu_do_batch. */
-static int qhimark = 10000; /* If this many pending, ignore blimit. */
-static int qlowmark = 100; /* Once only this many pending, use blimit. */
+static long blimit = 10; /* Maximum callbacks per rcu_do_batch. */
+static long qhimark = 10000; /* If this many pending, ignore blimit. */
+static long qlowmark = 100; /* Once only this many pending, use blimit. */
-module_param(blimit, int, 0444);
-module_param(qhimark, int, 0444);
-module_param(qlowmark, int, 0444);
+module_param(blimit, long, 0444);
+module_param(qhimark, long, 0444);
+module_param(qlowmark, long, 0444);

int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;

module_param(rcu_cpu_stall_suppress, int, 0644);
module_param(rcu_cpu_stall_timeout, int, 0644);

static ulong jiffies_till_first_fqs = RCU_JIFFIES_TILL_FORCE_QS;
static ulong jiffies_till_next_fqs = RCU_JIFFIES_TILL_FORCE_QS;
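
The hazard the int-to-long conversion above avoids is ordinary C truncation. A stand-alone user-space sketch (hypothetical values, not kernel code) of what happens when a long callback count passes through an int batch limit on an LP64 machine:

#include <limits.h>
#include <stdio.h>

int main(void)
{
	long qlen = 3000000000L; /* pending-callback count above INT_MAX (LP64) */
	int bl_int = qlen;       /* truncated: typically wraps to a negative value */
	long bl_long = qlen;     /* preserved exactly */

	printf("qlen=%ld bl_int=%d bl_long=%ld\n", qlen, bl_int, bl_long);
	return 0;
}

A batch loop bounded by the negative bl_int would misbehave as soon as the pending count exceeds INT_MAX, which is why blimit, qhimark, and qlowmark become long above.
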
*/
void synchronize_sched_expedited(void)
{
-	int firstsnap, s, snap, trycount = 0;
+	long firstsnap, s, snap;
+	int trycount = 0;
+	struct rcu_state *rsp = &rcu_sched_state;

-	/* Note that atomic_inc_return() implies full memory barrier. */
-	firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
+	/*
+	 * If we are in danger of counter wrap, just do synchronize_sched().
+	 * By allowing sync_sched_expedited_started to advance no more than
+	 * ULONG_MAX/8 ahead of sync_sched_expedited_done, we are ensuring
+	 * that more than 3.5 billion CPUs would be required to force a
+	 * counter wrap on a 32-bit system. Quite a few more CPUs would of
+	 * course be required on a 64-bit system.
+	 */
+	if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
+			 (ulong)atomic_long_read(&rsp->expedited_done) +
+			 ULONG_MAX / 8)) {
+		synchronize_sched();
+		atomic_long_inc(&rsp->expedited_wrap);
+		return;
+	}
+
+	/*
+	 * Take a ticket. Note that atomic_inc_return() implies a
+	 * full memory barrier.
+	 */
+	snap = atomic_long_inc_return(&rsp->expedited_start);
+	firstsnap = snap;
get_online_cpus();
WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
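
The overflow guard above leans on ULONG_CMP_GE(), the kernel's wrap-tolerant unsigned comparison (defined in include/linux/rcupdate.h as ULONG_MAX / 2 >= (a) - (b)). A small user-space demonstration, with made-up counter values, of why the modular form keeps ordering the tickets correctly after a counter wraps:

#include <limits.h>
#include <stdio.h>

/* Wrap-tolerant comparison, as defined in the kernel. */
#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))

int main(void)
{
	unsigned long done = ULONG_MAX - 5; /* ticket counter about to wrap */
	unsigned long start = done + 10;    /* wrapped past zero: now equals 4 */

	/* A plain >= misorders the counters once one of them wraps... */
	printf("plain:     start >= done            -> %d\n", start >= done);
	/* ...but modular comparison still sees start 10 ahead of done. */
	printf("wrap-safe: ULONG_CMP_GE(start,done) -> %d\n",
	       ULONG_CMP_GE(start, done));
	return 0;
}

In the patch, the comparison asks whether expedited_start has pulled ULONG_MAX/8 or more ahead of expedited_done in this modular sense; if so, synchronize_sched_expedited() declines to take a ticket and falls back to synchronize_sched().
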
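A compressed sketch of the ticket protocol this hunk begins to set up, using C11 atomics in place of the kernel's atomic_long_t; the names mirror the patch, but the single-threaded flow and the simplified completion test are illustrative only. Each caller takes a ticket from expedited_start before its attempt, then compares its snapshot against expedited_done to learn whether some other caller's grace period already covered it:

#include <stdatomic.h>
#include <stdio.h>

static atomic_long expedited_start; /* tickets handed out */
static atomic_long expedited_done;  /* tickets whose grace period completed */

/* Did a grace period complete after "snap" was taken?  A real
 * implementation would use the wrap-safe comparison shown above. */
static int already_done(long snap)
{
	return atomic_load(&expedited_done) >= snap;
}

int main(void)
{
	/* Take a ticket; atomic_fetch_add() is seq_cst by default,
	 * mirroring the full barrier of atomic_long_inc_return(). */
	long snap = atomic_fetch_add(&expedited_start, 1) + 1;

	/* ... attempt the expedited grace period here ... */

	/* Pretend a concurrent caller completed a grace period meanwhile. */
	atomic_store(&expedited_done, snap);

	printf("request already covered: %d\n", already_done(snap));
	return 0;
}
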