sched,rcu: Make cond_resched() report RCU quiescent states
author Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Mon, 17 Mar 2014 04:36:25 +0000 (21:36 -0700)
committer Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Wed, 14 May 2014 16:46:11 +0000 (09:46 -0700)
Given a CPU running a loop containing cond_resched(), with no
other tasks runnable on that CPU, RCU will eventually report RCU
CPU stall warnings due to lack of quiescent states.  Fortunately,
every call to cond_resched() is a perfectly good quiescent state.
Unfortunately, invoking rcu_note_context_switch() is a bit heavyweight
for cond_resched(), especially given the need to disable preemption,
and, for RCU-preempt, interrupts as well.
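
For illustration, a minimal sketch of the kind of loop described above; the
thread function is a hypothetical stand-in, not code from this commit. With
no other runnable tasks on the CPU, cond_resched() never context-switches,
so before this commit RCU saw no quiescent states from it:

#include <linux/kthread.h>
#include <linux/sched.h>

static int crunch_fn(void *unused)	/* hypothetical CPU-bound kthread */
{
	unsigned long iterations = 0;

	while (!kthread_should_stop()) {
		iterations++;		/* stand-in for real CPU-bound work */
		cond_resched();		/* after this commit: also an RCU QS hook */
	}
	return 0;
}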

This commit therefore maintains a per-CPU counter that causes
cond_resched(), cond_resched_lock(), and cond_resched_softirq() to call
rcu_note_context_switch(), but only about once per 256 invocations.
This ratio was chosen in keeping with the relative time constants of
RCU grace periods.
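
As a rough standalone model of that amortization (plain userspace C, not
kernel code; every name here is illustrative), the expensive report fires
about once per 256 calls:

#include <stdio.h>

#define LIM 256			/* mirrors RCU_COND_RESCHED_LIM */

static int count;		/* stands in for the per-CPU counter */
static int reports;

static void maybe_report(void)
{
	if (++count >= LIM) {	/* mirrors rcu_should_resched() */
		count = 0;	/* mirrors the reset in rcu_resched() */
		reports++;	/* stands in for rcu_note_context_switch() */
	}
}

int main(void)
{
	for (int i = 0; i < 1000000; i++)
		maybe_report();
	printf("%d calls -> %d reports (~1 per %d)\n", 1000000, reports, LIM);
	return 0;
}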

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
include/linux/rcupdate.h
kernel/rcu/update.c
kernel/sched/core.c

index 82973738125b3abcbd304c73fdf09655706beda2..97cc8d6679b4fdc0abe6cd630fc038b89b2ab985 100644 (file)
@@ -44,6 +44,7 @@
 #include <linux/debugobjects.h>
 #include <linux/bug.h>
 #include <linux/compiler.h>
+#include <linux/percpu.h>
 #include <asm/barrier.h>
 
 extern int rcu_expedited; /* for sysctl */
@@ -286,6 +287,41 @@ static inline void rcu_user_hooks_switch(struct task_struct *prev,
 bool __rcu_is_watching(void);
 #endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */
 
+/*
+ * Hooks for cond_resched() and friends to avoid RCU CPU stall warnings.
+ */
+
+#define RCU_COND_RESCHED_LIM 256       /* ms vs. 100s of ms. */
+DECLARE_PER_CPU(int, rcu_cond_resched_count);
+void rcu_resched(void);
+
+/*
+ * Is it time to report RCU quiescent states?
+ *
+ * Note unsynchronized access to rcu_cond_resched_count.  Yes, we might
+ * increment some random CPU's count, and possibly also load the result from
+ * yet another CPU's count.  We might even clobber some other CPU's attempt
+ * to zero its counter.  This is all OK because the goal is not precision,
+ * but rather reasonable amortization of rcu_note_context_switch() overhead
+ * and extremely high probability of avoiding RCU CPU stall warnings.
+ * Note that this function has to be preempted in just the wrong place,
+ * many thousands of times in a row, for anything bad to happen.
+ */
+static inline bool rcu_should_resched(void)
+{
+       return raw_cpu_inc_return(rcu_cond_resched_count) >=
+              RCU_COND_RESCHED_LIM;
+}
+
+/*
+ * Report quiescent states to RCU if it is time to do so.
+ */
+static inline void rcu_cond_resched(void)
+{
+       if (unlikely(rcu_should_resched()))
+               rcu_resched();
+}
+
 /*
  * Infrastructure to implement the synchronize_() primitives in
  * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
index 4c0a9b0af469a1f5fa3242d594653fee0f004505..ed7a0d72562c3734e97676ca45d0db8ea0e2cc5e 100644 (file)
@@ -338,3 +338,21 @@ static int __init check_cpu_stall_init(void)
 early_initcall(check_cpu_stall_init);
 
 #endif /* #ifdef CONFIG_RCU_STALL_COMMON */
+
+/*
+ * Hooks for cond_resched() and friends to avoid RCU CPU stall warnings.
+ */
+
+DEFINE_PER_CPU(int, rcu_cond_resched_count);
+
+/*
+ * Report a set of RCU quiescent states, for use by cond_resched()
+ * and friends.  Out of line due to being called infrequently.
+ */
+void rcu_resched(void)
+{
+       preempt_disable();
+       __this_cpu_write(rcu_cond_resched_count, 0);
+       rcu_note_context_switch(smp_processor_id());
+       preempt_enable();
+}
index 268a45ea238cc84f51ae7612bf0ba3c531b9887f..9f530c9ed9117de1757215607af4b219987a1559 100644 (file)
@@ -4051,6 +4051,7 @@ static void __cond_resched(void)
 
 int __sched _cond_resched(void)
 {
+       rcu_cond_resched();
        if (should_resched()) {
                __cond_resched();
                return 1;
@@ -4069,15 +4070,18 @@ EXPORT_SYMBOL(_cond_resched);
  */
 int __cond_resched_lock(spinlock_t *lock)
 {
+       bool need_rcu_resched = rcu_should_resched();
        int resched = should_resched();
        int ret = 0;
 
        lockdep_assert_held(lock);
 
-       if (spin_needbreak(lock) || resched) {
+       if (spin_needbreak(lock) || resched || need_rcu_resched) {
                spin_unlock(lock);
                if (resched)
                        __cond_resched();
+               else if (unlikely(need_rcu_resched))
+                       rcu_resched();
                else
                        cpu_relax();
                ret = 1;
@@ -4091,6 +4095,7 @@ int __sched __cond_resched_softirq(void)
 {
        BUG_ON(!in_softirq());
 
+       rcu_cond_resched();  /* BH disabled OK, just recording QSes. */
        if (should_resched()) {
                local_bh_enable();
                __cond_resched();
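
For context, a hypothetical caller pattern for the __cond_resched_lock()
change; the queue types and process_item() are illustrative stand-ins, not
part of this commit. With the change, the lock is also dropped and a
quiescent state reported when the per-CPU counter says it is time, even if
no reschedule or lock waiter is pending:

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/sched.h>

struct my_item {			/* hypothetical */
	struct list_head node;
};

struct my_queue {			/* hypothetical */
	spinlock_t lock;
	struct list_head items;
};

static void drain_queue(struct my_queue *q)
{
	spin_lock(&q->lock);
	while (!list_empty(&q->items)) {
		struct my_item *it =
			list_first_entry(&q->items, struct my_item, node);

		list_del(&it->node);
		process_item(it);	/* hypothetical; must not sleep */
		/*
		 * Drop q->lock if the scheduler, a lock waiter, or (with
		 * this commit) RCU needs attention; the lock is held again
		 * on return, and list_empty() is rechecked under it.
		 */
		cond_resched_lock(&q->lock);
	}
	spin_unlock(&q->lock);
}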