diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index c023464816bede2d7e5ec6f85b47d56b959119d1..2411000d98690aacd76d20acc039964402e83388 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -153,7 +153,7 @@ static void rcu_preempt_qs(int cpu)
  *
  * Caller must disable preemption.
  */
-static void rcu_preempt_note_context_switch(int cpu)
+void rcu_preempt_note_context_switch(void)
 {
        struct task_struct *t = current;
        unsigned long flags;
@@ -164,7 +164,7 @@ static void rcu_preempt_note_context_switch(int cpu)
            (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
 
                /* Possibly blocking in an RCU read-side critical section. */
-               rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
+               rdp = __this_cpu_ptr(rcu_preempt_state.rda);
                rnp = rdp->mynode;
                raw_spin_lock_irqsave(&rnp->lock, flags);
                t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
@@ -228,7 +228,7 @@ static void rcu_preempt_note_context_switch(int cpu)
         * means that we continue to block the current grace period.
         */
        local_irq_save(flags);
-       rcu_preempt_qs(cpu);
+       rcu_preempt_qs(smp_processor_id());
        local_irq_restore(flags);
 }
 
@@ -969,22 +969,6 @@ static void __init __rcu_init_preempt(void)
        rcu_init_one(&rcu_preempt_state, &rcu_preempt_data);
 }
 
-/*
- * Check for a task exiting while in a preemptible-RCU read-side
- * critical section, clean up if so.  No need to issue warnings,
- * as debug_check_no_locks_held() already does this if lockdep
- * is enabled.
- */
-void exit_rcu(void)
-{
-       struct task_struct *t = current;
-
-       if (t->rcu_read_lock_nesting == 0)
-               return;
-       t->rcu_read_lock_nesting = 1;
-       __rcu_read_unlock();
-}
-
 #else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
 
 static struct rcu_state *rcu_state = &rcu_sched_state;
@@ -1017,14 +1001,6 @@ void rcu_force_quiescent_state(void)
 }
 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
 
-/*
- * Because preemptible RCU does not exist, we never have to check for
- * CPUs being in quiescent states.
- */
-static void rcu_preempt_note_context_switch(int cpu)
-{
-}
-
 /*
  * Because preemptible RCU does not exist, there are never any preempted
  * RCU readers.
@@ -1938,6 +1914,14 @@ static void rcu_prepare_for_idle(int cpu)
 {
 }
 
+/*
+ * Don't bother keeping a running count of the number of RCU callbacks
+ * posted because CONFIG_RCU_FAST_NO_HZ=n.
+ */
+static void rcu_idle_count_callbacks_posted(void)
+{
+}
+
 #else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */
 
 /*
@@ -1978,11 +1962,20 @@ static void rcu_prepare_for_idle(int cpu)
 #define RCU_IDLE_GP_DELAY 6            /* Roughly one grace period. */
 #define RCU_IDLE_LAZY_GP_DELAY (6 * HZ)        /* Roughly six seconds. */
 
+/* Loop counter for rcu_prepare_for_idle(). */
 static DEFINE_PER_CPU(int, rcu_dyntick_drain);
+/* If rcu_dyntick_holdoff==jiffies, don't try to enter dyntick-idle mode. */
 static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
-static DEFINE_PER_CPU(struct hrtimer, rcu_idle_gp_timer);
-static ktime_t rcu_idle_gp_wait;       /* If some non-lazy callbacks. */
-static ktime_t rcu_idle_lazy_gp_wait;  /* If only lazy callbacks. */
+/* Timer to awaken the CPU if it enters dyntick-idle mode with callbacks. */
+static DEFINE_PER_CPU(struct timer_list, rcu_idle_gp_timer);
+/* Scheduled expiry time for rcu_idle_gp_timer to allow reposting. */
+static DEFINE_PER_CPU(unsigned long, rcu_idle_gp_timer_expires);
+/* Enable special processing on first attempt to enter dyntick-idle mode. */
+static DEFINE_PER_CPU(bool, rcu_idle_first_pass);
+/* Running count of non-lazy callbacks posted, never decremented. */
+static DEFINE_PER_CPU(unsigned long, rcu_nonlazy_posted);
+/* Snapshot of rcu_nonlazy_posted to detect meaningful exits from idle. */
+static DEFINE_PER_CPU(unsigned long, rcu_nonlazy_posted_snap);
 
 /*
  * Allow the CPU to enter dyntick-idle mode if either: (1) There are no
@@ -1995,6 +1988,8 @@ static ktime_t rcu_idle_lazy_gp_wait;     /* If only lazy callbacks. */
  */
 int rcu_needs_cpu(int cpu)
 {
+       /* Flag a new idle sojourn to the idle-entry state machine. */
+       per_cpu(rcu_idle_first_pass, cpu) = 1;
        /* If no callbacks, RCU doesn't need the CPU. */
        if (!rcu_cpu_has_callbacks(cpu))
                return 0;
@@ -2044,17 +2039,35 @@ static bool rcu_cpu_has_nonlazy_callbacks(int cpu)
               rcu_preempt_cpu_has_nonlazy_callbacks(cpu);
 }
 
+/*
+ * Handler for smp_call_function_single().  The only point of this
+ * handler is to wake the CPU up, so the handler does only tracing.
+ */
+void rcu_idle_demigrate(void *unused)
+{
+       trace_rcu_prep_idle("Demigrate");
+}
+
 /*
  * Timer handler used to force CPU to start pushing its remaining RCU
  * callbacks in the case where it entered dyntick-idle mode with callbacks
 * pending.  The handler doesn't really need to do anything because the
  * real work is done upon re-entry to idle, or by the next scheduling-clock
  * interrupt should idle not be re-entered.
+ *
+ * One special case: the timer gets migrated without awakening the CPU
+ * on which the timer was scheduled.  In this case, we must wake up
+ * that CPU.  We do so with smp_call_function_single().
  */
-static enum hrtimer_restart rcu_idle_gp_timer_func(struct hrtimer *hrtp)
+static void rcu_idle_gp_timer_func(unsigned long cpu_in)
 {
+       int cpu = (int)cpu_in;
+
        trace_rcu_prep_idle("Timer");
-       return HRTIMER_NORESTART;
+       if (cpu != smp_processor_id())
+               smp_call_function_single(cpu, rcu_idle_demigrate, NULL, 0);
+       else
+               WARN_ON_ONCE(1); /* Getting here can hang the system... */
 }
 
 /*
@@ -2062,19 +2075,11 @@ static enum hrtimer_restart rcu_idle_gp_timer_func(struct hrtimer *hrtp)
  */
 static void rcu_prepare_for_idle_init(int cpu)
 {
-       static int firsttime = 1;
-       struct hrtimer *hrtp = &per_cpu(rcu_idle_gp_timer, cpu);
-
-       hrtimer_init(hrtp, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-       hrtp->function = rcu_idle_gp_timer_func;
-       if (firsttime) {
-               unsigned int upj = jiffies_to_usecs(RCU_IDLE_GP_DELAY);
-
-               rcu_idle_gp_wait = ns_to_ktime(upj * (u64)1000);
-               upj = jiffies_to_usecs(RCU_IDLE_LAZY_GP_DELAY);
-               rcu_idle_lazy_gp_wait = ns_to_ktime(upj * (u64)1000);
-               firsttime = 0;
-       }
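+       /* Note: jiffies - 1 != jiffies, so the CPU starts with no holdoff in effect. */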
+       per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
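+       /* Bind the per-CPU wakeup timer to its handler, passing the CPU number as data. */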
+       setup_timer(&per_cpu(rcu_idle_gp_timer, cpu),
+                   rcu_idle_gp_timer_func, cpu);
+       per_cpu(rcu_idle_gp_timer_expires, cpu) = jiffies - 1;
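+       /* No timer has been armed yet, so record an already-expired time. */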
+       per_cpu(rcu_idle_first_pass, cpu) = 1;
 }
 
 /*
@@ -2084,7 +2089,8 @@ static void rcu_prepare_for_idle_init(int cpu)
  */
 static void rcu_cleanup_after_idle(int cpu)
 {
-       hrtimer_cancel(&per_cpu(rcu_idle_gp_timer, cpu));
+       del_timer(&per_cpu(rcu_idle_gp_timer, cpu));
+       trace_rcu_prep_idle("Cleanup after idle");
 }
 
 /*
@@ -2108,6 +2114,29 @@ static void rcu_cleanup_after_idle(int cpu)
  */
 static void rcu_prepare_for_idle(int cpu)
 {
+       struct timer_list *tp;
+
+       /*
+        * If this is an idle re-entry, for example, due to use of
+        * RCU_NONIDLE() or the new idle-loop tracing API within the idle
+        * loop, then don't take any state-machine actions, unless the
+        * momentary exit from idle queued additional non-lazy callbacks.
+        * Instead, repost the rcu_idle_gp_timer if this CPU has callbacks
+        * pending.
+        */
+       if (!per_cpu(rcu_idle_first_pass, cpu) &&
+           (per_cpu(rcu_nonlazy_posted, cpu) ==
+            per_cpu(rcu_nonlazy_posted_snap, cpu))) {
+               if (rcu_cpu_has_callbacks(cpu)) {
+                       tp = &per_cpu(rcu_idle_gp_timer, cpu);
+                       mod_timer_pinned(tp, per_cpu(rcu_idle_gp_timer_expires, cpu));
+               }
+               return;
+       }
+       per_cpu(rcu_idle_first_pass, cpu) = 0;
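+       /* Deliberately stale snapshot: rerun the state machine on the next pass
+        * unless the snapshot is refreshed in the dyntick-entry path below. */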
+       per_cpu(rcu_nonlazy_posted_snap, cpu) =
+               per_cpu(rcu_nonlazy_posted, cpu) - 1;
+
        /*
         * If there are no callbacks on this CPU, enter dyntick-idle mode.
         * Also reset state to avoid prejudicing later attempts.
@@ -2140,11 +2169,15 @@ static void rcu_prepare_for_idle(int cpu)
                per_cpu(rcu_dyntick_drain, cpu) = 0;
                per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
                if (rcu_cpu_has_nonlazy_callbacks(cpu))
-                       hrtimer_start(&per_cpu(rcu_idle_gp_timer, cpu),
-                                     rcu_idle_gp_wait, HRTIMER_MODE_REL);
+                       per_cpu(rcu_idle_gp_timer_expires, cpu) =
+                                          jiffies + RCU_IDLE_GP_DELAY;
                else
-                       hrtimer_start(&per_cpu(rcu_idle_gp_timer, cpu),
-                                     rcu_idle_lazy_gp_wait, HRTIMER_MODE_REL);
+                       per_cpu(rcu_idle_gp_timer_expires, cpu) =
+                                          jiffies + RCU_IDLE_LAZY_GP_DELAY;
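+               /* (Re)arm the wakeup timer, pinned to this CPU, for the chosen expiry. */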
+               tp = &per_cpu(rcu_idle_gp_timer, cpu);
+               mod_timer_pinned(tp, per_cpu(rcu_idle_gp_timer_expires, cpu));
+               per_cpu(rcu_nonlazy_posted_snap, cpu) =
+                       per_cpu(rcu_nonlazy_posted, cpu);
                return; /* Nothing more to do immediately. */
        } else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
                /* We have hit the limit, so time to give up. */
@@ -2184,6 +2217,19 @@ static void rcu_prepare_for_idle(int cpu)
                trace_rcu_prep_idle("Callbacks drained");
 }
 
+/*
+ * Keep a running count of the number of non-lazy callbacks posted
+ * on this CPU.  This running counter (which is never decremented) allows
+ * rcu_prepare_for_idle() to detect when something out of the idle loop
+ * posts a callback, even if an equal number of callbacks are invoked.
+ * Of course, callbacks should only be posted from within a trace event
+ * designed to be called from idle or from within RCU_NONIDLE().
+ */
+static void rcu_idle_count_callbacks_posted(void)
+{
+       __this_cpu_add(rcu_nonlazy_posted, 1);
+}
+
 #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
 
 #ifdef CONFIG_RCU_CPU_STALL_INFO
@@ -2192,14 +2238,12 @@ static void rcu_prepare_for_idle(int cpu)
 
 static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
 {
-       struct hrtimer *hrtp = &per_cpu(rcu_idle_gp_timer, cpu);
+       struct timer_list *tltp = &per_cpu(rcu_idle_gp_timer, cpu);
 
-       sprintf(cp, "drain=%d %c timer=%lld",
+       sprintf(cp, "drain=%d %c timer=%lu",
                per_cpu(rcu_dyntick_drain, cpu),
                per_cpu(rcu_dyntick_holdoff, cpu) == jiffies ? 'H' : '.',
-               hrtimer_active(hrtp)
-                       ? ktime_to_us(hrtimer_get_remaining(hrtp))
-                       : -1);
+               timer_pending(tltp) ? tltp->expires - jiffies : -1);
 }
 
 #else /* #ifdef CONFIG_RCU_FAST_NO_HZ */