diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 50c17975d4f42904384225b37b1478d41d1e8fa5..2411000d98690aacd76d20acc039964402e83388 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -153,7 +153,7 @@ static void rcu_preempt_qs(int cpu)
  *
  * Caller must disable preemption.
  */
-static void rcu_preempt_note_context_switch(int cpu)
+void rcu_preempt_note_context_switch(void)
 {
        struct task_struct *t = current;
        unsigned long flags;
@@ -164,7 +164,7 @@ static void rcu_preempt_note_context_switch(int cpu)
            (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
 
                /* Possibly blocking in an RCU read-side critical section. */
-               rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
+               rdp = __this_cpu_ptr(rcu_preempt_state.rda);
                rnp = rdp->mynode;
                raw_spin_lock_irqsave(&rnp->lock, flags);
                t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
@@ -228,7 +228,7 @@ static void rcu_preempt_note_context_switch(int cpu)
         * means that we continue to block the current grace period.
         */
        local_irq_save(flags);
-       rcu_preempt_qs(cpu);
+       rcu_preempt_qs(smp_processor_id());
        local_irq_restore(flags);
 }
 
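With the CPU argument gone, rcu_preempt_note_context_switch() derives the current CPU itself via __this_cpu_ptr() and smp_processor_id(), so a caller only has to guarantee that preemption is disabled. A minimal sketch of the caller-side change, assuming rcu_note_context_switch() in kernel/rcutree.c is the call site (that hunk is not part of this diff):

	/* Sketch only; illustrates the new calling convention. */
	void rcu_note_context_switch(int cpu)
	{
		rcu_sched_qs(cpu);
		rcu_preempt_note_context_switch();	/* was: rcu_preempt_note_context_switch(cpu) */
	}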
@@ -969,22 +969,6 @@ static void __init __rcu_init_preempt(void)
        rcu_init_one(&rcu_preempt_state, &rcu_preempt_data);
 }
 
-/*
- * Check for a task exiting while in a preemptible-RCU read-side
- * critical section, clean up if so.  No need to issue warnings,
- * as debug_check_no_locks_held() already does this if lockdep
- * is enabled.
- */
-void exit_rcu(void)
-{
-       struct task_struct *t = current;
-
-       if (t->rcu_read_lock_nesting == 0)
-               return;
-       t->rcu_read_lock_nesting = 1;
-       __rcu_read_unlock();
-}
-
 #else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
 
 static struct rcu_state *rcu_state = &rcu_sched_state;
@@ -1017,14 +1001,6 @@ void rcu_force_quiescent_state(void)
 }
 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
 
-/*
- * Because preemptible RCU does not exist, we never have to check for
- * CPUs being in quiescent states.
- */
-static void rcu_preempt_note_context_switch(int cpu)
-{
-}
-
 /*
  * Because preemptible RCU does not exist, there are never any preempted
  * RCU readers.
@@ -1986,12 +1962,19 @@ static void rcu_idle_count_callbacks_posted(void)
 #define RCU_IDLE_GP_DELAY 6            /* Roughly one grace period. */
 #define RCU_IDLE_LAZY_GP_DELAY (6 * HZ)        /* Roughly six seconds. */
 
+/* Loop counter for rcu_prepare_for_idle(). */
 static DEFINE_PER_CPU(int, rcu_dyntick_drain);
+/* If rcu_dyntick_holdoff==jiffies, don't try to enter dyntick-idle mode. */
 static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
+/* Timer to awaken the CPU if it enters dyntick-idle mode with callbacks. */
 static DEFINE_PER_CPU(struct timer_list, rcu_idle_gp_timer);
+/* Scheduled expiry time for rcu_idle_gp_timer to allow reposting. */
 static DEFINE_PER_CPU(unsigned long, rcu_idle_gp_timer_expires);
+/* Enable special processing on first attempt to enter dyntick-idle mode. */
 static DEFINE_PER_CPU(bool, rcu_idle_first_pass);
+/* Running count of non-lazy callbacks posted, never decremented. */
 static DEFINE_PER_CPU(unsigned long, rcu_nonlazy_posted);
+/* Snapshot of rcu_nonlazy_posted to detect meaningful exits from idle. */
 static DEFINE_PER_CPU(unsigned long, rcu_nonlazy_posted_snap);
 
 /*
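The new comments describe how these per-CPU variables cooperate: rcu_dyntick_holdoff throttles repeated attempts to enter dyntick-idle mode, the timer and its saved expiry cover the case where the CPU goes idle with callbacks still queued, and the posted/posted_snap pair detects newly posted work. As an illustration of the holdoff rule only, a simplified sketch with hypothetical helper names (not code from this file):

	/* Hypothetical, simplified sketch of the holdoff rule described above. */
	static bool rcu_dyntick_idle_allowed(int cpu)
	{
		if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies)
			return false;	/* Gave up earlier this jiffy; don't retry yet. */
		return true;
	}

	static void rcu_dyntick_idle_give_up(int cpu)
	{
		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;	/* Start the holdoff. */
	}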
@@ -2056,16 +2039,35 @@ static bool rcu_cpu_has_nonlazy_callbacks(int cpu)
               rcu_preempt_cpu_has_nonlazy_callbacks(cpu);
 }
 
+/*
+ * Handler for smp_call_function_single().  The only point of this
+ * handler is to wake the CPU up, so the handler does only tracing.
+ */
+void rcu_idle_demigrate(void *unused)
+{
+       trace_rcu_prep_idle("Demigrate");
+}
+
 /*
  * Timer handler used to force CPU to start pushing its remaining RCU
  * callbacks in the case where it entered dyntick-idle mode with callbacks
  * pending.  The handler doesn't really need to do anything because the
  * real work is done upon re-entry to idle, or by the next scheduling-clock
  * interrupt should idle not be re-entered.
+ *
+ * One special case: the timer gets migrated without awakening the CPU
+ * on which the timer was scheduled.  In this case, we must wake up
+ * that CPU.  We do so with smp_call_function_single().
  */
-static void rcu_idle_gp_timer_func(unsigned long unused)
+static void rcu_idle_gp_timer_func(unsigned long cpu_in)
 {
+       int cpu = (int)cpu_in;
+
        trace_rcu_prep_idle("Timer");
+       if (cpu != smp_processor_id())
+               smp_call_function_single(cpu, rcu_idle_demigrate, NULL, 0);
+       else
+               WARN_ON_ONCE(1); /* Getting here can hang the system... */
 }
 
 /*
@@ -2073,8 +2075,11 @@ static void rcu_idle_gp_timer_func(unsigned long unused)
  */
 static void rcu_prepare_for_idle_init(int cpu)
 {
+       per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
        setup_timer(&per_cpu(rcu_idle_gp_timer, cpu),
-                   rcu_idle_gp_timer_func, 0);
+                   rcu_idle_gp_timer_func, cpu);
+       per_cpu(rcu_idle_gp_timer_expires, cpu) = jiffies - 1;
+       per_cpu(rcu_idle_first_pass, cpu) = 1;
 }
 
 /*
@@ -2109,6 +2114,8 @@ static void rcu_cleanup_after_idle(int cpu)
  */
 static void rcu_prepare_for_idle(int cpu)
 {
+       struct timer_list *tp;
+
        /*
         * If this is an idle re-entry, for example, due to use of
         * RCU_NONIDLE() or the new idle-loop tracing API within the idle
@@ -2120,9 +2127,10 @@ static void rcu_prepare_for_idle(int cpu)
        if (!per_cpu(rcu_idle_first_pass, cpu) &&
            (per_cpu(rcu_nonlazy_posted, cpu) ==
             per_cpu(rcu_nonlazy_posted_snap, cpu))) {
-               if (rcu_cpu_has_callbacks(cpu))
-                       mod_timer(&per_cpu(rcu_idle_gp_timer, cpu),
-                                 per_cpu(rcu_idle_gp_timer_expires, cpu));
+               if (rcu_cpu_has_callbacks(cpu)) {
+                       tp = &per_cpu(rcu_idle_gp_timer, cpu);
+                       mod_timer_pinned(tp, per_cpu(rcu_idle_gp_timer_expires, cpu));
+               }
                return;
        }
        per_cpu(rcu_idle_first_pass, cpu) = 0;
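The timer is now reposted with mod_timer_pinned(), which behaves like mod_timer() except that it keeps the timer on the CPU that arms it rather than allowing it to be migrated to another CPU; that matters here because the timer's whole purpose is to wake this particular CPU. A minimal, hypothetical usage sketch (names invented for illustration):

	#include <linux/kernel.h>
	#include <linux/smp.h>
	#include <linux/timer.h>

	static struct timer_list example_timer;		/* Hypothetical timer. */

	static void example_timer_func(unsigned long data)
	{
		pr_info("example timer fired on CPU %d\n", smp_processor_id());
	}

	static void arm_example_timer_on_this_cpu(void)
	{
		setup_timer(&example_timer, example_timer_func, 0);
		/* Like mod_timer(), but the timer stays on the current CPU. */
		mod_timer_pinned(&example_timer, jiffies + HZ);
	}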
@@ -2166,8 +2174,8 @@ static void rcu_prepare_for_idle(int cpu)
                else
                        per_cpu(rcu_idle_gp_timer_expires, cpu) =
                                           jiffies + RCU_IDLE_LAZY_GP_DELAY;
-               mod_timer(&per_cpu(rcu_idle_gp_timer, cpu),
-                         per_cpu(rcu_idle_gp_timer_expires, cpu));
+               tp = &per_cpu(rcu_idle_gp_timer, cpu);
+               mod_timer_pinned(tp, per_cpu(rcu_idle_gp_timer_expires, cpu));
                per_cpu(rcu_nonlazy_posted_snap, cpu) =
                        per_cpu(rcu_nonlazy_posted, cpu);
                return; /* Nothing more to do immediately. */
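The rcu_nonlazy_posted / rcu_nonlazy_posted_snap pair implements a simple activity detector: the counter only ever increases, and a snapshot is taken when the CPU prepares for idle, so any later difference proves that new non-lazy work was posted, even if an equal number of callbacks was invoked in the meantime. A self-contained sketch of the idiom (illustrative names only):

	/* Illustrative sketch of the never-decremented counter + snapshot idiom. */
	static unsigned long posted;		/* Only ever incremented. */
	static unsigned long posted_snap;	/* Snapshot taken at idle entry. */

	static void note_posted(void)
	{
		posted++;			/* Record that work was posted. */
	}

	static void snapshot_at_idle_entry(void)
	{
		posted_snap = posted;
	}

	static bool posted_since_idle_entry(void)
	{
		/* A count of pending work could balance out; this cannot. */
		return posted != posted_snap;
	}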
@@ -2210,10 +2218,12 @@ static void rcu_prepare_for_idle(int cpu)
 }
 
 /*
- * Keep a running count of callbacks posted so that rcu_prepare_for_idle()
- * can detect when something out of the idle loop posts a callback.
- * Of course, it had better do so either from a trace event designed to
- * be called from idle or from within RCU_NONIDLE().
+ * Keep a running count of the number of non-lazy callbacks posted
+ * on this CPU.  This running counter (which is never decremented) allows
+ * rcu_prepare_for_idle() to detect when something out of the idle loop
+ * posts a callback, even if an equal number of callbacks are invoked.
+ * Of course, callbacks should only be posted from within a trace event
+ * designed to be called from idle or from within RCU_NONIDLE().
  */
 static void rcu_idle_count_callbacks_posted(void)
 {