rcu: Switch task's syscall hooks on context switch
author     Frederic Weisbecker <fweisbec@gmail.com>
           Mon, 16 Jul 2012 22:06:40 +0000 (15:06 -0700)
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>
           Tue, 18 Sep 2012 23:27:34 +0000 (16:27 -0700)
Clear a task's syscall hook when it is scheduled out so that, if the
task migrates, it doesn't run the syscall slow path on a CPU that
might not need it.

Also set the syscall hook on the next task if needed.
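
Below is a minimal userspace sketch of the technique, not kernel code:
each task carries a TIF_NOHZ-style flag word, and the switch path drops
the hook from the outgoing task and arms it on the incoming one, gated
by a per-CPU ignore_user_qs toggle as in the rcutree.c hunk. The names
struct task, TIF_NOHZ_BIT and hook_switch() are illustrative stand-ins
for task_struct, TIF_NOHZ and rcu_user_hooks_switch().

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the TIF_NOHZ thread-info flag. */
#define TIF_NOHZ_BIT (1u << 0)

struct task {
        const char *name;
        unsigned int thread_flags;      /* models the thread-info flag word */
};

/* Models the per-CPU rcu_dynticks.ignore_user_qs toggle. */
static bool ignore_user_qs;

/*
 * Models rcu_user_hooks_switch(): clear the syscall-hook flag on the
 * task being scheduled out and set it on the task being scheduled in,
 * so a migrating task does not keep the slow path armed elsewhere.
 */
static void hook_switch(struct task *prev, struct task *next)
{
        if (!ignore_user_qs) {
                prev->thread_flags &= ~TIF_NOHZ_BIT;
                next->thread_flags |= TIF_NOHZ_BIT;
        }
}

int main(void)
{
        struct task a = { "A", TIF_NOHZ_BIT };
        struct task b = { "B", 0 };

        hook_switch(&a, &b);    /* "context switch" from A to B */
        printf("%s hooked: %d, %s hooked: %d\n",
               a.name, !!(a.thread_flags & TIF_NOHZ_BIT),
               b.name, !!(b.thread_flags & TIF_NOHZ_BIT));
        return 0;
}
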

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Alessio Igor Bogani <abogani@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Avi Kivity <avi@redhat.com>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Geoff Levand <geoff@infradead.org>
Cc: Gilad Ben Yossef <gilad@benyossef.com>
Cc: Hakan Akkan <hakanakkan@gmail.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Kevin Hilman <khilman@ti.com>
Cc: Max Krasnyansky <maxk@qualcomm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephen Hemminger <shemminger@vyatta.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Sven-Thorsten Dietrich <thebigcorporation@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
arch/um/drivers/mconsole_kern.c
include/linux/rcupdate.h
include/linux/sched.h
kernel/rcutree.c
kernel/sched/core.c

diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
index 664a60e8dfb442fe2cb75c1ed5ba388a7bdb42b0..c17de0db6736e1bcef0fca9ab1a3d624cd271580 100644
@@ -705,6 +705,7 @@ static void stack_proc(void *arg)
        struct task_struct *from = current, *to = arg;
 
        to->thread.saved_task = from;
+       rcu_switch(from, to);
        switch_to(from, to, from);
 }
 
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 124fce43880dbbfb6e529b3989bc45e1c7c5d622..9265f4b576e6dca05ec58675758aa9c6cc9241cf 100644
@@ -197,6 +197,8 @@ extern void rcu_user_enter(void);
 extern void rcu_user_exit(void);
 extern void rcu_user_enter_after_irq(void);
 extern void rcu_user_exit_after_irq(void);
+extern void rcu_user_hooks_switch(struct task_struct *prev,
+                                 struct task_struct *next);
 #else
 static inline void rcu_user_enter(void) { }
 static inline void rcu_user_exit(void) { }
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b8c86648a2f95dc6f83aab668fd9a7e07f277b4e..cdaeeff56b1d0e79a78bd6bcdb4c94b99cb5a08e 100644
@@ -1886,6 +1886,14 @@ static inline void rcu_copy_process(struct task_struct *p)
 
 #endif
 
+static inline void rcu_switch(struct task_struct *prev,
+                             struct task_struct *next)
+{
+#ifdef CONFIG_RCU_USER_QS
+       rcu_user_hooks_switch(prev, next);
+#endif
+}
+
 static inline void tsk_restore_flags(struct task_struct *task,
                                unsigned long orig_flags, unsigned long flags)
 {
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 440574b2f424fcda144622d31e05fc7fb721e8c7..163a51585ffa087d50f42a925c7f8ad6c2033ee0 100644
@@ -719,6 +719,21 @@ int rcu_is_cpu_idle(void)
 }
 EXPORT_SYMBOL(rcu_is_cpu_idle);
 
+#ifdef CONFIG_RCU_USER_QS
+void rcu_user_hooks_switch(struct task_struct *prev,
+                          struct task_struct *next)
+{
+       struct rcu_dynticks *rdtp;
+
+       /* Interrupts are disabled in context switch */
+       rdtp = &__get_cpu_var(rcu_dynticks);
+       if (!rdtp->ignore_user_qs) {
+               clear_tsk_thread_flag(prev, TIF_NOHZ);
+               set_tsk_thread_flag(next, TIF_NOHZ);
+       }
+}
+#endif /* #ifdef CONFIG_RCU_USER_QS */
+
 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
 
 /*
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index fbf1fd098dc6cca687f0e9296a931aa0425c6fee..cf3f5f37bd8675694de12870986b41c6a0a5cf38 100644
@@ -2081,6 +2081,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
 #endif
 
        /* Here we just switch the register state and the stack. */
+       rcu_switch(prev, next);
        switch_to(prev, next, prev);
 
        barrier();