From: Ingo Molnar
Date: Tue, 19 Feb 2013 07:45:35 +0000 (+0100)
Subject: manual merge of sched/core
X-Git-Tag: next-20130220~38^2~14
X-Git-Url: https://git.karo-electronics.de/?a=commitdiff_plain;h=8c281e678b3c2b0ec2fed207fd94d596ebc4c9be;p=karo-tx-linux.git

manual merge of sched/core

Signed-off-by: Ingo Molnar
---

8c281e678b3c2b0ec2fed207fd94d596ebc4c9be
diff --cc kernel/context_tracking.c
index d566aba7e801,74f68f4dc6c2..2071a46472cd
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@@ -1,40 -1,11 +1,27 @@@
 +/*
 + * Context tracking: Probe on high level context boundaries such as kernel
 + * and userspace. This includes syscalls and exceptions entry/exit.
 + *
 + * This is used by RCU to remove its dependency on the timer tick while a CPU
 + * runs in userspace.
 + *
 + * Started by Frederic Weisbecker:
 + *
 + * Copyright (C) 2012 Red Hat, Inc., Frederic Weisbecker
 + *
 + * Many thanks to Gilad Ben-Yossef, Paul McKenney, Ingo Molnar, Andrew Morton,
 + * Steven Rostedt, Peter Zijlstra for suggestions and improvements.
 + *
 + */
 +
  #include <linux/context_tracking.h>
+ #include <linux/kvm_host.h>
  #include <linux/rcupdate.h>
  #include <linux/sched.h>
- #include
  #include <linux/hardirq.h>
+ #include <linux/export.h>

- struct context_tracking {
- 	/*
- 	 * When active is false, probes are unset in order
- 	 * to minimize overhead: TIF flags are cleared
- 	 * and calls to user_enter/exit are ignored. This
- 	 * may be further optimized using static keys.
- 	 */
- 	bool active;
- 	enum {
- 		IN_KERNEL = 0,
- 		IN_USER,
- 	} state;
- };
- 
- static DEFINE_PER_CPU(struct context_tracking, context_tracking) = {
+ DEFINE_PER_CPU(struct context_tracking, context_tracking) = {
  #ifdef CONFIG_CONTEXT_TRACKING_FORCE
  	.active = true,
  #endif
@@@ -70,15 -31,9 +57,16 @@@ void user_enter(void)
  	local_irq_save(flags);
  	if (__this_cpu_read(context_tracking.active) &&
  	    __this_cpu_read(context_tracking.state) != IN_USER) {
- 		__this_cpu_write(context_tracking.state, IN_USER);
 +		/*
 +		 * At this stage, only low level arch entry code remains and
 +		 * then we'll run in userspace. We can assume there won't be
 +		 * any RCU read-side critical section until the next call to
 +		 * user_exit() or rcu_irq_enter(). Let's remove RCU's dependency
 +		 * on the tick.
 +		 */
+ 		vtime_user_enter(current);
  		rcu_user_enter();
+ 		__this_cpu_write(context_tracking.state, IN_USER);
  	}
  	local_irq_restore(flags);
  }
@@@ -104,30 -55,31 +92,49 @@@ void user_exit(void)
  	local_irq_save(flags);
  	if (__this_cpu_read(context_tracking.state) == IN_USER) {
- 		__this_cpu_write(context_tracking.state, IN_KERNEL);
 +		/*
 +		 * We are going to run code that may use RCU. Inform
 +		 * RCU core about that (ie: we may need the tick again).
 +		 */
  		rcu_user_exit();
+ 		vtime_user_exit(current);
+ 		__this_cpu_write(context_tracking.state, IN_KERNEL);
  	}
  	local_irq_restore(flags);
  }
 +
 +/**
 + * context_tracking_task_switch - context switch the syscall callbacks
 + * @prev: the task that is being switched out
 + * @next: the task that is being switched in
 + *
 + * The context tracking uses the syscall slow path to implement its user-kernel
 + * boundaries probes on syscalls. This way it doesn't impact the syscall fast
 + * path on CPUs that don't do context tracking.
 + *
 + * But we need to clear the flag on the previous task because it may later
 + * migrate to some CPU that doesn't do the context tracking. As such the TIF
 + * flag may not be desired there.
 + */
+ void guest_enter(void)
+ {
+ 	if (vtime_accounting_enabled())
+ 		vtime_guest_enter(current);
+ 	else
+ 		__guest_enter();
+ }
+ EXPORT_SYMBOL_GPL(guest_enter);
+ 
+ void guest_exit(void)
+ {
+ 	if (vtime_accounting_enabled())
+ 		vtime_guest_exit(current);
+ 	else
+ 		__guest_exit();
+ }
+ EXPORT_SYMBOL_GPL(guest_exit);
+ 
  void context_tracking_task_switch(struct task_struct *prev,
  			     struct task_struct *next)
  {
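
(The final hunk is cut off above, so the body of context_tracking_task_switch() does not appear on this page. For reference, a sketch of the body as it reads in v3.8 mainline, with explanatory comments added here; the merge may carry further changes below the cut:)

	void context_tracking_task_switch(struct task_struct *prev,
				     struct task_struct *next)
	{
		if (__this_cpu_read(context_tracking.active)) {
			/* Hand the probe flag over: drop it from the task
			 * going out, since it may migrate to a CPU that does
			 * no tracking, and arm it on the task coming in. */
			clear_tsk_thread_flag(prev, TIF_NOHZ);
			set_tsk_thread_flag(next, TIF_NOHZ);
		}
	}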
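A note on how these probes are driven: user_enter() and user_exit() are not called by generic code on every boundary; the arch syscall slow path invokes them, which is why the kernel-doc above talks about TIF flags. A minimal sketch of the intended pairing, with hypothetical arch hook names (illustrative only, not code from this tree):

	#include <linux/context_tracking.h>

	/* Hypothetical arch syscall slow-path hooks. */
	void arch_syscall_entry_hook(void)
	{
		user_exit();	/* user -> kernel: rcu_user_exit() re-arms RCU,
				 * vtime_user_exit() flushes user cputime */

		/* ptrace, audit and tracepoints may use RCU safely from here */
	}

	void arch_syscall_exit_hook(void)
	{
		/* last kernel-side work for this syscall */

		user_enter();	/* kernel -> user: RCU may let this CPU run
				 * tickless while it stays in userspace */
	}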
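The guest_enter()/guest_exit() wrappers added by this merge let full dynticks cputime accounting see guest transitions directly: with vtime accounting enabled, guest time is accounted immediately via vtime_guest_enter()/vtime_guest_exit(); otherwise the classic __guest_enter()/__guest_exit() path is kept, which tags the task with PF_VCPU so tick-based accounting charges its time as guest time. A simplified sketch of the caller side, loosely modeled on a KVM vcpu run loop (an assumption for illustration; the real loop does much more around the hardware entry):

	/* Simplified vcpu run step, modeled on virt/kvm usage. */
	static void vcpu_run_step(void)
	{
		local_irq_disable();
		guest_enter();		/* cputime now charged as guest time */

		/* hardware virtualization entry/exit happens here */

		guest_exit();		/* back to charging host time */
		local_irq_enable();
	}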