From b30f0e3ffedfa52b1d67a302ae5860c49998e5e2 Mon Sep 17 00:00:00 2001
From: Frederic Weisbecker
Date: Tue, 12 May 2015 16:41:49 +0200
Subject: [PATCH] sched/preempt: Optimize preemption operations on __schedule() callers

__schedule() disables preemption, and some of its callers (the
preempt_schedule*() family) also set PREEMPT_ACTIVE. So we have two
preempt_count() modifications that could be performed at once.

Let's remove the preemption disablement from __schedule() and pull that
responsibility up to its callers, so that the preempt_count() operations
are optimized in a single place.

Suggested-by: Linus Torvalds
Signed-off-by: Frederic Weisbecker
Signed-off-by: Peter Zijlstra (Intel)
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Link: http://lkml.kernel.org/r/1431441711-29753-5-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar
---
 include/linux/preempt.h | 12 ++++++++++++
 kernel/sched/core.c     | 29 +++++++++--------------------
 2 files changed, 21 insertions(+), 20 deletions(-)

diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 4689ef210a13..45da394f2779 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -137,6 +137,18 @@ extern void preempt_count_sub(int val);
 #define preempt_count_inc() preempt_count_add(1)
 #define preempt_count_dec() preempt_count_sub(1)
 
+#define preempt_active_enter() \
+do { \
+	preempt_count_add(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); \
+	barrier(); \
+} while (0)
+
+#define preempt_active_exit() \
+do { \
+	barrier(); \
+	preempt_count_sub(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); \
+} while (0)
+
 #ifdef CONFIG_PREEMPT_COUNT
 
 #define preempt_disable() \
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 355f9538ca33..5140db62c621 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2773,9 +2773,7 @@ again:
  *      - return from syscall or exception to user-space
  *      - return from interrupt-handler to user-space
  *
- * WARNING: all callers must re-check need_resched() afterward and reschedule
- * accordingly in case an event triggered the need for rescheduling (such as
- * an interrupt waking up a task) while preemption was disabled in __schedule().
+ * WARNING: must be called with preemption disabled!
  */
 static void __sched __schedule(void)
 {
@@ -2784,7 +2782,6 @@ static void __sched __schedule(void)
 	struct rq *rq;
 	int cpu;
 
-	preempt_disable();
 	cpu = smp_processor_id();
 	rq = cpu_rq(cpu);
 	rcu_note_context_switch();
@@ -2848,8 +2845,6 @@ static void __sched __schedule(void)
 		raw_spin_unlock_irq(&rq->lock);
 
 	post_schedule(rq);
-
-	sched_preempt_enable_no_resched();
 }
 
 static inline void sched_submit_work(struct task_struct *tsk)
@@ -2870,7 +2865,9 @@ asmlinkage __visible void __sched schedule(void)
 
 	sched_submit_work(tsk);
 	do {
+		preempt_disable();
 		__schedule();
+		sched_preempt_enable_no_resched();
 	} while (need_resched());
 }
 EXPORT_SYMBOL(schedule);
@@ -2909,15 +2906,14 @@ void __sched schedule_preempt_disabled(void)
 
 static void __sched notrace preempt_schedule_common(void)
 {
 	do {
-		__preempt_count_add(PREEMPT_ACTIVE);
+		preempt_active_enter();
 		__schedule();
-		__preempt_count_sub(PREEMPT_ACTIVE);
+		preempt_active_exit();
 
 		/*
 		 * Check again in case we missed a preemption opportunity
 		 * between schedule and now.
 		 */
-		barrier();
 	} while (need_resched());
 }
@@ -2964,7 +2960,7 @@ asmlinkage __visible void __sched notrace preempt_schedule_context(void)
 		return;
 
 	do {
-		__preempt_count_add(PREEMPT_ACTIVE);
+		preempt_active_enter();
 		/*
 		 * Needs preempt disabled in case user_exit() is traced
 		 * and the tracer calls preempt_enable_notrace() causing
@@ -2974,8 +2970,7 @@ asmlinkage __visible void __sched notrace preempt_schedule_context(void)
 		__schedule();
 		exception_exit(prev_ctx);
 
-		__preempt_count_sub(PREEMPT_ACTIVE);
-		barrier();
+		preempt_active_exit();
 	} while (need_resched());
 }
 EXPORT_SYMBOL_GPL(preempt_schedule_context);
@@ -2999,17 +2994,11 @@ asmlinkage __visible void __sched preempt_schedule_irq(void)
 	prev_state = exception_enter();
 
 	do {
-		__preempt_count_add(PREEMPT_ACTIVE);
+		preempt_active_enter();
 		local_irq_enable();
 		__schedule();
 		local_irq_disable();
-		__preempt_count_sub(PREEMPT_ACTIVE);
-
-		/*
-		 * Check again in case we missed a preemption opportunity
-		 * between schedule and now.
-		 */
-		barrier();
+		preempt_active_exit();
 	} while (need_resched());
 
 	exception_exit(prev_state);
-- 
2.39.5
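
For illustration, the counter arithmetic that preempt_active_enter()/
preempt_active_exit() rely on can be modeled in plain user-space C. The
sketch below is not kernel code: the MODEL_* constants, the
model_preempt_count variable, and the old_scheme_enter()/new_scheme_enter()
helpers are all assumptions made up for this example (the real values live
in asm-generic/preempt.h). It only shows why folding PREEMPT_ACTIVE and one
disable level into a single add halves the preempt_count() updates on the
preempt_schedule*() paths.

/* preempt_model.c -- illustrative sketch only, not kernel code. */
#include <assert.h>
#include <stdio.h>

#define MODEL_PREEMPT_ACTIVE   0x10000000  /* assumed flag bit for the sketch */
#define MODEL_DISABLE_OFFSET   1           /* one preempt_disable() level */

static int model_preempt_count;  /* stand-in for the per-CPU preempt counter */

/* Old scheme: the caller sets PREEMPT_ACTIVE, then __schedule() itself
 * calls preempt_disable() -- two separate counter modifications. */
static void old_scheme_enter(void)
{
	model_preempt_count += MODEL_PREEMPT_ACTIVE;  /* __preempt_count_add(PREEMPT_ACTIVE) */
	model_preempt_count += MODEL_DISABLE_OFFSET;  /* preempt_disable() in __schedule() */
}

/* New scheme: one combined modification, as preempt_active_enter() does. */
static void new_scheme_enter(void)
{
	model_preempt_count += MODEL_PREEMPT_ACTIVE + MODEL_DISABLE_OFFSET;
}

int main(void)
{
	old_scheme_enter();
	int after_old = model_preempt_count;

	model_preempt_count = 0;
	new_scheme_enter();

	/* Both schemes reach the same counter state; the new one in one update. */
	assert(model_preempt_count == after_old);
	printf("preempt_count model: %#x\n", model_preempt_count);
	return 0;
}

Note also how the barrier() placement inside the macros (after the add in
preempt_active_enter(), before the sub in preempt_active_exit()) keeps the
compiler from moving scheduled code outside the preempt-off region, which is
why the callers' explicit barrier() calls can be dropped in this patch.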