sched/preempt: Optimize preemption operations on __schedule() callers
Author:     Frederic Weisbecker <fweisbec@gmail.com>
AuthorDate: Tue, 12 May 2015 14:41:49 +0000 (16:41 +0200)
Commit:     Ingo Molnar <mingo@kernel.org>
CommitDate: Tue, 19 May 2015 06:39:12 +0000 (08:39 +0200)
__schedule() disables preemption, and some of its callers
(the preempt_schedule*() family) also set PREEMPT_ACTIVE.

So we have two preempt_count() modifications that could be performed
at once.

Let's remove the preemption disablement from __schedule() and pull
this responsibility up to its callers, so that the preempt_count()
operations can be combined in a single place.
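
For illustration only, here is a minimal userspace model of the
before/after counter traffic (the flat int counter and the constant
values are made up for the sketch; the kernel's real accessors and
PREEMPT_ACTIVE value differ):

/* Illustrative values only; not the kernel's definitions. */
#define PREEMPT_OFFSET	1		/* preempt_disable() increment */
#define PREEMPT_ACTIVE	0x100000	/* "preemption in progress" flag */

static int preempt_count;

static void before(void)	/* two updates on each side */
{
	preempt_count += PREEMPT_ACTIVE;	/* caller */
	preempt_count += PREEMPT_OFFSET;	/* __schedule() */
	/* ... context switch ... */
	preempt_count -= PREEMPT_OFFSET;	/* __schedule() */
	preempt_count -= PREEMPT_ACTIVE;	/* caller */
}

static void after(void)		/* one update on each side */
{
	preempt_count += PREEMPT_ACTIVE + PREEMPT_OFFSET;
	/* ... context switch, preemption already disabled ... */
	preempt_count -= PREEMPT_ACTIVE + PREEMPT_OFFSET;
}

int main(void)
{
	before();
	after();
	return 0;
}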

Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1431441711-29753-5-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
include/linux/preempt.h
kernel/sched/core.c

diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 4689ef210a13c057a9c44880b1442437cdbd06af..45da394f2779a55058b7c047e559f1598fdba648 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -137,6 +137,18 @@ extern void preempt_count_sub(int val);
 #define preempt_count_inc() preempt_count_add(1)
 #define preempt_count_dec() preempt_count_sub(1)
 
+#define preempt_active_enter() \
+do { \
+       preempt_count_add(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); \
+       barrier(); \
+} while (0)
+
+#define preempt_active_exit() \
+do { \
+       barrier(); \
+       preempt_count_sub(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); \
+} while (0)
+
 #ifdef CONFIG_PREEMPT_COUNT
 
 #define preempt_disable() \
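
PREEMPT_DISABLE_OFFSET comes from earlier in this series (the renamed
PREEMPT_CHECK_OFFSET): the regular preempt_disable() increment when
CONFIG_PREEMPT_COUNT is set. The barrier() placement matters: code in
the protected region must not be reordered above the add or below the
sub. A userspace model of the pattern (barrier() here is the classic
GCC compiler barrier; the int counter and constants are illustrative):

#define barrier()	__asm__ __volatile__("" : : : "memory")

#define PREEMPT_ACTIVE		0x100000	/* illustrative */
#define PREEMPT_DISABLE_OFFSET	1		/* illustrative */

static int preempt_count;

#define preempt_active_enter()						\
do {									\
	preempt_count += PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET;	\
	barrier();	/* region code can't be hoisted above the add */ \
} while (0)

#define preempt_active_exit()						\
do {									\
	barrier();	/* region code can't sink below the sub */	\
	preempt_count -= PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET;	\
} while (0)

int main(void)
{
	preempt_active_enter();
	/* non-preemptible, PREEMPT_ACTIVE-marked region */
	preempt_active_exit();
	return 0;
}
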
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 355f9538ca33dbd9be4b082ad63751eaf73a4296..5140db62c621c125c764a68b060596986317e4aa 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2773,9 +2773,7 @@ again:
  *          - return from syscall or exception to user-space
  *          - return from interrupt-handler to user-space
  *
- * WARNING: all callers must re-check need_resched() afterward and reschedule
- * accordingly in case an event triggered the need for rescheduling (such as
- * an interrupt waking up a task) while preemption was disabled in __schedule().
+ * WARNING: must be called with preemption disabled!
  */
 static void __sched __schedule(void)
 {
@@ -2784,7 +2782,6 @@ static void __sched __schedule(void)
        struct rq *rq;
        int cpu;
 
-       preempt_disable();
        cpu = smp_processor_id();
        rq = cpu_rq(cpu);
        rcu_note_context_switch();
@@ -2848,8 +2845,6 @@ static void __sched __schedule(void)
                raw_spin_unlock_irq(&rq->lock);
 
        post_schedule(rq);
-
-       sched_preempt_enable_no_resched();
 }
 
 static inline void sched_submit_work(struct task_struct *tsk)
@@ -2870,7 +2865,9 @@ asmlinkage __visible void __sched schedule(void)
 
        sched_submit_work(tsk);
        do {
+               preempt_disable();
                __schedule();
+               sched_preempt_enable_no_resched();
        } while (need_resched());
 }
 EXPORT_SYMBOL(schedule);
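
Since __schedule() no longer re-enables preemption itself, schedule()
now brackets each call, and the need_resched() recheck stays in the
loop: sched_preempt_enable_no_resched() deliberately skips the
reschedule check, so a wakeup that arrived while preemption was off is
caught by the loop condition instead. A stub sketch of the shape
(every function here is a placeholder, not the kernel's):

#include <stdbool.h>

static bool need_resched(void) { return false; }	/* stub */
static void preempt_disable(void) { }			/* stub */
static void sched_preempt_enable_no_resched(void) { }	/* stub: no resched check */
static void __schedule(void) { }	/* stub: requires preemption disabled */

static void schedule_model(void)
{
	do {
		preempt_disable();	/* moved out of __schedule() */
		__schedule();
		sched_preempt_enable_no_resched();
	} while (need_resched());	/* catch wakeups that raced with the switch */
}

int main(void) { schedule_model(); return 0; }
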
@@ -2909,15 +2906,14 @@ void __sched schedule_preempt_disabled(void)
 static void __sched notrace preempt_schedule_common(void)
 {
        do {
-               __preempt_count_add(PREEMPT_ACTIVE);
+               preempt_active_enter();
                __schedule();
-               __preempt_count_sub(PREEMPT_ACTIVE);
+               preempt_active_exit();
 
                /*
                 * Check again in case we missed a preemption opportunity
                 * between schedule and now.
                 */
-               barrier();
        } while (need_resched());
 }
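
One pass through the rewritten loop nets out to zero on the counter;
with illustrative values, the trace looks like this (a throwaway
userspace program, not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned int count = 0;		/* preemptible */

	count += 0x100000 + 1;		/* preempt_active_enter() */
	printf("inside __schedule(): %#x\n", count);	/* 0x100001 */
	count -= 0x100000 + 1;		/* preempt_active_exit() */
	printf("after the loop body: %#x\n", count);	/* 0: preemptible */
	return 0;
}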
 
@@ -2964,7 +2960,7 @@ asmlinkage __visible void __sched notrace preempt_schedule_context(void)
                return;
 
        do {
-               __preempt_count_add(PREEMPT_ACTIVE);
+               preempt_active_enter();
                /*
                 * Needs preempt disabled in case user_exit() is traced
                 * and the tracer calls preempt_enable_notrace() causing
@@ -2974,8 +2970,7 @@ asmlinkage __visible void __sched notrace preempt_schedule_context(void)
                __schedule();
                exception_exit(prev_ctx);
 
-               __preempt_count_sub(PREEMPT_ACTIVE);
-               barrier();
+               preempt_active_exit();
        } while (need_resched());
 }
 EXPORT_SYMBOL_GPL(preempt_schedule_context);
@@ -2999,17 +2994,11 @@ asmlinkage __visible void __sched preempt_schedule_irq(void)
        prev_state = exception_enter();
 
        do {
-               __preempt_count_add(PREEMPT_ACTIVE);
+               preempt_active_enter();
                local_irq_enable();
                __schedule();
                local_irq_disable();
-               __preempt_count_sub(PREEMPT_ACTIVE);
-
-               /*
-                * Check again in case we missed a preemption opportunity
-                * between schedule and now.
-                */
-               barrier();
+               preempt_active_exit();
        } while (need_resched());
 
        exception_exit(prev_state);
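
preempt_schedule_irq() is entered from the interrupt-return path with
IRQs disabled, so it must re-enable them around __schedule() and turn
them back off before returning; the combined enter/exit helpers wrap
that bracketing. A stub sketch of the control flow (all functions are
placeholders, not the kernel's):

#include <stdbool.h>

static bool need_resched(void) { return false; }	/* stub */
static void preempt_active_enter(void) { }	/* stub: one combined add */
static void preempt_active_exit(void) { }	/* stub: one combined sub */
static void local_irq_enable(void) { }		/* stub */
static void local_irq_disable(void) { }		/* stub */
static void __schedule(void) { }		/* stub */

static void preempt_schedule_irq_model(void)
{
	do {
		preempt_active_enter();
		local_irq_enable();	/* __schedule() runs with IRQs on */
		__schedule();
		local_irq_disable();	/* back to IRQs-off for the return path */
		preempt_active_exit();
	} while (need_resched());
}

int main(void) { preempt_schedule_irq_model(); return 0; }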