sched/core: Stop setting PREEMPT_ACTIVE
author	Peter Zijlstra <peterz@infradead.org>	Mon, 28 Sep 2015 16:09:19 +0000 (18:09 +0200)
committer	Ingo Molnar <mingo@kernel.org>	Tue, 6 Oct 2015 15:08:16 +0000 (17:08 +0200)
Now that nothing tests for PREEMPT_ACTIVE anymore, stop setting it.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Steven Rostedt <rostedt@goodmis.org>
Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
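
Background, with a sketch of the transformation (illustrative, not verbatim kernel code): PREEMPT_ACTIVE was a dedicated high bit in preempt_count that flagged "we entered __schedule() because of preemption". Since __schedule() now takes that information as an explicit boolean argument (the `true` visible in the hunks below), and no remaining code inspects the bit, the special enter/exit helpers reduce to ordinary preempt-count manipulation:

	/* Before (sketch): encode "preempting" in the count itself. */
	preempt_count_add(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET);
	barrier();
	__schedule(true);
	barrier();
	preempt_count_sub(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET);

	/* After (sketch): __schedule(true) already carries that fact,
	 * so a plain disable/enable-without-reschedule pair suffices. */
	preempt_disable();
	__schedule(true);
	sched_preempt_enable_no_resched();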
include/linux/preempt.h
kernel/sched/core.c

diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index bea8dd8ff5e026f8fc3e3bc446af7aea7ec891e2..448dfd0b2ea6f8f2cd9c21205756b6f576235977 100644
@@ -146,18 +146,6 @@ extern void preempt_count_sub(int val);
 #define preempt_count_inc() preempt_count_add(1)
 #define preempt_count_dec() preempt_count_sub(1)
 
-#define preempt_active_enter() \
-do { \
-       preempt_count_add(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); \
-       barrier(); \
-} while (0)
-
-#define preempt_active_exit() \
-do { \
-       barrier(); \
-       preempt_count_sub(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); \
-} while (0)
-
 #ifdef CONFIG_PREEMPT_COUNT
 
 #define preempt_disable() \
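
For orientation, PREEMPT_ACTIVE sat above every other preempt_count field. The approximate layout in kernels of this vintage is sketched below; the exact masks live in include/linux/preempt.h, so treat these values as assumptions to be checked against the tree:

	/*
	 * Approximate preempt_count layout, v4.2-v4.3 era:
	 *
	 *   PREEMPT_MASK:   0x000000ff   preempt_disable() nesting depth
	 *   SOFTIRQ_MASK:   0x0000ff00   softirq nesting
	 *   HARDIRQ_MASK:   0x000f0000   hardirq nesting
	 *   NMI_MASK:       0x00100000   in-NMI flag
	 *   PREEMPT_ACTIVE: 0x00200000   the bit this commit stops setting
	 */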
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index cfad7f5f74f8606fdca038316674c5f7ee68b674..6344d82a84f61d0ca62417ab6043515fe1eb74c3 100644
@@ -3201,9 +3201,9 @@ void __sched schedule_preempt_disabled(void)
 static void __sched notrace preempt_schedule_common(void)
 {
        do {
-               preempt_active_enter();
+               preempt_disable();
                __schedule(true);
-               preempt_active_exit();
+               sched_preempt_enable_no_resched();
 
                /*
                 * Check again in case we missed a preemption opportunity
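
Note the asymmetric replacement: the exit side uses sched_preempt_enable_no_resched() rather than preempt_enable(). A full preempt_enable() would itself test need_resched() and could re-enter the scheduler from inside this loop; the no_resched variant just drops the count and leaves the decision to the loop's own need_resched() check. Per the include/linux/preempt.h of this era it expands to roughly:

	#define sched_preempt_enable_no_resched() \
	do { \
		barrier(); \
		preempt_count_dec(); \
	} while (0)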
@@ -3254,13 +3254,7 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
                return;
 
        do {
-               /*
-                * Use raw __preempt_count() ops that don't call functions.
-                * We can't call functions before disabling preemption, as
-                * that would disarm the preemption tracing recursion checks.
-                */
-               __preempt_count_add(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET);
-               barrier();
+               preempt_disable_notrace();
                /*
                 * Needs preempt disabled in case user_exit() is traced
                 * and the tracer calls preempt_enable_notrace() causing
@@ -3270,8 +3264,7 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
                __schedule(true);
                exception_exit(prev_ctx);
 
-               barrier();
-               __preempt_count_sub(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET);
+               preempt_enable_no_resched_notrace();
        } while (need_resched());
 }
 EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
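
The notrace path carries the extra constraint spelled out in the deleted comment: nothing traceable may run before preemption is disabled, or the preemption tracer can recurse. preempt_disable_notrace() and preempt_enable_no_resched_notrace() preserve that property by sticking to the raw __preempt_count ops; per the preempt.h of this era they expand to roughly:

	#define preempt_disable_notrace() \
	do { \
		__preempt_count_inc(); \
		barrier(); \
	} while (0)

	#define preempt_enable_no_resched_notrace() \
	do { \
		barrier(); \
		__preempt_count_dec(); \
	} while (0)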
@@ -3294,11 +3287,11 @@ asmlinkage __visible void __sched preempt_schedule_irq(void)
        prev_state = exception_enter();
 
        do {
-               preempt_active_enter();
+               preempt_disable();
                local_irq_enable();
                __schedule(true);
                local_irq_disable();
-               preempt_active_exit();
+               sched_preempt_enable_no_resched();
        } while (need_resched());
 
        exception_exit(prev_state);
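
preempt_schedule_irq() is entered from the IRQ-return path with interrupts disabled, which is why the code brackets __schedule(true) with local_irq_enable()/local_irq_disable() exactly as before. The net preempt_count bookkeeping per loop iteration, before versus after this commit (an illustrative sketch; PREEMPT_OFFSET as the disable increment is an assumption from the preempt.h of this era):

	/* before:  count += PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET;
	 *          __schedule(true);
	 *          count -= PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET;
	 *
	 * after:   count += PREEMPT_OFFSET;   // preempt_disable()
	 *          __schedule(true);
	 *          count -= PREEMPT_OFFSET;   // ..._enable_no_resched()
	 *
	 * The preempt-vs-voluntary distinction now travels solely in
	 * __schedule()'s argument, never in the count.
	 */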