[PATCH] x86-64: Fix interrupt race in idle callback (3rd try)
author Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Thu, 7 Dec 2006 01:14:13 +0000 (02:14 +0100)
committer Andi Kleen <andi@basil.nowhere.org>
Thu, 7 Dec 2006 01:14:13 +0000 (02:14 +0100)
Idle callbacks have a race between enter_idle() setting isidle and interrupts
that can happen on that CPU before it actually goes to idle. Due to this, an
IDLE_END can get called before IDLE_START. To avoid these races, disable
interrupts before enter_idle() and make sure that all idle routines do not
enable interrupts before entering idle.
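As an illustration, the ordering this enforces in the cpu_idle() loop looks
roughly like the sketch below (not the exact kernel code; enter_idle(),
__exit_idle() and the IDLE_START/IDLE_END callbacks are the x86-64 idle
notifier hooks in arch/x86_64/kernel/process.c):

	while (!need_resched()) {
		local_irq_disable();	/* no interrupt can fire IDLE_END here */
		enter_idle();		/* IDLE_START, still with irqs off */
		idle();			/* re-enables interrupts only together
					 * with going idle, e.g. "sti; hlt"
					 * or "sti; mwait" */
		__exit_idle();		/* IDLE_END, unless an interrupt that
					 * ended idle already called exit_idle() */
	}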

Note that poll_idle() still has this race, as it has to enable interrupts
before going to idle. All other idle routines have the race fixed (see the
sketch below).
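The reason the hlt and mwait based routines can be fixed while poll_idle()
cannot is the STI interrupt shadow: interrupts enabled by "sti" are not
delivered until after the next instruction, so pairing "sti" directly with
"hlt" or "mwait" lets the CPU go idle before any interrupt (and hence any
exit_idle()/IDLE_END callback) can run. A rough sketch of the contrast, with
illustrative comments:

	/* fixed: the STI shadow defers interrupt delivery past mwait */
	asm volatile("sti; .byte 0x0f,0x01,0xc9;"	/* sti; mwait %eax,%ecx */
		     : : "a" (0), "c" (0));

	/* not fixable for polling: an interrupt can be taken between these
	 * two statements, i.e. after IDLE_START but before the CPU is idle */
	local_irq_enable();
	cpu_relax();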

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Andi Kleen <ak@suse.de>
arch/x86_64/kernel/process.c
include/asm-x86_64/processor.h

diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index 0b7b4caa4f74071bfe1797d6e575828241fa5f64..a418ee4c8c62a0d14102b22939dfa8c5b16d0f79 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -127,6 +127,7 @@ static void default_idle(void)
  */
 static void poll_idle (void)
 {
+       local_irq_enable();
        cpu_relax();
 }
 
@@ -208,6 +209,12 @@ void cpu_idle (void)
                                idle = default_idle;
                        if (cpu_is_offline(smp_processor_id()))
                                play_dead();
+                       /*
+                        * Idle routines should keep interrupts disabled
+                        * from here on, until they go to idle.
+                        * Otherwise, idle callbacks can misfire.
+                        */
+                       local_irq_disable();
                        enter_idle();
                        idle();
                        /* In many cases the interrupt that ended idle
@@ -245,8 +252,16 @@ void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
 /* Default MONITOR/MWAIT with no hints, used for default C1 state */
 static void mwait_idle(void)
 {
-       local_irq_enable();
-       mwait_idle_with_hints(0,0);
+       if (!need_resched()) {
+               __monitor((void *)&current_thread_info()->flags, 0, 0);
+               smp_mb();
+               if (!need_resched())
+                       __sti_mwait(0, 0);
+               else
+                       local_irq_enable();
+       } else {
+               local_irq_enable();
+       }
 }
 
 void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h
index cef17e0f828cc5d0080cf967d11e80a228f4d18b..76552d72804c6368ed210fd02811c0f9608308e7 100644
--- a/include/asm-x86_64/processor.h
+++ b/include/asm-x86_64/processor.h
@@ -475,6 +475,14 @@ static inline void __mwait(unsigned long eax, unsigned long ecx)
                : :"a" (eax), "c" (ecx));
 }
 
+static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
+{
+       /* "mwait %eax,%ecx;" */
+       asm volatile(
+               "sti; .byte 0x0f,0x01,0xc9;"
+               : :"a" (eax), "c" (ecx));
+}
+
 extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
 
 #define stack_current() \