static inline void entering_irq(void)
{
	irq_enter();
-	exit_idle();
}

static inline void entering_ack_irq(void)
#ifndef _ASM_X86_IDLE_H
#define _ASM_X86_IDLE_H
-#ifdef CONFIG_X86_64
-void enter_idle(void);
-void exit_idle(void);
-#else /* !CONFIG_X86_64 */
-static inline void enter_idle(void) { }
-static inline void exit_idle(void) { }
-static inline void __exit_idle(void) { }
-#endif /* CONFIG_X86_64 */
-
void amd_e400_remove_cpu(int cpu);
#endif /* _ASM_X86_IDLE_H */
	case KVM_PV_REASON_PAGE_NOT_PRESENT:
		/* page is swapped out by the host. */
		prev_state = exception_enter();
-		exit_idle();
		kvm_async_pf_task_wait((u32)read_cr2());
		exception_exit(prev_state);
		break;
	case KVM_PV_REASON_PAGE_READY:
		rcu_irq_enter();
-		exit_idle();
		kvm_async_pf_task_wake((u32)read_cr2());
		rcu_irq_exit();
		break;
}
#endif
-#ifdef CONFIG_X86_64
-void enter_idle(void)
-{
-}
-
-static void __exit_idle(void)
-{
-}
-
-/* Called from interrupts to signify idle end */
-void exit_idle(void)
-{
-	/* idle loop has pid 0 */
-	if (current->pid)
-		return;
-	__exit_idle();
-}
-#endif
-
void arch_cpu_idle_enter(void)
{
	local_touch_nmi();
-	enter_idle();
-}
-
-void arch_cpu_idle_exit(void)
-{
-	__exit_idle();
}

void arch_cpu_idle_dead(void)
	irq_enter();
#ifdef CONFIG_X86
-	exit_idle();
	inc_irq_stat(irq_hv_callback_count);
#endif