#include <linux/uaccess.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
-#include <linux/cpuidle.h>
#include <asm/cacheflush.h>
#include <asm/leds.h>
cpu_relax();
} else {
stop_critical_timings();
- if (cpuidle_idle_call())
- pm_idle();
+ pm_idle();
start_critical_timings();
/*
* This will eventually be removed - pm_idle
#include <linux/thread_info.h>
#include <linux/irqflags.h>
#include <linux/smp.h>
-#include <linux/cpuidle.h>
#include <asm/pgalloc.h>
#include <asm/system.h>
#include <linux/atomic.h>
#include <asm/smp.h>
-static void (*pm_idle)(void);
+void (*pm_idle)(void) = NULL;
static int hlt_counter;
local_irq_disable();
/* Don't trace irqs off for idle */
stop_critical_timings();
- if (cpuidle_idle_call())
- pm_idle();
+ pm_idle();
/*
* Sanity check to ensure that pm_idle() returns
* with IRQs enabled
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/kdebug.h>
-#include <linux/cpuidle.h>
#include <asm/pgtable.h>
#include <asm/system.h>
local_irq_disable();
/* Don't trace irqs off for idle */
stop_critical_timings();
- if (cpuidle_idle_call())
- pm_idle();
+ pm_idle();
start_critical_timings();
}
tick_nohz_restart_sched_tick();
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/ftrace.h>
-#include <linux/cpuidle.h>
#include <asm/pgtable.h>
#include <asm/system.h>
enter_idle();
/* Don't trace irqs off for idle */
stop_critical_timings();
- if (cpuidle_idle_call())
- pm_idle();
+ pm_idle();
start_critical_timings();
/* In many cases the interrupt that ended idle
DEFINE_MUTEX(cpuidle_lock);
LIST_HEAD(cpuidle_detected_devices);
+static void (*pm_idle_old)(void);
static int enabled_devices;
static int off __read_mostly;
-static int initialized __read_mostly;
int cpuidle_disabled(void)
{
* cpuidle_idle_call - the main idle loop
*
* NOTE: no locks or semaphores should be used here
- * return non-zero on failure
*/
-int cpuidle_idle_call(void)
+static void cpuidle_idle_call(void)
{
struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
struct cpuidle_state *target_state;
int next_state;
- if (off)
- return -ENODEV;
-
- if (!initialized)
- return -ENODEV;
-
/* check if the device is ready */
- if (!dev || !dev->enabled)
- return -EBUSY;
+ if (!dev || !dev->enabled) {
+ if (pm_idle_old)
+ pm_idle_old();
+ else
+#if defined(CONFIG_ARCH_HAS_DEFAULT_IDLE)
+ default_idle();
+#else
+ local_irq_enable();
+#endif
+ return;
+ }
#if 0
/* shows regressions, re-enable for 2.6.29 */
next_state = cpuidle_curr_governor->select(dev);
if (need_resched()) {
local_irq_enable();
- return 0;
+ return;
}
target_state = &dev->states[next_state];
/* give the governor an opportunity to reflect on the outcome */
if (cpuidle_curr_governor->reflect)
cpuidle_curr_governor->reflect(dev);
-
- return 0;
}
/**
*/
void cpuidle_install_idle_handler(void)
{
- if (enabled_devices) {
+ if (enabled_devices && (pm_idle != cpuidle_idle_call)) {
/* Make sure all changes finished before we switch to new idle */
smp_wmb();
- initialized = 1;
+ pm_idle = cpuidle_idle_call;
}
}
*/
void cpuidle_uninstall_idle_handler(void)
{
- if (enabled_devices) {
- initialized = 0;
+ if (enabled_devices && pm_idle_old && (pm_idle != pm_idle_old)) {
+ pm_idle = pm_idle_old;
cpuidle_kick_cpus();
}
}
if (cpuidle_disabled())
return -ENODEV;
+ pm_idle_old = pm_idle;
+
ret = cpuidle_add_class_sysfs(&cpu_sysdev_class);
if (ret)
return ret;
#ifdef CONFIG_CPU_IDLE
extern void disable_cpuidle(void);
-extern int cpuidle_idle_call(void);
extern int cpuidle_register_driver(struct cpuidle_driver *drv);
struct cpuidle_driver *cpuidle_get_driver(void);
#else
static inline void disable_cpuidle(void) { }
-static inline int cpuidle_idle_call(void) { return -ENODEV; }
static inline int cpuidle_register_driver(struct cpuidle_driver *drv)
{return -ENODEV; }