#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
extern void select_nohz_load_balancer(int stop_tick);
+extern void set_cpu_sd_state_idle(void);
extern int get_nohz_timer_target(void);
#else
static inline void select_nohz_load_balancer(int stop_tick) { }
+static inline void set_cpu_sd_state_idle(void) { }
#endif
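
The stub in the #else branch carries an empty body rather than a bare prototype, so calls compile away cleanly when SMP or NO_HZ is off; a bare prototype would leave every such build with an unresolved reference. A minimal stand-alone sketch of the declare-or-stub idiom (the macro and function names are illustrative, not the kernel's):

#if defined(HAVE_FEATURE)
extern void feature_hook(void);			/* real definition lives elsewhere */
#else
static inline void feature_hook(void) { }	/* empty body: calls compile away */
#endif

void caller(void)
{
	feature_hook();				/* valid under either configuration */
}
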
/*
 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
 * single CPU.
*/
unsigned int power, power_orig;
+ /*
+ * Number of busy cpus in this group.
+ */
+ atomic_t nr_busy_cpus;
};
struct sched_group {
return;
}
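
The two helpers below track per-CPU idle state through a per-runqueue flag word. As best I recall, this series defines the bit and its accessor roughly as follows; treat the exact names and layout as assumptions:

enum {
	NOHZ_TICK_STOPPED,
	NOHZ_BALANCE_KICK,
	NOHZ_IDLE,		/* this CPU is already accounted idle */
};

#define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)	/* unsigned long in struct rq */
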
+/*
+ * First busy tick after an idle stretch: account this CPU as busy
+ * again in every sched_group it belongs to.
+ */
+static inline void set_cpu_sd_state_busy(void)
+{
+ struct sched_domain *sd;
+ int cpu = smp_processor_id();
+
+ if (!test_bit(NOHZ_IDLE, nohz_flags(cpu)))
+ return;
+ clear_bit(NOHZ_IDLE, nohz_flags(cpu));
+
+ rcu_read_lock();
+ for_each_domain(cpu, sd)
+ atomic_inc(&sd->groups->sgp->nr_busy_cpus);
+ rcu_read_unlock();
+}
+
+/*
+ * Called on entry to the idle loop: account this CPU as idle in
+ * every sched_group it belongs to.
+ */
+void set_cpu_sd_state_idle(void)
+{
+ struct sched_domain *sd;
+ int cpu = smp_processor_id();
+
+ if (test_bit(NOHZ_IDLE, nohz_flags(cpu)))
+ return;
+ set_bit(NOHZ_IDLE, nohz_flags(cpu));
+
+ rcu_read_lock();
+ for_each_domain(cpu, sd)
+ atomic_dec(&sd->groups->sgp->nr_busy_cpus);
+ rcu_read_unlock();
+}
+
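
Because each helper tests NOHZ_IDLE before touching the counter, every busy/idle flip contributes exactly one increment or decrement per CPU, so nr_busy_cpus cannot drift. A stand-alone C model of that pairing invariant, stripped of the kernel machinery (purely illustrative):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int nr_busy_cpus;

struct cpu_state {
	bool idle_flagged;	/* models the per-cpu NOHZ_IDLE bit */
};

static void go_busy(struct cpu_state *c)
{
	if (!c->idle_flagged)			/* already accounted busy */
		return;
	c->idle_flagged = false;
	atomic_fetch_add(&nr_busy_cpus, 1);
}

static void go_idle(struct cpu_state *c)
{
	if (c->idle_flagged)			/* already accounted idle */
		return;
	c->idle_flagged = true;
	atomic_fetch_sub(&nr_busy_cpus, 1);
}

int main(void)
{
	struct cpu_state cpu = { .idle_flagged = false };

	atomic_store(&nr_busy_cpus, 1);		/* one CPU, accounted busy */
	go_idle(&cpu);				/* 1 -> 0 */
	go_idle(&cpu);				/* no-op: already idle */
	go_busy(&cpu);				/* 0 -> 1 */
	return atomic_load(&nr_busy_cpus) == 1 ? 0 : 1;
}
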
/*
* This routine will try to nominate the ilb (idle load balancing)
* owner among the cpus whose ticks are stopped. ilb owner will do the idle
* We may be recently in ticked or tickless idle mode. At the first
* busy tick after returning from idle, we will update the busy stats.
*/
+ set_cpu_sd_state_busy();
if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu))))
clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
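
The payoff for the counter is in this same tick path: deciding whether to kick nohz idle balancing can now read nr_busy_cpus instead of scanning every sibling runqueue. Roughly as below; the SD_SHARE_PKG_RESOURCES guard and the helper name are how I recall the series using it, so treat the details as assumptions:

static inline int busy_sibling_in_package(int cpu)
{
	struct sched_domain *sd;
	int kick = 0;

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		/* kick if another CPU in a cache/package-sharing
		 * domain is busy alongside us */
		if ((sd->flags & SD_SHARE_PKG_RESOURCES) &&
		    atomic_read(&sd->groups->sgp->nr_busy_cpus) > 1) {
			kick = 1;
			break;
		}
	}
	rcu_read_unlock();
	return kick;
}
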
cpu = smp_processor_id();
ts = &per_cpu(tick_cpu_sched, cpu);
+ /*
+ * Update the idle state in the scheduler domain hierarchy
+ * when tick_nohz_stop_sched_tick() is called from the idle loop.
+ * State will be updated to busy during the first busy tick after
+ * exiting idle.
+ */
+ if (inidle)
+ set_cpu_sd_state_idle();
+
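
The inidle guard matters because, in kernels of this vintage, tick_nohz_stop_sched_tick() is reached from two different places and only one of them means the CPU is genuinely entering idle. A sketch of the two call sites (paraphrased from memory; the wrapper names are illustrative):

static void idle_loop_path(void)
{
	tick_nohz_stop_sched_tick(1);	/* idle loop: also mark domain state idle */
}

static void irq_exit_path(void)
{
	tick_nohz_stop_sched_tick(0);	/* irq_exit(): tick may stop, but the CPU
					 * is not entering idle; skip the update */
}
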
/*
* Call to tick_nohz_start_idle stops the last_update_time from being
* updated. Thus, it must not be called in the event we are called from