cpufreq: add interactive governor support
author Anson Huang <b20788@freescale.com>
Tue, 17 Jun 2014 05:33:17 +0000 (13:33 +0800)
committer Nitin Garg <nitin.garg@freescale.com>
Fri, 16 Jan 2015 03:16:44 +0000 (21:16 -0600)
This patch adds interactive governor support. The original files
come from Google's Android experimental android-3.14 branch; the
latest commit included is:

c0eb5f75d cpufreq: interactive: Use generic get_cpu_idle_time() from cpufreq.c

Acked-by: Shawn Guo <shawn.guo@linaro.org>
Signed-off-by: Anson Huang <b20788@freescale.com>
drivers/cpufreq/Kconfig
drivers/cpufreq/Makefile
drivers/cpufreq/cpufreq_interactive.c [new file with mode: 0755]
include/linux/cpufreq.h
include/trace/events/cpufreq_interactive.h [new file with mode: 0755]

index 4b029c0944af5ef55b9bdb02a266cbcb3ecd2d23..05ad24f2471b5032f71bd786fb4306eb917c80a8 100644 (file)
@@ -102,6 +102,16 @@ config CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
          Be aware that not all cpufreq drivers support the conservative
          governor. If unsure have a look at the help section of the
          driver. Fallback governor will be the performance governor.
+
+config CPU_FREQ_DEFAULT_GOV_INTERACTIVE
+       bool "interactive"
+       select CPU_FREQ_GOV_INTERACTIVE
+       help
+         Use the CPUFreq governor 'interactive' as default. This allows
+         you to get a fully dynamic CPU frequency capable system by simply
+         loading your low-level cpufreq hardware driver and using the
+         'interactive' governor for latency-sensitive workloads.
+
 endchoice
 
 config CPU_FREQ_GOV_PERFORMANCE
@@ -159,6 +169,19 @@ config CPU_FREQ_GOV_ONDEMAND
 
          If in doubt, say N.
 
+config CPU_FREQ_GOV_INTERACTIVE
+       tristate "'interactive' cpufreq policy governor"
+       help
+         'interactive' - This driver adds a dynamic cpufreq policy governor
+         designed for latency-sensitive workloads.
+         This governor attempts to reduce the latency of clock
+         increases so that the system is more responsive to
+         interactive workloads.
+         To compile this driver as a module, choose M here: the
+         module will be called cpufreq_interactive.
+         For details, take a look at linux/Documentation/cpu-freq.
+         If in doubt, say N.
+
 config CPU_FREQ_GOV_CONSERVATIVE
        tristate "'conservative' cpufreq governor"
        depends on CPU_FREQ
index dac58f67307a042e9712e2bed579f607b7c9522c..40d29eaa579ff88ad8bdf86d74001d0af8d55760 100644 (file)
@@ -9,6 +9,7 @@ obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE)    += cpufreq_powersave.o
 obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE)   += cpufreq_userspace.o
 obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND)    += cpufreq_ondemand.o
 obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE)        += cpufreq_conservative.o
+obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVE) += cpufreq_interactive.o
 obj-$(CONFIG_CPU_FREQ_GOV_COMMON)              += cpufreq_governor.o
 
 obj-$(CONFIG_GENERIC_CPUFREQ_CPU0)     += cpufreq-cpu0.o
diff --git a/drivers/cpufreq/cpufreq_interactive.c b/drivers/cpufreq/cpufreq_interactive.c
new file mode 100755 (executable)
index 0000000..ff77b30
--- /dev/null
@@ -0,0 +1,1343 @@
+/*
+ * drivers/cpufreq/cpufreq_interactive.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Author: Mike Chan (mike@android.com)
+ *
+ */
+
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/rwsem.h>
+#include <linux/sched.h>
+#include <linux/sched/rt.h>
+#include <linux/tick.h>
+#include <linux/time.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/cpufreq_interactive.h>
+
+struct cpufreq_interactive_cpuinfo {
+       struct timer_list cpu_timer;
+       struct timer_list cpu_slack_timer;
+       spinlock_t load_lock; /* protects the next 4 fields */
+       u64 time_in_idle;
+       u64 time_in_idle_timestamp;
+       u64 cputime_speedadj;
+       u64 cputime_speedadj_timestamp;
+       struct cpufreq_policy *policy;
+       struct cpufreq_frequency_table *freq_table;
+       unsigned int target_freq;
+       unsigned int floor_freq;
+       u64 floor_validate_time;
+       u64 hispeed_validate_time;
+       struct rw_semaphore enable_sem;
+       int governor_enabled;
+};
+
+static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);
+
+/* realtime thread handles frequency scaling */
+static struct task_struct *speedchange_task;
+static cpumask_t speedchange_cpumask;
+static spinlock_t speedchange_cpumask_lock;
+static struct mutex gov_lock;
+
+/* Target load.  Lower values result in higher CPU speeds. */
+#define DEFAULT_TARGET_LOAD 90
+static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};
+
+#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
+#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
+static unsigned int default_above_hispeed_delay[] = {
+       DEFAULT_ABOVE_HISPEED_DELAY };
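+
+/*
+ * Illustrative note: target_loads and above_hispeed_delay share the same
+ * flattened encoding, "value [freq:value ...]", stored as a single array
+ * of odd length.  Writing "85 1500000:90" to target_loads, for example,
+ * yields the array {85, 1500000, 90}: a target load of 85% below
+ * 1500000 kHz and 90% at or above it.  The lookup helpers below step
+ * through the frequency entries two at a time and return the value
+ * associated with the highest listed frequency not exceeding the
+ * requested one.
+ */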
+
+struct cpufreq_interactive_tunables {
+       int usage_count;
+       /* Hi speed to bump to from lo speed when load bursts (default max) */
+       unsigned int hispeed_freq;
+       /* Go to hi speed when CPU load at or above this value. */
+#define DEFAULT_GO_HISPEED_LOAD 99
+       unsigned long go_hispeed_load;
+       /* Target load. Lower values result in higher CPU speeds. */
+       spinlock_t target_loads_lock;
+       unsigned int *target_loads;
+       int ntarget_loads;
+       /*
+        * The minimum amount of time to spend at a frequency before we can ramp
+        * down.
+        */
+#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
+       unsigned long min_sample_time;
+       /*
+        * The sample rate of the timer used to increase frequency
+        */
+       unsigned long timer_rate;
+       /*
+        * Wait this long before raising speed above hispeed, by default a
+        * single timer interval.
+        */
+       spinlock_t above_hispeed_delay_lock;
+       unsigned int *above_hispeed_delay;
+       int nabove_hispeed_delay;
+       /* Non-zero means indefinite speed boost active */
+       int boost_val;
+       /* Duration of a boost pulse in usecs */
+       int boostpulse_duration_val;
+       /* End time of boost pulse in ktime converted to usecs */
+       u64 boostpulse_endtime;
+       /*
+        * Max additional time to wait in idle, beyond timer_rate, at speeds
+        * above minimum before wakeup to reduce speed, or -1 if unnecessary.
+        */
+#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
+       int timer_slack_val;
+       bool io_is_busy;
+};
+
+/* For cases where we have a single governor instance for the whole system */
+struct cpufreq_interactive_tunables *common_tunables;
+
+static struct attribute_group *get_sysfs_attr(void);
+
+static void cpufreq_interactive_timer_resched(
+       struct cpufreq_interactive_cpuinfo *pcpu)
+{
+       struct cpufreq_interactive_tunables *tunables =
+               pcpu->policy->governor_data;
+       unsigned long expires;
+       unsigned long flags;
+
+       spin_lock_irqsave(&pcpu->load_lock, flags);
+       pcpu->time_in_idle =
+               get_cpu_idle_time(smp_processor_id(),
+                                 &pcpu->time_in_idle_timestamp,
+                                 tunables->io_is_busy);
+       pcpu->cputime_speedadj = 0;
+       pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
+       expires = jiffies + usecs_to_jiffies(tunables->timer_rate);
+       mod_timer_pinned(&pcpu->cpu_timer, expires);
+
+       if (tunables->timer_slack_val >= 0 &&
+           pcpu->target_freq > pcpu->policy->min) {
+               expires += usecs_to_jiffies(tunables->timer_slack_val);
+               mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
+       }
+
+       spin_unlock_irqrestore(&pcpu->load_lock, flags);
+}
+
+/*
+ * The caller shall hold the enable_sem write semaphore to avoid any timer
+ * race. The cpu_timer and cpu_slack_timer must be deactivated when calling
+ * this function.
+ */
+static void cpufreq_interactive_timer_start(
+       struct cpufreq_interactive_tunables *tunables, int cpu)
+{
+       struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
+       unsigned long expires = jiffies +
+               usecs_to_jiffies(tunables->timer_rate);
+       unsigned long flags;
+
+       pcpu->cpu_timer.expires = expires;
+       add_timer_on(&pcpu->cpu_timer, cpu);
+       if (tunables->timer_slack_val >= 0 &&
+           pcpu->target_freq > pcpu->policy->min) {
+               expires += usecs_to_jiffies(tunables->timer_slack_val);
+               pcpu->cpu_slack_timer.expires = expires;
+               add_timer_on(&pcpu->cpu_slack_timer, cpu);
+       }
+
+       spin_lock_irqsave(&pcpu->load_lock, flags);
+       pcpu->time_in_idle =
+               get_cpu_idle_time(cpu, &pcpu->time_in_idle_timestamp,
+                                 tunables->io_is_busy);
+       pcpu->cputime_speedadj = 0;
+       pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
+       spin_unlock_irqrestore(&pcpu->load_lock, flags);
+}
+
+static unsigned int freq_to_above_hispeed_delay(
+       struct cpufreq_interactive_tunables *tunables,
+       unsigned int freq)
+{
+       int i;
+       unsigned int ret;
+       unsigned long flags;
+
+       spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
+
+       for (i = 0; i < tunables->nabove_hispeed_delay - 1 &&
+                       freq >= tunables->above_hispeed_delay[i+1]; i += 2)
+               ;
+
+       ret = tunables->above_hispeed_delay[i];
+       spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
+       return ret;
+}
+
+static unsigned int freq_to_targetload(
+       struct cpufreq_interactive_tunables *tunables, unsigned int freq)
+{
+       int i;
+       unsigned int ret;
+       unsigned long flags;
+
+       spin_lock_irqsave(&tunables->target_loads_lock, flags);
+
+       for (i = 0; i < tunables->ntarget_loads - 1 &&
+                   freq >= tunables->target_loads[i+1]; i += 2)
+               ;
+
+       ret = tunables->target_loads[i];
+       spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
+       return ret;
+}
+
+/*
+ * If increasing frequencies never map to a lower target load then
+ * choose_freq() will find the minimum frequency that does not exceed its
+ * target load given the current load.
+ */
+static unsigned int choose_freq(struct cpufreq_interactive_cpuinfo *pcpu,
+               unsigned int loadadjfreq)
+{
+       unsigned int freq = pcpu->policy->cur;
+       unsigned int prevfreq, freqmin, freqmax;
+       unsigned int tl;
+       int index;
+
+       freqmin = 0;
+       freqmax = UINT_MAX;
+
+       do {
+               prevfreq = freq;
+               tl = freq_to_targetload(pcpu->policy->governor_data, freq);
+
+               /*
+                * Find the lowest frequency where the computed load is less
+                * than or equal to the target load.
+                */
+
+               if (cpufreq_frequency_table_target(
+                           pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
+                           CPUFREQ_RELATION_L, &index))
+                       break;
+               freq = pcpu->freq_table[index].frequency;
+
+               if (freq > prevfreq) {
+                       /* The previous frequency is too low. */
+                       freqmin = prevfreq;
+
+                       if (freq >= freqmax) {
+                               /*
+                                * Find the highest frequency that is less
+                                * than freqmax.
+                                */
+                               if (cpufreq_frequency_table_target(
+                                           pcpu->policy, pcpu->freq_table,
+                                           freqmax - 1, CPUFREQ_RELATION_H,
+                                           &index))
+                                       break;
+                               freq = pcpu->freq_table[index].frequency;
+
+                               if (freq == freqmin) {
+                                       /*
+                                        * The first frequency below freqmax
+                                        * has already been found to be too
+                                        * low.  freqmax is the lowest speed
+                                        * we found that is fast enough.
+                                        */
+                                       freq = freqmax;
+                                       break;
+                               }
+                       }
+               } else if (freq < prevfreq) {
+                       /* The previous frequency is high enough. */
+                       freqmax = prevfreq;
+
+                       if (freq <= freqmin) {
+                               /*
+                                * Find the lowest frequency that is higher
+                                * than freqmin.
+                                */
+                               if (cpufreq_frequency_table_target(
+                                           pcpu->policy, pcpu->freq_table,
+                                           freqmin + 1, CPUFREQ_RELATION_L,
+                                           &index))
+                                       break;
+                               freq = pcpu->freq_table[index].frequency;
+
+                               /*
+                                * If freqmax is the first frequency above
+                                * freqmin then we have already found that
+                                * this speed is fast enough.
+                                */
+                               if (freq == freqmax)
+                                       break;
+                       }
+               }
+
+               /* If same frequency chosen as previous then done. */
+       } while (freq != prevfreq);
+
+       return freq;
+}
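+
+/*
+ * Worked example for choose_freq() (illustrative values): assume a
+ * frequency table of {396000, 792000, 996000, 1200000} kHz and a flat
+ * target load of 90.  Running at 396000 kHz with 99% load gives
+ * loadadjfreq = 99 * 396000 = 39204000, so the first pass picks the
+ * lowest frequency >= 39204000 / 90 = 435600, i.e. 792000.  The second
+ * pass re-evaluates at 792000 with the same target load and selects
+ * 792000 again, so freq == prevfreq and the loop terminates.
+ */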
+
+static u64 update_load(int cpu)
+{
+       struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
+       struct cpufreq_interactive_tunables *tunables =
+               pcpu->policy->governor_data;
+       u64 now;
+       u64 now_idle;
+       unsigned int delta_idle;
+       unsigned int delta_time;
+       u64 active_time;
+
+       now_idle = get_cpu_idle_time(cpu, &now, tunables->io_is_busy);
+       delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
+       delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);
+
+       if (delta_time <= delta_idle)
+               active_time = 0;
+       else
+               active_time = delta_time - delta_idle;
+
+       pcpu->cputime_speedadj += active_time * pcpu->policy->cur;
+
+       pcpu->time_in_idle = now_idle;
+       pcpu->time_in_idle_timestamp = now;
+       return now;
+}
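+
+/*
+ * Illustrative note: cputime_speedadj accumulates active_time multiplied
+ * by the current frequency.  Over a 20000 us window with 10000 us
+ * non-idle at 996000 kHz it grows by 10000 * 996000; the timer below
+ * divides by the window length and multiplies by 100, giving
+ * loadadjfreq = 498000 * 100 = 49800000, which at a target_freq of
+ * 996000 is a cpu_load of 50.
+ */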
+
+static void cpufreq_interactive_timer(unsigned long data)
+{
+       u64 now;
+       unsigned int delta_time;
+       u64 cputime_speedadj;
+       int cpu_load;
+       struct cpufreq_interactive_cpuinfo *pcpu =
+               &per_cpu(cpuinfo, data);
+       struct cpufreq_interactive_tunables *tunables =
+               pcpu->policy->governor_data;
+       unsigned int new_freq;
+       unsigned int loadadjfreq;
+       unsigned int index;
+       unsigned long flags;
+       bool boosted;
+
+       if (!down_read_trylock(&pcpu->enable_sem))
+               return;
+       if (!pcpu->governor_enabled)
+               goto exit;
+
+       spin_lock_irqsave(&pcpu->load_lock, flags);
+       now = update_load(data);
+       delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
+       cputime_speedadj = pcpu->cputime_speedadj;
+       spin_unlock_irqrestore(&pcpu->load_lock, flags);
+
+       if (WARN_ON_ONCE(!delta_time))
+               goto rearm;
+
+       do_div(cputime_speedadj, delta_time);
+       loadadjfreq = (unsigned int)cputime_speedadj * 100;
+       cpu_load = loadadjfreq / pcpu->target_freq;
+       boosted = tunables->boost_val || now < tunables->boostpulse_endtime;
+
+       if (cpu_load >= tunables->go_hispeed_load || boosted) {
+               if (pcpu->target_freq < tunables->hispeed_freq) {
+                       new_freq = tunables->hispeed_freq;
+               } else {
+                       new_freq = choose_freq(pcpu, loadadjfreq);
+
+                       if (new_freq < tunables->hispeed_freq)
+                               new_freq = tunables->hispeed_freq;
+               }
+       } else {
+               new_freq = choose_freq(pcpu, loadadjfreq);
+       }
+
+       if (pcpu->target_freq >= tunables->hispeed_freq &&
+           new_freq > pcpu->target_freq &&
+           now - pcpu->hispeed_validate_time <
+           freq_to_above_hispeed_delay(tunables, pcpu->target_freq)) {
+               trace_cpufreq_interactive_notyet(
+                       data, cpu_load, pcpu->target_freq,
+                       pcpu->policy->cur, new_freq);
+               goto rearm;
+       }
+
+       pcpu->hispeed_validate_time = now;
+
+       if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
+                                          new_freq, CPUFREQ_RELATION_L,
+                                          &index))
+               goto rearm;
+
+       new_freq = pcpu->freq_table[index].frequency;
+
+       /*
+        * Do not scale below floor_freq unless we have been at or above the
+        * floor frequency for the minimum sample time since last validated.
+        */
+       if (new_freq < pcpu->floor_freq) {
+               if (now - pcpu->floor_validate_time <
+                               tunables->min_sample_time) {
+                       trace_cpufreq_interactive_notyet(
+                               data, cpu_load, pcpu->target_freq,
+                               pcpu->policy->cur, new_freq);
+                       goto rearm;
+               }
+       }
+
+       /*
+        * Update the timestamp for checking whether speed has been held at
+        * or above the selected frequency for a minimum of min_sample_time,
+        * if not boosted to hispeed_freq.  If boosted to hispeed_freq then we
+        * allow the speed to drop as soon as the boostpulse duration expires
+        * (or the indefinite boost is turned off).
+        */
+
+       if (!boosted || new_freq > tunables->hispeed_freq) {
+               pcpu->floor_freq = new_freq;
+               pcpu->floor_validate_time = now;
+       }
+
+       if (pcpu->target_freq == new_freq) {
+               trace_cpufreq_interactive_already(
+                       data, cpu_load, pcpu->target_freq,
+                       pcpu->policy->cur, new_freq);
+               goto rearm_if_notmax;
+       }
+
+       trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
+                                        pcpu->policy->cur, new_freq);
+
+       pcpu->target_freq = new_freq;
+       spin_lock_irqsave(&speedchange_cpumask_lock, flags);
+       cpumask_set_cpu(data, &speedchange_cpumask);
+       spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
+       wake_up_process(speedchange_task);
+
+rearm_if_notmax:
+       /*
+        * Already at max speed with no need to change it; wait until the
+        * next idle to re-evaluate. No timer needed.
+        */
+       if (pcpu->target_freq == pcpu->policy->max)
+               goto exit;
+
+rearm:
+       if (!timer_pending(&pcpu->cpu_timer))
+               cpufreq_interactive_timer_resched(pcpu);
+
+exit:
+       up_read(&pcpu->enable_sem);
+       return;
+}
+
+static void cpufreq_interactive_idle_start(void)
+{
+       struct cpufreq_interactive_cpuinfo *pcpu =
+               &per_cpu(cpuinfo, smp_processor_id());
+       int pending;
+
+       if (!down_read_trylock(&pcpu->enable_sem))
+               return;
+       if (!pcpu->governor_enabled) {
+               up_read(&pcpu->enable_sem);
+               return;
+       }
+
+       pending = timer_pending(&pcpu->cpu_timer);
+
+       if (pcpu->target_freq != pcpu->policy->min) {
+               /*
+                * Entering idle while not at lowest speed.  On some
+                * platforms this can hold the other CPU(s) at that speed
+                * even though the CPU is idle. Set a timer to re-evaluate
+                * speed so this idle CPU doesn't hold the other CPUs above
+                * min indefinitely.  This should probably be a quirk of
+                * the CPUFreq driver.
+                */
+               if (!pending)
+                       cpufreq_interactive_timer_resched(pcpu);
+       }
+
+       up_read(&pcpu->enable_sem);
+}
+
+static void cpufreq_interactive_idle_end(void)
+{
+       struct cpufreq_interactive_cpuinfo *pcpu =
+               &per_cpu(cpuinfo, smp_processor_id());
+
+       if (!down_read_trylock(&pcpu->enable_sem))
+               return;
+       if (!pcpu->governor_enabled) {
+               up_read(&pcpu->enable_sem);
+               return;
+       }
+
+       /* Arm the timer for 1-2 ticks later if not already. */
+       if (!timer_pending(&pcpu->cpu_timer)) {
+               cpufreq_interactive_timer_resched(pcpu);
+       } else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
+               del_timer(&pcpu->cpu_timer);
+               del_timer(&pcpu->cpu_slack_timer);
+               cpufreq_interactive_timer(smp_processor_id());
+       }
+
+       up_read(&pcpu->enable_sem);
+}
+
+static int cpufreq_interactive_speedchange_task(void *data)
+{
+       unsigned int cpu;
+       cpumask_t tmp_mask;
+       unsigned long flags;
+       struct cpufreq_interactive_cpuinfo *pcpu;
+
+       while (1) {
+               set_current_state(TASK_INTERRUPTIBLE);
+               spin_lock_irqsave(&speedchange_cpumask_lock, flags);
+
+               if (cpumask_empty(&speedchange_cpumask)) {
+                       spin_unlock_irqrestore(&speedchange_cpumask_lock,
+                                              flags);
+                       schedule();
+
+                       if (kthread_should_stop())
+                               break;
+
+                       spin_lock_irqsave(&speedchange_cpumask_lock, flags);
+               }
+
+               set_current_state(TASK_RUNNING);
+               tmp_mask = speedchange_cpumask;
+               cpumask_clear(&speedchange_cpumask);
+               spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
+
+               for_each_cpu(cpu, &tmp_mask) {
+                       unsigned int j;
+                       unsigned int max_freq = 0;
+
+                       pcpu = &per_cpu(cpuinfo, cpu);
+                       if (!down_read_trylock(&pcpu->enable_sem))
+                               continue;
+                       if (!pcpu->governor_enabled) {
+                               up_read(&pcpu->enable_sem);
+                               continue;
+                       }
+
+                       for_each_cpu(j, pcpu->policy->cpus) {
+                               struct cpufreq_interactive_cpuinfo *pjcpu =
+                                       &per_cpu(cpuinfo, j);
+
+                               if (pjcpu->target_freq > max_freq)
+                                       max_freq = pjcpu->target_freq;
+                       }
+
+                       if (max_freq != pcpu->policy->cur)
+                               __cpufreq_driver_target(pcpu->policy,
+                                                       max_freq,
+                                                       CPUFREQ_RELATION_H);
+                       trace_cpufreq_interactive_setspeed(cpu,
+                                                    pcpu->target_freq,
+                                                    pcpu->policy->cur);
+
+                       up_read(&pcpu->enable_sem);
+               }
+       }
+
+       return 0;
+}
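+
+/*
+ * Design note (illustrative): frequency transitions run in this dedicated
+ * SCHED_FIFO kthread rather than in the per-CPU timer so that a busy
+ * workload cannot delay its own speed increase.  Because all CPUs in a
+ * policy share one clock, the task programs the maximum of the per-CPU
+ * target_freq values: with CPUs 0-3 in one policy targeting
+ * {792000, 1200000, 396000, 396000} kHz, the policy is set to 1200000.
+ */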
+
+static void cpufreq_interactive_boost(void)
+{
+       int i;
+       int anyboost = 0;
+       unsigned long flags;
+       struct cpufreq_interactive_cpuinfo *pcpu;
+       struct cpufreq_interactive_tunables *tunables;
+
+       spin_lock_irqsave(&speedchange_cpumask_lock, flags);
+
+       for_each_online_cpu(i) {
+               pcpu = &per_cpu(cpuinfo, i);
+               tunables = pcpu->policy->governor_data;
+
+               if (pcpu->target_freq < tunables->hispeed_freq) {
+                       pcpu->target_freq = tunables->hispeed_freq;
+                       cpumask_set_cpu(i, &speedchange_cpumask);
+                       pcpu->hispeed_validate_time =
+                               ktime_to_us(ktime_get());
+                       anyboost = 1;
+               }
+
+               /*
+                * Set floor freq and (re)start timer for when last
+                * validated.
+                */
+
+               pcpu->floor_freq = tunables->hispeed_freq;
+               pcpu->floor_validate_time = ktime_to_us(ktime_get());
+       }
+
+       spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
+
+       if (anyboost)
+               wake_up_process(speedchange_task);
+}
+
+static int cpufreq_interactive_notifier(
+       struct notifier_block *nb, unsigned long val, void *data)
+{
+       struct cpufreq_freqs *freq = data;
+       struct cpufreq_interactive_cpuinfo *pcpu;
+       int cpu;
+       unsigned long flags;
+
+       if (val == CPUFREQ_POSTCHANGE) {
+               pcpu = &per_cpu(cpuinfo, freq->cpu);
+               if (!down_read_trylock(&pcpu->enable_sem))
+                       return 0;
+               if (!pcpu->governor_enabled) {
+                       up_read(&pcpu->enable_sem);
+                       return 0;
+               }
+
+               for_each_cpu(cpu, pcpu->policy->cpus) {
+                       struct cpufreq_interactive_cpuinfo *pjcpu =
+                               &per_cpu(cpuinfo, cpu);
+                       if (cpu != freq->cpu) {
+                               if (!down_read_trylock(&pjcpu->enable_sem))
+                                       continue;
+                               if (!pjcpu->governor_enabled) {
+                                       up_read(&pjcpu->enable_sem);
+                                       continue;
+                               }
+                       }
+                       spin_lock_irqsave(&pjcpu->load_lock, flags);
+                       update_load(cpu);
+                       spin_unlock_irqrestore(&pjcpu->load_lock, flags);
+                       if (cpu != freq->cpu)
+                               up_read(&pjcpu->enable_sem);
+               }
+
+               up_read(&pcpu->enable_sem);
+       }
+       return 0;
+}
+
+static struct notifier_block cpufreq_notifier_block = {
+       .notifier_call = cpufreq_interactive_notifier,
+};
+
+static unsigned int *get_tokenized_data(const char *buf, int *num_tokens)
+{
+       const char *cp;
+       int i;
+       int ntokens = 1;
+       unsigned int *tokenized_data;
+       int err = -EINVAL;
+
+       cp = buf;
+       while ((cp = strpbrk(cp + 1, " :")))
+               ntokens++;
+
+       if (!(ntokens & 0x1))
+               goto err;
+
+       tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
+       if (!tokenized_data) {
+               err = -ENOMEM;
+               goto err;
+       }
+
+       cp = buf;
+       i = 0;
+       while (i < ntokens) {
+               if (sscanf(cp, "%u", &tokenized_data[i++]) != 1)
+                       goto err_kfree;
+
+               cp = strpbrk(cp, " :");
+               if (!cp)
+                       break;
+               cp++;
+       }
+
+       if (i != ntokens)
+               goto err_kfree;
+
+       *num_tokens = ntokens;
+       return tokenized_data;
+
+err_kfree:
+       kfree(tokenized_data);
+err:
+       return ERR_PTR(err);
+}
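+
+/*
+ * Example (illustrative): get_tokenized_data("85 1500000:90", &n) counts
+ * two separators (' ' and ':'), so ntokens = 3 (only odd counts are
+ * accepted), and returns the array {85, 1500000, 90} with n set to 3.
+ */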
+
+static ssize_t show_target_loads(
+       struct cpufreq_interactive_tunables *tunables,
+       char *buf)
+{
+       int i;
+       ssize_t ret = 0;
+       unsigned long flags;
+
+       spin_lock_irqsave(&tunables->target_loads_lock, flags);
+
+       for (i = 0; i < tunables->ntarget_loads; i++)
+               ret += sprintf(buf + ret, "%u%s", tunables->target_loads[i],
+                              i & 0x1 ? ":" : " ");
+
+       sprintf(buf + ret - 1, "\n");
+       spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
+       return ret;
+}
+
+static ssize_t store_target_loads(
+       struct cpufreq_interactive_tunables *tunables,
+       const char *buf, size_t count)
+{
+       int ntokens;
+       unsigned int *new_target_loads = NULL;
+       unsigned long flags;
+
+       new_target_loads = get_tokenized_data(buf, &ntokens);
+       if (IS_ERR(new_target_loads))
+               return PTR_RET(new_target_loads);
+
+       spin_lock_irqsave(&tunables->target_loads_lock, flags);
+       if (tunables->target_loads != default_target_loads)
+               kfree(tunables->target_loads);
+       tunables->target_loads = new_target_loads;
+       tunables->ntarget_loads = ntokens;
+       spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
+       return count;
+}
+
+static ssize_t show_above_hispeed_delay(
+       struct cpufreq_interactive_tunables *tunables, char *buf)
+{
+       int i;
+       ssize_t ret = 0;
+       unsigned long flags;
+
+       spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
+
+       for (i = 0; i < tunables->nabove_hispeed_delay; i++)
+               ret += sprintf(buf + ret, "%u%s",
+                              tunables->above_hispeed_delay[i],
+                              i & 0x1 ? ":" : " ");
+
+       sprintf(buf + ret - 1, "\n");
+       spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
+       return ret;
+}
+
+static ssize_t store_above_hispeed_delay(
+       struct cpufreq_interactive_tunables *tunables,
+       const char *buf, size_t count)
+{
+       int ntokens;
+       unsigned int *new_above_hispeed_delay = NULL;
+       unsigned long flags;
+
+       new_above_hispeed_delay = get_tokenized_data(buf, &ntokens);
+       if (IS_ERR(new_above_hispeed_delay))
+               return PTR_RET(new_above_hispeed_delay);
+
+       spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
+       if (tunables->above_hispeed_delay != default_above_hispeed_delay)
+               kfree(tunables->above_hispeed_delay);
+       tunables->above_hispeed_delay = new_above_hispeed_delay;
+       tunables->nabove_hispeed_delay = ntokens;
+       spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
+       return count;
+}
+
+static ssize_t show_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
+               char *buf)
+{
+       return sprintf(buf, "%u\n", tunables->hispeed_freq);
+}
+
+static ssize_t store_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
+               const char *buf, size_t count)
+{
+       int ret;
+       unsigned long val;
+
+       ret = strict_strtoul(buf, 0, &val);
+       if (ret < 0)
+               return ret;
+       tunables->hispeed_freq = val;
+       return count;
+}
+
+static ssize_t show_go_hispeed_load(struct cpufreq_interactive_tunables
+               *tunables, char *buf)
+{
+       return sprintf(buf, "%lu\n", tunables->go_hispeed_load);
+}
+
+static ssize_t store_go_hispeed_load(struct cpufreq_interactive_tunables
+               *tunables, const char *buf, size_t count)
+{
+       int ret;
+       unsigned long val;
+
+       ret = strict_strtoul(buf, 0, &val);
+       if (ret < 0)
+               return ret;
+       tunables->go_hispeed_load = val;
+       return count;
+}
+
+static ssize_t show_min_sample_time(struct cpufreq_interactive_tunables
+               *tunables, char *buf)
+{
+       return sprintf(buf, "%lu\n", tunables->min_sample_time);
+}
+
+static ssize_t store_min_sample_time(struct cpufreq_interactive_tunables
+               *tunables, const char *buf, size_t count)
+{
+       int ret;
+       unsigned long val;
+
+       ret = strict_strtoul(buf, 0, &val);
+       if (ret < 0)
+               return ret;
+       tunables->min_sample_time = val;
+       return count;
+}
+
+static ssize_t show_timer_rate(struct cpufreq_interactive_tunables *tunables,
+               char *buf)
+{
+       return sprintf(buf, "%lu\n", tunables->timer_rate);
+}
+
+static ssize_t store_timer_rate(struct cpufreq_interactive_tunables *tunables,
+               const char *buf, size_t count)
+{
+       int ret;
+       unsigned long val;
+
+       ret = strict_strtoul(buf, 0, &val);
+       if (ret < 0)
+               return ret;
+       tunables->timer_rate = val;
+       return count;
+}
+
+static ssize_t show_timer_slack(struct cpufreq_interactive_tunables *tunables,
+               char *buf)
+{
+       return sprintf(buf, "%d\n", tunables->timer_slack_val);
+}
+
+static ssize_t store_timer_slack(struct cpufreq_interactive_tunables *tunables,
+               const char *buf, size_t count)
+{
+       int ret;
+       long val;
+
+       ret = kstrtol(buf, 10, &val);
+       if (ret < 0)
+               return ret;
+
+       tunables->timer_slack_val = val;
+       return count;
+}
+
+static ssize_t show_boost(struct cpufreq_interactive_tunables *tunables,
+                         char *buf)
+{
+       return sprintf(buf, "%d\n", tunables->boost_val);
+}
+
+static ssize_t store_boost(struct cpufreq_interactive_tunables *tunables,
+                          const char *buf, size_t count)
+{
+       int ret;
+       unsigned long val;
+
+       ret = kstrtoul(buf, 0, &val);
+       if (ret < 0)
+               return ret;
+
+       tunables->boost_val = val;
+
+       if (tunables->boost_val) {
+               trace_cpufreq_interactive_boost("on");
+               cpufreq_interactive_boost();
+       } else {
+               trace_cpufreq_interactive_unboost("off");
+       }
+
+       return count;
+}
+
+static ssize_t store_boostpulse(struct cpufreq_interactive_tunables *tunables,
+                               const char *buf, size_t count)
+{
+       int ret;
+       unsigned long val;
+
+       ret = kstrtoul(buf, 0, &val);
+       if (ret < 0)
+               return ret;
+
+       tunables->boostpulse_endtime = ktime_to_us(ktime_get()) +
+               tunables->boostpulse_duration_val;
+       trace_cpufreq_interactive_boost("pulse");
+       cpufreq_interactive_boost();
+       return count;
+}
+
+static ssize_t show_boostpulse_duration(struct cpufreq_interactive_tunables
+               *tunables, char *buf)
+{
+       return sprintf(buf, "%d\n", tunables->boostpulse_duration_val);
+}
+
+static ssize_t store_boostpulse_duration(struct cpufreq_interactive_tunables
+               *tunables, const char *buf, size_t count)
+{
+       int ret;
+       unsigned long val;
+
+       ret = kstrtoul(buf, 0, &val);
+       if (ret < 0)
+               return ret;
+
+       tunables->boostpulse_duration_val = val;
+       return count;
+}
+
+static ssize_t show_io_is_busy(struct cpufreq_interactive_tunables *tunables,
+               char *buf)
+{
+       return sprintf(buf, "%u\n", tunables->io_is_busy);
+}
+
+static ssize_t store_io_is_busy(struct cpufreq_interactive_tunables *tunables,
+               const char *buf, size_t count)
+{
+       int ret;
+       unsigned long val;
+
+       ret = kstrtoul(buf, 0, &val);
+       if (ret < 0)
+               return ret;
+       tunables->io_is_busy = val;
+       return count;
+}
+
+/*
+ * Create show/store routines
+ * - sys: One governor instance for complete SYSTEM
+ * - pol: One governor instance per struct cpufreq_policy
+ */
+#define show_gov_pol_sys(file_name)                                    \
+static ssize_t show_##file_name##_gov_sys                              \
+(struct kobject *kobj, struct attribute *attr, char *buf)              \
+{                                                                      \
+       return show_##file_name(common_tunables, buf);                  \
+}                                                                      \
+                                                                       \
+static ssize_t show_##file_name##_gov_pol                              \
+(struct cpufreq_policy *policy, char *buf)                             \
+{                                                                      \
+       return show_##file_name(policy->governor_data, buf);            \
+}
+
+#define store_gov_pol_sys(file_name)                                   \
+static ssize_t store_##file_name##_gov_sys                             \
+(struct kobject *kobj, struct attribute *attr, const char *buf,                \
+       size_t count)                                                   \
+{                                                                      \
+       return store_##file_name(common_tunables, buf, count);          \
+}                                                                      \
+                                                                       \
+static ssize_t store_##file_name##_gov_pol                             \
+(struct cpufreq_policy *policy, const char *buf, size_t count)         \
+{                                                                      \
+       return store_##file_name(policy->governor_data, buf, count);    \
+}
+
+#define show_store_gov_pol_sys(file_name)                              \
+show_gov_pol_sys(file_name);                                           \
+store_gov_pol_sys(file_name)
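+
+/*
+ * Expansion example (illustrative): show_store_gov_pol_sys(timer_rate)
+ * generates four wrappers, show_timer_rate_gov_sys() and
+ * store_timer_rate_gov_sys() operating on common_tunables, plus
+ * show_timer_rate_gov_pol() and store_timer_rate_gov_pol() operating on
+ * policy->governor_data, all forwarding to show_timer_rate() and
+ * store_timer_rate() above.
+ */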
+
+show_store_gov_pol_sys(target_loads);
+show_store_gov_pol_sys(above_hispeed_delay);
+show_store_gov_pol_sys(hispeed_freq);
+show_store_gov_pol_sys(go_hispeed_load);
+show_store_gov_pol_sys(min_sample_time);
+show_store_gov_pol_sys(timer_rate);
+show_store_gov_pol_sys(timer_slack);
+show_store_gov_pol_sys(boost);
+store_gov_pol_sys(boostpulse);
+show_store_gov_pol_sys(boostpulse_duration);
+show_store_gov_pol_sys(io_is_busy);
+
+#define gov_sys_attr_rw(_name)                                         \
+static struct global_attr _name##_gov_sys =                            \
+__ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys)
+
+#define gov_pol_attr_rw(_name)                                         \
+static struct freq_attr _name##_gov_pol =                              \
+__ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)
+
+#define gov_sys_pol_attr_rw(_name)                                     \
+       gov_sys_attr_rw(_name);                                         \
+       gov_pol_attr_rw(_name)
+
+gov_sys_pol_attr_rw(target_loads);
+gov_sys_pol_attr_rw(above_hispeed_delay);
+gov_sys_pol_attr_rw(hispeed_freq);
+gov_sys_pol_attr_rw(go_hispeed_load);
+gov_sys_pol_attr_rw(min_sample_time);
+gov_sys_pol_attr_rw(timer_rate);
+gov_sys_pol_attr_rw(timer_slack);
+gov_sys_pol_attr_rw(boost);
+gov_sys_pol_attr_rw(boostpulse_duration);
+gov_sys_pol_attr_rw(io_is_busy);
+
+static struct global_attr boostpulse_gov_sys =
+       __ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_sys);
+
+static struct freq_attr boostpulse_gov_pol =
+       __ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_pol);
+
+/* One Governor instance for entire system */
+static struct attribute *interactive_attributes_gov_sys[] = {
+       &target_loads_gov_sys.attr,
+       &above_hispeed_delay_gov_sys.attr,
+       &hispeed_freq_gov_sys.attr,
+       &go_hispeed_load_gov_sys.attr,
+       &min_sample_time_gov_sys.attr,
+       &timer_rate_gov_sys.attr,
+       &timer_slack_gov_sys.attr,
+       &boost_gov_sys.attr,
+       &boostpulse_gov_sys.attr,
+       &boostpulse_duration_gov_sys.attr,
+       &io_is_busy_gov_sys.attr,
+       NULL,
+};
+
+static struct attribute_group interactive_attr_group_gov_sys = {
+       .attrs = interactive_attributes_gov_sys,
+       .name = "interactive",
+};
+
+/* Per policy governor instance */
+static struct attribute *interactive_attributes_gov_pol[] = {
+       &target_loads_gov_pol.attr,
+       &above_hispeed_delay_gov_pol.attr,
+       &hispeed_freq_gov_pol.attr,
+       &go_hispeed_load_gov_pol.attr,
+       &min_sample_time_gov_pol.attr,
+       &timer_rate_gov_pol.attr,
+       &timer_slack_gov_pol.attr,
+       &boost_gov_pol.attr,
+       &boostpulse_gov_pol.attr,
+       &boostpulse_duration_gov_pol.attr,
+       &io_is_busy_gov_pol.attr,
+       NULL,
+};
+
+static struct attribute_group interactive_attr_group_gov_pol = {
+       .attrs = interactive_attributes_gov_pol,
+       .name = "interactive",
+};
+
+static struct attribute_group *get_sysfs_attr(void)
+{
+       if (have_governor_per_policy())
+               return &interactive_attr_group_gov_pol;
+       else
+               return &interactive_attr_group_gov_sys;
+}
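+
+/*
+ * Illustrative sysfs layout (paths assume the standard cpufreq sysfs
+ * hierarchy): with a single system-wide governor instance the tunables
+ * appear under /sys/devices/system/cpu/cpufreq/interactive/, e.g.
+ *
+ *   echo 30000 > /sys/devices/system/cpu/cpufreq/interactive/timer_rate
+ *
+ * With a governor-per-policy driver, each policy instead exposes its own
+ * "interactive" attribute group under its policy directory.
+ */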
+
+static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
+                                            unsigned long val,
+                                            void *data)
+{
+       switch (val) {
+       case IDLE_START:
+               cpufreq_interactive_idle_start();
+               break;
+       case IDLE_END:
+               cpufreq_interactive_idle_end();
+               break;
+       }
+
+       return 0;
+}
+
+static struct notifier_block cpufreq_interactive_idle_nb = {
+       .notifier_call = cpufreq_interactive_idle_notifier,
+};
+
+static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
+               unsigned int event)
+{
+       int rc;
+       unsigned int j;
+       struct cpufreq_interactive_cpuinfo *pcpu;
+       struct cpufreq_frequency_table *freq_table;
+       struct cpufreq_interactive_tunables *tunables;
+
+       if (have_governor_per_policy())
+               tunables = policy->governor_data;
+       else
+               tunables = common_tunables;
+
+       WARN_ON(!tunables && (event != CPUFREQ_GOV_POLICY_INIT));
+
+       switch (event) {
+       case CPUFREQ_GOV_POLICY_INIT:
+               if (have_governor_per_policy()) {
+                       WARN_ON(tunables);
+               } else if (tunables) {
+                       tunables->usage_count++;
+                       policy->governor_data = tunables;
+                       return 0;
+               }
+
+               tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
+               if (!tunables) {
+                       pr_err("%s: POLICY_INIT: kzalloc failed\n", __func__);
+                       return -ENOMEM;
+               }
+
+               tunables->usage_count = 1;
+               tunables->above_hispeed_delay = default_above_hispeed_delay;
+               tunables->nabove_hispeed_delay =
+                       ARRAY_SIZE(default_above_hispeed_delay);
+               tunables->go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
+               tunables->target_loads = default_target_loads;
+               tunables->ntarget_loads = ARRAY_SIZE(default_target_loads);
+               tunables->min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
+               tunables->timer_rate = DEFAULT_TIMER_RATE;
+               tunables->boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
+               tunables->timer_slack_val = DEFAULT_TIMER_SLACK;
+
+               spin_lock_init(&tunables->target_loads_lock);
+               spin_lock_init(&tunables->above_hispeed_delay_lock);
+
+               policy->governor_data = tunables;
+               if (!have_governor_per_policy())
+                       common_tunables = tunables;
+
+               rc = sysfs_create_group(get_governor_parent_kobj(policy),
+                               get_sysfs_attr());
+               if (rc) {
+                       kfree(tunables);
+                       policy->governor_data = NULL;
+                       if (!have_governor_per_policy())
+                               common_tunables = NULL;
+                       return rc;
+               }
+
+               if (!policy->governor->initialized) {
+                       idle_notifier_register(&cpufreq_interactive_idle_nb);
+                       cpufreq_register_notifier(&cpufreq_notifier_block,
+                                       CPUFREQ_TRANSITION_NOTIFIER);
+               }
+
+               break;
+
+       case CPUFREQ_GOV_POLICY_EXIT:
+               if (!--tunables->usage_count) {
+                       if (policy->governor->initialized == 1) {
+                               cpufreq_unregister_notifier(&cpufreq_notifier_block,
+                                               CPUFREQ_TRANSITION_NOTIFIER);
+                               idle_notifier_unregister(&cpufreq_interactive_idle_nb);
+                       }
+
+                       sysfs_remove_group(get_governor_parent_kobj(policy),
+                                       get_sysfs_attr());
+                       kfree(tunables);
+                       common_tunables = NULL;
+               }
+
+               policy->governor_data = NULL;
+               break;
+
+       case CPUFREQ_GOV_START:
+               mutex_lock(&gov_lock);
+
+               freq_table = cpufreq_frequency_get_table(policy->cpu);
+               if (!tunables->hispeed_freq)
+                       tunables->hispeed_freq = policy->max;
+
+               for_each_cpu(j, policy->cpus) {
+                       pcpu = &per_cpu(cpuinfo, j);
+                       pcpu->policy = policy;
+                       pcpu->target_freq = policy->cur;
+                       pcpu->freq_table = freq_table;
+                       pcpu->floor_freq = pcpu->target_freq;
+                       pcpu->floor_validate_time =
+                               ktime_to_us(ktime_get());
+                       pcpu->hispeed_validate_time =
+                               pcpu->floor_validate_time;
+                       down_write(&pcpu->enable_sem);
+                       del_timer_sync(&pcpu->cpu_timer);
+                       del_timer_sync(&pcpu->cpu_slack_timer);
+                       cpufreq_interactive_timer_start(tunables, j);
+                       pcpu->governor_enabled = 1;
+                       up_write(&pcpu->enable_sem);
+               }
+
+               mutex_unlock(&gov_lock);
+               break;
+
+       case CPUFREQ_GOV_STOP:
+               mutex_lock(&gov_lock);
+               for_each_cpu(j, policy->cpus) {
+                       pcpu = &per_cpu(cpuinfo, j);
+                       down_write(&pcpu->enable_sem);
+                       pcpu->governor_enabled = 0;
+                       del_timer_sync(&pcpu->cpu_timer);
+                       del_timer_sync(&pcpu->cpu_slack_timer);
+                       up_write(&pcpu->enable_sem);
+               }
+
+               mutex_unlock(&gov_lock);
+               break;
+
+       case CPUFREQ_GOV_LIMITS:
+               if (policy->max < policy->cur)
+                       __cpufreq_driver_target(policy,
+                                       policy->max, CPUFREQ_RELATION_H);
+               else if (policy->min > policy->cur)
+                       __cpufreq_driver_target(policy,
+                                       policy->min, CPUFREQ_RELATION_L);
+               for_each_cpu(j, policy->cpus) {
+                       pcpu = &per_cpu(cpuinfo, j);
+
+                       /* hold write semaphore to avoid race */
+                       down_write(&pcpu->enable_sem);
+                       if (pcpu->governor_enabled == 0) {
+                               up_write(&pcpu->enable_sem);
+                               continue;
+                       }
+
+                       /* update target_freq first */
+                       if (policy->max < pcpu->target_freq)
+                               pcpu->target_freq = policy->max;
+                       else if (policy->min > pcpu->target_freq)
+                               pcpu->target_freq = policy->min;
+
+                       /*
+                        * Reschedule the timer. Delete the timers first, else
+                        * the timer callback may return without re-arming the
+                        * timer if it fails to acquire the semaphore; that
+                        * race could leave the timer stopped unexpectedly.
+                        */
+                       del_timer_sync(&pcpu->cpu_timer);
+                       del_timer_sync(&pcpu->cpu_slack_timer);
+                       cpufreq_interactive_timer_start(tunables, j);
+                       up_write(&pcpu->enable_sem);
+               }
+               break;
+       }
+       return 0;
+}
+
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
+static
+#endif
+struct cpufreq_governor cpufreq_gov_interactive = {
+       .name = "interactive",
+       .governor = cpufreq_governor_interactive,
+       .max_transition_latency = 10000000,
+       .owner = THIS_MODULE,
+};
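+
+/*
+ * Note: max_transition_latency is in nanoseconds, so 10000000 is 10 ms;
+ * the cpufreq core will fall back to the performance governor (where
+ * available) for drivers whose transition latency exceeds this bound.
+ */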
+
+static void cpufreq_interactive_nop_timer(unsigned long data)
+{
+}
+
+static int __init cpufreq_interactive_init(void)
+{
+       unsigned int i;
+       struct cpufreq_interactive_cpuinfo *pcpu;
+       struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
+
+       /* Initialize per-cpu timers */
+       for_each_possible_cpu(i) {
+               pcpu = &per_cpu(cpuinfo, i);
+               init_timer_deferrable(&pcpu->cpu_timer);
+               pcpu->cpu_timer.function = cpufreq_interactive_timer;
+               pcpu->cpu_timer.data = i;
+               init_timer(&pcpu->cpu_slack_timer);
+               pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
+               spin_lock_init(&pcpu->load_lock);
+               init_rwsem(&pcpu->enable_sem);
+       }
+
+       spin_lock_init(&speedchange_cpumask_lock);
+       mutex_init(&gov_lock);
+       speedchange_task =
+               kthread_create(cpufreq_interactive_speedchange_task, NULL,
+                              "cfinteractive");
+       if (IS_ERR(speedchange_task))
+               return PTR_ERR(speedchange_task);
+
+       sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
+       get_task_struct(speedchange_task);
+
+       /* NB: wake up so the thread does not look hung to the freezer */
+       wake_up_process(speedchange_task);
+
+       return cpufreq_register_governor(&cpufreq_gov_interactive);
+}
+
+#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
+fs_initcall(cpufreq_interactive_init);
+#else
+module_init(cpufreq_interactive_init);
+#endif
+
+static void __exit cpufreq_interactive_exit(void)
+{
+       cpufreq_unregister_governor(&cpufreq_gov_interactive);
+       kthread_stop(speedchange_task);
+       put_task_struct(speedchange_task);
+}
+
+module_exit(cpufreq_interactive_exit);
+
+MODULE_AUTHOR("Mike Chan <mike@android.com>");
+MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
+       "latency-sensitive workloads");
+MODULE_LICENSE("GPL");
index 4d89e0e6f9ccaa641dd2a6b2c35c8c7b7bbf2916..b183c1e8e5c15d86892d4269fb33ac4ebde95a7a 100644 (file)
@@ -432,6 +432,9 @@ extern struct cpufreq_governor cpufreq_gov_ondemand;
 #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE)
 extern struct cpufreq_governor cpufreq_gov_conservative;
 #define CPUFREQ_DEFAULT_GOVERNOR       (&cpufreq_gov_conservative)
+#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE)
+extern struct cpufreq_governor cpufreq_gov_interactive;
+#define CPUFREQ_DEFAULT_GOVERNOR        (&cpufreq_gov_interactive)
 #endif
 
 /*********************************************************************
diff --git a/include/trace/events/cpufreq_interactive.h b/include/trace/events/cpufreq_interactive.h
new file mode 100755 (executable)
index 0000000..951e6ca
--- /dev/null
@@ -0,0 +1,112 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cpufreq_interactive
+
+#if !defined(_TRACE_CPUFREQ_INTERACTIVE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_CPUFREQ_INTERACTIVE_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(set,
+       TP_PROTO(u32 cpu_id, unsigned long targfreq,
+                unsigned long actualfreq),
+       TP_ARGS(cpu_id, targfreq, actualfreq),
+
+       TP_STRUCT__entry(
+           __field(          u32, cpu_id    )
+           __field(unsigned long, targfreq   )
+           __field(unsigned long, actualfreq )
+          ),
+
+       TP_fast_assign(
+           __entry->cpu_id = (u32) cpu_id;
+           __entry->targfreq = targfreq;
+           __entry->actualfreq = actualfreq;
+       ),
+
+       TP_printk("cpu=%u targ=%lu actual=%lu",
+             __entry->cpu_id, __entry->targfreq,
+             __entry->actualfreq)
+);
+
+DEFINE_EVENT(set, cpufreq_interactive_setspeed,
+       TP_PROTO(u32 cpu_id, unsigned long targfreq,
+            unsigned long actualfreq),
+       TP_ARGS(cpu_id, targfreq, actualfreq)
+);
+
+DECLARE_EVENT_CLASS(loadeval,
+           TP_PROTO(unsigned long cpu_id, unsigned long load,
+                    unsigned long curtarg, unsigned long curactual,
+                    unsigned long newtarg),
+                   TP_ARGS(cpu_id, load, curtarg, curactual, newtarg),
+
+           TP_STRUCT__entry(
+                   __field(unsigned long, cpu_id    )
+                   __field(unsigned long, load      )
+                   __field(unsigned long, curtarg   )
+                   __field(unsigned long, curactual )
+                   __field(unsigned long, newtarg   )
+           ),
+
+           TP_fast_assign(
+                   __entry->cpu_id = cpu_id;
+                   __entry->load = load;
+                   __entry->curtarg = curtarg;
+                   __entry->curactual = curactual;
+                   __entry->newtarg = newtarg;
+           ),
+
+           TP_printk("cpu=%lu load=%lu cur=%lu actual=%lu targ=%lu",
+                     __entry->cpu_id, __entry->load, __entry->curtarg,
+                     __entry->curactual, __entry->newtarg)
+);
+
+DEFINE_EVENT(loadeval, cpufreq_interactive_target,
+           TP_PROTO(unsigned long cpu_id, unsigned long load,
+                    unsigned long curtarg, unsigned long curactual,
+                    unsigned long newtarg),
+           TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
+);
+
+DEFINE_EVENT(loadeval, cpufreq_interactive_already,
+           TP_PROTO(unsigned long cpu_id, unsigned long load,
+                    unsigned long curtarg, unsigned long curactual,
+                    unsigned long newtarg),
+           TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
+);
+
+DEFINE_EVENT(loadeval, cpufreq_interactive_notyet,
+           TP_PROTO(unsigned long cpu_id, unsigned long load,
+                    unsigned long curtarg, unsigned long curactual,
+                    unsigned long newtarg),
+           TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
+);
+
+TRACE_EVENT(cpufreq_interactive_boost,
+           TP_PROTO(const char *s),
+           TP_ARGS(s),
+           TP_STRUCT__entry(
+                   __string(s, s)
+           ),
+           TP_fast_assign(
+                   __assign_str(s, s);
+           ),
+           TP_printk("%s", __get_str(s))
+);
+
+TRACE_EVENT(cpufreq_interactive_unboost,
+           TP_PROTO(const char *s),
+           TP_ARGS(s),
+           TP_STRUCT__entry(
+                   __string(s, s)
+           ),
+           TP_fast_assign(
+                   __assign_str(s, s);
+           ),
+           TP_printk("%s", __get_str(s))
+);
+
+#endif /* _TRACE_CPUFREQ_INTERACTIVE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>