]> git.karo-electronics.de Git - karo-tx-linux.git/blobdiff - drivers/cpufreq/intel_pstate.c
cpufreq: intel_pstate: Drop pointless initialization of PID parameters
[karo-tx-linux.git] / drivers / cpufreq / intel_pstate.c
index 3d37219a0dd7afc3108b017f1d2960868efb7903..efce4e7eeeca80c71c5f877bcaebd45f576a92c0 100644 (file)
@@ -84,6 +84,11 @@ static inline u64 div_ext_fp(u64 x, u64 y)
        return div64_u64(x << EXT_FRAC_BITS, y);
 }
 
+static inline int32_t percent_ext_fp(int percent)
+{
+       return div_ext_fp(percent, 100);
+}
+
 /**
  * struct sample -     Store performance sample
  * @core_avg_perf:     Ratio of APERF/MPERF which is the actual average
@@ -181,45 +186,22 @@ struct _pid {
 };
 
 /**
- * struct perf_limits - Store user and policy limits
- * @no_turbo:          User requested turbo state from intel_pstate sysfs
- * @turbo_disabled:    Platform turbo status either from msr
- *                     MSR_IA32_MISC_ENABLE or when maximum available pstate
- *                     matches the maximum turbo pstate
- * @max_perf_pct:      Effective maximum performance limit in percentage, this
- *                     is minimum of either limits enforced by cpufreq policy
- *                     or limits from user set limits via intel_pstate sysfs
- * @min_perf_pct:      Effective minimum performance limit in percentage, this
- *                     is maximum of either limits enforced by cpufreq policy
- *                     or limits from user set limits via intel_pstate sysfs
- * @max_perf:          This is a scaled value between 0 to 255 for max_perf_pct
- *                     This value is used to limit max pstate
- * @min_perf:          This is a scaled value between 0 to 255 for min_perf_pct
- *                     This value is used to limit min pstate
- * @max_policy_pct:    The maximum performance in percentage enforced by
- *                     cpufreq setpolicy interface
- * @max_sysfs_pct:     The maximum performance in percentage enforced by
- *                     intel pstate sysfs interface, unused when per cpu
- *                     controls are enforced
- * @min_policy_pct:    The minimum performance in percentage enforced by
- *                     cpufreq setpolicy interface
- * @min_sysfs_pct:     The minimum performance in percentage enforced by
- *                     intel pstate sysfs interface, unused when per cpu
- *                     controls are enforced
- *
- * Storage for user and policy defined limits.
+ * struct global_params - Global parameters, mostly tunable via sysfs.
+ * @no_turbo:          Whether or not to use turbo P-states.
+ * @turbo_disabled:    Whether or not turbo P-states are available at all,
+ *                     based on the MSR_IA32_MISC_ENABLE value and whether or
+ *                     not the maximum reported turbo P-state is different from
+ *                     the maximum reported non-turbo one.
+ * @min_perf_pct:      Minimum capacity limit in percent of the maximum turbo
+ *                     P-state capacity.
+ * @max_perf_pct:      Maximum capacity limit in percent of the maximum turbo
+ *                     P-state capacity.
  */
-struct perf_limits {
-       int no_turbo;
-       int turbo_disabled;
+struct global_params {
+       bool no_turbo;
+       bool turbo_disabled;
        int max_perf_pct;
        int min_perf_pct;
-       int32_t max_perf;
-       int32_t min_perf;
-       int max_policy_pct;
-       int max_sysfs_pct;
-       int min_policy_pct;
-       int min_sysfs_pct;
 };
 
 /**
@@ -240,9 +222,10 @@ struct perf_limits {
  * @prev_cummulative_iowait: IO Wait time difference from last and
  *                     current sample
  * @sample:            Storage for storing last Sample data
- * @perf_limits:       Pointer to perf_limit unique to this CPU
- *                     Not all field in the structure are applicable
- *                     when per cpu controls are enforced
+ * @min_perf:          Minimum capacity limit as a fraction of the maximum
+ *                     turbo P-state capacity.
+ * @max_perf:          Maximum capacity limit as a fraction of the maximum
+ *                     turbo P-state capacity.
  * @acpi_perf_data:    Stores ACPI perf information read from _PSS
  * @valid_pss_table:   Set to true for valid ACPI _PSS entries found
  * @epp_powersave:     Last saved HWP energy performance preference
@@ -274,7 +257,8 @@ struct cpudata {
        u64     prev_tsc;
        u64     prev_cummulative_iowait;
        struct sample sample;
-       struct perf_limits *perf_limits;
+       int32_t min_perf;
+       int32_t max_perf;
 #ifdef CONFIG_ACPI
        struct acpi_processor_performance acpi_perf_data;
        bool valid_pss_table;
@@ -359,26 +343,7 @@ static bool driver_registered __read_mostly;
 static bool acpi_ppc;
 #endif
 
-static struct perf_limits performance_limits;
-static struct perf_limits powersave_limits;
-static struct perf_limits *limits;
-
-static void intel_pstate_init_limits(struct perf_limits *limits)
-{
-       memset(limits, 0, sizeof(*limits));
-       limits->max_perf_pct = 100;
-       limits->max_perf = int_ext_tofp(1);
-       limits->max_policy_pct = 100;
-       limits->max_sysfs_pct = 100;
-}
-
-static void intel_pstate_set_performance_limits(struct perf_limits *limits)
-{
-       intel_pstate_init_limits(limits);
-       limits->min_perf_pct = 100;
-       limits->min_perf = int_ext_tofp(1);
-       limits->min_sysfs_pct = 100;
-}
+static struct global_params global;
 
 static DEFINE_MUTEX(intel_pstate_driver_lock);
 static DEFINE_MUTEX(intel_pstate_limits_lock);
@@ -502,7 +467,7 @@ static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
         * correct max turbo frequency based on the turbo state.
         * Also need to convert to MHz as _PSS freq is in MHz.
         */
-       if (!limits->turbo_disabled)
+       if (!global.turbo_disabled)
                cpu->acpi_perf_data.states[0].core_frequency =
                                        policy->cpuinfo.max_freq / 1000;
        cpu->valid_pss_table = true;
@@ -621,11 +586,19 @@ static inline void update_turbo_state(void)
 
        cpu = all_cpu_data[0];
        rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
-       limits->turbo_disabled =
+       global.turbo_disabled =
                (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
                 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
 }
 
+static int min_perf_pct_min(void)
+{
+       struct cpudata *cpu = all_cpu_data[0];
+
+       return DIV_ROUND_UP(cpu->pstate.min_pstate * 100,
+                           cpu->pstate.turbo_pstate);
+}
+
 static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
 {
        u64 epb;
@@ -845,38 +818,31 @@ static struct freq_attr *hwp_cpufreq_attrs[] = {
 
 static void intel_pstate_hwp_set(struct cpufreq_policy *policy)
 {
-       int min, hw_min, max, hw_max, cpu, range, adj_range;
-       struct perf_limits *perf_limits = limits;
+       int min, hw_min, max, hw_max, cpu;
        u64 value, cap;
 
        for_each_cpu(cpu, policy->cpus) {
-               int max_perf_pct, min_perf_pct;
                struct cpudata *cpu_data = all_cpu_data[cpu];
                s16 epp;
 
-               if (per_cpu_limits)
-                       perf_limits = all_cpu_data[cpu]->perf_limits;
-
                rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
                hw_min = HWP_LOWEST_PERF(cap);
-               if (limits->no_turbo)
+               if (global.no_turbo)
                        hw_max = HWP_GUARANTEED_PERF(cap);
                else
                        hw_max = HWP_HIGHEST_PERF(cap);
-               range = hw_max - hw_min;
 
-               max_perf_pct = perf_limits->max_perf_pct;
-               min_perf_pct = perf_limits->min_perf_pct;
+               max = fp_ext_toint(hw_max * cpu_data->max_perf);
+               if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
+                       min = max;
+               else
+                       min = fp_ext_toint(hw_max * cpu_data->min_perf);
 
                rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
-               adj_range = min_perf_pct * range / 100;
-               min = hw_min + adj_range;
+
                value &= ~HWP_MIN_PERF(~0L);
                value |= HWP_MIN_PERF(min);
 
-               adj_range = max_perf_pct * range / 100;
-               max = hw_min + adj_range;
-
                value &= ~HWP_MAX_PERF(~0L);
                value |= HWP_MAX_PERF(max);
 
@@ -930,14 +896,6 @@ skip_epp:
        }
 }
 
-static int intel_pstate_hwp_set_policy(struct cpufreq_policy *policy)
-{
-       if (hwp_active)
-               intel_pstate_hwp_set(policy);
-
-       return 0;
-}
-
 static int intel_pstate_hwp_save_state(struct cpufreq_policy *policy)
 {
        struct cpudata *cpu_data = all_cpu_data[policy->cpu];
@@ -952,43 +910,32 @@ static int intel_pstate_hwp_save_state(struct cpufreq_policy *policy)
 
 static int intel_pstate_resume(struct cpufreq_policy *policy)
 {
-       int ret;
-
        if (!hwp_active)
                return 0;
 
        mutex_lock(&intel_pstate_limits_lock);
 
        all_cpu_data[policy->cpu]->epp_policy = 0;
-
-       ret = intel_pstate_hwp_set_policy(policy);
+       intel_pstate_hwp_set(policy);
 
        mutex_unlock(&intel_pstate_limits_lock);
 
-       return ret;
+       return 0;
 }
 
 static void intel_pstate_update_policies(void)
-       __releases(&intel_pstate_limits_lock)
-       __acquires(&intel_pstate_limits_lock)
 {
-       struct perf_limits *saved_limits = limits;
        int cpu;
 
-       mutex_unlock(&intel_pstate_limits_lock);
-
        for_each_possible_cpu(cpu)
                cpufreq_update_policy(cpu);
-
-       mutex_lock(&intel_pstate_limits_lock);
-
-       limits = saved_limits;
 }
 
 /************************** debugfs begin ************************/
 static int pid_param_set(void *data, u64 val)
 {
        *(u32 *)data = val;
+       pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC;
        intel_pstate_reset_all_pid();
        return 0;
 }
@@ -1060,7 +1007,7 @@ static void intel_pstate_debug_hide_params(void)
        static ssize_t show_##file_name                                 \
        (struct kobject *kobj, struct attribute *attr, char *buf)       \
        {                                                               \
-               return sprintf(buf, "%u\n", limits->object);            \
+               return sprintf(buf, "%u\n", global.object);             \
        }
 
 static ssize_t intel_pstate_show_status(char *buf);
@@ -1151,10 +1098,10 @@ static ssize_t show_no_turbo(struct kobject *kobj,
        }
 
        update_turbo_state();
-       if (limits->turbo_disabled)
-               ret = sprintf(buf, "%u\n", limits->turbo_disabled);
+       if (global.turbo_disabled)
+               ret = sprintf(buf, "%u\n", global.turbo_disabled);
        else
-               ret = sprintf(buf, "%u\n", limits->no_turbo);
+               ret = sprintf(buf, "%u\n", global.no_turbo);
 
        mutex_unlock(&intel_pstate_driver_lock);
 
@@ -1181,19 +1128,28 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
        mutex_lock(&intel_pstate_limits_lock);
 
        update_turbo_state();
-       if (limits->turbo_disabled) {
+       if (global.turbo_disabled) {
                pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
                mutex_unlock(&intel_pstate_limits_lock);
                mutex_unlock(&intel_pstate_driver_lock);
                return -EPERM;
        }
 
-       limits->no_turbo = clamp_t(int, input, 0, 1);
+       global.no_turbo = clamp_t(int, input, 0, 1);
 
-       intel_pstate_update_policies();
+       if (global.no_turbo) {
+               struct cpudata *cpu = all_cpu_data[0];
+               int pct = cpu->pstate.max_pstate * 100 / cpu->pstate.turbo_pstate;
+
+               /* Squash the global minimum into the permitted range. */
+               if (global.min_perf_pct > pct)
+                       global.min_perf_pct = pct;
+       }
 
        mutex_unlock(&intel_pstate_limits_lock);
 
+       intel_pstate_update_policies();
+
        mutex_unlock(&intel_pstate_driver_lock);
 
        return count;
@@ -1218,19 +1174,12 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
 
        mutex_lock(&intel_pstate_limits_lock);
 
-       limits->max_sysfs_pct = clamp_t(int, input, 0 , 100);
-       limits->max_perf_pct = min(limits->max_policy_pct,
-                                  limits->max_sysfs_pct);
-       limits->max_perf_pct = max(limits->min_policy_pct,
-                                  limits->max_perf_pct);
-       limits->max_perf_pct = max(limits->min_perf_pct,
-                                  limits->max_perf_pct);
-       limits->max_perf = div_ext_fp(limits->max_perf_pct, 100);
-
-       intel_pstate_update_policies();
+       global.max_perf_pct = clamp_t(int, input, global.min_perf_pct, 100);
 
        mutex_unlock(&intel_pstate_limits_lock);
 
+       intel_pstate_update_policies();
+
        mutex_unlock(&intel_pstate_driver_lock);
 
        return count;
@@ -1255,19 +1204,13 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
 
        mutex_lock(&intel_pstate_limits_lock);
 
-       limits->min_sysfs_pct = clamp_t(int, input, 0 , 100);
-       limits->min_perf_pct = max(limits->min_policy_pct,
-                                  limits->min_sysfs_pct);
-       limits->min_perf_pct = min(limits->max_policy_pct,
-                                  limits->min_perf_pct);
-       limits->min_perf_pct = min(limits->max_perf_pct,
-                                  limits->min_perf_pct);
-       limits->min_perf = div_ext_fp(limits->min_perf_pct, 100);
-
-       intel_pstate_update_policies();
+       global.min_perf_pct = clamp_t(int, input,
+                                     min_perf_pct_min(), global.max_perf_pct);
 
        mutex_unlock(&intel_pstate_limits_lock);
 
+       intel_pstate_update_policies();
+
        mutex_unlock(&intel_pstate_driver_lock);
 
        return count;
@@ -1387,7 +1330,7 @@ static u64 atom_get_val(struct cpudata *cpudata, int pstate)
        u32 vid;
 
        val = (u64)pstate << 8;
-       if (limits->no_turbo && !limits->turbo_disabled)
+       if (global.no_turbo && !global.turbo_disabled)
                val |= (u64)1 << 32;
 
        vid_fp = cpudata->vid.min + mul_fp(
@@ -1557,7 +1500,7 @@ static u64 core_get_val(struct cpudata *cpudata, int pstate)
        u64 val;
 
        val = (u64)pstate << 8;
-       if (limits->no_turbo && !limits->turbo_disabled)
+       if (global.no_turbo && !global.turbo_disabled)
                val |= (u64)1 << 32;
 
        return val;
@@ -1597,14 +1540,6 @@ static struct cpu_defaults core_params = {
 };
 
 static const struct cpu_defaults silvermont_params = {
-       .pid_policy = {
-               .sample_rate_ms = 10,
-               .deadband = 0,
-               .setpoint = 60,
-               .p_gain_pct = 14,
-               .d_gain_pct = 0,
-               .i_gain_pct = 4,
-       },
        .funcs = {
                .get_max = atom_get_max_pstate,
                .get_max_physical = atom_get_max_pstate,
@@ -1618,14 +1553,6 @@ static const struct cpu_defaults silvermont_params = {
 };
 
 static const struct cpu_defaults airmont_params = {
-       .pid_policy = {
-               .sample_rate_ms = 10,
-               .deadband = 0,
-               .setpoint = 60,
-               .p_gain_pct = 14,
-               .d_gain_pct = 0,
-               .i_gain_pct = 4,
-       },
        .funcs = {
                .get_max = atom_get_max_pstate,
                .get_max_physical = atom_get_max_pstate,
@@ -1659,14 +1586,6 @@ static const struct cpu_defaults knl_params = {
 };
 
 static const struct cpu_defaults bxt_params = {
-       .pid_policy = {
-               .sample_rate_ms = 10,
-               .deadband = 0,
-               .setpoint = 60,
-               .p_gain_pct = 14,
-               .d_gain_pct = 0,
-               .i_gain_pct = 4,
-       },
        .funcs = {
                .get_max = core_get_max_pstate,
                .get_max_physical = core_get_max_pstate_physical,
@@ -1683,24 +1602,20 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
        int max_perf = cpu->pstate.turbo_pstate;
        int max_perf_adj;
        int min_perf;
-       struct perf_limits *perf_limits = limits;
 
-       if (limits->no_turbo || limits->turbo_disabled)
+       if (global.no_turbo || global.turbo_disabled)
                max_perf = cpu->pstate.max_pstate;
 
-       if (per_cpu_limits)
-               perf_limits = cpu->perf_limits;
-
        /*
         * performance can be limited by user through sysfs, by cpufreq
         * policy, or by cpu specific default values determined through
         * experimentation.
         */
-       max_perf_adj = fp_ext_toint(max_perf * perf_limits->max_perf);
+       max_perf_adj = fp_ext_toint(max_perf * cpu->max_perf);
        *max = clamp_t(int, max_perf_adj,
                        cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);
 
-       min_perf = fp_ext_toint(max_perf * perf_limits->min_perf);
+       min_perf = fp_ext_toint(max_perf * cpu->min_perf);
        *min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
 }
 
@@ -1820,7 +1735,7 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
 
        sample->busy_scaled = busy_frac * 100;
 
-       target = limits->no_turbo || limits->turbo_disabled ?
+       target = global.no_turbo || global.turbo_disabled ?
                        cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
        target += target >> 2;
        target = mul_fp(target, busy_frac);
@@ -2001,18 +1916,11 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
        cpu = all_cpu_data[cpunum];
 
        if (!cpu) {
-               unsigned int size = sizeof(struct cpudata);
-
-               if (per_cpu_limits)
-                       size += sizeof(struct perf_limits);
-
-               cpu = kzalloc(size, GFP_KERNEL);
+               cpu = kzalloc(sizeof(*cpu), GFP_KERNEL);
                if (!cpu)
                        return -ENOMEM;
 
                all_cpu_data[cpunum] = cpu;
-               if (per_cpu_limits)
-                       cpu->perf_limits = (struct perf_limits *)(cpu + 1);
 
                cpu->epp_default = -EINVAL;
                cpu->epp_powersave = -EINVAL;
@@ -2077,48 +1985,68 @@ static void intel_pstate_clear_update_util_hook(unsigned int cpu)
        synchronize_sched();
 }
 
+static int intel_pstate_get_max_freq(struct cpudata *cpu)
+{
+       return global.turbo_disabled || global.no_turbo ?
+                       cpu->pstate.max_freq : cpu->pstate.turbo_freq;
+}
+
 static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
-                                           struct perf_limits *limits)
+                                           struct cpudata *cpu)
 {
+       int max_freq = intel_pstate_get_max_freq(cpu);
+       int32_t max_policy_perf, min_policy_perf;
 
-       limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100,
-                                             policy->cpuinfo.max_freq);
-       limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0, 100);
+       max_policy_perf = div_ext_fp(policy->max, max_freq);
+       max_policy_perf = clamp_t(int32_t, max_policy_perf, 0, int_ext_tofp(1));
        if (policy->max == policy->min) {
-               limits->min_policy_pct = limits->max_policy_pct;
+               min_policy_perf = max_policy_perf;
        } else {
-               limits->min_policy_pct = DIV_ROUND_UP(policy->min * 100,
-                                                     policy->cpuinfo.max_freq);
-               limits->min_policy_pct = clamp_t(int, limits->min_policy_pct,
-                                                0, 100);
+               min_policy_perf = div_ext_fp(policy->min, max_freq);
+               min_policy_perf = clamp_t(int32_t, min_policy_perf,
+                                         0, max_policy_perf);
        }
 
-       /* Normalize user input to [min_policy_pct, max_policy_pct] */
-       limits->min_perf_pct = max(limits->min_policy_pct,
-                                  limits->min_sysfs_pct);
-       limits->min_perf_pct = min(limits->max_policy_pct,
-                                  limits->min_perf_pct);
-       limits->max_perf_pct = min(limits->max_policy_pct,
-                                  limits->max_sysfs_pct);
-       limits->max_perf_pct = max(limits->min_policy_pct,
-                                  limits->max_perf_pct);
+       /* Normalize user input to [min_perf, max_perf] */
+       if (per_cpu_limits) {
+               cpu->min_perf = min_policy_perf;
+               cpu->max_perf = max_policy_perf;
+       } else {
+               int32_t global_min, global_max;
+
+               /* Global limits are in percent of the maximum turbo P-state. */
+               global_max = percent_ext_fp(global.max_perf_pct);
+               global_min = percent_ext_fp(global.min_perf_pct);
+               if (max_freq != cpu->pstate.turbo_freq) {
+                       int32_t turbo_factor;
+
+                       turbo_factor = div_ext_fp(cpu->pstate.turbo_pstate,
+                                                 cpu->pstate.max_pstate);
+                       global_min = mul_ext_fp(global_min, turbo_factor);
+                       global_max = mul_ext_fp(global_max, turbo_factor);
+               }
+               global_min = clamp_t(int32_t, global_min, 0, global_max);
+
+               cpu->min_perf = max(min_policy_perf, global_min);
+               cpu->min_perf = min(cpu->min_perf, max_policy_perf);
+               cpu->max_perf = min(max_policy_perf, global_max);
+               cpu->max_perf = max(min_policy_perf, cpu->max_perf);
 
-       /* Make sure min_perf_pct <= max_perf_pct */
-       limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);
+               /* Make sure min_perf <= max_perf */
+               cpu->min_perf = min(cpu->min_perf, cpu->max_perf);
+       }
 
-       limits->min_perf = div_ext_fp(limits->min_perf_pct, 100);
-       limits->max_perf = div_ext_fp(limits->max_perf_pct, 100);
-       limits->max_perf = round_up(limits->max_perf, EXT_FRAC_BITS);
-       limits->min_perf = round_up(limits->min_perf, EXT_FRAC_BITS);
+       cpu->max_perf = round_up(cpu->max_perf, EXT_FRAC_BITS);
+       cpu->min_perf = round_up(cpu->min_perf, EXT_FRAC_BITS);
 
        pr_debug("cpu:%d max_perf_pct:%d min_perf_pct:%d\n", policy->cpu,
-                limits->max_perf_pct, limits->min_perf_pct);
+                fp_ext_toint(cpu->max_perf * 100),
+                fp_ext_toint(cpu->min_perf * 100));
 }
 
 static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 {
        struct cpudata *cpu;
-       struct perf_limits *perf_limits = NULL;
 
        if (!policy->cpuinfo.max_freq)
                return -ENODEV;
@@ -2129,34 +2057,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
        cpu = all_cpu_data[policy->cpu];
        cpu->policy = policy->policy;
 
-       if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
-           policy->max < policy->cpuinfo.max_freq &&
-           policy->max > cpu->pstate.max_pstate * cpu->pstate.scaling) {
-               pr_debug("policy->max > max non turbo frequency\n");
-               policy->max = policy->cpuinfo.max_freq;
-       }
-
-       if (per_cpu_limits)
-               perf_limits = cpu->perf_limits;
-
        mutex_lock(&intel_pstate_limits_lock);
 
-       if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
-               pr_debug("set performance\n");
-               if (!perf_limits) {
-                       limits = &performance_limits;
-                       perf_limits = limits;
-               }
-       } else {
-               pr_debug("set powersave\n");
-               if (!perf_limits) {
-                       limits = &powersave_limits;
-                       perf_limits = limits;
-               }
-
-       }
-
-       intel_pstate_update_perf_limits(policy, perf_limits);
+       intel_pstate_update_perf_limits(policy, cpu);
 
        if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
                /*
@@ -2169,45 +2072,38 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 
        intel_pstate_set_update_util_hook(policy->cpu);
 
-       intel_pstate_hwp_set_policy(policy);
+       if (hwp_active)
+               intel_pstate_hwp_set(policy);
 
        mutex_unlock(&intel_pstate_limits_lock);
 
        return 0;
 }
 
+static void intel_pstate_adjust_policy_max(struct cpufreq_policy *policy,
+                                        struct cpudata *cpu)
+{
+       if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
+           policy->max < policy->cpuinfo.max_freq &&
+           policy->max > cpu->pstate.max_freq) {
+               pr_debug("policy->max > max non turbo frequency\n");
+               policy->max = policy->cpuinfo.max_freq;
+       }
+}
+
 static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
 {
        struct cpudata *cpu = all_cpu_data[policy->cpu];
-       struct perf_limits *perf_limits;
-
-       if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
-               perf_limits = &performance_limits;
-       else
-               perf_limits = &powersave_limits;
 
        update_turbo_state();
-       policy->cpuinfo.max_freq = perf_limits->turbo_disabled ||
-                                       perf_limits->no_turbo ?
-                                       cpu->pstate.max_freq :
-                                       cpu->pstate.turbo_freq;
-
-       cpufreq_verify_within_cpu_limits(policy);
+       cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
+                                    intel_pstate_get_max_freq(cpu));
 
        if (policy->policy != CPUFREQ_POLICY_POWERSAVE &&
            policy->policy != CPUFREQ_POLICY_PERFORMANCE)
                return -EINVAL;
 
-       /* When per-CPU limits are used, sysfs limits are not used */
-       if (!per_cpu_limits) {
-               unsigned int max_freq, min_freq;
-
-               max_freq = policy->cpuinfo.max_freq *
-                                       perf_limits->max_sysfs_pct / 100;
-               min_freq = policy->cpuinfo.max_freq *
-                                       perf_limits->min_sysfs_pct / 100;
-               cpufreq_verify_within_limits(policy, min_freq, max_freq);
-       }
+       intel_pstate_adjust_policy_max(policy, cpu);
 
        return 0;
 }
@@ -2248,8 +2144,8 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
 
        cpu = all_cpu_data[policy->cpu];
 
-       if (per_cpu_limits)
-               intel_pstate_init_limits(cpu->perf_limits);
+       cpu->max_perf = int_ext_tofp(1);
+       cpu->min_perf = 0;
 
        policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
        policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
@@ -2257,7 +2153,7 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
        /* cpuinfo and default policy values */
        policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
        update_turbo_state();
-       policy->cpuinfo.max_freq = limits->turbo_disabled ?
+       policy->cpuinfo.max_freq = global.turbo_disabled ?
                        cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
        policy->cpuinfo.max_freq *= cpu->pstate.scaling;
 
@@ -2277,7 +2173,7 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
                return ret;
 
        policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
-       if (limits->min_perf_pct == 100 && limits->max_perf_pct == 100)
+       if (IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE))
                policy->policy = CPUFREQ_POLICY_PERFORMANCE;
        else
                policy->policy = CPUFREQ_POLICY_POWERSAVE;
@@ -2303,32 +2199,14 @@ static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
        struct cpudata *cpu = all_cpu_data[policy->cpu];
 
        update_turbo_state();
-       policy->cpuinfo.max_freq = limits->turbo_disabled ?
-                       cpu->pstate.max_freq : cpu->pstate.turbo_freq;
+       cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
+                                    intel_pstate_get_max_freq(cpu));
 
-       cpufreq_verify_within_cpu_limits(policy);
+       intel_pstate_adjust_policy_max(policy, cpu);
 
-       return 0;
-}
+       intel_pstate_update_perf_limits(policy, cpu);
 
-static unsigned int intel_cpufreq_turbo_update(struct cpudata *cpu,
-                                              struct cpufreq_policy *policy,
-                                              unsigned int target_freq)
-{
-       unsigned int max_freq;
-
-       update_turbo_state();
-
-       max_freq = limits->no_turbo || limits->turbo_disabled ?
-                       cpu->pstate.max_freq : cpu->pstate.turbo_freq;
-       policy->cpuinfo.max_freq = max_freq;
-       if (policy->max > max_freq)
-               policy->max = max_freq;
-
-       if (target_freq > max_freq)
-               target_freq = max_freq;
-
-       return target_freq;
+       return 0;
 }
 
 static int intel_cpufreq_target(struct cpufreq_policy *policy,
@@ -2339,8 +2217,10 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy,
        struct cpufreq_freqs freqs;
        int target_pstate;
 
+       update_turbo_state();
+
        freqs.old = policy->cur;
-       freqs.new = intel_cpufreq_turbo_update(cpu, policy, target_freq);
+       freqs.new = target_freq;
 
        cpufreq_freq_transition_begin(policy, &freqs);
        switch (relation) {
@@ -2372,7 +2252,8 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
        struct cpudata *cpu = all_cpu_data[policy->cpu];
        int target_pstate;
 
-       target_freq = intel_cpufreq_turbo_update(cpu, policy, target_freq);
+       update_turbo_state();
+
        target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
        target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
        intel_pstate_update_pstate(cpu, target_pstate);
@@ -2427,13 +2308,8 @@ static int intel_pstate_register_driver(void)
 {
        int ret;
 
-       intel_pstate_init_limits(&powersave_limits);
-       intel_pstate_set_performance_limits(&performance_limits);
-       if (IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE) &&
-           intel_pstate_driver == &intel_pstate)
-               limits = &performance_limits;
-       else
-               limits = &powersave_limits;
+       memset(&global, 0, sizeof(global));
+       global.max_perf_pct = 100;
 
        ret = cpufreq_register_driver(intel_pstate_driver);
        if (ret) {
@@ -2441,6 +2317,8 @@ static int intel_pstate_register_driver(void)
                return ret;
        }
 
+       global.min_perf_pct = min_perf_pct_min();
+
        mutex_lock(&intel_pstate_limits_lock);
        driver_registered = true;
        mutex_unlock(&intel_pstate_limits_lock);
@@ -2548,9 +2426,15 @@ static void __init copy_pid_params(struct pstate_adjust_policy *policy)
 #ifdef CONFIG_ACPI
 static void intel_pstate_use_acpi_profile(void)
 {
-       if (acpi_gbl_FADT.preferred_profile == PM_MOBILE)
+       switch (acpi_gbl_FADT.preferred_profile) {
+       case PM_MOBILE:
+       case PM_TABLET:
+       case PM_APPLIANCE_PC:
+       case PM_DESKTOP:
+       case PM_WORKSTATION:
                pstate_funcs.get_target_pstate =
                                get_target_pstate_use_cpu_load;
+       }
 }
 #else
 static void intel_pstate_use_acpi_profile(void)
@@ -2706,28 +2590,33 @@ static const struct x86_cpu_id hwp_support_ids[] __initconst = {
 
 static int __init intel_pstate_init(void)
 {
-       const struct x86_cpu_id *id;
-       struct cpu_defaults *cpu_def;
-       int rc = 0;
+       int rc;
 
        if (no_load)
                return -ENODEV;
 
-       if (x86_match_cpu(hwp_support_ids) && !no_hwp) {
+       if (x86_match_cpu(hwp_support_ids)) {
                copy_cpu_funcs(&core_params.funcs);
-               hwp_active++;
-               intel_pstate.attr = hwp_cpufreq_attrs;
-               goto hwp_cpu_matched;
-       }
-
-       id = x86_match_cpu(intel_pstate_cpu_ids);
-       if (!id)
-               return -ENODEV;
+               if (no_hwp) {
+                       pstate_funcs.get_target_pstate = get_target_pstate_use_cpu_load;
+               } else {
+                       hwp_active++;
+                       intel_pstate.attr = hwp_cpufreq_attrs;
+                       goto hwp_cpu_matched;
+               }
+       } else {
+               const struct x86_cpu_id *id;
+               struct cpu_defaults *cpu_def;
 
-       cpu_def = (struct cpu_defaults *)id->driver_data;
+               id = x86_match_cpu(intel_pstate_cpu_ids);
+               if (!id)
+                       return -ENODEV;
 
-       copy_pid_params(&cpu_def->pid_policy);
-       copy_cpu_funcs(&cpu_def->funcs);
+               cpu_def = (struct cpu_defaults *)id->driver_data;
+               copy_cpu_funcs(&cpu_def->funcs);
+               if (pstate_funcs.get_target_pstate == get_target_pstate_use_performance)
+                       copy_pid_params(&cpu_def->pid_policy);
+       }
 
        if (intel_pstate_msrs_not_valid())
                return -ENODEV;