]> git.karo-electronics.de Git - karo-tx-linux.git/blob - drivers/cpufreq/arm_big_little.c
Merge remote-tracking branch 'net-next/master'
[karo-tx-linux.git] / drivers / cpufreq / arm_big_little.c
1 /*
2  * ARM big.LITTLE Platforms CPUFreq support
3  *
4  * Copyright (C) 2013 ARM Ltd.
5  * Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
6  *
7  * Copyright (C) 2013 Linaro.
8  * Viresh Kumar <viresh.kumar@linaro.org>
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License version 2 as
12  * published by the Free Software Foundation.
13  *
14  * This program is distributed "as is" WITHOUT ANY WARRANTY of any
15  * kind, whether express or implied; without even the implied warranty
16  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17  * GNU General Public License for more details.
18  */
19
20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21
22 #include <linux/clk.h>
23 #include <linux/cpu.h>
24 #include <linux/cpufreq.h>
25 #include <linux/cpumask.h>
26 #include <linux/export.h>
27 #include <linux/of_platform.h>
28 #include <linux/pm_opp.h>
29 #include <linux/slab.h>
30 #include <linux/topology.h>
31 #include <linux/types.h>
32
33 #include "arm_big_little.h"
34
/* Currently we support only two clusters */
#define MAX_CLUSTERS    2

/* Platform callbacks installed by bL_cpufreq_register(); NULL when unbound */
static struct cpufreq_arm_bL_ops *arm_bL_ops;
/* Per-cluster clock handle, acquired in get_cluster_clk_and_freq_table() */
static struct clk *clk[MAX_CLUSTERS];
/* Per-cluster frequency table built from the OPP library */
static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS];
/* Per-cluster refcount; clk[] and freq_table[] are valid while non-zero */
static atomic_t cluster_usage[MAX_CLUSTERS] = {ATOMIC_INIT(0), ATOMIC_INIT(0)};
42
43 static unsigned int bL_cpufreq_get(unsigned int cpu)
44 {
45         u32 cur_cluster = cpu_to_cluster(cpu);
46
47         return clk_get_rate(clk[cur_cluster]) / 1000;
48 }
49
50 /* Set clock frequency */
/* Set clock frequency */
static int bL_cpufreq_set_target(struct cpufreq_policy *policy,
		unsigned int target_freq, unsigned int relation)
{
	struct cpufreq_freqs freqs;
	u32 cpu = policy->cpu, freq_tab_idx, cur_cluster;
	int ret = 0;

	cur_cluster = cpu_to_cluster(policy->cpu);

	/* Current cluster rate in kHz, read back from the clock framework */
	freqs.old = bL_cpufreq_get(policy->cpu);

	/* Determine valid target frequency using freq_table */
	cpufreq_frequency_table_target(policy, freq_table[cur_cluster],
			target_freq, relation, &freq_tab_idx);
	freqs.new = freq_table[cur_cluster][freq_tab_idx].frequency;

	pr_debug("%s: cpu: %d, cluster: %d, oldfreq: %d, target freq: %d, new freq: %d\n",
			__func__, cpu, cur_cluster, freqs.old, target_freq,
			freqs.new);

	/* Nothing to do; skip the notification round-trip entirely */
	if (freqs.old == freqs.new)
		return 0;

	/*
	 * The cpufreq core requires every actual rate change to be
	 * bracketed by PRECHANGE/POSTCHANGE notifications; governors and
	 * stats rely on this ordering.
	 */
	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);

	ret = clk_set_rate(clk[cur_cluster], freqs.new * 1000);
	if (ret) {
		pr_err("clk_set_rate failed: %d\n", ret);
		/* Rate did not change: report the old value in POSTCHANGE */
		freqs.new = freqs.old;
	}

	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);

	return ret;
}
86
87 static void put_cluster_clk_and_freq_table(struct device *cpu_dev)
88 {
89         u32 cluster = cpu_to_cluster(cpu_dev->id);
90
91         if (!atomic_dec_return(&cluster_usage[cluster])) {
92                 clk_put(clk[cluster]);
93                 dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
94                 dev_dbg(cpu_dev, "%s: cluster: %d\n", __func__, cluster);
95         }
96 }
97
98 static int get_cluster_clk_and_freq_table(struct device *cpu_dev)
99 {
100         u32 cluster = cpu_to_cluster(cpu_dev->id);
101         char name[14] = "cpu-cluster.";
102         int ret;
103
104         if (atomic_inc_return(&cluster_usage[cluster]) != 1)
105                 return 0;
106
107         ret = arm_bL_ops->init_opp_table(cpu_dev);
108         if (ret) {
109                 dev_err(cpu_dev, "%s: init_opp_table failed, cpu: %d, err: %d\n",
110                                 __func__, cpu_dev->id, ret);
111                 goto atomic_dec;
112         }
113
114         ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]);
115         if (ret) {
116                 dev_err(cpu_dev, "%s: failed to init cpufreq table, cpu: %d, err: %d\n",
117                                 __func__, cpu_dev->id, ret);
118                 goto atomic_dec;
119         }
120
121         name[12] = cluster + '0';
122         clk[cluster] = clk_get(cpu_dev, name);
123         if (!IS_ERR(clk[cluster])) {
124                 dev_dbg(cpu_dev, "%s: clk: %p & freq table: %p, cluster: %d\n",
125                                 __func__, clk[cluster], freq_table[cluster],
126                                 cluster);
127                 return 0;
128         }
129
130         dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d, cluster: %d\n",
131                         __func__, cpu_dev->id, cluster);
132         ret = PTR_ERR(clk[cluster]);
133         dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
134
135 atomic_dec:
136         atomic_dec(&cluster_usage[cluster]);
137         dev_err(cpu_dev, "%s: Failed to get data for cluster: %d\n", __func__,
138                         cluster);
139         return ret;
140 }
141
142 /* Per-CPU initialization */
143 static int bL_cpufreq_init(struct cpufreq_policy *policy)
144 {
145         u32 cur_cluster = cpu_to_cluster(policy->cpu);
146         struct device *cpu_dev;
147         int ret;
148
149         cpu_dev = get_cpu_device(policy->cpu);
150         if (!cpu_dev) {
151                 pr_err("%s: failed to get cpu%d device\n", __func__,
152                                 policy->cpu);
153                 return -ENODEV;
154         }
155
156         ret = get_cluster_clk_and_freq_table(cpu_dev);
157         if (ret)
158                 return ret;
159
160         ret = cpufreq_table_validate_and_show(policy, freq_table[cur_cluster]);
161         if (ret) {
162                 dev_err(cpu_dev, "CPU %d, cluster: %d invalid freq table\n",
163                                 policy->cpu, cur_cluster);
164                 put_cluster_clk_and_freq_table(cpu_dev);
165                 return ret;
166         }
167
168         if (arm_bL_ops->get_transition_latency)
169                 policy->cpuinfo.transition_latency =
170                         arm_bL_ops->get_transition_latency(cpu_dev);
171         else
172                 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
173
174         cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
175
176         dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu);
177         return 0;
178 }
179
180 static int bL_cpufreq_exit(struct cpufreq_policy *policy)
181 {
182         struct device *cpu_dev;
183
184         cpu_dev = get_cpu_device(policy->cpu);
185         if (!cpu_dev) {
186                 pr_err("%s: failed to get cpu%d device\n", __func__,
187                                 policy->cpu);
188                 return -ENODEV;
189         }
190
191         cpufreq_frequency_table_put_attr(policy->cpu);
192         put_cluster_clk_and_freq_table(cpu_dev);
193         dev_dbg(cpu_dev, "%s: Exited, cpu: %d\n", __func__, policy->cpu);
194
195         return 0;
196 }
197
/* cpufreq driver ops; shared for both clusters, one policy per cluster */
static struct cpufreq_driver bL_cpufreq_driver = {
	.name			= "arm-big-little",
	.flags			= CPUFREQ_STICKY |
					CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
	.verify			= cpufreq_generic_frequency_table_verify,
	.target			= bL_cpufreq_set_target,
	.get			= bL_cpufreq_get,
	.init			= bL_cpufreq_init,
	.exit			= bL_cpufreq_exit,
	.attr			= cpufreq_generic_attr,
};
209
210 int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops)
211 {
212         int ret;
213
214         if (arm_bL_ops) {
215                 pr_debug("%s: Already registered: %s, exiting\n", __func__,
216                                 arm_bL_ops->name);
217                 return -EBUSY;
218         }
219
220         if (!ops || !strlen(ops->name) || !ops->init_opp_table) {
221                 pr_err("%s: Invalid arm_bL_ops, exiting\n", __func__);
222                 return -ENODEV;
223         }
224
225         arm_bL_ops = ops;
226
227         ret = cpufreq_register_driver(&bL_cpufreq_driver);
228         if (ret) {
229                 pr_info("%s: Failed registering platform driver: %s, err: %d\n",
230                                 __func__, ops->name, ret);
231                 arm_bL_ops = NULL;
232         } else {
233                 pr_info("%s: Registered platform driver: %s\n", __func__,
234                                 ops->name);
235         }
236
237         return ret;
238 }
239 EXPORT_SYMBOL_GPL(bL_cpufreq_register);
240
241 void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops)
242 {
243         if (arm_bL_ops != ops) {
244                 pr_err("%s: Registered with: %s, can't unregister, exiting\n",
245                                 __func__, arm_bL_ops->name);
246                 return;
247         }
248
249         cpufreq_unregister_driver(&bL_cpufreq_driver);
250         pr_info("%s: Un-registered platform driver: %s\n", __func__,
251                         arm_bL_ops->name);
252         arm_bL_ops = NULL;
253 }
254 EXPORT_SYMBOL_GPL(bL_cpufreq_unregister);