/*
 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
 *              http://www.samsung.com
 *
 * Amit Daniel Kachhap <amit.daniel@samsung.com>
 *
 * EXYNOS5440 - CPU frequency scaling support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/opp.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

/* Register definitions */
#define XMU_DVFS_CTRL           0x0060
#define XMU_PMU_P0_7            0x0064
#define XMU_C0_3_PSTATE         0x0090
#define XMU_P_LIMIT             0x00a0
#define XMU_P_STATUS            0x00a4
#define XMU_PMUEVTEN            0x00d0
#define XMU_PMUIRQEN            0x00d4
#define XMU_PMUIRQ              0x00d8

/* PMU mask and shift definitions */
#define P_VALUE_MASK            0x7

#define XMU_DVFS_CTRL_EN_SHIFT  0

#define P0_7_CPUCLKDEV_SHIFT    21
#define P0_7_CPUCLKDEV_MASK     0x7
#define P0_7_ATBCLKDEV_SHIFT    18
#define P0_7_ATBCLKDEV_MASK     0x7
#define P0_7_CSCLKDEV_SHIFT     15
#define P0_7_CSCLKDEV_MASK      0x7
#define P0_7_CPUEMA_SHIFT       28
#define P0_7_CPUEMA_MASK        0xf
#define P0_7_L2EMA_SHIFT        24
#define P0_7_L2EMA_MASK         0xf
#define P0_7_VDD_SHIFT          8
#define P0_7_VDD_MASK           0x7f
#define P0_7_FREQ_SHIFT         0
#define P0_7_FREQ_MASK          0xff

#define C0_3_PSTATE_VALID_SHIFT 8
#define C0_3_PSTATE_CURR_SHIFT  4
#define C0_3_PSTATE_NEW_SHIFT   0

#define PSTATE_CHANGED_EVTEN_SHIFT      0

#define PSTATE_CHANGED_IRQEN_SHIFT      0

#define PSTATE_CHANGED_SHIFT            0

/* some constant values for clock divider calculation */
#define CPU_DIV_FREQ_MAX        500
#define CPU_DBG_FREQ_MAX        375
#define CPU_ATB_FREQ_MAX        500

#define PMIC_LOW_VOLT           0x30
#define PMIC_HIGH_VOLT          0x28

#define CPUEMA_HIGH             0x2
#define CPUEMA_MID              0x4
#define CPUEMA_LOW              0x7

#define L2EMA_HIGH              0x1
#define L2EMA_MID               0x3
#define L2EMA_LOW               0x4

#define DIV_TAB_MAX     2
/* frequency unit is 20 MHz */
#define FREQ_UNIT       20
#define MAX_VOLTAGE     1550000 /* In microvolt */
#define VOLTAGE_STEP    12500   /* In microvolt */

#define CPUFREQ_NAME            "exynos5440_dvfs"
#define DEF_TRANS_LATENCY       100000

enum cpufreq_level_index {
        L0, L1, L2, L3, L4,
        L5, L6, L7, L8, L9,
};
#define CPUFREQ_LEVEL_END       (L7 + 1)

struct exynos_dvfs_data {
        void __iomem *base;
        struct resource *mem;
        int irq;
        struct clk *cpu_clk;
        unsigned int cur_frequency;
        unsigned int latency;
        struct cpufreq_frequency_table *freq_table;
        unsigned int freq_count;
        struct device *dev;
        bool dvfs_enabled;
        struct work_struct irq_work;
};

static struct exynos_dvfs_data *dvfs_info;
static DEFINE_MUTEX(cpufreq_lock);
static struct cpufreq_freqs freqs;

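/*
 * Program the XMU_PMU_P0_7 divider table from the OPP entries. For each
 * frequency in the cpufreq table, the CPU/ATB/CS clock divider fields, the
 * EMA settings derived from the OPP voltage, the voltage id and the
 * frequency (in FREQ_UNIT steps) are packed into one P-state register.
 */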
static int init_div_table(void)
{
        struct cpufreq_frequency_table *freq_tbl = dvfs_info->freq_table;
        unsigned int tmp, clk_div, ema_div, freq, volt_id;
        int i = 0;
        struct opp *opp;

        rcu_read_lock();
        for (i = 0; freq_tbl[i].frequency != CPUFREQ_TABLE_END; i++) {

                opp = opp_find_freq_exact(dvfs_info->dev,
                                        freq_tbl[i].frequency * 1000, true);
                if (IS_ERR(opp)) {
                        rcu_read_unlock();
                        dev_err(dvfs_info->dev,
                                "failed to find valid OPP for %u kHz\n",
                                freq_tbl[i].frequency);
                        return PTR_ERR(opp);
                }

                freq = freq_tbl[i].frequency / 1000; /* In MHz */
                clk_div = ((freq / CPU_DIV_FREQ_MAX) & P0_7_CPUCLKDEV_MASK)
                                        << P0_7_CPUCLKDEV_SHIFT;
                clk_div |= ((freq / CPU_ATB_FREQ_MAX) & P0_7_ATBCLKDEV_MASK)
                                        << P0_7_ATBCLKDEV_SHIFT;
                clk_div |= ((freq / CPU_DBG_FREQ_MAX) & P0_7_CSCLKDEV_MASK)
                                        << P0_7_CSCLKDEV_SHIFT;

                /* Calculate EMA */
                volt_id = opp_get_voltage(opp);
                volt_id = (MAX_VOLTAGE - volt_id) / VOLTAGE_STEP;
                if (volt_id < PMIC_HIGH_VOLT) {
                        ema_div = (CPUEMA_HIGH << P0_7_CPUEMA_SHIFT) |
                                (L2EMA_HIGH << P0_7_L2EMA_SHIFT);
                } else if (volt_id > PMIC_LOW_VOLT) {
                        ema_div = (CPUEMA_LOW << P0_7_CPUEMA_SHIFT) |
                                (L2EMA_LOW << P0_7_L2EMA_SHIFT);
                } else {
                        ema_div = (CPUEMA_MID << P0_7_CPUEMA_SHIFT) |
                                (L2EMA_MID << P0_7_L2EMA_SHIFT);
                }

                tmp = (clk_div | ema_div | (volt_id << P0_7_VDD_SHIFT)
                        | ((freq / FREQ_UNIT) << P0_7_FREQ_SHIFT));

                __raw_writel(tmp, dvfs_info->base + XMU_PMU_P0_7 + 4 * i);
        }

        rcu_read_unlock();
        return 0;
}

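/*
 * Enable DVFS operation: with DVFS temporarily disabled, enable the
 * P-state change event and interrupt, program the index of the current
 * (boot) frequency into every per-CPU C0_3_PSTATE register and finally
 * set the enable bit in XMU_DVFS_CTRL.
 */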
static void exynos_enable_dvfs(void)
{
        unsigned int tmp, i, cpu;
        struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table;
        /* Disable DVFS */
        __raw_writel(0, dvfs_info->base + XMU_DVFS_CTRL);

        /* Enable PSTATE Change Event */
        tmp = __raw_readl(dvfs_info->base + XMU_PMUEVTEN);
        tmp |= (1 << PSTATE_CHANGED_EVTEN_SHIFT);
        __raw_writel(tmp, dvfs_info->base + XMU_PMUEVTEN);

        /* Enable PSTATE Change IRQ */
        tmp = __raw_readl(dvfs_info->base + XMU_PMUIRQEN);
        tmp |= (1 << PSTATE_CHANGED_IRQEN_SHIFT);
        __raw_writel(tmp, dvfs_info->base + XMU_PMUIRQEN);

        /* Set initial performance index */
        for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
                if (freq_table[i].frequency == dvfs_info->cur_frequency)
                        break;

        if (freq_table[i].frequency == CPUFREQ_TABLE_END) {
                dev_crit(dvfs_info->dev, "Boot up frequency not supported\n");
                /* Assign the highest frequency */
                i = 0;
                dvfs_info->cur_frequency = freq_table[i].frequency;
        }

        dev_info(dvfs_info->dev, "Setting dvfs initial frequency = %u kHz\n",
                                                dvfs_info->cur_frequency);

        for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++) {
                tmp = __raw_readl(dvfs_info->base + XMU_C0_3_PSTATE + cpu * 4);
                tmp &= ~(P_VALUE_MASK << C0_3_PSTATE_NEW_SHIFT);
                tmp |= (i << C0_3_PSTATE_NEW_SHIFT);
                __raw_writel(tmp, dvfs_info->base + XMU_C0_3_PSTATE + cpu * 4);
        }

        /* Enable DVFS */
        __raw_writel(1 << XMU_DVFS_CTRL_EN_SHIFT,
                                dvfs_info->base + XMU_DVFS_CTRL);
}

static int exynos_verify_speed(struct cpufreq_policy *policy)
{
        return cpufreq_frequency_table_verify(policy,
                                              dvfs_info->freq_table);
}

static unsigned int exynos_getspeed(unsigned int cpu)
{
        return dvfs_info->cur_frequency;
}

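/*
 * cpufreq ->target callback. Looks up the table index for the requested
 * frequency, sends the PRECHANGE notification and writes the new index
 * into the C0_3_PSTATE register of every CPU in the policy. The POSTCHANGE
 * notification is sent later from exynos_cpufreq_work() once the hardware
 * signals completion of the P-state change.
 */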
static int exynos_target(struct cpufreq_policy *policy,
                          unsigned int target_freq,
                          unsigned int relation)
{
        unsigned int index, tmp;
        int ret = 0, i;
        struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table;

        mutex_lock(&cpufreq_lock);

        ret = cpufreq_frequency_table_target(policy, freq_table,
                                           target_freq, relation, &index);
        if (ret)
                goto out;

        freqs.old = dvfs_info->cur_frequency;
        freqs.new = freq_table[index].frequency;

        if (freqs.old == freqs.new)
                goto out;

        cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);

        /* Set the target frequency in all C0_3_PSTATE registers */
        for_each_cpu(i, policy->cpus) {
                tmp = __raw_readl(dvfs_info->base + XMU_C0_3_PSTATE + i * 4);
                tmp &= ~(P_VALUE_MASK << C0_3_PSTATE_NEW_SHIFT);
                tmp |= (index << C0_3_PSTATE_NEW_SHIFT);

                __raw_writel(tmp, dvfs_info->base + XMU_C0_3_PSTATE + i * 4);
        }
out:
        mutex_unlock(&cpufreq_lock);
        return ret;
}

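/*
 * Bottom half of the P-state change interrupt. Reads XMU_P_STATUS to find
 * the P-state the hardware actually settled on, updates cur_frequency,
 * sends the POSTCHANGE notification and re-enables the interrupt that was
 * masked in the hard IRQ handler.
 */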
static void exynos_cpufreq_work(struct work_struct *work)
{
        unsigned int cur_pstate, index;
        struct cpufreq_policy *policy = cpufreq_cpu_get(0); /* boot CPU */
        struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table;

        /* Ensure we can access cpufreq structures */
        if (unlikely(dvfs_info->dvfs_enabled == false))
                goto skip_work;

        mutex_lock(&cpufreq_lock);
        freqs.old = dvfs_info->cur_frequency;

        cur_pstate = __raw_readl(dvfs_info->base + XMU_P_STATUS);
        if (cur_pstate >> C0_3_PSTATE_VALID_SHIFT & 0x1)
                index = (cur_pstate >> C0_3_PSTATE_CURR_SHIFT) & P_VALUE_MASK;
        else
                index = (cur_pstate >> C0_3_PSTATE_NEW_SHIFT) & P_VALUE_MASK;

        if (likely(index < dvfs_info->freq_count)) {
                freqs.new = freq_table[index].frequency;
                dvfs_info->cur_frequency = freqs.new;
        } else {
                dev_crit(dvfs_info->dev, "New frequency out of range\n");
                freqs.new = dvfs_info->cur_frequency;
        }
        cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);

        cpufreq_cpu_put(policy);
        mutex_unlock(&cpufreq_lock);
skip_work:
        enable_irq(dvfs_info->irq);
}

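/*
 * Hard IRQ handler: acknowledge the PSTATE_CHANGED bit, mask the interrupt
 * and defer the cpufreq notification to the workqueue.
 */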
static irqreturn_t exynos_cpufreq_irq(int irq, void *id)
{
        unsigned int tmp;

        tmp = __raw_readl(dvfs_info->base + XMU_PMUIRQ);
        if (tmp >> PSTATE_CHANGED_SHIFT & 0x1) {
                __raw_writel(tmp, dvfs_info->base + XMU_PMUIRQ);
                disable_irq_nosync(irq);
                schedule_work(&dvfs_info->irq_work);
        }
        return IRQ_HANDLED;
}

static void exynos_sort_descend_freq_table(void)
{
        struct cpufreq_frequency_table *freq_tbl = dvfs_info->freq_table;
        int i = 0, index;
        unsigned int tmp_freq;
        /*
         * Exynos5440 clock controller state logic expects the cpufreq table
         * to be in descending order. But the OPP library constructs the table
         * in ascending order. So to make the table descending we just need to
         * swap the i-th element with the (N - i - 1)-th element.
         */
        for (i = 0; i < dvfs_info->freq_count / 2; i++) {
                index = dvfs_info->freq_count - i - 1;
                tmp_freq = freq_tbl[i].frequency;
                freq_tbl[i].frequency = freq_tbl[index].frequency;
                freq_tbl[index].frequency = tmp_freq;
        }
}

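/*
 * Per-policy init: validate the frequency table, publish the current
 * frequency and transition latency, and mark all CPUs as affected, since
 * they are scaled together by this controller.
 */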
static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
        int ret;

        ret = cpufreq_frequency_table_cpuinfo(policy, dvfs_info->freq_table);
        if (ret) {
                dev_err(dvfs_info->dev, "Invalid frequency table: %d\n", ret);
                return ret;
        }

        policy->cur = dvfs_info->cur_frequency;
        policy->cpuinfo.transition_latency = dvfs_info->latency;
        cpumask_setall(policy->cpus);

        cpufreq_frequency_table_get_attr(dvfs_info->freq_table, policy->cpu);

        return 0;
}

static struct cpufreq_driver exynos_driver = {
        .flags          = CPUFREQ_STICKY,
        .verify         = exynos_verify_speed,
        .target         = exynos_target,
        .get            = exynos_getspeed,
        .init           = exynos_cpufreq_cpu_init,
        .name           = CPUFREQ_NAME,
};

static const struct of_device_id exynos_cpufreq_match[] = {
        {
                .compatible = "samsung,exynos5440-cpufreq",
        },
        {},
};
MODULE_DEVICE_TABLE(of, exynos_cpufreq_match);

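/*
 * Probe: map the controller registers, parse the interrupt, build the
 * cpufreq table from the device-tree OPPs (sorted into the descending
 * order the controller expects), program the divider table, enable DVFS
 * and register the cpufreq driver.
 */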
static int exynos_cpufreq_probe(struct platform_device *pdev)
{
        int ret = -EINVAL;
        struct device_node *np;
        struct resource res;

        np = pdev->dev.of_node;
        if (!np)
                return -ENODEV;

        dvfs_info = devm_kzalloc(&pdev->dev, sizeof(*dvfs_info), GFP_KERNEL);
        if (!dvfs_info) {
                ret = -ENOMEM;
                goto err_put_node;
        }

        dvfs_info->dev = &pdev->dev;

        ret = of_address_to_resource(np, 0, &res);
        if (ret)
                goto err_put_node;

        dvfs_info->base = devm_ioremap_resource(dvfs_info->dev, &res);
        if (IS_ERR(dvfs_info->base)) {
                ret = PTR_ERR(dvfs_info->base);
                goto err_put_node;
        }

        dvfs_info->irq = irq_of_parse_and_map(np, 0);
        if (!dvfs_info->irq) {
                dev_err(dvfs_info->dev, "No cpufreq irq found\n");
                ret = -ENODEV;
                goto err_put_node;
        }

        ret = of_init_opp_table(dvfs_info->dev);
        if (ret) {
                dev_err(dvfs_info->dev, "failed to init OPP table: %d\n", ret);
                goto err_put_node;
        }

        ret = opp_init_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
        if (ret) {
                dev_err(dvfs_info->dev,
                        "failed to init cpufreq table: %d\n", ret);
                goto err_put_node;
        }
        dvfs_info->freq_count = opp_get_opp_count(dvfs_info->dev);
        exynos_sort_descend_freq_table();

        if (of_property_read_u32(np, "clock-latency", &dvfs_info->latency))
                dvfs_info->latency = DEF_TRANS_LATENCY;

        dvfs_info->cpu_clk = devm_clk_get(dvfs_info->dev, "armclk");
        if (IS_ERR(dvfs_info->cpu_clk)) {
                dev_err(dvfs_info->dev, "Failed to get cpu clock\n");
                ret = PTR_ERR(dvfs_info->cpu_clk);
                goto err_free_table;
        }

        dvfs_info->cur_frequency = clk_get_rate(dvfs_info->cpu_clk);
        if (!dvfs_info->cur_frequency) {
                dev_err(dvfs_info->dev, "Failed to get clock rate\n");
                ret = -EINVAL;
                goto err_free_table;
        }
        dvfs_info->cur_frequency /= 1000;

        INIT_WORK(&dvfs_info->irq_work, exynos_cpufreq_work);
        ret = devm_request_irq(dvfs_info->dev, dvfs_info->irq,
                                exynos_cpufreq_irq, IRQF_TRIGGER_NONE,
                                CPUFREQ_NAME, dvfs_info);
        if (ret) {
                dev_err(dvfs_info->dev, "Failed to register IRQ\n");
                goto err_free_table;
        }

        ret = init_div_table();
        if (ret) {
                dev_err(dvfs_info->dev, "Failed to initialise div table\n");
                goto err_free_table;
        }

        exynos_enable_dvfs();
        ret = cpufreq_register_driver(&exynos_driver);
        if (ret) {
                dev_err(dvfs_info->dev,
                        "%s: failed to register cpufreq driver\n", __func__);
                goto err_free_table;
        }

        of_node_put(np);
        dvfs_info->dvfs_enabled = true;
        return 0;

err_free_table:
        opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
err_put_node:
        of_node_put(np);
        /* dvfs_info may still be NULL here, so report against the pdev */
        dev_err(&pdev->dev, "%s: failed initialization\n", __func__);
        return ret;
}

static int exynos_cpufreq_remove(struct platform_device *pdev)
{
        cpufreq_unregister_driver(&exynos_driver);
        opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
        return 0;
}

static struct platform_driver exynos_cpufreq_platdrv = {
        .driver = {
                .name   = "exynos5440-cpufreq",
                .owner  = THIS_MODULE,
                .of_match_table = exynos_cpufreq_match,
        },
        .probe          = exynos_cpufreq_probe,
        .remove         = exynos_cpufreq_remove,
};
module_platform_driver(exynos_cpufreq_platdrv);

MODULE_AUTHOR("Amit Daniel Kachhap <amit.daniel@samsung.com>");
MODULE_DESCRIPTION("Exynos5440 cpufreq driver");
MODULE_LICENSE("GPL");