/*
 * Copyright 2008-2011 Freescale Semiconductor, Inc. All Rights Reserved.
 */

/*
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */

/*!
 * @file dvfs_core.c
 *
 * @brief A simplified driver for the Freescale Semiconductor MXC DVFS module.
 *
 * Upon initialization, the DVFS driver initializes the DVFS hardware,
 * sets up driver nodes, attaches to the DVFS interrupt and initializes
 * internal data structures. When the DVFS interrupt occurs, the driver
 * checks the cause of the interrupt (decrease frequency, increase frequency
 * or emergency) and changes the CPU voltage according to a translation
 * table that is loaded into the driver.
 *
 * @ingroup PM
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/fs.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/device.h>
#include <linux/sysdev.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/regulator/consumer.h>
#include <linux/input.h>
#include <linux/platform_device.h>
#if defined(CONFIG_CPU_FREQ)
#include <linux/cpufreq.h>
#endif
#include <asm/cpu.h>

#include <mach/hardware.h>
#include <mach/mxc_dvfs.h>

#define MXC_DVFSTHRS_UPTHR_MASK               0x0FC00000
#define MXC_DVFSTHRS_UPTHR_OFFSET             22
#define MXC_DVFSTHRS_DNTHR_MASK               0x003F0000
#define MXC_DVFSTHRS_DNTHR_OFFSET             16
#define MXC_DVFSTHRS_PNCTHR_MASK              0x0000003F
#define MXC_DVFSTHRS_PNCTHR_OFFSET            0

#define MXC_DVFSCOUN_DNCNT_MASK               0x00FF0000
#define MXC_DVFSCOUN_DNCNT_OFFSET             16
#define MXC_DVFSCOUN_UPCNT_MASK               0x000000FF
#define MXC_DVFSCOUN_UPCNT_OFFSET             0

#define MXC_DVFSEMAC_EMAC_MASK                0x000001FF
#define MXC_DVFSEMAC_EMAC_OFFSET              0

#define MXC_DVFSCNTR_DVFEV                    0x10000000
#define MXC_DVFSCNTR_LBMI                     0x08000000
#define MXC_DVFSCNTR_LBFL                     0x06000000
#define MXC_DVFSCNTR_DVFIS                    0x01000000
#define MXC_DVFSCNTR_FSVAIM                   0x00400000
#define MXC_DVFSCNTR_FSVAI_MASK               0x00300000
#define MXC_DVFSCNTR_FSVAI_OFFSET             20
#define MXC_DVFSCNTR_WFIM                     0x00080000
#define MXC_DVFSCNTR_WFIM_OFFSET              19
#define MXC_DVFSCNTR_MAXF_MASK                0x00040000
#define MXC_DVFSCNTR_MAXF_OFFSET              18
#define MXC_DVFSCNTR_MINF_MASK                0x00020000
#define MXC_DVFSCNTR_MINF_OFFSET              17
#define MXC_DVFSCNTR_LTBRSR_MASK              0x00000018
#define MXC_DVFSCNTR_LTBRSR_OFFSET            3
#define MXC_DVFSCNTR_DVFEN                    0x00000001

#define CCM_CDCR_SW_DVFS_EN                     0x20
#define CCM_CDCR_ARM_FREQ_SHIFT_DIVIDER         0x4
#define CCM_CDHIPR_ARM_PODF_BUSY                0x10000

int dvfs_core_is_active;
static struct mxc_dvfs_platform_data *dvfs_data;
static struct device *dvfs_dev;
static struct cpu_op *cpu_op_tbl;
static int dvfs_core_resume;
static int curr_op;
static int old_op;
static int dvfs_core_op;
static int dvfs_config_setpoint;

static int maxf;
static int minf;

extern void setup_pll(void);
extern int cpufreq_trig_needed;
extern int (*set_cpu_voltage)(u32 cpu_volt);

struct timeval core_prev_intr;

void dump_dvfs_core_regs(void);
void stop_dvfs(void);
struct dvfs_op *(*get_dvfs_core_op)(int *op);

static struct delayed_work dvfs_core_handler;

/*
 * Clock structures
 */
static struct clk *pll1_sw_clk;
static struct clk *cpu_clk;
static struct clk *dvfs_clk;

static int cpu_op_nr;
extern struct cpu_op *(*get_cpu_op)(int *op);
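/*
 * Rescale a loops_per_jiffy value by mult/div.  Used by the callers below
 * to patch up per-CPU loops_per_jiffy after the CPU frequency has been
 * changed outside of cpufreq's notifier path.
 */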
static inline unsigned long dvfs_cpu_jiffies(unsigned long old, u_int div, u_int mult)
{
#if BITS_PER_LONG == 32

        u64 result = ((u64) old) * ((u64) mult);
        do_div(result, div);
        return (unsigned long) result;

#elif BITS_PER_LONG == 64

        unsigned long result = old * ((u64) mult);
        result /= div;
        return result;

#endif
}

enum {
        FSVAI_FREQ_NOCHANGE = 0x0,
        FSVAI_FREQ_INCREASE,
        FSVAI_FREQ_DECREASE,
        FSVAI_FREQ_EMERG,
};

/*
 * Load tracking buffer source: 1 for ld_add; 0 for pre_ld_add; 2 for after EMA
 */
#define DVFS_LTBRSR             (2 << MXC_DVFSCNTR_LTBRSR_OFFSET)

static struct dvfs_op *dvfs_core_setpoint;
extern int low_bus_freq_mode;
extern int high_bus_freq_mode;
extern int set_low_bus_freq(void);
extern int set_high_bus_freq(int high_bus_speed);
extern int low_freq_bus_used(void);

DEFINE_SPINLOCK(mxc_dvfs_core_lock);

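/*
 * Program the up/down/panic thresholds, the up/down counters and the EMAC
 * value of the given set point into the DVFS core, and remember which set
 * point is currently loaded.
 */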
static void dvfs_load_config(int set_point)
{
        u32 reg;
        reg = 0;

        reg |= dvfs_core_setpoint[set_point].upthr << MXC_DVFSTHRS_UPTHR_OFFSET;
        reg |= dvfs_core_setpoint[set_point].downthr <<
            MXC_DVFSTHRS_DNTHR_OFFSET;
        reg |= dvfs_core_setpoint[set_point].panicthr;
        __raw_writel(reg, dvfs_data->membase + MXC_DVFSCORE_THRS);

        reg = 0;
        reg |= dvfs_core_setpoint[set_point].downcnt <<
            MXC_DVFSCOUN_DNCNT_OFFSET;
        reg |= dvfs_core_setpoint[set_point].upcnt << MXC_DVFSCOUN_UPCNT_OFFSET;
        __raw_writel(reg, dvfs_data->membase + MXC_DVFSCORE_COUN);

        /* Set EMAC value */
        reg = __raw_readl(dvfs_data->membase + MXC_DVFSCORE_EMAC);
        reg &= ~MXC_DVFSEMAC_EMAC_MASK;
        reg |= dvfs_core_setpoint[set_point].emac << MXC_DVFSEMAC_EMAC_OFFSET;
        __raw_writel(reg, dvfs_data->membase + MXC_DVFSCORE_EMAC);

        dvfs_config_setpoint = set_point;
}

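/*
 * i.MX5 frequency change.  If the new operating point uses a different PLL
 * rate, the PLL is relocked and the change is driven through the GPC
 * (voltage is raised first when going up); otherwise only the ARM_PODF
 * divider is reprogrammed and the GPC sequences the update.
 */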
static int mx5_set_cpu_freq(int op)
{
        int arm_podf;
        int podf;
        int vinc = 0;
        int ret = 0;
        int org_cpu_rate;
        unsigned long rate = 0;
        int gp_volt = 0;
        u32 reg;
        u32 reg1;
        u32 en_sw_dvfs = 0;
        unsigned long flags;

        if (cpu_op_tbl[op].pll_rate != cpu_op_tbl[old_op].pll_rate) {
                org_cpu_rate = clk_get_rate(cpu_clk);
                rate = cpu_op_tbl[op].cpu_rate;

                if (org_cpu_rate == rate)
                        return ret;

                gp_volt = cpu_op_tbl[op].cpu_voltage;
                if (gp_volt == 0)
                        return ret;

                /* Set the voltage for the GP domain. */
                if (rate > org_cpu_rate) {
                        ret = set_cpu_voltage(gp_volt);
                        if (ret < 0) {
                                printk(KERN_DEBUG "COULD NOT SET GP VOLTAGE\n");
                                return ret;
                        }
                        udelay(dvfs_data->delay_time);
                }
                spin_lock_irqsave(&mxc_dvfs_core_lock, flags);
                /* PLL_RELOCK, set ARM_FREQ_SHIFT_DIVIDER */
                reg = __raw_readl(ccm_base + dvfs_data->ccm_cdcr_offset);
                /* Check if software_dvfs_en bit set */
                if ((reg & CCM_CDCR_SW_DVFS_EN) != 0)
                        en_sw_dvfs = CCM_CDCR_SW_DVFS_EN;
                else
                        en_sw_dvfs = 0x0;
                reg &= ~(CCM_CDCR_SW_DVFS_EN);
                reg &= 0xFFFFFFFB;
                __raw_writel(reg, ccm_base + dvfs_data->ccm_cdcr_offset);

                setup_pll();
                /* START the GPC main control FSM */
                /* set VINC */
                reg = __raw_readl(gpc_base + dvfs_data->gpc_vcr_offset);
                reg &= ~(MXC_GPCVCR_VINC_MASK | MXC_GPCVCR_VCNTU_MASK |
                         MXC_GPCVCR_VCNT_MASK);

                if (rate > org_cpu_rate)
                        reg |= 1 << MXC_GPCVCR_VINC_OFFSET;

                reg |= (1 << MXC_GPCVCR_VCNTU_OFFSET) |
                       (1 << MXC_GPCVCR_VCNT_OFFSET);
                __raw_writel(reg, gpc_base + dvfs_data->gpc_vcr_offset);

                reg = __raw_readl(gpc_base + dvfs_data->gpc_cntr_offset);
                reg &= ~(MXC_GPCCNTR_ADU_MASK | MXC_GPCCNTR_FUPD_MASK);
                reg |= MXC_GPCCNTR_FUPD;
                reg |= MXC_GPCCNTR_ADU;
                __raw_writel(reg, gpc_base + dvfs_data->gpc_cntr_offset);

                reg |= MXC_GPCCNTR_STRT;
                __raw_writel(reg, gpc_base + dvfs_data->gpc_cntr_offset);
                while (__raw_readl(gpc_base + dvfs_data->gpc_cntr_offset)
                                & 0x4000)
                        udelay(10);
                spin_unlock_irqrestore(&mxc_dvfs_core_lock, flags);

                if (rate < org_cpu_rate) {
                        ret = set_cpu_voltage(gp_volt);
                        if (ret < 0) {
                                printk(KERN_DEBUG
                                       "COULD NOT SET GP VOLTAGE!!!!\n");
                                return ret;
                        }
                        udelay(dvfs_data->delay_time);
                }
                /* set software_dvfs_en bit back to original setting */
                reg = __raw_readl(ccm_base + dvfs_data->ccm_cdcr_offset);
                reg &= ~(CCM_CDCR_SW_DVFS_EN);
                reg |= en_sw_dvfs;
                /* write the restored value back to CDCR */
                __raw_writel(reg, ccm_base + dvfs_data->ccm_cdcr_offset);
                clk_set_rate(cpu_clk, rate);
        } else {
                podf = cpu_op_tbl[op].cpu_podf;
                gp_volt = cpu_op_tbl[op].cpu_voltage;

                /* Change arm_podf only */
                /* set ARM_FREQ_SHIFT_DIVIDER */
                reg = __raw_readl(ccm_base + dvfs_data->ccm_cdcr_offset);

                /* Check if software_dvfs_en bit set */
                if ((reg & CCM_CDCR_SW_DVFS_EN) != 0)
                        en_sw_dvfs = CCM_CDCR_SW_DVFS_EN;
                else
                        en_sw_dvfs = 0x0;

                reg &= ~(CCM_CDCR_SW_DVFS_EN | CCM_CDCR_ARM_FREQ_SHIFT_DIVIDER);
                reg |= CCM_CDCR_ARM_FREQ_SHIFT_DIVIDER;
                __raw_writel(reg, ccm_base + dvfs_data->ccm_cdcr_offset);

                /* Get ARM_PODF */
                reg = __raw_readl(ccm_base + dvfs_data->ccm_cacrr_offset);
                arm_podf = reg & 0x07;
                if (podf == arm_podf) {
                        printk(KERN_DEBUG
                               "No need to change freq and voltage!!!!\n");
                        return 0;
                }
                /* Check if FSVAI indicate freq up */
                if (podf < arm_podf) {
                        ret = set_cpu_voltage(gp_volt);
                        if (ret < 0) {
                                printk(KERN_DEBUG
                                       "COULD NOT SET GP VOLTAGE!!!!\n");
                                return ret;
                        }
                        udelay(dvfs_data->delay_time);
                        vinc = 1;
                } else {
                        vinc = 0;
                }

                arm_podf = podf;
                /* Set ARM_PODF */
                reg &= 0xFFFFFFF8;
                reg |= arm_podf;
                spin_lock_irqsave(&mxc_dvfs_core_lock, flags);

                reg1 = __raw_readl(ccm_base + dvfs_data->ccm_cdhipr_offset);
                while (1) {
                        if ((reg1 & CCM_CDHIPR_ARM_PODF_BUSY) == 0) {
                                __raw_writel(reg,
                                        ccm_base + dvfs_data->ccm_cacrr_offset);
                                break;
                        } else {
                                reg1 = __raw_readl(
                                ccm_base + dvfs_data->ccm_cdhipr_offset);
                                printk(KERN_DEBUG "ARM_PODF still busy!!!!\n");
                        }
                }
                /* set VINC */
                reg = __raw_readl(gpc_base + dvfs_data->gpc_vcr_offset);
                reg &=
                    ~(MXC_GPCVCR_VINC_MASK | MXC_GPCVCR_VCNTU_MASK |
                      MXC_GPCVCR_VCNT_MASK);
                reg |= (1 << MXC_GPCVCR_VCNTU_OFFSET) |
                    (100 << MXC_GPCVCR_VCNT_OFFSET) |
                    (vinc << MXC_GPCVCR_VINC_OFFSET);
                __raw_writel(reg, gpc_base + dvfs_data->gpc_vcr_offset);

                reg = __raw_readl(gpc_base + dvfs_data->gpc_cntr_offset);
                reg &= (~(MXC_GPCCNTR_ADU | MXC_GPCCNTR_FUPD
                                | MXC_GPCCNTR_STRT));
                __raw_writel(reg, gpc_base + dvfs_data->gpc_cntr_offset);
                reg = __raw_readl(gpc_base + dvfs_data->gpc_cntr_offset);
                reg |= MXC_GPCCNTR_ADU | MXC_GPCCNTR_FUPD;
                __raw_writel(reg, gpc_base + dvfs_data->gpc_cntr_offset);
                reg = __raw_readl(gpc_base + dvfs_data->gpc_cntr_offset);
                reg |= MXC_GPCCNTR_STRT;
                __raw_writel(reg, gpc_base + dvfs_data->gpc_cntr_offset);

                /* Wait for arm podf Enable */
                while ((__raw_readl(gpc_base + dvfs_data->gpc_cntr_offset) &
                        MXC_GPCCNTR_STRT) == MXC_GPCCNTR_STRT) {
                        printk(KERN_DEBUG "Waiting for arm_podf to be enabled\n");
                        udelay(10);
                }
                spin_unlock_irqrestore(&mxc_dvfs_core_lock, flags);

                if (vinc == 0) {
                        ret = set_cpu_voltage(gp_volt);
                        if (ret < 0) {
                                printk(KERN_DEBUG
                                       "COULD NOT SET GP VOLTAGE!!!\n");
                                return ret;
                        }
                        udelay(dvfs_data->delay_time);
                }

                /* Clear the ARM_FREQ_SHIFT_DIVIDER and */
                /* set software_dvfs_en bit back to original setting */
                reg = __raw_readl(ccm_base + dvfs_data->ccm_cdcr_offset);
                reg &= ~(CCM_CDCR_SW_DVFS_EN | CCM_CDCR_ARM_FREQ_SHIFT_DIVIDER);
                reg |= en_sw_dvfs;
                __raw_writel(reg, ccm_base + dvfs_data->ccm_cdcr_offset);
        }
        return ret;
}

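/*
 * i.MX6 frequency change: the CPU clock can be set directly with
 * clk_set_rate().  Raise the voltage before going faster; lower it only
 * after the frequency has been reduced.
 */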
static int mx6_set_cpu_freq(int op)
{
        int ret = 0;
        int org_cpu_rate;
        unsigned long rate = 0;
        int gp_volt = cpu_op_tbl[op].cpu_voltage;

        org_cpu_rate = clk_get_rate(cpu_clk);
        rate = cpu_op_tbl[op].cpu_rate;

        if (rate == org_cpu_rate)
                return ret;

        if (rate > org_cpu_rate) {
                /* Increase voltage first. */
                ret = set_cpu_voltage(gp_volt);
                if (ret < 0) {
                        printk(KERN_DEBUG "COULD NOT INCREASE GP VOLTAGE!!!!\n");
                        return ret;
                }
                udelay(dvfs_data->delay_time);
        }
        ret = clk_set_rate(cpu_clk, rate);
        if (ret != 0) {
                printk(KERN_DEBUG "cannot set CPU clock rate\n");
                return ret;
        }

        if (rate < org_cpu_rate) {
                /* Decrease voltage after the frequency drop. */
                ret = set_cpu_voltage(gp_volt);
                if (ret < 0) {
                        printk(KERN_DEBUG "COULD NOT DECREASE GP VOLTAGE!!!!\n");
                        return ret;
                }
        }
        return ret;
}


static int set_cpu_freq(int op)
{
        int ret = 0;

        if (cpu_is_mx6q())
                ret = mx6_set_cpu_freq(op);
        else
                ret = mx5_set_cpu_freq(op);

        cpufreq_trig_needed = 1;
        old_op = op;
        return ret;
}

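/*
 * Enable the DVFS core: pick the operating point that matches the current
 * CPU rate, load its thresholds, route the DVFS request through the GPC to
 * the ARM IRQ, program PREDIV/DIV3CK and the load tracking buffer source,
 * then enable the block (on i.MX6 by setting the per-CPU WFI tracking bits).
 */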
static int start_dvfs(void)
{
        u32 reg, cpu_rate;
        unsigned long flags;

        if (dvfs_core_is_active)
                return 0;

        spin_lock_irqsave(&mxc_dvfs_core_lock, flags);

        clk_enable(dvfs_clk);

        /* get current working point */
        cpu_rate = clk_get_rate(cpu_clk);
        curr_op = cpu_op_nr - 1;
        do {
                if (cpu_rate <= cpu_op_tbl[curr_op].cpu_rate)
                        break;
        } while (--curr_op >= 0);
        old_op = curr_op;

        dvfs_load_config(curr_op);

        if (curr_op == 0)
                maxf = 1;
        else
                maxf = 0;
        if (curr_op == (cpu_op_nr - 1))
                minf = 1;
        else
                minf = 0;

        /* config reg GPC_CNTR */
        reg = __raw_readl(gpc_base + dvfs_data->gpc_cntr_offset);

        reg &= ~MXC_GPCCNTR_GPCIRQM;
        /* GPCIRQ=1, select ARM IRQ */
        reg |= MXC_GPCCNTR_GPCIRQ_ARM;
        /* ADU=1, select ARM domain */
        if (!cpu_is_mx6q())
                reg |= MXC_GPCCNTR_ADU;
        __raw_writel(reg, gpc_base + dvfs_data->gpc_cntr_offset);

        /* Set PREDIV bits */
        reg = __raw_readl(dvfs_data->membase + MXC_DVFSCORE_CNTR);
        reg = (reg & ~(dvfs_data->prediv_mask));
        reg |= (dvfs_data->prediv_val) << (dvfs_data->prediv_offset);
        __raw_writel(reg, dvfs_data->membase + MXC_DVFSCORE_CNTR);

        /* Enable DVFS interrupt */
        reg = __raw_readl(dvfs_data->membase + MXC_DVFSCORE_CNTR);
        /* FSVAIM=0 */
        reg = (reg & ~MXC_DVFSCNTR_FSVAIM);
        /* Set MAXF, MINF */
        if (!cpu_is_mx6q()) {
                reg = (reg & ~(MXC_DVFSCNTR_MAXF_MASK
                                        | MXC_DVFSCNTR_MINF_MASK));
                reg |= 1 << MXC_DVFSCNTR_MAXF_OFFSET;
        }
        /* Select ARM domain */
        reg |= MXC_DVFSCNTR_DVFIS;
        /* Enable DVFS frequency adjustment interrupt */
        reg = (reg & ~MXC_DVFSCNTR_FSVAIM);
        /* Set load tracking buffer register source */
        reg = (reg & ~MXC_DVFSCNTR_LTBRSR_MASK);
        reg |= DVFS_LTBRSR;
        /* Set DIV3CK */
        reg = (reg & ~(dvfs_data->div3ck_mask));
        reg |= (dvfs_data->div3ck_val) << (dvfs_data->div3ck_offset);
        __raw_writel(reg, dvfs_data->membase + MXC_DVFSCORE_CNTR);

        /* Enable DVFS */
        if (cpu_is_mx6q()) {
                unsigned long cpu_wfi = 0;
                int num_cpus = num_possible_cpus();
                reg = __raw_readl(dvfs_data->membase + MXC_DVFSCORE_EMAC);
                /* Need to enable DVFS tracking for each core that is active */
                do {
                        if (cpu_active(num_cpus))
                                set_bit(num_cpus, &cpu_wfi);
                } while (num_cpus--);
                reg |= cpu_wfi << 9;
                __raw_writel(reg, dvfs_data->membase + MXC_DVFSCORE_EMAC);
        } else {
                reg = __raw_readl(dvfs_data->membase + MXC_DVFSCORE_CNTR);
                reg |= MXC_DVFSCNTR_DVFEN;
                __raw_writel(reg, dvfs_data->membase + MXC_DVFSCORE_CNTR);
        }

        dvfs_core_is_active = 1;

        spin_unlock_irqrestore(&mxc_dvfs_core_lock, flags);

        printk(KERN_DEBUG "DVFS is started\n");

        return 0;
}

/*!
 * This function is called for module initialization.
 * It sets up the DVFS hardware.
 * It sets default values for the DVFS thresholds and counters. The default
 * values were chosen from a set of different reasonable values. They were
 * tested and the defaults in the driver gave the best results.
 * More work should be done to find optimal values.
 *
 * @return   0 if successful; non-zero otherwise.
 *
 */
static int init_dvfs_controller(void)
{
        /* DVFS loading config */
        dvfs_load_config(0);

        return 0;
}

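/*
 * DVFS interrupt handler: bail out unless DVFS0 (ARM) raised the request,
 * then mask the DVFS and GPC1 interrupts and defer the actual frequency /
 * voltage change to the workqueue handler below.
 */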
static irqreturn_t dvfs_irq(int irq, void *dev_id)
{
        u32 reg;

        /* Check if DVFS0 (ARM) is requesting a frequency/voltage update */
        if ((__raw_readl(gpc_base + dvfs_data->gpc_cntr_offset)
                        & MXC_GPCCNTR_DVFS0CR) == 0)
                return IRQ_NONE;

        /* Mask DVFS irq */
        reg = __raw_readl(dvfs_data->membase + MXC_DVFSCORE_CNTR);
        /* FSVAIM=1 */
        reg |= MXC_DVFSCNTR_FSVAIM;
        __raw_writel(reg, dvfs_data->membase + MXC_DVFSCORE_CNTR);

        /* Mask GPC1 irq */
        reg = __raw_readl(gpc_base + dvfs_data->gpc_cntr_offset);
        reg |= MXC_GPCCNTR_GPCIRQM | 0x1000000;
        __raw_writel(reg, gpc_base + dvfs_data->gpc_cntr_offset);

        schedule_delayed_work(&dvfs_core_handler, 0);
        return IRQ_HANDLED;
}

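/*
 * Deferred DVFS work: read FSVAI, step the current operating point up or
 * down (coordinating with the LP bus frequency / LPAPM mode), apply it via
 * set_cpu_freq(), fix up loops_per_jiffy and finally re-enable the DVFS and
 * GPC interrupts.
 */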
static void dvfs_core_work_handler(struct work_struct *work)
{
        u32 fsvai;
        u32 reg;
        u32 curr_cpu = 0;
        int ret = 0;
        int low_freq_bus_ready = 0;
        int bus_incr = 0, cpu_dcr = 0;
        int cpu;

        low_freq_bus_ready = low_freq_bus_used();

        /* Check DVFS frequency adjustment interrupt status */
        reg = __raw_readl(dvfs_data->membase + MXC_DVFSCORE_CNTR);
        fsvai = (reg & MXC_DVFSCNTR_FSVAI_MASK) >> MXC_DVFSCNTR_FSVAI_OFFSET;
        /* FSVAI = 0 means no frequency change is required */
        if (fsvai == FSVAI_FREQ_NOCHANGE) {
                /* Do nothing. Freq change is not required */
                goto END;
        }
        curr_cpu = clk_get_rate(cpu_clk);
        /* If FSVAI indicates a frequency decrease, check that arm-clk is not
           already at its lowest frequency */
        if (fsvai == FSVAI_FREQ_DECREASE) {
                if (curr_cpu <= cpu_op_tbl[cpu_op_nr - 1].cpu_rate) {
                        minf = 1;
                        if (low_bus_freq_mode)
                                goto END;
                } else {
                        /* freq down */
                        curr_op++;
                        maxf = 0;
                        if (curr_op >= cpu_op_nr) {
                                curr_op = cpu_op_nr - 1;
                                goto END;
                        }
                        cpu_dcr = 1;
                        dvfs_load_config(curr_op);
                }
        } else {
                if (curr_cpu == cpu_op_tbl[0].cpu_rate) {
                        maxf = 1;
                        goto END;
                } else {
                        if (!high_bus_freq_mode &&
                                dvfs_config_setpoint == (cpu_op_nr + 1)) {
                                /* bump up LP freq first. */
                                bus_incr = 1;
                                dvfs_load_config(cpu_op_nr);
                        } else {
                                /* freq up */
                                curr_op = 0;
                                maxf = 1;
                                minf = 0;
                                dvfs_load_config(0);
                        }
                }
        }

        low_freq_bus_ready = low_freq_bus_used();
        if ((curr_op == cpu_op_nr - 1) && (!low_bus_freq_mode)
            && (low_freq_bus_ready) && !bus_incr) {
                if (!minf)
                        set_cpu_freq(curr_op);
                /* If dvfs_core_op is greater than cpu_op_nr, it implies
                 * we support LPAPM mode for this platform.
                 */
                if (dvfs_core_op > cpu_op_nr) {
                        set_low_bus_freq();
                        dvfs_load_config(cpu_op_nr + 1);
                }
        } else {
                if (!high_bus_freq_mode)
                        set_high_bus_freq(1);
                if (!bus_incr)
                        ret = set_cpu_freq(curr_op);
                bus_incr = 0;
        }

END:
        if (cpufreq_trig_needed == 1) {
                /* Fix loops-per-jiffy */
                cpufreq_trig_needed = 0;
                for_each_online_cpu(cpu)
                        per_cpu(cpu_data, cpu).loops_per_jiffy =
                        dvfs_cpu_jiffies(per_cpu(cpu_data, cpu).loops_per_jiffy,
                                curr_cpu / 1000, clk_get_rate(cpu_clk) / 1000);
        }

        /* Set MAXF, MINF */
        reg = __raw_readl(dvfs_data->membase + MXC_DVFSCORE_CNTR);
        reg = (reg & ~(MXC_DVFSCNTR_MAXF_MASK | MXC_DVFSCNTR_MINF_MASK));
        reg |= maxf << MXC_DVFSCNTR_MAXF_OFFSET;
        reg |= minf << MXC_DVFSCNTR_MINF_OFFSET;

        /* Enable DVFS interrupt */
        /* FSVAIM=0 */
        reg = (reg & ~MXC_DVFSCNTR_FSVAIM);
        reg |= FSVAI_FREQ_NOCHANGE;
        /* LBFL=1 */
        reg = (reg & ~MXC_DVFSCNTR_LBFL);
        reg |= MXC_DVFSCNTR_LBFL;
        __raw_writel(reg, dvfs_data->membase + MXC_DVFSCORE_CNTR);
        /* Unmask GPC1 IRQ */
        reg = __raw_readl(gpc_base + dvfs_data->gpc_cntr_offset);
        reg &= ~MXC_GPCCNTR_GPCIRQM;
        __raw_writel(reg, gpc_base + dvfs_data->gpc_cntr_offset);

}


/*!
 * This function disables the DVFS module.
 */
void stop_dvfs(void)
{
        u32 reg = 0;
        unsigned long flags;
        u32 curr_cpu;
        int cpu;

        if (dvfs_core_is_active) {

                /* Mask dvfs irq, disable DVFS */
                reg = __raw_readl(dvfs_data->membase
                                  + MXC_DVFSCORE_CNTR);
                /* FSVAIM=1 */
                reg |= MXC_DVFSCNTR_FSVAIM;
                __raw_writel(reg, dvfs_data->membase
                                  + MXC_DVFSCORE_CNTR);

                curr_op = 0;
                if (!high_bus_freq_mode)
                        set_high_bus_freq(1);

                curr_cpu = clk_get_rate(cpu_clk);
                if (curr_cpu != cpu_op_tbl[curr_op].cpu_rate) {
                        set_cpu_freq(curr_op);

                        /* Fix loops-per-jiffy */
                        for_each_online_cpu(cpu)
                                per_cpu(cpu_data, cpu).loops_per_jiffy =
                                dvfs_cpu_jiffies(per_cpu(cpu_data, cpu).loops_per_jiffy,
                                        curr_cpu/1000, clk_get_rate(cpu_clk) / 1000);

                }
                spin_lock_irqsave(&mxc_dvfs_core_lock, flags);

                reg = __raw_readl(dvfs_data->membase
                                  + MXC_DVFSCORE_CNTR);
                reg = (reg & ~MXC_DVFSCNTR_DVFEN);
                __raw_writel(reg, dvfs_data->membase
                                  + MXC_DVFSCORE_CNTR);

                spin_unlock_irqrestore(&mxc_dvfs_core_lock, flags);

                dvfs_core_is_active = 0;

                clk_disable(dvfs_clk);
        }

        printk(KERN_DEBUG "DVFS is stopped\n");
}

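/*
 * Dump the DVFS core register file (the offsets below are relative to
 * MXC_DVFSCORE_THRS) and report when two dumps occur less than ~90 ms apart.
 */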
void dump_dvfs_core_regs(void)
{
        struct timeval cur;
        u32 diff = 0;
        if (core_prev_intr.tv_sec == 0)
                do_gettimeofday(&core_prev_intr);
        else {
                do_gettimeofday(&cur);
                diff = (cur.tv_sec - core_prev_intr.tv_sec)*1000000
                         + (cur.tv_usec - core_prev_intr.tv_usec);
                core_prev_intr = cur;
        }
        if (diff < 90000)
                printk(KERN_DEBUG "diff = %u\n", diff);

        printk(KERN_INFO "THRS = 0x%08x\n",
                        __raw_readl(dvfs_data->membase
                                    + MXC_DVFSCORE_THRS));
        printk(KERN_INFO "COUNT = 0x%08x\n",
                        __raw_readl(dvfs_data->membase
                                    + MXC_DVFSCORE_THRS + 0x04));
        printk(KERN_INFO "SIG1 = 0x%08x\n",
                        __raw_readl(dvfs_data->membase
                                    + MXC_DVFSCORE_THRS + 0x08));
        printk(KERN_INFO "SIG0 = 0x%08x\n",
                        __raw_readl(dvfs_data->membase
                                    + MXC_DVFSCORE_THRS + 0x0c));
        printk(KERN_INFO "GPC0 = 0x%08x\n",
                        __raw_readl(dvfs_data->membase
                                    + MXC_DVFSCORE_THRS + 0x10));
        printk(KERN_INFO "GPC1 = 0x%08x\n",
                        __raw_readl(dvfs_data->membase
                                    + MXC_DVFSCORE_THRS + 0x14));
        printk(KERN_INFO "GPBT = 0x%08x\n",
                        __raw_readl(dvfs_data->membase
                                    + MXC_DVFSCORE_THRS + 0x18));
        printk(KERN_INFO "EMAC = 0x%08x\n",
                        __raw_readl(dvfs_data->membase
                                    + MXC_DVFSCORE_THRS + 0x1c));
        printk(KERN_INFO "CNTR = 0x%08x\n",
                        __raw_readl(dvfs_data->membase
                                    + MXC_DVFSCORE_THRS + 0x20));
        printk(KERN_INFO "LTR0_0 = 0x%08x\n",
                        __raw_readl(dvfs_data->membase
                                    + MXC_DVFSCORE_THRS + 0x24));
        printk(KERN_INFO "LTR0_1 = 0x%08x\n",
                        __raw_readl(dvfs_data->membase
                                    + MXC_DVFSCORE_THRS + 0x28));
        printk(KERN_INFO "LTR1_0 = 0x%08x\n",
                        __raw_readl(dvfs_data->membase
                                    + MXC_DVFSCORE_THRS + 0x2c));
        printk(KERN_INFO "LTR1_1 = 0x%08x\n",
                        __raw_readl(dvfs_data->membase
                                    + MXC_DVFSCORE_THRS + 0x30));
        printk(KERN_INFO "PT0 = 0x%08x\n",
                        __raw_readl(dvfs_data->membase
                                    + MXC_DVFSCORE_THRS + 0x34));
        printk(KERN_INFO "PT1 = 0x%08x\n",
                        __raw_readl(dvfs_data->membase
                                    + MXC_DVFSCORE_THRS + 0x38));
        printk(KERN_INFO "PT2 = 0x%08x\n",
                        __raw_readl(dvfs_data->membase
                                    + MXC_DVFSCORE_THRS + 0x3c));
        printk(KERN_INFO "PT3 = 0x%08x\n",
                        __raw_readl(dvfs_data->membase
                                    + MXC_DVFSCORE_THRS + 0x40));
}

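/*
 * sysfs attribute helpers: accessors for the set point 0 down-threshold and
 * down-count, a DVFS enable/disable switch and a register dump trigger.
 */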
static ssize_t downthreshold_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%u\n", dvfs_core_setpoint[0].downthr);
}

static ssize_t downthreshold_store(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf, size_t size)
{
        int ret = 0;
        int val;
        ret = sscanf(buf, "%u", &val);
        dvfs_core_setpoint[0].downthr = val;

        return size;
}

static ssize_t downcount_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%u\n", dvfs_core_setpoint[0].downcnt);
}

static ssize_t downcount_store(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf, size_t size)
{
        int ret = 0;
        int val;
        ret = sscanf(buf, "%u", &val);
        dvfs_core_setpoint[0].downcnt = val;

        return size;
}


static ssize_t dvfs_enable_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        if (dvfs_core_is_active)
                return sprintf(buf, "DVFS is enabled\n");
        else
                return sprintf(buf, "DVFS is disabled\n");
}

static ssize_t dvfs_enable_store(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf, size_t size)
{
        if (strstr(buf, "1") != NULL) {
                if (start_dvfs() != 0)
                        printk(KERN_ERR "Failed to start DVFS\n");
        } else if (strstr(buf, "0") != NULL)
                stop_dvfs();

        return size;
}

static ssize_t dvfs_regs_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        if (dvfs_core_is_active)
                dump_dvfs_core_regs();
        return 0;
}

static ssize_t dvfs_regs_store(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf, size_t size)
{
        if (dvfs_core_is_active)
                dump_dvfs_core_regs();

        return size;
}

static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR,
                                dvfs_enable_show, dvfs_enable_store);
static DEVICE_ATTR(show_regs, S_IRUGO, dvfs_regs_show,
                                dvfs_regs_store);

/*!
 * This is the probe routine for the DVFS driver.
 *
 * @param   pdev   The platform device structure
 *
 * @return         The function returns 0 on success
 */
static int __devinit mxc_dvfs_core_probe(struct platform_device *pdev)
{
        int err = 0;
        struct resource *res;

        printk(KERN_INFO "mxc_dvfs_core_probe\n");
        dvfs_dev = &pdev->dev;
        dvfs_data = pdev->dev.platform_data;

        INIT_DELAYED_WORK(&dvfs_core_handler, dvfs_core_work_handler);

        pll1_sw_clk = clk_get(NULL, "pll1_sw_clk");
        if (IS_ERR(pll1_sw_clk)) {
                printk(KERN_INFO "%s: failed to get pll1_sw_clk\n", __func__);
                return PTR_ERR(pll1_sw_clk);
        }

        cpu_clk = clk_get(NULL, dvfs_data->clk1_id);
        if (IS_ERR(cpu_clk)) {
                printk(KERN_ERR "%s: failed to get cpu clock\n", __func__);
                return PTR_ERR(cpu_clk);
        }
        if (!cpu_is_mx6q()) {
                dvfs_clk = clk_get(NULL, dvfs_data->clk2_id);
                if (IS_ERR(dvfs_clk)) {
                        printk(KERN_ERR "%s: failed to get dvfs clock\n", __func__);
                        return PTR_ERR(dvfs_clk);
                }
        }
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (res == NULL) {
                err = -ENODEV;
                goto err1;
        }
        dvfs_data->membase = ioremap(res->start, res->end - res->start + 1);
        /*
         * Request the DVFS interrupt
         */
        dvfs_data->irq = platform_get_irq(pdev, 0);
        if (dvfs_data->irq < 0) {
                err = dvfs_data->irq;
                goto err2;
        }

        /* request the DVFS interrupt */
        err = request_irq(dvfs_data->irq, dvfs_irq, IRQF_SHARED, "dvfs",
                          dvfs_dev);
        if (err) {
                printk(KERN_ERR
                       "DVFS: Unable to attach to DVFS interrupt, err = %d\n",
                       err);
                goto err2;
        }

        dvfs_core_setpoint = get_dvfs_core_op(&dvfs_core_op);
        if (dvfs_core_setpoint == NULL) {
                printk(KERN_ERR "No dvfs_core working point table defined\n");
                err = -ENODEV;
                goto err3;
        }

        clk_enable(dvfs_clk);
        err = init_dvfs_controller();
        if (err) {
                printk(KERN_ERR "DVFS: Unable to initialize DVFS\n");
                clk_disable(dvfs_clk);
                goto err3;
        }
        clk_disable(dvfs_clk);

        err = sysfs_create_file(&pdev->dev.kobj, &dev_attr_enable.attr);
        if (err) {
                printk(KERN_ERR
                       "DVFS: Unable to register sysfs entry for DVFS\n");
                goto err3;
        }

        err = sysfs_create_file(&dvfs_dev->kobj, &dev_attr_show_regs.attr);
        if (err) {
                printk(KERN_ERR
                       "DVFS: Unable to register sysfs entry for DVFS\n");
                goto err3;
        }

        /* Set the current working point. */
        cpu_op_tbl = get_cpu_op(&cpu_op_nr);
        old_op = 0;
        curr_op = 0;
        dvfs_core_resume = 0;
        cpufreq_trig_needed = 0;

        return err;
err3:
        free_irq(dvfs_data->irq, dvfs_dev);
err2:
        iounmap(dvfs_data->membase);
err1:
        dev_err(&pdev->dev, "Failed to probe DVFS CORE\n");
        return err;
}

/*!
 * This function is called to put DVFS in a low power state.
 *
 * @param   pdev  the device structure
 * @param   state the power state the device is entering
 *
 * @return  The function always returns 0.
 */
static int mxc_dvfs_core_suspend(struct platform_device *pdev,
                                 pm_message_t state)
{
        if (dvfs_core_is_active) {
                dvfs_core_resume = 1;
                stop_dvfs();
        }

        return 0;
}

/*!
 * This function is called to resume DVFS from a low power state.
 *
 * @param   pdev  the device structure
 *
 * @return  The function always returns 0.
 */
static int mxc_dvfs_core_resume(struct platform_device *pdev)
{
        if (dvfs_core_resume) {
                dvfs_core_resume = 0;
                start_dvfs();
        }

        return 0;
}

static struct platform_driver mxc_dvfs_core_driver = {
        .driver = {
                   .name = "imx_dvfscore",
                   },
        .probe = mxc_dvfs_core_probe,
        .suspend = mxc_dvfs_core_suspend,
        .resume = mxc_dvfs_core_resume,
};

static int __init dvfs_init(void)
{
        if (platform_driver_register(&mxc_dvfs_core_driver) != 0) {
                printk(KERN_ERR "mxc_dvfs_core_driver register failed\n");
                return -ENODEV;
        }

        dvfs_core_is_active = 0;
        printk(KERN_INFO "DVFS driver module loaded\n");
        return 0;
}

static void __exit dvfs_cleanup(void)
{
        stop_dvfs();

        /* release the DVFS interrupt */
        free_irq(dvfs_data->irq, dvfs_dev);

        sysfs_remove_file(&dvfs_dev->kobj, &dev_attr_enable.attr);
        /* Unregister the device structure */
        platform_driver_unregister(&mxc_dvfs_core_driver);

        iounmap(ccm_base);
        iounmap(dvfs_data->membase);
        clk_put(cpu_clk);
        clk_put(dvfs_clk);

        dvfs_core_is_active = 0;
        printk(KERN_INFO "DVFS driver module unloaded\n");

}

module_init(dvfs_init);
module_exit(dvfs_cleanup);

MODULE_AUTHOR("Freescale Semiconductor, Inc.");
MODULE_DESCRIPTION("DVFS driver");
MODULE_LICENSE("GPL");