2 * Copyright 2008-2011 Freescale Semiconductor, Inc. All Rights Reserved.
6 * The code contained herein is licensed under the GNU General Public
7 * License. You may obtain a copy of the GNU General Public License
8 * Version 2 or later at the following locations:
10 * http://www.opensource.org/licenses/gpl-license.html
11 * http://www.gnu.org/copyleft/gpl.html
17 * @brief A simplified driver for the Freescale Semiconductor MXC DVFS module.
19 * Upon initialization, the DVFS driver initializes the DVFS hardware
20 * sets up driver nodes, attaches to the DVFS interrupt, and initializes internal
21 * data structures. When the DVFS interrupt occurs the driver checks the cause
22 * of the interrupt (lower frequency, increase frequency or emergency) and
23 * changes the CPU voltage according to translation table that is loaded into
29 #include <linux/module.h>
30 #include <linux/kernel.h>
31 #include <linux/init.h>
34 #include <linux/interrupt.h>
35 #include <linux/jiffies.h>
36 #include <linux/device.h>
37 #include <linux/sysdev.h>
38 #include <linux/delay.h>
39 #include <linux/clk.h>
40 #include <linux/regulator/consumer.h>
41 #include <linux/input.h>
42 #include <linux/platform_device.h>
43 #if defined(CONFIG_CPU_FREQ)
44 #include <linux/cpufreq.h>
48 #include <mach/hardware.h>
49 #include <mach/mxc_dvfs.h>
/* DVFSTHRS: upper/lower load thresholds and panic threshold fields. */
51 #define MXC_DVFSTHRS_UPTHR_MASK 0x0FC00000
52 #define MXC_DVFSTHRS_UPTHR_OFFSET 22
53 #define MXC_DVFSTHRS_DNTHR_MASK 0x003F0000
54 #define MXC_DVFSTHRS_DNTHR_OFFSET 16
55 #define MXC_DVFSTHRS_PNCTHR_MASK 0x0000003F
56 #define MXC_DVFSTHRS_PNCTHR_OFFSET 0
/* DVFSCOUN: down-count and up-count filter fields. */
58 #define MXC_DVFSCOUN_DNCNT_MASK 0x00FF0000
59 #define MXC_DVFSCOUN_DNCNT_OFFSET 16
60 #define MXC_DVFSCOUN_UPCNT_MASK 0x000000FF
61 #define MXC_DVFSCOUN_UPCNT_OFFSET 0
/* DVFSEMAC: exponential moving average constant. */
63 #define MXC_DVFSEMAC_EMAC_MASK 0x000001FF
64 #define MXC_DVFSEMAC_EMAC_OFFSET 0
/* DVFSCNTR: control/status bits (interrupt masks, FSVAI status, enable). */
66 #define MXC_DVFSCNTR_DVFEV 0x10000000
67 #define MXC_DVFSCNTR_LBMI 0x08000000
68 #define MXC_DVFSCNTR_LBFL 0x06000000
69 #define MXC_DVFSCNTR_DVFIS 0x01000000
70 #define MXC_DVFSCNTR_FSVAIM 0x00400000
71 #define MXC_DVFSCNTR_FSVAI_MASK 0x00300000
72 #define MXC_DVFSCNTR_FSVAI_OFFSET 20
73 #define MXC_DVFSCNTR_WFIM 0x00080000
74 #define MXC_DVFSCNTR_WFIM_OFFSET 19
75 #define MXC_DVFSCNTR_MAXF_MASK 0x00040000
76 #define MXC_DVFSCNTR_MAXF_OFFSET 18
77 #define MXC_DVFSCNTR_MINF_MASK 0x00020000
78 #define MXC_DVFSCNTR_MINF_OFFSET 17
79 #define MXC_DVFSCNTR_LTBRSR_MASK 0x00000018
80 #define MXC_DVFSCNTR_LTBRSR_OFFSET 3
81 #define MXC_DVFSCNTR_DVFEN 0x00000001
/* CCM bits used during software DVFS frequency shifts. */
83 #define CCM_CDCR_SW_DVFS_EN 0x20
84 #define CCM_CDCR_ARM_FREQ_SHIFT_DIVIDER 0x4
85 #define CCM_CDHIPR_ARM_PODF_BUSY 0x10000
/* Nonzero while the DVFS core hardware is enabled and handling events. */
87 int dvfs_core_is_active;
/* Platform data (register base, offsets, clock ids) supplied at probe. */
88 static struct mxc_dvfs_platform_data *dvfs_data;
89 static struct device *dvfs_dev;
/* CPU operating-point table and resume bookkeeping. */
90 static struct cpu_op *cpu_op_tbl;
91 static int dvfs_core_resume;
94 static int dvfs_core_op;
/* Index of the set-point currently programmed by dvfs_load_config(). */
95 static int dvfs_config_setpoint;
100 extern void setup_pll(void);
101 extern int cpufreq_trig_needed;
102 extern int (*set_cpu_voltage)(u32 cpu_volt);
/* Timestamp of the previous register dump (see dump_dvfs_core_regs). */
104 struct timeval core_prev_intr;
106 void dump_dvfs_core_regs(void);
107 void stop_dvfs(void);
108 struct dvfs_op *(*get_dvfs_core_op)(int *op);
/* Deferred work that does the actual frequency/voltage change. */
110 static struct delayed_work dvfs_core_handler;
115 static struct clk *pll1_sw_clk;
116 static struct clk *cpu_clk;
117 static struct clk *dvfs_clk;
119 static int cpu_op_nr;
120 extern struct cpu_op *(*get_cpu_op)(int *op);
/* NOTE(review): duplicate of the extern above at original line 102 —
 * harmless (identical redeclaration) but could be removed. */
121 extern int (*set_cpu_voltage)(u32 cpu_volt);
/*
 * Rescale a loops_per_jiffy value after a CPU frequency change:
 * result = old * mult / div (mult/div are the new/old rates in kHz).
 * NOTE(review): the divide step and #endif are not visible in this
 * excerpt — on 32-bit this presumably uses do_div(); confirm upstream.
 */
123 static inline unsigned long dvfs_cpu_jiffies(unsigned long old, u_int div, u_int mult)
125 #if BITS_PER_LONG == 32
/* Widen to 64 bits so old * mult cannot overflow on 32-bit. */
127 u64 result = ((u64) old) * ((u64) mult);
129 return (unsigned long) result;
131 #elif BITS_PER_LONG == 64
133 unsigned long result = old * ((u64) mult);
/* FSVAI status value 0: hardware requests no frequency change. */
141 FSVAI_FREQ_NOCHANGE = 0x0,
148 * Load tracking buffer source: 1 for ld_add; 0 for pre_ld_add; 2 for after EMA
/* Select the post-EMA signal as the load-tracking buffer source. */
150 #define DVFS_LTBRSR (2 << MXC_DVFSCNTR_LTBRSR_OFFSET)
/* Set-point table supplied by the platform via get_dvfs_core_op(). */
152 static struct dvfs_op *dvfs_core_setpoint;
/* Bus-frequency state/helpers provided by the bus-freq driver. */
153 extern int low_bus_freq_mode;
154 extern int high_bus_freq_mode;
155 extern int set_low_bus_freq(void);
156 extern int set_high_bus_freq(int high_bus_speed);
157 extern int low_freq_bus_used(void);
/* Serializes DVFS register updates between IRQ, work and sysfs paths. */
159 DEFINE_SPINLOCK(mxc_dvfs_core_lock);
/*
 * Program the DVFS core THRS/COUN/EMAC registers from the chosen
 * set-point table entry and remember which set-point is active.
 */
161 static void dvfs_load_config(int set_point)
/* Thresholds: up, down, and panic. */
166 reg |= dvfs_core_setpoint[set_point].upthr << MXC_DVFSTHRS_UPTHR_OFFSET;
167 reg |= dvfs_core_setpoint[set_point].downthr <<
168 MXC_DVFSTHRS_DNTHR_OFFSET;
169 reg |= dvfs_core_setpoint[set_point].panicthr;
170 __raw_writel(reg, dvfs_data->membase + MXC_DVFSCORE_THRS);
/* Up/down event counters. */
173 reg |= dvfs_core_setpoint[set_point].downcnt <<
174 MXC_DVFSCOUN_DNCNT_OFFSET;
175 reg |= dvfs_core_setpoint[set_point].upcnt << MXC_DVFSCOUN_UPCNT_OFFSET;
176 __raw_writel(reg, dvfs_data->membase + MXC_DVFSCORE_COUN);
/* EMA constant: read-modify-write only the EMAC field. */
179 reg = __raw_readl(dvfs_data->membase + MXC_DVFSCORE_EMAC);
180 reg &= ~MXC_DVFSEMAC_EMAC_MASK;
181 reg |= dvfs_core_setpoint[set_point].emac << MXC_DVFSEMAC_EMAC_OFFSET;
182 __raw_writel(reg, dvfs_data->membase + MXC_DVFSCORE_EMAC);
/* Record the active set-point for the work handler's LPAPM logic. */
184 dvfs_config_setpoint = set_point;
/*
 * i.MX5 frequency change. Two paths:
 *  - PLL rate differs between operating points: raise voltage first
 *    (when going up), hand the shift to the GPC FSM, then lower voltage
 *    (when going down).
 *  - Same PLL rate: change only the ARM podf divider via CCM/GPC.
 * Voltage is always raised before a frequency increase and lowered
 * after a frequency decrease.
 */
187 static int mx5_set_cpu_freq(int op)
194 unsigned long rate = 0;
/* Path 1: the target operating point needs a different PLL rate. */
201 if (cpu_op_tbl[op].pll_rate != cpu_op_tbl[old_op].pll_rate) {
202 org_cpu_rate = clk_get_rate(cpu_clk);
203 rate = cpu_op_tbl[op].cpu_rate;
/* Already at the requested rate — nothing to do. */
205 if (org_cpu_rate == rate)
208 gp_volt = cpu_op_tbl[op].cpu_voltage;
212 /*Set the voltage for the GP domain. */
/* Going faster: bump the GP rail before touching the clock. */
213 if (rate > org_cpu_rate) {
214 ret = set_cpu_voltage(gp_volt);
216 printk(KERN_DEBUG "COULD NOT SET GP VOLTAGE\n");
/* Allow the regulator to settle before the frequency shift. */
219 udelay(dvfs_data->delay_time);
221 spin_lock_irqsave(&mxc_dvfs_core_lock, flags);
222 /* PLL_RELOCK, set ARM_FREQ_SHIFT_DIVIDER */
223 reg = __raw_readl(ccm_base + dvfs_data->ccm_cdcr_offset);
224 /* Check if software_dvfs_en bit set */
/* Remember the original SW_DVFS_EN so it can be restored afterwards. */
225 if ((reg & CCM_CDCR_SW_DVFS_EN) != 0)
226 en_sw_dvfs = CCM_CDCR_SW_DVFS_EN;
229 reg &= ~(CCM_CDCR_SW_DVFS_EN);
231 __raw_writel(reg, ccm_base + dvfs_data->ccm_cdcr_offset);
234 /* START the GPC main control FSM */
236 reg = __raw_readl(gpc_base + dvfs_data->gpc_vcr_offset);
237 reg &= ~(MXC_GPCVCR_VINC_MASK | MXC_GPCVCR_VCNTU_MASK |
238 MXC_GPCVCR_VCNT_MASK);
/* VINC=1 means voltage/frequency increase direction. */
240 if (rate > org_cpu_rate)
241 reg |= 1 << MXC_GPCVCR_VINC_OFFSET;
243 reg |= (1 << MXC_GPCVCR_VCNTU_OFFSET) |
244 (1 << MXC_GPCVCR_VCNT_OFFSET);
245 __raw_writel(reg, gpc_base + dvfs_data->gpc_vcr_offset);
247 reg = __raw_readl(gpc_base + dvfs_data->gpc_cntr_offset);
248 reg &= ~(MXC_GPCCNTR_ADU_MASK | MXC_GPCCNTR_FUPD_MASK);
249 reg |= MXC_GPCCNTR_FUPD;
250 reg |= MXC_GPCCNTR_ADU;
251 __raw_writel(reg, gpc_base + dvfs_data->gpc_cntr_offset);
/* Kick the FSM and busy-wait for it to finish (STRT self-clears). */
253 reg |= MXC_GPCCNTR_STRT;
254 __raw_writel(reg, gpc_base + dvfs_data->gpc_cntr_offset);
255 while (__raw_readl(gpc_base + dvfs_data->gpc_cntr_offset)
258 spin_unlock_irqrestore(&mxc_dvfs_core_lock, flags);
/* Going slower: drop the GP rail only after the clock is down. */
260 if (rate < org_cpu_rate) {
261 ret = set_cpu_voltage(gp_volt);
264 "COULD NOT SET GP VOLTAGE!!!!\n");
267 udelay(dvfs_data->delay_time);
269 /* set software_dvfs_en bit back to original setting*/
270 reg = __raw_readl(ccm_base + dvfs_data->ccm_cdcr_offset);
271 reg &= ~(CCM_CDCR_SW_DVFS_EN);
273 clk_set_rate(cpu_clk, rate);
/* Path 2: same PLL — change only the ARM post-divider (podf). */
275 podf = cpu_op_tbl[op].cpu_podf;
276 gp_volt = cpu_op_tbl[op].cpu_voltage;
278 /* Change arm_podf only */
279 /* set ARM_FREQ_SHIFT_DIVIDER */
280 reg = __raw_readl(ccm_base + dvfs_data->ccm_cdcr_offset);
282 /* Check if software_dvfs_en bit set */
283 if ((reg & CCM_CDCR_SW_DVFS_EN) != 0)
284 en_sw_dvfs = CCM_CDCR_SW_DVFS_EN;
288 reg &= ~(CCM_CDCR_SW_DVFS_EN | CCM_CDCR_ARM_FREQ_SHIFT_DIVIDER);
289 reg |= CCM_CDCR_ARM_FREQ_SHIFT_DIVIDER;
290 __raw_writel(reg, ccm_base + dvfs_data->ccm_cdcr_offset);
/* Current podf lives in the low 3 bits of CACRR. */
293 reg = __raw_readl(ccm_base + dvfs_data->ccm_cacrr_offset);
294 arm_podf = reg & 0x07;
295 if (podf == arm_podf) {
297 "No need to change freq and voltage!!!!\n");
300 /* Check if FSVAI indicate freq up */
/* Smaller podf = higher frequency, so raise voltage first. */
301 if (podf < arm_podf) {
302 ret = set_cpu_voltage(gp_volt);
305 "COULD NOT SET GP VOLTAGE!!!!\n");
308 udelay(dvfs_data->delay_time);
318 spin_lock_irqsave(&mxc_dvfs_core_lock, flags);
/* Only write the new podf when the divider is not busy handshaking. */
320 reg1 = __raw_readl(ccm_base + dvfs_data->ccm_cdhipr_offset);
322 if ((reg1 & CCM_CDHIPR_ARM_PODF_BUSY) == 0) {
324 ccm_base + dvfs_data->ccm_cacrr_offset);
328 ccm_base + dvfs_data->ccm_cdhipr_offset);
329 printk(KERN_DEBUG "ARM_PODF still in busy!!!!\n");
/* Program the GPC voltage-change request and start it. */
333 reg = __raw_readl(gpc_base + dvfs_data->gpc_vcr_offset);
335 ~(MXC_GPCVCR_VINC_MASK | MXC_GPCVCR_VCNTU_MASK |
336 MXC_GPCVCR_VCNT_MASK);
337 reg |= (1 << MXC_GPCVCR_VCNTU_OFFSET) |
338 (100 << MXC_GPCVCR_VCNT_OFFSET) |
339 (vinc << MXC_GPCVCR_VINC_OFFSET);
340 __raw_writel(reg, gpc_base + dvfs_data->gpc_vcr_offset);
342 reg = __raw_readl(gpc_base + dvfs_data->gpc_cntr_offset);
343 reg &= (~(MXC_GPCCNTR_ADU | MXC_GPCCNTR_FUPD
344 | MXC_GPCCNTR_STRT));
345 __raw_writel(reg, gpc_base + dvfs_data->gpc_cntr_offset);
346 reg = __raw_readl(gpc_base + dvfs_data->gpc_cntr_offset);
347 reg |= MXC_GPCCNTR_ADU | MXC_GPCCNTR_FUPD;
348 __raw_writel(reg, gpc_base + dvfs_data->gpc_cntr_offset);
349 reg = __raw_readl(gpc_base + dvfs_data->gpc_cntr_offset);
350 reg |= MXC_GPCCNTR_STRT;
351 __raw_writel(reg, gpc_base + dvfs_data->gpc_cntr_offset);
353 /* Wait for arm podf Enable */
354 while ((__raw_readl(gpc_base + dvfs_data->gpc_cntr_offset) &
355 MXC_GPCCNTR_STRT) == MXC_GPCCNTR_STRT) {
356 printk(KERN_DEBUG "Waiting arm_podf enabled!\n");
359 spin_unlock_irqrestore(&mxc_dvfs_core_lock, flags);
/* Frequency went down: now it is safe to lower the voltage. */
362 ret = set_cpu_voltage(gp_volt);
365 "COULD NOT SET GP VOLTAGE\n!!!");
368 udelay(dvfs_data->delay_time);
371 /* Clear the ARM_FREQ_SHIFT_DIVIDER and */
372 /* set software_dvfs_en bit back to original setting*/
373 reg = __raw_readl(ccm_base + dvfs_data->ccm_cdcr_offset);
374 reg &= ~(CCM_CDCR_SW_DVFS_EN | CCM_CDCR_ARM_FREQ_SHIFT_DIVIDER);
376 __raw_writel(reg, ccm_base + dvfs_data->ccm_cdcr_offset);
/*
 * i.MX6 frequency change: the clock framework handles the divider/PLL
 * details, so only voltage ordering matters here — raise voltage before
 * increasing the clock, lower it after decreasing the clock.
 */
381 static int mx6_set_cpu_freq(int op)
385 unsigned long rate = 0;
386 int gp_volt = cpu_op_tbl[op].cpu_voltage;
388 org_cpu_rate = clk_get_rate(cpu_clk);
389 rate = cpu_op_tbl[op].cpu_rate;
/* Already at the target rate — nothing to do. */
391 if (rate == org_cpu_rate)
394 if (rate > org_cpu_rate) {
395 /* Increase voltage first. */
396 ret = set_cpu_voltage(gp_volt);
398 printk(KERN_DEBUG "COULD NOT INCREASE GP VOLTAGE!!!!\n");
/* Let the regulator settle before raising the clock. */
401 udelay(dvfs_data->delay_time);
403 ret = clk_set_rate(cpu_clk, rate);
405 printk(KERN_DEBUG "cannot set CPU clock rate\n");
/* Frequency was lowered above; now reduce the voltage to match.
 * NOTE(review): comment says "Increase" but this is the decrease path. */
409 if (rate < org_cpu_rate) {
410 /* Increase voltage first. */
411 ret = set_cpu_voltage(gp_volt);
413 printk(KERN_DEBUG "COULD NOT INCREASE GP VOLTAGE!!!!\n");
/*
 * Dispatch a frequency change to the SoC-specific implementation and
 * flag that loops_per_jiffy needs recalculation.
 */
421 static int set_cpu_freq(int op)
426 ret = mx6_set_cpu_freq(op);
428 ret = mx5_set_cpu_freq(op);
/* Tell the work handler to fix up loops_per_jiffy afterwards. */
430 cpufreq_trig_needed = 1;
/*
 * Enable the DVFS core: find the operating point matching the current
 * CPU rate, load its set-point config, route the DVFS event through the
 * GPC as an ARM IRQ, unmask the frequency-adjustment interrupt, and set
 * DVFEN.  Returns 0 and is a no-op if DVFS is already active.
 */
435 static int start_dvfs(void)
440 if (dvfs_core_is_active)
443 spin_lock_irqsave(&mxc_dvfs_core_lock, flags);
445 clk_enable(dvfs_clk);
447 /* get current working point */
/* Scan the table from the slowest entry upwards. */
448 cpu_rate = clk_get_rate(cpu_clk);
449 curr_op = cpu_op_nr - 1;
451 if (cpu_rate <= cpu_op_tbl[curr_op].cpu_rate)
453 } while (--curr_op >= 0);
456 dvfs_load_config(curr_op);
462 if (curr_op == (cpu_op_nr - 1))
467 /* config reg GPC_CNTR */
468 reg = __raw_readl(gpc_base + dvfs_data->gpc_cntr_offset);
/* Unmask the GPC interrupt and steer it to the ARM core. */
470 reg &= ~MXC_GPCCNTR_GPCIRQM;
471 /* GPCIRQ=1, select ARM IRQ */
472 reg |= MXC_GPCCNTR_GPCIRQ_ARM;
473 /* ADU=1, select ARM domain */
475 reg |= MXC_GPCCNTR_ADU;
476 __raw_writel(reg, gpc_base + dvfs_data->gpc_cntr_offset);
478 /* Set PREDIV bits */
479 reg = __raw_readl(dvfs_data->membase + MXC_DVFSCORE_CNTR);
480 reg = (reg & ~(dvfs_data->prediv_mask));
481 reg |= (dvfs_data->prediv_val) << (dvfs_data->prediv_offset);
482 __raw_writel(reg, dvfs_data->membase + MXC_DVFSCORE_CNTR);
484 /* Enable DVFS interrupt */
485 reg = __raw_readl(dvfs_data->membase + MXC_DVFSCORE_CNTR);
487 reg = (reg & ~MXC_DVFSCNTR_FSVAIM);
/* Non-mx6 parts additionally pin the MAXF flag. */
489 if (!cpu_is_mx6q()) {
490 reg = (reg & ~(MXC_DVFSCNTR_MAXF_MASK
491 | MXC_DVFSCNTR_MINF_MASK));
492 reg |= 1 << MXC_DVFSCNTR_MAXF_OFFSET;
494 /* Select ARM domain */
495 reg |= MXC_DVFSCNTR_DVFIS;
496 /* Enable DVFS frequency adjustment interrupt */
497 reg = (reg & ~MXC_DVFSCNTR_FSVAIM);
498 /* Set load tracking buffer register source */
499 reg = (reg & ~MXC_DVFSCNTR_LTBRSR_MASK);
502 reg = (reg & ~(dvfs_data->div3ck_mask));
503 reg |= (dvfs_data->div3ck_val) << (dvfs_data->div3ck_offset);
504 __raw_writel(reg, dvfs_data->membase + MXC_DVFSCORE_CNTR);
508 unsigned long cpu_wfi = 0;
509 int num_cpus = num_possible_cpus();
510 reg = __raw_readl(dvfs_data->membase + MXC_DVFSCORE_EMAC);
511 /* Need to enable DVFS tracking for each core that is active */
/* NOTE(review): num_cpus starts at num_possible_cpus(), but valid CPU
 * ids are 0..num_possible_cpus()-1, so the first cpu_active() check
 * looks off-by-one — verify against the loop header (not visible). */
513 if (cpu_active(num_cpus))
514 set_bit(num_cpus, &cpu_wfi);
515 } while (num_cpus--);
517 __raw_writel(reg, dvfs_data->membase + MXC_DVFSCORE_EMAC);
/* Finally set DVFEN to let the hardware start tracking load. */
519 reg = __raw_readl(dvfs_data->membase + MXC_DVFSCORE_CNTR);
520 reg |= MXC_DVFSCNTR_DVFEN;
521 __raw_writel(reg, dvfs_data->membase + MXC_DVFSCORE_CNTR);
524 dvfs_core_is_active = 1;
526 spin_unlock_irqrestore(&mxc_dvfs_core_lock, flags);
528 printk(KERN_DEBUG "DVFS is started\n");
534 * This function is called for module initialization.
535 * It sets up the DVFS hardware.
536 * It sets default values for DVFS thresholds and counters. The default
537 * values were chosen from a set of different reasonable values. They were tested
538 * and the default values in the driver gave the best results.
539 * More work should be done to find optimal values.
541 * @return 0 if successful; non-zero otherwise.
/* One-time DVFS controller setup at probe; loads the initial config. */
544 static int init_dvfs_controller(void)
546 /* DVFS loading config */
/*
 * DVFS interrupt: if the event is a DVFS0 (ARM) frequency/voltage
 * request, mask further DVFS and GPC interrupts and defer the actual
 * rate change to process context via dvfs_core_handler.
 */
552 static irqreturn_t dvfs_irq(int irq, void *dev_id)
556 /* Check if DVFS0 (ARM) is requesting a frequency/voltage update */
557 if ((__raw_readl(gpc_base + dvfs_data->gpc_cntr_offset)
558 & MXC_GPCCNTR_DVFS0CR) == 0)
/* Mask the frequency-adjustment interrupt until the work runs. */
562 reg = __raw_readl(dvfs_data->membase + MXC_DVFSCORE_CNTR);
564 reg |= MXC_DVFSCNTR_FSVAIM;
565 __raw_writel(reg, dvfs_data->membase + MXC_DVFSCORE_CNTR);
/* Mask the GPC interrupt as well. */
568 reg = __raw_readl(gpc_base + dvfs_data->gpc_cntr_offset);
569 reg |= MXC_GPCCNTR_GPCIRQM | 0x1000000;
570 __raw_writel(reg, gpc_base + dvfs_data->gpc_cntr_offset);
/* Run the heavy lifting (clk/regulator calls) outside IRQ context. */
572 schedule_delayed_work(&dvfs_core_handler, 0);
/*
 * Deferred DVFS event handler: reads FSVAI to learn whether the
 * hardware wants the frequency raised or lowered, picks the new
 * operating point (coordinating with the bus-frequency driver for
 * LPAPM mode), applies it, fixes loops_per_jiffy, then re-arms the
 * DVFS and GPC interrupts.
 */
576 static void dvfs_core_work_handler(struct work_struct *work)
582 int low_freq_bus_ready = 0;
583 int bus_incr = 0, cpu_dcr = 0;
586 low_freq_bus_ready = low_freq_bus_used();
588 /* Check DVFS frequency adjustment interrupt status */
589 reg = __raw_readl(dvfs_data->membase + MXC_DVFSCORE_CNTR);
590 fsvai = (reg & MXC_DVFSCNTR_FSVAI_MASK) >> MXC_DVFSCNTR_FSVAI_OFFSET;
591 /* Check FSVAI, FSVAI=0 is error */
592 if (fsvai == FSVAI_FREQ_NOCHANGE) {
593 /* Do nothing. Freq change is not required */
596 curr_cpu = clk_get_rate(cpu_clk);
597 /* If FSVAI indicate freq down,
598 check arm-clk is not in lowest frequency*/
599 if (fsvai == FSVAI_FREQ_DECREASE) {
/* Already at the slowest table entry; consider LP bus instead. */
600 if (curr_cpu <= cpu_op_tbl[cpu_op_nr - 1].cpu_rate) {
602 if (low_bus_freq_mode)
/* Clamp the operating-point index into the table. */
608 if (curr_op >= cpu_op_nr) {
609 curr_op = cpu_op_nr - 1;
613 dvfs_load_config(curr_op);
/* Frequency-increase request while already at the top rate. */
616 if (curr_cpu == cpu_op_tbl[0].cpu_rate) {
620 if (!high_bus_freq_mode &&
621 dvfs_config_setpoint == (cpu_op_nr + 1)) {
622 /* bump up LP freq first. */
624 dvfs_load_config(cpu_op_nr)
635 low_freq_bus_ready = low_freq_bus_used();
/* Dropping to the lowest point with the LP bus available: enter
 * the low-bus-frequency (LPAPM) configuration. */
636 if ((curr_op == cpu_op_nr - 1) && (!low_bus_freq_mode)
637 && (low_freq_bus_ready) && !bus_incr) {
639 set_cpu_freq(curr_op);
640 /* If dvfs_core_op is greater than cpu_op_nr, it implies
641 * we support LPAPM mode for this platform.
643 if (dvfs_core_op > cpu_op_nr) {
645 dvfs_load_config(cpu_op_nr + 1);
/* Otherwise make sure the bus is at high frequency first. */
648 if (!high_bus_freq_mode)
649 set_high_bus_freq(1);
651 ret = set_cpu_freq(curr_op);
656 if (cpufreq_trig_needed == 1) {
657 /*Fix loops-per-jiffy */
658 cpufreq_trig_needed = 0;
659 for_each_online_cpu(cpu)
660 per_cpu(cpu_data, cpu).loops_per_jiffy =
661 dvfs_cpu_jiffies(per_cpu(cpu_data, cpu).loops_per_jiffy,
662 curr_cpu / 1000, clk_get_rate(cpu_clk) / 1000);
/* Re-arm: clear MAXF/MINF, clear FSVAI, ack LBFL, unmask interrupt. */
666 reg = __raw_readl(dvfs_data->membase + MXC_DVFSCORE_CNTR);
667 reg = (reg & ~(MXC_DVFSCNTR_MAXF_MASK | MXC_DVFSCNTR_MINF_MASK));
668 reg |= maxf << MXC_DVFSCNTR_MAXF_OFFSET;
669 reg |= minf << MXC_DVFSCNTR_MINF_OFFSET;
671 /* Enable DVFS interrupt */
673 reg = (reg & ~MXC_DVFSCNTR_FSVAIM);
674 reg |= FSVAI_FREQ_NOCHANGE;
/* LBFL is write-1-to-clear. */
676 reg = (reg & ~MXC_DVFSCNTR_LBFL);
677 reg |= MXC_DVFSCNTR_LBFL;
678 __raw_writel(reg, dvfs_data->membase + MXC_DVFSCORE_CNTR);
/* Unmask the GPC interrupt masked in dvfs_irq(). */
680 reg = __raw_readl(gpc_base + dvfs_data->gpc_cntr_offset);
681 reg &= ~MXC_GPCCNTR_GPCIRQM;
682 __raw_writel(reg, gpc_base + dvfs_data->gpc_cntr_offset);
688 * This function disables the DVFS module.
/* Mask the interrupt, restore the highest operating point and bus
 * frequency, fix loops_per_jiffy, then clear DVFEN and gate the clock. */
697 if (dvfs_core_is_active) {
699 /* Mask dvfs irq, disable DVFS */
700 reg = __raw_readl(dvfs_data->membase
701 + MXC_DVFSCORE_CNTR);
703 reg |= MXC_DVFSCNTR_FSVAIM;
704 __raw_writel(reg, dvfs_data->membase
705 + MXC_DVFSCORE_CNTR);
/* Leave the system at full bus and CPU speed. */
708 if (!high_bus_freq_mode)
709 set_high_bus_freq(1);
711 curr_cpu = clk_get_rate(cpu_clk);
712 if (curr_cpu != cpu_op_tbl[curr_op].cpu_rate) {
713 set_cpu_freq(curr_op);
715 /*Fix loops-per-jiffy */
716 for_each_online_cpu(cpu)
717 per_cpu(cpu_data, cpu).loops_per_jiffy =
718 dvfs_cpu_jiffies(per_cpu(cpu_data, cpu).loops_per_jiffy,
719 curr_cpu/1000, clk_get_rate(cpu_clk) / 1000);
722 spin_lock_irqsave(&mxc_dvfs_core_lock, flags);
/* Clear DVFEN so the hardware stops tracking load. */
724 reg = __raw_readl(dvfs_data->membase
725 + MXC_DVFSCORE_CNTR);
726 reg = (reg & ~MXC_DVFSCNTR_DVFEN);
727 __raw_writel(reg, dvfs_data->membase
728 + MXC_DVFSCORE_CNTR);
730 spin_unlock_irqrestore(&mxc_dvfs_core_lock, flags);
732 dvfs_core_is_active = 0;
734 clk_disable(dvfs_clk);
737 printk(KERN_DEBUG "DVFS is stopped\n");
/*
 * Dump the DVFS core register file to the kernel log, preceded by the
 * time delta (microseconds) since the previous dump.  Debug aid only;
 * register offsets are relative to MXC_DVFSCORE_THRS.
 *
 * Fixed: definition now uses a (void) prototype to match the forward
 * declaration, and LTR1_1 is printed at KERN_INFO like its siblings.
 */
740 void dump_dvfs_core_regs(void)
744 if (core_prev_intr.tv_sec == 0)
745 do_gettimeofday(&core_prev_intr);
747 do_gettimeofday(&cur);
748 diff = (cur.tv_sec - core_prev_intr.tv_sec)*1000000
749 + (cur.tv_usec - core_prev_intr.tv_usec);
750 core_prev_intr = cur;
753 printk(KERN_DEBUG "diff = %d\n", diff);
755 printk(KERN_INFO "THRS = 0x%08x\n",
756 __raw_readl(dvfs_data->membase
757 + MXC_DVFSCORE_THRS));
758 printk(KERN_INFO "COUNT = 0x%08x\n",
759 __raw_readl(dvfs_data->membase
760 + MXC_DVFSCORE_THRS + 0x04));
761 printk(KERN_INFO "SIG1 = 0x%08x\n",
762 __raw_readl(dvfs_data->membase
763 + MXC_DVFSCORE_THRS + 0x08));
764 printk(KERN_INFO "SIG0 = 0x%08x\n",
765 __raw_readl(dvfs_data->membase
766 + MXC_DVFSCORE_THRS + 0x0c));
767 printk(KERN_INFO "GPC0 = 0x%08x\n",
768 __raw_readl(dvfs_data->membase
769 + MXC_DVFSCORE_THRS + 0x10));
770 printk(KERN_INFO "GPC1 = 0x%08x\n",
771 __raw_readl(dvfs_data->membase
772 + MXC_DVFSCORE_THRS + 0x14));
773 printk(KERN_INFO "GPBT = 0x%08x\n",
774 __raw_readl(dvfs_data->membase
775 + MXC_DVFSCORE_THRS + 0x18));
776 printk(KERN_INFO "EMAC = 0x%08x\n",
777 __raw_readl(dvfs_data->membase
778 + MXC_DVFSCORE_THRS + 0x1c));
779 printk(KERN_INFO "CNTR = 0x%08x\n",
780 __raw_readl(dvfs_data->membase
781 + MXC_DVFSCORE_THRS + 0x20));
782 printk(KERN_INFO "LTR0_0 = 0x%08x\n",
783 __raw_readl(dvfs_data->membase
784 + MXC_DVFSCORE_THRS + 0x24));
785 printk(KERN_INFO "LTR0_1 = 0x%08x\n",
786 __raw_readl(dvfs_data->membase
787 + MXC_DVFSCORE_THRS + 0x28));
788 printk(KERN_INFO "LTR1_0 = 0x%08x\n",
789 __raw_readl(dvfs_data->membase
790 + MXC_DVFSCORE_THRS + 0x2c));
791 printk(KERN_INFO "LTR1_1 = 0x%08x\n",
792 __raw_readl(dvfs_data->membase
793 + MXC_DVFSCORE_THRS + 0x30));
794 printk(KERN_INFO "PT0 = 0x%08x\n",
795 __raw_readl(dvfs_data->membase
796 + MXC_DVFSCORE_THRS + 0x34));
797 printk(KERN_INFO "PT1 = 0x%08x\n",
798 __raw_readl(dvfs_data->membase
799 + MXC_DVFSCORE_THRS + 0x38));
800 printk(KERN_INFO "PT2 = 0x%08x\n",
801 __raw_readl(dvfs_data->membase
802 + MXC_DVFSCORE_THRS + 0x3c));
803 printk(KERN_INFO "PT3 = 0x%08x\n",
804 __raw_readl(dvfs_data->membase
805 + MXC_DVFSCORE_THRS + 0x40));
/* Sysfs read: report the down-threshold of set-point 0. */
808 static ssize_t downthreshold_show(struct device *dev,
809 struct device_attribute *attr, char *buf)
811 return sprintf(buf, "%u\n", dvfs_core_setpoint[0].downthr);
814 static ssize_t downthreshold_store(struct device *dev,
815 struct device_attribute *attr,
816 const char *buf, size_t size)
820 ret = sscanf(buf, "%u", &val);
821 dvfs_core_setpoint[0].downthr = val;
/* Sysfs read: report the down-counter of set-point 0. */
826 static ssize_t downcount_show(struct device *dev,
827 struct device_attribute *attr, char *buf)
829 return sprintf(buf, "%u\n", dvfs_core_setpoint[0].downcnt);
832 static ssize_t downcount_store(struct device *dev,
833 struct device_attribute *attr,
834 const char *buf, size_t size)
838 ret = sscanf(buf, "%u", &val);
839 dvfs_core_setpoint[0].downcnt = val;
/* Sysfs read: report whether the DVFS core is currently enabled. */
845 static ssize_t dvfs_enable_show(struct device *dev,
846 struct device_attribute *attr, char *buf)
848 if (dvfs_core_is_active)
849 return sprintf(buf, "DVFS is enabled\n");
851 return sprintf(buf, "DVFS is disabled\n");
/* Sysfs write: "1" starts DVFS, "0" stops it.
 * NOTE(review): strstr() matching means any input containing '1' (e.g.
 * "10") takes the enable path — substring match, not exact parse. */
854 static ssize_t dvfs_enable_store(struct device *dev,
855 struct device_attribute *attr,
856 const char *buf, size_t size)
858 if (strstr(buf, "1") != NULL) {
859 if (start_dvfs() != 0)
860 printk(KERN_ERR "Failed to start DVFS\n");
861 } else if (strstr(buf, "0") != NULL)
/* Sysfs read: dump DVFS registers to the kernel log when active. */
867 static ssize_t dvfs_regs_show(struct device *dev,
868 struct device_attribute *attr, char *buf)
870 if (dvfs_core_is_active)
871 dump_dvfs_core_regs();
/* Sysfs write: same as the read path — dump registers when active. */
875 static ssize_t dvfs_regs_store(struct device *dev,
876 struct device_attribute *attr,
877 const char *buf, size_t size)
879 if (dvfs_core_is_active)
880 dump_dvfs_core_regs();
/* Sysfs attributes: "enable" (rw) and "show_regs" (read-only). */
886 static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR,
887 dvfs_enable_show, dvfs_enable_store);
888 static DEVICE_ATTR(show_regs, S_IRUGO, dvfs_regs_show,
892 * This is the probe routine for the DVFS driver.
894 * @param pdev The platform device structure
896 * @return The function returns 0 on success
/*
 * Probe: acquire clocks, map the DVFS register block, install the IRQ
 * handler, initialize the controller and create the sysfs attributes.
 * NOTE(review): the early clk_get() error returns do not clk_put()
 * clocks acquired before the failure — confirm/fix upstream.
 */
898 static int __devinit mxc_dvfs_core_probe(struct platform_device *pdev)
901 struct resource *res;
903 printk(KERN_INFO "mxc_dvfs_core_probe\n");
904 dvfs_dev = &pdev->dev;
905 dvfs_data = pdev->dev.platform_data;
907 INIT_DELAYED_WORK(&dvfs_core_handler, dvfs_core_work_handler);
909 pll1_sw_clk = clk_get(NULL, "pll1_sw_clk");
910 if (IS_ERR(pll1_sw_clk)) {
911 printk(KERN_INFO "%s: failed to get pll1_sw_clk\n", __func__);
912 return PTR_ERR(pll1_sw_clk);
915 cpu_clk = clk_get(NULL, dvfs_data->clk1_id);
916 if (IS_ERR(cpu_clk)) {
917 printk(KERN_ERR "%s: failed to get cpu clock\n", __func__);
918 return PTR_ERR(cpu_clk);
/* The dedicated DVFS clock only exists on pre-mx6q parts. */
920 if (!cpu_is_mx6q()) {
921 dvfs_clk = clk_get(NULL, dvfs_data->clk2_id);
922 if (IS_ERR(dvfs_clk)) {
923 printk(KERN_ERR "%s: failed to get dvfs clock\n", __func__);
924 return PTR_ERR(dvfs_clk);
927 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
/* NOTE(review): ioremap() result is not checked before use here. */
932 dvfs_data->membase = ioremap(res->start, res->end - res->start + 1);
934 * Request the DVFS interrupt
936 dvfs_data->irq = platform_get_irq(pdev, 0);
937 if (dvfs_data->irq < 0) {
938 err = dvfs_data->irq;
942 /* request the DVFS interrupt */
943 err = request_irq(dvfs_data->irq, dvfs_irq, IRQF_SHARED, "dvfs",
947 "DVFS: Unable to attach to DVFS interrupt,err = %d",
952 dvfs_core_setpoint = get_dvfs_core_op(&dvfs_core_op);
953 if (dvfs_core_setpoint == NULL) {
954 printk(KERN_ERR "No dvfs_core working point table defined\n");
958 clk_enable(dvfs_clk);
959 err = init_dvfs_controller();
961 printk(KERN_ERR "DVFS: Unable to initialize DVFS");
964 clk_disable(dvfs_clk);
966 err = sysfs_create_file(&pdev->dev.kobj, &dev_attr_enable.attr);
969 "DVFS: Unable to register sysdev entry for DVFS");
973 err = sysfs_create_file(&dvfs_dev->kobj, &dev_attr_show_regs.attr);
976 "DVFS: Unable to register sysdev entry for DVFS");
980 /* Set the current working point. */
981 cpu_op_tbl = get_cpu_op(&cpu_op_nr);
984 dvfs_core_resume = 0;
985 cpufreq_trig_needed = 0;
/* Error unwinding: release the IRQ and unmap the register block. */
989 free_irq(dvfs_data->irq, dvfs_dev);
991 iounmap(dvfs_data->membase);
993 dev_err(&pdev->dev, "Failed to probe DVFS CORE\n");
998 * This function is called to put DVFS in a low power state.
1000 * @param pdev the device structure
1001 * @param state the power state the device is entering
1003 * @return The function always returns 0.
1005 static int mxc_dvfs_core_suspend(struct platform_device *pdev,
/* Remember that DVFS was running so resume can restart it. */
1008 if (dvfs_core_is_active) {
1009 dvfs_core_resume = 1;
1017 * This function is called to resume the MU from a low power state.
1019 * @param dev the device structure
1020 * @param level the stage in device suspension process that we want the
1021 * device to be put in
1023 * @return The function always returns 0.
1025 static int mxc_dvfs_core_resume(struct platform_device *pdev)
/* Restart DVFS only if suspend stopped an active instance. */
1027 if (dvfs_core_resume) {
1028 dvfs_core_resume = 0;
/* Platform driver binding for the "imx_dvfscore" device. */
1035 static struct platform_driver mxc_dvfs_core_driver = {
1037 .name = "imx_dvfscore",
1039 .probe = mxc_dvfs_core_probe,
1040 .suspend = mxc_dvfs_core_suspend,
1041 .resume = mxc_dvfs_core_resume,
/* Module init: register the platform driver; DVFS starts disabled. */
1044 static int __init dvfs_init(void)
1046 if (platform_driver_register(&mxc_dvfs_core_driver) != 0) {
1047 printk(KERN_ERR "mxc_dvfs_core_driver register failed\n");
1051 dvfs_core_is_active = 0;
1052 printk(KERN_INFO "DVFS driver module loaded\n");
/* Module exit: release the IRQ, sysfs entry, driver and mapping. */
1056 static void __exit dvfs_cleanup(void)
1060 /* release the DVFS interrupt */
1061 free_irq(dvfs_data->irq, dvfs_dev);
1063 sysfs_remove_file(&dvfs_dev->kobj, &dev_attr_enable.attr);
1064 /* Unregister the device structure */
1065 platform_driver_unregister(&mxc_dvfs_core_driver);
1068 iounmap(dvfs_data->membase);
1072 dvfs_core_is_active = 0;
1073 printk(KERN_INFO "DVFS driver module unloaded\n");
/* Module entry/exit points and metadata. */
1077 module_init(dvfs_init);
1078 module_exit(dvfs_cleanup);
1080 MODULE_AUTHOR("Freescale Semiconductor, Inc.");
1081 MODULE_DESCRIPTION("DVFS driver");
1082 MODULE_LICENSE("GPL");