3 * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved.
7 * The code contained herein is licensed under the GNU General Public
8 * License. You may obtain a copy of the GNU General Public License
9 * Version 2 or later at the following locations:
11 * http://www.opensource.org/licenses/gpl-license.html
12 * http://www.gnu.org/copyleft/gpl.html
15 #include <linux/kernel.h>
16 #include <linux/init.h>
17 #include <linux/types.h>
18 #include <linux/time.h>
19 #include <linux/hrtimer.h>
21 #include <linux/errno.h>
22 #include <linux/delay.h>
23 #include <linux/clk.h>
25 #include <linux/clkdev.h>
26 #include <asm/div64.h>
27 #include <mach/hardware.h>
28 #include <mach/common.h>
29 #include <mach/clock.h>
30 #include <mach/mxc_dvfs.h>
32 #include "cpu_op-mx6.h"
/* Optional clock-name bookkeeping: only populate .name when CLK_DEBUG on.
 * NOTE(review): extraction dropped lines throughout this file (the embedded
 * original numbering jumps); the #else/#endif of this conditional and many
 * braces below are missing from this view. */
34 #ifdef CONFIG_CLK_DEBUG
35 #define __INIT_CLK_DEBUG(n) .name = #n,
37 #define __INIT_CLK_DEBUG(n)
/* Symbols owned by other files: JTAG state, per-board cpu operating-point
 * table hook, and the low-power bus set-point reference counters bumped by
 * _clk_enable/_clk_disable below. */
40 extern int mxc_jtag_enabled;
41 extern struct cpu_op *(*get_cpu_op)(int *op);
42 extern int lp_high_freq;
43 extern int lp_med_freq;
/* Forward declarations for clocks that reference each other below. */
45 void __iomem *apll_base;
46 static struct clk pll1_sys_main_clk;
47 static struct clk pll2_528_bus_main_clk;
48 static struct clk pll2_pfd_400M;
49 static struct clk pll3_usb_otg_main_clk;
50 static struct clk pll4_audio_main_clk;
51 static struct clk pll5_video_main_clk;
52 static struct clk pll6_MLB_main_clk;
53 static struct clk pll7_usb_host_main_clk;
54 static struct clk pll8_enet_main_clk;
55 static struct clk apbh_dma_clk;
56 static struct clk openvg_axi_clk;
57 static struct clk enfc_clk;
58 static struct clk ipu1_di_clk_root;
59 static struct clk ipu2_di_clk_root;
60 static struct clk usdhc3_clk;
/* Operating-point table filled in from get_cpu_op() (not visible here). */
62 static struct cpu_op *cpu_op_tbl;
65 #define SPIN_DELAY 1000000 /* in nanoseconds */
/* Lock range of the audio/video (and PLL1) fractional PLLs, in Hz. */
67 #define AUDIO_VIDEO_MIN_CLK_FREQ 650000000
68 #define AUDIO_VIDEO_MAX_CLK_FREQ 1300000000
/* Spin on 'exp' until true or 'timeout' ns elapse; evaluates to whether
 * 'exp' became true.  NOTE(review): several continuation lines of this
 * macro were dropped by extraction. */
70 #define WAIT(exp, timeout) \
72 struct timespec nstimeofday; \
73 struct timespec curtime; \
75 getnstimeofday(&nstimeofday); \
77 getnstimeofday(&curtime); \
78 if ((curtime.tv_nsec - nstimeofday.tv_nsec) > (timeout)) { \
86 /* External clock values passed-in by the board code */
87 static unsigned long external_high_reference, external_low_reference;
88 static unsigned long oscillator_reference, ckih2_reference;
/* Split an overall divider 'div' into a 3-bit pre-divider (*pre, 1..8) and
 * a podf post-divider (*post) no larger than 'max_podf' (8 or 64), choosing
 * the pre/post pair whose product best approximates 'div'.
 * NOTE(review): bodies of the branches (clamping and *pre/*post stores) were
 * dropped by extraction; do not treat this fragment as complete. */
90 static void __calc_pre_post_dividers(u32 max_podf, u32 div, u32 *pre, u32 *post)
92 u32 min_pre, temp_pre, old_err, err;
94 /* Some of the podfs are 3 bits while others are 6 bits.
95 * Handle both cases here.
97 if (div >= 512 && (max_podf == 64)) {
98 /* For pre = 3bits and podf = 6 bits, max divider is 512. */
101 } else if (div >= 64 && (max_podf == 8)) {
102 /* For pre = 3bits and podf = 3 bits, max divider is 64. */
105 } else if (div >= 8) {
106 /* Find the minimum pre-divider for a max podf */
108 min_pre = (div - 1) / (1 << 6) + 1;
110 min_pre = (div - 1) / (1 << 3) + 1;
112 /* Now loop through to find the max pre-divider. */
113 for (temp_pre = 8; temp_pre >= min_pre; temp_pre--) {
114 err = div % temp_pre;
119 err = temp_pre - err;
125 *post = (div + *pre - 1) / *pre;
126 } else if (div < 8) {
/* Ungate a clock: set both CCGR gate bits (run + wait/stop) for this clock's
 * field, and bump the bus set-point counters when the clock demands a high
 * or medium AHB frequency (increments dropped by extraction). */
132 static int _clk_enable(struct clk *clk)
135 reg = __raw_readl(clk->enable_reg);
136 reg |= MXC_CCM_CCGRx_CG_MASK << clk->enable_shift;
137 __raw_writel(reg, clk->enable_reg);
139 if (clk->flags & AHB_HIGH_SET_POINT)
141 else if (clk->flags & AHB_MED_SET_POINT)
/* Fully gate a clock (clear both CCGR gate bits) and drop the matching
 * set-point reference taken in _clk_enable. */
147 static void _clk_disable(struct clk *clk)
150 reg = __raw_readl(clk->enable_reg);
151 reg &= ~(MXC_CCM_CCGRx_CG_MASK << clk->enable_shift);
152 __raw_writel(reg, clk->enable_reg);
154 if (clk->flags & AHB_HIGH_SET_POINT)
156 else if (clk->flags & AHB_MED_SET_POINT)
/* Gate a clock only in WAIT/STOP mode: write CCGR value 0b01 for the field
 * (clock stays on while the CPU runs, gates when it idles). */
160 static void _clk_disable_inwait(struct clk *clk)
163 reg = __raw_readl(clk->enable_reg);
164 reg &= ~(MXC_CCM_CCGRx_CG_MASK << clk->enable_shift);
165 reg |= 1 << clk->enable_shift;
166 __raw_writel(reg, clk->enable_reg);
/* Map 'parent' to its selector index (0..3) for a 4-input mux.
 * Return statements were dropped by extraction. */
170 * For the 4-to-1 muxed input clock
172 static inline u32 _get_mux(struct clk *parent, struct clk *m0,
173 struct clk *m1, struct clk *m2, struct clk *m3)
177 else if (parent == m1)
179 else if (parent == m2)
181 else if (parent == m3)
/* Translate a PLL clock object to its ANADIG register base address. */
189 static inline void __iomem *_get_pll_base(struct clk *pll)
191 if (pll == &pll1_sys_main_clk)
192 return PLL1_SYS_BASE_ADDR;
193 else if (pll == &pll2_528_bus_main_clk)
194 return PLL2_528_BASE_ADDR;
195 else if (pll == &pll3_usb_otg_main_clk)
196 return PLL3_480_USB1_BASE_ADDR;
197 else if (pll == &pll4_audio_main_clk)
198 return PLL4_AUDIO_BASE_ADDR;
199 else if (pll == &pll5_video_main_clk)
200 return PLL5_VIDEO_BASE_ADDR;
201 else if (pll == &pll6_MLB_main_clk)
202 return PLL6_MLB_BASE_ADDR;
203 else if (pll == &pll7_usb_host_main_clk)
204 return PLL7_480_USB2_BASE_ADDR;
205 else if (pll == &pll8_enet_main_clk)
206 return PLL8_ENET_BASE_ADDR;
/* Map 'parent' to its selector index (0..5) for a 6-input mux. */
214 * For the 6-to-1 muxed input clock
216 static inline u32 _get_mux6(struct clk *parent, struct clk *m0, struct clk *m1,
217 struct clk *m2, struct clk *m3, struct clk *m4,
222 else if (parent == m1)
224 else if (parent == m2)
226 else if (parent == m3)
228 else if (parent == m4)
230 else if (parent == m5)
/* Rate callbacks for the four board-supplied root references; each simply
 * returns the value the board code stored in the file-scope variables. */
237 static unsigned long get_high_reference_clock_rate(struct clk *clk)
239 return external_high_reference;
242 static unsigned long get_low_reference_clock_rate(struct clk *clk)
244 return external_low_reference;
247 static unsigned long get_oscillator_reference_clock_rate(struct clk *clk)
249 return oscillator_reference;
252 static unsigned long get_ckih2_reference_clock_rate(struct clk *clk)
254 return ckih2_reference;
257 /* External high frequency clock */
258 static struct clk ckih_clk = {
259 __INIT_CLK_DEBUG(ckih_clk)
260 .get_rate = get_high_reference_clock_rate,
/* Second external high-frequency input. */
263 static struct clk ckih2_clk = {
264 __INIT_CLK_DEBUG(ckih2_clk)
265 .get_rate = get_ckih2_reference_clock_rate,
/* 24 MHz crystal oscillator — the parent of all the PLLs below. */
268 static struct clk osc_clk = {
269 __INIT_CLK_DEBUG(osc_clk)
270 .get_rate = get_oscillator_reference_clock_rate,
273 /* External low frequency (32kHz) clock */
274 static struct clk ckil_clk = {
275 __INIT_CLK_DEBUG(ckil_clk)
276 .get_rate = get_low_reference_clock_rate,
/* PFD (phase fractional divider) outputs run at parent * 18 / frac, where
 * the 6-bit frac field must stay within 12..35.  Round 'rate' to the
 * nearest achievable PFD output. */
279 static unsigned long pfd_round_rate(struct clk *clk, unsigned long rate)
284 tmp = (u64)clk_get_rate(clk->parent) * 18;
288 frac = frac < 12 ? 12 : frac;
289 frac = frac > 35 ? 35 : frac;
290 tmp = (u64)clk_get_rate(clk->parent) * 18;
/* Read the frac field back from the ANADIG PFD register and compute
 * parent * 18 / frac.  The apbh_dma clock is ungated around the register
 * access because the ANADIG block is only reachable while it runs
 * (re-gating dropped by extraction). */
295 static unsigned long pfd_get_rate(struct clk *clk)
299 tmp = (u64)clk_get_rate(clk->parent) * 18;
301 if (apbh_dma_clk.usecount == 0)
302 apbh_dma_clk.enable(&apbh_dma_clk);
304 frac = (__raw_readl(clk->enable_reg) >> clk->enable_shift) &
305 ANADIG_PFD_FRAC_MASK;
/* Program the PFD frac field for 'rate' using the ANADIG clear (+8) and
 * set (+4) shadow registers, so other PFD fields in the same word are
 * untouched. */
312 static int pfd_set_rate(struct clk *clk, unsigned long rate)
316 tmp = (u64)clk_get_rate(clk->parent) * 18;
318 if (apbh_dma_clk.usecount == 0)
319 apbh_dma_clk.enable(&apbh_dma_clk);
321 /* Round up the divider so that we don't set a rate
322 * higher than what is requested. */
326 frac = frac < 12 ? 12 : frac;
327 frac = frac > 35 ? 35 : frac;
328 /* clear clk frac bits */
329 __raw_writel(ANADIG_PFD_FRAC_MASK << clk->enable_shift,
330 (int)clk->enable_reg + 8);
331 /* set clk frac bits */
332 __raw_writel(frac << clk->enable_shift,
333 (int)clk->enable_reg + 4);
335 tmp = (u64)clk_get_rate(clk->parent) * 18;
338 if (apbh_dma_clk.usecount == 0)
339 apbh_dma_clk.disable(&apbh_dma_clk);
/* Ungate a PFD output: the gate bit sits 7 above the frac field; writing
 * it to the +8 (clear) register enables the output. */
343 static int _clk_pfd_enable(struct clk *clk)
345 if (apbh_dma_clk.usecount == 0)
346 apbh_dma_clk.enable(&apbh_dma_clk);
348 /* clear clk gate bit */
349 __raw_writel((1 << (clk->enable_shift + 7)),
350 (int)clk->enable_reg + 8);
352 if (apbh_dma_clk.usecount == 0)
353 apbh_dma_clk.disable(&apbh_dma_clk);
/* Gate a PFD output: same gate bit, written via the +4 (set) register. */
358 static void _clk_pfd_disable(struct clk *clk)
360 if (apbh_dma_clk.usecount == 0)
361 apbh_dma_clk.enable(&apbh_dma_clk);
363 /* set clk gate bit */
364 __raw_writel((1 << (clk->enable_shift + 7)),
365 (int)clk->enable_reg + 4);
367 if (apbh_dma_clk.usecount == 0)
368 apbh_dma_clk.disable(&apbh_dma_clk);
/* Power up a PLL: drop bypass and power-down, wait for the lock bit (panic
 * on timeout), then turn on the PLL output.  PLL3/PLL7 use inverted
 * power-bit polarity, so for them the bit is SET to power up. */
371 static int _clk_pll_enable(struct clk *clk)
374 void __iomem *pllbase;
376 pllbase = _get_pll_base(clk);
378 reg = __raw_readl(pllbase);
379 reg &= ~ANADIG_PLL_BYPASS;
380 reg &= ~ANADIG_PLL_POWER_DOWN;
382 /* The 480MHz PLLs have the opposite definition for power bit. */
383 if (clk == &pll3_usb_otg_main_clk || clk == &pll7_usb_host_main_clk)
384 reg |= ANADIG_PLL_POWER_DOWN;
386 __raw_writel(reg, pllbase);
388 /* Wait for PLL to lock */
389 if (!WAIT(__raw_readl(pllbase) & ANADIG_PLL_LOCK,
391 panic("pll enable failed\n");
393 /* Enable the PLL output now*/
394 reg = __raw_readl(pllbase);
395 reg |= ANADIG_PLL_ENABLE;
396 __raw_writel(reg, pllbase);
/* Power a PLL down: bypass it and assert power-down (cleared instead for
 * the inverted-polarity PLL3/PLL7). */
401 static void _clk_pll_disable(struct clk *clk)
404 void __iomem *pllbase;
406 pllbase = _get_pll_base(clk);
408 reg = __raw_readl(pllbase);
409 reg |= ANADIG_PLL_BYPASS;
410 reg |= ANADIG_PLL_POWER_DOWN;
412 /* The 480MHz PLLs, pll3 & pll7, have the opposite
413 * definition for power bit.
415 if (clk == &pll3_usb_otg_main_clk || clk == &pll7_usb_host_main_clk)
416 reg &= ~ANADIG_PLL_POWER_DOWN;
417 __raw_writel(reg, pllbase);
/* PLL1 (ARM PLL): Fout = Fref * div_select / 2, div_select read from the
 * ANADIG register. */
420 static unsigned long _clk_pll1_main_get_rate(struct clk *clk)
425 div = __raw_readl(PLL1_SYS_BASE_ADDR) & ANADIG_PLL_SYS_DIV_SELECT_MASK;
426 val = (clk_get_rate(clk->parent) * div) / 2;
/* Program PLL1's div_select for 'rate' (inverse of the formula above);
 * rejects rates outside the PLL lock range. */
430 static int _clk_pll1_main_set_rate(struct clk *clk, unsigned long rate)
432 unsigned int reg, div;
434 if (rate < AUDIO_VIDEO_MIN_CLK_FREQ || rate > AUDIO_VIDEO_MAX_CLK_FREQ)
437 div = (rate * 2) / clk_get_rate(clk->parent) ;
439 reg = __raw_readl(PLL1_SYS_BASE_ADDR) & ~ANADIG_PLL_SYS_DIV_SELECT_MASK;
441 __raw_writel(reg, PLL1_SYS_BASE_ADDR);
446 static struct clk pll1_sys_main_clk = {
447 __INIT_CLK_DEBUG(pll1_sys_main_clk)
449 .get_rate = _clk_pll1_main_get_rate,
450 .set_rate = _clk_pll1_main_set_rate,
451 .enable = _clk_pll_enable,
452 .disable = _clk_pll_disable,
/* Switch the CPU clock source mux (CCSR): either directly to PLL1, or to
 * step_clk, whose own mux picks the 24 MHz osc or PLL2 PFD 400 MHz. */
455 static int _clk_pll1_sw_set_parent(struct clk *clk, struct clk *parent)
459 reg = __raw_readl(MXC_CCM_CCSR);
461 if (parent == &pll1_sys_main_clk) {
462 reg &= ~MXC_CCM_CCSR_PLL1_SW_CLK_SEL;
463 __raw_writel(reg, MXC_CCM_CCSR);
464 /* Set the step_clk parent to be lp_apm, to save power. */
465 reg = __raw_readl(MXC_CCM_CCSR);
466 reg = (reg & ~MXC_CCM_CCSR_STEP_SEL);
468 /* Set STEP_CLK to be the parent*/
469 if (parent == &osc_clk) {
470 /* Set STEP_CLK to be sourced from LPAPM. */
471 reg = __raw_readl(MXC_CCM_CCSR);
472 reg = (reg & ~MXC_CCM_CCSR_STEP_SEL);
473 __raw_writel(reg, MXC_CCM_CCSR);
475 /* Set STEP_CLK to be sourced from PLL2-PDF (400MHz). */
476 reg = __raw_readl(MXC_CCM_CCSR);
477 reg |= MXC_CCM_CCSR_STEP_SEL;
478 __raw_writel(reg, MXC_CCM_CCSR);
481 reg = __raw_readl(MXC_CCM_CCSR);
482 reg |= MXC_CCM_CCSR_PLL1_SW_CLK_SEL;
484 __raw_writel(reg, MXC_CCM_CCSR);
/* pll1_sw_clk is a pure mux: its rate is whatever its current parent runs
 * at. */
488 static unsigned long _clk_pll1_sw_get_rate(struct clk *clk)
490 return clk_get_rate(clk->parent);
493 static struct clk pll1_sw_clk = {
494 __INIT_CLK_DEBUG(pll1_sw_clk)
495 .parent = &pll1_sys_main_clk,
496 .set_parent = _clk_pll1_sw_set_parent,
497 .get_rate = _clk_pll1_sw_get_rate,
/* PLL2 (bus PLL): a single div_select bit picks 24 MHz * 22 = 528 MHz or
 * 24 MHz * 20 = 480 MHz. */
500 static unsigned long _clk_pll2_main_get_rate(struct clk *clk)
505 div = __raw_readl(PLL2_528_BASE_ADDR) & ANADIG_PLL_528_DIV_SELECT;
508 val = clk_get_rate(clk->parent) * 22;
511 val = clk_get_rate(clk->parent) * 20;
/* Only the two exact rates 528 MHz and 480 MHz are programmable. */
516 static int _clk_pll2_main_set_rate(struct clk *clk, unsigned long rate)
518 unsigned int reg, div;
520 if (rate == 528000000)
522 else if (rate == 480000000)
527 reg = __raw_readl(PLL2_528_BASE_ADDR);
528 reg &= ~ANADIG_PLL_528_DIV_SELECT;
530 __raw_writel(reg, PLL2_528_BASE_ADDR);
535 static struct clk pll2_528_bus_main_clk = {
536 __INIT_CLK_DEBUG(pll2_528_bus_main_clk)
538 .get_rate = _clk_pll2_main_get_rate,
539 .set_rate = _clk_pll2_main_set_rate,
540 .enable = _clk_pll_enable,
541 .disable = _clk_pll_disable,
544 static struct clk pll2_pfd_400M = {
545 __INIT_CLK_DEBUG(pll2_pfd_400M)
546 .parent = &pll2_528_bus_main_clk,
547 .enable_reg = (void *)PFD_528_BASE_ADDR,
548 .enable_shift = ANADIG_PFD2_FRAC_OFFSET,
549 .enable = _clk_pfd_enable,
550 .disable = _clk_pfd_disable,
551 .get_rate = pfd_get_rate,
552 .set_rate = pfd_set_rate,
553 .get_rate = pfd_get_rate,
554 .round_rate = pfd_round_rate,
/* PLL2 PFD0 output (~352 MHz class). */
557 static struct clk pll2_pfd_352M = {
558 __INIT_CLK_DEBUG(pll2_pfd_352M)
559 .parent = &pll2_528_bus_main_clk,
560 .enable_reg = (void *)PFD_528_BASE_ADDR,
561 .enable_shift = ANADIG_PFD0_FRAC_OFFSET,
562 .enable = _clk_pfd_enable,
563 .disable = _clk_pfd_disable,
564 .set_rate = pfd_set_rate,
565 .get_rate = pfd_get_rate,
566 .round_rate = pfd_round_rate,
/* PLL2 PFD1 output (~594 MHz class). */
569 static struct clk pll2_pfd_594M = {
570 __INIT_CLK_DEBUG(pll2_pfd_594M)
571 .parent = &pll2_528_bus_main_clk,
572 .enable_reg = (void *)PFD_528_BASE_ADDR,
573 .enable_shift = ANADIG_PFD1_FRAC_OFFSET,
574 .enable = _clk_pfd_enable,
575 .disable = _clk_pfd_disable,
576 .set_rate = pfd_set_rate,
577 .get_rate = pfd_get_rate,
578 .round_rate = pfd_round_rate,
/* Fixed /2 child of the 400 MHz PFD — nominally 200 MHz. */
581 static unsigned long _clk_pll2_200M_get_rate(struct clk *clk)
583 return clk_get_rate(clk->parent) / 2;
586 static struct clk pll2_200M = {
587 __INIT_CLK_DEBUG(pll2_200M)
588 .parent = &pll2_pfd_400M,
589 .get_rate = _clk_pll2_200M_get_rate,
/* PLL3 (USB OTG / 480 MHz PLL): one div_select bit picks *22 (528 MHz) or
 * *20 (480 MHz) of the 24 MHz reference. */
592 static unsigned long _clk_pll3_usb_otg_get_rate(struct clk *clk)
597 div = __raw_readl(PLL3_480_USB1_BASE_ADDR)
598 & ANADIG_PLL_480_DIV_SELECT_MASK;
601 val = clk_get_rate(clk->parent) * 22;
603 val = clk_get_rate(clk->parent) * 20;
/* Only 528 MHz or 480 MHz are accepted. */
607 static int _clk_pll3_usb_otg_set_rate(struct clk *clk, unsigned long rate)
609 unsigned int reg, div;
611 if (rate == 528000000)
613 else if (rate == 480000000)
618 reg = __raw_readl(PLL3_480_USB1_BASE_ADDR);
619 reg &= ~ANADIG_PLL_480_DIV_SELECT_MASK;
621 __raw_writel(reg, PLL3_480_USB1_BASE_ADDR);
627 /* same as pll3_main_clk. These two clocks should always be the same */
628 static struct clk pll3_usb_otg_main_clk = {
629 __INIT_CLK_DEBUG(pll3_usb_otg_main_clk)
631 .enable = _clk_pll_enable,
632 .disable = _clk_pll_disable,
633 .set_rate = _clk_pll3_usb_otg_set_rate,
634 .get_rate = _clk_pll3_usb_otg_get_rate,
/* USB PHY1 clock — shares PLL3's rate callbacks. */
637 static struct clk usb_phy1_clk = {
638 __INIT_CLK_DEBUG(usb_phy1_clk)
639 .parent = &pll3_usb_otg_main_clk,
640 .set_rate = _clk_pll3_usb_otg_set_rate,
641 .get_rate = _clk_pll3_usb_otg_get_rate,
/* PLL3 PFD2 output (~508 MHz class). */
644 static struct clk pll3_pfd_508M = {
645 __INIT_CLK_DEBUG(pll3_pfd_508M)
646 .parent = &pll3_usb_otg_main_clk,
647 .enable_reg = (void *)PFD_480_BASE_ADDR,
648 .enable_shift = ANADIG_PFD2_FRAC_OFFSET,
649 .enable = _clk_pfd_enable,
650 .disable = _clk_pfd_disable,
651 .set_rate = pfd_set_rate,
652 .get_rate = pfd_get_rate,
653 .round_rate = pfd_round_rate,
/* PLL3 PFD3 output (~454 MHz class). */
656 static struct clk pll3_pfd_454M = {
657 __INIT_CLK_DEBUG(pll3_pfd_454M)
658 .parent = &pll3_usb_otg_main_clk,
659 .enable_reg = (void *)PFD_480_BASE_ADDR,
660 .enable_shift = ANADIG_PFD3_FRAC_OFFSET,
661 .enable = _clk_pfd_enable,
662 .disable = _clk_pfd_disable,
663 .set_rate = pfd_set_rate,
664 .get_rate = pfd_get_rate,
665 .round_rate = pfd_round_rate,
/* PLL3 PFD0 output (~720 MHz class). */
668 static struct clk pll3_pfd_720M = {
669 __INIT_CLK_DEBUG(pll3_pfd_720M)
670 .parent = &pll3_usb_otg_main_clk,
671 .enable_reg = (void *)PFD_480_BASE_ADDR,
672 .enable_shift = ANADIG_PFD0_FRAC_OFFSET,
673 .enable = _clk_pfd_enable,
674 .disable = _clk_pfd_disable,
675 .set_rate = pfd_set_rate,
676 .get_rate = pfd_get_rate,
677 .round_rate = pfd_round_rate,
680 static struct clk pll3_pfd_540M = {
681 __INIT_CLK_DEBUG(pll3_pfd_540M)
682 .parent = &pll3_usb_otg_main_clk,
683 .enable_reg = (void *)PFD_480_BASE_ADDR,
684 .enable_shift = ANADIG_PFD1_FRAC_OFFSET,
685 .enable = _clk_pfd_enable,
686 .disable = _clk_pfd_disable,
687 .set_rate = pfd_set_rate,
688 .get_rate = pfd_get_rate,
689 .round_rate = pfd_round_rate,
690 .get_rate = pfd_get_rate,
/* pll3_sw_clk mirrors its parent's rate. */
693 static unsigned long _clk_pll3_sw_get_rate(struct clk *clk)
695 return clk_get_rate(clk->parent);
698 /* same as pll3_main_clk. These two clocks should always be the same */
699 static struct clk pll3_sw_clk = {
700 __INIT_CLK_DEBUG(pll3_sw_clk)
701 .parent = &pll3_usb_otg_main_clk,
702 .get_rate = _clk_pll3_sw_get_rate,
/* Fixed-ratio children of pll3_sw_clk: /4 = 120 MHz. */
705 static unsigned long _clk_pll3_120M_get_rate(struct clk *clk)
707 return clk_get_rate(clk->parent) / 4;
710 static struct clk pll3_120M = {
711 __INIT_CLK_DEBUG(pll3_120M)
712 .parent = &pll3_sw_clk,
713 .get_rate = _clk_pll3_120M_get_rate,
/* /6 = 80 MHz. */
716 static unsigned long _clk_pll3_80M_get_rate(struct clk *clk)
718 return clk_get_rate(clk->parent) / 6;
721 static struct clk pll3_80M = {
722 __INIT_CLK_DEBUG(pll3_80M)
723 .parent = &pll3_sw_clk,
724 .get_rate = _clk_pll3_80M_get_rate,
/* /8 = 60 MHz. */
727 static unsigned long _clk_pll3_60M_get_rate(struct clk *clk)
729 return clk_get_rate(clk->parent) / 8;
732 static struct clk pll3_60M = {
733 __INIT_CLK_DEBUG(pll3_60M)
734 .parent = &pll3_sw_clk,
735 .get_rate = _clk_pll3_60M_get_rate,
/* Audio/video fractional PLLs (PLL4/PLL5):
 * rate = parent * (div + mfn/mfd), with div from the register's
 * div_select field and mfn/mfd from the NUM/DENOM registers. */
738 static unsigned long _clk_audio_video_get_rate(struct clk *clk)
740 unsigned int div, mfn, mfd;
742 unsigned int parent_rate = clk_get_rate(clk->parent);
743 void __iomem *pllbase;
745 if (clk == &pll4_audio_main_clk)
746 pllbase = PLL4_AUDIO_BASE_ADDR;
748 pllbase = PLL5_VIDEO_BASE_ADDR;
750 div = __raw_readl(pllbase) & ANADIG_PLL_SYS_DIV_SELECT_MASK;
751 mfn = __raw_readl(pllbase + PLL_NUM_DIV_OFFSET);
752 mfd = __raw_readl(pllbase + PLL_DENOM_DIV_OFFSET);
754 rate = (parent_rate * div) + ((parent_rate / mfd) * mfn);
/* Split 'rate' into integer div plus fractional mfn/mfd (mfd fixed at
 * 1e6) and program the PLL; rejects rates outside the lock range. */
759 static int _clk_audio_video_set_rate(struct clk *clk, unsigned long rate)
761 unsigned int reg, div;
762 unsigned int mfn, mfd = 1000000;
764 unsigned int parent_rate = clk_get_rate(clk->parent);
765 void __iomem *pllbase;
767 if ((rate < AUDIO_VIDEO_MIN_CLK_FREQ) ||
768 (rate > AUDIO_VIDEO_MAX_CLK_FREQ))
771 if (clk == &pll4_audio_main_clk)
772 pllbase = PLL4_AUDIO_BASE_ADDR;
774 pllbase = PLL5_VIDEO_BASE_ADDR;
776 div = rate / parent_rate ;
777 temp64 = (u64) (rate - (div * parent_rate));
779 do_div(temp64, parent_rate);
782 reg = __raw_readl(pllbase) & ~ANADIG_PLL_SYS_DIV_SELECT_MASK;
784 __raw_writel(reg, pllbase);
785 __raw_writel(mfn, pllbase + PLL_NUM_DIV_OFFSET);
786 __raw_writel(mfd, pllbase + PLL_DENOM_DIV_OFFSET);
/* Clamp requested rates to the PLL's lock range. */
791 static unsigned long _clk_audio_video_round_rate(struct clk *clk,
794 if (rate < AUDIO_VIDEO_MIN_CLK_FREQ)
795 return AUDIO_VIDEO_MIN_CLK_FREQ;
797 if (rate > AUDIO_VIDEO_MAX_CLK_FREQ)
798 return AUDIO_VIDEO_MAX_CLK_FREQ;
804 static struct clk pll4_audio_main_clk = {
805 __INIT_CLK_DEBUG(pll4_audio_main_clk)
807 .enable = _clk_pll_enable,
808 .disable = _clk_pll_disable,
809 .set_rate = _clk_audio_video_set_rate,
810 .get_rate = _clk_audio_video_get_rate,
811 .round_rate = _clk_audio_video_round_rate,
815 static struct clk pll5_video_main_clk = {
816 __INIT_CLK_DEBUG(pll5_video_main_clk)
818 .enable = _clk_pll_enable,
819 .disable = _clk_pll_disable,
820 .set_rate = _clk_audio_video_set_rate,
821 .get_rate = _clk_audio_video_get_rate,
822 .round_rate = _clk_audio_video_round_rate,
/* MLB PLL: fixed-function, enable/disable only. */
825 static struct clk pll6_MLB_main_clk = {
826 __INIT_CLK_DEBUG(pll6_MLB_main_clk)
828 .enable = _clk_pll_enable,
829 .disable = _clk_pll_disable,
/* PLL7 (USB host, 480 MHz class): same *22/*20 div_select scheme as PLL3. */
832 static unsigned long _clk_pll7_usb_otg_get_rate(struct clk *clk)
837 div = __raw_readl(PLL7_480_USB2_BASE_ADDR)
838 & ANADIG_PLL_480_DIV_SELECT_MASK;
841 val = clk_get_rate(clk->parent) * 22;
843 val = clk_get_rate(clk->parent) * 20;
/* Only 528 MHz or 480 MHz are accepted. */
847 static int _clk_pll7_usb_otg_set_rate(struct clk *clk, unsigned long rate)
849 unsigned int reg, div;
851 if (rate == 528000000)
853 else if (rate == 480000000)
858 reg = __raw_readl(PLL7_480_USB2_BASE_ADDR);
859 reg &= ~ANADIG_PLL_480_DIV_SELECT_MASK;
861 __raw_writel(reg, PLL7_480_USB2_BASE_ADDR);
866 static struct clk pll7_usb_host_main_clk = {
867 __INIT_CLK_DEBUG(pll7_usb_host_main_clk)
869 .enable = _clk_pll_enable,
870 .disable = _clk_pll_disable,
871 .set_rate = _clk_pll7_usb_otg_set_rate,
872 .get_rate = _clk_pll7_usb_otg_get_rate,
/* Ethernet PLL: enable/disable only in this fragment. */
876 static struct clk pll8_enet_main_clk = {
877 __INIT_CLK_DEBUG(pll8_enet_main_clk)
879 .enable = _clk_pll_enable,
880 .disable = _clk_pll_disable,
/* ARM core clock rate: parent (pll1_sw_clk) divided by ARM_PODF + 1. */
883 static unsigned long _clk_arm_get_rate(struct clk *clk)
887 cacrr = __raw_readl(MXC_CCM_CACRR);
888 div = (cacrr & MXC_CCM_CACRR_ARM_PODF_MASK) + 1;
889 return clk_get_rate(clk->parent) / div;
/* Set the CPU frequency: look 'rate' up in the board operating-point
 * table, reparent pll1_sw_clk (PLL2 PFD 400 MHz for low rates so PLL1 can
 * power down; relock PLL1 via osc for high rates), then program ARM_PODF
 * and fix up parent usecounts.
 * NOTE(review): several lines were dropped by extraction (loop body,
 * error return, div clamping); the visible sequencing is the original's. */
892 static int _clk_arm_set_rate(struct clk *clk, unsigned long rate)
897 struct clk *old_parent = pll1_sw_clk.parent;
899 for (i = 0; i < cpu_op_nr; i++) {
900 if (rate == cpu_op_tbl[i].cpu_rate)
906 if (rate <= clk_get_rate(&pll2_pfd_400M)) {
908 * Move pll1_sw_clk to PLL2_PFD_400M
909 * so that we can disable PLL1.
911 if (pll2_pfd_400M.usecount == 0)
912 pll2_pfd_400M.enable(&pll2_pfd_400M);
913 pll1_sw_clk.set_parent(&pll1_sw_clk, &pll2_pfd_400M);
914 pll1_sw_clk.parent = &pll2_pfd_400M;
916 /* Rate is above 400MHz. We may need to relock PLL1. */
917 pll1_sw_clk.set_parent(&pll1_sw_clk, &osc_clk);
918 if (pll1_sys_main_clk.usecount == 0)
919 pll1_sys_main_clk.enable(&pll1_sys_main_clk);
920 pll1_sys_main_clk.set_rate(&pll1_sys_main_clk, cpu_op_tbl[i].pll_rate);
921 pll1_sw_clk.set_parent(&pll1_sw_clk, &pll1_sys_main_clk);
922 pll1_sw_clk.parent = &pll1_sys_main_clk;
925 parent_rate = clk_get_rate(clk->parent);
926 div = parent_rate / rate;
931 if ((parent_rate / div) > rate)
937 __raw_writel(div - 1, MXC_CCM_CACRR);
939 /* Increment current parent's usecount. */
940 pll1_sw_clk.parent->usecount++;
942 /* Decrement the current parent's usecount */
943 old_parent->usecount--;
945 if (old_parent->usecount == 0)
946 old_parent->disable(old_parent);
951 static struct clk cpu_clk = {
952 __INIT_CLK_DEBUG(cpu_clk)
953 .parent = &pll1_sw_clk,
954 .set_rate = _clk_arm_set_rate,
955 .get_rate = _clk_arm_get_rate,
/* Reparent periph_clk.  Mux indices 0-3 (PLL2 and its PFDs) go through the
 * glitchless pre_periph mux + periph_clk_sel; indices 4-5 (pll3_sw/osc) go
 * through periph_clk2, whose handshake is polled via CDHIPR (panic on
 * timeout).  NOTE(review): the if/else separating the two paths was
 * dropped by extraction. */
958 static int _clk_periph_set_parent(struct clk *clk, struct clk *parent)
963 mux = _get_mux6(parent, &pll2_528_bus_main_clk, &pll2_pfd_400M,
964 &pll2_pfd_352M, &pll2_200M, &pll3_sw_clk, &osc_clk);
967 /* Set the pre_periph_clk multiplexer */
968 reg = __raw_readl(MXC_CCM_CBCMR);
969 reg &= ~MXC_CCM_CBCMR_PRE_PERIPH_CLK_SEL_MASK;
970 reg |= mux << MXC_CCM_CBCMR_PRE_PERIPH_CLK_SEL_OFFSET;
971 __raw_writel(reg, MXC_CCM_CBCMR);
973 /* Set the periph_clk_sel multiplexer. */
974 reg = __raw_readl(MXC_CCM_CBCDR);
975 reg &= ~MXC_CCM_CBCDR_PERIPH_CLK_SEL;
976 __raw_writel(reg, MXC_CCM_CBCDR);
978 reg = __raw_readl(MXC_CCM_CBCDR);
979 /* Set the periph_clk2_podf divider to divide by 1. */
980 reg &= ~MXC_CCM_CBCDR_PERIPH_CLK2_PODF_MASK;
981 __raw_writel(reg, MXC_CCM_CBCDR);
983 /* Set the periph_clk2_sel mux. */
984 reg = __raw_readl(MXC_CCM_CBCMR);
985 reg &= ~MXC_CCM_CBCMR_PERIPH_CLK2_SEL_MASK;
986 reg |= ((mux - 4) << MXC_CCM_CBCMR_PERIPH_CLK2_SEL_OFFSET);
987 __raw_writel(reg, MXC_CCM_CBCMR);
/* Wait for any in-flight divider/mux handshakes to finish first. */
989 while (__raw_readl(MXC_CCM_CDHIPR))
992 reg = __raw_readl(MXC_CCM_CBCDR);
993 /* Set periph_clk_sel to select periph_clk2. */
994 reg |= MXC_CCM_CBCDR_PERIPH_CLK_SEL;
995 __raw_writel(reg, MXC_CCM_CBCDR);
998 if (!WAIT(!(__raw_readl(MXC_CCM_CDHIPR)
999 & MXC_CCM_CDHIPR_PERIPH_CLK_SEL_BUSY), SPIN_DELAY))
1000 panic("_clk_periph_set_parent failed\n");
/* periph_clk rate: parent rate, additionally divided by periph_clk2_podf
 * when sourced from pll3_sw/osc (the periph_clk2 path). */
1005 static unsigned long _clk_periph_get_rate(struct clk *clk)
1011 if ((clk->parent == &pll3_sw_clk) || (clk->parent == &osc_clk)) {
1012 reg = __raw_readl(MXC_CCM_CBCDR)
1013 & MXC_CCM_CBCDR_PERIPH_CLK2_PODF_MASK;
1014 div = (reg >> MXC_CCM_CBCDR_PERIPH_CLK2_PODF_OFFSET) + 1;
1016 val = clk_get_rate(clk->parent) / div;
1020 static struct clk periph_clk = {
1021 __INIT_CLK_DEBUG(periph_clk)
1022 .parent = &pll2_528_bus_main_clk,
1023 .set_parent = _clk_periph_set_parent,
1024 .get_rate = _clk_periph_get_rate,
/* AXI bus rate: parent / (AXI_PODF + 1) from CBCDR. */
1027 static unsigned long _clk_axi_get_rate(struct clk *clk)
1032 reg = __raw_readl(MXC_CCM_CBCDR) & MXC_CCM_CBCDR_AXI_PODF_MASK;
1033 div = (reg >> MXC_CCM_CBCDR_AXI_PODF_OFFSET);
1035 val = clk_get_rate(clk->parent) / (div + 1);
/* Program AXI_PODF; only exact divisions with div <= 8 are accepted, and
 * completion is polled via the CDHIPR busy bit (panic on timeout). */
1039 static int _clk_axi_set_rate(struct clk *clk, unsigned long rate)
1042 u32 parent_rate = clk_get_rate(clk->parent);
1044 div = parent_rate / rate;
1048 if (((parent_rate / div) != rate) || (div > 8))
1051 reg = __raw_readl(MXC_CCM_CBCDR);
1052 reg &= ~MXC_CCM_CBCDR_AXI_PODF_MASK;
1053 reg |= (div - 1) << MXC_CCM_CBCDR_AXI_PODF_OFFSET;
1054 __raw_writel(reg, MXC_CCM_CBCDR);
1056 if (!WAIT(!(__raw_readl(MXC_CCM_CDHIPR)
1057 & MXC_CCM_CDHIPR_AXI_PODF_BUSY), SPIN_DELAY))
1058 panic("pll _clk_axi_a_set_rate failed\n");
/* Round to the nearest achievable divided-down parent rate.
 * NOTE(review): the div clamping lines were dropped by extraction. */
1063 static unsigned long _clk_axi_round_rate(struct clk *clk,
1067 u32 parent_rate = clk_get_rate(clk->parent);
1069 div = parent_rate / rate;
1071 /* Make sure rate is not greater than the maximum
1072 * value for the clock.
1073 * Also prevent a div of 0.
1081 return parent_rate / div;
1084 static int _clk_axi_set_parent(struct clk *clk, struct clk *parent)
1089 mux = _get_mux6(parent, &periph_clk, &pll2_pfd_400M,
1090 &pll3_pfd_540M, NULL, NULL, NULL);
1093 /* Set the AXI_SEL mux */
1094 reg = __raw_readl(MXC_CCM_CBCDR) & ~MXC_CCM_CBCDR_AXI_SEL;
1095 __raw_writel(reg, MXC_CCM_CBCDR);
1097 /* Set the AXI_ALT_SEL mux. */
1098 reg = __raw_readl(MXC_CCM_CBCDR)
1099 & ~MXC_CCM_CBCDR_AXI_ALT_SEL_MASK;
1100 reg = ((mux - 1) << MXC_CCM_CBCDR_AXI_ALT_SEL_OFFSET);
1101 __raw_writel(reg, MXC_CCM_CBCDR);
1103 /* Set the AXI_SEL mux */
1104 reg = __raw_readl(MXC_CCM_CBCDR) & ~MXC_CCM_CBCDR_AXI_SEL;
1105 reg |= MXC_CCM_CBCDR_AXI_SEL;
1106 __raw_writel(reg, MXC_CCM_CBCDR);
/* AXI bus clock object. */
1112 static struct clk axi_clk = {
1113 __INIT_CLK_DEBUG(axi_clk)
1114 .parent = &periph_clk,
1115 .set_parent = _clk_axi_set_parent,
1116 .set_rate = _clk_axi_set_rate,
1117 .get_rate = _clk_axi_get_rate,
1118 .round_rate = _clk_axi_round_rate,
/* AHB rate: parent / (AHB_PODF + 1) from CBCDR. */
1120 static unsigned long _clk_ahb_get_rate(struct clk *clk)
1124 reg = __raw_readl(MXC_CCM_CBCDR);
1125 div = ((reg & MXC_CCM_CBCDR_AHB_PODF_MASK) >>
1126 MXC_CCM_CBCDR_AHB_PODF_OFFSET) + 1;
1128 return clk_get_rate(clk->parent) / div;
/* Program AHB_PODF; exact divisions with div <= 8 only, handshake polled
 * via CDHIPR (panic on timeout). */
1131 static int _clk_ahb_set_rate(struct clk *clk, unsigned long rate)
1134 u32 parent_rate = clk_get_rate(clk->parent);
1136 div = parent_rate / rate;
1139 if (((parent_rate / div) != rate) || (div > 8))
1142 reg = __raw_readl(MXC_CCM_CBCDR);
1143 reg &= ~MXC_CCM_CBCDR_AHB_PODF_MASK;
1144 reg |= (div - 1) << MXC_CCM_CBCDR_AHB_PODF_OFFSET;
1145 __raw_writel(reg, MXC_CCM_CBCDR);
1147 if (!WAIT(!(__raw_readl(MXC_CCM_CDHIPR) & MXC_CCM_CDHIPR_AHB_PODF_BUSY),
1149 panic("_clk_ahb_set_rate failed\n");
/* Round to the nearest achievable divided-down parent rate.
 * NOTE(review): div clamping lines dropped by extraction. */
1154 static unsigned long _clk_ahb_round_rate(struct clk *clk,
1158 u32 parent_rate = clk_get_rate(clk->parent);
1160 div = parent_rate / rate;
1162 /* Make sure rate is not greater than the maximum value for the clock.
1163 * Also prevent a div of 0.
1171 return parent_rate / div;
1174 static struct clk ahb_clk = {
1175 __INIT_CLK_DEBUG(ahb_clk)
1176 .parent = &periph_clk,
1177 .get_rate = _clk_ahb_get_rate,
1178 .set_rate = _clk_ahb_set_rate,
1179 .round_rate = _clk_ahb_round_rate,
/* IPG rate: parent / (IPG_PODF + 1) from CBCDR; read-only here. */
1182 static unsigned long _clk_ipg_get_rate(struct clk *clk)
1186 reg = __raw_readl(MXC_CCM_CBCDR);
1187 div = ((reg & MXC_CCM_CBCDR_IPG_PODF_MASK) >>
1188 MXC_CCM_CBCDR_IPG_PODF_OFFSET) + 1;
1190 return clk_get_rate(clk->parent) / div;
1194 static struct clk ipg_clk = {
1195 __INIT_CLK_DEBUG(ipg_clk)
1197 .get_rate = _clk_ipg_get_rate,
/* Simple CCGR gate clocks.  All use _clk_disable_inwait so they keep
 * running while the CPU runs and gate only in WAIT/STOP mode. */
1201 static struct clk tzasc1_clk = {
1202 __INIT_CLK_DEBUG(tzasc1_clk)
1204 .enable_reg = MXC_CCM_CCGR2,
1205 .enable_shift = MXC_CCM_CCGRx_CG11_OFFSET,
1206 .enable = _clk_enable,
1207 .disable = _clk_disable_inwait,
1210 static struct clk tzasc2_clk = {
1211 __INIT_CLK_DEBUG(tzasc2_clk)
1214 .enable_reg = MXC_CCM_CCGR2,
1215 .enable_shift = MXC_CCM_CCGRx_CG12_OFFSET,
1216 .enable = _clk_enable,
1217 .disable = _clk_disable_inwait,
1220 static struct clk mx6fast1_clk = {
1221 __INIT_CLK_DEBUG(mx6fast1_clk)
1224 .enable_reg = MXC_CCM_CCGR4,
1225 .enable_shift = MXC_CCM_CCGRx_CG4_OFFSET,
1226 .enable = _clk_enable,
1227 .disable = _clk_disable_inwait,
/* mx6per1 additionally keeps mx6fast1 enabled via .secondary. */
1230 static struct clk mx6per1_clk = {
1231 __INIT_CLK_DEBUG(mx6per1_clk)
1234 .secondary = &mx6fast1_clk,
1235 .enable_reg = MXC_CCM_CCGR4,
1236 .enable_shift = MXC_CCM_CCGRx_CG6_OFFSET,
1237 .enable = _clk_enable,
1238 .disable = _clk_disable_inwait,
1241 static struct clk mx6per2_clk = {
1242 __INIT_CLK_DEBUG(mx6per2_clk)
1245 .enable_reg = MXC_CCM_CCGR4,
1246 .enable_shift = MXC_CCM_CCGRx_CG7_OFFSET,
1247 .enable = _clk_enable,
1248 .disable = _clk_disable_inwait,
/* MMDC channel 0 AXI rate: parent / (MMDC_CH0_PODF + 1) from CBCDR. */
1251 static unsigned long _clk_mmdc_ch0_axi_get_rate(struct clk *clk)
1255 reg = __raw_readl(MXC_CCM_CBCDR);
1256 div = ((reg & MXC_CCM_CBCDR_MMDC_CH0_PODF_MASK) >>
1257 MXC_CCM_CBCDR_MMDC_CH0_PODF_OFFSET) + 1;
1259 return clk_get_rate(clk->parent) / div;
/* Program the podf; exact divisions with div <= 8 only, CDHIPR handshake
 * polled (panic on timeout). */
1262 static int _clk_mmdc_ch0_axi_set_rate(struct clk *clk, unsigned long rate)
1265 u32 parent_rate = clk_get_rate(clk->parent);
1267 div = parent_rate / rate;
1270 if (((parent_rate / div) != rate) || (div > 8))
1273 reg = __raw_readl(MXC_CCM_CBCDR);
1274 reg &= ~MXC_CCM_CBCDR_MMDC_CH0_PODF_MASK;
1275 reg |= (div - 1) << MXC_CCM_CBCDR_MMDC_CH0_PODF_OFFSET;
1276 __raw_writel(reg, MXC_CCM_CBCDR);
1278 if (!WAIT(!(__raw_readl(MXC_CCM_CDHIPR)
1279 & MXC_CCM_CDHIPR_MMDC_CH0_PODF_BUSY),
1281 panic("_clk_mmdc_ch0_axi_set_rate failed\n");
/* Round to the nearest achievable divided-down parent rate.
 * NOTE(review): div clamping lines dropped by extraction. */
1286 static unsigned long _clk_mmdc_ch0_axi_round_rate(struct clk *clk,
1290 u32 parent_rate = clk_get_rate(clk->parent);
1292 div = parent_rate / rate;
1294 /* Make sure rate is not greater than the maximum value for the clock.
1295 * Also prevent a div of 0.
1303 return parent_rate / div;
/* [0] = MMDC ch0 AXI clock, [1] = its IPG companion (chained through
 * .secondary so enabling [0] also keeps [1] and tzasc1 on). */
1306 static struct clk mmdc_ch0_axi_clk[] = {
1308 __INIT_CLK_DEBUG(mmdc_ch0_axi_clk)
1310 .parent = &periph_clk,
1311 .enable = _clk_enable,
1312 .disable = _clk_disable_inwait,
1313 .enable_reg = MXC_CCM_CCGR3,
1314 .enable_shift = MXC_CCM_CCGRx_CG10_OFFSET,
1315 .secondary = &mmdc_ch0_axi_clk[1],
1316 .get_rate = _clk_mmdc_ch0_axi_get_rate,
1317 .set_rate = _clk_mmdc_ch0_axi_set_rate,
1318 .round_rate = _clk_mmdc_ch0_axi_round_rate,
1321 __INIT_CLK_DEBUG(mmdc_ch0_ipg_clk)
1324 .enable = _clk_enable,
1325 .disable = _clk_disable_inwait,
1326 .enable_reg = MXC_CCM_CCGR3,
1327 .enable_shift = MXC_CCM_CCGRx_CG12_OFFSET,
1328 .secondary = &tzasc1_clk,
/* MMDC channel 1 AXI rate: parent / (MMDC_CH1_PODF + 1) from CBCDR. */
1332 static unsigned long _clk_mmdc_ch1_axi_get_rate(struct clk *clk)
1336 reg = __raw_readl(MXC_CCM_CBCDR);
1337 div = ((reg & MXC_CCM_CBCDR_MMDC_CH1_PODF_MASK) >>
1338 MXC_CCM_CBCDR_MMDC_CH1_PODF_OFFSET) + 1;
1340 return clk_get_rate(clk->parent) / div;
/* Program the podf; exact divisions with div <= 8 only, CDHIPR handshake
 * polled (panic on timeout). */
1343 static int _clk_mmdc_ch1_axi_set_rate(struct clk *clk, unsigned long rate)
1346 u32 parent_rate = clk_get_rate(clk->parent);
1348 div = parent_rate / rate;
1351 if (((parent_rate / div) != rate) || (div > 8))
1354 reg = __raw_readl(MXC_CCM_CBCDR);
1355 reg &= ~MXC_CCM_CBCDR_MMDC_CH1_PODF_MASK;
1356 reg |= (div - 1) << MXC_CCM_CBCDR_MMDC_CH1_PODF_OFFSET;
1357 __raw_writel(reg, MXC_CCM_CBCDR);
1359 if (!WAIT(!(__raw_readl(MXC_CCM_CDHIPR)
1360 & MXC_CCM_CDHIPR_MMDC_CH1_PODF_BUSY), SPIN_DELAY))
1361 panic("_clk_mmdc_ch1_axi_set_rate failed\n");
/* Round to the nearest achievable divided-down parent rate.
 * NOTE(review): div clamping lines dropped by extraction. */
1366 static unsigned long _clk_mmdc_ch1_axi_round_rate(struct clk *clk,
1370 u32 parent_rate = clk_get_rate(clk->parent);
1372 div = parent_rate / rate;
1374 /* Make sure rate is not greater than the maximum value for the clock.
1375 * Also prevent a div of 0.
1383 return parent_rate / div;
/* [0] = MMDC ch1 AXI clock (parent pll2_pfd_400M), [1] = its IPG
 * companion chained via .secondary (then tzasc2). */
1386 static struct clk mmdc_ch1_axi_clk[] = {
1388 __INIT_CLK_DEBUG(mmdc_ch1_axi_clk)
1390 .parent = &pll2_pfd_400M,
1391 .enable = _clk_enable,
1392 .disable = _clk_disable,
1393 .enable_reg = MXC_CCM_CCGR3,
1394 .enable_shift = MXC_CCM_CCGRx_CG11_OFFSET,
1395 .secondary = &mmdc_ch1_axi_clk[1],
1396 .get_rate = _clk_mmdc_ch1_axi_get_rate,
1397 .set_rate = _clk_mmdc_ch1_axi_set_rate,
1398 .round_rate = _clk_mmdc_ch1_axi_round_rate,
1402 __INIT_CLK_DEBUG(mmdc_ch1_ipg_clk)
1404 .enable = _clk_enable,
1405 .disable = _clk_disable,
1406 .enable_reg = MXC_CCM_CCGR3,
1407 .enable_shift = MXC_CCM_CCGRx_CG13_OFFSET,
1408 .secondary = &tzasc2_clk,
/* On-chip RAM gate; stays on while the CPU runs. */
1412 static struct clk ocram_clk = {
1413 __INIT_CLK_DEBUG(ocram_clk)
1416 .enable_reg = MXC_CCM_CCGR3,
1417 .enable_shift = MXC_CCM_CCGRx_CG14_OFFSET,
1418 .enable = _clk_enable,
1419 .disable = _clk_disable_inwait,
1422 static unsigned long _clk_ipg_perclk_get_rate(struct clk *clk)
1426 reg = __raw_readl(MXC_CCM_CSCMR1);
1427 div = ((reg & MXC_CCM_CSCMR1_PERCLK_PODF_MASK) >>
1428 MXC_CCM_CSCMR1_PERCLK_PODF_OFFSET) + 1;
1430 return clk_get_rate(clk->parent) / div;
1433 static int _clk_ipg_perclk_set_rate(struct clk *clk, unsigned long rate)
1436 u32 parent_rate = clk_get_rate(clk->parent);
1438 div = parent_rate / rate;
1441 if (((parent_rate / div) != rate) || (div > 64))
1444 reg = __raw_readl(MXC_CCM_CSCMR1);
1445 reg &= ~MXC_CCM_CSCMR1_PERCLK_PODF_MASK;
1446 reg |= (div - 1) << MXC_CCM_CSCMR1_PERCLK_PODF_OFFSET;
1447 __raw_writel(reg, MXC_CCM_CSCMR1);
1453 static unsigned long _clk_ipg_perclk_round_rate(struct clk *clk,
1457 u32 parent_rate = clk_get_rate(clk->parent);
1459 div = parent_rate / rate;
1461 /* Make sure rate is not greater than the maximum value for the clock.
1462 * Also prevent a div of 0.
1470 return parent_rate / div;
1473 static struct clk ipg_perclk = {
1474 __INIT_CLK_DEBUG(ipg_perclk)
1476 .get_rate = _clk_ipg_perclk_get_rate,
1477 .set_rate = _clk_ipg_perclk_set_rate,
1478 .round_rate = _clk_ipg_perclk_round_rate,
/* SPBA bus clock gate: CCGR5/CG6. */
1481 static struct clk spba_clk = {
1482 __INIT_CLK_DEBUG(spba_clk)
1484 .enable_reg = MXC_CCM_CCGR5,
1485 .enable_shift = MXC_CCM_CCGRx_CG6_OFFSET,
1486 .enable = _clk_enable,
1487 .disable = _clk_disable,
/* SDMA clock pair: [0] is the gated engine clock (CCGR5/CG3); [1]
 * parents on mx6per1 and, depending on CONFIG_SDMA_IRAM, keeps either
 * OCRAM or MMDC CH0 AXI alive while SDMA scripts run. */
1490 static struct clk sdma_clk[] = {
1492 __INIT_CLK_DEBUG(sdma_clk)
1494 .enable_reg = MXC_CCM_CCGR5,
1495 .enable_shift = MXC_CCM_CCGRx_CG3_OFFSET,
1496 .enable = _clk_enable,
1497 .disable = _clk_disable,
1498 .secondary = &sdma_clk[1],
1501 .parent = &mx6per1_clk,
1502 #ifdef CONFIG_SDMA_IRAM
1503 .secondary = &ocram_clk,
1505 .secondary = &mmdc_ch0_axi_clk[0],
/* The four *_axi_set_parent helpers below all follow the same pattern:
 * a single CBCMR select bit chooses AHB when set; the default (bit
 * clear) source is the other mux input, whose identity is not visible
 * in this extract. */
1510 static int _clk_gpu2d_axi_set_parent(struct clk *clk, struct clk *parent)
1512 u32 reg = __raw_readl(MXC_CCM_CBCMR) & ~MXC_CCM_CBCMR_GPU2D_AXI_CLK_SEL;
1514 if (parent == &ahb_clk)
1515 reg |= MXC_CCM_CBCMR_GPU2D_AXI_CLK_SEL;
1517 __raw_writel(reg, MXC_CCM_CBCMR);
1522 static struct clk gpu2d_axi_clk = {
1523 __INIT_CLK_DEBUG(gpu2d_axi_clk)
1525 .secondary = &mmdc_ch0_axi_clk[0],
1526 .set_parent = _clk_gpu2d_axi_set_parent,
1529 static int _clk_gpu3d_axi_set_parent(struct clk *clk, struct clk *parent)
1531 u32 reg = __raw_readl(MXC_CCM_CBCMR) & ~MXC_CCM_CBCMR_GPU3D_AXI_CLK_SEL;
1533 if (parent == &ahb_clk)
1534 reg |= MXC_CCM_CBCMR_GPU3D_AXI_CLK_SEL;
1536 __raw_writel(reg, MXC_CCM_CBCMR);
1541 static struct clk gpu3d_axi_clk = {
1542 __INIT_CLK_DEBUG(gpu3d_axi_clk)
1544 .secondary = &mmdc_ch0_axi_clk[0],
1545 .set_parent = _clk_gpu3d_axi_set_parent,
1548 static int _clk_pcie_axi_set_parent(struct clk *clk, struct clk *parent)
1550 u32 reg = __raw_readl(MXC_CCM_CBCMR) & ~MXC_CCM_CBCMR_PCIE_AXI_CLK_SEL;
1552 if (parent == &ahb_clk)
1553 reg |= MXC_CCM_CBCMR_PCIE_AXI_CLK_SEL;
1555 __raw_writel(reg, MXC_CCM_CBCMR);
1560 static struct clk pcie_axi_clk = {
1561 __INIT_CLK_DEBUG(pcie_axi_clk)
1563 .set_parent = _clk_pcie_axi_set_parent,
1566 static int _clk_vdo_axi_set_parent(struct clk *clk, struct clk *parent)
1568 u32 reg = __raw_readl(MXC_CCM_CBCMR) & ~MXC_CCM_CBCMR_VDOAXI_CLK_SEL;
1570 if (parent == &ahb_clk)
1571 reg |= MXC_CCM_CBCMR_VDOAXI_CLK_SEL;
1573 __raw_writel(reg, MXC_CCM_CBCMR);
/* VDOA AXI clock: gated via CCGR6/CG6, muxable between AHB and the
 * alternate CBCMR source. */
1578 static struct clk vdo_axi_clk = {
1579 __INIT_CLK_DEBUG(vdo_axi_clk)
1581 .enable_reg = MXC_CCM_CCGR6,
1582 .enable_shift = MXC_CCM_CCGRx_CG6_OFFSET,
1583 .enable = _clk_enable,
1584 .disable = _clk_disable,
1585 .set_parent = _clk_vdo_axi_set_parent,
/* VDOA IP gate (CCGR2/CG13); secondary keeps mx6fast1 up. */
1588 static struct clk vdoa_clk = {
1589 __INIT_CLK_DEBUG(vdoa_clk)
1592 .secondary = &mx6fast1_clk,
1593 .enable_reg = MXC_CCM_CCGR2,
1594 .enable_shift = MXC_CCM_CCGRx_CG13_OFFSET,
1595 .enable = _clk_enable,
1596 .disable = _clk_disable,
/* GPT timer clocks: [0] is the per-clk-fed counter clock (CCGR1/CG10),
 * [1] the serial/ipg side gate (CCGR1/CG11) chained as secondary. */
1599 static struct clk gpt_clk[] = {
1601 __INIT_CLK_DEBUG(gpt_clk)
1602 .parent = &ipg_perclk,
1604 .enable_reg = MXC_CCM_CCGR1,
1605 .enable_shift = MXC_CCM_CCGRx_CG10_OFFSET,
1606 .enable = _clk_enable,
1607 .disable = _clk_disable,
1608 .secondary = &gpt_clk[1],
1611 __INIT_CLK_DEBUG(gpt_serial_clk)
1613 .enable_reg = MXC_CCM_CCGR1,
1614 .enable_shift = MXC_CCM_CCGRx_CG11_OFFSET,
1615 .enable = _clk_enable,
1616 .disable = _clk_disable,
/* IIM (fuse box) gate: CCGR2/CG6. */
1620 static struct clk iim_clk = {
1621 __INIT_CLK_DEBUG(iim_clk)
1623 .enable = _clk_enable,
1624 .enable_reg = MXC_CCM_CCGR2,
1625 .enable_shift = MXC_CCM_CCGRx_CG6_OFFSET,
1626 .disable = _clk_disable,
/* I2C1..I2C3 gates: CCGR2 CG3/CG4/CG5, all fed from ipg_perclk. */
1629 static struct clk i2c_clk[] = {
1631 __INIT_CLK_DEBUG(i2c_clk_0)
1633 .parent = &ipg_perclk,
1634 .enable_reg = MXC_CCM_CCGR2,
1635 .enable_shift = MXC_CCM_CCGRx_CG3_OFFSET,
1636 .enable = _clk_enable,
1637 .disable = _clk_disable,
1640 __INIT_CLK_DEBUG(i2c_clk_1)
1642 .parent = &ipg_perclk,
1643 .enable_reg = MXC_CCM_CCGR2,
1644 .enable_shift = MXC_CCM_CCGRx_CG4_OFFSET,
1645 .enable = _clk_enable,
1646 .disable = _clk_disable,
1649 __INIT_CLK_DEBUG(i2c_clk_2)
1651 .parent = &ipg_perclk,
1652 .enable_reg = MXC_CCM_CCGR2,
1653 .enable_shift = MXC_CCM_CCGRx_CG5_OFFSET,
1654 .enable = _clk_enable,
1655 .disable = _clk_disable,
/* VPU AXI mux: axi_clk / pll2_pfd_400M / pll2_pfd_352M selected via
 * CBCMR[VPU_AXI_CLK_SEL]; _get_mux6 maps the parent to its index. */
1659 static int _clk_vpu_axi_set_parent(struct clk *clk, struct clk *parent)
1662 u32 reg = __raw_readl(MXC_CCM_CBCMR)
1663 & ~MXC_CCM_CBCMR_VPU_AXI_CLK_SEL_MASK;
1665 mux = _get_mux6(parent, &axi_clk, &pll2_pfd_400M,
1666 &pll2_pfd_352M, NULL, NULL, NULL);
1668 reg |= (mux << MXC_CCM_CBCMR_VPU_AXI_CLK_SEL_OFFSET);
1670 __raw_writel(reg, MXC_CCM_CBCMR);
/* VPU AXI divider in CSCDR1[VPU_AXI_PODF]: divide-by-(field + 1). */
1675 static unsigned long _clk_vpu_axi_get_rate(struct clk *clk)
1679 reg = __raw_readl(MXC_CCM_CSCDR1);
1680 div = ((reg & MXC_CCM_CSCDR1_VPU_AXI_PODF_MASK) >>
1681 MXC_CCM_CSCDR1_VPU_AXI_PODF_OFFSET) + 1;
1683 return clk_get_rate(clk->parent) / div;
/* Rejects non-integral divisions and div > 8 (3-bit PODF). */
1686 static int _clk_vpu_axi_set_rate(struct clk *clk, unsigned long rate)
1689 u32 parent_rate = clk_get_rate(clk->parent);
1691 div = parent_rate / rate;
1694 if (((parent_rate / div) != rate) || (div > 8))
1697 reg = __raw_readl(MXC_CCM_CSCDR1);
1698 reg &= ~MXC_CCM_CSCDR1_VPU_AXI_PODF_MASK;
1699 reg |= (div - 1) << MXC_CCM_CSCDR1_VPU_AXI_PODF_OFFSET;
1700 __raw_writel(reg, MXC_CCM_CSCDR1);
1705 static unsigned long _clk_vpu_axi_round_rate(struct clk *clk,
1709 u32 parent_rate = clk_get_rate(clk->parent);
1711 div = parent_rate / rate;
1713 /* Make sure rate is not greater than the maximum value for the clock.
1714 * Also prevent a div of 0.
1722 return parent_rate / div;
/* VPU clock chain: [0] gated AXI clock (CCGR6/CG7) -> [1] MMDC CH0
 * dependency -> [2] mx6fast1 with OCRAM as its secondary. */
1725 static struct clk vpu_clk[] = {
1727 __INIT_CLK_DEBUG(vpu_clk)
1729 .enable_reg = MXC_CCM_CCGR6,
1730 .enable_shift = MXC_CCM_CCGRx_CG7_OFFSET,
1731 .enable = _clk_enable,
1732 .disable = _clk_disable,
1733 .set_parent = _clk_vpu_axi_set_parent,
1734 .round_rate = _clk_vpu_axi_round_rate,
1735 .set_rate = _clk_vpu_axi_set_rate,
1736 .get_rate = _clk_vpu_axi_get_rate,
1737 .secondary = &vpu_clk[1],
1740 .parent = &mmdc_ch0_axi_clk[0],
1741 .secondary = &vpu_clk[2],
1744 .parent = &mx6fast1_clk,
1745 .secondary = &ocram_clk,
/* IPU1 HSP mux (CSCDR3): mmdc_ch0_axi / pll2_pfd_400M / pll3_120M /
 * pll3_pfd_540M. */
1750 static int _clk_ipu1_set_parent(struct clk *clk, struct clk *parent)
1753 u32 reg = __raw_readl(MXC_CCM_CSCDR3)
1754 & ~MXC_CCM_CSCDR3_IPU1_HSP_CLK_SEL_MASK;
1756 mux = _get_mux6(parent, &mmdc_ch0_axi_clk[0],
1757 &pll2_pfd_400M, &pll3_120M, &pll3_pfd_540M, NULL, NULL);
1759 reg |= (mux << MXC_CCM_CSCDR3_IPU1_HSP_CLK_SEL_OFFSET);
1761 __raw_writel(reg, MXC_CCM_CSCDR3);
1766 static unsigned long _clk_ipu1_get_rate(struct clk *clk)
1770 reg = __raw_readl(MXC_CCM_CSCDR3);
1771 div = ((reg & MXC_CCM_CSCDR3_IPU1_HSP_PODF_MASK) >>
1772 MXC_CCM_CSCDR3_IPU1_HSP_PODF_OFFSET) + 1;
1774 return clk_get_rate(clk->parent) / div;
1777 static int _clk_ipu1_set_rate(struct clk *clk, unsigned long rate)
1780 u32 parent_rate = clk_get_rate(clk->parent);
1782 div = parent_rate / rate;
1785 if (((parent_rate / div) != rate) || (div > 8))
1788 reg = __raw_readl(MXC_CCM_CSCDR3);
1789 reg &= ~MXC_CCM_CSCDR3_IPU1_HSP_PODF_MASK;
1790 reg |= (div - 1) << MXC_CCM_CSCDR3_IPU1_HSP_PODF_OFFSET;
1791 __raw_writel(reg, MXC_CCM_CSCDR3);
/* Shared round_rate for both IPU HSP clocks (also used by ipu2_clk). */
1796 static unsigned long _clk_ipu_round_rate(struct clk *clk,
1800 u32 parent_rate = clk_get_rate(clk->parent);
1802 div = parent_rate / rate;
1804 /* Make sure rate is not greater than the maximum value for the clock.
1805 * Also prevent a div of 0.
1813 return parent_rate / div;
1816 static struct clk ipu1_clk = {
1817 __INIT_CLK_DEBUG(ipu1_clk)
1818 .parent = &mmdc_ch0_axi_clk[0],
1819 .secondary = &mmdc_ch0_axi_clk[0],
1820 .enable_reg = MXC_CCM_CCGR3,
1821 .enable_shift = MXC_CCM_CCGRx_CG0_OFFSET,
1822 .enable = _clk_enable,
1823 .disable = _clk_disable,
1824 .set_parent = _clk_ipu1_set_parent,
1825 .round_rate = _clk_ipu_round_rate,
1826 .set_rate = _clk_ipu1_set_rate,
1827 .get_rate = _clk_ipu1_get_rate,
1828 .flags = AHB_HIGH_SET_POINT | CPU_FREQ_TRIG_UPDATE,
/* Route one of the listed clocks to the CLKO1 output pin via
 * CCOSR[CKOL_SEL].  The per-branch `sel = N;` assignments are in lines
 * elided from this extract. */
1831 static int _clk_cko1_clk0_set_parent(struct clk *clk, struct clk *parent)
1835 if (parent == &pll3_sw_clk)
1837 else if (parent == &pll2_528_bus_main_clk)
1839 else if (parent == &pll1_sys_main_clk)
1841 else if (parent == &pll5_video_main_clk)
1843 else if (parent == &axi_clk)
1845 else if (parent == &enfc_clk)
1847 else if (parent == &ipu1_di_clk_root)
/* NOTE(review): this branch repeats the previous condition, so it is
 * unreachable; the CKOL mux has distinct selections for IPU1 DI0 and
 * DI1 roots, and this one presumably was meant for the DI1 root --
 * confirm against the i.MX6 CCM CCOSR field table. */
1849 else if (parent == &ipu1_di_clk_root)
1851 else if (parent == &ipu2_di_clk_root)
/* NOTE(review): same duplicate-condition problem for ipu2_di_clk_root;
 * this second branch can never be taken. */
1853 else if (parent == &ipu2_di_clk_root)
1855 else if (parent == &ahb_clk)
1857 else if (parent == &ipg_clk)
1859 else if (parent == &ipg_perclk)
1861 else if (parent == &ckil_clk)
1863 else if (parent == &pll4_audio_main_clk)
1868 reg = __raw_readl(MXC_CCM_CCOSR);
1869 reg &= ~MXC_CCM_CCOSR_CKOL_SEL_MASK;
1870 reg |= sel << MXC_CCM_CCOSR_CKOL_SEL_OFFSET;
1871 __raw_writel(reg, MXC_CCM_CCOSR);
/* Round a CLKO1 rate to parent_rate / div (divider clamp elided). */
1875 static unsigned long _clk_cko1_round_rate(struct clk *clk,
1879 u32 parent_rate = clk_get_rate(clk->parent);
1881 div = parent_rate / rate;
1883 /* Make sure rate is not greater than the maximum value for the clock.
1884 * Also prevent a div of 0.
1892 return parent_rate / div;
/* Program the CLKO1 output divider in CCOSR[CKOL_DIV].  Rejects rates
 * that do not divide the parent evenly or require div > 8. */
1895 static int _clk_cko1_set_rate(struct clk *clk, unsigned long rate)
1898 u32 parent_rate = clk_get_rate(clk->parent);
1900 div = parent_rate / rate;
1903 if (((parent_rate / div) != rate) || (div > 8))
1906 reg = __raw_readl(MXC_CCM_CCOSR);
1907 reg &= ~MXC_CCM_CCOSR_CKOL_DIV_MASK;
/* CKOL_DIV is a divide-by-(field + 1) field -- _clk_cko1_get_rate adds
 * 1 on readback -- so program div - 1.  The previous code wrote the raw
 * div, which selected parent/(div + 1) and overflowed the 3-bit field
 * when div == 8. */
1908 reg |= (div - 1) << MXC_CCM_CCOSR_CKOL_DIV_OFFSET;
1909 __raw_writel(reg, MXC_CCM_CCOSR);
/* CLKO1 rate: CCOSR[CKOL_DIV] is divide-by-(field + 1). */
1914 static unsigned long _clk_cko1_get_rate(struct clk *clk)
1918 reg = __raw_readl(MXC_CCM_CCOSR);
1919 div = ((reg & MXC_CCM_CCOSR_CKOL_DIV_MASK) >>
1920 MXC_CCM_CCOSR_CKOL_DIV_OFFSET) + 1;
1922 return clk_get_rate(clk->parent) / div;
/* CLKO1 enable.  Unlike the CCGR-based clocks, enable_shift here holds
 * a ready-made mask (MXC_CCM_CCOSR_CKOL_EN, see cko1_clk0 below), so it
 * is ORed in directly rather than shifted. */
1925 static int cko1_clk_enable(struct clk *clk)
1928 reg = __raw_readl(clk->enable_reg);
1929 reg |= clk->enable_shift;
1930 __raw_writel(reg, clk->enable_reg);
1935 static struct clk cko1_clk0 = {
1936 __INIT_CLK_DEBUG(cko1_clk0)
1938 .enable_reg = MXC_CCM_CCOSR,
1939 .enable_shift = MXC_CCM_CCOSR_CKOL_EN,
1940 .enable = cko1_clk_enable,
1941 .disable = _clk_disable,
1942 .set_parent = _clk_cko1_clk0_set_parent,
1943 .round_rate = _clk_cko1_round_rate,
1944 .set_rate = _clk_cko1_set_rate,
1945 .get_rate = _clk_cko1_get_rate,
/* IPU2 HSP mux/divider in CSCDR3 -- mirrors the IPU1 helpers above. */
1948 static int _clk_ipu2_set_parent(struct clk *clk, struct clk *parent)
1951 u32 reg = __raw_readl(MXC_CCM_CSCDR3)
1952 & ~MXC_CCM_CSCDR3_IPU2_HSP_CLK_SEL_MASK;
1954 mux = _get_mux6(parent, &mmdc_ch0_axi_clk[0],
1955 &pll2_pfd_400M, &pll3_120M, &pll3_pfd_540M, NULL, NULL);
1957 reg |= (mux << MXC_CCM_CSCDR3_IPU2_HSP_CLK_SEL_OFFSET);
1959 __raw_writel(reg, MXC_CCM_CSCDR3);
1964 static unsigned long _clk_ipu2_get_rate(struct clk *clk)
1968 reg = __raw_readl(MXC_CCM_CSCDR3);
1969 div = ((reg & MXC_CCM_CSCDR3_IPU2_HSP_PODF_MASK) >>
1970 MXC_CCM_CSCDR3_IPU2_HSP_PODF_OFFSET) + 1;
1972 return clk_get_rate(clk->parent) / div;
1975 static int _clk_ipu2_set_rate(struct clk *clk, unsigned long rate)
1978 u32 parent_rate = clk_get_rate(clk->parent);
1980 div = parent_rate / rate;
1983 if (((parent_rate / div) != rate) || (div > 8))
1986 reg = __raw_readl(MXC_CCM_CSCDR3);
1987 reg &= ~MXC_CCM_CSCDR3_IPU2_HSP_PODF_MASK;
1988 reg |= (div - 1) << MXC_CCM_CSCDR3_IPU2_HSP_PODF_OFFSET;
1989 __raw_writel(reg, MXC_CCM_CSCDR3);
1994 static struct clk ipu2_clk = {
1995 __INIT_CLK_DEBUG(ipu2_clk)
1996 .parent = &mmdc_ch0_axi_clk[0],
1997 .secondary = &mmdc_ch0_axi_clk[0],
1998 .enable_reg = MXC_CCM_CCGR3,
1999 .enable_shift = MXC_CCM_CCGRx_CG3_OFFSET,
2000 .enable = _clk_enable,
2001 .disable = _clk_disable,
2002 .set_parent = _clk_ipu2_set_parent,
2003 .round_rate = _clk_ipu_round_rate,
2004 .set_rate = _clk_ipu2_set_rate,
2005 .get_rate = _clk_ipu2_get_rate,
2006 .flags = AHB_HIGH_SET_POINT | CPU_FREQ_TRIG_UPDATE,
/* Gateless dependency node shared by all four uSDHC clocks: keeps
 * MMDC CH0 AXI and mx6per1 up and carries the bus set-point flags. */
2009 static struct clk usdhc_dep_clk = {
2010 .parent = &mmdc_ch0_axi_clk[0],
2011 .secondary = &mx6per1_clk,
2012 .flags = AHB_HIGH_SET_POINT | CPU_FREQ_TRIG_UPDATE,
/* Shared round_rate for usdhc1..4 (divider clamp elided here). */
2015 static unsigned long _clk_usdhc_round_rate(struct clk *clk,
2019 u32 parent_rate = clk_get_rate(clk->parent);
2021 div = parent_rate / rate;
2023 /* Make sure rate is not greater than the maximum value for the clock.
2024 * Also prevent a div of 0.
2032 return parent_rate / div;
/* uSDHC1: one CSCMR1 bit picks pll2_pfd_352M; clear means the other
 * mux input (the declared parent is pll2_pfd_400M). */
2035 static int _clk_usdhc1_set_parent(struct clk *clk, struct clk *parent)
2037 u32 reg = __raw_readl(MXC_CCM_CSCMR1) & ~MXC_CCM_CSCMR1_USDHC1_CLK_SEL;
2039 if (parent == &pll2_pfd_352M)
2040 reg |= (MXC_CCM_CSCMR1_USDHC1_CLK_SEL);
2042 __raw_writel(reg, MXC_CCM_CSCMR1);
2047 static unsigned long _clk_usdhc1_get_rate(struct clk *clk)
2051 reg = __raw_readl(MXC_CCM_CSCDR1);
2052 div = ((reg & MXC_CCM_CSCDR1_USDHC1_PODF_MASK) >>
2053 MXC_CCM_CSCDR1_USDHC1_PODF_OFFSET) + 1;
2055 return clk_get_rate(clk->parent) / div;
2058 static int _clk_usdhc1_set_rate(struct clk *clk, unsigned long rate)
2061 u32 parent_rate = clk_get_rate(clk->parent);
2063 div = parent_rate / rate;
2066 if (((parent_rate / div) != rate) || (div > 8))
2069 reg = __raw_readl(MXC_CCM_CSCDR1);
2070 reg &= ~MXC_CCM_CSCDR1_USDHC1_PODF_MASK;
2071 reg |= (div - 1) << MXC_CCM_CSCDR1_USDHC1_PODF_OFFSET;
2072 __raw_writel(reg, MXC_CCM_CSCDR1);
2077 static struct clk usdhc1_clk = {
2078 __INIT_CLK_DEBUG(usdhc1_clk)
2080 .parent = &pll2_pfd_400M,
2081 .secondary = &usdhc_dep_clk,
2082 .enable_reg = MXC_CCM_CCGR6,
2083 .enable_shift = MXC_CCM_CCGRx_CG1_OFFSET,
2084 .enable = _clk_enable,
2085 .disable = _clk_disable,
2086 .set_parent = _clk_usdhc1_set_parent,
2087 .round_rate = _clk_usdhc_round_rate,
2088 .set_rate = _clk_usdhc1_set_rate,
2089 .get_rate = _clk_usdhc1_get_rate,
2090 .flags = AHB_HIGH_SET_POINT | CPU_FREQ_TRIG_UPDATE,
/* uSDHC2: identical pattern, USDHC2 mux bit and PODF field. */
2093 static int _clk_usdhc2_set_parent(struct clk *clk, struct clk *parent)
2095 u32 reg = __raw_readl(MXC_CCM_CSCMR1) & ~MXC_CCM_CSCMR1_USDHC2_CLK_SEL;
2097 if (parent == &pll2_pfd_352M)
2098 reg |= (MXC_CCM_CSCMR1_USDHC2_CLK_SEL);
2100 __raw_writel(reg, MXC_CCM_CSCMR1);
2105 static unsigned long _clk_usdhc2_get_rate(struct clk *clk)
2109 reg = __raw_readl(MXC_CCM_CSCDR1);
2110 div = ((reg & MXC_CCM_CSCDR1_USDHC2_PODF_MASK) >>
2111 MXC_CCM_CSCDR1_USDHC2_PODF_OFFSET) + 1;
2113 return clk_get_rate(clk->parent) / div;
2116 static int _clk_usdhc2_set_rate(struct clk *clk, unsigned long rate)
2119 u32 parent_rate = clk_get_rate(clk->parent);
2121 div = parent_rate / rate;
2124 if (((parent_rate / div) != rate) || (div > 8))
2127 reg = __raw_readl(MXC_CCM_CSCDR1);
2128 reg &= ~MXC_CCM_CSCDR1_USDHC2_PODF_MASK;
2129 reg |= (div - 1) << MXC_CCM_CSCDR1_USDHC2_PODF_OFFSET;
2130 __raw_writel(reg, MXC_CCM_CSCDR1);
2135 static struct clk usdhc2_clk = {
2136 __INIT_CLK_DEBUG(usdhc2_clk)
2138 .parent = &pll2_pfd_400M,
2139 .secondary = &usdhc_dep_clk,
2140 .enable_reg = MXC_CCM_CCGR6,
2141 .enable_shift = MXC_CCM_CCGRx_CG2_OFFSET,
2142 .enable = _clk_enable,
2143 .disable = _clk_disable,
2144 .set_parent = _clk_usdhc2_set_parent,
2145 .round_rate = _clk_usdhc_round_rate,
2146 .set_rate = _clk_usdhc2_set_rate,
2147 .get_rate = _clk_usdhc2_get_rate,
2148 .flags = AHB_HIGH_SET_POINT | CPU_FREQ_TRIG_UPDATE,
/* uSDHC3: same mux/divider pattern as usdhc1/2 (USDHC3 fields). */
2151 static int _clk_usdhc3_set_parent(struct clk *clk, struct clk *parent)
2153 u32 reg = __raw_readl(MXC_CCM_CSCMR1) & ~MXC_CCM_CSCMR1_USDHC3_CLK_SEL;
2155 if (parent == &pll2_pfd_352M)
2156 reg |= (MXC_CCM_CSCMR1_USDHC3_CLK_SEL);
2158 __raw_writel(reg, MXC_CCM_CSCMR1);
2163 static unsigned long _clk_usdhc3_get_rate(struct clk *clk)
2167 reg = __raw_readl(MXC_CCM_CSCDR1);
2168 div = ((reg & MXC_CCM_CSCDR1_USDHC3_PODF_MASK) >>
2169 MXC_CCM_CSCDR1_USDHC3_PODF_OFFSET) + 1;
2171 return clk_get_rate(clk->parent) / div;
2174 static int _clk_usdhc3_set_rate(struct clk *clk, unsigned long rate)
2177 u32 parent_rate = clk_get_rate(clk->parent);
2179 div = parent_rate / rate;
2182 if (((parent_rate / div) != rate) || (div > 8))
2185 reg = __raw_readl(MXC_CCM_CSCDR1);
2186 reg &= ~MXC_CCM_CSCDR1_USDHC3_PODF_MASK;
2187 reg |= (div - 1) << MXC_CCM_CSCDR1_USDHC3_PODF_OFFSET;
2188 __raw_writel(reg, MXC_CCM_CSCDR1);
2194 static struct clk usdhc3_clk = {
2195 __INIT_CLK_DEBUG(usdhc3_clk)
2197 .parent = &pll2_pfd_400M,
2198 .secondary = &usdhc_dep_clk,
2199 .enable_reg = MXC_CCM_CCGR6,
2200 .enable_shift = MXC_CCM_CCGRx_CG3_OFFSET,
2201 .enable = _clk_enable,
2202 .disable = _clk_disable,
2203 .set_parent = _clk_usdhc3_set_parent,
2204 .round_rate = _clk_usdhc_round_rate,
2205 .set_rate = _clk_usdhc3_set_rate,
2206 .get_rate = _clk_usdhc3_get_rate,
2207 .flags = AHB_HIGH_SET_POINT | CPU_FREQ_TRIG_UPDATE,
/* uSDHC4: same pattern (USDHC4 fields, gate CCGR6/CG4). */
2210 static int _clk_usdhc4_set_parent(struct clk *clk, struct clk *parent)
2212 u32 reg = __raw_readl(MXC_CCM_CSCMR1) & ~MXC_CCM_CSCMR1_USDHC4_CLK_SEL;
2214 if (parent == &pll2_pfd_352M)
2215 reg |= (MXC_CCM_CSCMR1_USDHC4_CLK_SEL);
2217 __raw_writel(reg, MXC_CCM_CSCMR1);
2222 static unsigned long _clk_usdhc4_get_rate(struct clk *clk)
2226 reg = __raw_readl(MXC_CCM_CSCDR1);
2227 div = ((reg & MXC_CCM_CSCDR1_USDHC4_PODF_MASK) >>
2228 MXC_CCM_CSCDR1_USDHC4_PODF_OFFSET) + 1;
2230 return clk_get_rate(clk->parent) / div;
2233 static int _clk_usdhc4_set_rate(struct clk *clk, unsigned long rate)
2236 u32 parent_rate = clk_get_rate(clk->parent);
2238 div = parent_rate / rate;
2241 if (((parent_rate / div) != rate) || (div > 8))
2244 reg = __raw_readl(MXC_CCM_CSCDR1);
2245 reg &= ~MXC_CCM_CSCDR1_USDHC4_PODF_MASK;
2246 reg |= (div - 1) << MXC_CCM_CSCDR1_USDHC4_PODF_OFFSET;
2247 __raw_writel(reg, MXC_CCM_CSCDR1);
2253 static struct clk usdhc4_clk = {
2254 __INIT_CLK_DEBUG(usdhc4_clk)
2256 .parent = &pll2_pfd_400M,
2257 .secondary = &usdhc_dep_clk,
2258 .enable_reg = MXC_CCM_CCGR6,
2259 .enable_shift = MXC_CCM_CCGRx_CG4_OFFSET,
2260 .enable = _clk_enable,
2261 .disable = _clk_disable,
2262 .set_parent = _clk_usdhc4_set_parent,
2263 .round_rate = _clk_usdhc_round_rate,
2264 .set_rate = _clk_usdhc4_set_rate,
2265 .get_rate = _clk_usdhc4_get_rate,
2266 .flags = AHB_HIGH_SET_POINT | CPU_FREQ_TRIG_UPDATE,
/* Shared round_rate for ssi1..3: splits the overall divider into a
 * 3-bit PRED and 6-bit PODF pair via __calc_pre_post_dividers (max
 * post divider 1 << 6 = 64). */
2269 static unsigned long _clk_ssi_round_rate(struct clk *clk,
2273 u32 parent_rate = clk_get_rate(clk->parent);
2274 u32 div = parent_rate / rate;
2276 if (parent_rate % rate)
2279 __calc_pre_post_dividers(1 << 6, div, &pre, &post);
2281 return parent_rate / (pre * post);
/* SSI1 rate from CS1CDR: both PRED and PODF are divide-by-(field+1). */
2284 static unsigned long _clk_ssi1_get_rate(struct clk *clk)
2286 u32 reg, prediv, podf;
2288 reg = __raw_readl(MXC_CCM_CS1CDR);
2290 prediv = ((reg & MXC_CCM_CS1CDR_SSI1_CLK_PRED_MASK)
2291 >> MXC_CCM_CS1CDR_SSI1_CLK_PRED_OFFSET) + 1;
2292 podf = ((reg & MXC_CCM_CS1CDR_SSI1_CLK_PODF_MASK)
2293 >> MXC_CCM_CS1CDR_SSI1_CLK_PODF_OFFSET) + 1;
2295 return clk_get_rate(clk->parent) / (prediv * podf);
/* Rejects non-exact divisions and total dividers above 512 (8 * 64). */
2298 static int _clk_ssi1_set_rate(struct clk *clk, unsigned long rate)
2300 u32 reg, div, pre, post;
2301 u32 parent_rate = clk_get_rate(clk->parent);
2303 div = parent_rate / rate;
2306 if (((parent_rate / div) != rate) || div > 512)
2309 __calc_pre_post_dividers(1 << 6, div, &pre, &post);
2311 reg = __raw_readl(MXC_CCM_CS1CDR);
2312 reg &= ~(MXC_CCM_CS1CDR_SSI1_CLK_PRED_MASK |
2313 MXC_CCM_CS1CDR_SSI1_CLK_PODF_MASK);
2314 reg |= (post - 1) << MXC_CCM_CS1CDR_SSI1_CLK_PODF_OFFSET;
2315 reg |= (pre - 1) << MXC_CCM_CS1CDR_SSI1_CLK_PRED_OFFSET;
2317 __raw_writel(reg, MXC_CCM_CS1CDR);
/* SSI1 source mux: pll3_pfd_508M / pll3_pfd_454M / pll4_audio. */
2323 static int _clk_ssi1_set_parent(struct clk *clk, struct clk *parent)
2327 reg = __raw_readl(MXC_CCM_CSCMR1)
2328 & ~MXC_CCM_CSCMR1_SSI1_CLK_SEL_MASK;
2330 mux = _get_mux6(parent, &pll3_pfd_508M, &pll3_pfd_454M,
2331 &pll4_audio_main_clk, NULL, NULL, NULL);
2332 reg |= (mux << MXC_CCM_CSCMR1_SSI1_CLK_SEL_OFFSET);
2334 __raw_writel(reg, MXC_CCM_CSCMR1);
2339 static struct clk ssi1_clk = {
2340 __INIT_CLK_DEBUG(ssi1_clk)
2341 .parent = &pll3_pfd_508M,
2342 .enable_reg = MXC_CCM_CCGR5,
2343 .enable_shift = MXC_CCM_CCGRx_CG9_OFFSET,
2344 .enable = _clk_enable,
2345 .disable = _clk_disable,
2346 .set_parent = _clk_ssi1_set_parent,
2347 .set_rate = _clk_ssi1_set_rate,
2348 .round_rate = _clk_ssi_round_rate,
2349 .get_rate = _clk_ssi1_get_rate,
2350 #ifdef CONFIG_SND_MXC_SOC_IRAM
2351 .secondary = &ocram_clk,
2353 .secondary = &mmdc_ch0_axi_clk[0],
/* SSI2: same pattern using CS2CDR fields. */
2357 static unsigned long _clk_ssi2_get_rate(struct clk *clk)
2359 u32 reg, prediv, podf;
2361 reg = __raw_readl(MXC_CCM_CS2CDR);
2363 prediv = ((reg & MXC_CCM_CS2CDR_SSI2_CLK_PRED_MASK)
2364 >> MXC_CCM_CS2CDR_SSI2_CLK_PRED_OFFSET) + 1;
2365 podf = ((reg & MXC_CCM_CS2CDR_SSI2_CLK_PODF_MASK)
2366 >> MXC_CCM_CS2CDR_SSI2_CLK_PODF_OFFSET) + 1;
2368 return clk_get_rate(clk->parent) / (prediv * podf);
2371 static int _clk_ssi2_set_rate(struct clk *clk, unsigned long rate)
2373 u32 reg, div, pre, post;
2374 u32 parent_rate = clk_get_rate(clk->parent);
2376 div = parent_rate / rate;
2379 if (((parent_rate / div) != rate) || div > 512)
2382 __calc_pre_post_dividers(1 << 6, div, &pre, &post);
2384 reg = __raw_readl(MXC_CCM_CS2CDR);
2385 reg &= ~(MXC_CCM_CS2CDR_SSI2_CLK_PRED_MASK |
2386 MXC_CCM_CS2CDR_SSI2_CLK_PODF_MASK);
2387 reg |= (post - 1) << MXC_CCM_CS2CDR_SSI2_CLK_PODF_OFFSET;
2388 reg |= (pre - 1) << MXC_CCM_CS2CDR_SSI2_CLK_PRED_OFFSET;
2390 __raw_writel(reg, MXC_CCM_CS2CDR);
2396 static int _clk_ssi2_set_parent(struct clk *clk, struct clk *parent)
2400 reg = __raw_readl(MXC_CCM_CSCMR1)
2401 & ~MXC_CCM_CSCMR1_SSI2_CLK_SEL_MASK;
2403 mux = _get_mux6(parent, &pll3_pfd_508M, &pll3_pfd_454M,
2404 &pll4_audio_main_clk, NULL, NULL, NULL);
2405 reg |= (mux << MXC_CCM_CSCMR1_SSI2_CLK_SEL_OFFSET);
2407 __raw_writel(reg, MXC_CCM_CSCMR1);
2412 static struct clk ssi2_clk = {
2413 __INIT_CLK_DEBUG(ssi2_clk)
2414 .parent = &pll3_pfd_508M,
2415 .enable_reg = MXC_CCM_CCGR5,
2416 .enable_shift = MXC_CCM_CCGRx_CG10_OFFSET,
2417 .enable = _clk_enable,
2418 .disable = _clk_disable,
2419 .set_parent = _clk_ssi2_set_parent,
2420 .set_rate = _clk_ssi2_set_rate,
2421 .round_rate = _clk_ssi_round_rate,
2422 .get_rate = _clk_ssi2_get_rate,
2423 #ifdef CONFIG_SND_MXC_SOC_IRAM
2424 .secondary = &ocram_clk,
2426 .secondary = &mmdc_ch0_axi_clk[0],
/* SSI3 rate from CS1CDR (SSI3 shares this register with SSI1, but has
 * its own PRED/PODF fields -- the ones _clk_ssi3_set_rate programs). */
2430 static unsigned long _clk_ssi3_get_rate(struct clk *clk)
2432 u32 reg, prediv, podf;
2434 reg = __raw_readl(MXC_CCM_CS1CDR);
/* Read the SSI3 divider fields.  The previous code read the SSI1
 * fields of CS1CDR, so get_rate reported SSI1's dividers for SSI3 and
 * disagreed with _clk_ssi3_set_rate. */
2436 prediv = ((reg & MXC_CCM_CS1CDR_SSI3_CLK_PRED_MASK)
2437 >> MXC_CCM_CS1CDR_SSI3_CLK_PRED_OFFSET) + 1;
2438 podf = ((reg & MXC_CCM_CS1CDR_SSI3_CLK_PODF_MASK)
2439 >> MXC_CCM_CS1CDR_SSI3_CLK_PODF_OFFSET) + 1;
2441 return clk_get_rate(clk->parent) / (prediv * podf);
/* SSI3 set_rate: programs the SSI3 PRED/PODF fields of CS1CDR (the
 * register shared with SSI1); divider split via
 * __calc_pre_post_dividers, total div limited to 512. */
2444 static int _clk_ssi3_set_rate(struct clk *clk, unsigned long rate)
2446 u32 reg, div, pre, post;
2447 u32 parent_rate = clk_get_rate(clk->parent);
2449 div = parent_rate / rate;
2452 if (((parent_rate / div) != rate) || div > 512)
2455 __calc_pre_post_dividers(1 << 6, div, &pre, &post);
2457 reg = __raw_readl(MXC_CCM_CS1CDR);
2458 reg &= ~(MXC_CCM_CS1CDR_SSI3_CLK_PODF_MASK|
2459 MXC_CCM_CS1CDR_SSI3_CLK_PRED_MASK);
2460 reg |= (post - 1) << MXC_CCM_CS1CDR_SSI3_CLK_PODF_OFFSET;
2461 reg |= (pre - 1) << MXC_CCM_CS1CDR_SSI3_CLK_PRED_OFFSET;
2463 __raw_writel(reg, MXC_CCM_CS1CDR);
/* SSI3 source mux: pll3_pfd_508M / pll3_pfd_454M / pll4_audio. */
2469 static int _clk_ssi3_set_parent(struct clk *clk, struct clk *parent)
2473 reg = __raw_readl(MXC_CCM_CSCMR1) & ~MXC_CCM_CSCMR1_SSI3_CLK_SEL_MASK;
2475 mux = _get_mux6(parent, &pll3_pfd_508M, &pll3_pfd_454M,
2476 &pll4_audio_main_clk, NULL, NULL, NULL);
2477 reg |= (mux << MXC_CCM_CSCMR1_SSI3_CLK_SEL_OFFSET);
2479 __raw_writel(reg, MXC_CCM_CSCMR1);
2484 static struct clk ssi3_clk = {
2485 __INIT_CLK_DEBUG(ssi3_clk)
2486 .parent = &pll3_pfd_508M,
2487 .enable_reg = MXC_CCM_CCGR5,
2488 .enable_shift = MXC_CCM_CCGRx_CG11_OFFSET,
2489 .enable = _clk_enable,
2490 .disable = _clk_disable,
2491 .set_parent = _clk_ssi3_set_parent,
2492 .set_rate = _clk_ssi3_set_rate,
2493 .round_rate = _clk_ssi_round_rate,
2494 .get_rate = _clk_ssi3_get_rate,
2495 #ifdef CONFIG_SND_MXC_SOC_IRAM
2496 .secondary = &ocram_clk,
2498 .secondary = &mmdc_ch0_axi_clk[0],
/* LDB DI clocks run at parent/7 (single-channel) or 2*parent/7 (split
 * mode).  The /7 choice is taken when the request is within 5%
 * (parent/20) above parent/7. */
2502 static unsigned long _clk_ldb_di_round_rate(struct clk *clk,
2505 u32 parent_rate = clk_get_rate(clk->parent);
2507 if (rate * 7 <= parent_rate + parent_rate/20)
2508 return parent_rate / 7;
2510 return 2 * parent_rate / 7;
/* The CSCMR2 LDB_DI0_IPU_DIV bit selects /7 when set, 2/7 when clear
 * (the branch on `div` is in elided lines). */
2513 static unsigned long _clk_ldb_di0_get_rate(struct clk *clk)
2517 div = __raw_readl(MXC_CCM_CSCMR2) &
2518 MXC_CCM_CSCMR2_LDB_DI0_IPU_DIV;
2521 return clk_get_rate(clk->parent) / 7;
2523 return (2 * clk_get_rate(clk->parent)) / 7;
2526 static int _clk_ldb_di0_set_rate(struct clk *clk, unsigned long rate)
2529 u32 parent_rate = clk_get_rate(clk->parent);
2531 if (rate * 7 <= parent_rate + parent_rate/20) {
2533 rate = parent_rate / 7;
2535 rate = 2 * parent_rate / 7;
2537 reg = __raw_readl(MXC_CCM_CSCMR2);
2539 reg |= MXC_CCM_CSCMR2_LDB_DI0_IPU_DIV;
2541 reg &= ~MXC_CCM_CSCMR2_LDB_DI0_IPU_DIV;
2543 __raw_writel(reg, MXC_CCM_CSCMR2);
/* LDB DI0 source mux in CS2CDR: pll5_video / pll2_pfd_352M /
 * pll2_pfd_400M / pll3_pfd_540M / pll3_usb_otg. */
2548 static int _clk_ldb_di0_set_parent(struct clk *clk, struct clk *parent)
2552 reg = __raw_readl(MXC_CCM_CS2CDR)
2553 & ~MXC_CCM_CS2CDR_LDB_DI0_CLK_SEL_MASK;
2555 mux = _get_mux6(parent, &pll5_video_main_clk,
2556 &pll2_pfd_352M, &pll2_pfd_400M, &pll3_pfd_540M,
2557 &pll3_usb_otg_main_clk, NULL);
2558 reg |= (mux << MXC_CCM_CS2CDR_LDB_DI0_CLK_SEL_OFFSET);
2560 __raw_writel(reg, MXC_CCM_CS2CDR);
2565 static struct clk ldb_di0_clk = {
2566 __INIT_CLK_DEBUG(ldb_di0_clk)
2568 .parent = &pll3_pfd_540M,
2569 .enable_reg = MXC_CCM_CCGR3,
2570 .enable_shift = MXC_CCM_CCGRx_CG6_OFFSET,
2571 .enable = _clk_enable,
2572 .disable = _clk_disable,
2573 .set_parent = _clk_ldb_di0_set_parent,
2574 .set_rate = _clk_ldb_di0_set_rate,
2575 .round_rate = _clk_ldb_di_round_rate,
2576 .get_rate = _clk_ldb_di0_get_rate,
2577 .flags = AHB_HIGH_SET_POINT | CPU_FREQ_TRIG_UPDATE,
/* LDB DI1: same logic on the LDB_DI1 fields. */
2580 static unsigned long _clk_ldb_di1_get_rate(struct clk *clk)
2584 div = __raw_readl(MXC_CCM_CSCMR2) &
2585 MXC_CCM_CSCMR2_LDB_DI1_IPU_DIV;
2588 return clk_get_rate(clk->parent) / 7;
2590 return (2 * clk_get_rate(clk->parent)) / 7;
2593 static int _clk_ldb_di1_set_rate(struct clk *clk, unsigned long rate)
2596 u32 parent_rate = clk_get_rate(clk->parent);
2598 if (rate * 7 <= parent_rate + parent_rate/20) {
2600 rate = parent_rate / 7;
2602 rate = 2 * parent_rate / 7;
2604 reg = __raw_readl(MXC_CCM_CSCMR2);
2606 reg |= MXC_CCM_CSCMR2_LDB_DI1_IPU_DIV;
2608 reg &= ~MXC_CCM_CSCMR2_LDB_DI1_IPU_DIV;
2610 __raw_writel(reg, MXC_CCM_CSCMR2);
2615 static int _clk_ldb_di1_set_parent(struct clk *clk, struct clk *parent)
2619 reg = __raw_readl(MXC_CCM_CS2CDR)
2620 & ~MXC_CCM_CS2CDR_LDB_DI1_CLK_SEL_MASK;
2622 mux = _get_mux6(parent, &pll5_video_main_clk,
2623 &pll2_pfd_352M, &pll2_pfd_400M, &pll3_pfd_540M,
2624 &pll3_usb_otg_main_clk, NULL);
2625 reg |= (mux << MXC_CCM_CS2CDR_LDB_DI1_CLK_SEL_OFFSET);
2627 __raw_writel(reg, MXC_CCM_CS2CDR);
2632 static struct clk ldb_di1_clk = {
2633 __INIT_CLK_DEBUG(ldb_di1_clk)
2635 .parent = &pll3_pfd_540M,
2636 .enable_reg = MXC_CCM_CCGR3,
2637 .enable_shift = MXC_CCM_CCGRx_CG7_OFFSET,
2638 .enable = _clk_enable,
2639 .disable = _clk_disable,
2640 .set_parent = _clk_ldb_di1_set_parent,
2641 .set_rate = _clk_ldb_di1_set_rate,
2642 .round_rate = _clk_ldb_di_round_rate,
2643 .get_rate = _clk_ldb_di1_get_rate,
2644 .flags = AHB_HIGH_SET_POINT | CPU_FREQ_TRIG_UPDATE,
/* IPU DI round_rate shared by all four DI clocks.  When an LDB clock
 * is the parent the DI clock cannot be divided (the early-return value
 * is in an elided line; presumably parent_rate -- confirm in full
 * file); otherwise divide with the usual clamp. */
2648 static unsigned long _clk_ipu_di_round_rate(struct clk *clk,
2652 u32 parent_rate = clk_get_rate(clk->parent);
2654 if ((clk->parent == &ldb_di0_clk) ||
2655 (clk->parent == &ldb_di1_clk))
2658 div = parent_rate / rate;
2660 /* Make sure rate is not greater than the maximum value for the clock.
2661 * Also prevent a div of 0.
2669 return parent_rate / div;
/* IPU1 DI0 rate: pass-through when fed from an LDB clock, else
 * CHSCCDR[IPU1_DI0_PODF] divide-by-(field + 1). */
2672 static unsigned long _clk_ipu1_di0_get_rate(struct clk *clk)
2676 if ((clk->parent == &ldb_di0_clk) ||
2677 (clk->parent == &ldb_di1_clk))
2678 return clk_get_rate(clk->parent);
2680 reg = __raw_readl(MXC_CCM_CHSCCDR);
2682 div = ((reg & MXC_CCM_CHSCCDR_IPU1_DI0_PODF_MASK) >>
2683 MXC_CCM_CHSCCDR_IPU1_DI0_PODF_OFFSET) + 1;
2685 return clk_get_rate(clk->parent) / div;
/* With an LDB parent only the exact parent rate is accepted. */
2688 static int _clk_ipu1_di0_set_rate(struct clk *clk, unsigned long rate)
2691 u32 parent_rate = clk_get_rate(clk->parent);
2693 if ((clk->parent == &ldb_di0_clk) ||
2694 (clk->parent == &ldb_di1_clk)) {
2695 if (parent_rate == rate)
2701 div = parent_rate / rate;
2704 if (((parent_rate / div) != rate) || (div > 8))
2707 reg = __raw_readl(MXC_CCM_CHSCCDR);
2708 reg &= ~MXC_CCM_CHSCCDR_IPU1_DI0_PODF_MASK;
2709 reg |= (div - 1) << MXC_CCM_CHSCCDR_IPU1_DI0_PODF_OFFSET;
2710 __raw_writel(reg, MXC_CCM_CHSCCDR);
/* IPU1 DI0 parent: LDB clocks are selected directly in the final
 * CLK_SEL mux (their mux values are in elided lines); any other parent
 * goes through the PRE_CLK_SEL pre-mux and the divided pre-muxed path.
 */
2716 static int _clk_ipu1_di0_set_parent(struct clk *clk, struct clk *parent)
2720 if (parent == &ldb_di0_clk)
2722 else if (parent == &ldb_di1_clk)
2725 reg = __raw_readl(MXC_CCM_CHSCCDR)
2726 & ~MXC_CCM_CHSCCDR_IPU1_DI0_PRE_CLK_SEL_MASK;
2728 mux = _get_mux6(parent, &mmdc_ch0_axi_clk[0],
2729 &pll3_usb_otg_main_clk, &pll5_video_main_clk,
2730 &pll2_pfd_352M, &pll2_pfd_400M, &pll3_pfd_540M);
2731 reg |= (mux << MXC_CCM_CHSCCDR_IPU1_DI0_PRE_CLK_SEL_OFFSET);
2733 __raw_writel(reg, MXC_CCM_CHSCCDR);
2735 /* Derive clock from divided pre-muxed ipu1_di0 clock.*/
2739 reg = __raw_readl(MXC_CCM_CHSCCDR)
2740 & ~MXC_CCM_CHSCCDR_IPU1_DI0_CLK_SEL_MASK;
2741 __raw_writel(reg | (mux << MXC_CCM_CHSCCDR_IPU1_DI0_CLK_SEL_OFFSET),
/* IPU1 DI1: mirrors the DI0 helpers on the IPU1_DI1 fields. */
2747 static unsigned long _clk_ipu1_di1_get_rate(struct clk *clk)
2751 if ((clk->parent == &ldb_di0_clk) ||
2752 (clk->parent == &ldb_di1_clk))
2753 return clk_get_rate(clk->parent);
2755 reg = __raw_readl(MXC_CCM_CHSCCDR);
2757 div = ((reg & MXC_CCM_CHSCCDR_IPU1_DI1_PODF_MASK)
2758 >> MXC_CCM_CHSCCDR_IPU1_DI1_PODF_OFFSET) + 1;
2760 return clk_get_rate(clk->parent) / div;
2763 static int _clk_ipu1_di1_set_rate(struct clk *clk, unsigned long rate)
2766 u32 parent_rate = clk_get_rate(clk->parent);
2768 if ((clk->parent == &ldb_di0_clk) ||
2769 (clk->parent == &ldb_di1_clk)) {
2770 if (parent_rate == rate)
2776 div = parent_rate / rate;
2779 if (((parent_rate / div) != rate) || (div > 8))
2782 reg = __raw_readl(MXC_CCM_CHSCCDR);
2783 reg &= ~MXC_CCM_CHSCCDR_IPU1_DI1_PODF_MASK;
2784 reg |= (div - 1) << MXC_CCM_CHSCCDR_IPU1_DI1_PODF_OFFSET;
2785 __raw_writel(reg, MXC_CCM_CHSCCDR);
2791 static int _clk_ipu1_di1_set_parent(struct clk *clk, struct clk *parent)
2795 if (parent == &ldb_di0_clk)
2797 else if (parent == &ldb_di1_clk)
2800 reg = __raw_readl(MXC_CCM_CHSCCDR)
2801 & ~MXC_CCM_CHSCCDR_IPU1_DI1_PRE_CLK_SEL_MASK;
2803 mux = _get_mux6(parent, &mmdc_ch0_axi_clk[0],
2804 &pll3_usb_otg_main_clk, &pll5_video_main_clk,
2805 &pll2_pfd_352M, &pll2_pfd_400M, &pll3_pfd_540M);
2806 reg |= (mux << MXC_CCM_CHSCCDR_IPU1_DI1_PRE_CLK_SEL_OFFSET);
2808 __raw_writel(reg, MXC_CCM_CHSCCDR);
/* NOTE(review): comment says "ipu1_di0" but this is the DI1 path. */
2810 /* Derive clock from divided pre-muxed ipu1_di0 clock.*/
2813 reg = __raw_readl(MXC_CCM_CHSCCDR)
2814 & ~MXC_CCM_CHSCCDR_IPU1_DI1_CLK_SEL_MASK;
2815 __raw_writel(reg | (mux << MXC_CCM_CHSCCDR_IPU1_DI1_CLK_SEL_OFFSET),
2821 static struct clk ipu1_di_clk[] = {
2823 __INIT_CLK_DEBUG(ipu1_di_clk_0)
2825 .parent = &pll5_video_main_clk,
2826 .enable_reg = MXC_CCM_CCGR3,
2827 .enable_shift = MXC_CCM_CCGRx_CG1_OFFSET,
2828 .enable = _clk_enable,
2829 .disable = _clk_disable,
2830 .set_parent = _clk_ipu1_di0_set_parent,
2831 .set_rate = _clk_ipu1_di0_set_rate,
2832 .round_rate = _clk_ipu_di_round_rate,
2833 .get_rate = _clk_ipu1_di0_get_rate,
2834 .flags = AHB_HIGH_SET_POINT | CPU_FREQ_TRIG_UPDATE,
2837 __INIT_CLK_DEBUG(ipu1_di_clk_1)
2839 .parent = &pll5_video_main_clk,
2840 .enable_reg = MXC_CCM_CCGR3,
2841 .enable_shift = MXC_CCM_CCGRx_CG2_OFFSET,
2842 .enable = _clk_enable,
2843 .disable = _clk_disable,
2844 .set_parent = _clk_ipu1_di1_set_parent,
2845 .set_rate = _clk_ipu1_di1_set_rate,
2846 .round_rate = _clk_ipu_di_round_rate,
2847 .get_rate = _clk_ipu1_di1_get_rate,
2848 .flags = AHB_HIGH_SET_POINT | CPU_FREQ_TRIG_UPDATE,
/*
 * _clk_ipu2_di0_get_rate - current IPU2 DI0 rate.  When an LDB DI clock
 * is the parent the rate is passed through unchanged; otherwise it is
 * the parent rate divided by the CSCDR2 PODF field (+1 encoding).
 */
static unsigned long _clk_ipu2_di0_get_rate(struct clk *clk)
	if ((clk->parent == &ldb_di0_clk) ||
		(clk->parent == &ldb_di1_clk))
		return clk_get_rate(clk->parent);

	reg = __raw_readl(MXC_CCM_CSCDR2);

	div = ((reg & MXC_CCM_CSCDR2_IPU2_DI0_PODF_MASK) >>
		MXC_CCM_CSCDR2_IPU2_DI0_PODF_OFFSET) + 1;

	return clk_get_rate(clk->parent) / div;

/*
 * _clk_ipu2_di0_set_rate - program the IPU2 DI0 PODF divider.  For LDB
 * parents the only valid request is the parent rate itself.  The
 * divider must divide the parent rate exactly and fit in 3 bits
 * (divide-by-1..8).
 */
static int _clk_ipu2_di0_set_rate(struct clk *clk, unsigned long rate)
	u32 parent_rate = clk_get_rate(clk->parent);

	if ((clk->parent == &ldb_di0_clk) ||
		(clk->parent == &ldb_di1_clk)) {
		if (parent_rate == rate)

	div = parent_rate / rate;

	if (((parent_rate / div) != rate) || (div > 8))

	reg = __raw_readl(MXC_CCM_CSCDR2);
	reg &= ~MXC_CCM_CSCDR2_IPU2_DI0_PODF_MASK;
	reg |= (div - 1) << MXC_CCM_CSCDR2_IPU2_DI0_PODF_OFFSET;
	__raw_writel(reg, MXC_CCM_CSCDR2);

/*
 * _clk_ipu2_di0_set_parent - select the IPU2 DI0 parent.  LDB DI clocks
 * are selected directly via CLK_SEL; other parents go through the
 * pre-mux (PRE_CLK_SEL) and divider path in CSCDR2.
 */
static int _clk_ipu2_di0_set_parent(struct clk *clk, struct clk *parent)
	if (parent == &ldb_di0_clk)
	else if (parent == &ldb_di1_clk)
		reg = __raw_readl(MXC_CCM_CSCDR2)
			& ~MXC_CCM_CSCDR2_IPU2_DI0_PRE_CLK_SEL_MASK;

		/* Map the parent clock to its pre-mux select encoding. */
		mux = _get_mux6(parent, &mmdc_ch0_axi_clk[0],
			&pll3_usb_otg_main_clk, &pll5_video_main_clk,
			&pll2_pfd_352M, &pll2_pfd_400M, &pll3_pfd_540M);
		reg |= (mux << MXC_CCM_CSCDR2_IPU2_DI0_PRE_CLK_SEL_OFFSET);

		__raw_writel(reg, MXC_CCM_CSCDR2);

	/* Derive clock from divided pre-muxed ipu2_di0 clock.*/
	reg = __raw_readl(MXC_CCM_CSCDR2)
		& ~MXC_CCM_CSCDR2_IPU2_DI0_CLK_SEL_MASK;
	__raw_writel(reg | (mux << MXC_CCM_CSCDR2_IPU2_DI0_CLK_SEL_OFFSET),
/*
 * _clk_ipu2_di1_get_rate - current IPU2 DI1 rate: pass-through for LDB
 * parents, otherwise parent rate / (PODF + 1) from CSCDR2.
 */
static unsigned long _clk_ipu2_di1_get_rate(struct clk *clk)
	if ((clk->parent == &ldb_di0_clk) ||
		(clk->parent == &ldb_di1_clk))
		return clk_get_rate(clk->parent);

	reg = __raw_readl(MXC_CCM_CSCDR2);

	div = ((reg & MXC_CCM_CSCDR2_IPU2_DI1_PODF_MASK)
		>> MXC_CCM_CSCDR2_IPU2_DI1_PODF_OFFSET) + 1;

	return clk_get_rate(clk->parent) / div;

/*
 * _clk_ipu2_di1_set_rate - program the IPU2 DI1 PODF divider (exact
 * divide, 1..8) in CSCDR2; for LDB parents only the parent rate itself
 * is accepted.
 */
static int _clk_ipu2_di1_set_rate(struct clk *clk, unsigned long rate)
	u32 parent_rate = clk_get_rate(clk->parent);

	if ((clk->parent == &ldb_di0_clk) ||
		(clk->parent == &ldb_di1_clk)) {
		if (parent_rate == rate)

	div = parent_rate / rate;

	if (((parent_rate / div) != rate) || (div > 8))

	reg = __raw_readl(MXC_CCM_CSCDR2);
	reg &= ~MXC_CCM_CSCDR2_IPU2_DI1_PODF_MASK;
	reg |= (div - 1) << MXC_CCM_CSCDR2_IPU2_DI1_PODF_OFFSET;
	__raw_writel(reg, MXC_CCM_CSCDR2);

/*
 * _clk_ipu2_di1_set_parent - select the IPU2 DI1 parent: LDB DI clocks
 * directly via CLK_SEL, anything else through the CSCDR2 pre-mux.
 */
static int _clk_ipu2_di1_set_parent(struct clk *clk, struct clk *parent)
	if (parent == &ldb_di0_clk)
	else if (parent == &ldb_di1_clk)
		reg = __raw_readl(MXC_CCM_CSCDR2)
			& ~MXC_CCM_CSCDR2_IPU2_DI1_PRE_CLK_SEL_MASK;

		/* Map the parent clock to its pre-mux select encoding. */
		mux = _get_mux6(parent, &mmdc_ch0_axi_clk[0],
			&pll3_usb_otg_main_clk, &pll5_video_main_clk,
			&pll2_pfd_352M, &pll2_pfd_400M, &pll3_pfd_540M);
		reg |= (mux << MXC_CCM_CSCDR2_IPU2_DI1_PRE_CLK_SEL_OFFSET);

		__raw_writel(reg, MXC_CCM_CSCDR2);

	/* Derive clock from divided pre-muxed ipu2_di1 clock.*/
	reg = __raw_readl(MXC_CCM_CSCDR2)
		& ~MXC_CCM_CSCDR2_IPU2_DI1_CLK_SEL_MASK;
	__raw_writel(reg | (mux << MXC_CCM_CSCDR2_IPU2_DI1_CLK_SEL_OFFSET),
/*
 * IPU2 display-interface root clocks: DI0 (CCGR3/CG4) and DI1
 * (CCGR3/CG5), both defaulting to the video PLL (pll5) as parent.
 */
static struct clk ipu2_di_clk[] = {
	__INIT_CLK_DEBUG(ipu2_di_clk_0)
	.parent = &pll5_video_main_clk,
	.enable_reg = MXC_CCM_CCGR3,
	.enable_shift = MXC_CCM_CCGRx_CG4_OFFSET,
	.enable = _clk_enable,
	.disable = _clk_disable,
	.set_parent = _clk_ipu2_di0_set_parent,
	.set_rate = _clk_ipu2_di0_set_rate,
	.round_rate = _clk_ipu_di_round_rate,
	.get_rate = _clk_ipu2_di0_get_rate,
	.flags = AHB_HIGH_SET_POINT | CPU_FREQ_TRIG_UPDATE,
	__INIT_CLK_DEBUG(ipu2_di_clk_1)
	.parent = &pll5_video_main_clk,
	.enable_reg = MXC_CCM_CCGR3,
	.enable_shift = MXC_CCM_CCGRx_CG5_OFFSET,
	.enable = _clk_enable,
	.disable = _clk_disable,
	.set_parent = _clk_ipu2_di1_set_parent,
	.set_rate = _clk_ipu2_di1_set_rate,
	.round_rate = _clk_ipu_di_round_rate,
	.get_rate = _clk_ipu2_di1_get_rate,
	.flags = AHB_HIGH_SET_POINT | CPU_FREQ_TRIG_UPDATE,
/*
 * FlexCAN2 clocks: module clock (CCGR0/CG9) chained to its serial clock
 * (CCGR0/CG10) via .secondary.  Both run from the fixed pll3_60M tap.
 */
static struct clk can2_clk[] = {
	__INIT_CLK_DEBUG(can2_module_clk)
	.parent = &pll3_60M,
	.enable_reg = MXC_CCM_CCGR0,
	.enable_shift = MXC_CCM_CCGRx_CG9_OFFSET,
	.enable = _clk_enable,
	.disable = _clk_disable,
	.secondary = &can2_clk[1],
	.flags = AHB_HIGH_SET_POINT | CPU_FREQ_TRIG_UPDATE,
	__INIT_CLK_DEBUG(can2_serial_clk)
	.parent = &pll3_60M,
	.enable_reg = MXC_CCM_CCGR0,
	.enable_shift = MXC_CCM_CCGRx_CG10_OFFSET,
	.enable = _clk_enable,
	.disable = _clk_disable,

/*
 * FlexCAN1 clocks: module clock (CCGR0/CG7) chained to its serial clock
 * (CCGR0/CG8), same structure as can2_clk above.
 */
static struct clk can1_clk[] = {
	__INIT_CLK_DEBUG(can1_module_clk)
	.parent = &pll3_60M,
	.enable_reg = MXC_CCM_CCGR0,
	.enable_shift = MXC_CCM_CCGRx_CG7_OFFSET,
	.enable = _clk_enable,
	.disable = _clk_disable,
	.secondary = &can1_clk[1],
	.flags = AHB_HIGH_SET_POINT | CPU_FREQ_TRIG_UPDATE,
	__INIT_CLK_DEBUG(can1_serial_clk)
	.parent = &pll3_60M,
	.enable_reg = MXC_CCM_CCGR0,
	.enable_shift = MXC_CCM_CCGRx_CG8_OFFSET,
	.enable = _clk_enable,
	.disable = _clk_disable,
/*
 * _clk_spdif_round_rate - round a requested rate to what the SPDIF
 * pred/podf divider pair (each up to divide-by-8) can produce from the
 * current parent.
 */
static unsigned long _clk_spdif_round_rate(struct clk *clk,
	u32 parent_rate = clk_get_rate(clk->parent);
	u32 div = parent_rate / rate;

	if (parent_rate % rate)

	/* Split the total divider into a 3-bit pred and podf pair. */
	__calc_pre_post_dividers(1 << 3, div, &pre, &post);

	return parent_rate / (pre * post);

/* Select the SPDIF0 parent via the CDCDR CLK_SEL mux. */
static int _clk_spdif0_set_parent(struct clk *clk, struct clk *parent)
	reg = __raw_readl(MXC_CCM_CDCDR)
		& ~MXC_CCM_CDCDR_SPDIF0_CLK_SEL_MASK;

	mux = _get_mux6(parent, &pll4_audio_main_clk,
		&pll3_pfd_508M, &pll3_pfd_454M,
		&pll3_sw_clk, NULL, NULL);
	reg |= mux << MXC_CCM_CDCDR_SPDIF0_CLK_SEL_OFFSET;

	__raw_writel(reg, MXC_CCM_CDCDR);

/* Current SPDIF0 rate: parent rate / (pred+1) / (podf+1) from CDCDR. */
static unsigned long _clk_spdif0_get_rate(struct clk *clk)
	u32 reg, pred, podf;

	reg = __raw_readl(MXC_CCM_CDCDR);

	pred = ((reg & MXC_CCM_CDCDR_SPDIF0_CLK_PRED_MASK)
		>> MXC_CCM_CDCDR_SPDIF0_CLK_PRED_OFFSET) + 1;
	podf = ((reg & MXC_CCM_CDCDR_SPDIF0_CLK_PODF_MASK)
		>> MXC_CCM_CDCDR_SPDIF0_CLK_PODF_OFFSET) + 1;

	return clk_get_rate(clk->parent) / (pred * podf);

/*
 * Program the SPDIF0 pred/podf pair.  The total divider must divide the
 * parent rate exactly and not exceed 64 (8 * 8).
 */
static int _clk_spdif0_set_rate(struct clk *clk, unsigned long rate)
	u32 reg, div, pre, post;
	u32 parent_rate = clk_get_rate(clk->parent);

	div = parent_rate / rate;

	if (((parent_rate / div) != rate) || div > 64)

	__calc_pre_post_dividers(1 << 3, div, &pre, &post);

	reg = __raw_readl(MXC_CCM_CDCDR);
	reg &= ~(MXC_CCM_CDCDR_SPDIF0_CLK_PRED_MASK|
		 MXC_CCM_CDCDR_SPDIF0_CLK_PODF_MASK);
	reg |= (post - 1) << MXC_CCM_CDCDR_SPDIF0_CLK_PODF_OFFSET;
	reg |= (pre - 1) << MXC_CCM_CDCDR_SPDIF0_CLK_PRED_OFFSET;

	__raw_writel(reg, MXC_CCM_CDCDR);

/*
 * SPDIF0 clocks: the root (CCGR5/CG7) chains a second entry that only
 * pulls in the SPBA bus clock as a dependency.
 */
static struct clk spdif0_clk[] = {
	__INIT_CLK_DEBUG(spdif0_clk_0)
	.parent = &pll3_sw_clk,
	.enable = _clk_enable,
	.enable_reg = MXC_CCM_CCGR5,
	.enable_shift = MXC_CCM_CCGRx_CG7_OFFSET,
	.disable = _clk_disable,
	.secondary = &spdif0_clk[1],
	.set_rate = _clk_spdif0_set_rate,
	.get_rate = _clk_spdif0_get_rate,
	.set_parent = _clk_spdif0_set_parent,
	.round_rate = _clk_spdif_round_rate,
	.flags = AHB_HIGH_SET_POINT | CPU_FREQ_TRIG_UPDATE,
	__INIT_CLK_DEBUG(spdif0_clk_1)
	.secondary = &spba_clk,
/*
 * _clk_esai_round_rate - round a requested ESAI rate to what the
 * pred/podf pair (each up to divide-by-8) can produce.
 */
static unsigned long _clk_esai_round_rate(struct clk *clk,
	u32 parent_rate = clk_get_rate(clk->parent);
	u32 div = parent_rate / rate;

	if (parent_rate % rate)

	__calc_pre_post_dividers(1 << 3, div, &pre, &post);

	return parent_rate / (pre * post);

/* Select the ESAI parent via the CSCMR2 CLK_SEL mux. */
static int _clk_esai_set_parent(struct clk *clk, struct clk *parent)
	reg = __raw_readl(MXC_CCM_CSCMR2) & ~MXC_CCM_CSCMR2_ESAI_CLK_SEL_MASK;

	mux = _get_mux6(parent, &pll4_audio_main_clk, &pll3_pfd_508M,
		&pll3_pfd_454M, &pll3_sw_clk, NULL, NULL);
	reg |= mux << MXC_CCM_CSCMR2_ESAI_CLK_SEL_OFFSET;

	__raw_writel(reg, MXC_CCM_CSCMR2);

/* Current ESAI rate: parent rate / (pred+1) / (podf+1) from CS1CDR. */
static unsigned long _clk_esai_get_rate(struct clk *clk)
	u32 reg, pred, podf;

	reg = __raw_readl(MXC_CCM_CS1CDR);

	pred = ((reg & MXC_CCM_CS1CDR_ESAI_CLK_PRED_MASK)
		>> MXC_CCM_CS1CDR_ESAI_CLK_PRED_OFFSET) + 1;
	podf = ((reg & MXC_CCM_CS1CDR_ESAI_CLK_PODF_MASK)
		>> MXC_CCM_CS1CDR_ESAI_CLK_PODF_OFFSET) + 1;

	return clk_get_rate(clk->parent) / (pred * podf);

/* Program the ESAI pred/podf pair; exact divide, total divider <= 64. */
static int _clk_esai_set_rate(struct clk *clk, unsigned long rate)
	u32 reg, div, pre, post;
	u32 parent_rate = clk_get_rate(clk->parent);

	div = parent_rate / rate;

	if (((parent_rate / div) != rate) || div > 64)

	__calc_pre_post_dividers(1 << 3, div, &pre, &post);

	reg = __raw_readl(MXC_CCM_CS1CDR);
	reg &= ~(MXC_CCM_CS1CDR_ESAI_CLK_PRED_MASK|
		 MXC_CCM_CS1CDR_ESAI_CLK_PODF_MASK);
	reg |= (post - 1) << MXC_CCM_CS1CDR_ESAI_CLK_PODF_OFFSET;
	reg |= (pre - 1) << MXC_CCM_CS1CDR_ESAI_CLK_PRED_OFFSET;

	__raw_writel(reg, MXC_CCM_CS1CDR);

/* ESAI audio interface clock (CCGR1/CG8), SPBA bus as secondary. */
static struct clk esai_clk = {
	__INIT_CLK_DEBUG(esai_clk)
	.parent = &pll3_sw_clk,
	.secondary = &spba_clk,
	.enable_reg = MXC_CCM_CCGR1,
	.enable_shift = MXC_CCM_CCGRx_CG8_OFFSET,
	.enable = _clk_enable,
	.disable = _clk_disable,
	.set_rate = _clk_esai_set_rate,
	.get_rate = _clk_esai_get_rate,
	.set_parent = _clk_esai_set_parent,
	.round_rate = _clk_esai_round_rate,
/*
 * _clk_enet_enable - bring up the ENET reference clock from PLL8.
 * NOTE(review): both BYPASS and ENABLE bits are *cleared* here and
 * *set* in the disable path below; the ENABLE bit's polarity on the
 * ENET PLL is therefore inverted relative to its name — confirm
 * against the ANADIG PLL register description before changing.
 */
static int _clk_enet_enable(struct clk *clk)
	/* Enable ENET ref clock */
	reg = __raw_readl(PLL8_ENET_BASE_ADDR);
	reg &= ~ANADIG_PLL_BYPASS;
	reg &= ~ANADIG_PLL_ENABLE;
	__raw_writel(reg, PLL8_ENET_BASE_ADDR);

/* Gate the ENET reference clock: re-bypass the PLL (inverse of enable). */
static void _clk_enet_disable(struct clk *clk)
	/* Disable ENET ref clock */
	reg = __raw_readl(PLL8_ENET_BASE_ADDR);
	reg |= ANADIG_PLL_BYPASS;
	reg |= ANADIG_PLL_ENABLE;
	__raw_writel(reg, PLL8_ENET_BASE_ADDR);

/*
 * Program the ENET PLL post-divider (DIV_SELECT) that derives the MAC
 * reference clock from the 500 MHz PLL output.
 */
static int _clk_enet_set_rate(struct clk *clk, unsigned long rate)
	unsigned int reg, div = 1;

	reg = __raw_readl(PLL8_ENET_BASE_ADDR);
	reg &= ~ANADIG_PLL_ENET_DIV_SELECT_MASK;
	reg |= (div << ANADIG_PLL_ENET_DIV_SELECT_OFFSET);
	__raw_writel(reg, PLL8_ENET_BASE_ADDR);

/* Current ENET rate: 500 MHz divided by (DIV_SELECT + 1). */
static unsigned long _clk_enet_get_rate(struct clk *clk)
	div = (__raw_readl(PLL8_ENET_BASE_ADDR))
		& ANADIG_PLL_ENET_DIV_SELECT_MASK;

	return 500000000 / (div + 1);
/*
 * ENET MAC clock (CCGR1/CG5) driven from the ENET PLL; the second
 * array entry chains the MMDC channel-0 AXI and per1 bus clocks so the
 * DMA path stays clocked while the MAC is in use.
 */
static struct clk enet_clk[] = {
	__INIT_CLK_DEBUG(enet_clk)
	.parent = &pll8_enet_main_clk,
	.enable_reg = MXC_CCM_CCGR1,
	.enable_shift = MXC_CCM_CCGRx_CG5_OFFSET,
	.enable = _clk_enet_enable,
	.disable = _clk_enet_disable,
	.set_rate = _clk_enet_set_rate,
	.get_rate = _clk_enet_get_rate,
	.secondary = &enet_clk[1],
	.flags = AHB_HIGH_SET_POINT | CPU_FREQ_TRIG_UPDATE,
	.parent = &mmdc_ch0_axi_clk[0],
	.secondary = &mx6per1_clk,
/*
 * ECSPI1..5 clocks: one gate per controller in CCGR1 (CG0..CG4), all
 * fed from the fixed pll3_60M tap with the SPBA bus clock as a
 * secondary dependency.
 */
static struct clk ecspi_clk[] = {
	__INIT_CLK_DEBUG(ecspi0_clk)
	.parent = &pll3_60M,
	.secondary = &spba_clk,
	.enable_reg = MXC_CCM_CCGR1,
	.enable_shift = MXC_CCM_CCGRx_CG0_OFFSET,
	.enable = _clk_enable,
	.disable = _clk_disable,
	__INIT_CLK_DEBUG(ecspi1_clk)
	.parent = &pll3_60M,
	.secondary = &spba_clk,
	.enable_reg = MXC_CCM_CCGR1,
	.enable_shift = MXC_CCM_CCGRx_CG1_OFFSET,
	.enable = _clk_enable,
	.disable = _clk_disable,
	__INIT_CLK_DEBUG(ecspi2_clk)
	.parent = &pll3_60M,
	.secondary = &spba_clk,
	.enable_reg = MXC_CCM_CCGR1,
	.enable_shift = MXC_CCM_CCGRx_CG2_OFFSET,
	.enable = _clk_enable,
	.disable = _clk_disable,
	__INIT_CLK_DEBUG(ecspi3_clk)
	.parent = &pll3_60M,
	.secondary = &spba_clk,
	.enable_reg = MXC_CCM_CCGR1,
	.enable_shift = MXC_CCM_CCGRx_CG3_OFFSET,
	.enable = _clk_enable,
	.disable = _clk_disable,
	__INIT_CLK_DEBUG(ecspi4_clk)
	.parent = &pll3_60M,
	.secondary = &spba_clk,
	.enable_reg = MXC_CCM_CCGR1,
	.enable_shift = MXC_CCM_CCGRx_CG4_OFFSET,
	.enable = _clk_enable,
	.disable = _clk_disable,
/*
 * _clk_emi_slow_round_rate - round a requested rate to the nearest
 * achievable parent_rate / div for the ACLK_EMI_SLOW divider.
 */
static unsigned long _clk_emi_slow_round_rate(struct clk *clk,
	u32 parent_rate = clk_get_rate(clk->parent);

	div = parent_rate / rate;

	/* Make sure rate is not greater than the maximum value for the clock.
	 * Also prevent a div of 0.

	return parent_rate / div;

/* Select the ACLK_EMI_SLOW source via the CSCMR1 mux. */
static int _clk_emi_slow_set_parent(struct clk *clk, struct clk *parent)
	u32 reg = __raw_readl(MXC_CCM_CSCMR1)
		& ~MXC_CCM_CSCMR1_ACLK_EMI_SLOW_MASK;

	mux = _get_mux6(parent, &axi_clk, &pll3_usb_otg_main_clk,
		&pll2_pfd_400M, &pll2_pfd_352M, NULL, NULL);
	reg |= (mux << MXC_CCM_CSCMR1_ACLK_EMI_SLOW_OFFSET);
	__raw_writel(reg, MXC_CCM_CSCMR1);

/* Current ACLK_EMI_SLOW rate: parent rate / (PODF + 1). */
static unsigned long _clk_emi_slow_get_rate(struct clk *clk)
	reg = __raw_readl(MXC_CCM_CSCMR1);
	div = ((reg & MXC_CCM_CSCMR1_ACLK_EMI_SLOW_PODF_MASK) >>
		MXC_CCM_CSCMR1_ACLK_EMI_SLOW_PODF_OFFSET) + 1;

	return clk_get_rate(clk->parent) / div;

/* Program the ACLK_EMI_SLOW divider; exact divide, 1..8. */
static int _clk_emi_slow_set_rate(struct clk *clk, unsigned long rate)
	u32 parent_rate = clk_get_rate(clk->parent);

	div = parent_rate / rate;

	if (((parent_rate / div) != rate) || (div > 8))

	reg = __raw_readl(MXC_CCM_CSCMR1);
	reg &= ~MXC_CCM_CSCMR1_ACLK_EMI_SLOW_PODF_MASK;
	reg |= (div - 1) << MXC_CCM_CSCMR1_ACLK_EMI_SLOW_PODF_OFFSET;
	__raw_writel(reg, MXC_CCM_CSCMR1);

/* EMI slow (ACLK_EMI_SLOW) clock, gated by CCGR6/CG5. */
static struct clk emi_slow_clk = {
	__INIT_CLK_DEBUG(emi_slow_clk)
	.enable_reg = MXC_CCM_CCGR6,
	.enable_shift = MXC_CCM_CCGRx_CG5_OFFSET,
	.enable = _clk_enable,
	.disable = _clk_disable,
	.set_rate = _clk_emi_slow_set_rate,
	.get_rate = _clk_emi_slow_get_rate,
	.round_rate = _clk_emi_slow_round_rate,
	.set_parent = _clk_emi_slow_set_parent,
/*
 * _clk_emi_round_rate - round a requested rate to the nearest
 * achievable parent_rate / div for the ACLK_EMI divider.
 */
static unsigned long _clk_emi_round_rate(struct clk *clk,
	u32 parent_rate = clk_get_rate(clk->parent);

	div = parent_rate / rate;

	/* Make sure rate is not greater than the maximum value for the clock.
	 * Also prevent a div of 0.

	return parent_rate / div;

/* Select the ACLK_EMI source via the CSCMR1 mux. */
static int _clk_emi_set_parent(struct clk *clk, struct clk *parent)
	u32 reg = __raw_readl(MXC_CCM_CSCMR1) & ~MXC_CCM_CSCMR1_ACLK_EMI_MASK;

	mux = _get_mux6(parent, &axi_clk, &pll3_usb_otg_main_clk,
		&pll2_pfd_400M, &pll2_pfd_352M, NULL, NULL);
	reg |= (mux << MXC_CCM_CSCMR1_ACLK_EMI_OFFSET);
	__raw_writel(reg, MXC_CCM_CSCMR1);

/* Current ACLK_EMI rate: parent rate / (PODF + 1). */
static unsigned long _clk_emi_get_rate(struct clk *clk)
	reg = __raw_readl(MXC_CCM_CSCMR1);
	div = ((reg & MXC_CCM_CSCMR1_ACLK_EMI_PODF_MASK) >>
		MXC_CCM_CSCMR1_ACLK_EMI_PODF_OFFSET) + 1;

	return clk_get_rate(clk->parent) / div;

/* Program the ACLK_EMI divider; exact divide, 1..8. */
static int _clk_emi_set_rate(struct clk *clk, unsigned long rate)
	u32 parent_rate = clk_get_rate(clk->parent);

	div = parent_rate / rate;

	if (((parent_rate / div) != rate) || (div > 8))

	reg = __raw_readl(MXC_CCM_CSCMR1);
	reg &= ~MXC_CCM_CSCMR1_ACLK_EMI_PODF_MASK;
	reg |= (div - 1) << MXC_CCM_CSCMR1_ACLK_EMI_PODF_OFFSET;
	__raw_writel(reg, MXC_CCM_CSCMR1);

/* EMI (ACLK_EMI) clock node; no CCM gate of its own, rate ops only. */
static struct clk emi_clk = {
	__INIT_CLK_DEBUG(emi_clk)
	.set_rate = _clk_emi_set_rate,
	.get_rate = _clk_emi_get_rate,
	.round_rate = _clk_emi_round_rate,
	.set_parent = _clk_emi_set_parent,
/*
 * _clk_enfc_round_rate - round a requested NAND-flash-controller rate
 * to what the ENFC pred (6-bit) / podf pair can produce.
 */
static unsigned long _clk_enfc_round_rate(struct clk *clk,
	u32 parent_rate = clk_get_rate(clk->parent);
	u32 div = parent_rate / rate;

	if (parent_rate % rate)

	__calc_pre_post_dividers(1 << 6, div, &pre, &post);

	return parent_rate / (pre * post);

/* Select the ENFC source via the CS2CDR CLK_SEL mux. */
static int _clk_enfc_set_parent(struct clk *clk, struct clk *parent)
	reg = __raw_readl(MXC_CCM_CS2CDR)
		& ~MXC_CCM_CS2CDR_ENFC_CLK_SEL_MASK;

	mux = _get_mux6(parent, &pll2_pfd_352M,
		&pll2_528_bus_main_clk, &pll3_usb_otg_main_clk,
		&pll2_pfd_400M, NULL, NULL);
	reg |= mux << MXC_CCM_CS2CDR_ENFC_CLK_SEL_OFFSET;

	__raw_writel(reg, MXC_CCM_CS2CDR);

/* Current ENFC rate: parent rate / (pred+1) / (podf+1) from CS2CDR. */
static unsigned long _clk_enfc_get_rate(struct clk *clk)
	u32 reg, pred, podf;

	reg = __raw_readl(MXC_CCM_CS2CDR);

	pred = ((reg & MXC_CCM_CS2CDR_ENFC_CLK_PRED_MASK)
		>> MXC_CCM_CS2CDR_ENFC_CLK_PRED_OFFSET) + 1;
	podf = ((reg & MXC_CCM_CS2CDR_ENFC_CLK_PODF_MASK)
		>> MXC_CCM_CS2CDR_ENFC_CLK_PODF_OFFSET) + 1;

	return clk_get_rate(clk->parent) / (pred * podf);

/* Program the ENFC pred/podf pair; exact divide, total divider <= 512. */
static int _clk_enfc_set_rate(struct clk *clk, unsigned long rate)
	u32 reg, div, pre, post;
	u32 parent_rate = clk_get_rate(clk->parent);

	div = parent_rate / rate;

	if (((parent_rate / div) != rate) || div > 512)

	__calc_pre_post_dividers(1 << 6, div, &pre, &post);

	reg = __raw_readl(MXC_CCM_CS2CDR);
	reg &= ~(MXC_CCM_CS2CDR_ENFC_CLK_PRED_MASK|
		 MXC_CCM_CS2CDR_ENFC_CLK_PODF_MASK);
	reg |= (post - 1) << MXC_CCM_CS2CDR_ENFC_CLK_PODF_OFFSET;
	reg |= (pre - 1) << MXC_CCM_CS2CDR_ENFC_CLK_PRED_OFFSET;

	__raw_writel(reg, MXC_CCM_CS2CDR);

/* NAND flash controller clock (CCGR2/CG7), default parent pll2_pfd_352M. */
static struct clk enfc_clk = {
	__INIT_CLK_DEBUG(enfc_clk)
	.parent = &pll2_pfd_352M,
	.enable_reg = MXC_CCM_CCGR2,
	.enable_shift = MXC_CCM_CCGRx_CG7_OFFSET,
	.enable = _clk_enable,
	.disable = _clk_disable,
	.set_rate = _clk_enfc_set_rate,
	.get_rate = _clk_enfc_get_rate,
	.round_rate = _clk_enfc_round_rate,
	.set_parent = _clk_enfc_set_parent,
/*
 * _clk_uart_round_rate - round a requested UART serial-clock rate to
 * the nearest achievable parent_rate / div.
 */
static unsigned long _clk_uart_round_rate(struct clk *clk,
	u32 parent_rate = clk_get_rate(clk->parent);

	div = parent_rate / rate;

	/* Make sure rate is not greater than the maximum value for the clock.
	 * Also prevent a div of 0.

	return parent_rate / div;
3646 static int _clk_uart_set_rate(struct clk *clk, unsigned long rate)
3649 u32 parent_rate = clk_get_rate(clk->parent);
3651 div = parent_rate / rate;
3654 if (((parent_rate / div) != rate) || (div > 64))
3657 reg = __raw_readl(MXC_CCM_CSCDR1) & MXC_CCM_CSCDR1_UART_CLK_PODF_MASK;
3658 reg |= ((div - 1) << MXC_CCM_CSCDR1_UART_CLK_PODF_OFFSET);
3660 __raw_writel(reg, MXC_CCM_CSCDR1);
/* Current UART serial-clock rate: parent rate / (CSCDR1 PODF + 1). */
static unsigned long _clk_uart_get_rate(struct clk *clk)
	/* Isolate the UART PODF field (read-only use of the mask). */
	reg = __raw_readl(MXC_CCM_CSCDR1) & MXC_CCM_CSCDR1_UART_CLK_PODF_MASK;
	div = (reg >> MXC_CCM_CSCDR1_UART_CLK_PODF_OFFSET) + 1;
	val = clk_get_rate(clk->parent) / div;

/*
 * UART clocks: ipg gate (CCGR5/CG12) chained to the serial (baud
 * reference) gate (CCGR5/CG13); the serial clock runs from pll3_80M.
 */
static struct clk uart_clk[] = {
	__INIT_CLK_DEBUG(uart_clk)
	.parent = &pll3_80M,
	.enable_reg = MXC_CCM_CCGR5,
	.enable_shift = MXC_CCM_CCGRx_CG12_OFFSET,
	.enable = _clk_enable,
	.disable = _clk_disable,
	.secondary = &uart_clk[1],
	.set_rate = _clk_uart_set_rate,
	.get_rate = _clk_uart_get_rate,
	.round_rate = _clk_uart_round_rate,
	__INIT_CLK_DEBUG(uart_serial_clk)
	.enable_reg = MXC_CCM_CCGR5,
	.enable_shift = MXC_CCM_CCGRx_CG13_OFFSET,
	.enable = _clk_enable,
	.disable = _clk_disable,
/*
 * _clk_hsi_tx_round_rate - round a requested MIPI HSI TX rate to the
 * nearest achievable parent_rate / div.
 */
static unsigned long _clk_hsi_tx_round_rate(struct clk *clk,
	u32 parent_rate = clk_get_rate(clk->parent);

	div = parent_rate / rate;

	/* Make sure rate is not greater than the maximum value for the clock.
	 * Also prevent a div of 0.

	return parent_rate / div;

/*
 * Select the HSI TX source: a single select bit in CDCDR (set for
 * pll2_pfd_400M, clear otherwise).
 */
static int _clk_hsi_tx_set_parent(struct clk *clk, struct clk *parent)
	u32 reg = __raw_readl(MXC_CCM_CDCDR) & ~MXC_CCM_CDCDR_HSI_TX_CLK_SEL;

	if (parent == &pll2_pfd_400M)
		reg |= (MXC_CCM_CDCDR_HSI_TX_CLK_SEL);

	__raw_writel(reg, MXC_CCM_CDCDR);

/* Current HSI TX rate: parent rate / (CDCDR PODF + 1). */
static unsigned long _clk_hsi_tx_get_rate(struct clk *clk)
	reg = __raw_readl(MXC_CCM_CDCDR);
	div = ((reg & MXC_CCM_CDCDR_HSI_TX_PODF_MASK) >>
		MXC_CCM_CDCDR_HSI_TX_PODF_OFFSET) + 1;

	return clk_get_rate(clk->parent) / div;

/* Program the HSI TX divider; exact divide, 1..8. */
static int _clk_hsi_tx_set_rate(struct clk *clk, unsigned long rate)
	u32 parent_rate = clk_get_rate(clk->parent);

	div = parent_rate / rate;

	if (((parent_rate / div) != rate) || (div > 8))

	reg = __raw_readl(MXC_CCM_CDCDR);
	reg &= ~MXC_CCM_CDCDR_HSI_TX_PODF_MASK;
	reg |= (div - 1) << MXC_CCM_CDCDR_HSI_TX_PODF_OFFSET;
	__raw_writel(reg, MXC_CCM_CDCDR);

/*
 * MIPI HSI TX clock (CCGR3/CG8); the second entry chains the per1/per2
 * bus clocks as dependencies.
 */
static struct clk hsi_tx_clk[] = {
	__INIT_CLK_DEBUG(hsi_tx_clk)
	.parent = &pll2_pfd_400M,
	.enable_reg = MXC_CCM_CCGR3,
	.enable_shift = MXC_CCM_CCGRx_CG8_OFFSET,
	.enable = _clk_enable,
	.disable = _clk_disable,
	.set_parent = _clk_hsi_tx_set_parent,
	.round_rate = _clk_hsi_tx_round_rate,
	.set_rate = _clk_hsi_tx_set_rate,
	.get_rate = _clk_hsi_tx_get_rate,
	.secondary = &hsi_tx_clk[1],
	.flags = AHB_HIGH_SET_POINT | CPU_FREQ_TRIG_UPDATE,
	.parent = &mx6per1_clk,
	.secondary = &mx6per2_clk,
/*
 * HDMI clocks: ISFR (CCGR2/CG2, from pll3_pfd_540M) and IAHB
 * (CCGR2/CG0) register/bus clocks.
 */
static struct clk hdmi_clk[] = {
	__INIT_CLK_DEBUG(hdmi_isfr_clk)
	.parent = &pll3_pfd_540M,
	.enable_reg = MXC_CCM_CCGR2,
	.enable_shift = MXC_CCM_CCGRx_CG2_OFFSET,
	.enable = _clk_enable,
	.disable = _clk_disable,
	.flags = AHB_HIGH_SET_POINT | CPU_FREQ_TRIG_UPDATE,
	__INIT_CLK_DEBUG(hdmi_iahb_clk)
	.enable_reg = MXC_CCM_CCGR2,
	.enable_shift = MXC_CCM_CCGRx_CG0_OFFSET,
	.enable = _clk_enable,
	.disable = _clk_disable,
3807 static struct clk caam_clk[] = {
3809 __INIT_CLK_DEBUG(caam_mem_clk)
3811 .enable_reg = MXC_CCM_CCGR0,
3812 .enable_shift = MXC_CCM_CCGRx_CG4_OFFSET,
3813 .enable = _clk_enable,
3814 .disable = _clk_disable,
3815 .secondary = &caam_clk[1],
3816 .flags = AHB_HIGH_SET_POINT | CPU_FREQ_TRIG_UPDATE,
3819 __INIT_CLK_DEBUG(caam_aclk_clk)
3821 .enable_reg = MXC_CCM_CCGR0,
3822 .enable_shift = MXC_CCM_CCGRx_CG5_OFFSET,
3823 .enable = _clk_enable,
3824 .disable = _clk_disable,
3825 .secondary = &caam_clk[2],
3828 __INIT_CLK_DEBUG(caam_ipg_clk)
3830 .enable_reg = MXC_CCM_CCGR0,
3831 .enable_shift = MXC_CCM_CCGRx_CG4_OFFSET,
3832 .enable = _clk_enable,
3833 .disable = _clk_disable,
3834 .parent = &mmdc_ch0_axi_clk[0],
3835 .secondary = &mx6per1_clk,
/*
 * Select the ASRC serial (SPDIF1) source via the CDCDR SPDIF1 mux.
 */
static int _clk_asrc_serial_set_parent(struct clk *clk, struct clk *parent)
	reg = __raw_readl(MXC_CCM_CDCDR) & ~MXC_CCM_CDCDR_SPDIF1_CLK_SEL_MASK;

	mux = _get_mux6(parent, &pll4_audio_main_clk, &pll3_pfd_508M,
		&pll3_pfd_454M, &pll3_sw_clk, NULL, NULL);
	reg |= mux << MXC_CCM_CDCDR_SPDIF1_CLK_SEL_OFFSET;

	__raw_writel(reg, MXC_CCM_CDCDR);

/* Current ASRC serial rate: parent rate / (pred+1) / (podf+1). */
static unsigned long _clk_asrc_serial_get_rate(struct clk *clk)
	u32 reg, pred, podf;

	reg = __raw_readl(MXC_CCM_CDCDR);

	pred = ((reg & MXC_CCM_CDCDR_SPDIF1_CLK_PRED_MASK)
		>> MXC_CCM_CDCDR_SPDIF1_CLK_PRED_OFFSET) + 1;
	podf = ((reg & MXC_CCM_CDCDR_SPDIF1_CLK_PODF_MASK)
		>> MXC_CCM_CDCDR_SPDIF1_CLK_PODF_OFFSET) + 1;

	return clk_get_rate(clk->parent) / (pred * podf);

/* Program the SPDIF1 pred/podf pair; exact divide, total divider <= 64. */
static int _clk_asrc_serial_set_rate(struct clk *clk, unsigned long rate)
	u32 reg, div, pre, post;
	u32 parent_rate = clk_get_rate(clk->parent);

	div = parent_rate / rate;

	if (((parent_rate / div) != rate) || div > 64)

	__calc_pre_post_dividers(1 << 3, div, &pre, &post);

	reg = __raw_readl(MXC_CCM_CDCDR);
	reg &= ~(MXC_CCM_CDCDR_SPDIF1_CLK_PRED_MASK|
		 MXC_CCM_CDCDR_SPDIF1_CLK_PODF_MASK);
	reg |= (post - 1) << MXC_CCM_CDCDR_SPDIF1_CLK_PODF_OFFSET;
	reg |= (pre - 1) << MXC_CCM_CDCDR_SPDIF1_CLK_PRED_OFFSET;

	__raw_writel(reg, MXC_CCM_CDCDR);

/* Round a requested ASRC serial rate to the pred/podf grid. */
static unsigned long _clk_asrc_serial_round_rate(struct clk *clk,
	u32 parent_rate = clk_get_rate(clk->parent);
	u32 div = parent_rate / rate;

	if (parent_rate % rate)

	__calc_pre_post_dividers(1 << 3, div, &pre, &post);

	return parent_rate / (pre * post);

/* ASRC clocks: module gate (CCGR0/CG3) plus the ungated serial clock. */
static struct clk asrc_clk[] = {
	__INIT_CLK_DEBUG(asrc_clk)
	.parent = &pll4_audio_main_clk,
	.enable_reg = MXC_CCM_CCGR0,
	.enable_shift = MXC_CCM_CCGRx_CG3_OFFSET,
	.enable = _clk_enable,
	.disable = _clk_disable,
	.secondary = &spba_clk,
	/*In the MX6 spec, asrc_serial_clk is listed as SPDIF1 clk
	 * This clock can never be gated and does not have any
	 * CCGR bits associated with it.
	__INIT_CLK_DEBUG(asrc_serial_clk)
	.parent = &pll3_sw_clk,
	.set_rate = _clk_asrc_serial_set_rate,
	.get_rate = _clk_asrc_serial_get_rate,
	.set_parent = _clk_asrc_serial_set_parent,
	.round_rate = _clk_asrc_serial_round_rate,
/*
 * APBH DMA clock (CCGR0/CG2).  Parented on usdhc3 (shared root) with
 * per1 as secondary; uses _clk_disable_inwait so the gate stays on in
 * WAIT mode while DMA may still be active.
 */
static struct clk apbh_dma_clk = {
	__INIT_CLK_DEBUG(apbh_dma_clk)
	.parent = &usdhc3_clk,
	.secondary = &mx6per1_clk,
	.enable = _clk_enable,
	.disable = _clk_disable_inwait,
	.enable_reg = MXC_CCM_CCGR0,
	.enable_shift = MXC_CCM_CCGRx_CG2_OFFSET,

/* AIPS-TZ2 bus bridge clock (CCGR0/CG1); kept on in WAIT mode. */
static struct clk aips_tz2_clk = {
	__INIT_CLK_DEBUG(aips_tz2_clk)
	.enable_reg = MXC_CCM_CCGR0,
	.enable_shift = MXC_CCM_CCGRx_CG1_OFFSET,
	.enable = _clk_enable,
	.disable = _clk_disable_inwait,

/* AIPS-TZ1 bus bridge clock (CCGR0/CG0); kept on in WAIT mode. */
static struct clk aips_tz1_clk = {
	__INIT_CLK_DEBUG(aips_tz1_clk)
	.enable_reg = MXC_CCM_CCGR0,
	.enable_shift = MXC_CCM_CCGRx_CG0_OFFSET,
	.enable = _clk_enable,
	.disable = _clk_disable_inwait,

/* OpenVG AXI clock (CCGR3/CG15), derived from the GPU2D AXI clock. */
static struct clk openvg_axi_clk = {
	__INIT_CLK_DEBUG(openvg_axi_clk)
	.parent = &gpu2d_axi_clk,
	.enable = _clk_enable,
	.enable_reg = MXC_CCM_CCGR3,
	.enable_shift = MXC_CCM_CCGRx_CG15_OFFSET,
	.disable = _clk_disable,
	.flags = AHB_HIGH_SET_POINT | CPU_FREQ_TRIG_UPDATE,
/*
 * _clk_gpu3d_core_round_rate - round a requested GPU3D core rate to
 * the nearest achievable parent_rate / div.
 */
static unsigned long _clk_gpu3d_core_round_rate(struct clk *clk,
	u32 parent_rate = clk_get_rate(clk->parent);

	div = parent_rate / rate;

	/* Make sure rate is not greater than the maximum value for the clock.
	 * Also prevent a div of 0.

	return parent_rate / div;

/* Select the GPU3D core source via the CBCMR mux. */
static int _clk_gpu3d_core_set_parent(struct clk *clk, struct clk *parent)
	u32 reg = __raw_readl(MXC_CCM_CBCMR)
		& ~MXC_CCM_CBCMR_GPU3D_CORE_CLK_SEL_MASK;

	mux = _get_mux6(parent, &mmdc_ch0_axi_clk[0],
		&pll3_usb_otg_main_clk,
		&pll2_pfd_594M, &pll2_pfd_400M, NULL, NULL);
	reg |= (mux << MXC_CCM_CBCMR_GPU3D_CORE_CLK_SEL_OFFSET);
	__raw_writel(reg, MXC_CCM_CBCMR);

/* Current GPU3D core rate: parent rate / (CBCMR PODF + 1). */
static unsigned long _clk_gpu3d_core_get_rate(struct clk *clk)
	reg = __raw_readl(MXC_CCM_CBCMR);
	div = ((reg & MXC_CCM_CBCMR_GPU3D_CORE_PODF_MASK) >>
		MXC_CCM_CBCMR_GPU3D_CORE_PODF_OFFSET) + 1;

	return clk_get_rate(clk->parent) / div;

/* Program the GPU3D core divider in CBCMR. */
static int _clk_gpu3d_core_set_rate(struct clk *clk, unsigned long rate)
	u32 parent_rate = clk_get_rate(clk->parent);

	div = parent_rate / rate;

	reg = __raw_readl(MXC_CCM_CBCMR);
	reg &= ~MXC_CCM_CBCMR_GPU3D_CORE_PODF_MASK;
	reg |= (div - 1) << MXC_CCM_CBCMR_GPU3D_CORE_PODF_OFFSET;
	__raw_writel(reg, MXC_CCM_CBCMR);

/*
 * GPU3D core clock (CCGR1/CG13) from pll2_pfd_594M; entry [1] chains
 * the GPU3D AXI and fast bus clocks as dependencies.
 */
static struct clk gpu3d_core_clk[] = {
	__INIT_CLK_DEBUG(gpu3d_core_clk)
	.parent = &pll2_pfd_594M,
	.enable = _clk_enable,
	.enable_reg = MXC_CCM_CCGR1,
	.enable_shift = MXC_CCM_CCGRx_CG13_OFFSET,
	.disable = _clk_disable,
	.set_parent = _clk_gpu3d_core_set_parent,
	.set_rate = _clk_gpu3d_core_set_rate,
	.get_rate = _clk_gpu3d_core_get_rate,
	.round_rate = _clk_gpu3d_core_round_rate,
	.secondary = &gpu3d_core_clk[1],
	.flags = AHB_HIGH_SET_POINT | CPU_FREQ_TRIG_UPDATE,
	.parent = &gpu3d_axi_clk,
	.secondary = &mx6fast1_clk,
/*
 * _clk_gpu2d_core_round_rate - round a requested GPU2D core rate to
 * the nearest achievable parent_rate / div.
 */
static unsigned long _clk_gpu2d_core_round_rate(struct clk *clk,
	u32 parent_rate = clk_get_rate(clk->parent);

	div = parent_rate / rate;

	/* Make sure rate is not greater than the maximum value for the clock.
	 * Also prevent a div of 0.

	return parent_rate / div;

/* Select the GPU2D core source via the CBCMR mux. */
static int _clk_gpu2d_core_set_parent(struct clk *clk, struct clk *parent)
	u32 reg = __raw_readl(MXC_CCM_CBCMR) &
		~MXC_CCM_CBCMR_GPU2D_CLK_SEL_MASK;

	mux = _get_mux6(parent, &axi_clk, &pll3_usb_otg_main_clk,
		&pll2_pfd_352M, &pll2_pfd_400M, NULL, NULL);
	reg |= (mux << MXC_CCM_CBCMR_GPU2D_CLK_SEL_OFFSET);
	__raw_writel(reg, MXC_CCM_CBCMR);

/* Current GPU2D core rate: parent rate / (GPU2D_CORE_PODF + 1). */
static unsigned long _clk_gpu2d_core_get_rate(struct clk *clk)
	reg = __raw_readl(MXC_CCM_CBCMR);
	div = ((reg & MXC_CCM_CBCMR_GPU2D_CORE_PODF_MASK) >>
		MXC_CCM_CBCMR_GPU2D_CORE_PODF_OFFSET) + 1;

	return clk_get_rate(clk->parent) / div;
4103 static int _clk_gpu2d_core_set_rate(struct clk *clk, unsigned long rate)
4106 u32 parent_rate = clk_get_rate(clk->parent);
4108 div = parent_rate / rate;
4111 if (((parent_rate / div) != rate) || (div > 8))
4114 reg = __raw_readl(MXC_CCM_CBCMR);
4115 reg &= ~MXC_CCM_CBCMR_GPU3D_CORE_PODF_MASK;
4116 reg |= (div - 1) << MXC_CCM_CBCMR_GPU3D_CORE_PODF_OFFSET;
4117 __raw_writel(reg, MXC_CCM_CBCMR);
4121 static struct clk gpu2d_core_clk[] = {
4123 __INIT_CLK_DEBUG(gpu2d_core_clk)
4124 .parent = &pll2_pfd_352M,
4125 .enable = _clk_enable,
4126 .enable_reg = MXC_CCM_CCGR1,
4127 .enable_shift = MXC_CCM_CCGRx_CG12_OFFSET,
4128 .disable = _clk_disable,
4129 .set_parent = _clk_gpu2d_core_set_parent,
4130 .set_rate = _clk_gpu2d_core_set_rate,
4131 .get_rate = _clk_gpu2d_core_get_rate,
4132 .round_rate = _clk_gpu2d_core_round_rate,
4133 .secondary = &gpu2d_core_clk[0],
4134 .flags = AHB_HIGH_SET_POINT | CPU_FREQ_TRIG_UPDATE,
4137 .parent = &gpu2d_axi_clk,
4138 .secondary = &mx6fast1_clk,
/*
 * _clk_gpu3d_shader_round_rate - round a requested GPU3D shader rate
 * to the nearest achievable parent_rate / div.
 */
static unsigned long _clk_gpu3d_shader_round_rate(struct clk *clk,
	u32 parent_rate = clk_get_rate(clk->parent);

	div = parent_rate / rate;

	/* Make sure rate is not greater than the maximum value for the clock.
	 * Also prevent a div of 0.

	return parent_rate / div;

/* Select the GPU3D shader source via the CBCMR mux. */
static int _clk_gpu3d_shader_set_parent(struct clk *clk, struct clk *parent)
	u32 reg = __raw_readl(MXC_CCM_CBCMR)
		& ~MXC_CCM_CBCMR_GPU3D_SHADER_CLK_SEL_MASK;

	mux = _get_mux6(parent, &mmdc_ch0_axi_clk[0],
		&pll3_usb_otg_main_clk,
		&pll2_pfd_594M, &pll3_pfd_720M, NULL, NULL);
	reg |= (mux << MXC_CCM_CBCMR_GPU3D_SHADER_CLK_SEL_OFFSET);
	__raw_writel(reg, MXC_CCM_CBCMR);

/* Current GPU3D shader rate: parent rate / (CBCMR PODF + 1). */
static unsigned long _clk_gpu3d_shader_get_rate(struct clk *clk)
	reg = __raw_readl(MXC_CCM_CBCMR);
	div = ((reg & MXC_CCM_CBCMR_GPU3D_SHADER_PODF_MASK) >>
		MXC_CCM_CBCMR_GPU3D_SHADER_PODF_OFFSET) + 1;

	return clk_get_rate(clk->parent) / div;

/* Program the GPU3D shader divider in CBCMR. */
static int _clk_gpu3d_shader_set_rate(struct clk *clk, unsigned long rate)
	u32 parent_rate = clk_get_rate(clk->parent);

	div = parent_rate / rate;

	reg = __raw_readl(MXC_CCM_CBCMR);
	reg &= ~MXC_CCM_CBCMR_GPU3D_SHADER_PODF_MASK;
	reg |= (div - 1) << MXC_CCM_CBCMR_GPU3D_SHADER_PODF_OFFSET;
	__raw_writel(reg, MXC_CCM_CBCMR);

/*
 * GPU3D shader clock (CCGR1/CG13, same gate as the GPU3D core) from
 * pll3_pfd_720M, with the MMDC ch0 AXI clock as secondary.
 */
static struct clk gpu3d_shader_clk = {
	__INIT_CLK_DEBUG(gpu3d_shader_clk)
	.parent = &pll3_pfd_720M,
	.secondary = &mmdc_ch0_axi_clk[0],
	.enable = _clk_enable,
	.enable_reg = MXC_CCM_CCGR1,
	.enable_shift = MXC_CCM_CCGRx_CG13_OFFSET,
	.disable = _clk_disable,
	.set_parent = _clk_gpu3d_shader_set_parent,
	.set_rate = _clk_gpu3d_shader_set_rate,
	.get_rate = _clk_gpu3d_shader_get_rate,
	.round_rate = _clk_gpu3d_shader_round_rate,
	.flags = AHB_HIGH_SET_POINT | CPU_FREQ_TRIG_UPDATE,
/* set the parent by the ipcg table */
/*
 * GPMI NAND controller clock chain (CCGR4): io -> apb -> bch ->
 * bch_apb -> pl301 bus, each entry pulling in the next via .secondary
 * so enabling gpmi_io brings up the whole path.
 */
static struct clk gpmi_nfc_clk[] = {
	__INIT_CLK_DEBUG(gpmi_io_clk)
	.parent = &enfc_clk,
	.secondary = &gpmi_nfc_clk[1],
	.enable = _clk_enable,
	.enable_reg = MXC_CCM_CCGR4,
	.enable_shift = MXC_CCM_CCGRx_CG14_OFFSET,
	.disable = _clk_disable,
	{ /* gpmi_apb_clk */
	__INIT_CLK_DEBUG(gpmi_apb_clk)
	.parent = &usdhc3_clk,
	.secondary = &gpmi_nfc_clk[2],
	.enable = _clk_enable,
	.enable_reg = MXC_CCM_CCGR4,
	.enable_shift = MXC_CCM_CCGRx_CG15_OFFSET,
	.disable = _clk_disable,
	__INIT_CLK_DEBUG(gpmi_bch_clk)
	.parent = &usdhc4_clk,
	.secondary = &gpmi_nfc_clk[3],
	.enable = _clk_enable,
	.enable_reg = MXC_CCM_CCGR4,
	.enable_shift = MXC_CCM_CCGRx_CG13_OFFSET,
	.disable = _clk_disable,
	__INIT_CLK_DEBUG(gpmi_bch_apb_clk)
	.parent = &usdhc3_clk,
	.secondary = &gpmi_nfc_clk[4],
	.enable = _clk_enable,
	.enable_reg = MXC_CCM_CCGR4,
	.enable_shift = MXC_CCM_CCGRx_CG12_OFFSET,
	.disable = _clk_disable,
	{ /* bch relative clk */
	__INIT_CLK_DEBUG(pl301_mx6qperl_bch)
	.parent = &mx6per1_clk,
	.secondary = &mmdc_ch0_axi_clk[0],
/*
 * PWM1..4 clocks: one gate per instance in CCGR4 (CG8..CG11), all fed
 * from the IPG peripheral clock.
 */
static struct clk pwm_clk[] = {
	__INIT_CLK_DEBUG(pwm_clk_0)
	.parent = &ipg_perclk,
	.enable_reg = MXC_CCM_CCGR4,
	.enable_shift = MXC_CCM_CCGRx_CG8_OFFSET,
	.enable = _clk_enable,
	.disable = _clk_disable,
	__INIT_CLK_DEBUG(pwm_clk_1)
	.parent = &ipg_perclk,
	.enable_reg = MXC_CCM_CCGR4,
	.enable_shift = MXC_CCM_CCGRx_CG9_OFFSET,
	.enable = _clk_enable,
	.disable = _clk_disable,
	__INIT_CLK_DEBUG(pwm_clk_2)
	.parent = &ipg_perclk,
	.enable_reg = MXC_CCM_CCGR4,
	.enable_shift = MXC_CCM_CCGRx_CG10_OFFSET,
	.enable = _clk_enable,
	.disable = _clk_disable,
	__INIT_CLK_DEBUG(pwm_clk_3)
	.parent = &ipg_perclk,
	.enable_reg = MXC_CCM_CCGR4,
	.enable_shift = MXC_CCM_CCGRx_CG11_OFFSET,
	.enable = _clk_enable,
	.disable = _clk_disable,
/*
 * Enable the PCIe reference clock output of PLL8 (ENET PLL) by
 * setting ANADIG_PLL_ENET_EN_PCIE with a read-modify-write of the
 * PLL8 control register.
 */
4307 static int _clk_pcie_enable(struct clk *clk)
4311 	/* Enable PCIe ref clock (the bit set below is EN_PCIE) */
4312 reg = __raw_readl(PLL8_ENET_BASE_ADDR);
4313 reg |= ANADIG_PLL_ENET_EN_PCIE;
4314 __raw_writel(reg, PLL8_ENET_BASE_ADDR);
/*
 * Disable the PCIe reference clock output of PLL8 (ENET PLL) by
 * clearing ANADIG_PLL_ENET_EN_PCIE.
 */
4321 static void _clk_pcie_disable(struct clk *clk)
4327 	/* Disable PCIe ref clock (the bit cleared below is EN_PCIE) */
4328 reg = __raw_readl(PLL8_ENET_BASE_ADDR);
4329 reg &= ~ANADIG_PLL_ENET_EN_PCIE;
4330 __raw_writel(reg, PLL8_ENET_BASE_ADDR);
/*
 * pcie_clk[] -- PCIe clocks.
 * [0]: the PCIe gate (CCGR4/CG0) whose enable/disable also toggles the
 *      PLL8 PCIe reference output via _clk_pcie_enable/_disable;
 *      chained to [1] through .secondary.
 * [1]: bus-side dependency keeping MMDC ch0 AXI and mx6fast1 alive.
 */
4333 static struct clk pcie_clk[] = {
4335 __INIT_CLK_DEBUG(pcie_clk)
4336 .parent = &pcie_axi_clk,
4337 .enable = _clk_pcie_enable,
4338 .disable = _clk_pcie_disable,
4339 .enable_reg = MXC_CCM_CCGR4,
4340 .enable_shift = MXC_CCM_CCGRx_CG0_OFFSET,
4341 .secondary = &pcie_clk[1],
4342 .flags = AHB_HIGH_SET_POINT | CPU_FREQ_TRIG_UPDATE,
4345 .parent = &mmdc_ch0_axi_clk[0],
4346 .secondary = &mx6fast1_clk,
/*
 * Power up PLL8 (ENET PLL) and enable its SATA reference output.
 * Sequence: clear POWER_DOWN, set EN, busy-wait for LOCK (panics if
 * the lock never arrives within SPIN_DELAY), drop BYPASS, then set
 * EN_SATA.  All steps are read-modify-writes of the PLL8 register.
 */
4350 static int _clk_sata_enable(struct clk *clk)
4354 	/* Clear Power Down and Enable PLLs */
4355 reg = __raw_readl(PLL8_ENET_BASE_ADDR);
4356 reg &= ~ANADIG_PLL_ENET_POWER_DOWN;
4357 __raw_writel(reg, PLL8_ENET_BASE_ADDR);
4359 reg = __raw_readl(PLL8_ENET_BASE_ADDR);
4360 reg |= ANADIG_PLL_ENET_EN;
4361 __raw_writel(reg, PLL8_ENET_BASE_ADDR);
4363 	/* Waiting for the PLL is locked */
4364 if (!WAIT(ANADIG_PLL_ENET_LOCK & __raw_readl(PLL8_ENET_BASE_ADDR),
4366 panic("pll8 lock failed\n");
4368 	/* Disable the bypass */
4369 reg = __raw_readl(PLL8_ENET_BASE_ADDR);
4370 reg &= ~ANADIG_PLL_ENET_BYPASS;
4371 __raw_writel(reg, PLL8_ENET_BASE_ADDR);
4373 	/* Enable SATA ref clock */
4374 reg = __raw_readl(PLL8_ENET_BASE_ADDR);
4375 reg |= ANADIG_PLL_ENET_EN_SATA;
4376 __raw_writel(reg, PLL8_ENET_BASE_ADDR);
/*
 * Disable the SATA reference clock output of PLL8 by clearing
 * ANADIG_PLL_ENET_EN_SATA.  The PLL itself is left running.
 */
4383 static void _clk_sata_disable(struct clk *clk)
4389 	/* Disable SATA ref clock */
4390 reg = __raw_readl(PLL8_ENET_BASE_ADDR);
4391 reg &= ~ANADIG_PLL_ENET_EN_SATA;
4392 __raw_writel(reg, PLL8_ENET_BASE_ADDR);
/*
 * sata_clk[] -- SATA clocks.
 * [0]: CCGR5/CG2 gate whose enable/disable also drives the PLL8 SATA
 *      reference output (see _clk_sata_enable/_disable); chained to [1].
 * [1]: bus-side dependency on MMDC ch0 AXI and mx6per1.
 */
4395 static struct clk sata_clk[] = {
4397 __INIT_CLK_DEBUG(sata_clk)
4399 .enable = _clk_sata_enable,
4400 .enable_reg = MXC_CCM_CCGR5,
4401 .enable_shift = MXC_CCM_CCGRx_CG2_OFFSET,
4402 .disable = _clk_sata_disable,
4403 .secondary = &sata_clk[1],
4404 .flags = AHB_HIGH_SET_POINT | CPU_FREQ_TRIG_UPDATE,
4407 .parent = &mmdc_ch0_axi_clk[0],
4408 .secondary = &mx6per1_clk,
/*
 * usboh3_clk[] -- USB OH3 core clocks.
 * [0]: CCGR6/CG0 gate using the generic _clk_enable/_clk_disable;
 *      chained to [1] via .secondary.
 * [1]: bus-side dependency on MMDC ch0 AXI and mx6per1.
 */
4412 static struct clk usboh3_clk[] = {
4414 __INIT_CLK_DEBUG(usboh3_clk)
4416 .enable = _clk_enable,
4417 .enable_reg = MXC_CCM_CCGR6,
4418 .enable_shift = MXC_CCM_CCGRx_CG0_OFFSET,
4419 .disable = _clk_disable,
4420 .secondary = &usboh3_clk[1],
4421 .flags = AHB_HIGH_SET_POINT | CPU_FREQ_TRIG_UPDATE,
4424 .parent = &mmdc_ch0_axi_clk[0],
4425 .secondary = &mx6per1_clk,
/*
 * Generic single-bit enable: sets bit enable_shift in enable_reg.
 * Used for one-bit enables such as the CCOSR CKO output (the CCGR
 * gates elsewhere in this file use 2-bit fields written as 3 << shift).
 */
4429 static int _clk_enable1(struct clk *clk)
4432 reg = __raw_readl(clk->enable_reg);
4433 reg |= 1 << clk->enable_shift;
4434 __raw_writel(reg, clk->enable_reg);
/*
 * Generic single-bit disable: clears bit enable_shift in enable_reg.
 * Counterpart of _clk_enable1.
 */
4439 static void _clk_disable1(struct clk *clk)
4442 reg = __raw_readl(clk->enable_reg);
4443 reg &= ~(1 << clk->enable_shift);
4444 __raw_writel(reg, clk->enable_reg);
/*
 * Select the source for the CKO1 (CLKO) output pin.
 * Maps the requested parent clk to the corresponding CKOL mux selector
 * value, then programs it into the CCOSR CKOL_SEL field with a
 * read-modify-write.  Unlisted parents are rejected (the else path,
 * elided here, presumably returns -EINVAL -- confirm).
 */
4447 static int _clk_clko_set_parent(struct clk *clk, struct clk *parent)
4451 if (parent == &pll3_usb_otg_main_clk)
4453 else if (parent == &pll2_528_bus_main_clk)
4455 else if (parent == &pll1_sys_main_clk)
4457 else if (parent == &pll5_video_main_clk)
4459 else if (parent == &axi_clk)
4461 else if (parent == &enfc_clk)
4463 else if (parent == &ipu1_di_clk[0])
4465 else if (parent == &ipu1_di_clk[1])
4467 else if (parent == &ipu2_di_clk[0])
4469 else if (parent == &ipu2_di_clk[1])
4471 else if (parent == &ahb_clk)
4473 else if (parent == &ipg_clk)
4475 else if (parent == &ipg_perclk)
4477 else if (parent == &ckil_clk)
4479 else if (parent == &pll4_audio_main_clk)
4484 reg = __raw_readl(MXC_CCM_CCOSR);
4485 reg &= ~MXC_CCM_CCOSR_CKOL_SEL_MASK;
4486 reg |= sel << MXC_CCM_CCOSR_CKOL_SEL_OFFSET;
4487 __raw_writel(reg, MXC_CCM_CCOSR);
/*
 * CKO1 output rate = parent rate / (CCOSR CKOL_DIV field + 1).
 * The divider field is 0-based in hardware, hence the +1.
 */
4491 static unsigned long _clk_clko_get_rate(struct clk *clk)
4493 u32 reg = __raw_readl(MXC_CCM_CCOSR);
4494 u32 div = ((reg & MXC_CCM_CCOSR_CKOL_DIV_MASK) >>
4495 MXC_CCM_CCOSR_CKOL_DIV_OFFSET) + 1;
4496 return clk_get_rate(clk->parent) / div;
/*
 * Program the CKO1 divider for the requested rate.
 * Only exact integer divisions up to /8 are accepted; anything else
 * fails.  The hardware field is 0-based, hence (div - 1).
 * NOTE(review): if rate > parent_rate then div computes to 0 and the
 * (parent_rate / div) check would divide by zero -- confirm that the
 * lines elided from this view contain a div == 0 guard.
 */
4499 static int _clk_clko_set_rate(struct clk *clk, unsigned long rate)
4502 u32 parent_rate = clk_get_rate(clk->parent);
4503 u32 div = parent_rate / rate;
4507 if (((parent_rate / div) != rate) || (div > 8))
4510 reg = __raw_readl(MXC_CCM_CCOSR);
4511 reg &= ~MXC_CCM_CCOSR_CKOL_DIV_MASK;
4512 reg |= (div - 1) << MXC_CCM_CCOSR_CKOL_DIV_OFFSET;
4513 __raw_writel(reg, MXC_CCM_CCOSR);
/*
 * Round a requested CKO1 rate to the nearest achievable value,
 * parent_rate / div, after clamping div (per the comment below, the
 * clamping lines are elided from this view).
 */
4517 static unsigned long _clk_clko_round_rate(struct clk *clk,
4520 u32 parent_rate = clk_get_rate(clk->parent);
4521 u32 div = parent_rate / rate;
4523 	/* Make sure rate is not greater than the maximum value for the clock.
4524 	 * Also prevent a div of 0.
4530 return parent_rate / div;
/*
 * clko_clk -- the CKO1 observation/output clock.
 * Uses the single-bit enable helpers (_clk_enable1/_clk_disable1) on
 * the CCOSR CKOL_EN bit rather than a CCGR gate, and the CKOL mux/div
 * helpers above for parent and rate control.
 */
4533 static struct clk clko_clk = {
4534 __INIT_CLK_DEBUG(clko_clk)
4535 .parent = &pll2_528_bus_main_clk,
4536 .enable = _clk_enable1,
4537 .enable_reg = MXC_CCM_CCOSR,
4538 .enable_shift = MXC_CCM_CCOSR_CKOL_EN_OFFSET,
4539 .disable = _clk_disable1,
4540 .set_parent = _clk_clko_set_parent,
4541 .set_rate = _clk_clko_set_rate,
4542 .get_rate = _clk_clko_get_rate,
4543 .round_rate = _clk_clko_round_rate,
/*
 * dummy_clk -- placeholder clock for devices (e.g. the watchdogs
 * below) that need a clkdev entry but no real gate.
 */
4546 static struct clk dummy_clk = {
/* Helper to build a clk_lookup entry; body elided from this view. */
4550 #define _REGISTER_CLOCK(d, n, c) \
/*
 * clkdev lookup table: maps (dev_id, con_id) pairs to the static clk
 * instances above so drivers can clk_get() them.  Registered in
 * mx6_clocks_init().  clkdev matches the FIRST entry that fits, so
 * duplicate con_ids shadow every later entry with the same name --
 * see the NOTE(review) comments below.
 */
4558 static struct clk_lookup lookups[] = {
4559 _REGISTER_CLOCK(NULL, "osc", osc_clk),
4560 _REGISTER_CLOCK(NULL, "ckih", ckih_clk),
4561 _REGISTER_CLOCK(NULL, "ckih2", ckih2_clk),
4562 _REGISTER_CLOCK(NULL, "ckil", ckil_clk),
4563 _REGISTER_CLOCK(NULL, "pll1_main_clk", pll1_sys_main_clk),
4564 _REGISTER_CLOCK(NULL, "pll1_sw_clk", pll1_sw_clk),
4565 _REGISTER_CLOCK(NULL, "pll2", pll2_528_bus_main_clk),
4566 _REGISTER_CLOCK(NULL, "pll2_pfd_400M", pll2_pfd_400M),
4567 _REGISTER_CLOCK(NULL, "pll2_pfd_352M", pll2_pfd_352M),
4568 _REGISTER_CLOCK(NULL, "pll2_pfd_594M", pll2_pfd_594M),
4569 _REGISTER_CLOCK(NULL, "pll2_200M", pll2_200M),
4570 _REGISTER_CLOCK(NULL, "pll3_main_clk", pll3_usb_otg_main_clk),
4571 _REGISTER_CLOCK(NULL, "pll3_pfd_508M", pll3_pfd_508M),
4572 _REGISTER_CLOCK(NULL, "pll3_pfd_454M", pll3_pfd_454M),
4573 _REGISTER_CLOCK(NULL, "pll3_pfd_720M", pll3_pfd_720M),
4574 _REGISTER_CLOCK(NULL, "pll3_pfd_540M", pll3_pfd_540M),
4575 _REGISTER_CLOCK(NULL, "pll3_sw_clk", pll3_sw_clk),
4576 _REGISTER_CLOCK(NULL, "pll3_120M", pll3_120M),
/*
 * NOTE(review): the next two entries reuse con_id "pll3_120M" for
 * pll3_80M and pll3_60M; clk_get(NULL, "pll3_80M"/"pll3_60M") cannot
 * resolve and these entries are shadowed by the one above -- confirm
 * whether the names should be "pll3_80M"/"pll3_60M".
 */
4577 _REGISTER_CLOCK(NULL, "pll3_120M", pll3_80M),
4578 _REGISTER_CLOCK(NULL, "pll3_120M", pll3_60M),
4579 _REGISTER_CLOCK(NULL, "pll4", pll4_audio_main_clk),
4580 _REGISTER_CLOCK(NULL, "pll5", pll5_video_main_clk),
/*
 * NOTE(review): "pll4" below duplicates the pll4_audio entry above
 * (and again for pll8_enet), and "pll3" does not match the pll7 clock
 * it names -- these entries look like copy/paste slips and are
 * unreachable or misleading via clk_get; confirm intended con_ids.
 */
4581 _REGISTER_CLOCK(NULL, "pll4", pll6_MLB_main_clk),
4582 _REGISTER_CLOCK(NULL, "pll3", pll7_usb_host_main_clk),
4583 _REGISTER_CLOCK(NULL, "pll4", pll8_enet_main_clk),
4584 _REGISTER_CLOCK(NULL, "cpu_clk", cpu_clk),
4585 _REGISTER_CLOCK(NULL, "periph_clk", periph_clk),
4586 _REGISTER_CLOCK(NULL, "axi_clk", axi_clk),
4587 _REGISTER_CLOCK(NULL, "mmdc_ch0_axi", mmdc_ch0_axi_clk[0]),
4588 _REGISTER_CLOCK(NULL, "mmdc_ch1_axi", mmdc_ch1_axi_clk[0]),
4589 _REGISTER_CLOCK(NULL, "ahb", ahb_clk),
4590 _REGISTER_CLOCK(NULL, "ipg_clk", ipg_clk),
4591 _REGISTER_CLOCK(NULL, "ipg_perclk", ipg_perclk),
4592 _REGISTER_CLOCK(NULL, "spba", spba_clk),
4593 _REGISTER_CLOCK("imx-sdma", NULL, sdma_clk[0]),
4594 _REGISTER_CLOCK(NULL, "gpu2d_axi_clk", gpu2d_axi_clk),
4595 _REGISTER_CLOCK(NULL, "gpu3d_axi_clk", gpu3d_axi_clk),
4596 _REGISTER_CLOCK(NULL, "pcie_axi_clk", pcie_axi_clk),
4597 _REGISTER_CLOCK(NULL, "vdo_axi_clk", vdo_axi_clk),
4598 _REGISTER_CLOCK(NULL, "iim_clk", iim_clk),
4599 _REGISTER_CLOCK(NULL, "i2c_clk", i2c_clk[0]),
4600 _REGISTER_CLOCK("imx-i2c.1", NULL, i2c_clk[1]),
4601 _REGISTER_CLOCK("imx-i2c.2", NULL, i2c_clk[2]),
4602 _REGISTER_CLOCK(NULL, "vpu_clk", vpu_clk[0]),
4603 _REGISTER_CLOCK(NULL, "ipu1_clk", ipu1_clk),
4604 _REGISTER_CLOCK(NULL, "ipu2_clk", ipu2_clk),
4605 _REGISTER_CLOCK(NULL, "cko1_clk0", cko1_clk0),
4606 _REGISTER_CLOCK("sdhci-esdhc-imx.0", NULL, usdhc1_clk),
4607 _REGISTER_CLOCK("sdhci-esdhc-imx.1", NULL, usdhc2_clk),
4608 _REGISTER_CLOCK("sdhci-esdhc-imx.2", NULL, usdhc3_clk),
4609 _REGISTER_CLOCK("sdhci-esdhc-imx.3", NULL, usdhc4_clk),
4610 _REGISTER_CLOCK("imx-ssi.0", NULL, ssi1_clk),
4611 _REGISTER_CLOCK("imx-ssi.1", NULL, ssi2_clk),
4612 _REGISTER_CLOCK("imx-ssi.2", NULL, ssi3_clk),
4613 _REGISTER_CLOCK(NULL, "ipu1_di0_clk", ipu1_di_clk[0]),
4614 _REGISTER_CLOCK(NULL, "ipu1_di1_clk", ipu1_di_clk[1]),
4615 _REGISTER_CLOCK(NULL, "ipu2_di0_clk", ipu2_di_clk[0]),
4616 _REGISTER_CLOCK(NULL, "ipu2_di1_clk", ipu2_di_clk[1]),
4617 _REGISTER_CLOCK("FlexCAN.0", "can_clk", can1_clk[0]),
4618 _REGISTER_CLOCK("FlexCAN.1", "can_clk", can2_clk[0]),
4619 _REGISTER_CLOCK(NULL, "ldb_di0_clk", ldb_di0_clk),
4620 _REGISTER_CLOCK(NULL, "ldb_di1_clk", ldb_di1_clk),
4621 _REGISTER_CLOCK("mxc_spdif.0", NULL, spdif0_clk[0]),
4622 _REGISTER_CLOCK(NULL, "esai_clk", esai_clk),
4623 _REGISTER_CLOCK("imx6q-ecspi.0", NULL, ecspi_clk[0]),
4624 _REGISTER_CLOCK("imx6q-ecspi.1", NULL, ecspi_clk[1]),
4625 _REGISTER_CLOCK("imx6q-ecspi.2", NULL, ecspi_clk[2]),
4626 _REGISTER_CLOCK("imx6q-ecspi.3", NULL, ecspi_clk[3]),
4627 _REGISTER_CLOCK("imx6q-ecspi.4", NULL, ecspi_clk[4]),
4628 _REGISTER_CLOCK(NULL, "emi_slow_clk", emi_slow_clk),
4629 _REGISTER_CLOCK(NULL, "emi_clk", emi_clk),
4630 _REGISTER_CLOCK(NULL, "enfc_clk", enfc_clk),
/* All four UART instances map to uart_clk[0] (shared gate) --
 * presumably intentional; verify against the UART driver. */
4631 _REGISTER_CLOCK("imx-uart.0", NULL, uart_clk[0]),
4632 _REGISTER_CLOCK("imx-uart.1", NULL, uart_clk[0]),
4633 _REGISTER_CLOCK("imx-uart.2", NULL, uart_clk[0]),
4634 _REGISTER_CLOCK("imx-uart.3", NULL, uart_clk[0]),
4635 _REGISTER_CLOCK(NULL, "hsi_tx", hsi_tx_clk[0]),
4636 _REGISTER_CLOCK(NULL, "caam_clk", caam_clk[0]),
4637 _REGISTER_CLOCK(NULL, "asrc_clk", asrc_clk[0]),
4638 _REGISTER_CLOCK(NULL, "asrc_serial_clk", asrc_clk[1]),
4639 _REGISTER_CLOCK("mxs-dma-apbh", NULL, apbh_dma_clk),
4640 _REGISTER_CLOCK(NULL, "openvg_axi_clk", openvg_axi_clk),
4641 _REGISTER_CLOCK(NULL, "gpu3d_clk", gpu3d_core_clk[0]),
4642 _REGISTER_CLOCK(NULL, "gpu2d_clk", gpu2d_core_clk[0]),
4643 _REGISTER_CLOCK(NULL, "gpu3d_shader_clk", gpu3d_shader_clk),
4644 _REGISTER_CLOCK(NULL, "gpt", gpt_clk[0]),
4645 _REGISTER_CLOCK("imx6q-gpmi-nfc.0", NULL, gpmi_nfc_clk[0]),
4646 _REGISTER_CLOCK(NULL, "gpmi-apb", gpmi_nfc_clk[1]),
4647 _REGISTER_CLOCK(NULL, "bch", gpmi_nfc_clk[2]),
4648 _REGISTER_CLOCK(NULL, "bch-apb", gpmi_nfc_clk[3]),
4649 _REGISTER_CLOCK(NULL, "pl301_mx6qperl-bch", gpmi_nfc_clk[4]),
4650 _REGISTER_CLOCK("mxc_pwm.0", NULL, pwm_clk[0]),
4651 _REGISTER_CLOCK("mxc_pwm.1", NULL, pwm_clk[1]),
4652 _REGISTER_CLOCK("mxc_pwm.2", NULL, pwm_clk[2]),
4653 _REGISTER_CLOCK("mxc_pwm.3", NULL, pwm_clk[3]),
4654 _REGISTER_CLOCK(NULL, "pcie_clk", pcie_clk[0]),
4655 _REGISTER_CLOCK("fec.0", NULL, enet_clk[0]),
4656 _REGISTER_CLOCK(NULL, "imx_sata_clk", sata_clk[0]),
4657 _REGISTER_CLOCK(NULL, "usboh3_clk", usboh3_clk[0]),
4658 _REGISTER_CLOCK(NULL, "usb_phy1_clk", usb_phy1_clk),
/* Watchdogs take the no-op dummy clock. */
4659 _REGISTER_CLOCK("imx2-wdt.0", NULL, dummy_clk),
4660 _REGISTER_CLOCK("imx2-wdt.1", NULL, dummy_clk),
4661 _REGISTER_CLOCK(NULL, "hdmi_isfr_clk", hdmi_clk[0]),
4662 _REGISTER_CLOCK(NULL, "hdmi_iahb_clk", hdmi_clk[1]),
4663 _REGISTER_CLOCK(NULL, NULL, vdoa_clk),
4664 _REGISTER_CLOCK(NULL, NULL, aips_tz2_clk),
4665 _REGISTER_CLOCK(NULL, NULL, aips_tz1_clk),
4666 _REGISTER_CLOCK(NULL, "clko_clk", clko_clk),
/*
 * One-time clock-tree fixup at init: reads the MMDC MDMISC register
 * and, when the DDR_TYPE field reads 1 (presumably LPDDR2 -- confirm
 * against the MMDC chapter of the reference manual), reparents
 * periph_clk onto pll2_pfd_400M.
 */
4670 static void clk_tree_init(void)
4675 reg = __raw_readl(MMDC_MDMISC_OFFSET);
4676 if ((reg & MMDC_MDMISC_DDR_TYPE_MASK) ==
4677 (0x1 << MMDC_MDMISC_DDR_TYPE_OFFSET)) {
4678 clk_set_parent(&periph_clk, &pll2_pfd_400M);
4679 printk(KERN_INFO "Set periph_clk's parent to pll2_pfd_400M!\n");
4684 int __init mx6_clocks_init(unsigned long ckil, unsigned long osc,
4685 unsigned long ckih1, unsigned long ckih2)
4690 external_low_reference = ckil;
4691 external_high_reference = ckih1;
4692 ckih2_reference = ckih2;
4693 oscillator_reference = osc;
4695 apll_base = ioremap(ANATOP_BASE_ADDR, SZ_4K);
4697 for (i = 0; i < ARRAY_SIZE(lookups); i++) {
4698 clkdev_add(&lookups[i]);
4699 clk_debug_register(lookups[i].clk);
4702 /* Disable un-necessary PFDs & PLLs */
4704 /* keep correct count. */
4705 clk_enable(&cpu_clk);
4706 clk_enable(&periph_clk);
4710 if (pll2_pfd_400M.usecount == 0)
4711 pll2_pfd_400M.disable(&pll2_pfd_400M);
4712 pll2_pfd_352M.disable(&pll2_pfd_352M);
4713 pll2_pfd_594M.disable(&pll2_pfd_594M);
4715 pll3_pfd_454M.disable(&pll3_pfd_454M);
4716 pll3_pfd_508M.disable(&pll3_pfd_508M);
4717 pll3_pfd_540M.disable(&pll3_pfd_540M);
4718 pll3_pfd_720M.disable(&pll3_pfd_720M);
4720 pll3_usb_otg_main_clk.disable(&pll3_usb_otg_main_clk);
4721 pll4_audio_main_clk.disable(&pll4_audio_main_clk);
4722 pll5_video_main_clk.disable(&pll5_video_main_clk);
4723 pll6_MLB_main_clk.disable(&pll6_MLB_main_clk);
4724 pll7_usb_host_main_clk.disable(&pll7_usb_host_main_clk);
4725 pll8_enet_main_clk.disable(&pll8_enet_main_clk);
4727 sata_clk[0].disable(&sata_clk[0]);
4728 pcie_clk[0].disable(&pcie_clk[0]);
4730 /* Initialize Audio and Video PLLs to valid frequency (650MHz). */
4731 clk_set_rate(&pll4_audio_main_clk, 650000000);
4732 clk_set_rate(&pll5_video_main_clk, 650000000);
4734 clk_set_parent(&ipu1_di_clk[0], &pll5_video_main_clk);
4735 clk_set_parent(&ipu1_di_clk[1], &pll5_video_main_clk);
4736 clk_set_parent(&ipu2_di_clk[0], &pll5_video_main_clk);
4737 clk_set_parent(&ipu2_di_clk[1], &pll5_video_main_clk);
4739 clk_set_parent(&cko1_clk0, &ipg_clk);
4740 clk_set_rate(&cko1_clk0, 22000000);
4741 clk_enable(&cko1_clk0);
4743 clk_set_parent(&gpu3d_shader_clk, &pll2_pfd_594M);
4744 clk_set_rate(&gpu3d_shader_clk, 594000000);
4745 clk_set_parent(&gpu3d_core_clk[0], &mmdc_ch0_axi_clk[0]);
4746 clk_set_rate(&gpu3d_core_clk[0], 528000000);
4749 * FIXME: asrc needs to use asrc_serial(spdif1) clock to do sample rate convertion,
4750 * however we found it only works when set to 1.5M clock and the
4751 * parent is pll3_sw_clk.
4753 clk_set_parent(&asrc_clk[1], &pll3_sw_clk);
4754 clk_set_rate(&asrc_clk[1], 1500000);
4756 /* set the NAND to 11MHz. Too fast will cause dma timeout. */
4757 clk_set_rate(&enfc_clk, enfc_clk.round_rate(&enfc_clk, 11000000));
4760 cpu_op_tbl = get_cpu_op(&cpu_op_nr);
4762 /* Gate off all possible clocks */
4763 if (mxc_jtag_enabled) {
4764 __raw_writel(3 << MXC_CCM_CCGRx_CG11_OFFSET |
4765 3 << MXC_CCM_CCGRx_CG2_OFFSET |
4766 3 << MXC_CCM_CCGRx_CG1_OFFSET |
4767 3 << MXC_CCM_CCGRx_CG0_OFFSET, MXC_CCM_CCGR0);
4769 __raw_writel(1 << MXC_CCM_CCGRx_CG11_OFFSET |
4770 3 << MXC_CCM_CCGRx_CG2_OFFSET |
4771 3 << MXC_CCM_CCGRx_CG1_OFFSET |
4772 3 << MXC_CCM_CCGRx_CG0_OFFSET, MXC_CCM_CCGR0);
4774 __raw_writel(3 << MXC_CCM_CCGRx_CG10_OFFSET, MXC_CCM_CCGR1);
4775 __raw_writel(1 << MXC_CCM_CCGRx_CG12_OFFSET |
4776 1 << MXC_CCM_CCGRx_CG11_OFFSET |
4777 3 << MXC_CCM_CCGRx_CG10_OFFSET |
4778 3 << MXC_CCM_CCGRx_CG9_OFFSET |
4779 3 << MXC_CCM_CCGRx_CG8_OFFSET, MXC_CCM_CCGR2);
4780 __raw_writel(1 << MXC_CCM_CCGRx_CG14_OFFSET |
4781 3 << MXC_CCM_CCGRx_CG13_OFFSET |
4782 3 << MXC_CCM_CCGRx_CG12_OFFSET |
4783 3 << MXC_CCM_CCGRx_CG11_OFFSET |
4784 3 << MXC_CCM_CCGRx_CG10_OFFSET, MXC_CCM_CCGR3);
4785 __raw_writel(3 << MXC_CCM_CCGRx_CG7_OFFSET |
4786 1 << MXC_CCM_CCGRx_CG6_OFFSET |
4787 1 << MXC_CCM_CCGRx_CG4_OFFSET, MXC_CCM_CCGR4);
4788 __raw_writel(1 << MXC_CCM_CCGRx_CG0_OFFSET, MXC_CCM_CCGR5);
4790 __raw_writel(0, MXC_CCM_CCGR6);
4792 /* Lower the ipg_perclk frequency to 8.25MHz. */
4793 clk_set_rate(&ipg_perclk, 8250000);
4796 clk_set_parent(&spdif0_clk[0], &pll3_pfd_454M);
4798 base = ioremap(GPT_BASE_ADDR, SZ_4K);
4799 mxc_timer_init(&gpt_clk[0], base, MXC_INT_GPT);