/*
 * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved.
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */
15 #include <linux/kernel.h>
16 #include <linux/init.h>
17 #include <linux/types.h>
18 #include <linux/time.h>
19 #include <linux/hrtimer.h>
21 #include <linux/errno.h>
22 #include <linux/delay.h>
23 #include <linux/clk.h>
25 #include <linux/clkdev.h>
26 #include <asm/div64.h>
27 #include <mach/hardware.h>
28 #include <mach/common.h>
29 #include <mach/clock.h>
30 #include <mach/mxc_dvfs.h>
32 #include "cpu_op-mx6.h"
/* Attach a human-readable name to a clk initializer only when clk debug
 * support is compiled in; expands to nothing otherwise. */
#ifdef CONFIG_CLK_DEBUG
#define __INIT_CLK_DEBUG(n)	.name = #n,
#else
#define __INIT_CLK_DEBUG(n)
#endif
40 extern int mxc_jtag_enabled;
41 extern struct cpu_op *(*get_cpu_op)(int *op);
43 void __iomem *apll_base;
44 static struct clk pll1_sys_main_clk;
45 static struct clk pll2_528_bus_main_clk;
46 static struct clk pll3_usb_otg_main_clk;
47 static struct clk pll4_audio_main_clk;
48 static struct clk pll5_video_main_clk;
49 static struct clk pll6_MLB_main_clk;
50 static struct clk pll7_usb_host_main_clk;
51 static struct clk pll8_enet_main_clk;
52 static struct clk apbh_dma_clk;
53 static struct clk openvg_axi_clk;
54 static struct cpu_op *cpu_op_tbl;
57 #define SPIN_DELAY 1000000 /* in nanoseconds */
59 #define AUDIO_VIDEO_MIN_CLK_FREQ 650000000
60 #define AUDIO_VIDEO_MAX_CLK_FREQ 1300000000
/*
 * Busy-wait until 'exp' becomes true or 'timeout' nanoseconds elapse.
 * Evaluates (statement expression) to 1 on success, 0 on timeout.
 * NOTE(review): the tv_nsec-only comparison assumes the wait never spans a
 * second boundary — acceptable for the sub-millisecond SPIN_DELAY used here.
 */
#define WAIT(exp, timeout) \
({ \
	struct timespec nstimeofday; \
	struct timespec curtime; \
	int result = 1; \
	getnstimeofday(&nstimeofday); \
	while (!(exp)) { \
		getnstimeofday(&curtime); \
		if ((curtime.tv_nsec - nstimeofday.tv_nsec) > (timeout)) { \
			result = 0; \
			break; \
		} \
	} \
	result; \
})
78 /* External clock values passed-in by the board code */
79 static unsigned long external_high_reference, external_low_reference;
80 static unsigned long oscillator_reference, ckih2_reference;
82 static void __calc_pre_post_dividers(u32 max_podf, u32 div, u32 *pre, u32 *post)
84 u32 min_pre, temp_pre, old_err, err;
86 /* Some of the podfs are 3 bits while others are 6 bits.
87 * Handle both cases here.
89 if (div >= 512 && (max_podf == 64)) {
90 /* For pre = 3bits and podf = 6 bits, max divider is 512. */
93 } else if (div >= 64 && (max_podf == 8)) {
94 /* For pre = 3bits and podf = 3 bits, max divider is 64. */
97 } else if (div >= 8) {
98 /* Find the minimum pre-divider for a max podf */
100 min_pre = (div - 1) / (1 << 6) + 1;
102 min_pre = (div - 1) / (1 << 3) + 1;
104 /* Now loop through to find the max pre-divider. */
105 for (temp_pre = 8; temp_pre >= min_pre; temp_pre--) {
106 err = div % temp_pre;
111 err = temp_pre - err;
117 *post = (div + *pre - 1) / *pre;
118 } else if (div < 8) {
124 static int _clk_enable(struct clk *clk)
127 reg = __raw_readl(clk->enable_reg);
128 reg |= MXC_CCM_CCGRx_CG_MASK << clk->enable_shift;
129 __raw_writel(reg, clk->enable_reg);
134 static void _clk_disable(struct clk *clk)
137 reg = __raw_readl(clk->enable_reg);
138 reg &= ~(MXC_CCM_CCGRx_CG_MASK << clk->enable_shift);
139 __raw_writel(reg, clk->enable_reg);
142 static void _clk_disable_inwait(struct clk *clk)
145 reg = __raw_readl(clk->enable_reg);
146 reg &= ~(MXC_CCM_CCGRx_CG_MASK << clk->enable_shift);
147 reg |= 1 << clk->enable_shift;
148 __raw_writel(reg, clk->enable_reg);
152 * For the 4-to-1 muxed input clock
154 static inline u32 _get_mux(struct clk *parent, struct clk *m0,
155 struct clk *m1, struct clk *m2, struct clk *m3)
159 else if (parent == m1)
161 else if (parent == m2)
163 else if (parent == m3)
171 static inline void __iomem *_get_pll_base(struct clk *pll)
173 if (pll == &pll1_sys_main_clk)
174 return PLL1_SYS_BASE_ADDR;
175 else if (pll == &pll2_528_bus_main_clk)
176 return PLL2_528_BASE_ADDR;
177 else if (pll == &pll3_usb_otg_main_clk)
178 return PLL3_480_USB1_BASE_ADDR;
179 else if (pll == &pll4_audio_main_clk)
180 return PLL4_AUDIO_BASE_ADDR;
181 else if (pll == &pll5_video_main_clk)
182 return PLL5_VIDEO_BASE_ADDR;
183 else if (pll == &pll6_MLB_main_clk)
184 return PLL6_MLB_BASE_ADDR;
185 else if (pll == &pll7_usb_host_main_clk)
186 return PLL7_480_USB2_BASE_ADDR;
187 else if (pll == &pll8_enet_main_clk)
188 return PLL8_ENET_BASE_ADDR;
196 * For the 6-to-1 muxed input clock
198 static inline u32 _get_mux6(struct clk *parent, struct clk *m0, struct clk *m1,
199 struct clk *m2, struct clk *m3, struct clk *m4,
204 else if (parent == m1)
206 else if (parent == m2)
208 else if (parent == m3)
210 else if (parent == m4)
212 else if (parent == m5)
219 static unsigned long get_high_reference_clock_rate(struct clk *clk)
221 return external_high_reference;
224 static unsigned long get_low_reference_clock_rate(struct clk *clk)
226 return external_low_reference;
229 static unsigned long get_oscillator_reference_clock_rate(struct clk *clk)
231 return oscillator_reference;
234 static unsigned long get_ckih2_reference_clock_rate(struct clk *clk)
236 return ckih2_reference;
239 /* External high frequency clock */
240 static struct clk ckih_clk = {
241 __INIT_CLK_DEBUG(ckih_clk)
242 .get_rate = get_high_reference_clock_rate,
245 static struct clk ckih2_clk = {
246 __INIT_CLK_DEBUG(ckih2_clk)
247 .get_rate = get_ckih2_reference_clock_rate,
250 static struct clk osc_clk = {
251 __INIT_CLK_DEBUG(osc_clk)
252 .get_rate = get_oscillator_reference_clock_rate,
255 /* External low frequency (32kHz) clock */
256 static struct clk ckil_clk = {
257 __INIT_CLK_DEBUG(ckil_clk)
258 .get_rate = get_low_reference_clock_rate,
261 static unsigned long pfd_round_rate(struct clk *clk, unsigned long rate)
266 tmp = (u64)clk_get_rate(clk->parent) * 18;
270 frac = frac < 12 ? 12 : frac;
271 frac = frac > 35 ? 35 : frac;
272 tmp = (u64)clk_get_rate(clk->parent) * 18;
277 static unsigned long pfd_get_rate(struct clk *clk)
281 tmp = (u64)clk_get_rate(clk->parent) * 18;
283 if (apbh_dma_clk.usecount == 0)
284 apbh_dma_clk.enable(&apbh_dma_clk);
286 frac = (__raw_readl(clk->enable_reg) >> clk->enable_shift) &
287 ANADIG_PFD_FRAC_MASK;
294 static int pfd_set_rate(struct clk *clk, unsigned long rate)
298 tmp = (u64)clk_get_rate(clk->parent) * 18;
300 if (apbh_dma_clk.usecount == 0)
301 apbh_dma_clk.enable(&apbh_dma_clk);
303 /* Round up the divider so that we don't set a rate
304 * higher than what is requested. */
308 frac = frac < 12 ? 12 : frac;
309 frac = frac > 35 ? 35 : frac;
310 /* clear clk frac bits */
311 __raw_writel(ANADIG_PFD_FRAC_MASK << clk->enable_shift,
312 (int)clk->enable_reg + 8);
313 /* set clk frac bits */
314 __raw_writel(frac << clk->enable_shift,
315 (int)clk->enable_reg + 4);
317 tmp = (u64)clk_get_rate(clk->parent) * 18;
320 if (apbh_dma_clk.usecount == 0)
321 apbh_dma_clk.disable(&apbh_dma_clk);
325 static int _clk_pfd_enable(struct clk *clk)
327 if (apbh_dma_clk.usecount == 0)
328 apbh_dma_clk.enable(&apbh_dma_clk);
330 /* clear clk gate bit */
331 __raw_writel((1 << (clk->enable_shift + 7)),
332 (int)clk->enable_reg + 8);
334 if (apbh_dma_clk.usecount == 0)
335 apbh_dma_clk.disable(&apbh_dma_clk);
340 static void _clk_pfd_disable(struct clk *clk)
342 if (apbh_dma_clk.usecount == 0)
343 apbh_dma_clk.enable(&apbh_dma_clk);
345 /* set clk gate bit */
346 __raw_writel((1 << (clk->enable_shift + 7)),
347 (int)clk->enable_reg + 4);
349 if (apbh_dma_clk.usecount == 0)
350 apbh_dma_clk.disable(&apbh_dma_clk);
353 static int _clk_pll_enable(struct clk *clk)
356 void __iomem *pllbase;
358 pllbase = _get_pll_base(clk);
360 reg = __raw_readl(pllbase);
361 reg &= ~ANADIG_PLL_BYPASS;
362 reg &= ~ANADIG_PLL_POWER_DOWN;
364 /* The 480MHz PLLs have the opposite definition for power bit. */
365 if (clk == &pll3_usb_otg_main_clk || clk == &pll7_usb_host_main_clk)
366 reg |= ANADIG_PLL_POWER_DOWN;
368 __raw_writel(reg, pllbase);
370 /* Wait for PLL to lock */
371 if (!WAIT(__raw_readl(pllbase) & ANADIG_PLL_LOCK,
373 panic("pll enable failed\n");
375 /* Enable the PLL output now*/
376 reg = __raw_readl(pllbase);
377 reg |= ANADIG_PLL_ENABLE;
378 __raw_writel(reg, pllbase);
383 static void _clk_pll_disable(struct clk *clk)
386 void __iomem *pllbase;
388 pllbase = _get_pll_base(clk);
390 reg = __raw_readl(pllbase);
391 reg &= ~ANADIG_PLL_ENABLE;
392 reg |= ANADIG_PLL_BYPASS;
393 reg |= ANADIG_PLL_POWER_DOWN;
395 /* The 480MHz PLLs, pll3 & pll7, have the opposite
396 * definition for power bit.
398 if (clk == &pll3_usb_otg_main_clk || clk == &pll7_usb_host_main_clk)
399 reg &= ~ANADIG_PLL_POWER_DOWN;
400 __raw_writel(reg, pllbase);
403 static unsigned long _clk_pll1_main_get_rate(struct clk *clk)
408 div = __raw_readl(PLL1_SYS_BASE_ADDR) & ANADIG_PLL_SYS_DIV_SELECT_MASK;
409 val = (clk_get_rate(clk->parent) * div) / 2;
413 static int _clk_pll1_main_set_rate(struct clk *clk, unsigned long rate)
415 unsigned int reg, div;
417 if (rate < AUDIO_VIDEO_MIN_CLK_FREQ || rate > AUDIO_VIDEO_MAX_CLK_FREQ)
420 div = (rate * 2) / clk_get_rate(clk->parent) ;
422 reg = __raw_readl(PLL1_SYS_BASE_ADDR) & ~ANADIG_PLL_SYS_DIV_SELECT_MASK;
424 __raw_writel(reg, PLL1_SYS_BASE_ADDR);
429 static struct clk pll1_sys_main_clk = {
430 __INIT_CLK_DEBUG(pll1_sys_main_clk)
432 .get_rate = _clk_pll1_main_get_rate,
433 .set_rate = _clk_pll1_main_set_rate,
434 .enable = _clk_pll_enable,
435 .disable = _clk_pll_disable,
438 static int _clk_pll1_sw_set_parent(struct clk *clk, struct clk *parent)
442 reg = __raw_readl(MXC_CCM_CCSR);
444 if (parent == &pll1_sys_main_clk) {
445 reg &= ~MXC_CCM_CCSR_PLL1_SW_CLK_SEL;
446 __raw_writel(reg, MXC_CCM_CCSR);
447 /* Set the step_clk parent to be lp_apm, to save power. */
448 reg = __raw_readl(MXC_CCM_CCSR);
449 reg = (reg & ~MXC_CCM_CCSR_STEP_SEL);
451 /* Set STEP_CLK to be the parent*/
452 if (parent == &osc_clk) {
453 /* Set STEP_CLK to be sourced from LPAPM. */
454 reg = __raw_readl(MXC_CCM_CCSR);
455 reg = (reg & ~MXC_CCM_CCSR_STEP_SEL);
456 __raw_writel(reg, MXC_CCM_CCSR);
458 /* Set STEP_CLK to be sourced from PLL2-PDF (400MHz). */
459 reg = __raw_readl(MXC_CCM_CCSR);
460 reg |= MXC_CCM_CCSR_STEP_SEL;
461 __raw_writel(reg, MXC_CCM_CCSR);
464 reg = __raw_readl(MXC_CCM_CCSR);
465 reg |= MXC_CCM_CCSR_PLL1_SW_CLK_SEL;
466 reg = __raw_readl(MXC_CCM_CCSR);
468 __raw_writel(reg, MXC_CCM_CCSR);
473 static unsigned long _clk_pll1_sw_get_rate(struct clk *clk)
475 return clk_get_rate(clk->parent);
478 static struct clk pll1_sw_clk = {
479 __INIT_CLK_DEBUG(pll1_sw_clk)
480 .parent = &pll1_sys_main_clk,
481 .set_parent = _clk_pll1_sw_set_parent,
482 .get_rate = _clk_pll1_sw_get_rate,
485 static unsigned long _clk_pll2_main_get_rate(struct clk *clk)
490 div = __raw_readl(PLL2_528_BASE_ADDR) & ANADIG_PLL_528_DIV_SELECT;
493 val = clk_get_rate(clk->parent) * 22;
496 val = clk_get_rate(clk->parent) * 20;
501 static int _clk_pll2_main_set_rate(struct clk *clk, unsigned long rate)
503 unsigned int reg, div;
505 if (rate == 528000000)
507 else if (rate == 480000000)
512 reg = __raw_readl(PLL2_528_BASE_ADDR);
513 reg &= ~ANADIG_PLL_528_DIV_SELECT;
515 __raw_writel(reg, PLL2_528_BASE_ADDR);
520 static struct clk pll2_528_bus_main_clk = {
521 __INIT_CLK_DEBUG(pll2_528_bus_main_clk)
523 .get_rate = _clk_pll2_main_get_rate,
524 .set_rate = _clk_pll2_main_set_rate,
525 .enable = _clk_pll_enable,
526 .disable = _clk_pll_disable,
529 static struct clk pll2_pfd_400M = {
530 __INIT_CLK_DEBUG(pll2_pfd_400M)
531 .parent = &pll2_528_bus_main_clk,
532 .enable_reg = (void *)PFD_528_BASE_ADDR,
533 .enable_shift = ANADIG_PFD2_FRAC_OFFSET,
534 .enable = _clk_pfd_enable,
535 .disable = _clk_pfd_disable,
536 .get_rate = pfd_get_rate,
537 .set_rate = pfd_set_rate,
538 .get_rate = pfd_get_rate,
539 .round_rate = pfd_round_rate,
542 static struct clk pll2_pfd_352M = {
543 __INIT_CLK_DEBUG(pll2_pfd_352M)
544 .parent = &pll2_528_bus_main_clk,
545 .enable_reg = (void *)PFD_528_BASE_ADDR,
546 .enable_shift = ANADIG_PFD0_FRAC_OFFSET,
547 .enable = _clk_pfd_enable,
548 .disable = _clk_pfd_disable,
549 .set_rate = pfd_set_rate,
550 .get_rate = pfd_get_rate,
551 .round_rate = pfd_round_rate,
554 static struct clk pll2_pfd_594M = {
555 __INIT_CLK_DEBUG(pll2_pfd_594M)
556 .parent = &pll2_528_bus_main_clk,
557 .enable_reg = (void *)PFD_528_BASE_ADDR,
558 .enable_shift = ANADIG_PFD1_FRAC_OFFSET,
559 .enable = _clk_pfd_enable,
560 .disable = _clk_pfd_disable,
561 .set_rate = pfd_set_rate,
562 .get_rate = pfd_get_rate,
563 .round_rate = pfd_round_rate,
566 static unsigned long _clk_pll2_200M_get_rate(struct clk *clk)
568 return clk_get_rate(clk->parent) / 2;
571 static struct clk pll2_200M = {
572 __INIT_CLK_DEBUG(pll2_200M)
573 .parent = &pll2_pfd_400M,
574 .get_rate = _clk_pll2_200M_get_rate,
577 static unsigned long _clk_pll3_usb_otg_get_rate(struct clk *clk)
582 div = __raw_readl(PLL3_480_USB1_BASE_ADDR)
583 & ANADIG_PLL_480_DIV_SELECT_MASK;
586 val = clk_get_rate(clk->parent) * 22;
588 val = clk_get_rate(clk->parent) * 20;
592 static int _clk_pll3_usb_otg_set_rate(struct clk *clk, unsigned long rate)
594 unsigned int reg, div;
596 if (rate == 528000000)
598 else if (rate == 480000000)
603 reg = __raw_readl(PLL3_480_USB1_BASE_ADDR);
604 reg &= ~ANADIG_PLL_480_DIV_SELECT_MASK;
606 __raw_writel(reg, PLL3_480_USB1_BASE_ADDR);
612 /* same as pll3_main_clk. These two clocks should always be the same */
613 static struct clk pll3_usb_otg_main_clk = {
614 __INIT_CLK_DEBUG(pll3_usb_otg_main_clk)
616 .enable = _clk_pll_enable,
617 .disable = _clk_pll_disable,
618 .set_rate = _clk_pll3_usb_otg_set_rate,
619 .get_rate = _clk_pll3_usb_otg_get_rate,
622 static struct clk usb_phy1_clk = {
623 __INIT_CLK_DEBUG(usb_phy1_clk)
624 .parent = &pll3_usb_otg_main_clk,
625 .set_rate = _clk_pll3_usb_otg_set_rate,
626 .get_rate = _clk_pll3_usb_otg_get_rate,
629 static struct clk pll3_pfd_508M = {
630 __INIT_CLK_DEBUG(pll3_pfd_508M)
631 .parent = &pll3_usb_otg_main_clk,
632 .enable_reg = (void *)PFD_480_BASE_ADDR,
633 .enable_shift = ANADIG_PFD2_FRAC_OFFSET,
634 .enable = _clk_pfd_enable,
635 .disable = _clk_pfd_disable,
636 .set_rate = pfd_set_rate,
637 .get_rate = pfd_get_rate,
638 .round_rate = pfd_round_rate,
641 static struct clk pll3_pfd_454M = {
642 __INIT_CLK_DEBUG(pll3_pfd_454M)
643 .parent = &pll3_usb_otg_main_clk,
644 .enable_reg = (void *)PFD_480_BASE_ADDR,
645 .enable_shift = ANADIG_PFD3_FRAC_OFFSET,
646 .enable = _clk_pfd_enable,
647 .disable = _clk_pfd_disable,
648 .set_rate = pfd_set_rate,
649 .get_rate = pfd_get_rate,
650 .round_rate = pfd_round_rate,
653 static struct clk pll3_pfd_720M = {
654 __INIT_CLK_DEBUG(pll3_pfd_720M)
655 .parent = &pll3_usb_otg_main_clk,
656 .enable_reg = (void *)PFD_480_BASE_ADDR,
657 .enable_shift = ANADIG_PFD0_FRAC_OFFSET,
658 .enable = _clk_pfd_enable,
659 .disable = _clk_pfd_disable,
660 .set_rate = pfd_set_rate,
661 .get_rate = pfd_get_rate,
662 .round_rate = pfd_round_rate,
665 static struct clk pll3_pfd_540M = {
666 __INIT_CLK_DEBUG(pll3_pfd_540M)
667 .parent = &pll3_usb_otg_main_clk,
668 .enable_reg = (void *)PFD_480_BASE_ADDR,
669 .enable_shift = ANADIG_PFD1_FRAC_OFFSET,
670 .enable = _clk_pfd_enable,
671 .disable = _clk_pfd_disable,
672 .set_rate = pfd_set_rate,
673 .get_rate = pfd_get_rate,
674 .round_rate = pfd_round_rate,
675 .get_rate = pfd_get_rate,
678 static unsigned long _clk_pll3_sw_get_rate(struct clk *clk)
680 return clk_get_rate(clk->parent);
683 /* same as pll3_main_clk. These two clocks should always be the same */
684 static struct clk pll3_sw_clk = {
685 __INIT_CLK_DEBUG(pll3_sw_clk)
686 .parent = &pll3_usb_otg_main_clk,
687 .get_rate = _clk_pll3_sw_get_rate,
690 static unsigned long _clk_pll3_120M_get_rate(struct clk *clk)
692 return clk_get_rate(clk->parent) / 4;
695 static struct clk pll3_120M = {
696 __INIT_CLK_DEBUG(pll3_120M)
697 .parent = &pll3_sw_clk,
698 .get_rate = _clk_pll3_120M_get_rate,
701 static unsigned long _clk_pll3_80M_get_rate(struct clk *clk)
703 return clk_get_rate(clk->parent) / 6;
706 static struct clk pll3_80M = {
707 __INIT_CLK_DEBUG(pll3_80M)
708 .parent = &pll3_sw_clk,
709 .get_rate = _clk_pll3_80M_get_rate,
712 static unsigned long _clk_pll3_60M_get_rate(struct clk *clk)
714 return clk_get_rate(clk->parent) / 8;
717 static struct clk pll3_60M = {
718 __INIT_CLK_DEBUG(pll3_60M)
719 .parent = &pll3_sw_clk,
720 .get_rate = _clk_pll3_60M_get_rate,
723 static unsigned long _clk_audio_video_get_rate(struct clk *clk)
725 unsigned int div, mfn, mfd;
727 unsigned int parent_rate = clk_get_rate(clk->parent);
728 void __iomem *pllbase;
730 if (clk == &pll4_audio_main_clk)
731 pllbase = PLL4_AUDIO_BASE_ADDR;
733 pllbase = PLL5_VIDEO_BASE_ADDR;
735 div = __raw_readl(pllbase) & ANADIG_PLL_SYS_DIV_SELECT_MASK;
736 mfn = __raw_readl(pllbase + PLL_NUM_DIV_OFFSET);
737 mfd = __raw_readl(pllbase + PLL_DENOM_DIV_OFFSET);
739 rate = (parent_rate * div) + ((parent_rate / mfd) * mfn);
744 static int _clk_audio_video_set_rate(struct clk *clk, unsigned long rate)
746 unsigned int reg, div;
747 unsigned int mfn, mfd = 1000000;
749 unsigned int parent_rate = clk_get_rate(clk->parent);
750 void __iomem *pllbase;
752 if ((rate < AUDIO_VIDEO_MIN_CLK_FREQ) ||
753 (rate > AUDIO_VIDEO_MAX_CLK_FREQ))
756 if (clk == &pll4_audio_main_clk)
757 pllbase = PLL4_AUDIO_BASE_ADDR;
759 pllbase = PLL5_VIDEO_BASE_ADDR;
761 div = rate / parent_rate ;
762 temp64 = (u64) (rate - (div * parent_rate));
764 do_div(temp64, parent_rate);
767 reg = __raw_readl(pllbase) & ~ANADIG_PLL_SYS_DIV_SELECT_MASK;
769 __raw_writel(reg, pllbase);
770 __raw_writel(mfn, pllbase + PLL_NUM_DIV_OFFSET);
771 __raw_writel(mfd, pllbase + PLL_DENOM_DIV_OFFSET);
776 static unsigned long _clk_audio_video_round_rate(struct clk *clk,
779 if (rate < AUDIO_VIDEO_MIN_CLK_FREQ)
780 return AUDIO_VIDEO_MIN_CLK_FREQ;
782 if (rate > AUDIO_VIDEO_MAX_CLK_FREQ)
783 return AUDIO_VIDEO_MAX_CLK_FREQ;
789 static struct clk pll4_audio_main_clk = {
790 __INIT_CLK_DEBUG(pll4_audio_main_clk)
792 .enable = _clk_pll_enable,
793 .disable = _clk_pll_disable,
794 .set_rate = _clk_audio_video_set_rate,
795 .get_rate = _clk_audio_video_get_rate,
796 .round_rate = _clk_audio_video_round_rate,
800 static struct clk pll5_video_main_clk = {
801 __INIT_CLK_DEBUG(pll5_video_main_clk)
803 .enable = _clk_pll_enable,
804 .disable = _clk_pll_disable,
805 .set_rate = _clk_audio_video_set_rate,
806 .get_rate = _clk_audio_video_get_rate,
807 .round_rate = _clk_audio_video_round_rate,
810 static struct clk pll6_MLB_main_clk = {
811 __INIT_CLK_DEBUG(pll6_MLB_main_clk)
813 .enable = _clk_pll_enable,
814 .disable = _clk_pll_disable,
817 static unsigned long _clk_pll7_usb_otg_get_rate(struct clk *clk)
822 div = __raw_readl(PLL7_480_USB2_BASE_ADDR)
823 & ANADIG_PLL_480_DIV_SELECT_MASK;
826 val = clk_get_rate(clk->parent) * 22;
828 val = clk_get_rate(clk->parent) * 20;
832 static int _clk_pll7_usb_otg_set_rate(struct clk *clk, unsigned long rate)
834 unsigned int reg, div;
836 if (rate == 528000000)
838 else if (rate == 480000000)
843 reg = __raw_readl(PLL7_480_USB2_BASE_ADDR);
844 reg &= ~ANADIG_PLL_480_DIV_SELECT_MASK;
846 __raw_writel(reg, PLL7_480_USB2_BASE_ADDR);
851 static struct clk pll7_usb_host_main_clk = {
852 __INIT_CLK_DEBUG(pll7_usb_host_main_clk)
854 .enable = _clk_pll_enable,
855 .disable = _clk_pll_disable,
856 .set_rate = _clk_pll7_usb_otg_set_rate,
857 .get_rate = _clk_pll7_usb_otg_get_rate,
861 static struct clk pll8_enet_main_clk = {
862 __INIT_CLK_DEBUG(pll8_enet_main_clk)
864 .enable = _clk_pll_enable,
865 .disable = _clk_pll_disable,
868 static unsigned long _clk_arm_get_rate(struct clk *clk)
872 cacrr = __raw_readl(MXC_CCM_CACRR);
873 div = (cacrr & MXC_CCM_CACRR_ARM_PODF_MASK) + 1;
874 return clk_get_rate(clk->parent) / div;
877 static int _clk_arm_set_rate(struct clk *clk, unsigned long rate)
884 for (i = 0; i < cpu_op_nr; i++) {
885 if (rate == cpu_op_tbl[i].cpu_rate)
891 if (cpu_op_tbl[i].pll_rate != clk_get_rate(&pll1_sys_main_clk)) {
892 /* Change the PLL1 rate. */
893 if (pll2_pfd_400M.usecount != 0)
894 pll1_sw_clk.set_parent(&pll1_sw_clk, &pll2_pfd_400M);
896 pll1_sw_clk.set_parent(&pll1_sw_clk, &osc_clk);
897 pll1_sys_main_clk.set_rate(&pll1_sys_main_clk, cpu_op_tbl[i].pll_rate);
898 pll1_sw_clk.set_parent(&pll1_sw_clk, &pll1_sys_main_clk);
901 parent_rate = clk_get_rate(clk->parent);
902 div = parent_rate / rate;
907 if ((parent_rate / div) > rate)
913 __raw_writel(div - 1, MXC_CCM_CACRR);
918 static struct clk cpu_clk = {
919 __INIT_CLK_DEBUG(cpu_clk)
920 .parent = &pll1_sw_clk,
921 .set_rate = _clk_arm_set_rate,
922 .get_rate = _clk_arm_get_rate,
925 static int _clk_periph_set_parent(struct clk *clk, struct clk *parent)
930 mux = _get_mux6(parent, &pll2_528_bus_main_clk, &pll2_pfd_400M,
931 &pll2_pfd_352M, &pll2_200M, &pll3_sw_clk, &osc_clk);
934 /* Set the pre_periph_clk multiplexer */
935 reg = __raw_readl(MXC_CCM_CBCMR);
936 reg &= ~MXC_CCM_CBCMR_PRE_PERIPH_CLK_SEL_MASK;
937 reg |= mux << MXC_CCM_CBCMR_PRE_PERIPH_CLK_SEL_OFFSET;
938 __raw_writel(reg, MXC_CCM_CBCMR);
940 /* Set the periph_clk_sel multiplexer. */
941 reg = __raw_readl(MXC_CCM_CBCDR);
942 reg &= ~MXC_CCM_CBCDR_PERIPH_CLK_SEL;
943 __raw_writel(reg, MXC_CCM_CBCDR);
945 reg = __raw_readl(MXC_CCM_CBCDR);
946 /* Set the periph_clk2_podf divider to divide by 1. */
947 reg &= ~MXC_CCM_CBCDR_PERIPH_CLK2_PODF_MASK;
948 __raw_writel(reg, MXC_CCM_CBCDR);
950 /* Set the periph_clk2_sel mux. */
951 reg = __raw_readl(MXC_CCM_CBCMR);
952 reg &= ~MXC_CCM_CBCMR_PERIPH_CLK2_SEL_MASK;
953 reg |= ((mux - 4) << MXC_CCM_CBCMR_PERIPH_CLK2_SEL_OFFSET);
954 __raw_writel(reg, MXC_CCM_CBCMR);
956 reg = __raw_readl(MXC_CCM_CBCDR);
957 /* Set periph_clk_sel to select periph_clk2. */
958 reg |= MXC_CCM_CBCDR_PERIPH_CLK_SEL;
959 __raw_writel(reg, MXC_CCM_CBCDR);
962 if (!WAIT(!(__raw_readl(MXC_CCM_CDHIPR)
963 & MXC_CCM_CDHIPR_PERIPH_CLK_SEL_BUSY), SPIN_DELAY))
964 panic("_clk_periph_set_parent failed\n");
969 static unsigned long _clk_periph_get_rate(struct clk *clk)
975 if ((clk->parent == &pll3_sw_clk) || (clk->parent == &osc_clk)) {
976 reg = __raw_readl(MXC_CCM_CBCDR)
977 & MXC_CCM_CBCDR_PERIPH_CLK2_PODF_MASK;
978 div = (reg >> MXC_CCM_CBCDR_PERIPH_CLK2_PODF_OFFSET) + 1;
980 val = clk_get_rate(clk->parent) / div;
984 static struct clk periph_clk = {
985 __INIT_CLK_DEBUG(periph_clk)
986 .parent = &pll2_528_bus_main_clk,
987 .set_parent = _clk_periph_set_parent,
988 .get_rate = _clk_periph_get_rate,
991 static unsigned long _clk_axi_get_rate(struct clk *clk)
996 reg = __raw_readl(MXC_CCM_CBCDR) & MXC_CCM_CBCDR_AXI_PODF_MASK;
997 div = (reg >> MXC_CCM_CBCDR_AXI_PODF_OFFSET);
999 val = clk_get_rate(clk->parent) / (div + 1);
1003 static int _clk_axi_set_rate(struct clk *clk, unsigned long rate)
1006 u32 parent_rate = clk_get_rate(clk->parent);
1008 div = parent_rate / rate;
1012 if (((parent_rate / div) != rate) || (div > 8))
1015 reg = __raw_readl(MXC_CCM_CBCDR);
1016 reg &= ~MXC_CCM_CBCDR_AXI_PODF_MASK;
1017 reg |= (div - 1) << MXC_CCM_CBCDR_AXI_PODF_OFFSET;
1018 __raw_writel(reg, MXC_CCM_CBCDR);
1020 if (!WAIT(!(__raw_readl(MXC_CCM_CDHIPR)
1021 & MXC_CCM_CDHIPR_AXI_PODF_BUSY), SPIN_DELAY))
1022 panic("pll _clk_axi_a_set_rate failed\n");
1027 static unsigned long _clk_axi_round_rate(struct clk *clk,
1031 u32 parent_rate = clk_get_rate(clk->parent);
1033 div = parent_rate / rate;
1035 /* Make sure rate is not greater than the maximum
1036 * value for the clock.
1037 * Also prevent a div of 0.
1045 return parent_rate / div;
1048 static int _clk_axi_set_parent(struct clk *clk, struct clk *parent)
1053 mux = _get_mux6(parent, &periph_clk, &pll2_pfd_400M,
1054 &pll3_pfd_540M, NULL, NULL, NULL);
1057 /* Set the AXI_SEL mux */
1058 reg = __raw_readl(MXC_CCM_CBCDR) & ~MXC_CCM_CBCDR_AXI_SEL;
1059 __raw_writel(reg, MXC_CCM_CBCDR);
1061 /* Set the AXI_ALT_SEL mux. */
1062 reg = __raw_readl(MXC_CCM_CBCDR)
1063 & ~MXC_CCM_CBCDR_AXI_ALT_SEL_MASK;
1064 reg = ((mux - 1) << MXC_CCM_CBCDR_AXI_ALT_SEL_OFFSET);
1065 __raw_writel(reg, MXC_CCM_CBCDR);
1067 /* Set the AXI_SEL mux */
1068 reg = __raw_readl(MXC_CCM_CBCDR) & ~MXC_CCM_CBCDR_AXI_SEL;
1069 reg |= MXC_CCM_CBCDR_AXI_SEL;
1070 __raw_writel(reg, MXC_CCM_CBCDR);
1075 static struct clk axi_clk = {
1076 __INIT_CLK_DEBUG(axi_clk)
1077 .parent = &periph_clk,
1078 .set_parent = _clk_axi_set_parent,
1079 .set_rate = _clk_axi_set_rate,
1080 .get_rate = _clk_axi_get_rate,
1081 .round_rate = _clk_axi_round_rate,
1084 static unsigned long _clk_ahb_get_rate(struct clk *clk)
1088 reg = __raw_readl(MXC_CCM_CBCDR);
1089 div = ((reg & MXC_CCM_CBCDR_AHB_PODF_MASK) >>
1090 MXC_CCM_CBCDR_AHB_PODF_OFFSET) + 1;
1092 return clk_get_rate(clk->parent) / div;
1095 static int _clk_ahb_set_rate(struct clk *clk, unsigned long rate)
1098 u32 parent_rate = clk_get_rate(clk->parent);
1100 div = parent_rate / rate;
1103 if (((parent_rate / div) != rate) || (div > 8))
1106 reg = __raw_readl(MXC_CCM_CBCDR);
1107 reg &= ~MXC_CCM_CBCDR_AHB_PODF_MASK;
1108 reg |= (div - 1) << MXC_CCM_CBCDR_AHB_PODF_OFFSET;
1109 __raw_writel(reg, MXC_CCM_CBCDR);
1111 if (!WAIT(!(__raw_readl(MXC_CCM_CDHIPR) & MXC_CCM_CDHIPR_AHB_PODF_BUSY),
1113 panic("_clk_ahb_set_rate failed\n");
1118 static unsigned long _clk_ahb_round_rate(struct clk *clk,
1122 u32 parent_rate = clk_get_rate(clk->parent);
1124 div = parent_rate / rate;
1126 /* Make sure rate is not greater than the maximum value for the clock.
1127 * Also prevent a div of 0.
1135 return parent_rate / div;
1138 static struct clk ahb_clk = {
1139 __INIT_CLK_DEBUG(ahb_clk)
1140 .parent = &periph_clk,
1141 .get_rate = _clk_ahb_get_rate,
1142 .set_rate = _clk_ahb_set_rate,
1143 .round_rate = _clk_ahb_round_rate,
1146 static unsigned long _clk_ipg_get_rate(struct clk *clk)
1150 reg = __raw_readl(MXC_CCM_CBCDR);
1151 div = ((reg & MXC_CCM_CBCDR_IPG_PODF_MASK) >>
1152 MXC_CCM_CBCDR_IPG_PODF_OFFSET) + 1;
1154 return clk_get_rate(clk->parent) / div;
1158 static struct clk ipg_clk = {
1159 __INIT_CLK_DEBUG(ipg_clk)
1161 .get_rate = _clk_ipg_get_rate,
1164 static unsigned long _clk_mmdc_ch0_axi_get_rate(struct clk *clk)
1168 reg = __raw_readl(MXC_CCM_CBCDR);
1169 div = ((reg & MXC_CCM_CBCDR_MMDC_CH0_PODF_MASK) >>
1170 MXC_CCM_CBCDR_MMDC_CH0_PODF_OFFSET) + 1;
1172 return clk_get_rate(clk->parent) / div;
1175 static int _clk_mmdc_ch0_axi_set_rate(struct clk *clk, unsigned long rate)
1178 u32 parent_rate = clk_get_rate(clk->parent);
1180 div = parent_rate / rate;
1183 if (((parent_rate / div) != rate) || (div > 8))
1186 reg = __raw_readl(MXC_CCM_CBCDR);
1187 reg &= ~MXC_CCM_CBCDR_MMDC_CH0_PODF_MASK;
1188 reg |= (div - 1) << MXC_CCM_CBCDR_MMDC_CH0_PODF_OFFSET;
1189 __raw_writel(reg, MXC_CCM_CBCDR);
1191 if (!WAIT(!(__raw_readl(MXC_CCM_CDHIPR)
1192 & MXC_CCM_CDHIPR_MMDC_CH0_PODF_BUSY),
1194 panic("_clk_mmdc_ch0_axi_set_rate failed\n");
1199 static unsigned long _clk_mmdc_ch0_axi_round_rate(struct clk *clk,
1203 u32 parent_rate = clk_get_rate(clk->parent);
1205 div = parent_rate / rate;
1207 /* Make sure rate is not greater than the maximum value for the clock.
1208 * Also prevent a div of 0.
1216 return parent_rate / div;
1219 static struct clk mmdc_ch0_axi_clk[] = {
1221 __INIT_CLK_DEBUG(mmdc_ch0_axi_clk)
1223 .parent = &periph_clk,
1224 .enable = _clk_enable,
1225 .disable = _clk_disable,
1226 .enable_reg = MXC_CCM_CCGR3,
1227 .enable_shift = MXC_CCM_CCGRx_CG10_OFFSET,
1228 .secondary = &mmdc_ch0_axi_clk[1],
1229 .get_rate = _clk_mmdc_ch0_axi_get_rate,
1230 .set_rate = _clk_mmdc_ch0_axi_set_rate,
1231 .round_rate = _clk_mmdc_ch0_axi_round_rate,
1234 __INIT_CLK_DEBUG(mmdc_ch0_ipg_clk)
1237 .enable = _clk_enable,
1238 .disable = _clk_disable,
1239 .enable_reg = MXC_CCM_CCGR3,
1240 .enable_shift = MXC_CCM_CCGRx_CG12_OFFSET,
1244 static unsigned long _clk_mmdc_ch1_axi_get_rate(struct clk *clk)
1248 reg = __raw_readl(MXC_CCM_CBCDR);
1249 div = ((reg & MXC_CCM_CBCDR_MMDC_CH1_PODF_MASK) >>
1250 MXC_CCM_CBCDR_MMDC_CH1_PODF_OFFSET) + 1;
1252 return clk_get_rate(clk->parent) / div;
1255 static int _clk_mmdc_ch1_axi_set_rate(struct clk *clk, unsigned long rate)
1258 u32 parent_rate = clk_get_rate(clk->parent);
1260 div = parent_rate / rate;
1263 if (((parent_rate / div) != rate) || (div > 8))
1266 reg = __raw_readl(MXC_CCM_CBCDR);
1267 reg &= ~MXC_CCM_CBCDR_MMDC_CH1_PODF_MASK;
1268 reg |= (div - 1) << MXC_CCM_CBCDR_MMDC_CH1_PODF_OFFSET;
1269 __raw_writel(reg, MXC_CCM_CBCDR);
1271 if (!WAIT(!(__raw_readl(MXC_CCM_CDHIPR)
1272 & MXC_CCM_CDHIPR_MMDC_CH1_PODF_BUSY), SPIN_DELAY))
1273 panic("_clk_mmdc_ch1_axi_set_rate failed\n");
1278 static unsigned long _clk_mmdc_ch1_axi_round_rate(struct clk *clk,
1282 u32 parent_rate = clk_get_rate(clk->parent);
1284 div = parent_rate / rate;
1286 /* Make sure rate is not greater than the maximum value for the clock.
1287 * Also prevent a div of 0.
1295 return parent_rate / div;
1298 static struct clk mmdc_ch1_axi_clk[] = {
1300 __INIT_CLK_DEBUG(mmdc_ch1_axi_clk)
1302 .parent = &pll2_pfd_400M,
1303 .enable = _clk_enable,
1304 .disable = _clk_disable,
1305 .enable_reg = MXC_CCM_CCGR3,
1306 .enable_shift = MXC_CCM_CCGRx_CG11_OFFSET,
1307 .secondary = &mmdc_ch1_axi_clk[1],
1308 .get_rate = _clk_mmdc_ch1_axi_get_rate,
1309 .set_rate = _clk_mmdc_ch1_axi_set_rate,
1310 .round_rate = _clk_mmdc_ch1_axi_round_rate,
1314 __INIT_CLK_DEBUG(mmdc_ch1_ipg_clk)
1316 .enable = _clk_enable,
1317 .disable = _clk_disable,
1318 .enable_reg = MXC_CCM_CCGR3,
1319 .enable_shift = MXC_CCM_CCGRx_CG13_OFFSET,
1323 static unsigned long _clk_ipg_perclk_get_rate(struct clk *clk)
1327 reg = __raw_readl(MXC_CCM_CSCMR1);
1328 div = ((reg & MXC_CCM_CSCMR1_PERCLK_PODF_MASK) >>
1329 MXC_CCM_CSCMR1_PERCLK_PODF_OFFSET) + 1;
1331 return clk_get_rate(clk->parent) / div;
1334 static int _clk_ipg_perclk_set_rate(struct clk *clk, unsigned long rate)
1337 u32 parent_rate = clk_get_rate(clk->parent);
1339 div = parent_rate / rate;
1342 if (((parent_rate / div) != rate) || (div > 64))
1345 reg = __raw_readl(MXC_CCM_CSCMR1);
1346 reg &= ~MXC_CCM_CSCMR1_PERCLK_PODF_MASK;
1347 reg |= (div - 1) << MXC_CCM_CSCMR1_PERCLK_PODF_OFFSET;
1348 __raw_writel(reg, MXC_CCM_CSCMR1);
1354 static unsigned long _clk_ipg_perclk_round_rate(struct clk *clk,
1358 u32 parent_rate = clk_get_rate(clk->parent);
1360 div = parent_rate / rate;
1362 /* Make sure rate is not greater than the maximum value for the clock.
1363 * Also prevent a div of 0.
1371 return parent_rate / div;
1374 static struct clk ipg_perclk = {
1375 __INIT_CLK_DEBUG(ipg_perclk)
1377 .get_rate = _clk_ipg_perclk_get_rate,
1378 .set_rate = _clk_ipg_perclk_set_rate,
1379 .round_rate = _clk_ipg_perclk_round_rate,
1382 static struct clk spba_clk = {
1383 __INIT_CLK_DEBUG(spba_clk)
1385 .enable_reg = MXC_CCM_CCGR5,
1386 .enable_shift = MXC_CCM_CCGRx_CG6_OFFSET,
1387 .enable = _clk_enable,
1388 .disable = _clk_disable,
1391 static struct clk sdma_clk = {
1392 __INIT_CLK_DEBUG(sdma_clk)
1394 .enable_reg = MXC_CCM_CCGR5,
1395 .enable_shift = MXC_CCM_CCGRx_CG3_OFFSET,
1396 .enable = _clk_enable,
1397 .disable = _clk_disable,
1400 static int _clk_gpu2d_axi_set_parent(struct clk *clk, struct clk *parent)
1402 u32 reg = __raw_readl(MXC_CCM_CBCMR) & ~MXC_CCM_CBCMR_GPU2D_AXI_CLK_SEL;
1404 if (parent == &ahb_clk)
1405 reg |= MXC_CCM_CBCMR_GPU2D_AXI_CLK_SEL;
1407 __raw_writel(reg, MXC_CCM_CBCMR);
1412 static struct clk gpu2d_axi_clk = {
1413 __INIT_CLK_DEBUG(gpu2d_axi_clk)
1415 .secondary = &openvg_axi_clk,
1416 .set_parent = _clk_gpu2d_axi_set_parent,
1419 static int _clk_gpu3d_axi_set_parent(struct clk *clk, struct clk *parent)
1421 u32 reg = __raw_readl(MXC_CCM_CBCMR) & ~MXC_CCM_CBCMR_GPU3D_AXI_CLK_SEL;
1423 if (parent == &ahb_clk)
1424 reg |= MXC_CCM_CBCMR_GPU3D_AXI_CLK_SEL;
1426 __raw_writel(reg, MXC_CCM_CBCMR);
1431 static struct clk gpu3d_axi_clk = {
1432 __INIT_CLK_DEBUG(gpu3d_axi_clk)
1434 .set_parent = _clk_gpu3d_axi_set_parent,
1437 static int _clk_pcie_axi_set_parent(struct clk *clk, struct clk *parent)
1439 u32 reg = __raw_readl(MXC_CCM_CBCMR) & ~MXC_CCM_CBCMR_PCIE_AXI_CLK_SEL;
1441 if (parent == &ahb_clk)
1442 reg |= MXC_CCM_CBCMR_PCIE_AXI_CLK_SEL;
1444 __raw_writel(reg, MXC_CCM_CBCMR);
1449 static struct clk pcie_axi_clk = {
1450 __INIT_CLK_DEBUG(pcie_axi_clk)
1452 .set_parent = _clk_pcie_axi_set_parent,
/*
 * VDO AXI clock: 1-bit CBCMR mux (set = ahb_clk), gated via CCGR6 CG6.
 * NOTE(review): elided dump — braces/return missing; code verbatim.
 */
1455 static int _clk_vdo_axi_set_parent(struct clk *clk, struct clk *parent)
1457 u32 reg = __raw_readl(MXC_CCM_CBCMR) & ~MXC_CCM_CBCMR_VDOAXI_CLK_SEL;
1459 if (parent == &ahb_clk)
1460 reg |= MXC_CCM_CBCMR_VDOAXI_CLK_SEL;
1462 __raw_writel(reg, MXC_CCM_CBCMR);
1467 static struct clk vdo_axi_clk = {
1468 __INIT_CLK_DEBUG(vdo_axi_clk)
1470 .enable_reg = MXC_CCM_CCGR6,
1471 .enable_shift = MXC_CCM_CCGRx_CG6_OFFSET,
1472 .enable = _clk_enable,
1473 .disable = _clk_disable,
1474 .set_parent = _clk_vdo_axi_set_parent,
/*
 * vdoa_clk: VDOA gate — CCGR2, gate CG13.
 * NOTE(review): elided dump — .parent and closing brace missing; verbatim.
 */
1477 static struct clk vdoa_clk = {
1478 __INIT_CLK_DEBUG(vdoa_clk)
1481 .enable_reg = MXC_CCM_CCGR2,
1482 .enable_shift = MXC_CCM_CCGRx_CG13_OFFSET,
1483 .enable = _clk_enable,
1484 .disable = _clk_disable,
/*
 * gpt_clk[]: GPT module clock (index 0, parent ipg_perclk, CCGR1 CG10)
 * chained to its serial clock (index 1, CCGR1 CG11) via .secondary.
 * NOTE(review): elided dump — array-element braces missing; code verbatim.
 */
1487 static struct clk gpt_clk[] = {
1489 __INIT_CLK_DEBUG(gpt_clk)
1490 .parent = &ipg_perclk,
1492 .enable_reg = MXC_CCM_CCGR1,
1493 .enable_shift = MXC_CCM_CCGRx_CG10_OFFSET,
1494 .enable = _clk_enable,
1495 .disable = _clk_disable,
1496 .secondary = &gpt_clk[1],
/* gpt serial clock (element 1) */
1499 __INIT_CLK_DEBUG(gpt_serial_clk)
1501 .enable_reg = MXC_CCM_CCGR1,
1502 .enable_shift = MXC_CCM_CCGRx_CG11_OFFSET,
1503 .enable = _clk_enable,
1504 .disable = _clk_disable,
/*
 * iim_clk: IIM (fuse box) gate — CCGR2, gate CG6.
 * NOTE(review): elided dump; code verbatim.
 */
1508 static struct clk iim_clk = {
1509 __INIT_CLK_DEBUG(iim_clk)
1511 .enable = _clk_enable,
1512 .enable_reg = MXC_CCM_CCGR2,
1513 .enable_shift = MXC_CCM_CCGRx_CG6_OFFSET,
1514 .disable = _clk_disable,
/*
 * i2c_clk[]: three I2C bus clocks, all parented on ipg_perclk and gated
 * in CCGR2 at CG3/CG4/CG5 respectively.
 * NOTE(review): elided dump — array-element braces missing; code verbatim.
 */
1517 static struct clk i2c_clk[] = {
1519 __INIT_CLK_DEBUG(i2c_clk_0)
1521 .parent = &ipg_perclk,
1522 .enable_reg = MXC_CCM_CCGR2,
1523 .enable_shift = MXC_CCM_CCGRx_CG3_OFFSET,
1524 .enable = _clk_enable,
1525 .disable = _clk_disable,
/* I2C2 */
1528 __INIT_CLK_DEBUG(i2c_clk_1)
1530 .parent = &ipg_perclk,
1531 .enable_reg = MXC_CCM_CCGR2,
1532 .enable_shift = MXC_CCM_CCGRx_CG4_OFFSET,
1533 .enable = _clk_enable,
1534 .disable = _clk_disable,
/* I2C3 */
1537 __INIT_CLK_DEBUG(i2c_clk_2)
1539 .parent = &ipg_perclk,
1540 .enable_reg = MXC_CCM_CCGR2,
1541 .enable_shift = MXC_CCM_CCGRx_CG5_OFFSET,
1542 .enable = _clk_enable,
1543 .disable = _clk_disable,
/*
 * VPU AXI clock cluster: mux (CBCMR), divider (CSCDR1 VPU_AXI_PODF,
 * 3-bit so max div is 8) and the vpu_clk table entry (CCGR6 CG7).
 * NOTE(review): elided dump — braces, declarations and return paths are
 * missing throughout; code kept verbatim.
 */
1547 static int _clk_vpu_axi_set_parent(struct clk *clk, struct clk *parent)
1550 u32 reg = __raw_readl(MXC_CCM_CBCMR)
1551 & ~MXC_CCM_CBCMR_VPU_AXI_CLK_SEL_MASK;
1553 mux = _get_mux6(parent, &axi_clk, &pll2_pfd_400M,
1554 &pll2_pfd_352M, NULL, NULL, NULL);
1556 reg |= (mux << MXC_CCM_CBCMR_VPU_AXI_CLK_SEL_OFFSET);
1558 __raw_writel(reg, MXC_CCM_CBCMR);
/* Rate = parent / (PODF + 1). */
1563 static unsigned long _clk_vpu_axi_get_rate(struct clk *clk)
1567 reg = __raw_readl(MXC_CCM_CSCDR1);
1568 div = ((reg & MXC_CCM_CSCDR1_VPU_AXI_PODF_MASK) >>
1569 MXC_CCM_CSCDR1_VPU_AXI_PODF_OFFSET) + 1;
1571 return clk_get_rate(clk->parent) / div;
/* Rejects rates that do not divide the parent exactly or need div > 8. */
1574 static int _clk_vpu_axi_set_rate(struct clk *clk, unsigned long rate)
1577 u32 parent_rate = clk_get_rate(clk->parent);
1579 div = parent_rate / rate;
1582 if (((parent_rate / div) != rate) || (div > 8))
1585 reg = __raw_readl(MXC_CCM_CSCDR1);
1586 reg &= ~MXC_CCM_CSCDR1_VPU_AXI_PODF_MASK;
1587 reg |= (div - 1) << MXC_CCM_CSCDR1_VPU_AXI_PODF_OFFSET;
1588 __raw_writel(reg, MXC_CCM_CSCDR1);
1593 static unsigned long _clk_vpu_axi_round_rate(struct clk *clk,
1597 u32 parent_rate = clk_get_rate(clk->parent);
1599 div = parent_rate / rate;
1601 /* Make sure rate is not greater than the maximum value for the clock.
1602 * Also prevent a div of 0.
1610 return parent_rate / div;
1613 static struct clk vpu_clk = {
1614 __INIT_CLK_DEBUG(vpu_clk)
1616 .enable_reg = MXC_CCM_CCGR6,
1617 .enable_shift = MXC_CCM_CCGRx_CG7_OFFSET,
1618 .enable = _clk_enable,
1619 .disable = _clk_disable,
1620 .set_parent = _clk_vpu_axi_set_parent,
1621 .round_rate = _clk_vpu_axi_round_rate,
1622 .set_rate = _clk_vpu_axi_set_rate,
1623 .get_rate = _clk_vpu_axi_get_rate,
/*
 * IPU1 HSP clock cluster: mux + divider in CSCDR3, shared
 * _clk_ipu_round_rate helper, and the ipu1_clk table (CCGR3 CG0).
 * NOTE(review): elided dump — braces/declarations/returns missing; verbatim.
 */
1626 static int _clk_ipu1_set_parent(struct clk *clk, struct clk *parent)
1629 u32 reg = __raw_readl(MXC_CCM_CSCDR3)
1630 & ~MXC_CCM_CSCDR3_IPU1_HSP_CLK_SEL_MASK;
1632 mux = _get_mux6(parent, &mmdc_ch0_axi_clk[0],
1633 &pll2_pfd_400M, &pll3_120M, &pll3_pfd_540M, NULL, NULL);
1635 reg |= (mux << MXC_CCM_CSCDR3_IPU1_HSP_CLK_SEL_OFFSET);
1637 __raw_writel(reg, MXC_CCM_CSCDR3);
/* Rate = parent / (PODF + 1). */
1642 static unsigned long _clk_ipu1_get_rate(struct clk *clk)
1646 reg = __raw_readl(MXC_CCM_CSCDR3);
1647 div = ((reg & MXC_CCM_CSCDR3_IPU1_HSP_PODF_MASK) >>
1648 MXC_CCM_CSCDR3_IPU1_HSP_PODF_OFFSET) + 1;
1650 return clk_get_rate(clk->parent) / div;
/* Exact-divisor only, max div 8. */
1653 static int _clk_ipu1_set_rate(struct clk *clk, unsigned long rate)
1656 u32 parent_rate = clk_get_rate(clk->parent);
1658 div = parent_rate / rate;
1661 if (((parent_rate / div) != rate) || (div > 8))
1664 reg = __raw_readl(MXC_CCM_CSCDR3);
1665 reg &= ~MXC_CCM_CSCDR3_IPU1_HSP_PODF_MASK;
1666 reg |= (div - 1) << MXC_CCM_CSCDR3_IPU1_HSP_PODF_OFFSET;
1667 __raw_writel(reg, MXC_CCM_CSCDR3);
/* Shared round_rate used by both ipu1_clk and ipu2_clk. */
1672 static unsigned long _clk_ipu_round_rate(struct clk *clk,
1676 u32 parent_rate = clk_get_rate(clk->parent);
1678 div = parent_rate / rate;
1680 /* Make sure rate is not greater than the maximum value for the clock.
1681 * Also prevent a div of 0.
1689 return parent_rate / div;
1692 static struct clk ipu1_clk = {
1693 __INIT_CLK_DEBUG(ipu1_clk)
1694 .parent = &mmdc_ch0_axi_clk[0],
1695 .enable_reg = MXC_CCM_CCGR3,
1696 .enable_shift = MXC_CCM_CCGRx_CG0_OFFSET,
1697 .enable = _clk_enable,
1698 .disable = _clk_disable,
1699 .set_parent = _clk_ipu1_set_parent,
1700 .round_rate = _clk_ipu_round_rate,
1701 .set_rate = _clk_ipu1_set_rate,
1702 .get_rate = _clk_ipu1_get_rate,
/*
 * IPU2 HSP clock cluster: mirrors the IPU1 cluster using the IPU2 fields
 * of CSCDR3; table entry gated at CCGR3 CG3.
 * NOTE(review): elided dump — braces/declarations/returns missing; verbatim.
 */
1705 static int _clk_ipu2_set_parent(struct clk *clk, struct clk *parent)
1708 u32 reg = __raw_readl(MXC_CCM_CSCDR3)
1709 & ~MXC_CCM_CSCDR3_IPU2_HSP_CLK_SEL_MASK;
1711 mux = _get_mux6(parent, &mmdc_ch0_axi_clk[0],
1712 &pll2_pfd_400M, &pll3_120M, &pll3_pfd_540M, NULL, NULL);
1714 reg |= (mux << MXC_CCM_CSCDR3_IPU2_HSP_CLK_SEL_OFFSET);
1716 __raw_writel(reg, MXC_CCM_CSCDR3);
1721 static unsigned long _clk_ipu2_get_rate(struct clk *clk)
1725 reg = __raw_readl(MXC_CCM_CSCDR3);
1726 div = ((reg & MXC_CCM_CSCDR3_IPU2_HSP_PODF_MASK) >>
1727 MXC_CCM_CSCDR3_IPU2_HSP_PODF_OFFSET) + 1;
1729 return clk_get_rate(clk->parent) / div;
/* Exact-divisor only, max div 8. */
1732 static int _clk_ipu2_set_rate(struct clk *clk, unsigned long rate)
1735 u32 parent_rate = clk_get_rate(clk->parent);
1737 div = parent_rate / rate;
1740 if (((parent_rate / div) != rate) || (div > 8))
1743 reg = __raw_readl(MXC_CCM_CSCDR3);
1744 reg &= ~MXC_CCM_CSCDR3_IPU2_HSP_PODF_MASK;
1745 reg |= (div - 1) << MXC_CCM_CSCDR3_IPU2_HSP_PODF_OFFSET;
1746 __raw_writel(reg, MXC_CCM_CSCDR3);
1751 static struct clk ipu2_clk = {
1752 __INIT_CLK_DEBUG(ipu2_clk)
1753 .parent = &mmdc_ch0_axi_clk[0],
1754 .enable_reg = MXC_CCM_CCGR3,
1755 .enable_shift = MXC_CCM_CCGRx_CG3_OFFSET,
1756 .enable = _clk_enable,
1757 .disable = _clk_disable,
1758 .set_parent = _clk_ipu2_set_parent,
1759 .round_rate = _clk_ipu_round_rate,
1760 .set_rate = _clk_ipu2_set_rate,
1761 .get_rate = _clk_ipu2_get_rate,
/*
 * Shared round_rate for all four uSDHC clocks: clamp requested rate to an
 * integer divide of the parent.
 * NOTE(review): elided dump — signature tail, clamping lines and braces
 * are missing; code verbatim.
 */
1764 static unsigned long _clk_usdhc_round_rate(struct clk *clk,
1768 u32 parent_rate = clk_get_rate(clk->parent);
1770 div = parent_rate / rate;
1772 /* Make sure rate is not greater than the maximum value for the clock.
1773 * Also prevent a div of 0.
1781 return parent_rate / div;
/*
 * uSDHC1 clock cluster: 1-bit CSCMR1 mux (set = pll2_pfd_352M, clear =
 * pll2_pfd_400M per the table default), CSCDR1 PODF divider (max 8),
 * gated at CCGR6 CG1.
 * NOTE(review): elided dump — braces/declarations/returns missing; verbatim.
 */
1784 static int _clk_usdhc1_set_parent(struct clk *clk, struct clk *parent)
1786 u32 reg = __raw_readl(MXC_CCM_CSCMR1) & ~MXC_CCM_CSCMR1_USDHC1_CLK_SEL;
1788 if (parent == &pll2_pfd_352M)
1789 reg |= (MXC_CCM_CSCMR1_USDHC1_CLK_SEL);
1791 __raw_writel(reg, MXC_CCM_CSCMR1);
1796 static unsigned long _clk_usdhc1_get_rate(struct clk *clk)
1800 reg = __raw_readl(MXC_CCM_CSCDR1);
1801 div = ((reg & MXC_CCM_CSCDR1_USDHC1_PODF_MASK) >>
1802 MXC_CCM_CSCDR1_USDHC1_PODF_OFFSET) + 1;
1804 return clk_get_rate(clk->parent) / div;
/* Exact-divisor only, max div 8. */
1807 static int _clk_usdhc1_set_rate(struct clk *clk, unsigned long rate)
1810 u32 parent_rate = clk_get_rate(clk->parent);
1812 div = parent_rate / rate;
1815 if (((parent_rate / div) != rate) || (div > 8))
1818 reg = __raw_readl(MXC_CCM_CSCDR1);
1819 reg &= ~MXC_CCM_CSCDR1_USDHC1_PODF_MASK;
1820 reg |= (div - 1) << MXC_CCM_CSCDR1_USDHC1_PODF_OFFSET;
1821 __raw_writel(reg, MXC_CCM_CSCDR1);
1826 static struct clk usdhc1_clk = {
1827 __INIT_CLK_DEBUG(usdhc1_clk)
1829 .parent = &pll2_pfd_400M,
1830 .enable_reg = MXC_CCM_CCGR6,
1831 .enable_shift = MXC_CCM_CCGRx_CG1_OFFSET,
1832 .enable = _clk_enable,
1833 .disable = _clk_disable,
1834 .set_parent = _clk_usdhc1_set_parent,
1835 .round_rate = _clk_usdhc_round_rate,
1836 .set_rate = _clk_usdhc1_set_rate,
1837 .get_rate = _clk_usdhc1_get_rate,
/*
 * uSDHC2 clock cluster: identical shape to uSDHC1, using the USDHC2
 * fields; gated at CCGR6 CG2.
 * NOTE(review): elided dump — braces/declarations/returns missing; verbatim.
 */
1840 static int _clk_usdhc2_set_parent(struct clk *clk, struct clk *parent)
1842 u32 reg = __raw_readl(MXC_CCM_CSCMR1) & ~MXC_CCM_CSCMR1_USDHC2_CLK_SEL;
1844 if (parent == &pll2_pfd_352M)
1845 reg |= (MXC_CCM_CSCMR1_USDHC2_CLK_SEL);
1847 __raw_writel(reg, MXC_CCM_CSCMR1);
1852 static unsigned long _clk_usdhc2_get_rate(struct clk *clk)
1856 reg = __raw_readl(MXC_CCM_CSCDR1);
1857 div = ((reg & MXC_CCM_CSCDR1_USDHC2_PODF_MASK) >>
1858 MXC_CCM_CSCDR1_USDHC2_PODF_OFFSET) + 1;
1860 return clk_get_rate(clk->parent) / div;
/* Exact-divisor only, max div 8. */
1863 static int _clk_usdhc2_set_rate(struct clk *clk, unsigned long rate)
1866 u32 parent_rate = clk_get_rate(clk->parent);
1868 div = parent_rate / rate;
1871 if (((parent_rate / div) != rate) || (div > 8))
1874 reg = __raw_readl(MXC_CCM_CSCDR1);
1875 reg &= ~MXC_CCM_CSCDR1_USDHC2_PODF_MASK;
1876 reg |= (div - 1) << MXC_CCM_CSCDR1_USDHC2_PODF_OFFSET;
1877 __raw_writel(reg, MXC_CCM_CSCDR1);
1882 static struct clk usdhc2_clk = {
1883 __INIT_CLK_DEBUG(usdhc2_clk)
1885 .parent = &pll2_pfd_400M,
1886 .enable_reg = MXC_CCM_CCGR6,
1887 .enable_shift = MXC_CCM_CCGRx_CG2_OFFSET,
1888 .enable = _clk_enable,
1889 .disable = _clk_disable,
1890 .set_parent = _clk_usdhc2_set_parent,
1891 .round_rate = _clk_usdhc_round_rate,
1892 .set_rate = _clk_usdhc2_set_rate,
1893 .get_rate = _clk_usdhc2_get_rate,
/*
 * uSDHC3 clock cluster: identical shape to uSDHC1, using the USDHC3
 * fields; gated at CCGR6 CG3.
 * NOTE(review): elided dump — braces/declarations/returns missing; verbatim.
 */
1896 static int _clk_usdhc3_set_parent(struct clk *clk, struct clk *parent)
1898 u32 reg = __raw_readl(MXC_CCM_CSCMR1) & ~MXC_CCM_CSCMR1_USDHC3_CLK_SEL;
1900 if (parent == &pll2_pfd_352M)
1901 reg |= (MXC_CCM_CSCMR1_USDHC3_CLK_SEL);
1903 __raw_writel(reg, MXC_CCM_CSCMR1);
1908 static unsigned long _clk_usdhc3_get_rate(struct clk *clk)
1912 reg = __raw_readl(MXC_CCM_CSCDR1);
1913 div = ((reg & MXC_CCM_CSCDR1_USDHC3_PODF_MASK) >>
1914 MXC_CCM_CSCDR1_USDHC3_PODF_OFFSET) + 1;
1916 return clk_get_rate(clk->parent) / div;
/* Exact-divisor only, max div 8. */
1919 static int _clk_usdhc3_set_rate(struct clk *clk, unsigned long rate)
1922 u32 parent_rate = clk_get_rate(clk->parent);
1924 div = parent_rate / rate;
1927 if (((parent_rate / div) != rate) || (div > 8))
1930 reg = __raw_readl(MXC_CCM_CSCDR1);
1931 reg &= ~MXC_CCM_CSCDR1_USDHC3_PODF_MASK;
1932 reg |= (div - 1) << MXC_CCM_CSCDR1_USDHC3_PODF_OFFSET;
1933 __raw_writel(reg, MXC_CCM_CSCDR1);
1939 static struct clk usdhc3_clk = {
1940 __INIT_CLK_DEBUG(usdhc3_clk)
1942 .parent = &pll2_pfd_400M,
1943 .enable_reg = MXC_CCM_CCGR6,
1944 .enable_shift = MXC_CCM_CCGRx_CG3_OFFSET,
1945 .enable = _clk_enable,
1946 .disable = _clk_disable,
1947 .set_parent = _clk_usdhc3_set_parent,
1948 .round_rate = _clk_usdhc_round_rate,
1949 .set_rate = _clk_usdhc3_set_rate,
1950 .get_rate = _clk_usdhc3_get_rate,
/*
 * uSDHC4 clock cluster: identical shape to uSDHC1, using the USDHC4
 * fields; gated at CCGR6 CG4.
 * NOTE(review): elided dump — braces/declarations/returns missing; verbatim.
 */
1953 static int _clk_usdhc4_set_parent(struct clk *clk, struct clk *parent)
1955 u32 reg = __raw_readl(MXC_CCM_CSCMR1) & ~MXC_CCM_CSCMR1_USDHC4_CLK_SEL;
1957 if (parent == &pll2_pfd_352M)
1958 reg |= (MXC_CCM_CSCMR1_USDHC4_CLK_SEL);
1960 __raw_writel(reg, MXC_CCM_CSCMR1);
1965 static unsigned long _clk_usdhc4_get_rate(struct clk *clk)
1969 reg = __raw_readl(MXC_CCM_CSCDR1);
1970 div = ((reg & MXC_CCM_CSCDR1_USDHC4_PODF_MASK) >>
1971 MXC_CCM_CSCDR1_USDHC4_PODF_OFFSET) + 1;
1973 return clk_get_rate(clk->parent) / div;
/* Exact-divisor only, max div 8. */
1976 static int _clk_usdhc4_set_rate(struct clk *clk, unsigned long rate)
1979 u32 parent_rate = clk_get_rate(clk->parent);
1981 div = parent_rate / rate;
1984 if (((parent_rate / div) != rate) || (div > 8))
1987 reg = __raw_readl(MXC_CCM_CSCDR1);
1988 reg &= ~MXC_CCM_CSCDR1_USDHC4_PODF_MASK;
1989 reg |= (div - 1) << MXC_CCM_CSCDR1_USDHC4_PODF_OFFSET;
1990 __raw_writel(reg, MXC_CCM_CSCDR1);
1996 static struct clk usdhc4_clk = {
1997 __INIT_CLK_DEBUG(usdhc4_clk)
1999 .parent = &pll2_pfd_400M,
2000 .enable_reg = MXC_CCM_CCGR6,
2001 .enable_shift = MXC_CCM_CCGRx_CG4_OFFSET,
2002 .enable = _clk_enable,
2003 .disable = _clk_disable,
2004 .set_parent = _clk_usdhc4_set_parent,
2005 .round_rate = _clk_usdhc_round_rate,
2006 .set_rate = _clk_usdhc4_set_rate,
2007 .get_rate = _clk_usdhc4_get_rate,
/*
 * Shared SSI round_rate: split the integer divisor into pre/post stages
 * (post limited to 1<<6 = 64) and return the achievable rate.
 * NOTE(review): elided dump — signature tail/braces missing; verbatim.
 */
2010 static unsigned long _clk_ssi_round_rate(struct clk *clk,
2014 u32 parent_rate = clk_get_rate(clk->parent);
2015 u32 div = parent_rate / rate;
2017 if (parent_rate % rate)
2020 __calc_pre_post_dividers(1 << 6, div, &pre, &post);
2022 return parent_rate / (pre * post);
/*
 * SSI1 clock cluster: PRED/PODF dividers in CS1CDR, 3-way mux in CSCMR1
 * (pll3_pfd_508M / pll3_pfd_454M / pll4_audio), gated at CCGR5 CG9.
 * NOTE(review): elided dump — braces/declarations/returns missing; verbatim.
 */
2025 static unsigned long _clk_ssi1_get_rate(struct clk *clk)
2027 u32 reg, prediv, podf;
2029 reg = __raw_readl(MXC_CCM_CS1CDR);
2031 prediv = ((reg & MXC_CCM_CS1CDR_SSI1_CLK_PRED_MASK)
2032 >> MXC_CCM_CS1CDR_SSI1_CLK_PRED_OFFSET) + 1;
2033 podf = ((reg & MXC_CCM_CS1CDR_SSI1_CLK_PODF_MASK)
2034 >> MXC_CCM_CS1CDR_SSI1_CLK_PODF_OFFSET) + 1;
2036 return clk_get_rate(clk->parent) / (prediv * podf);
/* Exact-divisor only, combined div up to 512. */
2039 static int _clk_ssi1_set_rate(struct clk *clk, unsigned long rate)
2041 u32 reg, div, pre, post;
2042 u32 parent_rate = clk_get_rate(clk->parent);
2044 div = parent_rate / rate;
2047 if (((parent_rate / div) != rate) || div > 512)
2050 __calc_pre_post_dividers(1 << 6, div, &pre, &post);
2052 reg = __raw_readl(MXC_CCM_CS1CDR);
2053 reg &= ~(MXC_CCM_CS1CDR_SSI1_CLK_PRED_MASK |
2054 MXC_CCM_CS1CDR_SSI1_CLK_PODF_MASK);
2055 reg |= (post - 1) << MXC_CCM_CS1CDR_SSI1_CLK_PODF_OFFSET;
2056 reg |= (pre - 1) << MXC_CCM_CS1CDR_SSI1_CLK_PRED_OFFSET;
2058 __raw_writel(reg, MXC_CCM_CS1CDR);
2064 static int _clk_ssi1_set_parent(struct clk *clk, struct clk *parent)
2068 reg = __raw_readl(MXC_CCM_CSCMR1)
2069 & ~MXC_CCM_CSCMR1_SSI1_CLK_SEL_MASK;
2071 mux = _get_mux6(parent, &pll3_pfd_508M, &pll3_pfd_454M,
2072 &pll4_audio_main_clk, NULL, NULL, NULL);
2073 reg |= (mux << MXC_CCM_CSCMR1_SSI1_CLK_SEL_OFFSET);
2075 __raw_writel(reg, MXC_CCM_CSCMR1);
2080 static struct clk ssi1_clk = {
2081 __INIT_CLK_DEBUG(ssi1_clk)
2082 .parent = &pll3_pfd_508M,
2083 .enable_reg = MXC_CCM_CCGR5,
2084 .enable_shift = MXC_CCM_CCGRx_CG9_OFFSET,
2085 .enable = _clk_enable,
2086 .disable = _clk_disable,
2087 .set_parent = _clk_ssi1_set_parent,
2088 .set_rate = _clk_ssi1_set_rate,
2089 .round_rate = _clk_ssi_round_rate,
2090 .get_rate = _clk_ssi1_get_rate,
/*
 * SSI2 clock cluster: same shape as SSI1 but dividers live in CS2CDR;
 * gated at CCGR5 CG10.
 * NOTE(review): elided dump — braces/declarations/returns missing; verbatim.
 */
2093 static unsigned long _clk_ssi2_get_rate(struct clk *clk)
2095 u32 reg, prediv, podf;
2097 reg = __raw_readl(MXC_CCM_CS2CDR);
2099 prediv = ((reg & MXC_CCM_CS2CDR_SSI2_CLK_PRED_MASK)
2100 >> MXC_CCM_CS2CDR_SSI2_CLK_PRED_OFFSET) + 1;
2101 podf = ((reg & MXC_CCM_CS2CDR_SSI2_CLK_PODF_MASK)
2102 >> MXC_CCM_CS2CDR_SSI2_CLK_PODF_OFFSET) + 1;
2104 return clk_get_rate(clk->parent) / (prediv * podf);
/* Exact-divisor only, combined div up to 512. */
2107 static int _clk_ssi2_set_rate(struct clk *clk, unsigned long rate)
2109 u32 reg, div, pre, post;
2110 u32 parent_rate = clk_get_rate(clk->parent);
2112 div = parent_rate / rate;
2115 if (((parent_rate / div) != rate) || div > 512)
2118 __calc_pre_post_dividers(1 << 6, div, &pre, &post);
2120 reg = __raw_readl(MXC_CCM_CS2CDR);
2121 reg &= ~(MXC_CCM_CS2CDR_SSI2_CLK_PRED_MASK |
2122 MXC_CCM_CS2CDR_SSI2_CLK_PODF_MASK);
2123 reg |= (post - 1) << MXC_CCM_CS2CDR_SSI2_CLK_PODF_OFFSET;
2124 reg |= (pre - 1) << MXC_CCM_CS2CDR_SSI2_CLK_PRED_OFFSET;
2126 __raw_writel(reg, MXC_CCM_CS2CDR);
2132 static int _clk_ssi2_set_parent(struct clk *clk, struct clk *parent)
2136 reg = __raw_readl(MXC_CCM_CSCMR1)
2137 & ~MXC_CCM_CSCMR1_SSI2_CLK_SEL_MASK;
2139 mux = _get_mux6(parent, &pll3_pfd_508M, &pll3_pfd_454M,
2140 &pll4_audio_main_clk, NULL, NULL, NULL);
2141 reg |= (mux << MXC_CCM_CSCMR1_SSI2_CLK_SEL_OFFSET);
2143 __raw_writel(reg, MXC_CCM_CSCMR1);
2148 static struct clk ssi2_clk = {
2149 __INIT_CLK_DEBUG(ssi2_clk)
2150 .parent = &pll3_pfd_508M,
2151 .enable_reg = MXC_CCM_CCGR5,
2152 .enable_shift = MXC_CCM_CCGRx_CG10_OFFSET,
2153 .enable = _clk_enable,
2154 .disable = _clk_disable,
2155 .set_parent = _clk_ssi2_set_parent,
2156 .set_rate = _clk_ssi2_set_rate,
2157 .round_rate = _clk_ssi_round_rate,
2158 .get_rate = _clk_ssi2_get_rate,
/*
 * SSI3 clock cluster.  NOTE(review): _clk_ssi3_get_rate reads the SSI1
 * PRED/PODF fields of CS1CDR while _clk_ssi3_set_rate writes the SSI3
 * fields — this asymmetry looks suspicious; verify against the full file
 * and the i.MX6 reference manual before relying on get_rate here.
 * Elided dump — braces/declarations/returns missing; code verbatim.
 */
2161 static unsigned long _clk_ssi3_get_rate(struct clk *clk)
2163 u32 reg, prediv, podf;
2165 reg = __raw_readl(MXC_CCM_CS1CDR);
2167 prediv = ((reg & MXC_CCM_CS1CDR_SSI1_CLK_PRED_MASK)
2168 >> MXC_CCM_CS1CDR_SSI1_CLK_PRED_OFFSET) + 1;
2169 podf = ((reg & MXC_CCM_CS1CDR_SSI1_CLK_PODF_MASK)
2170 >> MXC_CCM_CS1CDR_SSI1_CLK_PODF_OFFSET) + 1;
2172 return clk_get_rate(clk->parent) / (prediv * podf);
/* Exact-divisor only, combined div up to 512; writes SSI3 fields. */
2175 static int _clk_ssi3_set_rate(struct clk *clk, unsigned long rate)
2177 u32 reg, div, pre, post;
2178 u32 parent_rate = clk_get_rate(clk->parent);
2180 div = parent_rate / rate;
2183 if (((parent_rate / div) != rate) || div > 512)
2186 __calc_pre_post_dividers(1 << 6, div, &pre, &post);
2188 reg = __raw_readl(MXC_CCM_CS1CDR);
2189 reg &= ~(MXC_CCM_CS1CDR_SSI3_CLK_PODF_MASK|
2190 MXC_CCM_CS1CDR_SSI3_CLK_PRED_MASK);
2191 reg |= (post - 1) << MXC_CCM_CS1CDR_SSI3_CLK_PODF_OFFSET;
2192 reg |= (pre - 1) << MXC_CCM_CS1CDR_SSI3_CLK_PRED_OFFSET;
2194 __raw_writel(reg, MXC_CCM_CS1CDR);
2200 static int _clk_ssi3_set_parent(struct clk *clk, struct clk *parent)
2204 reg = __raw_readl(MXC_CCM_CSCMR1) & ~MXC_CCM_CSCMR1_SSI3_CLK_SEL_MASK;
2206 mux = _get_mux6(parent, &pll3_pfd_508M, &pll3_pfd_454M,
2207 &pll4_audio_main_clk, NULL, NULL, NULL);
2208 reg |= (mux << MXC_CCM_CSCMR1_SSI3_CLK_SEL_OFFSET);
2210 __raw_writel(reg, MXC_CCM_CSCMR1);
2215 static struct clk ssi3_clk = {
2216 __INIT_CLK_DEBUG(ssi3_clk)
2217 .parent = &pll3_pfd_508M,
2218 .enable_reg = MXC_CCM_CCGR5,
2219 .enable_shift = MXC_CCM_CCGRx_CG11_OFFSET,
2220 .enable = _clk_enable,
2221 .disable = _clk_disable,
2222 .set_parent = _clk_ssi3_set_parent,
2223 .set_rate = _clk_ssi3_set_rate,
2224 .round_rate = _clk_ssi_round_rate,
2225 .get_rate = _clk_ssi3_get_rate,
/*
 * LDB DI round_rate: the LVDS display bridge divides its parent by 7 or
 * 3.5 (2/7); pick /7 if the request fits within a 5% tolerance, else 2/7.
 * NOTE(review): elided dump — signature tail/braces missing; verbatim.
 */
2228 static unsigned long _clk_ldb_di_round_rate(struct clk *clk,
2231 u32 parent_rate = clk_get_rate(clk->parent);
2233 if (rate * 7 <= parent_rate + parent_rate/20)
2234 return parent_rate / 7;
2236 return 2 * parent_rate / 7;
/*
 * LDB DI0 clock cluster: IPU_DIV bit in CSCMR2 chooses /7 vs 2/7 of the
 * parent; 5-input mux in CS2CDR; gated at CCGR3 CG6.
 * NOTE(review): elided dump — braces/declarations/returns missing; verbatim.
 */
2239 static unsigned long _clk_ldb_di0_get_rate(struct clk *clk)
2243 div = __raw_readl(MXC_CCM_CSCMR2) &
2244 MXC_CCM_CSCMR2_LDB_DI0_IPU_DIV;
2247 return clk_get_rate(clk->parent) / 7;
2249 return (2 * clk_get_rate(clk->parent)) / 7;
/* Chooses /7 within 5% tolerance, otherwise 2/7, and programs IPU_DIV. */
2252 static int _clk_ldb_di0_set_rate(struct clk *clk, unsigned long rate)
2255 u32 parent_rate = clk_get_rate(clk->parent);
2257 if (rate * 7 <= parent_rate + parent_rate/20) {
2259 rate = parent_rate / 7;
2261 rate = 2 * parent_rate / 7;
2263 reg = __raw_readl(MXC_CCM_CSCMR2);
2265 reg |= MXC_CCM_CSCMR2_LDB_DI0_IPU_DIV;
2267 reg &= ~MXC_CCM_CSCMR2_LDB_DI0_IPU_DIV;
2269 __raw_writel(reg, MXC_CCM_CSCMR2);
2274 static int _clk_ldb_di0_set_parent(struct clk *clk, struct clk *parent)
2278 reg = __raw_readl(MXC_CCM_CS2CDR)
2279 & ~MXC_CCM_CS2CDR_LDB_DI0_CLK_SEL_MASK;
2281 mux = _get_mux6(parent, &pll5_video_main_clk,
2282 &pll2_pfd_352M, &pll2_pfd_400M, &pll3_pfd_540M,
2283 &pll3_usb_otg_main_clk, NULL);
2284 reg |= (mux << MXC_CCM_CS2CDR_LDB_DI0_CLK_SEL_OFFSET);
2286 __raw_writel(reg, MXC_CCM_CS2CDR);
2291 static struct clk ldb_di0_clk = {
2292 __INIT_CLK_DEBUG(ldb_di0_clk)
2294 .parent = &pll3_pfd_540M,
2295 .enable_reg = MXC_CCM_CCGR3,
2296 .enable_shift = MXC_CCM_CCGRx_CG6_OFFSET,
2297 .enable = _clk_enable,
2298 .disable = _clk_disable,
2299 .set_parent = _clk_ldb_di0_set_parent,
2300 .set_rate = _clk_ldb_di0_set_rate,
2301 .round_rate = _clk_ldb_di_round_rate,
2302 .get_rate = _clk_ldb_di0_get_rate,
/*
 * LDB DI1 clock cluster: mirrors LDB DI0 using the DI1 bit/field names;
 * gated at CCGR3 CG7.
 * NOTE(review): elided dump — braces/declarations/returns missing; verbatim.
 */
2305 static unsigned long _clk_ldb_di1_get_rate(struct clk *clk)
2309 div = __raw_readl(MXC_CCM_CSCMR2) &
2310 MXC_CCM_CSCMR2_LDB_DI1_IPU_DIV;
2313 return clk_get_rate(clk->parent) / 7;
2315 return (2 * clk_get_rate(clk->parent)) / 7;
2318 static int _clk_ldb_di1_set_rate(struct clk *clk, unsigned long rate)
2321 u32 parent_rate = clk_get_rate(clk->parent);
2323 if (rate * 7 <= parent_rate + parent_rate/20) {
2325 rate = parent_rate / 7;
2327 rate = 2 * parent_rate / 7;
2329 reg = __raw_readl(MXC_CCM_CSCMR2);
2331 reg |= MXC_CCM_CSCMR2_LDB_DI1_IPU_DIV;
2333 reg &= ~MXC_CCM_CSCMR2_LDB_DI1_IPU_DIV;
2335 __raw_writel(reg, MXC_CCM_CSCMR2);
2340 static int _clk_ldb_di1_set_parent(struct clk *clk, struct clk *parent)
2344 reg = __raw_readl(MXC_CCM_CS2CDR)
2345 & ~MXC_CCM_CS2CDR_LDB_DI1_CLK_SEL_MASK;
2347 mux = _get_mux6(parent, &pll5_video_main_clk,
2348 &pll2_pfd_352M, &pll2_pfd_400M, &pll3_pfd_540M,
2349 &pll3_usb_otg_main_clk, NULL);
2350 reg |= (mux << MXC_CCM_CS2CDR_LDB_DI1_CLK_SEL_OFFSET);
2352 __raw_writel(reg, MXC_CCM_CS2CDR);
2357 static struct clk ldb_di1_clk = {
2358 __INIT_CLK_DEBUG(ldb_di1_clk)
2360 .parent = &pll3_pfd_540M,
2361 .enable_reg = MXC_CCM_CCGR3,
2362 .enable_shift = MXC_CCM_CCGRx_CG7_OFFSET,
2363 .enable = _clk_enable,
2364 .disable = _clk_disable,
2365 .set_parent = _clk_ldb_di1_set_parent,
2366 .set_rate = _clk_ldb_di1_set_rate,
2367 .round_rate = _clk_ldb_di_round_rate,
2368 .get_rate = _clk_ldb_di1_get_rate,
/*
 * Shared IPU DI round_rate: when parented on an LDB clock the divider is
 * bypassed (early return — elided here), otherwise round to an integer
 * divide of the parent.
 * NOTE(review): elided dump — signature tail/branch bodies missing; verbatim.
 */
2372 static unsigned long _clk_ipu_di_round_rate(struct clk *clk,
2376 u32 parent_rate = clk_get_rate(clk->parent);
2378 if ((clk->parent == &ldb_di0_clk) ||
2379 (clk->parent == &ldb_di1_clk))
2382 div = parent_rate / rate;
2384 /* Make sure rate is not greater than the maximum value for the clock.
2385 * Also prevent a div of 0.
2393 return parent_rate / div;
/*
 * IPU1 DI0 helpers: LDB parents bypass the CHSCCDR divider; otherwise
 * PODF divides (max 8).  set_parent programs both the pre-mux and the
 * final CLK_SEL field of CHSCCDR.
 * NOTE(review): elided dump — braces/declarations/returns and the
 * LDB-parent mux constants are missing; code verbatim.
 */
2396 static unsigned long _clk_ipu1_di0_get_rate(struct clk *clk)
2400 if ((clk->parent == &ldb_di0_clk) ||
2401 (clk->parent == &ldb_di1_clk))
2402 return clk_get_rate(clk->parent);
2404 reg = __raw_readl(MXC_CCM_CHSCCDR);
2406 div = ((reg & MXC_CCM_CHSCCDR_IPU1_DI0_PODF_MASK) >>
2407 MXC_CCM_CHSCCDR_IPU1_DI0_PODF_OFFSET) + 1;
2409 return clk_get_rate(clk->parent) / div;
2412 static int _clk_ipu1_di0_set_rate(struct clk *clk, unsigned long rate)
2415 u32 parent_rate = clk_get_rate(clk->parent);
2417 if ((clk->parent == &ldb_di0_clk) ||
2418 (clk->parent == &ldb_di1_clk)) {
2419 if (parent_rate == rate)
2425 div = parent_rate / rate;
2428 if (((parent_rate / div) != rate) || (div > 8))
2431 reg = __raw_readl(MXC_CCM_CHSCCDR);
2432 reg &= ~MXC_CCM_CHSCCDR_IPU1_DI0_PODF_MASK;
2433 reg |= (div - 1) << MXC_CCM_CHSCCDR_IPU1_DI0_PODF_OFFSET;
2434 __raw_writel(reg, MXC_CCM_CHSCCDR);
2440 static int _clk_ipu1_di0_set_parent(struct clk *clk, struct clk *parent)
2444 if (parent == &ldb_di0_clk)
2446 else if (parent == &ldb_di1_clk)
2449 reg = __raw_readl(MXC_CCM_CHSCCDR)
2450 & ~MXC_CCM_CHSCCDR_IPU1_DI0_PRE_CLK_SEL_MASK;
2452 mux = _get_mux6(parent, &mmdc_ch0_axi_clk[0],
2453 &pll3_usb_otg_main_clk, &pll5_video_main_clk,
2454 &pll2_pfd_352M, &pll2_pfd_400M, &pll3_pfd_540M);
2455 reg |= (mux << MXC_CCM_CHSCCDR_IPU1_DI0_PRE_CLK_SEL_OFFSET);
2457 __raw_writel(reg, MXC_CCM_CHSCCDR);
2459 /* Derive clock from divided pre-muxed ipu1_di0 clock.*/
2463 reg = __raw_readl(MXC_CCM_CHSCCDR)
2464 & ~MXC_CCM_CHSCCDR_IPU1_DI0_CLK_SEL_MASK;
2465 __raw_writel(reg | (mux << MXC_CCM_CHSCCDR_IPU1_DI0_CLK_SEL_OFFSET),
/*
 * IPU1 DI1 helpers: same structure as the DI0 set, using the DI1 fields
 * of CHSCCDR.
 * NOTE(review): elided dump — braces/declarations/returns missing; verbatim.
 */
2471 static unsigned long _clk_ipu1_di1_get_rate(struct clk *clk)
2475 if ((clk->parent == &ldb_di0_clk) ||
2476 (clk->parent == &ldb_di1_clk))
2477 return clk_get_rate(clk->parent);
2479 reg = __raw_readl(MXC_CCM_CHSCCDR);
2481 div = ((reg & MXC_CCM_CHSCCDR_IPU1_DI1_PODF_MASK)
2482 >> MXC_CCM_CHSCCDR_IPU1_DI1_PODF_OFFSET) + 1;
2484 return clk_get_rate(clk->parent) / div;
2487 static int _clk_ipu1_di1_set_rate(struct clk *clk, unsigned long rate)
2490 u32 parent_rate = clk_get_rate(clk->parent);
2492 if ((clk->parent == &ldb_di0_clk) ||
2493 (clk->parent == &ldb_di1_clk)) {
2494 if (parent_rate == rate)
2500 div = parent_rate / rate;
2503 if (((parent_rate / div) != rate) || (div > 8))
2506 reg = __raw_readl(MXC_CCM_CHSCCDR);
2507 reg &= ~MXC_CCM_CHSCCDR_IPU1_DI1_PODF_MASK;
2508 reg |= (div - 1) << MXC_CCM_CHSCCDR_IPU1_DI1_PODF_OFFSET;
2509 __raw_writel(reg, MXC_CCM_CHSCCDR);
2515 static int _clk_ipu1_di1_set_parent(struct clk *clk, struct clk *parent)
2519 if (parent == &ldb_di0_clk)
2521 else if (parent == &ldb_di1_clk)
2524 reg = __raw_readl(MXC_CCM_CHSCCDR)
2525 & ~MXC_CCM_CHSCCDR_IPU1_DI1_PRE_CLK_SEL_MASK;
2527 mux = _get_mux6(parent, &mmdc_ch0_axi_clk[0],
2528 &pll3_usb_otg_main_clk, &pll5_video_main_clk,
2529 &pll2_pfd_352M, &pll2_pfd_400M, &pll3_pfd_540M);
2530 reg |= (mux << MXC_CCM_CHSCCDR_IPU1_DI1_PRE_CLK_SEL_OFFSET);
2532 __raw_writel(reg, MXC_CCM_CHSCCDR);
2534 /* Derive clock from divided pre-muxed ipu1_di0 clock.*/
2537 reg = __raw_readl(MXC_CCM_CHSCCDR)
2538 & ~MXC_CCM_CHSCCDR_IPU1_DI1_CLK_SEL_MASK;
2539 __raw_writel(reg | (mux << MXC_CCM_CHSCCDR_IPU1_DI1_CLK_SEL_OFFSET),
/*
 * ipu1_di_clk[]: IPU1 display-interface clocks, default parent
 * pll5_video; gated at CCGR3 CG1 (DI0) and CG2 (DI1).
 * NOTE(review): elided dump — array-element braces missing; verbatim.
 */
2545 static struct clk ipu1_di_clk[] = {
2547 __INIT_CLK_DEBUG(ipu1_di_clk_0)
2549 .parent = &pll5_video_main_clk,
2550 .enable_reg = MXC_CCM_CCGR3,
2551 .enable_shift = MXC_CCM_CCGRx_CG1_OFFSET,
2552 .enable = _clk_enable,
2553 .disable = _clk_disable,
2554 .set_parent = _clk_ipu1_di0_set_parent,
2555 .set_rate = _clk_ipu1_di0_set_rate,
2556 .round_rate = _clk_ipu_di_round_rate,
2557 .get_rate = _clk_ipu1_di0_get_rate,
/* DI1 (element 1) */
2560 __INIT_CLK_DEBUG(ipu1_di_clk_1)
2562 .parent = &pll5_video_main_clk,
2563 .enable_reg = MXC_CCM_CCGR3,
2564 .enable_shift = MXC_CCM_CCGRx_CG2_OFFSET,
2565 .enable = _clk_enable,
2566 .disable = _clk_disable,
2567 .set_parent = _clk_ipu1_di1_set_parent,
2568 .set_rate = _clk_ipu1_di1_set_rate,
2569 .round_rate = _clk_ipu_di_round_rate,
2570 .get_rate = _clk_ipu1_di1_get_rate,
/*
 * IPU2 DI0 helpers: same structure as the IPU1 DI sets but using CSCDR2.
 * NOTE(review): elided dump — braces/declarations/returns missing; verbatim.
 */
2574 static unsigned long _clk_ipu2_di0_get_rate(struct clk *clk)
2578 if ((clk->parent == &ldb_di0_clk) ||
2579 (clk->parent == &ldb_di1_clk))
2580 return clk_get_rate(clk->parent);
2582 reg = __raw_readl(MXC_CCM_CSCDR2);
2584 div = ((reg & MXC_CCM_CSCDR2_IPU2_DI0_PODF_MASK) >>
2585 MXC_CCM_CSCDR2_IPU2_DI0_PODF_OFFSET) + 1;
2587 return clk_get_rate(clk->parent) / div;
2590 static int _clk_ipu2_di0_set_rate(struct clk *clk, unsigned long rate)
2593 u32 parent_rate = clk_get_rate(clk->parent);
2595 if ((clk->parent == &ldb_di0_clk) ||
2596 (clk->parent == &ldb_di1_clk)) {
2597 if (parent_rate == rate)
2603 div = parent_rate / rate;
2606 if (((parent_rate / div) != rate) || (div > 8))
2609 reg = __raw_readl(MXC_CCM_CSCDR2);
2610 reg &= ~MXC_CCM_CSCDR2_IPU2_DI0_PODF_MASK;
2611 reg |= (div - 1) << MXC_CCM_CSCDR2_IPU2_DI0_PODF_OFFSET;
2612 __raw_writel(reg, MXC_CCM_CSCDR2);
2617 static int _clk_ipu2_di0_set_parent(struct clk *clk, struct clk *parent)
2621 if (parent == &ldb_di0_clk)
2623 else if (parent == &ldb_di1_clk)
2626 reg = __raw_readl(MXC_CCM_CSCDR2)
2627 & ~MXC_CCM_CSCDR2_IPU2_DI0_PRE_CLK_SEL_MASK;
2629 mux = _get_mux6(parent, &mmdc_ch0_axi_clk[0],
2630 &pll3_usb_otg_main_clk, &pll5_video_main_clk,
2631 &pll2_pfd_352M, &pll2_pfd_400M, &pll3_pfd_540M);
2632 reg |= (mux << MXC_CCM_CSCDR2_IPU2_DI0_PRE_CLK_SEL_OFFSET);
2634 __raw_writel(reg, MXC_CCM_CSCDR2);
2636 /* Derive clock from divided pre-muxed ipu2_di0 clock.*/
2639 reg = __raw_readl(MXC_CCM_CSCDR2)
2640 & ~MXC_CCM_CSCDR2_IPU2_DI0_CLK_SEL_MASK;
2641 __raw_writel(reg | (mux << MXC_CCM_CSCDR2_IPU2_DI0_CLK_SEL_OFFSET),
/*
 * IPU2 DI1 helpers: same structure as IPU2 DI0, using the DI1 fields of
 * CSCDR2.  (The "ipu1_di0" wording in the original comment at 2709 looks
 * like a copy-paste leftover — it is kept verbatim below.)
 * NOTE(review): elided dump — braces/declarations/returns missing; verbatim.
 */
2647 static unsigned long _clk_ipu2_di1_get_rate(struct clk *clk)
2651 if ((clk->parent == &ldb_di0_clk) ||
2652 (clk->parent == &ldb_di1_clk))
2653 return clk_get_rate(clk->parent);
2655 reg = __raw_readl(MXC_CCM_CSCDR2);
2657 div = ((reg & MXC_CCM_CSCDR2_IPU2_DI1_PODF_MASK)
2658 >> MXC_CCM_CSCDR2_IPU2_DI1_PODF_OFFSET) + 1;
2660 return clk_get_rate(clk->parent) / div;
2663 static int _clk_ipu2_di1_set_rate(struct clk *clk, unsigned long rate)
2666 u32 parent_rate = clk_get_rate(clk->parent);
2668 if ((clk->parent == &ldb_di0_clk) ||
2669 (clk->parent == &ldb_di1_clk)) {
2670 if (parent_rate == rate)
2676 div = parent_rate / rate;
2679 if (((parent_rate / div) != rate) || (div > 8))
2682 reg = __raw_readl(MXC_CCM_CSCDR2);
2683 reg &= ~MXC_CCM_CSCDR2_IPU2_DI1_PODF_MASK;
2684 reg |= (div - 1) << MXC_CCM_CSCDR2_IPU2_DI1_PODF_OFFSET;
2685 __raw_writel(reg, MXC_CCM_CSCDR2);
2690 static int _clk_ipu2_di1_set_parent(struct clk *clk, struct clk *parent)
2694 if (parent == &ldb_di0_clk)
2696 else if (parent == &ldb_di1_clk)
2699 reg = __raw_readl(MXC_CCM_CSCDR2)
2700 & ~MXC_CCM_CSCDR2_IPU2_DI1_PRE_CLK_SEL_MASK;
2702 mux = _get_mux6(parent, &mmdc_ch0_axi_clk[0],
2703 &pll3_usb_otg_main_clk, &pll5_video_main_clk,
2704 &pll2_pfd_352M, &pll2_pfd_400M, &pll3_pfd_540M);
2705 reg |= (mux << MXC_CCM_CSCDR2_IPU2_DI1_PRE_CLK_SEL_OFFSET);
2707 __raw_writel(reg, MXC_CCM_CSCDR2);
2709 /* Derive clock from divided pre-muxed ipu1_di0 clock.*/
2712 reg = __raw_readl(MXC_CCM_CSCDR2)
2713 & ~MXC_CCM_CSCDR2_IPU2_DI1_CLK_SEL_MASK;
2714 __raw_writel(reg | (mux << MXC_CCM_CSCDR2_IPU2_DI1_CLK_SEL_OFFSET),
/*
 * ipu2_di_clk[]: IPU2 display-interface clocks, default parent
 * pll5_video; gated at CCGR3 CG4 (DI0) and CG5 (DI1).
 * NOTE(review): elided dump — array-element braces missing; verbatim.
 */
2720 static struct clk ipu2_di_clk[] = {
2722 __INIT_CLK_DEBUG(ipu2_di_clk_0)
2724 .parent = &pll5_video_main_clk,
2725 .enable_reg = MXC_CCM_CCGR3,
2726 .enable_shift = MXC_CCM_CCGRx_CG4_OFFSET,
2727 .enable = _clk_enable,
2728 .disable = _clk_disable,
2729 .set_parent = _clk_ipu2_di0_set_parent,
2730 .set_rate = _clk_ipu2_di0_set_rate,
2731 .round_rate = _clk_ipu_di_round_rate,
2732 .get_rate = _clk_ipu2_di0_get_rate,
/* DI1 (element 1) */
2735 __INIT_CLK_DEBUG(ipu2_di_clk_1)
2737 .parent = &pll5_video_main_clk,
2738 .enable_reg = MXC_CCM_CCGR3,
2739 .enable_shift = MXC_CCM_CCGRx_CG5_OFFSET,
2740 .enable = _clk_enable,
2741 .disable = _clk_disable,
2742 .set_parent = _clk_ipu2_di1_set_parent,
2743 .set_rate = _clk_ipu2_di1_set_rate,
2744 .round_rate = _clk_ipu_di_round_rate,
2745 .get_rate = _clk_ipu2_di1_get_rate,
/*
 * can2_clk[]: FlexCAN2 module clock (CCGR0 CG9) chained to its serial
 * clock (CG10) via .secondary; both run from pll3_60M.
 * NOTE(review): elided dump — array-element braces missing; verbatim.
 */
2749 static struct clk can2_clk[] = {
2751 __INIT_CLK_DEBUG(can2_module_clk)
2753 .parent = &pll3_60M,
2754 .enable_reg = MXC_CCM_CCGR0,
2755 .enable_shift = MXC_CCM_CCGRx_CG9_OFFSET,
2756 .enable = _clk_enable,
2757 .disable = _clk_disable,
2758 .secondary = &can2_clk[1],
/* serial clock (element 1) */
2761 __INIT_CLK_DEBUG(can2_serial_clk)
2763 .parent = &pll3_60M,
2764 .enable_reg = MXC_CCM_CCGR0,
2765 .enable_shift = MXC_CCM_CCGRx_CG10_OFFSET,
2766 .enable = _clk_enable,
2767 .disable = _clk_disable,
/*
 * can1_clk[]: FlexCAN1 module clock (CCGR0 CG7) chained to its serial
 * clock (CG8) via .secondary; both run from pll3_60M.
 * NOTE(review): elided dump — array-element braces missing; verbatim.
 */
2772 static struct clk can1_clk[] = {
2774 __INIT_CLK_DEBUG(can1_module_clk)
2776 .parent = &pll3_60M,
2777 .enable_reg = MXC_CCM_CCGR0,
2778 .enable_shift = MXC_CCM_CCGRx_CG7_OFFSET,
2779 .enable = _clk_enable,
2780 .disable = _clk_disable,
2781 .secondary = &can1_clk[1],
/* serial clock (element 1) */
2784 __INIT_CLK_DEBUG(can1_serial_clk)
2786 .parent = &pll3_60M,
2787 .enable_reg = MXC_CCM_CCGR0,
2788 .enable_shift = MXC_CCM_CCGRx_CG8_OFFSET,
2789 .enable = _clk_enable,
2790 .disable = _clk_disable,
/*
 * SPDIF round_rate: split the divisor into pre/post stages (post limited
 * to 1<<3 = 8) and return the achievable rate.
 * NOTE(review): elided dump — signature tail/braces missing; verbatim.
 */
2794 static unsigned long _clk_spdif_round_rate(struct clk *clk,
2798 u32 parent_rate = clk_get_rate(clk->parent);
2799 u32 div = parent_rate / rate;
2801 if (parent_rate % rate)
2804 __calc_pre_post_dividers(1 << 3, div, &pre, &post);
2806 return parent_rate / (pre * post);
/*
 * SPDIF0 clock cluster: 4-input mux and PRED/PODF dividers in CDCDR;
 * element 0 is the gated functional clock (CCGR5 CG7), element 1 chains
 * to spba_clk as a secondary.
 * NOTE(review): elided dump — braces/declarations/returns missing; verbatim.
 */
2809 static int _clk_spdif0_set_parent(struct clk *clk, struct clk *parent)
2813 reg = __raw_readl(MXC_CCM_CDCDR)
2814 & ~MXC_CCM_CDCDR_SPDIF0_CLK_SEL_MASK;
2816 mux = _get_mux6(parent, &pll4_audio_main_clk,
2817 &pll3_pfd_508M, &pll3_pfd_454M,
2818 &pll3_sw_clk, NULL, NULL);
2819 reg |= mux << MXC_CCM_CDCDR_SPDIF0_CLK_SEL_OFFSET;
2821 __raw_writel(reg, MXC_CCM_CDCDR);
/* Rate = parent / ((PRED + 1) * (PODF + 1)). */
2826 static unsigned long _clk_spdif0_get_rate(struct clk *clk)
2828 u32 reg, pred, podf;
2830 reg = __raw_readl(MXC_CCM_CDCDR);
2832 pred = ((reg & MXC_CCM_CDCDR_SPDIF0_CLK_PRED_MASK)
2833 >> MXC_CCM_CDCDR_SPDIF0_CLK_PRED_OFFSET) + 1;
2834 podf = ((reg & MXC_CCM_CDCDR_SPDIF0_CLK_PODF_MASK)
2835 >> MXC_CCM_CDCDR_SPDIF0_CLK_PODF_OFFSET) + 1;
2837 return clk_get_rate(clk->parent) / (pred * podf);
/* Exact-divisor only, combined div up to 64. */
2840 static int _clk_spdif0_set_rate(struct clk *clk, unsigned long rate)
2842 u32 reg, div, pre, post;
2843 u32 parent_rate = clk_get_rate(clk->parent);
2845 div = parent_rate / rate;
2848 if (((parent_rate / div) != rate) || div > 64)
2851 __calc_pre_post_dividers(1 << 3, div, &pre, &post);
2853 reg = __raw_readl(MXC_CCM_CDCDR);
2854 reg &= ~(MXC_CCM_CDCDR_SPDIF0_CLK_PRED_MASK|
2855 MXC_CCM_CDCDR_SPDIF0_CLK_PODF_MASK);
2856 reg |= (post - 1) << MXC_CCM_CDCDR_SPDIF0_CLK_PODF_OFFSET;
2857 reg |= (pre - 1) << MXC_CCM_CDCDR_SPDIF0_CLK_PRED_OFFSET;
2859 __raw_writel(reg, MXC_CCM_CDCDR);
2864 static struct clk spdif0_clk[] = {
2866 __INIT_CLK_DEBUG(spdif0_clk_0)
2868 .parent = &pll3_sw_clk,
2869 .enable = _clk_enable,
2870 .enable_reg = MXC_CCM_CCGR5,
2871 .enable_shift = MXC_CCM_CCGRx_CG7_OFFSET,
2872 .disable = _clk_disable,
2873 .secondary = &spdif0_clk[1],
2874 .set_rate = _clk_spdif0_set_rate,
2875 .get_rate = _clk_spdif0_get_rate,
2876 .set_parent = _clk_spdif0_set_parent,
2877 .round_rate = _clk_spdif_round_rate,
/* element 1: chains the SPBA bus clock */
2880 __INIT_CLK_DEBUG(spdif0_clk_1)
2883 .secondary = &spba_clk,
/* ESAI clock: same pred/podf scheme as SPDIF0 but the mux lives in
 * CSCMR2 and the dividers in CS1CDR. Parent options mirror SPDIF0.
 * NOTE(review): listing omits dropped lines; code left byte-identical. */
2887 static unsigned long _clk_esai_round_rate(struct clk *clk,
2891 u32 parent_rate = clk_get_rate(clk->parent);
2892 u32 div = parent_rate / rate;
2894 if (parent_rate % rate)
2897 __calc_pre_post_dividers(1 << 3, div, &pre, &post);
2899 return parent_rate / (pre * post);
2902 static int _clk_esai_set_parent(struct clk *clk, struct clk *parent)
2906 reg = __raw_readl(MXC_CCM_CSCMR2) & ~MXC_CCM_CSCMR2_ESAI_CLK_SEL_MASK;
2908 mux = _get_mux6(parent, &pll4_audio_main_clk, &pll3_pfd_508M,
2909 &pll3_pfd_454M, &pll3_sw_clk, NULL, NULL);
2910 reg |= mux << MXC_CCM_CSCMR2_ESAI_CLK_SEL_OFFSET;
2912 __raw_writel(reg, MXC_CCM_CSCMR2);
2917 static unsigned long _clk_esai_get_rate(struct clk *clk)
2919 u32 reg, pred, podf;
2921 reg = __raw_readl(MXC_CCM_CS1CDR);
/* Fields store (divider - 1). */
2923 pred = ((reg & MXC_CCM_CS1CDR_ESAI_CLK_PRED_MASK)
2924 >> MXC_CCM_CS1CDR_ESAI_CLK_PRED_OFFSET) + 1;
2925 podf = ((reg & MXC_CCM_CS1CDR_ESAI_CLK_PODF_MASK)
2926 >> MXC_CCM_CS1CDR_ESAI_CLK_PODF_OFFSET) + 1;
2928 return clk_get_rate(clk->parent) / (pred * podf);
2931 static int _clk_esai_set_rate(struct clk *clk, unsigned long rate)
2933 u32 reg, div, pre, post;
2934 u32 parent_rate = clk_get_rate(clk->parent);
2936 div = parent_rate / rate;
/* Exact divide required; pre*post limit is 64. */
2939 if (((parent_rate / div) != rate) || div > 64)
2942 __calc_pre_post_dividers(1 << 3, div, &pre, &post);
2944 reg = __raw_readl(MXC_CCM_CS1CDR);
2945 reg &= ~(MXC_CCM_CS1CDR_ESAI_CLK_PRED_MASK|
2946 MXC_CCM_CS1CDR_ESAI_CLK_PODF_MASK);
2947 reg |= (post - 1) << MXC_CCM_CS1CDR_ESAI_CLK_PODF_OFFSET;
2948 reg |= (pre - 1) << MXC_CCM_CS1CDR_ESAI_CLK_PRED_OFFSET;
2950 __raw_writel(reg, MXC_CCM_CS1CDR);
/* Gated by CCGR1 CG8. */
2955 static struct clk esai_clk = {
2956 __INIT_CLK_DEBUG(esai_clk)
2958 .parent = &pll3_sw_clk,
2959 .enable_reg = MXC_CCM_CCGR1,
2960 .enable_shift = MXC_CCM_CCGRx_CG8_OFFSET,
2961 .enable = _clk_enable,
2962 .disable = _clk_disable,
2963 .set_rate = _clk_esai_set_rate,
2964 .get_rate = _clk_esai_get_rate,
2965 .set_parent = _clk_esai_set_parent,
2966 .round_rate = _clk_esai_round_rate,
/* ENET clock: enable/disable manipulate the anadig PLL8 control register
 * directly; rate is derived from PLL8's 500 MHz output via DIV_SELECT.
 * NOTE(review): enable clears ANADIG_PLL_ENABLE while disable sets it,
 * which reads inverted — presumably correct for this anadig PLL's gating
 * polarity, but verify against the i.MX6 ANATOP PLL_ENET register spec.
 * Listing omits dropped lines; code left byte-identical. */
2969 static int _clk_enet_enable(struct clk *clk)
/* Take PLL8 out of bypass so the ENET ref clock runs. */
2973 /* Enable ENET ref clock */
2974 reg = __raw_readl(PLL8_ENET_BASE_ADDR);
2975 reg &= ~ANADIG_PLL_BYPASS;
2976 reg &= ~ANADIG_PLL_ENABLE;
2977 __raw_writel(reg, PLL8_ENET_BASE_ADDR);
2983 static void _clk_enet_disable(struct clk *clk)
/* Gate the ENET ref clock by re-entering bypass (mirror of enable). */
2989 /* Disable ENET ref clock */
2990 reg = __raw_readl(PLL8_ENET_BASE_ADDR);
2991 reg |= ANADIG_PLL_BYPASS;
2992 reg |= ANADIG_PLL_ENABLE;
2993 __raw_writel(reg, PLL8_ENET_BASE_ADDR);
2996 static int _clk_enet_set_rate(struct clk *clk, unsigned long rate)
/* NOTE(review): lines 2999-3015 (presumably a switch mapping rate to a
 * DIV_SELECT value) are missing from this listing; as shown, div stays 1. */
2998 unsigned int reg, div = 1;
3016 reg = __raw_readl(PLL8_ENET_BASE_ADDR);
3017 reg &= ~ANADIG_PLL_ENET_DIV_SELECT_MASK;
3018 reg |= (div << ANADIG_PLL_ENET_DIV_SELECT_OFFSET);
3019 __raw_writel(reg, PLL8_ENET_BASE_ADDR);
3024 static unsigned long _clk_enet_get_rate(struct clk *clk)
/* DIV_SELECT field sits at bit 0, so no shift is needed after masking. */
3028 div = (__raw_readl(PLL8_ENET_BASE_ADDR))
3029 & ANADIG_PLL_ENET_DIV_SELECT_MASK;
3031 return 500000000 / (div + 1);
/* Gated by CCGR1 CG5; parent is the PLL8 (ENET) main clock. */
3034 static struct clk enet_clk = {
3035 __INIT_CLK_DEBUG(enet_clk)
3037 .parent = &pll8_enet_main_clk,
3038 .enable_reg = MXC_CCM_CCGR1,
3039 .enable_shift = MXC_CCM_CCGRx_CG5_OFFSET,
3040 .enable = _clk_enet_enable,
3041 .disable = _clk_enet_disable,
3042 .set_rate = _clk_enet_set_rate,
3043 .get_rate = _clk_enet_get_rate,
/* ECSPI1..5 gate clocks: all five instances run from pll3_60M and use
 * consecutive CCGR1 gate fields CG0..CG4. No rate ops — fixed 60 MHz. */
3046 static struct clk ecspi_clk[] = {
3048 __INIT_CLK_DEBUG(ecspi0_clk)
3050 .parent = &pll3_60M,
3051 .enable_reg = MXC_CCM_CCGR1,
3052 .enable_shift = MXC_CCM_CCGRx_CG0_OFFSET,
3053 .enable = _clk_enable,
3054 .disable = _clk_disable,
3057 __INIT_CLK_DEBUG(ecspi1_clk)
3059 .parent = &pll3_60M,
3060 .enable_reg = MXC_CCM_CCGR1,
3061 .enable_shift = MXC_CCM_CCGRx_CG1_OFFSET,
3062 .enable = _clk_enable,
3063 .disable = _clk_disable,
3066 __INIT_CLK_DEBUG(ecspi2_clk)
3068 .parent = &pll3_60M,
3069 .enable_reg = MXC_CCM_CCGR1,
3070 .enable_shift = MXC_CCM_CCGRx_CG2_OFFSET,
3071 .enable = _clk_enable,
3072 .disable = _clk_disable,
3075 __INIT_CLK_DEBUG(ecspi3_clk)
3077 .parent = &pll3_60M,
3078 .enable_reg = MXC_CCM_CCGR1,
3079 .enable_shift = MXC_CCM_CCGRx_CG3_OFFSET,
3080 .enable = _clk_enable,
3081 .disable = _clk_disable,
3084 __INIT_CLK_DEBUG(ecspi4_clk)
3086 .parent = &pll3_60M,
3087 .enable_reg = MXC_CCM_CCGR1,
3088 .enable_shift = MXC_CCM_CCGRx_CG4_OFFSET,
3089 .enable = _clk_enable,
3090 .disable = _clk_disable,
/* ACLK_EMI_SLOW: single podf divider (3-bit, max 8) in CSCMR1, mux among
 * axi / pll3 / pll2_pfd_400M / pll2_pfd_352M.
 * NOTE(review): listing omits dropped lines; code left byte-identical. */
3094 static unsigned long _clk_emi_slow_round_rate(struct clk *clk,
3098 u32 parent_rate = clk_get_rate(clk->parent);
3100 div = parent_rate / rate;
3102 /* Make sure rate is not greater than the maximum value for the clock.
3103 * Also prevent a div of 0.
3111 return parent_rate / div;
3114 static int _clk_emi_slow_set_parent(struct clk *clk, struct clk *parent)
3117 u32 reg = __raw_readl(MXC_CCM_CSCMR1)
3118 & ~MXC_CCM_CSCMR1_ACLK_EMI_SLOW_MASK;
3120 mux = _get_mux6(parent, &axi_clk, &pll3_usb_otg_main_clk,
3121 &pll2_pfd_400M, &pll2_pfd_352M, NULL, NULL);
3122 reg |= (mux << MXC_CCM_CSCMR1_ACLK_EMI_SLOW_OFFSET);
3123 __raw_writel(reg, MXC_CCM_CSCMR1);
3128 static unsigned long _clk_emi_slow_get_rate(struct clk *clk)
3132 reg = __raw_readl(MXC_CCM_CSCMR1);
/* Field stores (divider - 1). */
3133 div = ((reg & MXC_CCM_CSCMR1_ACLK_EMI_SLOW_PODF_MASK) >>
3134 MXC_CCM_CSCMR1_ACLK_EMI_SLOW_PODF_OFFSET) + 1;
3136 return clk_get_rate(clk->parent) / div;
3139 static int _clk_emi_slow_set_rate(struct clk *clk, unsigned long rate)
3142 u32 parent_rate = clk_get_rate(clk->parent);
3144 div = parent_rate / rate;
/* Exact divide required; 3-bit podf limits div to 8. */
3147 if (((parent_rate / div) != rate) || (div > 8))
3150 reg = __raw_readl(MXC_CCM_CSCMR1);
3151 reg &= ~MXC_CCM_CSCMR1_ACLK_EMI_SLOW_PODF_MASK;
3152 reg |= (div - 1) << MXC_CCM_CSCMR1_ACLK_EMI_SLOW_PODF_OFFSET;
3153 __raw_writel(reg, MXC_CCM_CSCMR1);
/* Gated by CCGR6 CG5. */
3158 static struct clk emi_slow_clk = {
3159 __INIT_CLK_DEBUG(emi_slow_clk)
3162 .enable_reg = MXC_CCM_CCGR6,
3163 .enable_shift = MXC_CCM_CCGRx_CG5_OFFSET,
3164 .enable = _clk_enable,
3165 .disable = _clk_disable,
3166 .set_rate = _clk_emi_slow_set_rate,
3167 .get_rate = _clk_emi_slow_get_rate,
3168 .round_rate = _clk_emi_slow_round_rate,
3169 .set_parent = _clk_emi_slow_set_parent,
/* ACLK_EMI: structurally identical to emi_slow above but uses the
 * ACLK_EMI (not ACLK_EMI_SLOW) mux/podf fields of CSCMR1, and the clk
 * struct exposes no CCGR gate. Listing omits dropped lines. */
3172 static unsigned long _clk_emi_round_rate(struct clk *clk,
3176 u32 parent_rate = clk_get_rate(clk->parent);
3178 div = parent_rate / rate;
3180 /* Make sure rate is not greater than the maximum value for the clock.
3181 * Also prevent a div of 0.
3189 return parent_rate / div;
3192 static int _clk_emi_set_parent(struct clk *clk, struct clk *parent)
3195 u32 reg = __raw_readl(MXC_CCM_CSCMR1) & ~MXC_CCM_CSCMR1_ACLK_EMI_MASK;
3197 mux = _get_mux6(parent, &axi_clk, &pll3_usb_otg_main_clk,
3198 &pll2_pfd_400M, &pll2_pfd_352M, NULL, NULL);
3199 reg |= (mux << MXC_CCM_CSCMR1_ACLK_EMI_OFFSET);
3200 __raw_writel(reg, MXC_CCM_CSCMR1);
3205 static unsigned long _clk_emi_get_rate(struct clk *clk)
3209 reg = __raw_readl(MXC_CCM_CSCMR1);
3210 div = ((reg & MXC_CCM_CSCMR1_ACLK_EMI_PODF_MASK) >>
3211 MXC_CCM_CSCMR1_ACLK_EMI_PODF_OFFSET) + 1;
3213 return clk_get_rate(clk->parent) / div;
3216 static int _clk_emi_set_rate(struct clk *clk, unsigned long rate)
3219 u32 parent_rate = clk_get_rate(clk->parent);
3221 div = parent_rate / rate;
3224 if (((parent_rate / div) != rate) || (div > 8))
3227 reg = __raw_readl(MXC_CCM_CSCMR1);
3228 reg &= ~MXC_CCM_CSCMR1_ACLK_EMI_PODF_MASK;
3229 reg |= (div - 1) << MXC_CCM_CSCMR1_ACLK_EMI_PODF_OFFSET;
3230 __raw_writel(reg, MXC_CCM_CSCMR1);
/* No enable/disable ops: this node only carries the rate/mux controls. */
3235 static struct clk emi_clk = {
3236 __INIT_CLK_DEBUG(emi_clk)
3239 .set_rate = _clk_emi_set_rate,
3240 .get_rate = _clk_emi_get_rate,
3241 .round_rate = _clk_emi_round_rate,
3242 .set_parent = _clk_emi_set_parent,
/* ENFC (NAND flash controller) clock: pred/podf pair in CS2CDR with 6-bit
 * fields (__calc_pre_post_dividers bound 1<<6, combined limit 512), mux
 * among pll2_pfd_352M / pll2 / pll3 / pll2_pfd_400M.
 * NOTE(review): listing omits dropped lines; code left byte-identical. */
3245 static unsigned long _clk_enfc_round_rate(struct clk *clk,
3249 u32 parent_rate = clk_get_rate(clk->parent);
3250 u32 div = parent_rate / rate;
3252 if (parent_rate % rate)
3255 __calc_pre_post_dividers(1 << 6, div, &pre, &post);
3257 return parent_rate / (pre * post);
3260 static int _clk_enfc_set_parent(struct clk *clk, struct clk *parent)
3264 reg = __raw_readl(MXC_CCM_CS2CDR)
3265 & ~MXC_CCM_CS2CDR_ENFC_CLK_SEL_MASK;
3267 mux = _get_mux6(parent, &pll2_pfd_352M,
3268 &pll2_528_bus_main_clk, &pll3_usb_otg_main_clk,
3269 &pll2_pfd_400M, NULL, NULL);
3270 reg |= mux << MXC_CCM_CS2CDR_ENFC_CLK_SEL_OFFSET;
3272 __raw_writel(reg, MXC_CCM_CS2CDR);
3277 static unsigned long _clk_enfc_get_rate(struct clk *clk)
3279 u32 reg, pred, podf;
3281 reg = __raw_readl(MXC_CCM_CS2CDR);
/* Fields store (divider - 1). */
3283 pred = ((reg & MXC_CCM_CS2CDR_ENFC_CLK_PRED_MASK)
3284 >> MXC_CCM_CS2CDR_ENFC_CLK_PRED_OFFSET) + 1;
3285 podf = ((reg & MXC_CCM_CS2CDR_ENFC_CLK_PODF_MASK)
3286 >> MXC_CCM_CS2CDR_ENFC_CLK_PODF_OFFSET) + 1;
3288 return clk_get_rate(clk->parent) / (pred * podf);
3291 static int _clk_enfc_set_rate(struct clk *clk, unsigned long rate)
3293 u32 reg, div, pre, post;
3294 u32 parent_rate = clk_get_rate(clk->parent);
3296 div = parent_rate / rate;
/* Exact divide required; 8-bit combined divider limit is 512. */
3299 if (((parent_rate / div) != rate) || div > 512)
3302 __calc_pre_post_dividers(1 << 6, div, &pre, &post);
3304 reg = __raw_readl(MXC_CCM_CS2CDR);
3305 reg &= ~(MXC_CCM_CS2CDR_ENFC_CLK_PRED_MASK|
3306 MXC_CCM_CS2CDR_ENFC_CLK_PODF_MASK);
3307 reg |= (post - 1) << MXC_CCM_CS2CDR_ENFC_CLK_PODF_OFFSET;
3308 reg |= (pre - 1) << MXC_CCM_CS2CDR_ENFC_CLK_PRED_OFFSET;
3310 __raw_writel(reg, MXC_CCM_CS2CDR);
/* Gated by CCGR2 CG7. */
3315 static struct clk enfc_clk = {
3316 __INIT_CLK_DEBUG(enfc_clk)
3318 .parent = &pll2_pfd_352M,
3319 .enable_reg = MXC_CCM_CCGR2,
3320 .enable_shift = MXC_CCM_CCGRx_CG7_OFFSET,
3321 .enable = _clk_enable,
3322 .disable = _clk_disable,
3323 .set_rate = _clk_enfc_set_rate,
3324 .get_rate = _clk_enfc_get_rate,
3325 .round_rate = _clk_enfc_round_rate,
3326 .set_parent = _clk_enfc_set_parent,
/* Round a requested UART rate to what the CSCDR1 podf divider can make
 * (integer divide of the parent rate).
 * NOTE(review): listing omits dropped lines; code left byte-identical. */
3329 static unsigned long _clk_uart_round_rate(struct clk *clk,
3333 u32 parent_rate = clk_get_rate(clk->parent);
3335 div = parent_rate / rate;
3337 /* Make sure rate is not greater than the maximum value for the clock.
3338 * Also prevent a div of 0.
3346 return parent_rate / div;
3349 static int _clk_uart_set_rate(struct clk *clk, unsigned long rate)
3352 u32 parent_rate = clk_get_rate(clk->parent);
3354 div = parent_rate / rate;
3357 if (((parent_rate / div) != rate) || (div > 64))
3360 reg = __raw_readl(MXC_CCM_CSCDR1) & MXC_CCM_CSCDR1_UART_CLK_PODF_MASK;
3361 reg |= ((div - 1) << MXC_CCM_CSCDR1_UART_CLK_PODF_OFFSET);
3363 __raw_writel(reg, MXC_CCM_CSCDR1);
/* UART per-module clock (uart_clk[0], CCGR5 CG12) chained to the serial
 * gate (uart_clk[1], CG13); rate comes from pll3_80M through the shared
 * CSCDR1 UART podf divider.
 * NOTE(review): listing omits dropped lines; code left byte-identical. */
3368 static unsigned long _clk_uart_get_rate(struct clk *clk)
/* Field stores (divider - 1). */
3373 reg = __raw_readl(MXC_CCM_CSCDR1) & MXC_CCM_CSCDR1_UART_CLK_PODF_MASK;
3374 div = (reg >> MXC_CCM_CSCDR1_UART_CLK_PODF_OFFSET) + 1;
3375 val = clk_get_rate(clk->parent) / div;
3380 static struct clk uart_clk[] = {
3382 __INIT_CLK_DEBUG(uart_clk)
3384 .parent = &pll3_80M,
3385 .enable_reg = MXC_CCM_CCGR5,
3386 .enable_shift = MXC_CCM_CCGRx_CG12_OFFSET,
3387 .enable = _clk_enable,
3388 .disable = _clk_disable,
3389 .secondary = &uart_clk[1],
3390 .set_rate = _clk_uart_set_rate,
3391 .get_rate = _clk_uart_get_rate,
3392 .round_rate = _clk_uart_round_rate,
3395 __INIT_CLK_DEBUG(uart_serial_clk)
3397 .enable_reg = MXC_CCM_CCGR5,
3398 .enable_shift = MXC_CCM_CCGRx_CG13_OFFSET,
3399 .enable = _clk_enable,
3400 .disable = _clk_disable,
/* HSI (MIPI HSI) TX clock: 3-bit podf in CDCDR (max div 8); the mux is a
 * single select bit — pll2_pfd_400M when set, otherwise the other source.
 * NOTE(review): listing omits dropped lines; code left byte-identical. */
3404 static unsigned long _clk_hsi_tx_round_rate(struct clk *clk,
3408 u32 parent_rate = clk_get_rate(clk->parent);
3410 div = parent_rate / rate;
3412 /* Make sure rate is not greater than the maximum value for the clock.
3413 * Also prevent a div of 0.
3421 return parent_rate / div;
3424 static int _clk_hsi_tx_set_parent(struct clk *clk, struct clk *parent)
3426 u32 reg = __raw_readl(MXC_CCM_CDCDR) & ~MXC_CCM_CDCDR_HSI_TX_CLK_SEL;
/* Single-bit mux: set selects pll2_pfd_400M. */
3428 if (parent == &pll2_pfd_400M)
3429 reg |= (MXC_CCM_CDCDR_HSI_TX_CLK_SEL);
3431 __raw_writel(reg, MXC_CCM_CDCDR);
3436 static unsigned long _clk_hsi_tx_get_rate(struct clk *clk)
3440 reg = __raw_readl(MXC_CCM_CDCDR);
3441 div = ((reg & MXC_CCM_CDCDR_HSI_TX_PODF_MASK) >>
3442 MXC_CCM_CDCDR_HSI_TX_PODF_OFFSET) + 1;
3444 return clk_get_rate(clk->parent) / div;
3447 static int _clk_hsi_tx_set_rate(struct clk *clk, unsigned long rate)
3450 u32 parent_rate = clk_get_rate(clk->parent);
3452 div = parent_rate / rate;
3455 if (((parent_rate / div) != rate) || (div > 8))
3458 reg = __raw_readl(MXC_CCM_CDCDR);
3459 reg &= ~MXC_CCM_CDCDR_HSI_TX_PODF_MASK;
3460 reg |= (div - 1) << MXC_CCM_CDCDR_HSI_TX_PODF_OFFSET;
3461 __raw_writel(reg, MXC_CCM_CDCDR);
/* Gated by CCGR3 CG8. */
3466 static struct clk hsi_tx_clk = {
3467 __INIT_CLK_DEBUG(hsi_tx_clk)
3469 .parent = &pll2_pfd_400M,
3470 .enable_reg = MXC_CCM_CCGR3,
3471 .enable_shift = MXC_CCM_CCGRx_CG8_OFFSET,
3472 .enable = _clk_enable,
3473 .disable = _clk_disable,
3474 .set_parent = _clk_hsi_tx_set_parent,
3475 .round_rate = _clk_hsi_tx_round_rate,
3476 .set_rate = _clk_hsi_tx_set_rate,
3477 .get_rate = _clk_hsi_tx_get_rate,
/* HDMI gate clocks: [0] ISFR (CCGR2 CG2, from pll3_pfd_540M),
 * [1] IAHB (CCGR2 CG0). */
3480 static struct clk hdmi_clk[] = {
3482 __INIT_CLK_DEBUG(hdmi_isfr_clk)
3484 .parent = &pll3_pfd_540M,
3485 .enable_reg = MXC_CCM_CCGR2,
3486 .enable_shift = MXC_CCM_CCGRx_CG2_OFFSET,
3487 .enable = _clk_enable,
3488 .disable = _clk_disable,
3491 __INIT_CLK_DEBUG(hdmi_iahb_clk)
3494 .enable_reg = MXC_CCM_CCGR2,
3495 .enable_shift = MXC_CCM_CCGRx_CG0_OFFSET,
3496 .enable = _clk_enable,
3497 .disable = _clk_disable,
/* CAAM (crypto) gate clocks: mem -> aclk -> ipg chained via .secondary
 * as far as this listing shows ([0] chains to [1]).
 * NOTE(review): mem ([0]) and ipg ([2]) both use CCGR0 CG4 here while
 * aclk uses CG5 — confirm the CG assignment against the CCM chapter. */
3501 static struct clk caam_clk[] = {
3503 __INIT_CLK_DEBUG(caam_mem_clk)
3505 .enable_reg = MXC_CCM_CCGR0,
3506 .enable_shift = MXC_CCM_CCGRx_CG4_OFFSET,
3507 .enable = _clk_enable,
3508 .disable = _clk_disable,
3509 .secondary = &caam_clk[1],
3512 __INIT_CLK_DEBUG(caam_aclk_clk)
3514 .enable_reg = MXC_CCM_CCGR0,
3515 .enable_shift = MXC_CCM_CCGRx_CG5_OFFSET,
3516 .enable = _clk_enable,
3517 .disable = _clk_disable,
3520 __INIT_CLK_DEBUG(caam_ipg_clk)
3522 .enable_reg = MXC_CCM_CCGR0,
3523 .enable_shift = MXC_CCM_CCGRx_CG4_OFFSET,
3524 .enable = _clk_enable,
3525 .disable = _clk_disable,
/* ASRC serial clock: reuses the CDCDR "SPDIF1" mux and pred/podf fields
 * (see the in-tree note below) — same 3-bit pred/podf scheme as SPDIF0.
 * NOTE(review): listing omits dropped lines; code left byte-identical. */
3529 static int _clk_asrc_serial_set_parent(struct clk *clk, struct clk *parent)
3533 reg = __raw_readl(MXC_CCM_CDCDR) & ~MXC_CCM_CDCDR_SPDIF1_CLK_SEL_MASK;
3535 mux = _get_mux6(parent, &pll4_audio_main_clk, &pll3_pfd_508M,
3536 &pll3_pfd_454M, &pll3_sw_clk, NULL, NULL);
3537 reg |= mux << MXC_CCM_CDCDR_SPDIF1_CLK_SEL_OFFSET;
3539 __raw_writel(reg, MXC_CCM_CDCDR);
3544 static unsigned long _clk_asrc_serial_get_rate(struct clk *clk)
3546 u32 reg, pred, podf;
3548 reg = __raw_readl(MXC_CCM_CDCDR);
/* Fields store (divider - 1). */
3550 pred = ((reg & MXC_CCM_CDCDR_SPDIF1_CLK_PRED_MASK)
3551 >> MXC_CCM_CDCDR_SPDIF1_CLK_PRED_OFFSET) + 1;
3552 podf = ((reg & MXC_CCM_CDCDR_SPDIF1_CLK_PODF_MASK)
3553 >> MXC_CCM_CDCDR_SPDIF1_CLK_PODF_OFFSET) + 1;
3555 return clk_get_rate(clk->parent) / (pred * podf);
3558 static int _clk_asrc_serial_set_rate(struct clk *clk, unsigned long rate)
3560 u32 reg, div, pre, post;
3561 u32 parent_rate = clk_get_rate(clk->parent);
3563 div = parent_rate / rate;
3566 if (((parent_rate / div) != rate) || div > 64)
3569 __calc_pre_post_dividers(1 << 3, div, &pre, &post);
3571 reg = __raw_readl(MXC_CCM_CDCDR);
3572 reg &= ~(MXC_CCM_CDCDR_SPDIF1_CLK_PRED_MASK|
3573 MXC_CCM_CDCDR_SPDIF1_CLK_PODF_MASK);
3574 reg |= (post - 1) << MXC_CCM_CDCDR_SPDIF1_CLK_PODF_OFFSET;
3575 reg |= (pre - 1) << MXC_CCM_CDCDR_SPDIF1_CLK_PRED_OFFSET;
3577 __raw_writel(reg, MXC_CCM_CDCDR);
3582 static unsigned long _clk_asrc_serial_round_rate(struct clk *clk,
3586 u32 parent_rate = clk_get_rate(clk->parent);
3587 u32 div = parent_rate / rate;
3589 if (parent_rate % rate)
3592 __calc_pre_post_dividers(1 << 3, div, &pre, &post);
3594 return parent_rate / (pre * post);
/* asrc_clk[0]: gated ASRC module clock (CCGR0 CG3);
 * asrc_clk[1]: ungated serial clock (see original comment below). */
3597 static struct clk asrc_clk[] = {
3599 __INIT_CLK_DEBUG(asrc_clk)
3601 .parent = &pll4_audio_main_clk,
3602 .enable_reg = MXC_CCM_CCGR0,
3603 .enable_shift = MXC_CCM_CCGRx_CG3_OFFSET,
3604 .enable = _clk_enable,
3605 .disable = _clk_disable,
3608 /*In the MX6 spec, asrc_serial_clk is listed as SPDIF1 clk
3609 * This clock can never be gated and does not have any
3610 * CCGR bits associated with it.
3612 __INIT_CLK_DEBUG(asrc_serial_clk)
3614 .parent = &pll3_sw_clk,
3615 .set_rate = _clk_asrc_serial_set_rate,
3616 .get_rate = _clk_asrc_serial_get_rate,
3617 .set_parent = _clk_asrc_serial_set_parent,
3618 .round_rate = _clk_asrc_serial_round_rate,
/* APBH-DMA gate (CCGR0 CG2). _clk_disable_inwait leaves the gate enabled
 * in WAIT mode, per the other inwait users below. Parent usdhc3_clk is
 * how the BSP models the shared root — TODO confirm. */
3622 static struct clk apbh_dma_clk = {
3623 __INIT_CLK_DEBUG(apbh_dma_clk)
3624 .parent = &usdhc3_clk,
3625 .enable = _clk_enable,
3626 .disable = _clk_disable_inwait,
3627 .enable_reg = MXC_CCM_CCGR0,
3628 .enable_shift = MXC_CCM_CCGRx_CG2_OFFSET,
/* AIPS-TZ2/TZ1 bus gates (CCGR0 CG1/CG0), also kept on in WAIT mode. */
3631 static struct clk aips_tz2_clk = {
3632 __INIT_CLK_DEBUG(aips_tz2_clk)
3634 .enable_reg = MXC_CCM_CCGR0,
3635 .enable_shift = MXC_CCM_CCGRx_CG1_OFFSET,
3636 .enable = _clk_enable,
3637 .disable = _clk_disable_inwait,
3640 static struct clk aips_tz1_clk = {
3641 __INIT_CLK_DEBUG(aips_tz1_clk)
3643 .enable_reg = MXC_CCM_CCGR0,
3644 .enable_shift = MXC_CCM_CCGRx_CG0_OFFSET,
3645 .enable = _clk_enable,
3646 .disable = _clk_disable_inwait,
/* OpenVG AXI gate (CCGR3 CG15). */
3650 static struct clk openvg_axi_clk = {
3651 __INIT_CLK_DEBUG(openvg_axi_clk)
3652 .enable = _clk_enable,
3653 .enable_reg = MXC_CCM_CCGR3,
3654 .enable_shift = MXC_CCM_CCGRx_CG15_OFFSET,
3655 .disable = _clk_disable,
/* GPU3D core clock: CBCMR mux (mmdc_ch0_axi / pll3 / pll2_pfd_594M /
 * pll2_pfd_400M) plus a podf divider in CBCMR.
 * NOTE(review): listing omits dropped lines; code left byte-identical. */
3658 static unsigned long _clk_gpu3d_core_round_rate(struct clk *clk,
3662 u32 parent_rate = clk_get_rate(clk->parent);
3664 div = parent_rate / rate;
3666 /* Make sure rate is not greater than the maximum value for the clock.
3667 * Also prevent a div of 0.
3675 return parent_rate / div;
3678 static int _clk_gpu3d_core_set_parent(struct clk *clk, struct clk *parent)
3681 u32 reg = __raw_readl(MXC_CCM_CBCMR)
3682 & ~MXC_CCM_CBCMR_GPU3D_CORE_CLK_SEL_MASK;
3684 mux = _get_mux6(parent, &mmdc_ch0_axi_clk[0],
3685 &pll3_usb_otg_main_clk,
3686 &pll2_pfd_594M, &pll2_pfd_400M, NULL, NULL);
3687 reg |= (mux << MXC_CCM_CBCMR_GPU3D_CORE_CLK_SEL_OFFSET);
3688 __raw_writel(reg, MXC_CCM_CBCMR);
3693 static unsigned long _clk_gpu3d_core_get_rate(struct clk *clk)
3697 reg = __raw_readl(MXC_CCM_CBCMR);
/* Field stores (divider - 1). */
3698 div = ((reg & MXC_CCM_CBCMR_GPU3D_CORE_PODF_MASK) >>
3699 MXC_CCM_CBCMR_GPU3D_CORE_PODF_OFFSET) + 1;
3701 return clk_get_rate(clk->parent) / div;
3704 static int _clk_gpu3d_core_set_rate(struct clk *clk, unsigned long rate)
3707 u32 parent_rate = clk_get_rate(clk->parent);
3709 div = parent_rate / rate;
/* NOTE(review): the rate-validation lines (3710-3714) are missing from
 * this listing. */
3715 reg = __raw_readl(MXC_CCM_CBCMR);
3716 reg &= ~MXC_CCM_CBCMR_GPU3D_CORE_PODF_MASK;
3717 reg |= (div - 1) << MXC_CCM_CBCMR_GPU3D_CORE_PODF_OFFSET;
3718 __raw_writel(reg, MXC_CCM_CBCMR);
/* Gated by CCGR1 CG13; secondary keeps gpu3d_axi_clk alive. */
3723 static struct clk gpu3d_core_clk = {
3724 __INIT_CLK_DEBUG(gpu3d_core_clk)
3725 .parent = &pll2_pfd_594M,
3726 .secondary = &gpu3d_axi_clk,
3727 .enable = _clk_enable,
3728 .enable_reg = MXC_CCM_CCGR1,
3729 .enable_shift = MXC_CCM_CCGRx_CG13_OFFSET,
3730 .disable = _clk_disable,
3731 .set_parent = _clk_gpu3d_core_set_parent,
3732 .set_rate = _clk_gpu3d_core_set_rate,
3733 .get_rate = _clk_gpu3d_core_get_rate,
3734 .round_rate = _clk_gpu3d_core_round_rate,
/* GPU2D core clock helpers: CBCMR GPU2D mux (axi / pll3 / pll2_pfd_352M /
 * pll2_pfd_400M) and GPU2D core podf divider.
 * NOTE(review): listing omits dropped lines; code left byte-identical. */
3737 static unsigned long _clk_gpu2d_core_round_rate(struct clk *clk,
3741 u32 parent_rate = clk_get_rate(clk->parent);
3743 div = parent_rate / rate;
3745 /* Make sure rate is not greater than the maximum value for the clock.
3746 * Also prevent a div of 0.
3754 return parent_rate / div;
3757 static int _clk_gpu2d_core_set_parent(struct clk *clk, struct clk *parent)
3760 u32 reg = __raw_readl(MXC_CCM_CBCMR) &
3761 ~MXC_CCM_CBCMR_GPU2D_CLK_SEL_MASK;
3763 mux = _get_mux6(parent, &axi_clk, &pll3_usb_otg_main_clk,
3764 &pll2_pfd_352M, &pll2_pfd_400M, NULL, NULL);
3765 reg |= (mux << MXC_CCM_CBCMR_GPU2D_CLK_SEL_OFFSET);
3766 __raw_writel(reg, MXC_CCM_CBCMR);
3771 static unsigned long _clk_gpu2d_core_get_rate(struct clk *clk)
3775 reg = __raw_readl(MXC_CCM_CBCMR);
/* Reads the GPU2D (not GPU3D) podf field; stores (divider - 1). */
3776 div = ((reg & MXC_CCM_CBCMR_GPU2D_CORE_PODF_MASK) >>
3777 MXC_CCM_CBCMR_GPU2D_CORE_PODF_OFFSET) + 1;
3779 return clk_get_rate(clk->parent) / div;
3782 static int _clk_gpu2d_core_set_rate(struct clk *clk, unsigned long rate)
3785 u32 parent_rate = clk_get_rate(clk->parent);
3787 div = parent_rate / rate;
3790 if (((parent_rate / div) != rate) || (div > 8))
3793 reg = __raw_readl(MXC_CCM_CBCMR);
3794 reg &= ~MXC_CCM_CBCMR_GPU3D_CORE_PODF_MASK;
3795 reg |= (div - 1) << MXC_CCM_CBCMR_GPU3D_CORE_PODF_OFFSET;
3796 __raw_writel(reg, MXC_CCM_CBCMR);
/* GPU2D core clock node: gated by CCGR1 CG12, secondary keeps
 * gpu2d_axi_clk alive while the 2D core runs. */
3800 static struct clk gpu2d_core_clk = {
3801 __INIT_CLK_DEBUG(gpu2d_core_clk)
3802 .parent = &pll2_pfd_352M,
3803 .secondary = &gpu2d_axi_clk,
3804 .enable = _clk_enable,
3805 .enable_reg = MXC_CCM_CCGR1,
3806 .enable_shift = MXC_CCM_CCGRx_CG12_OFFSET,
3807 .disable = _clk_disable,
3808 .set_parent = _clk_gpu2d_core_set_parent,
3809 .set_rate = _clk_gpu2d_core_set_rate,
3810 .get_rate = _clk_gpu2d_core_get_rate,
3811 .round_rate = _clk_gpu2d_core_round_rate,
/* GPU3D shader clock: CBCMR shader mux (mmdc_ch0_axi / pll3 /
 * pll2_pfd_594M / pll3_pfd_720M) plus a shader podf divider.
 * NOTE(review): listing omits dropped lines; code left byte-identical. */
3814 static unsigned long _clk_gpu3d_shader_round_rate(struct clk *clk,
3818 u32 parent_rate = clk_get_rate(clk->parent);
3820 div = parent_rate / rate;
3822 /* Make sure rate is not greater than the maximum value for the clock.
3823 * Also prevent a div of 0.
3831 return parent_rate / div;
3834 static int _clk_gpu3d_shader_set_parent(struct clk *clk, struct clk *parent)
3837 u32 reg = __raw_readl(MXC_CCM_CBCMR)
3838 & ~MXC_CCM_CBCMR_GPU3D_SHADER_CLK_SEL_MASK;
3840 mux = _get_mux6(parent, &mmdc_ch0_axi_clk[0],
3841 &pll3_usb_otg_main_clk,
3842 &pll2_pfd_594M, &pll3_pfd_720M, NULL, NULL);
3843 reg |= (mux << MXC_CCM_CBCMR_GPU3D_SHADER_CLK_SEL_OFFSET);
3844 __raw_writel(reg, MXC_CCM_CBCMR);
3849 static unsigned long _clk_gpu3d_shader_get_rate(struct clk *clk)
3853 reg = __raw_readl(MXC_CCM_CBCMR);
/* Field stores (divider - 1). */
3854 div = ((reg & MXC_CCM_CBCMR_GPU3D_SHADER_PODF_MASK) >>
3855 MXC_CCM_CBCMR_GPU3D_SHADER_PODF_OFFSET) + 1;
3857 return clk_get_rate(clk->parent) / div;
3860 static int _clk_gpu3d_shader_set_rate(struct clk *clk, unsigned long rate)
3863 u32 parent_rate = clk_get_rate(clk->parent);
3865 div = parent_rate / rate;
/* NOTE(review): rate-validation lines (3866-3870) missing from listing. */
3871 reg = __raw_readl(MXC_CCM_CBCMR);
3872 reg &= ~MXC_CCM_CBCMR_GPU3D_SHADER_PODF_MASK;
3873 reg |= (div - 1) << MXC_CCM_CBCMR_GPU3D_SHADER_PODF_OFFSET;
3874 __raw_writel(reg, MXC_CCM_CBCMR);
/* Gated by CCGR1 CG13 (shared gate with gpu3d_core_clk). */
3880 static struct clk gpu3d_shader_clk = {
3881 __INIT_CLK_DEBUG(gpu3d_shader_clk)
3882 .parent = &pll3_pfd_720M,
3883 .enable = _clk_enable,
3884 .enable_reg = MXC_CCM_CCGR1,
3885 .enable_shift = MXC_CCM_CCGRx_CG13_OFFSET,
3886 .disable = _clk_disable,
3887 .set_parent = _clk_gpu3d_shader_set_parent,
3888 .set_rate = _clk_gpu3d_shader_set_rate,
3889 .get_rate = _clk_gpu3d_shader_get_rate,
3890 .round_rate = _clk_gpu3d_shader_round_rate,
/* GPMI NAND clocks, chained via .secondary so enabling [0] brings up the
 * whole io -> apb -> bch -> bch_apb -> pl301 chain (CCGR4 gates). */
3893 /* set the parent by the ipcg table */
3894 static struct clk gpmi_nfc_clk[] = {
3896 __INIT_CLK_DEBUG(gpmi_io_clk)
3897 .parent = &enfc_clk,
3898 .secondary = &gpmi_nfc_clk[1],
3899 .enable = _clk_enable,
3900 .enable_reg = MXC_CCM_CCGR4,
3901 .enable_shift = MXC_CCM_CCGRx_CG14_OFFSET,
3902 .disable = _clk_disable,
3904 { /* gpmi_apb_clk */
3905 __INIT_CLK_DEBUG(gpmi_apb_clk)
3906 .parent = &usdhc3_clk,
3907 .secondary = &gpmi_nfc_clk[2],
3908 .enable = _clk_enable,
3909 .enable_reg = MXC_CCM_CCGR4,
3910 .enable_shift = MXC_CCM_CCGRx_CG15_OFFSET,
3911 .disable = _clk_disable,
3914 __INIT_CLK_DEBUG(gpmi_bch_clk)
3915 .parent = &usdhc4_clk,
3916 .secondary = &gpmi_nfc_clk[3],
3917 .enable = _clk_enable,
3918 .enable_reg = MXC_CCM_CCGR4,
3919 .enable_shift = MXC_CCM_CCGRx_CG13_OFFSET,
3920 .disable = _clk_disable,
3923 __INIT_CLK_DEBUG(gpmi_bch_apb_clk)
3924 .parent = &usdhc3_clk,
3925 .secondary = &gpmi_nfc_clk[4],
3926 .enable = _clk_enable,
3927 .enable_reg = MXC_CCM_CCGR4,
3928 .enable_shift = MXC_CCM_CCGRx_CG12_OFFSET,
3929 .disable = _clk_disable,
3931 { /* bch relative clk */
3932 __INIT_CLK_DEBUG(pl301_mx6qperl_bch)
3933 .parent = &usdhc3_clk,
3934 .enable = _clk_enable,
3935 .enable_reg = MXC_CCM_CCGR4,
3936 .enable_shift = MXC_CCM_CCGRx_CG6_OFFSET,
3937 .disable = _clk_disable,
/* PWM1..4 gate clocks from ipg_perclk, consecutive CCGR4 gates CG8..CG11. */
3941 static struct clk pwm_clk[] = {
3943 __INIT_CLK_DEBUG(pwm_clk_0)
3944 .parent = &ipg_perclk,
3946 .enable_reg = MXC_CCM_CCGR4,
3947 .enable_shift = MXC_CCM_CCGRx_CG8_OFFSET,
3948 .enable = _clk_enable,
3949 .disable = _clk_disable,
3952 __INIT_CLK_DEBUG(pwm_clk_1)
3953 .parent = &ipg_perclk,
3955 .enable_reg = MXC_CCM_CCGR4,
3956 .enable_shift = MXC_CCM_CCGRx_CG9_OFFSET,
3957 .enable = _clk_enable,
3958 .disable = _clk_disable,
3961 __INIT_CLK_DEBUG(pwm_clk_2)
3962 .parent = &ipg_perclk,
3964 .enable_reg = MXC_CCM_CCGR4,
3965 .enable_shift = MXC_CCM_CCGRx_CG10_OFFSET,
3966 .enable = _clk_enable,
3967 .disable = _clk_disable,
3970 __INIT_CLK_DEBUG(pwm_clk_3)
3971 .parent = &ipg_perclk,
3973 .enable_reg = MXC_CCM_CCGR4,
3974 .enable_shift = MXC_CCM_CCGRx_CG11_OFFSET,
3975 .enable = _clk_enable,
3976 .disable = _clk_disable,
/* PCIe clock: toggles the PCIE enable bit inside the anadig PLL8 (ENET)
 * control register in addition to the CCGR4 CG0 gate. */
3980 static int _clk_pcie_enable(struct clk *clk)
/* Ungate the PCIe ref clock output of PLL8 (original comment said
 * "SATA" — copy-paste; the bit touched is ANADIG_PLL_ENET_EN_PCIE). */
3984 /* Enable PCIe ref clock */
3985 reg = __raw_readl(PLL8_ENET_BASE_ADDR);
3986 reg |= ANADIG_PLL_ENET_EN_PCIE;
3987 __raw_writel(reg, PLL8_ENET_BASE_ADDR);
3994 static void _clk_pcie_disable(struct clk *clk)
/* Gate the PCIe ref clock output of PLL8. */
4000 /* Disable PCIe ref clock */
4001 reg = __raw_readl(PLL8_ENET_BASE_ADDR);
4002 reg &= ~ANADIG_PLL_ENET_EN_PCIE;
4003 __raw_writel(reg, PLL8_ENET_BASE_ADDR);
4006 static struct clk pcie_clk = {
4007 __INIT_CLK_DEBUG(pcie_clk)
4008 .parent = &pcie_axi_clk,
4009 .enable = _clk_pcie_enable,
4010 .disable = _clk_pcie_disable,
4011 .enable_reg = MXC_CCM_CCGR4,
4012 .enable_shift = MXC_CCM_CCGRx_CG0_OFFSET,
/* SATA clock: powers up and locks PLL8, drops its bypass, then ungates
 * the SATA ref output; panics if the PLL never reports lock (boot-time
 * hard failure, consistent with the other pll lock handling in this
 * file). Gated by CCGR5 CG2. */
4015 static int _clk_sata_enable(struct clk *clk)
4019 /* Clear Power Down and Enable PLLs */
4020 reg = __raw_readl(PLL8_ENET_BASE_ADDR);
4021 reg &= ~ANADIG_PLL_ENET_POWER_DOWN;
4022 __raw_writel(reg, PLL8_ENET_BASE_ADDR);
4024 reg = __raw_readl(PLL8_ENET_BASE_ADDR);
4025 reg |= ANADIG_PLL_ENET_EN;
4026 __raw_writel(reg, PLL8_ENET_BASE_ADDR);
/* WAIT() spins up to SPIN_DELAY ns for the lock bit (see file header). */
4028 /* Waiting for the PLL is locked */
4029 if (!WAIT(ANADIG_PLL_ENET_LOCK & __raw_readl(PLL8_ENET_BASE_ADDR),
4031 panic("pll8 lock failed\n");
4033 /* Disable the bypass */
4034 reg = __raw_readl(PLL8_ENET_BASE_ADDR);
4035 reg &= ~ANADIG_PLL_ENET_BYPASS;
4036 __raw_writel(reg, PLL8_ENET_BASE_ADDR);
4038 /* Enable SATA ref clock */
4039 reg = __raw_readl(PLL8_ENET_BASE_ADDR);
4040 reg |= ANADIG_PLL_ENET_EN_SATA;
4041 __raw_writel(reg, PLL8_ENET_BASE_ADDR);
/* Disable only gates the SATA ref output; the PLL itself stays up for
 * the other PLL8 consumers (ENET, PCIe). */
4048 static void _clk_sata_disable(struct clk *clk)
4054 /* Disable SATA ref clock */
4055 reg = __raw_readl(PLL8_ENET_BASE_ADDR);
4056 reg &= ~ANADIG_PLL_ENET_EN_SATA;
4057 __raw_writel(reg, PLL8_ENET_BASE_ADDR);
4060 static struct clk sata_clk = {
4061 __INIT_CLK_DEBUG(sata_clk)
4063 .enable = _clk_sata_enable,
4064 .enable_reg = MXC_CCM_CCGR5,
4065 .enable_shift = MXC_CCM_CCGRx_CG2_OFFSET,
4066 .disable = _clk_sata_disable,
/* USB OH3 controller gate (CCGR6 CG0). */
4069 static struct clk usboh3_clk = {
4070 __INIT_CLK_DEBUG(usboh3_clk)
4072 .enable = _clk_enable,
4073 .enable_reg = MXC_CCM_CCGR6,
4074 .enable_shift = MXC_CCM_CCGRx_CG0_OFFSET,
4075 .disable = _clk_disable,
/* Placeholder clock for devices (watchdogs below) that need a clkdev
 * entry but no real gate; body lines omitted from this listing. */
4078 static struct clk dummy_clk = {
/* clkdev registration table: (dev_id, con_id, clk). The macro's
 * continuation lines are omitted from this listing.
 * NOTE(review): several con_id strings look like copy-paste defects to
 * confirm against clk_get() callers:
 *  - "pll3_120M" is registered three times, for pll3_120M, pll3_80M and
 *    pll3_60M (lines 4108-4110) — only the first match is reachable;
 *  - "pll4" is used for pll4_audio, pll6_MLB and pll8_enet (4111/4113/4115);
 *  - "pll3" maps to pll7_usb_host (4114);
 *  - "mxc_alsa_spdif.0" appears as a con_id with NULL dev_id (4152). */
4082 #define _REGISTER_CLOCK(d, n, c) \
4090 static struct clk_lookup lookups[] = {
4091 _REGISTER_CLOCK(NULL, "osc", osc_clk),
4092 _REGISTER_CLOCK(NULL, "ckih", ckih_clk),
4093 _REGISTER_CLOCK(NULL, "ckih2", ckih2_clk),
4094 _REGISTER_CLOCK(NULL, "ckil", ckil_clk),
4095 _REGISTER_CLOCK(NULL, "pll1_main_clk", pll1_sys_main_clk),
4096 _REGISTER_CLOCK(NULL, "pll1_sw_clk", pll1_sw_clk),
4097 _REGISTER_CLOCK(NULL, "pll2", pll2_528_bus_main_clk),
4098 _REGISTER_CLOCK(NULL, "pll2_pfd_400M", pll2_pfd_400M),
4099 _REGISTER_CLOCK(NULL, "pll2_pfd_352M", pll2_pfd_352M),
4100 _REGISTER_CLOCK(NULL, "pll2_pfd_594M", pll2_pfd_594M),
4101 _REGISTER_CLOCK(NULL, "pll2_200M", pll2_200M),
4102 _REGISTER_CLOCK(NULL, "pll3_main_clk", pll3_usb_otg_main_clk),
4103 _REGISTER_CLOCK(NULL, "pll3_pfd_508M", pll3_pfd_508M),
4104 _REGISTER_CLOCK(NULL, "pll3_pfd_454M", pll3_pfd_454M),
4105 _REGISTER_CLOCK(NULL, "pll3_pfd_720M", pll3_pfd_720M),
4106 _REGISTER_CLOCK(NULL, "pll3_pfd_540M", pll3_pfd_540M),
4107 _REGISTER_CLOCK(NULL, "pll3_sw_clk", pll3_sw_clk),
4108 _REGISTER_CLOCK(NULL, "pll3_120M", pll3_120M),
4109 _REGISTER_CLOCK(NULL, "pll3_120M", pll3_80M),
4110 _REGISTER_CLOCK(NULL, "pll3_120M", pll3_60M),
4111 _REGISTER_CLOCK(NULL, "pll4", pll4_audio_main_clk),
4112 _REGISTER_CLOCK(NULL, "pll5", pll5_video_main_clk),
4113 _REGISTER_CLOCK(NULL, "pll4", pll6_MLB_main_clk),
4114 _REGISTER_CLOCK(NULL, "pll3", pll7_usb_host_main_clk),
4115 _REGISTER_CLOCK(NULL, "pll4", pll8_enet_main_clk),
4116 _REGISTER_CLOCK(NULL, "cpu_clk", cpu_clk),
4117 _REGISTER_CLOCK(NULL, "periph_clk", periph_clk),
4118 _REGISTER_CLOCK(NULL, "axi_clk", axi_clk),
4119 _REGISTER_CLOCK(NULL, "mmdc_ch0_axi", mmdc_ch0_axi_clk[0]),
4120 _REGISTER_CLOCK(NULL, "mmdc_ch1_axi", mmdc_ch1_axi_clk[0]),
4121 _REGISTER_CLOCK(NULL, "ahb", ahb_clk),
4122 _REGISTER_CLOCK(NULL, "ipg_clk", ipg_clk),
4123 _REGISTER_CLOCK(NULL, "ipg_perclk", ipg_perclk),
4124 _REGISTER_CLOCK(NULL, "spba", spba_clk),
4125 _REGISTER_CLOCK("imx-sdma", NULL, sdma_clk),
4126 _REGISTER_CLOCK(NULL, "gpu2d_axi_clk", gpu2d_axi_clk),
4127 _REGISTER_CLOCK(NULL, "gpu3d_axi_clk", gpu3d_axi_clk),
4128 _REGISTER_CLOCK(NULL, "pcie_axi_clk", pcie_axi_clk),
4129 _REGISTER_CLOCK(NULL, "vdo_axi_clk", vdo_axi_clk),
4130 _REGISTER_CLOCK(NULL, "iim_clk", iim_clk),
4131 _REGISTER_CLOCK(NULL, "i2c_clk", i2c_clk[0]),
4132 _REGISTER_CLOCK("imx-i2c.1", NULL, i2c_clk[1]),
4133 _REGISTER_CLOCK("imx-i2c.2", NULL, i2c_clk[2]),
4134 _REGISTER_CLOCK(NULL, "vpu_clk", vpu_clk),
4135 _REGISTER_CLOCK(NULL, "ipu1_clk", ipu1_clk),
4136 _REGISTER_CLOCK(NULL, "ipu2_clk", ipu2_clk),
4137 _REGISTER_CLOCK("sdhci-esdhc-imx.0", NULL, usdhc1_clk),
4138 _REGISTER_CLOCK("sdhci-esdhc-imx.1", NULL, usdhc2_clk),
4139 _REGISTER_CLOCK("sdhci-esdhc-imx.2", NULL, usdhc3_clk),
4140 _REGISTER_CLOCK("sdhci-esdhc-imx.3", NULL, usdhc4_clk),
4141 _REGISTER_CLOCK("imx-ssi.0", NULL, ssi1_clk),
4142 _REGISTER_CLOCK("imx-ssi.1", NULL, ssi2_clk),
4143 _REGISTER_CLOCK("imx-ssi.2", NULL, ssi3_clk),
4144 _REGISTER_CLOCK(NULL, "ipu1_di0_clk", ipu1_di_clk[0]),
4145 _REGISTER_CLOCK(NULL, "ipu1_di1_clk", ipu1_di_clk[1]),
4146 _REGISTER_CLOCK(NULL, "ipu2_di0_clk", ipu2_di_clk[0]),
4147 _REGISTER_CLOCK(NULL, "ipu2_di1_clk", ipu2_di_clk[1]),
4148 _REGISTER_CLOCK("FlexCAN.0", "can_clk", can1_clk[0]),
4149 _REGISTER_CLOCK("FlexCAN.1", "can_clk", can2_clk[0]),
4150 _REGISTER_CLOCK(NULL, "ldb_di0_clk", ldb_di0_clk),
4151 _REGISTER_CLOCK(NULL, "ldb_di1_clk", ldb_di1_clk),
4152 _REGISTER_CLOCK(NULL, "mxc_alsa_spdif.0", spdif0_clk[0]),
4153 _REGISTER_CLOCK(NULL, "esai_clk", esai_clk),
4154 _REGISTER_CLOCK("mxc_spi.0", NULL, ecspi_clk[0]),
4155 _REGISTER_CLOCK("mxc_spi.1", NULL, ecspi_clk[1]),
4156 _REGISTER_CLOCK("mxc_spi.2", NULL, ecspi_clk[2]),
4157 _REGISTER_CLOCK("mxc_spi.3", NULL, ecspi_clk[3]),
4158 _REGISTER_CLOCK("mxc_spi.4", NULL, ecspi_clk[4]),
4159 _REGISTER_CLOCK(NULL, "emi_slow_clk", emi_slow_clk),
4160 _REGISTER_CLOCK(NULL, "emi_clk", emi_clk),
4161 _REGISTER_CLOCK(NULL, "enfc_clk", enfc_clk),
4162 _REGISTER_CLOCK("imx-uart.0", NULL, uart_clk[0]),
4163 _REGISTER_CLOCK(NULL, "hsi_tx", hsi_tx_clk),
4164 _REGISTER_CLOCK(NULL, "caam_clk", caam_clk[0]),
4165 _REGISTER_CLOCK(NULL, "asrc_clk", asrc_clk[0]),
4166 _REGISTER_CLOCK(NULL, "asrc_serial_clk", asrc_clk[1]),
4167 _REGISTER_CLOCK("mxs-dma-apbh", NULL, apbh_dma_clk),
4168 _REGISTER_CLOCK(NULL, "openvg_axi_clk", openvg_axi_clk),
4169 _REGISTER_CLOCK(NULL, "gpu3d_clk", gpu3d_core_clk),
4170 _REGISTER_CLOCK(NULL, "gpu2d_clk", gpu2d_core_clk),
4171 _REGISTER_CLOCK(NULL, "gpu3d_shader_clk", gpu3d_shader_clk),
4172 _REGISTER_CLOCK(NULL, "gpt", gpt_clk[0]),
4173 _REGISTER_CLOCK("imx6q-gpmi-nfc.0", NULL, gpmi_nfc_clk[0]),
4174 _REGISTER_CLOCK(NULL, "gpmi-apb", gpmi_nfc_clk[1]),
4175 _REGISTER_CLOCK(NULL, "bch", gpmi_nfc_clk[2]),
4176 _REGISTER_CLOCK(NULL, "bch-apb", gpmi_nfc_clk[3]),
4177 _REGISTER_CLOCK(NULL, "pl301_mx6qperl-bch", gpmi_nfc_clk[4]),
4178 _REGISTER_CLOCK("mxc_pwm.0", NULL, pwm_clk[0]),
4179 _REGISTER_CLOCK("mxc_pwm.1", NULL, pwm_clk[1]),
4180 _REGISTER_CLOCK("mxc_pwm.2", NULL, pwm_clk[2]),
4181 _REGISTER_CLOCK("mxc_pwm.3", NULL, pwm_clk[3]),
4182 _REGISTER_CLOCK(NULL, "pcie_clk", pcie_clk),
4183 _REGISTER_CLOCK("fec.0", NULL, enet_clk),
4184 _REGISTER_CLOCK(NULL, "imx_sata_clk", sata_clk),
4185 _REGISTER_CLOCK(NULL, "usboh3_clk", usboh3_clk),
4186 _REGISTER_CLOCK(NULL, "usb_phy1_clk", usb_phy1_clk),
4187 _REGISTER_CLOCK("imx2-wdt.0", NULL, dummy_clk),
4188 _REGISTER_CLOCK("imx2-wdt.1", NULL, dummy_clk),
4189 _REGISTER_CLOCK(NULL, "hdmi_isfr_clk", hdmi_clk[0]),
4190 _REGISTER_CLOCK(NULL, "hdmi_iahb_clk", hdmi_clk[1]),
4191 _REGISTER_CLOCK(NULL, NULL, vdoa_clk),
4192 _REGISTER_CLOCK(NULL, NULL, aips_tz2_clk),
4193 _REGISTER_CLOCK(NULL, NULL, aips_tz1_clk),
/*
 * clk_tree_init() - early clock-tree fixup based on the DDR type in use.
 *
 * Reads the MMDC MDMISC register (programmed by the boot ROM / DCD before
 * the kernel runs) and, when the DDR_TYPE field equals 0x1, reparents
 * periph_clk to the 400 MHz PFD of PLL2.
 *
 * NOTE(review): DDR_TYPE == 0x1 presumably selects LPDDR2 (vs. DDR3) —
 * confirm against the i.MX6 MMDC chapter of the reference manual.
 * NOTE(review): the opening brace and the declaration of `reg` (original
 * lines 4198-4201), and the function's closing lines, fall outside this
 * chunk of the file.
 */
4197 static void clk_tree_init(void)
4202 reg = __raw_readl(MMDC_MDMISC_OFFSET);
4203 if ((reg & MMDC_MDMISC_DDR_TYPE_MASK) ==
4204 (0x1 << MMDC_MDMISC_DDR_TYPE_OFFSET)) {
/* Run the peripheral domain from pll2_pfd_400M for this DDR type. */
4205 clk_set_parent(&periph_clk, &pll2_pfd_400M);
4206 printk(KERN_INFO "Set periph_clk's parent to pll2_pfd_400M!\n");
/*
 * mx6_clocks_init() - board-level clock initialization for i.MX6.
 * @ckil:  32 kHz low-frequency reference rate from the board (Hz)
 * @osc:   24 MHz oscillator reference rate from the board (Hz)
 * @ckih1: external high-frequency reference rate (Hz)
 * @ckih2: second external high-frequency reference rate (Hz)
 *
 * Records the board-supplied reference rates, maps ANATOP, registers all
 * clkdev lookups, sets default parents/rates for display, GPU, ASRC and
 * NAND clocks, gates unused clocks, and starts the GPT system timer.
 *
 * NOTE(review): this function continues beyond the end of this chunk —
 * its declarations (original lines 4213-4216) and its tail (return, closing
 * brace) are not visible here.
 */
4211 int __init mx6_clocks_init(unsigned long ckil, unsigned long osc,
4212 unsigned long ckih1, unsigned long ckih2)
/* Stash the board-provided reference rates in the file-scope globals
 * declared at the top of this file. */
4217 external_low_reference = ckil;
4218 external_high_reference = ckih1;
4219 ckih2_reference = ckih2;
4220 oscillator_reference = osc;
/* Map the ANATOP (PLL/analog control) register block for later use. */
4222 apll_base = ioremap(ANATOP_BASE_ADDR, SZ_4K);
/* Register every clkdev lookup and expose it to the clk debug interface. */
4224 for (i = 0; i < ARRAY_SIZE(lookups); i++) {
4225 clkdev_add(&lookups[i]);
4226 clk_debug_register(lookups[i].clk);
4231 /* enable mmdc_ch0_axi_clk to make sure the usecount is > 0
4232 * or ipu's parent is mmdc_ch0_axi_clk, if ipu disable clk,
4233 * mmdc_ch0_axi_clk will also be disabled, system will hang */
4234 clk_enable(&mmdc_ch0_axi_clk[0]);
/* 650 MHz is AUDIO_VIDEO_MIN_CLK_FREQ (see top of file) — the lowest
 * valid rate for the audio/video PLLs. */
4235 /* Initialize Audio and Video PLLs to valid frequency (650MHz). */
4236 clk_set_rate(&pll4_audio_main_clk, 650000000);
4237 clk_set_rate(&pll5_video_main_clk, 650000000);
/* Drive all four IPU display interfaces from the video PLL. */
4239 clk_set_parent(&ipu1_di_clk[0], &pll5_video_main_clk);
4240 clk_set_parent(&ipu1_di_clk[1], &pll5_video_main_clk);
4241 clk_set_parent(&ipu2_di_clk[0], &pll5_video_main_clk);
4242 clk_set_parent(&ipu2_di_clk[1], &pll5_video_main_clk);
/* GPU defaults: shader from the 594 MHz PLL2 PFD; 3D core shares the
 * MMDC ch0 AXI parent and requests 528 MHz. */
4244 clk_set_parent(&gpu3d_shader_clk, &pll2_pfd_594M);
4245 clk_set_rate(&gpu3d_shader_clk, 594000000);
4246 clk_set_parent(&gpu3d_core_clk, &mmdc_ch0_axi_clk[0]);
4247 clk_set_rate(&gpu3d_core_clk, 528000000);
4250 * FIXME: asrc needs to use asrc_serial(spdif1) clock to do sample rate convertion,
4251 * however we found it only works when set to 1.5M clock and the
4252 * parent is pll3_sw_clk.
/* Empirical ASRC workaround described in the FIXME above. */
4254 clk_set_parent(&asrc_clk[1], &pll3_sw_clk);
4255 clk_set_rate(&asrc_clk[1], 1500000);
4257 /* set the NAND to 11MHz. Too fast will cause dma timeout. */
/* round_rate() first so the divider lands on an achievable rate. */
4258 clk_set_rate(&enfc_clk, enfc_clk.round_rate(&enfc_clk, 11000000));
/* Fetch the board's CPU operating-point table (declared extern above). */
4261 cpu_op_tbl = get_cpu_op(&cpu_op_nr);
4263 /* Gate off all possible clocks */
/* NOTE(review): with JTAG enabled, CG11 of CCGR0 is written with 0x3
 * instead of 0x1 — presumably keeping a debug-related clock ungated in
 * all modes so the debugger stays attached; confirm CG11's gate in the
 * i.MX6 CCM chapter. The remaining gates are identical in both branches. */
4264 if (mxc_jtag_enabled) {
4265 __raw_writel(3 << MXC_CCM_CCGRx_CG11_OFFSET |
4266 3 << MXC_CCM_CCGRx_CG2_OFFSET |
4267 3 << MXC_CCM_CCGRx_CG1_OFFSET |
4268 3 << MXC_CCM_CCGRx_CG0_OFFSET, MXC_CCM_CCGR0);
4270 __raw_writel(1 << MXC_CCM_CCGRx_CG11_OFFSET |
4271 3 << MXC_CCM_CCGRx_CG2_OFFSET |
4272 3 << MXC_CCM_CCGRx_CG1_OFFSET |
4273 3 << MXC_CCM_CCGRx_CG0_OFFSET, MXC_CCM_CCGR0);
/* Leave only the gates listed below enabled; everything else in
 * CCGR1-CCGR6 is turned off (CCGR6 fully gated). */
4275 __raw_writel(3 << MXC_CCM_CCGRx_CG10_OFFSET, MXC_CCM_CCGR1);
4276 __raw_writel(3 << MXC_CCM_CCGRx_CG12_OFFSET |
4277 3 << MXC_CCM_CCGRx_CG11_OFFSET |
4278 3 << MXC_CCM_CCGRx_CG10_OFFSET |
4279 3 << MXC_CCM_CCGRx_CG9_OFFSET |
4280 3 << MXC_CCM_CCGRx_CG8_OFFSET, MXC_CCM_CCGR2);
4281 __raw_writel(3 << MXC_CCM_CCGRx_CG14_OFFSET |
4282 3 << MXC_CCM_CCGRx_CG13_OFFSET |
4283 3 << MXC_CCM_CCGRx_CG12_OFFSET |
4284 3 << MXC_CCM_CCGRx_CG11_OFFSET |
4285 3 << MXC_CCM_CCGRx_CG10_OFFSET, MXC_CCM_CCGR3);
4286 __raw_writel(3 << MXC_CCM_CCGRx_CG7_OFFSET |
4287 3 << MXC_CCM_CCGRx_CG4_OFFSET, MXC_CCM_CCGR4);
4288 __raw_writel(3 << MXC_CCM_CCGRx_CG3_OFFSET |
4289 3 << MXC_CCM_CCGRx_CG0_OFFSET, MXC_CCM_CCGR5);
4290 __raw_writel(0, MXC_CCM_CCGR6);
4292 /* Lower the ipg_perclk frequency to 11MHz. */
4293 clk_set_rate(&ipg_perclk, 11000000);
/* Map the GPT and start it as the system tick source. */
4295 base = ioremap(GPT_BASE_ADDR, SZ_4K);
4296 mxc_timer_init(&gpt_clk[0], base, MXC_INT_GPT);