3 * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved.
7 * The code contained herein is licensed under the GNU General Public
8 * License. You may obtain a copy of the GNU General Public License
9 * Version 2 or later at the following locations:
11 * http://www.opensource.org/licenses/gpl-license.html
12 * http://www.gnu.org/copyleft/gpl.html
15 #include <linux/kernel.h>
16 #include <linux/init.h>
17 #include <linux/types.h>
18 #include <linux/time.h>
19 #include <linux/hrtimer.h>
21 #include <linux/errno.h>
22 #include <linux/delay.h>
23 #include <linux/clk.h>
25 #include <linux/clkdev.h>
26 #include <asm/div64.h>
27 #include <mach/hardware.h>
28 #include <mach/common.h>
29 #include <mach/clock.h>
30 #include <mach/mxc_dvfs.h>
33 #ifdef CONFIG_CLK_DEBUG
34 #define __INIT_CLK_DEBUG(n) .name = #n,
36 #define __INIT_CLK_DEBUG(n)
39 void __iomem *apll_base;
40 static struct clk pll1_sys_main_clk;
41 static struct clk pll2_528_bus_main_clk;
42 static struct clk pll3_usb_otg_main_clk;
43 static struct clk pll4_audio_main_clk;
44 static struct clk pll5_video_main_clk;
45 static struct clk pll6_MLB_main_clk;
46 static struct clk pll7_usb_host_main_clk;
47 static struct clk pll8_enet_main_clk;
48 static struct clk apbh_dma_clk;
/*
 * Busy-wait helpers: WAIT(exp, timeout) polls `exp` until it becomes true
 * or `timeout` nanoseconds elapse (interior macro lines elided in this
 * listing).
 * NOTE(review): only tv_nsec is compared, so a tv_sec rollover between the
 * two getnstimeofday() calls yields a bogus delta -- confirm against the
 * full macro body.
 */
50 #define SPIN_DELAY 1000000 /* in nanoseconds */
52 #define WAIT(exp, timeout) \
54 struct timespec nstimeofday; \
55 struct timespec curtime; \
57 getnstimeofday(&nstimeofday); \
59 getnstimeofday(&curtime); \
60 if ((curtime.tv_nsec - nstimeofday.tv_nsec) > (timeout)) { \
68 /* External clock values passed-in by the board code */
69 static unsigned long external_high_reference, external_low_reference;
70 static unsigned long oscillator_reference, ckih2_reference;
/*
 * Split a requested total divider `div` into a pre-divider (*pre, max 8)
 * and a post-divider (*post) whose product approximates `div`.
 * (Error-minimising search interior elided in this listing.)
 */
72 static void __calc_pre_post_dividers(u32 div, u32 *pre, u32 *post)
74 u32 min_pre, temp_pre, old_err, err;
79 } else if (div >= 8) {
80 min_pre = (div - 1) / 64 + 1;
82 for (temp_pre = 8; temp_pre >= min_pre; temp_pre--) {
94 *post = (div + *pre - 1) / *pre;
/* Gate a clock on: set both CCGR gate bits for this clock. */
101 static int _clk_enable(struct clk *clk)
104 reg = __raw_readl(clk->enable_reg);
105 reg |= MXC_CCM_CCGRx_CG_MASK << clk->enable_shift;
106 __raw_writel(reg, clk->enable_reg);
/* Gate a clock fully off.  The write-back is deliberately commented out
 * (bring-up safety) so this is currently a no-op. */
111 static void _clk_disable(struct clk *clk)
114 reg = __raw_readl(clk->enable_reg);
115 reg &= ~(MXC_CCM_CCGRx_CG_MASK << clk->enable_shift);
116 /* TODO: un-comment the disable code */
117 /* __raw_writel(reg, clk->enable_reg); */
/* Gate a clock off in WAIT/STOP only (CCGR value 01 = on in RUN mode).
 * Write-back likewise still disabled during bring-up. */
121 static void _clk_disable_inwait(struct clk *clk)
124 reg = __raw_readl(clk->enable_reg);
125 reg &= ~(MXC_CCM_CCGRx_CG_MASK << clk->enable_shift);
126 reg |= 1 << clk->enable_shift;
127 /* TODO: un-comment the disable code */
128 /* __raw_writel(reg, clk->enable_reg); */
132 * For the 4-to-1 muxed input clock
134 static inline u32 _get_mux(struct clk *parent, struct clk *m0,
135 struct clk *m1, struct clk *m2, struct clk *m3)
139 else if (parent == m1)
141 else if (parent == m2)
143 else if (parent == m3)
151 static inline void __iomem *_get_pll_base(struct clk *pll)
153 if (pll == &pll1_sys_main_clk)
154 return PLL1_SYS_BASE_ADDR;
155 else if (pll == &pll2_528_bus_main_clk)
156 return PLL2_528_BASE_ADDR;
157 else if (pll == &pll3_usb_otg_main_clk)
158 return PLL3_480_USB1_BASE_ADDR;
159 else if (pll == &pll4_audio_main_clk)
160 return PLL4_AUDIO_BASE_ADDR;
161 else if (pll == &pll5_video_main_clk)
162 return PLL5_VIDEO_BASE_ADDR;
163 else if (pll == &pll6_MLB_main_clk)
164 return PLL6_MLB_BASE_ADDR;
165 else if (pll == &pll7_usb_host_main_clk)
166 return PLL7_480_USB2_BASE_ADDR;
167 else if (pll == &pll8_enet_main_clk)
168 return PLL8_ENET_BASE_ADDR;
176 * For the 6-to-1 muxed input clock
178 static inline u32 _get_mux6(struct clk *parent, struct clk *m0, struct clk *m1,
179 struct clk *m2, struct clk *m3, struct clk *m4,
184 else if (parent == m1)
186 else if (parent == m2)
188 else if (parent == m3)
190 else if (parent == m4)
192 else if (parent == m5)
/* Trivial rate accessors for the board-supplied external references. */
199 static unsigned long get_high_reference_clock_rate(struct clk *clk)
201 return external_high_reference;
204 static unsigned long get_low_reference_clock_rate(struct clk *clk)
206 return external_low_reference;
209 static unsigned long get_oscillator_reference_clock_rate(struct clk *clk)
211 return oscillator_reference;
214 static unsigned long get_ckih2_reference_clock_rate(struct clk *clk)
216 return ckih2_reference;
219 /* External high frequency clock */
220 static struct clk ckih_clk = {
221 __INIT_CLK_DEBUG(ckih_clk)
222 .get_rate = get_high_reference_clock_rate,
/* Second external high frequency clock input. */
225 static struct clk ckih2_clk = {
226 __INIT_CLK_DEBUG(ckih2_clk)
227 .get_rate = get_ckih2_reference_clock_rate,
/* 24 MHz crystal oscillator reference. */
230 static struct clk osc_clk = {
231 __INIT_CLK_DEBUG(osc_clk)
232 .get_rate = get_oscillator_reference_clock_rate,
235 /* External low frequency (32kHz) clock */
236 static struct clk ckil_clk = {
237 __INIT_CLK_DEBUG(ckil_clk)
238 .get_rate = get_low_reference_clock_rate,
/* Nearest achievable PFD rate: out = parent * 18 / frac, frac in 18..35. */
241 static unsigned long pfd_round_rate(struct clk *clk, unsigned long rate)
246 tmp = (u64)clk_get_rate(clk->parent) * 18;
249 frac = frac < 18 ? 18 : frac;
250 frac = frac > 35 ? 35 : frac;
/* Read the PFD frac field back from Anatop and derive the current rate.
 * apbh_dma_clk must be running to access the register (errata workaround
 * -- presumed; confirm against full source). */
255 static unsigned long pfd_get_rate(struct clk *clk)
259 tmp = (u64)clk_get_rate(clk->parent) * 18;
261 if (apbh_dma_clk.usecount == 0)
262 apbh_dma_clk.enable(&apbh_dma_clk);
264 frac = (__raw_readl(clk->enable_reg) >> clk->enable_shift) &
265 ANADIG_PFD_FRAC_MASK;
/*
 * Program a PFD frac value for `rate` via the Anatop set (+4) / clear (+8)
 * shadow registers.
 * NOTE(review): frac is clamped to 12..35 here but 18..35 in
 * pfd_round_rate -- confirm which bound the silicon requires.
 * NOTE(review): the (int) casts on clk->enable_reg truncate on 64-bit;
 * OK only because this is 32-bit ARM code.
 */
272 static int pfd_set_rate(struct clk *clk, unsigned long rate)
276 tmp = (u64)clk_get_rate(clk->parent) * 18;
278 if (apbh_dma_clk.usecount == 0)
279 apbh_dma_clk.enable(&apbh_dma_clk);
281 /* Round up the divider so that we don't set a rate
282 * higher than what is requested. */
286 frac = frac < 12 ? 12 : frac;
287 frac = frac > 35 ? 35 : frac;
288 /* clear clk frac bits */
289 __raw_writel(ANADIG_PFD_FRAC_MASK << clk->enable_shift,
290 (int)clk->enable_reg + 8);
291 /* set clk frac bits */
292 __raw_writel(frac << clk->enable_shift,
293 (int)clk->enable_reg + 4);
295 tmp = (u64)clk_get_rate(clk->parent) * 18;
298 if (apbh_dma_clk.usecount == 0)
299 apbh_dma_clk.disable(&apbh_dma_clk);
/* Ungate a PFD output: write its CLKGATE bit (frac_offset + 7) to the
 * Anatop clear register (+8).  apbh_dma_clk is held around the access. */
303 static int _clk_pfd_enable(struct clk *clk)
305 if (apbh_dma_clk.usecount == 0)
306 apbh_dma_clk.enable(&apbh_dma_clk);
308 /* clear clk gate bit */
309 __raw_writel((1 << (clk->enable_shift + 7)),
310 (int)clk->enable_reg + 8);
312 if (apbh_dma_clk.usecount == 0)
313 apbh_dma_clk.disable(&apbh_dma_clk);
/* Gate a PFD output: write its CLKGATE bit to the Anatop set register (+4). */
318 static void _clk_pfd_disable(struct clk *clk)
320 if (apbh_dma_clk.usecount == 0)
321 apbh_dma_clk.enable(&apbh_dma_clk);
323 /* set clk gate bit */
324 __raw_writel((1 << (clk->enable_shift + 7)),
325 (int)clk->enable_reg + 4);
327 if (apbh_dma_clk.usecount == 0)
328 apbh_dma_clk.disable(&apbh_dma_clk);
/* Enable/disable the USB PHY clock output.  NOTE: for these two clocks
 * enable_shift holds a bit MASK (ANADIG_PLL_480_EN_USB_CLKS), not a shift
 * count -- it is ORed/cleared directly. */
331 static void _clk_usb_phy_enable(struct clk *clk)
334 usb_phy_reg = __raw_readl(clk->enable_reg);
335 __raw_writel(usb_phy_reg | clk->enable_shift, clk->enable_reg);
338 static void _clk_usb_phy_disable(struct clk *clk)
341 usb_phy_reg = __raw_readl(clk->enable_reg);
342 __raw_writel(usb_phy_reg & (~clk->enable_shift), clk->enable_reg);
/*
 * Power up a PLL, wait for lock, then enable its output.
 * The 480 MHz USB PLLs (PLL3/PLL7) invert the POWER bit sense, hence the
 * special-case below.
 */
345 static int _clk_pll_enable(struct clk *clk)
348 void __iomem *pllbase;
350 pllbase = _get_pll_base(clk);
352 reg = __raw_readl(pllbase);
353 reg &= ~ANADIG_PLL_BYPASS;
354 reg &= ~ANADIG_PLL_POWER_DOWN;
356 /* The 480MHz PLLs have the opposite definition for power bit. */
357 if (clk == &pll3_usb_otg_main_clk || clk == &pll7_usb_host_main_clk)
358 reg |= ANADIG_PLL_POWER_DOWN;
360 __raw_writel(reg, pllbase);
362 /* Wait for PLL to lock */
363 if (!WAIT(__raw_readl(pllbase) & ANADIG_PLL_LOCK,
365 panic("pll enable failed\n");
367 /* Enable the PLL output now*/
368 reg = __raw_readl(pllbase);
369 reg |= ANADIG_PLL_ENABLE;
370 __raw_writel(reg, pllbase);
/* Reverse of _clk_pll_enable: disable output, bypass and power down
 * (power-bit sense again inverted for PLL3/PLL7). */
375 static void _clk_pll_disable(struct clk *clk)
378 void __iomem *pllbase;
380 pllbase = _get_pll_base(clk);
382 reg = __raw_readl(pllbase);
383 reg &= ~ANADIG_PLL_ENABLE;
384 reg |= ANADIG_PLL_BYPASS;
385 reg |= ANADIG_PLL_POWER_DOWN;
386 if (clk == &pll3_usb_otg_main_clk || clk == &pll7_usb_host_main_clk)
387 reg &= ~ANADIG_PLL_POWER_DOWN;
388 __raw_writel(reg, pllbase);
/* PLL1 (ARM PLL) rate: Fout = Fref * DIV_SELECT / 2. */
391 static unsigned long _clk_pll1_main_get_rate(struct clk *clk)
396 div = __raw_readl(PLL1_SYS_BASE_ADDR) & ANADIG_PLL_SYS_DIV_SELECT_MASK;
397 val = (clk_get_rate(clk->parent) * div) / 2;
/*
 * Set PLL1 (ARM PLL) rate.  Fout = Fref * DIV_SELECT / 2, so
 * DIV_SELECT = rate * 2 / Fref.  Valid output range: 650 MHz .. 1.3 GHz.
 */
401 static int _clk_pll1_main_set_rate(struct clk *clk, unsigned long rate)
403 unsigned int reg, div;
/* rate/1000 is in kHz, so the bounds are 650000..1300000 kHz.  The old
 * upper bound of 1300000000 (i.e. 1.3 THz) could never trip, leaving the
 * range check half-effective. */
405 if (rate/1000 < 650000 || rate/1000 > 1300000)
408 div = (rate * 2) / clk_get_rate(clk->parent) ;
410 reg = __raw_readl(PLL1_SYS_BASE_ADDR) & ~ANADIG_PLL_SYS_DIV_SELECT_MASK;
412 __raw_writel(reg, PLL1_SYS_BASE_ADDR);
/* PLL1 (ARM/system PLL), nominally up to 1.3 GHz. */
417 static struct clk pll1_sys_main_clk = {
418 __INIT_CLK_DEBUG(pll1_sys_main_clk)
420 .get_rate = _clk_pll1_main_get_rate,
421 .set_rate = _clk_pll1_main_set_rate,
422 .enable = _clk_pll_enable,
423 .disable = _clk_pll_disable,
/*
 * Select the pll1_sw_clk source: either PLL1 directly, or STEP_CLK
 * (itself muxed between the 24 MHz osc and PLL2 PFD 400 MHz).
 */
426 static int _clk_pll1_sw_set_parent(struct clk *clk, struct clk *parent)
430 reg = __raw_readl(MXC_CCM_CCSR);
432 if (parent == &pll1_sys_main_clk) {
433 reg &= ~MXC_CCM_CCSR_PLL1_SW_CLK_SEL;
434 __raw_writel(reg, MXC_CCM_CCSR);
435 /* Set the step_clk parent to be lp_apm, to save power. */
436 reg = __raw_readl(MXC_CCM_CCSR);
437 reg = (reg & ~MXC_CCM_CCSR_STEP_SEL);
439 /* Set STEP_CLK to be the parent*/
440 if (parent == &osc_clk) {
441 /* Set STEP_CLK to be sourced from LPAPM. */
442 reg = __raw_readl(MXC_CCM_CCSR);
443 reg = (reg & ~MXC_CCM_CCSR_STEP_SEL);
444 __raw_writel(reg, MXC_CCM_CCSR);
446 /* Set STEP_CLK to be sourced from PLL2-PDF (400MHz). */
447 reg = __raw_readl(MXC_CCM_CCSR);
448 reg |= MXC_CCM_CCSR_STEP_SEL;
449 __raw_writel(reg, MXC_CCM_CCSR);
452 reg = __raw_readl(MXC_CCM_CCSR);
453 reg |= MXC_CCM_CCSR_PLL1_SW_CLK_SEL;
/* (Removed a second __raw_readl(MXC_CCM_CCSR) here: it re-read the
 * register straight after ORing in PLL1_SW_CLK_SEL, discarding the bit
 * before the write-back below and leaving the mux unswitched.) */
456 __raw_writel(reg, MXC_CCM_CCSR);
/* pll1_sw_clk simply passes through its current parent's rate. */
461 static unsigned long _clk_pll1_sw_get_rate(struct clk *clk)
463 return clk_get_rate(clk->parent);
/* Software-controlled mux feeding the ARM core divider. */
466 static struct clk pll1_sw_clk = {
467 __INIT_CLK_DEBUG(pll1_sw_clk)
468 .parent = &pll1_sys_main_clk,
469 .set_parent = _clk_pll1_sw_set_parent,
470 .get_rate = _clk_pll1_sw_get_rate,
/* PLL2 (528 MHz bus PLL): DIV_SELECT chooses Fref*22 (528 MHz) or
 * Fref*20 (480 MHz). */
473 static unsigned long _clk_pll2_main_get_rate(struct clk *clk)
478 div = __raw_readl(PLL2_528_BASE_ADDR) & ANADIG_PLL_528_DIV_SELECT;
481 val = clk_get_rate(clk->parent) * 22;
484 val = clk_get_rate(clk->parent) * 20;
/* Only the two hardware-supported rates (528/480 MHz) are accepted. */
489 static int _clk_pll2_main_set_rate(struct clk *clk, unsigned long rate)
491 unsigned int reg, div;
493 if (rate == 528000000)
495 else if (rate == 480000000)
500 reg = __raw_readl(PLL2_528_BASE_ADDR);
501 reg &= ~ANADIG_PLL_528_DIV_SELECT;
503 __raw_writel(reg, PLL2_528_BASE_ADDR);
/* PLL2: the main system-bus PLL and parent of the 528 PFDs. */
508 static struct clk pll2_528_bus_main_clk = {
509 __INIT_CLK_DEBUG(pll2_528_bus_main_clk)
511 .get_rate = _clk_pll2_main_get_rate,
512 .set_rate = _clk_pll2_main_set_rate,
513 .enable = _clk_pll_enable,
514 .disable = _clk_pll_disable,
/* 400 MHz PFD off PLL2 (528 * 18/24).  Removed a duplicate
 * ".get_rate = pfd_get_rate" designated initializer that re-initialized
 * the same member twice. */
517 static struct clk pll2_pfd_400M = {
518 __INIT_CLK_DEBUG(pll2_pfd_400M)
519 .parent = &pll2_528_bus_main_clk,
520 .enable_reg = (void *)PFD_528_BASE_ADDR,
521 .enable_shift = ANADIG_PFD2_FRAC_OFFSET,
522 .enable = _clk_pfd_enable,
523 .disable = _clk_pfd_disable,
524 .get_rate = pfd_get_rate,
525 .set_rate = pfd_set_rate,
527 .round_rate = pfd_round_rate,
/* 352 MHz PFD off PLL2 (528 * 18/27). */
530 static struct clk pll2_pfd_352M = {
531 __INIT_CLK_DEBUG(pll2_pfd_352M)
532 .parent = &pll2_528_bus_main_clk,
533 .enable_reg = (void *)PFD_528_BASE_ADDR,
534 .enable_shift = ANADIG_PFD0_FRAC_OFFSET,
535 .enable = _clk_pfd_enable,
536 .disable = _clk_pfd_disable,
537 .set_rate = pfd_set_rate,
538 .get_rate = pfd_get_rate,
539 .round_rate = pfd_round_rate,
/* 594 MHz PFD off PLL2 (528 * 18/16). */
542 static struct clk pll2_pfd_594M = {
543 __INIT_CLK_DEBUG(pll2_pfd_594M)
544 .parent = &pll2_528_bus_main_clk,
545 .enable_reg = (void *)PFD_528_BASE_ADDR,
546 .enable_shift = ANADIG_PFD1_FRAC_OFFSET,
547 .enable = _clk_pfd_enable,
548 .disable = _clk_pfd_disable,
549 .set_rate = pfd_set_rate,
550 .get_rate = pfd_get_rate,
551 .round_rate = pfd_round_rate,
/* Fixed /2 off the 400 MHz PFD. */
554 static unsigned long _clk_pll2_200M_get_rate(struct clk *clk)
556 return clk_get_rate(clk->parent) / 2;
559 static struct clk pll2_200M = {
560 __INIT_CLK_DEBUG(pll2_200M)
561 .parent = &pll2_pfd_400M,
562 .get_rate = _clk_pll2_200M_get_rate,
/* PLL3 (480 MHz USB OTG PLL): DIV_SELECT chooses Fref*22 or Fref*20. */
565 static unsigned long _clk_pll3_usb_otg_get_rate(struct clk *clk)
570 div = __raw_readl(PLL3_480_USB1_BASE_ADDR)
571 & ANADIG_PLL_480_DIV_SELECT_MASK;
574 val = clk_get_rate(clk->parent) * 22;
576 val = clk_get_rate(clk->parent) * 20;
/* Only 528/480 MHz are accepted, mirroring _clk_pll2_main_set_rate. */
580 static int _clk_pll3_usb_otg_set_rate(struct clk *clk, unsigned long rate)
582 unsigned int reg, div;
584 if (rate == 528000000)
586 else if (rate == 480000000)
591 reg = __raw_readl(PLL3_480_USB1_BASE_ADDR)
592 reg &= ~ANADIG_PLL_480_DIV_SELECT_MASK;
594 __raw_writel(reg, PLL3_480_USB1_BASE_ADDR);
600 /* same as pll3_main_clk. These two clocks should always be the same */
601 static struct clk pll3_usb_otg_main_clk = {
602 __INIT_CLK_DEBUG(pll3_usb_otg_main_clk)
604 .enable = _clk_pll_enable,
605 .disable = _clk_pll_disable,
606 .set_rate = _clk_pll3_usb_otg_set_rate,
607 .get_rate = _clk_pll3_usb_otg_get_rate,
/* USB PHY1 clock gated out of PLL3; enable_shift is the EN_USB_CLKS
 * bit mask consumed by _clk_usb_phy_enable/disable. */
610 static struct clk usb_phy1_clk = {
611 __INIT_CLK_DEBUG(usb_phy1_clk)
612 .parent = &pll3_usb_otg_main_clk,
613 .enable = _clk_usb_phy_enable,
614 .disable = _clk_usb_phy_disable,
615 .enable_reg = (void *)PLL3_480_USB1_BASE_ADDR,
616 .enable_shift = ANADIG_PLL_480_EN_USB_CLKS,
617 .set_rate = _clk_pll3_usb_otg_set_rate,
618 .get_rate = _clk_pll3_usb_otg_get_rate,
/* 508 MHz PFD off PLL3.  Restored ".get_rate = pfd_get_rate": every
 * sibling PFD clock provides it, and without it clk_get_rate() would
 * report the parent's 480 MHz instead of the PFD output. */
622 static struct clk pll3_pfd_508M = {
623 __INIT_CLK_DEBUG(pll3_pfd_508M)
624 .parent = &pll3_usb_otg_main_clk,
625 .enable_reg = (void *)PFD_480_BASE_ADDR,
626 .enable_shift = ANADIG_PFD2_FRAC_OFFSET,
627 .enable = _clk_pfd_enable,
628 .disable = _clk_pfd_disable,
629 .set_rate = pfd_set_rate,
.get_rate = pfd_get_rate,
630 .round_rate = pfd_round_rate,
/* 454 MHz PFD off PLL3. */
633 static struct clk pll3_pfd_454M = {
634 __INIT_CLK_DEBUG(pll3_pfd_454M)
635 .parent = &pll3_usb_otg_main_clk,
636 .enable_reg = (void *)PFD_480_BASE_ADDR,
637 .enable_shift = ANADIG_PFD3_FRAC_OFFSET,
638 .enable = _clk_pfd_enable,
639 .disable = _clk_pfd_disable,
640 .set_rate = pfd_set_rate,
641 .get_rate = pfd_get_rate,
642 .round_rate = pfd_round_rate,
/* 720 MHz PFD off PLL3. */
645 static struct clk pll3_pfd_720M = {
646 __INIT_CLK_DEBUG(pll3_pfd_720M)
647 .parent = &pll3_usb_otg_main_clk,
648 .enable_reg = (void *)PFD_480_BASE_ADDR,
649 .enable_shift = ANADIG_PFD0_FRAC_OFFSET,
650 .enable = _clk_pfd_enable,
651 .disable = _clk_pfd_disable,
652 .set_rate = pfd_set_rate,
653 .get_rate = pfd_get_rate,
654 .round_rate = pfd_round_rate,
/* 540 MHz PFD off PLL3.  Removed a duplicate ".get_rate = pfd_get_rate"
 * designated initializer that re-initialized the same member twice. */
657 static struct clk pll3_pfd_540M = {
658 __INIT_CLK_DEBUG(pll3_pfd_540M)
659 .parent = &pll3_usb_otg_main_clk,
660 .enable_reg = (void *)PFD_480_BASE_ADDR,
661 .enable_shift = ANADIG_PFD1_FRAC_OFFSET,
662 .enable = _clk_pfd_enable,
663 .disable = _clk_pfd_disable,
664 .set_rate = pfd_set_rate,
665 .get_rate = pfd_get_rate,
666 .round_rate = pfd_round_rate,
/* pll3_sw_clk passes through its parent's rate unchanged. */
670 static unsigned long _clk_pll3_sw_get_rate(struct clk *clk)
672 return clk_get_rate(clk->parent);
675 /* same as pll3_main_clk. These two clocks should always be the same */
676 static struct clk pll3_sw_clk = {
677 __INIT_CLK_DEBUG(pll3_sw_clk)
678 .parent = &pll3_usb_otg_main_clk,
679 .get_rate = _clk_pll3_sw_get_rate,
/* Fixed 480/4 = 120 MHz tap. */
682 static unsigned long _clk_pll3_120M_get_rate(struct clk *clk)
684 return clk_get_rate(clk->parent) / 4;
687 static struct clk pll3_120M = {
688 __INIT_CLK_DEBUG(pll3_120M)
689 .parent = &pll3_sw_clk,
690 .get_rate = _clk_pll3_120M_get_rate,
/* Fixed 480/6 = 80 MHz tap. */
693 static unsigned long _clk_pll3_80M_get_rate(struct clk *clk)
695 return clk_get_rate(clk->parent) / 6;
698 static struct clk pll3_80M = {
699 __INIT_CLK_DEBUG(pll3_80M)
700 .parent = &pll3_sw_clk,
701 .get_rate = _clk_pll3_80M_get_rate,
/* Fixed 480/8 = 60 MHz tap. */
704 static unsigned long _clk_pll3_60M_get_rate(struct clk *clk)
706 return clk_get_rate(clk->parent) / 8;
709 static struct clk pll3_60M = {
710 __INIT_CLK_DEBUG(pll3_60M)
711 .parent = &pll3_sw_clk,
712 .get_rate = _clk_pll3_60M_get_rate,
/* PLL4: audio PLL. */
715 static struct clk pll4_audio_main_clk = {
716 __INIT_CLK_DEBUG(pll4_audio_main_clk)
718 .enable = _clk_pll_enable,
719 .disable = _clk_pll_disable,
/* PLL5: video PLL. */
722 static struct clk pll5_video_main_clk = {
723 __INIT_CLK_DEBUG(pll5_video_main_clk)
725 .enable = _clk_pll_enable,
726 .disable = _clk_pll_disable,
/* PLL6: MLB (MediaLB) PLL. */
729 static struct clk pll6_MLB_main_clk = {
730 __INIT_CLK_DEBUG(pll6_MLB_main_clk)
732 .enable = _clk_pll_enable,
733 .disable = _clk_pll_disable,
/* PLL7 (480 MHz USB host PLL) rate: Fref*22 or Fref*20 per DIV_SELECT. */
736 static unsigned long _clk_pll7_usb_otg_get_rate(struct clk *clk)
741 div = __raw_readl(PLL7_480_USB2_BASE_ADDR)
742 & ANADIG_PLL_480_DIV_SELECT_MASK;
745 val = clk_get_rate(clk->parent) * 22;
747 val = clk_get_rate(clk->parent) * 20;
/* Only 528/480 MHz accepted, mirroring the PLL3 setter. */
751 static int _clk_pll7_usb_otg_set_rate(struct clk *clk, unsigned long rate)
753 unsigned int reg, div;
755 if (rate == 528000000)
757 else if (rate == 480000000)
762 reg = __raw_readl(PLL7_480_USB2_BASE_ADDR);
763 reg &= ~ANADIG_PLL_480_DIV_SELECT_MASK;
765 __raw_writel(reg, PLL7_480_USB2_BASE_ADDR);
/* PLL7: 480 MHz USB host PLL. */
770 static struct clk pll7_usb_host_main_clk = {
771 __INIT_CLK_DEBUG(pll7_usb_host_main_clk)
773 .enable = _clk_pll_enable,
774 .disable = _clk_pll_disable,
775 .set_rate = _clk_pll7_usb_otg_set_rate,
776 .get_rate = _clk_pll7_usb_otg_get_rate,
/* USB PHY2 clock gated out of PLL7 (enable_shift is a bit mask). */
780 static struct clk usb_phy2_clk = {
781 __INIT_CLK_DEBUG(usb_phy2_clk)
782 .parent = &pll7_usb_host_main_clk,
783 .enable = _clk_usb_phy_enable,
784 .disable = _clk_usb_phy_disable,
785 .enable_reg = (void *)PLL7_480_USB2_BASE_ADDR,
786 .enable_shift = ANADIG_PLL_480_EN_USB_CLKS,
787 .set_rate = _clk_pll7_usb_otg_set_rate,
788 .get_rate = _clk_pll7_usb_otg_get_rate,
/* PLL8: Ethernet PLL. */
792 static struct clk pll8_enet_main_clk = {
793 __INIT_CLK_DEBUG(pll8_enet_main_clk)
795 .enable = _clk_pll_enable,
796 .disable = _clk_pll_disable,
/* ARM core rate = parent / (CACRR.ARM_PODF + 1). */
799 static unsigned long _clk_arm_get_rate(struct clk *clk)
803 cacrr = __raw_readl(MXC_CCM_CACRR);
804 div = (cacrr & MXC_CCM_CACRR_ARM_PODF_MASK) + 1;
805 return clk_get_rate(clk->parent) / div;
/* Program ARM_PODF for the requested rate (divider validation lines
 * elided in this listing). */
808 static int _clk_arm_set_rate(struct clk *clk, unsigned long rate)
812 div = (clk_get_rate(clk->parent) / rate);
816 __raw_writel(div - 1, MXC_CCM_CACRR);
/* The ARM core clock, fed from pll1_sw_clk. */
821 static struct clk cpu_clk = {
822 __INIT_CLK_DEBUG(cpu_clk)
823 .parent = &pll1_sw_clk,
824 .set_rate = _clk_arm_set_rate,
825 .get_rate = _clk_arm_get_rate,
/*
 * Reparent periph_clk.  Sources 0..3 go via pre_periph_clk_sel; 4..5
 * (pll3_sw_clk / osc) go via periph_clk2_sel, hence the (mux - 4) below
 * (branching between the two paths is on lines elided in this listing).
 */
828 static int _clk_periph_set_parent(struct clk *clk, struct clk *parent)
833 mux = _get_mux6(parent, &pll2_528_bus_main_clk, &pll2_pfd_400M,
834 &pll2_pfd_352M, &pll2_200M, &pll3_sw_clk, &osc_clk);
837 /* Set the pre_periph_clk multiplexer */
838 reg = __raw_readl(MXC_CCM_CBCMR);
839 reg &= ~MXC_CCM_CBCMR_PRE_PERIPH_CLK_SEL_MASK;
840 reg |= mux << MXC_CCM_CBCMR_PRE_PERIPH_CLK_SEL_OFFSET;
841 __raw_writel(reg, MXC_CCM_CBCMR);
843 /* Set the periph_clk_sel multiplexer. */
844 reg = __raw_readl(MXC_CCM_CBCDR);
845 reg &= ~MXC_CCM_CBCDR_PERIPH_CLK_SEL;
846 __raw_writel(reg, MXC_CCM_CBCDR);
848 /* Set the periph_clk2_podf divider to divide by 1. */
849 reg = __raw_readl(MXC_CCM_CBCDR);
850 reg &= ~MXC_CCM_CBCDR_PERIPH_CLK2_PODF_MASK;
851 __raw_writel(reg, MXC_CCM_CBCDR);
853 /* Set the periph_clk2_sel mux. */
854 reg = __raw_readl(MXC_CCM_CBCMR);
855 reg &= ~MXC_CCM_CBCMR_PERIPH_CLK2_SEL_MASK;
856 reg |= ((mux - 4) << MXC_CCM_CBCMR_PERIPH_CLK2_SEL_OFFSET);
857 __raw_writel(reg, MXC_CCM_CBCMR);
/* NOTE(review): the panic text below names _clk_axi_a_set_rate; it was
 * apparently copy-pasted from the AXI path. */
860 if (!WAIT(!(__raw_readl(MXC_CCM_CDHIPR)
861 & MXC_CCM_CDHIPR_PERIPH_CLK_SEL_BUSY), SPIN_DELAY))
862 panic("pll _clk_axi_a_set_rate failed\n");
/* periph rate: parent rate, optionally divided by periph_clk2_podf when
 * sourced from pll3_sw_clk or osc. */
867 static unsigned long _clk_periph_get_rate(struct clk *clk)
873 if ((clk->parent == &pll3_sw_clk) || (clk->parent == &osc_clk)) {
874 reg = __raw_readl(MXC_CCM_CBCDR)
875 & MXC_CCM_CBCDR_PERIPH_CLK2_PODF_MASK;
876 div = (reg >> MXC_CCM_CBCDR_PERIPH_CLK2_PODF_OFFSET) + 1;
878 val = clk_get_rate(clk->parent) / div;
/* The main peripheral (bus) clock domain root. */
882 static struct clk periph_clk = {
883 __INIT_CLK_DEBUG(periph_clk)
884 .parent = &pll2_528_bus_main_clk,
885 .set_parent = _clk_periph_set_parent,
886 .get_rate = _clk_periph_get_rate,
/* AXI rate = parent / (CBCDR.AXI_PODF + 1). */
889 static unsigned long _clk_axi_get_rate(struct clk *clk)
894 reg = __raw_readl(MXC_CCM_CBCDR) & MXC_CCM_CBCDR_AXI_PODF_MASK;
895 div = (reg >> MXC_CCM_CBCDR_AXI_PODF_OFFSET);
897 val = clk_get_rate(clk->parent) / (div + 1);
/* Program AXI_PODF; rejects non-exact or >8 dividers, then waits for the
 * divider handshake to finish. */
901 static int _clk_axi_set_rate(struct clk *clk, unsigned long rate)
904 u32 parent_rate = clk_get_rate(clk->parent);
906 div = parent_rate / rate;
910 if (((parent_rate / div) != rate) || (div > 8))
913 reg = __raw_readl(MXC_CCM_CBCDR);
914 reg &= ~MXC_CCM_CBCDR_AXI_PODF_MASK;
915 reg |= (div - 1) << MXC_CCM_CBCDR_AXI_PODF_OFFSET;
916 __raw_writel(reg, MXC_CCM_CBCDR);
918 if (!WAIT(!(__raw_readl(MXC_CCM_CDHIPR)
919 & MXC_CCM_CDHIPR_AXI_PODF_BUSY), SPIN_DELAY))
920 panic("pll _clk_axi_a_set_rate failed\n");
/* Nearest achievable AXI rate (clamping lines elided in this listing). */
925 static unsigned long _clk_axi_round_rate(struct clk *clk,
929 u32 parent_rate = clk_get_rate(clk->parent);
931 div = parent_rate / rate;
933 /* Make sure rate is not greater than the maximum
934 * value for the clock.
935 * Also prevent a div of 0.
943 return parent_rate / div;
/*
 * Reparent axi_clk: periph_clk selects directly via AXI_SEL; the PFD
 * alternates go through AXI_ALT_SEL with AXI_SEL set afterwards.
 */
946 static int _clk_axi_set_parent(struct clk *clk, struct clk *parent)
951 mux = _get_mux6(parent, &periph_clk, &pll2_pfd_400M,
952 &pll3_pfd_540M, NULL, NULL, NULL);
955 /* Set the AXI_SEL mux */
956 reg = __raw_readl(MXC_CCM_CBCDR) & ~MXC_CCM_CBCDR_AXI_SEL;
957 __raw_writel(reg, MXC_CCM_CBCDR);
959 /* Set the AXI_ALT_SEL mux. */
960 reg = __raw_readl(MXC_CCM_CBCDR)
961 & ~MXC_CCM_CBCDR_AXI_ALT_SEL_MASK;
/* Was "reg = (...);\": the plain assignment threw away the masked
 * register value read above (clobbering every other CBCDR field on the
 * write-back), and the trailing backslash glued this line to the next.
 * OR the mux field in instead. */
962 reg |= ((mux - 1) << MXC_CCM_CBCDR_AXI_ALT_SEL_OFFSET);
963 __raw_writel(reg, MXC_CCM_CBCDR);
965 /* Set the AXI_SEL mux */
966 reg = __raw_readl(MXC_CCM_CBCDR) & ~MXC_CCM_CBCDR_AXI_SEL;
967 reg |= MXC_CCM_CBCDR_AXI_SEL;
968 __raw_writel(reg, MXC_CCM_CBCDR);
/* The AXI bus clock. */
973 static struct clk axi_clk = {
974 __INIT_CLK_DEBUG(axi_clk)
975 .parent = &periph_clk,
976 .set_parent = _clk_axi_set_parent,
977 .set_rate = _clk_axi_set_rate,
978 .get_rate = _clk_axi_get_rate,
979 .round_rate = _clk_axi_round_rate,
/* AHB rate = parent / (CBCDR.AHB_PODF + 1). */
982 static unsigned long _clk_ahb_get_rate(struct clk *clk)
986 reg = __raw_readl(MXC_CCM_CBCDR);
987 div = ((reg & MXC_CCM_CBCDR_AHB_PODF_MASK) >>
988 MXC_CCM_CBCDR_AHB_PODF_OFFSET) + 1;
990 return clk_get_rate(clk->parent) / div;
/* Program AHB_PODF; exact dividers 1..8 only, then wait for handshake. */
993 static int _clk_ahb_set_rate(struct clk *clk, unsigned long rate)
996 u32 parent_rate = clk_get_rate(clk->parent);
998 div = parent_rate / rate;
1001 if (((parent_rate / div) != rate) || (div > 8))
1004 reg = __raw_readl(MXC_CCM_CBCDR);
1005 reg &= ~MXC_CCM_CBCDR_AHB_PODF_MASK;
1006 reg |= (div - 1) << MXC_CCM_CBCDR_AHB_PODF_OFFSET;
1007 __raw_writel(reg, MXC_CCM_CBCDR);
1009 if (!WAIT(!(__raw_readl(MXC_CCM_CDHIPR) & MXC_CCM_CDHIPR_AHB_PODF_BUSY),
1011 panic("_clk_ahb_set_rate failed\n");
/* Nearest achievable AHB rate (clamping lines elided in this listing). */
1016 static unsigned long _clk_ahb_round_rate(struct clk *clk,
1020 u32 parent_rate = clk_get_rate(clk->parent);
1022 div = parent_rate / rate;
1024 /* Make sure rate is not greater than the maximum value for the clock.
1025 * Also prevent a div of 0.
1033 return parent_rate / div;
/* The AHB bus clock. */
1036 static struct clk ahb_clk = {
1037 __INIT_CLK_DEBUG(ahb_clk)
1038 .parent = &periph_clk,
1039 .get_rate = _clk_ahb_get_rate,
1040 .set_rate = _clk_ahb_set_rate,
1041 .round_rate = _clk_ahb_round_rate,
/* IPG rate = parent / (CBCDR.IPG_PODF + 1). */
1044 static unsigned long _clk_ipg_get_rate(struct clk *clk)
1048 reg = __raw_readl(MXC_CCM_CBCDR);
1049 div = ((reg & MXC_CCM_CBCDR_IPG_PODF_MASK) >>
1050 MXC_CCM_CBCDR_IPG_PODF_OFFSET) + 1;
1052 return clk_get_rate(clk->parent) / div;
/* The IPG peripheral clock. */
1056 static struct clk ipg_clk = {
1057 __INIT_CLK_DEBUG(ipg_clk)
1059 .get_rate = _clk_ipg_get_rate,
/* MMDC channel 0 AXI rate = parent / (CBCDR.MMDC_CH0_PODF + 1). */
1062 static unsigned long _clk_mmdc_ch0_axi_get_rate(struct clk *clk)
1066 reg = __raw_readl(MXC_CCM_CBCDR);
1067 div = ((reg & MXC_CCM_CBCDR_MMDC_CH0_PODF_MASK) >>
1068 MXC_CCM_CBCDR_MMDC_CH0_PODF_OFFSET) + 1;
1070 return clk_get_rate(clk->parent) / div;
/* Program MMDC_CH0_PODF; exact dividers 1..8, then wait for handshake. */
1073 static int _clk_mmdc_ch0_axi_set_rate(struct clk *clk, unsigned long rate)
1076 u32 parent_rate = clk_get_rate(clk->parent);
1078 div = parent_rate / rate;
1081 if (((parent_rate / div) != rate) || (div > 8))
1084 reg = __raw_readl(MXC_CCM_CBCDR);
1085 reg &= ~MXC_CCM_CBCDR_MMDC_CH0_PODF_MASK;
1086 reg |= (div - 1) << MXC_CCM_CBCDR_MMDC_CH0_PODF_OFFSET;
1087 __raw_writel(reg, MXC_CCM_CBCDR);
1089 if (!WAIT(!(__raw_readl(MXC_CCM_CDHIPR)
1090 & MXC_CCM_CDHIPR_MMDC_CH0_PODF_BUSY),
1092 panic("_clk_mmdc_ch0_axi_set_rate failed\n");
/* Nearest achievable rate (clamping lines elided in this listing). */
1097 static unsigned long _clk_mmdc_ch0_axi_round_rate(struct clk *clk,
1101 u32 parent_rate = clk_get_rate(clk->parent);
1103 div = parent_rate / rate;
1105 /* Make sure rate is not greater than the maximum value for the clock.
1106 * Also prevent a div of 0.
1114 return parent_rate / div;
/* [0] = MMDC ch0 AXI clock, [1] = its IPG companion (secondary gate). */
1117 static struct clk mmdc_ch0_axi_clk[] = {
1119 __INIT_CLK_DEBUG(mmdc_ch0_axi_clk)
1121 .parent = &periph_clk,
1122 .enable = _clk_enable,
1123 .disable = _clk_disable,
1124 .enable_reg = MXC_CCM_CCGR3,
1125 .enable_shift = MXC_CCM_CCGRx_CG10_OFFSET,
1126 .secondary = &mmdc_ch0_axi_clk[1],
1127 .get_rate = _clk_mmdc_ch0_axi_get_rate,
1128 .set_rate = _clk_mmdc_ch0_axi_set_rate,
1129 .round_rate = _clk_mmdc_ch0_axi_round_rate,
1132 __INIT_CLK_DEBUG(mmdc_ch0_ipg_clk)
1135 .enable = _clk_enable,
1136 .disable = _clk_disable,
1137 .enable_reg = MXC_CCM_CCGR3,
1138 .enable_shift = MXC_CCM_CCGRx_CG12_OFFSET,
/* MMDC channel 1 AXI rate = parent / (CBCDR.MMDC_CH1_PODF + 1). */
1142 static unsigned long _clk_mmdc_ch1_axi_get_rate(struct clk *clk)
1146 reg = __raw_readl(MXC_CCM_CBCDR);
1147 div = ((reg & MXC_CCM_CBCDR_MMDC_CH1_PODF_MASK) >>
1148 MXC_CCM_CBCDR_MMDC_CH1_PODF_OFFSET) + 1;
1150 return clk_get_rate(clk->parent) / div;
/* Program MMDC_CH1_PODF; exact dividers 1..8, then wait for handshake. */
1153 static int _clk_mmdc_ch1_axi_set_rate(struct clk *clk, unsigned long rate)
1156 u32 parent_rate = clk_get_rate(clk->parent);
1158 div = parent_rate / rate;
1161 if (((parent_rate / div) != rate) || (div > 8))
1164 reg = __raw_readl(MXC_CCM_CBCDR);
1165 reg &= ~MXC_CCM_CBCDR_MMDC_CH1_PODF_MASK;
1166 reg |= (div - 1) << MXC_CCM_CBCDR_MMDC_CH1_PODF_OFFSET;
1167 __raw_writel(reg, MXC_CCM_CBCDR);
1169 if (!WAIT(!(__raw_readl(MXC_CCM_CDHIPR)
1170 & MXC_CCM_CDHIPR_MMDC_CH1_PODF_BUSY), SPIN_DELAY))
1171 panic("_clk_mmdc_ch1_axi_set_rate failed\n");
/* Nearest achievable rate (clamping lines elided in this listing). */
1176 static unsigned long _clk_mmdc_ch1_axi_round_rate(struct clk *clk,
1180 u32 parent_rate = clk_get_rate(clk->parent);
1182 div = parent_rate / rate;
1184 /* Make sure rate is not greater than the maximum value for the clock.
1185 * Also prevent a div of 0.
1193 return parent_rate / div;
/* [0] = MMDC ch1 AXI clock (off PLL2 PFD 400M), [1] = IPG companion. */
1196 static struct clk mmdc_ch1_axi_clk[] = {
1198 __INIT_CLK_DEBUG(mmdc_ch1_axi_clk)
1200 .parent = &pll2_pfd_400M,
1201 .enable = _clk_enable,
1202 .disable = _clk_disable,
1203 .enable_reg = MXC_CCM_CCGR3,
1204 .enable_shift = MXC_CCM_CCGRx_CG11_OFFSET,
1205 .secondary = &mmdc_ch1_axi_clk[1],
1206 .get_rate = _clk_mmdc_ch1_axi_get_rate,
1207 .set_rate = _clk_mmdc_ch1_axi_set_rate,
1208 .round_rate = _clk_mmdc_ch1_axi_round_rate,
1212 __INIT_CLK_DEBUG(mmdc_ch1_ipg_clk)
1214 .enable = _clk_enable,
1215 .disable = _clk_disable,
1216 .enable_reg = MXC_CCM_CCGR3,
1217 .enable_shift = MXC_CCM_CCGRx_CG13_OFFSET,
/* IPG-derived peripheral clock (perclk) root. */
1221 static struct clk ipg_perclk = {
1222 __INIT_CLK_DEBUG(ipg_perclk)
/* SPBA bus gate. */
1226 static struct clk spba_clk = {
1227 __INIT_CLK_DEBUG(spba_clk)
1229 .enable_reg = MXC_CCM_CCGR5,
1230 .enable_shift = MXC_CCM_CCGRx_CG6_OFFSET,
1231 .enable = _clk_enable,
1232 .disable = _clk_disable,
/* SDMA engine gate. */
1235 static struct clk sdma_clk = {
1236 __INIT_CLK_DEBUG(sdma_clk)
1238 .enable_reg = MXC_CCM_CCGR5,
1239 .enable_shift = MXC_CCM_CCGRx_CG3_OFFSET,
1240 .enable = _clk_enable,
1241 .disable = _clk_disable,
/*
 * Select GPU2D AXI source: AHB when parent == &ahb_clk, else AXI.
 * Fixed the read-modify-write: the mask must be negated ("& ~SEL") so the
 * current register contents are preserved and the select bit cleared;
 * without the ~, the write-back zeroed every other CBCMR field.
 */
1244 static int _clk_gpu2d_axi_set_parent(struct clk *clk, struct clk *parent)
1246 u32 reg = __raw_readl(MXC_CCM_CBCMR) & ~MXC_CCM_CBCMR_GPU2D_AXI_CLK_SEL;
1248 if (parent == &ahb_clk)
1249 reg |= MXC_CCM_CBCMR_GPU2D_AXI_CLK_SEL;
1251 __raw_writel(reg, MXC_CCM_CBCMR);
/* GPU2D AXI clock (mux only; no gate of its own). */
1256 static struct clk gpu2d_axi_clk = {
1257 __INIT_CLK_DEBUG(gpu2d_axi_clk)
1259 .set_parent = _clk_gpu2d_axi_set_parent,
/*
 * Select GPU3D AXI source: AHB when parent == &ahb_clk, else AXI.
 * Fixed: mask negated ("& ~SEL") so other CBCMR bits survive the
 * read-modify-write and the select bit is actually cleared first.
 */
1262 static int _clk_gpu3d_axi_set_parent(struct clk *clk, struct clk *parent)
1264 u32 reg = __raw_readl(MXC_CCM_CBCMR) & ~MXC_CCM_CBCMR_GPU3D_AXI_CLK_SEL;
1266 if (parent == &ahb_clk)
1267 reg |= MXC_CCM_CBCMR_GPU3D_AXI_CLK_SEL;
1269 __raw_writel(reg, MXC_CCM_CBCMR);
/* GPU3D AXI clock (mux only; no gate of its own). */
1274 static struct clk gpu3d_axi_clk = {
1275 __INIT_CLK_DEBUG(gpu3d_axi_clk)
1277 .set_parent = _clk_gpu3d_axi_set_parent,
/*
 * Select PCIe AXI source: AHB when parent == &ahb_clk, else AXI.
 * Fixed: mask negated ("& ~SEL") so other CBCMR bits survive the
 * read-modify-write and the select bit is actually cleared first.
 */
1280 static int _clk_pcie_axi_set_parent(struct clk *clk, struct clk *parent)
1282 u32 reg = __raw_readl(MXC_CCM_CBCMR) & ~MXC_CCM_CBCMR_PCIE_AXI_CLK_SEL;
1284 if (parent == &ahb_clk)
1285 reg |= MXC_CCM_CBCMR_PCIE_AXI_CLK_SEL;
1287 __raw_writel(reg, MXC_CCM_CBCMR);
/* PCIe AXI clock (mux only; no gate of its own). */
1292 static struct clk pcie_axi_clk = {
1293 __INIT_CLK_DEBUG(pcie_axi_clk)
1295 .set_parent = _clk_pcie_axi_set_parent,
/*
 * Select VDOA AXI source: AHB when parent == &ahb_clk, else AXI.
 * Fixed: mask negated ("& ~SEL") so other CBCMR bits survive the
 * read-modify-write and the select bit is actually cleared first.
 */
1298 static int _clk_vdo_axi_set_parent(struct clk *clk, struct clk *parent)
1300 u32 reg = __raw_readl(MXC_CCM_CBCMR) & ~MXC_CCM_CBCMR_VDOAXI_CLK_SEL;
1302 if (parent == &ahb_clk)
1303 reg |= MXC_CCM_CBCMR_VDOAXI_CLK_SEL;
1305 __raw_writel(reg, MXC_CCM_CBCMR);
/* VDO AXI bus clock (gated in CCGR6, muxable via CBCMR). */
1310 static struct clk vdo_axi_clk = {
1311 __INIT_CLK_DEBUG(vdo_axi_clk)
1313 .enable_reg = MXC_CCM_CCGR6,
1314 .enable_shift = MXC_CCM_CCGRx_CG6_OFFSET,
1315 .enable = _clk_enable,
1316 .disable = _clk_disable,
1317 .set_parent = _clk_vdo_axi_set_parent,
/* VDOA (video deinterlacer) gate. */
1320 static struct clk vdoa_clk = {
1321 __INIT_CLK_DEBUG(vdoa_clk)
1324 .enable_reg = MXC_CCM_CCGR2,
1325 .enable_shift = MXC_CCM_CCGRx_CG13_OFFSET,
1326 .enable = _clk_enable,
1327 .disable = _clk_disable,
/* [0] = GPT ipg clock, [1] = GPT serial clock (chained as secondary). */
1330 static struct clk gpt_clk[] = {
1332 __INIT_CLK_DEBUG(gpt_clk)
1333 .parent = &ipg_perclk,
1335 .enable_reg = MXC_CCM_CCGR1,
1336 .enable_shift = MXC_CCM_CCGRx_CG10_OFFSET,
1337 .enable = _clk_enable,
1338 .disable = _clk_disable,
1339 .secondary = &gpt_clk[1],
1342 __INIT_CLK_DEBUG(gpt_serial_clk)
1344 .enable_reg = MXC_CCM_CCGR1,
1345 .enable_shift = MXC_CCM_CCGRx_CG11_OFFSET,
1346 .enable = _clk_enable,
1347 .disable = _clk_disable,
/* IIM (fuse box) gate. */
1351 static struct clk iim_clk = {
1352 __INIT_CLK_DEBUG(iim_clk)
1354 .enable = _clk_enable,
1355 .enable_reg = MXC_CCM_CCGR2,
1356 .enable_shift = MXC_CCM_CCGRx_CG6_OFFSET,
1357 .disable = _clk_disable,
/* I2C1..I2C3 gates, all fed from ipg_perclk. */
1360 static struct clk i2c_clk[] = {
1362 __INIT_CLK_DEBUG(i2c_clk_0)
1364 .parent = &ipg_perclk,
1365 .enable_reg = MXC_CCM_CCGR2,
1366 .enable_shift = MXC_CCM_CCGRx_CG3_OFFSET,
1367 .enable = _clk_enable,
1368 .disable = _clk_disable,
1371 __INIT_CLK_DEBUG(i2c_clk_1)
1373 .parent = &ipg_perclk,
1374 .enable_reg = MXC_CCM_CCGR2,
1375 .enable_shift = MXC_CCM_CCGRx_CG4_OFFSET,
1376 .enable = _clk_enable,
1377 .disable = _clk_disable,
1380 __INIT_CLK_DEBUG(i2c_clk_2)
1382 .parent = &ipg_perclk,
1383 .enable_reg = MXC_CCM_CCGR2,
1384 .enable_shift = MXC_CCM_CCGRx_CG5_OFFSET,
1385 .enable = _clk_enable,
1386 .disable = _clk_disable,
/*
 * Select the VPU AXI source (axi_clk / PLL2 PFD 400M / PLL2 PFD 352M).
 * Fixed: the field mask must be negated ("& ~MASK") so the
 * read-modify-write clears only the VPU_AXI_CLK_SEL field; without the ~,
 * the write-back zeroed every other CBCMR field and the old mux value was
 * never cleared before ORing in the new one.
 */
1390 static int _clk_vpu_axi_set_parent(struct clk *clk, struct clk *parent)
1393 u32 reg = __raw_readl(MXC_CCM_CBCMR)
1394 & ~MXC_CCM_CBCMR_VPU_AXI_CLK_SEL_MASK;
1396 mux = _get_mux6(parent, &axi_clk, &pll2_pfd_400M,
1397 &pll2_pfd_352M, NULL, NULL, NULL);
1399 reg |= (mux << MXC_CCM_CBCMR_VPU_AXI_CLK_SEL_OFFSET);
1401 __raw_writel(reg, MXC_CCM_CBCMR);
/* VPU AXI rate = parent / (CSCDR1.VPU_AXI_PODF + 1). */
1406 static unsigned long _clk_vpu_axi_get_rate(struct clk *clk)
1410 reg = __raw_readl(MXC_CCM_CSCDR1);
1411 div = ((reg & MXC_CCM_CSCDR1_VPU_AXI_PODF_MASK) >>
1412 MXC_CCM_CSCDR1_VPU_AXI_PODF_OFFSET) + 1;
1414 return clk_get_rate(clk->parent) / div;
/* Program VPU_AXI_PODF; exact dividers 1..8 only. */
1417 static int _clk_vpu_axi_set_rate(struct clk *clk, unsigned long rate)
1420 u32 parent_rate = clk_get_rate(clk->parent);
1422 div = parent_rate / rate;
1425 if (((parent_rate / div) != rate) || (div > 8))
1428 reg = __raw_readl(MXC_CCM_CSCDR1);
1429 reg &= ~MXC_CCM_CSCDR1_VPU_AXI_PODF_MASK;
1430 reg |= (div - 1) << MXC_CCM_CSCDR1_VPU_AXI_PODF_OFFSET;
1431 __raw_writel(reg, MXC_CCM_CSCDR1);
/* Nearest achievable rate (clamping lines elided in this listing). */
1436 static unsigned long _clk_vpu_axi_round_rate(struct clk *clk,
1440 u32 parent_rate = clk_get_rate(clk->parent);
1442 div = parent_rate / rate;
1444 /* Make sure rate is not greater than the maximum value for the clock.
1445 * Also prevent a div of 0.
1453 return parent_rate / div;
/* VPU clock: gated in CCGR6 with full mux/divider control. */
1456 static struct clk vpu_clk = {
1457 __INIT_CLK_DEBUG(vpu_clk)
1459 .enable_reg = MXC_CCM_CCGR6,
1460 .enable_shift = MXC_CCM_CCGRx_CG7_OFFSET,
1461 .enable = _clk_enable,
1462 .disable = _clk_disable,
1463 .set_parent = _clk_vpu_axi_set_parent,
1464 .round_rate = _clk_vpu_axi_round_rate,
1465 .set_rate = _clk_vpu_axi_set_rate,
1466 .get_rate = _clk_vpu_axi_get_rate,
/*
 * IPU1 HSP clock: mux and divider both in CSCDR3.
 * NOTE(review): lines are elided from this excerpt (numbering jumps).
 */
/* Select IPU1 HSP parent among mmdc_ch0_axi, pll2_pfd_400M, pll3_120M, pll3_pfd_540M. */
1469 static int _clk_ipu1_set_parent(struct clk *clk, struct clk *parent)
/* NOTE(review): '& MASK' without '~' — old mux bits are not cleared; verify. */
1472 u32 reg = __raw_readl(MXC_CCM_CSCDR3)
1473 & MXC_CCM_CSCDR3_IPU1_HSP_CLK_SEL_MASK;
1475 mux = _get_mux6(parent, &mmdc_ch0_axi_clk[0],
1476 &pll2_pfd_400M, &pll3_120M, &pll3_pfd_540M, NULL, NULL);
1478 reg |= (mux << MXC_CCM_CSCDR3_IPU1_HSP_CLK_SEL_OFFSET);
1480 __raw_writel(reg, MXC_CCM_CSCDR3);
/* Rate = parent rate / (IPU1_HSP_PODF + 1). */
1485 static unsigned long _clk_ipu1_get_rate(struct clk *clk)
1489 reg = __raw_readl(MXC_CCM_CSCDR3);
1490 div = ((reg & MXC_CCM_CSCDR3_IPU1_HSP_PODF_MASK) >>
1491 MXC_CCM_CSCDR3_IPU1_HSP_PODF_OFFSET) + 1;
1493 return clk_get_rate(clk->parent) / div;
/* Program IPU1_HSP_PODF; exact divides up to 8 only. */
1496 static int _clk_ipu1_set_rate(struct clk *clk, unsigned long rate)
1499 u32 parent_rate = clk_get_rate(clk->parent);
1501 div = parent_rate / rate;
1504 if (((parent_rate / div) != rate) || (div > 8))
1507 reg = __raw_readl(MXC_CCM_CSCDR3);
1508 reg &= ~MXC_CCM_CSCDR3_IPU1_HSP_PODF_MASK;
1509 reg |= (div - 1) << MXC_CCM_CSCDR3_IPU1_HSP_PODF_OFFSET;
1510 __raw_writel(reg, MXC_CCM_CSCDR3);
/* Shared rounding helper for both IPU HSP clocks. */
1515 static unsigned long _clk_ipu_round_rate(struct clk *clk,
1519 u32 parent_rate = clk_get_rate(clk->parent);
1521 div = parent_rate / rate;
1523 /* Make sure rate is not greater than the maximum value for the clock.
1524 * Also prevent a div of 0.
1532 return parent_rate / div;
/* IPU1 gate: CCGR3 CG0; defaults to the MMDC ch0 AXI parent. */
1535 static struct clk ipu1_clk = {
1536 __INIT_CLK_DEBUG(ipu1_clk)
1537 .parent = &mmdc_ch0_axi_clk[0],
1538 .enable_reg = MXC_CCM_CCGR3,
1539 .enable_shift = MXC_CCM_CCGRx_CG0_OFFSET,
1540 .enable = _clk_enable,
1541 .disable = _clk_disable,
1542 .set_parent = _clk_ipu1_set_parent,
1543 .round_rate = _clk_ipu_round_rate,
1544 .set_rate = _clk_ipu1_set_rate,
1545 .get_rate = _clk_ipu1_get_rate,
/*
 * IPU2 HSP clock: mirrors the IPU1 handlers, using the IPU2 fields of CSCDR3.
 * NOTE(review): lines are elided from this excerpt (numbering jumps).
 */
1548 static int _clk_ipu2_set_parent(struct clk *clk, struct clk *parent)
/* NOTE(review): '& MASK' without '~' — old mux bits are not cleared; verify. */
1551 u32 reg = __raw_readl(MXC_CCM_CSCDR3)
1552 & MXC_CCM_CSCDR3_IPU2_HSP_CLK_SEL_MASK;
1554 mux = _get_mux6(parent, &mmdc_ch0_axi_clk[0],
1555 &pll2_pfd_400M, &pll3_120M, &pll3_pfd_540M, NULL, NULL);
1557 reg |= (mux << MXC_CCM_CSCDR3_IPU2_HSP_CLK_SEL_OFFSET);
1559 __raw_writel(reg, MXC_CCM_CSCDR3);
/* Rate = parent rate / (IPU2_HSP_PODF + 1). */
1564 static unsigned long _clk_ipu2_get_rate(struct clk *clk)
1568 reg = __raw_readl(MXC_CCM_CSCDR3);
1569 div = ((reg & MXC_CCM_CSCDR3_IPU2_HSP_PODF_MASK) >>
1570 MXC_CCM_CSCDR3_IPU2_HSP_PODF_OFFSET) + 1;
1572 return clk_get_rate(clk->parent) / div;
/* Program IPU2_HSP_PODF; exact divides up to 8 only. */
1575 static int _clk_ipu2_set_rate(struct clk *clk, unsigned long rate)
1578 u32 parent_rate = clk_get_rate(clk->parent);
1580 div = parent_rate / rate;
1583 if (((parent_rate / div) != rate) || (div > 8))
1586 reg = __raw_readl(MXC_CCM_CSCDR3);
1587 reg &= ~MXC_CCM_CSCDR3_IPU2_HSP_PODF_MASK;
1588 reg |= (div - 1) << MXC_CCM_CSCDR3_IPU2_HSP_PODF_OFFSET;
1589 __raw_writel(reg, MXC_CCM_CSCDR3);
/* IPU2 gate: CCGR3 CG3; shares _clk_ipu_round_rate with IPU1. */
1594 static struct clk ipu2_clk = {
1595 __INIT_CLK_DEBUG(ipu2_clk)
1596 .parent = &mmdc_ch0_axi_clk[0],
1597 .enable_reg = MXC_CCM_CCGR3,
1598 .enable_shift = MXC_CCM_CCGRx_CG3_OFFSET,
1599 .enable = _clk_enable,
1600 .disable = _clk_disable,
1601 .set_parent = _clk_ipu2_set_parent,
1602 .round_rate = _clk_ipu_round_rate,
1603 .set_rate = _clk_ipu2_set_rate,
1604 .get_rate = _clk_ipu2_get_rate,
/*
 * uSDHC clocks: single-bit parent select in CSCMR1 (pll2_pfd_400M vs
 * pll2_pfd_352M), per-instance PODF in CSCDR1. Rounding is shared.
 * NOTE(review): lines are elided from this excerpt (numbering jumps).
 */
/* Shared rounding helper for all four uSDHC instances. */
1607 static unsigned long _clk_usdhc_round_rate(struct clk *clk,
1611 u32 parent_rate = clk_get_rate(clk->parent);
1613 div = parent_rate / rate;
1615 /* Make sure rate is not greater than the maximum value for the clock.
1616 * Also prevent a div of 0.
1624 return parent_rate / div;
/* Set USDHC1 source: bit set selects pll2_pfd_352M, clear selects pll2_pfd_400M. */
1627 static int _clk_usdhc1_set_parent(struct clk *clk, struct clk *parent)
/* NOTE(review): '& SEL' without '~' keeps the old select bit; verify
 * the original used '& ~MXC_CCM_CSCMR1_USDHC1_CLK_SEL'. */
1629 u32 reg = __raw_readl(MXC_CCM_CSCMR1) & MXC_CCM_CSCMR1_USDHC1_CLK_SEL;
1631 if (parent == &pll2_pfd_352M)
1632 reg |= (MXC_CCM_CSCMR1_USDHC1_CLK_SEL);
1634 __raw_writel(reg, MXC_CCM_CSCMR1);
/* Rate = parent rate / (USDHC1_PODF + 1). */
1639 static unsigned long _clk_usdhc1_get_rate(struct clk *clk)
1643 reg = __raw_readl(MXC_CCM_CSCDR1);
1644 div = ((reg & MXC_CCM_CSCDR1_USDHC1_PODF_MASK) >>
1645 MXC_CCM_CSCDR1_USDHC1_PODF_OFFSET) + 1;
1647 return clk_get_rate(clk->parent) / div;
/* Program USDHC1_PODF; exact divides up to 8 only. */
1650 static int _clk_usdhc1_set_rate(struct clk *clk, unsigned long rate)
1653 u32 parent_rate = clk_get_rate(clk->parent);
1655 div = parent_rate / rate;
1658 if (((parent_rate / div) != rate) || (div > 8))
1661 reg = __raw_readl(MXC_CCM_CSCDR1);
1662 reg &= ~MXC_CCM_CSCDR1_USDHC1_PODF_MASK;
1663 reg |= (div - 1) << MXC_CCM_CSCDR1_USDHC1_PODF_OFFSET;
1664 __raw_writel(reg, MXC_CCM_CSCDR1);
/* uSDHC1 gate: CCGR6 CG1. */
1669 static struct clk usdhc1_clk = {
1670 __INIT_CLK_DEBUG(usdhc1_clk)
1672 .parent = &pll2_pfd_400M,
1673 .enable_reg = MXC_CCM_CCGR6,
1674 .enable_shift = MXC_CCM_CCGRx_CG1_OFFSET,
1675 .enable = _clk_enable,
1676 .disable = _clk_disable,
1677 .set_parent = _clk_usdhc1_set_parent,
1678 .round_rate = _clk_usdhc_round_rate,
1679 .set_rate = _clk_usdhc1_set_rate,
1680 .get_rate = _clk_usdhc1_get_rate,
/*
 * uSDHC2: identical structure to uSDHC1 (USDHC2 fields, CCGR6 CG2).
 * NOTE(review): lines are elided from this excerpt (numbering jumps).
 */
1683 static int _clk_usdhc2_set_parent(struct clk *clk, struct clk *parent)
/* NOTE(review): '& SEL' without '~' — old select bit not cleared; verify. */
1685 u32 reg = __raw_readl(MXC_CCM_CSCMR1) & MXC_CCM_CSCMR1_USDHC2_CLK_SEL;
1687 if (parent == &pll2_pfd_352M)
1688 reg |= (MXC_CCM_CSCMR1_USDHC2_CLK_SEL);
1690 __raw_writel(reg, MXC_CCM_CSCMR1);
/* Rate = parent rate / (USDHC2_PODF + 1). */
1695 static unsigned long _clk_usdhc2_get_rate(struct clk *clk)
1699 reg = __raw_readl(MXC_CCM_CSCDR1);
1700 div = ((reg & MXC_CCM_CSCDR1_USDHC2_PODF_MASK) >>
1701 MXC_CCM_CSCDR1_USDHC2_PODF_OFFSET) + 1;
1703 return clk_get_rate(clk->parent) / div;
/* Program USDHC2_PODF; exact divides up to 8 only. */
1706 static int _clk_usdhc2_set_rate(struct clk *clk, unsigned long rate)
1709 u32 parent_rate = clk_get_rate(clk->parent);
1711 div = parent_rate / rate;
1714 if (((parent_rate / div) != rate) || (div > 8))
1717 reg = __raw_readl(MXC_CCM_CSCDR1);
1718 reg &= ~MXC_CCM_CSCDR1_USDHC2_PODF_MASK;
1719 reg |= (div - 1) << MXC_CCM_CSCDR1_USDHC2_PODF_OFFSET;
1720 __raw_writel(reg, MXC_CCM_CSCDR1);
/* uSDHC2 gate: CCGR6 CG2. */
1725 static struct clk usdhc2_clk = {
1726 __INIT_CLK_DEBUG(usdhc2_clk)
1728 .parent = &pll2_pfd_400M,
1729 .enable_reg = MXC_CCM_CCGR6,
1730 .enable_shift = MXC_CCM_CCGRx_CG2_OFFSET,
1731 .enable = _clk_enable,
1732 .disable = _clk_disable,
1733 .set_parent = _clk_usdhc2_set_parent,
1734 .round_rate = _clk_usdhc_round_rate,
1735 .set_rate = _clk_usdhc2_set_rate,
1736 .get_rate = _clk_usdhc2_get_rate,
/*
 * uSDHC3: identical structure to uSDHC1 (USDHC3 fields, CCGR6 CG3).
 * NOTE(review): lines are elided from this excerpt (numbering jumps).
 */
1739 static int _clk_usdhc3_set_parent(struct clk *clk, struct clk *parent)
/* NOTE(review): '& SEL' without '~' — old select bit not cleared; verify. */
1741 u32 reg = __raw_readl(MXC_CCM_CSCMR1) & MXC_CCM_CSCMR1_USDHC3_CLK_SEL;
1743 if (parent == &pll2_pfd_352M)
1744 reg |= (MXC_CCM_CSCMR1_USDHC3_CLK_SEL);
1746 __raw_writel(reg, MXC_CCM_CSCMR1);
/* Rate = parent rate / (USDHC3_PODF + 1). */
1751 static unsigned long _clk_usdhc3_get_rate(struct clk *clk)
1755 reg = __raw_readl(MXC_CCM_CSCDR1);
1756 div = ((reg & MXC_CCM_CSCDR1_USDHC3_PODF_MASK) >>
1757 MXC_CCM_CSCDR1_USDHC3_PODF_OFFSET) + 1;
1759 return clk_get_rate(clk->parent) / div;
/* Program USDHC3_PODF; exact divides up to 8 only. */
1762 static int _clk_usdhc3_set_rate(struct clk *clk, unsigned long rate)
1765 u32 parent_rate = clk_get_rate(clk->parent);
1767 div = parent_rate / rate;
1770 if (((parent_rate / div) != rate) || (div > 8))
1773 reg = __raw_readl(MXC_CCM_CSCDR1);
1774 reg &= ~MXC_CCM_CSCDR1_USDHC3_PODF_MASK;
1775 reg |= (div - 1) << MXC_CCM_CSCDR1_USDHC3_PODF_OFFSET;
1776 __raw_writel(reg, MXC_CCM_CSCDR1);
/* uSDHC3 gate: CCGR6 CG3. */
1782 static struct clk usdhc3_clk = {
1783 __INIT_CLK_DEBUG(usdhc3_clk)
1785 .parent = &pll2_pfd_400M,
1786 .enable_reg = MXC_CCM_CCGR6,
1787 .enable_shift = MXC_CCM_CCGRx_CG3_OFFSET,
1788 .enable = _clk_enable,
1789 .disable = _clk_disable,
1790 .set_parent = _clk_usdhc3_set_parent,
1791 .round_rate = _clk_usdhc_round_rate,
1792 .set_rate = _clk_usdhc3_set_rate,
1793 .get_rate = _clk_usdhc3_get_rate,
/*
 * uSDHC4: identical structure to uSDHC1 (USDHC4 fields, CCGR6 CG4).
 * NOTE(review): lines are elided from this excerpt (numbering jumps).
 */
1796 static int _clk_usdhc4_set_parent(struct clk *clk, struct clk *parent)
/* NOTE(review): '& SEL' without '~' — old select bit not cleared; verify. */
1798 u32 reg = __raw_readl(MXC_CCM_CSCMR1) & MXC_CCM_CSCMR1_USDHC4_CLK_SEL;
1800 if (parent == &pll2_pfd_352M)
1801 reg |= (MXC_CCM_CSCMR1_USDHC4_CLK_SEL);
1803 __raw_writel(reg, MXC_CCM_CSCMR1);
/* Rate = parent rate / (USDHC4_PODF + 1). */
1808 static unsigned long _clk_usdhc4_get_rate(struct clk *clk)
1812 reg = __raw_readl(MXC_CCM_CSCDR1);
1813 div = ((reg & MXC_CCM_CSCDR1_USDHC4_PODF_MASK) >>
1814 MXC_CCM_CSCDR1_USDHC4_PODF_OFFSET) + 1;
1816 return clk_get_rate(clk->parent) / div;
/* Program USDHC4_PODF; exact divides up to 8 only. */
1819 static int _clk_usdhc4_set_rate(struct clk *clk, unsigned long rate)
1822 u32 parent_rate = clk_get_rate(clk->parent);
1824 div = parent_rate / rate;
1827 if (((parent_rate / div) != rate) || (div > 8))
1830 reg = __raw_readl(MXC_CCM_CSCDR1);
1831 reg &= ~MXC_CCM_CSCDR1_USDHC4_PODF_MASK;
1832 reg |= (div - 1) << MXC_CCM_CSCDR1_USDHC4_PODF_OFFSET;
1833 __raw_writel(reg, MXC_CCM_CSCDR1);
/* uSDHC4 gate: CCGR6 CG4. */
1839 static struct clk usdhc4_clk = {
1840 __INIT_CLK_DEBUG(usdhc4_clk)
1842 .parent = &pll2_pfd_400M,
1843 .enable_reg = MXC_CCM_CCGR6,
1844 .enable_shift = MXC_CCM_CCGRx_CG4_OFFSET,
1845 .enable = _clk_enable,
1846 .disable = _clk_disable,
1847 .set_parent = _clk_usdhc4_set_parent,
1848 .round_rate = _clk_usdhc_round_rate,
1849 .set_rate = _clk_usdhc4_set_rate,
1850 .get_rate = _clk_usdhc4_get_rate,
/*
 * SSI clocks: two-stage divider (PRED * PODF, combined range up to 512)
 * split by __calc_pre_post_dividers(); mux in CSCMR1, dividers in CS1CDR/CS2CDR.
 * NOTE(review): lines are elided from this excerpt (numbering jumps).
 */
/* Shared rounding for all SSI clocks: only exact pre*post divides allowed. */
1853 static unsigned long _clk_ssi_round_rate(struct clk *clk,
1857 u32 parent_rate = clk_get_rate(clk->parent);
1858 u32 div = parent_rate / rate;
1860 if (parent_rate % rate)
1863 __calc_pre_post_dividers(div, &pre, &post);
1865 return parent_rate / (pre * post);
/* Rate = parent rate / ((SSI1_PRED+1) * (SSI1_PODF+1)). */
1868 static unsigned long _clk_ssi1_get_rate(struct clk *clk)
1870 u32 reg, prediv, podf;
1872 reg = __raw_readl(MXC_CCM_CS1CDR);
1874 prediv = ((reg & MXC_CCM_CS1CDR_SSI1_CLK_PRED_MASK)
1875 >> MXC_CCM_CS1CDR_SSI1_CLK_PRED_OFFSET) + 1;
1876 podf = ((reg & MXC_CCM_CS1CDR_SSI1_CLK_PODF_MASK)
1877 >> MXC_CCM_CS1CDR_SSI1_CLK_PODF_OFFSET) + 1;
1879 return clk_get_rate(clk->parent) / (prediv * podf);
/* Split div into pre/post and program the SSI1 fields of CS1CDR. */
1882 static int _clk_ssi1_set_rate(struct clk *clk, unsigned long rate)
1884 u32 reg, div, pre, post;
1885 u32 parent_rate = clk_get_rate(clk->parent);
1887 div = parent_rate / rate;
1890 if (((parent_rate / div) != rate) || div > 512)
1893 __calc_pre_post_dividers(div, &pre, &post);
1895 reg = __raw_readl(MXC_CCM_CS1CDR);
1896 reg &= ~(MXC_CCM_CS1CDR_SSI1_CLK_PRED_MASK |
1897 MXC_CCM_CS1CDR_SSI1_CLK_PODF_MASK);
1898 reg |= (post - 1) << MXC_CCM_CS1CDR_SSI1_CLK_PODF_OFFSET;
1899 reg |= (pre - 1) << MXC_CCM_CS1CDR_SSI1_CLK_PRED_OFFSET;
1901 __raw_writel(reg, MXC_CCM_CS1CDR);
/* Select SSI1 parent among pll3_pfd_508M / pll3_pfd_454M / pll4_audio. */
1907 static int _clk_ssi1_set_parent(struct clk *clk, struct clk *parent)
/* NOTE(review): '& MASK' without '~' — old mux bits are not cleared; verify. */
1911 reg = __raw_readl(MXC_CCM_CSCMR1)
1912 & MXC_CCM_CSCMR1_SSI1_CLK_SEL_MASK;
1914 mux = _get_mux6(parent, &pll3_pfd_508M, &pll3_pfd_454M,
1915 &pll4_audio_main_clk, NULL, NULL, NULL);
1916 reg |= (mux << MXC_CCM_CSCMR1_SSI1_CLK_SEL_OFFSET);
1918 __raw_writel(reg, MXC_CCM_CSCMR1);
/* SSI1 gate: CCGR5 CG9. */
1923 static struct clk ssi1_clk = {
1924 __INIT_CLK_DEBUG(ssi1_clk)
1925 .parent = &pll3_pfd_508M,
1926 .enable_reg = MXC_CCM_CCGR5,
1927 .enable_shift = MXC_CCM_CCGRx_CG9_OFFSET,
1928 .enable = _clk_enable,
1929 .disable = _clk_disable,
1930 .set_parent = _clk_ssi1_set_parent,
1931 .set_rate = _clk_ssi1_set_rate,
1932 .round_rate = _clk_ssi_round_rate,
1933 .get_rate = _clk_ssi1_get_rate,
/*
 * SSI2: same structure as SSI1, using the SSI2 fields of CS2CDR/CSCMR1.
 * NOTE(review): lines are elided from this excerpt (numbering jumps).
 */
/* Rate = parent rate / ((SSI2_PRED+1) * (SSI2_PODF+1)). */
1936 static unsigned long _clk_ssi2_get_rate(struct clk *clk)
1938 u32 reg, prediv, podf;
1940 reg = __raw_readl(MXC_CCM_CS2CDR);
1942 prediv = ((reg & MXC_CCM_CS2CDR_SSI2_CLK_PRED_MASK)
1943 >> MXC_CCM_CS2CDR_SSI2_CLK_PRED_OFFSET) + 1;
1944 podf = ((reg & MXC_CCM_CS2CDR_SSI2_CLK_PODF_MASK)
1945 >> MXC_CCM_CS2CDR_SSI2_CLK_PODF_OFFSET) + 1;
1947 return clk_get_rate(clk->parent) / (prediv * podf);
/* Split div into pre/post and program the SSI2 fields of CS2CDR. */
1950 static int _clk_ssi2_set_rate(struct clk *clk, unsigned long rate)
1952 u32 reg, div, pre, post;
1953 u32 parent_rate = clk_get_rate(clk->parent);
1955 div = parent_rate / rate;
1958 if (((parent_rate / div) != rate) || div > 512)
1961 __calc_pre_post_dividers(div, &pre, &post);
1963 reg = __raw_readl(MXC_CCM_CS2CDR);
1964 reg &= ~(MXC_CCM_CS2CDR_SSI2_CLK_PRED_MASK |
1965 MXC_CCM_CS2CDR_SSI2_CLK_PODF_MASK);
1966 reg |= (post - 1) << MXC_CCM_CS2CDR_SSI2_CLK_PODF_OFFSET;
1967 reg |= (pre - 1) << MXC_CCM_CS2CDR_SSI2_CLK_PRED_OFFSET;
1969 __raw_writel(reg, MXC_CCM_CS2CDR);
/* Select SSI2 parent among pll3_pfd_508M / pll3_pfd_454M / pll4_audio. */
1975 static int _clk_ssi2_set_parent(struct clk *clk, struct clk *parent)
/* NOTE(review): '& MASK' without '~' — old mux bits are not cleared; verify. */
1979 reg = __raw_readl(MXC_CCM_CSCMR1)
1980 & MXC_CCM_CSCMR1_SSI2_CLK_SEL_MASK;
1982 mux = _get_mux6(parent, &pll3_pfd_508M, &pll3_pfd_454M,
1983 &pll4_audio_main_clk, NULL, NULL, NULL);
1984 reg |= (mux << MXC_CCM_CSCMR1_SSI2_CLK_SEL_OFFSET);
1986 __raw_writel(reg, MXC_CCM_CSCMR1);
/* SSI2 gate: CCGR5 CG10. */
1991 static struct clk ssi2_clk = {
1992 __INIT_CLK_DEBUG(ssi2_clk)
1993 .parent = &pll3_pfd_508M,
1994 .enable_reg = MXC_CCM_CCGR5,
1995 .enable_shift = MXC_CCM_CCGRx_CG10_OFFSET,
1996 .enable = _clk_enable,
1997 .disable = _clk_disable,
1998 .set_parent = _clk_ssi2_set_parent,
1999 .set_rate = _clk_ssi2_set_rate,
2000 .round_rate = _clk_ssi_round_rate,
2001 .get_rate = _clk_ssi2_get_rate,
/*
 * SSI3: dividers live in CS1CDR alongside SSI1's; mux in CSCMR1.
 * NOTE(review): lines are elided from this excerpt (numbering jumps).
 */
/* NOTE(review): this get_rate reads the SSI1 PRED/PODF fields of CS1CDR,
 * while _clk_ssi3_set_rate below programs the SSI3 fields — almost
 * certainly a copy-paste bug; rates reported for ssi3 would track ssi1's
 * dividers. Confirm against the reference manual before relying on it. */
2004 static unsigned long _clk_ssi3_get_rate(struct clk *clk)
2006 u32 reg, prediv, podf;
2008 reg = __raw_readl(MXC_CCM_CS1CDR);
2010 prediv = ((reg & MXC_CCM_CS1CDR_SSI1_CLK_PRED_MASK)
2011 >> MXC_CCM_CS1CDR_SSI1_CLK_PRED_OFFSET) + 1;
2012 podf = ((reg & MXC_CCM_CS1CDR_SSI1_CLK_PODF_MASK)
2013 >> MXC_CCM_CS1CDR_SSI1_CLK_PODF_OFFSET) + 1;
2015 return clk_get_rate(clk->parent) / (prediv * podf);
/* Split div into pre/post and program the SSI3 fields of CS1CDR. */
2018 static int _clk_ssi3_set_rate(struct clk *clk, unsigned long rate)
2020 u32 reg, div, pre, post;
2021 u32 parent_rate = clk_get_rate(clk->parent);
2023 div = parent_rate / rate;
2026 if (((parent_rate / div) != rate) || div > 512)
2029 __calc_pre_post_dividers(div, &pre, &post);
2031 reg = __raw_readl(MXC_CCM_CS1CDR);
2032 reg &= ~(MXC_CCM_CS1CDR_SSI3_CLK_PODF_MASK|
2033 MXC_CCM_CS1CDR_SSI3_CLK_PRED_MASK);
2034 reg |= (post - 1) << MXC_CCM_CS1CDR_SSI3_CLK_PODF_OFFSET;
2035 reg |= (pre - 1) << MXC_CCM_CS1CDR_SSI3_CLK_PRED_OFFSET;
2037 __raw_writel(reg, MXC_CCM_CS1CDR);
/* Select SSI3 parent among pll3_pfd_508M / pll3_pfd_454M / pll4_audio. */
2043 static int _clk_ssi3_set_parent(struct clk *clk, struct clk *parent)
/* NOTE(review): '& MASK' without '~' — old mux bits are not cleared; verify. */
2047 reg = __raw_readl(MXC_CCM_CSCMR1) & MXC_CCM_CSCMR1_SSI3_CLK_SEL_MASK;
2049 mux = _get_mux6(parent, &pll3_pfd_508M, &pll3_pfd_454M,
2050 &pll4_audio_main_clk, NULL, NULL, NULL);
2051 reg |= (mux << MXC_CCM_CSCMR1_SSI3_CLK_SEL_OFFSET);
2053 __raw_writel(reg, MXC_CCM_CSCMR1);
/* SSI3 gate: CCGR5 CG11. */
2058 static struct clk ssi3_clk = {
2059 __INIT_CLK_DEBUG(ssi3_clk)
2060 .parent = &pll3_pfd_508M,
2061 .enable_reg = MXC_CCM_CCGR5,
2062 .enable_shift = MXC_CCM_CCGRx_CG11_OFFSET,
2063 .enable = _clk_enable,
2064 .disable = _clk_disable,
2065 .set_parent = _clk_ssi3_set_parent,
2066 .set_rate = _clk_ssi3_set_rate,
2067 .round_rate = _clk_ssi_round_rate,
2068 .get_rate = _clk_ssi3_get_rate,
/*
 * LVDS display bridge (LDB) DI clocks: fixed /7 or *2/7 of the parent,
 * selected by the IPU_DIV bit in CSCMR2. The /20 term gives a 5% tolerance
 * when deciding which of the two ratios the requested rate matches.
 * NOTE(review): lines are elided from this excerpt (numbering jumps).
 */
/* Round to parent/7 if it fits (within 5%), else 2*parent/7. */
2071 static unsigned long _clk_ldb_di_round_rate(struct clk *clk,
2074 u32 parent_rate = clk_get_rate(clk->parent);
2076 if (rate * 7 <= parent_rate + parent_rate/20)
2077 return parent_rate / 7;
2079 return 2 * parent_rate / 7;
/* Report parent/7 or 2*parent/7 per the LDB_DI0_IPU_DIV bit. */
2082 static unsigned long _clk_ldb_di0_get_rate(struct clk *clk)
2086 div = __raw_readl(MXC_CCM_CSCMR2) &
2087 MXC_CCM_CSCMR2_LDB_DI0_IPU_DIV;
2090 return clk_get_rate(clk->parent) / 7;
2092 return (2 * clk_get_rate(clk->parent)) / 7;
/* Set or clear LDB_DI0_IPU_DIV to pick the /7 vs 2/7 ratio. */
2095 static int _clk_ldb_di0_set_rate(struct clk *clk, unsigned long rate)
2098 u32 parent_rate = clk_get_rate(clk->parent);
2100 if (rate * 7 <= parent_rate + parent_rate/20) {
2102 rate = parent_rate / 7;
2104 rate = 2 * parent_rate / 7;
2106 reg = __raw_readl(MXC_CCM_CSCMR2);
2108 reg |= MXC_CCM_CSCMR2_LDB_DI0_IPU_DIV;
2110 reg &= ~MXC_CCM_CSCMR2_LDB_DI0_IPU_DIV;
2112 __raw_writel(reg, MXC_CCM_CSCMR2);
/* Select LDB DI0 source in CS2CDR (PLL5 video, PLL2 PFDs, PLL3 PFD/otg). */
2117 static int _clk_ldb_di0_set_parent(struct clk *clk, struct clk *parent)
/* NOTE(review): '& MASK' without '~' — old mux bits are not cleared; verify. */
2121 reg = __raw_readl(MXC_CCM_CS2CDR)
2122 & MXC_CCM_CS2CDR_LDB_DI0_CLK_SEL_MASK;
2124 mux = _get_mux6(parent, &pll5_video_main_clk,
2125 &pll2_pfd_352M, &pll2_pfd_400M, &pll3_pfd_540M,
2126 &pll3_usb_otg_main_clk, NULL);
2127 reg |= (mux << MXC_CCM_CS2CDR_LDB_DI0_CLK_SEL_OFFSET);
2129 __raw_writel(reg, MXC_CCM_CS2CDR);
/* LDB DI0 gate: CCGR3 CG13. */
2134 static struct clk ldb_di0_clk = {
2135 __INIT_CLK_DEBUG(ldb_di0_clk)
2137 .parent = &pll3_pfd_540M,
2138 .enable_reg = MXC_CCM_CCGR3,
2139 .enable_shift = MXC_CCM_CCGRx_CG13_OFFSET,
2140 .enable = _clk_enable,
2141 .disable = _clk_disable,
2142 .set_parent = _clk_ldb_di0_set_parent,
2143 .set_rate = _clk_ldb_di0_set_rate,
2144 .round_rate = _clk_ldb_di_round_rate,
2145 .get_rate = _clk_ldb_di0_get_rate,
/*
 * LDB DI1: mirrors LDB DI0 using the DI1 bit/mux fields.
 * NOTE(review): lines are elided from this excerpt (numbering jumps).
 */
/* Report parent/7 or 2*parent/7 per the LDB_DI1_IPU_DIV bit. */
2148 static unsigned long _clk_ldb_di1_get_rate(struct clk *clk)
2152 div = __raw_readl(MXC_CCM_CSCMR2) &
2153 MXC_CCM_CSCMR2_LDB_DI1_IPU_DIV;
2156 return clk_get_rate(clk->parent) / 7;
2158 return (2 * clk_get_rate(clk->parent)) / 7;
/* Set or clear LDB_DI1_IPU_DIV to pick the /7 vs 2/7 ratio. */
2161 static int _clk_ldb_di1_set_rate(struct clk *clk, unsigned long rate)
2164 u32 parent_rate = clk_get_rate(clk->parent);
2166 if (rate * 7 <= parent_rate + parent_rate/20) {
2168 rate = parent_rate / 7;
2170 rate = 2 * parent_rate / 7;
2172 reg = __raw_readl(MXC_CCM_CSCMR2);
2174 reg |= MXC_CCM_CSCMR2_LDB_DI1_IPU_DIV;
2176 reg &= ~MXC_CCM_CSCMR2_LDB_DI1_IPU_DIV;
2178 __raw_writel(reg, MXC_CCM_CSCMR2);
/* Select LDB DI1 source in CS2CDR. */
2183 static int _clk_ldb_di1_set_parent(struct clk *clk, struct clk *parent)
/* NOTE(review): '& MASK' without '~' — old mux bits are not cleared; verify. */
2187 reg = __raw_readl(MXC_CCM_CS2CDR)
2188 & MXC_CCM_CS2CDR_LDB_DI1_CLK_SEL_MASK;
2190 mux = _get_mux6(parent, &pll5_video_main_clk,
2191 &pll2_pfd_352M, &pll2_pfd_400M, &pll3_pfd_540M,
2192 &pll3_usb_otg_main_clk, NULL);
2193 reg |= (mux << MXC_CCM_CS2CDR_LDB_DI1_CLK_SEL_OFFSET);
2195 __raw_writel(reg, MXC_CCM_CS2CDR);
/* LDB DI1 gate: CCGR3 CG14. */
2200 static struct clk ldb_di1_clk = {
2201 __INIT_CLK_DEBUG(ldb_di1_clk)
2203 .parent = &pll3_pfd_540M,
2204 .enable_reg = MXC_CCM_CCGR3,
2205 .enable_shift = MXC_CCM_CCGRx_CG14_OFFSET,
2206 .enable = _clk_enable,
2207 .disable = _clk_disable,
2208 .set_parent = _clk_ldb_di1_set_parent,
2209 .set_rate = _clk_ldb_di1_set_rate,
2210 .round_rate = _clk_ldb_di_round_rate,
2211 .get_rate = _clk_ldb_di1_get_rate,
/*
 * IPU DI (display interface) clocks. When parented to an LDB clock the
 * rate passes through 1:1 (LDB already divides); otherwise a PODF divider
 * in CHSCCDR applies. set_parent programs both the pre-mux and the final
 * CLK_SEL stage. NOTE(review): lines are elided from this excerpt.
 */
/* Shared rounding for all IPU DI clocks; LDB parents pass through. */
2215 static unsigned long _clk_ipu_di_round_rate(struct clk *clk,
2219 u32 parent_rate = clk_get_rate(clk->parent);
2221 if ((clk->parent == &ldb_di0_clk) ||
2222 (clk->parent == &ldb_di1_clk))
2225 div = parent_rate / rate;
2227 /* Make sure rate is not greater than the maximum value for the clock.
2228 * Also prevent a div of 0.
2236 return parent_rate / div;
/* Rate = parent (LDB) or parent / (IPU1_DI0_PODF + 1). */
2239 static unsigned long _clk_ipu1_di0_get_rate(struct clk *clk)
2243 if ((clk->parent == &ldb_di0_clk) ||
2244 (clk->parent == &ldb_di1_clk))
2245 return clk_get_rate(clk->parent);
2247 reg = __raw_readl(MXC_CCM_CHSCCDR);
2249 div = ((reg & MXC_CCM_CHSCCDR_IPU1_DI0_PODF_MASK) >>
2250 MXC_CCM_CHSCCDR_IPU1_DI0_PODF_OFFSET) + 1;
2252 return clk_get_rate(clk->parent) / div;
/* Program IPU1_DI0_PODF; LDB parents accept only the exact parent rate. */
2255 static int _clk_ipu1_di0_set_rate(struct clk *clk, unsigned long rate)
2258 u32 parent_rate = clk_get_rate(clk->parent);
2260 if ((clk->parent == &ldb_di0_clk) ||
2261 (clk->parent == &ldb_di1_clk)) {
2262 if (parent_rate == rate)
2268 div = parent_rate / rate;
2271 if (((parent_rate / div) != rate) || (div > 8))
2274 reg = __raw_readl(MXC_CCM_CHSCCDR);
2275 reg &= ~MXC_CCM_CHSCCDR_IPU1_DI0_PODF_MASK;
2276 reg |= (div - 1) << MXC_CCM_CHSCCDR_IPU1_DI0_PODF_OFFSET;
2277 __raw_writel(reg, MXC_CCM_CHSCCDR);
/* Two-stage parent select: pre-mux (divided sources) then final CLK_SEL,
 * with LDB parents handled specially before the pre-mux path. */
2283 static int _clk_ipu1_di0_set_parent(struct clk *clk, struct clk *parent)
2287 if (parent == &ldb_di0_clk)
2289 else if (parent == &ldb_di1_clk)
2292 reg = __raw_readl(MXC_CCM_CHSCCDR)
2293 & ~MXC_CCM_CHSCCDR_IPU1_DI0_PRE_CLK_SEL_MASK;
2295 mux = _get_mux6(parent, &mmdc_ch0_axi_clk[0],
2296 &pll3_usb_otg_main_clk, &pll5_video_main_clk,
2297 &pll2_pfd_352M, &pll2_pfd_400M, &pll3_pfd_540M);
2298 reg |= (mux << MXC_CCM_CHSCCDR_IPU1_DI0_PRE_CLK_SEL_OFFSET);
2300 __raw_writel(reg, MXC_CCM_CHSCCDR);
2302 /* Derive clock from divided pre-muxed ipu1_di0 clock.*/
2306 reg = __raw_readl(MXC_CCM_CHSCCDR)
2307 & ~MXC_CCM_CHSCCDR_IPU1_DI0_CLK_SEL_MASK;
2308 __raw_writel(reg | (mux << MXC_CCM_CHSCCDR_IPU1_DI0_CLK_SEL_OFFSET),
/*
 * IPU1 DI1 handlers plus the ipu1_di_clk[] pair (DI0/DI1).
 * NOTE(review): lines are elided from this excerpt (numbering jumps).
 */
/* Rate = parent (LDB) or parent / (IPU1_DI1_PODF + 1).
 * NOTE(review): unlike the DI0 variant, the PODF field is not shifted down
 * before +1 here — only valid if the DI1 PODF field starts at bit 0; verify. */
2314 static unsigned long _clk_ipu1_di1_get_rate(struct clk *clk)
2318 if ((clk->parent == &ldb_di0_clk) ||
2319 (clk->parent == &ldb_di1_clk))
2320 return clk_get_rate(clk->parent);
2322 reg = __raw_readl(MXC_CCM_CHSCCDR);
2324 div = (reg & MXC_CCM_CHSCCDR_IPU1_DI1_PODF_MASK) + 1;
2326 return clk_get_rate(clk->parent) / div;
/* Program IPU1_DI1_PODF; LDB parents accept only the exact parent rate. */
2329 static int _clk_ipu1_di1_set_rate(struct clk *clk, unsigned long rate)
2332 u32 parent_rate = clk_get_rate(clk->parent);
2334 if ((clk->parent == &ldb_di0_clk) ||
2335 (clk->parent == &ldb_di1_clk)) {
2336 if (parent_rate == rate)
2342 div = parent_rate / rate;
2345 if (((parent_rate / div) != rate) || (div > 8))
2348 reg = __raw_readl(MXC_CCM_CHSCCDR);
2349 reg &= ~MXC_CCM_CHSCCDR_IPU1_DI1_PODF_MASK;
2350 reg |= (div - 1) << MXC_CCM_CHSCCDR_IPU1_DI1_PODF_OFFSET;
2351 __raw_writel(reg, MXC_CCM_CHSCCDR);
/* Two-stage parent select for IPU1 DI1 (pre-mux then final CLK_SEL). */
2357 static int _clk_ipu1_di1_set_parent(struct clk *clk, struct clk *parent)
2361 if (parent == &ldb_di0_clk)
2363 else if (parent == &ldb_di1_clk)
2366 reg = __raw_readl(MXC_CCM_CHSCCDR)
2367 & ~MXC_CCM_CHSCCDR_IPU1_DI1_PRE_CLK_SEL_MASK;
2369 mux = _get_mux6(parent, &mmdc_ch0_axi_clk[0],
2370 &pll3_usb_otg_main_clk, &pll5_video_main_clk,
2371 &pll2_pfd_352M, &pll2_pfd_400M, &pll3_pfd_540M);
2372 reg |= (mux << MXC_CCM_CHSCCDR_IPU1_DI1_PRE_CLK_SEL_OFFSET);
2374 __raw_writel(reg, MXC_CCM_CHSCCDR);
2376 /* Derive clock from divided pre-muxed ipu1_di1 clock.*/
2379 reg = __raw_readl(MXC_CCM_CHSCCDR)
2380 & ~MXC_CCM_CHSCCDR_IPU1_DI1_CLK_SEL_MASK;
2381 __raw_writel(reg | (mux << MXC_CCM_CHSCCDR_IPU1_DI1_CLK_SEL_OFFSET),
/* ipu1_di_clk[0]=DI0 (CCGR3 CG1), ipu1_di_clk[1]=DI1 (CCGR3 CG2). */
2387 static struct clk ipu1_di_clk[] = {
2389 __INIT_CLK_DEBUG(ipu1_di_clk_0)
2391 .parent = &pll3_pfd_540M,
2392 .enable_reg = MXC_CCM_CCGR3,
2393 .enable_shift = MXC_CCM_CCGRx_CG1_OFFSET,
2394 .enable = _clk_enable,
2395 .disable = _clk_disable,
2396 .set_parent = _clk_ipu1_di0_set_parent,
2397 .set_rate = _clk_ipu1_di0_set_rate,
2398 .round_rate = _clk_ipu_di_round_rate,
2399 .get_rate = _clk_ipu1_di0_get_rate,
2402 __INIT_CLK_DEBUG(ipu1_di_clk_1)
2404 .parent = &pll3_pfd_540M,
2405 .enable_reg = MXC_CCM_CCGR3,
2406 .enable_shift = MXC_CCM_CCGRx_CG2_OFFSET,
2407 .enable = _clk_enable,
2408 .disable = _clk_disable,
2409 .set_parent = _clk_ipu1_di1_set_parent,
2410 .set_rate = _clk_ipu1_di1_set_rate,
2411 .round_rate = _clk_ipu_di_round_rate,
2412 .get_rate = _clk_ipu1_di1_get_rate,
/*
 * IPU2 DI0: same scheme as IPU1 DI handlers, IPU2_DI0 fields of CHSCCDR.
 * NOTE(review): lines are elided from this excerpt (numbering jumps).
 */
/* Rate = parent (LDB) or parent / (IPU2_DI0_PODF + 1); PODF not shifted
 * before +1 — only valid if the field starts at bit 0; verify. */
2416 static unsigned long _clk_ipu2_di0_get_rate(struct clk *clk)
2420 if ((clk->parent == &ldb_di0_clk) ||
2421 (clk->parent == &ldb_di1_clk))
2422 return clk_get_rate(clk->parent);
2424 reg = __raw_readl(MXC_CCM_CHSCCDR);
2426 div = (reg & MXC_CCM_CHSCCDR_IPU2_DI0_PODF_MASK) + 1;
2428 return clk_get_rate(clk->parent) / div;
/* Program IPU2_DI0_PODF; LDB parents accept only the exact parent rate. */
2431 static int _clk_ipu2_di0_set_rate(struct clk *clk, unsigned long rate)
2434 u32 parent_rate = clk_get_rate(clk->parent);
2436 if ((clk->parent == &ldb_di0_clk) ||
2437 (clk->parent == &ldb_di1_clk)) {
2438 if (parent_rate == rate)
2444 div = parent_rate / rate;
2447 if (((parent_rate / div) != rate) || (div > 8))
2450 reg = __raw_readl(MXC_CCM_CHSCCDR);
2451 reg &= ~MXC_CCM_CHSCCDR_IPU2_DI0_PODF_MASK;
2452 reg |= (div - 1) << MXC_CCM_CHSCCDR_IPU2_DI0_PODF_OFFSET;
2453 __raw_writel(reg, MXC_CCM_CHSCCDR);
/* Two-stage parent select for IPU2 DI0 (pre-mux then final CLK_SEL). */
2458 static int _clk_ipu2_di0_set_parent(struct clk *clk, struct clk *parent)
2462 if (parent == &ldb_di0_clk)
2464 else if (parent == &ldb_di1_clk)
2467 reg = __raw_readl(MXC_CCM_CHSCCDR)
2468 & ~MXC_CCM_CHSCCDR_IPU2_DI0_PRE_CLK_SEL_MASK;
2470 mux = _get_mux6(parent, &mmdc_ch0_axi_clk[0],
2471 &pll3_usb_otg_main_clk, &pll5_video_main_clk,
2472 &pll2_pfd_352M, &pll2_pfd_400M, &pll3_pfd_540M);
2473 reg |= (mux << MXC_CCM_CHSCCDR_IPU2_DI0_PRE_CLK_SEL_OFFSET);
2475 __raw_writel(reg, MXC_CCM_CHSCCDR);
2477 /* Derive clock from divided pre-muxed ipu2_di0 clock.*/
2480 reg = __raw_readl(MXC_CCM_CHSCCDR)
2481 & ~MXC_CCM_CHSCCDR_IPU2_DI0_CLK_SEL_MASK;
2482 __raw_writel(reg | (mux << MXC_CCM_CHSCCDR_IPU2_DI0_CLK_SEL_OFFSET),
/*
 * IPU2 DI1 handlers plus the ipu2_di_clk[] pair (DI0/DI1).
 * NOTE(review): lines are elided from this excerpt (numbering jumps).
 */
/* Rate = parent (LDB) or parent / (IPU2_DI1_PODF + 1); PODF not shifted
 * before +1 — only valid if the field starts at bit 0; verify. */
2488 static unsigned long _clk_ipu2_di1_get_rate(struct clk *clk)
2492 if ((clk->parent == &ldb_di0_clk) ||
2493 (clk->parent == &ldb_di1_clk))
2494 return clk_get_rate(clk->parent);
2496 reg = __raw_readl(MXC_CCM_CHSCCDR);
2498 div = (reg & MXC_CCM_CHSCCDR_IPU2_DI1_PODF_MASK) + 1;
2500 return clk_get_rate(clk->parent) / div;
/* Program IPU2_DI1_PODF; LDB parents accept only the exact parent rate. */
2503 static int _clk_ipu2_di1_set_rate(struct clk *clk, unsigned long rate)
2506 u32 parent_rate = clk_get_rate(clk->parent);
2508 if ((clk->parent == &ldb_di0_clk) ||
2509 (clk->parent == &ldb_di1_clk)) {
2510 if (parent_rate == rate)
2516 div = parent_rate / rate;
2519 if (((parent_rate / div) != rate) || (div > 8))
2522 reg = __raw_readl(MXC_CCM_CHSCCDR);
2523 reg &= ~MXC_CCM_CHSCCDR_IPU2_DI1_PODF_MASK;
2524 reg |= (div - 1) << MXC_CCM_CHSCCDR_IPU2_DI1_PODF_OFFSET;
2525 __raw_writel(reg, MXC_CCM_CHSCCDR);
/* Two-stage parent select for IPU2 DI1 (pre-mux then final CLK_SEL). */
2530 static int _clk_ipu2_di1_set_parent(struct clk *clk, struct clk *parent)
2534 if (parent == &ldb_di0_clk)
2536 else if (parent == &ldb_di1_clk)
2539 reg = __raw_readl(MXC_CCM_CHSCCDR)
2540 & ~MXC_CCM_CHSCCDR_IPU2_DI1_PRE_CLK_SEL_MASK;
2542 mux = _get_mux6(parent, &mmdc_ch0_axi_clk[0],
2543 &pll3_usb_otg_main_clk, &pll5_video_main_clk,
2544 &pll2_pfd_352M, &pll2_pfd_400M, &pll3_pfd_540M);
2545 reg |= (mux << MXC_CCM_CHSCCDR_IPU2_DI1_PRE_CLK_SEL_OFFSET);
2547 __raw_writel(reg, MXC_CCM_CHSCCDR);
2549 /* Derive clock from divided pre-muxed ipu2_di1 clock.*/
2552 reg = __raw_readl(MXC_CCM_CHSCCDR)
2553 & ~MXC_CCM_CHSCCDR_IPU2_DI1_CLK_SEL_MASK;
2554 __raw_writel(reg | (mux << MXC_CCM_CHSCCDR_IPU2_DI1_CLK_SEL_OFFSET),
/* ipu2_di_clk[0]=DI0 (CCGR3 CG1), ipu2_di_clk[1]=DI1 (CCGR3 CG2).
 * NOTE(review): same CCGR3 CG1/CG2 gates as ipu1_di_clk[] — verify the
 * intended CG offsets for IPU2 DI against the reference manual. */
2560 static struct clk ipu2_di_clk[] = {
2562 __INIT_CLK_DEBUG(ipu2_di_clk_0)
2564 .parent = &pll3_pfd_540M,
2565 .enable_reg = MXC_CCM_CCGR3,
2566 .enable_shift = MXC_CCM_CCGRx_CG1_OFFSET,
2567 .enable = _clk_enable,
2568 .disable = _clk_disable,
2569 .set_parent = _clk_ipu2_di0_set_parent,
2570 .set_rate = _clk_ipu2_di0_set_rate,
2571 .round_rate = _clk_ipu_di_round_rate,
2572 .get_rate = _clk_ipu2_di0_get_rate,
2575 __INIT_CLK_DEBUG(ipu2_di_clk_1)
2577 .parent = &pll3_pfd_540M,
2578 .enable_reg = MXC_CCM_CCGR3,
2579 .enable_shift = MXC_CCM_CCGRx_CG2_OFFSET,
2580 .enable = _clk_enable,
2581 .disable = _clk_disable,
2582 .set_parent = _clk_ipu2_di1_set_parent,
2583 .set_rate = _clk_ipu2_di1_set_rate,
2584 .round_rate = _clk_ipu_di_round_rate,
2585 .get_rate = _clk_ipu2_di1_get_rate,
/*
 * FlexCAN gates: each controller is a module-clock / serial-clock pair,
 * linked via .secondary so enabling the module also enables serial.
 * NOTE(review): lines are elided from this excerpt (numbering jumps).
 */
/* CAN2: module gate CCGR0 CG9, serial gate CCGR0 CG10. */
2589 static struct clk can2_clk[] = {
2591 __INIT_CLK_DEBUG(can2_module_clk)
2593 .parent = &pll3_sw_clk,
2594 .enable_reg = MXC_CCM_CCGR0,
2595 .enable_shift = MXC_CCM_CCGRx_CG9_OFFSET,
2596 .enable = _clk_enable,
2597 .disable = _clk_disable,
2598 .secondary = &can2_clk[1],
2601 __INIT_CLK_DEBUG(can2_serial_clk)
2603 .parent = &pll3_sw_clk,
2604 .enable_reg = MXC_CCM_CCGR0,
2605 .enable_shift = MXC_CCM_CCGRx_CG10_OFFSET,
2606 .enable = _clk_enable,
2607 .disable = _clk_disable,
/* CAN1: module gate CCGR0 CG7, serial gate CCGR0 CG8. */
2612 static struct clk can1_clk[] = {
2614 __INIT_CLK_DEBUG(can1_module_clk)
2616 .parent = &pll3_sw_clk,
2617 .enable_reg = MXC_CCM_CCGR0,
2618 .enable_shift = MXC_CCM_CCGRx_CG7_OFFSET,
2619 .enable = _clk_enable,
2620 .disable = _clk_disable,
2621 .secondary = &can1_clk[1],
2624 __INIT_CLK_DEBUG(can1_serial_clk)
2626 .parent = &pll3_sw_clk,
2627 .enable_reg = MXC_CCM_CCGR0,
2628 .enable_shift = MXC_CCM_CCGRx_CG8_OFFSET,
2629 .enable = _clk_enable,
2630 .disable = _clk_disable,
/*
 * SPDIF clocks: two-stage PRED/PODF divider in CDCDR, mux in CDCDR.
 * NOTE(review): lines are elided from this excerpt (numbering jumps).
 */
/* Shared rounding for SPDIF: exact pre*post divides only. */
2634 static unsigned long _clk_spdif_round_rate(struct clk *clk,
2638 u32 parent_rate = clk_get_rate(clk->parent);
2639 u32 div = parent_rate / rate;
2641 if (parent_rate % rate)
2644 __calc_pre_post_dividers(div, &pre, &post);
2646 return parent_rate / (pre * post);
/* Select SPDIF0 parent among pll4_audio / pll3_pfd_508M / pll3_pfd_454M / pll3_sw. */
2649 static int _clk_spdif0_set_parent(struct clk *clk, struct clk *parent)
/* NOTE(review): '& MASK' without '~' — old mux bits are not cleared; verify. */
2653 reg = __raw_readl(MXC_CCM_CDCDR)
2654 & MXC_CCM_CDCDR_SPDIF0_CLK_SEL_MASK;
2656 mux = _get_mux6(parent, &pll4_audio_main_clk,
2657 &pll3_pfd_508M, &pll3_pfd_454M,
2658 &pll3_sw_clk, NULL, NULL);
2659 reg |= mux << MXC_CCM_CDCDR_SPDIF0_CLK_SEL_OFFSET;
2661 __raw_writel(reg, MXC_CCM_CDCDR);
/* Rate = parent rate / ((SPDIF0_PRED+1) * (SPDIF0_PODF+1)). */
2666 static unsigned long _clk_spdif0_get_rate(struct clk *clk)
2668 u32 reg, pred, podf;
2670 reg = __raw_readl(MXC_CCM_CDCDR);
2672 pred = ((reg & MXC_CCM_CDCDR_SPDIF0_CLK_PRED_MASK)
2673 >> MXC_CCM_CDCDR_SPDIF0_CLK_PRED_OFFSET) + 1;
2674 podf = ((reg & MXC_CCM_CDCDR_SPDIF0_CLK_PODF_MASK)
2675 >> MXC_CCM_CDCDR_SPDIF0_CLK_PODF_OFFSET) + 1;
2677 return clk_get_rate(clk->parent) / (pred * podf);
/* Split div into pre/post and program the SPDIF0 fields of CDCDR. */
2680 static int _clk_spdif0_set_rate(struct clk *clk, unsigned long rate)
2682 u32 reg, div, pre, post;
2683 u32 parent_rate = clk_get_rate(clk->parent);
2685 div = parent_rate / rate;
2688 if (((parent_rate / div) != rate) || div > 512)
2691 __calc_pre_post_dividers(div, &pre, &post);
2693 reg = __raw_readl(MXC_CCM_CDCDR);
2694 reg &= ~(MXC_CCM_CDCDR_SPDIF0_CLK_PRED_MASK|
2695 MXC_CCM_CDCDR_SPDIF0_CLK_PODF_MASK);
2696 reg |= (post - 1) << MXC_CCM_CDCDR_SPDIF0_CLK_PODF_OFFSET;
2697 reg |= (pre - 1) << MXC_CCM_CDCDR_SPDIF0_CLK_PRED_OFFSET;
2699 __raw_writel(reg, MXC_CCM_CDCDR);
/* SPDIF0 gate pair (CCGR5 CG7); [1] chains to spba_clk via .secondary. */
2704 static struct clk spdif0_clk[] = {
2706 __INIT_CLK_DEBUG(spdif0_clk_0)
2708 .parent = &pll3_sw_clk,
2709 .enable = _clk_enable,
2710 .enable_reg = MXC_CCM_CCGR5,
2711 .enable_shift = MXC_CCM_CCGRx_CG7_OFFSET,
2712 .disable = _clk_disable,
2713 .secondary = &spdif0_clk[1],
2714 .set_rate = _clk_spdif0_set_rate,
2715 .get_rate = _clk_spdif0_get_rate,
2716 .set_parent = _clk_spdif0_set_parent,
2717 .round_rate = _clk_spdif_round_rate,
2720 __INIT_CLK_DEBUG(spdif0_clk_1)
2723 .secondary = &spba_clk,
/*
 * SPDIF1: mirrors SPDIF0 using the SPDIF1 fields of CDCDR.
 * NOTE(review): lines are elided from this excerpt (numbering jumps).
 */
2727 static int _clk_spdif1_set_parent(struct clk *clk, struct clk *parent)
/* NOTE(review): '& MASK' without '~' — old mux bits are not cleared; verify. */
2731 reg = __raw_readl(MXC_CCM_CDCDR) & MXC_CCM_CDCDR_SPDIF1_CLK_SEL_MASK;
2733 mux = _get_mux6(parent, &pll4_audio_main_clk, &pll3_pfd_508M,
2734 &pll3_pfd_454M, &pll3_sw_clk, NULL, NULL);
2735 reg |= mux << MXC_CCM_CDCDR_SPDIF1_CLK_SEL_OFFSET;
2737 __raw_writel(reg, MXC_CCM_CDCDR);
/* Rate = parent rate / ((SPDIF1_PRED+1) * (SPDIF1_PODF+1)). */
2742 static unsigned long _clk_spdif1_get_rate(struct clk *clk)
2744 u32 reg, pred, podf;
2746 reg = __raw_readl(MXC_CCM_CDCDR);
2748 pred = ((reg & MXC_CCM_CDCDR_SPDIF1_CLK_PRED_MASK)
2749 >> MXC_CCM_CDCDR_SPDIF1_CLK_PRED_OFFSET) + 1;
2750 podf = ((reg & MXC_CCM_CDCDR_SPDIF1_CLK_PODF_MASK)
2751 >> MXC_CCM_CDCDR_SPDIF1_CLK_PODF_OFFSET) + 1;
2753 return clk_get_rate(clk->parent) / (pred * podf);
/* Split div into pre/post and program the SPDIF1 fields of CDCDR. */
2756 static int _clk_spdif1_set_rate(struct clk *clk, unsigned long rate)
2758 u32 reg, div, pre, post;
2759 u32 parent_rate = clk_get_rate(clk->parent);
2761 div = parent_rate / rate;
2764 if (((parent_rate / div) != rate) || div > 512)
2767 __calc_pre_post_dividers(div, &pre, &post);
2769 reg = __raw_readl(MXC_CCM_CDCDR);
2770 reg &= ~(MXC_CCM_CDCDR_SPDIF1_CLK_PRED_MASK|
2771 MXC_CCM_CDCDR_SPDIF1_CLK_PODF_MASK);
2772 reg |= (post - 1) << MXC_CCM_CDCDR_SPDIF1_CLK_PODF_OFFSET;
2773 reg |= (pre - 1) << MXC_CCM_CDCDR_SPDIF1_CLK_PRED_OFFSET;
2775 __raw_writel(reg, MXC_CCM_CDCDR);
/* SPDIF1 gate pair (CCGR5 CG7, same gate as SPDIF0 — verify intended);
 * [1] chains to spba_clk via .secondary. */
2780 static struct clk spdif1_clk[] = {
2782 __INIT_CLK_DEBUG(spdif1_clk_0)
2784 .parent = &pll3_sw_clk,
2785 .enable = _clk_enable,
2786 .enable_reg = MXC_CCM_CCGR5,
2787 .enable_shift = MXC_CCM_CCGRx_CG7_OFFSET,
2788 .disable = _clk_disable,
2789 .secondary = &spdif1_clk[1],
2790 .set_rate = _clk_spdif1_set_rate,
2791 .get_rate = _clk_spdif1_get_rate,
2792 .set_parent = _clk_spdif1_set_parent,
2793 .round_rate = _clk_spdif_round_rate,
2796 __INIT_CLK_DEBUG(spdif1_clk_1)
2799 .secondary = &spba_clk,
2803 static unsigned long _clk_esai_round_rate(struct clk *clk,
2807 u32 parent_rate = clk_get_rate(clk->parent);
2808 u32 div = parent_rate / rate;
2810 if (parent_rate % rate)
2813 __calc_pre_post_dividers(div, &pre, &post);
2815 return parent_rate / (pre * post);
2818 static int _clk_esai_set_parent(struct clk *clk, struct clk *parent)
2822 reg = __raw_readl(MXC_CCM_CSCMR2) & MXC_CCM_CSCMR2_ESAI_CLK_SEL_MASK;
2824 mux = _get_mux6(parent, &pll4_audio_main_clk, &pll3_pfd_508M,
2825 &pll3_pfd_454M, &pll3_sw_clk, NULL, NULL);
2826 reg |= mux << MXC_CCM_CSCMR2_ESAI_CLK_SEL_OFFSET;
2828 __raw_writel(reg, MXC_CCM_CSCMR2);
2833 static unsigned long _clk_esai_get_rate(struct clk *clk)
2835 u32 reg, pred, podf;
2837 reg = __raw_readl(MXC_CCM_CS1CDR);
2839 pred = ((reg & MXC_CCM_CS1CDR_ESAI_CLK_PRED_MASK)
2840 >> MXC_CCM_CS1CDR_ESAI_CLK_PRED_OFFSET) + 1;
2841 podf = ((reg & MXC_CCM_CS1CDR_ESAI_CLK_PODF_MASK)
2842 >> MXC_CCM_CS1CDR_ESAI_CLK_PODF_OFFSET) + 1;
2844 return clk_get_rate(clk->parent) / (pred * podf);
/*
 * Program the ESAI pre/post dividers in CS1CDR for an exactly-reachable
 * @rate. Fails (-EINVAL expected) when the rate is not an exact divisor
 * or needs a total divider above 512 (max pred*podf = 8*64).
 */
static int _clk_esai_set_rate(struct clk *clk, unsigned long rate)
	u32 reg, div, pre, post;
	u32 parent_rate = clk_get_rate(clk->parent);
	div = parent_rate / rate;
	if (((parent_rate / div) != rate) || div > 512)
	__calc_pre_post_dividers(div, &pre, &post);
	/* Fields hold divider-minus-one. */
	reg = __raw_readl(MXC_CCM_CS1CDR);
	reg &= ~(MXC_CCM_CS1CDR_ESAI_CLK_PRED_MASK|
		 MXC_CCM_CS1CDR_ESAI_CLK_PODF_MASK);
	reg |= (post - 1) << MXC_CCM_CS1CDR_ESAI_CLK_PODF_OFFSET;
	reg |= (pre - 1) << MXC_CCM_CS1CDR_ESAI_CLK_PRED_OFFSET;
	__raw_writel(reg, MXC_CCM_CS1CDR);
/* ESAI clock: gated by CCGR1/CG8, default parent pll3_sw_clk. */
static struct clk esai_clk = {
	__INIT_CLK_DEBUG(esai_clk)
	.parent = &pll3_sw_clk,
	.enable_reg = MXC_CCM_CCGR1,
	.enable_shift = MXC_CCM_CCGRx_CG8_OFFSET,
	.enable = _clk_enable,
	.disable = _clk_disable,
	.set_rate = _clk_esai_set_rate,
	.get_rate = _clk_esai_get_rate,
	.set_parent = _clk_esai_set_parent,
	.round_rate = _clk_esai_round_rate,
2885 static int _clk_enet_enable(struct clk *clk)
2889 /* Enable ENET ref clock */
2890 reg = __raw_readl(PLL8_ENET_BASE_ADDR);
2891 reg &= ~ANADIG_PLL_BYPASS;
2892 reg &= ~ANADIG_PLL_ENABLE;
2893 __raw_writel(reg, PLL8_ENET_BASE_ADDR);
2899 static void _clk_enet_disable(struct clk *clk)
2905 /* Enable ENET ref clock */
2906 reg = __raw_readl(PLL8_ENET_BASE_ADDR);
2907 reg |= ANADIG_PLL_BYPASS;
2908 reg |= ANADIG_PLL_ENABLE;
2909 __raw_writel(reg, PLL8_ENET_BASE_ADDR);
/*
 * Program the PLL8 ENET DIV_SELECT field.
 * NOTE(review): the rate -> div mapping (lines between the declaration
 * and the register update) is not visible in this chunk; only the final
 * read-modify-write of DIV_SELECT is asserted here.
 */
static int _clk_enet_set_rate(struct clk *clk, unsigned long rate)
	unsigned int reg, div = 1;
	reg = __raw_readl(PLL8_ENET_BASE_ADDR);
	reg &= ~ANADIG_PLL_ENET_DIV_SELECT_MASK;
	reg |= (div << ANADIG_PLL_ENET_DIV_SELECT_OFFSET);
	__raw_writel(reg, PLL8_ENET_BASE_ADDR);
/*
 * Derive the ENET ref rate from PLL8's DIV_SELECT field.
 * NOTE(review): assumes DIV_SELECT occupies the least-significant bits
 * (no shift applied) and that 500 MHz / (div+1) matches the silicon's
 * divider encoding — confirm against the CCM_ANALOG chapter.
 */
static unsigned long _clk_enet_get_rate(struct clk *clk)
	div = (__raw_readl(PLL8_ENET_BASE_ADDR))
		& ANADIG_PLL_ENET_DIV_SELECT_MASK;
	return 500000000 / (div + 1);
/* ENET (FEC) clock: gated by CCGR1/CG10, sourced from PLL8. */
static struct clk enet_clk = {
	__INIT_CLK_DEBUG(enet_clk)
	.parent = &pll8_enet_main_clk,
	.enable_reg = MXC_CCM_CCGR1,
	.enable_shift = MXC_CCM_CCGRx_CG10_OFFSET,
	.enable = _clk_enet_enable,
	.disable = _clk_enet_disable,
	.set_rate = _clk_enet_set_rate,
	.get_rate = _clk_enet_get_rate,
/*
 * ECSPI1..5 clocks: all sourced from the fixed pll3_60M branch and
 * gated by consecutive CCGR1 gates CG0..CG4 (one per instance).
 */
static struct clk ecspi_clk[] = {
	__INIT_CLK_DEBUG(ecspi0_clk)
	.parent = &pll3_60M,
	.enable_reg = MXC_CCM_CCGR1,
	.enable_shift = MXC_CCM_CCGRx_CG0_OFFSET,
	.enable = _clk_enable,
	.disable = _clk_disable,
	__INIT_CLK_DEBUG(ecspi1_clk)
	.parent = &pll3_60M,
	.enable_reg = MXC_CCM_CCGR1,
	.enable_shift = MXC_CCM_CCGRx_CG1_OFFSET,
	.enable = _clk_enable,
	.disable = _clk_disable,
	__INIT_CLK_DEBUG(ecspi2_clk)
	.parent = &pll3_60M,
	.enable_reg = MXC_CCM_CCGR1,
	.enable_shift = MXC_CCM_CCGRx_CG2_OFFSET,
	.enable = _clk_enable,
	.disable = _clk_disable,
	__INIT_CLK_DEBUG(ecspi3_clk)
	.parent = &pll3_60M,
	.enable_reg = MXC_CCM_CCGR1,
	.enable_shift = MXC_CCM_CCGRx_CG3_OFFSET,
	.enable = _clk_enable,
	.disable = _clk_disable,
	__INIT_CLK_DEBUG(ecspi4_clk)
	.parent = &pll3_60M,
	.enable_reg = MXC_CCM_CCGR1,
	.enable_shift = MXC_CCM_CCGRx_CG4_OFFSET,
	.enable = _clk_enable,
	.disable = _clk_disable,
/*
 * Round @rate for the single-divider EMI-slow branch.
 * NOTE(review): the divider clamping (max/zero check referenced by the
 * comment) is not visible in this chunk.
 */
static unsigned long _clk_emi_slow_round_rate(struct clk *clk,
	u32 parent_rate = clk_get_rate(clk->parent);
	div = parent_rate / rate;
	/* Make sure rate is not greater than the maximum value for the clock.
	 * Also prevent a div of 0.
	return parent_rate / div;
3030 static int _clk_emi_slow_set_parent(struct clk *clk, struct clk *parent)
3033 u32 reg = __raw_readl(MXC_CCM_CSCMR1)
3034 & MXC_CCM_CSCMR1_ACLK_EMI_SLOW_MASK;
3036 mux = _get_mux6(parent, &axi_clk, &pll3_usb_otg_main_clk,
3037 &pll2_pfd_400M, &pll2_pfd_352M, NULL, NULL);
3038 reg |= (mux << MXC_CCM_CSCMR1_ACLK_EMI_SLOW_OFFSET);
3039 __raw_writel(reg, MXC_CCM_CSCMR1);
/* Current EMI-slow rate: parent rate / (ACLK_EMI_SLOW_PODF + 1). */
static unsigned long _clk_emi_slow_get_rate(struct clk *clk)
	reg = __raw_readl(MXC_CCM_CSCMR1);
	div = ((reg & MXC_CCM_CSCMR1_ACLK_EMI_SLOW_PODF_MASK) >>
	       MXC_CCM_CSCMR1_ACLK_EMI_SLOW_PODF_OFFSET) + 1;
	return clk_get_rate(clk->parent) / div;
/*
 * Program the 3-bit EMI-slow podf (max divider 8) for an exact @rate;
 * inexact or out-of-range requests fail (-EINVAL expected).
 */
static int _clk_emi_slow_set_rate(struct clk *clk, unsigned long rate)
	u32 parent_rate = clk_get_rate(clk->parent);
	div = parent_rate / rate;
	if (((parent_rate / div) != rate) || (div > 8))
	/* Field holds divider-minus-one; other CSCMR1 bits preserved. */
	reg = __raw_readl(MXC_CCM_CSCMR1);
	reg &= ~MXC_CCM_CSCMR1_ACLK_EMI_SLOW_PODF_MASK;
	reg |= (div - 1) << MXC_CCM_CSCMR1_ACLK_EMI_SLOW_PODF_OFFSET;
	__raw_writel(reg, MXC_CCM_CSCMR1);
/*
 * EMI slow clock: gated by CCGR6/CG5.
 * NOTE(review): the .parent initializer is not visible in this chunk.
 */
static struct clk emi_slow_clk = {
	__INIT_CLK_DEBUG(emi_slow_clk)
	.enable_reg = MXC_CCM_CCGR6,
	.enable_shift = MXC_CCM_CCGRx_CG5_OFFSET,
	.enable = _clk_enable,
	.disable = _clk_disable,
	.set_rate = _clk_emi_slow_set_rate,
	.get_rate = _clk_emi_slow_get_rate,
	.round_rate = _clk_emi_slow_round_rate,
	.set_parent = _clk_emi_slow_set_parent,
/*
 * Round @rate for the single-divider EMI (ACLK_EMI) branch.
 * NOTE(review): divider clamp lines are not visible in this chunk.
 */
static unsigned long _clk_emi_round_rate(struct clk *clk,
	u32 parent_rate = clk_get_rate(clk->parent);
	div = parent_rate / rate;
	/* Make sure rate is not greater than the maximum value for the clock.
	 * Also prevent a div of 0.
	return parent_rate / div;
3108 static int _clk_emi_set_parent(struct clk *clk, struct clk *parent)
3111 u32 reg = __raw_readl(MXC_CCM_CSCMR1) & MXC_CCM_CSCMR1_ACLK_EMI_MASK;
3113 mux = _get_mux6(parent, &axi_clk, &pll3_usb_otg_main_clk,
3114 &pll2_pfd_400M, &pll2_pfd_352M, NULL, NULL);
3115 reg |= (mux << MXC_CCM_CSCMR1_ACLK_EMI_OFFSET);
3116 __raw_writel(reg, MXC_CCM_CSCMR1);
/* Current EMI rate: parent rate / (ACLK_EMI_PODF + 1). */
static unsigned long _clk_emi_get_rate(struct clk *clk)
	reg = __raw_readl(MXC_CCM_CSCMR1);
	div = ((reg & MXC_CCM_CSCMR1_ACLK_EMI_PODF_MASK) >>
	       MXC_CCM_CSCMR1_ACLK_EMI_PODF_OFFSET) + 1;
	return clk_get_rate(clk->parent) / div;
/*
 * Program the EMI podf (max divider 8) for an exact @rate; inexact or
 * out-of-range requests fail (-EINVAL expected).
 */
static int _clk_emi_set_rate(struct clk *clk, unsigned long rate)
	u32 parent_rate = clk_get_rate(clk->parent);
	div = parent_rate / rate;
	if (((parent_rate / div) != rate) || (div > 8))
	/* Field holds divider-minus-one; other CSCMR1 bits preserved. */
	reg = __raw_readl(MXC_CCM_CSCMR1);
	reg &= ~MXC_CCM_CSCMR1_ACLK_EMI_PODF_MASK;
	reg |= (div - 1) << MXC_CCM_CSCMR1_ACLK_EMI_PODF_OFFSET;
	__raw_writel(reg, MXC_CCM_CSCMR1);
/*
 * EMI clock (rate/parent ops only).
 * NOTE(review): .parent and any gate fields are not visible in this chunk.
 */
static struct clk emi_clk = {
	__INIT_CLK_DEBUG(emi_clk)
	.set_rate = _clk_emi_set_rate,
	.get_rate = _clk_emi_get_rate,
	.round_rate = _clk_emi_round_rate,
	.set_parent = _clk_emi_set_parent,
/*
 * Round @rate for the NAND-flash-controller (ENFC) branch using the
 * shared pre/post divider search.
 */
static unsigned long _clk_enfc_round_rate(struct clk *clk,
	u32 parent_rate = clk_get_rate(clk->parent);
	u32 div = parent_rate / rate;
	/* Round the divider up when the rate does not divide evenly. */
	if (parent_rate % rate)
	__calc_pre_post_dividers(div, &pre, &post);
	return parent_rate / (pre * post);
3176 static int _clk_enfc_set_parent(struct clk *clk, struct clk *parent)
3180 reg = __raw_readl(MXC_CCM_CS2CDR)
3181 & MXC_CCM_CS2CDR_ENFC_CLK_SEL_MASK;
3183 mux = _get_mux6(parent, &pll2_pfd_352M,
3184 &pll2_528_bus_main_clk, &pll3_usb_otg_main_clk,
3185 &pll2_pfd_400M, NULL, NULL);
3186 reg |= mux << MXC_CCM_CS2CDR_ENFC_CLK_SEL_OFFSET;
3188 __raw_writel(reg, MXC_CCM_CS2CDR);
/*
 * Current ENFC rate: parent rate divided by (pred+1)*(podf+1) read back
 * from CS2CDR.
 */
static unsigned long _clk_enfc_get_rate(struct clk *clk)
	u32 reg, pred, podf;
	reg = __raw_readl(MXC_CCM_CS2CDR);
	/* Hardware stores divider-minus-one; +1 restores the real divider. */
	pred = ((reg & MXC_CCM_CS2CDR_ENFC_CLK_PRED_MASK)
		>> MXC_CCM_CS2CDR_ENFC_CLK_PRED_OFFSET) + 1;
	podf = ((reg & MXC_CCM_CS2CDR_ENFC_CLK_PODF_MASK)
		>> MXC_CCM_CS2CDR_ENFC_CLK_PODF_OFFSET) + 1;
	return clk_get_rate(clk->parent) / (pred * podf);
/*
 * Program the ENFC pre/post dividers in CS2CDR for an exactly-reachable
 * @rate; fails (-EINVAL expected) for inexact rates or total divider > 512.
 */
static int _clk_enfc_set_rate(struct clk *clk, unsigned long rate)
	u32 reg, div, pre, post;
	u32 parent_rate = clk_get_rate(clk->parent);
	div = parent_rate / rate;
	if (((parent_rate / div) != rate) || div > 512)
	__calc_pre_post_dividers(div, &pre, &post);
	/* Fields hold divider-minus-one. */
	reg = __raw_readl(MXC_CCM_CS2CDR);
	reg &= ~(MXC_CCM_CS2CDR_ENFC_CLK_PRED_MASK|
		 MXC_CCM_CS2CDR_ENFC_CLK_PODF_MASK);
	reg |= (post - 1) << MXC_CCM_CS2CDR_ENFC_CLK_PODF_OFFSET;
	reg |= (pre - 1) << MXC_CCM_CS2CDR_ENFC_CLK_PRED_OFFSET;
	__raw_writel(reg, MXC_CCM_CS2CDR);
/* ENFC (NAND flash controller) clock: gated by CCGR2/CG7, default
 * parent pll2_pfd_352M. */
static struct clk enfc_clk = {
	__INIT_CLK_DEBUG(enfc_clk)
	.parent = &pll2_pfd_352M,
	.enable_reg = MXC_CCM_CCGR2,
	.enable_shift = MXC_CCM_CCGRx_CG7_OFFSET,
	.enable = _clk_enable,
	.disable = _clk_disable,
	.set_rate = _clk_enfc_set_rate,
	.get_rate = _clk_enfc_get_rate,
	.round_rate = _clk_enfc_round_rate,
	.set_parent = _clk_enfc_set_parent,
/*
 * Round @rate for the single-divider UART branch.
 * NOTE(review): divider clamp lines are not visible in this chunk.
 */
static unsigned long _clk_uart_round_rate(struct clk *clk,
	u32 parent_rate = clk_get_rate(clk->parent);
	div = parent_rate / rate;
	/* Make sure rate is not greater than the maximum value for the clock.
	 * Also prevent a div of 0.
	return parent_rate / div;
3265 static int _clk_uart_set_rate(struct clk *clk, unsigned long rate)
3268 u32 parent_rate = clk_get_rate(clk->parent);
3270 div = parent_rate / rate;
3273 if (((parent_rate / div) != rate) || (div > 64))
3276 reg = __raw_readl(MXC_CCM_CSCDR1) & MXC_CCM_CSCDR1_UART_CLK_PODF_MASK;
3277 reg |= ((div - 1) << MXC_CCM_CSCDR1_UART_CLK_PODF_OFFSET);
3279 __raw_writel(reg, MXC_CCM_CSCDR1);
/* Current UART rate: parent rate / (UART_CLK_PODF + 1). */
static unsigned long _clk_uart_get_rate(struct clk *clk)
	reg = __raw_readl(MXC_CCM_CSCDR1) & MXC_CCM_CSCDR1_UART_CLK_PODF_MASK;
	div = (reg >> MXC_CCM_CSCDR1_UART_CLK_PODF_OFFSET) + 1;
	val = clk_get_rate(clk->parent) / div;
/*
 * uart_clk: [0] is the UART module clock (CCGR5/CG12) off the fixed
 * pll3_80M branch with rate ops; [1] is the serial (per-module) gate
 * (CCGR5/CG13) chained as secondary.
 */
static struct clk uart_clk[] = {
	__INIT_CLK_DEBUG(uart_clk)
	.parent = &pll3_80M,
	.enable_reg = MXC_CCM_CCGR5,
	.enable_shift = MXC_CCM_CCGRx_CG12_OFFSET,
	.enable = _clk_enable,
	.disable = _clk_disable,
	.secondary = &uart_clk[1],
	.set_rate = _clk_uart_set_rate,
	.get_rate = _clk_uart_get_rate,
	.round_rate = _clk_uart_round_rate,
	__INIT_CLK_DEBUG(uart_serial_clk)
	.enable_reg = MXC_CCM_CCGR5,
	.enable_shift = MXC_CCM_CCGRx_CG13_OFFSET,
	.enable = _clk_enable,
	.disable = _clk_disable,
/*
 * Round @rate for the single-divider HSI TX branch.
 * NOTE(review): divider clamp lines are not visible in this chunk.
 */
static unsigned long _clk_hsi_tx_round_rate(struct clk *clk,
	u32 parent_rate = clk_get_rate(clk->parent);
	div = parent_rate / rate;
	/* Make sure rate is not greater than the maximum value for the clock.
	 * Also prevent a div of 0.
	return parent_rate / div;
3340 static int _clk_hsi_tx_set_parent(struct clk *clk, struct clk *parent)
3342 u32 reg = __raw_readl(MXC_CCM_CDCDR) & MXC_CCM_CDCDR_HSI_TX_CLK_SEL;
3344 if (parent == &pll2_pfd_400M)
3345 reg |= (MXC_CCM_CDCDR_HSI_TX_CLK_SEL);
3347 __raw_writel(reg, MXC_CCM_CDCDR);
/* Current HSI TX rate: parent rate / (HSI_TX_PODF + 1). */
static unsigned long _clk_hsi_tx_get_rate(struct clk *clk)
	reg = __raw_readl(MXC_CCM_CDCDR);
	div = ((reg & MXC_CCM_CDCDR_HSI_TX_PODF_MASK) >>
	       MXC_CCM_CDCDR_HSI_TX_PODF_OFFSET) + 1;
	return clk_get_rate(clk->parent) / div;
/*
 * Program the HSI TX podf (max divider 8) for an exact @rate; inexact
 * or out-of-range requests fail (-EINVAL expected).
 */
static int _clk_hsi_tx_set_rate(struct clk *clk, unsigned long rate)
	u32 parent_rate = clk_get_rate(clk->parent);
	div = parent_rate / rate;
	if (((parent_rate / div) != rate) || (div > 8))
	/* Field holds divider-minus-one; other CDCDR bits preserved. */
	reg = __raw_readl(MXC_CCM_CDCDR);
	reg &= ~MXC_CCM_CDCDR_HSI_TX_PODF_MASK;
	reg |= (div - 1) << MXC_CCM_CDCDR_HSI_TX_PODF_OFFSET;
	__raw_writel(reg, MXC_CCM_CDCDR);
/* HSI (MIPI HSI) TX clock: gated by CCGR3/CG8, default parent pll2_pfd_400M. */
static struct clk hsi_tx_clk = {
	__INIT_CLK_DEBUG(hsi_tx_clk)
	.parent = &pll2_pfd_400M,
	.enable_reg = MXC_CCM_CCGR3,
	.enable_shift = MXC_CCM_CCGRx_CG8_OFFSET,
	.enable = _clk_enable,
	.disable = _clk_disable,
	.set_parent = _clk_hsi_tx_set_parent,
	.round_rate = _clk_hsi_tx_round_rate,
	.set_rate = _clk_hsi_tx_set_rate,
	.get_rate = _clk_hsi_tx_get_rate,
/*
 * 27 MHz video clock: gated by CCGR2/CG2.
 * NOTE(review): parent pll2_pfd_400M does not divide evenly to 27 MHz;
 * the actual divider/source chain is not visible here — verify.
 */
static struct clk video_27M_clk = {
	__INIT_CLK_DEBUG(video_27M_clk)
	.parent = &pll2_pfd_400M,
	.enable_reg = MXC_CCM_CCGR2,
	.enable_shift = MXC_CCM_CCGRx_CG2_OFFSET,
	.enable = _clk_enable,
	.disable = _clk_disable,
3406 static struct clk caam_clk[] = {
3408 __INIT_CLK_DEBUG(caam_mem_clk)
3410 .enable_reg = MXC_CCM_CCGR0,
3411 .enable_shift = MXC_CCM_CCGRx_CG4_OFFSET,
3412 .enable = _clk_enable,
3413 .disable = _clk_disable,
3414 .secondary = &caam_clk[1],
3417 __INIT_CLK_DEBUG(caam_aclk_clk)
3419 .enable_reg = MXC_CCM_CCGR0,
3420 .enable_shift = MXC_CCM_CCGRx_CG5_OFFSET,
3421 .enable = _clk_enable,
3422 .disable = _clk_disable,
3425 __INIT_CLK_DEBUG(caam_ipg_clk)
3427 .enable_reg = MXC_CCM_CCGR0,
3428 .enable_shift = MXC_CCM_CCGRx_CG4_OFFSET,
3429 .enable = _clk_enable,
3430 .disable = _clk_disable,
/* ASRC (sample-rate converter) clock: gated by CCGR0/CG3, parent PLL4. */
static struct clk asrc_clk = {
	__INIT_CLK_DEBUG(asrc_clk)
	.parent = &pll4_audio_main_clk,
	.enable_reg = MXC_CCM_CCGR0,
	.enable_shift = MXC_CCM_CCGRx_CG3_OFFSET,
	.enable = _clk_enable,
	.disable = _clk_disable,
/*
 * APBH DMA clock (CCGR0/CG2). Uses _clk_disable_inwait so the gate only
 * closes in WAIT mode, keeping DMA functional while the core runs.
 */
static struct clk apbh_dma_clk = {
	__INIT_CLK_DEBUG(apbh_dma_clk)
	.enable = _clk_enable,
	.disable = _clk_disable_inwait,
	.enable_reg = MXC_CCM_CCGR0,
	.enable_shift = MXC_CCM_CCGRx_CG2_OFFSET,
/* AIPS-TZ2 bus clock (CCGR0/CG1); disabled only in WAIT mode. */
static struct clk aips_tz2_clk = {
	__INIT_CLK_DEBUG(aips_tz2_clk)
	.enable_reg = MXC_CCM_CCGR0,
	.enable_shift = MXC_CCM_CCGRx_CG1_OFFSET,
	.enable = _clk_enable,
	.disable = _clk_disable_inwait,
/* AIPS-TZ1 bus clock (CCGR0/CG0); disabled only in WAIT mode. */
static struct clk aips_tz1_clk = {
	__INIT_CLK_DEBUG(aips_tz1_clk)
	.enable_reg = MXC_CCM_CCGR0,
	.enable_shift = MXC_CCM_CCGRx_CG0_OFFSET,
	.enable = _clk_enable,
	.disable = _clk_disable_inwait,
/* OpenVG AXI clock: gated by CCGR3/CG15. */
static struct clk openvg_axi_clk = {
	__INIT_CLK_DEBUG(openvg_axi_clk)
	.enable = _clk_enable,
	.enable_reg = MXC_CCM_CCGR3,
	.enable_shift = MXC_CCM_CCGRx_CG15_OFFSET,
	.disable = _clk_disable,
/*
 * Round @rate for the single-divider GPU3D core branch.
 * NOTE(review): divider clamp lines are not visible in this chunk.
 */
static unsigned long _clk_gpu3d_core_round_rate(struct clk *clk,
	u32 parent_rate = clk_get_rate(clk->parent);
	div = parent_rate / rate;
	/* Make sure rate is not greater than the maximum value for the clock.
	 * Also prevent a div of 0.
	return parent_rate / div;
3500 static int _clk_gpu3d_core_set_parent(struct clk *clk, struct clk *parent)
3503 u32 reg = __raw_readl(MXC_CCM_CBCMR)
3504 & MXC_CCM_CBCMR_GPU3D_CORE_CLK_SEL_MASK;
3506 mux = _get_mux6(parent, &mmdc_ch0_axi_clk[0],
3507 &pll3_usb_otg_main_clk,
3508 &pll2_pfd_594M, &pll2_pfd_400M, NULL, NULL);
3509 reg |= (mux << MXC_CCM_CBCMR_GPU3D_CORE_CLK_SEL_OFFSET);
3510 __raw_writel(reg, MXC_CCM_CBCMR);
/* Current GPU3D core rate: parent rate / (GPU3D_CORE_PODF + 1). */
static unsigned long _clk_gpu3d_core_get_rate(struct clk *clk)
	reg = __raw_readl(MXC_CCM_CBCMR);
	div = ((reg & MXC_CCM_CBCMR_GPU3D_CORE_PODF_MASK) >>
	       MXC_CCM_CBCMR_GPU3D_CORE_PODF_OFFSET) + 1;
	return clk_get_rate(clk->parent) / div;
/*
 * Program the GPU3D core podf (max divider 8) for an exact @rate;
 * inexact or out-of-range requests fail (-EINVAL expected).
 */
static int _clk_gpu3d_core_set_rate(struct clk *clk, unsigned long rate)
	u32 parent_rate = clk_get_rate(clk->parent);
	div = parent_rate / rate;
	if (((parent_rate / div) != rate) || (div > 8))
	/* Field holds divider-minus-one; other CBCMR bits preserved. */
	reg = __raw_readl(MXC_CCM_CBCMR);
	reg &= ~MXC_CCM_CBCMR_GPU3D_CORE_PODF_MASK;
	reg |= (div - 1) << MXC_CCM_CBCMR_GPU3D_CORE_PODF_OFFSET;
	__raw_writel(reg, MXC_CCM_CBCMR);
/* GPU3D core clock: gated by CCGR1/CG13, default parent pll2_pfd_594M. */
static struct clk gpu3d_core_clk = {
	__INIT_CLK_DEBUG(gpu3d_core_clk)
	.parent = &pll2_pfd_594M,
	.enable = _clk_enable,
	.enable_reg = MXC_CCM_CCGR1,
	.enable_shift = MXC_CCM_CCGRx_CG13_OFFSET,
	.disable = _clk_disable,
	.set_parent = _clk_gpu3d_core_set_parent,
	.set_rate = _clk_gpu3d_core_set_rate,
	.get_rate = _clk_gpu3d_core_get_rate,
	.round_rate = _clk_gpu3d_core_round_rate,
/*
 * Round @rate for the single-divider GPU2D core branch.
 * NOTE(review): divider clamp lines are not visible in this chunk.
 */
static unsigned long _clk_gpu2d_core_round_rate(struct clk *clk,
	u32 parent_rate = clk_get_rate(clk->parent);
	div = parent_rate / rate;
	/* Make sure rate is not greater than the maximum value for the clock.
	 * Also prevent a div of 0.
	return parent_rate / div;
3578 static int _clk_gpu2d_core_set_parent(struct clk *clk, struct clk *parent)
3581 u32 reg = __raw_readl(MXC_CCM_CBCMR) & MXC_CCM_CBCMR_GPU2D_CLK_SEL_MASK;
3583 mux = _get_mux6(parent, &axi_clk, &pll3_usb_otg_main_clk,
3584 &pll2_pfd_352M, &pll2_pfd_400M, NULL, NULL);
3585 reg |= (mux << MXC_CCM_CBCMR_GPU2D_CLK_SEL_OFFSET);
3586 __raw_writel(reg, MXC_CCM_CBCMR);
/* Current GPU2D core rate: parent rate / (GPU2D_CORE_PODF + 1). */
static unsigned long _clk_gpu2d_core_get_rate(struct clk *clk)
	reg = __raw_readl(MXC_CCM_CBCMR);
	div = ((reg & MXC_CCM_CBCMR_GPU2D_CORE_PODF_MASK) >>
	       MXC_CCM_CBCMR_GPU2D_CORE_PODF_OFFSET) + 1;
	return clk_get_rate(clk->parent) / div;
3602 static int _clk_gpu2d_core_set_rate(struct clk *clk, unsigned long rate)
3605 u32 parent_rate = clk_get_rate(clk->parent);
3607 div = parent_rate / rate;
3610 if (((parent_rate / div) != rate) || (div > 8))
3613 reg = __raw_readl(MXC_CCM_CBCMR);
3614 reg &= ~MXC_CCM_CBCMR_GPU3D_CORE_PODF_MASK;
3615 reg |= (div - 1) << MXC_CCM_CBCMR_GPU3D_CORE_PODF_OFFSET;
3616 __raw_writel(reg, MXC_CCM_CBCMR);
/* GPU2D core clock: gated by CCGR1/CG12, default parent pll2_pfd_352M. */
static struct clk gpu2d_core_clk = {
	__INIT_CLK_DEBUG(gpu2d_core_clk)
	.parent = &pll2_pfd_352M,
	.enable = _clk_enable,
	.enable_reg = MXC_CCM_CCGR1,
	.enable_shift = MXC_CCM_CCGRx_CG12_OFFSET,
	.disable = _clk_disable,
	.set_parent = _clk_gpu2d_core_set_parent,
	.set_rate = _clk_gpu2d_core_set_rate,
	.get_rate = _clk_gpu2d_core_get_rate,
	.round_rate = _clk_gpu2d_core_round_rate,
/*
 * Round @rate for the single-divider GPU3D shader branch.
 * NOTE(review): divider clamp lines are not visible in this chunk.
 */
static unsigned long _clk_gpu3d_shader_round_rate(struct clk *clk,
	u32 parent_rate = clk_get_rate(clk->parent);
	div = parent_rate / rate;
	/* Make sure rate is not greater than the maximum value for the clock.
	 * Also prevent a div of 0.
	return parent_rate / div;
3653 static int _clk_gpu3d_shader_set_parent(struct clk *clk, struct clk *parent)
3656 u32 reg = __raw_readl(MXC_CCM_CBCMR)
3657 & MXC_CCM_CBCMR_GPU3D_SHADER_CLK_SEL_MASK;
3659 mux = _get_mux6(parent, &mmdc_ch0_axi_clk[0],
3660 &pll3_usb_otg_main_clk,
3661 &pll2_pfd_594M, &pll3_pfd_720M, NULL, NULL);
3662 reg |= (mux << MXC_CCM_CBCMR_GPU3D_SHADER_CLK_SEL_OFFSET);
3663 __raw_writel(reg, MXC_CCM_CBCMR);
/* Current GPU3D shader rate: parent rate / (GPU3D_SHADER_PODF + 1). */
static unsigned long _clk_gpu3d_shader_get_rate(struct clk *clk)
	reg = __raw_readl(MXC_CCM_CBCMR);
	div = ((reg & MXC_CCM_CBCMR_GPU3D_SHADER_PODF_MASK) >>
	       MXC_CCM_CBCMR_GPU3D_SHADER_PODF_OFFSET) + 1;
	return clk_get_rate(clk->parent) / div;
/*
 * Program the GPU3D shader podf (max divider 8) for an exact @rate;
 * inexact or out-of-range requests fail (-EINVAL expected).
 */
static int _clk_gpu3d_shader_set_rate(struct clk *clk, unsigned long rate)
	u32 parent_rate = clk_get_rate(clk->parent);
	div = parent_rate / rate;
	if (((parent_rate / div) != rate) || (div > 8))
	/* Field holds divider-minus-one; other CBCMR bits preserved. */
	reg = __raw_readl(MXC_CCM_CBCMR);
	reg &= ~MXC_CCM_CBCMR_GPU3D_SHADER_PODF_MASK;
	reg |= (div - 1) << MXC_CCM_CBCMR_GPU3D_SHADER_PODF_OFFSET;
	__raw_writel(reg, MXC_CCM_CBCMR);
/*
 * GPU3D shader clock: default parent pll3_pfd_720M.
 * NOTE(review): shares CCGR1/CG13 with gpu3d_core_clk — presumably one
 * hardware gate covers both; confirm against the CCM chapter.
 */
static struct clk gpu3d_shader_clk = {
	__INIT_CLK_DEBUG(gpu3d_shader_clk)
	.parent = &pll3_pfd_720M,
	.enable = _clk_enable,
	.enable_reg = MXC_CCM_CCGR1,
	.enable_shift = MXC_CCM_CCGRx_CG13_OFFSET,
	.disable = _clk_disable,
	.set_parent = _clk_gpu3d_shader_set_parent,
	.set_rate = _clk_gpu3d_shader_set_rate,
	.get_rate = _clk_gpu3d_shader_get_rate,
	.round_rate = _clk_gpu3d_shader_round_rate,
/*
 * GPMI NAND clocks, chained via .secondary so enabling [0] brings up the
 * whole set: io (CG14) -> apb (CG15, parented on apbh_dma_clk) ->
 * bch (CG13) -> bch_apb (CG12), all in CCGR4.
 */
static struct clk gpmi_nfc_clk[] = {
	__INIT_CLK_DEBUG(gpmi_io_clk)
	.secondary = &gpmi_nfc_clk[1],
	.enable = _clk_enable,
	.enable_reg = MXC_CCM_CCGR4,
	.enable_shift = MXC_CCM_CCGRx_CG14_OFFSET,
	.disable = _clk_disable,
	{ /* gpmi_apb_clk */
	__INIT_CLK_DEBUG(gpmi_apb_clk)
	.parent = &apbh_dma_clk,
	.secondary = &gpmi_nfc_clk[2],
	.enable = _clk_enable,
	.enable_reg = MXC_CCM_CCGR4,
	.enable_shift = MXC_CCM_CCGRx_CG15_OFFSET,
	.disable = _clk_disable,
	__INIT_CLK_DEBUG(gpmi_bch_clk)
	.secondary = &gpmi_nfc_clk[3],
	.enable = _clk_enable,
	.enable_reg = MXC_CCM_CCGR4,
	.enable_shift = MXC_CCM_CCGRx_CG13_OFFSET,
	.disable = _clk_disable,
	__INIT_CLK_DEBUG(gpmi_bch_apb_clk)
	.parent = &apbh_dma_clk,
	.enable = _clk_enable,
	.enable_reg = MXC_CCM_CCGR4,
	.enable_shift = MXC_CCM_CCGRx_CG12_OFFSET,
	.disable = _clk_disable,
/*
 * PWM1..4 clocks: all parented on ipg_perclk and gated by consecutive
 * CCGR4 gates CG8..CG11.
 */
static struct clk pwm_clk[] = {
	__INIT_CLK_DEBUG(pwm_clk_0)
	.parent = &ipg_perclk,
	.enable_reg = MXC_CCM_CCGR4,
	.enable_shift = MXC_CCM_CCGRx_CG8_OFFSET,
	.enable = _clk_enable,
	.disable = _clk_disable,
	__INIT_CLK_DEBUG(pwm_clk_1)
	.parent = &ipg_perclk,
	.enable_reg = MXC_CCM_CCGR4,
	.enable_shift = MXC_CCM_CCGRx_CG9_OFFSET,
	.enable = _clk_enable,
	.disable = _clk_disable,
	__INIT_CLK_DEBUG(pwm_clk_2)
	.parent = &ipg_perclk,
	.enable_reg = MXC_CCM_CCGR4,
	.enable_shift = MXC_CCM_CCGRx_CG10_OFFSET,
	.enable = _clk_enable,
	.disable = _clk_disable,
	__INIT_CLK_DEBUG(pwm_clk_3)
	.parent = &ipg_perclk,
	.enable_reg = MXC_CCM_CCGR4,
	.enable_shift = MXC_CCM_CCGRx_CG11_OFFSET,
	.enable = _clk_enable,
	.disable = _clk_disable,
/*
 * Enable the PCIe reference clock output from PLL8 (ANADIG EN_PCIE bit).
 * NOTE(review): comment says "SATA ref clock" but this sets the PCIE
 * enable bit — likely a stale copy-paste comment.
 */
static int _clk_pcie_enable(struct clk *clk)
	/* Enable SATA ref clock */
	reg = __raw_readl(PLL8_ENET_BASE_ADDR);
	reg |= ANADIG_PLL_ENET_EN_PCIE;
	__raw_writel(reg, PLL8_ENET_BASE_ADDR);
/* Disable the PCIe reference clock output from PLL8 (EN_PCIE bit). */
static void _clk_pcie_disable(struct clk *clk)
	/* Disable SATA ref clock */
	reg = __raw_readl(PLL8_ENET_BASE_ADDR);
	reg &= ~ANADIG_PLL_ENET_EN_PCIE;
	__raw_writel(reg, PLL8_ENET_BASE_ADDR);
/* PCIe clock: gated by CCGR4/CG0, parented on the PCIe AXI branch. */
static struct clk pcie_clk = {
	__INIT_CLK_DEBUG(pcie_clk)
	.parent = &pcie_axi_clk,
	.enable = _clk_pcie_enable,
	.disable = _clk_pcie_disable,
	.enable_reg = MXC_CCM_CCGR4,
	.enable_shift = MXC_CCM_CCGRx_CG0_OFFSET,
/*
 * Enable the SATA reference clock: power PLL8 up, wait (bounded by the
 * WAIT macro) for lock, drop the bypass, then assert EN_SATA.
 * Panics on lock timeout — boot cannot proceed without PLL8.
 */
static int _clk_sata_enable(struct clk *clk)
	/* NOTE(review): cyclecount appears unused in the visible lines. */
	unsigned int cyclecount;
	/* Clear Power Down and Enable PLLs */
	reg = __raw_readl(PLL8_ENET_BASE_ADDR);
	reg &= ~ANADIG_PLL_ENET_POWER_DOWN;
	__raw_writel(reg, PLL8_ENET_BASE_ADDR);
	reg = __raw_readl(PLL8_ENET_BASE_ADDR);
	reg |= ANADIG_PLL_ENET_EN;
	__raw_writel(reg, PLL8_ENET_BASE_ADDR);
	/* Waiting for the PLL is locked */
	if (!WAIT(ANADIG_PLL_ENET_LOCK & __raw_readl(PLL8_ENET_BASE_ADDR),
		panic("pll8 lock failed\n");
	/* Disable the bypass */
	reg = __raw_readl(PLL8_ENET_BASE_ADDR);
	reg &= ~ANADIG_PLL_ENET_BYPASS;
	__raw_writel(reg, PLL8_ENET_BASE_ADDR);
	/* Enable SATA ref clock */
	reg = __raw_readl(PLL8_ENET_BASE_ADDR);
	reg |= ANADIG_PLL_ENET_EN_SATA;
	__raw_writel(reg, PLL8_ENET_BASE_ADDR);
/* Disable the SATA reference clock output from PLL8 (EN_SATA bit). */
static void _clk_sata_disable(struct clk *clk)
	/* Disable SATA ref clock */
	reg = __raw_readl(PLL8_ENET_BASE_ADDR);
	reg &= ~ANADIG_PLL_ENET_EN_SATA;
	__raw_writel(reg, PLL8_ENET_BASE_ADDR);
/* SATA clock: gated by CCGR5/CG2 with PLL8-managed enable/disable. */
static struct clk sata_clk = {
	__INIT_CLK_DEBUG(sata_clk)
	.enable = _clk_sata_enable,
	.enable_reg = MXC_CCM_CCGR5,
	.enable_shift = MXC_CCM_CCGRx_CG2_OFFSET,
	.disable = _clk_sata_disable,
/* USB OH3 (host controller) clock: gated by CCGR6/CG0. */
static struct clk usboh3_clk = {
	__INIT_CLK_DEBUG(usboh3_clk)
	.enable = _clk_enable,
	.enable_reg = MXC_CCM_CCGR6,
	.enable_shift = MXC_CCM_CCGRx_CG0_OFFSET,
	.disable = _clk_disable,
/*
 * Build a struct clk_lookup entry: d = dev_id, n = con_id, c = clk.
 * NOTE(review): the macro body continues on lines not visible in this chunk.
 */
#define _REGISTER_CLOCK(d, n, c) \
3896 static struct clk_lookup lookups[] = {
3897 _REGISTER_CLOCK(NULL, "osc", osc_clk),
3898 _REGISTER_CLOCK(NULL, "ckih", ckih_clk),
3899 _REGISTER_CLOCK(NULL, "ckih2", ckih2_clk),
3900 _REGISTER_CLOCK(NULL, "ckil", ckil_clk),
3901 _REGISTER_CLOCK(NULL, "pll1_main_clk", pll1_sys_main_clk),
3902 _REGISTER_CLOCK(NULL, "pll1_sw_clk", pll1_sw_clk),
3903 _REGISTER_CLOCK(NULL, "pll2", pll2_528_bus_main_clk),
3904 _REGISTER_CLOCK(NULL, "pll2_pfd_400M", pll2_pfd_400M),
3905 _REGISTER_CLOCK(NULL, "pll2_pfd_352M", pll2_pfd_352M),
3906 _REGISTER_CLOCK(NULL, "pll2_pfd_594M", pll2_pfd_594M),
3907 _REGISTER_CLOCK(NULL, "pll2_200M", pll2_200M),
3908 _REGISTER_CLOCK(NULL, "pll3_main_clk", pll3_usb_otg_main_clk),
3909 _REGISTER_CLOCK(NULL, "pll3_pfd_508M", pll3_pfd_508M),
3910 _REGISTER_CLOCK(NULL, "pll3_pfd_454M", pll3_pfd_454M),
3911 _REGISTER_CLOCK(NULL, "pll3_pfd_720M", pll3_pfd_720M),
3912 _REGISTER_CLOCK(NULL, "pll3_pfd_540M", pll3_pfd_540M),
3913 _REGISTER_CLOCK(NULL, "pll3_sw_clk", pll3_sw_clk),
3914 _REGISTER_CLOCK(NULL, "pll3_120M", pll3_120M),
3915 _REGISTER_CLOCK(NULL, "pll3_120M", pll3_80M),
3916 _REGISTER_CLOCK(NULL, "pll3_120M", pll3_60M),
3917 _REGISTER_CLOCK(NULL, "pll4", pll4_audio_main_clk),
3918 _REGISTER_CLOCK(NULL, "pll5", pll5_video_main_clk),
3919 _REGISTER_CLOCK(NULL, "pll4", pll6_MLB_main_clk),
3920 _REGISTER_CLOCK(NULL, "pll3", pll7_usb_host_main_clk),
3921 _REGISTER_CLOCK(NULL, "pll4", pll8_enet_main_clk),
3922 _REGISTER_CLOCK(NULL, "cpu_clk", cpu_clk),
3923 _REGISTER_CLOCK(NULL, "periph_clk", periph_clk),
3924 _REGISTER_CLOCK(NULL, "axi_clk", axi_clk),
3925 _REGISTER_CLOCK(NULL, "mmdc_ch0_axi", mmdc_ch0_axi_clk[0]),
3926 _REGISTER_CLOCK(NULL, "mmdc_ch1_axi", mmdc_ch1_axi_clk[0]),
3927 _REGISTER_CLOCK(NULL, "ahb", ahb_clk),
3928 _REGISTER_CLOCK(NULL, "ipg_clk", ipg_clk),
3929 _REGISTER_CLOCK(NULL, "ipg_perclk", ipg_perclk),
3930 _REGISTER_CLOCK(NULL, "spba", spba_clk),
3931 _REGISTER_CLOCK("imx-sdma", NULL, sdma_clk),
3932 _REGISTER_CLOCK(NULL, "gpu2d_axi_clk", gpu2d_axi_clk),
3933 _REGISTER_CLOCK(NULL, "gpu3d_axi_clk", gpu3d_axi_clk),
3934 _REGISTER_CLOCK(NULL, "pcie_axi_clk", pcie_axi_clk),
3935 _REGISTER_CLOCK(NULL, "vdo_axi_clk", vdo_axi_clk),
3936 _REGISTER_CLOCK(NULL, "iim_clk", iim_clk),
3937 _REGISTER_CLOCK(NULL, "i2c_clk", i2c_clk[0]),
3938 _REGISTER_CLOCK("imx-i2c.1", NULL, i2c_clk[1]),
3939 _REGISTER_CLOCK("imx-i2c.2", NULL, i2c_clk[2]),
3940 _REGISTER_CLOCK(NULL, "vpu_clk", vpu_clk),
3941 _REGISTER_CLOCK(NULL, "ipu1_clk", ipu1_clk),
3942 _REGISTER_CLOCK(NULL, "ipu2_clk", ipu2_clk),
3943 _REGISTER_CLOCK("sdhci-esdhc-imx.0", NULL, usdhc1_clk),
3944 _REGISTER_CLOCK("sdhci-esdhc-imx.1", NULL, usdhc2_clk),
3945 _REGISTER_CLOCK("sdhci-esdhc-imx.2", NULL, usdhc3_clk),
3946 _REGISTER_CLOCK("sdhci-esdhc-imx.3", NULL, usdhc4_clk),
3947 _REGISTER_CLOCK("imx-ssi.0", NULL, ssi1_clk),
3948 _REGISTER_CLOCK("imx-ssi.1", NULL, ssi2_clk),
3949 _REGISTER_CLOCK("imx-ssi.2", NULL, ssi3_clk),
3950 _REGISTER_CLOCK(NULL, "ipu1_di0_clk", ipu1_di_clk[0]),
3951 _REGISTER_CLOCK(NULL, "ipu1_di1_clk", ipu1_di_clk[1]),
3952 _REGISTER_CLOCK(NULL, "ipu2_di0_clk", ipu2_di_clk[0]),
3953 _REGISTER_CLOCK(NULL, "ipu2_di1_clk", ipu2_di_clk[1]),
3954 _REGISTER_CLOCK("FlexCAN.0", "can_clk", can1_clk[0]),
3955 _REGISTER_CLOCK("FlexCAN.1", "can_clk", can2_clk[0]),
3956 _REGISTER_CLOCK(NULL, "ldb_di0_clk", ldb_di0_clk),
3957 _REGISTER_CLOCK(NULL, "ldb_di1_clk", ldb_di1_clk),
3958 _REGISTER_CLOCK("mxc_alsa_spdif.0", NULL, spdif0_clk[0]),
3959 _REGISTER_CLOCK("mxc_alsa_spdif.1", NULL, spdif1_clk[0]),
3960 _REGISTER_CLOCK(NULL, "esai_clk", esai_clk),
3961 _REGISTER_CLOCK("mxc_spi.0", NULL, ecspi_clk[0]),
3962 _REGISTER_CLOCK("mxc_spi.1", NULL, ecspi_clk[1]),
3963 _REGISTER_CLOCK("mxc_spi.2", NULL, ecspi_clk[2]),
3964 _REGISTER_CLOCK("mxc_spi.3", NULL, ecspi_clk[3]),
3965 _REGISTER_CLOCK("mxc_spi.4", NULL, ecspi_clk[4]),
3966 _REGISTER_CLOCK(NULL, "emi_slow_clk", emi_slow_clk),
3967 _REGISTER_CLOCK(NULL, "emi_clk", emi_clk),
3968 _REGISTER_CLOCK(NULL, "enfc_clk", enfc_clk),
3969 _REGISTER_CLOCK("imx-uart.0", NULL, uart_clk[0]),
3970 _REGISTER_CLOCK(NULL, "hsi_tx", hsi_tx_clk),
3971 _REGISTER_CLOCK(NULL, "caam_clk", caam_clk[0]),
3972 _REGISTER_CLOCK(NULL, "asrc_clk", asrc_clk),
3973 _REGISTER_CLOCK(NULL, "apbh_dma_clk", apbh_dma_clk),
3974 _REGISTER_CLOCK(NULL, "openvg_axi_clk", openvg_axi_clk),
3975 _REGISTER_CLOCK(NULL, "gpu3d_clk", gpu3d_core_clk),
3976 _REGISTER_CLOCK(NULL, "gpu2d_clk", gpu2d_core_clk),
3977 _REGISTER_CLOCK(NULL, "gpu3d_shader_clk", gpu3d_shader_clk),
3978 _REGISTER_CLOCK(NULL, "gpt", gpt_clk[0]),
3979 _REGISTER_CLOCK(NULL, "gpmi-nfc", gpmi_nfc_clk[0]),
3980 _REGISTER_CLOCK(NULL, "gpmi-apb", gpmi_nfc_clk[1]),
3981 _REGISTER_CLOCK(NULL, "bch", gpmi_nfc_clk[2]),
3982 _REGISTER_CLOCK(NULL, "bch-apb", gpmi_nfc_clk[3]),
3983 _REGISTER_CLOCK("mxc_pwm.0", NULL, pwm_clk[0]),
3984 _REGISTER_CLOCK("mxc_pwm.1", NULL, pwm_clk[1]),
3985 _REGISTER_CLOCK("mxc_pwm.2", NULL, pwm_clk[2]),
3986 _REGISTER_CLOCK("mxc_pwm.3", NULL, pwm_clk[3]),
3987 _REGISTER_CLOCK(NULL, "pcie_clk", pcie_clk),
3988 _REGISTER_CLOCK("fec.0", NULL, enet_clk),
3989 _REGISTER_CLOCK(NULL, "imx_sata_clk", sata_clk),
3990 _REGISTER_CLOCK(NULL, "usboh3_clk", usboh3_clk),
3991 _REGISTER_CLOCK(NULL, "usb_phy1_clk", usb_phy1_clk),
3992 _REGISTER_CLOCK(NULL, "usb_phy2_clk", usb_phy2_clk),
3993 _REGISTER_CLOCK(NULL, "video_27M_clk", video_27M_clk),
3997 static void clk_tree_init(void)
4004 int __init mx6_clocks_init(unsigned long ckil, unsigned long osc,
4005 unsigned long ckih1, unsigned long ckih2)
4012 external_low_reference = ckil;
4013 external_high_reference = ckih1;
4014 ckih2_reference = ckih2;
4015 oscillator_reference = osc;
4017 apll_base = ioremap(ANATOP_BASE_ADDR, SZ_4K);
4021 for (i = 0; i < ARRAY_SIZE(lookups); i++) {
4022 clkdev_add(&lookups[i]);
4023 clk_debug_register(lookups[i].clk);
4026 /* Make sure all clocks are ON initially */
4027 __raw_writel(0xFFFFFFFF, MXC_CCM_CCGR0);
4028 __raw_writel(0xFFFFFFFF, MXC_CCM_CCGR1);
4029 __raw_writel(0xFFFFFFFF, MXC_CCM_CCGR2);
4030 __raw_writel(0xFFFFFFFF, MXC_CCM_CCGR3);
4031 __raw_writel(0xFFFFFFFF, MXC_CCM_CCGR4);
4032 __raw_writel(0xFFFFFFFF, MXC_CCM_CCGR5);
4033 __raw_writel(0xFFFFFFFF, MXC_CCM_CCGR6);
4035 base = ioremap(GPT_BASE_ADDR, SZ_4K);
4036 mxc_timer_init(&gpt_clk[0], base, MXC_INT_GPT);