3 * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved.
7 * The code contained herein is licensed under the GNU General Public
8 * License. You may obtain a copy of the GNU General Public License
9 * Version 2 or later at the following locations:
11 * http://www.opensource.org/licenses/gpl-license.html
12 * http://www.gnu.org/copyleft/gpl.html
15 #include <linux/kernel.h>
16 #include <linux/init.h>
17 #include <linux/types.h>
18 #include <linux/time.h>
19 #include <linux/hrtimer.h>
21 #include <linux/errno.h>
22 #include <linux/delay.h>
23 #include <linux/clk.h>
25 #include <linux/clkdev.h>
26 #include <asm/div64.h>
27 #include <mach/hardware.h>
28 #include <mach/common.h>
29 #include <mach/clock.h>
30 #include <mach/mxc_dvfs.h>
/*
 * __INIT_CLK_DEBUG(n) - optionally seed a clk node's .name field for the
 * clock-debug infrastructure.  Without the #else/#endif the second
 * definition redefines the first; restore the conditional pair.
 */
#ifdef CONFIG_CLK_DEBUG
#define __INIT_CLK_DEBUG(n)	.name = #n,
#else
#define __INIT_CLK_DEBUG(n)
#endif
/*
 * Forward declarations for the PLL clk nodes defined later in this file,
 * plus the anadig register base captured at init time.
 */
39 void __iomem *apll_base;
40 static struct clk pll1_sys_main_clk;
41 static struct clk pll2_528_bus_main_clk;
42 static struct clk pll3_usb_otg_main_clk;
43 static struct clk pll4_audio_main_clk;
44 static struct clk pll5_video_main_clk;
45 static struct clk pll6_MLB_main_clk;
46 static struct clk pll7_usb_host_main_clk;
47 static struct clk pll8_enet_main_clk;
48 static struct clk apbh_dma_clk;
/* Spin timeout used with WAIT() below. */
50 #define SPIN_DELAY 1000000 /* in nanoseconds */
/*
 * WAIT(exp, timeout) - poll until 'exp' becomes true or 'timeout' ns pass.
 * NOTE(review): several continuation lines of this macro are missing from
 * this listing (embedded numbering skips 53/56/58/61-67); do not edit it
 * without the full text.  Comparing tv_nsec alone also wraps at the one
 * second boundary -- verify against the complete source.
 */
52 #define WAIT(exp, timeout) \
54 struct timespec nstimeofday; \
55 struct timespec curtime; \
57 getnstimeofday(&nstimeofday); \
59 getnstimeofday(&curtime); \
60 if ((curtime.tv_nsec - nstimeofday.tv_nsec) > (timeout)) { \
68 /* External clock values passed-in by the board code */
69 static unsigned long external_high_reference, external_low_reference;
70 static unsigned long oscillator_reference, ckih2_reference;
/*
 * __calc_pre_post_dividers - split a requested total divider 'div' into a
 * pre-divider (bounded at 8) and a post-divider.
 * NOTE(review): lines are missing from this listing; only the div >= 8
 * path and the final post computation are visible.
 */
72 static void __calc_pre_post_dividers(u32 div, u32 *pre, u32 *post)
74 u32 min_pre, temp_pre, old_err, err;
79 } else if (div >= 8) {
/* Smallest pre that keeps post within its 64-step field. */
80 min_pre = (div - 1) / 64 + 1;
82 for (temp_pre = 8; temp_pre >= min_pre; temp_pre--) {
/* Round up so that pre * post >= div (never overclock). */
94 *post = (div + *pre - 1) / *pre;
/* _clk_enable - ungate a clock: set both CG bits of its CCGR field. */
101 static int _clk_enable(struct clk *clk)
104 reg = __raw_readl(clk->enable_reg);
105 reg |= MXC_CCM_CCGRx_CG_MASK << clk->enable_shift;
106 __raw_writel(reg, clk->enable_reg);
/*
 * _clk_disable - gate a clock fully.  The register write is deliberately
 * commented out (see TODO), so disabling is currently a no-op.
 */
111 static void _clk_disable(struct clk *clk)
114 reg = __raw_readl(clk->enable_reg);
115 reg &= ~(MXC_CCM_CCGRx_CG_MASK << clk->enable_shift);
116 /* TODO: un-comment the disable code */
117 /* __raw_writel(reg, clk->enable_reg); */
/*
 * _clk_disable_inwait - set the CG field to 0b01 (clock on in run mode,
 * gated in WAIT/STOP).  Write also still disabled per the TODO below.
 */
121 static void _clk_disable_inwait(struct clk *clk)
124 reg = __raw_readl(clk->enable_reg);
125 reg &= ~(MXC_CCM_CCGRx_CG_MASK << clk->enable_shift);
126 reg |= 1 << clk->enable_shift;
127 /* TODO: un-comment the disable code */
128 /* __raw_writel(reg, clk->enable_reg); */
132 * For the 4-to-1 muxed input clock
134 static inline u32 _get_mux(struct clk *parent, struct clk *m0,
135 struct clk *m1, struct clk *m2, struct clk *m3)
/* Returns the mux index (0..3) matching 'parent'; the return statements
 * for each branch are missing from this listing. */
139 else if (parent == m1)
141 else if (parent == m2)
143 else if (parent == m3)
/* Map a PLL clk node to its anadig register base address. */
151 static inline void __iomem *_get_pll_base(struct clk *pll)
153 if (pll == &pll1_sys_main_clk)
154 return PLL1_SYS_BASE_ADDR;
155 else if (pll == &pll2_528_bus_main_clk)
156 return PLL2_528_BASE_ADDR;
157 else if (pll == &pll3_usb_otg_main_clk)
158 return PLL3_480_USB1_BASE_ADDR;
159 else if (pll == &pll4_audio_main_clk)
160 return PLL4_AUDIO_BASE_ADDR;
161 else if (pll == &pll5_video_main_clk)
162 return PLL5_VIDEO_BASE_ADDR;
163 else if (pll == &pll6_MLB_main_clk)
164 return PLL6_MLB_BASE_ADDR;
165 else if (pll == &pll7_usb_host_main_clk)
166 return PLL7_480_USB2_BASE_ADDR;
167 else if (pll == &pll8_enet_main_clk)
168 return PLL8_ENET_BASE_ADDR;
176 * For the 6-to-1 muxed input clock
178 static inline u32 _get_mux6(struct clk *parent, struct clk *m0, struct clk *m1,
179 struct clk *m2, struct clk *m3, struct clk *m4,
/* Returns the mux index (0..5) matching 'parent'; returns missing from
 * this listing as above. */
184 else if (parent == m1)
186 else if (parent == m2)
188 else if (parent == m3)
190 else if (parent == m4)
192 else if (parent == m5)
/* Accessors for the board-supplied fixed reference rates (set elsewhere). */
199 static unsigned long get_high_reference_clock_rate(struct clk *clk)
201 return external_high_reference;
204 static unsigned long get_low_reference_clock_rate(struct clk *clk)
206 return external_low_reference;
209 static unsigned long get_oscillator_reference_clock_rate(struct clk *clk)
211 return oscillator_reference;
214 static unsigned long get_ckih2_reference_clock_rate(struct clk *clk)
216 return ckih2_reference;
219 /* External high frequency clock */
220 static struct clk ckih_clk = {
221 __INIT_CLK_DEBUG(ckih_clk)
222 .get_rate = get_high_reference_clock_rate,
/* Second external high-frequency input. */
225 static struct clk ckih2_clk = {
226 __INIT_CLK_DEBUG(ckih2_clk)
227 .get_rate = get_ckih2_reference_clock_rate,
/* On-board crystal oscillator (typically 24 MHz on i.MX6 -- confirm). */
230 static struct clk osc_clk = {
231 __INIT_CLK_DEBUG(osc_clk)
232 .get_rate = get_oscillator_reference_clock_rate,
235 /* External low frequency (32kHz) clock */
236 static struct clk ckil_clk = {
237 __INIT_CLK_DEBUG(ckil_clk)
238 .get_rate = get_low_reference_clock_rate,
/*
 * PFD helpers: a PFD output runs at parent * 18 / frac, frac in [12..35].
 * NOTE(review): round_rate clamps frac to [18..35] while set_rate clamps
 * to [12..35] -- inconsistent lower bounds; confirm against the RM.
 */
241 static unsigned long pfd_round_rate(struct clk *clk, unsigned long rate)
246 tmp = (u64)clk_get_rate(clk->parent) * 18;
249 frac = frac < 18 ? 18 : frac;
250 frac = frac > 35 ? 35 : frac;
/* pfd_get_rate - read back frac and derive the current output rate.
 * The APBH-DMA clock is temporarily enabled so the anadig block is
 * accessible (presumably a clock-domain dependency -- confirm). */
255 static unsigned long pfd_get_rate(struct clk *clk)
259 tmp = (u64)clk_get_rate(clk->parent) * 18;
261 if (apbh_dma_clk.usecount == 0)
262 apbh_dma_clk.enable(&apbh_dma_clk);
264 frac = (__raw_readl(clk->enable_reg) >> clk->enable_shift) &
265 ANADIG_PFD_FRAC_MASK;
/* pfd_set_rate - program frac; uses the anadig SET (+4) / CLR (+8)
 * register aliases so only the frac field is touched -- TODO confirm
 * offsets against the reference manual. */
272 static int pfd_set_rate(struct clk *clk, unsigned long rate)
276 tmp = (u64)clk_get_rate(clk->parent) * 18;
278 if (apbh_dma_clk.usecount == 0)
279 apbh_dma_clk.enable(&apbh_dma_clk);
281 /* Round up the divider so that we don't set a rate
282 * higher than what is requested. */
286 frac = frac < 12 ? 12 : frac;
287 frac = frac > 35 ? 35 : frac;
288 /* clear clk frac bits */
289 __raw_writel(ANADIG_PFD_FRAC_MASK << clk->enable_shift,
290 (int)clk->enable_reg + 8);
291 /* set clk frac bits */
292 __raw_writel(frac << clk->enable_shift,
293 (int)clk->enable_reg + 4);
295 tmp = (u64)clk_get_rate(clk->parent) * 18;
298 if (apbh_dma_clk.usecount == 0)
299 apbh_dma_clk.disable(&apbh_dma_clk);
/* _clk_pfd_enable - clear the PFD clock-gate bit (frac bit 7) via CLR. */
303 static int _clk_pfd_enable(struct clk *clk)
305 if (apbh_dma_clk.usecount == 0)
306 apbh_dma_clk.enable(&apbh_dma_clk);
308 /* clear clk gate bit */
309 __raw_writel((1 << (clk->enable_shift + 7)),
310 (int)clk->enable_reg + 8);
312 if (apbh_dma_clk.usecount == 0)
313 apbh_dma_clk.disable(&apbh_dma_clk);
/* _clk_pfd_disable - set the PFD clock-gate bit via SET. */
318 static void _clk_pfd_disable(struct clk *clk)
320 if (apbh_dma_clk.usecount == 0)
321 apbh_dma_clk.enable(&apbh_dma_clk);
323 /* set clk gate bit */
324 __raw_writel((1 << (clk->enable_shift + 7)),
325 (int)clk->enable_reg + 4);
327 if (apbh_dma_clk.usecount == 0)
328 apbh_dma_clk.disable(&apbh_dma_clk);
/*
 * USB-PHY clock gate helpers.
 * NOTE(review): here enable_shift is used directly as a bit MASK (OR'd
 * in), not as a shift count -- the nodes below store
 * ANADIG_PLL_480_EN_USB_CLKS in enable_shift.  Confirm intentional.
 */
331 static void _clk_usb_phy_enable(struct clk *clk)
334 usb_phy_reg = __raw_readl(clk->enable_reg);
335 __raw_writel(usb_phy_reg | clk->enable_shift, clk->enable_reg);
/* Inverse of the above: clear the EN_USB_CLKS mask. */
338 static void _clk_usb_phy_disable(struct clk *clk)
341 usb_phy_reg = __raw_readl(clk->enable_reg);
342 __raw_writel(usb_phy_reg & (~clk->enable_shift), clk->enable_reg);
/*
 * _clk_pll_enable - power up a PLL, wait for lock, then enable its output.
 * The 480 MHz USB PLLs (pll3/pll7) use an inverted POWER bit, handled
 * explicitly below.
 */
345 static int _clk_pll_enable(struct clk *clk)
348 void __iomem *pllbase;
350 pllbase = _get_pll_base(clk);
352 reg = __raw_readl(pllbase);
353 reg &= ~ANADIG_PLL_BYPASS;
354 reg &= ~ANADIG_PLL_POWER_DOWN;
356 /* The 480MHz PLLs have the opposite definition for power bit. */
357 if (clk == &pll3_usb_otg_main_clk || clk == &pll7_usb_host_main_clk)
358 reg |= ANADIG_PLL_POWER_DOWN;
360 __raw_writel(reg, pllbase);
362 /* Wait for PLL to lock */
363 if (!WAIT(__raw_readl(pllbase) & ANADIG_PLL_LOCK,
365 panic("pll enable failed\n");
367 /* Enable the PLL output now*/
368 reg = __raw_readl(pllbase);
369 reg |= ANADIG_PLL_ENABLE;
370 __raw_writel(reg, pllbase);
/*
 * _clk_pll_disable - gate the PLL output, bypass and power it down
 * (power bit again inverted for the 480 MHz USB PLLs).
 */
375 static void _clk_pll_disable(struct clk *clk)
378 void __iomem *pllbase;
380 pllbase = _get_pll_base(clk);
382 reg = __raw_readl(pllbase);
383 reg &= ~ANADIG_PLL_ENABLE;
384 reg |= ANADIG_PLL_BYPASS;
385 reg |= ANADIG_PLL_POWER_DOWN;
386 if (clk == &pll3_usb_otg_main_clk || clk == &pll7_usb_host_main_clk)
387 reg &= ~ANADIG_PLL_POWER_DOWN;
388 __raw_writel(reg, pllbase);
/*
 * _clk_pll1_main_get_rate - PLL1 output = parent * DIV_SELECT / 2
 * (DIV_SELECT is the raw field from the anadig SYS PLL register).
 */
391 static unsigned long _clk_pll1_main_get_rate(struct clk *clk)
396 div = __raw_readl(PLL1_SYS_BASE_ADDR) & ANADIG_PLL_SYS_DIV_SELECT_MASK;
397 val = (clk_get_rate(clk->parent) * div) / 2;
401 static int _clk_pll1_main_set_rate(struct clk *clk, unsigned long rate)
403 unsigned int reg, div;
405 if (rate/1000 < 650000 || rate/1000 > 1300000000)
408 div = (rate * 2) / clk_get_rate(clk->parent) ;
410 reg = __raw_readl(PLL1_SYS_BASE_ADDR) & ~ANADIG_PLL_SYS_DIV_SELECT_MASK;
412 __raw_writel(reg, PLL1_SYS_BASE_ADDR);
/* PLL1 (ARM/system PLL) clk node; rate set via the anadig DIV_SELECT. */
417 static struct clk pll1_sys_main_clk = {
418 __INIT_CLK_DEBUG(pll1_sys_main_clk)
420 .get_rate = _clk_pll1_main_get_rate,
421 .set_rate = _clk_pll1_main_set_rate,
422 .enable = _clk_pll_enable,
423 .disable = _clk_pll_disable,
426 static int _clk_pll1_sw_set_parent(struct clk *clk, struct clk *parent)
430 reg = __raw_readl(MXC_CCM_CCSR);
432 if (parent == &pll1_sys_main_clk) {
433 reg &= ~MXC_CCM_CCSR_PLL1_SW_CLK_SEL;
434 __raw_writel(reg, MXC_CCM_CCSR);
435 /* Set the step_clk parent to be lp_apm, to save power. */
436 reg = __raw_readl(MXC_CCM_CCSR);
437 reg = (reg & ~MXC_CCM_CCSR_STEP_SEL);
439 /* Set STEP_CLK to be the parent*/
440 if (parent == &osc_clk) {
441 /* Set STEP_CLK to be sourced from LPAPM. */
442 reg = __raw_readl(MXC_CCM_CCSR);
443 reg = (reg & ~MXC_CCM_CCSR_STEP_SEL);
444 __raw_writel(reg, MXC_CCM_CCSR);
446 /* Set STEP_CLK to be sourced from PLL2-PDF (400MHz). */
447 reg = __raw_readl(MXC_CCM_CCSR);
448 reg |= MXC_CCM_CCSR_STEP_SEL;
449 __raw_writel(reg, MXC_CCM_CCSR);
452 reg = __raw_readl(MXC_CCM_CCSR);
453 reg |= MXC_CCM_CCSR_PLL1_SW_CLK_SEL;
454 reg = __raw_readl(MXC_CCM_CCSR);
456 __raw_writel(reg, MXC_CCM_CCSR);
/* pll1_sw is a pure mux: its rate is whatever the selected parent runs at. */
461 static unsigned long _clk_pll1_sw_get_rate(struct clk *clk)
463 return clk_get_rate(clk->parent);
/* CPU clock mux node (pll1 direct vs. step_clk). */
466 static struct clk pll1_sw_clk = {
467 __INIT_CLK_DEBUG(pll1_sw_clk)
468 .parent = &pll1_sys_main_clk,
469 .set_parent = _clk_pll1_sw_set_parent,
470 .get_rate = _clk_pll1_sw_get_rate,
/*
 * PLL2 (528 MHz bus PLL): DIV_SELECT chooses parent*22 (528 MHz) or
 * parent*20 (480 MHz) from the 24 MHz reference.
 */
473 static unsigned long _clk_pll2_main_get_rate(struct clk *clk)
478 div = __raw_readl(PLL2_528_BASE_ADDR) & ANADIG_PLL_528_DIV_SELECT;
481 val = clk_get_rate(clk->parent) * 22;
484 val = clk_get_rate(clk->parent) * 20;
/* Only the two discrete rates 528 MHz / 480 MHz are programmable. */
489 static int _clk_pll2_main_set_rate(struct clk *clk, unsigned long rate)
491 unsigned int reg, div;
493 if (rate == 528000000)
495 else if (rate == 480000000)
500 reg = __raw_readl(PLL2_528_BASE_ADDR);
501 reg &= ~ANADIG_PLL_528_DIV_SELECT;
503 __raw_writel(reg, PLL2_528_BASE_ADDR);
/* PLL2 clk node; parents the system bus tree and the 528-PFDs below. */
508 static struct clk pll2_528_bus_main_clk = {
509 __INIT_CLK_DEBUG(pll2_528_bus_main_clk)
511 .get_rate = _clk_pll2_main_get_rate,
512 .set_rate = _clk_pll2_main_set_rate,
513 .enable = _clk_pll_enable,
514 .disable = _clk_pll_disable,
517 static struct clk pll2_pfd_400M = {
518 __INIT_CLK_DEBUG(pll2_pfd_400M)
519 .parent = &pll2_528_bus_main_clk,
520 .enable_reg = (void *)PFD_528_BASE_ADDR,
521 .enable_shift = ANADIG_PFD2_FRAC_OFFSET,
522 .enable = _clk_pfd_enable,
523 .disable = _clk_pfd_disable,
524 .get_rate = pfd_get_rate,
525 .set_rate = pfd_set_rate,
526 .get_rate = pfd_get_rate,
527 .round_rate = pfd_round_rate,
/* PLL2 PFD0: 528 MHz * 18 / frac, nominally 352 MHz. */
530 static struct clk pll2_pfd_352M = {
531 __INIT_CLK_DEBUG(pll2_pfd_352M)
532 .parent = &pll2_528_bus_main_clk,
533 .enable_reg = (void *)PFD_528_BASE_ADDR,
534 .enable_shift = ANADIG_PFD0_FRAC_OFFSET,
535 .enable = _clk_pfd_enable,
536 .disable = _clk_pfd_disable,
537 .set_rate = pfd_set_rate,
538 .get_rate = pfd_get_rate,
539 .round_rate = pfd_round_rate,
/* PLL2 PFD1: nominally 594 MHz. */
542 static struct clk pll2_pfd_594M = {
543 __INIT_CLK_DEBUG(pll2_pfd_594M)
544 .parent = &pll2_528_bus_main_clk,
545 .enable_reg = (void *)PFD_528_BASE_ADDR,
546 .enable_shift = ANADIG_PFD1_FRAC_OFFSET,
547 .enable = _clk_pfd_enable,
548 .disable = _clk_pfd_disable,
549 .set_rate = pfd_set_rate,
550 .get_rate = pfd_get_rate,
551 .round_rate = pfd_round_rate,
/* Fixed /2 off the 400 MHz PFD -> 200 MHz. */
554 static unsigned long _clk_pll2_200M_get_rate(struct clk *clk)
556 return clk_get_rate(clk->parent) / 2;
559 static struct clk pll2_200M = {
560 __INIT_CLK_DEBUG(pll2_200M)
561 .parent = &pll2_pfd_400M,
562 .get_rate = _clk_pll2_200M_get_rate,
/* PLL3 (480 MHz USB OTG PLL): same *22 / *20 DIV_SELECT scheme as PLL2. */
565 static unsigned long _clk_pll3_usb_otg_get_rate(struct clk *clk)
570 div = __raw_readl(PLL3_480_USB1_BASE_ADDR)
571 & ANADIG_PLL_480_DIV_SELECT_MASK;
574 val = clk_get_rate(clk->parent) * 22;
576 val = clk_get_rate(clk->parent) * 20;
/* Accepts only the discrete rates 528 MHz / 480 MHz. */
580 static int _clk_pll3_usb_otg_set_rate(struct clk *clk, unsigned long rate)
582 unsigned int reg, div;
584 if (rate == 528000000)
586 else if (rate == 480000000)
591 reg = __raw_readl(PLL3_480_USB1_BASE_ADDR);
592 reg &= ~ANADIG_PLL_480_DIV_SELECT_MASK;
594 __raw_writel(reg, PLL3_480_USB1_BASE_ADDR);
600 /* same as pll3_main_clk. These two clocks should always be the same */
601 static struct clk pll3_usb_otg_main_clk = {
602 __INIT_CLK_DEBUG(pll3_usb_otg_main_clk)
604 .enable = _clk_pll_enable,
605 .disable = _clk_pll_disable,
606 .set_rate = _clk_pll3_usb_otg_set_rate,
607 .get_rate = _clk_pll3_usb_otg_get_rate,
/* USB PHY1 gate; enable_shift here holds the EN_USB_CLKS mask (see the
 * _clk_usb_phy_* helpers). */
610 static struct clk usb_phy1_clk = {
611 __INIT_CLK_DEBUG(usb_phy1_clk)
612 .parent = &pll3_usb_otg_main_clk,
613 .enable = _clk_usb_phy_enable,
614 .disable = _clk_usb_phy_disable,
615 .enable_reg = (void *)PLL3_480_USB1_BASE_ADDR,
616 .enable_shift = ANADIG_PLL_480_EN_USB_CLKS,
617 .set_rate = _clk_pll3_usb_otg_set_rate,
618 .get_rate = _clk_pll3_usb_otg_get_rate,
622 static struct clk pll3_pfd_508M = {
623 __INIT_CLK_DEBUG(pll3_pfd_508M)
624 .parent = &pll3_usb_otg_main_clk,
625 .enable_reg = (void *)PFD_480_BASE_ADDR,
626 .enable_shift = ANADIG_PFD2_FRAC_OFFSET,
627 .enable = _clk_pfd_enable,
628 .disable = _clk_pfd_disable,
629 .set_rate = pfd_set_rate,
630 .round_rate = pfd_round_rate,
/* PLL3 PFD3: nominally 454 MHz. */
633 static struct clk pll3_pfd_454M = {
634 __INIT_CLK_DEBUG(pll3_pfd_454M)
635 .parent = &pll3_usb_otg_main_clk,
636 .enable_reg = (void *)PFD_480_BASE_ADDR,
637 .enable_shift = ANADIG_PFD3_FRAC_OFFSET,
638 .enable = _clk_pfd_enable,
639 .disable = _clk_pfd_disable,
640 .set_rate = pfd_set_rate,
641 .get_rate = pfd_get_rate,
642 .round_rate = pfd_round_rate,
/* PLL3 PFD0: nominally 720 MHz. */
645 static struct clk pll3_pfd_720M = {
646 __INIT_CLK_DEBUG(pll3_pfd_720M)
647 .parent = &pll3_usb_otg_main_clk,
648 .enable_reg = (void *)PFD_480_BASE_ADDR,
649 .enable_shift = ANADIG_PFD0_FRAC_OFFSET,
650 .enable = _clk_pfd_enable,
651 .disable = _clk_pfd_disable,
652 .set_rate = pfd_set_rate,
653 .get_rate = pfd_get_rate,
654 .round_rate = pfd_round_rate,
657 static struct clk pll3_pfd_540M = {
658 __INIT_CLK_DEBUG(pll3_pfd_540M)
659 .parent = &pll3_usb_otg_main_clk,
660 .enable_reg = (void *)PFD_480_BASE_ADDR,
661 .enable_shift = ANADIG_PFD1_FRAC_OFFSET,
662 .enable = _clk_pfd_enable,
663 .disable = _clk_pfd_disable,
664 .set_rate = pfd_set_rate,
665 .get_rate = pfd_get_rate,
666 .round_rate = pfd_round_rate,
667 .get_rate = pfd_get_rate,
/* pll3_sw mirrors its parent's rate (pure mux/passthrough). */
670 static unsigned long _clk_pll3_sw_get_rate(struct clk *clk)
672 return clk_get_rate(clk->parent);
675 /* same as pll3_main_clk. These two clocks should always be the same */
676 static struct clk pll3_sw_clk = {
677 __INIT_CLK_DEBUG(pll3_sw_clk)
678 .parent = &pll3_usb_otg_main_clk,
679 .get_rate = _clk_pll3_sw_get_rate,
/* Fixed dividers off pll3_sw (480 MHz): /4 = 120 MHz. */
682 static unsigned long _clk_pll3_120M_get_rate(struct clk *clk)
684 return clk_get_rate(clk->parent) / 4;
687 static struct clk pll3_120M = {
688 __INIT_CLK_DEBUG(pll3_120M)
689 .parent = &pll3_sw_clk,
690 .get_rate = _clk_pll3_120M_get_rate,
/* /6 = 80 MHz. */
693 static unsigned long _clk_pll3_80M_get_rate(struct clk *clk)
695 return clk_get_rate(clk->parent) / 6;
698 static struct clk pll3_80M = {
699 __INIT_CLK_DEBUG(pll3_80M)
700 .parent = &pll3_sw_clk,
701 .get_rate = _clk_pll3_80M_get_rate,
/* /8 = 60 MHz. */
704 static unsigned long _clk_pll3_60M_get_rate(struct clk *clk)
706 return clk_get_rate(clk->parent) / 8;
709 static struct clk pll3_60M = {
710 __INIT_CLK_DEBUG(pll3_60M)
711 .parent = &pll3_sw_clk,
712 .get_rate = _clk_pll3_60M_get_rate,
/* Audio, video and MLB PLL nodes: enable/disable only; rate hooks are
 * absent in this listing. */
715 static struct clk pll4_audio_main_clk = {
716 __INIT_CLK_DEBUG(pll4_audio_main_clk)
718 .enable = _clk_pll_enable,
719 .disable = _clk_pll_disable,
722 static struct clk pll5_video_main_clk = {
723 __INIT_CLK_DEBUG(pll5_video_main_clk)
725 .enable = _clk_pll_enable,
726 .disable = _clk_pll_disable,
729 static struct clk pll6_MLB_main_clk = {
730 __INIT_CLK_DEBUG(pll6_MLB_main_clk)
732 .enable = _clk_pll_enable,
733 .disable = _clk_pll_disable,
/* PLL7 (480 MHz USB host PLL): identical *22 / *20 scheme as PLL3. */
736 static unsigned long _clk_pll7_usb_otg_get_rate(struct clk *clk)
741 div = __raw_readl(PLL7_480_USB2_BASE_ADDR)
742 & ANADIG_PLL_480_DIV_SELECT_MASK;
745 val = clk_get_rate(clk->parent) * 22;
747 val = clk_get_rate(clk->parent) * 20;
/* Accepts only 528 MHz / 480 MHz. */
751 static int _clk_pll7_usb_otg_set_rate(struct clk *clk, unsigned long rate)
753 unsigned int reg, div;
755 if (rate == 528000000)
757 else if (rate == 480000000)
762 reg = __raw_readl(PLL7_480_USB2_BASE_ADDR);
763 reg &= ~ANADIG_PLL_480_DIV_SELECT_MASK;
765 __raw_writel(reg, PLL7_480_USB2_BASE_ADDR);
770 static struct clk pll7_usb_host_main_clk = {
771 __INIT_CLK_DEBUG(pll7_usb_host_main_clk)
773 .enable = _clk_pll_enable,
774 .disable = _clk_pll_disable,
775 .set_rate = _clk_pll7_usb_otg_set_rate,
776 .get_rate = _clk_pll7_usb_otg_get_rate,
/* USB PHY2 gate; enable_shift carries the EN_USB_CLKS mask. */
780 static struct clk usb_phy2_clk = {
781 __INIT_CLK_DEBUG(usb_phy2_clk)
782 .parent = &pll7_usb_host_main_clk,
783 .enable = _clk_usb_phy_enable,
784 .disable = _clk_usb_phy_disable,
785 .enable_reg = (void *)PLL7_480_USB2_BASE_ADDR,
786 .enable_shift = ANADIG_PLL_480_EN_USB_CLKS,
787 .set_rate = _clk_pll7_usb_otg_set_rate,
788 .get_rate = _clk_pll7_usb_otg_get_rate,
/* Ethernet PLL node. */
792 static struct clk pll8_enet_main_clk = {
793 __INIT_CLK_DEBUG(pll8_enet_main_clk)
795 .enable = _clk_pll_enable,
796 .disable = _clk_pll_disable,
/* ARM core clock: pll1_sw divided by the CACRR ARM_PODF field (+1). */
799 static unsigned long _clk_arm_get_rate(struct clk *clk)
803 cacrr = __raw_readl(MXC_CCM_CACRR);
804 div = (cacrr & MXC_CCM_CACRR_ARM_PODF_MASK) + 1;
805 return clk_get_rate(clk->parent) / div;
/* Program ARM_PODF; the divider validity checks are missing from this
 * listing (embedded numbering skips). */
808 static int _clk_arm_set_rate(struct clk *clk, unsigned long rate)
812 div = (clk_get_rate(clk->parent) / rate);
816 __raw_writel(div - 1, MXC_CCM_CACRR);
821 static struct clk cpu_clk = {
822 __INIT_CLK_DEBUG(cpu_clk)
823 .parent = &pll1_sw_clk,
824 .set_rate = _clk_arm_set_rate,
825 .get_rate = _clk_arm_get_rate,
/*
 * periph_clk parent select: muxes 0..3 go through pre_periph_clk,
 * muxes 4..5 (pll3_sw/osc) through periph_clk2.  NOTE(review): the
 * if/else between the two paths is missing from this listing; visible
 * statements are the two alternatives' bodies.
 */
828 static int _clk_periph_set_parent(struct clk *clk, struct clk *parent)
833 mux = _get_mux6(parent, &pll2_528_bus_main_clk, &pll2_pfd_400M,
834 &pll2_pfd_352M, &pll2_200M, &pll3_sw_clk, &osc_clk);
837 /* Set the pre_periph_clk multiplexer */
838 reg = __raw_readl(MXC_CCM_CBCMR);
839 reg &= ~MXC_CCM_CBCMR_PRE_PERIPH_CLK_SEL_MASK;
840 reg |= mux << MXC_CCM_CBCMR_PRE_PERIPH_CLK_SEL_OFFSET;
841 __raw_writel(reg, MXC_CCM_CBCMR);
843 /* Set the periph_clk_sel multiplexer. */
844 reg = __raw_readl(MXC_CCM_CBCDR);
845 reg &= ~MXC_CCM_CBCDR_PERIPH_CLK_SEL;
846 __raw_writel(reg, MXC_CCM_CBCDR);
848 /* Set the periph_clk2_podf divider to divide by 1. */
849 reg = __raw_readl(MXC_CCM_CBCDR);
850 reg &= ~MXC_CCM_CBCDR_PERIPH_CLK2_PODF_MASK;
851 __raw_writel(reg, MXC_CCM_CBCDR);
853 /* Set the periph_clk2_sel mux. */
854 reg = __raw_readl(MXC_CCM_CBCMR);
855 reg &= ~MXC_CCM_CBCMR_PERIPH_CLK2_SEL_MASK;
856 reg |= ((mux - 4) << MXC_CCM_CBCMR_PERIPH_CLK2_SEL_OFFSET);
857 __raw_writel(reg, MXC_CCM_CBCMR);
/* Handshake: wait for CCM to finish the mux switch. */
860 if (!WAIT(!(__raw_readl(MXC_CCM_CDHIPR)
861 & MXC_CCM_CDHIPR_PERIPH_CLK_SEL_BUSY), SPIN_DELAY))
862 panic("pll _clk_axi_a_set_rate failed\n")
863 ;
867 static unsigned long _clk_periph_get_rate(struct clk *clk)
/* Only the periph_clk2 path applies the PODF divider. */
873 if ((clk->parent == &pll3_sw_clk) || (clk->parent == &osc_clk)) {
874 reg = __raw_readl(MXC_CCM_CBCDR)
875 & MXC_CCM_CBCDR_PERIPH_CLK2_PODF_MASK;
876 div = (reg >> MXC_CCM_CBCDR_PERIPH_CLK2_PODF_OFFSET) + 1;
878 val = clk_get_rate(clk->parent) / div;
/* Main peripheral (bus) root clock node. */
882 static struct clk periph_clk = {
883 __INIT_CLK_DEBUG(periph_clk)
884 .parent = &pll2_528_bus_main_clk,
885 .set_parent = _clk_periph_set_parent,
886 .get_rate = _clk_periph_get_rate,
/* AXI rate = parent / (AXI_PODF + 1) from CBCDR. */
889 static unsigned long _clk_axi_get_rate(struct clk *clk)
894 reg = __raw_readl(MXC_CCM_CBCDR) & MXC_CCM_CBCDR_AXI_PODF_MASK;
895 div = (reg >> MXC_CCM_CBCDR_AXI_PODF_OFFSET);
897 val = clk_get_rate(clk->parent) / (div + 1);
/* Program AXI_PODF; only exact divisions up to /8 are accepted, then
 * wait for the CCM handshake to complete. */
901 static int _clk_axi_set_rate(struct clk *clk, unsigned long rate)
904 u32 parent_rate = clk_get_rate(clk->parent);
906 div = parent_rate / rate;
910 if (((parent_rate / div) != rate) || (div > 8))
913 reg = __raw_readl(MXC_CCM_CBCDR);
914 reg &= ~MXC_CCM_CBCDR_AXI_PODF_MASK;
915 reg |= (div - 1) << MXC_CCM_CBCDR_AXI_PODF_OFFSET;
916 __raw_writel(reg, MXC_CCM_CBCDR);
918 if (!WAIT(!(__raw_readl(MXC_CCM_CDHIPR)
919 & MXC_CCM_CDHIPR_AXI_PODF_BUSY), SPIN_DELAY))
920 panic("pll _clk_axi_a_set_rate failed\n");
/* Round to the nearest achievable rate; clamping lines are missing from
 * this listing (embedded numbering skips 936-942). */
925 static unsigned long _clk_axi_round_rate(struct clk *clk,
929 u32 parent_rate = clk_get_rate(clk->parent);
931 div = parent_rate / rate;
933 /* Make sure rate is not greater than the maximum
934 * value for the clock.
935 * Also prevent a div of 0.
943 return parent_rate / div;
946 static int _clk_axi_set_parent(struct clk *clk, struct clk *parent)
951 mux = _get_mux6(parent, &periph_clk, &pll2_pfd_400M,
952 &pll3_pfd_540M, NULL, NULL, NULL);
955 /* Set the AXI_SEL mux */
956 reg = __raw_readl(MXC_CCM_CBCDR) & ~MXC_CCM_CBCDR_AXI_SEL;
957 __raw_writel(reg, MXC_CCM_CBCDR);
959 /* Set the AXI_ALT_SEL mux. */
960 reg = __raw_readl(MXC_CCM_CBCDR)
961 & ~MXC_CCM_CBCDR_AXI_ALT_SEL_MASK;
962 reg = ((mux - 1) << MXC_CCM_CBCDR_AXI_ALT_SEL_OFFSET);\
963 __raw_writel(reg, MXC_CCM_CBCDR);
965 /* Set the AXI_SEL mux */
966 reg = __raw_readl(MXC_CCM_CBCDR) & ~MXC_CCM_CBCDR_AXI_SEL;
967 reg |= MXC_CCM_CBCDR_AXI_SEL;
968 __raw_writel(reg, MXC_CCM_CBCDR);
/* AXI root clock node. */
973 static struct clk axi_clk = {
974 __INIT_CLK_DEBUG(axi_clk)
975 .parent = &periph_clk,
976 .set_parent = _clk_axi_set_parent,
977 .set_rate = _clk_axi_set_rate,
978 .get_rate = _clk_axi_get_rate,
979 .round_rate = _clk_axi_round_rate,
/* AHB rate = parent / (AHB_PODF + 1) from CBCDR. */
982 static unsigned long _clk_ahb_get_rate(struct clk *clk)
986 reg = __raw_readl(MXC_CCM_CBCDR);
987 div = ((reg & MXC_CCM_CBCDR_AHB_PODF_MASK) >>
988 MXC_CCM_CBCDR_AHB_PODF_OFFSET) + 1;
990 return clk_get_rate(clk->parent) / div;
/* Program AHB_PODF (exact divisions up to /8) and wait for handshake. */
993 static int _clk_ahb_set_rate(struct clk *clk, unsigned long rate)
996 u32 parent_rate = clk_get_rate(clk->parent);
998 div = parent_rate / rate;
1001 if (((parent_rate / div) != rate) || (div > 8))
1004 reg = __raw_readl(MXC_CCM_CBCDR);
1005 reg &= ~MXC_CCM_CBCDR_AHB_PODF_MASK;
1006 reg |= (div - 1) << MXC_CCM_CBCDR_AHB_PODF_OFFSET;
1007 __raw_writel(reg, MXC_CCM_CBCDR);
1009 if (!WAIT(!(__raw_readl(MXC_CCM_CDHIPR) & MXC_CCM_CDHIPR_AHB_PODF_BUSY),
1011 panic("_clk_ahb_set_rate failed\n");
/* Round to the nearest achievable AHB rate (clamp lines missing here). */
1016 static unsigned long _clk_ahb_round_rate(struct clk *clk,
1020 u32 parent_rate = clk_get_rate(clk->parent);
1022 div = parent_rate / rate;
1024 /* Make sure rate is not greater than the maximum value for the clock.
1025 * Also prevent a div of 0.
1033 return parent_rate / div;
/* AHB root clock node. */
1036 static struct clk ahb_clk = {
1037 __INIT_CLK_DEBUG(ahb_clk)
1038 .parent = &periph_clk,
1039 .get_rate = _clk_ahb_get_rate,
1040 .set_rate = _clk_ahb_set_rate,
1041 .round_rate = _clk_ahb_round_rate,
/* IPG rate = parent / (IPG_PODF + 1) from CBCDR. */
1044 static unsigned long _clk_ipg_get_rate(struct clk *clk)
1048 reg = __raw_readl(MXC_CCM_CBCDR);
1049 div = ((reg & MXC_CCM_CBCDR_IPG_PODF_MASK) >>
1050 MXC_CCM_CBCDR_IPG_PODF_OFFSET) + 1;
1052 return clk_get_rate(clk->parent) / div;
/* IPG bus clock node. */
1056 static struct clk ipg_clk = {
1057 __INIT_CLK_DEBUG(ipg_clk)
1059 .get_rate = _clk_ipg_get_rate,
/* MMDC channel-0 AXI rate = parent / (MMDC_CH0_PODF + 1). */
1062 static unsigned long _clk_mmdc_ch0_axi_get_rate(struct clk *clk)
1066 reg = __raw_readl(MXC_CCM_CBCDR);
1067 div = ((reg & MXC_CCM_CBCDR_MMDC_CH0_PODF_MASK) >>
1068 MXC_CCM_CBCDR_MMDC_CH0_PODF_OFFSET) + 1;
1070 return clk_get_rate(clk->parent) / div;
/* Program MMDC_CH0_PODF (exact divisions up to /8), then wait for the
 * CCM handshake. */
1073 static int _clk_mmdc_ch0_axi_set_rate(struct clk *clk, unsigned long rate)
1076 u32 parent_rate = clk_get_rate(clk->parent);
1078 div = parent_rate / rate;
1081 if (((parent_rate / div) != rate) || (div > 8))
1084 reg = __raw_readl(MXC_CCM_CBCDR);
1085 reg &= ~MXC_CCM_CBCDR_MMDC_CH0_PODF_MASK;
1086 reg |= (div - 1) << MXC_CCM_CBCDR_MMDC_CH0_PODF_OFFSET;
1087 __raw_writel(reg, MXC_CCM_CBCDR);
1089 if (!WAIT(!(__raw_readl(MXC_CCM_CDHIPR)
1090 & MXC_CCM_CDHIPR_MMDC_CH0_PODF_BUSY),
1092 panic("_clk_mmdc_ch0_axi_set_rate failed\n");
/* Round to nearest achievable rate (clamp lines missing here). */
1097 static unsigned long _clk_mmdc_ch0_axi_round_rate(struct clk *clk,
1101 u32 parent_rate = clk_get_rate(clk->parent);
1103 div = parent_rate / rate;
1105 /* Make sure rate is not greater than the maximum value for the clock.
1106 * Also prevent a div of 0.
1114 return parent_rate / div;
/* [0] = AXI interface clock (gated in CCGR3/CG10, secondary keeps the
 * IPG side alive); [1] = IPG interface clock (CCGR3/CG12). */
1117 static struct clk mmdc_ch0_axi_clk[] = {
1119 __INIT_CLK_DEBUG(mmdc_ch0_axi_clk)
1121 .parent = &periph_clk,
1122 .enable = _clk_enable,
1123 .disable = _clk_disable,
1124 .enable_reg = MXC_CCM_CCGR3,
1125 .enable_shift = MXC_CCM_CCGRx_CG10_OFFSET,
1126 .secondary = &mmdc_ch0_axi_clk[1],
1127 .get_rate = _clk_mmdc_ch0_axi_get_rate,
1128 .set_rate = _clk_mmdc_ch0_axi_set_rate,
1129 .round_rate = _clk_mmdc_ch0_axi_round_rate,
1132 __INIT_CLK_DEBUG(mmdc_ch0_ipg_clk)
1135 .enable = _clk_enable,
1136 .disable = _clk_disable,
1137 .enable_reg = MXC_CCM_CCGR3,
1138 .enable_shift = MXC_CCM_CCGRx_CG12_OFFSET,
/* MMDC channel-1 AXI rate = parent / (MMDC_CH1_PODF + 1). */
1142 static unsigned long _clk_mmdc_ch1_axi_get_rate(struct clk *clk)
1146 reg = __raw_readl(MXC_CCM_CBCDR);
1147 div = ((reg & MXC_CCM_CBCDR_MMDC_CH1_PODF_MASK) >>
1148 MXC_CCM_CBCDR_MMDC_CH1_PODF_OFFSET) + 1;
1150 return clk_get_rate(clk->parent) / div;
/* Program MMDC_CH1_PODF (exact divisions up to /8) plus handshake. */
1153 static int _clk_mmdc_ch1_axi_set_rate(struct clk *clk, unsigned long rate)
1156 u32 parent_rate = clk_get_rate(clk->parent);
1158 div = parent_rate / rate;
1161 if (((parent_rate / div) != rate) || (div > 8))
1164 reg = __raw_readl(MXC_CCM_CBCDR);
1165 reg &= ~MXC_CCM_CBCDR_MMDC_CH1_PODF_MASK;
1166 reg |= (div - 1) << MXC_CCM_CBCDR_MMDC_CH1_PODF_OFFSET;
1167 __raw_writel(reg, MXC_CCM_CBCDR);
1169 if (!WAIT(!(__raw_readl(MXC_CCM_CDHIPR)
1170 & MXC_CCM_CDHIPR_MMDC_CH1_PODF_BUSY), SPIN_DELAY))
1171 panic("_clk_mmdc_ch1_axi_set_rate failed\n");
/* Round to nearest achievable rate (clamp lines missing here). */
1176 static unsigned long _clk_mmdc_ch1_axi_round_rate(struct clk *clk,
1180 u32 parent_rate = clk_get_rate(clk->parent);
1182 div = parent_rate / rate;
1184 /* Make sure rate is not greater than the maximum value for the clock.
1185 * Also prevent a div of 0.
1193 return parent_rate / div;
/* [0] = AXI clock (CCGR3/CG11) off the 400 MHz PFD; [1] = IPG clock
 * (CCGR3/CG13). */
1196 static struct clk mmdc_ch1_axi_clk[] = {
1198 __INIT_CLK_DEBUG(mmdc_ch1_axi_clk)
1200 .parent = &pll2_pfd_400M,
1201 .enable = _clk_enable,
1202 .disable = _clk_disable,
1203 .enable_reg = MXC_CCM_CCGR3,
1204 .enable_shift = MXC_CCM_CCGRx_CG11_OFFSET,
1205 .secondary = &mmdc_ch1_axi_clk[1],
1206 .get_rate = _clk_mmdc_ch1_axi_get_rate,
1207 .set_rate = _clk_mmdc_ch1_axi_set_rate,
1208 .round_rate = _clk_mmdc_ch1_axi_round_rate,
1212 __INIT_CLK_DEBUG(mmdc_ch1_ipg_clk)
1214 .enable = _clk_enable,
1215 .disable = _clk_disable,
1216 .enable_reg = MXC_CCM_CCGR3,
1217 .enable_shift = MXC_CCM_CCGRx_CG13_OFFSET,
/* Peripheral-access (per) clock node; ops missing from this listing. */
1221 static struct clk ipg_perclk = {
1222 __INIT_CLK_DEBUG(ipg_perclk)
/* SPBA bus gate (CCGR5/CG6). */
1226 static struct clk spba_clk = {
1227 __INIT_CLK_DEBUG(spba_clk)
1229 .enable_reg = MXC_CCM_CCGR5,
1230 .enable_shift = MXC_CCM_CCGRx_CG6_OFFSET,
1231 .enable = _clk_enable,
1232 .disable = _clk_disable,
/* SDMA engine gate (CCGR5/CG3). */
1235 static struct clk sdma_clk = {
1236 __INIT_CLK_DEBUG(sdma_clk)
1238 .enable_reg = MXC_CCM_CCGR5,
1239 .enable_shift = MXC_CCM_CCGRx_CG3_OFFSET,
1240 .enable = _clk_enable,
1241 .disable = _clk_disable,
/* GPU2D AXI source select in CBCMR: ahb_clk sets the bit, anything else
 * leaves it cleared (axi_clk). */
1244 static int _clk_gpu2d_axi_set_parent(struct clk *clk, struct clk *parent)
1246 u32 reg = __raw_readl(MXC_CCM_CBCMR) & MXC_CCM_CBCMR_GPU2D_AXI_CLK_SEL;
1248 if (parent == &ahb_clk)
1249 reg |= MXC_CCM_CBCMR_GPU2D_AXI_CLK_SEL;
1251 __raw_writel(reg, MXC_CCM_CBCMR);
1256 static struct clk gpu2d_axi_clk = {
1257 __INIT_CLK_DEBUG(gpu2d_axi_clk)
1259 .set_parent = _clk_gpu2d_axi_set_parent,
1260 .get_rate = _clk_axi_get_rate,
/* GPU3D AXI source select -- same pattern as GPU2D. */
1263 static int _clk_gpu3d_axi_set_parent(struct clk *clk, struct clk *parent)
1265 u32 reg = __raw_readl(MXC_CCM_CBCMR) & MXC_CCM_CBCMR_GPU3D_AXI_CLK_SEL;
1267 if (parent == &ahb_clk)
1268 reg |= MXC_CCM_CBCMR_GPU3D_AXI_CLK_SEL;
1270 __raw_writel(reg, MXC_CCM_CBCMR);
1275 static struct clk gpu3d_axi_clk = {
1276 __INIT_CLK_DEBUG(gpu3d_axi_clk)
1278 .set_parent = _clk_gpu3d_axi_set_parent,
1279 .get_rate = _clk_axi_get_rate,
/* PCIe AXI source select -- same pattern. */
1282 static int _clk_pcie_axi_set_parent(struct clk *clk, struct clk *parent)
1284 u32 reg = __raw_readl(MXC_CCM_CBCMR) & MXC_CCM_CBCMR_PCIE_AXI_CLK_SEL;
1286 if (parent == &ahb_clk)
1287 reg |= MXC_CCM_CBCMR_PCIE_AXI_CLK_SEL;
1289 __raw_writel(reg, MXC_CCM_CBCMR);
1294 static struct clk pcie_axi_clk = {
1295 __INIT_CLK_DEBUG(pcie_axi_clk)
1297 .set_parent = _clk_pcie_axi_set_parent,
1298 .get_rate = _clk_axi_get_rate,
/* VDO AXI source select -- same pattern, plus a CCGR6/CG6 gate. */
1301 static int _clk_vdo_axi_set_parent(struct clk *clk, struct clk *parent)
1303 u32 reg = __raw_readl(MXC_CCM_CBCMR) & MXC_CCM_CBCMR_VDOAXI_CLK_SEL;
1305 if (parent == &ahb_clk)
1306 reg |= MXC_CCM_CBCMR_VDOAXI_CLK_SEL;
1308 __raw_writel(reg, MXC_CCM_CBCMR);
1313 static struct clk vdo_axi_clk = {
1314 __INIT_CLK_DEBUG(vdo_axi_clk)
1316 .enable_reg = MXC_CCM_CCGR6,
1317 .enable_shift = MXC_CCM_CCGRx_CG6_OFFSET,
1318 .enable = _clk_enable,
1319 .disable = _clk_disable,
1320 .set_parent = _clk_vdo_axi_set_parent,
1321 .get_rate = _clk_axi_get_rate,
/* VDOA gate (CCGR2/CG13). */
1324 static struct clk vdoa_clk = {
1325 __INIT_CLK_DEBUG(vdoa_clk)
1328 .enable_reg = MXC_CCM_CCGR2,
1329 .enable_shift = MXC_CCM_CCGRx_CG13_OFFSET,
1330 .enable = _clk_enable,
1331 .disable = _clk_disable,
/* GPT: [0] = module clock off ipg_perclk (CCGR1/CG10), with the serial
 * clock [1] (CCGR1/CG11) as its secondary. */
1334 static struct clk gpt_clk[] = {
1336 __INIT_CLK_DEBUG(gpt_clk)
1337 .parent = &ipg_perclk,
1339 .enable_reg = MXC_CCM_CCGR1,
1340 .enable_shift = MXC_CCM_CCGRx_CG10_OFFSET,
1341 .enable = _clk_enable,
1342 .disable = _clk_disable,
1343 .secondary = &gpt_clk[1],
1346 __INIT_CLK_DEBUG(gpt_serial_clk)
1348 .enable_reg = MXC_CCM_CCGR1,
1349 .enable_shift = MXC_CCM_CCGRx_CG11_OFFSET,
1350 .enable = _clk_enable,
1351 .disable = _clk_disable,
/* IIM (fuse box) gate (CCGR2/CG6). */
1355 static struct clk iim_clk = {
1356 __INIT_CLK_DEBUG(iim_clk)
1358 .enable = _clk_enable,
1359 .enable_reg = MXC_CCM_CCGR2,
1360 .enable_shift = MXC_CCM_CCGRx_CG6_OFFSET,
1361 .disable = _clk_disable,
/* I2C1..3 gates off ipg_perclk (CCGR2/CG3..CG5). */
1364 static struct clk i2c_clk[] = {
1366 __INIT_CLK_DEBUG(i2c_clk_0)
1368 .parent = &ipg_perclk,
1369 .enable_reg = MXC_CCM_CCGR2,
1370 .enable_shift = MXC_CCM_CCGRx_CG3_OFFSET,
1371 .enable = _clk_enable,
1372 .disable = _clk_disable,
1375 __INIT_CLK_DEBUG(i2c_clk_1)
1377 .parent = &ipg_perclk,
1378 .enable_reg = MXC_CCM_CCGR2,
1379 .enable_shift = MXC_CCM_CCGRx_CG4_OFFSET,
1380 .enable = _clk_enable,
1381 .disable = _clk_disable,
1384 __INIT_CLK_DEBUG(i2c_clk_2)
1386 .parent = &ipg_perclk,
1387 .enable_reg = MXC_CCM_CCGR2,
1388 .enable_shift = MXC_CCM_CCGRx_CG5_OFFSET,
1389 .enable = _clk_enable,
1390 .disable = _clk_disable,
/* VPU AXI source select in CBCMR (axi_clk / 400M PFD / 352M PFD). */
1394 static int _clk_vpu_axi_set_parent(struct clk *clk, struct clk *parent)
1397 u32 reg = __raw_readl(MXC_CCM_CBCMR)
1398 & MXC_CCM_CBCMR_VPU_AXI_CLK_SEL_MASK;
1400 mux = _get_mux6(parent, &axi_clk, &pll2_pfd_400M,
1401 &pll2_pfd_352M, NULL, NULL, NULL);
1403 reg |= (mux << MXC_CCM_CBCMR_VPU_AXI_CLK_SEL_OFFSET);
1405 __raw_writel(reg, MXC_CCM_CBCMR);
/* VPU AXI rate = parent / (VPU_AXI_PODF + 1) from CSCDR1. */
1410 static unsigned long _clk_vpu_axi_get_rate(struct clk *clk)
1414 reg = __raw_readl(MXC_CCM_CSCDR1);
1415 div = ((reg & MXC_CCM_CSCDR1_VPU_AXI_PODF_MASK) >>
1416 MXC_CCM_CSCDR1_VPU_AXI_PODF_OFFSET) + 1;
1418 return clk_get_rate(clk->parent) / div;
/* Program VPU_AXI_PODF; exact divisions up to /8 only. */
1421 static int _clk_vpu_axi_set_rate(struct clk *clk, unsigned long rate)
1424 u32 parent_rate = clk_get_rate(clk->parent);
1426 div = parent_rate / rate;
1429 if (((parent_rate / div) != rate) || (div > 8))
1432 reg = __raw_readl(MXC_CCM_CSCDR1);
1433 reg &= ~MXC_CCM_CSCDR1_VPU_AXI_PODF_MASK;
1434 reg |= (div - 1) << MXC_CCM_CSCDR1_VPU_AXI_PODF_OFFSET;
1435 __raw_writel(reg, MXC_CCM_CSCDR1);
/* Round to nearest achievable rate (clamp lines missing here). */
1440 static unsigned long _clk_vpu_axi_round_rate(struct clk *clk,
1444 u32 parent_rate = clk_get_rate(clk->parent);
1446 div = parent_rate / rate;
1448 /* Make sure rate is not greater than the maximum value for the clock.
1449 * Also prevent a div of 0.
1457 return parent_rate / div;
/* VPU clock node (gate in CCGR6/CG7). */
1460 static struct clk vpu_clk = {
1461 __INIT_CLK_DEBUG(vpu_clk)
1463 .enable_reg = MXC_CCM_CCGR6,
1464 .enable_shift = MXC_CCM_CCGRx_CG7_OFFSET,
1465 .enable = _clk_enable,
1466 .disable = _clk_disable,
1467 .set_parent = _clk_vpu_axi_set_parent,
1468 .round_rate = _clk_vpu_axi_round_rate,
1469 .set_rate = _clk_vpu_axi_set_rate,
1470 .get_rate = _clk_vpu_axi_get_rate,
1473 static int _clk_ipu1_set_parent(struct clk *clk, struct clk *parent)
1476 u32 reg = __raw_readl(MXC_CCM_CSCDR3)
1477 & MXC_CCM_CSCDR3_IPU1_HSP_CLK_SEL_MASK;
1479 mux = _get_mux6(parent, &mmdc_ch0_axi_clk[0],
1480 &pll2_pfd_400M, &pll3_120M, &pll3_pfd_540M, NULL, NULL);
1482 reg |= (mux << MXC_CCM_CSCDR3_IPU1_HSP_CLK_SEL_OFFSET);
1484 __raw_writel(reg, MXC_CCM_CSCDR3);
/* IPU1 HSP rate: parent rate / (CSCDR3 IPU1_HSP_PODF + 1). */
1489 static unsigned long _clk_ipu1_get_rate(struct clk *clk)
1493 reg = __raw_readl(MXC_CCM_CSCDR3);
1494 div = ((reg & MXC_CCM_CSCDR3_IPU1_HSP_PODF_MASK) >>
1495 MXC_CCM_CSCDR3_IPU1_HSP_PODF_OFFSET) + 1;
1497 return clk_get_rate(clk->parent) / div;
/* Program IPU1 HSP divider: exact divisors up to 8 only, (div-1) encoded. */
1500 static int _clk_ipu1_set_rate(struct clk *clk, unsigned long rate)
1503 u32 parent_rate = clk_get_rate(clk->parent);
1505 div = parent_rate / rate;
1508 if (((parent_rate / div) != rate) || (div > 8))
1511 reg = __raw_readl(MXC_CCM_CSCDR3);
1512 reg &= ~MXC_CCM_CSCDR3_IPU1_HSP_PODF_MASK;
1513 reg |= (div - 1) << MXC_CCM_CSCDR3_IPU1_HSP_PODF_OFFSET;
1514 __raw_writel(reg, MXC_CCM_CSCDR3);
/* Shared IPU1/IPU2 round_rate: integer divider of the parent rate. */
1519 static unsigned long _clk_ipu_round_rate(struct clk *clk,
1523 u32 parent_rate = clk_get_rate(clk->parent);
1525 div = parent_rate / rate;
1527 /* Make sure rate is not greater than the maximum value for the clock.
1528 * Also prevent a div of 0.
1536 return parent_rate / div;
/* IPU1 HSP clock: default parent mmdc_ch0_axi, gate CCGR3/CG0. */
1539 static struct clk ipu1_clk = {
1540 __INIT_CLK_DEBUG(ipu1_clk)
1541 .parent = &mmdc_ch0_axi_clk[0],
1542 .enable_reg = MXC_CCM_CCGR3,
1543 .enable_shift = MXC_CCM_CCGRx_CG0_OFFSET,
1544 .enable = _clk_enable,
1545 .disable = _clk_disable,
1546 .set_parent = _clk_ipu1_set_parent,
1547 .round_rate = _clk_ipu_round_rate,
1548 .set_rate = _clk_ipu1_set_rate,
1549 .get_rate = _clk_ipu1_get_rate,
1552 static int _clk_ipu2_set_parent(struct clk *clk, struct clk *parent)
1555 u32 reg = __raw_readl(MXC_CCM_CSCDR3)
1556 & MXC_CCM_CSCDR3_IPU2_HSP_CLK_SEL_MASK;
1558 mux = _get_mux6(parent, &mmdc_ch0_axi_clk[0],
1559 &pll2_pfd_400M, &pll3_120M, &pll3_pfd_540M, NULL, NULL);
1561 reg |= (mux << MXC_CCM_CSCDR3_IPU2_HSP_CLK_SEL_OFFSET);
1563 __raw_writel(reg, MXC_CCM_CSCDR3);
/* IPU2 HSP rate: parent rate / (CSCDR3 IPU2_HSP_PODF + 1). */
1568 static unsigned long _clk_ipu2_get_rate(struct clk *clk)
1572 reg = __raw_readl(MXC_CCM_CSCDR3);
1573 div = ((reg & MXC_CCM_CSCDR3_IPU2_HSP_PODF_MASK) >>
1574 MXC_CCM_CSCDR3_IPU2_HSP_PODF_OFFSET) + 1;
1576 return clk_get_rate(clk->parent) / div;
/* Program IPU2 HSP divider: exact divisors up to 8 only, (div-1) encoded. */
1579 static int _clk_ipu2_set_rate(struct clk *clk, unsigned long rate)
1582 u32 parent_rate = clk_get_rate(clk->parent);
1584 div = parent_rate / rate;
1587 if (((parent_rate / div) != rate) || (div > 8))
1590 reg = __raw_readl(MXC_CCM_CSCDR3);
1591 reg &= ~MXC_CCM_CSCDR3_IPU2_HSP_PODF_MASK;
1592 reg |= (div - 1) << MXC_CCM_CSCDR3_IPU2_HSP_PODF_OFFSET;
1593 __raw_writel(reg, MXC_CCM_CSCDR3);
/* IPU2 HSP clock: default parent mmdc_ch0_axi, gate CCGR3/CG3. */
1598 static struct clk ipu2_clk = {
1599 __INIT_CLK_DEBUG(ipu2_clk)
1600 .parent = &mmdc_ch0_axi_clk[0],
1601 .enable_reg = MXC_CCM_CCGR3,
1602 .enable_shift = MXC_CCM_CCGRx_CG3_OFFSET,
1603 .enable = _clk_enable,
1604 .disable = _clk_disable,
1605 .set_parent = _clk_ipu2_set_parent,
1606 .round_rate = _clk_ipu_round_rate,
1607 .set_rate = _clk_ipu2_set_rate,
1608 .get_rate = _clk_ipu2_get_rate,
/* Shared uSDHC1-4 round_rate: integer divider of the parent rate. */
1611 static unsigned long _clk_usdhc_round_rate(struct clk *clk,
1615 u32 parent_rate = clk_get_rate(clk->parent);
1617 div = parent_rate / rate;
1619 /* Make sure rate is not greater than the maximum value for the clock.
1620 * Also prevent a div of 0.
1628 return parent_rate / div;
1631 static int _clk_usdhc1_set_parent(struct clk *clk, struct clk *parent)
1633 u32 reg = __raw_readl(MXC_CCM_CSCMR1) & MXC_CCM_CSCMR1_USDHC1_CLK_SEL;
1635 if (parent == &pll2_pfd_352M)
1636 reg |= (MXC_CCM_CSCMR1_USDHC1_CLK_SEL);
1638 __raw_writel(reg, MXC_CCM_CSCMR1);
/* uSDHC1 rate: parent rate / (CSCDR1 USDHC1_PODF + 1). */
1643 static unsigned long _clk_usdhc1_get_rate(struct clk *clk)
1647 reg = __raw_readl(MXC_CCM_CSCDR1);
1648 div = ((reg & MXC_CCM_CSCDR1_USDHC1_PODF_MASK) >>
1649 MXC_CCM_CSCDR1_USDHC1_PODF_OFFSET) + 1;
1651 return clk_get_rate(clk->parent) / div;
/* Program uSDHC1 divider: exact divisors up to 8 only, (div-1) encoded. */
1654 static int _clk_usdhc1_set_rate(struct clk *clk, unsigned long rate)
1657 u32 parent_rate = clk_get_rate(clk->parent);
1659 div = parent_rate / rate;
1662 if (((parent_rate / div) != rate) || (div > 8))
1665 reg = __raw_readl(MXC_CCM_CSCDR1);
1666 reg &= ~MXC_CCM_CSCDR1_USDHC1_PODF_MASK;
1667 reg |= (div - 1) << MXC_CCM_CSCDR1_USDHC1_PODF_OFFSET;
1668 __raw_writel(reg, MXC_CCM_CSCDR1);
/* uSDHC1 clock: default parent pll2_pfd_400M, gate CCGR6/CG1. */
1673 static struct clk usdhc1_clk = {
1674 __INIT_CLK_DEBUG(usdhc1_clk)
1676 .parent = &pll2_pfd_400M,
1677 .enable_reg = MXC_CCM_CCGR6,
1678 .enable_shift = MXC_CCM_CCGRx_CG1_OFFSET,
1679 .enable = _clk_enable,
1680 .disable = _clk_disable,
1681 .set_parent = _clk_usdhc1_set_parent,
1682 .round_rate = _clk_usdhc_round_rate,
1683 .set_rate = _clk_usdhc1_set_rate,
1684 .get_rate = _clk_usdhc1_get_rate,
1687 static int _clk_usdhc2_set_parent(struct clk *clk, struct clk *parent)
1689 u32 reg = __raw_readl(MXC_CCM_CSCMR1) & MXC_CCM_CSCMR1_USDHC2_CLK_SEL;
1691 if (parent == &pll2_pfd_352M)
1692 reg |= (MXC_CCM_CSCMR1_USDHC2_CLK_SEL);
1694 __raw_writel(reg, MXC_CCM_CSCMR1);
/* uSDHC2 rate: parent rate / (CSCDR1 USDHC2_PODF + 1). */
1699 static unsigned long _clk_usdhc2_get_rate(struct clk *clk)
1703 reg = __raw_readl(MXC_CCM_CSCDR1);
1704 div = ((reg & MXC_CCM_CSCDR1_USDHC2_PODF_MASK) >>
1705 MXC_CCM_CSCDR1_USDHC2_PODF_OFFSET) + 1;
1707 return clk_get_rate(clk->parent) / div;
/* Program uSDHC2 divider: exact divisors up to 8 only, (div-1) encoded. */
1710 static int _clk_usdhc2_set_rate(struct clk *clk, unsigned long rate)
1713 u32 parent_rate = clk_get_rate(clk->parent);
1715 div = parent_rate / rate;
1718 if (((parent_rate / div) != rate) || (div > 8))
1721 reg = __raw_readl(MXC_CCM_CSCDR1);
1722 reg &= ~MXC_CCM_CSCDR1_USDHC2_PODF_MASK;
1723 reg |= (div - 1) << MXC_CCM_CSCDR1_USDHC2_PODF_OFFSET;
1724 __raw_writel(reg, MXC_CCM_CSCDR1);
/* uSDHC2 clock: default parent pll2_pfd_400M, gate CCGR6/CG2. */
1729 static struct clk usdhc2_clk = {
1730 __INIT_CLK_DEBUG(usdhc2_clk)
1732 .parent = &pll2_pfd_400M,
1733 .enable_reg = MXC_CCM_CCGR6,
1734 .enable_shift = MXC_CCM_CCGRx_CG2_OFFSET,
1735 .enable = _clk_enable,
1736 .disable = _clk_disable,
1737 .set_parent = _clk_usdhc2_set_parent,
1738 .round_rate = _clk_usdhc_round_rate,
1739 .set_rate = _clk_usdhc2_set_rate,
1740 .get_rate = _clk_usdhc2_get_rate,
1743 static int _clk_usdhc3_set_parent(struct clk *clk, struct clk *parent)
1745 u32 reg = __raw_readl(MXC_CCM_CSCMR1) & MXC_CCM_CSCMR1_USDHC3_CLK_SEL;
1747 if (parent == &pll2_pfd_352M)
1748 reg |= (MXC_CCM_CSCMR1_USDHC3_CLK_SEL);
1750 __raw_writel(reg, MXC_CCM_CSCMR1);
/* uSDHC3 rate: parent rate / (CSCDR1 USDHC3_PODF + 1). */
1755 static unsigned long _clk_usdhc3_get_rate(struct clk *clk)
1759 reg = __raw_readl(MXC_CCM_CSCDR1);
1760 div = ((reg & MXC_CCM_CSCDR1_USDHC3_PODF_MASK) >>
1761 MXC_CCM_CSCDR1_USDHC3_PODF_OFFSET) + 1;
1763 return clk_get_rate(clk->parent) / div;
/* Program uSDHC3 divider: exact divisors up to 8 only, (div-1) encoded. */
1766 static int _clk_usdhc3_set_rate(struct clk *clk, unsigned long rate)
1769 u32 parent_rate = clk_get_rate(clk->parent);
1771 div = parent_rate / rate;
1774 if (((parent_rate / div) != rate) || (div > 8))
1777 reg = __raw_readl(MXC_CCM_CSCDR1);
1778 reg &= ~MXC_CCM_CSCDR1_USDHC3_PODF_MASK;
1779 reg |= (div - 1) << MXC_CCM_CSCDR1_USDHC3_PODF_OFFSET;
1780 __raw_writel(reg, MXC_CCM_CSCDR1);
/* uSDHC3 clock: default parent pll2_pfd_400M, gate CCGR6/CG3. */
1786 static struct clk usdhc3_clk = {
1787 __INIT_CLK_DEBUG(usdhc3_clk)
1789 .parent = &pll2_pfd_400M,
1790 .enable_reg = MXC_CCM_CCGR6,
1791 .enable_shift = MXC_CCM_CCGRx_CG3_OFFSET,
1792 .enable = _clk_enable,
1793 .disable = _clk_disable,
1794 .set_parent = _clk_usdhc3_set_parent,
1795 .round_rate = _clk_usdhc_round_rate,
1796 .set_rate = _clk_usdhc3_set_rate,
1797 .get_rate = _clk_usdhc3_get_rate,
1800 static int _clk_usdhc4_set_parent(struct clk *clk, struct clk *parent)
1802 u32 reg = __raw_readl(MXC_CCM_CSCMR1) & MXC_CCM_CSCMR1_USDHC4_CLK_SEL;
1804 if (parent == &pll2_pfd_352M)
1805 reg |= (MXC_CCM_CSCMR1_USDHC4_CLK_SEL);
1807 __raw_writel(reg, MXC_CCM_CSCMR1);
/* uSDHC4 rate: parent rate / (CSCDR1 USDHC4_PODF + 1). */
1812 static unsigned long _clk_usdhc4_get_rate(struct clk *clk)
1816 reg = __raw_readl(MXC_CCM_CSCDR1);
1817 div = ((reg & MXC_CCM_CSCDR1_USDHC4_PODF_MASK) >>
1818 MXC_CCM_CSCDR1_USDHC4_PODF_OFFSET) + 1;
1820 return clk_get_rate(clk->parent) / div;
/* Program uSDHC4 divider: exact divisors up to 8 only, (div-1) encoded. */
1823 static int _clk_usdhc4_set_rate(struct clk *clk, unsigned long rate)
1826 u32 parent_rate = clk_get_rate(clk->parent);
1828 div = parent_rate / rate;
1831 if (((parent_rate / div) != rate) || (div > 8))
1834 reg = __raw_readl(MXC_CCM_CSCDR1);
1835 reg &= ~MXC_CCM_CSCDR1_USDHC4_PODF_MASK;
1836 reg |= (div - 1) << MXC_CCM_CSCDR1_USDHC4_PODF_OFFSET;
1837 __raw_writel(reg, MXC_CCM_CSCDR1);
/* uSDHC4 clock: default parent pll2_pfd_400M, gate CCGR6/CG4. */
1843 static struct clk usdhc4_clk = {
1844 __INIT_CLK_DEBUG(usdhc4_clk)
1846 .parent = &pll2_pfd_400M,
1847 .enable_reg = MXC_CCM_CCGR6,
1848 .enable_shift = MXC_CCM_CCGRx_CG4_OFFSET,
1849 .enable = _clk_enable,
1850 .disable = _clk_disable,
1851 .set_parent = _clk_usdhc4_set_parent,
1852 .round_rate = _clk_usdhc_round_rate,
1853 .set_rate = _clk_usdhc4_set_rate,
1854 .get_rate = _clk_usdhc4_get_rate,
/*
 * Shared SSI round_rate: only exact divisors of the parent are accepted;
 * the divider is split into pre/post stages by __calc_pre_post_dividers().
 */
1857 static unsigned long _clk_ssi_round_rate(struct clk *clk,
1861 u32 parent_rate = clk_get_rate(clk->parent);
1862 u32 div = parent_rate / rate;
1864 if (parent_rate % rate)
1867 __calc_pre_post_dividers(div, &pre, &post);
1869 return parent_rate / (pre * post);
/* SSI1 rate: parent / (CS1CDR SSI1 pred * podf), both fields +1 encoded. */
1872 static unsigned long _clk_ssi1_get_rate(struct clk *clk)
1874 u32 reg, prediv, podf;
1876 reg = __raw_readl(MXC_CCM_CS1CDR);
1878 prediv = ((reg & MXC_CCM_CS1CDR_SSI1_CLK_PRED_MASK)
1879 >> MXC_CCM_CS1CDR_SSI1_CLK_PRED_OFFSET) + 1;
1880 podf = ((reg & MXC_CCM_CS1CDR_SSI1_CLK_PODF_MASK)
1881 >> MXC_CCM_CS1CDR_SSI1_CLK_PODF_OFFSET) + 1;
1883 return clk_get_rate(clk->parent) / (prediv * podf);
/* Program SSI1 pred/podf from 'div' (exact divisors up to 512); fields are (x-1) encoded. */
1886 static int _clk_ssi1_set_rate(struct clk *clk, unsigned long rate)
1888 u32 reg, div, pre, post;
1889 u32 parent_rate = clk_get_rate(clk->parent);
1891 div = parent_rate / rate;
1894 if (((parent_rate / div) != rate) || div > 512)
1897 __calc_pre_post_dividers(div, &pre, &post);
1899 reg = __raw_readl(MXC_CCM_CS1CDR);
1900 reg &= ~(MXC_CCM_CS1CDR_SSI1_CLK_PRED_MASK |
1901 MXC_CCM_CS1CDR_SSI1_CLK_PODF_MASK);
1902 reg |= (post - 1) << MXC_CCM_CS1CDR_SSI1_CLK_PODF_OFFSET;
1903 reg |= (pre - 1) << MXC_CCM_CS1CDR_SSI1_CLK_PRED_OFFSET;
1905 __raw_writel(reg, MXC_CCM_CS1CDR);
1911 static int _clk_ssi1_set_parent(struct clk *clk, struct clk *parent)
1915 reg = __raw_readl(MXC_CCM_CSCMR1)
1916 & MXC_CCM_CSCMR1_SSI1_CLK_SEL_MASK;
1918 mux = _get_mux6(parent, &pll3_pfd_508M, &pll3_pfd_454M,
1919 &pll4_audio_main_clk, NULL, NULL, NULL);
1920 reg |= (mux << MXC_CCM_CSCMR1_SSI1_CLK_SEL_OFFSET);
1922 __raw_writel(reg, MXC_CCM_CSCMR1);
/* SSI1 clock: default parent pll3_pfd_508M, gate CCGR5/CG9. */
1927 static struct clk ssi1_clk = {
1928 __INIT_CLK_DEBUG(ssi1_clk)
1929 .parent = &pll3_pfd_508M,
1930 .enable_reg = MXC_CCM_CCGR5,
1931 .enable_shift = MXC_CCM_CCGRx_CG9_OFFSET,
1932 .enable = _clk_enable,
1933 .disable = _clk_disable,
1934 .set_parent = _clk_ssi1_set_parent,
1935 .set_rate = _clk_ssi1_set_rate,
1936 .round_rate = _clk_ssi_round_rate,
1937 .get_rate = _clk_ssi1_get_rate,
/* SSI2 rate: parent / (CS2CDR SSI2 pred * podf), both fields +1 encoded. */
1940 static unsigned long _clk_ssi2_get_rate(struct clk *clk)
1942 u32 reg, prediv, podf;
1944 reg = __raw_readl(MXC_CCM_CS2CDR);
1946 prediv = ((reg & MXC_CCM_CS2CDR_SSI2_CLK_PRED_MASK)
1947 >> MXC_CCM_CS2CDR_SSI2_CLK_PRED_OFFSET) + 1;
1948 podf = ((reg & MXC_CCM_CS2CDR_SSI2_CLK_PODF_MASK)
1949 >> MXC_CCM_CS2CDR_SSI2_CLK_PODF_OFFSET) + 1;
1951 return clk_get_rate(clk->parent) / (prediv * podf);
/* Program SSI2 pred/podf from 'div' (exact divisors up to 512); fields are (x-1) encoded. */
1954 static int _clk_ssi2_set_rate(struct clk *clk, unsigned long rate)
1956 u32 reg, div, pre, post;
1957 u32 parent_rate = clk_get_rate(clk->parent);
1959 div = parent_rate / rate;
1962 if (((parent_rate / div) != rate) || div > 512)
1965 __calc_pre_post_dividers(div, &pre, &post);
1967 reg = __raw_readl(MXC_CCM_CS2CDR);
1968 reg &= ~(MXC_CCM_CS2CDR_SSI2_CLK_PRED_MASK |
1969 MXC_CCM_CS2CDR_SSI2_CLK_PODF_MASK);
1970 reg |= (post - 1) << MXC_CCM_CS2CDR_SSI2_CLK_PODF_OFFSET;
1971 reg |= (pre - 1) << MXC_CCM_CS2CDR_SSI2_CLK_PRED_OFFSET;
1973 __raw_writel(reg, MXC_CCM_CS2CDR);
1979 static int _clk_ssi2_set_parent(struct clk *clk, struct clk *parent)
1983 reg = __raw_readl(MXC_CCM_CSCMR1)
1984 & MXC_CCM_CSCMR1_SSI2_CLK_SEL_MASK;
1986 mux = _get_mux6(parent, &pll3_pfd_508M, &pll3_pfd_454M,
1987 &pll4_audio_main_clk, NULL, NULL, NULL);
1988 reg |= (mux << MXC_CCM_CSCMR1_SSI2_CLK_SEL_OFFSET);
1990 __raw_writel(reg, MXC_CCM_CSCMR1);
/* SSI2 clock: default parent pll3_pfd_508M, gate CCGR5/CG10. */
1995 static struct clk ssi2_clk = {
1996 __INIT_CLK_DEBUG(ssi2_clk)
1997 .parent = &pll3_pfd_508M,
1998 .enable_reg = MXC_CCM_CCGR5,
1999 .enable_shift = MXC_CCM_CCGRx_CG10_OFFSET,
2000 .enable = _clk_enable,
2001 .disable = _clk_disable,
2002 .set_parent = _clk_ssi2_set_parent,
2003 .set_rate = _clk_ssi2_set_rate,
2004 .round_rate = _clk_ssi_round_rate,
2005 .get_rate = _clk_ssi2_get_rate,
2008 static unsigned long _clk_ssi3_get_rate(struct clk *clk)
2010 u32 reg, prediv, podf;
2012 reg = __raw_readl(MXC_CCM_CS1CDR);
2014 prediv = ((reg & MXC_CCM_CS1CDR_SSI1_CLK_PRED_MASK)
2015 >> MXC_CCM_CS1CDR_SSI1_CLK_PRED_OFFSET) + 1;
2016 podf = ((reg & MXC_CCM_CS1CDR_SSI1_CLK_PODF_MASK)
2017 >> MXC_CCM_CS1CDR_SSI1_CLK_PODF_OFFSET) + 1;
2019 return clk_get_rate(clk->parent) / (prediv * podf);
/* Program SSI3 pred/podf in CS1CDR (exact divisors up to 512); (x-1) encoded. */
2022 static int _clk_ssi3_set_rate(struct clk *clk, unsigned long rate)
2024 u32 reg, div, pre, post;
2025 u32 parent_rate = clk_get_rate(clk->parent);
2027 div = parent_rate / rate;
2030 if (((parent_rate / div) != rate) || div > 512)
2033 __calc_pre_post_dividers(div, &pre, &post);
2035 reg = __raw_readl(MXC_CCM_CS1CDR);
2036 reg &= ~(MXC_CCM_CS1CDR_SSI3_CLK_PODF_MASK|
2037 MXC_CCM_CS1CDR_SSI3_CLK_PRED_MASK);
2038 reg |= (post - 1) << MXC_CCM_CS1CDR_SSI3_CLK_PODF_OFFSET;
2039 reg |= (pre - 1) << MXC_CCM_CS1CDR_SSI3_CLK_PRED_OFFSET;
2041 __raw_writel(reg, MXC_CCM_CS1CDR);
2047 static int _clk_ssi3_set_parent(struct clk *clk, struct clk *parent)
2051 reg = __raw_readl(MXC_CCM_CSCMR1) & MXC_CCM_CSCMR1_SSI3_CLK_SEL_MASK;
2053 mux = _get_mux6(parent, &pll3_pfd_508M, &pll3_pfd_454M,
2054 &pll4_audio_main_clk, NULL, NULL, NULL);
2055 reg |= (mux << MXC_CCM_CSCMR1_SSI3_CLK_SEL_OFFSET);
2057 __raw_writel(reg, MXC_CCM_CSCMR1);
/* SSI3 clock: default parent pll3_pfd_508M, gate CCGR5/CG11. */
2062 static struct clk ssi3_clk = {
2063 __INIT_CLK_DEBUG(ssi3_clk)
2064 .parent = &pll3_pfd_508M,
2065 .enable_reg = MXC_CCM_CCGR5,
2066 .enable_shift = MXC_CCM_CCGRx_CG11_OFFSET,
2067 .enable = _clk_enable,
2068 .disable = _clk_disable,
2069 .set_parent = _clk_ssi3_set_parent,
2070 .set_rate = _clk_ssi3_set_rate,
2071 .round_rate = _clk_ssi_round_rate,
2072 .get_rate = _clk_ssi3_get_rate,
/*
 * LDB DI clocks only support parent/7 or 2*parent/7 (LVDS serializer).
 * A 5% tolerance (parent_rate/20) is allowed when choosing parent/7.
 */
2075 static unsigned long _clk_ldb_di_round_rate(struct clk *clk,
2078 u32 parent_rate = clk_get_rate(clk->parent);
2080 if (rate * 7 <= parent_rate + parent_rate/20)
2081 return parent_rate / 7;
2083 return 2 * parent_rate / 7;
/* LDB DI0 rate: parent/7 or 2*parent/7 depending on CSCMR2 LDB_DI0_IPU_DIV. */
2086 static unsigned long _clk_ldb_di0_get_rate(struct clk *clk)
2090 div = __raw_readl(MXC_CCM_CSCMR2) &
2091 MXC_CCM_CSCMR2_LDB_DI0_IPU_DIV;
2094 return clk_get_rate(clk->parent) / 7;
2096 return (2 * clk_get_rate(clk->parent)) / 7;
/* Choose parent/7 vs 2*parent/7 (5% tolerance) and set CSCMR2 LDB_DI0_IPU_DIV. */
2099 static int _clk_ldb_di0_set_rate(struct clk *clk, unsigned long rate)
2102 u32 parent_rate = clk_get_rate(clk->parent);
2104 if (rate * 7 <= parent_rate + parent_rate/20) {
2106 rate = parent_rate / 7;
2108 rate = 2 * parent_rate / 7;
2110 reg = __raw_readl(MXC_CCM_CSCMR2);
2112 reg |= MXC_CCM_CSCMR2_LDB_DI0_IPU_DIV;
2114 reg &= ~MXC_CCM_CSCMR2_LDB_DI0_IPU_DIV;
2116 __raw_writel(reg, MXC_CCM_CSCMR2);
2121 static int _clk_ldb_di0_set_parent(struct clk *clk, struct clk *parent)
2125 reg = __raw_readl(MXC_CCM_CS2CDR)
2126 & MXC_CCM_CS2CDR_LDB_DI0_CLK_SEL_MASK;
2128 mux = _get_mux6(parent, &pll5_video_main_clk,
2129 &pll2_pfd_352M, &pll2_pfd_400M, &pll3_pfd_540M,
2130 &pll3_usb_otg_main_clk, NULL);
2131 reg |= (mux << MXC_CCM_CS2CDR_LDB_DI0_CLK_SEL_OFFSET);
2133 __raw_writel(reg, MXC_CCM_CS2CDR);
/* LDB DI0 clock: default parent pll3_pfd_540M, gate CCGR3/CG13. */
2138 static struct clk ldb_di0_clk = {
2139 __INIT_CLK_DEBUG(ldb_di0_clk)
2141 .parent = &pll3_pfd_540M,
2142 .enable_reg = MXC_CCM_CCGR3,
2143 .enable_shift = MXC_CCM_CCGRx_CG13_OFFSET,
2144 .enable = _clk_enable,
2145 .disable = _clk_disable,
2146 .set_parent = _clk_ldb_di0_set_parent,
2147 .set_rate = _clk_ldb_di0_set_rate,
2148 .round_rate = _clk_ldb_di_round_rate,
2149 .get_rate = _clk_ldb_di0_get_rate,
/* LDB DI1 rate: parent/7 or 2*parent/7 depending on CSCMR2 LDB_DI1_IPU_DIV. */
2152 static unsigned long _clk_ldb_di1_get_rate(struct clk *clk)
2156 div = __raw_readl(MXC_CCM_CSCMR2) &
2157 MXC_CCM_CSCMR2_LDB_DI1_IPU_DIV;
2160 return clk_get_rate(clk->parent) / 7;
2162 return (2 * clk_get_rate(clk->parent)) / 7;
/* Choose parent/7 vs 2*parent/7 (5% tolerance) and set CSCMR2 LDB_DI1_IPU_DIV. */
2165 static int _clk_ldb_di1_set_rate(struct clk *clk, unsigned long rate)
2168 u32 parent_rate = clk_get_rate(clk->parent);
2170 if (rate * 7 <= parent_rate + parent_rate/20) {
2172 rate = parent_rate / 7;
2174 rate = 2 * parent_rate / 7;
2176 reg = __raw_readl(MXC_CCM_CSCMR2);
2178 reg |= MXC_CCM_CSCMR2_LDB_DI1_IPU_DIV;
2180 reg &= ~MXC_CCM_CSCMR2_LDB_DI1_IPU_DIV;
2182 __raw_writel(reg, MXC_CCM_CSCMR2);
2187 static int _clk_ldb_di1_set_parent(struct clk *clk, struct clk *parent)
2191 reg = __raw_readl(MXC_CCM_CS2CDR)
2192 & MXC_CCM_CS2CDR_LDB_DI1_CLK_SEL_MASK;
2194 mux = _get_mux6(parent, &pll5_video_main_clk,
2195 &pll2_pfd_352M, &pll2_pfd_400M, &pll3_pfd_540M,
2196 &pll3_usb_otg_main_clk, NULL);
2197 reg |= (mux << MXC_CCM_CS2CDR_LDB_DI1_CLK_SEL_OFFSET);
2199 __raw_writel(reg, MXC_CCM_CS2CDR);
/* LDB DI1 clock: default parent pll3_pfd_540M, gate CCGR3/CG14. */
2204 static struct clk ldb_di1_clk = {
2205 __INIT_CLK_DEBUG(ldb_di1_clk)
2207 .parent = &pll3_pfd_540M,
2208 .enable_reg = MXC_CCM_CCGR3,
2209 .enable_shift = MXC_CCM_CCGRx_CG14_OFFSET,
2210 .enable = _clk_enable,
2211 .disable = _clk_disable,
2212 .set_parent = _clk_ldb_di1_set_parent,
2213 .set_rate = _clk_ldb_di1_set_rate,
2214 .round_rate = _clk_ldb_di_round_rate,
2215 .get_rate = _clk_ldb_di1_get_rate,
/*
 * Shared IPU DI round_rate.  When sourced from an LDB clock the rate is
 * fixed by the LDB divider (the early-out branch); otherwise it is an
 * integer divider of the parent.
 */
2219 static unsigned long _clk_ipu_di_round_rate(struct clk *clk,
2223 u32 parent_rate = clk_get_rate(clk->parent);
2225 if ((clk->parent == &ldb_di0_clk) ||
2226 (clk->parent == &ldb_di1_clk))
2229 div = parent_rate / rate;
2231 /* Make sure rate is not greater than the maximum value for the clock.
2232 * Also prevent a div of 0.
2240 return parent_rate / div;
/* IPU1 DI0 rate: parent rate when LDB-sourced, else parent / (PODF + 1). */
2243 static unsigned long _clk_ipu1_di0_get_rate(struct clk *clk)
2247 if ((clk->parent == &ldb_di0_clk) ||
2248 (clk->parent == &ldb_di1_clk))
2249 return clk_get_rate(clk->parent);
2251 reg = __raw_readl(MXC_CCM_CHSCCDR);
2253 div = ((reg & MXC_CCM_CHSCCDR_IPU1_DI0_PODF_MASK) >>
2254 MXC_CCM_CHSCCDR_IPU1_DI0_PODF_OFFSET) + 1;
2256 return clk_get_rate(clk->parent) / div;
/*
 * Set IPU1 DI0 rate.  LDB parents are fixed-rate (only accept an exact
 * match); otherwise program the CHSCCDR PODF (exact divisors up to 8).
 */
2259 static int _clk_ipu1_di0_set_rate(struct clk *clk, unsigned long rate)
2262 u32 parent_rate = clk_get_rate(clk->parent);
2264 if ((clk->parent == &ldb_di0_clk) ||
2265 (clk->parent == &ldb_di1_clk)) {
2266 if (parent_rate == rate)
2272 div = parent_rate / rate;
2275 if (((parent_rate / div) != rate) || (div > 8))
2278 reg = __raw_readl(MXC_CCM_CHSCCDR);
2279 reg &= ~MXC_CCM_CHSCCDR_IPU1_DI0_PODF_MASK;
2280 reg |= (div - 1) << MXC_CCM_CHSCCDR_IPU1_DI0_PODF_OFFSET;
2281 __raw_writel(reg, MXC_CCM_CHSCCDR);
/*
 * Select the IPU1 DI0 source.  LDB parents are routed directly through
 * the final CLK_SEL mux (early branches); other parents go through the
 * pre-mux, whose divided output then feeds the final mux.
 */
2287 static int _clk_ipu1_di0_set_parent(struct clk *clk, struct clk *parent)
2291 if (parent == &ldb_di0_clk)
2293 else if (parent == &ldb_di1_clk)
2296 reg = __raw_readl(MXC_CCM_CHSCCDR)
2297 & ~MXC_CCM_CHSCCDR_IPU1_DI0_PRE_CLK_SEL_MASK;
2299 mux = _get_mux6(parent, &mmdc_ch0_axi_clk[0],
2300 &pll3_usb_otg_main_clk, &pll5_video_main_clk,
2301 &pll2_pfd_352M, &pll2_pfd_400M, &pll3_pfd_540M);
2302 reg |= (mux << MXC_CCM_CHSCCDR_IPU1_DI0_PRE_CLK_SEL_OFFSET);
2304 __raw_writel(reg, MXC_CCM_CHSCCDR);
2306 /* Derive clock from divided pre-muxed ipu1_di0 clock.*/
2310 reg = __raw_readl(MXC_CCM_CHSCCDR)
2311 & ~MXC_CCM_CHSCCDR_IPU1_DI0_CLK_SEL_MASK;
2312 __raw_writel(reg | (mux << MXC_CCM_CHSCCDR_IPU1_DI0_CLK_SEL_OFFSET),
/*
 * IPU1 DI1 rate: parent rate when LDB-sourced, else parent / (PODF + 1).
 * NOTE(review): the masked PODF value is used without shifting by the
 * PODF offset (compare _clk_ipu1_di0_get_rate) — correct only if the
 * field starts at bit 0; confirm against the CHSCCDR layout.
 */
2318 static unsigned long _clk_ipu1_di1_get_rate(struct clk *clk)
2322 if ((clk->parent == &ldb_di0_clk) ||
2323 (clk->parent == &ldb_di1_clk))
2324 return clk_get_rate(clk->parent);
2326 reg = __raw_readl(MXC_CCM_CHSCCDR);
2328 div = (reg & MXC_CCM_CHSCCDR_IPU1_DI1_PODF_MASK) + 1;
2330 return clk_get_rate(clk->parent) / div;
/*
 * Set IPU1 DI1 rate.  LDB parents are fixed-rate (only accept an exact
 * match); otherwise program the CHSCCDR PODF (exact divisors up to 8).
 */
2333 static int _clk_ipu1_di1_set_rate(struct clk *clk, unsigned long rate)
2336 u32 parent_rate = clk_get_rate(clk->parent);
2338 if ((clk->parent == &ldb_di0_clk) ||
2339 (clk->parent == &ldb_di1_clk)) {
2340 if (parent_rate == rate)
2346 div = parent_rate / rate;
2349 if (((parent_rate / div) != rate) || (div > 8))
2352 reg = __raw_readl(MXC_CCM_CHSCCDR);
2353 reg &= ~MXC_CCM_CHSCCDR_IPU1_DI1_PODF_MASK;
2354 reg |= (div - 1) << MXC_CCM_CHSCCDR_IPU1_DI1_PODF_OFFSET;
2355 __raw_writel(reg, MXC_CCM_CHSCCDR);
/*
 * Select the IPU1 DI1 source.  LDB parents are routed directly through
 * the final CLK_SEL mux; other parents go through the pre-mux, whose
 * divided output then feeds the final mux.
 */
2361 static int _clk_ipu1_di1_set_parent(struct clk *clk, struct clk *parent)
2365 if (parent == &ldb_di0_clk)
2367 else if (parent == &ldb_di1_clk)
2370 reg = __raw_readl(MXC_CCM_CHSCCDR)
2371 & ~MXC_CCM_CHSCCDR_IPU1_DI1_PRE_CLK_SEL_MASK;
2373 mux = _get_mux6(parent, &mmdc_ch0_axi_clk[0],
2374 &pll3_usb_otg_main_clk, &pll5_video_main_clk,
2375 &pll2_pfd_352M, &pll2_pfd_400M, &pll3_pfd_540M);
2376 reg |= (mux << MXC_CCM_CHSCCDR_IPU1_DI1_PRE_CLK_SEL_OFFSET);
2378 __raw_writel(reg, MXC_CCM_CHSCCDR);
2380 /* Derive clock from divided pre-muxed ipu1_di1 clock.*/
2383 reg = __raw_readl(MXC_CCM_CHSCCDR)
2384 & ~MXC_CCM_CHSCCDR_IPU1_DI1_CLK_SEL_MASK;
2385 __raw_writel(reg | (mux << MXC_CCM_CHSCCDR_IPU1_DI1_CLK_SEL_OFFSET),
/* IPU1 DI0/DI1 display-interface clocks: gates CCGR3/CG1 and CCGR3/CG2. */
2391 static struct clk ipu1_di_clk[] = {
2393 __INIT_CLK_DEBUG(ipu1_di_clk_0)
2395 .parent = &pll3_pfd_540M,
2396 .enable_reg = MXC_CCM_CCGR3,
2397 .enable_shift = MXC_CCM_CCGRx_CG1_OFFSET,
2398 .enable = _clk_enable,
2399 .disable = _clk_disable,
2400 .set_parent = _clk_ipu1_di0_set_parent,
2401 .set_rate = _clk_ipu1_di0_set_rate,
2402 .round_rate = _clk_ipu_di_round_rate,
2403 .get_rate = _clk_ipu1_di0_get_rate,
2406 __INIT_CLK_DEBUG(ipu1_di_clk_1)
2408 .parent = &pll3_pfd_540M,
2409 .enable_reg = MXC_CCM_CCGR3,
2410 .enable_shift = MXC_CCM_CCGRx_CG2_OFFSET,
2411 .enable = _clk_enable,
2412 .disable = _clk_disable,
2413 .set_parent = _clk_ipu1_di1_set_parent,
2414 .set_rate = _clk_ipu1_di1_set_rate,
2415 .round_rate = _clk_ipu_di_round_rate,
2416 .get_rate = _clk_ipu1_di1_get_rate,
/*
 * IPU2 DI0 rate: parent rate when LDB-sourced, else parent / (PODF + 1).
 * NOTE(review): PODF mask used without an offset shift — correct only if
 * the field starts at bit 0; confirm against the CHSCCDR layout.
 */
2420 static unsigned long _clk_ipu2_di0_get_rate(struct clk *clk)
2424 if ((clk->parent == &ldb_di0_clk) ||
2425 (clk->parent == &ldb_di1_clk))
2426 return clk_get_rate(clk->parent);
2428 reg = __raw_readl(MXC_CCM_CHSCCDR);
2430 div = (reg & MXC_CCM_CHSCCDR_IPU2_DI0_PODF_MASK) + 1;
2432 return clk_get_rate(clk->parent) / div;
/*
 * Set IPU2 DI0 rate.  LDB parents are fixed-rate (only accept an exact
 * match); otherwise program the CHSCCDR PODF (exact divisors up to 8).
 */
2435 static int _clk_ipu2_di0_set_rate(struct clk *clk, unsigned long rate)
2438 u32 parent_rate = clk_get_rate(clk->parent);
2440 if ((clk->parent == &ldb_di0_clk) ||
2441 (clk->parent == &ldb_di1_clk)) {
2442 if (parent_rate == rate)
2448 div = parent_rate / rate;
2451 if (((parent_rate / div) != rate) || (div > 8))
2454 reg = __raw_readl(MXC_CCM_CHSCCDR);
2455 reg &= ~MXC_CCM_CHSCCDR_IPU2_DI0_PODF_MASK;
2456 reg |= (div - 1) << MXC_CCM_CHSCCDR_IPU2_DI0_PODF_OFFSET;
2457 __raw_writel(reg, MXC_CCM_CHSCCDR);
/*
 * Select the IPU2 DI0 source.  LDB parents are routed directly through
 * the final CLK_SEL mux; other parents go through the pre-mux, whose
 * divided output then feeds the final mux.
 */
2462 static int _clk_ipu2_di0_set_parent(struct clk *clk, struct clk *parent)
2466 if (parent == &ldb_di0_clk)
2468 else if (parent == &ldb_di1_clk)
2471 reg = __raw_readl(MXC_CCM_CHSCCDR)
2472 & ~MXC_CCM_CHSCCDR_IPU2_DI0_PRE_CLK_SEL_MASK;
2474 mux = _get_mux6(parent, &mmdc_ch0_axi_clk[0],
2475 &pll3_usb_otg_main_clk, &pll5_video_main_clk,
2476 &pll2_pfd_352M, &pll2_pfd_400M, &pll3_pfd_540M);
2477 reg |= (mux << MXC_CCM_CHSCCDR_IPU2_DI0_PRE_CLK_SEL_OFFSET);
2479 __raw_writel(reg, MXC_CCM_CHSCCDR);
2481 /* Derive clock from divided pre-muxed ipu2_di0 clock.*/
2484 reg = __raw_readl(MXC_CCM_CHSCCDR)
2485 & ~MXC_CCM_CHSCCDR_IPU2_DI0_CLK_SEL_MASK;
2486 __raw_writel(reg | (mux << MXC_CCM_CHSCCDR_IPU2_DI0_CLK_SEL_OFFSET),
/*
 * IPU2 DI1 rate: parent rate when LDB-sourced, else parent / (PODF + 1).
 * NOTE(review): PODF mask used without an offset shift — correct only if
 * the field starts at bit 0; confirm against the CHSCCDR layout.
 */
2492 static unsigned long _clk_ipu2_di1_get_rate(struct clk *clk)
2496 if ((clk->parent == &ldb_di0_clk) ||
2497 (clk->parent == &ldb_di1_clk))
2498 return clk_get_rate(clk->parent);
2500 reg = __raw_readl(MXC_CCM_CHSCCDR);
2502 div = (reg & MXC_CCM_CHSCCDR_IPU2_DI1_PODF_MASK) + 1;
2504 return clk_get_rate(clk->parent) / div;
/*
 * Set IPU2 DI1 rate.  LDB parents are fixed-rate (only accept an exact
 * match); otherwise program the CHSCCDR PODF (exact divisors up to 8).
 */
2507 static int _clk_ipu2_di1_set_rate(struct clk *clk, unsigned long rate)
2510 u32 parent_rate = clk_get_rate(clk->parent);
2512 if ((clk->parent == &ldb_di0_clk) ||
2513 (clk->parent == &ldb_di1_clk)) {
2514 if (parent_rate == rate)
2520 div = parent_rate / rate;
2523 if (((parent_rate / div) != rate) || (div > 8))
2526 reg = __raw_readl(MXC_CCM_CHSCCDR);
2527 reg &= ~MXC_CCM_CHSCCDR_IPU2_DI1_PODF_MASK;
2528 reg |= (div - 1) << MXC_CCM_CHSCCDR_IPU2_DI1_PODF_OFFSET;
2529 __raw_writel(reg, MXC_CCM_CHSCCDR);
/*
 * Select the IPU2 DI1 source.  LDB parents are routed directly through
 * the final CLK_SEL mux; other parents go through the pre-mux, whose
 * divided output then feeds the final mux.
 */
2534 static int _clk_ipu2_di1_set_parent(struct clk *clk, struct clk *parent)
2538 if (parent == &ldb_di0_clk)
2540 else if (parent == &ldb_di1_clk)
2543 reg = __raw_readl(MXC_CCM_CHSCCDR)
2544 & ~MXC_CCM_CHSCCDR_IPU2_DI1_PRE_CLK_SEL_MASK;
2546 mux = _get_mux6(parent, &mmdc_ch0_axi_clk[0],
2547 &pll3_usb_otg_main_clk, &pll5_video_main_clk,
2548 &pll2_pfd_352M, &pll2_pfd_400M, &pll3_pfd_540M);
2549 reg |= (mux << MXC_CCM_CHSCCDR_IPU2_DI1_PRE_CLK_SEL_OFFSET);
2551 __raw_writel(reg, MXC_CCM_CHSCCDR);
2553 /* Derive clock from divided pre-muxed ipu2_di1 clock.*/
2556 reg = __raw_readl(MXC_CCM_CHSCCDR)
2557 & ~MXC_CCM_CHSCCDR_IPU2_DI1_CLK_SEL_MASK;
2558 __raw_writel(reg | (mux << MXC_CCM_CHSCCDR_IPU2_DI1_CLK_SEL_OFFSET),
2564 static struct clk ipu2_di_clk[] = {
2566 __INIT_CLK_DEBUG(ipu2_di_clk_0)
2568 .parent = &pll3_pfd_540M,
2569 .enable_reg = MXC_CCM_CCGR3,
2570 .enable_shift = MXC_CCM_CCGRx_CG1_OFFSET,
2571 .enable = _clk_enable,
2572 .disable = _clk_disable,
2573 .set_parent = _clk_ipu2_di0_set_parent,
2574 .set_rate = _clk_ipu2_di0_set_rate,
2575 .round_rate = _clk_ipu_di_round_rate,
2576 .get_rate = _clk_ipu2_di0_get_rate,
2579 __INIT_CLK_DEBUG(ipu2_di_clk_1)
2581 .parent = &pll3_pfd_540M,
2582 .enable_reg = MXC_CCM_CCGR3,
2583 .enable_shift = MXC_CCM_CCGRx_CG2_OFFSET,
2584 .enable = _clk_enable,
2585 .disable = _clk_disable,
2586 .set_parent = _clk_ipu2_di1_set_parent,
2587 .set_rate = _clk_ipu2_di1_set_rate,
2588 .round_rate = _clk_ipu_di_round_rate,
2589 .get_rate = _clk_ipu2_di1_get_rate,
/* FlexCAN2: module clock (CCGR0/CG9) chained to its serial clock (CCGR0/CG10). */
2593 static struct clk can2_clk[] = {
2595 __INIT_CLK_DEBUG(can2_module_clk)
2597 .parent = &pll3_sw_clk,
2598 .enable_reg = MXC_CCM_CCGR0,
2599 .enable_shift = MXC_CCM_CCGRx_CG9_OFFSET,
2600 .enable = _clk_enable,
2601 .disable = _clk_disable,
2602 .secondary = &can2_clk[1],
2605 __INIT_CLK_DEBUG(can2_serial_clk)
2607 .parent = &pll3_sw_clk,
2608 .enable_reg = MXC_CCM_CCGR0,
2609 .enable_shift = MXC_CCM_CCGRx_CG10_OFFSET,
2610 .enable = _clk_enable,
2611 .disable = _clk_disable,
/* FlexCAN1: module clock (CCGR0/CG7) chained to its serial clock (CCGR0/CG8). */
2616 static struct clk can1_clk[] = {
2618 __INIT_CLK_DEBUG(can1_module_clk)
2620 .parent = &pll3_sw_clk,
2621 .enable_reg = MXC_CCM_CCGR0,
2622 .enable_shift = MXC_CCM_CCGRx_CG7_OFFSET,
2623 .enable = _clk_enable,
2624 .disable = _clk_disable,
2625 .secondary = &can1_clk[1],
2628 __INIT_CLK_DEBUG(can1_serial_clk)
2630 .parent = &pll3_sw_clk,
2631 .enable_reg = MXC_CCM_CCGR0,
2632 .enable_shift = MXC_CCM_CCGRx_CG8_OFFSET,
2633 .enable = _clk_enable,
2634 .disable = _clk_disable,
/* Shared S/PDIF round_rate: exact divisors only, split into pre/post stages. */
2638 static unsigned long _clk_spdif_round_rate(struct clk *clk,
2642 u32 parent_rate = clk_get_rate(clk->parent);
2643 u32 div = parent_rate / rate;
2645 if (parent_rate % rate)
2648 __calc_pre_post_dividers(div, &pre, &post);
2650 return parent_rate / (pre * post);
2653 static int _clk_spdif0_set_parent(struct clk *clk, struct clk *parent)
2657 reg = __raw_readl(MXC_CCM_CDCDR)
2658 & MXC_CCM_CDCDR_SPDIF0_CLK_SEL_MASK;
2660 mux = _get_mux6(parent, &pll4_audio_main_clk,
2661 &pll3_pfd_508M, &pll3_pfd_454M,
2662 &pll3_sw_clk, NULL, NULL);
2663 reg |= mux << MXC_CCM_CDCDR_SPDIF0_CLK_SEL_OFFSET;
2665 __raw_writel(reg, MXC_CCM_CDCDR);
/* S/PDIF0 rate: parent / (CDCDR pred * podf), both fields +1 encoded. */
2670 static unsigned long _clk_spdif0_get_rate(struct clk *clk)
2672 u32 reg, pred, podf;
2674 reg = __raw_readl(MXC_CCM_CDCDR);
2676 pred = ((reg & MXC_CCM_CDCDR_SPDIF0_CLK_PRED_MASK)
2677 >> MXC_CCM_CDCDR_SPDIF0_CLK_PRED_OFFSET) + 1;
2678 podf = ((reg & MXC_CCM_CDCDR_SPDIF0_CLK_PODF_MASK)
2679 >> MXC_CCM_CDCDR_SPDIF0_CLK_PODF_OFFSET) + 1;
2681 return clk_get_rate(clk->parent) / (pred * podf);
/* Program S/PDIF0 pred/podf (exact divisors up to 512); fields are (x-1) encoded. */
2684 static int _clk_spdif0_set_rate(struct clk *clk, unsigned long rate)
2686 u32 reg, div, pre, post;
2687 u32 parent_rate = clk_get_rate(clk->parent);
2689 div = parent_rate / rate;
2692 if (((parent_rate / div) != rate) || div > 512)
2695 __calc_pre_post_dividers(div, &pre, &post);
2697 reg = __raw_readl(MXC_CCM_CDCDR);
2698 reg &= ~(MXC_CCM_CDCDR_SPDIF0_CLK_PRED_MASK|
2699 MXC_CCM_CDCDR_SPDIF0_CLK_PODF_MASK);
2700 reg |= (post - 1) << MXC_CCM_CDCDR_SPDIF0_CLK_PODF_OFFSET;
2701 reg |= (pre - 1) << MXC_CCM_CDCDR_SPDIF0_CLK_PRED_OFFSET;
2703 __raw_writel(reg, MXC_CCM_CDCDR);
/* S/PDIF0: element 0 is the dividable tx clock (CCGR5/CG7); element 1 chains the spba bus clock. */
2708 static struct clk spdif0_clk[] = {
2710 __INIT_CLK_DEBUG(spdif0_clk_0)
2712 .parent = &pll3_sw_clk,
2713 .enable = _clk_enable,
2714 .enable_reg = MXC_CCM_CCGR5,
2715 .enable_shift = MXC_CCM_CCGRx_CG7_OFFSET,
2716 .disable = _clk_disable,
2717 .secondary = &spdif0_clk[1],
2718 .set_rate = _clk_spdif0_set_rate,
2719 .get_rate = _clk_spdif0_get_rate,
2720 .set_parent = _clk_spdif0_set_parent,
2721 .round_rate = _clk_spdif_round_rate,
2724 __INIT_CLK_DEBUG(spdif0_clk_1)
2727 .secondary = &spba_clk,
2731 static int _clk_spdif1_set_parent(struct clk *clk, struct clk *parent)
2735 reg = __raw_readl(MXC_CCM_CDCDR) & MXC_CCM_CDCDR_SPDIF1_CLK_SEL_MASK;
2737 mux = _get_mux6(parent, &pll4_audio_main_clk, &pll3_pfd_508M,
2738 &pll3_pfd_454M, &pll3_sw_clk, NULL, NULL);
2739 reg |= mux << MXC_CCM_CDCDR_SPDIF1_CLK_SEL_OFFSET;
2741 __raw_writel(reg, MXC_CCM_CDCDR);
/* S/PDIF1 rate: parent / (CDCDR pred * podf), both fields +1 encoded. */
2746 static unsigned long _clk_spdif1_get_rate(struct clk *clk)
2748 u32 reg, pred, podf;
2750 reg = __raw_readl(MXC_CCM_CDCDR);
2752 pred = ((reg & MXC_CCM_CDCDR_SPDIF1_CLK_PRED_MASK)
2753 >> MXC_CCM_CDCDR_SPDIF1_CLK_PRED_OFFSET) + 1;
2754 podf = ((reg & MXC_CCM_CDCDR_SPDIF1_CLK_PODF_MASK)
2755 >> MXC_CCM_CDCDR_SPDIF1_CLK_PODF_OFFSET) + 1;
2757 return clk_get_rate(clk->parent) / (pred * podf);
/* Program S/PDIF1 pred/podf (exact divisors up to 512); fields are (x-1) encoded. */
2760 static int _clk_spdif1_set_rate(struct clk *clk, unsigned long rate)
2762 u32 reg, div, pre, post;
2763 u32 parent_rate = clk_get_rate(clk->parent);
2765 div = parent_rate / rate;
2768 if (((parent_rate / div) != rate) || div > 512)
2771 __calc_pre_post_dividers(div, &pre, &post);
2773 reg = __raw_readl(MXC_CCM_CDCDR);
2774 reg &= ~(MXC_CCM_CDCDR_SPDIF1_CLK_PRED_MASK|
2775 MXC_CCM_CDCDR_SPDIF1_CLK_PODF_MASK);
2776 reg |= (post - 1) << MXC_CCM_CDCDR_SPDIF1_CLK_PODF_OFFSET;
2777 reg |= (pre - 1) << MXC_CCM_CDCDR_SPDIF1_CLK_PRED_OFFSET;
2779 __raw_writel(reg, MXC_CCM_CDCDR);
/*
 * S/PDIF1: element 0 is the dividable tx clock, element 1 chains spba.
 * NOTE(review): gate is CCGR5/CG7, identical to spdif0_clk[0] — confirm
 * whether S/PDIF1 has its own gate bit or genuinely shares this one.
 */
2784 static struct clk spdif1_clk[] = {
2786 __INIT_CLK_DEBUG(spdif1_clk_0)
2788 .parent = &pll3_sw_clk,
2789 .enable = _clk_enable,
2790 .enable_reg = MXC_CCM_CCGR5,
2791 .enable_shift = MXC_CCM_CCGRx_CG7_OFFSET,
2792 .disable = _clk_disable,
2793 .secondary = &spdif1_clk[1],
2794 .set_rate = _clk_spdif1_set_rate,
2795 .get_rate = _clk_spdif1_get_rate,
2796 .set_parent = _clk_spdif1_set_parent,
2797 .round_rate = _clk_spdif_round_rate,
2800 __INIT_CLK_DEBUG(spdif1_clk_1)
2803 .secondary = &spba_clk,
/* ESAI round_rate: exact divisors only, split into pre/post stages. */
2807 static unsigned long _clk_esai_round_rate(struct clk *clk,
2811 u32 parent_rate = clk_get_rate(clk->parent);
2812 u32 div = parent_rate / rate;
2814 if (parent_rate % rate)
2817 __calc_pre_post_dividers(div, &pre, &post);
2819 return parent_rate / (pre * post);
2822 static int _clk_esai_set_parent(struct clk *clk, struct clk *parent)
2826 reg = __raw_readl(MXC_CCM_CSCMR2) & MXC_CCM_CSCMR2_ESAI_CLK_SEL_MASK;
2828 mux = _get_mux6(parent, &pll4_audio_main_clk, &pll3_pfd_508M,
2829 &pll3_pfd_454M, &pll3_sw_clk, NULL, NULL);
2830 reg |= mux << MXC_CCM_CSCMR2_ESAI_CLK_SEL_OFFSET;
2832 __raw_writel(reg, MXC_CCM_CSCMR2);
/* ESAI rate: parent / (CS1CDR ESAI pred * podf), both fields +1 encoded. */
2837 static unsigned long _clk_esai_get_rate(struct clk *clk)
2839 u32 reg, pred, podf;
2841 reg = __raw_readl(MXC_CCM_CS1CDR);
2843 pred = ((reg & MXC_CCM_CS1CDR_ESAI_CLK_PRED_MASK)
2844 >> MXC_CCM_CS1CDR_ESAI_CLK_PRED_OFFSET) + 1;
2845 podf = ((reg & MXC_CCM_CS1CDR_ESAI_CLK_PODF_MASK)
2846 >> MXC_CCM_CS1CDR_ESAI_CLK_PODF_OFFSET) + 1;
2848 return clk_get_rate(clk->parent) / (pred * podf);
/* Program ESAI pred/podf in CS1CDR (exact divisors up to 512); (x-1) encoded. */
2851 static int _clk_esai_set_rate(struct clk *clk, unsigned long rate)
2853 u32 reg, div, pre, post;
2854 u32 parent_rate = clk_get_rate(clk->parent);
2856 div = parent_rate / rate;
2859 if (((parent_rate / div) != rate) || div > 512)
2862 __calc_pre_post_dividers(div, &pre, &post);
2864 reg = __raw_readl(MXC_CCM_CS1CDR);
2865 reg &= ~(MXC_CCM_CS1CDR_ESAI_CLK_PRED_MASK|
2866 MXC_CCM_CS1CDR_ESAI_CLK_PODF_MASK);
2867 reg |= (post - 1) << MXC_CCM_CS1CDR_ESAI_CLK_PODF_OFFSET;
2868 reg |= (pre - 1) << MXC_CCM_CS1CDR_ESAI_CLK_PRED_OFFSET;
2870 __raw_writel(reg, MXC_CCM_CS1CDR);
/* ESAI root clock: default parent pll3_sw_clk, gated by CCGR1/CG8,
 * with the _clk_esai_* rate/parent helpers above. */
2875 static struct clk esai_clk = {
2876 	__INIT_CLK_DEBUG(esai_clk)
2878 	.parent = &pll3_sw_clk,
2879 	.enable_reg = MXC_CCM_CCGR1,
2880 	.enable_shift = MXC_CCM_CCGRx_CG8_OFFSET,
2881 	.enable = _clk_enable,
2882 	.disable = _clk_disable,
2883 	.set_rate = _clk_esai_set_rate,
2884 	.get_rate = _clk_esai_get_rate,
2885 	.set_parent = _clk_esai_set_parent,
2886 	.round_rate = _clk_esai_round_rate,
/*
 * Enable the ENET reference clock from the anatop ENET PLL (PLL8):
 * take the PLL out of bypass.
 * NOTE(review): this also CLEARS ANADIG_PLL_ENABLE while _clk_enet_disable
 * SETS it — inverted relative to the bit's name. Confirm the ENABLE bit
 * polarity for this PLL against the i.MX6 anatop register spec before
 * changing anything here.
 */
2889 static int _clk_enet_enable(struct clk *clk)
2893 	/* Enable ENET ref clock */
2894 	reg = __raw_readl(PLL8_ENET_BASE_ADDR);
2895 	reg &= ~ANADIG_PLL_BYPASS;
2896 	reg &= ~ANADIG_PLL_ENABLE;
2897 	__raw_writel(reg, PLL8_ENET_BASE_ADDR);
/*
 * Disable the ENET reference clock: bypass the PLL and set
 * ANADIG_PLL_ENABLE (see polarity note on _clk_enet_enable — the
 * ENABLE handling looks inverted vs. the bit name; verify vs. spec).
 */
2903 static void _clk_enet_disable(struct clk *clk)
	/* NOTE(review): comment says "Enable" but this is the disable path. */
2909 	/* Enable ENET ref clock */
2910 	reg = __raw_readl(PLL8_ENET_BASE_ADDR);
2911 	reg |= ANADIG_PLL_BYPASS;
2912 	reg |= ANADIG_PLL_ENABLE;
2913 	__raw_writel(reg, PLL8_ENET_BASE_ADDR);
/*
 * Program the ENET PLL output divider (DIV_SELECT field).
 * NOTE(review): the rate -> div mapping (lines between the declaration
 * and the register write, presumably a switch on the supported ENET
 * reference rates) is elided in this listing.
 */
2916 static int _clk_enet_set_rate(struct clk *clk, unsigned long rate)
2918 	unsigned int reg, div = 1;
2936 	reg = __raw_readl(PLL8_ENET_BASE_ADDR);
2937 	reg &= ~ANADIG_PLL_ENET_DIV_SELECT_MASK;
2938 	reg |= (div << ANADIG_PLL_ENET_DIV_SELECT_OFFSET);
2939 	__raw_writel(reg, PLL8_ENET_BASE_ADDR);
/*
 * Derive the ENET reference rate from the PLL DIV_SELECT field.
 * NOTE(review): assumes DIV_SELECT occupies the low bits (no shift by
 * the offset is applied after masking) and that 500 MHz / (div + 1)
 * matches the hardware's rate table — confirm against the anatop spec.
 */
2944 static unsigned long _clk_enet_get_rate(struct clk *clk)
2948 	div = (__raw_readl(PLL8_ENET_BASE_ADDR))
2949 		& ANADIG_PLL_ENET_DIV_SELECT_MASK;
2951 	return 500000000 / (div + 1);
/* ENET (Ethernet) clock: child of the anatop ENET PLL (pll8), gated by
 * CCGR1/CG10, with the PLL-level enable/disable helpers above. */
2954 static struct clk enet_clk = {
2955 	__INIT_CLK_DEBUG(enet_clk)
2957 	.parent = &pll8_enet_main_clk,
2958 	.enable_reg = MXC_CCM_CCGR1,
2959 	.enable_shift = MXC_CCM_CCGRx_CG10_OFFSET,
2960 	.enable = _clk_enet_enable,
2961 	.disable = _clk_enet_disable,
2962 	.set_rate = _clk_enet_set_rate,
2963 	.get_rate = _clk_enet_get_rate,
/*
 * ECSPI1..ECSPI5 clocks: all gate-only children of the fixed pll3_60M
 * branch, occupying consecutive CCGR1 gates CG0..CG4.
 */
2966 static struct clk ecspi_clk[] = {
2968 	__INIT_CLK_DEBUG(ecspi0_clk)
2970 	.parent = &pll3_60M,
2971 	.enable_reg = MXC_CCM_CCGR1,
2972 	.enable_shift = MXC_CCM_CCGRx_CG0_OFFSET,
2973 	.enable = _clk_enable,
2974 	.disable = _clk_disable,
2977 	__INIT_CLK_DEBUG(ecspi1_clk)
2979 	.parent = &pll3_60M,
2980 	.enable_reg = MXC_CCM_CCGR1,
2981 	.enable_shift = MXC_CCM_CCGRx_CG1_OFFSET,
2982 	.enable = _clk_enable,
2983 	.disable = _clk_disable,
2986 	__INIT_CLK_DEBUG(ecspi2_clk)
2988 	.parent = &pll3_60M,
2989 	.enable_reg = MXC_CCM_CCGR1,
2990 	.enable_shift = MXC_CCM_CCGRx_CG2_OFFSET,
2991 	.enable = _clk_enable,
2992 	.disable = _clk_disable,
2995 	__INIT_CLK_DEBUG(ecspi3_clk)
2997 	.parent = &pll3_60M,
2998 	.enable_reg = MXC_CCM_CCGR1,
2999 	.enable_shift = MXC_CCM_CCGRx_CG3_OFFSET,
3000 	.enable = _clk_enable,
3001 	.disable = _clk_disable,
3004 	__INIT_CLK_DEBUG(ecspi4_clk)
3006 	.parent = &pll3_60M,
3007 	.enable_reg = MXC_CCM_CCGR1,
3008 	.enable_shift = MXC_CCM_CCGRx_CG4_OFFSET,
3009 	.enable = _clk_enable,
3010 	.disable = _clk_disable,
/*
 * Round @rate for the single-stage aclk_emi_slow divider.
 * NOTE(review): the clamp of div (to >= 1 and the field maximum,
 * per the comment) is elided in this listing.
 */
3014 static unsigned long _clk_emi_slow_round_rate(struct clk *clk,
3018 	u32 parent_rate = clk_get_rate(clk->parent);
3020 	div = parent_rate / rate;
3022 	/* Make sure rate is not greater than the maximum value for the clock.
3023 	 * Also prevent a div of 0.
3031 	return parent_rate / div;
3034 static int _clk_emi_slow_set_parent(struct clk *clk, struct clk *parent)
3037 u32 reg = __raw_readl(MXC_CCM_CSCMR1)
3038 & MXC_CCM_CSCMR1_ACLK_EMI_SLOW_MASK;
3040 mux = _get_mux6(parent, &axi_clk, &pll3_usb_otg_main_clk,
3041 &pll2_pfd_400M, &pll2_pfd_352M, NULL, NULL);
3042 reg |= (mux << MXC_CCM_CSCMR1_ACLK_EMI_SLOW_OFFSET);
3043 __raw_writel(reg, MXC_CCM_CSCMR1);
/* Read back aclk_emi_slow: parent rate / (PODF + 1) from CSCMR1. */
3048 static unsigned long _clk_emi_slow_get_rate(struct clk *clk)
3052 	reg = __raw_readl(MXC_CCM_CSCMR1);
3053 	div = ((reg & MXC_CCM_CSCMR1_ACLK_EMI_SLOW_PODF_MASK) >>
3054 		MXC_CCM_CSCMR1_ACLK_EMI_SLOW_PODF_OFFSET) + 1;
3056 	return clk_get_rate(clk->parent) / div;
/*
 * Program the 3-bit aclk_emi_slow divider (1..8) for an exactly
 * dividable @rate.
 * NOTE(review): the -EINVAL return and the trailing "return 0" are
 * elided in this listing.
 */
3059 static int _clk_emi_slow_set_rate(struct clk *clk, unsigned long rate)
3062 	u32 parent_rate = clk_get_rate(clk->parent);
3064 	div = parent_rate / rate;
	/* Reject inexact divisions and dividers beyond the 3-bit field. */
3067 	if (((parent_rate / div) != rate) || (div > 8))
3070 	reg = __raw_readl(MXC_CCM_CSCMR1);
3071 	reg &= ~MXC_CCM_CSCMR1_ACLK_EMI_SLOW_PODF_MASK;
3072 	reg |= (div - 1) << MXC_CCM_CSCMR1_ACLK_EMI_SLOW_PODF_OFFSET;
3073 	__raw_writel(reg, MXC_CCM_CSCMR1);
/* aclk_emi_slow: gated by CCGR6/CG5, full rate/parent ops.
 * NOTE(review): the .parent initializer is elided in this listing. */
3078 static struct clk emi_slow_clk = {
3079 	__INIT_CLK_DEBUG(emi_slow_clk)
3082 	.enable_reg = MXC_CCM_CCGR6,
3083 	.enable_shift = MXC_CCM_CCGRx_CG5_OFFSET,
3084 	.enable = _clk_enable,
3085 	.disable = _clk_disable,
3086 	.set_rate = _clk_emi_slow_set_rate,
3087 	.get_rate = _clk_emi_slow_get_rate,
3088 	.round_rate = _clk_emi_slow_round_rate,
3089 	.set_parent = _clk_emi_slow_set_parent,
/*
 * Round @rate for the single-stage aclk_emi divider.
 * NOTE(review): the div clamp described by the comment is elided.
 */
3092 static unsigned long _clk_emi_round_rate(struct clk *clk,
3096 	u32 parent_rate = clk_get_rate(clk->parent);
3098 	div = parent_rate / rate;
3100 	/* Make sure rate is not greater than the maximum value for the clock.
3101 	 * Also prevent a div of 0.
3109 	return parent_rate / div;
3112 static int _clk_emi_set_parent(struct clk *clk, struct clk *parent)
3115 u32 reg = __raw_readl(MXC_CCM_CSCMR1) & MXC_CCM_CSCMR1_ACLK_EMI_MASK;
3117 mux = _get_mux6(parent, &axi_clk, &pll3_usb_otg_main_clk,
3118 &pll2_pfd_400M, &pll2_pfd_352M, NULL, NULL);
3119 reg |= (mux << MXC_CCM_CSCMR1_ACLK_EMI_OFFSET);
3120 __raw_writel(reg, MXC_CCM_CSCMR1);
/* Read back aclk_emi: parent rate / (PODF + 1) from CSCMR1. */
3125 static unsigned long _clk_emi_get_rate(struct clk *clk)
3129 	reg = __raw_readl(MXC_CCM_CSCMR1);
3130 	div = ((reg & MXC_CCM_CSCMR1_ACLK_EMI_PODF_MASK) >>
3131 		MXC_CCM_CSCMR1_ACLK_EMI_PODF_OFFSET) + 1;
3133 	return clk_get_rate(clk->parent) / div;
/*
 * Program the 3-bit aclk_emi divider (1..8) for an exactly dividable
 * @rate.
 * NOTE(review): -EINVAL / return 0 lines are elided in this listing.
 */
3136 static int _clk_emi_set_rate(struct clk *clk, unsigned long rate)
3139 	u32 parent_rate = clk_get_rate(clk->parent);
3141 	div = parent_rate / rate;
3144 	if (((parent_rate / div) != rate) || (div > 8))
3147 	reg = __raw_readl(MXC_CCM_CSCMR1);
3148 	reg &= ~MXC_CCM_CSCMR1_ACLK_EMI_PODF_MASK;
3149 	reg |= (div - 1) << MXC_CCM_CSCMR1_ACLK_EMI_PODF_OFFSET;
3150 	__raw_writel(reg, MXC_CCM_CSCMR1);
/* aclk_emi root clock: rate/parent ops only.
 * NOTE(review): .parent and any gate fields are elided in this listing. */
3155 static struct clk emi_clk = {
3156 	__INIT_CLK_DEBUG(emi_clk)
3159 	.set_rate = _clk_emi_set_rate,
3160 	.get_rate = _clk_emi_get_rate,
3161 	.round_rate = _clk_emi_round_rate,
3162 	.set_parent = _clk_emi_set_parent,
/*
 * Round @rate for the two-stage (pre * post) ENFC (NAND flash
 * controller) divider.
 * NOTE(review): the div++ adjustment and pre/post declarations are
 * elided in this listing.
 */
3165 static unsigned long _clk_enfc_round_rate(struct clk *clk,
3169 	u32 parent_rate = clk_get_rate(clk->parent);
3170 	u32 div = parent_rate / rate;
3172 	if (parent_rate % rate)
3175 	__calc_pre_post_dividers(div, &pre, &post);
3177 	return parent_rate / (pre * post);
3180 static int _clk_enfc_set_parent(struct clk *clk, struct clk *parent)
3184 reg = __raw_readl(MXC_CCM_CS2CDR)
3185 & MXC_CCM_CS2CDR_ENFC_CLK_SEL_MASK;
3187 mux = _get_mux6(parent, &pll2_pfd_352M,
3188 &pll2_528_bus_main_clk, &pll3_usb_otg_main_clk,
3189 &pll2_pfd_400M, NULL, NULL);
3190 reg |= mux << MXC_CCM_CS2CDR_ENFC_CLK_SEL_OFFSET;
3192 __raw_writel(reg, MXC_CCM_CS2CDR);
/* Read back the ENFC rate: parent / (PRED * PODF) from CS2CDR
 * (both fields store divider - 1). */
3197 static unsigned long _clk_enfc_get_rate(struct clk *clk)
3199 	u32 reg, pred, podf;
3201 	reg = __raw_readl(MXC_CCM_CS2CDR);
3203 	pred = ((reg & MXC_CCM_CS2CDR_ENFC_CLK_PRED_MASK)
3204 		>> MXC_CCM_CS2CDR_ENFC_CLK_PRED_OFFSET) + 1;
3205 	podf = ((reg & MXC_CCM_CS2CDR_ENFC_CLK_PODF_MASK)
3206 		>> MXC_CCM_CS2CDR_ENFC_CLK_PODF_OFFSET) + 1;
3208 	return clk_get_rate(clk->parent) / (pred * podf);
/*
 * Program the ENFC two-stage divider (combined max 512) in CS2CDR for
 * an exactly dividable @rate.
 * NOTE(review): -EINVAL / return 0 lines are elided in this listing.
 */
3211 static int _clk_enfc_set_rate(struct clk *clk, unsigned long rate)
3213 	u32 reg, div, pre, post;
3214 	u32 parent_rate = clk_get_rate(clk->parent);
3216 	div = parent_rate / rate;
3219 	if (((parent_rate / div) != rate) || div > 512)
3222 	__calc_pre_post_dividers(div, &pre, &post);
3224 	reg = __raw_readl(MXC_CCM_CS2CDR);
3225 	reg &= ~(MXC_CCM_CS2CDR_ENFC_CLK_PRED_MASK|
3226 		MXC_CCM_CS2CDR_ENFC_CLK_PODF_MASK);
3227 	reg |= (post - 1) << MXC_CCM_CS2CDR_ENFC_CLK_PODF_OFFSET;
3228 	reg |= (pre - 1) << MXC_CCM_CS2CDR_ENFC_CLK_PRED_OFFSET;
3230 	__raw_writel(reg, MXC_CCM_CS2CDR);
/* ENFC (NAND flash controller) clock: default parent pll2_pfd_352M,
 * gated by CCGR2/CG7, with the _clk_enfc_* helpers above. */
3235 static struct clk enfc_clk = {
3236 	__INIT_CLK_DEBUG(enfc_clk)
3238 	.parent = &pll2_pfd_352M,
3239 	.enable_reg = MXC_CCM_CCGR2,
3240 	.enable_shift = MXC_CCM_CCGRx_CG7_OFFSET,
3241 	.enable = _clk_enable,
3242 	.disable = _clk_disable,
3243 	.set_rate = _clk_enfc_set_rate,
3244 	.get_rate = _clk_enfc_get_rate,
3245 	.round_rate = _clk_enfc_round_rate,
3246 	.set_parent = _clk_enfc_set_parent,
/*
 * Round @rate for the single-stage UART divider.
 * NOTE(review): the div clamp described by the comment is elided.
 */
3249 static unsigned long _clk_uart_round_rate(struct clk *clk,
3253 	u32 parent_rate = clk_get_rate(clk->parent);
3255 	div = parent_rate / rate;
3257 	/* Make sure rate is not greater than the maximum value for the clock.
3258 	 * Also prevent a div of 0.
3266 	return parent_rate / div;
3269 static int _clk_uart_set_rate(struct clk *clk, unsigned long rate)
3272 u32 parent_rate = clk_get_rate(clk->parent);
3274 div = parent_rate / rate;
3277 if (((parent_rate / div) != rate) || (div > 64))
3280 reg = __raw_readl(MXC_CCM_CSCDR1) & MXC_CCM_CSCDR1_UART_CLK_PODF_MASK;
3281 reg |= ((div - 1) << MXC_CCM_CSCDR1_UART_CLK_PODF_OFFSET);
3283 __raw_writel(reg, MXC_CCM_CSCDR1);
/* Read back the UART rate: parent rate / (PODF + 1) from CSCDR1.
 * NOTE(review): the "return val;" line is elided in this listing. */
3288 static unsigned long _clk_uart_get_rate(struct clk *clk)
3293 	reg = __raw_readl(MXC_CCM_CSCDR1) & MXC_CCM_CSCDR1_UART_CLK_PODF_MASK;
3294 	div = (reg >> MXC_CCM_CSCDR1_UART_CLK_PODF_OFFSET) + 1;
3295 	val = clk_get_rate(clk->parent) / div;
/*
 * UART clock pair: [0] is the baud root clock off the fixed pll3_80M
 * branch (CCGR5/CG12) with rate ops and [1] as secondary; [1] is the
 * gate-only uart_serial clock (CCGR5/CG13).
 */
3300 static struct clk uart_clk[] = {
3302 	__INIT_CLK_DEBUG(uart_clk)
3304 	.parent = &pll3_80M,
3305 	.enable_reg = MXC_CCM_CCGR5,
3306 	.enable_shift = MXC_CCM_CCGRx_CG12_OFFSET,
3307 	.enable = _clk_enable,
3308 	.disable = _clk_disable,
3309 	.secondary = &uart_clk[1],
3310 	.set_rate = _clk_uart_set_rate,
3311 	.get_rate = _clk_uart_get_rate,
3312 	.round_rate = _clk_uart_round_rate,
3315 	__INIT_CLK_DEBUG(uart_serial_clk)
3317 	.enable_reg = MXC_CCM_CCGR5,
3318 	.enable_shift = MXC_CCM_CCGRx_CG13_OFFSET,
3319 	.enable = _clk_enable,
3320 	.disable = _clk_disable,
/*
 * Round @rate for the single-stage HSI TX divider.
 * NOTE(review): the div clamp described by the comment is elided.
 */
3324 static unsigned long _clk_hsi_tx_round_rate(struct clk *clk,
3328 	u32 parent_rate = clk_get_rate(clk->parent);
3330 	div = parent_rate / rate;
3332 	/* Make sure rate is not greater than the maximum value for the clock.
3333 	 * Also prevent a div of 0.
3341 	return parent_rate / div;
3344 static int _clk_hsi_tx_set_parent(struct clk *clk, struct clk *parent)
3346 u32 reg = __raw_readl(MXC_CCM_CDCDR) & MXC_CCM_CDCDR_HSI_TX_CLK_SEL;
3348 if (parent == &pll2_pfd_400M)
3349 reg |= (MXC_CCM_CDCDR_HSI_TX_CLK_SEL);
3351 __raw_writel(reg, MXC_CCM_CDCDR);
/* Read back the HSI TX rate: parent rate / (PODF + 1) from CDCDR. */
3356 static unsigned long _clk_hsi_tx_get_rate(struct clk *clk)
3360 	reg = __raw_readl(MXC_CCM_CDCDR);
3361 	div = ((reg & MXC_CCM_CDCDR_HSI_TX_PODF_MASK) >>
3362 		MXC_CCM_CDCDR_HSI_TX_PODF_OFFSET) + 1;
3364 	return clk_get_rate(clk->parent) / div;
/*
 * Program the 3-bit HSI TX divider (1..8) in CDCDR for an exactly
 * dividable @rate.
 * NOTE(review): -EINVAL / return 0 lines are elided in this listing.
 */
3367 static int _clk_hsi_tx_set_rate(struct clk *clk, unsigned long rate)
3370 	u32 parent_rate = clk_get_rate(clk->parent);
3372 	div = parent_rate / rate;
3375 	if (((parent_rate / div) != rate) || (div > 8))
3378 	reg = __raw_readl(MXC_CCM_CDCDR);
3379 	reg &= ~MXC_CCM_CDCDR_HSI_TX_PODF_MASK;
3380 	reg |= (div - 1) << MXC_CCM_CDCDR_HSI_TX_PODF_OFFSET;
3381 	__raw_writel(reg, MXC_CCM_CDCDR);
/* HSI (MIPI high-speed sync interface) TX clock: default parent
 * pll2_pfd_400M, gated by CCGR3/CG8. */
3386 static struct clk hsi_tx_clk = {
3387 	__INIT_CLK_DEBUG(hsi_tx_clk)
3389 	.parent = &pll2_pfd_400M,
3390 	.enable_reg = MXC_CCM_CCGR3,
3391 	.enable_shift = MXC_CCM_CCGRx_CG8_OFFSET,
3392 	.enable = _clk_enable,
3393 	.disable = _clk_disable,
3394 	.set_parent = _clk_hsi_tx_set_parent,
3395 	.round_rate = _clk_hsi_tx_round_rate,
3396 	.set_rate = _clk_hsi_tx_set_rate,
3397 	.get_rate = _clk_hsi_tx_get_rate,
/* video_27M clock: gate-only (CCGR2/CG2), parented off pll2_pfd_400M. */
3400 static struct clk video_27M_clk = {
3401 	__INIT_CLK_DEBUG(video_27M_clk)
3403 	.parent = &pll2_pfd_400M,
3404 	.enable_reg = MXC_CCM_CCGR2,
3405 	.enable_shift = MXC_CCM_CCGRx_CG2_OFFSET,
3406 	.enable = _clk_enable,
3407 	.disable = _clk_disable,
/*
 * CAAM (crypto accelerator) clock triple: [0] mem (CCGR0/CG4, chains to
 * [1] via .secondary), [1] aclk (CCGR0/CG5), [2] ipg.
 * NOTE(review): [2] uses the same CCGR0/CG4 gate as [0] — possibly a
 * copy-paste (ipg is usually CG6); confirm against the CCM chapter.
 */
3410 static struct clk caam_clk[] = {
3412 	__INIT_CLK_DEBUG(caam_mem_clk)
3414 	.enable_reg = MXC_CCM_CCGR0,
3415 	.enable_shift = MXC_CCM_CCGRx_CG4_OFFSET,
3416 	.enable = _clk_enable,
3417 	.disable = _clk_disable,
3418 	.secondary = &caam_clk[1],
3421 	__INIT_CLK_DEBUG(caam_aclk_clk)
3423 	.enable_reg = MXC_CCM_CCGR0,
3424 	.enable_shift = MXC_CCM_CCGRx_CG5_OFFSET,
3425 	.enable = _clk_enable,
3426 	.disable = _clk_disable,
3429 	__INIT_CLK_DEBUG(caam_ipg_clk)
3431 	.enable_reg = MXC_CCM_CCGR0,
3432 	.enable_shift = MXC_CCM_CCGRx_CG4_OFFSET,
3433 	.enable = _clk_enable,
3434 	.disable = _clk_disable,
/* ASRC (sample-rate converter) clock: gate-only (CCGR0/CG3), parented
 * off the audio PLL (pll4). */
3438 static struct clk asrc_clk = {
3439 	__INIT_CLK_DEBUG(asrc_clk)
3441 	.parent = &pll4_audio_main_clk,
3442 	.enable_reg = MXC_CCM_CCGR0,
3443 	.enable_shift = MXC_CCM_CCGRx_CG3_OFFSET,
3444 	.enable = _clk_enable,
3445 	.disable = _clk_disable,
/* APBH DMA clock (CCGR0/CG2). Uses _clk_disable_inwait: the gate is
 * left on in WAIT mode rather than fully disabled. */
3448 static struct clk apbh_dma_clk = {
3449 	__INIT_CLK_DEBUG(apbh_dma_clk)
3451 	.enable = _clk_enable,
3452 	.disable = _clk_disable_inwait,
3453 	.enable_reg = MXC_CCM_CCGR0,
3454 	.enable_shift = MXC_CCM_CCGRx_CG2_OFFSET,
/* AIPS-TZ2 bus bridge clock (CCGR0/CG1); kept on in WAIT mode. */
3457 static struct clk aips_tz2_clk = {
3458 	__INIT_CLK_DEBUG(aips_tz2_clk)
3460 	.enable_reg = MXC_CCM_CCGR0,
3461 	.enable_shift = MXC_CCM_CCGRx_CG1_OFFSET,
3462 	.enable = _clk_enable,
3463 	.disable = _clk_disable_inwait,
/* AIPS-TZ1 bus bridge clock (CCGR0/CG0); kept on in WAIT mode. */
3466 static struct clk aips_tz1_clk = {
3467 	__INIT_CLK_DEBUG(aips_tz1_clk)
3469 	.enable_reg = MXC_CCM_CCGR0,
3470 	.enable_shift = MXC_CCM_CCGRx_CG0_OFFSET,
3471 	.enable = _clk_enable,
3472 	.disable = _clk_disable_inwait,
/* OpenVG AXI clock: gate-only (CCGR3/CG15). */
3476 static struct clk openvg_axi_clk = {
3477 	__INIT_CLK_DEBUG(openvg_axi_clk)
3478 	.enable = _clk_enable,
3479 	.enable_reg = MXC_CCM_CCGR3,
3480 	.enable_shift = MXC_CCM_CCGRx_CG15_OFFSET,
3481 	.disable = _clk_disable,
/*
 * Round @rate for the single-stage GPU3D core divider.
 * NOTE(review): the div clamp described by the comment is elided.
 */
3484 static unsigned long _clk_gpu3d_core_round_rate(struct clk *clk,
3488 	u32 parent_rate = clk_get_rate(clk->parent);
3490 	div = parent_rate / rate;
3492 	/* Make sure rate is not greater than the maximum value for the clock.
3493 	 * Also prevent a div of 0.
3501 	return parent_rate / div;
3504 static int _clk_gpu3d_core_set_parent(struct clk *clk, struct clk *parent)
3507 u32 reg = __raw_readl(MXC_CCM_CBCMR)
3508 & MXC_CCM_CBCMR_GPU3D_CORE_CLK_SEL_MASK;
3510 mux = _get_mux6(parent, &mmdc_ch0_axi_clk[0],
3511 &pll3_usb_otg_main_clk,
3512 &pll2_pfd_594M, &pll2_pfd_400M, NULL, NULL);
3513 reg |= (mux << MXC_CCM_CBCMR_GPU3D_CORE_CLK_SEL_OFFSET);
3514 __raw_writel(reg, MXC_CCM_CBCMR);
/* Read back the GPU3D core rate: parent / (PODF + 1) from CBCMR. */
3519 static unsigned long _clk_gpu3d_core_get_rate(struct clk *clk)
3523 	reg = __raw_readl(MXC_CCM_CBCMR);
3524 	div = ((reg & MXC_CCM_CBCMR_GPU3D_CORE_PODF_MASK) >>
3525 		MXC_CCM_CBCMR_GPU3D_CORE_PODF_OFFSET) + 1;
3527 	return clk_get_rate(clk->parent) / div;
/*
 * Program the 3-bit GPU3D core divider (1..8) in CBCMR for an exactly
 * dividable @rate.
 * NOTE(review): -EINVAL / return 0 lines are elided in this listing.
 */
3530 static int _clk_gpu3d_core_set_rate(struct clk *clk, unsigned long rate)
3533 	u32 parent_rate = clk_get_rate(clk->parent);
3535 	div = parent_rate / rate;
3538 	if (((parent_rate / div) != rate) || (div > 8))
3541 	reg = __raw_readl(MXC_CCM_CBCMR);
3542 	reg &= ~MXC_CCM_CBCMR_GPU3D_CORE_PODF_MASK;
3543 	reg |= (div - 1) << MXC_CCM_CBCMR_GPU3D_CORE_PODF_OFFSET;
3544 	__raw_writel(reg, MXC_CCM_CBCMR);
/* GPU3D core clock: default parent pll2_pfd_594M, gated by CCGR1/CG13
 * (same gate as gpu3d_shader_clk below). */
3549 static struct clk gpu3d_core_clk = {
3550 	__INIT_CLK_DEBUG(gpu3d_core_clk)
3551 	.parent = &pll2_pfd_594M,
3552 	.enable = _clk_enable,
3553 	.enable_reg = MXC_CCM_CCGR1,
3554 	.enable_shift = MXC_CCM_CCGRx_CG13_OFFSET,
3555 	.disable = _clk_disable,
3556 	.set_parent = _clk_gpu3d_core_set_parent,
3557 	.set_rate = _clk_gpu3d_core_set_rate,
3558 	.get_rate = _clk_gpu3d_core_get_rate,
3559 	.round_rate = _clk_gpu3d_core_round_rate,
/*
 * Round @rate for the single-stage GPU2D core divider.
 * NOTE(review): the div clamp described by the comment is elided.
 */
3562 static unsigned long _clk_gpu2d_core_round_rate(struct clk *clk,
3566 	u32 parent_rate = clk_get_rate(clk->parent);
3568 	div = parent_rate / rate;
3570 	/* Make sure rate is not greater than the maximum value for the clock.
3571 	 * Also prevent a div of 0.
3579 	return parent_rate / div;
3582 static int _clk_gpu2d_core_set_parent(struct clk *clk, struct clk *parent)
3585 u32 reg = __raw_readl(MXC_CCM_CBCMR) & MXC_CCM_CBCMR_GPU2D_CLK_SEL_MASK;
3587 mux = _get_mux6(parent, &axi_clk, &pll3_usb_otg_main_clk,
3588 &pll2_pfd_352M, &pll2_pfd_400M, NULL, NULL);
3589 reg |= (mux << MXC_CCM_CBCMR_GPU2D_CLK_SEL_OFFSET);
3590 __raw_writel(reg, MXC_CCM_CBCMR);
/* Read back the GPU2D core rate: parent / (GPU2D PODF + 1) from CBCMR. */
3595 static unsigned long _clk_gpu2d_core_get_rate(struct clk *clk)
3599 	reg = __raw_readl(MXC_CCM_CBCMR);
3600 	div = ((reg & MXC_CCM_CBCMR_GPU2D_CORE_PODF_MASK) >>
3601 		MXC_CCM_CBCMR_GPU2D_CORE_PODF_OFFSET) + 1;
3603 	return clk_get_rate(clk->parent) / div;
3606 static int _clk_gpu2d_core_set_rate(struct clk *clk, unsigned long rate)
3609 u32 parent_rate = clk_get_rate(clk->parent);
3611 div = parent_rate / rate;
3614 if (((parent_rate / div) != rate) || (div > 8))
3617 reg = __raw_readl(MXC_CCM_CBCMR);
3618 reg &= ~MXC_CCM_CBCMR_GPU3D_CORE_PODF_MASK;
3619 reg |= (div - 1) << MXC_CCM_CBCMR_GPU3D_CORE_PODF_OFFSET;
3620 __raw_writel(reg, MXC_CCM_CBCMR);
/* GPU2D core clock: default parent pll2_pfd_352M, gated by CCGR1/CG12. */
3624 static struct clk gpu2d_core_clk = {
3625 	__INIT_CLK_DEBUG(gpu2d_core_clk)
3626 	.parent = &pll2_pfd_352M,
3627 	.enable = _clk_enable,
3628 	.enable_reg = MXC_CCM_CCGR1,
3629 	.enable_shift = MXC_CCM_CCGRx_CG12_OFFSET,
3630 	.disable = _clk_disable,
3631 	.set_parent = _clk_gpu2d_core_set_parent,
3632 	.set_rate = _clk_gpu2d_core_set_rate,
3633 	.get_rate = _clk_gpu2d_core_get_rate,
3634 	.round_rate = _clk_gpu2d_core_round_rate,
/*
 * Round @rate for the single-stage GPU3D shader divider.
 * NOTE(review): the div clamp described by the comment is elided.
 */
3637 static unsigned long _clk_gpu3d_shader_round_rate(struct clk *clk,
3641 	u32 parent_rate = clk_get_rate(clk->parent);
3643 	div = parent_rate / rate;
3645 	/* Make sure rate is not greater than the maximum value for the clock.
3646 	 * Also prevent a div of 0.
3654 	return parent_rate / div;
3657 static int _clk_gpu3d_shader_set_parent(struct clk *clk, struct clk *parent)
3660 u32 reg = __raw_readl(MXC_CCM_CBCMR)
3661 & MXC_CCM_CBCMR_GPU3D_SHADER_CLK_SEL_MASK;
3663 mux = _get_mux6(parent, &mmdc_ch0_axi_clk[0],
3664 &pll3_usb_otg_main_clk,
3665 &pll2_pfd_594M, &pll3_pfd_720M, NULL, NULL);
3666 reg |= (mux << MXC_CCM_CBCMR_GPU3D_SHADER_CLK_SEL_OFFSET);
3667 __raw_writel(reg, MXC_CCM_CBCMR);
/* Read back the GPU3D shader rate: parent / (PODF + 1) from CBCMR. */
3672 static unsigned long _clk_gpu3d_shader_get_rate(struct clk *clk)
3676 	reg = __raw_readl(MXC_CCM_CBCMR);
3677 	div = ((reg & MXC_CCM_CBCMR_GPU3D_SHADER_PODF_MASK) >>
3678 		MXC_CCM_CBCMR_GPU3D_SHADER_PODF_OFFSET) + 1;
3680 	return clk_get_rate(clk->parent) / div;
/*
 * Program the 3-bit GPU3D shader divider (1..8) in CBCMR for an
 * exactly dividable @rate.
 * NOTE(review): -EINVAL / return 0 lines are elided in this listing.
 */
3683 static int _clk_gpu3d_shader_set_rate(struct clk *clk, unsigned long rate)
3686 	u32 parent_rate = clk_get_rate(clk->parent);
3688 	div = parent_rate / rate;
3691 	if (((parent_rate / div) != rate) || (div > 8))
3694 	reg = __raw_readl(MXC_CCM_CBCMR);
3695 	reg &= ~MXC_CCM_CBCMR_GPU3D_SHADER_PODF_MASK;
3696 	reg |= (div - 1) << MXC_CCM_CBCMR_GPU3D_SHADER_PODF_OFFSET;
3697 	__raw_writel(reg, MXC_CCM_CBCMR);
/* GPU3D shader clock: default parent pll3_pfd_720M; shares the
 * CCGR1/CG13 gate with gpu3d_core_clk. */
3703 static struct clk gpu3d_shader_clk = {
3704 	__INIT_CLK_DEBUG(gpu3d_shader_clk)
3705 	.parent = &pll3_pfd_720M,
3706 	.enable = _clk_enable,
3707 	.enable_reg = MXC_CCM_CCGR1,
3708 	.enable_shift = MXC_CCM_CCGRx_CG13_OFFSET,
3709 	.disable = _clk_disable,
3710 	.set_parent = _clk_gpu3d_shader_set_parent,
3711 	.set_rate = _clk_gpu3d_shader_set_rate,
3712 	.get_rate = _clk_gpu3d_shader_get_rate,
3713 	.round_rate = _clk_gpu3d_shader_round_rate,
/*
 * GPMI NAND clock chain: [0] io (CG14) -> [1] apb (CG15, child of
 * apbh_dma_clk) -> [2] bch (CG13) -> [3] bch_apb (CG12, child of
 * apbh_dma_clk), linked via .secondary so enabling [0] pulls the rest.
 */
3716 static struct clk gpmi_nfc_clk[] = {
3718 	__INIT_CLK_DEBUG(gpmi_io_clk)
3720 	.secondary = &gpmi_nfc_clk[1],
3721 	.enable = _clk_enable,
3722 	.enable_reg = MXC_CCM_CCGR4,
3723 	.enable_shift = MXC_CCM_CCGRx_CG14_OFFSET,
3724 	.disable = _clk_disable,
3726 	{ /* gpmi_apb_clk */
3727 	__INIT_CLK_DEBUG(gpmi_apb_clk)
3728 	.parent = &apbh_dma_clk,
3729 	.secondary = &gpmi_nfc_clk[2],
3730 	.enable = _clk_enable,
3731 	.enable_reg = MXC_CCM_CCGR4,
3732 	.enable_shift = MXC_CCM_CCGRx_CG15_OFFSET,
3733 	.disable = _clk_disable,
3736 	__INIT_CLK_DEBUG(gpmi_bch_clk)
3738 	.secondary = &gpmi_nfc_clk[3],
3739 	.enable = _clk_enable,
3740 	.enable_reg = MXC_CCM_CCGR4,
3741 	.enable_shift = MXC_CCM_CCGRx_CG13_OFFSET,
3742 	.disable = _clk_disable,
3745 	__INIT_CLK_DEBUG(gpmi_bch_apb_clk)
3746 	.parent = &apbh_dma_clk,
3747 	.enable = _clk_enable,
3748 	.enable_reg = MXC_CCM_CCGR4,
3749 	.enable_shift = MXC_CCM_CCGRx_CG12_OFFSET,
3750 	.disable = _clk_disable,
/*
 * PWM1..PWM4 clocks: gate-only children of ipg_perclk, on consecutive
 * CCGR4 gates CG8..CG11.
 */
3754 static struct clk pwm_clk[] = {
3756 	__INIT_CLK_DEBUG(pwm_clk_0)
3757 	.parent = &ipg_perclk,
3759 	.enable_reg = MXC_CCM_CCGR4,
3760 	.enable_shift = MXC_CCM_CCGRx_CG8_OFFSET,
3761 	.enable = _clk_enable,
3762 	.disable = _clk_disable,
3765 	__INIT_CLK_DEBUG(pwm_clk_1)
3766 	.parent = &ipg_perclk,
3768 	.enable_reg = MXC_CCM_CCGR4,
3769 	.enable_shift = MXC_CCM_CCGRx_CG9_OFFSET,
3770 	.enable = _clk_enable,
3771 	.disable = _clk_disable,
3774 	__INIT_CLK_DEBUG(pwm_clk_2)
3775 	.parent = &ipg_perclk,
3777 	.enable_reg = MXC_CCM_CCGR4,
3778 	.enable_shift = MXC_CCM_CCGRx_CG10_OFFSET,
3779 	.enable = _clk_enable,
3780 	.disable = _clk_disable,
3783 	__INIT_CLK_DEBUG(pwm_clk_3)
3784 	.parent = &ipg_perclk,
3786 	.enable_reg = MXC_CCM_CCGR4,
3787 	.enable_shift = MXC_CCM_CCGRx_CG11_OFFSET,
3788 	.enable = _clk_enable,
3789 	.disable = _clk_disable,
/* Enable the PCIe reference clock output of the anatop ENET PLL
 * (EN_PCIE bit in the PLL8 register). */
3793 static int _clk_pcie_enable(struct clk *clk)
3797 	/* Enable PCIe ref clock (comment fixed: was a copy-paste "SATA") */
3798 	reg = __raw_readl(PLL8_ENET_BASE_ADDR);
3799 	reg |= ANADIG_PLL_ENET_EN_PCIE;
3800 	__raw_writel(reg, PLL8_ENET_BASE_ADDR);
/* Disable the PCIe reference clock output (EN_PCIE bit in PLL8). */
3807 static void _clk_pcie_disable(struct clk *clk)
3813 	/* Disable PCIe ref clock (comment fixed: was a copy-paste "SATA") */
3814 	reg = __raw_readl(PLL8_ENET_BASE_ADDR);
3815 	reg &= ~ANADIG_PLL_ENET_EN_PCIE;
3816 	__raw_writel(reg, PLL8_ENET_BASE_ADDR);
/* PCIe clock: child of pcie_axi_clk, CCM gate CCGR4/CG0, plus the
 * PLL8 EN_PCIE reference-clock helpers above. */
3819 static struct clk pcie_clk = {
3820 	__INIT_CLK_DEBUG(pcie_clk)
3821 	.parent = &pcie_axi_clk,
3822 	.enable = _clk_pcie_enable,
3823 	.disable = _clk_pcie_disable,
3824 	.enable_reg = MXC_CCM_CCGR4,
3825 	.enable_shift = MXC_CCM_CCGRx_CG0_OFFSET,
/* Enable the SATA reference clock output of the anatop ENET PLL
 * (EN_SATA bit in the PLL8 register). */
3828 static int _clk_sata_enable(struct clk *clk)
3832 	/* Enable SATA ref clock */
3833 	reg = __raw_readl(PLL8_ENET_BASE_ADDR);
3834 	reg |= ANADIG_PLL_ENET_EN_SATA;
3835 	__raw_writel(reg, PLL8_ENET_BASE_ADDR);
/* Disable the SATA reference clock output (EN_SATA bit in PLL8). */
3842 static void _clk_sata_disable(struct clk *clk)
3848 	/* Disable SATA ref clock */
3849 	reg = __raw_readl(PLL8_ENET_BASE_ADDR);
3850 	reg &= ~ANADIG_PLL_ENET_EN_SATA;
3851 	__raw_writel(reg, PLL8_ENET_BASE_ADDR);
/* SATA clock: CCM gate CCGR5/CG2 plus the PLL8 EN_SATA helpers. */
3854 static struct clk sata_clk = {
3855 	__INIT_CLK_DEBUG(sata_clk)
3857 	.enable = _clk_sata_enable,
3858 	.enable_reg = MXC_CCM_CCGR5,
3859 	.enable_shift = MXC_CCM_CCGRx_CG2_OFFSET,
3860 	.disable = _clk_sata_disable,
/* USB OH3 (OTG + host) controller clock: gate-only (CCGR6/CG0). */
3863 static struct clk usboh3_clk = {
3864 	__INIT_CLK_DEBUG(usboh3_clk)
3866 	.enable = _clk_enable,
3867 	.enable_reg = MXC_CCM_CCGR6,
3868 	.enable_shift = MXC_CCM_CCGRx_CG0_OFFSET,
3869 	.disable = _clk_disable,
3872 #define _REGISTER_CLOCK(d, n, c) \
3880 static struct clk_lookup lookups[] = {
3881 _REGISTER_CLOCK(NULL, "osc", osc_clk),
3882 _REGISTER_CLOCK(NULL, "ckih", ckih_clk),
3883 _REGISTER_CLOCK(NULL, "ckih2", ckih2_clk),
3884 _REGISTER_CLOCK(NULL, "ckil", ckil_clk),
3885 _REGISTER_CLOCK(NULL, "pll1_main_clk", pll1_sys_main_clk),
3886 _REGISTER_CLOCK(NULL, "pll1_sw_clk", pll1_sw_clk),
3887 _REGISTER_CLOCK(NULL, "pll2", pll2_528_bus_main_clk),
3888 _REGISTER_CLOCK(NULL, "pll2_pfd_400M", pll2_pfd_400M),
3889 _REGISTER_CLOCK(NULL, "pll2_pfd_352M", pll2_pfd_352M),
3890 _REGISTER_CLOCK(NULL, "pll2_pfd_594M", pll2_pfd_594M),
3891 _REGISTER_CLOCK(NULL, "pll2_200M", pll2_200M),
3892 _REGISTER_CLOCK(NULL, "pll3_main_clk", pll3_usb_otg_main_clk),
3893 _REGISTER_CLOCK(NULL, "pll3_pfd_508M", pll3_pfd_508M),
3894 _REGISTER_CLOCK(NULL, "pll3_pfd_454M", pll3_pfd_454M),
3895 _REGISTER_CLOCK(NULL, "pll3_pfd_720M", pll3_pfd_720M),
3896 _REGISTER_CLOCK(NULL, "pll3_pfd_540M", pll3_pfd_540M),
3897 _REGISTER_CLOCK(NULL, "pll3_sw_clk", pll3_sw_clk),
3898 _REGISTER_CLOCK(NULL, "pll3_120M", pll3_120M),
3899 _REGISTER_CLOCK(NULL, "pll3_120M", pll3_80M),
3900 _REGISTER_CLOCK(NULL, "pll3_120M", pll3_60M),
3901 _REGISTER_CLOCK(NULL, "pll4", pll4_audio_main_clk),
3902 _REGISTER_CLOCK(NULL, "pll5", pll5_video_main_clk),
3903 _REGISTER_CLOCK(NULL, "pll4", pll6_MLB_main_clk),
3904 _REGISTER_CLOCK(NULL, "pll3", pll7_usb_host_main_clk),
3905 _REGISTER_CLOCK(NULL, "pll4", pll8_enet_main_clk),
3906 _REGISTER_CLOCK(NULL, "cpu_clk", cpu_clk),
3907 _REGISTER_CLOCK(NULL, "periph_clk", periph_clk),
3908 _REGISTER_CLOCK(NULL, "axi_clk", axi_clk),
3909 _REGISTER_CLOCK(NULL, "mmdc_ch0_axi", mmdc_ch0_axi_clk[0]),
3910 _REGISTER_CLOCK(NULL, "mmdc_ch1_axi", mmdc_ch1_axi_clk[0]),
3911 _REGISTER_CLOCK(NULL, "ahb", ahb_clk),
3912 _REGISTER_CLOCK(NULL, "ipg_clk", ipg_clk),
3913 _REGISTER_CLOCK(NULL, "ipg_perclk", ipg_perclk),
3914 _REGISTER_CLOCK(NULL, "spba", spba_clk),
3915 _REGISTER_CLOCK("imx-sdma", NULL, sdma_clk),
3916 _REGISTER_CLOCK(NULL, "gpu2d_axi_clk", gpu2d_axi_clk),
3917 _REGISTER_CLOCK(NULL, "gpu3d_axi_clk", gpu3d_axi_clk),
3918 _REGISTER_CLOCK(NULL, "pcie_axi_clk", pcie_axi_clk),
3919 _REGISTER_CLOCK(NULL, "vdo_axi_clk", vdo_axi_clk),
3920 _REGISTER_CLOCK(NULL, "iim_clk", iim_clk),
3921 _REGISTER_CLOCK(NULL, "i2c_clk", i2c_clk[0]),
3922 _REGISTER_CLOCK("imx-i2c.1", NULL, i2c_clk[1]),
3923 _REGISTER_CLOCK("imx-i2c.2", NULL, i2c_clk[2]),
3924 _REGISTER_CLOCK(NULL, "vpu_clk", vpu_clk),
3925 _REGISTER_CLOCK(NULL, "ipu1_clk", ipu1_clk),
3926 _REGISTER_CLOCK(NULL, "ipu2_clk", ipu2_clk),
3927 _REGISTER_CLOCK("sdhci-esdhc-imx.0", NULL, usdhc1_clk),
3928 _REGISTER_CLOCK("sdhci-esdhc-imx.1", NULL, usdhc2_clk),
3929 _REGISTER_CLOCK("sdhci-esdhc-imx.2", NULL, usdhc3_clk),
3930 _REGISTER_CLOCK("sdhci-esdhc-imx.3", NULL, usdhc4_clk),
3931 _REGISTER_CLOCK("imx-ssi.0", NULL, ssi1_clk),
3932 _REGISTER_CLOCK("imx-ssi.1", NULL, ssi2_clk),
3933 _REGISTER_CLOCK("imx-ssi.2", NULL, ssi3_clk),
3934 _REGISTER_CLOCK(NULL, "ipu1_di0_clk", ipu1_di_clk[0]),
3935 _REGISTER_CLOCK(NULL, "ipu1_di1_clk", ipu1_di_clk[1]),
3936 _REGISTER_CLOCK(NULL, "ipu2_di0_clk", ipu2_di_clk[0]),
3937 _REGISTER_CLOCK(NULL, "ipu2_di1_clk", ipu2_di_clk[1]),
3938 _REGISTER_CLOCK("FlexCAN.0", "can_clk", can1_clk[0]),
3939 _REGISTER_CLOCK("FlexCAN.1", "can_clk", can2_clk[0]),
3940 _REGISTER_CLOCK(NULL, "ldb_di0_clk", ldb_di0_clk),
3941 _REGISTER_CLOCK(NULL, "ldb_di1_clk", ldb_di1_clk),
3942 _REGISTER_CLOCK("mxc_alsa_spdif.0", NULL, spdif0_clk[0]),
3943 _REGISTER_CLOCK("mxc_alsa_spdif.1", NULL, spdif1_clk[0]),
3944 _REGISTER_CLOCK(NULL, "esai_clk", esai_clk),
3945 _REGISTER_CLOCK("mxc_spi.0", NULL, ecspi_clk[0]),
3946 _REGISTER_CLOCK("mxc_spi.1", NULL, ecspi_clk[1]),
3947 _REGISTER_CLOCK("mxc_spi.2", NULL, ecspi_clk[2]),
3948 _REGISTER_CLOCK("mxc_spi.3", NULL, ecspi_clk[3]),
3949 _REGISTER_CLOCK("mxc_spi.4", NULL, ecspi_clk[4]),
3950 _REGISTER_CLOCK(NULL, "emi_slow_clk", emi_slow_clk),
3951 _REGISTER_CLOCK(NULL, "emi_clk", emi_clk),
3952 _REGISTER_CLOCK(NULL, "enfc_clk", enfc_clk),
3953 _REGISTER_CLOCK("imx-uart.0", NULL, uart_clk[0]),
3954 _REGISTER_CLOCK(NULL, "hsi_tx", hsi_tx_clk),
3955 _REGISTER_CLOCK(NULL, "caam_clk", caam_clk[0]),
3956 _REGISTER_CLOCK(NULL, "asrc_clk", asrc_clk),
3957 _REGISTER_CLOCK(NULL, "apbh_dma_clk", apbh_dma_clk),
3958 _REGISTER_CLOCK(NULL, "openvg_axi_clk", openvg_axi_clk),
3959 _REGISTER_CLOCK(NULL, "gpu3d_clk", gpu3d_core_clk),
3960 _REGISTER_CLOCK(NULL, "gpu2d_clk", gpu2d_core_clk),
3961 _REGISTER_CLOCK(NULL, "gpu3d_shader_clk", gpu3d_shader_clk),
3962 _REGISTER_CLOCK(NULL, "gpt", gpt_clk[0]),
3963 _REGISTER_CLOCK(NULL, "gpmi-nfc", gpmi_nfc_clk[0]),
3964 _REGISTER_CLOCK(NULL, "gpmi-apb", gpmi_nfc_clk[1]),
3965 _REGISTER_CLOCK(NULL, "bch", gpmi_nfc_clk[2]),
3966 _REGISTER_CLOCK(NULL, "bch-apb", gpmi_nfc_clk[3]),
3967 _REGISTER_CLOCK("mxc_pwm.0", NULL, pwm_clk[0]),
3968 _REGISTER_CLOCK("mxc_pwm.1", NULL, pwm_clk[1]),
3969 _REGISTER_CLOCK("mxc_pwm.2", NULL, pwm_clk[2]),
3970 _REGISTER_CLOCK("mxc_pwm.3", NULL, pwm_clk[3]),
3971 _REGISTER_CLOCK(NULL, "pcie_clk", pcie_clk),
3972 _REGISTER_CLOCK(NULL, "enet_clk", enet_clk),
3973 _REGISTER_CLOCK(NULL, "imx_sata_clk", sata_clk),
3974 _REGISTER_CLOCK(NULL, "usboh3_clk", usboh3_clk),
3975 _REGISTER_CLOCK(NULL, "usb_phy1_clk", usb_phy1_clk),
3976 _REGISTER_CLOCK(NULL, "usb_phy2_clk", usb_phy2_clk),
3977 _REGISTER_CLOCK(NULL, "video_27M_clk", video_27M_clk),
3981 static void clk_tree_init(void)
3988 int __init mx6_clocks_init(unsigned long ckil, unsigned long osc,
3989 unsigned long ckih1, unsigned long ckih2)
3996 external_low_reference = ckil;
3997 external_high_reference = ckih1;
3998 ckih2_reference = ckih2;
3999 oscillator_reference = osc;
4001 apll_base = ioremap(ANATOP_BASE_ADDR, SZ_4K);
4005 for (i = 0; i < ARRAY_SIZE(lookups); i++) {
4006 clkdev_add(&lookups[i]);
4007 clk_debug_register(lookups[i].clk);
4010 /* Make sure all clocks are ON initially */
4011 __raw_writel(0xFFFFFFFF, MXC_CCM_CCGR0);
4012 __raw_writel(0xFFFFFFFF, MXC_CCM_CCGR1);
4013 __raw_writel(0xFFFFFFFF, MXC_CCM_CCGR2);
4014 __raw_writel(0xFFFFFFFF, MXC_CCM_CCGR3);
4015 __raw_writel(0xFFFFFFFF, MXC_CCM_CCGR4);
4016 __raw_writel(0xFFFFFFFF, MXC_CCM_CCGR5);
4017 __raw_writel(0xFFFFFFFF, MXC_CCM_CCGR6);
4019 base = ioremap(GPT_BASE_ADDR, SZ_4K);
4020 mxc_timer_init(&gpt_clk[0], base, MXC_INT_GPT);