2 * linux/arch/arm/plat-omap/dmtimer.c
4 * OMAP Dual-Mode Timers
6 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
7 * Tarun Kanti DebBarma <tarun.kanti@ti.com>
8 * Thara Gopinath <thara@ti.com>
10 * dmtimer adaptation to platform_driver.
12 * Copyright (C) 2005 Nokia Corporation
13 * OMAP2 support by Juha Yrjola
14 * API improvements and OMAP2 clock framework support by Timo Teras
16 * Copyright (C) 2009 Texas Instruments
17 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
19 * This program is free software; you can redistribute it and/or modify it
20 * under the terms of the GNU General Public License as published by the
21 * Free Software Foundation; either version 2 of the License, or (at your
22 * option) any later version.
24 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
25 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
26 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
27 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
28 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
29 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 * You should have received a copy of the GNU General Public License along
34 * with this program; if not, write to the Free Software Foundation, Inc.,
35 * 675 Mass Ave, Cambridge, MA 02139, USA.
38 #include <linux/module.h>
40 #include <linux/slab.h>
41 #include <linux/err.h>
42 #include <linux/pm_runtime.h>
44 #include <plat/dmtimer.h>
46 #include <mach/hardware.h>
48 static LIST_HEAD(omap_timer_list);
49 static DEFINE_SPINLOCK(dm_timer_lock);
52 * omap_dm_timer_read_reg - read timer registers in posted and non-posted mode
53 * @timer: timer pointer over which read operation to perform
54 * @reg: lowest byte holds the register offset
56 * The posted mode bit is encoded in reg. Note that in posted mode write
57 * pending bit must be checked. Otherwise a read of a non completed write
58 * will produce an error.
60 static inline u32 omap_dm_timer_read_reg(struct omap_dm_timer *timer, u32 reg)
62 WARN_ON((reg & 0xff) < _OMAP_TIMER_WAKEUP_EN_OFFSET);
63 return __omap_dm_timer_read(timer, reg, timer->posted);
67 * omap_dm_timer_write_reg - write timer registers in posted and non-posted mode
68 * @timer: timer pointer over which write operation is to perform
69 * @reg: lowest byte holds the register offset
70 * @value: data to write into the register
72 * The posted mode bit is encoded in reg. Note that in posted mode the write
73 * pending bit must be checked. Otherwise a write on a register which has a
74 * pending write will be lost.
76 static void omap_dm_timer_write_reg(struct omap_dm_timer *timer, u32 reg,
79 WARN_ON((reg & 0xff) < _OMAP_TIMER_WAKEUP_EN_OFFSET);
80 __omap_dm_timer_write(timer, reg, value, timer->posted);
83 static void omap_timer_restore_context(struct omap_dm_timer *timer)
85 __raw_writel(timer->context.tiocp_cfg,
86 timer->io_base + OMAP_TIMER_OCP_CFG_OFFSET);
87 if (timer->revision == 1)
88 __raw_writel(timer->context.tistat, timer->sys_stat);
90 __raw_writel(timer->context.tisr, timer->irq_stat);
91 omap_dm_timer_write_reg(timer, OMAP_TIMER_WAKEUP_EN_REG,
93 omap_dm_timer_write_reg(timer, OMAP_TIMER_COUNTER_REG,
95 omap_dm_timer_write_reg(timer, OMAP_TIMER_LOAD_REG,
97 omap_dm_timer_write_reg(timer, OMAP_TIMER_MATCH_REG,
99 omap_dm_timer_write_reg(timer, OMAP_TIMER_IF_CTRL_REG,
100 timer->context.tsicr);
101 __raw_writel(timer->context.tier, timer->irq_ena);
102 omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG,
103 timer->context.tclr);
106 static void omap_dm_timer_wait_for_reset(struct omap_dm_timer *timer)
110 if (!timer->sys_stat)
114 while (!(__raw_readl(timer->sys_stat) & 1)) {
117 printk(KERN_ERR "Timer failed to reset\n");
123 static void omap_dm_timer_reset(struct omap_dm_timer *timer)
125 omap_dm_timer_enable(timer);
126 if (timer->pdev->id != 1) {
127 omap_dm_timer_write_reg(timer, OMAP_TIMER_IF_CTRL_REG, 0x06);
128 omap_dm_timer_wait_for_reset(timer);
131 __omap_dm_timer_reset(timer, 0, 0);
132 omap_dm_timer_disable(timer);
136 int omap_dm_timer_prepare(struct omap_dm_timer *timer)
138 struct dmtimer_platform_data *pdata = timer->pdev->dev.platform_data;
141 timer->fclk = clk_get(&timer->pdev->dev, "fck");
142 if (WARN_ON_ONCE(IS_ERR_OR_NULL(timer->fclk))) {
144 dev_err(&timer->pdev->dev, ": No fclk handle.\n");
148 if (pdata->needs_manual_reset)
149 omap_dm_timer_reset(timer);
151 ret = omap_dm_timer_set_source(timer, OMAP_TIMER_SRC_32_KHZ);
157 struct omap_dm_timer *omap_dm_timer_request(void)
159 struct omap_dm_timer *timer = NULL, *t;
163 spin_lock_irqsave(&dm_timer_lock, flags);
164 list_for_each_entry(t, &omap_timer_list, node) {
174 ret = omap_dm_timer_prepare(timer);
180 spin_unlock_irqrestore(&dm_timer_lock, flags);
183 pr_debug("%s: timer request failed!\n", __func__);
187 EXPORT_SYMBOL_GPL(omap_dm_timer_request);
189 struct omap_dm_timer *omap_dm_timer_request_specific(int id)
191 struct omap_dm_timer *timer = NULL, *t;
195 spin_lock_irqsave(&dm_timer_lock, flags);
196 list_for_each_entry(t, &omap_timer_list, node) {
197 if (t->pdev->id == id && !t->reserved) {
205 ret = omap_dm_timer_prepare(timer);
211 spin_unlock_irqrestore(&dm_timer_lock, flags);
214 pr_debug("%s: timer%d request failed!\n", __func__, id);
218 EXPORT_SYMBOL_GPL(omap_dm_timer_request_specific);
220 int omap_dm_timer_free(struct omap_dm_timer *timer)
222 if (unlikely(!timer))
225 clk_put(timer->fclk);
227 WARN_ON(!timer->reserved);
231 EXPORT_SYMBOL_GPL(omap_dm_timer_free);
233 void omap_dm_timer_enable(struct omap_dm_timer *timer)
235 pm_runtime_get_sync(&timer->pdev->dev);
237 EXPORT_SYMBOL_GPL(omap_dm_timer_enable);
239 void omap_dm_timer_disable(struct omap_dm_timer *timer)
241 pm_runtime_put(&timer->pdev->dev);
243 EXPORT_SYMBOL_GPL(omap_dm_timer_disable);
245 int omap_dm_timer_get_irq(struct omap_dm_timer *timer)
251 EXPORT_SYMBOL_GPL(omap_dm_timer_get_irq);
253 #if defined(CONFIG_ARCH_OMAP1)
256 * omap_dm_timer_modify_idlect_mask - Check if any running timers use ARMXOR
257 * @inputmask: current value of idlect mask
259 __u32 omap_dm_timer_modify_idlect_mask(__u32 inputmask)
262 struct omap_dm_timer *timer = NULL;
265 /* If ARMXOR cannot be idled this function call is unnecessary */
266 if (!(inputmask & (1 << 1)))
269 /* If any active timer is using ARMXOR return modified mask */
270 spin_lock_irqsave(&dm_timer_lock, flags);
271 list_for_each_entry(timer, &omap_timer_list, node) {
274 l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
275 if (l & OMAP_TIMER_CTRL_ST) {
276 if (((omap_readl(MOD_CONF_CTRL_1) >> (i * 2)) & 0x03) == 0)
277 inputmask &= ~(1 << 1);
279 inputmask &= ~(1 << 2);
283 spin_unlock_irqrestore(&dm_timer_lock, flags);
287 EXPORT_SYMBOL_GPL(omap_dm_timer_modify_idlect_mask);
291 struct clk *omap_dm_timer_get_fclk(struct omap_dm_timer *timer)
297 EXPORT_SYMBOL_GPL(omap_dm_timer_get_fclk);
299 __u32 omap_dm_timer_modify_idlect_mask(__u32 inputmask)
305 EXPORT_SYMBOL_GPL(omap_dm_timer_modify_idlect_mask);
309 int omap_dm_timer_trigger(struct omap_dm_timer *timer)
311 if (unlikely(!timer || pm_runtime_suspended(&timer->pdev->dev))) {
312 pr_err("%s: timer not available or enabled.\n", __func__);
316 omap_dm_timer_write_reg(timer, OMAP_TIMER_TRIGGER_REG, 0);
319 EXPORT_SYMBOL_GPL(omap_dm_timer_trigger);
321 int omap_dm_timer_start(struct omap_dm_timer *timer)
325 if (unlikely(!timer))
328 omap_dm_timer_enable(timer);
330 if (timer->loses_context) {
331 u32 ctx_loss_cnt_after =
332 timer->get_context_loss_count(&timer->pdev->dev);
333 if (ctx_loss_cnt_after != timer->ctx_loss_count)
334 omap_timer_restore_context(timer);
337 l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
338 if (!(l & OMAP_TIMER_CTRL_ST)) {
339 l |= OMAP_TIMER_CTRL_ST;
340 omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
343 /* Save the context */
344 timer->context.tclr = l;
347 EXPORT_SYMBOL_GPL(omap_dm_timer_start);
349 int omap_dm_timer_stop(struct omap_dm_timer *timer)
351 unsigned long rate = 0;
352 struct dmtimer_platform_data *pdata = timer->pdev->dev.platform_data;
354 if (unlikely(!timer))
357 if (!pdata->needs_manual_reset)
358 rate = clk_get_rate(timer->fclk);
360 __omap_dm_timer_stop(timer, timer->posted, rate);
362 if (timer->loses_context && timer->get_context_loss_count)
363 timer->ctx_loss_count =
364 timer->get_context_loss_count(&timer->pdev->dev);
367 * Since the register values are computed and written within
368 * __omap_dm_timer_stop, we need to use read to retrieve the
371 timer->context.tclr =
372 omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
373 timer->context.tisr = __raw_readl(timer->irq_stat);
374 omap_dm_timer_disable(timer);
377 EXPORT_SYMBOL_GPL(omap_dm_timer_stop);
379 int omap_dm_timer_set_source(struct omap_dm_timer *timer, int source)
382 struct dmtimer_platform_data *pdata;
384 if (unlikely(!timer))
387 pdata = timer->pdev->dev.platform_data;
389 if (source < 0 || source >= 3)
392 ret = pdata->set_timer_src(timer->pdev, source);
396 EXPORT_SYMBOL_GPL(omap_dm_timer_set_source);
398 int omap_dm_timer_set_load(struct omap_dm_timer *timer, int autoreload,
403 if (unlikely(!timer))
406 omap_dm_timer_enable(timer);
407 l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
409 l |= OMAP_TIMER_CTRL_AR;
411 l &= ~OMAP_TIMER_CTRL_AR;
412 omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
413 omap_dm_timer_write_reg(timer, OMAP_TIMER_LOAD_REG, load);
415 omap_dm_timer_write_reg(timer, OMAP_TIMER_TRIGGER_REG, 0);
416 /* Save the context */
417 timer->context.tclr = l;
418 timer->context.tldr = load;
419 omap_dm_timer_disable(timer);
422 EXPORT_SYMBOL_GPL(omap_dm_timer_set_load);
424 /* Optimized set_load which removes costly spin wait in timer_start */
425 int omap_dm_timer_set_load_start(struct omap_dm_timer *timer, int autoreload,
430 if (unlikely(!timer))
433 omap_dm_timer_enable(timer);
435 if (timer->loses_context) {
436 u32 ctx_loss_cnt_after =
437 timer->get_context_loss_count(&timer->pdev->dev);
438 if (ctx_loss_cnt_after != timer->ctx_loss_count)
439 omap_timer_restore_context(timer);
442 l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
444 l |= OMAP_TIMER_CTRL_AR;
445 omap_dm_timer_write_reg(timer, OMAP_TIMER_LOAD_REG, load);
447 l &= ~OMAP_TIMER_CTRL_AR;
449 l |= OMAP_TIMER_CTRL_ST;
451 __omap_dm_timer_load_start(timer, l, load, timer->posted);
453 /* Save the context */
454 timer->context.tclr = l;
455 timer->context.tldr = load;
456 timer->context.tcrr = load;
459 EXPORT_SYMBOL_GPL(omap_dm_timer_set_load_start);
461 int omap_dm_timer_set_match(struct omap_dm_timer *timer, int enable,
466 if (unlikely(!timer))
469 omap_dm_timer_enable(timer);
470 l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
472 l |= OMAP_TIMER_CTRL_CE;
474 l &= ~OMAP_TIMER_CTRL_CE;
475 omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
476 omap_dm_timer_write_reg(timer, OMAP_TIMER_MATCH_REG, match);
478 /* Save the context */
479 timer->context.tclr = l;
480 timer->context.tmar = match;
481 omap_dm_timer_disable(timer);
484 EXPORT_SYMBOL_GPL(omap_dm_timer_set_match);
486 int omap_dm_timer_set_pwm(struct omap_dm_timer *timer, int def_on,
487 int toggle, int trigger)
491 if (unlikely(!timer))
494 omap_dm_timer_enable(timer);
495 l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
496 l &= ~(OMAP_TIMER_CTRL_GPOCFG | OMAP_TIMER_CTRL_SCPWM |
497 OMAP_TIMER_CTRL_PT | (0x03 << 10));
499 l |= OMAP_TIMER_CTRL_SCPWM;
501 l |= OMAP_TIMER_CTRL_PT;
503 omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
505 /* Save the context */
506 timer->context.tclr = l;
507 omap_dm_timer_disable(timer);
510 EXPORT_SYMBOL_GPL(omap_dm_timer_set_pwm);
512 int omap_dm_timer_set_prescaler(struct omap_dm_timer *timer, int prescaler)
516 if (unlikely(!timer))
519 omap_dm_timer_enable(timer);
520 l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
521 l &= ~(OMAP_TIMER_CTRL_PRE | (0x07 << 2));
522 if (prescaler >= 0x00 && prescaler <= 0x07) {
523 l |= OMAP_TIMER_CTRL_PRE;
526 omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
528 /* Save the context */
529 timer->context.tclr = l;
530 omap_dm_timer_disable(timer);
533 EXPORT_SYMBOL_GPL(omap_dm_timer_set_prescaler);
535 int omap_dm_timer_set_int_enable(struct omap_dm_timer *timer,
538 if (unlikely(!timer))
541 omap_dm_timer_enable(timer);
542 __omap_dm_timer_int_enable(timer, value);
544 /* Save the context */
545 timer->context.tier = value;
546 timer->context.twer = value;
547 omap_dm_timer_disable(timer);
550 EXPORT_SYMBOL_GPL(omap_dm_timer_set_int_enable);
552 unsigned int omap_dm_timer_read_status(struct omap_dm_timer *timer)
556 if (unlikely(!timer || pm_runtime_suspended(&timer->pdev->dev))) {
557 pr_err("%s: timer not available or enabled.\n", __func__);
561 l = __raw_readl(timer->irq_stat);
565 EXPORT_SYMBOL_GPL(omap_dm_timer_read_status);
567 int omap_dm_timer_write_status(struct omap_dm_timer *timer, unsigned int value)
569 if (unlikely(!timer || pm_runtime_suspended(&timer->pdev->dev)))
572 __omap_dm_timer_write_status(timer, value);
573 /* Save the context */
574 timer->context.tisr = value;
577 EXPORT_SYMBOL_GPL(omap_dm_timer_write_status);
579 unsigned int omap_dm_timer_read_counter(struct omap_dm_timer *timer)
581 if (unlikely(!timer || pm_runtime_suspended(&timer->pdev->dev))) {
582 pr_err("%s: timer not iavailable or enabled.\n", __func__);
586 return __omap_dm_timer_read_counter(timer, timer->posted);
588 EXPORT_SYMBOL_GPL(omap_dm_timer_read_counter);
590 int omap_dm_timer_write_counter(struct omap_dm_timer *timer, unsigned int value)
592 if (unlikely(!timer || pm_runtime_suspended(&timer->pdev->dev))) {
593 pr_err("%s: timer not available or enabled.\n", __func__);
597 omap_dm_timer_write_reg(timer, OMAP_TIMER_COUNTER_REG, value);
599 /* Save the context */
600 timer->context.tcrr = value;
603 EXPORT_SYMBOL_GPL(omap_dm_timer_write_counter);
605 int omap_dm_timers_active(void)
607 struct omap_dm_timer *timer;
609 list_for_each_entry(timer, &omap_timer_list, node) {
610 if (!timer->reserved)
613 if (omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG) &
614 OMAP_TIMER_CTRL_ST) {
620 EXPORT_SYMBOL_GPL(omap_dm_timers_active);
623 * omap_dm_timer_probe - probe function called for every registered device
624 * @pdev: pointer to current timer platform device
626 * Called by driver framework at the end of device registration for all
629 static int __devinit omap_dm_timer_probe(struct platform_device *pdev)
633 struct omap_dm_timer *timer;
634 struct resource *mem, *irq, *ioarea;
635 struct dmtimer_platform_data *pdata = pdev->dev.platform_data;
638 dev_err(&pdev->dev, "%s: no platform data.\n", __func__);
642 irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
643 if (unlikely(!irq)) {
644 dev_err(&pdev->dev, "%s: no IRQ resource.\n", __func__);
648 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
649 if (unlikely(!mem)) {
650 dev_err(&pdev->dev, "%s: no memory resource.\n", __func__);
654 ioarea = request_mem_region(mem->start, resource_size(mem),
657 dev_err(&pdev->dev, "%s: region already claimed.\n", __func__);
661 timer = kzalloc(sizeof(struct omap_dm_timer), GFP_KERNEL);
663 dev_err(&pdev->dev, "%s: no memory for omap_dm_timer.\n",
666 goto err_free_ioregion;
669 timer->io_base = ioremap(mem->start, resource_size(mem));
670 if (!timer->io_base) {
671 dev_err(&pdev->dev, "%s: ioremap failed.\n", __func__);
676 timer->id = pdev->id;
677 timer->irq = irq->start;
678 timer->reserved = pdata->reserved;
680 timer->loses_context = pdata->loses_context;
681 timer->get_context_loss_count = pdata->get_context_loss_count;
683 /* Skip pm_runtime_enable for OMAP1 */
684 if (!pdata->needs_manual_reset) {
685 pm_runtime_enable(&pdev->dev);
686 pm_runtime_irq_safe(&pdev->dev);
689 if (!timer->reserved) {
690 pm_runtime_get_sync(&pdev->dev);
691 __omap_dm_timer_init_regs(timer);
692 pm_runtime_put(&pdev->dev);
695 /* add the timer element to the list */
696 spin_lock_irqsave(&dm_timer_lock, flags);
697 list_add_tail(&timer->node, &omap_timer_list);
698 spin_unlock_irqrestore(&dm_timer_lock, flags);
700 dev_dbg(&pdev->dev, "Device Probed.\n");
708 release_mem_region(mem->start, resource_size(mem));
714 * omap_dm_timer_remove - cleanup a registered timer device
715 * @pdev: pointer to current timer platform device
717 * Called by driver framework whenever a timer device is unregistered.
718 * In addition to freeing platform resources it also deletes the timer
719 * entry from the local list.
721 static int __devexit omap_dm_timer_remove(struct platform_device *pdev)
723 struct omap_dm_timer *timer;
727 spin_lock_irqsave(&dm_timer_lock, flags);
728 list_for_each_entry(timer, &omap_timer_list, node)
729 if (timer->pdev->id == pdev->id) {
730 list_del(&timer->node);
735 spin_unlock_irqrestore(&dm_timer_lock, flags);
740 static struct platform_driver omap_dm_timer_driver = {
741 .probe = omap_dm_timer_probe,
742 .remove = __devexit_p(omap_dm_timer_remove),
744 .name = "omap_timer",
748 static int __init omap_dm_timer_driver_init(void)
750 return platform_driver_register(&omap_dm_timer_driver);
753 static void __exit omap_dm_timer_driver_exit(void)
755 platform_driver_unregister(&omap_dm_timer_driver);
758 early_platform_init("earlytimer", &omap_dm_timer_driver);
759 module_init(omap_dm_timer_driver_init);
760 module_exit(omap_dm_timer_driver_exit);
762 MODULE_DESCRIPTION("OMAP Dual-Mode Timer Driver");
763 MODULE_LICENSE("GPL");
764 MODULE_ALIAS("platform:" DRIVER_NAME);
765 MODULE_AUTHOR("Texas Instruments Inc");