/*
 * SuperH Timer Support - TMU
 *
 * Copyright (C) 2009 Magnus Damm
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/sh_timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
enum sh_tmu_model {
	SH_TMU,
	SH_TMU_SH3,
};

struct sh_tmu_device;

struct sh_tmu_channel {
	struct sh_tmu_device *tmu;
	unsigned int index;

	void __iomem *base;
	int irq;

	unsigned long rate;
	unsigned long periodic;
	struct clock_event_device ced;
	struct clocksource cs;
	bool cs_enabled;
	unsigned int enable_count;
};
struct sh_tmu_device {
	struct platform_device *pdev;

	void __iomem *mapbase;
	struct clk *clk;

	enum sh_tmu_model model;

	raw_spinlock_t lock; /* Protect the shared start/stop register */

	struct sh_tmu_channel *channels;
	unsigned int num_channels;

	bool has_clockevent;
	bool has_clocksource;
};
#define TSTR -1 /* shared register */
#define TCOR  0 /* channel register */
#define TCNT  1 /* channel register */
#define TCR   2 /* channel register */

#define TCR_UNF			(1 << 8)
#define TCR_UNIE		(1 << 5)
#define TCR_TPSC_CLK4		(0 << 0)
#define TCR_TPSC_CLK16		(1 << 0)
#define TCR_TPSC_CLK64		(2 << 0)
#define TCR_TPSC_CLK256		(3 << 0)
#define TCR_TPSC_CLK1024	(4 << 0)
#define TCR_TPSC_MASK		(7 << 0)
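/*
 * Register layout: TSTR is a single 8-bit start/stop register shared by
 * all channels, while TCOR (reload constant), TCNT (counter) and TCR
 * (control) exist once per channel, at consecutive 32-bit offsets from
 * the channel base (hence the reg_nr << 2 below). TCR is accessed as a
 * 16-bit register; the counter registers are 32-bit.
 */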
static inline unsigned long sh_tmu_read(struct sh_tmu_channel *ch, int reg_nr)
{
	unsigned long offs;

	if (reg_nr == TSTR) {
		switch (ch->tmu->model) {
		case SH_TMU_SH3:
			return ioread8(ch->tmu->mapbase + 2);
		case SH_TMU:
			return ioread8(ch->tmu->mapbase + 4);
		}
	}

	offs = reg_nr << 2;

	if (reg_nr == TCR)
		return ioread16(ch->base + offs);
	else
		return ioread32(ch->base + offs);
}
static inline void sh_tmu_write(struct sh_tmu_channel *ch, int reg_nr,
				unsigned long value)
{
	unsigned long offs;

	if (reg_nr == TSTR) {
		switch (ch->tmu->model) {
		case SH_TMU_SH3:
			return iowrite8(value, ch->tmu->mapbase + 2);
		case SH_TMU:
			return iowrite8(value, ch->tmu->mapbase + 4);
		}
	}

	offs = reg_nr << 2;

	if (reg_nr == TCR)
		iowrite16(value, ch->base + offs);
	else
		iowrite32(value, ch->base + offs);
}
static void sh_tmu_start_stop_ch(struct sh_tmu_channel *ch, int start)
{
	unsigned long flags, value;

	/* start stop register shared by multiple timer channels */
	raw_spin_lock_irqsave(&ch->tmu->lock, flags);
	value = sh_tmu_read(ch, TSTR);

	if (start)
		value |= 1 << ch->index;
	else
		value &= ~(1 << ch->index);

	sh_tmu_write(ch, TSTR, value);
	raw_spin_unlock_irqrestore(&ch->tmu->lock, flags);
}
static int __sh_tmu_enable(struct sh_tmu_channel *ch)
{
	int ret;

	/* enable clock */
	ret = clk_enable(ch->tmu->clk);
	if (ret) {
		dev_err(&ch->tmu->pdev->dev, "ch%u: cannot enable clock\n",
			ch->index);
		return ret;
	}

	/* make sure channel is disabled */
	sh_tmu_start_stop_ch(ch, 0);

	/* maximum timeout */
	sh_tmu_write(ch, TCOR, 0xffffffff);
	sh_tmu_write(ch, TCNT, 0xffffffff);

	/* configure channel to parent clock / 4, irq off */
	ch->rate = clk_get_rate(ch->tmu->clk) / 4;
	sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);

	/* enable channel */
	sh_tmu_start_stop_ch(ch, 1);

	return 0;
}
static int sh_tmu_enable(struct sh_tmu_channel *ch)
{
	if (ch->enable_count++ > 0)
		return 0;

	pm_runtime_get_sync(&ch->tmu->pdev->dev);
	dev_pm_syscore_device(&ch->tmu->pdev->dev, true);

	return __sh_tmu_enable(ch);
}
static void __sh_tmu_disable(struct sh_tmu_channel *ch)
{
	/* disable channel */
	sh_tmu_start_stop_ch(ch, 0);

	/* disable interrupts in TMU block */
	sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);

	/* stop clock */
	clk_disable(ch->tmu->clk);
}
static void sh_tmu_disable(struct sh_tmu_channel *ch)
{
	if (WARN_ON(ch->enable_count == 0))
		return;

	if (--ch->enable_count > 0)
		return;

	__sh_tmu_disable(ch);

	dev_pm_syscore_device(&ch->tmu->pdev->dev, false);
	pm_runtime_put(&ch->tmu->pdev->dev);
}
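/*
 * Channel enables are reference counted: sh_tmu_enable() and
 * sh_tmu_disable() pair up, and only the first enable / last disable
 * touches the hardware and the runtime PM state. The clocksource
 * suspend/resume callbacks below adjust enable_count directly so that
 * a suspended channel is powered down without forgetting that it is
 * still logically in use.
 */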
static void sh_tmu_set_next(struct sh_tmu_channel *ch, unsigned long delta,
			    int periodic)
{
	/* stop timer */
	sh_tmu_start_stop_ch(ch, 0);

	/* acknowledge interrupt */
	sh_tmu_read(ch, TCR);

	/* enable interrupt */
	sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);

	/* reload delta value in case of periodic timer */
	if (periodic)
		sh_tmu_write(ch, TCOR, delta);
	else
		sh_tmu_write(ch, TCOR, 0xffffffff);

	sh_tmu_write(ch, TCNT, delta);

	/* start timer */
	sh_tmu_start_stop_ch(ch, 1);
}
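/*
 * The TMU counts down: an event fires when TCNT underflows, at which
 * point the hardware reloads TCNT from TCOR. Programming TCOR with the
 * delta therefore yields a free-running periodic timer, while setting
 * TCOR to the maximum value makes the channel effectively one-shot,
 * since the next underflow is pushed as far away as possible.
 */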
static irqreturn_t sh_tmu_interrupt(int irq, void *dev_id)
{
	struct sh_tmu_channel *ch = dev_id;

	/* disable or acknowledge interrupt */
	if (clockevent_state_oneshot(&ch->ced))
		sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);
	else
		sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);

	/* notify clockevent layer */
	ch->ced.event_handler(&ch->ced);
	return IRQ_HANDLED;
}
static struct sh_tmu_channel *cs_to_sh_tmu(struct clocksource *cs)
{
	return container_of(cs, struct sh_tmu_channel, cs);
}
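/*
 * A clocksource must count up monotonically, but TCNT counts down from
 * 0xffffffff. XOR-ing the raw counter with 0xffffffff inverts it into
 * the equivalent up-counter without branching.
 */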
static u64 sh_tmu_clocksource_read(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	return sh_tmu_read(ch, TCNT) ^ 0xffffffff;
}
static int sh_tmu_clocksource_enable(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
	int ret;

	if (WARN_ON(ch->cs_enabled))
		return 0;

	ret = sh_tmu_enable(ch);
	if (!ret) {
		__clocksource_update_freq_hz(cs, ch->rate);
		ch->cs_enabled = true;
	}

	return ret;
}
static void sh_tmu_clocksource_disable(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (WARN_ON(!ch->cs_enabled))
		return;

	sh_tmu_disable(ch);
	ch->cs_enabled = false;
}
static void sh_tmu_clocksource_suspend(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (!ch->cs_enabled)
		return;

	if (--ch->enable_count == 0) {
		__sh_tmu_disable(ch);
		pm_genpd_syscore_poweroff(&ch->tmu->pdev->dev);
	}
}
static void sh_tmu_clocksource_resume(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (!ch->cs_enabled)
		return;

	if (ch->enable_count++ == 0) {
		pm_genpd_syscore_poweron(&ch->tmu->pdev->dev);
		__sh_tmu_enable(ch);
	}
}
static int sh_tmu_register_clocksource(struct sh_tmu_channel *ch,
				       const char *name)
{
	struct clocksource *cs = &ch->cs;

	cs->name = name;
	cs->rating = 200;
	cs->read = sh_tmu_clocksource_read;
	cs->enable = sh_tmu_clocksource_enable;
	cs->disable = sh_tmu_clocksource_disable;
	cs->suspend = sh_tmu_clocksource_suspend;
	cs->resume = sh_tmu_clocksource_resume;
	cs->mask = CLOCKSOURCE_MASK(32);
	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;

	dev_info(&ch->tmu->pdev->dev, "ch%u: used as clock source\n",
		 ch->index);

	/* Register with dummy 1 Hz value, gets updated in ->enable() */
	clocksource_register_hz(cs, 1);
	return 0;
}
static struct sh_tmu_channel *ced_to_sh_tmu(struct clock_event_device *ced)
{
	return container_of(ced, struct sh_tmu_channel, ced);
}
static void sh_tmu_clock_event_start(struct sh_tmu_channel *ch, int periodic)
{
	struct clock_event_device *ced = &ch->ced;

	sh_tmu_enable(ch);

	clockevents_config(ced, ch->rate);

	if (periodic) {
		/* one tick per jiffy, rounded to the nearest count */
		ch->periodic = (ch->rate + HZ/2) / HZ;
		sh_tmu_set_next(ch, ch->periodic, 1);
	}
}
static int sh_tmu_clock_event_shutdown(struct clock_event_device *ced)
{
	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);

	if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
		sh_tmu_disable(ch);
	return 0;
}
static int sh_tmu_clock_event_set_state(struct clock_event_device *ced,
					int periodic)
{
	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);

	/* deal with old setting first */
	if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
		sh_tmu_disable(ch);

	dev_info(&ch->tmu->pdev->dev, "ch%u: used for %s clock events\n",
		 ch->index, periodic ? "periodic" : "oneshot");
	sh_tmu_clock_event_start(ch, periodic);
	return 0;
}
static int sh_tmu_clock_event_set_oneshot(struct clock_event_device *ced)
{
	return sh_tmu_clock_event_set_state(ced, 0);
}

static int sh_tmu_clock_event_set_periodic(struct clock_event_device *ced)
{
	return sh_tmu_clock_event_set_state(ced, 1);
}
static int sh_tmu_clock_event_next(unsigned long delta,
				   struct clock_event_device *ced)
{
	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);

	BUG_ON(!clockevent_state_oneshot(ced));

	/* program new delta value */
	sh_tmu_set_next(ch, delta, 0);
	return 0;
}
static void sh_tmu_clock_event_suspend(struct clock_event_device *ced)
{
	pm_genpd_syscore_poweroff(&ced_to_sh_tmu(ced)->tmu->pdev->dev);
}

static void sh_tmu_clock_event_resume(struct clock_event_device *ced)
{
	pm_genpd_syscore_poweron(&ced_to_sh_tmu(ced)->tmu->pdev->dev);
}
static void sh_tmu_register_clockevent(struct sh_tmu_channel *ch,
				       const char *name)
{
	struct clock_event_device *ced = &ch->ced;
	int ret;

	ced->name = name;
	ced->features = CLOCK_EVT_FEAT_PERIODIC;
	ced->features |= CLOCK_EVT_FEAT_ONESHOT;
	ced->rating = 200;
	ced->cpumask = cpu_possible_mask;
	ced->set_next_event = sh_tmu_clock_event_next;
	ced->set_state_shutdown = sh_tmu_clock_event_shutdown;
	ced->set_state_periodic = sh_tmu_clock_event_set_periodic;
	ced->set_state_oneshot = sh_tmu_clock_event_set_oneshot;
	ced->suspend = sh_tmu_clock_event_suspend;
	ced->resume = sh_tmu_clock_event_resume;

	dev_info(&ch->tmu->pdev->dev, "ch%u: used for clock events\n",
		 ch->index);

	clockevents_config_and_register(ced, 1, 0x300, 0xffffffff);

	ret = request_irq(ch->irq, sh_tmu_interrupt,
			  IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
			  dev_name(&ch->tmu->pdev->dev), ch);
	if (ret) {
		dev_err(&ch->tmu->pdev->dev, "ch%u: failed to request irq %d\n",
			ch->index, ch->irq);
		return;
	}
}
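/*
 * clockevents_config_and_register() takes (dev, freq, min_delta,
 * max_delta). The frequency is a 1 Hz placeholder here, like the
 * clocksource above; the real rate is only known once the clock is
 * enabled, and clockevents_config() in sh_tmu_clock_event_start()
 * updates it. 0x300 and 0xffffffff bound the programmable delta to
 * what the 32-bit down-counter can usefully handle.
 */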
static int sh_tmu_register(struct sh_tmu_channel *ch, const char *name,
			   bool clockevent, bool clocksource)
{
	if (clockevent) {
		ch->tmu->has_clockevent = true;
		sh_tmu_register_clockevent(ch, name);
	} else if (clocksource) {
		ch->tmu->has_clocksource = true;
		sh_tmu_register_clocksource(ch, name);
	}

	return 0;
}
static int sh_tmu_channel_setup(struct sh_tmu_channel *ch, unsigned int index,
				bool clockevent, bool clocksource,
				struct sh_tmu_device *tmu)
{
	/* Skip unused channels. */
	if (!clockevent && !clocksource)
		return 0;

	ch->tmu = tmu;
	ch->index = index;

	/*
	 * Per-channel register banks are 12 bytes apart; on SH3-style
	 * TMUs the first bank starts 4 bytes into the block, on the
	 * standard TMU at offset 8.
	 */
	if (tmu->model == SH_TMU_SH3)
		ch->base = tmu->mapbase + 4 + ch->index * 12;
	else
		ch->base = tmu->mapbase + 8 + ch->index * 12;

	ch->irq = platform_get_irq(tmu->pdev, index);
	if (ch->irq < 0) {
		dev_err(&tmu->pdev->dev, "ch%u: failed to get irq\n",
			ch->index);
		return ch->irq;
	}

	ch->cs_enabled = false;
	ch->enable_count = 0;

	return sh_tmu_register(ch, dev_name(&tmu->pdev->dev),
			       clockevent, clocksource);
}
static int sh_tmu_map_memory(struct sh_tmu_device *tmu)
{
	struct resource *res;

	res = platform_get_resource(tmu->pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&tmu->pdev->dev, "failed to get I/O memory\n");
		return -ENXIO;
	}

	tmu->mapbase = ioremap_nocache(res->start, resource_size(res));
	if (tmu->mapbase == NULL)
		return -ENXIO;

	return 0;
}
static int sh_tmu_parse_dt(struct sh_tmu_device *tmu)
{
	struct device_node *np = tmu->pdev->dev.of_node;

	tmu->model = SH_TMU;
	tmu->num_channels = 3;

	/* The device node may override the default number of channels. */
	of_property_read_u32(np, "#renesas,channels", &tmu->num_channels);

	if (tmu->num_channels != 2 && tmu->num_channels != 3) {
		dev_err(&tmu->pdev->dev, "invalid number of channels %u\n",
			tmu->num_channels);
		return -EINVAL;
	}

	return 0;
}
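/*
 * A device tree node matching this driver might look as follows (a
 * sketch; the unit address, interrupt and clock specifiers are
 * board-specific placeholders):
 *
 *	tmu0: timer@ffd80000 {
 *		compatible = "renesas,tmu";
 *		reg = <0xffd80000 0x30>;
 *		interrupts = <...>;
 *		clocks = <&tmu0_clk>;
 *		clock-names = "fck";
 *		#renesas,channels = <3>;
 *	};
 */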
static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev)
{
	unsigned int i;
	int ret;

	tmu->pdev = pdev;

	raw_spin_lock_init(&tmu->lock);

	if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
		ret = sh_tmu_parse_dt(tmu);
		if (ret < 0)
			return ret;
	} else if (pdev->dev.platform_data) {
		const struct platform_device_id *id = pdev->id_entry;
		struct sh_timer_config *cfg = pdev->dev.platform_data;

		tmu->model = id->driver_data;
		tmu->num_channels = hweight8(cfg->channels_mask);
	} else {
		dev_err(&tmu->pdev->dev, "missing platform data\n");
		return -ENXIO;
	}

	/* Get hold of clock. */
	tmu->clk = clk_get(&tmu->pdev->dev, "fck");
	if (IS_ERR(tmu->clk)) {
		dev_err(&tmu->pdev->dev, "cannot get clock\n");
		return PTR_ERR(tmu->clk);
	}

	ret = clk_prepare(tmu->clk);
	if (ret < 0)
		goto err_clk_put;

	/* Map the memory resource. */
	ret = sh_tmu_map_memory(tmu);
	if (ret < 0) {
		dev_err(&tmu->pdev->dev, "failed to remap I/O memory\n");
		goto err_clk_unprepare;
	}

	/* Allocate and setup the channels. */
	tmu->channels = kzalloc(sizeof(*tmu->channels) * tmu->num_channels,
				GFP_KERNEL);
	if (tmu->channels == NULL) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	/*
	 * Use the first channel as a clock event device and the second channel
	 * as a clock source.
	 */
	for (i = 0; i < tmu->num_channels; ++i) {
		ret = sh_tmu_channel_setup(&tmu->channels[i], i,
					   i == 0, i == 1, tmu);
		if (ret < 0)
			goto err_unmap;
	}

	platform_set_drvdata(pdev, tmu);

	return 0;

err_unmap:
	kfree(tmu->channels);
	iounmap(tmu->mapbase);
err_clk_unprepare:
	clk_unprepare(tmu->clk);
err_clk_put:
	clk_put(tmu->clk);
	return ret;
}
static int sh_tmu_probe(struct platform_device *pdev)
{
	struct sh_tmu_device *tmu = platform_get_drvdata(pdev);
	int ret;

	if (!is_early_platform_device(pdev)) {
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}

	if (tmu) {
		dev_info(&pdev->dev, "kept as earlytimer\n");
		goto out;
	}

	tmu = kzalloc(sizeof(*tmu), GFP_KERNEL);
	if (tmu == NULL)
		return -ENOMEM;

	ret = sh_tmu_setup(tmu, pdev);
	if (ret) {
		kfree(tmu);
		pm_runtime_idle(&pdev->dev);
		return ret;
	}

	if (is_early_platform_device(pdev))
		return 0;

 out:
	if (tmu->has_clockevent || tmu->has_clocksource)
		pm_runtime_irq_safe(&pdev->dev);
	else
		pm_runtime_idle(&pdev->dev);

	return 0;
}
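/*
 * The driver can be probed twice: once very early through the
 * "earlytimer" early platform mechanism (see early_platform_init()
 * below), so a system timer is available before the regular device
 * model is up, and again during normal platform driver registration.
 * The drvdata check in sh_tmu_probe() detects the second pass and
 * skips re-initialization, keeping the early instance.
 */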
static int sh_tmu_remove(struct platform_device *pdev)
{
	return -EBUSY; /* cannot unregister clockevent and clocksource */
}
static const struct platform_device_id sh_tmu_id_table[] = {
	{ "sh-tmu", SH_TMU },
	{ "sh-tmu-sh3", SH_TMU_SH3 },
	{ }
};
MODULE_DEVICE_TABLE(platform, sh_tmu_id_table);
static const struct of_device_id sh_tmu_of_table[] __maybe_unused = {
	{ .compatible = "renesas,tmu" },
	{ }
};
MODULE_DEVICE_TABLE(of, sh_tmu_of_table);
static struct platform_driver sh_tmu_device_driver = {
	.probe		= sh_tmu_probe,
	.remove		= sh_tmu_remove,
	.driver		= {
		.name	= "sh_tmu",
		.of_match_table = of_match_ptr(sh_tmu_of_table),
	},
	.id_table	= sh_tmu_id_table,
};
static int __init sh_tmu_init(void)
{
	return platform_driver_register(&sh_tmu_device_driver);
}

static void __exit sh_tmu_exit(void)
{
	platform_driver_unregister(&sh_tmu_device_driver);
}

early_platform_init("earlytimer", &sh_tmu_device_driver);
subsys_initcall(sh_tmu_init);
module_exit(sh_tmu_exit);
MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH TMU Timer Driver");
MODULE_LICENSE("GPL v2");