From: Thomas Gleixner Date: Wed, 9 May 2007 09:35:15 +0000 (-0700) Subject: [PATCH] clocksource: fix resume logic X-Git-Tag: v2.6.21.2~31 X-Git-Url: https://git.karo-electronics.de/?a=commitdiff_plain;h=f34ddce1302d548244c499b2905ab2fe999610c6;p=karo-tx-linux.git [PATCH] clocksource: fix resume logic We need to make sure that the clocksources are resumed, when timekeeping is resumed. The current resume logic does not guarantee this. Add a resume function pointer to the clocksource struct, so clocksource drivers which need to reinitialize the clocksource can provide a resume function. Add a resume function, which calls the maybe available clocksource resume functions and resets the watchdog function, so a stable TSC can be used across suspend/resume. Signed-off-by: Thomas Gleixner Cc: john stultz Cc: Andi Kleen Cc: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Chris Wright --- diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index daa4940cc0f1..bf92c2631562 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -48,6 +48,7 @@ struct clocksource; * @shift: cycle to nanosecond divisor (power of two) * @flags: flags describing special properties * @vread: vsyscall based read + * @resume: resume function for the clocksource, if necessary * @cycle_interval: Used internally by timekeeping core, please ignore. * @xtime_interval: Used internally by timekeeping core, please ignore. 
*/ @@ -61,6 +62,7 @@ struct clocksource { u32 shift; unsigned long flags; cycle_t (*vread)(void); + void (*resume)(void); /* timekeeping specific data, ignore */ cycle_t cycle_last, cycle_interval; @@ -198,6 +200,7 @@ static inline void clocksource_calculate_interval(struct clocksource *c, extern int clocksource_register(struct clocksource*); extern struct clocksource* clocksource_get_next(void); extern void clocksource_change_rating(struct clocksource *cs, int rating); +extern void clocksource_resume(void); #ifdef CONFIG_GENERIC_TIME_VSYSCALL extern void update_vsyscall(struct timespec *ts, struct clocksource *c); diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index fe5c7db24247..5baee91ebc70 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c @@ -74,6 +74,8 @@ static struct clocksource *watchdog; static struct timer_list watchdog_timer; static DEFINE_SPINLOCK(watchdog_lock); static cycle_t watchdog_last; +static int watchdog_resumed; + /* * Interval: 0.5sec Treshold: 0.0625s */ @@ -98,15 +100,26 @@ static void clocksource_watchdog(unsigned long data) struct clocksource *cs, *tmp; cycle_t csnow, wdnow; int64_t wd_nsec, cs_nsec; + int resumed; spin_lock(&watchdog_lock); + resumed = watchdog_resumed; + if (unlikely(resumed)) + watchdog_resumed = 0; + wdnow = watchdog->read(); wd_nsec = cyc2ns(watchdog, (wdnow - watchdog_last) & watchdog->mask); watchdog_last = wdnow; list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) { csnow = cs->read(); + + if (unlikely(resumed)) { + cs->wd_last = csnow; + continue; + } + /* Initialized ? 
*/ if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) { if ((cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) && @@ -136,6 +149,13 @@ static void clocksource_watchdog(unsigned long data) } spin_unlock(&watchdog_lock); } +static void clocksource_resume_watchdog(void) +{ + spin_lock(&watchdog_lock); + watchdog_resumed = 1; + spin_unlock(&watchdog_lock); +} + static void clocksource_check_watchdog(struct clocksource *cs) { struct clocksource *cse; @@ -182,8 +202,33 @@ static void clocksource_check_watchdog(struct clocksource *cs) if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES; } + +static inline void clocksource_resume_watchdog(void) { } #endif +/** + * clocksource_resume - resume the clocksource(s) + */ +void clocksource_resume(void) +{ + struct list_head *tmp; + unsigned long flags; + + spin_lock_irqsave(&clocksource_lock, flags); + + list_for_each(tmp, &clocksource_list) { + struct clocksource *cs; + + cs = list_entry(tmp, struct clocksource, list); + if (cs->resume) + cs->resume(); + } + + clocksource_resume_watchdog(); + + spin_unlock_irqrestore(&clocksource_lock, flags); +} + /** * clocksource_get_next - Returns the selected clocksource * diff --git a/kernel/timer.c b/kernel/timer.c index dd6c2c1c561b..e045774b3d65 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -1903,6 +1903,8 @@ unregister_time_interpolator(struct time_interpolator *ti) prev = &curr->next; } + clocksource_resume(); + write_seqlock_irqsave(&xtime_lock, flags); if (ti == time_interpolator) { /* we lost the best time-interpolator: */