/*
 *  linux/kernel/time/timekeeping.c
 *
 *  Kernel timekeeping code and accessor functions
 *
 *  This code was moved from linux/kernel/timer.c.
 *  Please see that file for copyright and history logs.
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/sysdev.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/tick.h>
/*
 * This seqlock protects us from races in SMP while
 * playing with xtime and avenrun.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
/*
 * wall_to_monotonic is what we need to add to xtime (or xtime corrected
 * for sub jiffie times) to get to monotonic time. Monotonic is pegged
 * at zero at system boot time, so wall_to_monotonic will be negative;
 * however, we will ALWAYS keep the tv_nsec part positive so we can use
 * the usual normalization.
 *
 * wall_to_monotonic is moved after resume from suspend so that the
 * monotonic time does not jump. We need to add total_sleep_time to
 * wall_to_monotonic to get the real boot based time offset.
 *
 * - wall_to_monotonic is no longer the boot time; getboottime must be
 *   used instead.
 */
struct timespec xtime __attribute__ ((aligned (16)));
struct timespec wall_to_monotonic __attribute__ ((aligned (16)));
static unsigned long total_sleep_time;		/* seconds */
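
/*
 * In other words, the time bases maintained here are related by:
 *
 *	monotonic  = xtime + wall_to_monotonic
 *	boot based = monotonic + total_sleep_time
 *
 * so the real time of boot is -(wall_to_monotonic + total_sleep_time),
 * which is what getboottime() below returns.
 */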
static struct timespec xtime_cache __attribute__ ((aligned (16)));
void update_xtime_cache(u64 nsec)
{
	xtime_cache = xtime;
	timespec_add_ns(&xtime_cache, nsec);
}
struct clocksource *clock;
#ifdef CONFIG_GENERIC_TIME
/**
 * clocksource_forward_now - update clock to the current time
 *
 * Forward the current clock to update its state since the last call to
 * update_wall_time(). This is useful before significant clock changes,
 * as it avoids having to deal with this time offset explicitly.
 */
static void clocksource_forward_now(void)
{
	cycle_t cycle_now, cycle_delta;
	s64 nsec;

	cycle_now = clocksource_read(clock);
	/* masking keeps the delta correct when a narrower counter wraps */
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
	clock->cycle_last = cycle_now;

	nsec = cyc2ns(clock, cycle_delta);
	timespec_add_ns(&xtime, nsec);
}
/**
 * getnstimeofday - Returns the time of day in a timespec
 * @ts:		pointer to the timespec to be set
 *
 * Returns the time of day in a timespec.
 */
void getnstimeofday(struct timespec *ts)
{
	cycle_t cycle_now, cycle_delta;
	unsigned long seq;
	s64 nsecs;

	do {
		seq = read_seqbegin(&xtime_lock);

		*ts = xtime;

		/* read clocksource: */
		cycle_now = clocksource_read(clock);

		/* calculate the delta since the last update_wall_time: */
		cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

		/* convert to nanoseconds: */
		nsecs = cyc2ns(clock, cycle_delta);
	} while (read_seqretry(&xtime_lock, seq));

	timespec_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(getnstimeofday);
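
/*
 * The seqlock loop in getnstimeofday() is the usual lockless reader
 * pattern: snapshot xtime plus the clocksource delta, then retry if a
 * writer (update_wall_time() or do_settimeofday()) updated the
 * timekeeping state in between, so the fast path takes no lock.
 */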
/**
 * do_gettimeofday - Returns the time of day in a timeval
 * @tv:		pointer to the timeval to be set
 *
 * NOTE: Users should be converted to using getnstimeofday()
 */
void do_gettimeofday(struct timeval *tv)
{
	struct timespec now;

	getnstimeofday(&now);
	tv->tv_sec = now.tv_sec;
	tv->tv_usec = now.tv_nsec/1000;
}
EXPORT_SYMBOL(do_gettimeofday);
/**
 * do_settimeofday - Sets the time of day
 * @tv:		pointer to the timespec variable containing the new time
 *
 * Sets the time of day to the new time, updates NTP and notifies hrtimers.
 */
int do_settimeofday(struct timespec *tv)
{
	struct timespec ts_delta;
	unsigned long flags;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irqsave(&xtime_lock, flags);

	clocksource_forward_now();

	ts_delta.tv_sec = tv->tv_sec - xtime.tv_sec;
	ts_delta.tv_nsec = tv->tv_nsec - xtime.tv_nsec;
	wall_to_monotonic = timespec_sub(wall_to_monotonic, ts_delta);

	xtime = *tv;
	update_xtime_cache(0);

	update_vsyscall(&xtime, clock);

	write_sequnlock_irqrestore(&xtime_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return 0;
}
EXPORT_SYMBOL(do_settimeofday);
/**
 * change_clocksource - Swaps clocksources if a new one is available
 *
 * Accumulates current time interval and initializes new clocksource
 */
static void change_clocksource(void)
{
	struct clocksource *new;

	new = clocksource_get_next();
	if (clock == new)
		return;

	clocksource_forward_now();

	clock = new;
	clock->cycle_last = 0;
	clock->cycle_last = clocksource_read(new);
	clock->xtime_nsec = 0;
	clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);

	/*
	 * We're holding xtime lock and waking up klogd would deadlock
	 * us on enqueue.  So no printing!
	printk(KERN_INFO "Time: %s clocksource has been installed.\n",
	       clock->name);
	 */
}
#else
static inline void clocksource_forward_now(void) { }
static inline void change_clocksource(void) { }
#endif
/**
 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
 */
int timekeeping_valid_for_hres(void)
{
	unsigned long seq;
	int ret;

	do {
		seq = read_seqbegin(&xtime_lock);

		ret = clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
	} while (read_seqretry(&xtime_lock, seq));

	return ret;
}
/**
 * read_persistent_clock - Return time in seconds from the persistent clock.
 *
 * Weak dummy function for arches that do not yet support it.
 * Returns seconds from epoch using the battery backed persistent clock.
 * Returns zero if unsupported.
 *
 * XXX - Do be sure to remove it once all arches implement it.
 */
unsigned long __attribute__((weak)) read_persistent_clock(void)
{
	return 0;
}
/*
 * timekeeping_init - Initializes the clocksource and common timekeeping values
 */
void __init timekeeping_init(void)
{
	unsigned long flags;
	unsigned long sec = read_persistent_clock();

	write_seqlock_irqsave(&xtime_lock, flags);

	clock = clocksource_get_next();
	clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
	clock->cycle_last = clocksource_read(clock);

	xtime.tv_sec = sec;
	xtime.tv_nsec = 0;
	set_normalized_timespec(&wall_to_monotonic,
				-xtime.tv_sec, -xtime.tv_nsec);
	update_xtime_cache(0);
	total_sleep_time = 0;
	write_sequnlock_irqrestore(&xtime_lock, flags);
}
/* flag for if timekeeping is suspended */
static int timekeeping_suspended;
/* time in seconds when suspend began */
static unsigned long timekeeping_suspend_time;
/**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 *
 * This is for the generic clocksource timekeeping.
 * xtime/wall_to_monotonic/jiffies/etc are
 * still managed by arch specific suspend/resume code.
 */
static int timekeeping_resume(struct sys_device *dev)
{
	unsigned long flags;
	unsigned long now = read_persistent_clock();

	clocksource_resume();

	write_seqlock_irqsave(&xtime_lock, flags);

	if (now && (now > timekeeping_suspend_time)) {
		unsigned long sleep_length = now - timekeeping_suspend_time;

		xtime.tv_sec += sleep_length;
		wall_to_monotonic.tv_sec -= sleep_length;
		total_sleep_time += sleep_length;
	}
	update_xtime_cache(0);
	/* re-base the last cycle value */
	clock->cycle_last = 0;
	clock->cycle_last = clocksource_read(clock);
	timekeeping_suspended = 0;
	write_sequnlock_irqrestore(&xtime_lock, flags);

	touch_softlockup_watchdog();

	clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);

	/* Resume hrtimers */
	hres_timers_resume();

	return 0;
}
static int timekeeping_suspend(struct sys_device *dev, pm_message_t state)
{
	unsigned long flags;

	timekeeping_suspend_time = read_persistent_clock();

	write_seqlock_irqsave(&xtime_lock, flags);
	clocksource_forward_now();
	timekeeping_suspended = 1;
	write_sequnlock_irqrestore(&xtime_lock, flags);

	clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);

	return 0;
}
/* sysfs resume/suspend bits for timekeeping */
static struct sysdev_class timekeeping_sysclass = {
	.name		= "timekeeping",
	.resume		= timekeeping_resume,
	.suspend	= timekeeping_suspend,
};

static struct sys_device device_timer = {
	.cls		= &timekeeping_sysclass,
};
static int __init timekeeping_init_device(void)
{
	int error = sysdev_class_register(&timekeeping_sysclass);

	if (!error)
		error = sysdev_register(&device_timer);
	return error;
}

device_initcall(timekeeping_init_device);
/*
 * If the error is already larger, we look ahead even further
 * to compensate for late or lost adjustments.
 */
static __always_inline int clocksource_bigadjust(s64 error, s64 *interval,
						 s64 *offset)
{
	s64 tick_error, i;
	u32 look_ahead, adj;
	s32 error2, mult;
	/*
	 * Use the current error value to determine how much to look ahead.
	 * The larger the error the slower we adjust for it to avoid problems
	 * with losing too many ticks, otherwise we would overadjust and
	 * produce an even larger error.  The smaller the adjustment the
	 * faster we try to adjust for it, as lost ticks can do less harm
	 * here.  This is tuned so that an error of about 1 msec is adjusted
	 * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
	 */
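	/*
	 * Illustrative numbers (assuming HZ = 100, i.e. SHIFT_HZ = 7):
	 * 2^SHIFT_HZ = 128 ticks of 10 ms each is roughly 1.3 s, so a
	 * ~1 msec (2^20 nsec) error is worked off in about a second,
	 * while larger errors get a proportionally longer look-ahead.
	 */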
	error2 = clock->error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ);
	error2 = abs(error2);
	for (look_ahead = 0; error2 > 0; look_ahead++)
		error2 >>= 2;
	/*
	 * Now calculate the error in (1 << look_ahead) ticks, but first
	 * remove the single look ahead already included in the error.
	 */
	tick_error = tick_length >> (NTP_SCALE_SHIFT - clock->shift + 1);
	tick_error -= clock->xtime_interval >> 1;
	error = ((error - tick_error) >> look_ahead) + tick_error;
	/* Finally calculate the adjustment shift value. */
	i = *interval;
	mult = 1;
	if (error < 0) {
		error = -error;
		*interval = -*interval;
		*offset = -*offset;
		mult = -1;
	}
	for (adj = 0; error > i; adj++)
		error >>= 1;

	*interval <<= adj;
	*offset <<= adj;
	return mult << adj;
}
/*
 * Adjust the multiplier to reduce the error value,
 * this is optimized for the most common adjustments of -1,0,1,
 * for other values we can do a bit more work.
 */
static void clocksource_adjust(s64 offset)
{
	s64 error, interval = clock->cycle_interval;
	int adj;

	error = clock->error >> (NTP_SCALE_SHIFT - clock->shift - 1);
	if (error > interval) {
		error >>= 2;
		if (likely(error <= interval))
			adj = 1;
		else
			adj = clocksource_bigadjust(error, &interval, &offset);
	} else if (error < -interval) {
		error >>= 2;
		if (likely(error >= -interval)) {
			adj = -1;
			interval = -interval;
			offset = -offset;
		} else
			adj = clocksource_bigadjust(error, &interval, &offset);
	} else
		return;

	clock->mult += adj;
	clock->xtime_interval += interval;
	clock->xtime_nsec -= offset;
	clock->error -= (interval - offset) <<
			(NTP_SCALE_SHIFT - clock->shift);
}
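
/*
 * Note: xtime_interval is cycle_interval scaled by the multiplier, so a
 * multiplier change of adj is matched by growing xtime_interval by
 * cycle_interval * adj (the interval value computed above), keeping the
 * two representations of the tick length in step.
 */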
/**
 * update_wall_time - Uses the current clocksource to increment the wall time
 *
 * Called from the timer interrupt, must hold a write on xtime_lock.
 */
void update_wall_time(void)
{
	cycle_t offset;

	/* Make sure we're fully resumed: */
	if (unlikely(timekeeping_suspended))
		return;

#ifdef CONFIG_GENERIC_TIME
	offset = (clocksource_read(clock) - clock->cycle_last) & clock->mask;
#else
	offset = clock->cycle_interval;
#endif
	clock->xtime_nsec += (s64)xtime.tv_nsec << clock->shift;
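	/*
	 * clock->xtime_nsec holds nanoseconds shifted left by clock->shift,
	 * so sub-nanosecond remainders survive between calls; xtime.tv_nsec
	 * is folded in here and the whole nanoseconds are written back,
	 * unshifted, near the end of this function.
	 */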
	/* normally this loop will run just once, however in the
	 * case of lost or late ticks, it will accumulate correctly.
	 */
	while (offset >= clock->cycle_interval) {
		/* accumulate one interval */
		offset -= clock->cycle_interval;
		clock->cycle_last += clock->cycle_interval;

		clock->xtime_nsec += clock->xtime_interval;
		if (clock->xtime_nsec >= (u64)NSEC_PER_SEC << clock->shift) {
			clock->xtime_nsec -= (u64)NSEC_PER_SEC << clock->shift;
			xtime.tv_sec++;
			second_overflow();
		}
		/* accumulate error between NTP and clock interval */
		clock->error += tick_length;
		clock->error -= clock->xtime_interval << (NTP_SCALE_SHIFT - clock->shift);
	}
	/* correct the clock when NTP error is too big */
	clocksource_adjust(offset);

	/* store full nanoseconds into xtime */
	xtime.tv_nsec = (s64)clock->xtime_nsec >> clock->shift;
	clock->xtime_nsec -= (s64)xtime.tv_nsec << clock->shift;

	update_xtime_cache(cyc2ns(clock, offset));

	/* check to see if there is a new clocksource to use */
	change_clocksource();
	update_vsyscall(&xtime, clock);
}
/**
 * getboottime - Return the real time of system boot.
 * @ts:		pointer to the timespec to be set
 *
 * Returns the time of system boot in a timespec.
 *
 * This is based on the wall_to_monotonic offset and the total suspend
 * time. Calls to settimeofday will affect the value returned (which
 * basically means that however wrong your real time clock is at boot time,
 * you get the right time here).
 */
void getboottime(struct timespec *ts)
{
	set_normalized_timespec(ts,
		- (wall_to_monotonic.tv_sec + total_sleep_time),
		- wall_to_monotonic.tv_nsec);
}
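
/*
 * Derivation: with monotonic = xtime + wall_to_monotonic and the boot
 * based clock being monotonic + total_sleep_time, the real time at boot
 * is xtime - (monotonic + total_sleep_time), i.e.
 * -(wall_to_monotonic.tv_sec + total_sleep_time) seconds and
 * -wall_to_monotonic.tv_nsec nanoseconds, which is what is computed above.
 */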
/**
 * monotonic_to_bootbased - Convert the monotonic time to boot based.
 * @ts:		pointer to the timespec to be converted
 */
void monotonic_to_bootbased(struct timespec *ts)
{
	ts->tv_sec += total_sleep_time;
}
unsigned long get_seconds(void)
{
	return xtime_cache.tv_sec;
}
EXPORT_SYMBOL(get_seconds);
struct timespec current_kernel_time(void)
{
	struct timespec now;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);

		now = xtime_cache;
	} while (read_seqretry(&xtime_lock, seq));

	return now;
}
EXPORT_SYMBOL(current_kernel_time);