/*
 * linux/kernel/time/timekeeping.c
 *
 * Kernel timekeeping code and accessor functions
 *
 * This code was moved from linux/kernel/timer.c.
 * Please see that file for copyright and history logs.
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sysdev.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/tick.h>
/* Structure holding internal timekeeping values. */
struct timekeeper {
        /* Current clocksource used for timekeeping. */
        struct clocksource *clock;
        /* The shift value of the current clocksource. */
        int shift;

        /* Number of clock cycles in one NTP interval. */
        cycle_t cycle_interval;
        /* Number of clock shifted nano seconds in one NTP interval. */
        u64 xtime_interval;
        /* Raw nano seconds accumulated per NTP interval. */
        u32 raw_interval;

        /* Clock shifted nano seconds remainder not stored in xtime.tv_nsec. */
        u64 xtime_nsec;
        /* Difference between accumulated time and NTP time in ntp
         * shifted nano seconds. */
        s64 ntp_error;
        /* Shift conversion between clock shifted nano seconds and
         * ntp shifted nano seconds. */
        int ntp_error_shift;
        /* NTP adjusted clock multiplier */
        u32 mult;
};

struct timekeeper timekeeper;
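/*
 * Note on units: the "shifted nanoseconds" above are nanoseconds scaled up
 * by 2^shift (the clocksource's shift value), so sub-nanosecond remainders
 * from the cycles->ns conversion are not lost between accumulation steps.
 * Plain nanoseconds are recovered with ">> shift", as done when xtime_nsec
 * is folded back into xtime.tv_nsec.
 */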
/**
 * timekeeper_setup_internals - Set up internals to use clocksource clock.
 *
 * @clock:	Pointer to clocksource.
 *
 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
 * pair and interval request.
 *
 * Unless you're the timekeeping code, you should not be using this!
 */
static void timekeeper_setup_internals(struct clocksource *clock)
{
        cycle_t interval;
        u64 tmp;
        timekeeper.clock = clock;
        clock->cycle_last = clock->read(clock);

        /* Do the ns -> cycle conversion first, using original mult */
        tmp = NTP_INTERVAL_LENGTH;
        tmp <<= clock->shift;
        tmp += clock->mult/2;
        do_div(tmp, clock->mult);
        if (tmp == 0)
                tmp = 1;

        interval = (cycle_t) tmp;
        timekeeper.cycle_interval = interval;

        /* Go back from cycles -> shifted ns */
        timekeeper.xtime_interval = (u64) interval * clock->mult;
        timekeeper.raw_interval =
                ((u64) interval * clock->mult) >> clock->shift;

        timekeeper.xtime_nsec = 0;
        timekeeper.shift = clock->shift;

        timekeeper.ntp_error = 0;
        timekeeper.ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
        /*
         * The timekeeper keeps its own mult values for the currently
         * active clocksource. These values will be adjusted via NTP
         * to counteract clock drifting.
         */
        timekeeper.mult = clock->mult;
}
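/*
 * Rough illustration of the arithmetic above, using a hypothetical 10 MHz
 * clocksource (100 ns per cycle, shift = 22, mult = 100 << 22): the ns ->
 * cycle division turns NTP_INTERVAL_LENGTH (one NTP interval in plain
 * nanoseconds) into cycle_interval cycles, multiplying back by mult gives
 * xtime_interval (the same span in shifted nanoseconds), and raw_interval
 * holds its plain-nanosecond value.  The numbers are illustrative only;
 * real clocksources provide their own mult/shift at registration.
 */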
/*
 * This read-write spinlock protects us from races in SMP while
 * playing with xtime.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
/*
 * wall_to_monotonic is what we need to add to xtime (or xtime corrected
 * for sub jiffie times) to get to monotonic time. Monotonic is pegged
 * at zero at system boot time, so wall_to_monotonic will be negative,
 * however, we will ALWAYS keep the tv_nsec part positive so we can use
 * the usual normalization.
 *
 * wall_to_monotonic is moved after resume from suspend for the monotonic
 * time not to jump. We need to add total_sleep_time to wall_to_monotonic
 * to get the real boot based time offset.
 *
 * - wall_to_monotonic is no longer the boot time, getboottime must be
 *   used instead.
 */
struct timespec xtime __attribute__ ((aligned (16)));
struct timespec wall_to_monotonic __attribute__ ((aligned (16)));
static unsigned long total_sleep_time;		/* seconds */
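/*
 * In other words: the monotonic clock is obtained as xtime + wall_to_monotonic,
 * and the boot-based offset is wall_to_monotonic + total_sleep_time (see
 * getboottime() and monotonic_to_bootbased() below).
 */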
/*
 * The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock.
 */
struct timespec raw_time;
/* flag for if timekeeping is suspended */
int __read_mostly timekeeping_suspended;
static struct timespec xtime_cache __attribute__ ((aligned (16)));
void update_xtime_cache(u64 nsec)
{
        xtime_cache = xtime;
        timespec_add_ns(&xtime_cache, nsec);
}
/* must hold xtime_lock */
void timekeeping_leap_insert(int leapsecond)
{
        xtime.tv_sec += leapsecond;
        wall_to_monotonic.tv_sec -= leapsecond;
        update_vsyscall(&xtime, timekeeper.clock);
}
#ifdef CONFIG_GENERIC_TIME
/**
 * timekeeping_forward_now - update clock to the current time
 *
 * Forward the current clock to update its state since the last call to
 * update_wall_time(). This is useful before significant clock changes,
 * as it avoids having to deal with this time offset explicitly.
 */
static void timekeeping_forward_now(void)
{
        cycle_t cycle_now, cycle_delta;
        struct clocksource *clock;
        s64 nsec;

        clock = timekeeper.clock;
        cycle_now = clock->read(clock);
        cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
        clock->cycle_last = cycle_now;

        nsec = clocksource_cyc2ns(cycle_delta, timekeeper.mult,
                                  timekeeper.shift);

        /* If arch requires, add in gettimeoffset() */
        nsec += arch_gettimeoffset();

        timespec_add_ns(&xtime, nsec);

        nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
        timespec_add_ns(&raw_time, nsec);
}
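/*
 * Note that xtime above advances with the NTP-adjusted timekeeper.mult,
 * while raw_time advances with the clocksource's original mult/shift;
 * this is what keeps CLOCK_MONOTONIC_RAW free of NTP steering.
 */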
/**
 * getnstimeofday - Returns the time of day in a timespec
 * @ts:		pointer to the timespec to be set
 *
 * Returns the time of day in a timespec.
 */
void getnstimeofday(struct timespec *ts)
{
        cycle_t cycle_now, cycle_delta;
        struct clocksource *clock;
        unsigned long seq;
        s64 nsecs;
        WARN_ON(timekeeping_suspended);

        do {
                seq = read_seqbegin(&xtime_lock);

                *ts = xtime;

                /* read clocksource: */
                clock = timekeeper.clock;
                cycle_now = clock->read(clock);

                /* calculate the delta since the last update_wall_time: */
                cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

                /* convert to nanoseconds: */
                nsecs = clocksource_cyc2ns(cycle_delta, timekeeper.mult,
                                           timekeeper.shift);

                /* If arch requires, add in gettimeoffset() */
                nsecs += arch_gettimeoffset();

        } while (read_seqretry(&xtime_lock, seq));

        timespec_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(getnstimeofday);
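/*
 * The read_seqbegin()/read_seqretry() loop above is the usual lockless read
 * pattern for xtime_lock: readers retry if update_wall_time() or another
 * writer modified the timekeeping state while the values were being read.
 * The same pattern is used by the other readers below.
 */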
ktime_t ktime_get(void)
{
        cycle_t cycle_now, cycle_delta;
        struct clocksource *clock;
        unsigned int seq;
        s64 secs, nsecs;

        WARN_ON(timekeeping_suspended);
        do {
                seq = read_seqbegin(&xtime_lock);
                secs = xtime.tv_sec + wall_to_monotonic.tv_sec;
                nsecs = xtime.tv_nsec + wall_to_monotonic.tv_nsec;

                /* read clocksource: */
                clock = timekeeper.clock;
                cycle_now = clock->read(clock);

                /* calculate the delta since the last update_wall_time: */
                cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

                /* convert to nanoseconds: */
                nsecs += clocksource_cyc2ns(cycle_delta, timekeeper.mult,
                                            timekeeper.shift);

        } while (read_seqretry(&xtime_lock, seq));
        /*
         * Use ktime_set/ktime_add_ns to create a proper ktime on
         * 32-bit architectures without CONFIG_KTIME_SCALAR.
         */
        return ktime_add_ns(ktime_set(secs, 0), nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get);
/**
 * ktime_get_ts - get the monotonic clock in timespec format
 * @ts:		pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec format in the variable pointed to by @ts.
 */
void ktime_get_ts(struct timespec *ts)
{
        cycle_t cycle_now, cycle_delta;
        struct clocksource *clock;
        struct timespec tomono;
        unsigned int seq;
        s64 nsecs;

        WARN_ON(timekeeping_suspended);
        do {
                seq = read_seqbegin(&xtime_lock);
                *ts = xtime;
                tomono = wall_to_monotonic;

                /* read clocksource: */
                clock = timekeeper.clock;
                cycle_now = clock->read(clock);

                /* calculate the delta since the last update_wall_time: */
                cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

                /* convert to nanoseconds: */
                nsecs = clocksource_cyc2ns(cycle_delta, timekeeper.mult,
                                           timekeeper.shift);

        } while (read_seqretry(&xtime_lock, seq));

        set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
                                ts->tv_nsec + tomono.tv_nsec + nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get_ts);
/**
 * do_gettimeofday - Returns the time of day in a timeval
 * @tv:		pointer to the timeval to be set
 *
 * NOTE: Users should be converted to using getnstimeofday()
 */
void do_gettimeofday(struct timeval *tv)
{
        struct timespec now;

        getnstimeofday(&now);
        tv->tv_sec = now.tv_sec;
        tv->tv_usec = now.tv_nsec/1000;
}

EXPORT_SYMBOL(do_gettimeofday);
/**
 * do_settimeofday - Sets the time of day
 * @tv:		pointer to the timespec variable containing the new time
 *
 * Sets the time of day to the new time and update NTP and notify hrtimers
 */
int do_settimeofday(struct timespec *tv)
{
        struct timespec ts_delta;
        unsigned long flags;

        if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
                return -EINVAL;

        write_seqlock_irqsave(&xtime_lock, flags);

        timekeeping_forward_now();

        ts_delta.tv_sec = tv->tv_sec - xtime.tv_sec;
        ts_delta.tv_nsec = tv->tv_nsec - xtime.tv_nsec;
        wall_to_monotonic = timespec_sub(wall_to_monotonic, ts_delta);

        xtime = *tv;

        update_xtime_cache(0);

        timekeeper.ntp_error = 0;
        ntp_clear();

        update_vsyscall(&xtime, timekeeper.clock);

        write_sequnlock_irqrestore(&xtime_lock, flags);

        /* signal hrtimers about time change */
        clock_was_set();

        return 0;
}
EXPORT_SYMBOL(do_settimeofday);
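/*
 * Moving wall_to_monotonic by the opposite of the wall clock change keeps
 * the monotonic clock (xtime + wall_to_monotonic) continuous across
 * do_settimeofday(), so only the realtime clock appears to jump.
 */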
/**
 * change_clocksource - Swaps clocksources if a new one is available
 *
 * Accumulates current time interval and initializes new clocksource
 */
static void change_clocksource(void)
{
        struct clocksource *new, *old;

        new = clocksource_get_next();

        if (!new || timekeeper.clock == new)
                return;

        timekeeping_forward_now();

        if (new->enable && !new->enable(new))
                return;

        old = timekeeper.clock;
        timekeeper_setup_internals(new);

        if (old->disable)
                old->disable(old);
}
#else /* GENERIC_TIME */

static inline void timekeeping_forward_now(void) { }
static inline void change_clocksource(void) { }
/**
 * ktime_get - get the monotonic time in ktime_t format
 *
 * returns the time in ktime_t format
 */
ktime_t ktime_get(void)
{
        struct timespec now;

        ktime_get_ts(&now);

        return timespec_to_ktime(now);
}
EXPORT_SYMBOL_GPL(ktime_get);
/**
 * ktime_get_ts - get the monotonic clock in timespec format
 * @ts:		pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec format in the variable pointed to by @ts.
 */
void ktime_get_ts(struct timespec *ts)
{
        struct timespec tomono;
        unsigned long seq;

        do {
                seq = read_seqbegin(&xtime_lock);
                getnstimeofday(ts);
                tomono = wall_to_monotonic;

        } while (read_seqretry(&xtime_lock, seq));

        set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
                                ts->tv_nsec + tomono.tv_nsec);
}
EXPORT_SYMBOL_GPL(ktime_get_ts);
#endif /* !GENERIC_TIME */
/**
 * ktime_get_real - get the real (wall-) time in ktime_t format
 *
 * returns the time in ktime_t format
 */
ktime_t ktime_get_real(void)
{
        struct timespec now;

        getnstimeofday(&now);

        return timespec_to_ktime(now);
}
EXPORT_SYMBOL_GPL(ktime_get_real);
/**
 * getrawmonotonic - Returns the raw monotonic time in a timespec
 * @ts:		pointer to the timespec to be set
 *
 * Returns the raw monotonic time (completely un-modified by ntp)
 */
void getrawmonotonic(struct timespec *ts)
{
        unsigned long seq;
        s64 nsecs;
        cycle_t cycle_now, cycle_delta;
        struct clocksource *clock;

        do {
                seq = read_seqbegin(&xtime_lock);

                /* read clocksource: */
                clock = timekeeper.clock;
                cycle_now = clock->read(clock);

                /* calculate the delta since the last update_wall_time: */
                cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

                /* convert to nanoseconds: */
                nsecs = clocksource_cyc2ns(cycle_delta, clock->mult,
                                           clock->shift);

                *ts = raw_time;

        } while (read_seqretry(&xtime_lock, seq));

        timespec_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(getrawmonotonic);
/**
 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
 */
int timekeeping_valid_for_hres(void)
{
        unsigned long seq;
        int ret;

        do {
                seq = read_seqbegin(&xtime_lock);

                ret = timekeeper.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;

        } while (read_seqretry(&xtime_lock, seq));

        return ret;
}
/**
 * read_persistent_clock - Return time in seconds from the persistent clock.
 *
 * Weak dummy function for arches that do not yet support it.
 * Returns seconds from epoch using the battery backed persistent clock.
 * Returns zero if unsupported.
 *
 * XXX - Do be sure to remove it once all arches implement it.
 */
unsigned long __attribute__((weak)) read_persistent_clock(void)
{
        return 0;
}
/*
 * timekeeping_init - Initializes the clocksource and common timekeeping values
 */
void __init timekeeping_init(void)
{
        struct clocksource *clock;
        unsigned long flags;
        unsigned long sec = read_persistent_clock();

        write_seqlock_irqsave(&xtime_lock, flags);

        ntp_init();

        clock = clocksource_default_clock();
        if (clock->enable)
                clock->enable(clock);
        timekeeper_setup_internals(clock);

        xtime.tv_sec = sec;
        xtime.tv_nsec = 0;
        raw_time.tv_sec = 0;
        raw_time.tv_nsec = 0;
        set_normalized_timespec(&wall_to_monotonic,
                                -xtime.tv_sec, -xtime.tv_nsec);
        update_xtime_cache(0);
        total_sleep_time = 0;
        write_sequnlock_irqrestore(&xtime_lock, flags);
}
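/*
 * At this point wall_to_monotonic has been set to -xtime, so the monotonic
 * clock starts at zero at boot, matching the description above the xtime
 * and wall_to_monotonic definitions.
 */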
/* time in seconds when suspend began */
static unsigned long timekeeping_suspend_time;
/**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 * @dev:	unused
 *
 * This is for the generic clocksource timekeeping.
 * xtime/wall_to_monotonic/jiffies/etc are
 * still managed by arch specific suspend/resume code.
 */
static int timekeeping_resume(struct sys_device *dev)
{
        unsigned long flags;
        unsigned long now = read_persistent_clock();

        clocksource_resume();

        write_seqlock_irqsave(&xtime_lock, flags);

        if (now && (now > timekeeping_suspend_time)) {
                unsigned long sleep_length = now - timekeeping_suspend_time;

                xtime.tv_sec += sleep_length;
                wall_to_monotonic.tv_sec -= sleep_length;
                total_sleep_time += sleep_length;
        }
        update_xtime_cache(0);
        /* re-base the last cycle value */
        timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
        timekeeper.ntp_error = 0;
        timekeeping_suspended = 0;
        write_sequnlock_irqrestore(&xtime_lock, flags);

        touch_softlockup_watchdog();

        clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);

        /* Resume hrtimers */
        hres_timers_resume();

        return 0;
}
static int timekeeping_suspend(struct sys_device *dev, pm_message_t state)
{
        unsigned long flags;

        timekeeping_suspend_time = read_persistent_clock();

        write_seqlock_irqsave(&xtime_lock, flags);
        timekeeping_forward_now();
        timekeeping_suspended = 1;
        write_sequnlock_irqrestore(&xtime_lock, flags);

        clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);

        return 0;
}
/* sysfs resume/suspend bits for timekeeping */
static struct sysdev_class timekeeping_sysclass = {
        .name		= "timekeeping",
        .resume		= timekeeping_resume,
        .suspend	= timekeeping_suspend,
};

static struct sys_device device_timer = {
        .id		= 0,
        .cls		= &timekeeping_sysclass,
};

static int __init timekeeping_init_device(void)
{
        int error = sysdev_class_register(&timekeeping_sysclass);

        if (!error)
                error = sysdev_register(&device_timer);

        return error;
}

device_initcall(timekeeping_init_device);
/*
 * If the error is already larger, we look ahead even further
 * to compensate for late or lost adjustments.
 */
static __always_inline int timekeeping_bigadjust(s64 error, s64 *interval,
                                                 s64 *offset)
{
        s64 tick_error, i;
        u32 look_ahead, adj;
        s32 error2, mult;

        /*
         * Use the current error value to determine how much to look ahead.
         * The larger the error the slower we adjust for it to avoid problems
         * with losing too many ticks, otherwise we would overadjust and
         * produce an even larger error. The smaller the adjustment the
         * faster we try to adjust for it, as lost ticks can do less harm
         * here. This is tuned so that an error of about 1 msec is adjusted
         * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
         */
        error2 = timekeeper.ntp_error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ);
        error2 = abs(error2);
        for (look_ahead = 0; error2 > 0; look_ahead++)
                error2 >>= 2;

        /*
         * Now calculate the error in (1 << look_ahead) ticks, but first
         * remove the single look ahead already included in the error.
         */
        tick_error = tick_length >> (timekeeper.ntp_error_shift + 1);
        tick_error -= timekeeper.xtime_interval >> 1;
        error = ((error - tick_error) >> look_ahead) + tick_error;

        /* Finally calculate the adjustment shift value. */
        i = *interval;
        mult = 1;
        if (error < 0) {
                error = -error;
                *interval = -*interval;
                *offset = -*offset;
                mult = -1;
        }
        for (adj = 0; error > i; adj++)
                error >>= 1;

        *interval <<= adj;
        *offset <<= adj;
        return mult << adj;
}
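/*
 * Sketch of the idea, following the tuning described above: each factor-of-4
 * increase in the accumulated NTP error adds one step of look_ahead, so
 * larger errors are averaged over more ticks before the adjustment is
 * derived, and the returned value is the signed power-of-two step that
 * timekeeping_adjust() applies to the multiplier.
 */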
/*
 * Adjust the multiplier to reduce the error value,
 * this is optimized for the most common adjustments of -1,0,1,
 * for other values we can do a bit more work.
 */
static void timekeeping_adjust(s64 offset)
{
        s64 error, interval = timekeeper.cycle_interval;
        int adj;

        error = timekeeper.ntp_error >> (timekeeper.ntp_error_shift - 1);
        if (error > interval) {
                error >>= 2;
                if (likely(error <= interval))
                        adj = 1;
                else
                        adj = timekeeping_bigadjust(error, &interval, &offset);
        } else if (error < -interval) {
                error >>= 2;
                if (likely(error >= -interval)) {
                        adj = -1;
                        interval = -interval;
                        offset = -offset;
                } else
                        adj = timekeeping_bigadjust(error, &interval, &offset);
        } else
                return;

        timekeeper.mult += adj;
        timekeeper.xtime_interval += interval;
        timekeeper.xtime_nsec -= offset;
        timekeeper.ntp_error -= (interval - offset) <<
                                timekeeper.ntp_error_shift;
}
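/*
 * For example, if the accumulated ntp_error indicates the clock is running
 * slightly slow (error > interval), mult is bumped by +1 (or by the larger
 * power-of-two value from timekeeping_bigadjust()), xtime_interval grows by
 * the matching amount, and ntp_error is reduced by the correction just
 * applied; the +/-1 step is the common case noted above.
 */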
/**
 * update_wall_time - Uses the current clocksource to increment the wall time
 *
 * Called from the timer interrupt, must hold a write on xtime_lock.
 */
void update_wall_time(void)
{
        struct clocksource *clock;
        cycle_t offset;
        u64 nsecs;

        /* Make sure we're fully resumed: */
        if (unlikely(timekeeping_suspended))
                return;

        clock = timekeeper.clock;
#ifdef CONFIG_GENERIC_TIME
        offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
#else
        offset = timekeeper.cycle_interval;
#endif
        timekeeper.xtime_nsec = (s64)xtime.tv_nsec << timekeeper.shift;
        /* normally this loop will run just once, however in the
         * case of lost or late ticks, it will accumulate correctly.
         */
        while (offset >= timekeeper.cycle_interval) {
                u64 nsecps = (u64)NSEC_PER_SEC << timekeeper.shift;

                /* accumulate one interval */
                offset -= timekeeper.cycle_interval;
                clock->cycle_last += timekeeper.cycle_interval;

                timekeeper.xtime_nsec += timekeeper.xtime_interval;
                if (timekeeper.xtime_nsec >= nsecps) {
                        timekeeper.xtime_nsec -= nsecps;
                        xtime.tv_sec++;
                        second_overflow();
                }

                raw_time.tv_nsec += timekeeper.raw_interval;
                if (raw_time.tv_nsec >= NSEC_PER_SEC) {
                        raw_time.tv_nsec -= NSEC_PER_SEC;
                        raw_time.tv_sec++;
                }

                /* accumulate error between NTP and clock interval */
                timekeeper.ntp_error += tick_length;
                timekeeper.ntp_error -= timekeeper.xtime_interval <<
                                        timekeeper.ntp_error_shift;
        }
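        /*
         * E.g. if two ticks' worth of cycles have elapsed since the last call
         * (a lost tick), the loop above simply runs twice, so no time is
         * dropped; in the normal case it runs exactly once per tick.
         */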
        /* correct the clock when NTP error is too big */
        timekeeping_adjust(offset);

        /*
         * Since in the loop above, we accumulate any amount of time
         * in xtime_nsec over a second into xtime.tv_sec, it's possible for
         * xtime_nsec to be fairly small after the loop. Further, if we're
         * slightly speeding the clocksource up in timekeeping_adjust(),
         * it's possible the required corrective factor to xtime_nsec could
         * cause it to underflow.
         *
         * Now, we cannot simply roll the accumulated second back, since
         * the NTP subsystem has been notified via second_overflow. So
         * instead we push xtime_nsec forward by the amount we underflowed,
         * and add that amount into the error.
         *
         * We'll correct this error next time through this function, when
         * xtime_nsec is not as small.
         */
        if (unlikely((s64)timekeeper.xtime_nsec < 0)) {
                s64 neg = -(s64)timekeeper.xtime_nsec;
                timekeeper.xtime_nsec = 0;
                timekeeper.ntp_error += neg << timekeeper.ntp_error_shift;
        }

        /* store full nanoseconds into xtime after rounding it up and
         * add the remainder to the error difference.
         */
        xtime.tv_nsec = ((s64) timekeeper.xtime_nsec >> timekeeper.shift) + 1;
        timekeeper.xtime_nsec -= (s64) xtime.tv_nsec << timekeeper.shift;
        timekeeper.ntp_error += timekeeper.xtime_nsec <<
                                timekeeper.ntp_error_shift;
        nsecs = clocksource_cyc2ns(offset, timekeeper.mult, timekeeper.shift);
        update_xtime_cache(nsecs);

        /* check to see if there is a new clocksource to use */
        change_clocksource();
        update_vsyscall(&xtime, timekeeper.clock);
}
/**
 * getboottime - Return the real time of system boot.
 * @ts:		pointer to the timespec to be set
 *
 * Returns the time of day in a timespec.
 *
 * This is based on the wall_to_monotonic offset and the total suspend
 * time. Calls to settimeofday will affect the value returned (which
 * basically means that however wrong your real time clock is at boot time,
 * you get the right time here).
 */
void getboottime(struct timespec *ts)
{
        set_normalized_timespec(ts,
                - (wall_to_monotonic.tv_sec + total_sleep_time),
                - wall_to_monotonic.tv_nsec);
}
/**
 * monotonic_to_bootbased - Convert the monotonic time to boot based.
 * @ts:		pointer to the timespec to be converted
 */
void monotonic_to_bootbased(struct timespec *ts)
{
        ts->tv_sec += total_sleep_time;
}
unsigned long get_seconds(void)
{
        return xtime_cache.tv_sec;
}
EXPORT_SYMBOL(get_seconds);
struct timespec current_kernel_time(void)
{
        struct timespec now;
        unsigned long seq;

        do {
                seq = read_seqbegin(&xtime_lock);

                now = xtime_cache;
        } while (read_seqretry(&xtime_lock, seq));

        return now;
}
EXPORT_SYMBOL(current_kernel_time);