/*
 * Copyright 2006 Andi Kleen, SUSE Labs.
 * Subject to the GNU Public License, v.2
 *
 * Fast user context implementation of clock_gettime, gettimeofday, and time.
 *
 * The code should have no internal unresolved relocations.
 * Check with readelf after changing.
 */

/* Disable profiling for userspace code: */
#define DISABLE_BRANCH_PROFILING

#include <linux/kernel.h>
#include <linux/posix-timers.h>
#include <linux/time.h>
#include <linux/string.h>
#include <asm/vsyscall.h>
#include <asm/fixmap.h>
#include <asm/vgtod.h>
#include <asm/timex.h>
#include <asm/hpet.h>
#include <asm/unistd.h>
#include <asm/io.h>

#define gtod (&VVAR(vsyscall_gtod_data))

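/*
 * gtod points at the kernel's timekeeping snapshot in the vvar page, which
 * is mapped read-only into every process and refreshed by the kernel on
 * each timekeeping update; reading it never requires a syscall.
 */
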
notrace static cycle_t vread_tsc(void)
{
	cycle_t ret;
	u64 last;

	/*
	 * Empirically, a fence (of type that depends on the CPU)
	 * before rdtsc is enough to ensure that rdtsc is ordered
	 * with respect to loads. The various CPU manuals are unclear
	 * as to whether rdtsc can be reordered with later loads,
	 * but no one has ever seen it happen.
	 */
	rdtsc_barrier();
	ret = (cycle_t)vget_cycles();

	last = VVAR(vsyscall_gtod_data).clock.cycle_last;

	if (likely(ret >= last))
		return ret;

	/*
	 * GCC likes to generate cmov here, but this branch is extremely
	 * predictable (it's just a function of time and the likely is
	 * very likely) and there's a data dependence, so force GCC
	 * to generate a branch instead. I don't barrier() because
	 * we don't actually need a barrier, and if this function
	 * ever gets inlined it will generate worse code.
	 */
	asm volatile ("");
	return last;
}

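/*
 * (rdtsc_barrier() above is an alternatives-patched fence: LFENCE on Intel,
 * MFENCE on AMD, whichever that CPU needs to keep rdtsc ordered.)
 */
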
static notrace cycle_t vread_hpet(void)
{
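	/*
	 * 0xf0 is the offset of the HPET main counter register
	 * (HPET_COUNTER); the VSYSCALL_HPET fixmap gives this code a
	 * read-only mapping of the HPET MMIO range.
	 */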
	return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
}

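/*
 * Fallback paths: issue the real syscalls when no usable vclock is
 * available. The inline asm follows the x86-64 syscall convention:
 * number in rax (tied to the output by the "0" constraint), arguments
 * in rdi ("D") and rsi ("S"), result returned in rax.
 */
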
notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
{
	long ret;
	asm("syscall" : "=a" (ret) :
	    "0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory");
	return ret;
}

notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
{
	long ret;

	asm("syscall" : "=a" (ret) :
	    "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
	return ret;
}

notrace static inline long vgetns(void)
{
	long v;
	cycles_t cycles;
	if (gtod->clock.vclock_mode == VCLOCK_TSC)
		cycles = vread_tsc();
	else if (gtod->clock.vclock_mode == VCLOCK_HPET)
		cycles = vread_hpet();
	else
		return 0;
	v = (cycles - gtod->clock.cycle_last) & gtod->clock.mask;
	return (v * gtod->clock.mult) >> gtod->clock.shift;
}

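/*
 * Illustrative numbers for the mult/shift scaling above (not taken from
 * any particular kernel): for a 2.5 GHz TSC the kernel could pick
 * shift = 22 and mult = (10^9 << 22) / 2.5e9 ~= 1677722, so a 2500-cycle
 * delta (1 us) becomes (2500 * 1677722) >> 22 ~= 1000 ns.
 */
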
/* Code size doesn't matter (vdso is 4k anyway) and this is faster. */
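/*
 * The do_* helpers are lockless seqcount readers: each one snapshots the
 * gtod fields, then retries if read_seqcount_retry() reports a concurrent
 * kernel update. do_realtime() and do_monotonic() return the vclock mode
 * so the caller can fall back to a real syscall on VCLOCK_NONE.
 */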
notrace static int __always_inline do_realtime(struct timespec *ts)
{
	unsigned long seq, ns;
	int mode;

	do {
		seq = read_seqcount_begin(&gtod->seq);
		mode = gtod->clock.vclock_mode;
		ts->tv_sec = gtod->wall_time_sec;
		ts->tv_nsec = gtod->wall_time_nsec;
		ns = vgetns();
	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));

	timespec_add_ns(ts, ns);
	return mode;
}

notrace static int do_monotonic(struct timespec *ts)
{
	unsigned long seq, ns;
	int mode;

	do {
		seq = read_seqcount_begin(&gtod->seq);
		mode = gtod->clock.vclock_mode;
		ts->tv_sec = gtod->monotonic_time_sec;
		ts->tv_nsec = gtod->monotonic_time_nsec;
		ns = vgetns();
	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
	timespec_add_ns(ts, ns);

	return mode;
}

notrace static int do_realtime_coarse(struct timespec *ts)
{
	unsigned long seq;
	do {
		seq = read_seqcount_begin(&gtod->seq);
		ts->tv_sec = gtod->wall_time_coarse.tv_sec;
		ts->tv_nsec = gtod->wall_time_coarse.tv_nsec;
	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
	return 0;
}

notrace static int do_monotonic_coarse(struct timespec *ts)
{
	unsigned long seq;
	do {
		seq = read_seqcount_begin(&gtod->seq);
		ts->tv_sec = gtod->monotonic_time_coarse.tv_sec;
		ts->tv_nsec = gtod->monotonic_time_coarse.tv_nsec;
	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
	return 0;
}

notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
{
	int ret = VCLOCK_NONE;

	switch (clock) {
	case CLOCK_REALTIME:
		ret = do_realtime(ts);
		break;
	case CLOCK_MONOTONIC:
		ret = do_monotonic(ts);
		break;
	case CLOCK_REALTIME_COARSE:
		return do_realtime_coarse(ts);
	case CLOCK_MONOTONIC_COARSE:
		return do_monotonic_coarse(ts);
	}

	if (ret == VCLOCK_NONE)
		return vdso_fallback_gettime(clock, ts);
	return 0;
}
int clock_gettime(clockid_t, struct timespec *)
	__attribute__((weak, alias("__vdso_clock_gettime")));

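/*
 * Illustrative userspace view (not part of this file): a call such as
 *
 *	struct timespec ts;
 *	clock_gettime(CLOCK_MONOTONIC, &ts);
 *
 * is routed by libc to the vDSO symbol exported through the weak alias
 * above and normally completes without entering the kernel.
 */
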
notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
{
	long ret = VCLOCK_NONE;

	if (likely(tv != NULL)) {
		BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
			     offsetof(struct timespec, tv_nsec) ||
			     sizeof(*tv) != sizeof(struct timespec));
		ret = do_realtime((struct timespec *)tv);
		/* do_realtime() filled in tv_nsec; convert to microseconds. */
		tv->tv_usec /= 1000;
	}

	if (unlikely(tz != NULL)) {
		/* Avoid memcpy. Some old compilers fail to inline it */
		tz->tz_minuteswest = gtod->sys_tz.tz_minuteswest;
		tz->tz_dsttime = gtod->sys_tz.tz_dsttime;
	}

	if (ret == VCLOCK_NONE)
		return vdso_fallback_gtod(tv, tz);
	return 0;
}
int gettimeofday(struct timeval *, struct timezone *)
	__attribute__((weak, alias("__vdso_gettimeofday")));

/* This will break when the xtime seconds get inaccurate, but that is unlikely. */
notrace time_t __vdso_time(time_t *t)
{
	/* This is atomic on x86_64 so we don't need any locks. */
	time_t result = ACCESS_ONCE(VVAR(vsyscall_gtod_data).wall_time_sec);
	if (t)
		*t = result;
	return result;
}
int time(time_t *t) __attribute__((weak, alias("__vdso_time")));