/*
 * Copyright 2006 Andi Kleen, SUSE Labs.
 * Subject to the GNU Public License, v.2
 *
 * Fast user context implementation of clock_gettime, gettimeofday, and time.
 *
 * 32 Bit compat layer by Stefani Seibold <stefani@seibold.net>
 *  sponsored by Rohde & Schwarz GmbH & Co. KG Munich/Germany
 *
 * The code should have no internal unresolved relocations.
 * Check with readelf after changing.
 */
/* Disable profiling for userspace code: */
#define DISABLE_BRANCH_PROFILING
#include <linux/kernel.h>
#include <uapi/linux/time.h>
#include <linux/string.h>
#include <asm/vsyscall.h>
#include <asm/fixmap.h>
#include <asm/vgtod.h>
#include <asm/hpet.h>
#include <asm/unistd.h>
#include <asm/io.h>
#include <asm/pvclock.h>
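/*
 * All time reads below use the kernel's timekeeping snapshot in
 * vsyscall_gtod_data, which lives in the read-only vvar page mapped
 * into every process, so the fast paths never enter the kernel.
 */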
#define gtod (&VVAR(vsyscall_gtod_data))
extern int __vdso_clock_gettime(clockid_t clock, struct timespec *ts);
extern int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz);
extern time_t __vdso_time(time_t *t);

#ifndef BUILD_VDSO32
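/* Read the HPET main counter through its fixed vsyscall mapping. */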
static notrace cycle_t vread_hpet(void)
{
	return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + HPET_COUNTER);
}
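/*
 * When the current clock source cannot be read from userspace, fall
 * back to the real syscalls.
 */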
notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
{
	long ret;
	asm("syscall" : "=a" (ret) :
	    "0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory");
	return ret;
}
notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
{
	long ret;
	asm("syscall" : "=a" (ret) :
	    "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
	return ret;
}
#ifdef CONFIG_PARAVIRT_CLOCK
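/* Locate this CPU's pvclock time info within the pvclock fixmap pages. */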
static notrace const struct pvclock_vsyscall_time_info *get_pvti(int cpu)
{
	const struct pvclock_vsyscall_time_info *pvti_base;
	int idx = cpu / (PAGE_SIZE/PVTI_SIZE);
	int offset = cpu % (PAGE_SIZE/PVTI_SIZE);

	BUG_ON(PVCLOCK_FIXMAP_BEGIN + idx > PVCLOCK_FIXMAP_END);

	pvti_base = (struct pvclock_vsyscall_time_info *)
		    __fix_to_virt(PVCLOCK_FIXMAP_BEGIN+idx);

	return &pvti_base[offset];
}
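/*
 * Read the paravirt clock published by the hypervisor, retrying if we
 * migrate to another CPU or the per-CPU data is being updated.
 */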
static notrace cycle_t vread_pvclock(int *mode)
{
	const struct pvclock_vsyscall_time_info *pvti;
	cycle_t ret;
	u64 last;
	u32 version;
	u8 flags;
	unsigned cpu, cpu1;

	/*
	 * Note: hypervisor must guarantee that:
	 * 1. cpu ID number maps 1:1 to per-CPU pvclock time info.
	 * 2. that per-CPU pvclock time info is updated if the
	 *    underlying CPU changes.
	 * 3. that version is increased whenever underlying CPU
	 *    changes.
	 */
	do {
		cpu = __getcpu() & VGETCPU_CPU_MASK;
		/* TODO: We can put vcpu id into higher bits of pvti.version.
		 * This will save a couple of cycles by getting rid of
		 * __getcpu() calls (Gleb).
		 */

		pvti = get_pvti(cpu);

		version = __pvclock_read_cycles(&pvti->pvti, &ret, &flags);

		/*
		 * Test we're still on the cpu as well as the version.
		 * We could have been migrated just after the first
		 * vgetcpu but before fetching the version, so we
		 * wouldn't notice a version change.
		 */
		cpu1 = __getcpu() & VGETCPU_CPU_MASK;
	} while (unlikely(cpu != cpu1 ||
			  (pvti->pvti.version & 1) ||
			  pvti->pvti.version != version));

	if (unlikely(!(flags & PVCLOCK_TSC_STABLE_BIT)))
		*mode = VCLOCK_NONE;

	/* refer to tsc.c read_tsc() comment for rationale */
	last = gtod->clock.cycle_last;

	if (likely(ret >= last))
		return ret;

	return last;
}
#endif
#else

extern u8 hpet_page
	__attribute__((visibility("hidden")));

#ifdef CONFIG_HPET_TIMER
static notrace cycle_t vread_hpet(void)
{
	return readl((const void __iomem *)(&hpet_page + HPET_COUNTER));
}
#endif
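/*
 * The 32-bit compat vDSO cannot use the syscall instruction; the
 * fallbacks go through the VDSO32_vsyscall trampoline instead.
 */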
notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
{
	long ret;

	asm(
		"mov %%ebx, %%edx \n"
		"mov %2, %%ebx \n"
		"call VDSO32_vsyscall \n"
		"mov %%edx, %%ebx \n"
		: "=a" (ret)
		: "0" (__NR_clock_gettime), "g" (clock), "c" (ts)
		: "memory", "edx");
	return ret;
}
notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
{
	long ret;

	asm(
		"mov %%ebx, %%edx \n"
		"mov %2, %%ebx \n"
		"call VDSO32_vsyscall \n"
		"mov %%edx, %%ebx \n"
		: "=a" (ret)
		: "0" (__NR_gettimeofday), "g" (tv), "c" (tz)
		: "memory", "edx");
	return ret;
}
#ifdef CONFIG_PARAVIRT_CLOCK
static notrace cycle_t vread_pvclock(int *mode)
{
	/* The paravirt clock is not usable from the 32-bit compat vDSO. */
	*mode = VCLOCK_NONE;
	return 0;
}
#endif

#endif
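/*
 * TSC read for VCLOCK_TSC mode.  The result is clamped to cycle_last so
 * that time never appears to go backwards across CPUs.
 */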
notrace static cycle_t vread_tsc(void)
{
	cycle_t ret;
	u64 last;

	/*
	 * Empirically, a fence (of type that depends on the CPU)
	 * before rdtsc is enough to ensure that rdtsc is ordered
	 * with respect to loads.  The various CPU manuals are unclear
	 * as to whether rdtsc can be reordered with later loads,
	 * but no one has ever seen it happen.
	 */
	rdtsc_barrier();
	ret = (cycle_t)__native_read_tsc();

	last = gtod->clock.cycle_last;

	if (likely(ret >= last))
		return ret;

	/*
	 * GCC likes to generate cmov here, but this branch is extremely
	 * predictable (it's just a function of time and the likely is
	 * very likely) and there's a data dependence, so force GCC
	 * to generate a branch instead.  I don't barrier() because
	 * we don't actually need a barrier, and if this function
	 * ever gets inlined it will generate worse code.
	 */
	asm volatile ("");
	return last;
}
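/*
 * Turn a raw counter delta from the active clock source into scaled
 * nanoseconds (callers still apply gtod->clock.shift).
 */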
notrace static inline u64 vgetsns(int *mode)
{
	u64 v;
	cycles_t cycles;

	if (gtod->clock.vclock_mode == VCLOCK_TSC)
		cycles = vread_tsc();
#ifdef CONFIG_HPET_TIMER
	else if (gtod->clock.vclock_mode == VCLOCK_HPET)
		cycles = vread_hpet();
#endif
#ifdef CONFIG_PARAVIRT_CLOCK
	else if (gtod->clock.vclock_mode == VCLOCK_PVCLOCK)
		cycles = vread_pvclock(mode);
#endif
	else
		return 0;
	v = (cycles - gtod->clock.cycle_last) & gtod->clock.mask;
	return v * gtod->clock.mult;
}
/* Code size doesn't matter (vdso is 4k anyway) and this is faster. */
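/*
 * The reads below are protected by the gtod seqcount: if the kernel
 * updates the timekeeping data while we are reading it,
 * read_seqcount_retry() fails and the loop starts over.
 */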
notrace static int __always_inline do_realtime(struct timespec *ts)
{
	unsigned long seq;
	u64 ns;
	int mode;

	ts->tv_nsec = 0;
	do {
		seq = raw_read_seqcount_begin(&gtod->seq);
		mode = gtod->clock.vclock_mode;
		ts->tv_sec = gtod->wall_time_sec;
		ns = gtod->wall_time_snsec;
		ns += vgetsns(&mode);
		ns >>= gtod->clock.shift;
	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));

	timespec_add_ns(ts, ns);
	return mode;
}
notrace static int __always_inline do_monotonic(struct timespec *ts)
{
	unsigned long seq;
	u64 ns;
	int mode;

	ts->tv_nsec = 0;
	do {
		seq = raw_read_seqcount_begin(&gtod->seq);
		mode = gtod->clock.vclock_mode;
		ts->tv_sec = gtod->monotonic_time_sec;
		ns = gtod->monotonic_time_snsec;
		ns += vgetsns(&mode);
		ns >>= gtod->clock.shift;
	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
	timespec_add_ns(ts, ns);

	return mode;
}
notrace static void do_realtime_coarse(struct timespec *ts)
{
	unsigned long seq;
	do {
		seq = raw_read_seqcount_begin(&gtod->seq);
		ts->tv_sec = gtod->wall_time_coarse.tv_sec;
		ts->tv_nsec = gtod->wall_time_coarse.tv_nsec;
	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
}
notrace static void do_monotonic_coarse(struct timespec *ts)
{
	unsigned long seq;
	do {
		seq = raw_read_seqcount_begin(&gtod->seq);
		ts->tv_sec = gtod->monotonic_time_coarse.tv_sec;
		ts->tv_nsec = gtod->monotonic_time_coarse.tv_nsec;
	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
}
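/*
 * vDSO entry points.  Each falls back to the real syscall if the active
 * clock source cannot be read from userspace (mode == VCLOCK_NONE).
 */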
notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
{
	switch (clock) {
	case CLOCK_REALTIME:
		if (do_realtime(ts) == VCLOCK_NONE)
			goto fallback;
		break;
	case CLOCK_MONOTONIC:
		if (do_monotonic(ts) == VCLOCK_NONE)
			goto fallback;
		break;
	case CLOCK_REALTIME_COARSE:
		do_realtime_coarse(ts);
		break;
	case CLOCK_MONOTONIC_COARSE:
		do_monotonic_coarse(ts);
		break;
	default:
		goto fallback;
	}

	return 0;
fallback:
	return vdso_fallback_gettime(clock, ts);
}
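/* The unprefixed symbol is provided as a weak alias for the C library to bind. */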
int clock_gettime(clockid_t, struct timespec *)
	__attribute__((weak, alias("__vdso_clock_gettime")));
notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
{
	if (likely(tv != NULL)) {
		BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
			     offsetof(struct timespec, tv_nsec) ||
			     sizeof(*tv) != sizeof(struct timespec));
		if (unlikely(do_realtime((struct timespec *)tv) == VCLOCK_NONE))
			return vdso_fallback_gtod(tv, tz);
		/* do_realtime() filled in nanoseconds; convert to microseconds */
		tv->tv_usec /= 1000;
	}
	if (unlikely(tz != NULL)) {
		/* Avoid memcpy. Some old compilers fail to inline it */
		tz->tz_minuteswest = gtod->sys_tz.tz_minuteswest;
		tz->tz_dsttime = gtod->sys_tz.tz_dsttime;
	}

	return 0;
}
int gettimeofday(struct timeval *, struct timezone *)
	__attribute__((weak, alias("__vdso_gettimeofday")));
/*
 * This will break when the xtime seconds get inaccurate, but that is
 * unlikely
 */
notrace time_t __vdso_time(time_t *t)
{
	/* This is atomic on x86 so we don't need any locks. */
	time_t result = ACCESS_ONCE(gtod->wall_time_sec);

	if (t)
		*t = result;
	return result;
}
int time(time_t *t)
	__attribute__((weak, alias("__vdso_time")));