/*
 * linux/arch/x86_64/kernel/vsyscall.c
 *
 * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
 * Copyright 2003 Andi Kleen, SuSE Labs.
 *
 * Thanks to hpa@transmeta.com for some useful hints.
 * Special thanks to Ingo Molnar for his early experience with
 * a different vsyscall implementation for Linux/IA32 and for the name.
 *
 * vsyscall 1 is located at -10Mbyte, vsyscall 2 is located
 * at virtual address -10Mbyte+1024bytes, etc. There are at most 4
 * vsyscalls. One vsyscall can reserve more than 1 slot to avoid
 * jumping out of line if necessary. We cannot add more with this
 * mechanism because older kernels won't return -ENOSYS.
 * If we want more than four we need a vDSO.
 *
 * Note: the concept clashes with User Mode Linux. If you use UML and
 * want per-guest time, just set the kernel.vsyscall64 sysctl to 0.
 */
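
/*
 * Illustrative user-space sketch (not part of this file): the layout above
 * puts slot N at -10Mbyte + N*1024, i.e. 0xffffffffff600000 + N*1024 on
 * x86-64.  Assuming the prototypes defined further down (vgettimeofday in
 * slot 0, vtime in slot 1), a program could call them through plain function
 * pointers.  VSYSCALL_BASE/VSYSCALL_SLOT are names local to this sketch.
 *
 *	#include <sys/time.h>
 *	#include <time.h>
 *
 *	#define VSYSCALL_BASE	  0xffffffffff600000UL
 *	#define VSYSCALL_SLOT(nr) (VSYSCALL_BASE + 1024UL * (nr))
 *
 *	int main(void)
 *	{
 *		int (*vgtod)(struct timeval *, struct timezone *) =
 *			(int (*)(struct timeval *, struct timezone *))
 *			VSYSCALL_SLOT(0);
 *		time_t (*vtm)(time_t *) =
 *			(time_t (*)(time_t *)) VSYSCALL_SLOT(1);
 *		struct timeval tv;
 *
 *		vgtod(&tv, NULL);
 *		(void) vtm(NULL);
 *		return 0;
 *	}
 */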

#include <linux/time.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/seqlock.h>
#include <linux/jiffies.h>
#include <linux/sysctl.h>
#include <linux/getcpu.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/notifier.h>

#include <asm/vsyscall.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/fixmap.h>
#include <asm/errno.h>
#include <asm/io.h>
#include <asm/segment.h>
#include <asm/desc.h>
#include <asm/topology.h>

#define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr)))

int __sysctl_vsyscall __section_sysctl_vsyscall = 1;
seqlock_t __xtime_lock __section_xtime_lock = SEQLOCK_UNLOCKED;
int __vgetcpu_mode __section_vgetcpu_mode;

#include <asm/unistd.h>

static __always_inline void timeval_normalize(struct timeval *tv)
{
	time_t __sec = tv->tv_usec / 1000000;

	if (__sec) {
		tv->tv_usec %= 1000000;
		tv->tv_sec += __sec;
	}
}

static __always_inline void do_vgettimeofday(struct timeval *tv)
{
	long sequence, t;
	unsigned long sec, usec;

	do {
		sequence = read_seqbegin(&__xtime_lock);

		sec = __xtime.tv_sec;
		usec = (__xtime.tv_nsec / 1000) +
			(__jiffies - __wall_jiffies) * (1000000 / HZ);

		if (__vxtime.mode != VXTIME_HPET) {
			t = get_cycles_sync();
			if (t < __vxtime.last_tsc)
				t = __vxtime.last_tsc;
			usec += ((t - __vxtime.last_tsc) *
				 __vxtime.tsc_quot) >> 32;
			/* See comment in x86_64 do_gettimeofday. */
		} else {
			usec += ((readl((void __iomem *)
				   fix_to_virt(VSYSCALL_HPET) + 0xf0) -
				  __vxtime.last) * __vxtime.quot) >> 32;
		}
	} while (read_seqretry(&__xtime_lock, sequence));

	tv->tv_sec = sec + usec / 1000000;
	tv->tv_usec = usec % 1000000;
}
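
/*
 * A rough worked example of the interpolation above (illustrative only):
 * __vxtime.tsc_quot is a 32.32 fixed-point "microseconds per TSC cycle"
 * factor maintained by the timer code, on the order of
 * (1000 << 32) / cpu_khz.  With a hypothetical 1 GHz TSC:
 *
 *	unsigned long tsc_quot = (1000UL << 32) / 1000000;	about 4294967
 *	unsigned long cycles   = 2000000;			2 ms of cycles
 *	unsigned long usec     = (cycles * tsc_quot) >> 32;	about 2000
 *
 * so the multiply-and-shift converts elapsed cycles to microseconds without
 * a division in the fast path (truncation loses at most a microsecond).
 */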

/* RED-PEN may want to re-add seq locking, but then the variable should be write-once. */
static __always_inline void do_get_tz(struct timezone *tz)
{
	*tz = __sys_tz;
}

static __always_inline int gettimeofday(struct timeval *tv, struct timezone *tz)
{
	int ret;
	asm volatile("vsysc2: syscall"
		: "=a" (ret)
		: "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : __syscall_clobber);
	return ret;
}

static __always_inline long time_syscall(long *t)
{
	long secs;
	asm volatile("vsysc1: syscall"
		: "=a" (secs)
		: "0" (__NR_time), "D" (t) : __syscall_clobber);
	return secs;
}

int __vsyscall(0) vgettimeofday(struct timeval *tv, struct timezone *tz)
{
	if (!__sysctl_vsyscall)
		return gettimeofday(tv, tz);
	if (tv)
		do_vgettimeofday(tv);
	if (tz)
		do_get_tz(tz);
	return 0;
}

/* This will break when the xtime seconds get inaccurate, but that is
 * unlikely. */
time_t __vsyscall(1) vtime(time_t *t)
{
	if (!__sysctl_vsyscall)
		return time_syscall(t);
	else if (t)
		*t = __xtime.tv_sec;
	return __xtime.tv_sec;
}

/* Fast way to get the current CPU and node.
   This helps to implement per-node and per-CPU caches in user space.
   The result is not guaranteed without CPU affinity, but it usually
   works out because the scheduler tries to keep a thread on the same
   CPU.

   tcache must point to a two-element array of longs.
   All arguments can be NULL. */
long __vsyscall(2)
vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
{
	unsigned int dummy, p;
	unsigned long j = 0;

	/* Fast cache - only recompute the value once per jiffy and avoid
	   the relatively costly rdtscp/cpuid otherwise.
	   This works because the scheduler usually keeps the process
	   on the same CPU and this syscall doesn't guarantee its
	   results anyway.
	   We do this here because otherwise user space would do it on
	   its own in a likely inferior way (no access to jiffies).
	   If you don't like it, pass NULL. */
	if (tcache && tcache->blob[0] == (j = __jiffies)) {
		p = tcache->blob[1];
	} else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
		/* Load per CPU data from RDTSCP */
		rdtscp(dummy, dummy, p);
	} else {
		/* Load per CPU data from GDT */
		asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
	}
	if (tcache) {
		tcache->blob[0] = j;
		tcache->blob[1] = p;
	}
	if (cpu)
		*cpu = p & 0xfff;
	if (node)
		*node = p >> 12;
	return 0;
}
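
/*
 * Illustrative user-space sketch (not part of this file): calling vgetcpu
 * through its fixed slot (slot 2, i.e. -10Mbyte + 2*1024) with the
 * two-element cache words described above.  vgetcpu_p is a name local to
 * this sketch; the struct layout mirrors linux/getcpu.h.
 *
 *	struct getcpu_cache { unsigned long blob[128 / sizeof(long)]; };
 *
 *	int main(void)
 *	{
 *		long (*vgetcpu_p)(unsigned *, unsigned *, struct getcpu_cache *) =
 *			(long (*)(unsigned *, unsigned *, struct getcpu_cache *))
 *			0xffffffffff600800UL;
 *		static struct getcpu_cache cache;
 *		unsigned cpu, node;
 *
 *		vgetcpu_p(&cpu, &node, &cache);
 *		vgetcpu_p(&cpu, &node, &cache);
 *		return (int) cpu;
 *	}
 *
 * The first call fills blob[0]/blob[1]; later calls in the same jiffy can
 * reuse the cached value instead of executing rdtscp or lsl again.
 */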

long __vsyscall(3) venosys_1(void)
{
	return -ENOSYS;
}

#define SYSCALL	0x050f
#define NOP2	0x9090

/* NOP out the syscall in the vsyscall page when it is not needed. */
static int vsyscall_sysctl_change(ctl_table *ctl, int write, struct file *filp,
			void __user *buffer, size_t *lenp, loff_t *ppos)
{
	extern u16 vsysc1, vsysc2;
	u16 __iomem *map1, *map2;
	int ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);

	if (!write)
		return ret;
	/* gcc has some trouble with __va(__pa()), so just do it this way. */
	map1 = ioremap(__pa_symbol(&vsysc1), 2);
	if (!map1)
		return -ENOMEM;
	map2 = ioremap(__pa_symbol(&vsysc2), 2);
	if (!map2) {
		ret = -ENOMEM;
		goto out;
	}
	if (!sysctl_vsyscall) {
		writew(SYSCALL, map1);
		writew(SYSCALL, map2);
	} else {
		writew(NOP2, map1);
		writew(NOP2, map2);
	}
	iounmap(map2);
out:
	iounmap(map1);
	return ret;
}
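
/*
 * For reference: 0x050f is the two-byte x86 "syscall" instruction (bytes
 * 0x0f 0x05) stored as a little-endian u16, and 0x9090 is two one-byte NOPs.
 * Patching one or the other over the vsysc1/vsysc2 labels above is how the
 * sysctl enables or disables the fallback syscall instructions in the
 * vsyscall page.
 */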

static int vsyscall_sysctl_nostrat(ctl_table *t, int __user *name, int nlen,
				void __user *oldval, size_t __user *oldlenp,
				void __user *newval, size_t newlen,
				void **context)
{
	return -ENOSYS;
}

static ctl_table kernel_table2[] = {
	{ .ctl_name = 99, .procname = "vsyscall64",
	  .data = &sysctl_vsyscall, .maxlen = sizeof(int), .mode = 0644,
	  .strategy = vsyscall_sysctl_nostrat,
	  .proc_handler = vsyscall_sysctl_change },
	{ 0, }
};

static ctl_table kernel_root_table2[] = {
	{ .ctl_name = CTL_KERN, .procname = "kernel", .mode = 0555,
	  .child = kernel_table2 },
	{ 0 },
};
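
/*
 * Illustrative user-space sketch (not part of this file): the table above is
 * what exposes the knob as /proc/sys/kernel/vsyscall64, so the UML advice in
 * the header comment amounts to:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/proc/sys/kernel/vsyscall64", O_WRONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		write(fd, "0\n", 2);
 *		close(fd);
 *		return 0;
 *	}
 */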

static void __cpuinit write_rdtscp_cb(void *info)
{
	write_rdtscp_aux((unsigned long)info);
}

void __cpuinit vsyscall_set_cpu(int cpu)
{
	unsigned long *d;
	unsigned long node = 0;
#ifdef CONFIG_NUMA
	node = cpu_to_node[cpu];
#endif
	if (cpu_has(&cpu_data[cpu], X86_FEATURE_RDTSCP)) {
		void *info = (void *)((node << 12) | cpu);
		/* Can happen on a preemptible kernel */
		if (get_cpu() == cpu)
			write_rdtscp_cb(info);
#ifdef CONFIG_SMP
		else {
			/* the notifier is unfortunately not executed on the
			   target CPU */
			smp_call_function_single(cpu, write_rdtscp_cb, info, 0, 1);
		}
#endif
		put_cpu();
	}

	/* Store the cpu number in the GDT limit so that it can be loaded
	   quickly in user space in vgetcpu.
	   12 bits for the CPU and 8 bits for the node. */
	d = (unsigned long *)(cpu_gdt(cpu) + GDT_ENTRY_PER_CPU);
	*d = 0x0f40000000000ULL;
	*d |= cpu;
	*d |= (node & 0xf) << 12;
	*d |= (node >> 4) << 48;
}
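
/*
 * A worked example of the encoding above (illustrative numbers): for cpu 5 on
 * node 1, vgetcpu's "lsl" path reads back a segment limit of
 * (1 << 12) | 5 = 0x1005, which it splits as cpu = 0x1005 & 0xfff = 5 and
 * node = 0x1005 >> 12 = 1.  Nodes above 15 spill their upper 4 bits into the
 * high limit nibble of the descriptor via the "<< 48" term.
 */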

static void __init map_vsyscall(void)
{
	extern char __vsyscall_0;
	unsigned long physaddr_page0 = __pa_symbol(&__vsyscall_0);

	__set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL);
}

static int __init vsyscall_init(void)
{
	BUG_ON(((unsigned long) &vgettimeofday !=
			VSYSCALL_ADDR(__NR_vgettimeofday)));
	BUG_ON((unsigned long) &vtime != VSYSCALL_ADDR(__NR_vtime));
	BUG_ON((VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE)));
	BUG_ON((unsigned long) &vgetcpu != VSYSCALL_ADDR(__NR_vgetcpu));
	map_vsyscall();
	register_sysctl_table(kernel_root_table2, 0);
	return 0;
}

__initcall(vsyscall_init);