/*
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 *
 * This contains most of the x86 vDSO kernel-side code.
 */

#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/cpu.h>
#include <linux/ptrace.h>
#include <asm/pvclock.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/vvar.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/cpufeature.h>

#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;
#endif

void __init init_vdso_image(const struct vdso_image *image)
{
	BUG_ON(image->size % PAGE_SIZE != 0);

	apply_alternatives((struct alt_instr *)(image->data + image->alt),
			   (struct alt_instr *)(image->data + image->alt +
						image->alt_len));
}

struct linux_binprm;
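
/*
 * The vdso text is not mapped from a file: each page is demand-faulted
 * straight out of the kernel-resident image blob, so vdso_fault() below
 * simply translates the fault offset into the matching page of
 * image->data.
 */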

static int vdso_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;

	if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size)
		return VM_FAULT_SIGBUS;

	vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT));
	get_page(vmf->page);
	return 0;
}
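
/*
 * If the vdso is moved (e.g. by mremap() during checkpoint/restore), a
 * 32-bit task that is inside a fast syscall still has a saved IP that
 * points at the old int80 landing pad.  vdso_fix_landing() rewrites
 * that IP so the task returns into the relocated vdso.
 */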

static void vdso_fix_landing(const struct vdso_image *image,
		struct vm_area_struct *new_vma)
{
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	if (in_ia32_syscall() && image == &vdso_image_32) {
		struct pt_regs *regs = current_pt_regs();
		unsigned long vdso_land = image->sym_int80_landing_pad;
		unsigned long old_land_addr = vdso_land +
			(unsigned long)current->mm->context.vdso;

		/* Fixing userspace landing - look at do_fast_syscall_32 */
		if (regs->ip == old_land_addr)
			regs->ip = new_vma->vm_start + vdso_land;
	}
#endif
}

static int vdso_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
	const struct vdso_image *image = current->mm->context.vdso_image;

	if (image->size != new_size)
		return -EINVAL;

	if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
		return -EFAULT;

	vdso_fix_landing(image, new_vma);
	current->mm->context.vdso = (void __user *)new_vma->vm_start;

	return 0;
}
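
/*
 * The vvar area holds data pages shared with userspace: the kernel's
 * vvar page itself and, when the pvclock vclock was ever used, the
 * paravirtual clock page.  Both are remapped by PFN here rather than
 * being backed by ordinary struct pages.
 */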

static int vvar_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;
	long sym_offset;
	int ret = -EFAULT;

	if (!image)
		return VM_FAULT_SIGBUS;

	sym_offset = (long)(vmf->pgoff << PAGE_SHIFT) +
		image->sym_vvar_start;

	/*
	 * Sanity check: a symbol offset of zero means that the page
	 * does not exist for this vdso image, not that the page is at
	 * offset zero relative to the text mapping.  This should be
	 * impossible here, because sym_offset should only be zero for
	 * the page past the end of the vvar mapping.
	 */
	if (sym_offset == 0)
		return VM_FAULT_SIGBUS;

	if (sym_offset == image->sym_vvar_page) {
		ret = vm_insert_pfn(vma, vmf->address,
				    __pa_symbol(&__vvar_page) >> PAGE_SHIFT);
	} else if (sym_offset == image->sym_pvclock_page) {
		struct pvclock_vsyscall_time_info *pvti =
			pvclock_pvti_cpu0_va();
		if (pvti && vclock_was_used(VCLOCK_PVCLOCK)) {
			ret = vm_insert_pfn(vma, vmf->address,
					    __pa(pvti) >> PAGE_SHIFT);
		}
	}

	if (ret == 0 || ret == -EBUSY)
		return VM_FAULT_NOPAGE;

	return VM_FAULT_SIGBUS;
}
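
/*
 * The .name strings below are what show up as "[vdso]" and "[vvar]" in
 * /proc/<pid>/maps.
 */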

static const struct vm_special_mapping vdso_mapping = {
	.name = "[vdso]",
	.fault = vdso_fault,
	.mremap = vdso_mremap,
};
static const struct vm_special_mapping vvar_mapping = {
	.name = "[vvar]",
	.fault = vvar_fault,
};

/*
 * Add vdso and vvar mappings to current process.
 * @image	- blob to map
 * @addr	- request a specific address (zero to map at free addr)
 */
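
/*
 * Layout note: sym_vvar_start is a negative offset from the vdso text,
 * so a single get_unmapped_area() call can reserve both pieces:
 *
 *   addr                            text_start = addr - sym_vvar_start
 *    |<---- vvar/pvclock pages ---->|<-------- vdso text -------->|
 *                                          (image->size bytes)
 */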

static int map_vdso(const struct vdso_image *image, unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long text_start;
	int ret = 0;

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;

	addr = get_unmapped_area(NULL, addr,
				 image->size - image->sym_vvar_start, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	text_start = addr - image->sym_vvar_start;

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	vma = _install_special_mapping(mm, text_start, image->size,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &vdso_mapping);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	vma = _install_special_mapping(mm, addr, -image->sym_vvar_start,
				       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
				       VM_PFNMAP,
				       &vvar_mapping);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		do_munmap(mm, text_start, image->size, NULL);
	} else {
		current->mm->context.vdso = (void __user *)text_start;
		current->mm->context.vdso_image = image;
	}

up_fail:
	up_write(&mm->mmap_sem);
	return ret;
}

#ifdef CONFIG_X86_64
/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset.  This way there is no hole in the middle of address space.
 * To save memory make sure it is still in the same PTE as the stack
 * top.  This doesn't give that many random bits.
 *
 * Note that this algorithm is imperfect: the distribution of the vdso
 * start address within a PMD is biased toward the end.
 *
 * Only used for the 64-bit and x32 vdsos.
 */
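
/*
 * Worked example, assuming 4 KiB pages and 2 MiB PMDs: for
 * start = 0x7f1200001000 and len = 0x4000, the lowest possible end
 * 0x7f1200005000 rounds up to the PMD boundary 0x7f1200200000; after
 * subtracting len, the start address is drawn uniformly per page from
 * [0x7f1200001000, 0x7f12001fc000].
 */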

static unsigned long vdso_addr(unsigned long start, unsigned len)
{
	unsigned long addr, end;
	unsigned offset;

	/*
	 * Round up the start address.  It can start out unaligned as a result
	 * of stack start randomization.
	 */
	start = PAGE_ALIGN(start);

	/* Round the lowest possible end address up to a PMD boundary. */
	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;

	if (end > start) {
		offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
		addr = start + (offset << PAGE_SHIFT);
	} else {
		addr = start;
	}

	/*
	 * Forcibly align the final address in case we have a hardware
	 * issue that requires alignment for performance reasons.
	 */
	addr = align_vdso_addr(addr);

	return addr;
}

static int map_vdso_randomized(const struct vdso_image *image)
{
	unsigned long addr = vdso_addr(current->mm->start_stack,
				       image->size - image->sym_vvar_start);

	return map_vdso(image, addr);
}
#endif
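
/*
 * map_vdso_once() backs the ARCH_MAP_VDSO_* arch_prctl() requests
 * (used e.g. by checkpoint/restore to map a vdso at a chosen address)
 * and therefore must refuse to map a second vdso into the same mm.
 */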

int map_vdso_once(const struct vdso_image *image, unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	/*
	 * Check if we have already mapped vdso blob - fail to prevent
	 * abusing from userspace install_special_mapping, which may
	 * not do accounting and rlimit right.
	 * We could search vma near context.vdso, but it's a slowpath,
	 * so let's explicitly check all VMAs to be completely sure.
	 */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma_is_special_mapping(vma, &vdso_mapping) ||
		    vma_is_special_mapping(vma, &vvar_mapping)) {
			up_write(&mm->mmap_sem);
			return -EEXIST;
		}
	}
	up_write(&mm->mmap_sem);

	return map_vdso(image, addr);
}
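
/*
 * The 32-bit vdso is gated by vdso32_enabled, which lives in the
 * 32-bit vdso setup code (vdso32-setup.c) and is controlled by the
 * "vdso32=" boot parameter.
 */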

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static int load_vdso32(void)
{
	if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
		return 0;

	return map_vdso(&vdso_image_32, 0);
}
#endif

#ifdef CONFIG_X86_64
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	if (!vdso64_enabled)
		return 0;

	return map_vdso_randomized(&vdso_image_64);
}

#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp)
{
#ifdef CONFIG_X86_X32_ABI
	if (test_thread_flag(TIF_X32)) {
		if (!vdso64_enabled)
			return 0;
		return map_vdso_randomized(&vdso_image_x32);
	}
#endif
#ifdef CONFIG_IA32_EMULATION
	return load_vdso32();
#else
	return 0;
#endif
}
#endif

#else
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return load_vdso32();
}
#endif

#ifdef CONFIG_X86_64
static __init int vdso_setup(char *s)
{
	vdso64_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("vdso=", vdso_setup);
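
/*
 * Each CPU gets a GDT entry whose segment limit encodes its cpu and
 * node numbers; userspace vgetcpu() can then recover them cheaply (via
 * lsl on that segment, or via rdtscp, which returns the same encoding
 * kept in the TSC_AUX MSR).
 */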

static void vgetcpu_cpu_init(void *arg)
{
	int cpu = smp_processor_id();
	struct desc_struct d = { };
	unsigned long node = 0;
#ifdef CONFIG_NUMA
	node = cpu_to_node(cpu);
#endif
	if (static_cpu_has(X86_FEATURE_RDTSCP))
		write_rdtscp_aux((node << 12) | cpu);

	/*
	 * Store cpu number in limit so that it can be loaded
	 * quickly in user space in vgetcpu. (12 bits for the CPU
	 * and 8 bits for the node)
	 */
	d.limit0 = cpu | ((node & 0xf) << 12);
	d.limit1 = node >> 4;
	d.type = 5;		/* RO data, expand down, accessed */
	d.dpl = 3;		/* Visible to user code */
	d.s = 1;		/* Not a system segment */
	d.p = 1;		/* Present */
	d.d = 1;		/* 32-bit */

	write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
}
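
/*
 * cpuhp callback: initialize the per-CPU GDT entry on each CPU as it
 * comes online.
 */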

static int vgetcpu_online(unsigned int cpu)
{
	return smp_call_function_single(cpu, vgetcpu_cpu_init, NULL, 1);
}

static int __init init_vdso(void)
{
	init_vdso_image(&vdso_image_64);

#ifdef CONFIG_X86_X32_ABI
	init_vdso_image(&vdso_image_x32);
#endif

	/* notifier priority > KVM */
	return cpuhp_setup_state(CPUHP_AP_X86_VDSO_VMA_ONLINE,
				 "x86/vdso/vma:online", vgetcpu_online, NULL);
}
subsys_initcall(init_vdso);
#endif /* CONFIG_X86_64 */