/*
 * arch/x86/vdso/vma.c
 * Source tree: karo-tx-linux.git (merge commit '3cf2f34' into sched/core)
 */
1 /*
2  * Set up the VMAs to tell the VM about the vDSO.
3  * Copyright 2007 Andi Kleen, SUSE Labs.
4  * Subject to the GPL, v.2
5  */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <asm/vsyscall.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/page.h>
18
#if defined(CONFIG_X86_64)
/* Runtime switch; cleared by the "vdso=0" command-line option (see vdso_setup). */
unsigned int __read_mostly vdso_enabled = 1;

/*
 * Declares the 64-bit vDSO image symbols; init_vdso() below uses
 * vdso_start/vdso_end and fills vdso_pages[] from this declaration.
 */
DECLARE_VDSO_IMAGE(vdso);
extern unsigned short vdso_sync_cpuid;
static unsigned vdso_size;	/* page-rounded size of the 64-bit image */

#ifdef CONFIG_X86_X32_ABI
/* Same bookkeeping for the x32 vDSO image. */
DECLARE_VDSO_IMAGE(vdsox32);
static unsigned vdsox32_size;	/* page-rounded size of the x32 image */
#endif
#endif
31
32 #if defined(CONFIG_X86_32) || defined(CONFIG_X86_X32_ABI) || \
33         defined(CONFIG_COMPAT)
34 void __init patch_vdso32(void *vdso, size_t len)
35 {
36         Elf32_Ehdr *hdr = vdso;
37         Elf32_Shdr *sechdrs, *alt_sec = 0;
38         char *secstrings;
39         void *alt_data;
40         int i;
41
42         BUG_ON(len < sizeof(Elf32_Ehdr));
43         BUG_ON(memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0);
44
45         sechdrs = (void *)hdr + hdr->e_shoff;
46         secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
47
48         for (i = 1; i < hdr->e_shnum; i++) {
49                 Elf32_Shdr *shdr = &sechdrs[i];
50                 if (!strcmp(secstrings + shdr->sh_name, ".altinstructions")) {
51                         alt_sec = shdr;
52                         goto found;
53                 }
54         }
55
56         /* If we get here, it's probably a bug. */
57         pr_warning("patch_vdso32: .altinstructions not found\n");
58         return;  /* nothing to patch */
59
60 found:
61         alt_data = (void *)hdr + alt_sec->sh_offset;
62         apply_alternatives(alt_data, alt_data + alt_sec->sh_size);
63 }
64 #endif
65
66 #if defined(CONFIG_X86_64)
67 static void __init patch_vdso64(void *vdso, size_t len)
68 {
69         Elf64_Ehdr *hdr = vdso;
70         Elf64_Shdr *sechdrs, *alt_sec = 0;
71         char *secstrings;
72         void *alt_data;
73         int i;
74
75         BUG_ON(len < sizeof(Elf64_Ehdr));
76         BUG_ON(memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0);
77
78         sechdrs = (void *)hdr + hdr->e_shoff;
79         secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
80
81         for (i = 1; i < hdr->e_shnum; i++) {
82                 Elf64_Shdr *shdr = &sechdrs[i];
83                 if (!strcmp(secstrings + shdr->sh_name, ".altinstructions")) {
84                         alt_sec = shdr;
85                         goto found;
86                 }
87         }
88
89         /* If we get here, it's probably a bug. */
90         pr_warning("patch_vdso64: .altinstructions not found\n");
91         return;  /* nothing to patch */
92
93 found:
94         alt_data = (void *)hdr + alt_sec->sh_offset;
95         apply_alternatives(alt_data, alt_data + alt_sec->sh_size);
96 }
97
98 static int __init init_vdso(void)
99 {
100         int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
101         int i;
102
103         patch_vdso64(vdso_start, vdso_end - vdso_start);
104
105         vdso_size = npages << PAGE_SHIFT;
106         for (i = 0; i < npages; i++)
107                 vdso_pages[i] = virt_to_page(vdso_start + i*PAGE_SIZE);
108
109 #ifdef CONFIG_X86_X32_ABI
110         patch_vdso32(vdsox32_start, vdsox32_end - vdsox32_start);
111         npages = (vdsox32_end - vdsox32_start + PAGE_SIZE - 1) / PAGE_SIZE;
112         vdsox32_size = npages << PAGE_SHIFT;
113         for (i = 0; i < npages; i++)
114                 vdsox32_pages[i] = virt_to_page(vdsox32_start + i*PAGE_SIZE);
115 #endif
116
117         return 0;
118 }
119 subsys_initcall(init_vdso);
120
121 struct linux_binprm;
122
123 /* Put the vdso above the (randomized) stack with another randomized offset.
124    This way there is no hole in the middle of address space.
125    To save memory make sure it is still in the same PTE as the stack top.
126    This doesn't give that many random bits */
127 static unsigned long vdso_addr(unsigned long start, unsigned len)
128 {
129         unsigned long addr, end;
130         unsigned offset;
131         end = (start + PMD_SIZE - 1) & PMD_MASK;
132         if (end >= TASK_SIZE_MAX)
133                 end = TASK_SIZE_MAX;
134         end -= len;
135         /* This loses some more bits than a modulo, but is cheaper */
136         offset = get_random_int() & (PTRS_PER_PTE - 1);
137         addr = start + (offset << PAGE_SHIFT);
138         if (addr >= end)
139                 addr = end;
140
141         /*
142          * page-align it here so that get_unmapped_area doesn't
143          * align it wrongfully again to the next page. addr can come in 4K
144          * unaligned here as a result of stack start randomization.
145          */
146         addr = PAGE_ALIGN(addr);
147         addr = align_vdso_addr(addr);
148
149         return addr;
150 }
151
152 /* Setup a VMA at program startup for the vsyscall page.
153    Not called for compat tasks */
154 static int setup_additional_pages(struct linux_binprm *bprm,
155                                   int uses_interp,
156                                   struct page **pages,
157                                   unsigned size)
158 {
159         struct mm_struct *mm = current->mm;
160         unsigned long addr;
161         int ret;
162
163         if (!vdso_enabled)
164                 return 0;
165
166         down_write(&mm->mmap_sem);
167         addr = vdso_addr(mm->start_stack, size);
168         addr = get_unmapped_area(NULL, addr, size, 0, 0);
169         if (IS_ERR_VALUE(addr)) {
170                 ret = addr;
171                 goto up_fail;
172         }
173
174         current->mm->context.vdso = (void *)addr;
175
176         ret = install_special_mapping(mm, addr, size,
177                                       VM_READ|VM_EXEC|
178                                       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
179                                       pages);
180         if (ret) {
181                 current->mm->context.vdso = NULL;
182                 goto up_fail;
183         }
184
185 up_fail:
186         up_write(&mm->mmap_sem);
187         return ret;
188 }
189
190 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
191 {
192         return setup_additional_pages(bprm, uses_interp, vdso_pages,
193                                       vdso_size);
194 }
195
196 #ifdef CONFIG_X86_X32_ABI
197 int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
198 {
199         return setup_additional_pages(bprm, uses_interp, vdsox32_pages,
200                                       vdsox32_size);
201 }
202 #endif
203
204 static __init int vdso_setup(char *s)
205 {
206         vdso_enabled = simple_strtoul(s, NULL, 0);
207         return 0;
208 }
209 __setup("vdso=", vdso_setup);
210 #endif