/*
 *  flexible mmap layout support
 *
 * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Started by Ingo Molnar <mingo@elte.hu>
 */

#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/compat.h>
#include <linux/security.h>
#include <asm/pgalloc.h>

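/*
 * Randomization and alignment masks for the mmap base; both are set at
 * boot time by setup_mmap_rnd() according to the machine type.
 */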
unsigned long mmap_rnd_mask;
unsigned long mmap_align_mask;

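/* Maximum number of bytes by which the top of the stack may be randomized. */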
static unsigned long stack_maxrandom_size(void)
{
	if (!(current->flags & PF_RANDOMIZE))
		return 0;
	if (current->personality & ADDR_NO_RANDOMIZE)
		return 0;
	return STACK_RND_MASK << PAGE_SHIFT;
}

/*
 * Top of mmap area (just below the process stack).
 *
 * Leave at least a ~32 MB hole.
 */
#define MIN_GAP (32*1024*1024)
#define MAX_GAP (STACK_TOP/6*5)

static inline int mmap_is_legacy(void)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;
	if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
		return 1;
	return sysctl_legacy_va_layout;
}

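/*
 * Random offset added to the mmap base: an 11-bit page offset for
 * compat (31-bit) tasks, the machine dependent mmap_rnd_mask otherwise.
 */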
static unsigned long mmap_rnd(void)
{
	if (!(current->flags & PF_RANDOMIZE))
		return 0;
	if (is_32bit_task())
		return (get_random_int() & 0x7ff) << PAGE_SHIFT;
	else
		return (get_random_int() & mmap_rnd_mask) << PAGE_SHIFT;
}

static unsigned long mmap_base_legacy(void)
{
	return TASK_UNMAPPED_BASE + mmap_rnd();
}

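/* Top-down mmap base: STACK_TOP minus the clamped stack gap and randomization. */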
static inline unsigned long mmap_base(void)
{
	unsigned long gap = rlimit(RLIMIT_STACK);

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;
	gap &= PAGE_MASK;
	return STACK_TOP - stack_maxrandom_size() - mmap_rnd() - gap;
}

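/*
 * Bottom-up search for a free area. Shared and file backed mappings of
 * 64-bit tasks are aligned according to mmap_align_mask to avoid cache
 * aliasing.
 */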
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info;
	int do_color_align;

	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = !is_32bit_task();

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}

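/*
 * Top-down variant of arch_get_unmapped_area(), used for the flexible
 * mmap layout.
 */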
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;
	int do_color_align;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = !is_32bit_task();

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
	info.high_limit = mm->mmap_base;
	info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

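/*
 * Base address for ET_DYN (position independent) executables: two thirds
 * of STACK_TOP, aligned like other mappings, plus a random offset.
 */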
unsigned long randomize_et_dyn(void)
{
	unsigned long base;

	base = (STACK_TOP / 3 * 2) & (~mmap_align_mask << PAGE_SHIFT);
	return base + mmap_rnd();
}

#ifndef CONFIG_64BIT

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy()) {
		mm->mmap_base = mmap_base_legacy();
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base();
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}

#else

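/*
 * A MAP_FIXED request that reaches beyond the current 3-level page table
 * limit needs an upfront upgrade to a 4-level page table, extending
 * TASK_SIZE to 1UL << 53.
 */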
int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
{
	if (is_compat_task() || (TASK_SIZE >= (1UL << 53)))
		return 0;
	if (!(flags & MAP_FIXED))
		addr = 0;
	if ((addr + len) >= TASK_SIZE)
		return crst_table_upgrade(current->mm, 1UL << 53);
	return 0;
}

static unsigned long
s390_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	unsigned long area;
	int rc;

	area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
	if (!(area & ~PAGE_MASK))
		return area;
	if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
		/* Upgrade the page table to 4 levels and retry. */
		rc = crst_table_upgrade(mm, 1UL << 53);
		if (rc)
			return (unsigned long) rc;
		area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
	}
	return area;
}

static unsigned long
s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
		const unsigned long len, const unsigned long pgoff,
		const unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	unsigned long area;
	int rc;

	area = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
	if (!(area & ~PAGE_MASK))
		return area;
	if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
		/* Upgrade the page table to 4 levels and retry. */
		rc = crst_table_upgrade(mm, 1UL << 53);
		if (rc)
			return (unsigned long) rc;
		area = arch_get_unmapped_area_topdown(filp, addr, len,
						      pgoff, flags);
	}
	return area;
}

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy()) {
		mm->mmap_base = mmap_base_legacy();
		mm->get_unmapped_area = s390_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base();
		mm->get_unmapped_area = s390_get_unmapped_area_topdown;
	}
}

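/*
 * Set the mmap randomization and alignment masks at boot time, based on
 * the machine type: z13 and later get a larger randomization range and
 * an alignment mask that avoids cache aliasing between mappings.
 */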
static int __init setup_mmap_rnd(void)
{
	struct cpuid cpu_id;

	get_cpu_id(&cpu_id);
	switch (cpu_id.machine) {
	case 0x9672:
	case 0x2064:
	case 0x2066:
	case 0x2084:
	case 0x2086:
	case 0x2094:
	case 0x2096:
	case 0x2097:
	case 0x2098:
	case 0x2817:
	case 0x2818:
	case 0x2827:
	case 0x2828:
		mmap_rnd_mask = 0x7ffUL;
		mmap_align_mask = 0UL;
		break;
	case 0x2964:	/* z13 */
	default:
		mmap_rnd_mask = 0x3ff80UL;
		mmap_align_mask = 0x7fUL;
		break;
	}
	return 0;
}
early_initcall(setup_mmap_rnd);

#endif