#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/personality.h>
#include <linux/random.h>

#include <asm/uaccess.h>
#include <asm/ia32.h>
#include <asm/syscalls.h>
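
/*
 * Native 64-bit mmap() entry point. Note that @off is a byte offset:
 * it must be page-aligned and is converted to a page offset before
 * being handed to do_mmap_pgoff().
 */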
asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
		unsigned long prot, unsigned long flags,
		unsigned long fd, unsigned long off)
{
	long error;
	struct file *file;

	error = -EINVAL;
	if (off & ~PAGE_MASK)
		goto out;

	error = -EBADF;
	file = NULL;
	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}
	down_write(&current->mm->mmap_sem);
	error = do_mmap_pgoff(file, addr, len, prot, flags, off >> PAGE_SHIFT);
	up_write(&current->mm->mmap_sem);

	if (file)
		fput(file);
out:
	return error;
}
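
/*
 * Pick the [begin, end) window the unmapped-area search is allowed to
 * use, honouring MAP_32BIT for 64-bit tasks.
 */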
static void find_start_end(unsigned long flags, unsigned long *begin,
			   unsigned long *end)
{
	if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
		unsigned long new_begin;
		/* This is usually needed to map code in the small
		   code model, so it needs to be in the first 31 bits.
		   Limit it to that. This means we need to move the
		   unmapped base down for this case. This can give
		   conflicts with the heap, but we assume that glibc
		   malloc knows how to fall back to mmap. Give it 1GB
		   of playground for now. -AK */
		*begin = 0x40000000;
		*end = 0x80000000;
		if (current->flags & PF_RANDOMIZE) {
			new_begin = randomize_range(*begin,
						    *begin + 0x02000000, 0);
			if (new_begin)
				*begin = new_begin;
		}
	} else {
		*begin = TASK_UNMAPPED_BASE;
		*end = TASK_SIZE;
	}
}
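
/*
 * Bottom-up search for a free range of @len bytes: start at the cached
 * hint and walk upwards through the vmas, wrapping around once so that
 * holes below the cache are not missed.
 */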
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;
	unsigned long begin, end;

	if (flags & MAP_FIXED)
		return addr;

	find_start_end(flags, &begin, &end);

	if (len > end)
		return -ENOMEM;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (end - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	/* a skipped hole may fit the request: restart from the base */
	if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
	    && len <= mm->cached_hole_size) {
		mm->cached_hole_size = 0;
		mm->free_area_cache = begin;
	}
	addr = mm->free_area_cache;
	if (addr < begin)
		addr = begin;
	start_addr = addr;

full_search:
	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point: (!vma || addr < vma->vm_end). */
		if (end - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != begin) {
				start_addr = addr = begin;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		/* remember the largest hole we saw so far */
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		addr = vma->vm_end;
	}
}
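
/*
 * Top-down variant: walk from mm->mmap_base towards lower addresses
 * and place the mapping in the first hole that fits, falling back to
 * the bottom-up search if nothing is found.
 */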
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	/* for MAP_32BIT mappings we force the legacy mmap base */
	if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
		goto bottomup;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	/* check if free_area_cache is useful for us */
	if (len <= mm->cached_hole_size) {
		mm->cached_hole_size = 0;
		mm->free_area_cache = mm->mmap_base;
	}

	/* either no address requested or can't fit in requested address hole */
	addr = mm->free_area_cache;

	/* make sure it can fit in the remaining address space */
	if (addr > len) {
		vma = find_vma(mm, addr-len);
		if (!vma || addr <= vma->vm_start)
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr-len);
	}

	if (mm->mmap_base < len)
		goto bottomup;

	addr = mm->mmap_base-len;
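
	/*
	 * Walk downwards from the mmap base: each iteration tries the
	 * hole just below the vma that blocked the previous candidate.
	 */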
	do {
		/*
		 * Lookup failure means no vma is above this address,
		 * else if new region fits below vma->vm_start,
		 * return with success:
		 */
		vma = find_vma(mm, addr);
		if (!vma || addr+len <= vma->vm_start)
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr);

		/* remember the largest hole we saw so far */
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = vma->vm_start-len;
	} while (len < vma->vm_start);

bottomup:
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->cached_hole_size = ~0UL;
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = mm->mmap_base;
	mm->cached_hole_size = ~0UL;

	return addr;
}
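
/*
 * uname() with a personality fix-up: processes running under the
 * PER_LINUX32 personality see "i686" in utsname.machine instead of
 * the native "x86_64" string.
 */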
asmlinkage long sys_uname(struct new_utsname __user *name)
{
	int err;

	down_read(&uts_sem);
	err = copy_to_user(name, utsname(), sizeof(*name));
	up_read(&uts_sem);
	if (personality(current->personality) == PER_LINUX32)
		err |= copy_to_user(&name->machine, "i686", 5);
	return err ? -EFAULT : 0;
}