/*
 *  linux/arch/arm/mm/mmap.c
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/io.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <asm/cachetype.h>

#define COLOUR_ALIGN(addr,pgoff)		\
	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
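
/*
 * Worked example (assuming the usual ARM value SHMLBA == 4 * PAGE_SIZE
 * == 0x4000 with 4K pages; an assumption, not part of this file):
 * COLOUR_ALIGN(0x8001000, 3) first rounds the hint up to 0x8004000,
 * then adds the colour offset (3 << 12) & 0x3fff == 0x3000, returning
 * 0x8007000.  Page 3 of the object thus always lands on cache colour 3,
 * whatever address the mapping ends up at.
 */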

/* gap between mmap and stack */
#define MIN_GAP (128*1024*1024UL)
#define MAX_GAP ((TASK_SIZE)/6*5)

static int mmap_is_legacy(void)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
		return 1;

	return sysctl_legacy_va_layout;
}

static unsigned long mmap_base(unsigned long rnd)
{
	unsigned long gap = rlimit(RLIMIT_STACK);

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(TASK_SIZE - gap - rnd);
}
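
/*
 * Worked example (assuming TASK_SIZE == 0xbf000000, the common 3G/1G
 * ARM split; an assumption, config-dependent): the default 8MB
 * RLIMIT_STACK is below MIN_GAP, so the gap is clamped up to 128MB and
 * the base becomes PAGE_ALIGN(0xbf000000 - 0x8000000 - rnd), i.e. just
 * under 0xb7000000 once the random offset is subtracted.
 */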

/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches.  We need to ensure that
 * a specific page of an object is always mapped at a multiple of
 * SHMLBA bytes.
 *
 * We unconditionally provide this function for all cases, however
 * in the VIVT case, we optimise out the alignment rules.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int do_align = 0;
	int aliasing = cache_is_vipt_aliasing();
	struct vm_unmapped_area_info info;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.
	 */
	if (aliasing)
		do_align = filp || (flags & MAP_SHARED);

	/*
	 * We enforce the MAP_FIXED case.
	 */
	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}
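
/*
 * Note on the align_mask arithmetic above (assuming 4K pages and
 * SHMLBA == 0x4000): PAGE_MASK & (SHMLBA - 1) == 0xfffff000 & 0x3fff
 * == 0x3000, so only the two colour bits above the page offset are
 * constrained, while align_offset supplies the colour that the file
 * offset demands.
 */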

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			const unsigned long len, const unsigned long pgoff,
			const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	int do_align = 0;
	int aliasing = cache_is_vipt_aliasing();
	struct vm_unmapped_area_info info;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.
	 */
	if (aliasing)
		do_align = filp || (flags & MAP_SHARED);

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	/* requesting a specific address */
	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = FIRST_USER_ADDRESS;
	info.high_limit = mm->mmap_base;
	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = mm->mmap_base;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}
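
/*
 * Note on the "addr & ~PAGE_MASK" test above: vm_unmapped_area()
 * returns either a page-aligned address or a negative errno encoded
 * in the same word (e.g. -ENOMEM == 0xfffffff4 on 32-bit ARM), so any
 * set bits below PAGE_SHIFT mark the value as an error.
 */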

unsigned long arch_mmap_rnd(void)
{
	unsigned long rnd;

	rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);

	return rnd << PAGE_SHIFT;
}
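
/*
 * Worked example (assuming the ARM default of mmap_rnd_bits == 8 with
 * 4K pages; an assumption, tunable via CONFIG_ARCH_MMAP_RND_BITS):
 * rnd lies in [0, 255], so the returned offset ranges up to
 * 255 << 12 == 0xff000 bytes, just under 1MB of ASLR slide.
 */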

void arch_pick_mmap_layout(struct mm_struct *mm)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	if (mmap_is_legacy()) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}
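
/*
 * Layout sketch (assuming TASK_SIZE == 0xbf000000 and the usual
 * TASK_UNMAPPED_BASE of TASK_SIZE / 3 rounded to 16MB, roughly
 * 0x40000000; both assumptions, config-dependent): the legacy layout
 * hands out mappings bottom-up from about 0x40000000 + random_factor,
 * while the default layout works top-down from just below the stack
 * gap computed by mmap_base().
 */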

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
int valid_phys_addr_range(phys_addr_t addr, size_t size)
{
	if (addr < PHYS_OFFSET)
		return 0;
	if (addr + size > __pa(high_memory - 1) + 1)
		return 0;

	return 1;
}

/*
 * Do not allow /dev/mem mappings beyond the supported physical range.
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return (pfn + (size >> PAGE_SHIFT)) <= (1 + (PHYS_MASK >> PAGE_SHIFT));
}
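
/*
 * Worked example (assuming a non-LPAE kernel where PHYS_MASK covers
 * 32 bits, with 4K pages): PHYS_MASK >> PAGE_SHIFT == 0xfffff, so the
 * mapping is allowed only while pfn + (size >> PAGE_SHIFT) stays at or
 * below 0x100000, i.e. it must not run past the 4GB boundary.
 */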

#ifdef CONFIG_STRICT_DEVMEM

#include <linux/ioport.h>

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain
 * address is valid. The argument is a physical page number.
 * We mimic x86 here by disallowing access to system RAM as well as
 * device-exclusive MMIO regions. This effectively disables read()/write()
 * on /dev/mem.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (iomem_is_exclusive(pfn << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}

#endif