/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code       <alan@lxorguk.ukuu.org.uk>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <linux/ksm.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

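/*
 * Fallback for architectures that do not define pgprot_modify(): simply
 * adopt the new protection wholesale.  An architecture that keeps extra
 * state in vm_page_prot can provide its own version to preserve those bits.
 */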
#ifndef pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
        return newprot;
}
#endif

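/*
 * Walk the ptes covered by one pmd over [addr, end) and apply newprot.
 * With prot_numa, present ptes backed by normal, non-KSM pages are marked
 * as NUMA hinting ptes instead.  Write migration entries are downgraded
 * to read-only ones.  Returns the number of entries that were updated.
 */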
static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                unsigned long addr, unsigned long end, pgprot_t newprot,
                int dirty_accountable, int prot_numa)
{
        struct mm_struct *mm = vma->vm_mm;
        pte_t *pte, oldpte;
        spinlock_t *ptl;
        unsigned long pages = 0;

        pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
        arch_enter_lazy_mmu_mode();
        do {
                oldpte = *pte;
                if (pte_present(oldpte)) {
                        pte_t ptent;
                        bool updated = false;

                        if (!prot_numa) {
                                ptent = ptep_modify_prot_start(mm, addr, pte);
                                if (pte_numa(ptent))
                                        ptent = pte_mknonnuma(ptent);
                                ptent = pte_modify(ptent, newprot);
                                /*
                                 * Avoid taking write faults for pages we
                                 * know to be dirty.
                                 */
                                if (dirty_accountable && pte_dirty(ptent))
                                        ptent = pte_mkwrite(ptent);
                                ptep_modify_prot_commit(mm, addr, pte, ptent);
                                updated = true;
                        } else {
                                struct page *page;

                                ptent = *pte;
                                page = vm_normal_page(vma, addr, oldpte);
                                if (page && !PageKsm(page)) {
                                        if (!pte_numa(oldpte)) {
                                                ptent = pte_mknuma(ptent);
                                                set_pte_at(mm, addr, pte, ptent);
                                                updated = true;
                                        }
                                }
                        }
                        if (updated)
                                pages++;
                } else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) {
                        swp_entry_t entry = pte_to_swp_entry(oldpte);

                        if (is_write_migration_entry(entry)) {
                                pte_t newpte;
                                /*
                                 * A protection check is difficult so
                                 * just be safe and disable write
                                 */
                                make_migration_entry_read(&entry);
                                newpte = swp_entry_to_pte(entry);
                                if (pte_swp_soft_dirty(oldpte))
                                        newpte = pte_swp_mksoft_dirty(newpte);
                                set_pte_at(mm, addr, pte, newpte);

                                pages++;
                        }
                }
        } while (pte++, addr += PAGE_SIZE, addr != end);
        arch_leave_lazy_mmu_mode();
        pte_unmap_unlock(pte - 1, ptl);

        return pages;
}

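/*
 * Walk the pmds under one pud.  A transparent huge pmd is updated in one
 * go by change_huge_pmd(), or split first when the range does not cover
 * the whole huge page; everything else is handed to change_pte_range().
 * Huge NUMA updates are counted separately for vmstat.
 */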
static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
                pud_t *pud, unsigned long addr, unsigned long end,
                pgprot_t newprot, int dirty_accountable, int prot_numa)
{
        pmd_t *pmd;
        unsigned long next;
        unsigned long pages = 0;
        unsigned long nr_huge_updates = 0;

        pmd = pmd_offset(pud, addr);
        do {
                unsigned long this_pages;

                next = pmd_addr_end(addr, end);
                if (pmd_trans_huge(*pmd)) {
                        if (next - addr != HPAGE_PMD_SIZE)
                                split_huge_page_pmd(vma, addr, pmd);
                        else {
                                int nr_ptes = change_huge_pmd(vma, pmd, addr,
                                                newprot, prot_numa);

                                if (nr_ptes) {
                                        if (nr_ptes == HPAGE_PMD_NR) {
                                                pages += HPAGE_PMD_NR;
                                                nr_huge_updates++;
                                        }
                                        continue;
                                }
                        }
                        /* fall through */
                }
                if (pmd_none_or_clear_bad(pmd))
                        continue;
                this_pages = change_pte_range(vma, pmd, addr, next, newprot,
                                 dirty_accountable, prot_numa);
                pages += this_pages;
        } while (pmd++, addr = next, addr != end);

        if (nr_huge_updates)
                count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
        return pages;
}

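/*
 * Walk the puds under one pgd, skipping empty or bad entries, and hand
 * each populated range down to change_pmd_range().
 */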
static inline unsigned long change_pud_range(struct vm_area_struct *vma,
                pgd_t *pgd, unsigned long addr, unsigned long end,
                pgprot_t newprot, int dirty_accountable, int prot_numa)
{
        pud_t *pud;
        unsigned long next;
        unsigned long pages = 0;

        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
                if (pud_none_or_clear_bad(pud))
                        continue;
                pages += change_pmd_range(vma, pud, addr, next, newprot,
                                 dirty_accountable, prot_numa);
        } while (pud++, addr = next, addr != end);

        return pages;
}

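/*
 * Top of the page-table walk for one vma: iterate the pgd entries covering
 * [addr, end), keep a TLB flush flagged as pending while entries are being
 * rewritten, and flush the TLB only if something actually changed.
 */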
static unsigned long change_protection_range(struct vm_area_struct *vma,
                unsigned long addr, unsigned long end, pgprot_t newprot,
                int dirty_accountable, int prot_numa)
{
        struct mm_struct *mm = vma->vm_mm;
        pgd_t *pgd;
        unsigned long next;
        unsigned long start = addr;
        unsigned long pages = 0;

        BUG_ON(addr >= end);
        pgd = pgd_offset(mm, addr);
        flush_cache_range(vma, addr, end);
        set_tlb_flush_pending(mm);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
                pages += change_pud_range(vma, pgd, addr, next, newprot,
                                 dirty_accountable, prot_numa);
        } while (pgd++, addr = next, addr != end);

        /* Only flush the TLB if we actually modified any entries: */
        if (pages)
                flush_tlb_range(vma, start, end);
        clear_tlb_flush_pending(mm);

        return pages;
}

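/*
 * Apply newprot to [start, end) of @vma, bracketed by mmu notifier calls.
 * Hugetlb vmas take their own path; everything else goes through the
 * generic walk above.  Returns the number of page table entries updated.
 */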
unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
                       unsigned long end, pgprot_t newprot,
                       int dirty_accountable, int prot_numa)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long pages;

        mmu_notifier_invalidate_range_start(mm, start, end);
        if (is_vm_hugetlb_page(vma))
                pages = hugetlb_change_protection(vma, start, end, newprot);
        else
                pages = change_protection_range(vma, start, end, newprot, dirty_accountable, prot_numa);
        mmu_notifier_invalidate_range_end(mm, start, end);

        return pages;
}

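/*
 * Adjust the part of @vma in [start, end) to @newflags: charge newly
 * writable private memory against the commit limit, try to merge with the
 * neighbouring vmas, split where the range does not line up with the vma
 * boundaries, then rewrite vm_flags/vm_page_prot and update the page
 * tables via change_protection().
 */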
int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
        unsigned long start, unsigned long end, unsigned long newflags)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long oldflags = vma->vm_flags;
        long nrpages = (end - start) >> PAGE_SHIFT;
        unsigned long charged = 0;
        pgoff_t pgoff;
        int error;
        int dirty_accountable = 0;

        if (newflags == oldflags) {
                *pprev = vma;
                return 0;
        }

        /*
         * If we make a private mapping writable we increase our commit;
         * but (without finer accounting) cannot reduce our commit if we
         * make it unwritable again.  hugetlb mappings were accounted for
         * even if read-only, so there is no need to account for them here.
         */
        if (newflags & VM_WRITE) {
                if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
                                                VM_SHARED|VM_NORESERVE))) {
                        charged = nrpages;
                        if (security_vm_enough_memory_mm(mm, charged))
                                return -ENOMEM;
                        newflags |= VM_ACCOUNT;
                }
        }

        /*
         * First try to merge with previous and/or next vma.
         */
        pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
        *pprev = vma_merge(mm, *pprev, start, end, newflags,
                        vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
        if (*pprev) {
                vma = *pprev;
                goto success;
        }

        *pprev = vma;

        if (start != vma->vm_start) {
                error = split_vma(mm, vma, start, 1);
                if (error)
                        goto fail;
        }

        if (end != vma->vm_end) {
                error = split_vma(mm, vma, end, 0);
                if (error)
                        goto fail;
        }

success:
        /*
         * vm_flags and vm_page_prot are protected by the mmap_sem
         * held in write mode.
         */
        vma->vm_flags = newflags;
        vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
                                          vm_get_page_prot(newflags));

        if (vma_wants_writenotify(vma)) {
                vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
                dirty_accountable = 1;
        }

        change_protection(vma, start, end, vma->vm_page_prot,
                          dirty_accountable, 0);

        vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
        vm_stat_account(mm, newflags, vma->vm_file, nrpages);
        perf_event_mmap(vma);
        return 0;

fail:
        vm_unacct_memory(charged);
        return error;
}

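/*
 * The mprotect(2) entry point: validate the arguments, translate PROT_*
 * bits into VM_* flags (honouring READ_IMPLIES_EXEC), then walk the vmas
 * covering [start, start + len) with mmap_sem held for writing, applying
 * mprotect_fixup() to each piece.  PROT_GROWSDOWN/PROT_GROWSUP extend the
 * range to the start or end of a growable vma.
 *
 * A typical call from userspace makes an existing mapping read-only:
 *
 *      mprotect(addr, len, PROT_READ);
 */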
SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
                unsigned long, prot)
{
        unsigned long vm_flags, nstart, end, tmp, reqprot;
        struct vm_area_struct *vma, *prev;
        int error = -EINVAL;
        const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
        prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
        if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
                return -EINVAL;

        if (start & ~PAGE_MASK)
                return -EINVAL;
        if (!len)
                return 0;
        len = PAGE_ALIGN(len);
        end = start + len;
        if (end <= start)
                return -ENOMEM;
        if (!arch_validate_prot(prot))
                return -EINVAL;

        reqprot = prot;
        /*
         * Does the application expect PROT_READ to imply PROT_EXEC:
         */
        if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
                prot |= PROT_EXEC;

        vm_flags = calc_vm_prot_bits(prot);

        down_write(&current->mm->mmap_sem);

        vma = find_vma(current->mm, start);
        error = -ENOMEM;
        if (!vma)
                goto out;
        prev = vma->vm_prev;
        if (unlikely(grows & PROT_GROWSDOWN)) {
                if (vma->vm_start >= end)
                        goto out;
                start = vma->vm_start;
                error = -EINVAL;
                if (!(vma->vm_flags & VM_GROWSDOWN))
                        goto out;
        } else {
                if (vma->vm_start > start)
                        goto out;
                if (unlikely(grows & PROT_GROWSUP)) {
                        end = vma->vm_end;
                        error = -EINVAL;
                        if (!(vma->vm_flags & VM_GROWSUP))
                                goto out;
                }
        }
        if (start > vma->vm_start)
                prev = vma;

        for (nstart = start ; ; ) {
                unsigned long newflags;

                /* Here we know that vma->vm_start <= nstart < vma->vm_end. */

                newflags = vm_flags;
                newflags |= (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));

                /* newflags >> 4 shifts VM_MAY* into the position of VM_* */
                if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
                        error = -EACCES;
                        goto out;
                }

                error = security_file_mprotect(vma, reqprot, prot);
                if (error)
                        goto out;

                tmp = vma->vm_end;
                if (tmp > end)
                        tmp = end;
                error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
                if (error)
                        goto out;
                nstart = tmp;

                if (nstart < prev->vm_end)
                        nstart = prev->vm_end;
                if (nstart >= end)
                        goto out;

                vma = prev->vm_next;
                if (!vma || vma->vm_start != nstart) {
                        error = -ENOMEM;
                        goto out;
                }
        }
out:
        up_write(&current->mm->mmap_sem);
        return error;
}