mm/madvise.c: free swp_entry in madvise_free
/*
 *	linux/mm/madvise.c
 *
 * Copyright (C) 1999  Linus Torvalds
 * Copyright (C) 2002  Christoph Hellwig
 */

#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/page-isolation.h>
#include <linux/hugetlb.h>
#include <linux/falloc.h>
#include <linux/sched.h>
#include <linux/ksm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>

#include <asm/tlb.h>

/*
 * Any behaviour which results in changes to the vma->vm_flags needs to
 * take mmap_sem for writing. Others, which simply traverse vmas, need
 * to only take it for reading.
 */
static int madvise_need_mmap_write(int behavior)
{
        switch (behavior) {
        case MADV_REMOVE:
        case MADV_WILLNEED:
        case MADV_DONTNEED:
        case MADV_FREE:
                return 0;
        default:
                /* be safe, default to 1. list exceptions explicitly */
                return 1;
        }
}

/*
 * We can potentially split a vm area into separate
 * areas, each area with its own behavior.
 */
static long madvise_behavior(struct vm_area_struct *vma,
                     struct vm_area_struct **prev,
                     unsigned long start, unsigned long end, int behavior)
{
        struct mm_struct *mm = vma->vm_mm;
        int error = 0;
        pgoff_t pgoff;
        unsigned long new_flags = vma->vm_flags;

        switch (behavior) {
        case MADV_NORMAL:
                new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
                break;
        case MADV_SEQUENTIAL:
                new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
                break;
        case MADV_RANDOM:
                new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
                break;
        case MADV_DONTFORK:
                new_flags |= VM_DONTCOPY;
                break;
        case MADV_DOFORK:
                if (vma->vm_flags & VM_IO) {
                        error = -EINVAL;
                        goto out;
                }
                new_flags &= ~VM_DONTCOPY;
                break;
        case MADV_DONTDUMP:
                new_flags |= VM_DONTDUMP;
                break;
        case MADV_DODUMP:
                if (new_flags & VM_SPECIAL) {
                        error = -EINVAL;
                        goto out;
                }
                new_flags &= ~VM_DONTDUMP;
                break;
        case MADV_MERGEABLE:
        case MADV_UNMERGEABLE:
                error = ksm_madvise(vma, start, end, behavior, &new_flags);
                if (error)
                        goto out;
                break;
        case MADV_HUGEPAGE:
        case MADV_NOHUGEPAGE:
                error = hugepage_madvise(vma, &new_flags, behavior);
                if (error)
                        goto out;
                break;
        }

        if (new_flags == vma->vm_flags) {
                *prev = vma;
                goto out;
        }

        pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
        *prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
                          vma->vm_file, pgoff, vma_policy(vma),
                          vma->vm_userfaultfd_ctx);
        if (*prev) {
                vma = *prev;
                goto success;
        }

        *prev = vma;

        if (start != vma->vm_start) {
                error = split_vma(mm, vma, start, 1);
                if (error)
                        goto out;
        }

        if (end != vma->vm_end) {
                error = split_vma(mm, vma, end, 0);
                if (error)
                        goto out;
        }

success:
        /*
         * vm_flags is protected by the mmap_sem held in write mode.
         */
        vma->vm_flags = new_flags;

out:
        if (error == -ENOMEM)
                error = -EAGAIN;
        return error;
}
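
/*
 * Illustrative userspace sketch (not part of this file): madvise_behavior()
 * above is what handles flag-changing advice such as MADV_SEQUENTIAL, and it
 * splits the VMA when the advice covers only part of a mapping.  The helper
 * below is a hypothetical example; the file name "data.bin" and the function
 * name advise_middle() are made up for illustration.
 */
#if 0	/* example only -- compiled out */
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>

int advise_middle(void)
{
        int fd = open("data.bin", O_RDONLY);
        size_t len = 16 * 4096;
        char *p;

        if (fd < 0)
                return -1;
        p = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
        if (p == MAP_FAILED) {
                close(fd);
                return -1;
        }
        /* Advising only pages 4..11 exercises the split_vma() path above. */
        if (madvise(p + 4 * 4096, 8 * 4096, MADV_SEQUENTIAL) != 0)
                perror("madvise");
        munmap(p, len);
        close(fd);
        return 0;
}
#endif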

#ifdef CONFIG_SWAP
static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
        unsigned long end, struct mm_walk *walk)
{
        pte_t *orig_pte;
        struct vm_area_struct *vma = walk->private;
        unsigned long index;

        if (pmd_none_or_trans_huge_or_clear_bad(pmd))
                return 0;

        for (index = start; index != end; index += PAGE_SIZE) {
                pte_t pte;
                swp_entry_t entry;
                struct page *page;
                spinlock_t *ptl;

                orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);
                pte = *(orig_pte + ((index - start) / PAGE_SIZE));
                pte_unmap_unlock(orig_pte, ptl);

                if (pte_present(pte) || pte_none(pte))
                        continue;
                entry = pte_to_swp_entry(pte);
                if (unlikely(non_swap_entry(entry)))
                        continue;

                page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
                                                                vma, index);
                if (page)
                        page_cache_release(page);
        }

        return 0;
}

static void force_swapin_readahead(struct vm_area_struct *vma,
                unsigned long start, unsigned long end)
{
        struct mm_walk walk = {
                .mm = vma->vm_mm,
                .pmd_entry = swapin_walk_pmd_entry,
                .private = vma,
        };

        walk_page_range(start, end, &walk);

        lru_add_drain();        /* Push any new pages onto the LRU now */
}

static void force_shm_swapin_readahead(struct vm_area_struct *vma,
                unsigned long start, unsigned long end,
                struct address_space *mapping)
{
        pgoff_t index;
        struct page *page;
        swp_entry_t swap;

        for (; start < end; start += PAGE_SIZE) {
                index = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

                page = find_get_entry(mapping, index);
                if (!radix_tree_exceptional_entry(page)) {
                        if (page)
                                page_cache_release(page);
                        continue;
                }
                swap = radix_to_swp_entry(page);
                page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE,
                                                                NULL, 0);
                if (page)
                        page_cache_release(page);
        }

        lru_add_drain();        /* Push any new pages onto the LRU now */
}
#endif          /* CONFIG_SWAP */

/*
 * Schedule all required I/O operations.  Do not wait for completion.
 */
static long madvise_willneed(struct vm_area_struct *vma,
                             struct vm_area_struct **prev,
                             unsigned long start, unsigned long end)
{
        struct file *file = vma->vm_file;

#ifdef CONFIG_SWAP
        if (!file) {
                *prev = vma;
                force_swapin_readahead(vma, start, end);
                return 0;
        }

        if (shmem_mapping(file->f_mapping)) {
                *prev = vma;
                force_shm_swapin_readahead(vma, start, end,
                                        file->f_mapping);
                return 0;
        }
#else
        if (!file)
                return -EBADF;
#endif

        if (IS_DAX(file_inode(file))) {
                /* no bad return value, but ignore advice */
                return 0;
        }

        *prev = vma;
        start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
        if (end > vma->vm_end)
                end = vma->vm_end;
        end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

        force_page_cache_readahead(file->f_mapping, file, start, end - start);
        return 0;
}
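
/*
 * Illustrative userspace sketch (not part of this file): MADV_WILLNEED only
 * schedules read-ahead (swap-in for anonymous/shmem ranges, page cache
 * readahead for files, as in madvise_willneed() above) and returns without
 * waiting for the I/O.  prefetch_region() is a hypothetical helper name.
 */
#if 0	/* example only -- compiled out */
#include <sys/mman.h>

static void prefetch_region(void *base, size_t len)
{
        /* A failure only costs the optimisation, so it is ignored here. */
        (void)madvise(base, len, MADV_WILLNEED);
}
#endif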

static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
                                unsigned long end, struct mm_walk *walk)

{
        struct mmu_gather *tlb = walk->private;
        struct mm_struct *mm = tlb->mm;
        struct vm_area_struct *vma = walk->vma;
        spinlock_t *ptl;
        pte_t *orig_pte, *pte, ptent;
        struct page *page;
        int nr_swap = 0;

        split_huge_pmd(vma, pmd, addr);
        if (pmd_trans_unstable(pmd))
                return 0;

        orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
        arch_enter_lazy_mmu_mode();
        for (; addr != end; pte++, addr += PAGE_SIZE) {
                ptent = *pte;

                if (pte_none(ptent))
                        continue;
                /*
                 * If the pte holds a swp_entry, just clear the page table
                 * entry; swapping the page back in would be more expensive
                 * than (page allocation + zeroing).
                 */
                if (!pte_present(ptent)) {
                        swp_entry_t entry;

                        entry = pte_to_swp_entry(ptent);
                        if (non_swap_entry(entry))
                                continue;
                        nr_swap--;
                        free_swap_and_cache(entry);
                        pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
                        continue;
                }

                page = vm_normal_page(vma, addr, ptent);
                if (!page)
                        continue;

                /*
                 * If the pmd isn't transhuge but the page is a THP and
                 * is owned by only this process, split it and
                 * deactivate all of its pages.
                 */
                if (PageTransCompound(page)) {
                        if (page_mapcount(page) != 1)
                                goto out;
                        get_page(page);
                        if (!trylock_page(page)) {
                                put_page(page);
                                goto out;
                        }
                        pte_unmap_unlock(orig_pte, ptl);
                        if (split_huge_page(page)) {
                                unlock_page(page);
                                put_page(page);
                                pte_offset_map_lock(mm, pmd, addr, &ptl);
                                goto out;
                        }
                        put_page(page);
                        unlock_page(page);
                        pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
                        pte--;
                        addr -= PAGE_SIZE;
                        continue;
                }

                VM_BUG_ON_PAGE(PageTransCompound(page), page);

                if (PageSwapCache(page) || PageDirty(page)) {
                        if (!trylock_page(page))
                                continue;
                        /*
                         * If the page is shared with others, we cannot clear
                         * PG_dirty of the page.
                         */
                        if (page_mapcount(page) != 1) {
                                unlock_page(page);
                                continue;
                        }

                        if (PageSwapCache(page) && !try_to_free_swap(page)) {
                                unlock_page(page);
                                continue;
                        }

                        ClearPageDirty(page);
                        unlock_page(page);
                }

                if (pte_young(ptent) || pte_dirty(ptent)) {
                        /*
                         * Some architectures (e.g. PPC) don't update the TLB
                         * with set_pte_at and tlb_remove_tlb_entry, so for
                         * portability, re-install the pte as old and clean
                         * after clearing it.
                         */
                        ptent = ptep_get_and_clear_full(mm, addr, pte,
                                                        tlb->fullmm);

                        ptent = pte_mkold(ptent);
                        ptent = pte_mkclean(ptent);
                        set_pte_at(mm, addr, pte, ptent);
                        tlb_remove_tlb_entry(tlb, pte, addr);
                }
        }
out:
        if (nr_swap) {
                if (current->mm == mm)
                        sync_mm_rss(mm);

                add_mm_counter(mm, MM_SWAPENTS, nr_swap);
        }
        arch_leave_lazy_mmu_mode();
        pte_unmap_unlock(orig_pte, ptl);
        cond_resched();
        return 0;
}

static void madvise_free_page_range(struct mmu_gather *tlb,
                             struct vm_area_struct *vma,
                             unsigned long addr, unsigned long end)
{
        struct mm_walk free_walk = {
                .pmd_entry = madvise_free_pte_range,
                .mm = vma->vm_mm,
                .private = tlb,
        };

        tlb_start_vma(tlb, vma);
        walk_page_range(addr, end, &free_walk);
        tlb_end_vma(tlb, vma);
}

static int madvise_free_single_vma(struct vm_area_struct *vma,
                        unsigned long start_addr, unsigned long end_addr)
{
        unsigned long start, end;
        struct mm_struct *mm = vma->vm_mm;
        struct mmu_gather tlb;

        if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
                return -EINVAL;

        /* MADV_FREE only works for anonymous vmas at the moment */
        if (!vma_is_anonymous(vma))
                return -EINVAL;

        start = max(vma->vm_start, start_addr);
        if (start >= vma->vm_end)
                return -EINVAL;
        end = min(vma->vm_end, end_addr);
        if (end <= vma->vm_start)
                return -EINVAL;

        lru_add_drain();
        tlb_gather_mmu(&tlb, mm, start, end);
        update_hiwater_rss(mm);

        mmu_notifier_invalidate_range_start(mm, start, end);
        madvise_free_page_range(&tlb, vma, start, end);
        mmu_notifier_invalidate_range_end(mm, start, end);
        tlb_finish_mmu(&tlb, start, end);

        return 0;
}

static long madvise_free(struct vm_area_struct *vma,
                             struct vm_area_struct **prev,
                             unsigned long start, unsigned long end)
{
        *prev = vma;
        return madvise_free_single_vma(vma, start, end);
}
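
/*
 * Illustrative userspace sketch (not part of this file): MADV_FREE marks
 * anonymous pages as lazily freeable; they are only reclaimed under memory
 * pressure, and their contents must be treated as undefined afterwards.
 * release_chunk() is a hypothetical allocator helper; the fallback to
 * MADV_DONTNEED covers kernels or headers without MADV_FREE.
 */
#if 0	/* example only -- compiled out */
#include <sys/mman.h>
#include <errno.h>

static int release_chunk(void *addr, size_t len)
{
#ifdef MADV_FREE
        if (madvise(addr, len, MADV_FREE) == 0)
                return 0;
        if (errno != EINVAL)
                return -1;
#endif
        /* Older kernels, or non-anonymous memory: drop the pages eagerly. */
        return madvise(addr, len, MADV_DONTNEED);
}
#endif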

/*
 * Application no longer needs these pages.  If the pages are dirty,
 * it's OK to just throw them away.  The app will be more careful about
 * data it wants to keep.  Be sure to free swap resources too.  The
 * zap_page_range call sets things up for shrink_active_list to actually free
 * these pages later if no one else has touched them in the meantime,
 * although we could add these pages to a global reuse list for
 * shrink_active_list to pick up before reclaiming other pages.
 *
 * NB: This interface discards data rather than pushes it out to swap,
 * as some implementations do.  This has performance implications for
 * applications like large transactional databases which want to discard
 * pages in anonymous maps after committing to backing store the data
 * that was kept in them.  There is no reason to write this data out to
 * the swap area if the application is discarding it.
 *
 * An interface that causes the system to free clean pages and flush
 * dirty pages is already available as msync(MS_INVALIDATE).
 */
static long madvise_dontneed(struct vm_area_struct *vma,
                             struct vm_area_struct **prev,
                             unsigned long start, unsigned long end)
{
        *prev = vma;
        if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
                return -EINVAL;

        zap_page_range(vma, start, end - start, NULL);
        return 0;
}
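
/*
 * Illustrative userspace sketch (not part of this file): MADV_DONTNEED zaps
 * the range immediately (zap_page_range() above), so the next touch of a
 * private anonymous mapping faults in fresh zero-filled pages.  drop_scratch()
 * is a hypothetical helper and assumes buf is a page-aligned private
 * anonymous mapping.
 */
#if 0	/* example only -- compiled out */
#include <sys/mman.h>
#include <assert.h>

static void drop_scratch(char *buf, size_t len)
{
        buf[0] = 42;
        if (madvise(buf, len, MADV_DONTNEED) == 0)
                assert(buf[0] == 0);	/* old contents are gone */
}
#endif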

/*
 * Application wants to free up the pages and associated backing store.
 * This is effectively punching a hole into the middle of a file.
 */
static long madvise_remove(struct vm_area_struct *vma,
                                struct vm_area_struct **prev,
                                unsigned long start, unsigned long end)
{
        loff_t offset;
        int error;
        struct file *f;

        *prev = NULL;   /* tell sys_madvise we drop mmap_sem */

        if (vma->vm_flags & VM_LOCKED)
                return -EINVAL;

        f = vma->vm_file;

        if (!f || !f->f_mapping || !f->f_mapping->host) {
                return -EINVAL;
        }

        if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
                return -EACCES;

        offset = (loff_t)(start - vma->vm_start)
                        + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

        /*
         * Filesystem's fallocate may need to take i_mutex.  We need to
         * explicitly grab a reference because the vma (and hence the
         * vma's reference to the file) can go away as soon as we drop
         * mmap_sem.
         */
        get_file(f);
        up_read(&current->mm->mmap_sem);
        error = vfs_fallocate(f,
                                FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                                offset, end - start);
        fput(f);
        down_read(&current->mm->mmap_sem);
        return error;
}
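
/*
 * Illustrative userspace sketch (not part of this file): MADV_REMOVE punches
 * a hole in the backing object of a shared, writable mapping (vfs_fallocate()
 * with FALLOC_FL_PUNCH_HOLE above), e.g. a tmpfs file; later reads of the
 * range see zeroes.  It fails with EACCES if the mapping is not shared and
 * writable.  punch_mapping_hole() is a hypothetical helper name.
 */
#if 0	/* example only -- compiled out */
#include <sys/mman.h>

static int punch_mapping_hole(void *addr, size_t len)
{
        return madvise(addr, len, MADV_REMOVE);
}
#endif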

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Error injection support for memory error handling.
 */
static int madvise_hwpoison(int bhv, unsigned long start, unsigned long end)
{
        struct page *p;
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
        for (; start < end; start += PAGE_SIZE <<
                                compound_order(compound_head(p))) {
                int ret;

                ret = get_user_pages_fast(start, 1, 0, &p);
                if (ret != 1)
                        return ret;

                if (PageHWPoison(p)) {
                        put_page(p);
                        continue;
                }
                if (bhv == MADV_SOFT_OFFLINE) {
                        pr_info("Soft offlining page %#lx at %#lx\n",
                                page_to_pfn(p), start);
                        ret = soft_offline_page(p, MF_COUNT_INCREASED);
                        if (ret)
                                return ret;
                        continue;
                }
                pr_info("Injecting memory failure for page %#lx at %#lx\n",
                       page_to_pfn(p), start);
                /* Ignore return value for now */
                memory_failure(page_to_pfn(p), 0, MF_COUNT_INCREASED);
        }
        return 0;
}
#endif
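
/*
 * Illustrative userspace sketch (not part of this file): MADV_HWPOISON is a
 * testing hook that injects a memory failure for one page, so the recovery
 * path above can be exercised from user space.  It requires CAP_SYS_ADMIN and
 * a kernel built with CONFIG_MEMORY_FAILURE; inject_poison() is a hypothetical
 * helper name.
 */
#if 0	/* example only -- compiled out */
#include <sys/mman.h>

static int inject_poison(void *page_aligned_addr, size_t page_size)
{
        return madvise(page_aligned_addr, page_size, MADV_HWPOISON);
}
#endif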

static long
madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
                unsigned long start, unsigned long end, int behavior)
{
        switch (behavior) {
        case MADV_REMOVE:
                return madvise_remove(vma, prev, start, end);
        case MADV_WILLNEED:
                return madvise_willneed(vma, prev, start, end);
        case MADV_FREE:
                /*
                 * XXX: In this implementation, MADV_FREE works like
                 * MADV_DONTNEED on a swapless system or when swap is full.
                 */
                if (get_nr_swap_pages() > 0)
                        return madvise_free(vma, prev, start, end);
                /* fall through */
        case MADV_DONTNEED:
                return madvise_dontneed(vma, prev, start, end);
        default:
                return madvise_behavior(vma, prev, start, end, behavior);
        }
}

static bool
madvise_behavior_valid(int behavior)
{
        switch (behavior) {
        case MADV_DOFORK:
        case MADV_DONTFORK:
        case MADV_NORMAL:
        case MADV_SEQUENTIAL:
        case MADV_RANDOM:
        case MADV_REMOVE:
        case MADV_WILLNEED:
        case MADV_DONTNEED:
        case MADV_FREE:
#ifdef CONFIG_KSM
        case MADV_MERGEABLE:
        case MADV_UNMERGEABLE:
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        case MADV_HUGEPAGE:
        case MADV_NOHUGEPAGE:
#endif
        case MADV_DONTDUMP:
        case MADV_DODUMP:
                return true;

        default:
                return false;
        }
}

/*
 * The madvise(2) system call.
 *
 * Applications can use madvise() to advise the kernel how it should
 * handle paging I/O in this VM area.  The idea is to help the kernel
 * use appropriate read-ahead and caching techniques.  The information
 * provided is advisory only, and can be safely disregarded by the
 * kernel without affecting the correct operation of the application.
 *
 * behavior values:
 *  MADV_NORMAL - the default behavior is to read clusters.  This
 *              results in some read-ahead and read-behind.
 *  MADV_RANDOM - the system should read the minimum amount of data
 *              on any access, since it is unlikely that the appli-
 *              cation will need more than what it asks for.
 *  MADV_SEQUENTIAL - pages in the given range will probably be accessed
 *              once, so they can be aggressively read ahead, and
 *              can be freed soon after they are accessed.
 *  MADV_WILLNEED - the application is notifying the system to read
 *              some pages ahead.
 *  MADV_DONTNEED - the application is finished with the given range,
 *              so the kernel can free resources associated with it.
 *  MADV_REMOVE - the application wants to free up the given range of
 *              pages and associated backing store.
 *  MADV_DONTFORK - omit this area from child's address space when forking:
 *              typically, to avoid COWing pages pinned by get_user_pages().
 *  MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
 *  MADV_MERGEABLE - the application recommends that KSM try to merge pages in
 *              this area with pages of identical content from other such areas.
 *  MADV_UNMERGEABLE - cancel MADV_MERGEABLE: no longer merge pages with others.
 *
 * return values:
 *  zero    - success
 *  -EINVAL - start + len < 0, start is not page-aligned,
 *              "behavior" is not a valid value, or application
 *              is attempting to release locked or shared pages.
 *  -ENOMEM - addresses in the specified range are not currently
 *              mapped, or are outside the AS of the process.
 *  -EIO    - an I/O error occurred while paging in data.
 *  -EBADF  - map exists, but area maps something that isn't a file.
 *  -EAGAIN - a kernel resource was temporarily unavailable.
 */
SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
{
        unsigned long end, tmp;
        struct vm_area_struct *vma, *prev;
        int unmapped_error = 0;
        int error = -EINVAL;
        int write;
        size_t len;
        struct blk_plug plug;

#ifdef CONFIG_MEMORY_FAILURE
        if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE)
                return madvise_hwpoison(behavior, start, start+len_in);
#endif
        if (!madvise_behavior_valid(behavior))
                return error;

        if (start & ~PAGE_MASK)
                return error;
        len = (len_in + ~PAGE_MASK) & PAGE_MASK;

        /* Check to see whether len was rounded up from small -ve to zero */
        if (len_in && !len)
                return error;

        end = start + len;
        if (end < start)
                return error;

        error = 0;
        if (end == start)
                return error;

        write = madvise_need_mmap_write(behavior);
        if (write)
                down_write(&current->mm->mmap_sem);
        else
                down_read(&current->mm->mmap_sem);

        /*
         * If the interval [start,end) covers some unmapped address
         * ranges, just ignore them, but return -ENOMEM at the end.
         * - different from the way of handling in mlock etc.
         */
        vma = find_vma_prev(current->mm, start, &prev);
        if (vma && start > vma->vm_start)
                prev = vma;

        blk_start_plug(&plug);
        for (;;) {
                /* Still start < end. */
                error = -ENOMEM;
                if (!vma)
                        goto out;

                /* Here start < (end|vma->vm_end). */
                if (start < vma->vm_start) {
                        unmapped_error = -ENOMEM;
                        start = vma->vm_start;
                        if (start >= end)
                                goto out;
                }

                /* Here vma->vm_start <= start < (end|vma->vm_end) */
                tmp = vma->vm_end;
                if (end < tmp)
                        tmp = end;

                /* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
                error = madvise_vma(vma, &prev, start, tmp, behavior);
                if (error)
                        goto out;
                start = tmp;
                if (prev && start < prev->vm_end)
                        start = prev->vm_end;
                error = unmapped_error;
                if (start >= end)
                        goto out;
                if (prev)
                        vma = prev->vm_next;
                else    /* madvise_remove dropped mmap_sem */
                        vma = find_vma(current->mm, start);
        }
out:
        blk_finish_plug(&plug);
        if (write)
                up_write(&current->mm->mmap_sem);
        else
                up_read(&current->mm->mmap_sem);

        return error;
}
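
/*
 * Illustrative userspace sketch (not part of this file): the syscall above
 * applies advice VMA by VMA.  If part of [start, start+len) is unmapped, the
 * advice is still applied to the mapped parts but the call fails with ENOMEM;
 * an unaligned start or an unknown behavior fails with EINVAL up front.
 * report_advice() is a hypothetical helper for illustrating the error cases.
 */
#if 0	/* example only -- compiled out */
#include <sys/mman.h>
#include <errno.h>
#include <stdio.h>

static void report_advice(void *start, size_t len, int advice)
{
        if (madvise(start, len, advice) == 0)
                return;
        if (errno == ENOMEM)
                fprintf(stderr, "range contains unmapped pages\n");
        else if (errno == EINVAL)
                fprintf(stderr, "bad alignment or unsupported advice\n");
        else
                perror("madvise");
}
#endif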