1 /*
2  * Memory Migration functionality - linux/mm/migrate.c
3  *
4  * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
5  *
6  * Page migration was first developed in the context of the memory hotplug
7  * project. The main authors of the migration code are:
8  *
9  * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
10  * Hirokazu Takahashi <taka@valinux.co.jp>
11  * Dave Hansen <haveblue@us.ibm.com>
12  * Christoph Lameter
13  */
14
15 #include <linux/migrate.h>
16 #include <linux/export.h>
17 #include <linux/swap.h>
18 #include <linux/swapops.h>
19 #include <linux/pagemap.h>
20 #include <linux/buffer_head.h>
21 #include <linux/mm_inline.h>
22 #include <linux/nsproxy.h>
23 #include <linux/pagevec.h>
24 #include <linux/ksm.h>
25 #include <linux/rmap.h>
26 #include <linux/topology.h>
27 #include <linux/cpu.h>
28 #include <linux/cpuset.h>
29 #include <linux/writeback.h>
30 #include <linux/mempolicy.h>
31 #include <linux/vmalloc.h>
32 #include <linux/security.h>
33 #include <linux/backing-dev.h>
34 #include <linux/compaction.h>
35 #include <linux/syscalls.h>
36 #include <linux/hugetlb.h>
37 #include <linux/hugetlb_cgroup.h>
38 #include <linux/gfp.h>
39 #include <linux/balloon_compaction.h>
40 #include <linux/mmu_notifier.h>
41 #include <linux/page_idle.h>
42 #include <linux/page_owner.h>
43
44 #include <asm/tlbflush.h>
45
46 #define CREATE_TRACE_POINTS
47 #include <trace/events/migrate.h>
48
49 #include "internal.h"
50
51 /*
52  * migrate_prep() needs to be called before we start compiling a list of pages
53  * to be migrated using isolate_lru_page(). If scheduling work on other CPUs is
54  * undesirable, use migrate_prep_local() instead.
55  */
56 int migrate_prep(void)
57 {
58         /*
59          * Clear the LRU lists so pages can be isolated.
60          * Note that pages may be moved off the LRU after we have
61          * drained them. Those pages will fail to migrate like other
62          * pages that may be busy.
63          */
64         lru_add_drain_all();
65
66         return 0;
67 }
68
69 /* Do the necessary work of migrate_prep(), but only for the local CPU */
70 int migrate_prep_local(void)
71 {
72         lru_add_drain();
73
74         return 0;
75 }
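
/*
 * Illustrative sketch (not part of this file): the typical caller flow is to
 * call migrate_prep(), isolate the pages of interest with isolate_lru_page(),
 * collect them on a private list and hand that list to migrate_pages(),
 * putting the pages back on failure.  Roughly, assuming a caller-defined
 * new_page_t allocator alloc_target_page():
 *
 *	LIST_HEAD(pagelist);
 *
 *	migrate_prep();
 *	if (!isolate_lru_page(page)) {
 *		list_add_tail(&page->lru, &pagelist);
 *		inc_node_page_state(page, NR_ISOLATED_ANON +
 *				    page_is_file_cache(page));
 *	}
 *	if (migrate_pages(&pagelist, alloc_target_page, NULL, 0,
 *			  MIGRATE_SYNC, MR_SYSCALL))
 *		putback_movable_pages(&pagelist);
 */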
76
77 bool isolate_movable_page(struct page *page, isolate_mode_t mode)
78 {
79         struct address_space *mapping;
80
81         /*
82          * Avoid burning cycles on pages that are still under __free_pages(),
83          * or that were just freed under us.
84          *
85          * In case we 'win' a race for a movable page being freed under us and
86          * raise its refcount, preventing __free_pages() from doing its job,
87          * the put_page() at the end of this block will take care of
88          * releasing this page, thus avoiding a nasty leak.
89          */
90         if (unlikely(!get_page_unless_zero(page)))
91                 goto out;
92
93         /*
94          * Check PageMovable before taking the page lock, because the page's
95          * owner assumes that nobody touches the lock bit of a newly allocated
96          * page, so unconditionally grabbing the lock would break that assumption.
97          */
98         if (unlikely(!__PageMovable(page)))
99                 goto out_putpage;
100         /*
101          * As movable pages are not isolated from LRU lists, concurrent
102          * compaction threads can race against page migration functions
103          * as well as against a page being released.
104          *
105          * In order to avoid having an already isolated movable page
106          * being (wrongly) re-isolated while it is under migration,
107          * or to avoid attempting to isolate pages being released,
108          * let's be sure we have the page lock
109          * before proceeding with the movable page isolation steps.
110          */
111         if (unlikely(!trylock_page(page)))
112                 goto out_putpage;
113
114         if (!PageMovable(page) || PageIsolated(page))
115                 goto out_no_isolated;
116
117         mapping = page_mapping(page);
118         VM_BUG_ON_PAGE(!mapping, page);
119
120         if (!mapping->a_ops->isolate_page(page, mode))
121                 goto out_no_isolated;
122
123         /* Driver shouldn't use PG_isolated bit of page->flags */
124         WARN_ON_ONCE(PageIsolated(page));
125         __SetPageIsolated(page);
126         unlock_page(page);
127
128         return true;
129
130 out_no_isolated:
131         unlock_page(page);
132 out_putpage:
133         put_page(page);
134 out:
135         return false;
136 }
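
/*
 * Illustrative sketch (not part of this file): a driver that wants its pages
 * handled by this non-LRU movable path is expected to provide isolate_page,
 * migratepage and putback_page in its address_space_operations and to mark
 * each page with __SetPageMovable() under the page lock, e.g. (hypothetical
 * driver code):
 *
 *	static const struct address_space_operations foo_aops = {
 *		.isolate_page	= foo_isolate_page,
 *		.migratepage	= foo_migratepage,
 *		.putback_page	= foo_putback_page,
 *	};
 *
 *	lock_page(page);
 *	__SetPageMovable(page, mapping);
 *	unlock_page(page);
 */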
137
138 /* Must be called on a page which is PG_movable */
139 void putback_movable_page(struct page *page)
140 {
141         struct address_space *mapping;
142
143         VM_BUG_ON_PAGE(!PageLocked(page), page);
144         VM_BUG_ON_PAGE(!PageMovable(page), page);
145         VM_BUG_ON_PAGE(!PageIsolated(page), page);
146
147         mapping = page_mapping(page);
148         mapping->a_ops->putback_page(page);
149         __ClearPageIsolated(page);
150 }
151
152 /*
153  * Put previously isolated pages back onto the appropriate lists
154  * from where they were once taken off for compaction/migration.
155  *
156  * This function shall be used whenever the isolated pageset has been
157  * built from LRU, balloon or hugetlbfs pages. See isolate_migratepages_range()
158  * and isolate_huge_page().
159  */
160 void putback_movable_pages(struct list_head *l)
161 {
162         struct page *page;
163         struct page *page2;
164
165         list_for_each_entry_safe(page, page2, l, lru) {
166                 if (unlikely(PageHuge(page))) {
167                         putback_active_hugepage(page);
168                         continue;
169                 }
170                 list_del(&page->lru);
171                 /*
172                  * We isolated a non-LRU movable page, so here we can use
173                  * __PageMovable because an LRU page's mapping cannot have
174                  * PAGE_MAPPING_MOVABLE set.
175                  */
176                 if (unlikely(__PageMovable(page))) {
177                         VM_BUG_ON_PAGE(!PageIsolated(page), page);
178                         lock_page(page);
179                         if (PageMovable(page))
180                                 putback_movable_page(page);
181                         else
182                                 __ClearPageIsolated(page);
183                         unlock_page(page);
184                         put_page(page);
185                 } else {
186                         putback_lru_page(page);
187                         dec_node_page_state(page, NR_ISOLATED_ANON +
188                                         page_is_file_cache(page));
189                 }
190         }
191 }
192
193 /*
194  * Restore a potential migration pte to a working pte entry
195  */
196 static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
197                                  unsigned long addr, void *old)
198 {
199         struct mm_struct *mm = vma->vm_mm;
200         swp_entry_t entry;
201         pmd_t *pmd;
202         pte_t *ptep, pte;
203         spinlock_t *ptl;
204
205         if (unlikely(PageHuge(new))) {
206                 ptep = huge_pte_offset(mm, addr);
207                 if (!ptep)
208                         goto out;
209                 ptl = huge_pte_lockptr(hstate_vma(vma), mm, ptep);
210         } else {
211                 pmd = mm_find_pmd(mm, addr);
212                 if (!pmd)
213                         goto out;
214
215                 ptep = pte_offset_map(pmd, addr);
216
217                 /*
218                  * Peek to check is_swap_pte() before taking ptlock?  No, we
219                  * can race mremap's move_ptes(), which skips anon_vma lock.
220                  */
221
222                 ptl = pte_lockptr(mm, pmd);
223         }
224
225         spin_lock(ptl);
226         pte = *ptep;
227         if (!is_swap_pte(pte))
228                 goto unlock;
229
230         entry = pte_to_swp_entry(pte);
231
232         if (!is_migration_entry(entry) ||
233             migration_entry_to_page(entry) != old)
234                 goto unlock;
235
236         get_page(new);
237         pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
238         if (pte_swp_soft_dirty(*ptep))
239                 pte = pte_mksoft_dirty(pte);
240
241         /* Recheck VMA as permissions can have changed since migration started */
242         if (is_write_migration_entry(entry))
243                 pte = maybe_mkwrite(pte, vma);
244
245 #ifdef CONFIG_HUGETLB_PAGE
246         if (PageHuge(new)) {
247                 pte = pte_mkhuge(pte);
248                 pte = arch_make_huge_pte(pte, vma, new, 0);
249         }
250 #endif
251         flush_dcache_page(new);
252         set_pte_at(mm, addr, ptep, pte);
253
254         if (PageHuge(new)) {
255                 if (PageAnon(new))
256                         hugepage_add_anon_rmap(new, vma, addr);
257                 else
258                         page_dup_rmap(new, true);
259         } else if (PageAnon(new))
260                 page_add_anon_rmap(new, vma, addr, false);
261         else
262                 page_add_file_rmap(new, false);
263
264         if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
265                 mlock_vma_page(new);
266
267         /* No need to invalidate - it was non-present before */
268         update_mmu_cache(vma, addr, ptep);
269 unlock:
270         pte_unmap_unlock(ptep, ptl);
271 out:
272         return SWAP_AGAIN;
273 }
274
275 /*
276  * Get rid of all migration entries and replace them by
277  * references to the indicated page.
278  */
279 void remove_migration_ptes(struct page *old, struct page *new, bool locked)
280 {
281         struct rmap_walk_control rwc = {
282                 .rmap_one = remove_migration_pte,
283                 .arg = old,
284         };
285
286         if (locked)
287                 rmap_walk_locked(new, &rwc);
288         else
289                 rmap_walk(new, &rwc);
290 }
291
292 /*
293  * Something used the pte of a page under migration. We need to
294  * get to the page and wait until migration is finished.
295  * When we return from this function the fault will be retried.
296  */
297 void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
298                                 spinlock_t *ptl)
299 {
300         pte_t pte;
301         swp_entry_t entry;
302         struct page *page;
303
304         spin_lock(ptl);
305         pte = *ptep;
306         if (!is_swap_pte(pte))
307                 goto out;
308
309         entry = pte_to_swp_entry(pte);
310         if (!is_migration_entry(entry))
311                 goto out;
312
313         page = migration_entry_to_page(entry);
314
315         /*
316          * Once the radix-tree replacement for page migration has started,
317          * page_count *must* be zero. And we don't want to call
318          * wait_on_page_locked() against a page without get_page().
319          * So we use get_page_unless_zero() here. Even if that fails, the
320          * page fault will simply occur again.
321          */
322         if (!get_page_unless_zero(page))
323                 goto out;
324         pte_unmap_unlock(ptep, ptl);
325         wait_on_page_locked(page);
326         put_page(page);
327         return;
328 out:
329         pte_unmap_unlock(ptep, ptl);
330 }
331
332 void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
333                                 unsigned long address)
334 {
335         spinlock_t *ptl = pte_lockptr(mm, pmd);
336         pte_t *ptep = pte_offset_map(pmd, address);
337         __migration_entry_wait(mm, ptep, ptl);
338 }
339
340 void migration_entry_wait_huge(struct vm_area_struct *vma,
341                 struct mm_struct *mm, pte_t *pte)
342 {
343         spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte);
344         __migration_entry_wait(mm, pte, ptl);
345 }
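
/*
 * Note: the wait helpers above are typically reached from the fault path
 * (e.g. do_swap_page() or hugetlb_fault()) when a faulting pte turns out to
 * be a migration entry; the fault is simply retried once the page under
 * migration has been unlocked.
 */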
346
347 #ifdef CONFIG_BLOCK
348 /* Returns true if all buffers are successfully locked */
349 static bool buffer_migrate_lock_buffers(struct buffer_head *head,
350                                                         enum migrate_mode mode)
351 {
352         struct buffer_head *bh = head;
353
354         /* Simple case, sync compaction */
355         if (mode != MIGRATE_ASYNC) {
356                 do {
357                         get_bh(bh);
358                         lock_buffer(bh);
359                         bh = bh->b_this_page;
360
361                 } while (bh != head);
362
363                 return true;
364         }
365
366         /* Async case: we cannot block on lock_buffer(), so use trylock_buffer() */
367         do {
368                 get_bh(bh);
369                 if (!trylock_buffer(bh)) {
370                         /*
371                          * We failed to lock the buffer and cannot stall in
372                          * async migration. Release the locks taken so far.
373                          */
374                         struct buffer_head *failed_bh = bh;
375                         put_bh(failed_bh);
376                         bh = head;
377                         while (bh != failed_bh) {
378                                 unlock_buffer(bh);
379                                 put_bh(bh);
380                                 bh = bh->b_this_page;
381                         }
382                         return false;
383                 }
384
385                 bh = bh->b_this_page;
386         } while (bh != head);
387         return true;
388 }
389 #else
390 static inline bool buffer_migrate_lock_buffers(struct buffer_head *head,
391                                                         enum migrate_mode mode)
392 {
393         return true;
394 }
395 #endif /* CONFIG_BLOCK */
396
397 /*
398  * Replace the page in the mapping.
399  *
400  * The number of remaining references must be:
401  * 1 for anonymous pages without a mapping
402  * 2 for pages with a mapping
403  * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
404  */
405 int migrate_page_move_mapping(struct address_space *mapping,
406                 struct page *newpage, struct page *page,
407                 struct buffer_head *head, enum migrate_mode mode,
408                 int extra_count)
409 {
410         struct zone *oldzone, *newzone;
411         int dirty;
412         int expected_count = 1 + extra_count;
413         void **pslot;
414
415         if (!mapping) {
416                 /* Anonymous page without mapping */
417                 if (page_count(page) != expected_count)
418                         return -EAGAIN;
419
420                 /* No turning back from here */
421                 newpage->index = page->index;
422                 newpage->mapping = page->mapping;
423                 if (PageSwapBacked(page))
424                         __SetPageSwapBacked(newpage);
425
426                 return MIGRATEPAGE_SUCCESS;
427         }
428
429         oldzone = page_zone(page);
430         newzone = page_zone(newpage);
431
432         spin_lock_irq(&mapping->tree_lock);
433
434         pslot = radix_tree_lookup_slot(&mapping->page_tree,
435                                         page_index(page));
436
437         expected_count += 1 + page_has_private(page);
438         if (page_count(page) != expected_count ||
439                 radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
440                 spin_unlock_irq(&mapping->tree_lock);
441                 return -EAGAIN;
442         }
443
444         if (!page_ref_freeze(page, expected_count)) {
445                 spin_unlock_irq(&mapping->tree_lock);
446                 return -EAGAIN;
447         }
448
449         /*
450          * In the async migration case of moving a page with buffers, lock the
451          * buffers using trylock before the mapping is moved. If the mapping
452          * were moved first and we then failed to lock the buffers, we could not
453          * move the mapping back due to the elevated page count, and would have
454          * to block waiting for other references to be dropped.
455          */
456         if (mode == MIGRATE_ASYNC && head &&
457                         !buffer_migrate_lock_buffers(head, mode)) {
458                 page_ref_unfreeze(page, expected_count);
459                 spin_unlock_irq(&mapping->tree_lock);
460                 return -EAGAIN;
461         }
462
463         /*
464          * Now we know that no one else is looking at the page:
465          * no turning back from here.
466          */
467         newpage->index = page->index;
468         newpage->mapping = page->mapping;
469         if (PageSwapBacked(page))
470                 __SetPageSwapBacked(newpage);
471
472         get_page(newpage);      /* add cache reference */
473         if (PageSwapCache(page)) {
474                 SetPageSwapCache(newpage);
475                 set_page_private(newpage, page_private(page));
476         }
477
478         /* Move dirty while page refs frozen and newpage not yet exposed */
479         dirty = PageDirty(page);
480         if (dirty) {
481                 ClearPageDirty(page);
482                 SetPageDirty(newpage);
483         }
484
485         radix_tree_replace_slot(pslot, newpage);
486
487         /*
488          * Drop cache reference from old page by unfreezing
489          * to one less reference.
490          * We know this isn't the last reference.
491          */
492         page_ref_unfreeze(page, expected_count - 1);
493
494         spin_unlock(&mapping->tree_lock);
495         /* Leave irq disabled to prevent preemption while updating stats */
496
497         /*
498          * If moved to a different zone then also account
499          * the page for that zone. Other VM counters will be
500          * taken care of when we establish references to the
501          * new page and drop references to the old page.
502          *
503          * Note that anonymous pages are accounted for
504          * via NR_FILE_PAGES and NR_ANON_MAPPED if they
505          * are mapped to swap space.
506          */
507         if (newzone != oldzone) {
508                 __dec_node_state(oldzone->zone_pgdat, NR_FILE_PAGES);
509                 __inc_node_state(newzone->zone_pgdat, NR_FILE_PAGES);
510                 if (PageSwapBacked(page) && !PageSwapCache(page)) {
511                         __dec_node_state(oldzone->zone_pgdat, NR_SHMEM);
512                         __inc_node_state(newzone->zone_pgdat, NR_SHMEM);
513                 }
514                 if (dirty && mapping_cap_account_dirty(mapping)) {
515                         __dec_node_state(oldzone->zone_pgdat, NR_FILE_DIRTY);
516                         __dec_zone_state(oldzone, NR_ZONE_WRITE_PENDING);
517                         __inc_node_state(newzone->zone_pgdat, NR_FILE_DIRTY);
518                         __inc_zone_state(newzone, NR_ZONE_WRITE_PENDING);
519                 }
520         }
521         local_irq_enable();
522
523         return MIGRATEPAGE_SUCCESS;
524 }
525 EXPORT_SYMBOL(migrate_page_move_mapping);
526
527 /*
528  * The expected number of remaining references is the same as that
529  * of migrate_page_move_mapping().
530  */
531 int migrate_huge_page_move_mapping(struct address_space *mapping,
532                                    struct page *newpage, struct page *page)
533 {
534         int expected_count;
535         void **pslot;
536
537         spin_lock_irq(&mapping->tree_lock);
538
539         pslot = radix_tree_lookup_slot(&mapping->page_tree,
540                                         page_index(page));
541
542         expected_count = 2 + page_has_private(page);
543         if (page_count(page) != expected_count ||
544                 radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
545                 spin_unlock_irq(&mapping->tree_lock);
546                 return -EAGAIN;
547         }
548
549         if (!page_ref_freeze(page, expected_count)) {
550                 spin_unlock_irq(&mapping->tree_lock);
551                 return -EAGAIN;
552         }
553
554         newpage->index = page->index;
555         newpage->mapping = page->mapping;
556
557         get_page(newpage);
558
559         radix_tree_replace_slot(pslot, newpage);
560
561         page_ref_unfreeze(page, expected_count - 1);
562
563         spin_unlock_irq(&mapping->tree_lock);
564
565         return MIGRATEPAGE_SUCCESS;
566 }
567
568 /*
569  * Gigantic pages are so large that we do not guarantee that page++ pointer
570  * arithmetic will work across the entire page.  We need something more
571  * specialized.
572  */
573 static void __copy_gigantic_page(struct page *dst, struct page *src,
574                                 int nr_pages)
575 {
576         int i;
577         struct page *dst_base = dst;
578         struct page *src_base = src;
579
580         for (i = 0; i < nr_pages; ) {
581                 cond_resched();
582                 copy_highpage(dst, src);
583
584                 i++;
585                 dst = mem_map_next(dst, dst_base, i);
586                 src = mem_map_next(src, src_base, i);
587         }
588 }
589
590 static void copy_huge_page(struct page *dst, struct page *src)
591 {
592         int i;
593         int nr_pages;
594
595         if (PageHuge(src)) {
596                 /* hugetlbfs page */
597                 struct hstate *h = page_hstate(src);
598                 nr_pages = pages_per_huge_page(h);
599
600                 if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) {
601                         __copy_gigantic_page(dst, src, nr_pages);
602                         return;
603                 }
604         } else {
605                 /* thp page */
606                 BUG_ON(!PageTransHuge(src));
607                 nr_pages = hpage_nr_pages(src);
608         }
609
610         for (i = 0; i < nr_pages; i++) {
611                 cond_resched();
612                 copy_highpage(dst + i, src + i);
613         }
614 }
615
616 /*
617  * Copy the page to its new location
618  */
619 void migrate_page_copy(struct page *newpage, struct page *page)
620 {
621         int cpupid;
622
623         if (PageHuge(page) || PageTransHuge(page))
624                 copy_huge_page(newpage, page);
625         else
626                 copy_highpage(newpage, page);
627
628         if (PageError(page))
629                 SetPageError(newpage);
630         if (PageReferenced(page))
631                 SetPageReferenced(newpage);
632         if (PageUptodate(page))
633                 SetPageUptodate(newpage);
634         if (TestClearPageActive(page)) {
635                 VM_BUG_ON_PAGE(PageUnevictable(page), page);
636                 SetPageActive(newpage);
637         } else if (TestClearPageUnevictable(page))
638                 SetPageUnevictable(newpage);
639         if (PageChecked(page))
640                 SetPageChecked(newpage);
641         if (PageMappedToDisk(page))
642                 SetPageMappedToDisk(newpage);
643
644         /* Move the dirty flag for pages not handled by migrate_page_move_mapping() */
645         if (PageDirty(page))
646                 SetPageDirty(newpage);
647
648         if (page_is_young(page))
649                 set_page_young(newpage);
650         if (page_is_idle(page))
651                 set_page_idle(newpage);
652
653         /*
654          * Copy NUMA information to the new page, to prevent over-eager
655          * future migrations of this same page.
656          */
657         cpupid = page_cpupid_xchg_last(page, -1);
658         page_cpupid_xchg_last(newpage, cpupid);
659
660         ksm_migrate_page(newpage, page);
661         /*
662          * Please do not reorder this without considering how mm/ksm.c's
663          * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
664          */
665         if (PageSwapCache(page))
666                 ClearPageSwapCache(page);
667         ClearPagePrivate(page);
668         set_page_private(page, 0);
669
670         /*
671          * If any waiters have accumulated on the new page then
672          * wake them up.
673          */
674         if (PageWriteback(newpage))
675                 end_page_writeback(newpage);
676
677         copy_page_owner(page, newpage);
678
679         mem_cgroup_migrate(page, newpage);
680 }
681 EXPORT_SYMBOL(migrate_page_copy);
682
683 /************************************************************
684  *                    Migration functions
685  ***********************************************************/
686
687 /*
688  * Common logic to directly migrate a single LRU page suitable for
689  * pages that do not use PagePrivate/PagePrivate2.
690  *
691  * Pages are locked upon entry and exit.
692  */
693 int migrate_page(struct address_space *mapping,
694                 struct page *newpage, struct page *page,
695                 enum migrate_mode mode)
696 {
697         int rc;
698
699         BUG_ON(PageWriteback(page));    /* Writeback must be complete */
700
701         rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
702
703         if (rc != MIGRATEPAGE_SUCCESS)
704                 return rc;
705
706         migrate_page_copy(newpage, page);
707         return MIGRATEPAGE_SUCCESS;
708 }
709 EXPORT_SYMBOL(migrate_page);
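
/*
 * Illustrative sketch (not part of this file): filesystems whose pages carry
 * no private data can wire this helper directly into their
 * address_space_operations, e.g. (hypothetical foo_aops):
 *
 *	static const struct address_space_operations foo_aops = {
 *		.readpage	= foo_readpage,
 *		.writepage	= foo_writepage,
 *		.migratepage	= migrate_page,
 *	};
 */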
710
711 #ifdef CONFIG_BLOCK
712 /*
713  * Migration function for pages with buffers. This function can only be used
714  * if the underlying filesystem guarantees that no other references to "page"
715  * exist.
716  */
717 int buffer_migrate_page(struct address_space *mapping,
718                 struct page *newpage, struct page *page, enum migrate_mode mode)
719 {
720         struct buffer_head *bh, *head;
721         int rc;
722
723         if (!page_has_buffers(page))
724                 return migrate_page(mapping, newpage, page, mode);
725
726         head = page_buffers(page);
727
728         rc = migrate_page_move_mapping(mapping, newpage, page, head, mode, 0);
729
730         if (rc != MIGRATEPAGE_SUCCESS)
731                 return rc;
732
733         /*
734          * In the async case, migrate_page_move_mapping locked the buffers
735          * with an IRQ-safe spinlock held. In the sync case, the buffers
736          * need to be locked now.
737          */
738         if (mode != MIGRATE_ASYNC)
739                 BUG_ON(!buffer_migrate_lock_buffers(head, mode));
740
741         ClearPagePrivate(page);
742         set_page_private(newpage, page_private(page));
743         set_page_private(page, 0);
744         put_page(page);
745         get_page(newpage);
746
747         bh = head;
748         do {
749                 set_bh_page(bh, newpage, bh_offset(bh));
750                 bh = bh->b_this_page;
751
752         } while (bh != head);
753
754         SetPagePrivate(newpage);
755
756         migrate_page_copy(newpage, page);
757
758         bh = head;
759         do {
760                 unlock_buffer(bh);
761                 put_bh(bh);
762                 bh = bh->b_this_page;
763
764         } while (bh != head);
765
766         return MIGRATEPAGE_SUCCESS;
767 }
768 EXPORT_SYMBOL(buffer_migrate_page);
769 #endif
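
/*
 * Illustrative sketch (not part of this file): block-based filesystems that
 * attach buffer_heads to their pagecache pages would typically point their
 * ->migratepage at buffer_migrate_page instead, e.g.:
 *
 *	.migratepage	= buffer_migrate_page,
 */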
770
771 /*
772  * Write back a page to clear its dirty state
773  */
774 static int writeout(struct address_space *mapping, struct page *page)
775 {
776         struct writeback_control wbc = {
777                 .sync_mode = WB_SYNC_NONE,
778                 .nr_to_write = 1,
779                 .range_start = 0,
780                 .range_end = LLONG_MAX,
781                 .for_reclaim = 1
782         };
783         int rc;
784
785         if (!mapping->a_ops->writepage)
786                 /* No write method for the address space */
787                 return -EINVAL;
788
789         if (!clear_page_dirty_for_io(page))
790                 /* Someone else already triggered a write */
791                 return -EAGAIN;
792
793         /*
794          * A dirty page may imply that the underlying filesystem has
795          * the page on some queue. So the page must be clean for
796          * migration. Writeout may mean we lose the lock and the
797          * page state is no longer what we checked for earlier.
798          * At this point we know that the migration attempt cannot
799          * be successful.
800          */
801         remove_migration_ptes(page, page, false);
802
803         rc = mapping->a_ops->writepage(page, &wbc);
804
805         if (rc != AOP_WRITEPAGE_ACTIVATE)
806                 /* unlocked. Relock */
807                 lock_page(page);
808
809         return (rc < 0) ? -EIO : -EAGAIN;
810 }
811
812 /*
813  * Default handling if a filesystem does not provide a migration function.
814  */
815 static int fallback_migrate_page(struct address_space *mapping,
816         struct page *newpage, struct page *page, enum migrate_mode mode)
817 {
818         if (PageDirty(page)) {
819                 /* Only writeback pages in full synchronous migration */
820                 if (mode != MIGRATE_SYNC)
821                         return -EBUSY;
822                 return writeout(mapping, page);
823         }
824
825         /*
826          * Buffers may be managed in a filesystem specific way.
827          * We must have no buffers or drop them.
828          */
829         if (page_has_private(page) &&
830             !try_to_release_page(page, GFP_KERNEL))
831                 return -EAGAIN;
832
833         return migrate_page(mapping, newpage, page, mode);
834 }
835
836 /*
837  * Move a page to a newly allocated page.
838  * The page is locked and all ptes have been successfully removed.
839  *
840  * The new page will have replaced the old page if this function
841  * is successful.
842  *
843  * Return value:
844  *   < 0 - error code
845  *  MIGRATEPAGE_SUCCESS - success
846  */
847 static int move_to_new_page(struct page *newpage, struct page *page,
848                                 enum migrate_mode mode)
849 {
850         struct address_space *mapping;
851         int rc = -EAGAIN;
852         bool is_lru = !__PageMovable(page);
853
854         VM_BUG_ON_PAGE(!PageLocked(page), page);
855         VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
856
857         mapping = page_mapping(page);
858
859         if (likely(is_lru)) {
860                 if (!mapping)
861                         rc = migrate_page(mapping, newpage, page, mode);
862                 else if (mapping->a_ops->migratepage)
863                         /*
864                          * Most pages have a mapping and most filesystems
865                          * provide a migratepage callback. Anonymous pages
866                          * are part of swap space which also has its own
867                          * migratepage callback. This is the most common path
868                          * for page migration.
869                          */
870                         rc = mapping->a_ops->migratepage(mapping, newpage,
871                                                         page, mode);
872                 else
873                         rc = fallback_migrate_page(mapping, newpage,
874                                                         page, mode);
875         } else {
876                 /*
877                  * A non-LRU page could have been released after the
878                  * isolation step. In that case, we shouldn't try migration.
879                  */
880                 VM_BUG_ON_PAGE(!PageIsolated(page), page);
881                 if (!PageMovable(page)) {
882                         rc = MIGRATEPAGE_SUCCESS;
883                         __ClearPageIsolated(page);
884                         goto out;
885                 }
886
887                 rc = mapping->a_ops->migratepage(mapping, newpage,
888                                                 page, mode);
889                 WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
890                         !PageIsolated(page));
891         }
892
893         /*
894          * When successful, old pagecache page->mapping must be cleared before
895          * page is freed; but stats require that PageAnon be left as PageAnon.
896          */
897         if (rc == MIGRATEPAGE_SUCCESS) {
898                 if (__PageMovable(page)) {
899                         VM_BUG_ON_PAGE(!PageIsolated(page), page);
900
901                         /*
902                          * We clear PG_movable under the page lock so that no
903                          * compactor can try to migrate this page.
904                          */
905                         __ClearPageIsolated(page);
906                 }
907
908                 /*
909                  * An anonymous or movable page's ->mapping will be cleared by
910                  * free_pages_prepare(), so don't reset it here; keeping it lets
911                  * type checks such as PageAnon() continue to work.
912                  */
913                 if (!PageMappingFlags(page))
914                         page->mapping = NULL;
915         }
916 out:
917         return rc;
918 }
919
920 static int __unmap_and_move(struct page *page, struct page *newpage,
921                                 int force, enum migrate_mode mode)
922 {
923         int rc = -EAGAIN;
924         int page_was_mapped = 0;
925         struct anon_vma *anon_vma = NULL;
926         bool is_lru = !__PageMovable(page);
927
928         if (!trylock_page(page)) {
929                 if (!force || mode == MIGRATE_ASYNC)
930                         goto out;
931
932                 /*
933                  * It's not safe for direct compaction to call lock_page.
934                  * For example, during page readahead pages are added locked
935                  * to the LRU. Later, when the IO completes the pages are
936                  * marked uptodate and unlocked. However, the queueing
937                  * could be merging multiple pages for one bio (e.g.
938                  * mpage_readpages). If an allocation happens for the
939                  * second or third page, the process can end up locking
940                  * the same page twice and deadlocking. Rather than
941                  * trying to be clever about what pages can be locked,
942                  * avoid the use of lock_page for direct compaction
943                  * altogether.
944                  */
945                 if (current->flags & PF_MEMALLOC)
946                         goto out;
947
948                 lock_page(page);
949         }
950
951         if (PageWriteback(page)) {
952                 /*
953                  * Only in the case of a full synchronous migration is it
954                  * necessary to wait for PageWriteback. In the async case,
955                  * the retry loop is too short and in the sync-light case,
956                  * the overhead of stalling is too much.
957                  */
958                 if (mode != MIGRATE_SYNC) {
959                         rc = -EBUSY;
960                         goto out_unlock;
961                 }
962                 if (!force)
963                         goto out_unlock;
964                 wait_on_page_writeback(page);
965         }
966
967         /*
968          * try_to_unmap() brings page->mapcount down to 0 here, so we cannot
969          * notice if the anon_vma is freed while we migrate a page. This
970          * get_anon_vma() delays freeing the anon_vma pointer until the end
971          * of migration. File cache pages are no problem because of the page
972          * lock: file caches may use writepage() or lock_page() during
973          * migration, so we only need to care about anon pages here.
974          *
975          * Only page_get_anon_vma() understands the subtleties of
976          * getting a hold on an anon_vma from outside one of its mms.
977          * But if we cannot get anon_vma, then we won't need it anyway,
978          * because that implies that the anon page is no longer mapped
979          * (and cannot be remapped so long as we hold the page lock).
980          */
981         if (PageAnon(page) && !PageKsm(page))
982                 anon_vma = page_get_anon_vma(page);
983
984         /*
985          * Block others from accessing the new page when we get around to
986          * establishing additional references. We are usually the only one
987          * holding a reference to newpage at this point. We used to have a BUG
988          * here if trylock_page(newpage) fails, but would like to allow for
989          * cases where there might be a race with the previous use of newpage.
990          * This is much like races on refcount of oldpage: just don't BUG().
991          */
992         if (unlikely(!trylock_page(newpage)))
993                 goto out_unlock;
994
995         if (unlikely(!is_lru)) {
996                 rc = move_to_new_page(newpage, page, mode);
997                 goto out_unlock_both;
998         }
999
1000         /*
1001          * Corner case handling:
1002          * 1. When a new swap-cache page is read in, it is added to the LRU
1003          * and treated as swapcache, but it has no rmap yet.
1004          * Calling try_to_unmap() against a page->mapping==NULL page will
1005          * trigger a BUG.  So handle it here.
1006          * 2. An orphaned page (see truncate_complete_page) might have
1007          * fs-private metadata. The page can be picked up due to memory
1008          * offlining.  Everywhere else except page reclaim, the page is
1009          * invisible to the vm, so the page can not be migrated.  So try to
1010          * free the metadata, so the page can be freed.
1011          */
1012         if (!page->mapping) {
1013                 VM_BUG_ON_PAGE(PageAnon(page), page);
1014                 if (page_has_private(page)) {
1015                         try_to_free_buffers(page);
1016                         goto out_unlock_both;
1017                 }
1018         } else if (page_mapped(page)) {
1019                 /* Establish migration ptes */
1020                 VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
1021                                 page);
1022                 try_to_unmap(page,
1023                         TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
1024                 page_was_mapped = 1;
1025         }
1026
1027         if (!page_mapped(page))
1028                 rc = move_to_new_page(newpage, page, mode);
1029
1030         if (page_was_mapped)
1031                 remove_migration_ptes(page,
1032                         rc == MIGRATEPAGE_SUCCESS ? newpage : page, false);
1033
1034 out_unlock_both:
1035         unlock_page(newpage);
1036 out_unlock:
1037         /* Drop an anon_vma reference if we took one */
1038         if (anon_vma)
1039                 put_anon_vma(anon_vma);
1040         unlock_page(page);
1041 out:
1042         /*
1043          * If migration was successful, drop our reference to the newpage;
1044          * this will not free the page because the new page owner already
1045          * raised the refcount. If it is an LRU page, also add it back to
1046          * the LRU list here.
1047          */
1048         if (rc == MIGRATEPAGE_SUCCESS) {
1049                 if (unlikely(__PageMovable(newpage)))
1050                         put_page(newpage);
1051                 else
1052                         putback_lru_page(newpage);
1053         }
1054
1055         return rc;
1056 }
1057
1058 /*
1059  * gcc 4.7 and 4.8 on arm get an ICE when inlining unmap_and_move().  Work
1060  * around it.
1061  */
1062 #if (GCC_VERSION >= 40700 && GCC_VERSION < 40900) && defined(CONFIG_ARM)
1063 #define ICE_noinline noinline
1064 #else
1065 #define ICE_noinline
1066 #endif
1067
1068 /*
1069  * Obtain the lock on page, remove all ptes and migrate the page
1070  * to the newly allocated page in newpage.
1071  */
1072 static ICE_noinline int unmap_and_move(new_page_t get_new_page,
1073                                    free_page_t put_new_page,
1074                                    unsigned long private, struct page *page,
1075                                    int force, enum migrate_mode mode,
1076                                    enum migrate_reason reason)
1077 {
1078         int rc = MIGRATEPAGE_SUCCESS;
1079         int *result = NULL;
1080         struct page *newpage;
1081
1082         newpage = get_new_page(page, private, &result);
1083         if (!newpage)
1084                 return -ENOMEM;
1085
1086         if (page_count(page) == 1) {
1087                 /* page was freed from under us. So we are done. */
1088                 ClearPageActive(page);
1089                 ClearPageUnevictable(page);
1090                 if (unlikely(__PageMovable(page))) {
1091                         lock_page(page);
1092                         if (!PageMovable(page))
1093                                 __ClearPageIsolated(page);
1094                         unlock_page(page);
1095                 }
1096                 if (put_new_page)
1097                         put_new_page(newpage, private);
1098                 else
1099                         put_page(newpage);
1100                 goto out;
1101         }
1102
1103         if (unlikely(PageTransHuge(page))) {
1104                 lock_page(page);
1105                 rc = split_huge_page(page);
1106                 unlock_page(page);
1107                 if (rc)
1108                         goto out;
1109         }
1110
1111         rc = __unmap_and_move(page, newpage, force, mode);
1112         if (rc == MIGRATEPAGE_SUCCESS)
1113                 set_page_owner_migrate_reason(newpage, reason);
1114
1115 out:
1116         if (rc != -EAGAIN) {
1117                 /*
1118                  * A page that has been migrated has all references
1119                  * removed and will be freed. A page that has not been
1120                  * migrated will have kept its references and be
1121                  * restored.
1122                  */
1123                 list_del(&page->lru);
1124
1125                 /*
1126                  * Compaction can also migrate non-LRU pages, which are
1127                  * not accounted in NR_ISOLATED_*. They can be recognized
1128                  * by __PageMovable().
1129                  */
1130                 if (likely(!__PageMovable(page)))
1131                         dec_node_page_state(page, NR_ISOLATED_ANON +
1132                                         page_is_file_cache(page));
1133         }
1134
1135         /*
1136          * If migration was successful, release the reference grabbed during
1137          * isolation. Otherwise, put the page back on the right list unless
1138          * we want to retry.
1139          */
1140         if (rc == MIGRATEPAGE_SUCCESS) {
1141                 put_page(page);
1142                 if (reason == MR_MEMORY_FAILURE) {
1143                         /*
1144                          * Set PG_HWPoison on the just-freed page
1145                          * intentionally. Although it's rather weird,
1146                          * it's how HWPoison flag works at the moment.
1147                          */
1148                         if (!test_set_page_hwpoison(page))
1149                                 num_poisoned_pages_inc();
1150                 }
1151         } else {
1152                 if (rc != -EAGAIN) {
1153                         if (likely(!__PageMovable(page))) {
1154                                 putback_lru_page(page);
1155                                 goto put_new;
1156                         }
1157
1158                         lock_page(page);
1159                         if (PageMovable(page))
1160                                 putback_movable_page(page);
1161                         else
1162                                 __ClearPageIsolated(page);
1163                         unlock_page(page);
1164                         put_page(page);
1165                 }
1166 put_new:
1167                 if (put_new_page)
1168                         put_new_page(newpage, private);
1169                 else
1170                         put_page(newpage);
1171         }
1172
1173         if (result) {
1174                 if (rc)
1175                         *result = rc;
1176                 else
1177                         *result = page_to_nid(newpage);
1178         }
1179         return rc;
1180 }
1181
1182 /*
1183  * Counterpart of unmap_and_move() for hugepage migration.
1184  *
1185  * This function doesn't wait for the completion of hugepage I/O
1186  * because there is no race between I/O and migration for hugepages.
1187  * Note that currently hugepage I/O occurs only in direct I/O
1188  * where no lock is held and PG_writeback is irrelevant,
1189  * and the writeback status of all subpages is counted in the reference
1190  * count of the head page (i.e. if all subpages of a 2MB hugepage are
1191  * under direct I/O, the reference count of the head page is 512 and a bit more.)
1192  * This means that when we try to migrate a hugepage whose subpages are
1193  * doing direct I/O, some references remain after try_to_unmap() and
1194  * hugepage migration fails without data corruption.
1195  *
1196  * There is also no race when direct I/O is issued on the page under migration,
1197  * because then pte is replaced with migration swap entry and direct I/O code
1198  * will wait in the page fault for migration to complete.
1199  */
1200 static int unmap_and_move_huge_page(new_page_t get_new_page,
1201                                 free_page_t put_new_page, unsigned long private,
1202                                 struct page *hpage, int force,
1203                                 enum migrate_mode mode, int reason)
1204 {
1205         int rc = -EAGAIN;
1206         int *result = NULL;
1207         int page_was_mapped = 0;
1208         struct page *new_hpage;
1209         struct anon_vma *anon_vma = NULL;
1210
1211         /*
1212          * Movability of hugepages depends on the architecture and the hugepage size.
1213          * This check is necessary because some callers of hugepage migration
1214          * like soft offline and memory hotremove don't walk through page
1215          * tables or check whether the hugepage is pmd-based or not before
1216          * kicking migration.
1217          */
1218         if (!hugepage_migration_supported(page_hstate(hpage))) {
1219                 putback_active_hugepage(hpage);
1220                 return -ENOSYS;
1221         }
1222
1223         new_hpage = get_new_page(hpage, private, &result);
1224         if (!new_hpage)
1225                 return -ENOMEM;
1226
1227         if (!trylock_page(hpage)) {
1228                 if (!force || mode != MIGRATE_SYNC)
1229                         goto out;
1230                 lock_page(hpage);
1231         }
1232
1233         if (PageAnon(hpage))
1234                 anon_vma = page_get_anon_vma(hpage);
1235
1236         if (unlikely(!trylock_page(new_hpage)))
1237                 goto put_anon;
1238
1239         if (page_mapped(hpage)) {
1240                 try_to_unmap(hpage,
1241                         TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
1242                 page_was_mapped = 1;
1243         }
1244
1245         if (!page_mapped(hpage))
1246                 rc = move_to_new_page(new_hpage, hpage, mode);
1247
1248         if (page_was_mapped)
1249                 remove_migration_ptes(hpage,
1250                         rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage, false);
1251
1252         unlock_page(new_hpage);
1253
1254 put_anon:
1255         if (anon_vma)
1256                 put_anon_vma(anon_vma);
1257
1258         if (rc == MIGRATEPAGE_SUCCESS) {
1259                 hugetlb_cgroup_migrate(hpage, new_hpage);
1260                 put_new_page = NULL;
1261                 set_page_owner_migrate_reason(new_hpage, reason);
1262         }
1263
1264         unlock_page(hpage);
1265 out:
1266         if (rc != -EAGAIN)
1267                 putback_active_hugepage(hpage);
1268
1269         /*
1270          * If migration was not successful and there's a freeing callback, use
1271          * it.  Otherwise, putback_active_hugepage() will drop the reference
1272          * taken when the new page was allocated.
1273          */
1274         if (put_new_page)
1275                 put_new_page(new_hpage, private);
1276         else
1277                 putback_active_hugepage(new_hpage);
1278
1279         if (result) {
1280                 if (rc)
1281                         *result = rc;
1282                 else
1283                         *result = page_to_nid(new_hpage);
1284         }
1285         return rc;
1286 }
1287
1288 /*
1289  * migrate_pages - migrate the pages specified in a list, to the free pages
1290  *                 supplied as the target for the page migration
1291  *
1292  * @from:               The list of pages to be migrated.
1293  * @get_new_page:       The function used to allocate free pages to be used
1294  *                      as the target of the page migration.
1295  * @put_new_page:       The function used to free target pages if migration
1296  *                      fails, or NULL if no special handling is necessary.
1297  * @private:            Private data to be passed on to get_new_page()
1298  * @mode:               The migration mode that specifies the constraints for
1299  *                      page migration, if any.
1300  * @reason:             The reason for page migration.
1301  *
1302  * The function returns after 10 attempts or when no pages are left to migrate,
1303  * either because the list has become empty or because no retryable pages remain.
1304  * The caller should call putback_movable_pages() to return pages to the LRU
1305  * or free list only if ret != 0.
1306  *
1307  * Returns the number of pages that were not migrated, or an error code.
1308  */
1309 int migrate_pages(struct list_head *from, new_page_t get_new_page,
1310                 free_page_t put_new_page, unsigned long private,
1311                 enum migrate_mode mode, int reason)
1312 {
1313         int retry = 1;
1314         int nr_failed = 0;
1315         int nr_succeeded = 0;
1316         int pass = 0;
1317         struct page *page;
1318         struct page *page2;
1319         int swapwrite = current->flags & PF_SWAPWRITE;
1320         int rc;
1321
1322         if (!swapwrite)
1323                 current->flags |= PF_SWAPWRITE;
1324
1325         for(pass = 0; pass < 10 && retry; pass++) {
1326                 retry = 0;
1327
1328                 list_for_each_entry_safe(page, page2, from, lru) {
1329                         cond_resched();
1330
1331                         if (PageHuge(page))
1332                                 rc = unmap_and_move_huge_page(get_new_page,
1333                                                 put_new_page, private, page,
1334                                                 pass > 2, mode, reason);
1335                         else
1336                                 rc = unmap_and_move(get_new_page, put_new_page,
1337                                                 private, page, pass > 2, mode,
1338                                                 reason);
1339
1340                         switch(rc) {
1341                         case -ENOMEM:
1342                                 nr_failed++;
1343                                 goto out;
1344                         case -EAGAIN:
1345                                 retry++;
1346                                 break;
1347                         case MIGRATEPAGE_SUCCESS:
1348                                 nr_succeeded++;
1349                                 break;
1350                         default:
1351                                 /*
1352                                  * Permanent failure (-EBUSY, -ENOSYS, etc.):
1353                                  * unlike -EAGAIN case, the failed page is
1354                                  * removed from migration page list and not
1355                                  * retried in the next outer loop.
1356                                  */
1357                                 nr_failed++;
1358                                 break;
1359                         }
1360                 }
1361         }
1362         nr_failed += retry;
1363         rc = nr_failed;
1364 out:
1365         if (nr_succeeded)
1366                 count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
1367         if (nr_failed)
1368                 count_vm_events(PGMIGRATE_FAIL, nr_failed);
1369         trace_mm_migrate_pages(nr_succeeded, nr_failed, mode, reason);
1370
1371         if (!swapwrite)
1372                 current->flags &= ~PF_SWAPWRITE;
1373
1374         return rc;
1375 }
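
/*
 * Illustrative sketch (not part of this file): a minimal new_page_t callback
 * that allocates the target page on a fixed node, in the style of
 * new_page_node() below (names here are hypothetical):
 *
 *	static struct page *alloc_on_node(struct page *p, unsigned long private,
 *					  int **result)
 *	{
 *		int nid = (int)private;
 *
 *		if (PageHuge(p))
 *			return alloc_huge_page_node(page_hstate(compound_head(p)),
 *						    nid);
 *		return __alloc_pages_node(nid, GFP_HIGHUSER_MOVABLE, 0);
 *	}
 */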
1376
1377 #ifdef CONFIG_NUMA
1378 /*
1379  * Move a list of individual pages
1380  */
1381 struct page_to_node {
1382         unsigned long addr;
1383         struct page *page;
1384         int node;
1385         int status;
1386 };
1387
1388 static struct page *new_page_node(struct page *p, unsigned long private,
1389                 int **result)
1390 {
1391         struct page_to_node *pm = (struct page_to_node *)private;
1392
1393         while (pm->node != MAX_NUMNODES && pm->page != p)
1394                 pm++;
1395
1396         if (pm->node == MAX_NUMNODES)
1397                 return NULL;
1398
1399         *result = &pm->status;
1400
1401         if (PageHuge(p))
1402                 return alloc_huge_page_node(page_hstate(compound_head(p)),
1403                                         pm->node);
1404         else
1405                 return __alloc_pages_node(pm->node,
1406                                 GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 0);
1407 }
1408
1409 /*
1410  * Move a set of pages as indicated in the pm array. The addr
1411  * field must be set to the virtual address of the page to be moved
1412  * and the node number must contain a valid target node.
1413  * The pm array ends with node = MAX_NUMNODES.
1414  */
1415 static int do_move_page_to_node_array(struct mm_struct *mm,
1416                                       struct page_to_node *pm,
1417                                       int migrate_all)
1418 {
1419         int err;
1420         struct page_to_node *pp;
1421         LIST_HEAD(pagelist);
1422
1423         down_read(&mm->mmap_sem);
1424
1425         /*
1426          * Build a list of pages to migrate
1427          */
1428         for (pp = pm; pp->node != MAX_NUMNODES; pp++) {
1429                 struct vm_area_struct *vma;
1430                 struct page *page;
1431
1432                 err = -EFAULT;
1433                 vma = find_vma(mm, pp->addr);
1434                 if (!vma || pp->addr < vma->vm_start || !vma_migratable(vma))
1435                         goto set_status;
1436
1437                 /* FOLL_DUMP to ignore special (like zero) pages */
1438                 page = follow_page(vma, pp->addr,
1439                                 FOLL_GET | FOLL_SPLIT | FOLL_DUMP);
1440
1441                 err = PTR_ERR(page);
1442                 if (IS_ERR(page))
1443                         goto set_status;
1444
1445                 err = -ENOENT;
1446                 if (!page)
1447                         goto set_status;
1448
1449                 pp->page = page;
1450                 err = page_to_nid(page);
1451
1452                 if (err == pp->node)
1453                         /*
1454                          * Node already in the right place
1455                          */
1456                         goto put_and_set;
1457
1458                 err = -EACCES;
1459                 if (page_mapcount(page) > 1 &&
1460                                 !migrate_all)
1461                         goto put_and_set;
1462
1463                 if (PageHuge(page)) {
1464                         if (PageHead(page))
1465                                 isolate_huge_page(page, &pagelist);
1466                         goto put_and_set;
1467                 }
1468
1469                 err = isolate_lru_page(page);
1470                 if (!err) {
1471                         list_add_tail(&page->lru, &pagelist);
1472                         inc_node_page_state(page, NR_ISOLATED_ANON +
1473                                             page_is_file_cache(page));
1474                 }
1475 put_and_set:
1476                 /*
1477                  * Either remove the duplicate refcount from
1478                  * isolate_lru_page() or drop the page ref if it was
1479                  * not isolated.
1480                  */
1481                 put_page(page);
1482 set_status:
1483                 pp->status = err;
1484         }
1485
1486         err = 0;
1487         if (!list_empty(&pagelist)) {
1488                 err = migrate_pages(&pagelist, new_page_node, NULL,
1489                                 (unsigned long)pm, MIGRATE_SYNC, MR_SYSCALL);
1490                 if (err)
1491                         putback_movable_pages(&pagelist);
1492         }
1493
1494         up_read(&mm->mmap_sem);
1495         return err;
1496 }
1497
1498 /*
1499  * Migrate an array of page addresses onto an array of nodes and fill
1500  * in the corresponding array of status values.
1501  */
1502 static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
1503                          unsigned long nr_pages,
1504                          const void __user * __user *pages,
1505                          const int __user *nodes,
1506                          int __user *status, int flags)
1507 {
1508         struct page_to_node *pm;
1509         unsigned long chunk_nr_pages;
1510         unsigned long chunk_start;
1511         int err;
1512
1513         err = -ENOMEM;
1514         pm = (struct page_to_node *)__get_free_page(GFP_KERNEL);
1515         if (!pm)
1516                 goto out;
1517
1518         migrate_prep();
1519
1520         /*
1521          * Store a chunk of the page_to_node array in a single page,
1522          * but keep the last entry free for the end-of-chunk marker
1523          */
1524         chunk_nr_pages = (PAGE_SIZE / sizeof(struct page_to_node)) - 1;
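        /*
         * On a 64-bit kernel with 4 KiB pages and the page_to_node layout
         * defined earlier in this file (roughly 24 bytes per entry, an
         * assumption that depends on the architecture), this works out to
         * about 169 usable entries per chunk plus the end marker.
         */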
1525
1526         for (chunk_start = 0;
1527              chunk_start < nr_pages;
1528              chunk_start += chunk_nr_pages) {
1529                 int j;
1530
1531                 if (chunk_start + chunk_nr_pages > nr_pages)
1532                         chunk_nr_pages = nr_pages - chunk_start;
1533
1534                 /* fill the chunk pm with addrs and nodes from user-space */
1535                 for (j = 0; j < chunk_nr_pages; j++) {
1536                         const void __user *p;
1537                         int node;
1538
1539                         err = -EFAULT;
1540                         if (get_user(p, pages + j + chunk_start))
1541                                 goto out_pm;
1542                         pm[j].addr = (unsigned long) p;
1543
1544                         if (get_user(node, nodes + j + chunk_start))
1545                                 goto out_pm;
1546
1547                         err = -ENODEV;
1548                         if (node < 0 || node >= MAX_NUMNODES)
1549                                 goto out_pm;
1550
1551                         if (!node_state(node, N_MEMORY))
1552                                 goto out_pm;
1553
1554                         err = -EACCES;
1555                         if (!node_isset(node, task_nodes))
1556                                 goto out_pm;
1557
1558                         pm[j].node = node;
1559                 }
1560
1561                 /* End marker for this chunk */
1562                 pm[chunk_nr_pages].node = MAX_NUMNODES;
1563
1564                 /* Migrate this chunk */
1565                 err = do_move_page_to_node_array(mm, pm,
1566                                                  flags & MPOL_MF_MOVE_ALL);
1567                 if (err < 0)
1568                         goto out_pm;
1569
1570                 /* Return status information */
1571                 for (j = 0; j < chunk_nr_pages; j++)
1572                         if (put_user(pm[j].status, status + j + chunk_start)) {
1573                                 err = -EFAULT;
1574                                 goto out_pm;
1575                         }
1576         }
1577         err = 0;
1578
1579 out_pm:
1580         free_page((unsigned long)pm);
1581 out:
1582         return err;
1583 }
1584
1585 /*
1586  * Determine the nodes of an array of pages and store them in an array of status values.
1587  */
1588 static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
1589                                 const void __user **pages, int *status)
1590 {
1591         unsigned long i;
1592
1593         down_read(&mm->mmap_sem);
1594
1595         for (i = 0; i < nr_pages; i++) {
1596                 unsigned long addr = (unsigned long)(*pages);
1597                 struct vm_area_struct *vma;
1598                 struct page *page;
1599                 int err = -EFAULT;
1600
1601                 vma = find_vma(mm, addr);
1602                 if (!vma || addr < vma->vm_start)
1603                         goto set_status;
1604
1605                 /* FOLL_DUMP to ignore special (like zero) pages */
1606                 page = follow_page(vma, addr, FOLL_DUMP);
1607
1608                 err = PTR_ERR(page);
1609                 if (IS_ERR(page))
1610                         goto set_status;
1611
1612                 err = page ? page_to_nid(page) : -ENOENT;
1613 set_status:
1614                 *status = err;
1615
1616                 pages++;
1617                 status++;
1618         }
1619
1620         up_read(&mm->mmap_sem);
1621 }
1622
1623 /*
1624  * Determine the nodes of a user array of pages and store them in
1625  * a user array of status values.
1626  */
1627 static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
1628                          const void __user * __user *pages,
1629                          int __user *status)
1630 {
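        /*
         * Stat the pages in small chunks so the scratch buffers can live on
         * the stack: with 16 entries the two arrays below stay under 200
         * bytes on a 64-bit kernel.
         */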
1631 #define DO_PAGES_STAT_CHUNK_NR 16
1632         const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
1633         int chunk_status[DO_PAGES_STAT_CHUNK_NR];
1634
1635         while (nr_pages) {
1636                 unsigned long chunk_nr;
1637
1638                 chunk_nr = nr_pages;
1639                 if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
1640                         chunk_nr = DO_PAGES_STAT_CHUNK_NR;
1641
1642                 if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
1643                         break;
1644
1645                 do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
1646
1647                 if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
1648                         break;
1649
1650                 pages += chunk_nr;
1651                 status += chunk_nr;
1652                 nr_pages -= chunk_nr;
1653         }
1654         return nr_pages ? -EFAULT : 0;
1655 }
1656
1657 /*
1658  * Move a list of pages in the address space of the currently executing
1659  * process.
1660  */
1661 SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
1662                 const void __user * __user *, pages,
1663                 const int __user *, nodes,
1664                 int __user *, status, int, flags)
1665 {
1666         const struct cred *cred = current_cred(), *tcred;
1667         struct task_struct *task;
1668         struct mm_struct *mm;
1669         int err;
1670         nodemask_t task_nodes;
1671
1672         /* Check flags */
1673         if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
1674                 return -EINVAL;
1675
1676         if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1677                 return -EPERM;
1678
1679         /* Find the mm_struct */
1680         rcu_read_lock();
1681         task = pid ? find_task_by_vpid(pid) : current;
1682         if (!task) {
1683                 rcu_read_unlock();
1684                 return -ESRCH;
1685         }
1686         get_task_struct(task);
1687
1688         /*
1689          * Check if this process has the right to modify the specified
1690          * process. The right exists if the process has administrative
1691          * capabilities, superuser privileges or the same
1692          * userid as the target process.
1693          */
1694         tcred = __task_cred(task);
1695         if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
1696             !uid_eq(cred->uid,  tcred->suid) && !uid_eq(cred->uid,  tcred->uid) &&
1697             !capable(CAP_SYS_NICE)) {
1698                 rcu_read_unlock();
1699                 err = -EPERM;
1700                 goto out;
1701         }
1702         rcu_read_unlock();
1703
1704         err = security_task_movememory(task);
1705         if (err)
1706                 goto out;
1707
1708         task_nodes = cpuset_mems_allowed(task);
1709         mm = get_task_mm(task);
1710         put_task_struct(task);
1711
1712         if (!mm)
1713                 return -EINVAL;
1714
1715         if (nodes)
1716                 err = do_pages_move(mm, task_nodes, nr_pages, pages,
1717                                     nodes, status, flags);
1718         else
1719                 err = do_pages_stat(mm, nr_pages, pages, status);
1720
1721         mmput(mm);
1722         return err;
1723
1724 out:
1725         put_task_struct(task);
1726         return err;
1727 }
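/*
 * Illustrative userspace caller, not part of this file; the addresses, node
 * numbers and the use of libnuma's <numaif.h> wrapper are example choices.
 * With nodes == NULL the call only reports which node each page is on; with
 * a nodes array it attempts the migration:
 *
 *	#include <numaif.h>
 *
 *	void *page = ...;			(page-aligned address)
 *	int node = 1, status;
 *
 *	move_pages(0, 1, &page, NULL, &status, 0);
 *	move_pages(0, 1, &page, &node, &status, MPOL_MF_MOVE);
 */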
1728
1729 #ifdef CONFIG_NUMA_BALANCING
1730 /*
1731  * Returns true if this is a safe migration target node for misplaced NUMA
1732  * pages. Currently it only checks the watermarks, which is a crude check.
1733  */
1734 static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
1735                                    unsigned long nr_migrate_pages)
1736 {
1737         int z;
1738
1739         if (!pgdat_reclaimable(pgdat))
1740                 return false;
1741
1742         for (z = pgdat->nr_zones - 1; z >= 0; z--) {
1743                 struct zone *zone = pgdat->node_zones + z;
1744
1745                 if (!populated_zone(zone))
1746                         continue;
1747
1748                 /* Avoid waking kswapd by allocating nr_migrate_pages pages. */
1749                 if (!zone_watermark_ok(zone, 0,
1750                                        high_wmark_pages(zone) +
1751                                        nr_migrate_pages,
1752                                        0, 0))
1753                         continue;
1754                 return true;
1755         }
1756         return false;
1757 }
1758
1759 static struct page *alloc_misplaced_dst_page(struct page *page,
1760                                            unsigned long data,
1761                                            int **result)
1762 {
1763         int nid = (int) data;
1764         struct page *newpage;
1765
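        /*
         * The replacement must come from the target node and the allocation
         * must not stall the faulting task: no reclaim, no retries and no
         * warning if the node simply has nothing free.  Failing the
         * migration is preferable to blocking here.
         */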
1766         newpage = __alloc_pages_node(nid,
1767                                          (GFP_HIGHUSER_MOVABLE |
1768                                           __GFP_THISNODE | __GFP_NOMEMALLOC |
1769                                           __GFP_NORETRY | __GFP_NOWARN) &
1770                                          ~__GFP_RECLAIM, 0);
1771
1772         return newpage;
1773 }
1774
1775 /*
1776  * Page migration rate limiting control.
1777  * Do not migrate more than @ratelimit_pages in a @migrate_interval_millisecs
1778  * window of time. The defaults below allow at most 1280MB per second.
1779  */
1780 static unsigned int migrate_interval_millisecs __read_mostly = 100;
1781 static unsigned int ratelimit_pages __read_mostly = 128 << (20 - PAGE_SHIFT);
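/*
 * Independent of PAGE_SIZE this is 128MB worth of pages per 100ms window,
 * i.e. the quoted 1280MB per second: (128 << (20 - PAGE_SHIFT)) pages of
 * 2^PAGE_SHIFT bytes each is always 128 << 20 bytes.
 */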
1782
1783 /* Returns true if the node is migrate rate-limited after the update */
1784 static bool numamigrate_update_ratelimit(pg_data_t *pgdat,
1785                                         unsigned long nr_pages)
1786 {
1787         /*
1788          * Rate-limit the amount of data that is being migrated to a node.
1789          * Optimal placement is no good if the memory bus is saturated and
1790          * all the time is being spent migrating!
1791          */
1792         if (time_after(jiffies, pgdat->numabalancing_migrate_next_window)) {
1793                 spin_lock(&pgdat->numabalancing_migrate_lock);
1794                 pgdat->numabalancing_migrate_nr_pages = 0;
1795                 pgdat->numabalancing_migrate_next_window = jiffies +
1796                         msecs_to_jiffies(migrate_interval_millisecs);
1797                 spin_unlock(&pgdat->numabalancing_migrate_lock);
1798         }
1799         if (pgdat->numabalancing_migrate_nr_pages > ratelimit_pages) {
1800                 trace_mm_numa_migrate_ratelimit(current, pgdat->node_id,
1801                                                                 nr_pages);
1802                 return true;
1803         }
1804
1805         /*
1806          * This is an unlocked non-atomic update so errors are possible.
1807          * The consequence is failing to migrate when we potentially should
1808          * have, which is not severe enough to warrant locking. If it is ever
1809          * a problem, it can be converted to a per-cpu counter.
1810          */
1811         pgdat->numabalancing_migrate_nr_pages += nr_pages;
1812         return false;
1813 }
1814
1815 static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
1816 {
1817         int page_lru;
1818
1819         VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page);
1820
1821         /* Avoid migrating to a node that is nearly full */
1822         if (!migrate_balanced_pgdat(pgdat, 1UL << compound_order(page)))
1823                 return 0;
1824
1825         if (isolate_lru_page(page))
1826                 return 0;
1827
1828         /*
1829          * migrate_misplaced_transhuge_page() skips page migration's usual
1830          * check on page_count(), so we must do it here, now that the page
1831          * has been isolated: a GUP pin, or any other pin, prevents migration.
1832          * The expected page count is 3: 1 for the page's mapcount, 1 for the
1833          * caller's pin and 1 for the reference taken by isolate_lru_page().
1834          */
1835         if (PageTransHuge(page) && page_count(page) != 3) {
1836                 putback_lru_page(page);
1837                 return 0;
1838         }
1839
1840         page_lru = page_is_file_cache(page);
1841         mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru,
1842                                 hpage_nr_pages(page));
1843
1844         /*
1845          * Isolating the page has taken another reference, so the
1846          * caller's reference can be safely dropped without the page
1847          * disappearing underneath us during migration.
1848          */
1849         put_page(page);
1850         return 1;
1851 }
1852
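/*
 * The THP migration path below keeps the huge page locked while the copy is
 * in flight, so a locked pmd_page() is taken as "migration in progress".
 */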
1853 bool pmd_trans_migrating(pmd_t pmd)
1854 {
1855         struct page *page = pmd_page(pmd);
1856         return PageLocked(page);
1857 }
1858
1859 /*
1860  * Attempt to migrate a misplaced page to the specified destination
1861  * node. Caller is expected to have an elevated reference count on
1862  * the page that will be dropped by this function before returning.
1863  */
1864 int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
1865                            int node)
1866 {
1867         pg_data_t *pgdat = NODE_DATA(node);
1868         int isolated;
1869         int nr_remaining;
1870         LIST_HEAD(migratepages);
1871
1872         /*
1873          * Don't migrate file pages that are mapped in multiple processes
1874          * with execute permissions as they are probably shared libraries.
1875          */
1876         if (page_mapcount(page) != 1 && page_is_file_cache(page) &&
1877             (vma->vm_flags & VM_EXEC))
1878                 goto out;
1879
1880         /*
1881          * Rate-limit the amount of data that is being migrated to a node.
1882          * Optimal placement is no good if the memory bus is saturated and
1883          * all the time is being spent migrating!
1884          */
1885         if (numamigrate_update_ratelimit(pgdat, 1))
1886                 goto out;
1887
1888         isolated = numamigrate_isolate_page(pgdat, page);
1889         if (!isolated)
1890                 goto out;
1891
1892         list_add(&page->lru, &migratepages);
1893         nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
1894                                      NULL, node, MIGRATE_ASYNC,
1895                                      MR_NUMA_MISPLACED);
1896         if (nr_remaining) {
1897                 if (!list_empty(&migratepages)) {
1898                         list_del(&page->lru);
1899                         dec_node_page_state(page, NR_ISOLATED_ANON +
1900                                         page_is_file_cache(page));
1901                         putback_lru_page(page);
1902                 }
1903                 isolated = 0;
1904         } else
1905                 count_vm_numa_event(NUMA_PAGE_MIGRATE);
1906         BUG_ON(!list_empty(&migratepages));
1907         return isolated;
1908
1909 out:
1910         put_page(page);
1911         return 0;
1912 }
1913 #endif /* CONFIG_NUMA_BALANCING */
1914
1915 #if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
1916 /*
1917  * Migrates a THP to a given target node. The page must be locked and will be
1918  * unlocked before returning.
1919  */
1920 int migrate_misplaced_transhuge_page(struct mm_struct *mm,
1921                                 struct vm_area_struct *vma,
1922                                 pmd_t *pmd, pmd_t entry,
1923                                 unsigned long address,
1924                                 struct page *page, int node)
1925 {
1926         spinlock_t *ptl;
1927         pg_data_t *pgdat = NODE_DATA(node);
1928         int isolated = 0;
1929         struct page *new_page = NULL;
1930         int page_lru = page_is_file_cache(page);
1931         unsigned long mmun_start = address & HPAGE_PMD_MASK;
1932         unsigned long mmun_end = mmun_start + HPAGE_PMD_SIZE;
1933         pmd_t orig_entry;
1934
1935         /*
1936          * Rate-limit the amount of data that is being migrated to a node.
1937          * Optimal placement is no good if the memory bus is saturated and
1938          * all the time is being spent migrating!
1939          */
1940         if (numamigrate_update_ratelimit(pgdat, HPAGE_PMD_NR))
1941                 goto out_dropref;
1942
1943         new_page = alloc_pages_node(node,
1944                 (GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
1945                 HPAGE_PMD_ORDER);
1946         if (!new_page)
1947                 goto out_fail;
1948         prep_transhuge_page(new_page);
1949
1950         isolated = numamigrate_isolate_page(pgdat, page);
1951         if (!isolated) {
1952                 put_page(new_page);
1953                 goto out_fail;
1954         }
1955         /*
1956          * We are not sure whether a pending TLB flush here is for a huge page
1957          * mapping or not, hence use the range variant of the flush.
1958          */
1959         if (mm_tlb_flush_pending(mm))
1960                 flush_tlb_range(vma, mmun_start, mmun_end);
1961
1962         /* Prepare a page as a migration target */
1963         __SetPageLocked(new_page);
1964         __SetPageSwapBacked(new_page);
1965
1966         /* anon mapping, we can simply copy page->mapping to the new page: */
1967         new_page->mapping = page->mapping;
1968         new_page->index = page->index;
1969         migrate_page_copy(new_page, page);
1970         WARN_ON(PageLRU(new_page));
1971
1972         /* Recheck the target PMD */
1973         mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
1974         ptl = pmd_lock(mm, pmd);
1975         if (unlikely(!pmd_same(*pmd, entry) || page_count(page) != 2)) {
1976 fail_putback:
1977                 spin_unlock(ptl);
1978                 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1979
1980                 /* Reverse changes made by migrate_page_copy() */
1981                 if (TestClearPageActive(new_page))
1982                         SetPageActive(page);
1983                 if (TestClearPageUnevictable(new_page))
1984                         SetPageUnevictable(page);
1985
1986                 unlock_page(new_page);
1987                 put_page(new_page);             /* Free it */
1988
1989                 /* Retake the caller's reference and put the page back on the LRU */
1990                 get_page(page);
1991                 putback_lru_page(page);
1992                 mod_node_page_state(page_pgdat(page),
1993                          NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR);
1994
1995                 goto out_unlock;
1996         }
1997
1998         orig_entry = *pmd;
1999         entry = mk_huge_pmd(new_page, vma->vm_page_prot);
2000         entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
2001
2002         /*
2003          * Clear the old entry under the pagetable lock and establish the new PMD.
2004          * Any parallel GUP will either observe the old page blocking on the
2005          * page lock, block on the page table lock or observe the new page.
2006          * The SetPageUptodate() on the new page and page_add_anon_rmap()
2007          * guarantee the copy is visible before the pagetable update.
2008          */
2009         flush_cache_range(vma, mmun_start, mmun_end);
2010         page_add_anon_rmap(new_page, vma, mmun_start, true);
2011         pmdp_huge_clear_flush_notify(vma, mmun_start, pmd);
2012         set_pmd_at(mm, mmun_start, pmd, entry);
2013         update_mmu_cache_pmd(vma, address, &entry);
2014
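        /*
         * Re-check for new references (e.g. a racing get_user_pages()) taken
         * since the page was isolated; if any exist, undo the new mapping
         * and fall back to the original page.
         */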
2015         if (page_count(page) != 2) {
2016                 set_pmd_at(mm, mmun_start, pmd, orig_entry);
2017                 flush_pmd_tlb_range(vma, mmun_start, mmun_end);
2018                 mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
2019                 update_mmu_cache_pmd(vma, address, &entry);
2020                 page_remove_rmap(new_page, true);
2021                 goto fail_putback;
2022         }
2023
2024         mlock_migrate_page(new_page, page);
2025         page_remove_rmap(page, true);
2026         set_page_owner_migrate_reason(new_page, MR_NUMA_MISPLACED);
2027
2028         spin_unlock(ptl);
2029         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
2030
2031         /* Take an "isolate" reference and put new page on the LRU. */
2032         get_page(new_page);
2033         putback_lru_page(new_page);
2034
2035         unlock_page(new_page);
2036         unlock_page(page);
2037         put_page(page);                 /* Drop the rmap reference */
2038         put_page(page);                 /* Drop the LRU isolation reference */
2039
2040         count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR);
2041         count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR);
2042
2043         mod_node_page_state(page_pgdat(page),
2044                         NR_ISOLATED_ANON + page_lru,
2045                         -HPAGE_PMD_NR);
2046         return isolated;
2047
2048 out_fail:
2049         count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
2050 out_dropref:
2051         ptl = pmd_lock(mm, pmd);
2052         if (pmd_same(*pmd, entry)) {
2053                 entry = pmd_modify(entry, vma->vm_page_prot);
2054                 set_pmd_at(mm, mmun_start, pmd, entry);
2055                 update_mmu_cache_pmd(vma, address, &entry);
2056         }
2057         spin_unlock(ptl);
2058
2059 out_unlock:
2060         unlock_page(page);
2061         put_page(page);
2062         return 0;
2063 }
2064 #endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE */
2065
2066 #endif /* CONFIG_NUMA */