Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/egtvedt...
diff --git a/mm/migrate.c b/mm/migrate.c
index 344cdf692fc8060b20022a559dd222fded2a6e12..f98067e5d3533832cf3468b00c08868cdcf34e08 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -178,37 +178,6 @@ out:
        return SWAP_AGAIN;
 }
 
-/*
- * Congratulations to trinity for discovering this bug.
- * mm/fremap.c's remap_file_pages() accepts any range within a single vma to
- * convert that vma to VM_NONLINEAR; and generic_file_remap_pages() will then
- * replace the specified range by file ptes throughout (maybe populated after).
- * If page migration finds a page within that range, while it's still located
- * by vma_interval_tree rather than lost to i_mmap_nonlinear list, no problem:
- * zap_pte() clears the temporary migration entry before mmap_sem is dropped.
- * But if the migrating page is in a part of the vma outside the range to be
- * remapped, then it will not be cleared, and remove_migration_ptes() needs to
- * deal with it.  Fortunately, this part of the vma is of course still linear,
- * so we just need to use linear location on the nonlinear list.
- */
-static int remove_linear_migration_ptes_from_nonlinear(struct page *page,
-               struct address_space *mapping, void *arg)
-{
-       struct vm_area_struct *vma;
-       /* hugetlbfs does not support remap_pages, so no huge pgoff worries */
-       pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
-       unsigned long addr;
-
-       list_for_each_entry(vma,
-               &mapping->i_mmap_nonlinear, shared.nonlinear) {
-
-               addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
-               if (addr >= vma->vm_start && addr < vma->vm_end)
-                       remove_migration_pte(page, vma, addr, arg);
-       }
-       return SWAP_AGAIN;
-}
-
 /*
  * Get rid of all migration entries and replace them by
  * references to the indicated page.
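
The helper deleted above existed for one corner case: remap_file_pages() could make part of a file vma non-linear, moving the whole vma onto the i_mmap_nonlinear list, while pages mapped outside the remapped range stayed linear and so still had a computable address. Now that remap_file_pages() is emulated with ordinary mappings, every vma stays in the vma_interval_tree and the fallback is dead code. A minimal sketch of the address arithmetic it relied on, pulled out of kernel context (the standalone functions and the PAGE_SHIFT value are assumptions for illustration):

	#define PAGE_SHIFT 12	/* assumed: 4kB pages */

	/*
	 * Inverse of linear_page_index(): turn a page's file offset into
	 * the virtual address where a linear vma maps it.
	 */
	static unsigned long linear_addr(unsigned long vm_start,
					 unsigned long vm_pgoff,
					 unsigned long pgoff)
	{
		return vm_start + ((pgoff - vm_pgoff) << PAGE_SHIFT);
	}

	/*
	 * The page may not be mapped by this vma at all; pgoff is unsigned,
	 * so a page below vm_pgoff wraps around and also fails this check,
	 * exactly as in the removed list_for_each_entry() loop.
	 */
	static int addr_in_vma(unsigned long addr,
			       unsigned long vm_start, unsigned long vm_end)
	{
		return addr >= vm_start && addr < vm_end;
	}
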
@@ -218,7 +187,6 @@ static void remove_migration_ptes(struct page *old, struct page *new)
        struct rmap_walk_control rwc = {
                .rmap_one = remove_migration_pte,
                .arg = old,
-               .file_nonlinear = remove_linear_migration_ptes_from_nonlinear,
        };
 
        rmap_walk(new, &rwc);
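
With non-linear vmas gone, rmap_walk() reaches every mapping of a file page through the vma_interval_tree (and anon pages through their anon_vma chain), so a walk needs nothing beyond .rmap_one. A sketch of driving the same API (the callback and wrapper names are made up; struct rmap_walk_control, rmap_walk() and SWAP_AGAIN are the interfaces this file already uses):

	#include <linux/rmap.h>

	/* illustrative callback: invoked once per virtual mapping of @page */
	static int note_one_mapping(struct page *page, struct vm_area_struct *vma,
				    unsigned long addr, void *arg)
	{
		/* inspect or rewrite the pte mapping @page at @addr here */
		return SWAP_AGAIN;	/* keep walking the reverse map */
	}

	static void walk_mappings(struct page *page)
	{
		struct rmap_walk_control rwc = {
			.rmap_one = note_one_mapping,
			.arg = NULL,
		};

		rmap_walk(page, &rwc);
	}
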
@@ -229,7 +197,7 @@ static void remove_migration_ptes(struct page *old, struct page *new)
  * get to the page and wait until migration is finished.
  * When we return from this function the fault will be retried.
  */
-static void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
+void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
                                spinlock_t *ptl)
 {
        pte_t pte;
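
Dropping static here goes with an extern declaration in include/linux/swapops.h: a caller outside mm/migrate.c that has already located the pte and its lock can sleep until migration finishes, as the hugetlb page-table walk of this era does. A sketch of that calling pattern (the wrapper is illustrative; is_swap_pte(), is_migration_entry() and pte_to_swp_entry() are real swapops.h helpers):

	#include <linux/swapops.h>

	/* call with @ptl held; returns with @ptl released either way */
	static void wait_if_migration_entry(struct mm_struct *mm,
					    pte_t *ptep, spinlock_t *ptl)
	{
		pte_t pte = *ptep;

		if (is_swap_pte(pte) &&
		    is_migration_entry(pte_to_swp_entry(pte))) {
			spin_unlock(ptl);
			/*
			 * __migration_entry_wait() retakes @ptl itself,
			 * revalidates the pte, then sleeps until migration
			 * unlocks the page; it drops the lock before
			 * returning.
			 */
			__migration_entry_wait(mm, ptep, ptl);
			return;
		}
		spin_unlock(ptl);
	}
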
@@ -1268,7 +1236,8 @@ static int do_move_page_to_node_array(struct mm_struct *mm,
                        goto put_and_set;
 
                if (PageHuge(page)) {
-                       isolate_huge_page(page, &pagelist);
+                       if (PageHead(page))
+                               isolate_huge_page(page, &pagelist);
                        goto put_and_set;
                }
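
follow_page() with FOLL_GET can hand back a tail subpage of a hugetlb page here, but isolate_huge_page() isolates whole compound pages and expects the head page, so only heads are queued for migration and tails fall through to put_and_set. A sketch of the resulting rule (the wrapper is illustrative; PageHuge(), PageHead() and isolate_huge_page() are the real helpers):

	#include <linux/hugetlb.h>
	#include <linux/page-flags.h>

	/* queue a hugetlb page for migration only via its head page */
	static bool maybe_isolate_hugetlb(struct page *page,
					  struct list_head *pagelist)
	{
		if (!PageHuge(page) || !PageHead(page))
			return false;	/* not hugetlb, or a tail subpage */
		/* moves the whole compound page onto @pagelist on success */
		return isolate_huge_page(page, pagelist);
	}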