thp: pmd_trans_huge migrate bugcheck
diff --git a/mm/migrate.c b/mm/migrate.c
index 6ae8a66a704575764ecf068c3f9a02bcc0a14575..1a531b760b3bedb3b3395f95e4d1f2d1fbf8b4b5 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -113,6 +113,8 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
                        goto out;
 
                pmd = pmd_offset(pud, addr);
+               if (pmd_trans_huge(*pmd))
+                       goto out;
                if (!pmd_present(*pmd))
                        goto out;
 
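The hunk above is the bugcheck named in the subject: remove_migration_pte() must
never walk into a transparent huge pmd, because migration entries are only
instantiated after split_huge_page() has run. A trans-huge pmd still passes the
pmd_present() test, so without the early bailout the walk would hand a 2MB
mapping to pte_offset_map() and misread it as a page-table page. A sketch of
the hazard (illustrative, not the surrounding kernel code):

    pmd = pmd_offset(pud, addr);
    if (!pmd_present(*pmd))           /* does not filter huge pmds */
            goto out;
    ptep = pte_offset_map(pmd, addr); /* bogus for a 2MB mapping */

Besides this bugcheck, the blobdiff also carries the int-to-bool conversion of
the offlining flag and the new sync parameter threaded through the migration
API, annotated below.
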
@@ -614,7 +616,7 @@ static int move_to_new_page(struct page *newpage, struct page *page,
  * to the newly allocated page in newpage.
  */
 static int unmap_and_move(new_page_t get_new_page, unsigned long private,
-                       struct page *page, int force, int offlining)
+                       struct page *page, int force, bool offlining, bool sync)
 {
        int rc = 0;
        int *result = NULL;
@@ -632,6 +634,9 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
                /* page was freed from under us. So we are done. */
                goto move_newpage;
        }
+       if (unlikely(PageTransHuge(page)))
+               if (unlikely(split_huge_page(page)))
+                       goto move_newpage;
 
        /* prepare cgroup just returns 0 or -ENOMEM */
        rc = -EAGAIN;
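unmap_and_move() itself only migrates base pages, so a THP is split before
anything else happens; split_huge_page() returns nonzero when it cannot split,
and the page is then abandoned for this pass. The invariant this establishes,
written as a hypothetical assertion (not part of the diff):

    /* hypothetical: would hold everywhere past the split above */
    VM_BUG_ON(PageTransHuge(page));
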
@@ -639,6 +644,23 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
        if (!trylock_page(page)) {
                if (!force)
                        goto move_newpage;
+
+               /*
+                * It's not safe for direct compaction to call lock_page.
+                * For example, during page readahead pages are added locked
+                * to the LRU. Later, when the IO completes the pages are
+                * marked uptodate and unlocked. However, the queueing
+                * could be merging multiple pages for one bio (e.g.
+                * mpage_readpages). If an allocation happens for the
+                * second or third page, the process can end up locking
+                * the same page twice and deadlocking. Rather than
+                * trying to be clever about what pages can be locked,
+                * avoid the use of lock_page for direct compaction
+                * altogether.
+                */
+               if (current->flags & PF_MEMALLOC)
+                       goto move_newpage;
+
                lock_page(page);
        }
 
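The new PF_MEMALLOC test is how migration detects that it was entered from
direct reclaim or direct compaction. The readahead scenario in the comment,
written out as one possible call chain (an illustrative sketch):

    /*
     * mpage_readpages()
     *   add_to_page_cache_lru(page A)      A is added locked
     *   allocate page B
     *     -> direct compaction (PF_MEMALLOC is set)
     *        -> migrate_pages() picks page A off the LRU
     *           -> lock_page(A)            A is held by us: deadlock
     */
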
@@ -665,7 +687,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
        BUG_ON(charge);
 
        if (PageWriteback(page)) {
-               if (!force)
+               if (!force || !sync)
                        goto uncharge;
                wait_on_page_writeback(page);
        }
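Waiting on writeback is the slow path that makes forced migration stall on IO;
with the new test, only a synchronous and forced caller will sleep here. The
resulting policy, restated with comments (a sketch of the same block):

    if (PageWriteback(page)) {
            if (!force || !sync)          /* async caller: back off,  */
                    goto uncharge;        /* page is retried next pass */
            wait_on_page_writeback(page); /* sync caller: wait it out  */
    }
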
@@ -810,7 +832,7 @@ move_newpage:
  */
 static int unmap_and_move_huge_page(new_page_t get_new_page,
                                unsigned long private, struct page *hpage,
-                               int force, int offlining)
+                               int force, bool offlining, bool sync)
 {
        int rc = 0;
        int *result = NULL;
@@ -824,7 +846,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
        rc = -EAGAIN;
 
        if (!trylock_page(hpage)) {
-               if (!force)
+               if (!force || !sync)
                        goto out;
                lock_page(hpage);
        }
@@ -892,7 +914,8 @@ out:
  * Return: Number of pages not migrated or error code.
  */
 int migrate_pages(struct list_head *from,
-               new_page_t get_new_page, unsigned long private, int offlining)
+               new_page_t get_new_page, unsigned long private, bool offlining,
+               bool sync)
 {
        int retry = 1;
        int nr_failed = 0;
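The int offlining flag becomes bool and gains a sync companion, and since C has
no default arguments every caller must be updated in the same change. Two
hypothetical call sites after the conversion (allocator names and variables are
assumptions for illustration, not taken from this diff):

    /* direct compaction migrates asynchronously, backing off early */
    nr = migrate_pages(&cc->migratepages, compaction_alloc,
                       (unsigned long)cc, false, false);

    /* memory hot-remove must empty the range, so fully synchronous */
    err = migrate_pages(&source, hotremove_alloc, 0, true, true);
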
@@ -912,7 +935,8 @@ int migrate_pages(struct list_head *from,
                        cond_resched();
 
                        rc = unmap_and_move(get_new_page, private,
-                                               page, pass > 2, offlining);
+                                               page, pass > 2, offlining,
+                                               sync);
 
                        switch(rc) {
                        case -ENOMEM:
@@ -941,7 +965,8 @@ out:
 }
 
 int migrate_huge_pages(struct list_head *from,
-               new_page_t get_new_page, unsigned long private, int offlining)
+               new_page_t get_new_page, unsigned long private, bool offlining,
+               bool sync)
 {
        int retry = 1;
        int nr_failed = 0;
@@ -957,7 +982,8 @@ int migrate_huge_pages(struct list_head *from,
                        cond_resched();
 
                        rc = unmap_and_move_huge_page(get_new_page,
-                                       private, page, pass > 2, offlining);
+                                       private, page, pass > 2, offlining,
+                                       sync);
 
                        switch(rc) {
                        case -ENOMEM:
@@ -1042,7 +1068,7 @@ static int do_move_page_to_node_array(struct mm_struct *mm,
                if (!vma || pp->addr < vma->vm_start || !vma_migratable(vma))
                        goto set_status;
 
-               page = follow_page(vma, pp->addr, FOLL_GET);
+               page = follow_page(vma, pp->addr, FOLL_GET|FOLL_SPLIT);
 
                err = PTR_ERR(page);
                if (IS_ERR(page))
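do_move_page_to_node_array() migrates one base page at a time, so follow_page()
is now asked to break up any THP it meets at pp->addr. What the two flags
request, per the gup semantics of this kernel (summary):

    /*
     * FOLL_GET   - take a reference on the returned page
     * FOLL_SPLIT - if the address is mapped by a transparent huge
     *              page, split it first and return the base page,
     *              so this path never hands a THP to migrate_pages()
     */
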
@@ -1090,7 +1116,7 @@ set_status:
        err = 0;
        if (!list_empty(&pagelist)) {
                err = migrate_pages(&pagelist, new_page_node,
-                               (unsigned long)pm, 0);
+                               (unsigned long)pm, 0, true);
                if (err)
                        putback_lru_pages(&pagelist);
        }
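
This call site sits under the move_pages(2) system call, which is synchronous
from userspace, hence sync is true while offlining stays false (written as 0
here). A minimal userspace counterpart, assuming libnuma's numaif.h is
available and node 1 exists (build with -lnuma):

    #include <numaif.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            void *page = aligned_alloc(4096, 4096);
            void *pages[1] = { page };
            int nodes[1]   = { 1 };      /* assumes node 1 exists */
            int status[1];

            *(volatile char *)page = 0;  /* fault the page in first */
            if (move_pages(0 /* self */, 1, pages, nodes, status,
                           MPOL_MF_MOVE) == 0)
                    printf("page now on node %d\n", status[0]);
            return 0;
    }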