diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 27f7210e7fabd1441d699d328213f95302c79378..73066b80d14af70d0fdf12e2228823b84c258903 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -220,6 +220,9 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
  */
 void shake_page(struct page *p, int access)
 {
+       if (PageHuge(p))
+               return;
+
        if (!PageSlab(p)) {
                lru_add_drain_all();
                if (PageLRU(p))
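
For context: hugetlb pages sit neither on the per-CPU LRU pagevecs nor in slab caches, so the drains performed by shake_page() can never free them, and returning early also keeps the PageSlab()/PageLRU() tests from being applied to a page type they were never meant for. The complete function after this hunk reads roughly as follows; the tail is reconstructed from mainline sources of this era (~v4.12) and is not itself part of the hunk:

void shake_page(struct page *p, int access)
{
	if (PageHuge(p))
		return;			/* not on pagevecs, not in slab */

	if (!PageSlab(p)) {
		lru_add_drain_all();	/* flush per-CPU LRU pagevecs */
		if (PageLRU(p))
			return;
		drain_all_pages(page_zone(p));	/* flush per-CPU free lists */
		if (PageLRU(p) || is_free_buddy_page(p))
			return;
	}

	/*
	 * Only shrink slab caches (which could free the page indirectly)
	 * if access is not potentially fatal.
	 */
	if (access)
		drop_slab_node(page_to_nid(p));
}
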
@@ -322,7 +325,7 @@ static void add_to_kill(struct task_struct *tsk, struct page *p,
  * wrong earlier.
  */
 static void kill_procs(struct list_head *to_kill, int forcekill, int trapno,
-                         int fail, struct page *page, unsigned long pfn,
+                         bool fail, struct page *page, unsigned long pfn,
                          int flags)
 {
        struct to_kill *tk, *next;
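
This signature change is part of a series converting the SWAP_SUCCESS/SWAP_AGAIN/SWAP_FAIL return codes to plain bool. Inside kill_procs(), `fail` escalates the response: when the unmap did not fully succeed, or the faulting address is unknown, the task is force-killed with SIGKILL rather than sent a catchable SIGBUS, so it cannot touch the poisoned page again. A trimmed sketch of that consumer, reconstructed from the mainline kill_procs() of this era and not shown in the hunk:

	if (fail || tk->addr_valid == 0) {
		/* unmap failed: an uncatchable kill prevents re-access */
		pr_err("Memory failure: %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
		       pfn, tk->tsk->comm, tk->tsk->pid);
		force_sig(SIGKILL, tk->tsk);
	} else if (kill_proc(tk->tsk, tk->addr, trapno, pfn, page, flags) < 0) {
		pr_err("Memory failure: %#lx: Cannot send advisory machine check signal to %s:%d\n",
		       pfn, tk->tsk->comm, tk->tsk->pid);
	}
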
@@ -904,35 +907,36 @@ EXPORT_SYMBOL_GPL(get_hwpoison_page);
  * Do all that is necessary to remove user space mappings. Unmap
  * the pages and send SIGBUS to the processes if the data was dirty.
  */
-static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
+static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
                                  int trapno, int flags, struct page **hpagep)
 {
-       enum ttu_flags ttu = TTU_UNMAP | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
+       enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
        struct address_space *mapping;
        LIST_HEAD(tokill);
-       int ret;
+       bool unmap_success;
        int kill = 1, forcekill;
        struct page *hpage = *hpagep;
+       bool mlocked = PageMlocked(hpage);
 
        /*
         * Here we are interested only in user-mapped pages, so skip any
         * other types of pages.
         */
        if (PageReserved(p) || PageSlab(p))
-               return SWAP_SUCCESS;
+               return true;
        if (!(PageLRU(hpage) || PageHuge(p)))
-               return SWAP_SUCCESS;
+               return true;
 
        /*
         * This check implies we don't kill processes if their pages
         * are in the swap cache early. Those are always late kills.
         */
        if (!page_mapped(hpage))
-               return SWAP_SUCCESS;
+               return true;
 
        if (PageKsm(p)) {
                pr_err("Memory failure: %#lx: can't handle KSM pages.\n", pfn);
-               return SWAP_FAIL;
+               return false;
        }
 
        if (PageSwapCache(p)) {
@@ -971,11 +975,18 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
        if (kill)
                collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);
 
-       ret = try_to_unmap(hpage, ttu);
-       if (ret != SWAP_SUCCESS)
+       unmap_success = try_to_unmap(hpage, ttu);
+       if (!unmap_success)
                pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n",
                       pfn, page_mapcount(hpage));
 
+       /*
+        * try_to_unmap() might put the mlocked page into the lru cache,
+        * so call shake_page() again to ensure that it is flushed.
+        */
+       if (mlocked)
+               shake_page(hpage, 0);
+
        /*
         * Now that the dirty bit has been propagated to the
         * struct page and all unmaps done we can decide if
@@ -987,10 +998,9 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
         * any accesses to the poisoned memory.
         */
        forcekill = PageDirty(hpage) || (flags & MF_MUST_KILL);
-       kill_procs(&tokill, forcekill, trapno,
-                     ret != SWAP_SUCCESS, p, pfn, flags);
+       kill_procs(&tokill, forcekill, trapno, !unmap_success, p, pfn, flags);
 
-       return ret;
+       return unmap_success;
 }
 
 static void set_page_hwpoison_huge_page(struct page *hpage)
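
The mlocked handling above matters because munlock_vma_page(), run from try_to_unmap(), can leave the page parked on a per-CPU pagevec; without the extra shake_page() call the page would not yet be back on the LRU when later isolation logic looks for it. One way to exercise this path from user space is software poison injection with madvise(MADV_HWPOISON) on an mlocked page. The program below is an illustration, not part of this diff; it needs CAP_SYS_ADMIN and a kernel built with CONFIG_MEMORY_FAILURE:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef MADV_HWPOISON
#define MADV_HWPOISON 100	/* value from <asm-generic/mman-common.h> */
#endif

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	void *p = mmap(NULL, psz, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(p, 0xa5, psz);	/* fault the page in */
	if (mlock(p, psz)) {	/* make it an mlocked page */
		perror("mlock");
		return 1;
	}
	/*
	 * memory_failure() must unmap (and munlock) this page; the hunk
	 * above re-runs shake_page() afterwards so the munlocked page
	 * returns to the LRU instead of lingering on a pagevec.
	 */
	if (madvise(p, psz, MADV_HWPOISON))
		perror("madvise(MADV_HWPOISON)");
	return 0;
}
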
@@ -1138,22 +1148,14 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
         * The check (unnecessarily) ignores LRU pages being isolated and
         * walked by the page reclaim code, however that's not a big loss.
         */
-       if (!PageHuge(p)) {
-               if (!PageLRU(p))
-                       shake_page(p, 0);
-               if (!PageLRU(p)) {
-                       /*
-                        * shake_page could have turned it free.
-                        */
-                       if (is_free_buddy_page(p)) {
-                               if (flags & MF_COUNT_INCREASED)
-                                       action_result(pfn, MF_MSG_BUDDY, MF_DELAYED);
-                               else
-                                       action_result(pfn, MF_MSG_BUDDY_2ND,
-                                                     MF_DELAYED);
-                               return 0;
-                       }
-               }
+       shake_page(p, 0);
+       /* shake_page could have turned it free. */
+       if (!PageLRU(p) && is_free_buddy_page(p)) {
+               if (flags & MF_COUNT_INCREASED)
+                       action_result(pfn, MF_MSG_BUDDY, MF_DELAYED);
+               else
+                       action_result(pfn, MF_MSG_BUDDY_2ND, MF_DELAYED);
+               return 0;
        }
 
        lock_page(hpage);
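
With shake_page() now a no-op for hugetlb pages (first hunk), memory_failure() can call it unconditionally, and the nested !PageHuge()/!PageLRU() ladder collapses into a single free-buddy re-check. This path can also be driven by raw pfn through the hwpoison-inject debugfs interface. The sketch below is illustrative only, not part of this diff; it assumes root, the hwpoison-inject module (CONFIG_HWPOISON_INJECT) loaded, and debugfs mounted at /sys/kernel/debug:

#include <fcntl.h>
#include <inttypes.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, psz, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	uint64_t entry, pfn;
	int pm;
	FILE *f;

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	p[0] = 1;	/* fault the page in */

	/*
	 * /proc/self/pagemap: one u64 per page, pfn in bits 0-54
	 * (reading the pfn requires CAP_SYS_ADMIN).
	 */
	pm = open("/proc/self/pagemap", O_RDONLY);
	if (pm < 0 || pread(pm, &entry, sizeof(entry),
			    ((uintptr_t)p / psz) * sizeof(entry)) != sizeof(entry)) {
		perror("pagemap");
		return 1;
	}
	pfn = entry & ((1ULL << 55) - 1);

	/* Writing a pfn here ends up in memory_failure(). */
	f = fopen("/sys/kernel/debug/hwpoison/corrupt-pfn", "w");
	if (!f) {
		perror("corrupt-pfn");
		return 1;
	}
	fprintf(f, "%#" PRIx64 "\n", pfn);
	fclose(f);
	return 0;
}
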
@@ -1230,8 +1232,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
         * When the raw error page is thp tail page, hpage points to the raw
         * page after thp split.
         */
-       if (hwpoison_user_mappings(p, pfn, trapno, flags, &hpage)
-           != SWAP_SUCCESS) {
+       if (!hwpoison_user_mappings(p, pfn, trapno, flags, &hpage)) {
                action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
                res = -EBUSY;
                goto out;
@@ -1543,8 +1544,8 @@ static int get_any_page(struct page *page, unsigned long pfn, int flags)
                if (ret == 1 && !PageLRU(page)) {
                        /* Drop page reference which is from __get_any_page() */
                        put_hwpoison_page(page);
-                       pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n",
-                               pfn, page->flags);
+                       pr_info("soft_offline: %#lx: unknown non LRU page type %lx (%pGp)\n",
+                               pfn, page->flags, &page->flags);
                        return -EIO;
                }
        }
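
The remaining hunks only decorate existing messages: %pGp, added to vsprintf in v4.5, takes a pointer to page->flags and prints the set flags symbolically (e.g. "uptodate|lru|active"), which is far easier to read in logs than the raw hex value still printed alongside it. A minimal in-kernel demonstration of the specifier, assuming ordinary module boilerplate and not derived from this diff:

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/gfp.h>

static int __init pgp_demo_init(void)
{
	struct page *page = alloc_page(GFP_KERNEL);

	if (!page)
		return -ENOMEM;
	/* %pGp dereferences the pointer and decodes the flag bits */
	pr_info("flags: %#lx (%pGp)\n", page->flags, &page->flags);
	__free_page(page);
	return 0;
}

static void __exit pgp_demo_exit(void)
{
}

module_init(pgp_demo_init);
module_exit(pgp_demo_exit);
MODULE_LICENSE("GPL");

The same decoration is applied to the two soft-offline failure messages in the hunks that follow.
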
@@ -1585,8 +1586,8 @@ static int soft_offline_huge_page(struct page *page, int flags)
        ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,
                                MIGRATE_SYNC, MR_MEMORY_FAILURE);
        if (ret) {
-               pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
-                       pfn, ret, page->flags);
+               pr_info("soft offline: %#lx: migration failed %d, type %lx (%pGp)\n",
+                       pfn, ret, page->flags, &page->flags);
                /*
                 * We know that soft_offline_huge_page() tries to migrate
                 * only one hugepage pointed to by hpage, so we need not
@@ -1677,14 +1678,14 @@ static int __soft_offline_page(struct page *page, int flags)
                        if (!list_empty(&pagelist))
                                putback_movable_pages(&pagelist);
 
-                       pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
-                               pfn, ret, page->flags);
+                       pr_info("soft offline: %#lx: migration failed %d, type %lx (%pGp)\n",
+                               pfn, ret, page->flags, &page->flags);
                        if (ret > 0)
                                ret = -EIO;
                }
        } else {
-               pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
-                       pfn, ret, page_count(page), page->flags);
+               pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx (%pGp)\n",
+                       pfn, ret, page_count(page), page->flags, &page->flags);
        }
        return ret;
 }