git.karo-electronics.de Git - karo-tx-linux.git/blobdiff - mm/khugepaged.c
mm, thp: copying user pages must schedule on collapse
[karo-tx-linux.git] / mm / khugepaged.c
index 77ae3239c3de17bfbf7ba29b56a5cb270611cfd8..945fd1ca49b5af0bc3b87dbfe8098f0f602775a2 100644 (file)
@@ -2,6 +2,8 @@
 
 #include <linux/mm.h>
 #include <linux/sched.h>
+#include <linux/sched/mm.h>
+#include <linux/sched/coredump.h>
 #include <linux/mmu_notifier.h>
 #include <linux/rmap.h>
 #include <linux/swap.h>
@@ -420,7 +422,7 @@ int __khugepaged_enter(struct mm_struct *mm)
        list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
        spin_unlock(&khugepaged_mm_lock);
 
-       atomic_inc(&mm->mm_count);
+       mmgrab(mm);
        if (wakeup)
                wake_up_interruptible(&khugepaged_wait);
 
@@ -481,8 +483,7 @@ void __khugepaged_exit(struct mm_struct *mm)
 
 static void release_pte_page(struct page *page)
 {
-       /* 0 stands for page_is_file_cache(page) == false */
-       dec_node_page_state(page, NR_ISOLATED_ANON + 0);
+       dec_node_page_state(page, NR_ISOLATED_ANON + page_is_file_cache(page));
        unlock_page(page);
        putback_lru_page(page);
 }
@@ -530,7 +531,6 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 
                VM_BUG_ON_PAGE(PageCompound(page), page);
                VM_BUG_ON_PAGE(!PageAnon(page), page);
-               VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
 
                /*
                 * We can do it before isolate_lru_page because the
@@ -548,7 +548,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
                 * The page must only be referenced by the scanned process
                 * and page swap cache.
                 */
-               if (page_count(page) != 1 + !!PageSwapCache(page)) {
+               if (page_count(page) != 1 + PageSwapCache(page)) {
                        unlock_page(page);
                        result = SCAN_PAGE_COUNT;
                        goto out;
@@ -577,8 +577,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
                        result = SCAN_DEL_PAGE_LRU;
                        goto out;
                }
-               /* 0 stands for page_is_file_cache(page) == false */
-               inc_node_page_state(page, NR_ISOLATED_ANON + 0);
+               inc_node_page_state(page,
+                               NR_ISOLATED_ANON + page_is_file_cache(page));
                VM_BUG_ON_PAGE(!PageLocked(page), page);
                VM_BUG_ON_PAGE(PageLRU(page), page);
 
@@ -612,7 +612,8 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
                                      spinlock_t *ptl)
 {
        pte_t *_pte;
-       for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) {
+       for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
+                               _pte++, page++, address += PAGE_SIZE) {
                pte_t pteval = *_pte;
                struct page *src_page;
 
@@ -651,9 +652,7 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
                        spin_unlock(ptl);
                        free_page_and_swap_cache(src_page);
                }
-
-               address += PAGE_SIZE;
-               page++;
+               cond_resched();
        }
 }
 
@@ -907,8 +906,10 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
                                return false;
                        }
                        /* check if the pmd is still valid */
-                       if (mm_find_pmd(mm, address) != pmd)
+                       if (mm_find_pmd(mm, address) != pmd) {
+                               trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
                                return false;
+                       }
                }
                if (ret & VM_FAULT_ERROR) {
                        trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
@@ -1181,7 +1182,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
                 * The page must only be referenced by the scanned process
                 * and page swap cache.
                 */
-               if (page_count(page) != 1 + !!PageSwapCache(page)) {
+               if (page_count(page) != 1 + PageSwapCache(page)) {
                        result = SCAN_PAGE_COUNT;
                        goto out_unmap;
                }