thp: fix page_referenced to modify mapcount/vm_flags only if page is found
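page_referenced_one() used to set *mapcount = 0, mark VM_LOCKED in *vm_flags and apply the swap-token heuristic before calling page_check_address()/page_check_address_pmd(). Since rmap can return false positives, those checks may find no mapping at this address, in which case mapcount and vm_flags must be left untouched. Move the VM_LOCKED early-exit and the swap-token check after the address checks so they only run once the mapping is confirmed. While at it, drop the pmd_trans_splitting() test in the THP branch: per the new comment, it is fine to clear the young bit even while the pmd is being split.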
index f21f4a1d6a1ce144d2ce45c30123eb2010f93bb8..941bf82e896128b618284ae17a25d963c13838fc 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -497,41 +497,51 @@ int page_referenced_one(struct page *page, struct vm_area_struct *vma,
        struct mm_struct *mm = vma->vm_mm;
        int referenced = 0;
 
-       /*
-        * Don't want to elevate referenced for mlocked page that gets this far,
-        * in order that it progresses to try_to_unmap and is moved to the
-        * unevictable list.
-        */
-       if (vma->vm_flags & VM_LOCKED) {
-               *mapcount = 0;  /* break early from loop */
-               *vm_flags |= VM_LOCKED;
-               goto out;
-       }
-
-       /* Pretend the page is referenced if the task has the
-          swap token and is in the middle of a page fault. */
-       if (mm != current->mm && has_swap_token(mm) &&
-                       rwsem_is_locked(&mm->mmap_sem))
-               referenced++;
-
        if (unlikely(PageTransHuge(page))) {
                pmd_t *pmd;
 
                spin_lock(&mm->page_table_lock);
+               /*
+                * rmap might return false positives; we must filter
+                * these out using page_check_address_pmd().
+                */
                pmd = page_check_address_pmd(page, mm, address,
                                             PAGE_CHECK_ADDRESS_PMD_FLAG);
-               if (pmd && !pmd_trans_splitting(*pmd) &&
-                   pmdp_clear_flush_young_notify(vma, address, pmd))
+               if (!pmd) {
+                       spin_unlock(&mm->page_table_lock);
+                       goto out;
+               }
+
+               if (vma->vm_flags & VM_LOCKED) {
+                       spin_unlock(&mm->page_table_lock);
+                       *mapcount = 0;  /* break early from loop */
+                       *vm_flags |= VM_LOCKED;
+                       goto out;
+               }
+
+               /* go ahead even if the pmd is pmd_trans_splitting() */
+               if (pmdp_clear_flush_young_notify(vma, address, pmd))
                        referenced++;
                spin_unlock(&mm->page_table_lock);
        } else {
                pte_t *pte;
                spinlock_t *ptl;
 
+               /*
+                * rmap might return false positives; we must filter
+                * these out using page_check_address().
+                */
                pte = page_check_address(page, mm, address, &ptl, 0);
                if (!pte)
                        goto out;
 
+               if (vma->vm_flags & VM_LOCKED) {
+                       pte_unmap_unlock(pte, ptl);
+                       *mapcount = 0;  /* break early from loop */
+                       *vm_flags |= VM_LOCKED;
+                       goto out;
+               }
+
                if (ptep_clear_flush_young_notify(vma, address, pte)) {
                        /*
                         * Don't treat a reference through a sequentially read
@@ -546,6 +556,12 @@ int page_referenced_one(struct page *page, struct vm_area_struct *vma,
                pte_unmap_unlock(pte, ptl);
        }
 
+       /* Pretend the page is referenced if the task has the
+          swap token and is in the middle of a page fault. */
+       if (mm != current->mm && has_swap_token(mm) &&
+                       rwsem_is_locked(&mm->mmap_sem))
+               referenced++;
+
        (*mapcount)--;
 
        if (referenced)
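
Below is a minimal userspace sketch (not kernel code) of the control flow this patch establishes: the mapping check comes first, and *mapcount/*vm_flags are only touched once that check succeeds. check_address(), struct vma and the mapped_here flag are illustrative stand-ins for page_check_address()/page_check_address_pmd(), the real vm_area_struct and the rmap lookup result; the page table lock is elided.

#include <stdbool.h>
#include <stdio.h>

#define VM_LOCKED 0x1u

struct vma { unsigned int vm_flags; };

/*
 * Stand-in for page_check_address()/page_check_address_pmd(): rmap can
 * report false positives, so this may legitimately fail even though
 * rmap pointed us at this vma/address.
 */
static bool check_address(bool mapped_here)
{
	return mapped_here;
}

static int referenced_one(struct vma *vma, bool mapped_here,
			  int *mapcount, unsigned int *vm_flags)
{
	int referenced = 0;

	/* First confirm the page is really mapped at this address ... */
	if (!check_address(mapped_here))
		return 0;	/* found nothing: leave mapcount/vm_flags alone */

	/* ... only then honour VM_LOCKED and break out of the caller's loop */
	if (vma->vm_flags & VM_LOCKED) {
		*mapcount = 0;	/* break early from loop */
		*vm_flags |= VM_LOCKED;
		return 0;
	}

	referenced++;		/* stands in for *_clear_flush_young_notify() */
	(*mapcount)--;
	return referenced;
}

int main(void)
{
	struct vma vma = { .vm_flags = 0 };
	int mapcount = 2;
	unsigned int vm_flags = 0;

	/* rmap false positive: mapcount must stay untouched. */
	referenced_one(&vma, false, &mapcount, &vm_flags);
	printf("after false positive: mapcount=%d\n", mapcount);	/* 2 */

	/* Real mapping: mapcount drops as usual. */
	referenced_one(&vma, true, &mapcount, &vm_flags);
	printf("after real mapping:  mapcount=%d\n", mapcount);	/* 1 */
	return 0;
}

The first call models the bug the patch fixes: before this change, a false positive would still have zeroed mapcount (via the VM_LOCKED path) or bumped referenced (via the swap-token path) despite no mapping being found.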