thp: fix page_referenced to modify mapcount/vm_flags only if page is found
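Before this change, page_referenced_one() applied the VM_LOCKED early exit and the swap-token bonus before verifying that the page is actually mapped at this address. Because rmap can return false positives, that could zero *mapcount and set VM_LOCKED in *vm_flags for a vma that does not map the page at all. With this change the pte (or pmd, for transparent hugepages) is looked up first via page_check_address() / page_check_address_pmd(), and mapcount/vm_flags are only modified once the mapping is confirmed.

A simplified sketch of the resulting control flow, regular-page path only (the THP path performs the same checks on the pmd under mm->page_table_lock). The _sketch suffix is ours and the VM_SEQ_READ special case is omitted; this is illustrative, not the verbatim kernel code:

static int page_referenced_one_sketch(struct page *page,
				      struct vm_area_struct *vma,
				      unsigned long address,
				      unsigned int *mapcount,
				      unsigned long *vm_flags)
{
	struct mm_struct *mm = vma->vm_mm;
	int referenced = 0;
	spinlock_t *ptl;
	pte_t *pte;

	/* Filter out rmap false positives before touching the counters. */
	pte = page_check_address(page, mm, address, &ptl, 0);
	if (!pte)
		return 0;	/* not mapped here: leave *mapcount and *vm_flags alone */

	if (vma->vm_flags & VM_LOCKED) {
		pte_unmap_unlock(pte, ptl);
		*mapcount = 0;		/* break early from the rmap walk */
		*vm_flags |= VM_LOCKED;
		return 0;
	}

	if (ptep_clear_flush_young_notify(vma, address, pte))
		referenced++;
	pte_unmap_unlock(pte, ptl);

	/* The swap-token holder in a page fault is credited only for a real mapping. */
	if (mm != current->mm && has_swap_token(mm) &&
	    rwsem_is_locked(&mm->mmap_sem))
		referenced++;

	(*mapcount)--;
	if (referenced)
		*vm_flags |= vma->vm_flags;
	return referenced;
}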
index 92e14dcfe737ebf41a8b37fd58993bcc41035c6a..941bf82e896128b618284ae17a25d963c13838fc 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -497,41 +497,51 @@ int page_referenced_one(struct page *page, struct vm_area_struct *vma,
        struct mm_struct *mm = vma->vm_mm;
        int referenced = 0;
 
-       /*
-        * Don't want to elevate referenced for mlocked page that gets this far,
-        * in order that it progresses to try_to_unmap and is moved to the
-        * unevictable list.
-        */
-       if (vma->vm_flags & VM_LOCKED) {
-               *mapcount = 0;  /* break early from loop */
-               *vm_flags |= VM_LOCKED;
-               goto out;
-       }
-
-       /* Pretend the page is referenced if the task has the
-          swap token and is in the middle of a page fault. */
-       if (mm != current->mm && has_swap_token(mm) &&
-                       rwsem_is_locked(&mm->mmap_sem))
-               referenced++;
-
        if (unlikely(PageTransHuge(page))) {
                pmd_t *pmd;
 
                spin_lock(&mm->page_table_lock);
+               /*
+                * rmap might return false positives; we must filter
+                * these out using page_check_address_pmd().
+                */
                pmd = page_check_address_pmd(page, mm, address,
                                             PAGE_CHECK_ADDRESS_PMD_FLAG);
-               if (pmd && !pmd_trans_splitting(*pmd) &&
-                   pmdp_clear_flush_young_notify(vma, address, pmd))
+               if (!pmd) {
+                       spin_unlock(&mm->page_table_lock);
+                       goto out;
+               }
+
+               if (vma->vm_flags & VM_LOCKED) {
+                       spin_unlock(&mm->page_table_lock);
+                       *mapcount = 0;  /* break early from loop */
+                       *vm_flags |= VM_LOCKED;
+                       goto out;
+               }
+
+               /* go ahead even if the pmd is pmd_trans_splitting() */
+               if (pmdp_clear_flush_young_notify(vma, address, pmd))
                        referenced++;
                spin_unlock(&mm->page_table_lock);
        } else {
                pte_t *pte;
                spinlock_t *ptl;
 
+               /*
+                * rmap might return false positives; we must filter
+                * these out using page_check_address().
+                */
                pte = page_check_address(page, mm, address, &ptl, 0);
                if (!pte)
                        goto out;
 
+               if (vma->vm_flags & VM_LOCKED) {
+                       pte_unmap_unlock(pte, ptl);
+                       *mapcount = 0;  /* break early from loop */
+                       *vm_flags |= VM_LOCKED;
+                       goto out;
+               }
+
                if (ptep_clear_flush_young_notify(vma, address, pte)) {
                        /*
                         * Don't treat a reference through a sequentially read
@@ -546,6 +556,12 @@ int page_referenced_one(struct page *page, struct vm_area_struct *vma,
                pte_unmap_unlock(pte, ptl);
        }
 
+       /* Pretend the page is referenced if the task has the
+          swap token and is in the middle of a page fault. */
+       if (mm != current->mm && has_swap_token(mm) &&
+                       rwsem_is_locked(&mm->mmap_sem))
+               referenced++;
+
        (*mapcount)--;
 
        if (referenced)
@@ -882,8 +898,13 @@ void do_page_add_anon_rmap(struct page *page,
        struct vm_area_struct *vma, unsigned long address, int exclusive)
 {
        int first = atomic_inc_and_test(&page->_mapcount);
-       if (first)
-               __inc_zone_page_state(page, NR_ANON_PAGES);
+       if (first) {
+               if (!PageTransHuge(page))
+                       __inc_zone_page_state(page, NR_ANON_PAGES);
+               else
+                       __inc_zone_page_state(page,
+                                             NR_ANON_TRANSPARENT_HUGEPAGES);
+       }
        if (unlikely(PageKsm(page)))
                return;
 
@@ -911,7 +932,10 @@ void page_add_new_anon_rmap(struct page *page,
        VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
        SetPageSwapBacked(page);
        atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
-       __inc_zone_page_state(page, NR_ANON_PAGES);
+       if (!PageTransHuge(page))
+               __inc_zone_page_state(page, NR_ANON_PAGES);
+       else
+               __inc_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
        __page_set_anon_rmap(page, vma, address, 1);
        if (page_evictable(page, vma))
                lru_cache_add_lru(page, LRU_ACTIVE_ANON);
@@ -929,7 +953,7 @@ void page_add_file_rmap(struct page *page)
 {
        if (atomic_inc_and_test(&page->_mapcount)) {
                __inc_zone_page_state(page, NR_FILE_MAPPED);
-               mem_cgroup_update_file_mapped(page, 1);
+               mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
        }
 }
 
@@ -964,10 +988,14 @@ void page_remove_rmap(struct page *page)
                return;
        if (PageAnon(page)) {
                mem_cgroup_uncharge_page(page);
-               __dec_zone_page_state(page, NR_ANON_PAGES);
+               if (!PageTransHuge(page))
+                       __dec_zone_page_state(page, NR_ANON_PAGES);
+               else
+                       __dec_zone_page_state(page,
+                                             NR_ANON_TRANSPARENT_HUGEPAGES);
        } else {
                __dec_zone_page_state(page, NR_FILE_MAPPED);
-               mem_cgroup_update_file_mapped(page, -1);
+               mem_cgroup_dec_page_stat(page, MEMCG_NR_FILE_MAPPED);
        }
        /*
         * It would be tidy to reset the PageAnon mapping here,
@@ -1418,7 +1446,7 @@ int try_to_unmap(struct page *page, enum ttu_flags flags)
        int ret;
 
        BUG_ON(!PageLocked(page));
-       BUG_ON(PageTransHuge(page));
+       VM_BUG_ON(!PageHuge(page) && PageTransHuge(page));
 
        if (unlikely(PageKsm(page)))
                ret = try_to_unmap_ksm(page, flags);
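
For reference, a condensed sketch of the caller side, modeled on the anonymous-page walk (page_referenced_anon()) as it looks around the time of this change; memcg filtering is omitted, the _sketch suffix is ours, and the body is illustrative rather than verbatim. It shows what the *mapcount out-parameter is for: the rmap walk stops once every mapping has been accounted for, so setting *mapcount to 0 in the VM_LOCKED case above terminates the walk immediately.

static int page_referenced_anon_sketch(struct page *page,
				       unsigned long *vm_flags)
{
	unsigned int mapcount;
	struct anon_vma *anon_vma;
	struct anon_vma_chain *avc;
	int referenced = 0;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return 0;

	mapcount = page_mapcount(page);
	list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long address = vma_address(page, vma);	/* rmap.c helper */

		if (address == -EFAULT)
			continue;
		referenced += page_referenced_one(page, vma, address,
						  &mapcount, vm_flags);
		if (!mapcount)
			break;	/* VM_LOCKED hit, or all mappings visited */
	}

	page_unlock_anon_vma(anon_vma);
	return referenced;
}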