mm, thp: count thp_fault_fallback anytime thp fault fails
author    David Rientjes <rientjes@google.com>
          Wed, 28 Aug 2013 00:17:51 +0000 (10:17 +1000)
committer Stephen Rothwell <sfr@canb.auug.org.au>
          Thu, 12 Sep 2013 03:49:28 +0000 (13:49 +1000)
Currently, thp_fault_fallback in vmstat is incremented only when the
hugepage allocation itself fails.  If current's memcg hits its limit or
the page fault handler returns an error, the fault is incorrectly
accounted as a successful thp_fault_alloc.

Count thp_fault_fallback any time the page fault handler falls back to
using regular pages, and count thp_fault_alloc only when a hugepage has
actually been faulted in.

Signed-off-by: David Rientjes <rientjes@google.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
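
For reference, the accounting flow this patch establishes in
do_huge_pmd_anonymous_page() condenses to the sketch below (allocation
arguments elided; an illustration of the hunks that follow, not the
verbatim kernel source).  Every path that falls back to regular pages
counts THP_FAULT_FALLBACK, and THP_FAULT_ALLOC is counted only once the
hugepage is actually in place:

	page = alloc_hugepage_vma(...);
	if (unlikely(!page)) {
		/* hugepage allocation failed */
		count_vm_event(THP_FAULT_FALLBACK);
		return VM_FAULT_FALLBACK;
	}
	if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
		/* memcg hit its limit */
		put_page(page);
		count_vm_event(THP_FAULT_FALLBACK);
		return VM_FAULT_FALLBACK;
	}
	if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page))) {
		/* page fault handler returned an error */
		mem_cgroup_uncharge_page(page);
		put_page(page);
		count_vm_event(THP_FAULT_FALLBACK);
		return VM_FAULT_FALLBACK;
	}
	/* the hugepage has actually been faulted in */
	count_vm_event(THP_FAULT_ALLOC);
	return 0;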
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index d96d921064bde5143a79762ac3d915b5892d0ed3..7489884682d84a6b5840fef19e90234076fd374e 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -825,17 +825,19 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
                count_vm_event(THP_FAULT_FALLBACK);
                return VM_FAULT_FALLBACK;
        }
-       count_vm_event(THP_FAULT_ALLOC);
        if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
                put_page(page);
+               count_vm_event(THP_FAULT_FALLBACK);
                return VM_FAULT_FALLBACK;
        }
        if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page))) {
                mem_cgroup_uncharge_page(page);
                put_page(page);
+               count_vm_event(THP_FAULT_FALLBACK);
                return VM_FAULT_FALLBACK;
        }
 
+       count_vm_event(THP_FAULT_ALLOC);
        return 0;
 }
 
@@ -1148,7 +1150,6 @@ alloc:
                new_page = NULL;
 
        if (unlikely(!new_page)) {
-               count_vm_event(THP_FAULT_FALLBACK);
                if (is_huge_zero_pmd(orig_pmd)) {
                        ret = do_huge_pmd_wp_zero_page_fallback(mm, vma,
                                        address, pmd, orig_pmd, haddr);
@@ -1159,9 +1160,9 @@ alloc:
                                split_huge_page(page);
                        put_page(page);
                }
+               count_vm_event(THP_FAULT_FALLBACK);
                goto out;
        }
-       count_vm_event(THP_FAULT_ALLOC);
 
        if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
                put_page(new_page);
@@ -1169,10 +1170,13 @@ alloc:
                        split_huge_page(page);
                        put_page(page);
                }
+               count_vm_event(THP_FAULT_FALLBACK);
                ret |= VM_FAULT_OOM;
                goto out;
        }
 
+       count_vm_event(THP_FAULT_ALLOC);
+
        if (is_huge_zero_pmd(orig_pmd))
                clear_huge_page(new_page, haddr, HPAGE_PMD_NR);
        else
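
With this change, a fault that falls back to regular pages shows up in
thp_fault_fallback instead of being miscounted as thp_fault_alloc, so
the two counters can be watched from userspace.  A minimal sketch
(standalone C, reading the standard /proc/vmstat interface):

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char line[128];
		FILE *f = fopen("/proc/vmstat", "r");

		if (!f) {
			perror("/proc/vmstat");
			return 1;
		}
		/* print the thp_fault_alloc and thp_fault_fallback lines */
		while (fgets(line, sizeof(line), f))
			if (!strncmp(line, "thp_fault_", 10))
				fputs(line, stdout);
		fclose(f);
		return 0;
	}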