thp: transparent hugepage vmstat
author     Andrea Arcangeli <aarcange@redhat.com>
           Thu, 13 Jan 2011 23:46:58 +0000 (15:46 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Fri, 14 Jan 2011 01:32:43 +0000 (17:32 -0800)
Add hugepage stat information to /proc/vmstat and /proc/meminfo.
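
For illustration only (not part of the commit): a minimal user-space sketch
that reads the two counters this patch exposes.  The field names
"AnonHugePages" (in /proc/meminfo) and "nr_anon_transparent_hugepages"
(in /proc/vmstat) come from the hunks below; the reader program itself is
an assumption about how the counters might be consumed.

	/* thp_stat_read.c - hypothetical consumer of the new counters */
	#include <stdio.h>

	/* scan a /proc file line by line for a single "%lu" pattern */
	static unsigned long scan(const char *path, const char *fmt)
	{
		FILE *f = fopen(path, "r");
		char line[256];
		unsigned long val = 0;

		if (!f)
			return 0;
		while (fgets(line, sizeof(line), f))
			if (sscanf(line, fmt, &val) == 1)
				break;
		fclose(f);
		return val;
	}

	int main(void)
	{
		/* meminfo reports kB; vmstat reports whole huge pages */
		printf("AnonHugePages: %lu kB\n",
		       scan("/proc/meminfo", "AnonHugePages: %lu kB"));
		printf("nr_anon_transparent_hugepages: %lu\n",
		       scan("/proc/vmstat", "nr_anon_transparent_hugepages %lu"));
		return 0;
	}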

Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
fs/proc/meminfo.c
include/linux/mmzone.h
mm/huge_memory.c
mm/rmap.c
mm/vmstat.c

index a65239cfd97ebc9aecc3914e9f228673b46da9bb..ed257d1415687f50dc84abad0089be090a2d3f8c 100644 (file)
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -100,6 +100,9 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
                "VmallocChunk:   %8lu kB\n"
 #ifdef CONFIG_MEMORY_FAILURE
                "HardwareCorrupted: %5lu kB\n"
+#endif
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+               "AnonHugePages:  %8lu kB\n"
 #endif
                ,
                K(i.totalram),
@@ -128,7 +131,12 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
                K(i.freeswap),
                K(global_page_state(NR_FILE_DIRTY)),
                K(global_page_state(NR_WRITEBACK)),
-               K(global_page_state(NR_ANON_PAGES)),
+               K(global_page_state(NR_ANON_PAGES)
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+                 + global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
+                 HPAGE_PMD_NR
+#endif
+                 ),
                K(global_page_state(NR_FILE_MAPPED)),
                K(global_page_state(NR_SHMEM)),
                K(global_page_state(NR_SLAB_RECLAIMABLE) +
@@ -150,6 +158,10 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
                vmi.largest_chunk >> 10
 #ifdef CONFIG_MEMORY_FAILURE
                ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
+#endif
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+               ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
+                  HPAGE_PMD_NR)
 #endif
                );
 
index dad3612a7e392b4a72a056ba0e7c9bf54c5a9ffd..02ecb0189b1d9d1fcf0fff2900178045b40cedff 100644 (file)
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -114,6 +114,7 @@ enum zone_stat_item {
        NUMA_LOCAL,             /* allocation from local node */
        NUMA_OTHER,             /* allocation from other node */
 #endif
+       NR_ANON_TRANSPARENT_HUGEPAGES,
        NR_VM_ZONE_STAT_ITEMS };
 
 /*
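
The new counter counts whole transparent huge pages, not base pages; the
fs/proc/meminfo.c hunk above multiplies it by HPAGE_PMD_NR and converts
pages to kB.  A standalone sketch of that arithmetic, assuming 4 kB base
pages and HPAGE_PMD_NR == 512 (2 MB huge pages); both constants and the
sample counter value are assumptions, not part of the patch:

	/* thp_meminfo_math.c - illustrative only */
	#include <stdio.h>

	#define PAGE_SHIFT	12	/* assumed: 4 kB base pages */
	#define HPAGE_PMD_NR	512	/* assumed: base pages per 2 MB huge page */
	#define K(x)		((x) << (PAGE_SHIFT - 10))	/* pages -> kB */

	int main(void)
	{
		unsigned long nr_thp = 24;	/* hypothetical NR_ANON_TRANSPARENT_HUGEPAGES */

		/* mirrors K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) * HPAGE_PMD_NR) */
		printf("AnonHugePages: %8lu kB\n", K(nr_thp * HPAGE_PMD_NR));	/* prints 49152 */
		return 0;
	}
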
index a313403b3c5ecb589bcd78183ecb64c3bfbd6c54..7101112a54296b311bfe3723630237e4a138290a 100644 (file)
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -751,6 +751,9 @@ static void __split_huge_page_refcount(struct page *page)
                lru_add_page_tail(zone, page, page_tail);
        }
 
+       __dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
+       __mod_zone_page_state(zone, NR_ANON_PAGES, HPAGE_PMD_NR);
+
        ClearPageCompound(page);
        compound_unlock(page);
        spin_unlock_irq(&zone->lru_lock);
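
Note (not part of the commit message): combined with the meminfo.c change,
this bookkeeping keeps the AnonPages total reported to userspace unchanged
across a split: one huge page leaves NR_ANON_TRANSPARENT_HUGEPAGES while
HPAGE_PMD_NR base pages enter NR_ANON_PAGES.  A small standalone sketch of
that invariant; the counter values and HPAGE_PMD_NR == 512 are hypothetical:

	/* thp_split_invariant.c - illustrative only */
	#include <assert.h>
	#include <stdio.h>

	#define HPAGE_PMD_NR	512	/* assumed: 2 MB huge pages, 4 kB base pages */

	int main(void)
	{
		long nr_anon_pages = 1000, nr_anon_thp = 24;	/* hypothetical counters */
		long before = nr_anon_pages + nr_anon_thp * HPAGE_PMD_NR;

		/* what __split_huge_page_refcount() does to the two counters */
		nr_anon_thp   -= 1;
		nr_anon_pages += HPAGE_PMD_NR;

		/* AnonPages in /proc/meminfo sums both, so the total is stable */
		long after = nr_anon_pages + nr_anon_thp * HPAGE_PMD_NR;
		assert(before == after);
		printf("AnonPages total unchanged: %ld pages\n", after);
		return 0;
	}
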
index 92e14dcfe737ebf41a8b37fd58993bcc41035c6a..3825ae4bc32f971be5c2c3536f145f200036a0f0 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -882,8 +882,13 @@ void do_page_add_anon_rmap(struct page *page,
        struct vm_area_struct *vma, unsigned long address, int exclusive)
 {
        int first = atomic_inc_and_test(&page->_mapcount);
-       if (first)
-               __inc_zone_page_state(page, NR_ANON_PAGES);
+       if (first) {
+               if (!PageTransHuge(page))
+                       __inc_zone_page_state(page, NR_ANON_PAGES);
+               else
+                       __inc_zone_page_state(page,
+                                             NR_ANON_TRANSPARENT_HUGEPAGES);
+       }
        if (unlikely(PageKsm(page)))
                return;
 
@@ -911,7 +916,10 @@ void page_add_new_anon_rmap(struct page *page,
        VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
        SetPageSwapBacked(page);
        atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
-       __inc_zone_page_state(page, NR_ANON_PAGES);
+       if (!PageTransHuge(page))
+               __inc_zone_page_state(page, NR_ANON_PAGES);
+       else
+               __inc_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
        __page_set_anon_rmap(page, vma, address, 1);
        if (page_evictable(page, vma))
                lru_cache_add_lru(page, LRU_ACTIVE_ANON);
@@ -964,7 +972,11 @@ void page_remove_rmap(struct page *page)
                return;
        if (PageAnon(page)) {
                mem_cgroup_uncharge_page(page);
-               __dec_zone_page_state(page, NR_ANON_PAGES);
+               if (!PageTransHuge(page))
+                       __dec_zone_page_state(page, NR_ANON_PAGES);
+               else
+                       __dec_zone_page_state(page,
+                                             NR_ANON_TRANSPARENT_HUGEPAGES);
        } else {
                __dec_zone_page_state(page, NR_FILE_MAPPED);
                mem_cgroup_update_file_mapped(page, -1);
index 751a65e00aacb18853ea07feb2210fbafadc6b80..0c3b5048773e6d486f43343a859ebf785161036e 100644 (file)
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -880,6 +880,7 @@ static const char * const vmstat_text[] = {
        "numa_local",
        "numa_other",
 #endif
+       "nr_anon_transparent_hugepages",
        "nr_dirty_threshold",
        "nr_dirty_background_threshold",