git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
smaps: redefine callback functions for page table walker
Author: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Thu, 22 May 2014 00:42:40 +0000 (10:42 +1000)
Committer: Stephen Rothwell <sfr@canb.auug.org.au>
Thu, 22 May 2014 00:42:40 +0000 (10:42 +1000)
smaps_pte_range(), registered as the pmd_entry() callback, currently handles
both the pmd-level loop and the pte-level loop.  This patch moves the pte
part into smaps_pte(), registered as pte_entry(), so each callback does what
its name suggests.

Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Matt Mackall <mpm@selenic.com>
Cc: Cliff Wickman <cpw@sgi.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
Cc: Pavel Emelyanov <xemul@parallels.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
fs/proc/task_mmu.c

index 442177b1119a4528b08210a56f5188b3ef1f2a20..45aaf761617216ab2bb3ef8c61cadc7c7d3c8c61 100644 (file)
@@ -424,7 +424,6 @@ const struct file_operations proc_tid_maps_operations = {
 
 #ifdef CONFIG_PROC_PAGE_MONITOR
 struct mem_size_stats {
-       struct vm_area_struct *vma;
        unsigned long resident;
        unsigned long shared_clean;
        unsigned long shared_dirty;
@@ -438,15 +437,16 @@ struct mem_size_stats {
        u64 pss;
 };
 
-
-static void smaps_pte_entry(pte_t ptent, unsigned long addr,
-               unsigned long ptent_size, struct mm_walk *walk)
+static int smaps_pte(pte_t *pte, unsigned long addr, unsigned long end,
+                       struct mm_walk *walk)
 {
        struct mem_size_stats *mss = walk->private;
-       struct vm_area_struct *vma = mss->vma;
+       struct vm_area_struct *vma = walk->vma;
        pgoff_t pgoff = linear_page_index(vma, addr);
        struct page *page = NULL;
        int mapcount;
+       pte_t ptent = *pte;
+       unsigned long ptent_size = end - addr;
 
        if (pte_present(ptent)) {
                page = vm_normal_page(vma, addr, ptent);
@@ -463,7 +463,7 @@ static void smaps_pte_entry(pte_t ptent, unsigned long addr,
        }
 
        if (!page)
-               return;
+               return 0;
 
        if (PageAnon(page))
                mss->anonymous += ptent_size;
@@ -489,35 +489,22 @@ static void smaps_pte_entry(pte_t ptent, unsigned long addr,
                        mss->private_clean += ptent_size;
                mss->pss += (ptent_size << PSS_SHIFT);
        }
+       return 0;
 }
 
-static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
-                          struct mm_walk *walk)
+static int smaps_pmd(pmd_t *pmd, unsigned long addr, unsigned long end,
+                       struct mm_walk *walk)
 {
        struct mem_size_stats *mss = walk->private;
-       struct vm_area_struct *vma = mss->vma;
-       pte_t *pte;
        spinlock_t *ptl;
 
-       if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
-               smaps_pte_entry(*(pte_t *)pmd, addr, HPAGE_PMD_SIZE, walk);
+       if (pmd_trans_huge_lock(pmd, walk->vma, &ptl) == 1) {
+               smaps_pte((pte_t *)pmd, addr, addr + HPAGE_PMD_SIZE, walk);
                spin_unlock(ptl);
                mss->anonymous_thp += HPAGE_PMD_SIZE;
-               return 0;
+               /* don't call smaps_pte() */
+               walk->skip = 1;
        }
-
-       if (pmd_trans_unstable(pmd))
-               return 0;
-       /*
-        * The mmap_sem held all the way back in m_start() is what
-        * keeps khugepaged out of here and from collapsing things
-        * in here.
-        */
-       pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
-       for (; addr != end; pte++, addr += PAGE_SIZE)
-               smaps_pte_entry(*pte, addr, PAGE_SIZE, walk);
-       pte_unmap_unlock(pte - 1, ptl);
-       cond_resched();
        return 0;
 }
 
@@ -582,16 +569,16 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
        struct vm_area_struct *vma = v;
        struct mem_size_stats mss;
        struct mm_walk smaps_walk = {
-               .pmd_entry = smaps_pte_range,
+               .pmd_entry = smaps_pmd,
+               .pte_entry = smaps_pte,
                .mm = vma->vm_mm,
+               .vma = vma,
                .private = &mss,
        };
 
        memset(&mss, 0, sizeof mss);
-       mss.vma = vma;
        /* mmap_sem is held in m_start */
-       if (vma->vm_mm && !is_vm_hugetlb_page(vma))
-               walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
+       walk_page_vma(vma, &smaps_walk);
 
        show_map_vma(m, vma, is_pid);