pagewalk: add walk_page_vma()
author Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Thu, 22 May 2014 00:42:40 +0000 (10:42 +1000)
committer Stephen Rothwell <sfr@canb.auug.org.au>
Thu, 22 May 2014 00:42:40 +0000 (10:42 +1000)
Introduce walk_page_vma(), which is useful for callers that want to
walk over a single given vma.  It's used by later patches.

Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Matt Mackall <mpm@selenic.com>
Cc: Cliff Wickman <cpw@sgi.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
Cc: Pavel Emelyanov <xemul@parallels.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/mm.h
mm/pagewalk.c

diff --git a/include/linux/mm.h b/include/linux/mm.h
index d69c8a14bf81ef1694f0de65a906c827a2f01cf1..d0e96c98cc672cb909fc0a4275d768040ad49f5a 100644
@@ -1135,6 +1135,7 @@ struct mm_walk {
 
 int walk_page_range(unsigned long addr, unsigned long end,
                struct mm_walk *walk);
+int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk);
 void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
                unsigned long end, unsigned long floor, unsigned long ceiling);
 int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 20b0eb528afa98d4d120db9d3c71e702c2ce3ffd..81c1228ce67d4267582d7dd0ca4fca0f0ac01ae6 100644
@@ -335,3 +335,21 @@ int walk_page_range(unsigned long start, unsigned long end,
        } while (start = next, start < end);
        return err;
 }
+
+int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk)
+{
+       int err;
+
+       if (!walk->mm)
+               return -EINVAL;
+
+       VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));
+       VM_BUG_ON(!vma);
+       walk->vma = vma;
+       err = walk_page_test(vma->vm_start, vma->vm_end, walk);
+       if (skip_lower_level_walking(walk))
+               return 0;
+       if (err)
+               return err;
+       return __walk_page_range(vma->vm_start, vma->vm_end, walk);
+}
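
For context, a minimal sketch of how a caller might use the new helper
with the existing mm_walk callback interface.  The names count_pte and
count_present_pages are illustrative only, not part of this patch; the
sketch assumes the pte_entry callback signature already declared in
struct mm_walk:

	/*
	 * Hypothetical example: count the present ptes in one vma.
	 * Callback invoked for each pte in the range.
	 */
	static int count_pte(pte_t *pte, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
	{
		unsigned long *count = walk->private;

		if (pte_present(*pte))
			(*count)++;
		return 0;
	}

	static unsigned long count_present_pages(struct vm_area_struct *vma)
	{
		unsigned long count = 0;
		struct mm_walk walk = {
			.pte_entry = count_pte,
			.mm = vma->vm_mm,
			.private = &count,
		};

		/* Caller must hold mmap_sem, per the VM_BUG_ON above. */
		walk_page_vma(vma, &walk);
		return count;
	}

Compared with calling walk_page_range(vma->vm_start, vma->vm_end, ...)
directly, the helper sets walk->vma and applies walk_page_test() for
the caller, so per-vma users don't have to duplicate that logic.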