Merge branch 'akpm-current/current'

diff --git a/include/linux/mm.h b/include/linux/mm.h
index fb27946baf54575513cf61c5f4323c4aff5a387b..5ac1cea7750b434620c5a1263f52f60060ab4d5a 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -413,20 +413,25 @@ static inline void compound_unlock_irqrestore(struct page *page,
 #endif
 }
 
+static inline struct page *compound_head_by_tail(struct page *tail)
+{
+       struct page *head = tail->first_page;
+
+       /*
+        * page->first_page may be a dangling pointer to an old
+        * compound page, so recheck that it is still a tail
+        * page before returning.
+        */
+       smp_rmb();
+       if (likely(PageTail(tail)))
+               return head;
+       return tail;
+}
+
 static inline struct page *compound_head(struct page *page)
 {
-       if (unlikely(PageTail(page))) {
-               struct page *head = page->first_page;
-
-               /*
-                * page->first_page may be a dangling pointer to an old
-                * compound page, so recheck that it is still a tail
-                * page before returning.
-                */
-               smp_rmb();
-               if (likely(PageTail(page)))
-                       return head;
-       }
+       if (unlikely(PageTail(page)))
+               return compound_head_by_tail(page);
        return page;
 }
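
The hunk above only moves the tail-page recheck into the new compound_head_by_tail()
helper; compound_head() keeps its semantics for callers. A minimal, hypothetical
caller (not part of this patch) showing why resolving the head matters, e.g. because
the reference count of a compound page lives in its head page, roughly what
page_count() does:

/*
 * Hypothetical caller, for illustration only: a possibly-tail page must be
 * resolved through compound_head() before touching the head-page refcount.
 */
static inline int example_page_refcount(struct page *page)
{
        struct page *head = compound_head(page);

        return atomic_read(&head->_count);
}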
 
@@ -1104,10 +1109,18 @@ void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
  * @pte_entry: if set, called for each non-empty PTE (4th-level) entry
  * @pte_hole: if set, called for each hole at all levels
  * @hugetlb_entry: if set, called for each hugetlb entry
- *                *Caution*: The caller must hold mmap_sem() if @hugetlb_entry
- *                           is used.
+ * @test_walk: caller specific callback function to determine whether
+ *             we walk over the current vma or not. A positive returned
+ *             value means "do page table walk over the current vma,"
+ *             and a negative one means "abort current page table walk
+ *             right now." 0 means "skip the current vma."
+ * @mm:        mm_struct representing the target process of page table walk
+ * @vma:       vma currently walked
+ * @skip:      internal control flag which is set when we skip the lower
+ *             level entries.
+ * @private:   private data for callbacks' use
  *
- * (see walk_page_range for more details)
+ * (see the comment on walk_page_range() for more details)
  */
 struct mm_walk {
        int (*pgd_entry)(pgd_t *pgd, unsigned long addr,
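
The @test_walk return convention documented above (positive: walk the vma,
0: skip it, negative: abort the whole walk) is easiest to see in a callback.
A hypothetical sketch, not taken from this series:

/*
 * Hypothetical ->test_walk callback: walk only anonymous vmas, skip
 * file-backed ones, and abort if a VM_PFNMAP vma is encountered.
 */
static int anon_only_test_walk(unsigned long addr, unsigned long next,
                               struct mm_walk *walk)
{
        struct vm_area_struct *vma = walk->vma;

        if (vma->vm_flags & VM_PFNMAP)
                return -EINVAL;         /* negative: abort the whole walk */
        if (vma->vm_file)
                return 0;               /* zero: skip this vma, keep walking */
        return 1;                       /* positive: walk this vma's page tables */
}
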
@@ -1120,15 +1133,19 @@ struct mm_walk {
                         unsigned long next, struct mm_walk *walk);
        int (*pte_hole)(unsigned long addr, unsigned long next,
                        struct mm_walk *walk);
-       int (*hugetlb_entry)(pte_t *pte, unsigned long hmask,
-                            unsigned long addr, unsigned long next,
-                            struct mm_walk *walk);
+       int (*hugetlb_entry)(pte_t *pte, unsigned long addr,
+                       unsigned long next, struct mm_walk *walk);
+       int (*test_walk)(unsigned long addr, unsigned long next,
+                       struct mm_walk *walk);
        struct mm_struct *mm;
+       struct vm_area_struct *vma;
+       int skip;
        void *private;
 };
 
 int walk_page_range(unsigned long addr, unsigned long end,
                struct mm_walk *walk);
+int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk);
 void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
                unsigned long end, unsigned long floor, unsigned long ceiling);
 int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
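
A sketch of driving the new walk_page_vma() entry point, assuming the caller
already holds mmap_sem for the target mm; the pte_entry callback and the
counting helper below are invented names, for illustration only:

/* Hypothetical pte_entry callback: count present ptes in one vma. */
static int count_present_pte(pte_t *pte, unsigned long addr,
                             unsigned long next, struct mm_walk *walk)
{
        unsigned long *count = walk->private;

        if (pte_present(*pte))
                (*count)++;
        return 0;
}

static unsigned long count_present_pages(struct vm_area_struct *vma)
{
        unsigned long count = 0;
        struct mm_walk walk = {
                .pte_entry      = count_present_pte,
                .mm             = vma->vm_mm,
                .private        = &count,
        };

        /* The caller is expected to hold mmap_sem of vma->vm_mm. */
        walk_page_vma(vma, &walk);
        return count;
}

Unlike walk_page_range(), the single-vma variant lets callers that already
have a vma in hand avoid re-looking it up, and the walker fills in walk->vma
before invoking the callbacks.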