mm: kill vma flag VM_INSERTPAGE
author    Konstantin Khlebnikov <khlebnikov@openvz.org>
Wed, 26 Sep 2012 01:33:10 +0000 (11:33 +1000)
committer Stephen Rothwell <sfr@canb.auug.org.au>
Thu, 27 Sep 2012 07:25:33 +0000 (17:25 +1000)
Merge VM_INSERTPAGE into VM_MIXEDMAP.  A VM_MIXEDMAP VMA can mix pure-pfn
ptes, special ptes and normal ptes.

Now copy_page_range() always copies a VM_MIXEDMAP VMA on fork, just like a
VM_PFNMAP VMA.  A driver that populates the whole VMA at mmap() time
probably does not expect page faults.

This patch removes the special check from vma_wants_writenotify() which
disabled page write tracking for VMAs populated via vm_insert_page().  The
BDI below such a mapped file should not use dirty accounting, and in any
case do_wp_page() can handle it.

vm_insert_page() still marks the vma on first use.  Usually it is called
from an f_op->mmap() handler under the mm->mmap_sem write-lock, so it is
able to change vma->vm_flags.  A caller must set VM_MIXEDMAP at mmap time
if it wants to call this function from other places, for example from a
page-fault handler.
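
A minimal sketch of how a driver could follow that rule; mydrv_mmap,
mydrv_fault, mydrv_vm_ops and the mydrv_lookup_page helper are hypothetical
names for illustration, not part of this patch:

#include <linux/fs.h>
#include <linux/mm.h>

/* Assumed driver-private helper that finds the backing page for an offset. */
static struct page *mydrv_lookup_page(struct vm_area_struct *vma, pgoff_t pgoff);

/* ->fault(): mmap_sem is only held for read here, so VM_MIXEDMAP must already be set. */
static int mydrv_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = mydrv_lookup_page(vma, vmf->pgoff);

	if (!page)
		return VM_FAULT_SIGBUS;
	/* Installs a normal pte pointing at the page. */
	if (vm_insert_page(vma, (unsigned long)vmf->virtual_address, page))
		return VM_FAULT_SIGBUS;
	return VM_FAULT_NOPAGE;
}

static const struct vm_operations_struct mydrv_vm_ops = {
	.fault	= mydrv_fault,
};

/* ->mmap(): runs under the mm->mmap_sem write-lock, so vm_flags may be changed here. */
static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_flags |= VM_MIXEDMAP;	/* allow vm_insert_page() from the fault handler */
	vma->vm_ops = &mydrv_vm_ops;
	return 0;
}

With VM_MIXEDMAP set up front, vm_insert_page() skips the flag update and
the mmap_sem write-lock assertion added in the mm/memory.c hunk below.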

Signed-off-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Carsten Otte <cotte@de.ibm.com>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: Eric Paris <eparis@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Morris <james.l.morris@oracle.com>
Cc: Jason Baron <jbaron@redhat.com>
Cc: Kentaro Takeda <takedakn@nttdata.co.jp>
Cc: Matt Helsley <matthltc@us.ibm.com>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Cc: Venkatesh Pallipadi <venki@google.com>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/mm.h
mm/huge_memory.c
mm/ksm.c
mm/memory.c
mm/mmap.c

index cf8d9e03abbca4a528c3058a78276839c4ef1fba..19ddad2bb7ab019f291bfc0f031b53d9d9eb36b5 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -103,7 +103,6 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_HUGETLB     0x00400000      /* Huge TLB Page VM */
 #define VM_NONLINEAR   0x00800000      /* Is non-linear (remap_file_pages) */
 #define VM_ARCH_1      0x01000000      /* Architecture-specific flag */
-#define VM_INSERTPAGE  0x02000000      /* The vma has had "vm_insert_page()" done on it */
 #define VM_NODUMP      0x04000000      /* Do not include in the core dump */
 
 #define VM_CAN_NONLINEAR 0x08000000    /* Has ->fault & does nonlinear pages */
index 47f272160a492a44626f0abce9d8655028463bfe..18ef053ab37f060c38ccd161a8fe8fc2d767efe7 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1558,8 +1558,7 @@ out:
        return ret;
 }
 
-#define VM_NO_THP (VM_SPECIAL|VM_INSERTPAGE|VM_MIXEDMAP| \
-                  VM_HUGETLB|VM_SHARED|VM_MAYSHARE)
+#define VM_NO_THP (VM_SPECIAL|VM_MIXEDMAP|VM_HUGETLB|VM_SHARED|VM_MAYSHARE)
 
 int hugepage_madvise(struct vm_area_struct *vma,
                     unsigned long *vm_flags, int advice)
index d1cbe2aa6b3a74dc71980b6b93650d2f75766ccc..f9ccb16559ee6620566cbb6cc2e64297546c65a6 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1469,7 +1469,7 @@ int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
                 */
                if (*vm_flags & (VM_MERGEABLE | VM_SHARED  | VM_MAYSHARE   |
                                 VM_PFNMAP    | VM_IO      | VM_DONTEXPAND |
-                                VM_RESERVED  | VM_HUGETLB | VM_INSERTPAGE |
+                                VM_RESERVED  | VM_HUGETLB |
                                 VM_NONLINEAR | VM_MIXEDMAP))
                        return 0;               /* just ignore the advice */
 
index 61fa82aa774ea04c3a7bdbec9c3e929d4dbf313e..15270f4749e1ad913293a7570d875dae0a8937e7 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1048,7 +1048,8 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
         * readonly mappings. The tradeoff is that copy_page_range is more
         * efficient than faulting.
         */
-       if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP|VM_INSERTPAGE))) {
+       if (!(vma->vm_flags & (VM_HUGETLB | VM_NONLINEAR |
+                              VM_PFNMAP | VM_MIXEDMAP))) {
                if (!vma->anon_vma)
                        return 0;
        }
@@ -2086,6 +2087,11 @@ out:
  * ask for a shared writable mapping!
  *
  * The page does not need to be reserved.
+ *
+ * Usually this function is called from f_op->mmap() handler
+ * under mm->mmap_sem write-lock, so it can change vma->vm_flags.
+ * Caller must set VM_MIXEDMAP on vma if it wants to call this
+ * function from other places, for example from page-fault handler.
  */
 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
                        struct page *page)
@@ -2094,7 +2100,11 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
                return -EFAULT;
        if (!page_count(page))
                return -EINVAL;
-       vma->vm_flags |= VM_INSERTPAGE;
+       if (!(vma->vm_flags & VM_MIXEDMAP)) {
+               BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
+               BUG_ON(vma->vm_flags & VM_PFNMAP);
+               vma->vm_flags |= VM_MIXEDMAP;
+       }
        return insert_page(vma, addr, page, vma->vm_page_prot);
 }
 EXPORT_SYMBOL(vm_insert_page);
index 872441e819141c2e93657b358fedf6204d62ac57..b0989f4d4f098c016d83b32f1641887c966ef701 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1190,7 +1190,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
                return 0;
 
        /* Specialty mapping? */
-       if (vm_flags & (VM_PFNMAP|VM_INSERTPAGE))
+       if (vm_flags & VM_PFNMAP)
                return 0;
 
        /* Can the mapping track the dirty pages? */