mremap-enforce-rmap-src-dst-vma-ordering-in-case-of-vma_merge-succeeding-in-copy_vma...
author Andrea Arcangeli <aarcange@redhat.com>
Fri, 16 Dec 2011 04:49:57 +0000 (15:49 +1100)
committer Stephen Rothwell <sfr@canb.auug.org.au>
Mon, 19 Dec 2011 07:19:30 +0000 (18:19 +1100)
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Cc: Nai Xia <nai.xia@gmail.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Cc: Pawel Sikora <pluto@agmk.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/mmap.c
mm/mremap.c

index ca83bd503a41bc661067ce240d618aac04cd7051..be0d6066b7dbc1baa6535a19fc115a92997b7b72 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2355,7 +2355,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
         * If anonymous vma has not yet been faulted, update new pgoff
         * to match new location, to increase its chance of merging.
         */
-       if (!vma->vm_file && !vma->anon_vma) {
+       if (unlikely(!vma->vm_file && !vma->anon_vma)) {
                pgoff = addr >> PAGE_SHIFT;
                faulted_in_anon_vma = false;
        }
@@ -2367,8 +2367,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
                /*
                 * Source vma may have been merged into new_vma
                 */
-               if (vma_start >= new_vma->vm_start &&
-                   vma_start < new_vma->vm_end) {
+               if (unlikely(vma_start >= new_vma->vm_start &&
+                            vma_start < new_vma->vm_end)) {
                        /*
                         * The only way we can get a vma_merge with
                         * self during an mremap is if the vma hasn't
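
The mm/mmap.c hunks only mark the two copy_vma() checks as cold paths. Below is a minimal standalone sketch of the hint being added, assuming the kernel's usual definition of unlikely() as __builtin_expect(!!(x), 0); the merged_into_self() helper and the addresses are invented for illustration.

#include <stdio.h>

/* Stand-ins for the kernel's branch-prediction hints, which expand
 * to GCC's __builtin_expect(). */
#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

/* Toy version of the copy_vma() check: the "source vma was merged
 * into new_vma" case is rare, so the compiler is told to treat the
 * non-overlapping path as the hot one. */
static int merged_into_self(unsigned long vma_start,
			    unsigned long vm_start, unsigned long vm_end)
{
	if (unlikely(vma_start >= vm_start && vma_start < vm_end))
		return 1;	/* cold: vma_merge() merged src into dst */
	return 0;		/* hot: distinct source and destination */
}

int main(void)
{
	printf("%d %d\n",
	       merged_into_self(0x5000, 0x1000, 0x3000),
	       merged_into_self(0x2000, 0x1000, 0x3000));
	return 0;
}
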
index d845537287c8faf85870cd66e449dc7b6cee5323..87bb8393e7d238115a450139d24c090347c215a7 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -220,12 +220,20 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 
        moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len);
        if (moved_len < old_len) {
+               /*
+                * Before moving the page tables from the new vma to
+                * the old vma, we need to be sure the old vma is
+                * queued after new vma in the same_anon_vma list to
+                * prevent SMP races with rmap_walk (that could lead
+                * rmap_walk to miss some page table).
+                */
+               anon_vma_moveto_tail(vma);
+
                /*
                 * On error, move entries back from new area to old,
                 * which will succeed since page tables still there,
                 * and then proceed to unmap new area instead of old.
                 */
-               anon_vma_moveto_tail(vma);
                move_page_tables(new_vma, new_addr, vma, old_addr, moved_len);
                vma = new_vma;
                old_len = new_len;
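
The new comment encodes an ordering rule: before the error path moves the page tables back from new_vma to vma, the destination of that move (vma) must sit after its source (new_vma) on the anon_vma's same_anon_vma list, because rmap_walk() scans that list in order and could otherwise miss a pte that move_page_tables() shifts between its two checks. The fragment below is a toy user-space model of that rule, not kernel code; struct toy_vma, walk_with_race() and the deterministic "move between the two checks" are invented for illustration.

#include <stdbool.h>
#include <stdio.h>

/* A "vma" in this model either holds the pte or it does not. */
struct toy_vma {
	bool has_pte;
};

/*
 * Model of rmap_walk(): check the first vma on the list, let the
 * concurrent move_page_tables() run, then check the second vma.
 */
static bool walk_with_race(struct toy_vma *first, struct toy_vma *second,
			   struct toy_vma *move_src, struct toy_vma *move_dst)
{
	if (first->has_pte)
		return true;
	/* the mremap copy runs here, between the walker's two checks */
	move_src->has_pte = false;
	move_dst->has_pte = true;
	return second->has_pte;
}

int main(void)
{
	struct toy_vma src = { .has_pte = true }, dst = { .has_pte = false };

	/* Destination queued after source: the pte is found in src
	 * before it moves (and a move that happened earlier would be
	 * seen later in dst). */
	printf("src before dst: %s\n",
	       walk_with_race(&src, &dst, &src, &dst) ? "found" : "MISSED");

	src.has_pte = true;
	dst.has_pte = false;

	/* Destination queued before source: the move lands between the
	 * two checks and the walk sees the pte in neither vma. */
	printf("dst before src: %s\n",
	       walk_with_race(&dst, &src, &src, &dst) ? "found" : "MISSED");
	return 0;
}

This is the miss that calling anon_vma_moveto_tail(vma) before the reverse move_page_tables() prevents: queuing the old vma at the tail guarantees rmap_walk visits it after new_vma while the page tables are moving back.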