mm: make the vma list be doubly linked
author    Linus Torvalds <torvalds@linux-foundation.org>
          Fri, 20 Aug 2010 23:24:55 +0000 (16:24 -0700)
committer Greg Kroah-Hartman <gregkh@suse.de>
          Thu, 26 Aug 2010 23:41:44 +0000 (16:41 -0700)
commit 297c5eee372478fc32fec5fe8eed711eedb13f3d upstream.

It's a really simple list, and several of the users want to go backwards
in it to find the previous vma.  So rather than have to look up the
previous entry with 'find_vma_prev()' or something similar, just make it
doubly linked instead.

Tested-by: Ian Campbell <ijc@hellion.org.uk>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
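
For illustration only, here is a minimal userspace sketch (not kernel code) of
what the change buys: with only vm_next, finding a vma's predecessor means
re-walking the list from the head (roughly what find_vma_prev() has to do),
while a vm_prev back pointer makes it a single dereference.  The struct and
helper below merely mirror the kernel's names; the link helper follows the
same update pattern as __vma_link_list() in the patch.

#include <stdio.h>

struct vma {				/* stand-in for struct vm_area_struct */
	unsigned long vm_start;
	struct vma *vm_next;
	struct vma *vm_prev;		/* the new back pointer */
};

/* Link @vma into the list right after @prev (or at the head if !prev). */
static void vma_link(struct vma **head, struct vma *prev, struct vma *vma)
{
	struct vma *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		next = *head;
		*head = vma;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;	/* keep the back link consistent */
}

int main(void)
{
	struct vma a = { .vm_start = 0x1000 };
	struct vma b = { .vm_start = 0x2000 };
	struct vma c = { .vm_start = 0x3000 };
	struct vma *head = NULL;

	vma_link(&head, NULL, &a);
	vma_link(&head, &a, &b);
	vma_link(&head, &b, &c);

	/* O(1) predecessor lookup instead of walking from the head. */
	printf("prev of %#lx is %#lx\n",
	       c.vm_start, c.vm_prev ? c.vm_prev->vm_start : 0UL);
	return 0;
}
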
include/linux/mm_types.h
kernel/fork.c
mm/mmap.c
mm/nommu.c

index 84a524afb3dcdffdd60c7ef1eaf2672acd7731ca..9d12ed56bfbc6116db691245a311ae313bce2e2e 100644 (file)
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -138,7 +138,7 @@ struct vm_area_struct {
                                           within vm_mm. */
 
        /* linked list of VM areas per task, sorted by address */
-       struct vm_area_struct *vm_next;
+       struct vm_area_struct *vm_next, *vm_prev;
 
        pgprot_t vm_page_prot;          /* Access permissions of this VMA. */
        unsigned long vm_flags;         /* Flags, see mm.h. */
index ce2f5859933fd0d73565b31bee7fdd7cbbbd923d..9f3b066bf7357285f5606b4f75bad115215bd7ab 100644 (file)
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -277,7 +277,7 @@ out:
 #ifdef CONFIG_MMU
 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 {
-       struct vm_area_struct *mpnt, *tmp, **pprev;
+       struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
        struct rb_node **rb_link, *rb_parent;
        int retval;
        unsigned long charge;
@@ -305,6 +305,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
        if (retval)
                goto out;
 
+       prev = NULL;
        for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
                struct file *file;
 
@@ -333,7 +334,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
                vma_set_policy(tmp, pol);
                tmp->vm_flags &= ~VM_LOCKED;
                tmp->vm_mm = mm;
-               tmp->vm_next = NULL;
+               tmp->vm_next = tmp->vm_prev = NULL;
                anon_vma_link(tmp);
                file = tmp->vm_file;
                if (file) {
@@ -367,6 +368,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
                 */
                *pprev = tmp;
                pprev = &tmp->vm_next;
+               tmp->vm_prev = prev;
+               prev = tmp;
 
                __vma_link_rb(mm, tmp, rb_link, rb_parent);
                rb_link = &tmp->vm_rb.rb_right;
index ae197468b352bbdcecfaeb04cd0ebce73382aaef..b309c75b497a7ef7f1824239a963a180a7db2518 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -389,17 +389,23 @@ static inline void
 __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
                struct vm_area_struct *prev, struct rb_node *rb_parent)
 {
+       struct vm_area_struct *next;
+
+       vma->vm_prev = prev;
        if (prev) {
-               vma->vm_next = prev->vm_next;
+               next = prev->vm_next;
                prev->vm_next = vma;
        } else {
                mm->mmap = vma;
                if (rb_parent)
-                       vma->vm_next = rb_entry(rb_parent,
+                       next = rb_entry(rb_parent,
                                        struct vm_area_struct, vm_rb);
                else
-                       vma->vm_next = NULL;
+                       next = NULL;
        }
+       vma->vm_next = next;
+       if (next)
+               next->vm_prev = vma;
 }
 
 void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -487,7 +493,11 @@ static inline void
 __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
                struct vm_area_struct *prev)
 {
-       prev->vm_next = vma->vm_next;
+       struct vm_area_struct *next = vma->vm_next;
+
+       prev->vm_next = next;
+       if (next)
+               next->vm_prev = prev;
        rb_erase(&vma->vm_rb, &mm->mm_rb);
        if (mm->mmap_cache == vma)
                mm->mmap_cache = prev;
@@ -1798,6 +1808,7 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
        unsigned long addr;
 
        insertion_point = (prev ? &prev->vm_next : &mm->mmap);
+       vma->vm_prev = NULL;
        do {
                rb_erase(&vma->vm_rb, &mm->mm_rb);
                mm->map_count--;
@@ -1805,6 +1816,8 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
                vma = vma->vm_next;
        } while (vma && vma->vm_start < end);
        *insertion_point = vma;
+       if (vma)
+               vma->vm_prev = prev;
        tail_vma->vm_next = NULL;
        if (mm->unmap_area == arch_unmap_area)
                addr = prev ? prev->vm_end : mm->mmap_base;
index 9876fa0c3ad30e75d842f965ac4417e66be6f07a..ebb3154d84e40182e977cc938a3d2f8e344e6d79 100644 (file)
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -608,7 +608,7 @@ static void protect_vma(struct vm_area_struct *vma, unsigned long flags)
  */
 static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
 {
-       struct vm_area_struct *pvma, **pp;
+       struct vm_area_struct *pvma, **pp, *next;
        struct address_space *mapping;
        struct rb_node **p, *parent;
 
@@ -668,8 +668,11 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
                        break;
        }
 
-       vma->vm_next = *pp;
+       next = *pp;
        *pp = vma;
+       vma->vm_next = next;
+       if (next)
+               next->vm_prev = vma;
 }
 
 /*