diff --git a/mm/mmap.c b/mm/mmap.c
index b179abb1474ae41bff47060b5241045d3b1b12ad..2ec8eb5a9cdd0b4ae2e20471858bd4e09d83af00 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -29,6 +29,7 @@
 #include <linux/mmu_notifier.h>
 #include <linux/perf_event.h>
 #include <linux/audit.h>
+#include <linux/khugepaged.h>
 
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
@@ -253,7 +254,15 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
        down_write(&mm->mmap_sem);
 
 #ifdef CONFIG_COMPAT_BRK
-       min_brk = mm->end_code;
+       /*
+        * CONFIG_COMPAT_BRK can still be overridden by setting
+        * randomize_va_space to 2, which will still cause mm->start_brk
+        * to be arbitrarily shifted
+        */
+       if (mm->start_brk > PAGE_ALIGN(mm->end_data))
+               min_brk = mm->start_brk;
+       else
+               min_brk = mm->end_data;
 #else
        min_brk = mm->start_brk;
 #endif
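
The COMPAT_BRK hunk above picks the lower bound for brk() at runtime: when heap randomization (randomize_va_space == 2) has pushed mm->start_brk above the page-aligned end of the data segment, the randomized base becomes the floor; otherwise the legacy end-of-data floor is kept. The else branch also raises the legacy floor from mm->end_code to mm->end_data. A minimal userspace sketch of that decision, with a hypothetical mm_demo struct standing in for mm_struct and illustrative addresses:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    struct mm_demo {                  /* hypothetical stand-in for mm_struct */
        unsigned long end_data;       /* end of the data segment */
        unsigned long start_brk;      /* heap base, possibly randomized */
    };

    static unsigned long compat_min_brk(const struct mm_demo *mm)
    {
        /* randomize_va_space == 2 shifts start_brk above the
         * page-aligned end of data; honor the shifted base */
        if (mm->start_brk > PAGE_ALIGN(mm->end_data))
            return mm->start_brk;
        return mm->end_data;          /* legacy COMPAT_BRK layout */
    }

    int main(void)
    {
        struct mm_demo legacy =     { .end_data = 0x601000, .start_brk = 0x601000 };
        struct mm_demo randomized = { .end_data = 0x601000, .start_brk = 0x7f3000 };

        printf("legacy min_brk:     %#lx\n", compat_min_brk(&legacy));
        printf("randomized min_brk: %#lx\n", compat_min_brk(&randomized));
        return 0;
    }
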
@@ -588,6 +597,8 @@ again:                      remove_next = 1 + (end > next->vm_end);
                }
        }
 
+       vma_adjust_trans_huge(vma, start, end, adjust_next);
+
        /*
         * When changing only vma->vm_end, we don't really need anon_vma
         * lock. This is a fairly rare case by itself, but the anon_vma
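
vma_adjust_trans_huge() is called here, before the VMA boundaries actually move, so that any transparent huge pmd straddling a new boundary can be split back to regular pages first. A hedged, self-contained sketch of the boundary test this implies; split_huge_region() is a hypothetical stand-in, not a kernel API:

    #include <stdio.h>

    #define HPAGE_PMD_SIZE (2UL << 20)              /* 2 MiB pmd huge page */
    #define HPAGE_PMD_MASK (~(HPAGE_PMD_SIZE - 1))

    /* hypothetical stand-in for splitting the huge pmd covering haddr */
    static void split_huge_region(unsigned long haddr)
    {
        printf("split huge pmd at %#lx\n", haddr);
    }

    /* If the new boundary 'addr' is not huge-page aligned and the
     * huge-page-sized region containing it lies entirely inside the
     * VMA, a transparent huge pmd may be mapped there and would
     * straddle the boundary, so it has to be split first. */
    static void check_boundary(unsigned long vm_start, unsigned long vm_end,
                               unsigned long addr)
    {
        if ((addr & ~HPAGE_PMD_MASK) &&
            (addr & HPAGE_PMD_MASK) >= vm_start &&
            (addr & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vm_end)
            split_huge_region(addr & HPAGE_PMD_MASK);
    }

    int main(void)
    {
        /* new end 0x501000 falls inside the huge page at 0x400000 */
        check_boundary(0x200000, 0x600000, 0x501000);
        return 0;
    }
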
@@ -815,6 +826,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
                                end, prev->vm_pgoff, NULL);
                if (err)
                        return NULL;
+               khugepaged_enter_vma_merge(prev);
                return prev;
        }
 
@@ -833,6 +845,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
                                next->vm_pgoff - pglen, NULL);
                if (err)
                        return NULL;
+               khugepaged_enter_vma_merge(area);
                return area;
        }
 
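Both merge paths above now notify khugepaged: once neighbouring VMAs have been merged, the enlarged anonymous area may for the first time have room for an aligned huge page, making the mm worth scanning. A sketch of the eligibility test this suggests, assuming the usual 2 MiB pmd huge page; names other than HPAGE_PMD_SIZE/HPAGE_PMD_MASK are illustrative:

    #include <stdbool.h>
    #include <stdio.h>

    #define HPAGE_PMD_SIZE (2UL << 20)              /* 2 MiB on x86-64 */
    #define HPAGE_PMD_MASK (~(HPAGE_PMD_SIZE - 1))

    /* does [vm_start, vm_end) contain at least one naturally
     * aligned, huge-page-sized region? */
    static bool spans_aligned_hugepage(unsigned long vm_start,
                                       unsigned long vm_end)
    {
        unsigned long hstart = (vm_start + HPAGE_PMD_SIZE - 1) & HPAGE_PMD_MASK;
        unsigned long hend = vm_end & HPAGE_PMD_MASK;

        return hstart < hend;   /* room for one full, aligned huge page */
    }

    int main(void)
    {
        /* 4 MiB VMA starting on a 2 MiB boundary: eligible */
        printf("%d\n", spans_aligned_hugepage(0x200000, 0x600000));
        /* 1 MiB VMA: too small to hold an aligned 2 MiB page */
        printf("%d\n", spans_aligned_hugepage(0x200000, 0x300000));
        return 0;
    }
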
@@ -1761,6 +1774,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
                }
        }
        vma_unlock_anon_vma(vma);
+       khugepaged_enter_vma_merge(vma);
        return error;
 }
 #endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
@@ -1808,6 +1822,7 @@ static int expand_downwards(struct vm_area_struct *vma,
                }
        }
        vma_unlock_anon_vma(vma);
+       khugepaged_enter_vma_merge(vma);
        return error;
 }
 
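The stack-growth paths get the same notification for the same reason: expand_upwards() and expand_downwards() enlarge an existing anonymous VMA, which may newly satisfy the test sketched above. For example, a stack covering [0x7e0000, 0x800000) rounds to hstart == hend == 0x800000 and is ineligible, but after growing down to [0x500000, 0x800000) it spans the aligned huge page at 0x600000, so registering the mm with khugepaged becomes worthwhile.
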
@@ -2462,6 +2477,7 @@ int install_special_mapping(struct mm_struct *mm,
                            unsigned long addr, unsigned long len,
                            unsigned long vm_flags, struct page **pages)
 {
+       int ret;
        struct vm_area_struct *vma;
 
        vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
@@ -2479,16 +2495,23 @@ int install_special_mapping(struct mm_struct *mm,
        vma->vm_ops = &special_mapping_vmops;
        vma->vm_private_data = pages;
 
-       if (unlikely(insert_vm_struct(mm, vma))) {
-               kmem_cache_free(vm_area_cachep, vma);
-               return -ENOMEM;
-       }
+       ret = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
+       if (ret)
+               goto out;
+
+       ret = insert_vm_struct(mm, vma);
+       if (ret)
+               goto out;
 
        mm->total_vm += len >> PAGE_SHIFT;
 
        perf_event_mmap(vma);
 
        return 0;
+
+out:
+       kmem_cache_free(vm_area_cachep, vma);
+       return ret;
 }
 
 static DEFINE_MUTEX(mm_all_locks_mutex);
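
The reworked install_special_mapping() error handling replaces the single insert_vm_struct() check with a security_file_mmap() hook call plus a shared out: label, so every failure path frees the VMA in one place and propagates the real error code instead of a hardcoded -ENOMEM. A minimal sketch of this single-exit cleanup idiom; all names here are illustrative, not kernel APIs:

    #include <stdlib.h>

    struct resource { int dummy; };

    static int check_policy(struct resource *r)      { (void)r; return 0; }
    static int register_resource(struct resource *r) { (void)r; return 0; }

    int install_resource(void)
    {
        int ret;
        struct resource *r = calloc(1, sizeof(*r));

        if (!r)
            return -1;

        ret = check_policy(r);          /* analogous to security_file_mmap() */
        if (ret)
            goto out;

        ret = register_resource(r);     /* analogous to insert_vm_struct() */
        if (ret)
            goto out;

        return 0;

    out:
        free(r);    /* one cleanup site shared by every failure path */
        return ret;
    }

The pattern keeps each step's failure handling to two lines while guaranteeing the cleanup site stays in sync as further steps are added.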