thp: split_huge_page_mm/vma
author     Andrea Arcangeli <aarcange@redhat.com>
           Thu, 13 Jan 2011 23:46:46 +0000 (15:46 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Fri, 14 Jan 2011 01:32:41 +0000 (17:32 -0800)
split_huge_page_pmd compat code.  Each one of these call sites would
need to be expanded to hundreds of lines of complex code without a
fully reliable split_huge_page_pmd design.
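
For context, the compat helper the message refers to is defined earlier in
the THP series along the following lines (a sketch based on
include/linux/huge_mm.h; the exact definition may differ in detail).  With
CONFIG_TRANSPARENT_HUGEPAGE enabled it splits a transparent huge pmd back
to regular ptes in place; with it disabled the call compiles away, so each
call site below stays a single line:

    /* Sketch, not the exact upstream definition: split a huge pmd
     * before a pte-level walk, or do nothing when THP is compiled out.
     */
    #ifdef CONFIG_TRANSPARENT_HUGEPAGE
    #define split_huge_page_pmd(__mm, __pmd)			\
    	do {							\
    		pmd_t *____pmd = (__pmd);			\
    		if (unlikely(pmd_trans_huge(*____pmd)))		\
    			__split_huge_page_pmd(__mm, ____pmd);	\
    	} while (0)
    #else /* !CONFIG_TRANSPARENT_HUGEPAGE */
    #define split_huge_page_pmd(__mm, __pmd)	\
    	do { } while (0)
    #endif

This is why each hunk below can drop a single split_huge_page_pmd() call
right after pmd_offset(): the subsequent pmd_none_or_clear_bad() and
pte-level code never has to handle a huge pmd itself.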

Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Acked-by: Rik van Riel <riel@redhat.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
arch/x86/kernel/vm86_32.c
mm/mempolicy.c
mm/mincore.c
mm/mprotect.c
mm/mremap.c
mm/pagewalk.c

diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 61fb985196222909dbec39750cccec5f0993b852..863f8753ab0ae696f8981ef30d9ee031dd0e310b 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -179,6 +179,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
        if (pud_none_or_clear_bad(pud))
                goto out;
        pmd = pmd_offset(pud, 0xA0000);
+       split_huge_page_pmd(mm, pmd);
        if (pmd_none_or_clear_bad(pmd))
                goto out;
        pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl);
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index e6d351265aede4c598fe70ee6a9586c1a0e91e92..83b7df309fc4937ee4de27be733018d955980221 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -514,6 +514,7 @@ static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
+               split_huge_page_pmd(vma->vm_mm, pmd);
                if (pmd_none_or_clear_bad(pmd))
                        continue;
                if (check_pte_range(vma, pmd, addr, next, nodes,
diff --git a/mm/mincore.c b/mm/mincore.c
index 9ac42dc6d7b61481b1eedb8e12b463bd151b1f92..9959bb41570e64fc446c86d186737d1ba17baeec 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -154,6 +154,7 @@ static void mincore_pmd_range(struct vm_area_struct *vma, pud_t *pud,
        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
+               split_huge_page_pmd(vma->vm_mm, pmd);
                if (pmd_none_or_clear_bad(pmd))
                        mincore_unmapped_range(vma, addr, next, vec);
                else
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 4c51338730977604266bab4e67ef0251aba69638..bd27db6b992b80c4c39d84b83d910996357727fd 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -88,6 +88,7 @@ static inline void change_pmd_range(struct mm_struct *mm, pud_t *pud,
        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
+               split_huge_page_pmd(mm, pmd);
                if (pmd_none_or_clear_bad(pmd))
                        continue;
                change_pte_range(mm, pmd, addr, next, newprot, dirty_accountable);
diff --git a/mm/mremap.c b/mm/mremap.c
index b09eefaea0b8c6ea0d126e959453bfc59874525f..9925b6391b8035a547355a8ad9919e9a8f06f920 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -41,6 +41,7 @@ static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
                return NULL;
 
        pmd = pmd_offset(pud, addr);
+       split_huge_page_pmd(mm, pmd);
        if (pmd_none_or_clear_bad(pmd))
                return NULL;
 
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 38cc58b8b2b0d37340f6e327c56c796bd7c24516..7cfa6ae023038ef4d1f05c6224c85faad173d533 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -34,6 +34,7 @@ static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
+               split_huge_page_pmd(walk->mm, pmd);
                if (pmd_none_or_clear_bad(pmd)) {
                        if (walk->pte_hole)
                                err = walk->pte_hole(addr, next, walk);