thp: move maybe_pmd_mkwrite() out of mk_huge_pmd()
author    Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
          Thu, 18 Jul 2013 23:56:44 +0000 (09:56 +1000)
committer Stephen Rothwell <sfr@canb.auug.org.au>
          Thu, 18 Jul 2013 23:56:44 +0000 (09:56 +1000)
It's confusing that mk_huge_pmd() has semantics different from mk_pte() or
mk_pmd().  I spent some time debugging an issue caused by this
inconsistency.

Let's move maybe_pmd_mkwrite() out of mk_huge_pmd() and adjust the
prototype to match mk_pte().
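
As a minimal sketch of the resulting calling convention (taken from the
hunks below): the helper now only builds the huge PMD entry, mirroring
mk_pte(page, prot), and each caller applies pmd_mkdirty() and
maybe_pmd_mkwrite() itself:

	static inline pmd_t mk_huge_pmd(struct page *page, pgprot_t prot)
	{
		pmd_t entry;
		entry = mk_pmd(page, prot);
		entry = pmd_mkhuge(entry);
		return entry;
	}

	/* caller side, e.g. in __do_huge_pmd_anonymous_page() */
	pmd_t entry;
	entry = mk_huge_pmd(page, vma->vm_page_prot);
	entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
	set_pmd_at(mm, haddr, pmd, entry);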

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Hugh Dickins <hughd@google.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Matthew Wilcox <willy@linux.intel.com>
Cc: Hillf Danton <dhillf@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/huge_memory.c

index 04f0749ed105fefa78826452298226590263d55e..ec735a96701693f2bd723763f2006f82ae2343e7 100644
@@ -690,11 +690,10 @@ pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
        return pmd;
 }
 
-static inline pmd_t mk_huge_pmd(struct page *page, struct vm_area_struct *vma)
+static inline pmd_t mk_huge_pmd(struct page *page, pgprot_t prot)
 {
        pmd_t entry;
-       entry = mk_pmd(page, vma->vm_page_prot);
-       entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
+       entry = mk_pmd(page, prot);
        entry = pmd_mkhuge(entry);
        return entry;
 }
@@ -727,7 +726,8 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
                pte_free(mm, pgtable);
        } else {
                pmd_t entry;
-               entry = mk_huge_pmd(page, vma);
+               entry = mk_huge_pmd(page, vma->vm_page_prot);
+               entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
                page_add_new_anon_rmap(page, vma, haddr);
                pgtable_trans_huge_deposit(mm, pmd, pgtable);
                set_pmd_at(mm, haddr, pmd, entry);
@@ -1210,7 +1210,8 @@ alloc:
                goto out_mn;
        } else {
                pmd_t entry;
-               entry = mk_huge_pmd(new_page, vma);
+               entry = mk_huge_pmd(new_page, vma->vm_page_prot);
+               entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
                pmdp_clear_flush(vma, haddr, pmd);
                page_add_new_anon_rmap(new_page, vma, haddr);
                set_pmd_at(mm, haddr, pmd, entry);
@@ -2356,7 +2357,8 @@ static void collapse_huge_page(struct mm_struct *mm,
        __SetPageUptodate(new_page);
        pgtable = pmd_pgtable(_pmd);
 
-       _pmd = mk_huge_pmd(new_page, vma);
+       _pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
+       _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
 
        /*
         * spin_lock() below is not the equivalent of smp_wmb(), so