thp: cleanup: introduce mk_huge_pmd()
author Bob Liu <lliubbo@gmail.com>
Tue, 23 Oct 2012 02:50:12 +0000 (13:50 +1100)
committer Stephen Rothwell <sfr@canb.auug.org.au>
Tue, 23 Oct 2012 03:11:47 +0000 (14:11 +1100)
Introduce a mk_huge_pmd() helper to consolidate the repeated mk_pmd() / pmd_mkdirty() / maybe_pmd_mkwrite() / pmd_mkhuge() sequence used to set up a huge PMD entry at its three call sites (see the sketch after the sign-offs).

Signed-off-by: Bob Liu <lliubbo@gmail.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Minchan Kim <minchan.kim@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
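
For illustration only, below is a minimal, self-contained sketch of the consolidation pattern this commit applies. The names used here (fake_pmd_t, fake_page, fake_vma, fake_mk_pmd(), etc.) are hypothetical stand-ins, not the kernel's real pmd_t, mk_pmd() or vm_area_struct; the actual helper is the mk_huge_pmd() added to mm/huge_memory.c in the diff that follows.

/*
 * Sketch of the refactoring pattern: the repeated three-step PMD setup
 * is moved into one helper. All types and functions here are hypothetical
 * stand-ins for the kernel internals shown in the diff below.
 */
#include <stdio.h>

typedef unsigned long fake_pmd_t;

struct fake_page { unsigned long pfn; };
struct fake_vma  { unsigned long prot; int writable; };

static fake_pmd_t fake_mk_pmd(struct fake_page *page, unsigned long prot)
{
	/* Encode the page frame number together with the protection bits. */
	return (page->pfn << 12) | prot;
}

static fake_pmd_t fake_pmd_mkdirty(fake_pmd_t pmd) { return pmd | 0x1; }
static fake_pmd_t fake_pmd_mkhuge(fake_pmd_t pmd)  { return pmd | 0x2; }

static fake_pmd_t fake_maybe_pmd_mkwrite(fake_pmd_t pmd, struct fake_vma *vma)
{
	/* Only mark the entry writable if the VMA allows writes. */
	return vma->writable ? (pmd | 0x4) : pmd;
}

/*
 * The consolidated helper: one place encodes the mk_pmd ->
 * mkdirty/maybe_mkwrite -> mkhuge sequence instead of repeating it
 * at every call site, mirroring what mk_huge_pmd() does in the patch.
 */
static fake_pmd_t fake_mk_huge_pmd(struct fake_page *page, struct fake_vma *vma)
{
	fake_pmd_t entry;

	entry = fake_mk_pmd(page, vma->prot);
	entry = fake_maybe_pmd_mkwrite(fake_pmd_mkdirty(entry), vma);
	entry = fake_pmd_mkhuge(entry);
	return entry;
}

int main(void)
{
	struct fake_page page = { .pfn = 0x1234 };
	struct fake_vma  vma  = { .prot = 0x8, .writable = 1 };

	/* Each former three-line call site collapses to a single call. */
	printf("huge pmd entry: %#lx\n", fake_mk_huge_pmd(&page, &vma));
	return 0;
}
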
mm/huge_memory.c

index 9b2ab8d901bdca93e7b344e06c8245c74e8c5462..7299f43df97b3c04e7124be3e8d8bce74be5f62b 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -607,6 +607,15 @@ static inline pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
        return pmd;
 }
 
+static inline pmd_t mk_huge_pmd(struct page *page, struct vm_area_struct *vma)
+{
+       pmd_t entry;
+       entry = mk_pmd(page, vma->vm_page_prot);
+       entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
+       entry = pmd_mkhuge(entry);
+       return entry;
+}
+
 static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
                                        struct vm_area_struct *vma,
                                        unsigned long haddr, pmd_t *pmd,
@@ -630,9 +639,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
                pte_free(mm, pgtable);
        } else {
                pmd_t entry;
-               entry = mk_pmd(page, vma->vm_page_prot);
-               entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
-               entry = pmd_mkhuge(entry);
+               entry = mk_huge_pmd(page, vma);
                /*
                 * The spinlocking to take the lru_lock inside
                 * page_add_new_anon_rmap() acts as a full memory
@@ -1104,9 +1111,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
        } else {
                pmd_t entry;
                VM_BUG_ON(!PageHead(page));
-               entry = mk_pmd(new_page, vma->vm_page_prot);
-               entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
-               entry = pmd_mkhuge(entry);
+               entry = mk_huge_pmd(new_page, vma);
                pmdp_clear_flush(vma, haddr, pmd);
                page_add_new_anon_rmap(new_page, vma, haddr);
                set_pmd_at(mm, haddr, pmd, entry);
@@ -2171,9 +2176,7 @@ static void collapse_huge_page(struct mm_struct *mm,
        __SetPageUptodate(new_page);
        pgtable = pmd_pgtable(_pmd);
 
-       _pmd = mk_pmd(new_page, vma->vm_page_prot);
-       _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
-       _pmd = pmd_mkhuge(_pmd);
+       _pmd = mk_huge_pmd(new_page, vma);
 
        /*
         * spin_lock() below is not the equivalent of smp_wmb(), so