git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
thp: copy_huge_pmd(): copy huge zero page
author Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Thu, 29 Nov 2012 03:17:34 +0000 (14:17 +1100)
committer Stephen Rothwell <sfr@canb.auug.org.au>
Wed, 5 Dec 2012 05:23:14 +0000 (16:23 +1100)
It's easy to copy the huge zero page: just set the destination pmd to the
huge zero page.

It's safe to copy the huge zero page since we have none yet :-p

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: "H. Peter Anvin" <hpa@linux.intel.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/huge_memory.c

index d525bdc58be1a9e95eff5afb68f973eadb526276..afe8b3d034e212150b4f1dad3ee9c7f67ec7569f 100644 (file)
@@ -709,6 +709,18 @@ static inline struct page *alloc_hugepage(int defrag)
 }
 #endif
 
+/*
+ * Install the global huge zero page at @pmd for address @haddr in @mm.
+ *
+ * Builds a pmd entry from huge_zero_pfn with the VMA's page protection,
+ * write-protects it (writes must fault rather than modify the shared
+ * zero page) and marks it huge, then installs it with set_pmd_at().
+ * The preallocated @pgtable is deposited via
+ * pgtable_trans_huge_deposit() for use if the huge pmd is later split,
+ * and mm->nr_ptes is bumped to account for that deposited page table.
+ *
+ * NOTE(review): presumably called with mm->page_table_lock held --
+ * verify against callers; not visible in this hunk.
+ */
+static void set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
+               struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd)
+{
+       pmd_t entry;
+       entry = pfn_pmd(huge_zero_pfn, vma->vm_page_prot);
+       entry = pmd_wrprotect(entry);
+       entry = pmd_mkhuge(entry);
+       set_pmd_at(mm, haddr, pmd, entry);
+       pgtable_trans_huge_deposit(mm, pgtable);
+       mm->nr_ptes++;
+}
+
 int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
                               unsigned long address, pmd_t *pmd,
                               unsigned int flags)
@@ -946,6 +958,11 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                pte_free(dst_mm, pgtable);
                goto out_unlock;
        }
+       if (is_huge_zero_pmd(pmd)) {
+               set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd);
+               ret = 0;
+               goto out_unlock;
+       }
        if (unlikely(pmd_trans_splitting(pmd))) {
                /* split huge page running from under us */
                spin_unlock(&src_mm->page_table_lock);