thp-implement-splitting-pmd-for-huge-zero-page-v6
author		Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
		Thu, 29 Nov 2012 03:17:38 +0000 (14:17 +1100)
committer	Stephen Rothwell <sfr@canb.auug.org.au>
		Wed, 5 Dec 2012 05:23:16 +0000 (16:23 +1100)
We can't split the huge zero page itself (and it's a bug if we try), but
we can split the pmd which points to it.

On splitting the pmd we create a page table with all ptes set to the
normal zero page.
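
For reference, __split_huge_zero_page_pmd() reads as follows with this
fixup applied. This is a sketch assembled from the hunks below; the line
that builds the zero-page pte falls between the two hunks and is
reconstructed here from the surrounding context, so treat it as an
assumption rather than quoted patch text:

	static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
			unsigned long haddr, pmd_t *pmd)
	{
		struct mm_struct *mm = vma->vm_mm;
		pgtable_t pgtable;
		pmd_t _pmd;
		int i;

		pmdp_clear_flush(vma, haddr, pmd);
		/* leave pmd empty until pte is filled */

		/* reuse the page table deposited when the huge pmd was set up */
		pgtable = pgtable_trans_huge_withdraw(mm);
		pmd_populate(mm, &_pmd, pgtable);

		for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
			pte_t *pte, entry;
			/* assumed from context: point each pte at the zero page */
			entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
			entry = pte_mkspecial(entry);
			pte = pte_offset_map(&_pmd, haddr);
			VM_BUG_ON(!pte_none(*pte));
			set_pte_at(mm, haddr, pte, entry);
			pte_unmap(pte);
		}
		smp_wmb(); /* make pte visible before pmd */
		pmd_populate(mm, pmd, pgtable);
	}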

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: "H. Peter Anvin" <hpa@linux.intel.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 4b3f410f249d9d62fe31e0bddde9b5b96417dc2b..62209b018aaead57b9adb6b18731fbd0decd6813 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2643,6 +2643,7 @@ static int khugepaged(void *none)
 static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
                unsigned long haddr, pmd_t *pmd)
 {
+       struct mm_struct *mm = vma->vm_mm;
        pgtable_t pgtable;
        pmd_t _pmd;
        int i;
@@ -2650,8 +2651,8 @@ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
        pmdp_clear_flush(vma, haddr, pmd);
        /* leave pmd empty until pte is filled */
 
-       pgtable = get_pmd_huge_pte(vma->vm_mm);
-       pmd_populate(vma->vm_mm, &_pmd, pgtable);
+       pgtable = pgtable_trans_huge_withdraw(mm);
+       pmd_populate(mm, &_pmd, pgtable);
 
        for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
                pte_t *pte, entry;
@@ -2659,11 +2660,11 @@ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
                entry = pte_mkspecial(entry);
                pte = pte_offset_map(&_pmd, haddr);
                VM_BUG_ON(!pte_none(*pte));
-               set_pte_at(vma->vm_mm, haddr, pte, entry);
+               set_pte_at(mm, haddr, pte, entry);
                pte_unmap(pte);
        }
        smp_wmb(); /* make pte visible before pmd */
-       pmd_populate(vma->vm_mm, pmd, pgtable);
+       pmd_populate(mm, pmd, pgtable);
 }
 
 void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address,
@@ -2678,7 +2679,7 @@ void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address,
        BUG_ON(vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE);
 
        mmun_start = haddr;
-       mmun_end   = address + HPAGE_PMD_SIZE;
+       mmun_end   = haddr + HPAGE_PMD_SIZE;
        mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
        spin_lock(&mm->page_table_lock);
        if (unlikely(!pmd_trans_huge(*pmd))) {
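
Note on the last hunk: 'address' can point anywhere inside the huge
page, while 'haddr' is the pmd-aligned start, so computing mmun_end
from 'address' could extend the mmu notifier invalidation range past
the end of the huge page. A minimal userspace sketch of the arithmetic
(the constants mirror x86 2M huge pages and are assumptions, not taken
from this patch):

	#include <stdio.h>

	#define HPAGE_PMD_SHIFT	21	/* assumed: 2M huge pages */
	#define HPAGE_PMD_SIZE	(1UL << HPAGE_PMD_SHIFT)
	#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

	int main(void)
	{
		unsigned long address = 0x200ff0;	/* arbitrary address in a huge page */
		unsigned long haddr = address & HPAGE_PMD_MASK;	/* 0x200000 */

		/* fixed code: range ends exactly at the huge page boundary */
		printf("haddr-based end:   %#lx\n", haddr + HPAGE_PMD_SIZE);
		/* old code: range ran past the huge page's end */
		printf("address-based end: %#lx\n", address + HPAGE_PMD_SIZE);
		return 0;
	}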