From: Kirill A. Shutemov
Date: Thu, 29 Nov 2012 03:17:38 +0000 (+1100)
Subject: thp-implement-splitting-pmd-for-huge-zero-page-v6
X-Git-Tag: next-20121205~1^2~263
X-Git-Url: https://git.karo-electronics.de/?a=commitdiff_plain;h=8963251cf4dbb392bf7f77b7afd11fb36b67fb01;p=karo-tx-linux.git

thp-implement-splitting-pmd-for-huge-zero-page-v6

We can't split the huge zero page itself (and it's a bug if we try),
but we can split the pmd which points to it.  On splitting the pmd we
create a page table with all ptes set to the normal zero page.

Signed-off-by: Kirill A. Shutemov
Cc: Andrea Arcangeli
Cc: Andi Kleen
Cc: "H. Peter Anvin"
Cc: Mel Gorman
Cc: David Rientjes
Signed-off-by: Andrew Morton
---

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 4b3f410f249d..62209b018aae 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2643,6 +2643,7 @@ static int khugepaged(void *none)
 static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
 		unsigned long haddr, pmd_t *pmd)
 {
+	struct mm_struct *mm = vma->vm_mm;
 	pgtable_t pgtable;
 	pmd_t _pmd;
 	int i;
@@ -2650,8 +2651,8 @@ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
 	pmdp_clear_flush(vma, haddr, pmd);
 	/* leave pmd empty until pte is filled */
 
-	pgtable = get_pmd_huge_pte(vma->vm_mm);
-	pmd_populate(vma->vm_mm, &_pmd, pgtable);
+	pgtable = pgtable_trans_huge_withdraw(mm);
+	pmd_populate(mm, &_pmd, pgtable);
 
 	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
 		pte_t *pte, entry;
@@ -2659,11 +2660,11 @@ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
 		entry = pte_mkspecial(entry);
 		pte = pte_offset_map(&_pmd, haddr);
 		VM_BUG_ON(!pte_none(*pte));
-		set_pte_at(vma->vm_mm, haddr, pte, entry);
+		set_pte_at(mm, haddr, pte, entry);
 		pte_unmap(pte);
 	}
 	smp_wmb(); /* make pte visible before pmd */
-	pmd_populate(vma->vm_mm, pmd, pgtable);
+	pmd_populate(mm, pmd, pgtable);
 }
 
 void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address,
@@ -2678,7 +2679,7 @@ void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address,
 	BUG_ON(vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE);
 
 	mmun_start = haddr;
-	mmun_end   = address + HPAGE_PMD_SIZE;
+	mmun_end   = haddr + HPAGE_PMD_SIZE;
 	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
 	spin_lock(&mm->page_table_lock);
 	if (unlikely(!pmd_trans_huge(*pmd))) {
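
For readers who want to exercise the path this patch touches, below is a
minimal userspace sketch.  It is not part of the patch; it assumes THP is
enabled (e.g. "always" in /sys/kernel/mm/transparent_hugepage/enabled), a
2M pmd-mappable size, and that a read fault on untouched anonymous memory
installs the huge zero page under a pmd.  An mprotect() of a 4k sub-range
is one way to force the kernel to split that pmd, which with the huge zero
page mapped is the __split_huge_zero_page_pmd() path above: the pmd is
replaced by a page table whose ptes all point at the normal zero page.

/*
 * Userspace sketch, not part of the patch.  Assumes a 2M pmd size and
 * THP enabled; the partial-range mprotect() below is one way to make
 * the kernel split a pmd that maps the huge zero page.
 */
#include <stdio.h>
#include <stdint.h>
#include <sys/mman.h>

#define HPAGE_SIZE (2UL * 1024 * 1024)	/* assumed pmd-mappable size */

int main(void)
{
	/* Over-allocate so a 2M-aligned chunk fits inside the mapping. */
	size_t len = 4 * HPAGE_SIZE;
	char *map = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (map == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* First 2M-aligned address inside the mapping. */
	char *aligned = (char *)(((uintptr_t)map + HPAGE_SIZE - 1)
				 & ~(HPAGE_SIZE - 1));

	/* Read fault only: the kernel may map the huge zero page here. */
	volatile char c = aligned[0];
	(void)c;

	/*
	 * Change the protection of a single 4k page in the middle of the
	 * huge mapping.  The kernel cannot keep one 2M pmd for a range
	 * with mixed protections, so it must split it -- with the huge
	 * zero page mapped, that means building a page table full of
	 * normal zero-page ptes instead of hitting a BUG.
	 */
	if (mprotect(aligned + HPAGE_SIZE / 2, 4096, PROT_READ) == -1) {
		perror("mprotect");
		return 1;
	}

	/* Still reads as zeros, now via 4k zero-page ptes. */
	printf("after split: aligned[0] == %d\n", aligned[0]);
	munmap(map, len);
	return 0;
}

Build and run it with any C compiler (e.g. "cc demo.c && ./a.out"); on a
pre-patch kernel with the huge zero page series applied, hitting this
split path is exactly the case the new helper handles.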