From e8e1c8d22ac72f4ad5ded18e50e6d09de60e6220 Mon Sep 17 00:00:00 2001
From: "Kirill A. Shutemov"
Date: Thu, 29 Nov 2012 14:17:36 +1100
Subject: [PATCH] thp-do_huge_pmd_wp_page-handle-huge-zero-page-v6

On write access to the huge zero page we allocate a new huge page and
clear it.

If ENOMEM, graceful fallback: we create a new page table and set the pte
at the fault address to a newly allocated normal (4k) page.  All other
ptes in the pmd are set to the normal zero page.

Signed-off-by: Kirill A. Shutemov
Cc: Andrea Arcangeli
Cc: Andi Kleen
Cc: "H. Peter Anvin"
Cc: Mel Gorman
Acked-by: David Rientjes
Signed-off-by: Andrew Morton
---
 mm/huge_memory.c | 24 ++----------------------
 1 file changed, 2 insertions(+), 22 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 0b2fce1f4154..edc1869c532d 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1019,25 +1019,6 @@ unlock:
 	spin_unlock(&mm->page_table_lock);
 }
 
-/* no "address" argument so destroys page coloring of some arch */
-pgtable_t get_pmd_huge_pte(struct mm_struct *mm)
-{
-	pgtable_t pgtable;
-
-	assert_spin_locked(&mm->page_table_lock);
-
-	/* FIFO */
-	pgtable = mm->pmd_huge_pte;
-	if (list_empty(&pgtable->lru))
-		mm->pmd_huge_pte = NULL;
-	else {
-		mm->pmd_huge_pte = list_entry(pgtable->lru.next,
-					      struct page, lru);
-		list_del(&pgtable->lru);
-	}
-	return pgtable;
-}
-
 static int do_huge_pmd_wp_zero_page_fallback(struct mm_struct *mm,
 		struct vm_area_struct *vma, unsigned long address,
 		pmd_t *pmd, unsigned long haddr)
@@ -1072,7 +1053,7 @@ static int do_huge_pmd_wp_zero_page_fallback(struct mm_struct *mm,
 	pmdp_clear_flush(vma, haddr, pmd);
 	/* leave pmd empty until pte is filled */
 
-	pgtable = get_pmd_huge_pte(mm);
+	pgtable = pgtable_trans_huge_withdraw(mm);
 	pmd_populate(mm, &_pmd, pgtable);
 
 	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
@@ -1287,7 +1268,6 @@ alloc:
 		goto out_mn;
 	} else {
 		pmd_t entry;
-
 		entry = mk_huge_pmd(new_page, vma);
 		pmdp_clear_flush(vma, haddr, pmd);
 		page_add_new_anon_rmap(new_page, vma, haddr);
@@ -1295,7 +1275,7 @@ alloc:
 		update_mmu_cache_pmd(vma, address, pmd);
 		if (is_huge_zero_pmd(orig_pmd))
 			add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
-		if (page) {
+		else {
 			VM_BUG_ON(!PageHead(page));
 			page_remove_rmap(page);
 			put_page(page);
-- 
2.39.5
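
For context, here is a minimal user-space sketch of the fault sequence the
patch handles: a read fault on untouched anonymous memory may be served by
the huge zero page, and the first write then takes the kernel down the
do_huge_pmd_wp_page() path, where it either allocates and clears a real huge
page or, on ENOMEM, does the per-pte fallback described in the changelog.
This is illustrative only and not part of the patch; whether the huge zero
page is actually used depends on the kernel configuration, the THP sysfs
settings, and on the touched address falling inside a pmd-aligned 2M block.

/*
 * Illustrative only -- not part of the patch.  Triggers a read fault
 * (possibly backed by the huge zero page) followed by a write fault
 * (which forces the kernel to replace the zero page with real memory).
 */
#include <stdio.h>
#include <sys/mman.h>

#define MB (1UL << 20)

int main(void)
{
	size_t len = 4 * MB;	/* large enough to contain an aligned 2M block */
	char *buf, *p;

	buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
#ifdef MADV_HUGEPAGE
	madvise(buf, len, MADV_HUGEPAGE);	/* ask for THP on this range */
#endif
	/* pick an address inside a pmd-aligned 2M block of the mapping */
	p = (char *)(((unsigned long)buf + 2 * MB - 1) & ~(2 * MB - 1));

	volatile char c = p[0];	/* read fault: may be served by the huge zero page */
	p[0] = c + 1;		/* write fault: kernel must now provide real memory */

	printf("after write: %d\n", p[0]);
	munmap(buf, len);
	return 0;
}

The program builds with any C compiler on Linux; the MADV_HUGEPAGE hint is
guarded so it still compiles where the macro is unavailable, in which case
the write fault simply goes through the ordinary 4k zero-page COW path.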