thp-do_huge_pmd_wp_page-handle-huge-zero-page-v6
author		Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
		Thu, 29 Nov 2012 03:17:36 +0000 (14:17 +1100)
committer	Stephen Rothwell <sfr@canb.auug.org.au>
		Wed, 5 Dec 2012 05:23:15 +0000 (16:23 +1100)
On write access to the huge zero page we allocate a new huge page and clear it.

If the allocation fails with ENOMEM, we fall back gracefully: we create a
regular pmd table and set the pte covering the fault address to a newly
allocated normal (4k) page. All other ptes in the pmd are set to the normal
(4k) zero page.
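
For reference, a minimal sketch of the pte-fill loop behind that fallback,
assuming the usual mm helpers (mk_pte, pfn_pte, my_zero_pfn, pte_mkspecial,
set_pte_at); local variable names are illustrative, not copied verbatim from
the patch:

	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
		pte_t *pte, entry;
		if (haddr == (address & PAGE_MASK)) {
			/* faulting 4k slot: map the freshly allocated, zeroed page */
			entry = mk_pte(page, vma->vm_page_prot);
			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
			page_add_new_anon_rmap(page, vma, haddr);
		} else {
			/* every other slot keeps reading zeros via the 4k zero page */
			entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
			entry = pte_mkspecial(entry);
		}
		pte = pte_offset_map(&_pmd, haddr);
		VM_BUG_ON(!pte_none(*pte));
		set_pte_at(mm, haddr, pte, entry);
		pte_unmap(pte);
	}

Only once the table is fully populated is the pmd pointed back at it, so other
threads never observe a half-filled page table.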

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: "H. Peter Anvin" <hpa@linux.intel.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/huge_memory.c

index 0b2fce1f41546f8fdf5ad38be62ef29f3654559f..edc1869c532d8b559394e2bf8e9e42c9fbe02651 100644 (file)
@@ -1019,25 +1019,6 @@ unlock:
        spin_unlock(&mm->page_table_lock);
 }
 
-/* no "address" argument so destroys page coloring of some arch */
-pgtable_t get_pmd_huge_pte(struct mm_struct *mm)
-{
-       pgtable_t pgtable;
-
-       assert_spin_locked(&mm->page_table_lock);
-
-       /* FIFO */
-       pgtable = mm->pmd_huge_pte;
-       if (list_empty(&pgtable->lru))
-               mm->pmd_huge_pte = NULL;
-       else {
-               mm->pmd_huge_pte = list_entry(pgtable->lru.next,
-                                             struct page, lru);
-               list_del(&pgtable->lru);
-       }
-       return pgtable;
-}
-
 static int do_huge_pmd_wp_zero_page_fallback(struct mm_struct *mm,
                struct vm_area_struct *vma, unsigned long address,
                pmd_t *pmd, unsigned long haddr)
@@ -1072,7 +1053,7 @@ static int do_huge_pmd_wp_zero_page_fallback(struct mm_struct *mm,
        pmdp_clear_flush(vma, haddr, pmd);
        /* leave pmd empty until pte is filled */
 
-       pgtable = get_pmd_huge_pte(mm);
+       pgtable = pgtable_trans_huge_withdraw(mm);
        pmd_populate(mm, &_pmd, pgtable);
 
        for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
@@ -1287,7 +1268,6 @@ alloc:
                goto out_mn;
        } else {
                pmd_t entry;
-
                entry = mk_huge_pmd(new_page, vma);
                pmdp_clear_flush(vma, haddr, pmd);
                page_add_new_anon_rmap(new_page, vma, haddr);
@@ -1295,7 +1275,7 @@ alloc:
                update_mmu_cache_pmd(vma, address, pmd);
                if (is_huge_zero_pmd(orig_pmd))
                        add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
-               if (page) {
+               else {
                        VM_BUG_ON(!PageHead(page));
                        page_remove_rmap(page);
                        put_page(page);