git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
thp-change-split_huge_page_pmd-interface-v6
author: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Thu, 29 Nov 2012 03:17:37 +0000 (14:17 +1100)
committer: Stephen Rothwell <sfr@canb.auug.org.au>
Wed, 5 Dec 2012 05:23:16 +0000 (16:23 +1100)
Pass vma instead of mm and add address parameter.

In most cases we already have the vma on the stack. We provide
split_huge_page_pmd_mm() for the few cases when we have the mm, but not the vma.

This change is preparation for the huge zero pmd splitting implementation.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: "H. Peter Anvin" <hpa@linux.intel.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Documentation/vm/transhuge.txt
mm/huge_memory.c

index 677a599be43094e856ded2c4838fad52ef5d8159..8f5b41db314cfe0f9c6e1e978cdfd315ddf26019 100644 (file)
@@ -276,7 +276,7 @@ unaffected. libhugetlbfs will also work fine as usual.
 == Graceful fallback ==
 
 Code walking pagetables but unware about huge pmds can simply call
-split_huge_page_pmd(vma, pmd, addr) where the pmd is the one returned by
+split_huge_page_pmd(vma, addr, pmd) where the pmd is the one returned by
 pmd_offset. It's trivial to make the code transparent hugepage aware
 by just grepping for "pmd_offset" and adding split_huge_page_pmd where
 missing after pmd_offset returns the pmd. Thanks to the graceful
@@ -299,7 +299,7 @@ diff --git a/mm/mremap.c b/mm/mremap.c
                return NULL;
 
        pmd = pmd_offset(pud, addr);
-+      split_huge_page_pmd(vma, pmd, addr);
++      split_huge_page_pmd(vma, addr, pmd);
        if (pmd_none_or_clear_bad(pmd))
                return NULL;
 
index 53292fb1334f4ddbf32de5b20d14b2a979f0810f..9fed4ccb2aec23948f5e7e0377318d1fdc28157b 100644 (file)
@@ -2644,18 +2644,19 @@ void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address,
 {
        struct page *page;
        unsigned long haddr = address & HPAGE_PMD_MASK;
+       struct mm_struct *mm = vma->vm_mm;
 
        BUG_ON(vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE);
 
-       spin_lock(&vma->vm_mm->page_table_lock);
+       spin_lock(&mm->page_table_lock);
        if (unlikely(!pmd_trans_huge(*pmd))) {
-               spin_unlock(&vma->vm_mm->page_table_lock);
+               spin_unlock(&mm->page_table_lock);
                return;
        }
        page = pmd_page(*pmd);
        VM_BUG_ON(!page_count(page));
        get_page(page);
-       spin_unlock(&vma->vm_mm->page_table_lock);
+       spin_unlock(&mm->page_table_lock);
 
        split_huge_page(page);