From: David Rientjes
Date: Tue, 7 Apr 2015 23:44:28 +0000 (+1000)
Subject: mm, memcg: sync allocation and memcg charge gfp flags for thp fix fix
X-Git-Tag: KARO-TXA5-2015-06-26~18^2~306
X-Git-Url: https://git.karo-electronics.de/?a=commitdiff_plain;h=72d3eb0037fe6b21701df7157142c61db76e5cc0;p=karo-tx-linux.git

mm, memcg: sync allocation and memcg charge gfp flags for thp fix fix

"mm, memcg: sync allocation and memcg charge gfp flags for THP" in -mm
introduces a formal parameter to pass the gfp mask for khugepaged's
hugepage allocation.  This is just too ugly to live.

alloc_hugepage_gfpmask() cannot differ between NUMA and UMA configs by
anything in GFP_RECLAIM_MASK, which is the only thing that matters for
memcg reclaim, so just determine the gfp flags once in
collapse_huge_page() and avoid the complexity.

Signed-off-by: David Rientjes
Acked-by: Michal Hocko
Acked-by: Vlastimil Babka
Cc: Johannes Weiner
Acked-by: Hillf Danton
Signed-off-by: Andrew Morton
---

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 00be5cb9af6d..b6e55143f19d 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2322,16 +2322,12 @@ static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
 }
 
 static struct page *
-khugepaged_alloc_page(struct page **hpage, gfp_t *gfp, struct mm_struct *mm,
+khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm,
 		       struct vm_area_struct *vma, unsigned long address,
 		       int node)
 {
 	VM_BUG_ON_PAGE(*hpage, *hpage);
 
-	/* Only allocate from the target node */
-	*gfp = alloc_hugepage_gfpmask(khugepaged_defrag(), __GFP_OTHER_NODE) |
-	       __GFP_THISNODE;
-
 	/*
 	 * Before allocating the hugepage, release the mmap_sem read lock.
 	 * The allocation can take potentially a long time if it involves
@@ -2340,7 +2336,7 @@ khugepaged_alloc_page(struct page **hpage, gfp_t *gfp, struct mm_struct *mm,
 	 */
 	up_read(&mm->mmap_sem);
 
-	*hpage = alloc_pages_exact_node(node, *gfp, HPAGE_PMD_ORDER);
+	*hpage = alloc_pages_exact_node(node, gfp, HPAGE_PMD_ORDER);
 	if (unlikely(!*hpage)) {
 		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 		*hpage = ERR_PTR(-ENOMEM);
@@ -2394,18 +2390,13 @@ static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
 }
 
 static struct page *
-khugepaged_alloc_page(struct page **hpage, gfp_t *gfp, struct mm_struct *mm,
+khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm,
 		       struct vm_area_struct *vma, unsigned long address,
 		       int node)
 {
 	up_read(&mm->mmap_sem);
 	VM_BUG_ON(!*hpage);
 
-	/*
-	 * khugepaged_alloc_hugepage is doing the preallocation, use the same
-	 * gfp flags here.
-	 */
-	*gfp = alloc_hugepage_gfpmask(khugepaged_defrag(), 0);
 	return *hpage;
 }
 #endif
@@ -2444,8 +2435,12 @@ static void collapse_huge_page(struct mm_struct *mm,
 
 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 
+	/* Only allocate from the target node */
+	gfp = alloc_hugepage_gfpmask(khugepaged_defrag(), __GFP_OTHER_NODE) |
+	      __GFP_THISNODE;
+
 	/* release the mmap_sem read lock. */
-	new_page = khugepaged_alloc_page(hpage, &gfp, mm, vma, address, node);
+	new_page = khugepaged_alloc_page(hpage, gfp, mm, vma, address, node);
 	if (!new_page)
 		return;
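
For readers who want to check the GFP_RECLAIM_MASK argument above, here is a
minimal standalone userspace sketch (not kernel code) mirroring the shape of
alloc_hugepage_gfpmask() and the two khugepaged call sites.  The bit values
and the contents of GFP_TRANSHUGE and GFP_RECLAIM_MASK below are simplified
stand-ins for the real definitions in include/linux/gfp.h and mm/internal.h;
only set membership matters for the argument, namely that __GFP_THISNODE and
__GFP_OTHER_NODE fall outside the reclaim mask, so the NUMA and UMA paths
hand memcg the same reclaim-relevant flags.

	#include <assert.h>
	#include <stdio.h>

	typedef unsigned int gfp_t;

	/* Simplified stand-in bit values, not the kernel's real ones. */
	#define __GFP_WAIT		0x001u
	#define __GFP_IO		0x002u
	#define __GFP_FS		0x004u
	#define __GFP_NORETRY		0x008u
	#define __GFP_NOMEMALLOC	0x010u
	#define __GFP_COMP		0x020u
	#define __GFP_NO_KSWAPD		0x040u
	#define __GFP_THISNODE		0x080u	/* node placement, not reclaim */
	#define __GFP_OTHER_NODE	0x100u	/* vmstat accounting, not reclaim */

	#define GFP_TRANSHUGE		(__GFP_WAIT | __GFP_IO | __GFP_FS | \
					 __GFP_COMP | __GFP_NOMEMALLOC | \
					 __GFP_NORETRY | __GFP_NO_KSWAPD)

	/* Stand-in for the subset of flags reclaim actually honors. */
	#define GFP_RECLAIM_MASK	(__GFP_WAIT | __GFP_IO | __GFP_FS | \
					 __GFP_NORETRY | __GFP_NOMEMALLOC)

	/* Same shape as the kernel helper: defrag decides whether we may wait. */
	static gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
	{
		return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT)) | extra_gfp;
	}

	int main(void)
	{
		for (int defrag = 0; defrag <= 1; defrag++) {
			/* CONFIG_NUMA path: pin the allocation to the target node. */
			gfp_t numa = alloc_hugepage_gfpmask(defrag, __GFP_OTHER_NODE) |
				     __GFP_THISNODE;
			/* UMA path: no node placement flags. */
			gfp_t uma = alloc_hugepage_gfpmask(defrag, 0);

			/* The NUMA extras lie outside GFP_RECLAIM_MASK, so memcg
			 * charging sees identical reclaim behavior either way. */
			assert((numa & GFP_RECLAIM_MASK) == (uma & GFP_RECLAIM_MASK));
			printf("defrag=%d reclaim-relevant bits %#x\n",
			       defrag, numa & GFP_RECLAIM_MASK);
		}
		return 0;
	}

Built with cc -std=c99, this prints identical reclaim-relevant bits for the
NUMA and UMA paths at both defrag settings, which is why computing gfp once
in collapse_huge_page() and passing it by value loses nothing.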