From 7366ac6c94751a34de0de458a9e149a5c4b92366 Mon Sep 17 00:00:00 2001
From: Xiao Guangrong
Date: Fri, 28 Sep 2012 10:19:17 +1000
Subject: [PATCH] thp: merge page pre-alloc in khugepaged_loop into khugepaged_do_scan

There are two pre-alloc operations in these two functions; the difference
is:
- khugepaged_loop is allowed to sleep if the page allocation fails
- khugepaged_do_scan exits immediately if the page allocation fails

Actually, in khugepaged_do_scan we can allow the pre-alloc to sleep on the
first failure, and then the operation in khugepaged_loop can be removed.

Signed-off-by: Xiao Guangrong
Cc: Andrea Arcangeli
Cc: Hugh Dickins
Cc: David Rientjes
Signed-off-by: Andrew Morton
---
 mm/huge_memory.c | 97 ++++++++++++++++++++++--------------
 1 file changed, 45 insertions(+), 52 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index cfe8694c33ad..e414b1d996a4 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2288,26 +2288,57 @@ static int khugepaged_wait_event(void)
 		kthread_should_stop();
 }
 
-static void khugepaged_do_scan(struct page **hpage)
+static void khugepaged_alloc_sleep(void)
+{
+	wait_event_freezable_timeout(khugepaged_wait, false,
+			msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
+}
+
+#ifndef CONFIG_NUMA
+static struct page *khugepaged_alloc_hugepage(bool *wait)
+{
+	struct page *hpage;
+
+	do {
+		hpage = alloc_hugepage(khugepaged_defrag());
+		if (!hpage) {
+			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
+			if (!*wait)
+				return NULL;
+
+			*wait = false;
+			khugepaged_alloc_sleep();
+		} else
+			count_vm_event(THP_COLLAPSE_ALLOC);
+	} while (unlikely(!hpage) && likely(khugepaged_enabled()));
+
+	return hpage;
+}
+#endif
+
+static void khugepaged_do_scan(void)
 {
+	struct page *hpage = NULL;
 	unsigned int progress = 0, pass_through_head = 0;
 	unsigned int pages = ACCESS_ONCE(khugepaged_pages_to_scan);
+	bool wait = true;
 
 	while (progress < pages) {
 		cond_resched();
 
 #ifndef CONFIG_NUMA
-		if (!*hpage) {
-			*hpage = alloc_hugepage(khugepaged_defrag());
-			if (unlikely(!*hpage)) {
-				count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
+		if (!hpage)
+			hpage = khugepaged_alloc_hugepage(&wait);
+
+		if (unlikely(!hpage))
+			break;
+#else
+		if (IS_ERR(hpage)) {
+			if (!wait)
 				break;
-			}
-			count_vm_event(THP_COLLAPSE_ALLOC);
+
+			wait = false;
+			khugepaged_alloc_sleep();
 		}
-#else
-		if (IS_ERR(*hpage))
-			break;
 #endif
 
 		if (unlikely(kthread_should_stop() || freezing(current)))
@@ -2319,37 +2350,16 @@ static void khugepaged_do_scan(struct page **hpage)
 		if (khugepaged_has_work() &&
 		    pass_through_head < 2)
 			progress += khugepaged_scan_mm_slot(pages - progress,
-							    hpage);
+							    &hpage);
 		else
 			progress = pages;
 		spin_unlock(&khugepaged_mm_lock);
 	}
-}
 
-static void khugepaged_alloc_sleep(void)
-{
-	wait_event_freezable_timeout(khugepaged_wait, false,
-			msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
+	if (!IS_ERR_OR_NULL(hpage))
+		put_page(hpage);
 }
 
-#ifndef CONFIG_NUMA
-static struct page *khugepaged_alloc_hugepage(void)
-{
-	struct page *hpage;
-
-	do {
-		hpage = alloc_hugepage(khugepaged_defrag());
-		if (!hpage) {
-			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
-			khugepaged_alloc_sleep();
-		} else
-			count_vm_event(THP_COLLAPSE_ALLOC);
-	} while (unlikely(!hpage) &&
-		 likely(khugepaged_enabled()));
-	return hpage;
-}
-#endif
-
 static void khugepaged_wait_work(void)
 {
 	try_to_freeze();
@@ -2370,25 +2380,8 @@ static void khugepaged_wait_work(void)
 
 static void khugepaged_loop(void)
 {
-	struct page *hpage = NULL;
-
 	while (likely(khugepaged_enabled())) {
-#ifndef CONFIG_NUMA
-		hpage = khugepaged_alloc_hugepage();
-		if (unlikely(!hpage))
-			break;
-#else
-		if (IS_ERR(hpage)) {
-			khugepaged_alloc_sleep();
-			hpage = NULL;
-		}
-#endif
-
-		khugepaged_do_scan(&hpage);
-
-		if (!IS_ERR_OR_NULL(hpage))
-			put_page(hpage);
-
+		khugepaged_do_scan();
 		khugepaged_wait_work();
 	}
 }
-- 
2.39.5
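
[Editor's note, not part of the patch] For reference, below is a minimal
userspace sketch (illustration only, not kernel code) of the retry policy
that khugepaged_alloc_hugepage() implements after this change: sleep once
on the first allocation failure, then give up on the next failure so
khugepaged_do_scan() can bail out instead of looping. try_alloc(),
sleep_a_bit() and alloc_with_one_sleep() are hypothetical stand-ins for
alloc_hugepage(), khugepaged_alloc_sleep() and khugepaged_alloc_hugepage().

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* stand-in for alloc_hugepage(): fail the first two calls, then succeed */
static void *try_alloc(void)
{
	static int calls;

	return (++calls >= 3) ? malloc(1) : NULL;
}

/* stand-in for khugepaged_alloc_sleep() */
static void sleep_a_bit(void)
{
	usleep(1000);
}

/*
 * The merged retry policy: on the first failure sleep and clear *wait,
 * on a later failure return NULL so the caller stops scanning.
 */
static void *alloc_with_one_sleep(bool *wait, bool enabled)
{
	void *page;

	do {
		page = try_alloc();
		if (!page) {
			if (!*wait)
				return NULL;	/* second failure: bail out */

			*wait = false;		/* first failure: sleep once */
			sleep_a_bit();
		}
	} while (!page && enabled);

	return page;
}

int main(void)
{
	bool wait = true;
	void *page = alloc_with_one_sleep(&wait, true);

	printf("allocation %s, wait is now %s\n",
	       page ? "succeeded" : "failed", wait ? "true" : "false");
	free(page);
	return 0;
}

With the stub allocator failing twice, the sketch sleeps once, then gives
up, which mirrors why khugepaged_do_scan() no longer needs the extra
pre-alloc loop that khugepaged_loop() used to carry.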