mm-hugetlb-introduce-pagehugeactive-flag-fix
author    Andrew Morton <akpm@linux-foundation.org>
          Tue, 7 Apr 2015 23:44:34 +0000 (09:44 +1000)
committer Stephen Rothwell <sfr@canb.auug.org.au>
          Tue, 7 Apr 2015 23:44:34 +0000 (09:44 +1000)
s/PageHugeActive/page_huge_active/, make it return bool

Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
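
The pattern the renamed helpers support is test-and-clear under hugetlb_lock: a
page is only isolated for migration while its active flag is set, and clearing
the flag makes it invisible to concurrent isolators. A minimal caller sketch
using the new names (it mirrors the patched isolate_huge_page() in the diff
below; the wrapper name here is hypothetical):

static bool try_isolate_hugepage(struct page *page, struct list_head *list)
{
	bool ret = true;

	spin_lock(&hugetlb_lock);
	/* false for tail pages and for freed (inactive) hugepages */
	if (!page_huge_active(page) || !get_page_unless_zero(page)) {
		ret = false;
		goto unlock;
	}
	/* clearing the flag keeps other isolators from racing on this page */
	clear_page_huge_active(page);
	list_move_tail(&page->lru, list);
unlock:
	spin_unlock(&hugetlb_lock);
	return ret;
}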
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 4f98d6ba5b50c3c9d9af11095bff9407f54c895b..b527a7a6a8dc432e841505c1a37a4450b3c60418 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -925,25 +925,25 @@ struct hstate *size_to_hstate(unsigned long size)
 }
 
 /*
- * Page flag to show that the hugepage is "active/in-use" (i.e. being linked to
- * hstate->hugepage_activelist.)
+ * Test to determine whether the hugepage is "active/in-use" (i.e. being linked
+ * to hstate->hugepage_activelist.)
  *
  * This function can be called for tail pages, but never returns true for them.
  */
-int PageHugeActive(struct page *page)
+bool page_huge_active(struct page *page)
 {
        VM_BUG_ON_PAGE(!PageHuge(page), page);
        return PageHead(page) && PagePrivate(&page[1]);
 }
 
 /* never called for tail page */
-void SetPageHugeActive(struct page *page)
+void set_page_huge_active(struct page *page)
 {
        VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
        SetPagePrivate(&page[1]);
 }
 
-void ClearPageHugeActive(struct page *page)
+void clear_page_huge_active(struct page *page)
 {
        VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
        ClearPagePrivate(&page[1]);
@@ -977,7 +977,7 @@ void free_huge_page(struct page *page)
                restore_reserve = true;
 
        spin_lock(&hugetlb_lock);
-       ClearPageHugeActive(page);
+       clear_page_huge_active(page);
        hugetlb_cgroup_uncharge_page(hstate_index(h),
                                     pages_per_huge_page(h), page);
        if (restore_reserve)
@@ -2998,7 +2998,7 @@ retry_avoidcopy:
        copy_user_huge_page(new_page, old_page, address, vma,
                            pages_per_huge_page(h));
        __SetPageUptodate(new_page);
-       SetPageHugeActive(new_page);
+       set_page_huge_active(new_page);
 
        mmun_start = address & huge_page_mask(h);
        mmun_end = mmun_start + huge_page_size(h);
@@ -3111,7 +3111,7 @@ retry:
                }
                clear_huge_page(page, address, pages_per_huge_page(h));
                __SetPageUptodate(page);
-               SetPageHugeActive(page);
+               set_page_huge_active(page);
 
                if (vma->vm_flags & VM_MAYSHARE) {
                        int err;
@@ -3946,11 +3946,11 @@ bool isolate_huge_page(struct page *page, struct list_head *list)
 
        VM_BUG_ON_PAGE(!PageHead(page), page);
        spin_lock(&hugetlb_lock);
-       if (!PageHugeActive(page) || !get_page_unless_zero(page)) {
+       if (!page_huge_active(page) || !get_page_unless_zero(page)) {
                ret = false;
                goto unlock;
        }
-       ClearPageHugeActive(page);
+       clear_page_huge_active(page);
        list_move_tail(&page->lru, list);
 unlock:
        spin_unlock(&hugetlb_lock);
@@ -3961,7 +3961,7 @@ void putback_active_hugepage(struct page *page)
 {
        VM_BUG_ON_PAGE(!PageHead(page), page);
        spin_lock(&hugetlb_lock);
-       SetPageHugeActive(page);
+       set_page_huge_active(page);
        list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
        spin_unlock(&hugetlb_lock);
        put_page(page);
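
For reference, the reason page_huge_active() can safely be called on tail pages
is visible in its body: PageHead() is false for a tail page, so the expression
short-circuits before &page[1] is examined. An annotated copy of the patched
function (same code as the first hunk above, with comments added):

bool page_huge_active(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	/*
	 * The active bit is stored as the Private flag of the first
	 * tail page, page[1]. For a tail page PageHead() is false and
	 * the && short-circuits, so tail pages always return false.
	 */
	return PageHead(page) && PagePrivate(&page[1]);
}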