mm: hugetlb: cleanup using PageHugeActive()
author	Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>	Tue, 7 Apr 2015 23:44:34 +0000 (09:44 +1000)
committer	Stephen Rothwell <sfr@canb.auug.org.au>	Tue, 7 Apr 2015 23:44:34 +0000 (09:44 +1000)
Now that we have easy access to hugepage activeness, the existing helpers
used to get this information can be cleaned up.
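
[Editor's note: PageHugeActive() is introduced by the preceding patch in
this series; this cleanup only consumes it. A minimal sketch of the helper
as assumed from that patch (not part of this diff) -- the flag is kept on
the first tail page, so tail pages naturally report inactive:

	/* sketch of the helper assumed by this cleanup */
	int PageHugeActive(struct page *page)
	{
		VM_BUG_ON_PAGE(!PageHuge(page), page);
		/* "active" == linked on hstate->hugepage_activelist;
		 * the bit lives on the first tail page, so calling this
		 * on a tail page returns false */
		return PageHead(page) && PagePrivate(&page[1]);
	}
]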

Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Hugh Dickins <hughd@google.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/hugetlb.h
mm/hugetlb.c
mm/memory_hotplug.c

index 5713c49a4a5c4e1544ee63e7f9dc7a54364cbf84..e7f6178b8f49f306ccfb85803d7f5d98b1708501 100644 (file)
@@ -44,6 +44,8 @@ extern int hugetlb_max_hstate __read_mostly;
 #define for_each_hstate(h) \
        for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
 
+int PageHugeActive(struct page *page);
+
 struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
                                                long min_hpages);
 void hugepage_put_subpool(struct hugepage_subpool *spool);
@@ -84,7 +86,6 @@ void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
 int dequeue_hwpoisoned_huge_page(struct page *page);
 bool isolate_huge_page(struct page *page, struct list_head *list);
 void putback_active_hugepage(struct page *page);
-bool is_hugepage_active(struct page *page);
 void free_huge_page(struct page *page);
 
 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
@@ -114,6 +115,11 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 
 #else /* !CONFIG_HUGETLB_PAGE */
 
+static inline int PageHugeActive(struct page *page)
+{
+       return 0;
+}
+
 static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
 {
 }
@@ -152,7 +158,6 @@ static inline bool isolate_huge_page(struct page *page, struct list_head *list)
        return false;
 }
 #define putback_active_hugepage(p)     do {} while (0)
-#define is_hugepage_active(x)  false
 
 static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                unsigned long address, unsigned long end, pgprot_t newprot)
index e837e0b8646acfd2d1bb437ced03f415ffae340d..1b1dc7494a3cecedbe4346f92f1a807bc9306be5 100644 (file)
@@ -3897,20 +3897,6 @@ follow_huge_pud(struct mm_struct *mm, unsigned long address,
 
 #ifdef CONFIG_MEMORY_FAILURE
 
-/* Should be called in hugetlb_lock */
-static int is_hugepage_on_freelist(struct page *hpage)
-{
-       struct page *page;
-       struct page *tmp;
-       struct hstate *h = page_hstate(hpage);
-       int nid = page_to_nid(hpage);
-
-       list_for_each_entry_safe(page, tmp, &h->hugepage_freelists[nid], lru)
-               if (page == hpage)
-                       return 1;
-       return 0;
-}
-
 /*
  * This function is called from memory failure code.
  * Assume the caller holds page lock of the head page.
@@ -3922,7 +3908,11 @@ int dequeue_hwpoisoned_huge_page(struct page *hpage)
        int ret = -EBUSY;
 
        spin_lock(&hugetlb_lock);
-       if (is_hugepage_on_freelist(hpage)) {
+       /*
+        * Just checking !PageHugeActive is not enough, because that could be
+        * an isolated/hwpoisoned hugepage (which have >0 refcount).
+        */
+       if (!PageHugeActive(hpage) && !page_count(hpage)) {
                /*
                 * Hwpoisoned hugepage isn't linked to activelist or freelist,
                 * but dangling hpage->lru can trigger list-debug warnings
@@ -3966,25 +3956,3 @@ void putback_active_hugepage(struct page *page)
        spin_unlock(&hugetlb_lock);
        put_page(page);
 }
-
-bool is_hugepage_active(struct page *page)
-{
-       VM_BUG_ON_PAGE(!PageHuge(page), page);
-       /*
-        * This function can be called for a tail page because the caller,
-        * scan_movable_pages, scans through a given pfn-range which typically
-        * covers one memory block. In systems using gigantic hugepage (1GB
-        * for x86_64,) a hugepage is larger than a memory block, and we don't
-        * support migrating such large hugepages for now, so return false
-        * when called for tail pages.
-        */
-       if (PageTail(page))
-               return false;
-       /*
-        * Refcount of a hwpoisoned hugepages is 1, but they are not active,
-        * so we should return false for them.
-        */
-       if (unlikely(PageHWPoison(page)))
-               return false;
-       return page_count(page) > 0;
-}
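
[Editor's note on the dequeue_hwpoisoned_huge_page() hunk above: the
open-coded walk of the per-node free list is replaced by a direct state
test. Under hugetlb_lock, a hugepage sits on a free list exactly when it
is neither active nor referenced; isolated and hwpoisoned hugepages are
also !PageHugeActive but hold a reference, which is why the refcount check
is still needed. A hypothetical condensed helper, for illustration only
(the patch open-codes this test):

	/* illustration only -- not part of the patch;
	 * caller must hold hugetlb_lock */
	static bool hugepage_on_freelist(struct page *hpage)
	{
		/* isolated/hwpoisoned hugepages are inactive but keep a
		 * refcount, so page_count() separates them from
		 * genuinely free hugepages */
		return !PageHugeActive(hpage) && page_count(hpage) == 0;
	}
]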
index e2e8014fb75548bfca8c720e39fd79f5328d9533..113538a2e965d8587d023b4b8c0d79d481aa28f5 100644 (file)
@@ -1373,7 +1373,7 @@ static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
                        if (PageLRU(page))
                                return pfn;
                        if (PageHuge(page)) {
-                               if (is_hugepage_active(page))
+                               if (PageHugeActive(page))
                                        return pfn;
                                else
                                        pfn = round_up(pfn + 1,