git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
hugetlb: add hstate_is_gigantic()
author: Luiz Capitulino <lcapitulino@redhat.com>
Wed, 14 May 2014 00:01:44 +0000 (10:01 +1000)
committer: Stephen Rothwell <sfr@canb.auug.org.au>
Wed, 14 May 2014 00:01:44 +0000 (10:01 +1000)
Signed-off-by: Luiz Capitulino <lcapitulino@redhat.com>
Reviewed-by: Andrea Arcangeli <aarcange@redhat.com>
Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Reviewed-by: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
Reviewed-by: Davidlohr Bueso <davidlohr@hp.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Reviewed-by: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/hugetlb.h
mm/hugetlb.c

index 63214868c5b2c37dd7b3715f280ac22e8c24eb96..0683f55cb2f43c36a7fa2282c6cc8b6b02489ff2 100644 (file)
@@ -336,6 +336,11 @@ static inline unsigned huge_page_shift(struct hstate *h)
        return h->order + PAGE_SHIFT;
 }
 
+static inline bool hstate_is_gigantic(struct hstate *h)
+{
+       return huge_page_order(h) >= MAX_ORDER;
+}
+
 static inline unsigned int pages_per_huge_page(struct hstate *h)
 {
        return 1 << h->order;
index 247ca2652ac376185e03e59a802e45587918a540..3ba04bca0525713c1577568526b9cd8bbf53831a 100644 (file)
@@ -612,7 +612,7 @@ static void update_and_free_page(struct hstate *h, struct page *page)
 {
        int i;
 
-       VM_BUG_ON(h->order >= MAX_ORDER);
+       VM_BUG_ON(hstate_is_gigantic(h));
 
        h->nr_huge_pages--;
        h->nr_huge_pages_node[page_to_nid(page)]--;
@@ -665,7 +665,7 @@ static void free_huge_page(struct page *page)
        if (restore_reserve)
                h->resv_huge_pages++;
 
-       if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
+       if (h->surplus_huge_pages_node[nid] && !hstate_is_gigantic(h)) {
                /* remove the page from active list */
                list_del(&page->lru);
                update_and_free_page(h, page);
@@ -769,7 +769,7 @@ static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
 {
        struct page *page;
 
-       if (h->order >= MAX_ORDER)
+       if (hstate_is_gigantic(h))
                return NULL;
 
        page = alloc_pages_exact_node(nid,
@@ -963,7 +963,7 @@ static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
        struct page *page;
        unsigned int r_nid;
 
-       if (h->order >= MAX_ORDER)
+       if (hstate_is_gigantic(h))
                return NULL;
 
        /*
@@ -1156,7 +1156,7 @@ static void return_unused_surplus_pages(struct hstate *h,
        h->resv_huge_pages -= unused_resv_pages;
 
        /* Cannot return gigantic pages currently */
-       if (h->order >= MAX_ORDER)
+       if (hstate_is_gigantic(h))
                return;
 
        nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
@@ -1356,7 +1356,7 @@ static void __init gather_bootmem_prealloc(void)
                 * fix confusing memory reports from free(1) and another
                 * side-effects, like CommitLimit going negative.
                 */
-               if (h->order > (MAX_ORDER - 1))
+               if (hstate_is_gigantic(h))
                        adjust_managed_page_count(page, 1 << h->order);
        }
 }
@@ -1366,7 +1366,7 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
        unsigned long i;
 
        for (i = 0; i < h->max_huge_pages; ++i) {
-               if (h->order >= MAX_ORDER) {
+               if (hstate_is_gigantic(h)) {
                        if (!alloc_bootmem_huge_page(h))
                                break;
                } else if (!alloc_fresh_huge_page(h,
@@ -1382,7 +1382,7 @@ static void __init hugetlb_init_hstates(void)
 
        for_each_hstate(h) {
                /* oversize hugepages were init'ed in early boot */
-               if (h->order < MAX_ORDER)
+               if (!hstate_is_gigantic(h))
                        hugetlb_hstate_alloc_pages(h);
        }
 }
@@ -1416,7 +1416,7 @@ static void try_to_free_low(struct hstate *h, unsigned long count,
 {
        int i;
 
-       if (h->order >= MAX_ORDER)
+       if (hstate_is_gigantic(h))
                return;
 
        for_each_node_mask(i, *nodes_allowed) {
@@ -1479,7 +1479,7 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
 {
        unsigned long min_count, ret;
 
-       if (h->order >= MAX_ORDER)
+       if (hstate_is_gigantic(h))
                return h->max_huge_pages;
 
        /*
@@ -1606,7 +1606,7 @@ static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
                goto out;
 
        h = kobj_to_hstate(kobj, &nid);
-       if (h->order >= MAX_ORDER) {
+       if (hstate_is_gigantic(h)) {
                err = -EINVAL;
                goto out;
        }
@@ -1689,7 +1689,7 @@ static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
        unsigned long input;
        struct hstate *h = kobj_to_hstate(kobj, NULL);
 
-       if (h->order >= MAX_ORDER)
+       if (hstate_is_gigantic(h))
                return -EINVAL;
 
        err = kstrtoul(buf, 10, &input);
@@ -2113,7 +2113,7 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
 
        tmp = h->max_huge_pages;
 
-       if (write && h->order >= MAX_ORDER)
+       if (write && hstate_is_gigantic(h))
                return -EINVAL;
 
        table->data = &tmp;
@@ -2169,7 +2169,7 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
 
        tmp = h->nr_overcommit_huge_pages;
 
-       if (write && h->order >= MAX_ORDER)
+       if (write && hstate_is_gigantic(h))
                return -EINVAL;
 
        table->data = &tmp;