hugetlb/cgroup: add the cgroup pointer to page lru
author    Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
          Sat, 21 Jul 2012 00:54:01 +0000 (10:54 +1000)
committer Stephen Rothwell <sfr@canb.auug.org.au>
          Wed, 25 Jul 2012 03:02:25 +0000 (13:02 +1000)
Add the hugetlb cgroup pointer to the 3rd page's lru.next.  This limits the
use of hugetlb cgroup to hugepages made up of 3 or more normal pages.  I
guess that is an acceptable limitation.
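
To illustrate the scheme outside the kernel, here is a minimal userspace
sketch of the idea: the cgroup pointer is stashed in lru.next of the
compound page's third struct page, so hugepages smaller than
HUGETLB_CGROUP_MIN_ORDER cannot carry it.  The struct page, list_head and
order field below are simplified stand-ins for the kernel's types, not the
real definitions; the accessors mirror the helpers added by this patch.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's list_head and struct page. */
struct list_head { struct list_head *next, *prev; };
struct page { struct list_head lru; unsigned int order; };

struct hugetlb_cgroup { int id; };

#define HUGETLB_CGROUP_MIN_ORDER 2

/* Stash the cgroup pointer in the 3rd page's lru.next. */
static int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg)
{
	if (page->order < HUGETLB_CGROUP_MIN_ORDER)
		return -1;		/* too small to carry the pointer */
	page[2].lru.next = (void *)h_cg;
	return 0;
}

static struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
{
	if (page->order < HUGETLB_CGROUP_MIN_ORDER)
		return NULL;
	return (struct hugetlb_cgroup *)page[2].lru.next;
}

int main(void)
{
	/* Model an order-2 compound page (4 struct pages). */
	struct page huge[4] = { { .order = 2 } };
	struct hugetlb_cgroup cg = { .id = 42 };

	assert(set_hugetlb_cgroup(huge, &cg) == 0);
	printf("cgroup id = %d\n", hugetlb_cgroup_from_page(huge)->id);
	return 0;
}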

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: David Rientjes <rientjes@google.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hillf Danton <dhillf@gmail.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/hugetlb_cgroup.h
mm/hugetlb.c

--- a/include/linux/hugetlb_cgroup.h
+++ b/include/linux/hugetlb_cgroup.h
index f19889e56b472a82cab80c8b4dab67344f0cf23f..e5451a3b4ebc07156c829bc79ad6b8f9795aeceb 100644
 #include <linux/res_counter.h>
 
 struct hugetlb_cgroup;
+/*
+ * Minimum page order trackable by hugetlb cgroup.
+ * At least 3 pages are necessary for all the tracking information.
+ */
+#define HUGETLB_CGROUP_MIN_ORDER       2
 
 #ifdef CONFIG_CGROUP_HUGETLB
+
+static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
+{
+       VM_BUG_ON(!PageHuge(page));
+
+       if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
+               return NULL;
+       return (struct hugetlb_cgroup *)page[2].lru.next;
+}
+
+static inline
+int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg)
+{
+       VM_BUG_ON(!PageHuge(page));
+
+       if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
+               return -1;
+       page[2].lru.next = (void *)h_cg;
+       return 0;
+}
+
 static inline bool hugetlb_cgroup_disabled(void)
 {
        if (hugetlb_subsys.disabled)
@@ -28,6 +54,17 @@ static inline bool hugetlb_cgroup_disabled(void)
 }
 
 #else
+static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
+{
+       return NULL;
+}
+
+static inline
+int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg)
+{
+       return 0;
+}
+
 static inline bool hugetlb_cgroup_disabled(void)
 {
        return true;
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
index d5971597736b647c6062746efb4f048c35a8c37f..efe29b53daff85620bd6fa3c40d3964cd721642a 100644
@@ -28,6 +28,7 @@
 
 #include <linux/io.h>
 #include <linux/hugetlb.h>
+#include <linux/hugetlb_cgroup.h>
 #include <linux/node.h>
 #include "internal.h"
 
@@ -591,6 +592,7 @@ static void update_and_free_page(struct hstate *h, struct page *page)
                                1 << PG_active | 1 << PG_reserved |
                                1 << PG_private | 1 << PG_writeback);
        }
+       VM_BUG_ON(hugetlb_cgroup_from_page(page));
        set_compound_page_dtor(page, NULL);
        set_page_refcounted(page);
        arch_release_hugepage(page);
@@ -643,6 +645,7 @@ static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
        INIT_LIST_HEAD(&page->lru);
        set_compound_page_dtor(page, free_huge_page);
        spin_lock(&hugetlb_lock);
+       set_hugetlb_cgroup(page, NULL);
        h->nr_huge_pages++;
        h->nr_huge_pages_node[nid]++;
        spin_unlock(&hugetlb_lock);
@@ -892,6 +895,7 @@ static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
                INIT_LIST_HEAD(&page->lru);
                r_nid = page_to_nid(page);
                set_compound_page_dtor(page, free_huge_page);
+               set_hugetlb_cgroup(page, NULL);
                /*
                 * We incremented the global counters already
                 */