From ab756cbd66b553759d425b237f14a258f392c788 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Thu, 3 May 2012 15:43:39 +1000 Subject: [PATCH] hugetlb: add charge/uncharge calls for HugeTLB alloc/free This adds necessary charge/uncharge calls in the HugeTLB code. We do memcg charge in page alloc and uncharge in compound page destructor. We also need to ignore HugeTLB pages in __mem_cgroup_uncharge_common because that gets called from delete_from_page_cache. Signed-off-by: Aneesh Kumar K.V Reviewed-by: KAMEZAWA Hiroyuki Acked-by: Hillf Danton Cc: Michal Hocko Cc: Andrea Arcangeli Cc: Johannes Weiner Signed-off-by: Andrew Morton --- mm/hugetlb.c | 20 +++++++++++++++++++- mm/memcontrol.c | 5 +++++ 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 69e628b93d42..10980022b191 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -21,6 +21,8 @@ #include #include #include +#include +#include #include #include @@ -628,6 +630,8 @@ static void free_huge_page(struct page *page) BUG_ON(page_mapcount(page)); INIT_LIST_HEAD(&page->lru); + mem_cgroup_hugetlb_uncharge_page(hstate_index(h), + pages_per_huge_page(h), page); spin_lock(&hugetlb_lock); if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) { update_and_free_page(h, page); @@ -1113,7 +1117,10 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma, struct hstate *h = hstate_vma(vma); struct page *page; long chg; + int ret, idx; + struct mem_cgroup *memcg; + idx = hstate_index(h); /* * Processes that did not create the mapping will have no * reserves and will not have accounted against subpool @@ -1129,6 +1136,12 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma, if (hugepage_subpool_get_pages(spool, chg)) return ERR_PTR(-ENOSPC); + ret = mem_cgroup_hugetlb_charge_page(idx, pages_per_huge_page(h), + &memcg); + if (ret) { + hugepage_subpool_put_pages(spool, chg); + return ERR_PTR(-ENOSPC); + } spin_lock(&hugetlb_lock); page = 
dequeue_huge_page_vma(h, vma, addr, avoid_reserve); spin_unlock(&hugetlb_lock); @@ -1136,6 +1149,9 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma, if (!page) { page = alloc_buddy_huge_page(h, NUMA_NO_NODE); if (!page) { + mem_cgroup_hugetlb_uncharge_memcg(idx, + pages_per_huge_page(h), + memcg); hugepage_subpool_put_pages(spool, chg); return ERR_PTR(-ENOSPC); } @@ -1144,7 +1160,9 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma, set_page_private(page, (unsigned long)spool); vma_commit_reservation(h, vma, addr); - + /* update page cgroup details */ + mem_cgroup_hugetlb_commit_charge(idx, pages_per_huge_page(h), + memcg, page); return page; } diff --git a/mm/memcontrol.c b/mm/memcontrol.c index d10288836e5a..201be330b3fe 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2971,6 +2971,11 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype) if (PageSwapCache(page)) return NULL; + /* + * HugeTLB page uncharge happen in the HugeTLB compound page destructor + */ + if (PageHuge(page)) + return NULL; if (PageTransHuge(page)) { nr_pages <<= compound_order(page); -- 2.39.5