#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
+#include <linux/memcontrol.h>
+#include <linux/page_cgroup.h>
#include <asm/page.h>
#include <asm/pgtable.h>
/*
 * NOTE(review): diff-hunk fragment — presumably the interior of
 * free_huge_page() in mm/hugetlb.c; the enclosing function is not
 * visible here, so all claims below are limited to the visible lines.
 */
BUG_ON(page_mapcount(page));
INIT_LIST_HEAD(&page->lru);
/*
 * Added by this patch: uncharge the hugetlb cgroup accounting for the
 * page being freed. In this hunk the uncharge runs before hugetlb_lock
 * is taken — TODO confirm against the full patch that the uncharge
 * helper must not be called under hugetlb_lock.
 */
+ mem_cgroup_hugetlb_uncharge_page(hstate_index(h),
+ pages_per_huge_page(h), page);
spin_lock(&hugetlb_lock);
/*
 * Surplus pages of buddy-allocatable order are released back to the
 * buddy allocator rather than returned to the hugepage pool.
 */
if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
update_and_free_page(h, page);
/*
 * NOTE(review): diff-hunk fragment — presumably the interior of
 * alloc_huge_page() in mm/hugetlb.c; the function signature and some
 * context lines (including where 'chg' is computed) are not visible.
 */
struct hstate *h = hstate_vma(vma);
struct page *page;
long chg;
/* Added: hstate index and memcg handle for hugetlb cgroup charging. */
+ int ret, idx;
+ struct mem_cgroup *memcg;
+ idx = hstate_index(h);
/*
 * Processes that did not create the mapping will have no
 * reserves and will not have accounted against subpool
 */
if (hugepage_subpool_get_pages(spool, chg))
return ERR_PTR(-ENOSPC);
/*
 * Added: charge the hugetlb cgroup before dequeue/allocation; on
 * failure, roll back the subpool reservation taken above.
 *
 * NOTE(review): the error from mem_cgroup_hugetlb_charge_page() ('ret',
 * likely -ENOMEM from the cgroup limit) is discarded and -ENOSPC is
 * returned instead — verify this matches existing alloc_huge_page
 * error conventions, or consider ERR_PTR(ret).
 */
+ ret = mem_cgroup_hugetlb_charge_page(idx, pages_per_huge_page(h),
+ &memcg);
+ if (ret) {
+ hugepage_subpool_put_pages(spool, chg);
+ return ERR_PTR(-ENOSPC);
+ }
spin_lock(&hugetlb_lock);
page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
spin_unlock(&hugetlb_lock);
/* Pool empty: fall back to a fresh buddy allocation. */
if (!page) {
page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
if (!page) {
/*
 * Added: allocation failed after charging — undo the cgroup
 * charge (against 'memcg', no page yet) and the subpool
 * reservation before failing.
 */
+ mem_cgroup_hugetlb_uncharge_memcg(idx,
+ pages_per_huge_page(h),
+ memcg);
hugepage_subpool_put_pages(spool, chg);
return ERR_PTR(-ENOSPC);
}
set_page_private(page, (unsigned long)spool);
vma_commit_reservation(h, vma, addr);
-
/*
 * Added: bind the pending charge to the page now that allocation
 * succeeded, so the later free path can uncharge via the page.
 */
+ /* update page cgroup details */
+ mem_cgroup_hugetlb_commit_charge(idx, pages_per_huge_page(h),
+ memcg, page);
return page;
}