struct page *page);
extern bool mem_cgroup_have_hugetlb_usage(struct cgroup *cgroup);
+/* Transfer hugetlb page_cgroup accounting from @oldhpage to @newhpage
+ * during hugepage migration (implemented in mm/memcontrol.c). */
+extern void mem_cgroup_hugetlb_migrate(struct page *oldhpage,
+ struct page *newhpage);
#else
static inline int
mem_cgroup_hugetlb_charge_page(int idx, unsigned long nr_pages,
return 0;
}
+/* No-op stub used when CONFIG_MEM_RES_CTLR_HUGETLB is not enabled:
+ * there is no per-cgroup hugetlb accounting to move on migration. */
+static inline void mem_cgroup_hugetlb_migrate(struct page *oldhpage,
+ struct page *newhpage)
+{
+	return;
+}
+
#endif /* CONFIG_MEM_RES_CTLR_HUGETLB */
#endif /* _LINUX_MEMCONTROL_H */
out:
return ret;
}
+
+/*
+ * mem_cgroup_hugetlb_migrate - move hugetlb memcg ownership during migration
+ * @oldhpage: source huge page whose page_cgroup carries the charge
+ * @newhpage: destination huge page that takes over the charge
+ *
+ * Transfers the page_cgroup association from @oldhpage to @newhpage so the
+ * memcg charge follows the data across hugepage migration.  The old pc is
+ * cleared under its own lock before the new pc is claimed under its lock;
+ * between the two critical sections the memcg is pinned against cgroup
+ * removal via cgroup_exclude_rmdir().
+ */
+void mem_cgroup_hugetlb_migrate(struct page *oldhpage, struct page *newhpage)
+{
+	struct mem_cgroup *memcg;
+	struct page_cgroup *pc;
+
+	/* Caller must pass a huge page; regular pages take another path. */
+	VM_BUG_ON(!PageHuge(oldhpage));
+
+	/* Nothing to move when the memory controller is disabled. */
+	if (mem_cgroup_disabled())
+		return;
+
+	/* Detach the charge from the old page's page_cgroup.  The pc is
+	 * parked on root_mem_cgroup once Used is cleared so it never points
+	 * at a stale memcg.  cgroup_exclude_rmdir() is taken while we still
+	 * hold the lock, so the memcg cannot be destroyed before the new
+	 * page re-references it below. */
+	pc = lookup_page_cgroup(oldhpage);
+	lock_page_cgroup(pc);
+	memcg = pc->mem_cgroup;
+	pc->mem_cgroup = root_mem_cgroup;
+	ClearPageCgroupUsed(pc);
+	cgroup_exclude_rmdir(&memcg->css);
+	unlock_page_cgroup(pc);
+
+	/* move the mem_cg details to new cgroup */
+	pc = lookup_page_cgroup(newhpage);
+	lock_page_cgroup(pc);
+	pc->mem_cgroup = memcg;
+	SetPageCgroupUsed(pc);
+	unlock_page_cgroup(pc);
+	/* Drop the rmdir exclusion and wake any pending cgroup removal. */
+	cgroup_release_and_wakeup_rmdir(&memcg->css);
+	return;
+}
#endif /* CONFIG_MEM_RES_CTLR_HUGETLB */
/*