memcg: move HugeTLB resource count to parent cgroup on memcg removal
author    Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
          Thu, 3 May 2012 05:43:41 +0000 (15:43 +1000)
committer Stephen Rothwell <sfr@canb.auug.org.au>
          Thu, 3 May 2012 05:46:26 +0000 (15:46 +1000)
Add support for removing a memcg even when it still has HugeTLB resource usage. Instead of rejecting the rmdir with -EBUSY in mem_cgroup_pre_destroy(), mem_cgroup_force_empty() now calls hugetlb_force_memcg_empty(), which walks each hstate's active list and moves the per-page HugeTLB charge to the parent cgroup. With use_hierarchy set the parent already holds the charge, so only the child's res_counter is uncharged; without it the parent must be charged explicitly, which can fail if the parent's limit is hit.
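
As a hedged illustration of the behaviour change (not part of the patch): before this change, rmdir() on a memory cgroup that still had HugeTLB pages charged to it failed with EBUSY from mem_cgroup_pre_destroy(); afterwards the charge is moved to the parent and the removal succeeds, provided no tasks or child cgroups remain. The cgroupfs mount point and group names below are assumptions.

/*
 * Illustration only; assumes the memory controller is mounted at
 * /sys/fs/cgroup/memory and that parent/child is a memcg which still
 * holds HugeTLB charges but no tasks or child groups.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *child = "/sys/fs/cgroup/memory/parent/child";

	if (rmdir(child))
		/* before this patch: EBUSY while HugeTLB usage > 0 */
		fprintf(stderr, "rmdir %s: %s\n", child, strerror(errno));
	else
		/* after this patch: the charge now shows up in the parent */
		printf("removed %s\n", child);
	return 0;
}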

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hillf Danton <dhillf@gmail.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/hugetlb.h
include/linux/memcontrol.h
mm/hugetlb.c
mm/memcontrol.c

include/linux/hugetlb.h
index 2e09821d70b6b67c6b7e73c3c83fc8d3ccc9bad2..ff450b4fc417d33c5bb9ca3d4edddc9fdadfcb1b 100644
@@ -348,4 +348,12 @@ static inline unsigned int pages_per_huge_page(struct hstate *h)
 #define hstate_index(h) 0
 #endif
 
+#ifdef CONFIG_MEM_RES_CTLR_HUGETLB
+extern int hugetlb_force_memcg_empty(struct cgroup *cgroup);
+#else
+static inline int hugetlb_force_memcg_empty(struct cgroup *cgroup)
+{
+       return 0;
+}
+#endif
 #endif /* _LINUX_HUGETLB_H */
include/linux/memcontrol.h
index 4f1757420ed97b3dee5796a9840134c53171308a..70317e598bf8af01464b5f2ed0a8ce5d8281ec7e 100644
@@ -460,6 +460,9 @@ extern void mem_cgroup_hugetlb_uncharge_page(int idx, unsigned long nr_pages,
 extern void mem_cgroup_hugetlb_uncharge_memcg(int idx, unsigned long nr_pages,
                                              struct mem_cgroup *memcg);
 extern int mem_cgroup_hugetlb_file_init(int idx) __init;
+extern int mem_cgroup_move_hugetlb_parent(int idx, struct cgroup *cgroup,
+                                         struct page *page);
+extern bool mem_cgroup_have_hugetlb_usage(struct cgroup *cgroup);
 
 #else
 static inline int
@@ -496,6 +499,17 @@ static inline int mem_cgroup_hugetlb_file_init(int idx)
        return 0;
 }
 
+static inline int
+mem_cgroup_move_hugetlb_parent(int idx, struct cgroup *cgroup,
+                              struct page *page)
+{
+       return 0;
+}
+
+static inline bool mem_cgroup_have_hugetlb_usage(struct cgroup *cgroup)
+{
+       return 0;
+}
 #endif  /* CONFIG_MEM_RES_CTLR_HUGETLB */
 #endif /* _LINUX_MEMCONTROL_H */
 
mm/hugetlb.c
index 15c0bf8e4fce35def85086898ea82dc06a669ee5..628640124c42084a9d54df1883fc416e5ba58d02 100644
@@ -1909,6 +1909,49 @@ static int __init hugetlb_init(void)
 }
 module_init(hugetlb_init);
 
+#ifdef CONFIG_MEM_RES_CTLR_HUGETLB
+/*
+ * Force the memcg to empty its hugetlb resources by moving them to
+ * the parent cgroup. We can fail if the parent cgroup's limit prevents
+ * the charge. This should only happen if use_hierarchy is not set.
+ */
+int hugetlb_force_memcg_empty(struct cgroup *cgroup)
+{
+       struct hstate *h;
+       struct page *page;
+       int ret = 0, idx = 0;
+
+       do {
+               if (cgroup_task_count(cgroup) || !list_empty(&cgroup->children))
+                       goto out;
+               /*
+                * If the task doing the cgroup_rmdir got a signal,
+                * we don't really need to loop till the hugetlb resource
+                * usage becomes zero.
+                */
+               if (signal_pending(current)) {
+                       ret = -EINTR;
+                       goto out;
+               }
+               for_each_hstate(h) {
+                       spin_lock(&hugetlb_lock);
+                       list_for_each_entry(page, &h->hugepage_activelist, lru) {
+                               ret = mem_cgroup_move_hugetlb_parent(idx, cgroup, page);
+                               if (ret) {
+                                       spin_unlock(&hugetlb_lock);
+                                       goto out;
+                               }
+                       }
+                       spin_unlock(&hugetlb_lock);
+                       idx++;
+               }
+               cond_resched();
+       } while (mem_cgroup_have_hugetlb_usage(cgroup));
+out:
+       return ret;
+}
+#endif
+
 /* Should be called on processing a hugepagesz=... option */
 void __init hugetlb_add_hstate(unsigned order)
 {
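
A side note on the amount moved per list entry, since the walk above visits whole huge pages: each page on h->hugepage_activelist is a compound page, and the mm/memcontrol.c change below moves its full size, PAGE_SIZE << compound_order(page), in one step. A quick userspace check of that arithmetic (the 4 KiB base page and order-9, 2 MiB huge page are assumed x86-64 defaults, not taken from the patch):

/* Illustration only: the charge moved per huge page is the whole
 * compound size, not one base page.  Values assume x86-64 defaults.
 */
#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096;	/* assumed base page size */
	unsigned int order = 9;		/* assumed 2 MiB huge page = 512 base pages */

	/* Mirrors csize = PAGE_SIZE << compound_order(page) in the patch */
	printf("charge per huge page = %lu bytes\n", page_size << order);
	return 0;
}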
mm/memcontrol.c
index 11642849c8e068b6e929c38fcc9fab291d549746..6f7947778308929a5bce37744832702f0ef25b31 100644
@@ -3233,9 +3233,11 @@ static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
 #endif
 
 #ifdef CONFIG_MEM_RES_CTLR_HUGETLB
-static bool mem_cgroup_have_hugetlb_usage(struct mem_cgroup *memcg)
+bool mem_cgroup_have_hugetlb_usage(struct cgroup *cgroup)
 {
        int idx;
+       struct mem_cgroup *memcg = mem_cgroup_from_cont(cgroup);
+
        for (idx = 0; idx < hugetlb_max_hstate; idx++) {
                if ((res_counter_read_u64(&memcg->hugepage[idx], RES_USAGE)) > 0)
                        return 1;
@@ -3333,10 +3335,54 @@ void mem_cgroup_hugetlb_uncharge_memcg(int idx, unsigned long nr_pages,
        res_counter_uncharge(&memcg->hugepage[idx], csize);
        return;
 }
-#else
-static bool mem_cgroup_have_hugetlb_usage(struct mem_cgroup *memcg)
+
+int mem_cgroup_move_hugetlb_parent(int idx, struct cgroup *cgroup,
+                                  struct page *page)
 {
-       return 0;
+       struct page_cgroup *pc;
+       int csize, ret = 0;
+       struct res_counter *fail_res;
+       struct cgroup *pcgrp = cgroup->parent;
+       struct mem_cgroup *parent = mem_cgroup_from_cont(pcgrp);
+       struct mem_cgroup *memcg  = mem_cgroup_from_cont(cgroup);
+
+       if (!get_page_unless_zero(page))
+               goto out;
+
+       pc = lookup_page_cgroup(page);
+       lock_page_cgroup(pc);
+       if (!PageCgroupUsed(pc) || pc->mem_cgroup != memcg)
+               goto err_out;
+
+       csize = PAGE_SIZE << compound_order(page);
+       /*
+        * If we have use_hierarchy set we can never fail here. So instead of
+        * using res_counter_uncharge, use the open-coded variant which just
+        * uncharges the child res_counter. The parent will retain the charge.
+        */
+       if (parent->use_hierarchy) {
+               unsigned long flags;
+               struct res_counter *counter;
+
+               counter = &memcg->hugepage[idx];
+               spin_lock_irqsave(&counter->lock, flags);
+               res_counter_uncharge_locked(counter, csize);
+               spin_unlock_irqrestore(&counter->lock, flags);
+       } else {
+               ret = res_counter_charge(&parent->hugepage[idx],
+                                        csize, &fail_res);
+               if (ret) {
+                       ret = -EBUSY;
+                       goto err_out;
+               }
+               res_counter_uncharge(&memcg->hugepage[idx], csize);
+       }
+       pc->mem_cgroup = parent;
+err_out:
+       unlock_page_cgroup(pc);
+       put_page(page);
+out:
+       return ret;
 }
 #endif /* CONFIG_MEM_RES_CTLR_HUGETLB */
 
@@ -3856,6 +3902,11 @@ static int mem_cgroup_force_empty(struct mem_cgroup *memcg, bool free_all)
        /* should free all ? */
        if (free_all)
                goto try_to_free;
+
+       /* move the hugetlb charges */
+       ret = hugetlb_force_memcg_empty(cgrp);
+       if (ret)
+               goto out;
 move_account:
        do {
                ret = -EBUSY;
@@ -5186,12 +5237,6 @@ free_out:
 static int mem_cgroup_pre_destroy(struct cgroup *cont)
 {
        struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
-       /*
-        * Don't allow memcg removal if we have HugeTLB resource
-        * usage.
-        */
-       if (mem_cgroup_have_hugetlb_usage(memcg))
-               return -EBUSY;
 
        return mem_cgroup_force_empty(memcg, false);
 }
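
The open-coded uncharge in mem_cgroup_move_hugetlb_parent() above is the crux of the move. With use_hierarchy, the parent's res_counter was already charged when the page was first charged to the child, so moving the charge up only means dropping it from the child's counter at its own level; a plain res_counter_uncharge() would walk up the hierarchy and drop it from the parent too. A toy userspace model of that difference (the struct and helpers are illustrative stand-ins, not the kernel's res_counter API):

/* Toy model of hierarchical counters, illustration only. */
#include <stdio.h>

struct counter {
	unsigned long usage;
	struct counter *parent;
};

/* Like res_counter_uncharge(): walks up and uncharges every ancestor too. */
static void uncharge_all_levels(struct counter *c, unsigned long bytes)
{
	for (; c; c = c->parent)
		c->usage -= bytes;
}

/* Like the open-coded res_counter_uncharge_locked() on the child alone:
 * drops the charge at one level, so the parent keeps it. */
static void uncharge_child_only(struct counter *c, unsigned long bytes)
{
	c->usage -= bytes;
}

int main(void)
{
	/* With use_hierarchy, charging the child charged the parent as well. */
	struct counter parent = { .usage = 2097152, .parent = NULL };
	struct counter child  = { .usage = 2097152, .parent = &parent };

	/* What the patch does: drop the child's charge only. */
	uncharge_child_only(&child, 2097152);
	printf("move:     child=%lu parent=%lu\n", child.usage, parent.usage);
	/* prints child=0 parent=2097152: the parent retains the charge */

	/* What a plain hierarchical uncharge would have done instead. */
	child.usage = 2097152;
	parent.usage = 2097152;
	uncharge_all_levels(&child, 2097152);
	printf("uncharge: child=%lu parent=%lu\n", child.usage, parent.usage);
	/* prints child=0 parent=0: the parent would lose the charge as well */

	return 0;
}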