memcg/sl[au]b: track all the memcg children of a kmem_cache
author	Glauber Costa <glommer@parallels.com>
	Sat, 3 Nov 2012 00:42:32 +0000 (11:42 +1100)
committer	Stephen Rothwell <sfr@canb.auug.org.au>
	Wed, 7 Nov 2012 04:15:17 +0000 (15:11 +1100)
This enables us to remove all the children of a kmem_cache being
destroyed, if, for example, the kernel module it's being used in gets
unloaded.  Otherwise, the children would still point to the destroyed
parent.
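
For reference, a minimal sketch of the per-cache memcg bookkeeping the
new function walks.  Field names follow the diff below; the stand-in
types and the layout are simplified for illustration and are not the
kernel's actual definitions:

	#include <stdbool.h>

	struct kmem_cache;                    /* opaque here */
	struct delayed_work { int pending; }; /* stand-in for the kernel type */

	struct memcg_cache_params {
		bool is_root_cache;          /* set only on the parent (root) cache */
		bool dead;                   /* the owning memcg has been removed */
		struct delayed_work destroy; /* deferred destruction worker */
		/* root cache only: one child-cache slot per memcg,
		 * memcg_limited_groups_array_size entries long */
		struct kmem_cache *memcg_caches[];
	};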

Signed-off-by: Suleiman Souhlal <suleiman@google.com>
Signed-off-by: Glauber Costa <glommer@parallels.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Frederic Weisbecker <fweisbec@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: JoonSoo Kim <js1304@gmail.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Rik van Riel <riel@redhat.com>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/memcontrol.h
mm/memcontrol.c
mm/slab_common.c

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 7d59852f1dcefd7409f310e8e84cf1b5b0490c7b..d5511cc5762ea22bd4800b00bd9e8e75001503ed 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -447,6 +447,7 @@ struct kmem_cache *
 __memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp);
 
 void mem_cgroup_destroy_cache(struct kmem_cache *cachep);
+void kmem_cache_destroy_memcg_children(struct kmem_cache *s);
 
 /**
  * memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed.
@@ -594,6 +595,10 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
 {
        return cachep;
 }
+
+static inline void kmem_cache_destroy_memcg_children(struct kmem_cache *s)
+{
+}
 #endif /* CONFIG_MEMCG_KMEM */
 #endif /* _LINUX_MEMCONTROL_H */
 
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 67d1c5868965c3a1b35ef5cc03d18cb2a2c663b7..84d19e367e7f9d7b7e5302c077029924750506ec 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2740,6 +2740,8 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
        memcg_check_events(memcg, page);
 }
 
+static DEFINE_MUTEX(set_limit_mutex);
+
 #ifdef CONFIG_MEMCG_KMEM
 static inline bool memcg_can_account_kmem(struct mem_cgroup *memcg)
 {
@@ -3152,6 +3154,51 @@ out:
        return new_cachep;
 }
 
+void kmem_cache_destroy_memcg_children(struct kmem_cache *s)
+{
+       struct kmem_cache *c;
+       int i;
+
+       if (!s->memcg_params)
+               return;
+       if (!s->memcg_params->is_root_cache)
+               return;
+
+       /*
+        * If the cache is being destroyed, we trust that there is no one else
+        * requesting objects from it. Even if there are, the sanity checks in
+        * kmem_cache_destroy should catch this ill case.
+        *
+        * Still, we don't want anyone else freeing memcg_caches under our
+        * noses, which can happen if a new memcg comes to life. As usual,
+        * we'll take the set_limit_mutex to protect ourselves against this.
+        */
+       mutex_lock(&set_limit_mutex);
+       for (i = 0; i < memcg_limited_groups_array_size; i++) {
+               c = s->memcg_params->memcg_caches[i];
+               if (!c)
+                       continue;
+
+               /*
+                * We will now manually delete the caches, so to avoid races
+                * we need to cancel all pending destruction workers and
+                * proceed with destruction ourselves.
+                *
+                * kmem_cache_destroy() will call kmem_cache_shrink internally,
+                * and that could spawn the workers again: it is likely that
+                * the cache still has active pages until this very moment.
+                * This would lead us back to mem_cgroup_destroy_cache.
+                *
+                * But that will not execute at all if the "dead" flag is not
+                * set, so flip it down to guarantee we are in control.
+                */
+               c->memcg_params->dead = false;
+               cancel_delayed_work_sync(&c->memcg_params->destroy);
+               kmem_cache_destroy(c);
+       }
+       mutex_unlock(&set_limit_mutex);
+}
+
 struct create_work {
        struct mem_cgroup *memcg;
        struct kmem_cache *cachep;
@@ -4257,8 +4304,6 @@ void mem_cgroup_print_bad_page(struct page *page)
 }
 #endif
 
-static DEFINE_MUTEX(set_limit_mutex);
-
 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
                                unsigned long long val)
 {
diff --git a/mm/slab_common.c b/mm/slab_common.c
index b76a74c78179692761ded92c8ce2667a05a7bef7..04215a5454d9fd817dba7eb759285fcb6ae1b1d4 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -221,6 +221,9 @@ EXPORT_SYMBOL(kmem_cache_create);
 
 void kmem_cache_destroy(struct kmem_cache *s)
 {
+       /* Destroy all the children caches if we aren't a memcg cache */
+       kmem_cache_destroy_memcg_children(s);
+
        get_online_cpus();
        mutex_lock(&slab_mutex);
        s->refcount--;
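
With this change, a hypothetical module teardown only has to destroy
its root cache; the per-memcg children are reaped first.  A sketch,
where example_cachep, example_init and example_exit are illustrative
module boilerplate and not part of this patch:

	#include <linux/module.h>
	#include <linux/slab.h>

	static struct kmem_cache *example_cachep;

	static int __init example_init(void)
	{
		example_cachep = kmem_cache_create("example", 256, 0, 0, NULL);
		return example_cachep ? 0 : -ENOMEM;
	}
	module_init(example_init);

	static void __exit example_exit(void)
	{
		/*
		 * kmem_cache_destroy() now first calls
		 * kmem_cache_destroy_memcg_children(): each populated
		 * memcg_caches[] slot has its "dead" flag cleared, its
		 * pending destroy worker cancelled, and is then destroyed,
		 * so no child outlives the root cache it points to.
		 */
		kmem_cache_destroy(example_cachep);
	}
	module_exit(example_exit);

	MODULE_LICENSE("GPL");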