From abca335e2d89a0b473f8340f597676335223a0e3 Mon Sep 17 00:00:00 2001
From: Vladimir Davydov
Date: Thu, 26 Jun 2014 10:42:30 +1000
Subject: [PATCH] memcg: destroy kmem caches when last slab is freed

When memcg_cache_params->refcnt goes to 0, schedule the worker that
will unregister the cache.  To prevent this from happening while the
owner memcg is still alive, keep the refcnt incremented for the whole
memcg lifetime.

Note that this doesn't guarantee that a cache belonging to a dead memcg
will go away as soon as its last object is freed, because the SL[AU]B
implementation can cache empty slabs for performance reasons.  Hence the
cache may hang around indefinitely after memcg offline.  This is to be
resolved by the next patches.

Signed-off-by: Vladimir Davydov
Acked-by: Christoph Lameter
Cc: Michal Hocko
Cc: Johannes Weiner
Cc: Pekka Enberg
Cc: David Rientjes
Cc: Joonsoo Kim
Signed-off-by: Andrew Morton
---
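
Note: the pin/unpin protocol this patch completes can be summarized by
the minimal userspace sketch below. It is only a single-threaded model
of the intended lifecycle (a plain long instead of atomic_long_t, a
flag instead of schedule_work()); every identifier in it is an
illustrative stand-in, not a kernel symbol.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct cache_model {
	long refcnt;            /* atomic_long_t in the kernel             */
	bool destroy_scheduled; /* models schedule_work(&unregister_work)  */
};

static void cache_create(struct cache_model *c)
{
	c->refcnt = 1;          /* ref held for the whole memcg lifetime   */
	c->destroy_scheduled = false;
}

static void cache_get(struct cache_model *c)
{
	c->refcnt++;            /* charging a slab page pins the cache     */
}

static void cache_put(struct cache_model *c)
{
	if (--c->refcnt == 0)            /* atomic_long_dec_and_test()     */
		c->destroy_scheduled = true; /* schedule_work() in the kernel  */
}

int main(void)
{
	struct cache_model c;

	cache_create(&c);             /* refcnt == 1                        */
	cache_get(&c);                /* a slab is allocated: refcnt == 2   */
	cache_put(&c);                /* memcg offline drops its ref        */
	assert(!c.destroy_scheduled); /* cache survives: slab still in use  */
	cache_put(&c);                /* last slab freed: refcnt == 0       */
	assert(c.destroy_scheduled);  /* unregister work is now scheduled   */
	printf("cache destroyed only after the last slab was freed\n");
	return 0;
}
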
 include/linux/slab.h |  2 ++
 mm/memcontrol.c      | 22 ++++++++++++++++++++--
 2 files changed, 22 insertions(+), 2 deletions(-)

diff --git a/include/linux/slab.h b/include/linux/slab.h
index 1985bd9bec7d..d9716fdc8211 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -527,6 +527,7 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
  * @list: list_head for the list of all caches in this memcg
  * @root_cache: pointer to the global, root cache, this cache was derived from
  * @refcnt: reference counter
+ * @unregister_work: worker to destroy the cache
  */
 struct memcg_cache_params {
 	bool is_root_cache;
@@ -540,6 +541,7 @@ struct memcg_cache_params {
 			struct list_head list;
 			struct kmem_cache *root_cache;
 			atomic_long_t refcnt;
+			struct work_struct unregister_work;
 		};
 	};
 };
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index f12bca5eed9b..4409688cd2f6 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3076,6 +3076,8 @@ int memcg_update_cache_size(struct kmem_cache *s, int num_groups)
 	return 0;
 }
 
+static void memcg_unregister_cache_func(struct work_struct *work);
+
 int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s,
 			     struct kmem_cache *root_cache)
 {
@@ -3097,6 +3099,9 @@ int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s,
 	if (memcg) {
 		s->memcg_params->memcg = memcg;
 		s->memcg_params->root_cache = root_cache;
+		atomic_long_set(&s->memcg_params->refcnt, 1);
+		INIT_WORK(&s->memcg_params->unregister_work,
+			  memcg_unregister_cache_func);
 		css_get(&memcg->css);
 	} else
 		s->memcg_params->is_root_cache = true;
@@ -3178,6 +3183,17 @@ static void memcg_unregister_cache(struct kmem_cache *cachep)
 	kmem_cache_destroy(cachep);
 }
 
+static void memcg_unregister_cache_func(struct work_struct *work)
+{
+	struct memcg_cache_params *params =
+		container_of(work, struct memcg_cache_params, unregister_work);
+	struct kmem_cache *cachep = memcg_params_to_cache(params);
+
+	mutex_lock(&memcg_slab_mutex);
+	memcg_unregister_cache(cachep);
+	mutex_unlock(&memcg_slab_mutex);
+}
+
 /*
  * During the creation a new cache, we need to disable our accounting mechanism
  * altogether. This is true even if we are not creating, but rather just
@@ -3241,7 +3257,7 @@ static void memcg_unregister_all_caches(struct mem_cgroup *memcg)
 	list_for_each_entry_safe(params, tmp, &memcg->memcg_slab_caches, list) {
 		cachep = memcg_params_to_cache(params);
 		kmem_cache_shrink(cachep);
-		if (atomic_long_read(&cachep->memcg_params->refcnt) == 0)
+		if (atomic_long_dec_and_test(&cachep->memcg_params->refcnt))
 			memcg_unregister_cache(cachep);
 	}
 	mutex_unlock(&memcg_slab_mutex);
@@ -3322,7 +3338,9 @@ int __memcg_charge_slab(struct kmem_cache *cachep, gfp_t gfp, int order)
 void __memcg_uncharge_slab(struct kmem_cache *cachep, int order)
 {
 	memcg_uncharge_kmem(cachep->memcg_params->memcg, PAGE_SIZE << order);
-	atomic_long_dec(&cachep->memcg_params->refcnt);
+
+	if (unlikely(atomic_long_dec_and_test(&cachep->memcg_params->refcnt)))
+		schedule_work(&cachep->memcg_params->unregister_work);
 }
 
 /*
-- 
2.39.5
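
P.S. For readers unfamiliar with the container_of() idiom that
memcg_unregister_cache_func() relies on to recover its
memcg_cache_params from the embedded work_struct, the sketch below is a
minimal userspace rendering of that pattern. The work_struct and params
definitions here are simplified stand-ins, not the kernel types.

#include <stddef.h>
#include <stdio.h>

/* Userspace equivalent of the kernel's container_of() macro. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Simplified stand-in for the kernel's struct work_struct. */
struct work_struct {
	void (*func)(struct work_struct *work);
};

/* Simplified stand-in for struct memcg_cache_params. */
struct params {
	const char *cache_name;
	struct work_struct unregister_work;
};

/*
 * The worker receives only a pointer to the embedded work_struct;
 * container_of() walks back to the enclosing structure, exactly as
 * memcg_unregister_cache_func() does in the patch above.
 */
static void unregister_func(struct work_struct *work)
{
	struct params *p = container_of(work, struct params, unregister_work);

	printf("unregistering %s\n", p->cache_name);
}

int main(void)
{
	struct params p = { .cache_name = "dummy-cache" };

	p.unregister_work.func = unregister_func;
	p.unregister_work.func(&p.unregister_work); /* models the worker run */
	return 0;
}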