* @list: list_head for the list of all caches in this memcg
* @root_cache: pointer to the global, root cache, this cache was derived from
* @refcnt: reference counter; the cache is destroyed when it drops to zero
+ * @unregister_work: work item scheduled to destroy the cache once the last
+ *	reference is dropped
*/
struct memcg_cache_params {
	bool is_root_cache;
	union {
		struct {
			struct rcu_head rcu_head;
			struct kmem_cache *memcg_caches[0];
		};
		struct {
			struct mem_cgroup *memcg;
			struct list_head list;
			struct kmem_cache *root_cache;
			atomic_long_t refcnt;
+			struct work_struct unregister_work;
		};
	};
};
return 0;
}
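The worker added below, like the loop in memcg_unregister_all_caches() further down, relies on memcg_params_to_cache() to map a memcg_cache_params back to its kmem_cache. That helper is not part of this patch; in kernels of this vintage it looks roughly like the following sketch (reconstructed from memory, so treat the exact body as an assumption):

/* Not part of this patch: map @p back to the owning per-memcg cache
 * (sketch from memory, not verbatim kernel source). */
static struct kmem_cache *memcg_params_to_cache(struct memcg_cache_params *p)
{
	struct kmem_cache *cachep = p->root_cache;

	/* only child caches carry a backpointer to their root */
	VM_BUG_ON(p->is_root_cache);
	/* index into the root cache's array of per-memcg clones */
	return cache_from_memcg_idx(cachep, memcg_cache_id(p->memcg));
}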
+static void memcg_unregister_cache_func(struct work_struct *work);
+
int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s,
struct kmem_cache *root_cache)
{
if (memcg) {
s->memcg_params->memcg = memcg;
s->memcg_params->root_cache = root_cache;
+ atomic_long_set(&s->memcg_params->refcnt, 1);
+ INIT_WORK(&s->memcg_params->unregister_work,
+ memcg_unregister_cache_func);
css_get(&memcg->css);
} else
s->memcg_params->is_root_cache = true;
kmem_cache_destroy(cachep);
}
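Taken together, the pieces above and below implement a simple ownership scheme: the counter starts at 1 (the memcg's own reference), every charged slab page takes a reference, and whoever drops the last one destroys the cache. A summary sketch follows (the charge-side increment is an assumption; that hunk is not shown in this excerpt):

/*
 * refcnt lifecycle (summary sketch; the charge-side increment is an
 * assumption, that hunk is not shown in this excerpt):
 *
 *   memcg_alloc_cache_params()      refcnt = 1   memcg's own reference
 *   __memcg_charge_slab()           refcnt++     one per charged slab page
 *   __memcg_uncharge_slab()         refcnt--     0 => schedule unregister_work
 *   memcg_unregister_all_caches()   refcnt--     offline drops the initial ref
 */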
+static void memcg_unregister_cache_func(struct work_struct *work)
+{
+ struct memcg_cache_params *params =
+ container_of(work, struct memcg_cache_params, unregister_work);
+ struct kmem_cache *cachep = memcg_params_to_cache(params);
+
+ mutex_lock(&memcg_slab_mutex);
+ memcg_unregister_cache(cachep);
+ mutex_unlock(&memcg_slab_mutex);
+}
+
/*
* During the creation of a new cache, we need to disable our accounting
* mechanism altogether. This is true even if we are not creating, but rather
* just enqueuing new caches to be created.
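The "accounting mechanism" being disabled is a per-task skip flag consulted by memcg_kmem_get_cache(): while it is set, allocations fall back to the root cache instead of recursing into per-memcg cache creation. From memory, the helpers look roughly like this (names and bodies are an assumption, not part of this patch):

/* Sketch (from memory, not part of this patch): while the per-task
 * counter is non-zero, memcg_kmem_get_cache() hands out the root cache,
 * so allocations made during cache creation are not accounted and
 * cannot recurse into another cache creation. */
static void memcg_stop_kmem_account(void)
{
	VM_BUG_ON(!current->mm);
	current->memcg_kmem_skip_account++;
}

static void memcg_resume_kmem_account(void)
{
	VM_BUG_ON(!current->mm);
	current->memcg_kmem_skip_account--;
}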
list_for_each_entry_safe(params, tmp, &memcg->memcg_slab_caches, list) {
cachep = memcg_params_to_cache(params);
kmem_cache_shrink(cachep);
- if (atomic_long_read(&cachep->memcg_params->refcnt) == 0)
+ if (atomic_long_dec_and_test(&cachep->memcg_params->refcnt))
memcg_unregister_cache(cachep);
}
mutex_unlock(&memcg_slab_mutex);
void __memcg_uncharge_slab(struct kmem_cache *cachep, int order)
{
memcg_uncharge_kmem(cachep->memcg_params->memcg, PAGE_SIZE << order);
- atomic_long_dec(&cachep->memcg_params->refcnt);
+
+ if (unlikely(atomic_long_dec_and_test(&cachep->memcg_params->refcnt)))
+ schedule_work(&cachep->memcg_params->unregister_work);
}
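For the counting to balance, the charge path must take a reference for every slab page it successfully charges. That hunk is not shown in this excerpt; the counterpart presumably lives in __memcg_charge_slab() and would look roughly like this (a sketch; the exact body is an assumption):

/* Assumed charge-side counterpart (not shown in the excerpt above):
 * take one reference per successfully charged slab page, so the last
 * __memcg_uncharge_slab() can detect that the cache has become empty. */
int __memcg_charge_slab(struct kmem_cache *cachep, gfp_t gfp, int order)
{
	int res;

	res = memcg_charge_kmem(cachep->memcg_params->memcg, gfp,
				PAGE_SIZE << order);
	if (!res)
		atomic_long_inc(&cachep->memcg_params->refcnt);
	return res;
}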
/*