mutex_unlock(&memcg_slab_mutex);
}
+static void memcg_unregister_cache_rcu_func(struct rcu_head *rcu)
+{
+ struct memcg_cache_params *params =
+ container_of(rcu, struct memcg_cache_params, rcu_head);
+
+ schedule_work(&params->unregister_work);
+}
+
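The new callback does no real work itself: call_rcu_sched() callbacks run in atomic (softirq) context, where the actual unregistration (it is done under memcg_slab_mutex elsewhere in this file and may sleep) cannot run, so the callback only kicks the existing unregister_work item. A minimal, self-contained sketch of this RCU-callback-to-workqueue handoff follows; the names (my_obj, my_obj_release_rcu, my_obj_release_workfn) are illustrative only and not part of the patch.

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

/* Illustrative object embedding both deferral handles, like memcg_cache_params. */
struct my_obj {
	atomic_long_t refcnt;
	struct rcu_head rcu_head;
	struct work_struct release_work;
};

/* Process context (system workqueue): may sleep, take mutexes, destroy caches. */
static void my_obj_release_workfn(struct work_struct *work)
{
	struct my_obj *obj = container_of(work, struct my_obj, release_work);

	kfree(obj);
}

/* Softirq context, invoked after a grace period: only queue the sleeping part. */
static void my_obj_release_rcu(struct rcu_head *rcu)
{
	struct my_obj *obj = container_of(rcu, struct my_obj, rcu_head);

	schedule_work(&obj->release_work);
}

static struct my_obj *my_obj_alloc(void)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (obj) {
		atomic_long_set(&obj->refcnt, 1);
		INIT_WORK(&obj->release_work, my_obj_release_workfn);
	}
	return obj;
}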
/*
* During the creation a new cache, we need to disable our accounting mechanism
* altogether. This is true even if we are not creating, but rather just
{
struct kmem_cache *cachep;
struct memcg_cache_params *params, *tmp;
+ LIST_HEAD(empty_caches);
if (!memcg_kmem_is_active(memcg))
return;
kmem_cache_shrink(cachep);
if (atomic_long_dec_and_test(&cachep->memcg_params->refcnt))
- memcg_unregister_cache(cachep);
+ list_move(&cachep->memcg_params->list, &empty_caches);
+ }
+
+ /*
+ * kmem_cache_free doesn't expect that the cache can be destroyed as
+ * soon as the object is freed, e.g. SLUB's implementation may want to
+ * update cache stats after putting the object on the free list.
+ *
+ * Therefore we should wait for all kmem_cache_free's to finish before
+ * proceeding to cache destruction. Since both SLAB and SLUB versions
+ * of kmem_cache_free are non-preemptible, we wait for an rcu-sched
+ * grace period to elapse.
+ */
+ synchronize_sched();
+
+ while (!list_empty(&empty_caches)) {
+ params = list_first_entry(&empty_caches,
+ struct memcg_cache_params, list);
+ cachep = memcg_params_to_cache(params);
+ memcg_unregister_cache(cachep);
}
mutex_unlock(&memcg_slab_mutex);
}
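The reason for batching the empty caches on a local list is that a single synchronize_sched() can then cover every cache whose refcount dropped to zero during the walk, instead of waiting once per cache. The guarantee relied on is the one stated in the comment: kmem_cache_free() runs with preemption disabled in both SLAB and SLUB, so once an rcu-sched grace period has elapsed, no CPU can still be inside a kmem_cache_free() that touched one of the collected caches. A stripped-down sketch of the same refcount-plus-grace-period scheme is below; my_res, my_res_put and my_res_destroy_list are made-up names, not patch code.

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/types.h>

struct my_res {
	atomic_long_t refcnt;
	struct list_head list;
};

/*
 * Called with preemption disabled (as kmem_cache_free is in both SLAB and
 * SLUB); the caller may still touch *res briefly after dropping the last
 * reference, e.g. to update statistics.
 */
static bool my_res_put(struct my_res *res)
{
	return atomic_long_dec_and_test(&res->refcnt);
}

/* Process context: destroy a batch of objects whose refcount reached zero. */
static void my_res_destroy_list(struct list_head *dead)
{
	struct my_res *res, *tmp;

	/*
	 * Wait for every non-preemptible section, and therefore every
	 * in-flight my_res_put() caller, to finish before freeing anything.
	 */
	synchronize_sched();

	list_for_each_entry_safe(res, tmp, dead, list) {
		list_del(&res->list);
		kfree(res);
	}
}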
memcg_uncharge_kmem(cachep->memcg_params->memcg, PAGE_SIZE << order);
if (unlikely(atomic_long_dec_and_test(&cachep->memcg_params->refcnt)))
- schedule_work(&cachep->memcg_params->unregister_work);
+ /* see memcg_unregister_all_caches */
+ call_rcu_sched(&cachep->memcg_params->rcu_head,
+ memcg_unregister_cache_rcu_func);
}
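This path cannot simply call synchronize_sched(): the last reference can be dropped while freeing a slab page, which may happen in atomic context, so the wait is turned into an asynchronous chain instead: call_rcu_sched() defers past the grace period, and the callback's schedule_work() defers the sleeping teardown to process context. Continuing the illustrative my_obj sketch from above (my_obj_put is an assumed helper, not patch code), the asynchronous release looks roughly like this:

/* Safe from atomic context: nothing here sleeps or waits for the grace period. */
static void my_obj_put(struct my_obj *obj)
{
	if (atomic_long_dec_and_test(&obj->refcnt))
		/*
		 * Grace period first, so callers still inside a
		 * non-preemptible section are done with *obj; the RCU
		 * callback then hands off to the workqueue for the part
		 * that needs to sleep.
		 */
		call_rcu_sched(&obj->rcu_head, my_obj_release_rcu);
}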
/*