This will be used by subsequent patches in the series.
Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
* @list: list_head for the list of all caches in this memcg
* @root_cache: pointer to the global, root cache, this cache was derived from
* @refcnt: reference counter
+ * @dead: set to true when the owner memcg is taken offline
* @unregister_work: worker to destroy the cache
*/
struct memcg_cache_params {
struct list_head list;
struct kmem_cache *root_cache;
atomic_long_t refcnt;
+ bool dead;
struct work_struct unregister_work;
};
mutex_lock(&memcg_slab_mutex);
list_for_each_entry_safe(params, tmp, &memcg->memcg_slab_caches, list) {
cachep = memcg_params_to_cache(params);
+
+ memcg_cache_mark_dead(cachep);
kmem_cache_shrink(cachep);
+
if (atomic_long_dec_and_test(&cachep->memcg_params->refcnt))
memcg_unregister_cache(cachep);
}
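
The ordering above matters: the cache is marked dead before it is shrunk, and the final reference is dropped only afterwards. The commit message only says the flag "will be used by subsequent patches"; as a rough, hypothetical sketch of that direction (memcg_slab_free_hook() is an invented name, not from this series), the slab free path could drop the per-cache reference once the cache is dead, so that freeing the last object destroys the cache:

/*
 * Hypothetical sketch, not part of this patch: a follow-up could hook
 * the slab free path so that, for a dead cache, dropping the last
 * reference schedules the cache for destruction.
 */
static void memcg_slab_free_hook(struct kmem_cache *s)
{
	if (memcg_cache_dead(s) &&
	    atomic_long_dec_and_test(&s->memcg_params->refcnt))
		schedule_work(&s->memcg_params->unregister_work);
}
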
return !s->memcg_params || s->memcg_params->is_root_cache;
}
+static inline bool memcg_cache_dead(struct kmem_cache *s)
+{
+ if (is_root_cache(s))
+ return false;
+
+ /*
+ * This function may be called without any locks held, so a read
+ * barrier is needed to keep the load of @dead from being reordered
+ * with the caller's earlier loads.  It pairs with the smp_wmb() in
+ * memcg_cache_mark_dead().
+ */
+ smp_rmb();
+ return s->memcg_params->dead;
+}
+
+static inline void memcg_cache_mark_dead(struct kmem_cache *s)
+{
+ BUG_ON(is_root_cache(s));
+ s->memcg_params->dead = true;
+ smp_wmb(); /* matches rmb in memcg_cache_dead() */
+}
+
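
A minimal illustration of the smp_wmb()/smp_rmb() pairing (illustrative only; both function names below are invented): a reader whose earlier loads have observed anything the writer published after the write barrier is guaranteed to also observe @dead set.

/* Writer -- the memcg offline path: */
static void mark_dead_example(struct kmem_cache *s)
{
	s->memcg_params->dead = true;	/* (1) set the flag */
	smp_wmb();			/* order (1) before (2) */
	/* (2) later stores, e.g. the effects of kmem_cache_shrink() */
}

/* Reader -- a lock-free fast path: */
static bool check_dead_example(struct kmem_cache *s)
{
	/* earlier loads that may have observed the writer's step (2) */
	smp_rmb();			/* order those loads before (3) */
	return s->memcg_params->dead;	/* (3) read the flag */
}
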
#else /* !CONFIG_MEMCG_KMEM */
static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}
+static inline bool memcg_cache_dead(struct kmem_cache *s)
+{
+ return false;
+}
+
static inline bool slab_equal_or_root(struct kmem_cache *s,
struct kmem_cache *p)
{