@@ ... @@ cpuup_prepare
 		struct array_cache *shared = NULL;
 		struct array_cache **alien = NULL;
 
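+		/*
+		 * A dead memcg cache is about to be destroyed; don't
+		 * bother allocating fresh per-cpu arrays for it.
+		 */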
+		if (memcg_cache_dead(cachep))
+			continue;
+
 		nc = alloc_arraycache(node, cachep->limit,
 					cachep->batchcount, GFP_KERNEL);
 		if (!nc)
@@ ... @@ do_drain
 	check_irq_off();
 	ac = cpu_cache_get(cachep);
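+	/*
+	 * The cpu cache of a dead memcg cache may already have been
+	 * freed (see below), in which case there is nothing to drain.
+	 */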
+	if (!ac)
+		return;
+
 	n = get_node(cachep, node);
 	spin_lock(&n->list_lock);
 	free_block(cachep, ac->entry, ac->avail, node);
 	spin_unlock(&n->list_lock);
 	ac->avail = 0;
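+
+	/*
+	 * A dead memcg cache gets no further allocations, so its cpu
+	 * cache can be released as well; __cache_free() copes with the
+	 * missing array by freeing straight to the node lists.
+	 */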
+	if (memcg_cache_dead(cachep)) {
+		cachep->array[smp_processor_id()] = NULL;
+		kfree(ac);
+	}
 }
 
 static void drain_cpu_caches(struct kmem_cache *cachep)
@@ ... @@ free_block
 		/* fixup slab chains */
 		if (page->active == 0) {
-			if (n->free_objects > n->free_limit) {
+			if (n->free_objects > n->free_limit ||
+			    memcg_cache_dead(cachep)) {
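+				/*
+				 * A dead cache must not hoard empty
+				 * slabs: discard this one right away.
+				 */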
 				n->free_objects -= cachep->num;
 				/* No need to drop any previously held
 				 * lock here, even if we have a off-slab slab
@@ ... @@ __cache_free
 	kmemcheck_slab_free(cachep, objp, cachep->object_size);
 
+#ifdef CONFIG_MEMCG_KMEM
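+	/*
+	 * A NULL cpu cache means this is a dead memcg cache whose
+	 * per-cpu arrays were torn down in do_drain(): bypass the cpu
+	 * cache and give the object back to its node directly.
+	 */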
+	if (unlikely(!ac)) {
+		int nodeid = page_to_nid(virt_to_page(objp));
+
+		spin_lock(&cachep->node[nodeid]->list_lock);
+		free_block(cachep, &objp, 1, nodeid);
+		spin_unlock(&cachep->node[nodeid]->list_lock);
+		return;
+	}
+#endif
+
 	/*
 	 * Skip calling cache_free_alien() when the platform is not numa.
 	 * This will avoid cache misses that happen while accessing slabp (which
@@ ... @@ __do_tune_cpucache
 	struct ccupdate_struct *new;
 	int i;
 
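+	/*
+	 * Resizing the cpu caches of a dead cache is pointless; it is
+	 * about to be destroyed, so just report success.
+	 */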
+	if (memcg_cache_dead(cachep))
+		return 0;
+
 	new = kzalloc(sizeof(*new) + nr_cpu_ids * sizeof(struct array_cache *),
 		      gfp);
 	if (!new)
@@ ... @@ cache_reap
 	list_for_each_entry(searchp, &slab_caches, list) {
 		check_irq_on();
 
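+		/*
+		 * A dead cache keeps no free objects or slabs around
+		 * (see free_block() above), so there is nothing for
+		 * the reaper to do.
+		 */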
+		if (memcg_cache_dead(searchp))
+			continue;
+
 		/*
 		 * We only take the node lock if absolutely necessary and we
 		 * have established with reasonable certainty that