memcg: wait for kfree's to finish before destroying cache
author	Vladimir Davydov <vdavydov@parallels.com>
	Thu, 26 Jun 2014 00:42:31 +0000 (10:42 +1000)
committer	Stephen Rothwell <sfr@canb.auug.org.au>
	Thu, 26 Jun 2014 00:42:31 +0000 (10:42 +1000)
kmem_cache_free doesn't expect that the cache can be destroyed as soon as
the object is freed, e.g.  SLUB's implementation may want to update cache
stats after putting the object on the free list.

Therefore we should wait for all in-flight kmem_cache_free's to finish
before proceeding to cache destruction.  Since both the SLAB and SLUB
versions of kmem_cache_free are non-preemptible, it is enough to wait for
an rcu-sched grace period to elapse.
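
To illustrate the pattern (a minimal sketch with assumed names, not the
actual mm/ code; example_free() and example_destroy() are hypothetical
stand-ins for the allocator's free and destruction paths):

	/* free path: runs with preemption disabled */
	static void example_free(struct kmem_cache *cachep, void *obj)
	{
		preempt_disable();	/* SLAB/SLUB do this implicitly */
		/* put obj on the free list; the last reference may be dropped here */
		/* update per-cache stats: cachep is still dereferenced! */
		preempt_enable();
	}

	/* destruction path */
	static void example_destroy(struct kmem_cache *cachep)
	{
		/*
		 * synchronize_sched() cannot return while any CPU sits in a
		 * non-preemptible section, so once it returns every in-flight
		 * example_free() has finished and cachep is safe to destroy.
		 */
		synchronize_sched();
		kmem_cache_destroy(cachep);
	}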

Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/slab.h
mm/memcontrol.c

diff --git a/include/linux/slab.h b/include/linux/slab.h
index d99d5212b8151a8a7ffd0af78ebf5ec17358b308..68b1feaba9d6672c3c0d277f2f6175f4dfbfe63c 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -532,11 +532,9 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
  */
 struct memcg_cache_params {
        bool is_root_cache;
+       struct rcu_head rcu_head;
        union {
-               struct {
-                       struct rcu_head rcu_head;
-                       struct kmem_cache *memcg_caches[0];
-               };
+               struct kmem_cache *memcg_caches[0];
                struct {
                        struct mem_cgroup *memcg;
                        struct list_head list;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 2a36a3977e58d1b53e4341d54fb34dc371fecec7..8f8f01b3425628a5151af7060e0837b458a83e13 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3194,6 +3194,14 @@ static void memcg_unregister_cache_func(struct work_struct *work)
        mutex_unlock(&memcg_slab_mutex);
 }
 
+static void memcg_unregister_cache_rcu_func(struct rcu_head *rcu)
+{
+       struct memcg_cache_params *params =
+               container_of(rcu, struct memcg_cache_params, rcu_head);
+
+       schedule_work(&params->unregister_work);
+}
+
 /*
  * During the creation a new cache, we need to disable our accounting mechanism
  * altogether. This is true even if we are not creating, but rather just
@@ -3249,6 +3257,7 @@ static void memcg_unregister_all_caches(struct mem_cgroup *memcg)
 {
        struct kmem_cache *cachep;
        struct memcg_cache_params *params, *tmp;
+       LIST_HEAD(empty_caches);
 
        if (!memcg_kmem_is_active(memcg))
                return;
@@ -3261,7 +3270,26 @@ static void memcg_unregister_all_caches(struct mem_cgroup *memcg)
                kmem_cache_shrink(cachep);
 
                if (atomic_long_dec_and_test(&cachep->memcg_params->refcnt))
-                       memcg_unregister_cache(cachep);
+                       list_move(&cachep->memcg_params->list, &empty_caches);
+       }
+
+       /*
+        * kmem_cache_free doesn't expect that the cache can be destroyed as
+        * soon as the object is freed, e.g. SLUB's implementation may want to
+        * update cache stats after putting the object to the free list.
+        *
+        * Therefore we should wait for all kmem_cache_free's to finish before
+        * proceeding to cache destruction. Since both SLAB and SLUB versions
+        * of kmem_cache_free are non-preemptable, we wait for rcu-sched grace
+        * period to elapse.
+        */
+       synchronize_sched();
+
+       while (!list_empty(&empty_caches)) {
+               params = list_first_entry(&empty_caches,
+                                         struct memcg_cache_params, list);
+               cachep = memcg_params_to_cache(params);
+               memcg_unregister_cache(cachep);
        }
        mutex_unlock(&memcg_slab_mutex);
 }
@@ -3343,7 +3371,9 @@ void __memcg_uncharge_slab(struct kmem_cache *cachep, int order)
        memcg_uncharge_kmem(cachep->memcg_params->memcg, PAGE_SIZE << order);
 
        if (unlikely(atomic_long_dec_and_test(&cachep->memcg_params->refcnt)))
-               schedule_work(&cachep->memcg_params->unregister_work);
+               /* see memcg_unregister_all_caches */
+               call_rcu_sched(&cachep->memcg_params->rcu_head,
+                              memcg_unregister_cache_rcu_func);
 }
 
 /*
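
Note the two-stage deferral on the uncharge path: the caller of
__memcg_uncharge_slab may not be able to sleep, so synchronize_sched() is
not an option there and the last reference drop defers through
call_rcu_sched instead.  The rcu callback in turn runs in softirq
context, where memcg_slab_mutex cannot be taken, which is why
memcg_unregister_cache_rcu_func only bounces the real work to a
workqueue.  A sketch of the chain (assumed example_* names; the real
code is in the hunks above):

	/* stage 2: workqueue, process context, sleeping and mutexes are fine */
	static void example_unregister_func(struct work_struct *work)
	{
		mutex_lock(&memcg_slab_mutex);
		/* ... actually unregister and destroy the cache ... */
		mutex_unlock(&memcg_slab_mutex);
	}

	/* stage 1: rcu-sched callback, softirq context, must not sleep */
	static void example_unregister_rcu(struct rcu_head *rcu)
	{
		struct memcg_cache_params *params =
			container_of(rcu, struct memcg_cache_params, rcu_head);

		/* grace period elapsed: all in-flight frees are done */
		schedule_work(&params->unregister_work);
	}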