git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
memcg: increment static branch right after limit set
author: Glauber Costa <glommer@parallels.com>
Thu, 7 Feb 2013 01:26:58 +0000 (12:26 +1100)
committer: Stephen Rothwell <sfr@canb.auug.org.au>
Mon, 18 Feb 2013 05:46:46 +0000 (16:46 +1100)
We were deferring the kmemcg static branch increment to a later time, due
to a nasty dependency between the cpu_hotplug lock, taken by the jump
label update, and the cgroup_lock.

Now we no longer take the cgroup lock, and we can save ourselves the
trouble.

Signed-off-by: Glauber Costa <glommer@parallels.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Cc: Tejun Heo <tj@kernel.org>
Cc: Hiroyuki Kamezawa <kamezawa.hiroyuki@gmail.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/memcontrol.c

index 14eebb64a8395b89ffc64a29b534f4581d4188f4..f9592450dbe3ae59064cba9ee806410fd13c9942 100644 (file)
@@ -4974,8 +4974,6 @@ static int memcg_update_kmem_limit(struct cgroup *cont, u64 val)
 {
        int ret = -EINVAL;
 #ifdef CONFIG_MEMCG_KMEM
-       bool must_inc_static_branch = false;
-
        struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
        /*
         * For simplicity, we won't allow this to be disabled.  It also can't
@@ -5004,7 +5002,13 @@ static int memcg_update_kmem_limit(struct cgroup *cont, u64 val)
                        res_counter_set_limit(&memcg->kmem, RESOURCE_MAX);
                        goto out;
                }
-               must_inc_static_branch = true;
+               static_key_slow_inc(&memcg_kmem_enabled_key);
+               /*
+                * setting the active bit after the inc will guarantee no one
+                * starts accounting before all call sites are patched
+                */
+               memcg_kmem_set_active(memcg);
+
                /*
                 * kmem charges can outlive the cgroup. In the case of slab
                 * pages, for instance, a page contain objects from various
@@ -5017,27 +5021,6 @@ static int memcg_update_kmem_limit(struct cgroup *cont, u64 val)
 out:
        mutex_unlock(&set_limit_mutex);
        mutex_unlock(&memcg_create_mutex);
-
-       /*
-        * We are by now familiar with the fact that we can't inc the static
-        * branch inside cgroup_lock. See disarm functions for details. A
-        * worker here is overkill, but also wrong: After the limit is set, we
-        * must start accounting right away. Since this operation can't fail,
-        * we can safely defer it to here - no rollback will be needed.
-        *
-        * The boolean used to control this is also safe, because
-        * KMEM_ACCOUNTED_ACTIVATED guarantees that only one process will be
-        * able to set it to true;
-        */
-       if (must_inc_static_branch) {
-               static_key_slow_inc(&memcg_kmem_enabled_key);
-               /*
-                * setting the active bit after the inc will guarantee no one
-                * starts accounting before all call sites are patched
-                */
-               memcg_kmem_set_active(memcg);
-       }
-
 #endif
        return ret;
 }