git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
cgroup: only need to check oldcgrp==newcgrp once
Author: Mandeep Singh Baines <msb@chromium.org>
Thu, 22 Dec 2011 04:18:37 +0000 (20:18 -0800)
Committer: Tejun Heo <tj@kernel.org>
Thu, 22 Dec 2011 15:32:57 +0000 (07:32 -0800)
In cgroup_attach_proc it is now sufficient to only check that
oldcgrp==newcgrp once. Now that we are using threadgroup_lock()
during the migrations, oldcgrp will not change.

Signed-off-by: Mandeep Singh Baines <msb@chromium.org>
Acked-by: Li Zefan <lizf@cn.fujitsu.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: containers@lists.linux-foundation.org
Cc: cgroups@vger.kernel.org
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Paul Menage <paul@paulmenage.org>
kernel/cgroup.c

index a85a7002ca330fb92b9ada6428f40bbf96ae95dd..1042b3c41314be22821c1bf4e9ff5883f7b0008f 100644 (file)
@@ -2067,7 +2067,7 @@ static int css_set_prefetch(struct cgroup *cgrp, struct css_set *cg,
  */
 int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 {
-       int retval, i, group_size, nr_migrating_tasks;
+       int retval, i, group_size;
        struct cgroup_subsys *ss, *failed_ss = NULL;
        /* guaranteed to be initialized later, but the compiler needs this */
        struct css_set *oldcg;
@@ -2118,7 +2118,7 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
        }
 
        tsk = leader;
-       i = nr_migrating_tasks = 0;
+       i = 0;
        do {
                struct task_and_cgroup ent;
 
@@ -2134,11 +2134,12 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
                 */
                ent.task = tsk;
                ent.cgrp = task_cgroup_from_root(tsk, root);
+               /* nothing to do if this task is already in the cgroup */
+               if (ent.cgrp == cgrp)
+                       continue;
                retval = flex_array_put(group, i, &ent, GFP_ATOMIC);
                BUG_ON(retval != 0);
                i++;
-               if (ent.cgrp != cgrp)
-                       nr_migrating_tasks++;
        } while_each_thread(leader, tsk);
        /* remember the number of threads in the array for later. */
        group_size = i;
@@ -2148,7 +2149,7 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 
        /* methods shouldn't be called if no task is actually migrating */
        retval = 0;
-       if (!nr_migrating_tasks)
+       if (!group_size)
                goto out_free_group_list;
 
        /*
@@ -2171,14 +2172,6 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
        INIT_LIST_HEAD(&newcg_list);
        for (i = 0; i < group_size; i++) {
                tc = flex_array_get(group, i);
-               /* nothing to do if this task is already in the cgroup */
-               if (tc->cgrp == cgrp)
-                       continue;
-               /*
-                * get old css_set pointer. threadgroup is locked so this is
-                * safe against concurrent cgroup_exit() changing this to
-                * init_css_set.
-                */
                oldcg = tc->task->cgroups;
 
                /* if we don't already have it in the list get a new one */
@@ -2194,9 +2187,6 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
         */
        for (i = 0; i < group_size; i++) {
                tc = flex_array_get(group, i);
-               /* leave current thread as it is if it's already there */
-               if (tc->cgrp == cgrp)
-                       continue;
                retval = cgroup_task_migrate(cgrp, tc->cgrp, tc->task, true);
                BUG_ON(retval);
        }