Merge branch 'cgroup/for-3.14-fixes' into cgroup/for-3.15
[linux-beck.git] / kernel / cgroup.c
index 771d1b8aaae98bd9e980b79dc838905e3f023a48..8ab800c7bac0ef79328e739bda3837351b797112 100644
@@ -1331,9 +1331,13 @@ static void cgroup_enable_task_cg_lists(void)
                 * We should check if the process is exiting, otherwise
                 * it will race with cgroup_exit() in that the list
                 * entry won't be deleted though the process has exited.
+                * Do it while holding siglock so that we don't end up
+                * racing against cgroup_exit().
                 */
+               spin_lock_irq(&p->sighand->siglock);
                if (!(p->flags & PF_EXITING))
                        list_add(&p->cg_list, &task_css_set(p)->tasks);
+               spin_unlock_irq(&p->sighand->siglock);
 
                task_unlock(p);
        } while_each_thread(g, p);
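
The added lines make the PF_EXITING test and the list_add() a single critical section: taking p->sighand->siglock around both means a task cannot slip into the exit path between the check and the insertion, which is exactly the race against cgroup_exit() that the comment describes. The check-then-publish-under-one-lock pattern can be sketched outside the kernel; everything below (struct worker, registry, worker_attach(), worker_exit()) is a made-up userspace analogy using pthreads, not kernel code.

/*
 * Userspace analogy only.  The "attach" side must not publish a worker
 * on the shared registry once the worker has started exiting, or the
 * registry is left with a stale entry after the exit path has run.
 * Testing the flag and inserting under the same mutex that the exit
 * path takes closes that window.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct worker {
	bool exiting;			/* stands in for PF_EXITING       */
	struct worker *next;		/* stands in for the cg_list link */
};

static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;
static struct worker *registry;		/* stands in for css_set->tasks   */

/* exit path: mark the worker and unlink it, all under the lock */
static void worker_exit(struct worker *w)
{
	pthread_mutex_lock(&registry_lock);
	w->exiting = true;
	for (struct worker **p = &registry; *p; p = &(*p)->next) {
		if (*p == w) {
			*p = w->next;
			break;
		}
	}
	pthread_mutex_unlock(&registry_lock);
}

/* attach path: publish the worker only if it has not begun exiting */
static void worker_attach(struct worker *w)
{
	pthread_mutex_lock(&registry_lock);
	if (!w->exiting) {
		w->next = registry;
		registry = w;
	}
	pthread_mutex_unlock(&registry_lock);
}

int main(void)
{
	struct worker w = { .exiting = false, .next = NULL };

	worker_attach(&w);	/* in real code these run on different threads */
	worker_exit(&w);
	return 0;
}
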
@@ -3968,16 +3972,12 @@ static int __init cgroup_wq_init(void)
        /*
         * There isn't much point in executing destruction path in
         * parallel.  Good chunk is serialized with cgroup_mutex anyway.
-        *
-        * XXX: Must be ordered to make sure parent is offlined after
-        * children.  The ordering requirement is for memcg where a
-        * parent's offline may wait for a child's leading to deadlock.  In
-        * the long term, this should be fixed from memcg side.
+        * Use 1 for @max_active.
         *
         * We would prefer to do this in cgroup_init() above, but that
         * is called before init_workqueues(): so leave this until after.
         */
-       cgroup_destroy_wq = alloc_ordered_workqueue("cgroup_destroy", 0);
+       cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
        BUG_ON(!cgroup_destroy_wq);
 
        /*
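
The second hunk trades the ordered workqueue for a plain one capped at a single in-flight work item, and the removed XXX note about memcg requiring strict parent/child offline ordering goes with it. For context, here is a rough sketch of the two allocation styles as callers see them; it paraphrases include/linux/workqueue.h of this era rather than quoting it, so treat the details as assumptions and check the header in your own tree.

/*
 * Sketch only, not a verbatim copy of include/linux/workqueue.h:
 *
 *   wq = alloc_ordered_workqueue(name, flags);
 *       - an unbound workqueue with __WQ_ORDERED set and max_active
 *         hard-wired to 1, so work items execute strictly in the
 *         order they were queued.
 *
 *   wq = alloc_workqueue(name, flags, max_active);
 *       - the call used above, alloc_workqueue("cgroup_destroy", 0, 1),
 *         asks for a regular per-cpu workqueue with at most one
 *         destruction item in flight per CPU; strict cross-item
 *         ordering is no longer promised, which is acceptable once the
 *         ordering requirement in the deleted comment no longer holds.
 */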