git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
Merge remote-tracking branch 'cgroup/for-next'
author Stephen Rothwell <sfr@canb.auug.org.au>
Mon, 2 Nov 2015 02:49:04 +0000 (13:49 +1100)
committer Stephen Rothwell <sfr@canb.auug.org.au>
Mon, 2 Nov 2015 02:49:07 +0000 (13:49 +1100)
21 files changed:
Documentation/cgroups/cgroups.txt
Documentation/cgroups/unified-hierarchy.txt
block/blk-cgroup.c
block/blk-throttle.c
block/cfq-iosched.c
include/linux/backing-dev.h
include/linux/cgroup-defs.h
include/linux/cgroup.h
include/linux/hugetlb_cgroup.h
include/linux/init_task.h
include/linux/jump_label.h
include/linux/memcontrol.h
include/linux/sched.h
kernel/cgroup.c
kernel/cgroup_pids.c
kernel/cpuset.c
kernel/events/core.c
kernel/fork.c
kernel/sched/core.c
mm/memcontrol.c
mm/vmscan.c

index f935fac1e73b9e18981b8b0ff2a69ba534b5e34d..c6256ae9885b8ac7445f6d10066ee541ebb388b8 100644 (file)
@@ -637,6 +637,10 @@ void exit(struct task_struct *task)
 
 Called during task exit.
 
+void free(struct task_struct *task)
+
+Called when the task_struct is freed.
+
 void bind(struct cgroup *root)
 (cgroup_mutex held by caller)
 
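The new ->free() hook documented above lands alongside the reworked ->exit() later in this merge (both now take only the task; see the cgroup-defs.h hunk below).  A minimal controller sketch wiring the pair; the foo_* names are hypothetical, only the callback signatures come from this commit:

    static void foo_exit(struct task_struct *task)
    {
            /* @task is exiting; invoked from cgroup_exit() */
    }

    static void foo_free(struct task_struct *task)
    {
            /* last per-task cleanup; the task_struct is about to be freed */
    }

    struct cgroup_subsys foo_cgrp_subsys = {
            /* css_alloc/css_free etc. omitted for brevity */
            .exit = foo_exit,
            .free = foo_free,
    };
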
index 5c07337b51c1b345483f4ba2699e6ce8b7ea2616..781b1d475bcfc9666d0d3dad80832c74adb48b22 100644 (file)
@@ -107,12 +107,6 @@ root of unified hierarchy can be bound to other hierarchies.  This
 allows mixing unified hierarchy with the traditional multiple
 hierarchies in a fully backward compatible way.
 
-For development purposes, the following boot parameter makes all
-controllers to appear on the unified hierarchy whether supported or
-not.
-
- cgroup__DEVEL__legacy_files_on_dfl
-
 A controller can be moved across hierarchies only after the controller
 is no longer referenced in its current hierarchy.  Because per-cgroup
 controller states are destroyed asynchronously and controllers may
@@ -341,11 +335,11 @@ is riddled with issues.
   unnecessarily complicated and probably done this way because event
   delivery itself was expensive.
 
-Unified hierarchy implements an interface file "cgroup.populated"
-which can be used to monitor whether the cgroup's subhierarchy has
-tasks in it or not.  Its value is 0 if there is no task in the cgroup
-and its descendants; otherwise, 1.  poll and [id]notify events are
-triggered when the value changes.
+Unified hierarchy implements a "populated" field in the "cgroup.events"
+interface file which can be used to monitor whether the cgroup's
+subhierarchy has tasks in it or not.  Its value is 0 if there is no
+task in the cgroup and its descendants; otherwise, 1.  poll and
+[id]notify events are triggered when the value changes.
 
 This is significantly lighter and simpler and trivially allows
 delegating management of subhierarchy - subhierarchy monitoring can
@@ -374,6 +368,10 @@ supported and the interface files "release_agent" and
 
 - The "cgroup.clone_children" file is removed.
 
+- /proc/PID/cgroup keeps reporting the cgroup that a zombie belonged
+  to before exiting.  If the cgroup is removed before the zombie is
+  reaped, " (deleted)" is appended to the path.
+
 
 5-3. Controller File Conventions
 
@@ -435,6 +433,11 @@ may be specified in any order and not all pairs have to be specified.
   the first entry in the file.  Specific entries can use "default" as
   its value to indicate inheritance of the default value.
 
+- For events which are not very high frequency, an interface file
+  "events" should be created which lists event key-value pairs.
+  Whenever a notifiable event happens, a file modified event should
+  be generated on the file.
+
 
 5-4. Per-Controller Changes
 
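To make the "populated"/"cgroup.events" semantics above concrete: kernfs notifications surface to userspace as POLLPRI on the open file.  A minimal userspace sketch, assuming cgroup2 is mounted at /sys/fs/cgroup and a child cgroup "test" exists (both paths are illustrative):

    #include <fcntl.h>
    #include <poll.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[128];
            int fd = open("/sys/fs/cgroup/test/cgroup.events", O_RDONLY);

            if (fd < 0)
                    return 1;
            for (;;) {
                    struct pollfd pfd = { .fd = fd, .events = POLLPRI };
                    ssize_t n;

                    lseek(fd, 0, SEEK_SET);
                    n = read(fd, buf, sizeof(buf) - 1);
                    if (n < 0)
                            return 1;
                    buf[n] = '\0';
                    fputs(buf, stdout);     /* e.g. "populated 1" */
                    poll(&pfd, 1, -1);      /* woken by kernfs_notify() */
            }
    }
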
index 55512dd626336eae49b758def08d601bc3515b74..5bcdfc10c23a6340367c4b9781496a49b5c81efe 100644 (file)
@@ -899,6 +899,7 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
 struct cftype blkcg_files[] = {
        {
                .name = "stat",
+               .flags = CFTYPE_NOT_ON_ROOT,
                .seq_show = blkcg_print_stat,
        },
        { }     /* terminate */
index c75a2636dd404840ea13000ea5747e0d44ddd7be..2149a1ddbacf21a02a164b042c76e06bf6734c77 100644 (file)
@@ -369,7 +369,7 @@ static void throtl_pd_init(struct blkg_policy_data *pd)
         * regardless of the position of the group in the hierarchy.
         */
        sq->parent_sq = &td->service_queue;
-       if (cgroup_on_dfl(blkg->blkcg->css.cgroup) && blkg->parent)
+       if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent)
                sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
        tg->td = td;
 }
index 04de88463a986384b54fa57593d2c89e7189e95c..1f9093e901daed7849a54633be5d80ce96b010f4 100644 (file)
@@ -1581,7 +1581,7 @@ static struct blkcg_policy_data *cfq_cpd_alloc(gfp_t gfp)
 static void cfq_cpd_init(struct blkcg_policy_data *cpd)
 {
        struct cfq_group_data *cgd = cpd_to_cfqgd(cpd);
-       unsigned int weight = cgroup_on_dfl(blkcg_root.css.cgroup) ?
+       unsigned int weight = cgroup_subsys_on_dfl(io_cgrp_subsys) ?
                              CGROUP_WEIGHT_DFL : CFQ_WEIGHT_LEGACY_DFL;
 
        if (cpd_to_blkcg(cpd) == &blkcg_root)
@@ -1599,7 +1599,7 @@ static void cfq_cpd_free(struct blkcg_policy_data *cpd)
 static void cfq_cpd_bind(struct blkcg_policy_data *cpd)
 {
        struct blkcg *blkcg = cpd_to_blkcg(cpd);
-       bool on_dfl = cgroup_on_dfl(blkcg_root.css.cgroup);
+       bool on_dfl = cgroup_subsys_on_dfl(io_cgrp_subsys);
        unsigned int weight = on_dfl ? CGROUP_WEIGHT_DFL : CFQ_WEIGHT_LEGACY_DFL;
 
        if (blkcg == &blkcg_root)
index c85f74946a8bab65ff3f16cddea6a4446b0a4799..c82794f20110420582d496ae478bc600f9400233 100644 (file)
@@ -13,7 +13,6 @@
 #include <linux/sched.h>
 #include <linux/blkdev.h>
 #include <linux/writeback.h>
-#include <linux/memcontrol.h>
 #include <linux/blk-cgroup.h>
 #include <linux/backing-dev-defs.h>
 #include <linux/slab.h>
@@ -267,8 +266,8 @@ static inline bool inode_cgwb_enabled(struct inode *inode)
 {
        struct backing_dev_info *bdi = inode_to_bdi(inode);
 
-       return cgroup_on_dfl(mem_cgroup_root_css->cgroup) &&
-               cgroup_on_dfl(blkcg_root_css->cgroup) &&
+       return cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
+               cgroup_subsys_on_dfl(io_cgrp_subsys) &&
                bdi_cap_account_dirty(bdi) &&
                (bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) &&
                (inode->i_sb->s_iflags & SB_I_CGROUPWB);
index 8492721b39be8f0fffa793c5ef39f33d32b5c5fb..60d44b26276d84a77d7f7e3379ae12821a8b3eeb 100644 (file)
@@ -76,12 +76,24 @@ enum {
        CFTYPE_ONLY_ON_ROOT     = (1 << 0),     /* only create on root cgrp */
        CFTYPE_NOT_ON_ROOT      = (1 << 1),     /* don't create on root cgrp */
        CFTYPE_NO_PREFIX        = (1 << 3),     /* (DON'T USE FOR NEW FILES) no subsys prefix */
+       CFTYPE_WORLD_WRITABLE   = (1 << 4),     /* (DON'T USE FOR NEW FILES) S_IWUGO */
 
        /* internal flags, do not use outside cgroup core proper */
        __CFTYPE_ONLY_ON_DFL    = (1 << 16),    /* only on default hierarchy */
        __CFTYPE_NOT_ON_DFL     = (1 << 17),    /* not on default hierarchy */
 };
 
+/*
+ * cgroup_file is the handle for a file instance created in a cgroup which
+ * is used, for example, to generate file changed notifications.  This can
+ * be obtained by setting cftype->file_offset.
+ */
+struct cgroup_file {
+       /* do not access any fields from outside cgroup core */
+       struct list_head node;                  /* anchored at css->files */
+       struct kernfs_node *kn;
+};
+
 /*
  * Per-subsystem/per-cgroup state maintained by the system.  This is the
  * fundamental structural building block that controllers deal with.
@@ -122,6 +134,9 @@ struct cgroup_subsys_state {
         */
        u64 serial_nr;
 
+       /* all cgroup_files associated with this css */
+       struct list_head files;
+
        /* percpu_ref killing and RCU release */
        struct rcu_head rcu_head;
        struct work_struct destroy_work;
@@ -196,6 +211,9 @@ struct css_set {
         */
        struct list_head e_cset_node[CGROUP_SUBSYS_COUNT];
 
+       /* all css_task_iters currently walking this cset */
+       struct list_head task_iters;
+
        /* For RCU-protected deletion */
        struct rcu_head rcu_head;
 };
@@ -217,16 +235,16 @@ struct cgroup {
        int id;
 
        /*
-        * If this cgroup contains any tasks, it contributes one to
-        * populated_cnt.  All children with non-zero popuplated_cnt of
-        * their own contribute one.  The count is zero iff there's no task
-        * in this cgroup or its subtree.
+        * Each non-empty css_set associated with this cgroup contributes
+        * one to populated_cnt.  All children with non-zero populated_cnt
+        * of their own contribute one.  The count is zero iff there's no
+        * task in this cgroup or its subtree.
         */
        int populated_cnt;
 
        struct kernfs_node *kn;         /* cgroup kernfs entry */
-       struct kernfs_node *procs_kn;   /* kn for "cgroup.procs" */
-       struct kernfs_node *populated_kn; /* kn for "cgroup.subtree_populated" */
+       struct cgroup_file procs_file;  /* handle for "cgroup.procs" */
+       struct cgroup_file events_file; /* handle for "cgroup.events" */
 
        /*
         * The bitmask of subsystems enabled on the child cgroups.
@@ -324,11 +342,6 @@ struct cftype {
         */
        char name[MAX_CFTYPE_NAME];
        unsigned long private;
-       /*
-        * If not 0, file mode is set to this value, otherwise it will
-        * be figured out automatically
-        */
-       umode_t mode;
 
        /*
         * The maximum length of string, excluding trailing nul, that can
@@ -339,6 +352,14 @@ struct cftype {
        /* CFTYPE_* flags */
        unsigned int flags;
 
+       /*
+        * If non-zero, should contain the offset from the start of css to
+        * a struct cgroup_file field.  cgroup will record the handle of
+        * the created file into it.  The recorded handle can be used as
+        * long as the containing css remains accessible.
+        */
+       unsigned int file_offset;
+
        /*
         * Fields used for internal bookkeeping.  Initialized automatically
         * during registration.
@@ -414,12 +435,10 @@ struct cgroup_subsys {
        int (*can_fork)(struct task_struct *task, void **priv_p);
        void (*cancel_fork)(struct task_struct *task, void *priv);
        void (*fork)(struct task_struct *task, void *priv);
-       void (*exit)(struct cgroup_subsys_state *css,
-                    struct cgroup_subsys_state *old_css,
-                    struct task_struct *task);
+       void (*exit)(struct task_struct *task);
+       void (*free)(struct task_struct *task);
        void (*bind)(struct cgroup_subsys_state *root_css);
 
-       int disabled;
        int early_init;
 
        /*
@@ -473,8 +492,31 @@ struct cgroup_subsys {
        unsigned int depends_on;
 };
 
-void cgroup_threadgroup_change_begin(struct task_struct *tsk);
-void cgroup_threadgroup_change_end(struct task_struct *tsk);
+extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
+
+/**
+ * cgroup_threadgroup_change_begin - threadgroup exclusion for cgroups
+ * @tsk: target task
+ *
+ * Called from threadgroup_change_begin() and allows cgroup operations to
+ * synchronize against threadgroup changes using a percpu_rw_semaphore.
+ */
+static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
+{
+       percpu_down_read(&cgroup_threadgroup_rwsem);
+}
+
+/**
+ * cgroup_threadgroup_change_end - threadgroup exclusion for cgroups
+ * @tsk: target task
+ *
+ * Called from threadgroup_change_end().  Counterpart of
+ * cgroup_threadgroup_change_begin().
+ */
+static inline void cgroup_threadgroup_change_end(struct task_struct *tsk)
+{
+       percpu_up_read(&cgroup_threadgroup_rwsem);
+}
 
 #else  /* CONFIG_CGROUPS */
 
index eb7ca55f72ef192c6df382d1e43e482203b96221..22e3754f89c511374af4ca8ac5a518786dcd6d88 100644 (file)
 #include <linux/nodemask.h>
 #include <linux/rculist.h>
 #include <linux/cgroupstats.h>
-#include <linux/rwsem.h>
 #include <linux/fs.h>
 #include <linux/seq_file.h>
 #include <linux/kernfs.h>
+#include <linux/jump_label.h>
 
 #include <linux/cgroup-defs.h>
 
@@ -41,6 +41,10 @@ struct css_task_iter {
        struct list_head                *task_pos;
        struct list_head                *tasks_head;
        struct list_head                *mg_tasks_head;
+
+       struct css_set                  *cur_cset;
+       struct task_struct              *cur_task;
+       struct list_head                iters_node;     /* css_set->task_iters */
 };
 
 extern struct cgroup_root cgrp_dfl_root;
@@ -50,6 +54,26 @@ extern struct css_set init_css_set;
 #include <linux/cgroup_subsys.h>
 #undef SUBSYS
 
+#define SUBSYS(_x)                                                             \
+       extern struct static_key_true _x ## _cgrp_subsys_enabled_key;           \
+       extern struct static_key_true _x ## _cgrp_subsys_on_dfl_key;
+#include <linux/cgroup_subsys.h>
+#undef SUBSYS
+
+/**
+ * cgroup_subsys_enabled - fast test on whether a subsys is enabled
+ * @ss: subsystem in question
+ */
+#define cgroup_subsys_enabled(ss)                                              \
+       static_branch_likely(&ss ## _enabled_key)
+
+/**
+ * cgroup_subsys_on_dfl - fast test on whether a subsys is on default hierarchy
+ * @ss: subsystem in question
+ */
+#define cgroup_subsys_on_dfl(ss)                                               \
+       static_branch_likely(&ss ## _on_dfl_key)
+
 bool css_has_online_children(struct cgroup_subsys_state *css);
 struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
 struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
@@ -78,6 +102,7 @@ extern void cgroup_cancel_fork(struct task_struct *p,
 extern void cgroup_post_fork(struct task_struct *p,
                             void *old_ss_priv[CGROUP_CANFORK_COUNT]);
 void cgroup_exit(struct task_struct *p);
+void cgroup_free(struct task_struct *p);
 
 int cgroup_init_early(void);
 int cgroup_init(void);
@@ -211,11 +236,33 @@ void css_task_iter_end(struct css_task_iter *it);
  * cgroup_taskset_for_each - iterate cgroup_taskset
  * @task: the loop cursor
  * @tset: taskset to iterate
+ *
+ * @tset may contain multiple tasks and they may belong to multiple
+ * processes.  When there are multiple tasks in @tset, if a task of a
+ * process is in @tset, all tasks of the process are in @tset.  Also, all
+ * are guaranteed to share the same source and destination csses.
+ *
+ * Iteration is not in any specific order.
  */
 #define cgroup_taskset_for_each(task, tset)                            \
        for ((task) = cgroup_taskset_first((tset)); (task);             \
             (task) = cgroup_taskset_next((tset)))
 
+/**
+ * cgroup_taskset_for_each_leader - iterate group leaders in a cgroup_taskset
+ * @leader: the loop cursor
+ * @tset: taskset to iterate
+ *
+ * Iterate threadgroup leaders of @tset.  For single-task migrations, @tset
+ * may not contain any.
+ */
+#define cgroup_taskset_for_each_leader(leader, tset)                   \
+       for ((leader) = cgroup_taskset_first((tset)); (leader);         \
+            (leader) = cgroup_taskset_next((tset)))                    \
+               if ((leader) != (leader)->group_leader)                 \
+                       ;                                               \
+               else
+
 /*
  * Inline functions.
  */
@@ -320,11 +367,11 @@ static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n)
  */
 #ifdef CONFIG_PROVE_RCU
 extern struct mutex cgroup_mutex;
-extern struct rw_semaphore css_set_rwsem;
+extern spinlock_t css_set_lock;
 #define task_css_set_check(task, __c)                                  \
        rcu_dereference_check((task)->cgroups,                          \
                lockdep_is_held(&cgroup_mutex) ||                       \
-               lockdep_is_held(&css_set_rwsem) ||                      \
+               lockdep_is_held(&css_set_lock) ||                       \
                ((task)->flags & PF_EXITING) || (__c))
 #else
 #define task_css_set_check(task, __c)                                  \
@@ -412,68 +459,10 @@ static inline struct cgroup *task_cgroup(struct task_struct *task,
        return task_css(task, subsys_id)->cgroup;
 }
 
-/**
- * cgroup_on_dfl - test whether a cgroup is on the default hierarchy
- * @cgrp: the cgroup of interest
- *
- * The default hierarchy is the v2 interface of cgroup and this function
- * can be used to test whether a cgroup is on the default hierarchy for
- * cases where a subsystem should behave differnetly depending on the
- * interface version.
- *
- * The set of behaviors which change on the default hierarchy are still
- * being determined and the mount option is prefixed with __DEVEL__.
- *
- * List of changed behaviors:
- *
- * - Mount options "noprefix", "xattr", "clone_children", "release_agent"
- *   and "name" are disallowed.
- *
- * - When mounting an existing superblock, mount options should match.
- *
- * - Remount is disallowed.
- *
- * - rename(2) is disallowed.
- *
- * - "tasks" is removed.  Everything should be at process granularity.  Use
- *   "cgroup.procs" instead.
- *
- * - "cgroup.procs" is not sorted.  pids will be unique unless they got
- *   recycled inbetween reads.
- *
- * - "release_agent" and "notify_on_release" are removed.  Replacement
- *   notification mechanism will be implemented.
- *
- * - "cgroup.clone_children" is removed.
- *
- * - "cgroup.subtree_populated" is available.  Its value is 0 if the cgroup
- *   and its descendants contain no task; otherwise, 1.  The file also
- *   generates kernfs notification which can be monitored through poll and
- *   [di]notify when the value of the file changes.
- *
- * - cpuset: tasks will be kept in empty cpusets when hotplug happens and
- *   take masks of ancestors with non-empty cpus/mems, instead of being
- *   moved to an ancestor.
- *
- * - cpuset: a task can be moved into an empty cpuset, and again it takes
- *   masks of ancestors.
- *
- * - memcg: use_hierarchy is on by default and the cgroup file for the flag
- *   is not created.
- *
- * - blkcg: blk-throttle becomes properly hierarchical.
- *
- * - debug: disallowed on the default hierarchy.
- */
-static inline bool cgroup_on_dfl(const struct cgroup *cgrp)
-{
-       return cgrp->root == &cgrp_dfl_root;
-}
-
 /* no synchronization, the result can only be used as a hint */
-static inline bool cgroup_has_tasks(struct cgroup *cgrp)
+static inline bool cgroup_is_populated(struct cgroup *cgrp)
 {
-       return !list_empty(&cgrp->cset_links);
+       return cgrp->populated_cnt;
 }
 
 /* returns ino associated with a cgroup */
@@ -527,6 +516,19 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
        pr_cont_kernfs_path(cgrp->kn);
 }
 
+/**
+ * cgroup_file_notify - generate a file modified event for a cgroup_file
+ * @cfile: target cgroup_file
+ *
+ * @cfile must have been obtained by setting cftype->file_offset.
+ */
+static inline void cgroup_file_notify(struct cgroup_file *cfile)
+{
+       /* might not have been created due to one of the CFTYPE selector flags */
+       if (cfile->kn)
+               kernfs_notify(cfile->kn);
+}
+
 #else /* !CONFIG_CGROUPS */
 
 struct cgroup_subsys_state;
@@ -546,6 +548,7 @@ static inline void cgroup_cancel_fork(struct task_struct *p,
 static inline void cgroup_post_fork(struct task_struct *p,
                                    void *ss_priv[CGROUP_CANFORK_COUNT]) {}
 static inline void cgroup_exit(struct task_struct *p) {}
+static inline void cgroup_free(struct task_struct *p) {}
 
 static inline int cgroup_init_early(void) { return 0; }
 static inline int cgroup_init(void) { return 0; }
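One subtlety worth spelling out from the hunks above: cgroup_taskset_for_each_leader() ends in an "if (...) ; else" so the whole macro remains a single statement that nests safely under an unbraced if, while filtering out non-leader threads.  A hedged usage sketch (foo_attach is hypothetical):

    static void foo_attach(struct cgroup_taskset *tset)
    {
            struct task_struct *leader;

            /* runs once per process in @tset, not once per thread */
            cgroup_taskset_for_each_leader(leader, tset) {
                    /* per-process work here */
            }
    }
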
index bcc853eccc85ba6ab38fc903263db60723dca553..7edd305152983af1ab6aee93f470dd99289046e8 100644 (file)
@@ -48,9 +48,7 @@ int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg)
 
 static inline bool hugetlb_cgroup_disabled(void)
 {
-       if (hugetlb_cgrp_subsys.disabled)
-               return true;
-       return false;
+       return !cgroup_subsys_enabled(hugetlb_cgrp_subsys);
 }
 
 extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
index 810a34f604247065d78c2820ed34b0bf8d3aabf6..1c1ff7e4faa4bf158166b789605107f6a65baf44 100644 (file)
 extern struct files_struct init_files;
 extern struct fs_struct init_fs;
 
-#ifdef CONFIG_CGROUPS
-#define INIT_GROUP_RWSEM(sig)                                          \
-       .group_rwsem = __RWSEM_INITIALIZER(sig.group_rwsem),
-#else
-#define INIT_GROUP_RWSEM(sig)
-#endif
-
 #ifdef CONFIG_CPUSETS
 #define INIT_CPUSET_SEQ(tsk)                                                   \
        .mems_allowed_seq = SEQCNT_ZERO(tsk.mems_allowed_seq),
@@ -65,7 +58,6 @@ extern struct fs_struct init_fs;
        INIT_PREV_CPUTIME(sig)                                          \
        .cred_guard_mutex =                                             \
                 __MUTEX_INITIALIZER(sig.cred_guard_mutex),             \
-       INIT_GROUP_RWSEM(sig)                                           \
 }
 
 extern struct nsproxy init_nsproxy;
index f1094238ab2a0f0fddeb40e3c7aadde7c2a89015..8dde55974f186bca7c1488866aaacdd805c347b3 100644 (file)
@@ -214,11 +214,6 @@ static inline int jump_label_apply_nops(struct module *mod)
 #define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE
 #define jump_label_enabled static_key_enabled
 
-static inline bool static_key_enabled(struct static_key *key)
-{
-       return static_key_count(key) > 0;
-}
-
 static inline void static_key_enable(struct static_key *key)
 {
        int count = static_key_count(key);
@@ -265,6 +260,17 @@ struct static_key_false {
 #define DEFINE_STATIC_KEY_FALSE(name)  \
        struct static_key_false name = STATIC_KEY_FALSE_INIT
 
+extern bool ____wrong_branch_error(void);
+
+#define static_key_enabled(x)                                                  \
+({                                                                             \
+       if (!__builtin_types_compatible_p(typeof(*x), struct static_key) &&     \
+           !__builtin_types_compatible_p(typeof(*x), struct static_key_true) &&\
+           !__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \
+               ____wrong_branch_error();                                       \
+       static_key_count((struct static_key *)x) > 0;                           \
+})
+
 #ifdef HAVE_JUMP_LABEL
 
 /*
@@ -323,8 +329,6 @@ struct static_key_false {
  * See jump_label_type() / jump_label_init_type().
  */
 
-extern bool ____wrong_branch_error(void);
-
 #define static_branch_likely(x)                                                        \
 ({                                                                             \
        bool branch;                                                            \
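With static_key_enabled() now a type-checking macro, one query form covers struct static_key and both the _true/_false variants; cgroup_ssid_enabled() in the cgroup.c hunk below relies on exactly this.  A small sketch with a made-up key:

    DEFINE_STATIC_KEY_TRUE(foo_key);

    static bool foo_fast_path(void)
    {
            /* patched jump label: near-free on the hot path */
            return static_branch_likely(&foo_key);
    }

    static bool foo_slow_query(void)
    {
            /* plain count read: fine for setup or by-index lookups */
            return static_key_enabled(&foo_key);
    }
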
index 3e3318ddfc0e3e09a0e15825f78eb6052d628d78..27251ed428f7db8adaf54c58b7f9e41deda9048d 100644 (file)
@@ -213,6 +213,9 @@ struct mem_cgroup {
        /* OOM-Killer disable */
        int             oom_kill_disable;
 
+       /* handle for "memory.events" */
+       struct cgroup_file events_file;
+
        /* protect arrays of thresholds */
        struct mutex thresholds_lock;
 
@@ -285,6 +288,7 @@ static inline void mem_cgroup_events(struct mem_cgroup *memcg,
                       unsigned int nr)
 {
        this_cpu_add(memcg->stat->events[idx], nr);
+       cgroup_file_notify(&memcg->events_file);
 }
 
 bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg);
@@ -346,9 +350,7 @@ ino_t page_cgroup_ino(struct page *page);
 
 static inline bool mem_cgroup_disabled(void)
 {
-       if (memory_cgrp_subsys.disabled)
-               return true;
-       return false;
+       return !cgroup_subsys_enabled(memory_cgrp_subsys);
 }
 
 /*
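Since mem_cgroup_events() now notifies the "memory.events" handle, userspace can combine the poll pattern shown earlier with a simple key-value parse, per the 5-3 convention.  A sketch; the path and file name below are illustrative:

    #include <stdio.h>

    int main(void)
    {
            char key[64];
            unsigned long long val;
            FILE *f = fopen("/sys/fs/cgroup/test/memory.events", "r");

            if (!f)
                    return 1;
            while (fscanf(f, "%63s %llu", key, &val) == 2)
                    printf("%s = %llu\n", key, val);
            fclose(f);
            return 0;
    }
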
index c115d617739d8e6f1f388c13069ad32cfca1bb3b..4effb1025fbb1555bc9c3ce6f80d98db004271a2 100644 (file)
@@ -771,18 +771,6 @@ struct signal_struct {
        unsigned audit_tty_log_passwd;
        struct tty_audit_buf *tty_audit_buf;
 #endif
-#ifdef CONFIG_CGROUPS
-       /*
-        * group_rwsem prevents new tasks from entering the threadgroup and
-        * member tasks from exiting,a more specifically, setting of
-        * PF_EXITING.  fork and exit paths are protected with this rwsem
-        * using threadgroup_change_begin/end().  Users which require
-        * threadgroup to remain stable should use threadgroup_[un]lock()
-        * which also takes care of exec path.  Currently, cgroup is the
-        * only user.
-        */
-       struct rw_semaphore group_rwsem;
-#endif
 
        oom_flags_t oom_flags;
        short oom_score_adj;            /* OOM kill score adjustment */
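The per-signal_struct rwsem removed here is superseded by the single global cgroup_threadgroup_rwsem percpu_rw_semaphore (see the inline helpers added in the cgroup-defs.h hunk above).  A hedged sketch of the resulting pairing; the wrapper names are illustrative:

    static void reader_side(void)   /* fork/exit, via threadgroup_change_begin() */
    {
            percpu_down_read(&cgroup_threadgroup_rwsem);
            /* threadgroup membership is stable in here */
            percpu_up_read(&cgroup_threadgroup_rwsem);
    }

    static void writer_side(void)   /* cgroup core, during migration */
    {
            percpu_down_write(&cgroup_threadgroup_rwsem);
            /* excludes fork and exit across every threadgroup at once */
            percpu_up_write(&cgroup_threadgroup_rwsem);
    }
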
index 2c9eae6ad9704d3278557f6c692d2ef0027b587b..b9d0cce3f9ce54937fea988b531d0cc7bf52f692 100644 (file)
@@ -45,7 +45,7 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
-#include <linux/rwsem.h>
+#include <linux/percpu-rwsem.h>
 #include <linux/string.h>
 #include <linux/sort.h>
 #include <linux/kmod.h>
@@ -75,7 +75,7 @@
  * cgroup_mutex is the master lock.  Any modification to cgroup or its
  * hierarchy must be performed while holding it.
  *
- * css_set_rwsem protects task->cgroups pointer, the list of css_set
+ * css_set_lock protects task->cgroups pointer, the list of css_set
  * objects, and the chain of tasks off each css_set.
  *
  * These locks are exported if CONFIG_PROVE_RCU so that accessors in
  */
 #ifdef CONFIG_PROVE_RCU
 DEFINE_MUTEX(cgroup_mutex);
-DECLARE_RWSEM(css_set_rwsem);
+DEFINE_SPINLOCK(css_set_lock);
 EXPORT_SYMBOL_GPL(cgroup_mutex);
-EXPORT_SYMBOL_GPL(css_set_rwsem);
+EXPORT_SYMBOL_GPL(css_set_lock);
 #else
 static DEFINE_MUTEX(cgroup_mutex);
-static DECLARE_RWSEM(css_set_rwsem);
+static DEFINE_SPINLOCK(css_set_lock);
 #endif
 
 /*
@@ -103,6 +103,8 @@ static DEFINE_SPINLOCK(cgroup_idr_lock);
  */
 static DEFINE_SPINLOCK(release_agent_path_lock);
 
+struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
+
 #define cgroup_assert_mutex_or_rcu_locked()                            \
        RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&                       \
                           !lockdep_is_held(&cgroup_mutex),             \
@@ -136,6 +138,27 @@ static const char *cgroup_subsys_name[] = {
 };
 #undef SUBSYS
 
+/* array of static_keys for cgroup_subsys_enabled() and cgroup_subsys_on_dfl() */
+#define SUBSYS(_x)                                                             \
+       DEFINE_STATIC_KEY_TRUE(_x ## _cgrp_subsys_enabled_key);                 \
+       DEFINE_STATIC_KEY_TRUE(_x ## _cgrp_subsys_on_dfl_key);                  \
+       EXPORT_SYMBOL_GPL(_x ## _cgrp_subsys_enabled_key);                      \
+       EXPORT_SYMBOL_GPL(_x ## _cgrp_subsys_on_dfl_key);
+#include <linux/cgroup_subsys.h>
+#undef SUBSYS
+
+#define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys_enabled_key,
+static struct static_key_true *cgroup_subsys_enabled_key[] = {
+#include <linux/cgroup_subsys.h>
+};
+#undef SUBSYS
+
+#define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys_on_dfl_key,
+static struct static_key_true *cgroup_subsys_on_dfl_key[] = {
+#include <linux/cgroup_subsys.h>
+};
+#undef SUBSYS
+
 /*
  * The default hierarchy, reserved for the subsystems that are otherwise
  * unattached - it never has more than a single cgroup, and all tasks are
@@ -150,12 +173,6 @@ EXPORT_SYMBOL_GPL(cgrp_dfl_root);
  */
 static bool cgrp_dfl_root_visible;
 
-/*
- * Set by the boot param of the same name and makes subsystems with NULL
- * ->dfl_files to use ->legacy_files on the default hierarchy.
- */
-static bool cgroup_legacy_files_on_dfl;
-
 /* some controllers are not supported in the default hierarchy */
 static unsigned long cgrp_dfl_root_inhibit_ss_mask;
 
@@ -183,6 +200,7 @@ static u64 css_serial_nr_next = 1;
  */
 static unsigned long have_fork_callback __read_mostly;
 static unsigned long have_exit_callback __read_mostly;
+static unsigned long have_free_callback __read_mostly;
 
 /* Ditto for the can_fork callback. */
 static unsigned long have_canfork_callback __read_mostly;
@@ -192,14 +210,87 @@ static struct cftype cgroup_legacy_base_files[];
 
 static int rebind_subsystems(struct cgroup_root *dst_root,
                             unsigned long ss_mask);
+static void css_task_iter_advance(struct css_task_iter *it);
 static int cgroup_destroy_locked(struct cgroup *cgrp);
 static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss,
                      bool visible);
 static void css_release(struct percpu_ref *ref);
 static void kill_css(struct cgroup_subsys_state *css);
-static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[],
+static int cgroup_addrm_files(struct cgroup_subsys_state *css,
+                             struct cgroup *cgrp, struct cftype cfts[],
                              bool is_add);
 
+/**
+ * cgroup_ssid_enabled - cgroup subsys enabled test by subsys ID
+ * @ssid: subsys ID of interest
+ *
+ * cgroup_subsys_enabled() can only be used with literal subsys names,
+ * which is fine for individual subsystems but unsuitable for cgroup
+ * core.  This is a slower static_key_enabled() based test indexed by
+ * @ssid.
+ */
+static bool cgroup_ssid_enabled(int ssid)
+{
+       return static_key_enabled(cgroup_subsys_enabled_key[ssid]);
+}
+
+/**
+ * cgroup_on_dfl - test whether a cgroup is on the default hierarchy
+ * @cgrp: the cgroup of interest
+ *
+ * The default hierarchy is the v2 interface of cgroup and this function
+ * can be used to test whether a cgroup is on the default hierarchy for
+ * cases where a subsystem should behave differently depending on the
+ * interface version.
+ *
+ * The set of behaviors which change on the default hierarchy are still
+ * being determined and the mount option is prefixed with __DEVEL__.
+ *
+ * List of changed behaviors:
+ *
+ * - Mount options "noprefix", "xattr", "clone_children", "release_agent"
+ *   and "name" are disallowed.
+ *
+ * - When mounting an existing superblock, mount options should match.
+ *
+ * - Remount is disallowed.
+ *
+ * - rename(2) is disallowed.
+ *
+ * - "tasks" is removed.  Everything should be at process granularity.  Use
+ *   "cgroup.procs" instead.
+ *
+ * - "cgroup.procs" is not sorted.  pids will be unique unless they got
+ *   recycled in between reads.
+ *
+ * - "release_agent" and "notify_on_release" are removed.  Replacement
+ *   notification mechanism will be implemented.
+ *
+ * - "cgroup.clone_children" is removed.
+ *
+ * - "cgroup.subtree_populated" is available.  Its value is 0 if the cgroup
+ *   and its descendants contain no task; otherwise, 1.  The file also
+ *   generates kernfs notification which can be monitored through poll and
+ *   [di]notify when the value of the file changes.
+ *
+ * - cpuset: tasks will be kept in empty cpusets when hotplug happens and
+ *   take masks of ancestors with non-empty cpus/mems, instead of being
+ *   moved to an ancestor.
+ *
+ * - cpuset: a task can be moved into an empty cpuset, and again it takes
+ *   masks of ancestors.
+ *
+ * - memcg: use_hierarchy is on by default and the cgroup file for the flag
+ *   is not created.
+ *
+ * - blkcg: blk-throttle becomes properly hierarchical.
+ *
+ * - debug: disallowed on the default hierarchy.
+ */
+static bool cgroup_on_dfl(const struct cgroup *cgrp)
+{
+       return cgrp->root == &cgrp_dfl_root;
+}
+
 /* IDR wrappers which synchronize using cgroup_idr_lock */
 static int cgroup_idr_alloc(struct idr *idr, void *ptr, int start, int end,
                            gfp_t gfp_mask)
@@ -332,6 +423,22 @@ static inline bool cgroup_is_dead(const struct cgroup *cgrp)
        return !(cgrp->self.flags & CSS_ONLINE);
 }
 
+static void cgroup_get(struct cgroup *cgrp)
+{
+       WARN_ON_ONCE(cgroup_is_dead(cgrp));
+       css_get(&cgrp->self);
+}
+
+static bool cgroup_tryget(struct cgroup *cgrp)
+{
+       return css_tryget(&cgrp->self);
+}
+
+static void cgroup_put(struct cgroup *cgrp)
+{
+       css_put(&cgrp->self);
+}
+
 struct cgroup_subsys_state *of_css(struct kernfs_open_file *of)
 {
        struct cgroup *cgrp = of->kn->parent->priv;
@@ -481,19 +588,31 @@ struct css_set init_css_set = {
        .mg_tasks               = LIST_HEAD_INIT(init_css_set.mg_tasks),
        .mg_preload_node        = LIST_HEAD_INIT(init_css_set.mg_preload_node),
        .mg_node                = LIST_HEAD_INIT(init_css_set.mg_node),
+       .task_iters             = LIST_HEAD_INIT(init_css_set.task_iters),
 };
 
 static int css_set_count       = 1;    /* 1 for init_css_set */
 
+/**
+ * css_set_populated - does a css_set contain any tasks?
+ * @cset: target css_set
+ */
+static bool css_set_populated(struct css_set *cset)
+{
+       lockdep_assert_held(&css_set_lock);
+
+       return !list_empty(&cset->tasks) || !list_empty(&cset->mg_tasks);
+}
+
 /**
  * cgroup_update_populated - updated populated count of a cgroup
  * @cgrp: the target cgroup
  * @populated: inc or dec populated count
  *
- * @cgrp is either getting the first task (css_set) or losing the last.
- * Update @cgrp->populated_cnt accordingly.  The count is propagated
- * towards root so that a given cgroup's populated_cnt is zero iff the
- * cgroup and all its descendants are empty.
+ * One of the css_sets associated with @cgrp is either getting its first
+ * task or losing the last.  Update @cgrp->populated_cnt accordingly.  The
+ * count is propagated towards root so that a given cgroup's populated_cnt
+ * is zero iff the cgroup and all its descendants don't contain any tasks.
  *
  * @cgrp's interface file "cgroup.populated" is zero if
  * @cgrp->populated_cnt is zero and 1 otherwise.  When @cgrp->populated_cnt
@@ -503,7 +622,7 @@ static int css_set_count    = 1;    /* 1 for init_css_set */
  */
 static void cgroup_update_populated(struct cgroup *cgrp, bool populated)
 {
-       lockdep_assert_held(&css_set_rwsem);
+       lockdep_assert_held(&css_set_lock);
 
        do {
                bool trigger;
@@ -516,12 +635,93 @@ static void cgroup_update_populated(struct cgroup *cgrp, bool populated)
                if (!trigger)
                        break;
 
-               if (cgrp->populated_kn)
-                       kernfs_notify(cgrp->populated_kn);
+               check_for_release(cgrp);
+               cgroup_file_notify(&cgrp->events_file);
+
                cgrp = cgroup_parent(cgrp);
        } while (cgrp);
 }
 
+/**
+ * css_set_update_populated - update populated state of a css_set
+ * @cset: target css_set
+ * @populated: whether @cset is populated or depopulated
+ *
+ * @cset is either getting the first task or losing the last.  Update the
+ * ->populated_cnt of all associated cgroups accordingly.
+ */
+static void css_set_update_populated(struct css_set *cset, bool populated)
+{
+       struct cgrp_cset_link *link;
+
+       lockdep_assert_held(&css_set_lock);
+
+       list_for_each_entry(link, &cset->cgrp_links, cgrp_link)
+               cgroup_update_populated(link->cgrp, populated);
+}
+
+/**
+ * css_set_move_task - move a task from one css_set to another
+ * @task: task being moved
+ * @from_cset: css_set @task currently belongs to (may be NULL)
+ * @to_cset: new css_set @task is being moved to (may be NULL)
+ * @use_mg_tasks: move to @to_cset->mg_tasks instead of ->tasks
+ *
+ * Move @task from @from_cset to @to_cset.  If @task didn't belong to any
+ * css_set, @from_cset can be NULL.  If @task is being disassociated
+ * instead of moved, @to_cset can be NULL.
+ *
+ * This function automatically handles populated_cnt updates and
+ * css_task_iter adjustments but the caller is responsible for managing
+ * @from_cset and @to_cset's reference counts.
+ */
+static void css_set_move_task(struct task_struct *task,
+                             struct css_set *from_cset, struct css_set *to_cset,
+                             bool use_mg_tasks)
+{
+       lockdep_assert_held(&css_set_lock);
+
+       if (from_cset) {
+               struct css_task_iter *it, *pos;
+
+               WARN_ON_ONCE(list_empty(&task->cg_list));
+
+               /*
+                * @task is leaving, advance task iterators which are
+                * pointing to it so that they can resume at the next
+                * position.  Advancing an iterator might remove it from
+                * the list, use safe walk.  See css_task_iter_advance*()
+                * for details.
+                */
+               list_for_each_entry_safe(it, pos, &from_cset->task_iters,
+                                        iters_node)
+                       if (it->task_pos == &task->cg_list)
+                               css_task_iter_advance(it);
+
+               list_del_init(&task->cg_list);
+               if (!css_set_populated(from_cset))
+                       css_set_update_populated(from_cset, false);
+       } else {
+               WARN_ON_ONCE(!list_empty(&task->cg_list));
+       }
+
+       if (to_cset) {
+               /*
+                * We are synchronized through cgroup_threadgroup_rwsem
+                * against PF_EXITING setting such that we can't race
+                * against cgroup_exit() changing the css_set to
+                * init_css_set and dropping the old one.
+                */
+               WARN_ON_ONCE(task->flags & PF_EXITING);
+
+               if (!css_set_populated(to_cset))
+                       css_set_update_populated(to_cset, true);
+               rcu_assign_pointer(task->cgroups, to_cset);
+               list_add_tail(&task->cg_list, use_mg_tasks ? &to_cset->mg_tasks :
+                                                            &to_cset->tasks);
+       }
+}
+
 /*
  * hash table for cgroup groups. This improves the performance to find
  * an existing css_set. This hash doesn't (currently) take into
@@ -549,7 +749,7 @@ static void put_css_set_locked(struct css_set *cset)
        struct cgroup_subsys *ss;
        int ssid;
 
-       lockdep_assert_held(&css_set_rwsem);
+       lockdep_assert_held(&css_set_lock);
 
        if (!atomic_dec_and_test(&cset->refcount))
                return;
@@ -561,17 +761,10 @@ static void put_css_set_locked(struct css_set *cset)
        css_set_count--;
 
        list_for_each_entry_safe(link, tmp_link, &cset->cgrp_links, cgrp_link) {
-               struct cgroup *cgrp = link->cgrp;
-
                list_del(&link->cset_link);
                list_del(&link->cgrp_link);
-
-               /* @cgrp can't go away while we're holding css_set_rwsem */
-               if (list_empty(&cgrp->cset_links)) {
-                       cgroup_update_populated(cgrp, false);
-                       check_for_release(cgrp);
-               }
-
+               if (cgroup_parent(link->cgrp))
+                       cgroup_put(link->cgrp);
                kfree(link);
        }
 
@@ -588,9 +781,9 @@ static void put_css_set(struct css_set *cset)
        if (atomic_add_unless(&cset->refcount, -1, 1))
                return;
 
-       down_write(&css_set_rwsem);
+       spin_lock_bh(&css_set_lock);
        put_css_set_locked(cset);
-       up_write(&css_set_rwsem);
+       spin_unlock_bh(&css_set_lock);
 }
 
 /*
@@ -779,15 +972,15 @@ static void link_css_set(struct list_head *tmp_links, struct css_set *cset,
        link->cset = cset;
        link->cgrp = cgrp;
 
-       if (list_empty(&cgrp->cset_links))
-               cgroup_update_populated(cgrp, true);
-       list_move(&link->cset_link, &cgrp->cset_links);
-
        /*
-        * Always add links to the tail of the list so that the list
-        * is sorted by order of hierarchy creation
+        * Always add links to the tail of the lists so that the lists are
+        * in chronological order.
         */
+       list_move_tail(&link->cset_link, &cgrp->cset_links);
        list_add_tail(&link->cgrp_link, &cset->cgrp_links);
+
+       if (cgroup_parent(cgrp))
+               cgroup_get(cgrp);
 }
 
 /**
@@ -813,11 +1006,11 @@ static struct css_set *find_css_set(struct css_set *old_cset,
 
        /* First see if we already have a cgroup group that matches
         * the desired set */
-       down_read(&css_set_rwsem);
+       spin_lock_bh(&css_set_lock);
        cset = find_existing_css_set(old_cset, cgrp, template);
        if (cset)
                get_css_set(cset);
-       up_read(&css_set_rwsem);
+       spin_unlock_bh(&css_set_lock);
 
        if (cset)
                return cset;
@@ -838,13 +1031,14 @@ static struct css_set *find_css_set(struct css_set *old_cset,
        INIT_LIST_HEAD(&cset->mg_tasks);
        INIT_LIST_HEAD(&cset->mg_preload_node);
        INIT_LIST_HEAD(&cset->mg_node);
+       INIT_LIST_HEAD(&cset->task_iters);
        INIT_HLIST_NODE(&cset->hlist);
 
        /* Copy the set of subsystem state objects generated in
         * find_existing_css_set() */
        memcpy(cset->subsys, template, sizeof(cset->subsys));
 
-       down_write(&css_set_rwsem);
+       spin_lock_bh(&css_set_lock);
        /* Add reference counts and links from the new css_set. */
        list_for_each_entry(link, &old_cset->cgrp_links, cgrp_link) {
                struct cgroup *c = link->cgrp;
@@ -866,53 +1060,11 @@ static struct css_set *find_css_set(struct css_set *old_cset,
                list_add_tail(&cset->e_cset_node[ssid],
                              &cset->subsys[ssid]->cgroup->e_csets[ssid]);
 
-       up_write(&css_set_rwsem);
+       spin_unlock_bh(&css_set_lock);
 
        return cset;
 }
 
-void cgroup_threadgroup_change_begin(struct task_struct *tsk)
-{
-       down_read(&tsk->signal->group_rwsem);
-}
-
-void cgroup_threadgroup_change_end(struct task_struct *tsk)
-{
-       up_read(&tsk->signal->group_rwsem);
-}
-
-/**
- * threadgroup_lock - lock threadgroup
- * @tsk: member task of the threadgroup to lock
- *
- * Lock the threadgroup @tsk belongs to.  No new task is allowed to enter
- * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or
- * change ->group_leader/pid.  This is useful for cases where the threadgroup
- * needs to stay stable across blockable operations.
- *
- * fork and exit explicitly call threadgroup_change_{begin|end}() for
- * synchronization.  While held, no new task will be added to threadgroup
- * and no existing live task will have its PF_EXITING set.
- *
- * de_thread() does threadgroup_change_{begin|end}() when a non-leader
- * sub-thread becomes a new leader.
- */
-static void threadgroup_lock(struct task_struct *tsk)
-{
-       down_write(&tsk->signal->group_rwsem);
-}
-
-/**
- * threadgroup_unlock - unlock threadgroup
- * @tsk: member task of the threadgroup to unlock
- *
- * Reverse threadgroup_lock().
- */
-static inline void threadgroup_unlock(struct task_struct *tsk)
-{
-       up_write(&tsk->signal->group_rwsem);
-}
-
 static struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root)
 {
        struct cgroup *root_cgrp = kf_root->kn->priv;
@@ -972,14 +1124,15 @@ static void cgroup_destroy_root(struct cgroup_root *root)
         * Release all the links from cset_links to this hierarchy's
         * root cgroup
         */
-       down_write(&css_set_rwsem);
+       spin_lock_bh(&css_set_lock);
 
        list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) {
                list_del(&link->cset_link);
                list_del(&link->cgrp_link);
                kfree(link);
        }
-       up_write(&css_set_rwsem);
+
+       spin_unlock_bh(&css_set_lock);
 
        if (!list_empty(&root->root_list)) {
                list_del(&root->root_list);
@@ -1001,7 +1154,7 @@ static struct cgroup *cset_cgroup_from_root(struct css_set *cset,
        struct cgroup *res = NULL;
 
        lockdep_assert_held(&cgroup_mutex);
-       lockdep_assert_held(&css_set_rwsem);
+       lockdep_assert_held(&css_set_lock);
 
        if (cset == &init_css_set) {
                res = &root->cgrp;
@@ -1024,7 +1177,7 @@ static struct cgroup *cset_cgroup_from_root(struct css_set *cset,
 
 /*
  * Return the cgroup for "task" from the given hierarchy. Must be
- * called with cgroup_mutex and css_set_rwsem held.
+ * called with cgroup_mutex and css_set_lock held.
  */
 static struct cgroup *task_cgroup_from_root(struct task_struct *task,
                                            struct cgroup_root *root)
@@ -1063,7 +1216,6 @@ static struct cgroup *task_cgroup_from_root(struct task_struct *task,
  * update of a tasks cgroup pointer by cgroup_attach_task()
  */
 
-static int cgroup_populate_dir(struct cgroup *cgrp, unsigned long subsys_mask);
 static struct kernfs_syscall_ops cgroup_kf_syscall_ops;
 static const struct file_operations proc_cgroupstats_operations;
 
@@ -1086,43 +1238,25 @@ static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft,
  * cgroup_file_mode - deduce file mode of a control file
  * @cft: the control file in question
  *
- * returns cft->mode if ->mode is not 0
- * returns S_IRUGO|S_IWUSR if it has both a read and a write handler
- * returns S_IRUGO if it has only a read handler
- * returns S_IWUSR if it has only a write hander
+ * S_IRUGO for read, S_IWUSR for write.
  */
 static umode_t cgroup_file_mode(const struct cftype *cft)
 {
        umode_t mode = 0;
 
-       if (cft->mode)
-               return cft->mode;
-
        if (cft->read_u64 || cft->read_s64 || cft->seq_show)
                mode |= S_IRUGO;
 
-       if (cft->write_u64 || cft->write_s64 || cft->write)
-               mode |= S_IWUSR;
+       if (cft->write_u64 || cft->write_s64 || cft->write) {
+               if (cft->flags & CFTYPE_WORLD_WRITABLE)
+                       mode |= S_IWUGO;
+               else
+                       mode |= S_IWUSR;
+       }
 
        return mode;
 }
 
-static void cgroup_get(struct cgroup *cgrp)
-{
-       WARN_ON_ONCE(cgroup_is_dead(cgrp));
-       css_get(&cgrp->self);
-}
-
-static bool cgroup_tryget(struct cgroup *cgrp)
-{
-       return css_tryget(&cgrp->self);
-}
-
-static void cgroup_put(struct cgroup *cgrp)
-{
-       css_put(&cgrp->self);
-}
-
 /**
  * cgroup_calc_child_subsys_mask - calculate child_subsys_mask
  * @cgrp: the target cgroup
@@ -1263,28 +1397,64 @@ static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
 }
 
 /**
- * cgroup_clear_dir - remove subsys files in a cgroup directory
- * @cgrp: target cgroup
- * @subsys_mask: mask of the subsystem ids whose files should be removed
+ * css_clear_dir - remove subsys files in a cgroup directory
+ * @css: target css
+ * @cgrp_override: specify if target cgroup is different from css->cgroup
  */
-static void cgroup_clear_dir(struct cgroup *cgrp, unsigned long subsys_mask)
+static void css_clear_dir(struct cgroup_subsys_state *css,
+                         struct cgroup *cgrp_override)
 {
-       struct cgroup_subsys *ss;
-       int i;
+       struct cgroup *cgrp = cgrp_override ?: css->cgroup;
+       struct cftype *cfts;
 
-       for_each_subsys(ss, i) {
-               struct cftype *cfts;
+       list_for_each_entry(cfts, &css->ss->cfts, node)
+               cgroup_addrm_files(css, cgrp, cfts, false);
+}
 
-               if (!(subsys_mask & (1 << i)))
-                       continue;
-               list_for_each_entry(cfts, &ss->cfts, node)
-                       cgroup_addrm_files(cgrp, cfts, false);
+/**
+ * css_populate_dir - create subsys files in a cgroup directory
+ * @css: target css
+ * @cgrp_override: specify if target cgroup is different from css->cgroup
+ *
+ * On failure, no file is added.
+ */
+static int css_populate_dir(struct cgroup_subsys_state *css,
+                           struct cgroup *cgrp_override)
+{
+       struct cgroup *cgrp = cgrp_override ?: css->cgroup;
+       struct cftype *cfts, *failed_cfts;
+       int ret;
+
+       if (!css->ss) {
+               if (cgroup_on_dfl(cgrp))
+                       cfts = cgroup_dfl_base_files;
+               else
+                       cfts = cgroup_legacy_base_files;
+
+               return cgroup_addrm_files(&cgrp->self, cgrp, cfts, true);
+       }
+
+       list_for_each_entry(cfts, &css->ss->cfts, node) {
+               ret = cgroup_addrm_files(css, cgrp, cfts, true);
+               if (ret < 0) {
+                       failed_cfts = cfts;
+                       goto err;
+               }
        }
+       return 0;
+err:
+       list_for_each_entry(cfts, &css->ss->cfts, node) {
+               if (cfts == failed_cfts)
+                       break;
+               cgroup_addrm_files(css, cgrp, cfts, false);
+       }
+       return ret;
 }
 
 static int rebind_subsystems(struct cgroup_root *dst_root,
                             unsigned long ss_mask)
 {
+       struct cgroup *dcgrp = &dst_root->cgrp;
        struct cgroup_subsys *ss;
        unsigned long tmp_ss_mask;
        int ssid, i, ret;
@@ -1306,10 +1476,13 @@ static int rebind_subsystems(struct cgroup_root *dst_root,
        if (dst_root == &cgrp_dfl_root)
                tmp_ss_mask &= ~cgrp_dfl_root_inhibit_ss_mask;
 
-       ret = cgroup_populate_dir(&dst_root->cgrp, tmp_ss_mask);
-       if (ret) {
-               if (dst_root != &cgrp_dfl_root)
-                       return ret;
+       for_each_subsys_which(ss, ssid, &tmp_ss_mask) {
+               struct cgroup *scgrp = &ss->root->cgrp;
+               int tssid;
+
+               ret = css_populate_dir(cgroup_css(scgrp, ss), dcgrp);
+               if (!ret)
+                       continue;
 
                /*
                 * Rebinding back to the default root is not allowed to
@@ -1317,57 +1490,67 @@ static int rebind_subsystems(struct cgroup_root *dst_root,
                 * be rare.  Moving subsystems back and forth even more so.
                 * Just warn about it and continue.
                 */
-               if (cgrp_dfl_root_visible) {
-                       pr_warn("failed to create files (%d) while rebinding 0x%lx to default root\n",
-                               ret, ss_mask);
-                       pr_warn("you may retry by moving them to a different hierarchy and unbinding\n");
+               if (dst_root == &cgrp_dfl_root) {
+                       if (cgrp_dfl_root_visible) {
+                               pr_warn("failed to create files (%d) while rebinding 0x%lx to default root\n",
+                                       ret, ss_mask);
+                               pr_warn("you may retry by moving them to a different hierarchy and unbinding\n");
+                       }
+                       continue;
+               }
+
+               for_each_subsys_which(ss, tssid, &tmp_ss_mask) {
+                       if (tssid == ssid)
+                               break;
+                       css_clear_dir(cgroup_css(scgrp, ss), dcgrp);
                }
+               return ret;
        }
 
        /*
         * Nothing can fail from this point on.  Remove files for the
         * removed subsystems and rebind each subsystem.
         */
-       for_each_subsys_which(ss, ssid, &ss_mask)
-               cgroup_clear_dir(&ss->root->cgrp, 1 << ssid);
-
        for_each_subsys_which(ss, ssid, &ss_mask) {
-               struct cgroup_root *src_root;
-               struct cgroup_subsys_state *css;
+               struct cgroup_root *src_root = ss->root;
+               struct cgroup *scgrp = &src_root->cgrp;
+               struct cgroup_subsys_state *css = cgroup_css(scgrp, ss);
                struct css_set *cset;
 
-               src_root = ss->root;
-               css = cgroup_css(&src_root->cgrp, ss);
+               WARN_ON(!css || cgroup_css(dcgrp, ss));
 
-               WARN_ON(!css || cgroup_css(&dst_root->cgrp, ss));
+               css_clear_dir(css, NULL);
 
-               RCU_INIT_POINTER(src_root->cgrp.subsys[ssid], NULL);
-               rcu_assign_pointer(dst_root->cgrp.subsys[ssid], css);
+               RCU_INIT_POINTER(scgrp->subsys[ssid], NULL);
+               rcu_assign_pointer(dcgrp->subsys[ssid], css);
                ss->root = dst_root;
-               css->cgroup = &dst_root->cgrp;
+               css->cgroup = dcgrp;
 
-               down_write(&css_set_rwsem);
+               spin_lock_bh(&css_set_lock);
                hash_for_each(css_set_table, i, cset, hlist)
                        list_move_tail(&cset->e_cset_node[ss->id],
-                                      &dst_root->cgrp.e_csets[ss->id]);
-               up_write(&css_set_rwsem);
+                                      &dcgrp->e_csets[ss->id]);
+               spin_unlock_bh(&css_set_lock);
 
                src_root->subsys_mask &= ~(1 << ssid);
-               src_root->cgrp.subtree_control &= ~(1 << ssid);
-               cgroup_refresh_child_subsys_mask(&src_root->cgrp);
+               scgrp->subtree_control &= ~(1 << ssid);
+               cgroup_refresh_child_subsys_mask(scgrp);
 
                /* default hierarchy doesn't enable controllers by default */
                dst_root->subsys_mask |= 1 << ssid;
-               if (dst_root != &cgrp_dfl_root) {
-                       dst_root->cgrp.subtree_control |= 1 << ssid;
-                       cgroup_refresh_child_subsys_mask(&dst_root->cgrp);
+               if (dst_root == &cgrp_dfl_root) {
+                       static_branch_enable(cgroup_subsys_on_dfl_key[ssid]);
+               } else {
+                       dcgrp->subtree_control |= 1 << ssid;
+                       cgroup_refresh_child_subsys_mask(dcgrp);
+                       static_branch_disable(cgroup_subsys_on_dfl_key[ssid]);
                }
 
                if (ss->bind)
                        ss->bind(css);
        }
 
-       kernfs_activate(dst_root->cgrp.kn);
+       kernfs_activate(dcgrp->kn);
        return 0;
 }
 
@@ -1497,7 +1680,7 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
                for_each_subsys(ss, i) {
                        if (strcmp(token, ss->legacy_name))
                                continue;
-                       if (ss->disabled)
+                       if (!cgroup_ssid_enabled(i))
                                continue;
 
                        /* Mutually exclusive option 'all' + subsystem name */
@@ -1528,7 +1711,7 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
         */
        if (all_ss || (!one_ss && !opts->none && !opts->name))
                for_each_subsys(ss, i)
-                       if (!ss->disabled)
+                       if (cgroup_ssid_enabled(i))
                                opts->subsys_mask |= (1 << i);
 
        /*
@@ -1624,7 +1807,7 @@ static void cgroup_enable_task_cg_lists(void)
 {
        struct task_struct *p, *g;
 
-       down_write(&css_set_rwsem);
+       spin_lock_bh(&css_set_lock);
 
        if (use_task_css_set_links)
                goto out_unlock;
@@ -1654,14 +1837,16 @@ static void cgroup_enable_task_cg_lists(void)
                if (!(p->flags & PF_EXITING)) {
                        struct css_set *cset = task_css_set(p);
 
-                       list_add(&p->cg_list, &cset->tasks);
+                       if (!css_set_populated(cset))
+                               css_set_update_populated(cset, true);
+                       list_add_tail(&p->cg_list, &cset->tasks);
                        get_css_set(cset);
                }
                spin_unlock_irq(&p->sighand->siglock);
        } while_each_thread(g, p);
        read_unlock(&tasklist_lock);
 out_unlock:
-       up_write(&css_set_rwsem);
+       spin_unlock_bh(&css_set_lock);
 }
 
 static void init_cgroup_housekeeping(struct cgroup *cgrp)
@@ -1671,6 +1856,7 @@ static void init_cgroup_housekeeping(struct cgroup *cgrp)
 
        INIT_LIST_HEAD(&cgrp->self.sibling);
        INIT_LIST_HEAD(&cgrp->self.children);
+       INIT_LIST_HEAD(&cgrp->self.files);
        INIT_LIST_HEAD(&cgrp->cset_links);
        INIT_LIST_HEAD(&cgrp->pidlists);
        mutex_init(&cgrp->pidlist_mutex);
@@ -1708,7 +1894,6 @@ static int cgroup_setup_root(struct cgroup_root *root, unsigned long ss_mask)
 {
        LIST_HEAD(tmp_links);
        struct cgroup *root_cgrp = &root->cgrp;
-       struct cftype *base_files;
        struct css_set *cset;
        int i, ret;
 
@@ -1725,7 +1910,7 @@ static int cgroup_setup_root(struct cgroup_root *root, unsigned long ss_mask)
                goto out;
 
        /*
-        * We're accessing css_set_count without locking css_set_rwsem here,
+        * We're accessing css_set_count without locking css_set_lock here,
         * but that's OK - it can only be increased by someone holding
         * cgroup_lock, and that's us. The worst that can happen is that we
         * have some link structures left over
@@ -1747,12 +1932,7 @@ static int cgroup_setup_root(struct cgroup_root *root, unsigned long ss_mask)
        }
        root_cgrp->kn = root->kf_root->kn;
 
-       if (root == &cgrp_dfl_root)
-               base_files = cgroup_dfl_base_files;
-       else
-               base_files = cgroup_legacy_base_files;
-
-       ret = cgroup_addrm_files(root_cgrp, base_files, true);
+       ret = css_populate_dir(&root_cgrp->self, NULL);
        if (ret)
                goto destroy_root;
 
@@ -1772,10 +1952,13 @@ static int cgroup_setup_root(struct cgroup_root *root, unsigned long ss_mask)
         * Link the root cgroup in this hierarchy into all the css_set
         * objects.
         */
-       down_write(&css_set_rwsem);
-       hash_for_each(css_set_table, i, cset, hlist)
+       spin_lock_bh(&css_set_lock);
+       hash_for_each(css_set_table, i, cset, hlist) {
                link_css_set(&tmp_links, cset, root_cgrp);
-       up_write(&css_set_rwsem);
+               if (css_set_populated(cset))
+                       cgroup_update_populated(root_cgrp, true);
+       }
+       spin_unlock_bh(&css_set_lock);
 
        BUG_ON(!list_empty(&root_cgrp->self.children));
        BUG_ON(atomic_read(&root->nr_cgrps) != 1);
@@ -2008,7 +2191,7 @@ char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
        char *path = NULL;
 
        mutex_lock(&cgroup_mutex);
-       down_read(&css_set_rwsem);
+       spin_lock_bh(&css_set_lock);
 
        root = idr_get_next(&cgroup_hierarchy_idr, &hierarchy_id);
 
@@ -2021,7 +2204,7 @@ char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
                        path = buf;
        }
 
-       up_read(&css_set_rwsem);
+       spin_unlock_bh(&css_set_lock);
        mutex_unlock(&cgroup_mutex);
        return path;
 }
@@ -2049,6 +2232,49 @@ struct cgroup_taskset {
        struct task_struct      *cur_task;
 };
 
+#define CGROUP_TASKSET_INIT(tset)      (struct cgroup_taskset){        \
+       .src_csets              = LIST_HEAD_INIT(tset.src_csets),       \
+       .dst_csets              = LIST_HEAD_INIT(tset.dst_csets),       \
+       .csets                  = &tset.src_csets,                      \
+}
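
The CGROUP_TASKSET_INIT() macro above relies on a compound literal whose list heads point back into the variable being initialized. A minimal userspace sketch of the same idiom — the list_head and LIST_HEAD_INIT below are simplified stand-ins for the demo, not the kernel definitions:

    #include <assert.h>
    #include <stdio.h>

    /* toy stand-ins for the kernel's struct list_head / LIST_HEAD_INIT */
    struct list_head { struct list_head *next, *prev; };
    #define LIST_HEAD_INIT(name) { &(name), &(name) }

    struct taskset {
        struct list_head src_csets;
        struct list_head dst_csets;
        struct list_head *csets;        /* iteration cursor */
    };

    #define TASKSET_INIT(tset) (struct taskset){            \
        .src_csets = LIST_HEAD_INIT(tset.src_csets),        \
        .dst_csets = LIST_HEAD_INIT(tset.dst_csets),        \
        .csets     = &tset.src_csets,                       \
    }

    int main(void)
    {
        struct taskset tset = TASKSET_INIT(tset);

        /* both lists start empty (self-referential) ... */
        assert(tset.src_csets.next == &tset.src_csets);
        assert(tset.dst_csets.prev == &tset.dst_csets);
        /* ... and iteration begins on the source list */
        assert(tset.csets == &tset.src_csets);
        puts("taskset initialized");
        return 0;
    }
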
+
+/**
+ * cgroup_taskset_add - try to add a migration target task to a taskset
+ * @task: target task
+ * @tset: target taskset
+ *
+ * Add @task, which is a migration target, to @tset.  This function
+ * becomes a noop if @task doesn't need to be migrated.  @task's css_set
+ * should have been added as a migration source and @task->cg_list will
+ * be moved from the css_set's tasks list to the mg_tasks one.
+ */
+static void cgroup_taskset_add(struct task_struct *task,
+                              struct cgroup_taskset *tset)
+{
+       struct css_set *cset;
+
+       lockdep_assert_held(&css_set_lock);
+
+       /* @task either already exited or can't exit until the end */
+       if (task->flags & PF_EXITING)
+               return;
+
+       /* leave @task alone if post_fork() hasn't linked it yet */
+       if (list_empty(&task->cg_list))
+               return;
+
+       cset = task_css_set(task);
+       if (!cset->mg_src_cgrp)
+               return;
+
+       list_move_tail(&task->cg_list, &cset->mg_tasks);
+       if (list_empty(&cset->mg_node))
+               list_add_tail(&cset->mg_node, &tset->src_csets);
+       if (list_empty(&cset->mg_dst_cset->mg_node))
+               list_move_tail(&cset->mg_dst_cset->mg_node,
+                              &tset->dst_csets);
+}
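
cgroup_taskset_add() works because list_move_tail() unhooks an entry from whatever list it currently sits on before appending it elsewhere, which is how a task's single cg_list link travels from cset->tasks to cset->mg_tasks. A toy userspace illustration of that semantics (the list helpers are simplified re-implementations, not the kernel's):

    #include <assert.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

    static void __list_del(struct list_head *e)
    {
        e->prev->next = e->next;
        e->next->prev = e->prev;
    }

    /* append @e just before @head, i.e. at the tail of the list */
    static void list_add_tail(struct list_head *e, struct list_head *head)
    {
        e->prev = head->prev;
        e->next = head;
        head->prev->next = e;
        head->prev = e;
    }

    /* unhook @e from its current list and append it to @head's tail */
    static void list_move_tail(struct list_head *e, struct list_head *head)
    {
        __list_del(e);
        list_add_tail(e, head);
    }

    static int list_empty(const struct list_head *h) { return h->next == h; }

    int main(void)
    {
        struct list_head tasks, mg_tasks, cg_list;

        INIT_LIST_HEAD(&tasks);
        INIT_LIST_HEAD(&mg_tasks);
        list_add_tail(&cg_list, &tasks);        /* task sits on ->tasks */

        list_move_tail(&cg_list, &mg_tasks);    /* migrate to ->mg_tasks */

        assert(list_empty(&tasks));
        assert(mg_tasks.next == &cg_list);
        puts("cg_list moved from tasks to mg_tasks");
        return 0;
    }
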
+
 /**
  * cgroup_taskset_first - reset taskset and return the first task
  * @tset: taskset of interest
@@ -2096,47 +2322,86 @@ struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset)
 }
 
 /**
- * cgroup_task_migrate - move a task from one cgroup to another.
- * @old_cgrp: the cgroup @tsk is being migrated from
- * @tsk: the task being migrated
- * @new_cset: the new css_set @tsk is being attached to
+ * cgroup_taskset_migrate - migrate a taskset to a cgroup
+ * @tset: target taskset
+ * @dst_cgrp: destination cgroup
  *
- * Must be called with cgroup_mutex, threadgroup and css_set_rwsem locked.
+ * Migrate tasks in @tset to @dst_cgrp.  This function fails iff one of the
+ * ->can_attach callbacks fails and guarantees that either all or none of
+ * the tasks in @tset are migrated.  @tset is consumed regardless of
+ * success.
  */
-static void cgroup_task_migrate(struct cgroup *old_cgrp,
-                               struct task_struct *tsk,
-                               struct css_set *new_cset)
+static int cgroup_taskset_migrate(struct cgroup_taskset *tset,
+                                 struct cgroup *dst_cgrp)
 {
-       struct css_set *old_cset;
-
-       lockdep_assert_held(&cgroup_mutex);
-       lockdep_assert_held(&css_set_rwsem);
+       struct cgroup_subsys_state *css, *failed_css = NULL;
+       struct task_struct *task, *tmp_task;
+       struct css_set *cset, *tmp_cset;
+       int i, ret;
 
-       /*
-        * We are synchronized through threadgroup_lock() against PF_EXITING
-        * setting such that we can't race against cgroup_exit() changing the
-        * css_set to init_css_set and dropping the old one.
-        */
-       WARN_ON_ONCE(tsk->flags & PF_EXITING);
-       old_cset = task_css_set(tsk);
+       /* methods shouldn't be called if no task is actually migrating */
+       if (list_empty(&tset->src_csets))
+               return 0;
 
-       get_css_set(new_cset);
-       rcu_assign_pointer(tsk->cgroups, new_cset);
+       /* check that we can legitimately attach to the cgroup */
+       for_each_e_css(css, i, dst_cgrp) {
+               if (css->ss->can_attach) {
+                       ret = css->ss->can_attach(css, tset);
+                       if (ret) {
+                               failed_css = css;
+                               goto out_cancel_attach;
+                       }
+               }
+       }
 
        /*
-        * Use move_tail so that cgroup_taskset_first() still returns the
-        * leader after migration.  This works because cgroup_migrate()
-        * ensures that the dst_cset of the leader is the first on the
-        * tset's dst_csets list.
+        * Now that we're guaranteed success, proceed to move all tasks to
+        * the new cgroup.  There are no failure cases after here, so this
+        * is the commit point.
         */
-       list_move_tail(&tsk->cg_list, &new_cset->mg_tasks);
+       spin_lock_bh(&css_set_lock);
+       list_for_each_entry(cset, &tset->src_csets, mg_node) {
+               list_for_each_entry_safe(task, tmp_task, &cset->mg_tasks, cg_list) {
+                       struct css_set *from_cset = task_css_set(task);
+                       struct css_set *to_cset = cset->mg_dst_cset;
+
+                       get_css_set(to_cset);
+                       css_set_move_task(task, from_cset, to_cset, true);
+                       put_css_set_locked(from_cset);
+               }
+       }
+       spin_unlock_bh(&css_set_lock);
 
        /*
-        * We just gained a reference on old_cset by taking it from the
-        * task. As trading it for new_cset is protected by cgroup_mutex,
-        * we're safe to drop it here; it will be freed under RCU.
+        * Migration is committed, all target tasks are now on dst_csets.
+        * Nothing is sensitive to fork() after this point.  Notify
+        * controllers that migration is complete.
         */
-       put_css_set_locked(old_cset);
+       tset->csets = &tset->dst_csets;
+
+       for_each_e_css(css, i, dst_cgrp)
+               if (css->ss->attach)
+                       css->ss->attach(css, tset);
+
+       ret = 0;
+       goto out_release_tset;
+
+out_cancel_attach:
+       for_each_e_css(css, i, dst_cgrp) {
+               if (css == failed_css)
+                       break;
+               if (css->ss->cancel_attach)
+                       css->ss->cancel_attach(css, tset);
+       }
+out_release_tset:
+       spin_lock_bh(&css_set_lock);
+       list_splice_init(&tset->dst_csets, &tset->src_csets);
+       list_for_each_entry_safe(cset, tmp_cset, &tset->src_csets, mg_node) {
+               list_splice_tail_init(&cset->mg_tasks, &cset->tasks);
+               list_del_init(&cset->mg_node);
+       }
+       spin_unlock_bh(&css_set_lock);
+       return ret;
 }
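
cgroup_taskset_migrate() follows a two-phase shape: poll every controller's ->can_attach, commit only if all agree, and on any refusal call ->cancel_attach on the controllers that had already accepted. A generic sketch of that shape — the struct and helpers below are invented for the demo, not kernel types:

    #include <stdio.h>

    struct controller {
        const char *name;
        int  (*can_attach)(void);
        void (*attach)(void);
        void (*cancel_attach)(void);
    };

    static int ok(void)      { return 0; }
    static int refuse(void)  { return -1; }
    static void commit(void) { puts("  attach"); }
    static void cancel(void) { puts("  cancel_attach"); }

    static int migrate(struct controller *c, int n)
    {
        struct controller *failed = NULL;
        int i, ret = 0;

        /* phase 1: every controller may veto the whole migration */
        for (i = 0; i < n; i++) {
            if (c[i].can_attach && (ret = c[i].can_attach())) {
                failed = &c[i];
                goto out_cancel;
            }
        }

        /* phase 2: no failure is possible past this commit point */
        for (i = 0; i < n; i++)
            if (c[i].attach)
                c[i].attach();
        return 0;

    out_cancel:
        /* undo only the controllers that already agreed */
        for (i = 0; &c[i] != failed; i++)
            if (c[i].cancel_attach)
                c[i].cancel_attach();
        return ret;
    }

    int main(void)
    {
        struct controller cs[] = {
            { "a", ok,     commit, cancel },
            { "b", refuse, commit, cancel },
            { "c", ok,     commit, cancel },
        };

        printf("migrate -> %d\n", migrate(cs, 3));  /* "a" is cancelled */
        return 0;
    }
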
 
 /**
@@ -2152,14 +2417,14 @@ static void cgroup_migrate_finish(struct list_head *preloaded_csets)
 
        lockdep_assert_held(&cgroup_mutex);
 
-       down_write(&css_set_rwsem);
+       spin_lock_bh(&css_set_lock);
        list_for_each_entry_safe(cset, tmp_cset, preloaded_csets, mg_preload_node) {
                cset->mg_src_cgrp = NULL;
                cset->mg_dst_cset = NULL;
                list_del_init(&cset->mg_preload_node);
                put_css_set_locked(cset);
        }
-       up_write(&css_set_rwsem);
+       spin_unlock_bh(&css_set_lock);
 }
 
 /**
@@ -2172,10 +2437,11 @@ static void cgroup_migrate_finish(struct list_head *preloaded_csets)
  * @src_cset and add it to @preloaded_csets, which should later be cleaned
  * up by cgroup_migrate_finish().
  *
- * This function may be called without holding threadgroup_lock even if the
- * target is a process.  Threads may be created and destroyed but as long
- * as cgroup_mutex is not dropped, no new css_set can be put into play and
- * the preloaded css_sets are guaranteed to cover all migrations.
+ * This function may be called without holding cgroup_threadgroup_rwsem
+ * even if the target is a process.  Threads may be created and destroyed
+ * but as long as cgroup_mutex is not dropped, no new css_set can be put
+ * into play and the preloaded css_sets are guaranteed to cover all
+ * migrations.
  */
 static void cgroup_migrate_add_src(struct css_set *src_cset,
                                   struct cgroup *dst_cgrp,
@@ -2184,7 +2450,7 @@ static void cgroup_migrate_add_src(struct css_set *src_cset,
        struct cgroup *src_cgrp;
 
        lockdep_assert_held(&cgroup_mutex);
-       lockdep_assert_held(&css_set_rwsem);
+       lockdep_assert_held(&css_set_lock);
 
        src_cgrp = cset_cgroup_from_root(src_cset, dst_cgrp->root);
 
@@ -2273,12 +2539,12 @@ err:
 
 /**
  * cgroup_migrate - migrate a process or task to a cgroup
- * @cgrp: the destination cgroup
  * @leader: the leader of the process or the task to migrate
  * @threadgroup: whether @leader points to the whole process or a single task
+ * @cgrp: the destination cgroup
  *
  * Migrate a process or task denoted by @leader to @cgrp.  If migrating a
- * process, the caller must be holding threadgroup_lock of @leader.  The
+ * process, the caller must be holding cgroup_threadgroup_rwsem.  The
  * caller is also responsible for invoking cgroup_migrate_add_src() and
  * cgroup_migrate_prepare_dst() on the targets before invoking this
  * function and following up with cgroup_migrate_finish().
@@ -2289,115 +2555,29 @@ err:
 * decided for all targets by invoking cgroup_migrate_prepare_dst() before
  * actually starting migrating.
  */
-static int cgroup_migrate(struct cgroup *cgrp, struct task_struct *leader,
-                         bool threadgroup)
-{
-       struct cgroup_taskset tset = {
-               .src_csets      = LIST_HEAD_INIT(tset.src_csets),
-               .dst_csets      = LIST_HEAD_INIT(tset.dst_csets),
-               .csets          = &tset.src_csets,
-       };
-       struct cgroup_subsys_state *css, *failed_css = NULL;
-       struct css_set *cset, *tmp_cset;
-       struct task_struct *task, *tmp_task;
-       int i, ret;
+static int cgroup_migrate(struct task_struct *leader, bool threadgroup,
+                         struct cgroup *cgrp)
+{
+       struct cgroup_taskset tset = CGROUP_TASKSET_INIT(tset);
+       struct task_struct *task;
 
        /*
         * Prevent freeing of tasks while we take a snapshot. Tasks that are
         * already PF_EXITING could be freed from underneath us unless we
         * take an rcu_read_lock.
         */
-       down_write(&css_set_rwsem);
+       spin_lock_bh(&css_set_lock);
        rcu_read_lock();
        task = leader;
        do {
-               /* @task either already exited or can't exit until the end */
-               if (task->flags & PF_EXITING)
-                       goto next;
-
-               /* leave @task alone if post_fork() hasn't linked it yet */
-               if (list_empty(&task->cg_list))
-                       goto next;
-
-               cset = task_css_set(task);
-               if (!cset->mg_src_cgrp)
-                       goto next;
-
-               /*
-                * cgroup_taskset_first() must always return the leader.
-                * Take care to avoid disturbing the ordering.
-                */
-               list_move_tail(&task->cg_list, &cset->mg_tasks);
-               if (list_empty(&cset->mg_node))
-                       list_add_tail(&cset->mg_node, &tset.src_csets);
-               if (list_empty(&cset->mg_dst_cset->mg_node))
-                       list_move_tail(&cset->mg_dst_cset->mg_node,
-                                      &tset.dst_csets);
-       next:
+               cgroup_taskset_add(task, &tset);
                if (!threadgroup)
                        break;
        } while_each_thread(leader, task);
        rcu_read_unlock();
-       up_write(&css_set_rwsem);
-
-       /* methods shouldn't be called if no task is actually migrating */
-       if (list_empty(&tset.src_csets))
-               return 0;
-
-       /* check that we can legitimately attach to the cgroup */
-       for_each_e_css(css, i, cgrp) {
-               if (css->ss->can_attach) {
-                       ret = css->ss->can_attach(css, &tset);
-                       if (ret) {
-                               failed_css = css;
-                               goto out_cancel_attach;
-                       }
-               }
-       }
-
-       /*
-        * Now that we're guaranteed success, proceed to move all tasks to
-        * the new cgroup.  There are no failure cases after here, so this
-        * is the commit point.
-        */
-       down_write(&css_set_rwsem);
-       list_for_each_entry(cset, &tset.src_csets, mg_node) {
-               list_for_each_entry_safe(task, tmp_task, &cset->mg_tasks, cg_list)
-                       cgroup_task_migrate(cset->mg_src_cgrp, task,
-                                           cset->mg_dst_cset);
-       }
-       up_write(&css_set_rwsem);
-
-       /*
-        * Migration is committed, all target tasks are now on dst_csets.
-        * Nothing is sensitive to fork() after this point.  Notify
-        * controllers that migration is complete.
-        */
-       tset.csets = &tset.dst_csets;
-
-       for_each_e_css(css, i, cgrp)
-               if (css->ss->attach)
-                       css->ss->attach(css, &tset);
-
-       ret = 0;
-       goto out_release_tset;
+       spin_unlock_bh(&css_set_lock);
 
-out_cancel_attach:
-       for_each_e_css(css, i, cgrp) {
-               if (css == failed_css)
-                       break;
-               if (css->ss->cancel_attach)
-                       css->ss->cancel_attach(css, &tset);
-       }
-out_release_tset:
-       down_write(&css_set_rwsem);
-       list_splice_init(&tset.dst_csets, &tset.src_csets);
-       list_for_each_entry_safe(cset, tmp_cset, &tset.src_csets, mg_node) {
-               list_splice_tail_init(&cset->mg_tasks, &cset->tasks);
-               list_del_init(&cset->mg_node);
-       }
-       up_write(&css_set_rwsem);
-       return ret;
+       return cgroup_taskset_migrate(&tset, cgrp);
 }
 
 /**
@@ -2406,7 +2586,7 @@ out_release_tset:
  * @leader: the task or the leader of the threadgroup to be attached
  * @threadgroup: attach the whole threadgroup?
  *
- * Call holding cgroup_mutex and threadgroup_lock of @leader.
+ * Call holding cgroup_mutex and cgroup_threadgroup_rwsem.
  */
 static int cgroup_attach_task(struct cgroup *dst_cgrp,
                              struct task_struct *leader, bool threadgroup)
@@ -2416,7 +2596,7 @@ static int cgroup_attach_task(struct cgroup *dst_cgrp,
        int ret;
 
        /* look up all src csets */
-       down_read(&css_set_rwsem);
+       spin_lock_bh(&css_set_lock);
        rcu_read_lock();
        task = leader;
        do {
@@ -2426,12 +2606,12 @@ static int cgroup_attach_task(struct cgroup *dst_cgrp,
                        break;
        } while_each_thread(leader, task);
        rcu_read_unlock();
-       up_read(&css_set_rwsem);
+       spin_unlock_bh(&css_set_lock);
 
        /* prepare dst csets and commit */
        ret = cgroup_migrate_prepare_dst(dst_cgrp, &preloaded_csets);
        if (!ret)
-               ret = cgroup_migrate(dst_cgrp, leader, threadgroup);
+               ret = cgroup_migrate(leader, threadgroup, dst_cgrp);
 
        cgroup_migrate_finish(&preloaded_csets);
        return ret;
@@ -2459,15 +2639,15 @@ static int cgroup_procs_write_permission(struct task_struct *task,
                struct cgroup *cgrp;
                struct inode *inode;
 
-               down_read(&css_set_rwsem);
+               spin_lock_bh(&css_set_lock);
                cgrp = task_cgroup_from_root(task, &cgrp_dfl_root);
-               up_read(&css_set_rwsem);
+               spin_unlock_bh(&css_set_lock);
 
                while (!cgroup_is_descendant(dst_cgrp, cgrp))
                        cgrp = cgroup_parent(cgrp);
 
                ret = -ENOMEM;
-               inode = kernfs_get_inode(sb, cgrp->procs_kn);
+               inode = kernfs_get_inode(sb, cgrp->procs_file.kn);
                if (inode) {
                        ret = inode_permission(inode, MAY_WRITE);
                        iput(inode);
@@ -2498,14 +2678,13 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
        if (!cgrp)
                return -ENODEV;
 
-retry_find_task:
+       percpu_down_write(&cgroup_threadgroup_rwsem);
        rcu_read_lock();
        if (pid) {
                tsk = find_task_by_vpid(pid);
                if (!tsk) {
-                       rcu_read_unlock();
                        ret = -ESRCH;
-                       goto out_unlock_cgroup;
+                       goto out_unlock_rcu;
                }
        } else {
                tsk = current;
@@ -2521,37 +2700,23 @@ retry_find_task:
         */
        if (tsk == kthreadd_task || (tsk->flags & PF_NO_SETAFFINITY)) {
                ret = -EINVAL;
-               rcu_read_unlock();
-               goto out_unlock_cgroup;
+               goto out_unlock_rcu;
        }
 
        get_task_struct(tsk);
        rcu_read_unlock();
 
-       threadgroup_lock(tsk);
-       if (threadgroup) {
-               if (!thread_group_leader(tsk)) {
-                       /*
-                        * a race with de_thread from another thread's exec()
-                        * may strip us of our leadership, if this happens,
-                        * there is no choice but to throw this task away and
-                        * try again; this is
-                        * "double-double-toil-and-trouble-check locking".
-                        */
-                       threadgroup_unlock(tsk);
-                       put_task_struct(tsk);
-                       goto retry_find_task;
-               }
-       }
-
        ret = cgroup_procs_write_permission(tsk, cgrp, of);
        if (!ret)
                ret = cgroup_attach_task(cgrp, tsk, threadgroup);
 
-       threadgroup_unlock(tsk);
-
        put_task_struct(tsk);
-out_unlock_cgroup:
+       goto out_unlock_threadgroup;
+
+out_unlock_rcu:
+       rcu_read_unlock();
+out_unlock_threadgroup:
+       percpu_up_write(&cgroup_threadgroup_rwsem);
        cgroup_kn_unlock(of->kn);
        return ret ?: nbytes;
 }
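
The rewritten __cgroup_procs_write() drops the old retry loop, holds cgroup_threadgroup_rwsem across the whole operation, and unwinds through labelled exits. The goto-ladder error-handling idiom it uses, sketched with POSIX locks standing in for the kernel primitives — an analogy only; find_task() and the lock choices are assumptions of this demo:

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t threadgroup_rwsem = PTHREAD_RWLOCK_INITIALIZER;
    static pthread_mutex_t  rcu_stand_in      = PTHREAD_MUTEX_INITIALIZER;

    /* hypothetical lookup: fails for pid 0 to exercise the error path */
    static int find_task(int pid) { return pid ? 0 : -ESRCH; }

    static int procs_write(int pid)
    {
        int ret;

        pthread_rwlock_wrlock(&threadgroup_rwsem);  /* outer lock */
        pthread_mutex_lock(&rcu_stand_in);          /* inner "rcu" */

        ret = find_task(pid);
        if (ret)
            goto out_unlock_rcu;    /* inner still held: drop it here */

        pthread_mutex_unlock(&rcu_stand_in);
        /* ... attach work would go here, under the write lock only ... */
        ret = 0;
        goto out_unlock_threadgroup;

    out_unlock_rcu:
        pthread_mutex_unlock(&rcu_stand_in);
    out_unlock_threadgroup:
        pthread_rwlock_unlock(&threadgroup_rwsem);
        return ret;
    }

    int main(void)
    {
        printf("pid 1 -> %d, pid 0 -> %d\n", procs_write(1), procs_write(0));
        return 0;
    }
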
@@ -2573,9 +2738,9 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
                if (root == &cgrp_dfl_root)
                        continue;
 
-               down_read(&css_set_rwsem);
+               spin_lock_bh(&css_set_lock);
                from_cgrp = task_cgroup_from_root(from, root);
-               up_read(&css_set_rwsem);
+               spin_unlock_bh(&css_set_lock);
 
                retval = cgroup_attach_task(from_cgrp, tsk, false);
                if (retval)
@@ -2690,14 +2855,17 @@ static int cgroup_subtree_control_show(struct seq_file *seq, void *v)
 static int cgroup_update_dfl_csses(struct cgroup *cgrp)
 {
        LIST_HEAD(preloaded_csets);
+       struct cgroup_taskset tset = CGROUP_TASKSET_INIT(tset);
        struct cgroup_subsys_state *css;
        struct css_set *src_cset;
        int ret;
 
        lockdep_assert_held(&cgroup_mutex);
 
+       percpu_down_write(&cgroup_threadgroup_rwsem);
+
        /* look up all csses currently attached to @cgrp's subtree */
-       down_read(&css_set_rwsem);
+       spin_lock_bh(&css_set_lock);
        css_for_each_descendant_pre(css, cgroup_css(cgrp, NULL)) {
                struct cgrp_cset_link *link;
 
@@ -2709,68 +2877,31 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
                        cgroup_migrate_add_src(link->cset, cgrp,
                                               &preloaded_csets);
        }
-       up_read(&css_set_rwsem);
+       spin_unlock_bh(&css_set_lock);
 
        /* NULL dst indicates self on default hierarchy */
        ret = cgroup_migrate_prepare_dst(NULL, &preloaded_csets);
        if (ret)
                goto out_finish;
 
+       spin_lock_bh(&css_set_lock);
        list_for_each_entry(src_cset, &preloaded_csets, mg_preload_node) {
-               struct task_struct *last_task = NULL, *task;
+               struct task_struct *task, *ntask;
 
                /* src_csets precede dst_csets, break on the first dst_cset */
                if (!src_cset->mg_src_cgrp)
                        break;
 
-               /*
-                * All tasks in src_cset need to be migrated to the
-                * matching dst_cset.  Empty it process by process.  We
-                * walk tasks but migrate processes.  The leader might even
-                * belong to a different cset but such src_cset would also
-                * be among the target src_csets because the default
-                * hierarchy enforces per-process membership.
-                */
-               while (true) {
-                       down_read(&css_set_rwsem);
-                       task = list_first_entry_or_null(&src_cset->tasks,
-                                               struct task_struct, cg_list);
-                       if (task) {
-                               task = task->group_leader;
-                               WARN_ON_ONCE(!task_css_set(task)->mg_src_cgrp);
-                               get_task_struct(task);
-                       }
-                       up_read(&css_set_rwsem);
-
-                       if (!task)
-                               break;
-
-                       /* guard against possible infinite loop */
-                       if (WARN(last_task == task,
-                                "cgroup: update_dfl_csses failed to make progress, aborting in inconsistent state\n"))
-                               goto out_finish;
-                       last_task = task;
-
-                       threadgroup_lock(task);
-                       /* raced against de_thread() from another thread? */
-                       if (!thread_group_leader(task)) {
-                               threadgroup_unlock(task);
-                               put_task_struct(task);
-                               continue;
-                       }
-
-                       ret = cgroup_migrate(src_cset->dfl_cgrp, task, true);
-
-                       threadgroup_unlock(task);
-                       put_task_struct(task);
-
-                       if (WARN(ret, "cgroup: failed to update controllers for the default hierarchy (%d), further operations may crash or hang\n", ret))
-                               goto out_finish;
-               }
+               /* all tasks in src_csets need to be migrated */
+               list_for_each_entry_safe(task, ntask, &src_cset->tasks, cg_list)
+                       cgroup_taskset_add(task, &tset);
        }
+       spin_unlock_bh(&css_set_lock);
 
+       ret = cgroup_taskset_migrate(&tset, cgrp);
 out_finish:
        cgroup_migrate_finish(&preloaded_csets);
+       percpu_up_write(&cgroup_threadgroup_rwsem);
        return ret;
 }
 
@@ -2797,7 +2928,8 @@ static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
                if (tok[0] == '\0')
                        continue;
                for_each_subsys_which(ss, ssid, &tmp_ss_mask) {
-                       if (ss->disabled || strcmp(tok + 1, ss->name))
+                       if (!cgroup_ssid_enabled(ssid) ||
+                           strcmp(tok + 1, ss->name))
                                continue;
 
                        if (*tok == '+') {
@@ -2921,7 +3053,8 @@ static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
                                ret = create_css(child, ss,
                                        cgrp->subtree_control & (1 << ssid));
                        else
-                               ret = cgroup_populate_dir(child, 1 << ssid);
+                               ret = css_populate_dir(cgroup_css(child, ss),
+                                                      NULL);
                        if (ret)
                                goto err_undo_css;
                }
@@ -2954,7 +3087,7 @@ static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
                        if (css_disable & (1 << ssid)) {
                                kill_css(css);
                        } else {
-                               cgroup_clear_dir(child, 1 << ssid);
+                               css_clear_dir(css, NULL);
                                if (ss->css_reset)
                                        ss->css_reset(css);
                        }
@@ -3002,15 +3135,16 @@ err_undo_css:
                        if (css_enable & (1 << ssid))
                                kill_css(css);
                        else
-                               cgroup_clear_dir(child, 1 << ssid);
+                               css_clear_dir(css, NULL);
                }
        }
        goto out_unlock;
 }
 
-static int cgroup_populated_show(struct seq_file *seq, void *v)
+static int cgroup_events_show(struct seq_file *seq, void *v)
 {
-       seq_printf(seq, "%d\n", (bool)seq_css(seq)->cgroup->populated_cnt);
+       seq_printf(seq, "populated %d\n",
+                  cgroup_is_populated(seq_css(seq)->cgroup));
        return 0;
 }
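
cgroup_events_show() emits key/value lines, currently just "populated <0|1>". A minimal userspace reader of that format might look as follows; the mount path is an assumption — adjust it to the actual hierarchy:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* path is an assumption; adjust to your cgroup mount point */
        const char *path = "/sys/fs/cgroup/mygroup/cgroup.events";
        char key[32];
        int val;
        FILE *f = fopen(path, "r");

        if (!f) {
            perror(path);
            return 1;
        }
        while (fscanf(f, "%31s %d", key, &val) == 2)
            if (!strcmp(key, "populated"))
                printf("populated = %d\n", val);
        fclose(f);
        return 0;
    }
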
 
@@ -3153,7 +3287,8 @@ static int cgroup_kn_set_ugid(struct kernfs_node *kn)
        return kernfs_setattr(kn, &iattr);
 }
 
-static int cgroup_add_file(struct cgroup *cgrp, struct cftype *cft)
+static int cgroup_add_file(struct cgroup_subsys_state *css, struct cgroup *cgrp,
+                          struct cftype *cft)
 {
        char name[CGROUP_FILE_NAME_MAX];
        struct kernfs_node *kn;
@@ -3175,33 +3310,38 @@ static int cgroup_add_file(struct cgroup *cgrp, struct cftype *cft)
                return ret;
        }
 
-       if (cft->write == cgroup_procs_write)
-               cgrp->procs_kn = kn;
-       else if (cft->seq_show == cgroup_populated_show)
-               cgrp->populated_kn = kn;
+       if (cft->file_offset) {
+               struct cgroup_file *cfile = (void *)css + cft->file_offset;
+
+               kernfs_get(kn);
+               cfile->kn = kn;
+               list_add(&cfile->node, &css->files);
+       }
+
        return 0;
 }
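
With cft->file_offset, cgroup_add_file() locates the embedded struct cgroup_file by adding a byte offset to the base object instead of keeping one dedicated pointer per known file (the old procs_kn/populated_kn fields). The underlying offsetof() arithmetic, shown with invented stand-in types:

    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    struct file_handle { int kn; };         /* stand-in for cgroup_file */

    struct group {                          /* stand-in for struct cgroup */
        int id;
        struct file_handle procs_file;
        struct file_handle events_file;
    };

    struct file_type {                      /* stand-in for struct cftype */
        const char *name;
        size_t file_offset;                 /* 0 means "no handle kept" */
    };

    int main(void)
    {
        struct group grp = { .id = 7 };
        struct file_type cft = {
            .name = "cgroup.events",
            .file_offset = offsetof(struct group, events_file),
        };

        if (cft.file_offset) {
            /* same trick as cgroup_add_file(): base + byte offset */
            struct file_handle *h =
                (void *)((char *)&grp + cft.file_offset);
            h->kn = 42;
        }
        assert(grp.events_file.kn == 42);
        printf("%s handle stored at offset %zu\n", cft.name, cft.file_offset);
        return 0;
    }
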
 
 /**
  * cgroup_addrm_files - add or remove files to a cgroup directory
- * @cgrp: the target cgroup
+ * @css: the target css
+ * @cgrp: the target cgroup (usually css->cgroup)
  * @cfts: array of cftypes to be added
  * @is_add: whether to add or remove
  *
  * Depending on @is_add, add or remove files defined by @cfts on @cgrp.
- * For removals, this function never fails.  If addition fails, this
- * function doesn't remove files already added.  The caller is responsible
- * for cleaning up.
+ * For removals, this function never fails.
  */
-static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[],
+static int cgroup_addrm_files(struct cgroup_subsys_state *css,
+                             struct cgroup *cgrp, struct cftype cfts[],
                              bool is_add)
 {
-       struct cftype *cft;
+       struct cftype *cft, *cft_end = NULL;
        int ret;
 
        lockdep_assert_held(&cgroup_mutex);
 
-       for (cft = cfts; cft->name[0] != '\0'; cft++) {
+restart:
+       for (cft = cfts; cft != cft_end && cft->name[0] != '\0'; cft++) {
                /* does cft->flags tell us to skip this file on @cgrp? */
                if ((cft->flags & __CFTYPE_ONLY_ON_DFL) && !cgroup_on_dfl(cgrp))
                        continue;
@@ -3213,11 +3353,13 @@ static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[],
                        continue;
 
                if (is_add) {
-                       ret = cgroup_add_file(cgrp, cft);
+                       ret = cgroup_add_file(css, cgrp, cft);
                        if (ret) {
                                pr_warn("%s: failed to add %s, err=%d\n",
                                        __func__, cft->name, ret);
-                               return ret;
+                               cft_end = cft;
+                               is_add = false;
+                               goto restart;
                        }
                } else {
                        cgroup_rm_file(cgrp, cft);
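
On an add failure, cgroup_addrm_files() now unwinds itself: it records the failing cftype in cft_end, flips is_add to false, and restarts the same loop so the prefix it already added gets removed. The same self-unwinding loop in a standalone sketch (add_file()/remove_file() are invented stubs):

    #include <stdio.h>

    static int add_file(int i)     { return i == 3 ? -1 : 0; } /* 4th add fails */
    static void remove_file(int i) { printf("remove %d\n", i); }

    /* add files [0, n); on failure, remove everything added so far */
    static int addrm_files(int n)
    {
        int i, end = n, is_add = 1, ret = 0;

    restart:
        for (i = 0; i < end; i++) {
            if (is_add) {
                ret = add_file(i);
                if (ret) {
                    /* re-walk the successful prefix, removing */
                    end = i;
                    is_add = 0;
                    goto restart;
                }
                printf("add %d\n", i);
            } else {
                remove_file(i);
            }
        }
        return ret;
    }

    int main(void)
    {
        printf("addrm_files -> %d\n", addrm_files(6));
        return 0;
    }
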
@@ -3243,7 +3385,7 @@ static int cgroup_apply_cftypes(struct cftype *cfts, bool is_add)
                if (cgroup_is_dead(cgrp))
                        continue;
 
-               ret = cgroup_addrm_files(cgrp, cfts, is_add);
+               ret = cgroup_addrm_files(css, cgrp, cfts, is_add);
                if (ret)
                        break;
        }
@@ -3355,7 +3497,7 @@ static int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
 {
        int ret;
 
-       if (ss->disabled)
+       if (!cgroup_ssid_enabled(ss->id))
                return 0;
 
        if (!cfts || cfts[0].name[0] == '\0')
@@ -3405,17 +3547,8 @@ int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
 {
        struct cftype *cft;
 
-       /*
-        * If legacy_flies_on_dfl, we want to show the legacy files on the
-        * dfl hierarchy but iff the target subsystem hasn't been updated
-        * for the dfl hierarchy yet.
-        */
-       if (!cgroup_legacy_files_on_dfl ||
-           ss->dfl_cftypes != ss->legacy_cftypes) {
-               for (cft = cfts; cft && cft->name[0] != '\0'; cft++)
-                       cft->flags |= __CFTYPE_NOT_ON_DFL;
-       }
-
+       for (cft = cfts; cft && cft->name[0] != '\0'; cft++)
+               cft->flags |= __CFTYPE_NOT_ON_DFL;
        return cgroup_add_cftypes(ss, cfts);
 }
 
@@ -3430,10 +3563,10 @@ static int cgroup_task_count(const struct cgroup *cgrp)
        int count = 0;
        struct cgrp_cset_link *link;
 
-       down_read(&css_set_rwsem);
+       spin_lock_bh(&css_set_lock);
        list_for_each_entry(link, &cgrp->cset_links, cset_link)
                count += atomic_read(&link->cset->refcount);
-       up_read(&css_set_rwsem);
+       spin_unlock_bh(&css_set_lock);
        return count;
 }
 
@@ -3665,22 +3798,25 @@ bool css_has_online_children(struct cgroup_subsys_state *css)
 }
 
 /**
- * css_advance_task_iter - advance a task itererator to the next css_set
+ * css_task_iter_advance_css_set - advance a task iterator to the next css_set
  * @it: the iterator to advance
  *
  * Advance @it to the next css_set to walk.
  */
-static void css_advance_task_iter(struct css_task_iter *it)
+static void css_task_iter_advance_css_set(struct css_task_iter *it)
 {
        struct list_head *l = it->cset_pos;
        struct cgrp_cset_link *link;
        struct css_set *cset;
 
+       lockdep_assert_held(&css_set_lock);
+
        /* Advance to the next non-empty css_set */
        do {
                l = l->next;
                if (l == it->cset_head) {
                        it->cset_pos = NULL;
+                       it->task_pos = NULL;
                        return;
                }
 
@@ -3691,7 +3827,7 @@ static void css_advance_task_iter(struct css_task_iter *it)
                        link = list_entry(l, struct cgrp_cset_link, cset_link);
                        cset = link->cset;
                }
-       } while (list_empty(&cset->tasks) && list_empty(&cset->mg_tasks));
+       } while (!css_set_populated(cset));
 
        it->cset_pos = l;
 
@@ -3702,6 +3838,52 @@ static void css_advance_task_iter(struct css_task_iter *it)
 
        it->tasks_head = &cset->tasks;
        it->mg_tasks_head = &cset->mg_tasks;
+
+       /*
+        * We don't keep css_sets locked across iteration steps and thus
+        * need to take steps to ensure that iteration can be resumed after
+        * the lock is re-acquired.  Iteration is performed at two levels -
+        * css_sets and tasks in them.
+        *
+        * Once created, a css_set never leaves its cgroup lists, so a
+        * pinned css_set is guaranteed to stay put and we can resume
+        * iteration afterwards.
+        *
+        * Tasks may leave @cset across iteration steps.  This is resolved
+        * by registering each iterator with the css_set currently being
+        * walked and making css_set_move_task() advance iterators whose
+        * next task is leaving.
+        */
+       if (it->cur_cset) {
+               list_del(&it->iters_node);
+               put_css_set_locked(it->cur_cset);
+       }
+       get_css_set(cset);
+       it->cur_cset = cset;
+       list_add(&it->iters_node, &cset->task_iters);
+}
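
The comment above states the invariant: every active iterator registers on the css_set it is walking so that css_set_move_task() can advance any iterator whose next task is about to leave. A toy model of registered cursors being repaired on removal, using plain arrays instead of the kernel's lists:

    #include <stdio.h>

    #define NTASKS 4
    #define NITERS 2

    static int present[NTASKS] = { 1, 1, 1, 1 };
    static int cursor[NITERS]  = { 1, 2 };  /* "next task" each iter visits */

    /* advance @it to the next present task, or past the end */
    static void iter_advance(int it)
    {
        do {
            cursor[it]++;
        } while (cursor[it] < NTASKS && !present[cursor[it]]);
    }

    /* a task leaves: first repair every iterator parked on it */
    static void task_leave(int t)
    {
        for (int it = 0; it < NITERS; it++)
            if (cursor[it] == t)
                iter_advance(it);
        present[t] = 0;
    }

    int main(void)
    {
        task_leave(1);  /* iter 0 was parked on task 1 */
        task_leave(3);
        printf("iter 0 -> %d, iter 1 -> %d\n", cursor[0], cursor[1]);
        /* iter 0 skipped straight to task 2; iter 1 is unaffected */
        return 0;
    }
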
+
+static void css_task_iter_advance(struct css_task_iter *it)
+{
+       struct list_head *l = it->task_pos;
+
+       lockdep_assert_held(&css_set_lock);
+       WARN_ON_ONCE(!l);
+
+       /*
+        * Advance iterator to find next entry.  cset->tasks is consumed
+        * first and then ->mg_tasks.  After ->mg_tasks, we move onto the
+        * next cset.
+        */
+       l = l->next;
+
+       if (l == it->tasks_head)
+               l = it->mg_tasks_head->next;
+
+       if (l == it->mg_tasks_head)
+               css_task_iter_advance_css_set(it);
+       else
+               it->task_pos = l;
 }
 
 /**
@@ -3713,19 +3895,16 @@ static void css_advance_task_iter(struct css_task_iter *it)
  * css_task_iter_next() to walk through the tasks until the function
  * returns NULL.  On completion of iteration, css_task_iter_end() must be
  * called.
- *
- * Note that this function acquires a lock which is released when the
- * iteration finishes.  The caller can't sleep while iteration is in
- * progress.
  */
 void css_task_iter_start(struct cgroup_subsys_state *css,
                         struct css_task_iter *it)
-       __acquires(css_set_rwsem)
 {
        /* no one should try to iterate before mounting cgroups */
        WARN_ON_ONCE(!use_task_css_set_links);
 
-       down_read(&css_set_rwsem);
+       memset(it, 0, sizeof(*it));
+
+       spin_lock_bh(&css_set_lock);
 
        it->ss = css->ss;
 
@@ -3736,7 +3915,9 @@ void css_task_iter_start(struct cgroup_subsys_state *css,
 
        it->cset_head = it->cset_pos;
 
-       css_advance_task_iter(it);
+       css_task_iter_advance_css_set(it);
+
+       spin_unlock_bh(&css_set_lock);
 }
 
 /**
@@ -3749,30 +3930,23 @@ void css_task_iter_start(struct cgroup_subsys_state *css,
  */
 struct task_struct *css_task_iter_next(struct css_task_iter *it)
 {
-       struct task_struct *res;
-       struct list_head *l = it->task_pos;
+       if (it->cur_task) {
+               put_task_struct(it->cur_task);
+               it->cur_task = NULL;
+       }
 
-       /* If the iterator cg is NULL, we have no tasks */
-       if (!it->cset_pos)
-               return NULL;
-       res = list_entry(l, struct task_struct, cg_list);
+       spin_lock_bh(&css_set_lock);
 
-       /*
-        * Advance iterator to find next entry.  cset->tasks is consumed
-        * first and then ->mg_tasks.  After ->mg_tasks, we move onto the
-        * next cset.
-        */
-       l = l->next;
+       if (it->task_pos) {
+               it->cur_task = list_entry(it->task_pos, struct task_struct,
+                                         cg_list);
+               get_task_struct(it->cur_task);
+               css_task_iter_advance(it);
+       }
 
-       if (l == it->tasks_head)
-               l = it->mg_tasks_head->next;
+       spin_unlock_bh(&css_set_lock);
 
-       if (l == it->mg_tasks_head)
-               css_advance_task_iter(it);
-       else
-               it->task_pos = l;
-
-       return res;
+       return it->cur_task;
 }
 
 /**
@@ -3782,9 +3956,16 @@ struct task_struct *css_task_iter_next(struct css_task_iter *it)
  * Finish task iteration started by css_task_iter_start().
  */
 void css_task_iter_end(struct css_task_iter *it)
-       __releases(css_set_rwsem)
 {
-       up_read(&css_set_rwsem);
+       if (it->cur_cset) {
+               spin_lock_bh(&css_set_lock);
+               list_del(&it->iters_node);
+               put_css_set_locked(it->cur_cset);
+               spin_unlock_bh(&css_set_lock);
+       }
+
+       if (it->cur_task)
+               put_task_struct(it->cur_task);
 }
 
 /**
@@ -3809,10 +3990,10 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
        mutex_lock(&cgroup_mutex);
 
        /* all tasks in @from are being moved, all csets are source */
-       down_read(&css_set_rwsem);
+       spin_lock_bh(&css_set_lock);
        list_for_each_entry(link, &from->cset_links, cset_link)
                cgroup_migrate_add_src(link->cset, to, &preloaded_csets);
-       up_read(&css_set_rwsem);
+       spin_unlock_bh(&css_set_lock);
 
        ret = cgroup_migrate_prepare_dst(to, &preloaded_csets);
        if (ret)
@@ -3830,7 +4011,7 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
                css_task_iter_end(&it);
 
                if (task) {
-                       ret = cgroup_migrate(to, task, false);
+                       ret = cgroup_migrate(task, false, to);
                        put_task_struct(task);
                }
        } while (task && !ret);
@@ -4327,13 +4508,13 @@ static int cgroup_clone_children_write(struct cgroup_subsys_state *css,
 static struct cftype cgroup_dfl_base_files[] = {
        {
                .name = "cgroup.procs",
+               .file_offset = offsetof(struct cgroup, procs_file),
                .seq_start = cgroup_pidlist_start,
                .seq_next = cgroup_pidlist_next,
                .seq_stop = cgroup_pidlist_stop,
                .seq_show = cgroup_pidlist_show,
                .private = CGROUP_FILE_PROCS,
                .write = cgroup_procs_write,
-               .mode = S_IRUGO | S_IWUSR,
        },
        {
                .name = "cgroup.controllers",
@@ -4351,9 +4532,10 @@ static struct cftype cgroup_dfl_base_files[] = {
                .write = cgroup_subtree_control_write,
        },
        {
-               .name = "cgroup.populated",
+               .name = "cgroup.events",
                .flags = CFTYPE_NOT_ON_ROOT,
-               .seq_show = cgroup_populated_show,
+               .file_offset = offsetof(struct cgroup, events_file),
+               .seq_show = cgroup_events_show,
        },
        { }     /* terminate */
 };
@@ -4368,7 +4550,6 @@ static struct cftype cgroup_legacy_base_files[] = {
                .seq_show = cgroup_pidlist_show,
                .private = CGROUP_FILE_PROCS,
                .write = cgroup_procs_write,
-               .mode = S_IRUGO | S_IWUSR,
        },
        {
                .name = "cgroup.clone_children",
@@ -4388,7 +4569,6 @@ static struct cftype cgroup_legacy_base_files[] = {
                .seq_show = cgroup_pidlist_show,
                .private = CGROUP_FILE_TASKS,
                .write = cgroup_tasks_write,
-               .mode = S_IRUGO | S_IWUSR,
        },
        {
                .name = "notify_on_release",
@@ -4405,37 +4585,6 @@ static struct cftype cgroup_legacy_base_files[] = {
        { }     /* terminate */
 };
 
-/**
- * cgroup_populate_dir - create subsys files in a cgroup directory
- * @cgrp: target cgroup
- * @subsys_mask: mask of the subsystem ids whose files should be added
- *
- * On failure, no file is added.
- */
-static int cgroup_populate_dir(struct cgroup *cgrp, unsigned long subsys_mask)
-{
-       struct cgroup_subsys *ss;
-       int i, ret = 0;
-
-       /* process cftsets of each subsystem */
-       for_each_subsys(ss, i) {
-               struct cftype *cfts;
-
-               if (!(subsys_mask & (1 << i)))
-                       continue;
-
-               list_for_each_entry(cfts, &ss->cfts, node) {
-                       ret = cgroup_addrm_files(cgrp, cfts, true);
-                       if (ret < 0)
-                               goto err;
-               }
-       }
-       return 0;
-err:
-       cgroup_clear_dir(cgrp, subsys_mask);
-       return ret;
-}
-
 /*
  * css destruction is four-stage process.
  *
@@ -4464,9 +4613,13 @@ static void css_free_work_fn(struct work_struct *work)
                container_of(work, struct cgroup_subsys_state, destroy_work);
        struct cgroup_subsys *ss = css->ss;
        struct cgroup *cgrp = css->cgroup;
+       struct cgroup_file *cfile;
 
        percpu_ref_exit(&css->refcnt);
 
+       list_for_each_entry(cfile, &css->files, node)
+               kernfs_put(cfile->kn);
+
        if (ss) {
                /* css free path */
                int id = css->id;
@@ -4571,6 +4724,7 @@ static void init_and_link_css(struct cgroup_subsys_state *css,
        css->ss = ss;
        INIT_LIST_HEAD(&css->sibling);
        INIT_LIST_HEAD(&css->children);
+       INIT_LIST_HEAD(&css->files);
        css->serial_nr = css_serial_nr_next++;
 
        if (cgroup_parent(cgrp)) {
@@ -4653,7 +4807,7 @@ static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss,
        css->id = err;
 
        if (visible) {
-               err = cgroup_populate_dir(cgrp, 1 << ss->id);
+               err = css_populate_dir(css, NULL);
                if (err)
                        goto err_free_id;
        }
@@ -4679,7 +4833,7 @@ static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss,
 
 err_list_del:
        list_del_rcu(&css->sibling);
-       cgroup_clear_dir(css->cgroup, 1 << css->ss->id);
+       css_clear_dir(css, NULL);
 err_free_id:
        cgroup_idr_remove(&ss->css_idr, css->id);
 err_free_percpu_ref:
@@ -4696,7 +4850,6 @@ static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
        struct cgroup_root *root;
        struct cgroup_subsys *ss;
        struct kernfs_node *kn;
-       struct cftype *base_files;
        int ssid, ret;
 
        /* Do not accept '\n' to prevent making /proc/<pid>/cgroup unparsable.
@@ -4772,12 +4925,7 @@ static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
        if (ret)
                goto out_destroy;
 
-       if (cgroup_on_dfl(cgrp))
-               base_files = cgroup_dfl_base_files;
-       else
-               base_files = cgroup_legacy_base_files;
-
-       ret = cgroup_addrm_files(cgrp, base_files, true);
+       ret = css_populate_dir(&cgrp->self, NULL);
        if (ret)
                goto out_destroy;
 
@@ -4864,7 +5012,7 @@ static void kill_css(struct cgroup_subsys_state *css)
         * This must happen before css is disassociated with its cgroup.
         * See seq_css() for details.
         */
-       cgroup_clear_dir(css->cgroup, 1 << css->ss->id);
+       css_clear_dir(css, NULL);
 
        /*
         * Killing would put the base ref, but we need to keep it alive
@@ -4913,19 +5061,15 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
        __releases(&cgroup_mutex) __acquires(&cgroup_mutex)
 {
        struct cgroup_subsys_state *css;
-       bool empty;
        int ssid;
 
        lockdep_assert_held(&cgroup_mutex);
 
        /*
-        * css_set_rwsem synchronizes access to ->cset_links and prevents
-        * @cgrp from being removed while put_css_set() is in progress.
+        * Only migration can raise populated from zero and we're already
+        * holding cgroup_mutex.
         */
-       down_read(&css_set_rwsem);
-       empty = list_empty(&cgrp->cset_links);
-       up_read(&css_set_rwsem);
-       if (!empty)
+       if (cgroup_is_populated(cgrp))
                return -EBUSY;
 
        /*
@@ -5023,6 +5167,7 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss, bool early)
 
        have_fork_callback |= (bool)ss->fork << ss->id;
        have_exit_callback |= (bool)ss->exit << ss->id;
+       have_free_callback |= (bool)ss->free << ss->id;
        have_canfork_callback |= (bool)ss->can_fork << ss->id;
 
        /* At system boot, before all subsystems have been
@@ -5071,6 +5216,8 @@ int __init cgroup_init_early(void)
        return 0;
 }
 
+static unsigned long cgroup_disable_mask __initdata;
+
 /**
  * cgroup_init - cgroup initialization
  *
@@ -5081,8 +5228,9 @@ int __init cgroup_init(void)
 {
        struct cgroup_subsys *ss;
        unsigned long key;
-       int ssid, err;
+       int ssid;
 
+       BUG_ON(percpu_init_rwsem(&cgroup_threadgroup_rwsem));
        BUG_ON(cgroup_init_cftypes(NULL, cgroup_dfl_base_files));
        BUG_ON(cgroup_init_cftypes(NULL, cgroup_legacy_base_files));
 
@@ -5116,14 +5264,15 @@ int __init cgroup_init(void)
                 * disabled flag and cftype registration needs kmalloc,
                 * both of which aren't available during early_init.
                 */
-               if (ss->disabled)
+               if (cgroup_disable_mask & (1 << ssid)) {
+                       static_branch_disable(cgroup_subsys_enabled_key[ssid]);
+                       printk(KERN_INFO "Disabling %s control group subsystem\n",
+                              ss->name);
                        continue;
+               }
 
                cgrp_dfl_root.subsys_mask |= 1 << ss->id;
 
-               if (cgroup_legacy_files_on_dfl && !ss->dfl_cftypes)
-                       ss->dfl_cftypes = ss->legacy_cftypes;
-
                if (!ss->dfl_cftypes)
                        cgrp_dfl_root_inhibit_ss_mask |= 1 << ss->id;
 
@@ -5138,17 +5287,10 @@ int __init cgroup_init(void)
                        ss->bind(init_css_set.subsys[ssid]);
        }
 
-       err = sysfs_create_mount_point(fs_kobj, "cgroup");
-       if (err)
-               return err;
+       WARN_ON(sysfs_create_mount_point(fs_kobj, "cgroup"));
+       WARN_ON(register_filesystem(&cgroup_fs_type));
+       WARN_ON(!proc_create("cgroups", 0, NULL, &proc_cgroupstats_operations));
 
-       err = register_filesystem(&cgroup_fs_type);
-       if (err < 0) {
-               sysfs_remove_mount_point(fs_kobj, "cgroup");
-               return err;
-       }
-
-       proc_create("cgroups", 0, NULL, &proc_cgroupstats_operations);
        return 0;
 }
 
@@ -5195,7 +5337,7 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
                goto out;
 
        mutex_lock(&cgroup_mutex);
-       down_read(&css_set_rwsem);
+       spin_lock_bh(&css_set_lock);
 
        for_each_root(root) {
                struct cgroup_subsys *ss;
@@ -5215,19 +5357,39 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
                        seq_printf(m, "%sname=%s", count ? "," : "",
                                   root->name);
                seq_putc(m, ':');
+
                cgrp = task_cgroup_from_root(tsk, root);
-               path = cgroup_path(cgrp, buf, PATH_MAX);
-               if (!path) {
-                       retval = -ENAMETOOLONG;
-                       goto out_unlock;
+
+               /*
+                * On traditional hierarchies, all zombie tasks show up as
+                * belonging to the root cgroup.  On the default hierarchy,
+                * while a zombie doesn't show up in "cgroup.procs" and
+                * thus can't be migrated, its /proc/PID/cgroup keeps
+                * reporting the cgroup it belonged to before exiting.  If
+                * the cgroup is removed before the zombie is reaped,
+                * " (deleted)" is appended to the cgroup path.
+                */
+               if (cgroup_on_dfl(cgrp) || !(tsk->flags & PF_EXITING)) {
+                       path = cgroup_path(cgrp, buf, PATH_MAX);
+                       if (!path) {
+                               retval = -ENAMETOOLONG;
+                               goto out_unlock;
+                       }
+               } else {
+                       path = "/";
                }
+
                seq_puts(m, path);
-               seq_putc(m, '\n');
+
+               if (cgroup_on_dfl(cgrp) && cgroup_is_dead(cgrp))
+                       seq_puts(m, " (deleted)\n");
+               else
+                       seq_putc(m, '\n');
        }
 
        retval = 0;
 out_unlock:
-       up_read(&css_set_rwsem);
+       spin_unlock_bh(&css_set_lock);
        mutex_unlock(&cgroup_mutex);
        kfree(buf);
 out:
@@ -5251,7 +5413,8 @@ static int proc_cgroupstats_show(struct seq_file *m, void *v)
        for_each_subsys(ss, i)
                seq_printf(m, "%s\t%d\t%d\t%d\n",
                           ss->legacy_name, ss->root->hierarchy_id,
-                          atomic_read(&ss->root->nr_cgrps), !ss->disabled);
+                          atomic_read(&ss->root->nr_cgrps),
+                          cgroup_ssid_enabled(i));
 
        mutex_unlock(&cgroup_mutex);
        return 0;
@@ -5372,7 +5535,7 @@ void cgroup_post_fork(struct task_struct *child,
         * @child during its iteration.
         *
         * If we won the race, @child is associated with %current's
-        * css_set.  Grabbing css_set_rwsem guarantees both that the
+        * css_set.  Grabbing css_set_lock guarantees both that the
         * association is stable, and, on completion of the parent's
         * migration, @child is visible in the source of migration or
         * already in the destination cgroup.  This guarantee is necessary
@@ -5387,14 +5550,13 @@ void cgroup_post_fork(struct task_struct *child,
        if (use_task_css_set_links) {
                struct css_set *cset;
 
-               down_write(&css_set_rwsem);
+               spin_lock_bh(&css_set_lock);
                cset = task_css_set(current);
                if (list_empty(&child->cg_list)) {
-                       rcu_assign_pointer(child->cgroups, cset);
-                       list_add(&child->cg_list, &cset->tasks);
                        get_css_set(cset);
+                       css_set_move_task(child, NULL, cset, false);
                }
-               up_write(&css_set_rwsem);
+               spin_unlock_bh(&css_set_lock);
        }
 
        /*
@@ -5429,39 +5591,42 @@ void cgroup_exit(struct task_struct *tsk)
 {
        struct cgroup_subsys *ss;
        struct css_set *cset;
-       bool put_cset = false;
        int i;
 
        /*
         * Unlink from @tsk from its css_set.  As migration path can't race
-        * with us, we can check cg_list without grabbing css_set_rwsem.
+        * with us, we can check css_set and cg_list without synchronization.
         */
+       cset = task_css_set(tsk);
+
        if (!list_empty(&tsk->cg_list)) {
-               down_write(&css_set_rwsem);
-               list_del_init(&tsk->cg_list);
-               up_write(&css_set_rwsem);
-               put_cset = true;
+               spin_lock_bh(&css_set_lock);
+               css_set_move_task(tsk, cset, NULL, false);
+               spin_unlock_bh(&css_set_lock);
+       } else {
+               get_css_set(cset);
        }
 
-       /* Reassign the task to the init_css_set. */
-       cset = task_css_set(tsk);
-       RCU_INIT_POINTER(tsk->cgroups, &init_css_set);
-
        /* see cgroup_post_fork() for details */
-       for_each_subsys_which(ss, i, &have_exit_callback) {
-               struct cgroup_subsys_state *old_css = cset->subsys[i];
-               struct cgroup_subsys_state *css = task_css(tsk, i);
+       for_each_subsys_which(ss, i, &have_exit_callback)
+               ss->exit(tsk);
+}
 
-               ss->exit(css, old_css, tsk);
-       }
+void cgroup_free(struct task_struct *task)
+{
+       struct css_set *cset = task_css_set(task);
+       struct cgroup_subsys *ss;
+       int ssid;
 
-       if (put_cset)
-               put_css_set(cset);
+       for_each_subsys_which(ss, ssid, &have_free_callback)
+               ss->free(task);
+
+       put_css_set(cset);
 }
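
cgroup_free() consults have_free_callback, a bitmask built in cgroup_init_subsys() via have_free_callback |= (bool)ss->free << ss->id, so only subsystems with a ->free method are visited. A small sketch of walking the set bits of such a mask — subsystem names invented, and __builtin_ctzl is the GCC/Clang builtin:

    #include <stdio.h>

    #define NSUBSYS 5
    static const char *const names[NSUBSYS] =
        { "cpuset", "cpu", "memory", "pids", "blkio" };

    int main(void)
    {
        unsigned long have_free_callback = 0;

        /* as in cgroup_init_subsys(): record who has a ->free method */
        have_free_callback |= 1UL << 3;     /* pids */
        have_free_callback |= 1UL << 2;     /* memory */

        /* for_each_subsys_which(): visit only the set bits */
        for (unsigned long m = have_free_callback; m; m &= m - 1) {
            int ssid = __builtin_ctzl(m);   /* lowest set bit index */
            printf("calling %s->free()\n", names[ssid]);
        }
        return 0;
    }
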
 
 static void check_for_release(struct cgroup *cgrp)
 {
-       if (notify_on_release(cgrp) && !cgroup_has_tasks(cgrp) &&
+       if (notify_on_release(cgrp) && !cgroup_is_populated(cgrp) &&
            !css_has_online_children(&cgrp->self) && !cgroup_is_dead(cgrp))
                schedule_work(&cgrp->release_agent_work);
 }
@@ -5540,25 +5705,13 @@ static int __init cgroup_disable(char *str)
                        if (strcmp(token, ss->name) &&
                            strcmp(token, ss->legacy_name))
                                continue;
-
-                       ss->disabled = 1;
-                       printk(KERN_INFO "Disabling %s control group subsystem\n",
-                              ss->name);
-                       break;
+                       cgroup_disable_mask |= 1 << i;
                }
        }
        return 1;
 }
 __setup("cgroup_disable=", cgroup_disable);
 
-static int __init cgroup_set_legacy_files_on_dfl(char *str)
-{
-       printk("cgroup: using legacy files on the default hierarchy\n");
-       cgroup_legacy_files_on_dfl = true;
-       return 0;
-}
-__setup("cgroup__DEVEL__legacy_files_on_dfl", cgroup_set_legacy_files_on_dfl);
-
 /**
  * css_tryget_online_from_dir - get corresponding css from a cgroup dentry
  * @dentry: directory dentry of interest
@@ -5662,7 +5815,7 @@ static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
        if (!name_buf)
                return -ENOMEM;
 
-       down_read(&css_set_rwsem);
+       spin_lock_bh(&css_set_lock);
        rcu_read_lock();
        cset = rcu_dereference(current->cgroups);
        list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
@@ -5673,7 +5826,7 @@ static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
                           c->root->hierarchy_id, name_buf);
        }
        rcu_read_unlock();
-       up_read(&css_set_rwsem);
+       spin_unlock_bh(&css_set_lock);
        kfree(name_buf);
        return 0;
 }
@@ -5684,7 +5837,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
        struct cgroup_subsys_state *css = seq_css(seq);
        struct cgrp_cset_link *link;
 
-       down_read(&css_set_rwsem);
+       spin_lock_bh(&css_set_lock);
        list_for_each_entry(link, &css->cgroup->cset_links, cset_link) {
                struct css_set *cset = link->cset;
                struct task_struct *task;
@@ -5707,13 +5860,13 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
        overflow:
                seq_puts(seq, "  ...\n");
        }
-       up_read(&css_set_rwsem);
+       spin_unlock_bh(&css_set_lock);
        return 0;
 }
 
 static u64 releasable_read(struct cgroup_subsys_state *css, struct cftype *cft)
 {
-       return (!cgroup_has_tasks(css->cgroup) &&
+       return (!cgroup_is_populated(css->cgroup) &&
                !css_has_online_children(&css->cgroup->self));
 }
 
index 806cd7693ac88b0307173ab7bdaf1a8bc44ab173..cdd8df4e991c7781ac5996677d724bcd23314623 100644 (file)
@@ -266,11 +266,9 @@ static void pids_fork(struct task_struct *task, void *priv)
        css_put(old_css);
 }
 
-static void pids_exit(struct cgroup_subsys_state *css,
-                     struct cgroup_subsys_state *old_css,
-                     struct task_struct *task)
+static void pids_free(struct task_struct *task)
 {
-       struct pids_cgroup *pids = css_pids(old_css);
+       struct pids_cgroup *pids = css_pids(task_css(task, pids_cgrp_id));
 
        pids_uncharge(pids, 1);
 }
@@ -349,7 +347,7 @@ struct cgroup_subsys pids_cgrp_subsys = {
        .can_fork       = pids_can_fork,
        .cancel_fork    = pids_cancel_fork,
        .fork           = pids_fork,
-       .exit           = pids_exit,
+       .free           = pids_free,
        .legacy_cftypes = pids_files,
        .dfl_cftypes    = pids_files,
 };
index f0acff0f66c91380412dcbc1c899c94b1d3236b0..d7ccb87a6714b17cb5b4a86232428cb7b5253799 100644 (file)
@@ -473,7 +473,8 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
 
        /* On legacy hierarchy, we must be a subset of our parent cpuset. */
        ret = -EACCES;
-       if (!cgroup_on_dfl(cur->css.cgroup) && !is_cpuset_subset(trial, par))
+       if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
+           !is_cpuset_subset(trial, par))
                goto out;
 
        /*
@@ -497,7 +498,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
         * be changed to have empty cpus_allowed or mems_allowed.
         */
        ret = -ENOSPC;
-       if ((cgroup_has_tasks(cur->css.cgroup) || cur->attach_in_progress)) {
+       if ((cgroup_is_populated(cur->css.cgroup) || cur->attach_in_progress)) {
                if (!cpumask_empty(cur->cpus_allowed) &&
                    cpumask_empty(trial->cpus_allowed))
                        goto out;
@@ -879,7 +880,8 @@ static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus)
                 * If it becomes empty, inherit the effective mask of the
                 * parent, which is guaranteed to have some CPUs.
                 */
-               if (cgroup_on_dfl(cp->css.cgroup) && cpumask_empty(new_cpus))
+               if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
+                   cpumask_empty(new_cpus))
                        cpumask_copy(new_cpus, parent->effective_cpus);
 
                /* Skip the whole subtree if the cpumask remains the same. */
@@ -896,7 +898,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus)
                cpumask_copy(cp->effective_cpus, new_cpus);
                spin_unlock_irq(&callback_lock);
 
-               WARN_ON(!cgroup_on_dfl(cp->css.cgroup) &&
+               WARN_ON(!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
                        !cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
 
                update_tasks_cpumask(cp);
@@ -1135,7 +1137,8 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
                 * If it becomes empty, inherit the effective mask of the
                 * parent, which is guaranteed to have some MEMs.
                 */
-               if (cgroup_on_dfl(cp->css.cgroup) && nodes_empty(*new_mems))
+               if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
+                   nodes_empty(*new_mems))
                        *new_mems = parent->effective_mems;
 
                /* Skip the whole subtree if the nodemask remains the same. */
@@ -1152,7 +1155,7 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
                cp->effective_mems = *new_mems;
                spin_unlock_irq(&callback_lock);
 
-               WARN_ON(!cgroup_on_dfl(cp->css.cgroup) &&
+               WARN_ON(!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
                        !nodes_equal(cp->mems_allowed, cp->effective_mems));
 
                update_tasks_nodemask(cp);
@@ -1440,7 +1443,7 @@ static int cpuset_can_attach(struct cgroup_subsys_state *css,
 
        /* allow moving tasks into an empty cpuset if on default hierarchy */
        ret = -ENOSPC;
-       if (!cgroup_on_dfl(css->cgroup) &&
+       if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
            (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)))
                goto out_unlock;
 
@@ -1484,9 +1487,8 @@ static void cpuset_attach(struct cgroup_subsys_state *css,
 {
        /* static buf protected by cpuset_mutex */
        static nodemask_t cpuset_attach_nodemask_to;
-       struct mm_struct *mm;
        struct task_struct *task;
-       struct task_struct *leader = cgroup_taskset_first(tset);
+       struct task_struct *leader;
        struct cpuset *cs = css_cs(css);
        struct cpuset *oldcs = cpuset_attach_old_cs;
 
@@ -1512,26 +1514,30 @@ static void cpuset_attach(struct cgroup_subsys_state *css,
        }
 
        /*
-        * Change mm, possibly for multiple threads in a threadgroup. This is
-        * expensive and may sleep.
+        * Change mm for all threadgroup leaders.  This is expensive and may
+        * sleep; it should eventually be moved outside the migration path
+        * proper.
         */
        cpuset_attach_nodemask_to = cs->effective_mems;
-       mm = get_task_mm(leader);
-       if (mm) {
-               mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
-
-               /*
-                * old_mems_allowed is the same with mems_allowed here, except
-                * if this task is being moved automatically due to hotplug.
-                * In that case @mems_allowed has been updated and is empty,
-                * so @old_mems_allowed is the right nodesets that we migrate
-                * mm from.
-                */
-               if (is_memory_migrate(cs)) {
-                       cpuset_migrate_mm(mm, &oldcs->old_mems_allowed,
-                                         &cpuset_attach_nodemask_to);
+       cgroup_taskset_for_each_leader(leader, tset) {
+               struct mm_struct *mm = get_task_mm(leader);
+
+               if (mm) {
+                       mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
+
+                       /*
+                        * old_mems_allowed is the same as mems_allowed
+                        * here, except if this task is being moved
+                        * automatically due to hotplug.  In that case
+                        * @mems_allowed has been updated and is empty, so
+                        * @old_mems_allowed is the right nodeset to
+                        * migrate the mm from.
+                        */
+                       if (is_memory_migrate(cs)) {
+                               cpuset_migrate_mm(mm, &oldcs->old_mems_allowed,
+                                                 &cpuset_attach_nodemask_to);
+                       }
+                       mmput(mm);
                }
-               mmput(mm);
        }
 
        cs->old_mems_allowed = cpuset_attach_nodemask_to;
@@ -1594,9 +1600,6 @@ static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft,
        case FILE_MEMORY_PRESSURE_ENABLED:
                cpuset_memory_pressure_enabled = !!val;
                break;
-       case FILE_MEMORY_PRESSURE:
-               retval = -EACCES;
-               break;
        case FILE_SPREAD_PAGE:
                retval = update_flag(CS_SPREAD_PAGE, cs, val);
                break;
@@ -1863,9 +1866,6 @@ static struct cftype files[] = {
        {
                .name = "memory_pressure",
                .read_u64 = cpuset_read_u64,
-               .write_u64 = cpuset_write_u64,
-               .private = FILE_MEMORY_PRESSURE,
-               .mode = S_IRUGO,
        },
 
        {
@@ -1952,7 +1952,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
        cpuset_inc();
 
        spin_lock_irq(&callback_lock);
-       if (cgroup_on_dfl(cs->css.cgroup)) {
+       if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) {
                cpumask_copy(cs->effective_cpus, parent->effective_cpus);
                cs->effective_mems = parent->effective_mems;
        }
@@ -2029,7 +2029,7 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css)
        mutex_lock(&cpuset_mutex);
        spin_lock_irq(&callback_lock);
 
-       if (cgroup_on_dfl(root_css->cgroup)) {
+       if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) {
                cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
                top_cpuset.mems_allowed = node_possible_map;
        } else {
@@ -2210,7 +2210,7 @@ retry:
        cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus);
        mems_updated = !nodes_equal(new_mems, cs->effective_mems);
 
-       if (cgroup_on_dfl(cs->css.cgroup))
+       if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys))
                hotplug_update_tasks(cs, &new_cpus, &new_mems,
                                     cpus_updated, mems_updated);
        else
@@ -2241,7 +2241,7 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
        static cpumask_t new_cpus;
        static nodemask_t new_mems;
        bool cpus_updated, mems_updated;
-       bool on_dfl = cgroup_on_dfl(top_cpuset.css.cgroup);
+       bool on_dfl = cgroup_subsys_on_dfl(cpuset_cgrp_subsys);
 
        mutex_lock(&cpuset_mutex);
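A note on the conversion repeated throughout this file: cgroup_on_dfl()
had to chase css.cgroup->root at every test, while a controller is
either entirely on the unified hierarchy or entirely off it.  The
replacement is assumed to be a per-subsystem static-key branch, along
the lines of (the token-pasted key name is an assumption based on
include/linux/cgroup.h in this merge):

	#define cgroup_subsys_on_dfl(ss)	\
		static_branch_likely(&ss ## _on_dfl_key)

which is why call sites no longer need a cpuset, or any cgroup pointer,
at hand.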
 
index 39db20c6248e47c940bd8721c41ade530e1eb5c9..1a734e0adfa78259dac6cae57239a2d5f992a9ac 100644 (file)
@@ -9460,17 +9460,9 @@ static void perf_cgroup_attach(struct cgroup_subsys_state *css,
                task_function_call(task, __perf_cgroup_move, task);
 }
 
-static void perf_cgroup_exit(struct cgroup_subsys_state *css,
-                            struct cgroup_subsys_state *old_css,
-                            struct task_struct *task)
-{
-       task_function_call(task, __perf_cgroup_move, task);
-}
-
 struct cgroup_subsys perf_event_cgrp_subsys = {
        .css_alloc      = perf_cgroup_css_alloc,
        .css_free       = perf_cgroup_css_free,
-       .exit           = perf_cgroup_exit,
        .attach         = perf_cgroup_attach,
 };
 #endif /* CONFIG_CGROUP_PERF */
index 6ac894244d3978fb800f7a1a02912bb2901e5e84..825ecc32454d23f4e60216bedfb2de31fe504699 100644 (file)
@@ -251,6 +251,7 @@ void __put_task_struct(struct task_struct *tsk)
        WARN_ON(atomic_read(&tsk->usage));
        WARN_ON(tsk == current);
 
+       cgroup_free(tsk);
        task_numa_free(tsk);
        security_task_free(tsk);
        exit_creds(tsk);
@@ -1149,10 +1150,6 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
        tty_audit_fork(sig);
        sched_autogroup_fork(sig);
 
-#ifdef CONFIG_CGROUPS
-       init_rwsem(&sig->group_rwsem);
-#endif
-
        sig->oom_score_adj = current->signal->oom_score_adj;
        sig->oom_score_adj_min = current->signal->oom_score_adj_min;
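The __put_task_struct() hunk above is the release side of the new task
lifetime rules.  A sketch of what cgroup_free() is assumed to do (per
kernel/cgroup.c elsewhere in this merge; have_free_callback is the
assumed mask of subsystems with a ->free() method):

	void cgroup_free(struct task_struct *task)
	{
		struct css_set *cset = task_css_set(task);
		struct cgroup_subsys *ss;
		int ssid;

		/* let subsystems such as pids release per-task state */
		for_each_subsys_which(ss, ssid, &have_free_callback)
			ss->free(task);

		/* drop the reference that kept the zombie's css_set alive */
		put_css_set(cset);
	}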
 
index aa5973220ad213a960092012bf4493f296dab90b..4d568ac9319eaf04c9d00673483678bc5e14f22e 100644 (file)
@@ -8244,13 +8244,6 @@ static void cpu_cgroup_attach(struct cgroup_subsys_state *css,
                sched_move_task(task);
 }
 
-static void cpu_cgroup_exit(struct cgroup_subsys_state *css,
-                           struct cgroup_subsys_state *old_css,
-                           struct task_struct *task)
-{
-       sched_move_task(task);
-}
-
 #ifdef CONFIG_FAIR_GROUP_SCHED
 static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
                                struct cftype *cftype, u64 shareval)
@@ -8582,7 +8575,6 @@ struct cgroup_subsys cpu_cgrp_subsys = {
        .fork           = cpu_cgroup_fork,
        .can_attach     = cpu_cgroup_can_attach,
        .attach         = cpu_cgroup_attach,
-       .exit           = cpu_cgroup_exit,
        .legacy_cftypes = cpu_files,
        .early_init     = 1,
 };
index c57c4423c68837d14816c5ff230435e1567e7c20..b732edfddb767025185f27c8879903591c2b0c82 100644 (file)
@@ -434,7 +434,7 @@ struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
 
        memcg = page->mem_cgroup;
 
-       if (!memcg || !cgroup_on_dfl(memcg->css.cgroup))
+       if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
                memcg = root_mem_cgroup;
 
        rcu_read_unlock();
@@ -2926,7 +2926,7 @@ static int memcg_activate_kmem(struct mem_cgroup *memcg,
         * of course permitted.
         */
        mutex_lock(&memcg_create_mutex);
-       if (cgroup_has_tasks(memcg->css.cgroup) ||
+       if (cgroup_is_populated(memcg->css.cgroup) ||
            (memcg->use_hierarchy && memcg_has_children(memcg)))
                err = -EBUSY;
        mutex_unlock(&memcg_create_mutex);
@@ -4066,8 +4066,7 @@ static struct cftype mem_cgroup_legacy_files[] = {
        {
                .name = "cgroup.event_control",         /* XXX: for compat */
                .write = memcg_write_event_control,
-               .flags = CFTYPE_NO_PREFIX,
-               .mode = S_IWUGO,
+               .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
        },
        {
                .name = "swappiness",
@@ -4834,7 +4833,7 @@ static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
 {
        struct mem_cgroup *memcg = mem_cgroup_from_css(css);
        struct mem_cgroup *from;
-       struct task_struct *p;
+       struct task_struct *leader, *p;
        struct mm_struct *mm;
        unsigned long move_flags;
        int ret = 0;
@@ -4848,7 +4847,20 @@ static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
        if (!move_flags)
                return 0;
 
-       p = cgroup_taskset_first(tset);
+       /*
+        * Multi-process migrations only happen on the default hierarchy
+        * where charge immigration is not used.  Perform charge
+        * immigration if @tset contains a leader and whine if there are
+        * multiple.
+        */
+       p = NULL;
+       cgroup_taskset_for_each_leader(leader, tset) {
+               WARN_ON_ONCE(p);
+               p = leader;
+       }
+       if (!p)
+               return 0;
+
        from = mem_cgroup_from_task(p);
 
        VM_BUG_ON(from == memcg);
@@ -5064,7 +5076,7 @@ static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
         * guarantees that @root doesn't have any children, so turning it
         * on for the root memcg is enough.
         */
-       if (cgroup_on_dfl(root_css->cgroup))
+       if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
                root_mem_cgroup->use_hierarchy = true;
        else
                root_mem_cgroup->use_hierarchy = false;
@@ -5208,6 +5220,7 @@ static struct cftype memory_files[] = {
        {
                .name = "events",
                .flags = CFTYPE_NOT_ON_ROOT,
+               .file_offset = offsetof(struct mem_cgroup, events_file),
                .seq_show = memory_events_show,
        },
        { }     /* terminate */
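The new .file_offset above embeds a struct cgroup_file for "events" in
struct mem_cgroup, which is what lets the controller kick poll and
[id]notify waiters when an event fires.  A sketch of the intended use
(the helper name is illustrative, not from the patch):

	static void memcg_notify_events(struct mem_cgroup *memcg)
	{
		/* ... bump the counter backing memory_events_show() ... */
		cgroup_file_notify(&memcg->events_file);
	}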
index 7f63a9381f71ebbb0c1f9bdda94a913c930280f0..e7057af54b6e267558a99749fac80dc77dd7855f 100644 (file)
@@ -175,7 +175,7 @@ static bool sane_reclaim(struct scan_control *sc)
        if (!memcg)
                return true;
 #ifdef CONFIG_CGROUP_WRITEBACK
-       if (cgroup_on_dfl(memcg->css.cgroup))
+       if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
                return true;
 #endif
        return false;