/*
 * Limits on number of tasks subsystem for cgroups
 *
 * Copyright (C) 2011 Red Hat, Inc., Frederic Weisbecker <fweisbec@redhat.com>
 *
 * Thanks to Andrew Morton, Johannes Weiner, Li Zefan, Oleg Nesterov and
 * Paul Menage for their suggestions.
 */
11 #include <linux/err.h>
12 #include <linux/cgroup.h>
13 #include <linux/slab.h>
14 #include <linux/res_counter.h>
17 struct res_counter res;
18 struct cgroup_subsys_state css;
22 * The root task counter doesn't exist because it's not part of the
23 * whole task counting. We want to optimize the trivial case of only
24 * one root cgroup living.
26 static struct cgroup_subsys_state root_css;
29 static inline struct task_counter *cgroup_task_counter(struct cgroup *cgrp)
34 return container_of(cgroup_subsys_state(cgrp, tasks_subsys_id),
35 struct task_counter, css);
38 static inline struct res_counter *cgroup_task_res_counter(struct cgroup *cgrp)
40 struct task_counter *cnt;
42 cnt = cgroup_task_counter(cgrp);
49 static struct cgroup_subsys_state *
50 task_counter_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
52 struct task_counter *cnt;
53 struct res_counter *parent_res;
58 cnt = kzalloc(sizeof(*cnt), GFP_KERNEL);
60 return ERR_PTR(-ENOMEM);
62 parent_res = cgroup_task_res_counter(cgrp->parent);
64 res_counter_init(&cnt->res, parent_res);
70 * Inherit the limit value of the parent. This is not really to enforce
71 * a limit below or equal to the one of the parent which can be changed
72 * concurrently anyway. This is just to honour the clone flag.
74 static void task_counter_post_clone(struct cgroup_subsys *ss,
77 /* cgrp can't be root, so cgroup_task_res_counter() can't return NULL */
78 res_counter_inherit(cgroup_task_res_counter(cgrp), RES_LIMIT);
/*
 * Release the cgroup's task counter. cgroup_task_counter() returns
 * NULL for the root cgroup, which kfree() handles as a no-op.
 */
static void task_counter_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
	struct task_counter *cnt = cgroup_task_counter(cgrp);

	kfree(cnt);
}
88 /* Uncharge the cgroup the task was attached to */
89 static void task_counter_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
90 struct cgroup *old_cgrp, struct task_struct *task)
92 /* Optimize for the root cgroup case */
94 res_counter_uncharge(cgroup_task_res_counter(old_cgrp), 1);
/*
 * Protected amongst can_attach_task/attach_task/cancel_attach_task by
 * cgroup mutex — assumed held across the whole attach sequence.
 */
static struct res_counter *common_ancestor;
104 * This does more than just probing the ability to attach to the dest cgroup.
105 * We can not just _check_ if we can attach to the destination and do the real
106 * attachment later in task_counter_attach_task() because a task in the dest
107 * cgroup can fork before and steal the last remaining count.
108 * Thus we need to charge the dest cgroup right now.
110 static int task_counter_can_attach_task(struct cgroup *cgrp,
111 struct cgroup *old_cgrp,
112 struct task_struct *tsk)
114 struct res_counter *res = cgroup_task_res_counter(cgrp);
115 struct res_counter *old_res = cgroup_task_res_counter(old_cgrp);
119 * When moving a task from a cgroup to another, we don't want
120 * to charge the common ancestors, even though they will be
121 * uncharged later from attach_task(), because during that
122 * short window between charge and uncharge, a task could fork
123 * in the ancestor and spuriously fail due to the temporary
126 common_ancestor = res_counter_common_ancestor(res, old_res);
129 * If cgrp is the root then res is NULL, however in this case
130 * the common ancestor is NULL as well, making the below a NOP.
132 err = res_counter_charge_until(res, common_ancestor, 1, NULL);
139 /* Uncharge the dest cgroup that we charged in task_counter_can_attach_task() */
140 static void task_counter_cancel_attach_task(struct cgroup *cgrp,
141 struct task_struct *tsk)
143 res_counter_uncharge_until(cgroup_task_res_counter(cgrp),
148 * This uncharge the old cgroup. We can do that now that we are sure the
149 * attachment can't cancelled anymore, because this uncharge operation
150 * couldn't be reverted later: a task in the old cgroup could fork after
151 * we uncharge and reach the task counter limit, making our return there
154 static void task_counter_attach_task(struct cgroup *cgrp,
155 struct cgroup *old_cgrp,
156 struct task_struct *tsk)
158 res_counter_uncharge_until(cgroup_task_res_counter(old_cgrp),
162 static u64 task_counter_read_u64(struct cgroup *cgrp, struct cftype *cft)
164 int type = cft->private;
166 return res_counter_read_u64(cgroup_task_res_counter(cgrp), type);
169 static int task_counter_write_u64(struct cgroup *cgrp, struct cftype *cft,
172 int type = cft->private;
174 res_counter_write_u64(cgroup_task_res_counter(cgrp), type, val);
179 static struct cftype files[] = {
182 .read_u64 = task_counter_read_u64,
183 .write_u64 = task_counter_write_u64,
184 .private = RES_LIMIT,
189 .read_u64 = task_counter_read_u64,
190 .private = RES_USAGE,
194 static int task_counter_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
199 return cgroup_add_files(cgrp, ss, files, ARRAY_SIZE(files));
203 * Charge the task counter with the new child coming, or reject it if we
206 static int task_counter_fork(struct cgroup_subsys *ss,
207 struct task_struct *child)
209 struct cgroup_subsys_state *css;
213 css = child->cgroups->subsys[tasks_subsys_id];
216 /* Optimize for the root cgroup case, which doesn't have a limit */
220 err = res_counter_charge(cgroup_task_res_counter(cgrp), 1, NULL);
227 struct cgroup_subsys tasks_subsys = {
229 .subsys_id = tasks_subsys_id,
230 .create = task_counter_create,
231 .post_clone = task_counter_post_clone,
232 .destroy = task_counter_destroy,
233 .exit = task_counter_exit,
234 .can_attach_task = task_counter_can_attach_task,
235 .cancel_attach_task = task_counter_cancel_attach_task,
236 .attach_task = task_counter_attach_task,
237 .fork = task_counter_fork,
238 .populate = task_counter_populate,