]> git.karo-electronics.de Git - karo-tx-linux.git/blob - kernel/cgroup_task_counter.c
cgroups: ERR_PTR needs err.h
[karo-tx-linux.git] / kernel / cgroup_task_counter.c
1 /*
2  * Limits on number of tasks subsystem for cgroups
3  *
4  * Copyright (C) 2011 Red Hat, Inc., Frederic Weisbecker <fweisbec@redhat.com>
5  *
6  * Thanks to Andrew Morton, Johannes Weiner, Li Zefan, Oleg Nesterov and
7  * Paul Menage for their suggestions.
8  *
9  */
10
11 #include <linux/err.h>
12 #include <linux/cgroup.h>
13 #include <linux/slab.h>
14 #include <linux/res_counter.h>
15
/*
 * Per-cgroup state for the tasks subsystem: a resource counter tracking
 * the number of tasks in the cgroup, embedded next to the css so we can
 * recover it with container_of() from the generic subsystem state.
 */
struct task_counter {
	struct res_counter		res;	/* usage/limit of tasks in this cgroup */
	struct cgroup_subsys_state	css;	/* generic cgroup subsystem state */
};
20
/*
 * The root task counter doesn't exist because it's not part of the
 * whole task counting. We want to optimize the trivial case of only
 * one root cgroup living.
 *
 * This bare css (with no surrounding task_counter) is what
 * task_counter_create() hands back for the root cgroup.
 */
static struct cgroup_subsys_state root_css;
27
28
29 static inline struct task_counter *cgroup_task_counter(struct cgroup *cgrp)
30 {
31         if (!cgrp->parent)
32                 return NULL;
33
34         return container_of(cgroup_subsys_state(cgrp, tasks_subsys_id),
35                             struct task_counter, css);
36 }
37
38 static inline struct res_counter *cgroup_task_res_counter(struct cgroup *cgrp)
39 {
40         struct task_counter *cnt;
41
42         cnt = cgroup_task_counter(cgrp);
43         if (!cnt)
44                 return NULL;
45
46         return &cnt->res;
47 }
48
49 static struct cgroup_subsys_state *
50 task_counter_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
51 {
52         struct task_counter *cnt;
53         struct res_counter *parent_res;
54
55         if (!cgrp->parent)
56                 return &root_css;
57
58         cnt = kzalloc(sizeof(*cnt), GFP_KERNEL);
59         if (!cnt)
60                 return ERR_PTR(-ENOMEM);
61
62         parent_res = cgroup_task_res_counter(cgrp->parent);
63
64         res_counter_init(&cnt->res, parent_res);
65
66         return &cnt->css;
67 }
68
69 /*
70  * Inherit the limit value of the parent. This is not really to enforce
71  * a limit below or equal to the one of the parent which can be changed
72  * concurrently anyway. This is just to honour the clone flag.
73  */
74 static void task_counter_post_clone(struct cgroup_subsys *ss,
75                                     struct cgroup *cgrp)
76 {
77         /* cgrp can't be root, so cgroup_task_res_counter() can't return NULL */
78         res_counter_inherit(cgroup_task_res_counter(cgrp), RES_LIMIT);
79 }
80
/*
 * Free the task counter of a dying cgroup. For the root cgroup
 * cgroup_task_counter() returns NULL and kfree(NULL) is a no-op.
 */
static void task_counter_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
	kfree(cgroup_task_counter(cgrp));
}
87
88 /* Uncharge the cgroup the task was attached to */
89 static void task_counter_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
90                               struct cgroup *old_cgrp, struct task_struct *task)
91 {
92         /* Optimize for the root cgroup case */
93         if (old_cgrp->parent)
94                 res_counter_uncharge(cgroup_task_res_counter(old_cgrp), 1);
95 }
96
/*
 * Protected amongst can_attach_task/attach_task/cancel_attach_task by
 * cgroup mutex
 *
 * Set by can_attach_task(); consumed by attach_task() or
 * cancel_attach_task() to uncharge only up to (not including) the
 * deepest cgroup common to the source and destination hierarchies.
 */
static struct res_counter *common_ancestor;
102
103 /*
104  * This does more than just probing the ability to attach to the dest cgroup.
105  * We can not just _check_ if we can attach to the destination and do the real
106  * attachment later in task_counter_attach_task() because a task in the dest
107  * cgroup can fork before and steal the last remaining count.
108  * Thus we need to charge the dest cgroup right now.
109  */
110 static int task_counter_can_attach_task(struct cgroup *cgrp,
111                                         struct cgroup *old_cgrp,
112                                         struct task_struct *tsk)
113 {
114         struct res_counter *res = cgroup_task_res_counter(cgrp);
115         struct res_counter *old_res = cgroup_task_res_counter(old_cgrp);
116         int err;
117
118         /*
119          * When moving a task from a cgroup to another, we don't want
120          * to charge the common ancestors, even though they will be
121          * uncharged later from attach_task(), because during that
122          * short window between charge and uncharge, a task could fork
123          * in the ancestor and spuriously fail due to the temporary
124          * charge.
125          */
126         common_ancestor = res_counter_common_ancestor(res, old_res);
127
128         /*
129          * If cgrp is the root then res is NULL, however in this case
130          * the common ancestor is NULL as well, making the below a NOP.
131          */
132         err = res_counter_charge_until(res, common_ancestor, 1, NULL);
133         if (err)
134                 return -EINVAL;
135
136         return 0;
137 }
138
139 /* Uncharge the dest cgroup that we charged in task_counter_can_attach_task() */
140 static void task_counter_cancel_attach_task(struct cgroup *cgrp,
141                                             struct task_struct *tsk)
142 {
143         res_counter_uncharge_until(cgroup_task_res_counter(cgrp),
144                                    common_ancestor, 1);
145 }
146
147 /*
148  * This uncharge the old cgroup. We can do that now that we are sure the
149  * attachment can't cancelled anymore, because this uncharge operation
150  * couldn't be reverted later: a task in the old cgroup could fork after
151  * we uncharge and reach the task counter limit, making our return there
152  * not possible.
153  */
154 static void task_counter_attach_task(struct cgroup *cgrp,
155                                      struct cgroup *old_cgrp,
156                                      struct task_struct *tsk)
157 {
158         res_counter_uncharge_until(cgroup_task_res_counter(old_cgrp),
159                                    common_ancestor, 1);
160 }
161
162 static u64 task_counter_read_u64(struct cgroup *cgrp, struct cftype *cft)
163 {
164         int type = cft->private;
165
166         return res_counter_read_u64(cgroup_task_res_counter(cgrp), type);
167 }
168
169 static int task_counter_write_u64(struct cgroup *cgrp, struct cftype *cft,
170                                   u64 val)
171 {
172         int type = cft->private;
173
174         res_counter_write_u64(cgroup_task_res_counter(cgrp), type, val);
175
176         return 0;
177 }
178
/*
 * Control files exposed in each non-root cgroup directory:
 * "tasks.limit" (read/write) and "tasks.usage" (read-only).
 */
static struct cftype files[] = {
	{
		.name		= "limit",
		.read_u64	= task_counter_read_u64,
		.write_u64	= task_counter_write_u64,
		.private	= RES_LIMIT,
	},

	{
		.name		= "usage",
		.read_u64	= task_counter_read_u64,
		.private	= RES_USAGE,
	},
};
193
194 static int task_counter_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
195 {
196         if (!cgrp->parent)
197                 return 0;
198
199         return cgroup_add_files(cgrp, ss, files, ARRAY_SIZE(files));
200 }
201
202 /*
203  * Charge the task counter with the new child coming, or reject it if we
204  * reached the limit.
205  */
206 static int task_counter_fork(struct cgroup_subsys *ss,
207                              struct task_struct *child)
208 {
209         struct cgroup_subsys_state *css;
210         struct cgroup *cgrp;
211         int err;
212
213         css = child->cgroups->subsys[tasks_subsys_id];
214         cgrp = css->cgroup;
215
216         /* Optimize for the root cgroup case, which doesn't have a limit */
217         if (!cgrp->parent)
218                 return 0;
219
220         err = res_counter_charge(cgroup_task_res_counter(cgrp), 1, NULL);
221         if (err)
222                 return -EAGAIN;
223
224         return 0;
225 }
226
/* Subsystem registration: wires the task counter callbacks into cgroup core */
struct cgroup_subsys tasks_subsys = {
	.name			= "tasks",
	.subsys_id		= tasks_subsys_id,
	.create			= task_counter_create,		/* allocate per-cgroup state */
	.post_clone		= task_counter_post_clone,	/* inherit parent's limit */
	.destroy		= task_counter_destroy,		/* free per-cgroup state */
	.exit			= task_counter_exit,		/* uncharge on task exit */
	.can_attach_task	= task_counter_can_attach_task,	/* charge dest up front */
	.cancel_attach_task	= task_counter_cancel_attach_task,
	.attach_task		= task_counter_attach_task,	/* uncharge the source */
	.fork			= task_counter_fork,		/* charge on fork */
	.populate		= task_counter_populate,	/* create control files */
};