/*
 *
 * Copyright IBM Corporation, 2012
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 */

#include <linux/cgroup.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>

struct hugetlb_cgroup {
        struct cgroup_subsys_state css;
        /*
         * the counter to account for hugepages from hugetlb.
         */
        struct res_counter hugepage[HUGE_MAX_HSTATE];
};

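/*
 * cft->private of the per-hstate control files packs the hstate index in
 * the upper 16 bits and the res_counter member to act on (RES_LIMIT,
 * RES_USAGE, RES_MAX_USAGE or RES_FAILCNT) in the lower 16 bits.
 */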
#define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val))
#define MEMFILE_IDX(val)        (((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)       ((val) & 0xffff)

static struct hugetlb_cgroup *root_h_cgroup __read_mostly;

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_css(struct cgroup_subsys_state *s)
{
        return s ? container_of(s, struct hugetlb_cgroup, css) : NULL;
}

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_task(struct task_struct *task)
{
        return hugetlb_cgroup_from_css(task_css(task, hugetlb_cgrp_id));
}

static inline bool hugetlb_cgroup_is_root(struct hugetlb_cgroup *h_cg)
{
        return (h_cg == root_h_cgroup);
}

static inline struct hugetlb_cgroup *
parent_hugetlb_cgroup(struct hugetlb_cgroup *h_cg)
{
        return hugetlb_cgroup_from_css(h_cg->css.parent);
}

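/*
 * Return true if any per-hstate counter of @h_cg still holds charged pages.
 */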
static inline bool hugetlb_cgroup_have_usage(struct hugetlb_cgroup *h_cg)
{
        int idx;

        for (idx = 0; idx < hugetlb_max_hstate; idx++) {
                if ((res_counter_read_u64(&h_cg->hugepage[idx], RES_USAGE)) > 0)
                        return true;
        }
        return false;
}

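/*
 * Allocate a new hugetlb cgroup and initialize one res_counter per hstate.
 * A child's counters are parented to the corresponding counters of
 * @parent_css so that hierarchical limits are enforced; the root cgroup's
 * counters have no parent and hence no limit.
 */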
static struct cgroup_subsys_state *
hugetlb_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
        struct hugetlb_cgroup *parent_h_cgroup = hugetlb_cgroup_from_css(parent_css);
        struct hugetlb_cgroup *h_cgroup;
        int idx;

        h_cgroup = kzalloc(sizeof(*h_cgroup), GFP_KERNEL);
        if (!h_cgroup)
                return ERR_PTR(-ENOMEM);

        if (parent_h_cgroup) {
                for (idx = 0; idx < HUGE_MAX_HSTATE; idx++)
                        res_counter_init(&h_cgroup->hugepage[idx],
                                         &parent_h_cgroup->hugepage[idx]);
        } else {
                root_h_cgroup = h_cgroup;
                for (idx = 0; idx < HUGE_MAX_HSTATE; idx++)
                        res_counter_init(&h_cgroup->hugepage[idx], NULL);
        }
        return &h_cgroup->css;
}

static void hugetlb_cgroup_css_free(struct cgroup_subsys_state *css)
{
        struct hugetlb_cgroup *h_cgroup;

        h_cgroup = hugetlb_cgroup_from_css(css);
        kfree(h_cgroup);
}

/*
 * Should be called with hugetlb_lock held.
 * Since we are holding hugetlb_lock, pages cannot be moved off the
 * active list or uncharged from the cgroup, so there is no need to take
 * a page reference or to test whether the page is active here. This
 * function cannot fail.
 */
static void hugetlb_cgroup_move_parent(int idx, struct hugetlb_cgroup *h_cg,
                                       struct page *page)
{
        int csize;
        struct res_counter *counter;
        struct res_counter *fail_res;
        struct hugetlb_cgroup *page_hcg;
        struct hugetlb_cgroup *parent = parent_hugetlb_cgroup(h_cg);

        page_hcg = hugetlb_cgroup_from_page(page);
        /*
         * We can have pages on the active list that are not charged to
         * any cgroup, i.e. huge pages consisting of fewer than 3 base
         * pages. We can safely ignore those pages.
         */
        if (!page_hcg || page_hcg != h_cg)
                goto out;

        csize = PAGE_SIZE << compound_order(page);
        if (!parent) {
                parent = root_h_cgroup;
                /* root has no limit */
                res_counter_charge_nofail(&parent->hugepage[idx],
                                          csize, &fail_res);
        }
        counter = &h_cg->hugepage[idx];
        res_counter_uncharge_until(counter, counter->parent, csize);

        set_hugetlb_cgroup(page, parent);
out:
        return;
}

/*
 * Force the hugetlb cgroup to empty the hugetlb resources by moving them to
 * the parent cgroup.
 */
static void hugetlb_cgroup_css_offline(struct cgroup_subsys_state *css)
{
        struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);
        struct hstate *h;
        struct page *page;
        int idx;

        do {
                idx = 0;
                for_each_hstate(h) {
                        spin_lock(&hugetlb_lock);
                        list_for_each_entry(page, &h->hugepage_activelist, lru)
                                hugetlb_cgroup_move_parent(idx, h_cg, page);

                        spin_unlock(&hugetlb_lock);
                        idx++;
                }
                cond_resched();
        } while (hugetlb_cgroup_have_usage(h_cg));
}

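/*
 * Charge @nr_pages pages of hstate @idx against the current task's hugetlb
 * cgroup. On success the charged cgroup is returned in @ptr so the caller
 * can later commit the charge to a page with hugetlb_cgroup_commit_charge()
 * or drop it again with hugetlb_cgroup_uncharge_cgroup().
 */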
int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
                                 struct hugetlb_cgroup **ptr)
{
        int ret = 0;
        struct res_counter *fail_res;
        struct hugetlb_cgroup *h_cg = NULL;
        unsigned long csize = nr_pages * PAGE_SIZE;

        if (hugetlb_cgroup_disabled())
                goto done;
        /*
         * We don't charge any cgroup if the compound page has fewer
         * than 3 pages.
         */
        if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
                goto done;
again:
        rcu_read_lock();
        h_cg = hugetlb_cgroup_from_task(current);
        if (!css_tryget_online(&h_cg->css)) {
                rcu_read_unlock();
                goto again;
        }
        rcu_read_unlock();

        ret = res_counter_charge(&h_cg->hugepage[idx], csize, &fail_res);
        css_put(&h_cg->css);
done:
        *ptr = h_cg;
        return ret;
}

/* Should be called with hugetlb_lock held */
void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
                                  struct hugetlb_cgroup *h_cg,
                                  struct page *page)
{
        if (hugetlb_cgroup_disabled() || !h_cg)
                return;

        set_hugetlb_cgroup(page, h_cg);
        return;
}

/*
 * Should be called with hugetlb_lock held
 */
void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
                                  struct page *page)
{
        struct hugetlb_cgroup *h_cg;
        unsigned long csize = nr_pages * PAGE_SIZE;

        if (hugetlb_cgroup_disabled())
                return;
        lockdep_assert_held(&hugetlb_lock);
        h_cg = hugetlb_cgroup_from_page(page);
        if (unlikely(!h_cg))
                return;
        set_hugetlb_cgroup(page, NULL);
        res_counter_uncharge(&h_cg->hugepage[idx], csize);
        return;
}

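/*
 * Drop a charge taken with hugetlb_cgroup_charge_cgroup() that was never
 * committed to a page, e.g. on an error path after the charge was taken.
 */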
void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
                                    struct hugetlb_cgroup *h_cg)
{
        unsigned long csize = nr_pages * PAGE_SIZE;

        if (hugetlb_cgroup_disabled() || !h_cg)
                return;

        if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
                return;

        res_counter_uncharge(&h_cg->hugepage[idx], csize);
        return;
}

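/*
 * Read handler for the per-hstate control files; cft->private tells us
 * which hstate and which res_counter member to report.
 */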
static u64 hugetlb_cgroup_read_u64(struct cgroup_subsys_state *css,
                                   struct cftype *cft)
{
        int idx, name;
        struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);

        idx = MEMFILE_IDX(cft->private);
        name = MEMFILE_ATTR(cft->private);

        return res_counter_read_u64(&h_cg->hugepage[idx], name);
}

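/*
 * Write handler for the limit_in_bytes files. The new limit is rounded up
 * to a multiple of the huge page size; limits cannot be set on the root
 * cgroup.
 */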
static ssize_t hugetlb_cgroup_write(struct kernfs_open_file *of,
                                    char *buf, size_t nbytes, loff_t off)
{
        int idx, name, ret;
        unsigned long long val;
        struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(of_css(of));

        buf = strstrip(buf);
        idx = MEMFILE_IDX(of_cft(of)->private);
        name = MEMFILE_ATTR(of_cft(of)->private);

        switch (name) {
        case RES_LIMIT:
                if (hugetlb_cgroup_is_root(h_cg)) {
                        /* Can't set limit on root */
                        ret = -EINVAL;
                        break;
                }
                /* This function does all the necessary parsing; reuse it */
                ret = res_counter_memparse_write_strategy(buf, &val);
                if (ret)
                        break;
                val = ALIGN(val, 1ULL << huge_page_shift(&hstates[idx]));
                ret = res_counter_set_limit(&h_cg->hugepage[idx], val);
                break;
        default:
                ret = -EINVAL;
                break;
        }
        return ret ?: nbytes;
}

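/*
 * Write handler for the max_usage_in_bytes and failcnt files; writing any
 * value resets the corresponding counter.
 */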
static ssize_t hugetlb_cgroup_reset(struct kernfs_open_file *of,
                                    char *buf, size_t nbytes, loff_t off)
{
        int idx, name, ret = 0;
        struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(of_css(of));

        idx = MEMFILE_IDX(of_cft(of)->private);
        name = MEMFILE_ATTR(of_cft(of)->private);

        switch (name) {
        case RES_MAX_USAGE:
                res_counter_reset_max(&h_cg->hugepage[idx]);
                break;
        case RES_FAILCNT:
                res_counter_reset_failcnt(&h_cg->hugepage[idx]);
                break;
        default:
                ret = -EINVAL;
                break;
        }
        return ret ?: nbytes;
}

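/*
 * Format a huge page size as a human-readable string ("2MB", "1GB", ...)
 * used to build the per-hstate control file names.
 */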
static char *mem_fmt(char *buf, int size, unsigned long hsize)
{
        if (hsize >= (1UL << 30))
                snprintf(buf, size, "%luGB", hsize >> 30);
        else if (hsize >= (1UL << 20))
                snprintf(buf, size, "%luMB", hsize >> 20);
        else
                snprintf(buf, size, "%luKB", hsize >> 10);
        return buf;
}

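/*
 * Register the legacy control files (limit, usage, max usage and failcnt)
 * for the hstate at @idx, e.g. "hugetlb.2MB.limit_in_bytes".
 */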
static void __init __hugetlb_cgroup_file_init(int idx)
{
        char buf[32];
        struct cftype *cft;
        struct hstate *h = &hstates[idx];

        /* format the size */
        mem_fmt(buf, 32, huge_page_size(h));

        /* Add the limit file */
        cft = &h->cgroup_files[0];
        snprintf(cft->name, MAX_CFTYPE_NAME, "%s.limit_in_bytes", buf);
        cft->private = MEMFILE_PRIVATE(idx, RES_LIMIT);
        cft->read_u64 = hugetlb_cgroup_read_u64;
        cft->write = hugetlb_cgroup_write;

        /* Add the usage file */
        cft = &h->cgroup_files[1];
        snprintf(cft->name, MAX_CFTYPE_NAME, "%s.usage_in_bytes", buf);
        cft->private = MEMFILE_PRIVATE(idx, RES_USAGE);
        cft->read_u64 = hugetlb_cgroup_read_u64;

        /* Add the MAX usage file */
        cft = &h->cgroup_files[2];
        snprintf(cft->name, MAX_CFTYPE_NAME, "%s.max_usage_in_bytes", buf);
        cft->private = MEMFILE_PRIVATE(idx, RES_MAX_USAGE);
        cft->write = hugetlb_cgroup_reset;
        cft->read_u64 = hugetlb_cgroup_read_u64;

        /* Add the failcnt file */
        cft = &h->cgroup_files[3];
        snprintf(cft->name, MAX_CFTYPE_NAME, "%s.failcnt", buf);
        cft->private = MEMFILE_PRIVATE(idx, RES_FAILCNT);
        cft->write = hugetlb_cgroup_reset;
        cft->read_u64 = hugetlb_cgroup_read_u64;

        /* NULL terminate the last cft */
        cft = &h->cgroup_files[4];
        memset(cft, 0, sizeof(*cft));

        WARN_ON(cgroup_add_legacy_cftypes(&hugetlb_cgrp_subsys,
                                          h->cgroup_files));
}

void __init hugetlb_cgroup_file_init(void)
{
        struct hstate *h;

        for_each_hstate(h) {
                /*
                 * Add cgroup control files only if the huge page consists
                 * of more than two normal pages. This is because we use
                 * page[2].lru.next for storing cgroup details.
                 */
                if (huge_page_order(h) >= HUGETLB_CGROUP_MIN_ORDER)
                        __hugetlb_cgroup_file_init(hstate_index(h));
        }
}

/*
 * hugetlb_lock will make sure a parallel cgroup rmdir won't happen
 * when we migrate hugepages
 */
void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
{
        struct hugetlb_cgroup *h_cg;
        struct hstate *h = page_hstate(oldhpage);

        if (hugetlb_cgroup_disabled())
                return;

        VM_BUG_ON_PAGE(!PageHuge(oldhpage), oldhpage);
        spin_lock(&hugetlb_lock);
        h_cg = hugetlb_cgroup_from_page(oldhpage);
        set_hugetlb_cgroup(oldhpage, NULL);

        /* move the h_cg details to new cgroup */
        set_hugetlb_cgroup(newhpage, h_cg);
        list_move(&newhpage->lru, &h->hugepage_activelist);
        spin_unlock(&hugetlb_lock);
        return;
}

struct cgroup_subsys hugetlb_cgrp_subsys = {
        .css_alloc      = hugetlb_cgroup_css_alloc,
        .css_offline    = hugetlb_cgroup_css_offline,
        .css_free       = hugetlb_cgroup_css_free,
};