1 /* memcontrol.c - Memory Controller
2  *
3  * Copyright IBM Corporation, 2007
4  * Author Balbir Singh <balbir@linux.vnet.ibm.com>
5  *
6  * Copyright 2007 OpenVZ SWsoft Inc
7  * Author: Pavel Emelianov <xemul@openvz.org>
8  *
9  * Memory thresholds
10  * Copyright (C) 2009 Nokia Corporation
11  * Author: Kirill A. Shutemov
12  *
13  * Kernel Memory Controller
14  * Copyright (C) 2012 Parallels Inc. and Google Inc.
15  * Authors: Glauber Costa and Suleiman Souhlal
16  *
17  * Native page reclaim
18  * Charge lifetime sanitation
19  * Lockless page tracking & accounting
20  * Unified hierarchy configuration model
21  * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
22  *
23  * This program is free software; you can redistribute it and/or modify
24  * it under the terms of the GNU General Public License as published by
25  * the Free Software Foundation; either version 2 of the License, or
26  * (at your option) any later version.
27  *
28  * This program is distributed in the hope that it will be useful,
29  * but WITHOUT ANY WARRANTY; without even the implied warranty of
30  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
31  * GNU General Public License for more details.
32  */
33
34 #include <linux/page_counter.h>
35 #include <linux/memcontrol.h>
36 #include <linux/cgroup.h>
37 #include <linux/mm.h>
38 #include <linux/hugetlb.h>
39 #include <linux/pagemap.h>
40 #include <linux/smp.h>
41 #include <linux/page-flags.h>
42 #include <linux/backing-dev.h>
43 #include <linux/bit_spinlock.h>
44 #include <linux/rcupdate.h>
45 #include <linux/limits.h>
46 #include <linux/export.h>
47 #include <linux/mutex.h>
48 #include <linux/rbtree.h>
49 #include <linux/slab.h>
50 #include <linux/swap.h>
51 #include <linux/swapops.h>
52 #include <linux/spinlock.h>
53 #include <linux/eventfd.h>
54 #include <linux/poll.h>
55 #include <linux/sort.h>
56 #include <linux/fs.h>
57 #include <linux/seq_file.h>
58 #include <linux/vmpressure.h>
59 #include <linux/mm_inline.h>
60 #include <linux/swap_cgroup.h>
61 #include <linux/cpu.h>
62 #include <linux/oom.h>
63 #include <linux/lockdep.h>
64 #include <linux/file.h>
65 #include <linux/tracehook.h>
66 #include "internal.h"
67 #include <net/sock.h>
68 #include <net/ip.h>
69 #include "slab.h"
70
71 #include <asm/uaccess.h>
72
73 #include <trace/events/vmscan.h>
74
75 struct cgroup_subsys memory_cgrp_subsys __read_mostly;
76 EXPORT_SYMBOL(memory_cgrp_subsys);
77
78 struct mem_cgroup *root_mem_cgroup __read_mostly;
79
80 #define MEM_CGROUP_RECLAIM_RETRIES      5
81
82 /* Socket memory accounting disabled? */
83 static bool cgroup_memory_nosocket;
84
85 /* Kernel memory accounting disabled? */
86 static bool cgroup_memory_nokmem;
87
88 /* Whether the swap controller is active */
89 #ifdef CONFIG_MEMCG_SWAP
90 int do_swap_account __read_mostly;
91 #else
92 #define do_swap_account         0
93 #endif
94
95 /* Whether legacy memory+swap accounting is active */
96 static bool do_memsw_account(void)
97 {
98         return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && do_swap_account;
99 }
100
101 static const char * const mem_cgroup_stat_names[] = {
102         "cache",
103         "rss",
104         "rss_huge",
105         "mapped_file",
106         "dirty",
107         "writeback",
108         "swap",
109 };
110
111 static const char * const mem_cgroup_events_names[] = {
112         "pgpgin",
113         "pgpgout",
114         "pgfault",
115         "pgmajfault",
116 };
117
118 static const char * const mem_cgroup_lru_names[] = {
119         "inactive_anon",
120         "active_anon",
121         "inactive_file",
122         "active_file",
123         "unevictable",
124 };
125
126 #define THRESHOLDS_EVENTS_TARGET 128
127 #define SOFTLIMIT_EVENTS_TARGET 1024
128 #define NUMAINFO_EVENTS_TARGET  1024
129
130 /*
131  * Cgroups above their limits are maintained in an RB-tree, independent of
132  * their hierarchy representation.
133  */
134
135 struct mem_cgroup_tree_per_zone {
136         struct rb_root rb_root;
137         spinlock_t lock;
138 };
139
140 struct mem_cgroup_tree_per_node {
141         struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
142 };
143
144 struct mem_cgroup_tree {
145         struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
146 };
147
148 static struct mem_cgroup_tree soft_limit_tree __read_mostly;
149
150 /* for OOM */
151 struct mem_cgroup_eventfd_list {
152         struct list_head list;
153         struct eventfd_ctx *eventfd;
154 };
155
156 /*
157  * cgroup_event represents events which userspace wants to receive.
158  */
159 struct mem_cgroup_event {
160         /*
161          * memcg which the event belongs to.
162          */
163         struct mem_cgroup *memcg;
164         /*
165          * eventfd to signal userspace about the event.
166          */
167         struct eventfd_ctx *eventfd;
168         /*
169          * Each of these is stored in a list by the cgroup.
170          */
171         struct list_head list;
172         /*
173          * register_event() callback will be used to add a new userspace
174          * waiter for changes related to this event.  Use eventfd_signal()
175          * on eventfd to send a notification to userspace.
176          */
177         int (*register_event)(struct mem_cgroup *memcg,
178                               struct eventfd_ctx *eventfd, const char *args);
179         /*
180          * unregister_event() callback will be called when userspace closes
181          * the eventfd or when the cgroup is removed.  This callback must be
182          * set if you want to provide notification functionality.
183          */
184         void (*unregister_event)(struct mem_cgroup *memcg,
185                                  struct eventfd_ctx *eventfd);
186         /*
187          * All fields below are needed to unregister the event when
188          * userspace closes the eventfd.
189          */
190         poll_table pt;
191         wait_queue_head_t *wqh;
192         wait_queue_t wait;
193         struct work_struct remove;
194 };
195
196 static void mem_cgroup_threshold(struct mem_cgroup *memcg);
197 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
198
199 /* Stuff for moving charges at task migration. */
200 /*
201  * Types of charges to be moved.
202  */
203 #define MOVE_ANON       0x1U
204 #define MOVE_FILE       0x2U
205 #define MOVE_MASK       (MOVE_ANON | MOVE_FILE)
206
207 /* "mc" and its members are protected by cgroup_mutex */
208 static struct move_charge_struct {
209         spinlock_t        lock; /* for from, to */
210         struct mm_struct  *mm;
211         struct mem_cgroup *from;
212         struct mem_cgroup *to;
213         unsigned long flags;
214         unsigned long precharge;
215         unsigned long moved_charge;
216         unsigned long moved_swap;
217         struct task_struct *moving_task;        /* a task moving charges */
218         wait_queue_head_t waitq;                /* a waitq for other context */
219 } mc = {
220         .lock = __SPIN_LOCK_UNLOCKED(mc.lock),
221         .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
222 };
223
224 /*
225  * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
226  * limit reclaim to prevent infinite loops, if they ever occur.
227  */
228 #define MEM_CGROUP_MAX_RECLAIM_LOOPS            100
229 #define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS 2
230
231 enum charge_type {
232         MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
233         MEM_CGROUP_CHARGE_TYPE_ANON,
234         MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */
235         MEM_CGROUP_CHARGE_TYPE_DROP,    /* a page was unused swap cache */
236         NR_CHARGE_TYPE,
237 };
238
239 /* for encoding cft->private value on file */
240 enum res_type {
241         _MEM,
242         _MEMSWAP,
243         _OOM_TYPE,
244         _KMEM,
245         _TCP,
246 };
247
248 #define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val))
249 #define MEMFILE_TYPE(val)       ((val) >> 16 & 0xffff)
250 #define MEMFILE_ATTR(val)       ((val) & 0xffff)
251 /* Used for OOM notifier */
252 #define OOM_CONTROL             (0)
253
254 /* Some nice accessors for the vmpressure. */
255 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
256 {
257         if (!memcg)
258                 memcg = root_mem_cgroup;
259         return &memcg->vmpressure;
260 }
261
262 struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
263 {
264         return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
265 }
266
267 static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
268 {
269         return (memcg == root_mem_cgroup);
270 }
271
272 #ifndef CONFIG_SLOB
273 /*
274  * This will be the memcg's index in each cache's ->memcg_params.memcg_caches.
275  * The main reason for not using the cgroup id for this is that it works
276  * better in sparse environments, where we have a lot of memcgs but only a
277  * few of them kmem-limited. For instance, with 200 memcgs of which only the
278  * 200th is kmem-limited, using the cgroup id would require a 200-entry
279  * array for just that one.
280  *
281  * The current size of the caches array is stored in memcg_nr_cache_ids. It
282  * will double each time we have to increase it.
283  */
284 static DEFINE_IDA(memcg_cache_ida);
285 int memcg_nr_cache_ids;
286
287 /* Protects memcg_nr_cache_ids */
288 static DECLARE_RWSEM(memcg_cache_ids_sem);
289
290 void memcg_get_cache_ids(void)
291 {
292         down_read(&memcg_cache_ids_sem);
293 }
294
295 void memcg_put_cache_ids(void)
296 {
297         up_read(&memcg_cache_ids_sem);
298 }
299
300 /*
301  * MIN_SIZE is greater than 1 because we would like to avoid going through
302  * the alloc/free process all the time. On a small machine, 4 kmem-limited
303  * cgroups is a reasonable guess. In the future, it could be a parameter or
304  * tunable, but that is not strictly necessary.
305  *
306  * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
307  * this constant directly from cgroup, but it is understandable that this is
308  * better kept as an internal representation in cgroup.c. In any case, the
309  * cgrp_id space is not getting any smaller, and we don't have to necessarily
310  * increase ours as well if it increases.
311  */
312 #define MEMCG_CACHES_MIN_SIZE 4
313 #define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX
314
315 /*
316  * A lot of the calls to the cache allocation functions are expected to be
317  * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
318  * conditional on this static branch, we have to allow modules that do
319  * kmem_cache_alloc and the like to see this symbol as well.
320  */
321 DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
322 EXPORT_SYMBOL(memcg_kmem_enabled_key);
323
324 #endif /* !CONFIG_SLOB */
325
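/* Per-zone info of @memcg for the node and zone that @zone belongs to. */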
326 static struct mem_cgroup_per_zone *
327 mem_cgroup_zone_zoneinfo(struct mem_cgroup *memcg, struct zone *zone)
328 {
329         int nid = zone_to_nid(zone);
330         int zid = zone_idx(zone);
331
332         return &memcg->nodeinfo[nid]->zoneinfo[zid];
333 }
334
335 /**
336  * mem_cgroup_css_from_page - css of the memcg associated with a page
337  * @page: page of interest
338  *
339  * If memcg is bound to the default hierarchy, css of the memcg associated
340  * with @page is returned.  The returned css remains associated with @page
341  * until it is released.
342  *
343  * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
344  * is returned.
345  */
346 struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
347 {
348         struct mem_cgroup *memcg;
349
350         memcg = page->mem_cgroup;
351
352         if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
353                 memcg = root_mem_cgroup;
354
355         return &memcg->css;
356 }
357
358 /**
359  * page_cgroup_ino - return inode number of the memcg a page is charged to
360  * @page: the page
361  *
362  * Look up the closest online ancestor of the memory cgroup @page is charged to
363  * and return its inode number or 0 if @page is not charged to any cgroup. It
364  * is safe to call this function without holding a reference to @page.
365  *
366  * Note, this function is inherently racy, because there is nothing to prevent
367  * the cgroup inode from getting torn down and potentially reallocated a moment
368  * after page_cgroup_ino() returns, so it should only be used by callers that
369  * do not care (such as procfs interfaces).
370  */
371 ino_t page_cgroup_ino(struct page *page)
372 {
373         struct mem_cgroup *memcg;
374         unsigned long ino = 0;
375
376         rcu_read_lock();
377         memcg = READ_ONCE(page->mem_cgroup);
378         while (memcg && !(memcg->css.flags & CSS_ONLINE))
379                 memcg = parent_mem_cgroup(memcg);
380         if (memcg)
381                 ino = cgroup_ino(memcg->css.cgroup);
382         rcu_read_unlock();
383         return ino;
384 }
385
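/* Per-zone info of @memcg for the node and zone that @page resides in. */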
386 static struct mem_cgroup_per_zone *
387 mem_cgroup_page_zoneinfo(struct mem_cgroup *memcg, struct page *page)
388 {
389         int nid = page_to_nid(page);
390         int zid = page_zonenum(page);
391
392         return &memcg->nodeinfo[nid]->zoneinfo[zid];
393 }
394
395 static struct mem_cgroup_tree_per_zone *
396 soft_limit_tree_node_zone(int nid, int zid)
397 {
398         return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
399 }
400
401 static struct mem_cgroup_tree_per_zone *
402 soft_limit_tree_from_page(struct page *page)
403 {
404         int nid = page_to_nid(page);
405         int zid = page_zonenum(page);
406
407         return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
408 }
409
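/*
 * Insert @mz into the soft-limit RB-tree @mctz, keyed by how far it exceeds
 * its soft limit.  A zero excess means no insertion.  Caller holds mctz->lock.
 */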
410 static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_zone *mz,
411                                          struct mem_cgroup_tree_per_zone *mctz,
412                                          unsigned long new_usage_in_excess)
413 {
414         struct rb_node **p = &mctz->rb_root.rb_node;
415         struct rb_node *parent = NULL;
416         struct mem_cgroup_per_zone *mz_node;
417
418         if (mz->on_tree)
419                 return;
420
421         mz->usage_in_excess = new_usage_in_excess;
422         if (!mz->usage_in_excess)
423                 return;
424         while (*p) {
425                 parent = *p;
426                 mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
427                                         tree_node);
428                 if (mz->usage_in_excess < mz_node->usage_in_excess)
429                         p = &(*p)->rb_left;
430                 /*
431                  * We can't avoid mem cgroups that are over their soft
432                  * limit by the same amount
433                  */
434                 else if (mz->usage_in_excess >= mz_node->usage_in_excess)
435                         p = &(*p)->rb_right;
436         }
437         rb_link_node(&mz->tree_node, parent, p);
438         rb_insert_color(&mz->tree_node, &mctz->rb_root);
439         mz->on_tree = true;
440 }
441
442 static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_zone *mz,
443                                          struct mem_cgroup_tree_per_zone *mctz)
444 {
445         if (!mz->on_tree)
446                 return;
447         rb_erase(&mz->tree_node, &mctz->rb_root);
448         mz->on_tree = false;
449 }
450
451 static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_zone *mz,
452                                        struct mem_cgroup_tree_per_zone *mctz)
453 {
454         unsigned long flags;
455
456         spin_lock_irqsave(&mctz->lock, flags);
457         __mem_cgroup_remove_exceeded(mz, mctz);
458         spin_unlock_irqrestore(&mctz->lock, flags);
459 }
460
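/*
 * Number of pages by which @memcg's usage currently exceeds its soft limit,
 * or 0 if usage is at or below the limit.
 */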
461 static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
462 {
463         unsigned long nr_pages = page_counter_read(&memcg->memory);
464         unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
465         unsigned long excess = 0;
466
467         if (nr_pages > soft_limit)
468                 excess = nr_pages - soft_limit;
469
470         return excess;
471 }
472
473 static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
474 {
475         unsigned long excess;
476         struct mem_cgroup_per_zone *mz;
477         struct mem_cgroup_tree_per_zone *mctz;
478
479         mctz = soft_limit_tree_from_page(page);
480         /*
481          * Necessary to update all ancestors when hierarchy is used,
482          * because their event counter is not touched.
483          */
484         for (; memcg; memcg = parent_mem_cgroup(memcg)) {
485                 mz = mem_cgroup_page_zoneinfo(memcg, page);
486                 excess = soft_limit_excess(memcg);
487                 /*
488                  * We have to update the tree if mz is on RB-tree or
489                  * mem is over its softlimit.
490                  */
491                 if (excess || mz->on_tree) {
492                         unsigned long flags;
493
494                         spin_lock_irqsave(&mctz->lock, flags);
495                         /* if on-tree, remove it */
496                         if (mz->on_tree)
497                                 __mem_cgroup_remove_exceeded(mz, mctz);
498                         /*
499                          * Insert again. mz->usage_in_excess will be updated.
500                          * If excess is 0, no tree ops.
501                          */
502                         __mem_cgroup_insert_exceeded(mz, mctz, excess);
503                         spin_unlock_irqrestore(&mctz->lock, flags);
504                 }
505         }
506 }
507
508 static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
509 {
510         struct mem_cgroup_tree_per_zone *mctz;
511         struct mem_cgroup_per_zone *mz;
512         int nid, zid;
513
514         for_each_node(nid) {
515                 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
516                         mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
517                         mctz = soft_limit_tree_node_zone(nid, zid);
518                         mem_cgroup_remove_exceeded(mz, mctz);
519                 }
520         }
521 }
522
523 static struct mem_cgroup_per_zone *
524 __mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
525 {
526         struct rb_node *rightmost = NULL;
527         struct mem_cgroup_per_zone *mz;
528
529 retry:
530         mz = NULL;
531         rightmost = rb_last(&mctz->rb_root);
532         if (!rightmost)
533                 goto done;              /* Nothing to reclaim from */
534
535         mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
536         /*
537          * Remove the node now but someone else can add it back;
538          * we will add it back at the end of reclaim to its correct
539          * position in the tree.
540          */
541         __mem_cgroup_remove_exceeded(mz, mctz);
542         if (!soft_limit_excess(mz->memcg) ||
543             !css_tryget_online(&mz->memcg->css))
544                 goto retry;
545 done:
546         return mz;
547 }
548
549 static struct mem_cgroup_per_zone *
550 mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
551 {
552         struct mem_cgroup_per_zone *mz;
553
554         spin_lock_irq(&mctz->lock);
555         mz = __mem_cgroup_largest_soft_limit_node(mctz);
556         spin_unlock_irq(&mctz->lock);
557         return mz;
558 }
559
560 /*
561  * Return the page count for a single (non-recursive) @memcg.
562  *
563  * Implementation note: reading per-cpu statistics for memcg.
564  *
565  * Both vmstat[] and percpu_counter use a threshold and periodic
566  * synchronization to implement a "quick" read. There is a trade-off between
567  * reading cost and precision of the value, so we might later implement
568  * periodic synchronization of the counters in memcg as well.
569  *
570  * But this _read() function is currently used for the user interface. Users
571  * account memory per cgroup and always require an exact value. Even if we
572  * provided a quick-and-fuzzy read, we would still have to visit all online
573  * cpus and compute the sum, so for now no extra synchronization is
574  * implemented (it is only done for cpu hotplug).
575  *
576  * If there are kernel-internal users that can make do with an inexact value,
577  * and reading all cpu values becomes a performance bottleneck in some common
578  * workload, a threshold and synchronization scheme like vmstat[] should be
579  * implemented.
580  */
581 static unsigned long
582 mem_cgroup_read_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx)
583 {
584         long val = 0;
585         int cpu;
586
587         /* Per-cpu values can be negative, use a signed accumulator */
588         for_each_possible_cpu(cpu)
589                 val += per_cpu(memcg->stat->count[idx], cpu);
590         /*
591          * Summing races with updates, so val may be negative.  Avoid exposing
592          * transient negative values.
593          */
594         if (val < 0)
595                 val = 0;
596         return val;
597 }
598
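/* Sum the per-cpu event counter @idx over all possible CPUs. */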
599 static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
600                                             enum mem_cgroup_events_index idx)
601 {
602         unsigned long val = 0;
603         int cpu;
604
605         for_each_possible_cpu(cpu)
606                 val += per_cpu(memcg->stat->events[idx], cpu);
607         return val;
608 }
609
610 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
611                                          struct page *page,
612                                          bool compound, int nr_pages)
613 {
614         /*
615          * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
616          * counted as CACHE even if it's on ANON LRU.
617          */
618         if (PageAnon(page))
619                 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
620                                 nr_pages);
621         else
622                 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
623                                 nr_pages);
624
625         if (compound) {
626                 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
627                 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
628                                 nr_pages);
629         }
630
631         /* pagein of a big page is an event. So, ignore page size */
632         if (nr_pages > 0)
633                 __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
634         else {
635                 __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
636                 nr_pages = -nr_pages; /* for event */
637         }
638
639         __this_cpu_add(memcg->stat->nr_page_events, nr_pages);
640 }
641
642 unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
643                                            int nid, unsigned int lru_mask)
644 {
645         unsigned long nr = 0;
646         int zid;
647
648         VM_BUG_ON((unsigned)nid >= nr_node_ids);
649
650         for (zid = 0; zid < MAX_NR_ZONES; zid++) {
651                 struct mem_cgroup_per_zone *mz;
652                 enum lru_list lru;
653
654                 for_each_lru(lru) {
655                         if (!(BIT(lru) & lru_mask))
656                                 continue;
657                         mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
658                         nr += mz->lru_size[lru];
659                 }
660         }
661         return nr;
662 }
663
664 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
665                         unsigned int lru_mask)
666 {
667         unsigned long nr = 0;
668         int nid;
669
670         for_each_node_state(nid, N_MEMORY)
671                 nr += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
672         return nr;
673 }
674
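/*
 * Returns true once the per-cpu page-event count has passed the target for
 * @target and re-arms the target for the next interval; false otherwise.
 */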
675 static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
676                                        enum mem_cgroup_events_target target)
677 {
678         unsigned long val, next;
679
680         val = __this_cpu_read(memcg->stat->nr_page_events);
681         next = __this_cpu_read(memcg->stat->targets[target]);
682         /* from time_after() in jiffies.h */
683         if ((long)next - (long)val < 0) {
684                 switch (target) {
685                 case MEM_CGROUP_TARGET_THRESH:
686                         next = val + THRESHOLDS_EVENTS_TARGET;
687                         break;
688                 case MEM_CGROUP_TARGET_SOFTLIMIT:
689                         next = val + SOFTLIMIT_EVENTS_TARGET;
690                         break;
691                 case MEM_CGROUP_TARGET_NUMAINFO:
692                         next = val + NUMAINFO_EVENTS_TARGET;
693                         break;
694                 default:
695                         break;
696                 }
697                 __this_cpu_write(memcg->stat->targets[target], next);
698                 return true;
699         }
700         return false;
701 }
702
703 /*
704  * Check events in order.
705  *
706  */
707 static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
708 {
709         /* threshold event is triggered in finer grain than soft limit */
710         if (unlikely(mem_cgroup_event_ratelimit(memcg,
711                                                 MEM_CGROUP_TARGET_THRESH))) {
712                 bool do_softlimit;
713                 bool do_numainfo __maybe_unused;
714
715                 do_softlimit = mem_cgroup_event_ratelimit(memcg,
716                                                 MEM_CGROUP_TARGET_SOFTLIMIT);
717 #if MAX_NUMNODES > 1
718                 do_numainfo = mem_cgroup_event_ratelimit(memcg,
719                                                 MEM_CGROUP_TARGET_NUMAINFO);
720 #endif
721                 mem_cgroup_threshold(memcg);
722                 if (unlikely(do_softlimit))
723                         mem_cgroup_update_tree(memcg, page);
724 #if MAX_NUMNODES > 1
725                 if (unlikely(do_numainfo))
726                         atomic_inc(&memcg->numainfo_events);
727 #endif
728         }
729 }
730
731 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
732 {
733         /*
734          * mm_update_next_owner() may clear mm->owner to NULL
735          * if it races with swapoff, page migration, etc.
736          * So this can be called with p == NULL.
737          */
738         if (unlikely(!p))
739                 return NULL;
740
741         return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
742 }
743 EXPORT_SYMBOL(mem_cgroup_from_task);
744
745 static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
746 {
747         struct mem_cgroup *memcg = NULL;
748
749         rcu_read_lock();
750         do {
751                 /*
752                  * Page cache insertions can happen without an
753                  * actual mm context, e.g. during disk probing
754                  * on boot, loopback IO, acct() writes etc.
755                  */
756                 if (unlikely(!mm))
757                         memcg = root_mem_cgroup;
758                 else {
759                         memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
760                         if (unlikely(!memcg))
761                                 memcg = root_mem_cgroup;
762                 }
763         } while (!css_tryget_online(&memcg->css));
764         rcu_read_unlock();
765         return memcg;
766 }
767
768 /**
769  * mem_cgroup_iter - iterate over memory cgroup hierarchy
770  * @root: hierarchy root
771  * @prev: previously returned memcg, NULL on first invocation
772  * @reclaim: cookie for shared reclaim walks, NULL for full walks
773  *
774  * Returns references to children of the hierarchy below @root, or
775  * @root itself, or %NULL after a full round-trip.
776  *
777  * Caller must pass the return value in @prev on subsequent
778  * invocations for reference counting, or use mem_cgroup_iter_break()
779  * to cancel a hierarchy walk before the round-trip is complete.
780  *
781  * Reclaimers can specify a zone and a priority level in @reclaim to
782  * divide up the memcgs in the hierarchy among all concurrent
783  * reclaimers operating on the same zone and priority.
784  */
785 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
786                                    struct mem_cgroup *prev,
787                                    struct mem_cgroup_reclaim_cookie *reclaim)
788 {
789         struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
790         struct cgroup_subsys_state *css = NULL;
791         struct mem_cgroup *memcg = NULL;
792         struct mem_cgroup *pos = NULL;
793
794         if (mem_cgroup_disabled())
795                 return NULL;
796
797         if (!root)
798                 root = root_mem_cgroup;
799
800         if (prev && !reclaim)
801                 pos = prev;
802
803         if (!root->use_hierarchy && root != root_mem_cgroup) {
804                 if (prev)
805                         goto out;
806                 return root;
807         }
808
809         rcu_read_lock();
810
811         if (reclaim) {
812                 struct mem_cgroup_per_zone *mz;
813
814                 mz = mem_cgroup_zone_zoneinfo(root, reclaim->zone);
815                 iter = &mz->iter[reclaim->priority];
816
817                 if (prev && reclaim->generation != iter->generation)
818                         goto out_unlock;
819
820                 while (1) {
821                         pos = READ_ONCE(iter->position);
822                         if (!pos || css_tryget(&pos->css))
823                                 break;
824                         /*
825                          * css reference reached zero, so iter->position will
826                          * be cleared by ->css_released. However, we should not
827                          * rely on this happening soon, because ->css_released
828                          * is called from a work queue, and by busy-waiting we
829                          * might block it. So we clear iter->position right
830                          * away.
831                          */
832                         (void)cmpxchg(&iter->position, pos, NULL);
833                 }
834         }
835
836         if (pos)
837                 css = &pos->css;
838
839         for (;;) {
840                 css = css_next_descendant_pre(css, &root->css);
841                 if (!css) {
842                         /*
843                          * Reclaimers share the hierarchy walk, and a
844                          * new one might jump in right at the end of
845                          * the hierarchy - make sure they see at least
846                          * one group and restart from the beginning.
847                          */
848                         if (!prev)
849                                 continue;
850                         break;
851                 }
852
853                 /*
854                  * Verify the css and acquire a reference.  The root
855                  * is provided by the caller, so we know it's alive
856                  * and kicking, and don't take an extra reference.
857                  */
858                 memcg = mem_cgroup_from_css(css);
859
860                 if (css == &root->css)
861                         break;
862
863                 if (css_tryget(css))
864                         break;
865
866                 memcg = NULL;
867         }
868
869         if (reclaim) {
870                 /*
871                  * The position could have already been updated by a competing
872                  * thread, so check that the value hasn't changed since we read
873                  * it to avoid reclaiming from the same cgroup twice.
874                  */
875                 (void)cmpxchg(&iter->position, pos, memcg);
876
877                 if (pos)
878                         css_put(&pos->css);
879
880                 if (!memcg)
881                         iter->generation++;
882                 else if (!prev)
883                         reclaim->generation = iter->generation;
884         }
885
886 out_unlock:
887         rcu_read_unlock();
888 out:
889         if (prev && prev != root)
890                 css_put(&prev->css);
891
892         return memcg;
893 }
894
895 /**
896  * mem_cgroup_iter_break - abort a hierarchy walk prematurely
897  * @root: hierarchy root
898  * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
899  */
900 void mem_cgroup_iter_break(struct mem_cgroup *root,
901                            struct mem_cgroup *prev)
902 {
903         if (!root)
904                 root = root_mem_cgroup;
905         if (prev && prev != root)
906                 css_put(&prev->css);
907 }
908
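/*
 * Clear any cached reclaim-iterator positions in @dead_memcg's ancestors that
 * still point at the dying memcg.
 */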
909 static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
910 {
911         struct mem_cgroup *memcg = dead_memcg;
912         struct mem_cgroup_reclaim_iter *iter;
913         struct mem_cgroup_per_zone *mz;
914         int nid, zid;
915         int i;
916
917         while ((memcg = parent_mem_cgroup(memcg))) {
918                 for_each_node(nid) {
919                         for (zid = 0; zid < MAX_NR_ZONES; zid++) {
920                                 mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
921                                 for (i = 0; i <= DEF_PRIORITY; i++) {
922                                         iter = &mz->iter[i];
923                                         cmpxchg(&iter->position,
924                                                 dead_memcg, NULL);
925                                 }
926                         }
927                 }
928         }
929 }
930
931 /*
932  * Iteration constructs for visiting all cgroups (under a tree).  If
933  * loops are exited prematurely (break), mem_cgroup_iter_break() must
934  * be used for reference counting.
935  */
936 #define for_each_mem_cgroup_tree(iter, root)            \
937         for (iter = mem_cgroup_iter(root, NULL, NULL);  \
938              iter != NULL;                              \
939              iter = mem_cgroup_iter(root, iter, NULL))
940
941 #define for_each_mem_cgroup(iter)                       \
942         for (iter = mem_cgroup_iter(NULL, NULL, NULL);  \
943              iter != NULL;                              \
944              iter = mem_cgroup_iter(NULL, iter, NULL))
945
946 /**
947  * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg
948  * @zone: zone of the wanted lruvec
949  * @memcg: memcg of the wanted lruvec
950  *
951  * Returns the lru list vector holding pages for the given @zone and
952  * @memcg.  This can be the global zone lruvec, if the memory controller
953  * is disabled.
954  */
955 struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
956                                       struct mem_cgroup *memcg)
957 {
958         struct mem_cgroup_per_zone *mz;
959         struct lruvec *lruvec;
960
961         if (mem_cgroup_disabled()) {
962                 lruvec = &zone->lruvec;
963                 goto out;
964         }
965
966         mz = mem_cgroup_zone_zoneinfo(memcg, zone);
967         lruvec = &mz->lruvec;
968 out:
969         /*
970          * Since a node can be onlined after the mem_cgroup was created,
971          * we have to be prepared to initialize lruvec->zone here;
972          * and if offlined then reonlined, we need to reinitialize it.
973          */
974         if (unlikely(lruvec->zone != zone))
975                 lruvec->zone = zone;
976         return lruvec;
977 }
978
979 /**
980  * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
981  * @page: the page
982  * @zone: zone of the page
983  *
984  * This function is only safe when following the LRU page isolation
985  * and putback protocol: the LRU lock must be held, and the page must
986  * either be PageLRU() or the caller must have isolated/allocated it.
987  */
988 struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
989 {
990         struct mem_cgroup_per_zone *mz;
991         struct mem_cgroup *memcg;
992         struct lruvec *lruvec;
993
994         if (mem_cgroup_disabled()) {
995                 lruvec = &zone->lruvec;
996                 goto out;
997         }
998
999         memcg = page->mem_cgroup;
1000         /*
1001          * Swapcache readahead pages are added to the LRU - and
1002          * possibly migrated - before they are charged.
1003          */
1004         if (!memcg)
1005                 memcg = root_mem_cgroup;
1006
1007         mz = mem_cgroup_page_zoneinfo(memcg, page);
1008         lruvec = &mz->lruvec;
1009 out:
1010         /*
1011          * Since a node can be onlined after the mem_cgroup was created,
1012          * we have to be prepared to initialize lruvec->zone here;
1013          * and if offlined then reonlined, we need to reinitialize it.
1014          */
1015         if (unlikely(lruvec->zone != zone))
1016                 lruvec->zone = zone;
1017         return lruvec;
1018 }
1019
1020 /**
1021  * mem_cgroup_update_lru_size - account for adding or removing an lru page
1022  * @lruvec: mem_cgroup per zone lru vector
1023  * @lru: index of lru list the page is sitting on
1024  * @nr_pages: positive when adding or negative when removing
1025  *
1026  * This function must be called under lru_lock, just before a page is added
1027  * to or just after a page is removed from an lru list (that ordering being
1028  * so as to allow it to check that lru_size 0 is consistent with list_empty).
1029  */
1030 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1031                                 int nr_pages)
1032 {
1033         struct mem_cgroup_per_zone *mz;
1034         unsigned long *lru_size;
1035         long size;
1036         bool empty;
1037
1038         if (mem_cgroup_disabled())
1039                 return;
1040
1041         mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
1042         lru_size = mz->lru_size + lru;
1043         empty = list_empty(lruvec->lists + lru);
1044
1045         if (nr_pages < 0)
1046                 *lru_size += nr_pages;
1047
1048         size = *lru_size;
1049         if (WARN_ONCE(size < 0 || empty != !size,
1050                 "%s(%p, %d, %d): lru_size %ld but %sempty\n",
1051                 __func__, lruvec, lru, nr_pages, size, empty ? "" : "not ")) {
1052                 VM_BUG_ON(1);
1053                 *lru_size = 0;
1054         }
1055
1056         if (nr_pages > 0)
1057                 *lru_size += nr_pages;
1058 }
1059
1060 bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg)
1061 {
1062         struct mem_cgroup *task_memcg;
1063         struct task_struct *p;
1064         bool ret;
1065
1066         p = find_lock_task_mm(task);
1067         if (p) {
1068                 task_memcg = get_mem_cgroup_from_mm(p->mm);
1069                 task_unlock(p);
1070         } else {
1071                 /*
1072                  * All threads may have already detached their mm's, but the oom
1073                  * killer still needs to detect if they have already been oom
1074                  * killed to prevent needlessly killing additional tasks.
1075                  */
1076                 rcu_read_lock();
1077                 task_memcg = mem_cgroup_from_task(task);
1078                 css_get(&task_memcg->css);
1079                 rcu_read_unlock();
1080         }
1081         ret = mem_cgroup_is_descendant(task_memcg, memcg);
1082         css_put(&task_memcg->css);
1083         return ret;
1084 }
1085
1086 /**
1087  * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1088  * @memcg: the memory cgroup
1089  *
1090  * Returns the maximum amount of memory @memcg can be charged with, in
1091  * pages.
1092  */
1093 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1094 {
1095         unsigned long margin = 0;
1096         unsigned long count;
1097         unsigned long limit;
1098
1099         count = page_counter_read(&memcg->memory);
1100         limit = READ_ONCE(memcg->memory.limit);
1101         if (count < limit)
1102                 margin = limit - count;
1103
1104         if (do_memsw_account()) {
1105                 count = page_counter_read(&memcg->memsw);
1106                 limit = READ_ONCE(memcg->memsw.limit);
1107                 if (count <= limit)
1108                         margin = min(margin, limit - count);
1109         }
1110
1111         return margin;
1112 }
1113
1114 /*
1115  * A routine for checking whether "mem" is under move_account() or not.
1116  *
1117  * Checks whether a cgroup is mc.from, mc.to, or in the hierarchy of the
1118  * moving cgroups. This is used for waiting at high memory pressure
1119  * caused by "move".
1120  */
1121 static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
1122 {
1123         struct mem_cgroup *from;
1124         struct mem_cgroup *to;
1125         bool ret = false;
1126         /*
1127          * Unlike task_move routines, we access mc.to, mc.from not under
1128          * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
1129          */
1130         spin_lock(&mc.lock);
1131         from = mc.from;
1132         to = mc.to;
1133         if (!from)
1134                 goto unlock;
1135
1136         ret = mem_cgroup_is_descendant(from, memcg) ||
1137                 mem_cgroup_is_descendant(to, memcg);
1138 unlock:
1139         spin_unlock(&mc.lock);
1140         return ret;
1141 }
1142
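/*
 * If a charge-moving operation involving @memcg's hierarchy is in progress,
 * sleep until it signals completion.  Returns true if we waited.
 */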
1143 static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
1144 {
1145         if (mc.moving_task && current != mc.moving_task) {
1146                 if (mem_cgroup_under_move(memcg)) {
1147                         DEFINE_WAIT(wait);
1148                         prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1149                         /* moving charge context might have finished. */
1150                         if (mc.moving_task)
1151                                 schedule();
1152                         finish_wait(&mc.waitq, &wait);
1153                         return true;
1154                 }
1155         }
1156         return false;
1157 }
1158
1159 #define K(x) ((x) << (PAGE_SHIFT-10))
1160 /**
1161  * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller.
1162  * @memcg: The memory cgroup that went over limit
1163  * @p: Task that is going to be killed
1164  *
1165  * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1166  * enabled
1167  */
1168 void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
1169 {
1170         struct mem_cgroup *iter;
1171         unsigned int i;
1172
1173         rcu_read_lock();
1174
1175         if (p) {
1176                 pr_info("Task in ");
1177                 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1178                 pr_cont(" killed as a result of limit of ");
1179         } else {
1180                 pr_info("Memory limit reached of cgroup ");
1181         }
1182
1183         pr_cont_cgroup_path(memcg->css.cgroup);
1184         pr_cont("\n");
1185
1186         rcu_read_unlock();
1187
1188         pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1189                 K((u64)page_counter_read(&memcg->memory)),
1190                 K((u64)memcg->memory.limit), memcg->memory.failcnt);
1191         pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1192                 K((u64)page_counter_read(&memcg->memsw)),
1193                 K((u64)memcg->memsw.limit), memcg->memsw.failcnt);
1194         pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1195                 K((u64)page_counter_read(&memcg->kmem)),
1196                 K((u64)memcg->kmem.limit), memcg->kmem.failcnt);
1197
1198         for_each_mem_cgroup_tree(iter, memcg) {
1199                 pr_info("Memory cgroup stats for ");
1200                 pr_cont_cgroup_path(iter->css.cgroup);
1201                 pr_cont(":");
1202
1203                 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
1204                         if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
1205                                 continue;
1206                         pr_cont(" %s:%luKB", mem_cgroup_stat_names[i],
1207                                 K(mem_cgroup_read_stat(iter, i)));
1208                 }
1209
1210                 for (i = 0; i < NR_LRU_LISTS; i++)
1211                         pr_cont(" %s:%luKB", mem_cgroup_lru_names[i],
1212                                 K(mem_cgroup_nr_lru_pages(iter, BIT(i))));
1213
1214                 pr_cont("\n");
1215         }
1216 }
1217
1218 /*
1219  * This function returns the number of memcgs under the hierarchy tree.
1220  * Returns 1 (self count) if there are no children.
1221  */
1222 static int mem_cgroup_count_children(struct mem_cgroup *memcg)
1223 {
1224         int num = 0;
1225         struct mem_cgroup *iter;
1226
1227         for_each_mem_cgroup_tree(iter, memcg)
1228                 num++;
1229         return num;
1230 }
1231
1232 /*
1233  * Return the memory (and swap, if configured) limit for a memcg.
1234  */
1235 static unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
1236 {
1237         unsigned long limit;
1238
1239         limit = memcg->memory.limit;
1240         if (mem_cgroup_swappiness(memcg)) {
1241                 unsigned long memsw_limit;
1242                 unsigned long swap_limit;
1243
1244                 memsw_limit = memcg->memsw.limit;
1245                 swap_limit = memcg->swap.limit;
1246                 swap_limit = min(swap_limit, (unsigned long)total_swap_pages);
1247                 limit = min(limit + swap_limit, memsw_limit);
1248         }
1249         return limit;
1250 }
1251
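/*
 * Pick the task with the highest OOM badness within @memcg's hierarchy and
 * kill it.  Returns true if a victim was chosen.
 */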
1252 static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1253                                      int order)
1254 {
1255         struct oom_control oc = {
1256                 .zonelist = NULL,
1257                 .nodemask = NULL,
1258                 .gfp_mask = gfp_mask,
1259                 .order = order,
1260         };
1261         struct mem_cgroup *iter;
1262         unsigned long chosen_points = 0;
1263         unsigned long totalpages;
1264         unsigned int points = 0;
1265         struct task_struct *chosen = NULL;
1266
1267         mutex_lock(&oom_lock);
1268
1269         /*
1270          * If current has a pending SIGKILL or is exiting, then automatically
1271          * select it.  The goal is to allow it to allocate so that it may
1272          * quickly exit and free its memory.
1273          */
1274         if (fatal_signal_pending(current) || task_will_free_mem(current)) {
1275                 mark_oom_victim(current);
1276                 goto unlock;
1277         }
1278
1279         check_panic_on_oom(&oc, CONSTRAINT_MEMCG, memcg);
1280         totalpages = mem_cgroup_get_limit(memcg) ? : 1;
1281         for_each_mem_cgroup_tree(iter, memcg) {
1282                 struct css_task_iter it;
1283                 struct task_struct *task;
1284
1285                 css_task_iter_start(&iter->css, &it);
1286                 while ((task = css_task_iter_next(&it))) {
1287                         switch (oom_scan_process_thread(&oc, task, totalpages)) {
1288                         case OOM_SCAN_SELECT:
1289                                 if (chosen)
1290                                         put_task_struct(chosen);
1291                                 chosen = task;
1292                                 chosen_points = ULONG_MAX;
1293                                 get_task_struct(chosen);
1294                                 /* fall through */
1295                         case OOM_SCAN_CONTINUE:
1296                                 continue;
1297                         case OOM_SCAN_ABORT:
1298                                 css_task_iter_end(&it);
1299                                 mem_cgroup_iter_break(memcg, iter);
1300                                 if (chosen)
1301                                         put_task_struct(chosen);
1302                                 goto unlock;
1303                         case OOM_SCAN_OK:
1304                                 break;
1305                         }
1306                         points = oom_badness(task, memcg, NULL, totalpages);
1307                         if (!points || points < chosen_points)
1308                                 continue;
1309                         /* Prefer thread group leaders for display purposes */
1310                         if (points == chosen_points &&
1311                             thread_group_leader(chosen))
1312                                 continue;
1313
1314                         if (chosen)
1315                                 put_task_struct(chosen);
1316                         chosen = task;
1317                         chosen_points = points;
1318                         get_task_struct(chosen);
1319                 }
1320                 css_task_iter_end(&it);
1321         }
1322
1323         if (chosen) {
1324                 points = chosen_points * 1000 / totalpages;
1325                 oom_kill_process(&oc, chosen, points, totalpages, memcg,
1326                                  "Memory cgroup out of memory");
1327         }
1328 unlock:
1329         mutex_unlock(&oom_lock);
1330         return chosen;
1331 }
1332
1333 #if MAX_NUMNODES > 1
1334
1335 /**
1336  * test_mem_cgroup_node_reclaimable
1337  * @memcg: the target memcg
1338  * @nid: the node ID to be checked.
1339  * @noswap: specify true here if the caller wants file-only information.
1340  *
1341  * This function returns whether the specified memcg contains any
1342  * reclaimable pages on a node. Returns true if there are any reclaimable
1343  * pages in the node.
1344  */
1345 static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
1346                 int nid, bool noswap)
1347 {
1348         if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
1349                 return true;
1350         if (noswap || !total_swap_pages)
1351                 return false;
1352         if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
1353                 return true;
1354         return false;
1355
1356 }
1357
1358 /*
1359  * Always updating the nodemask is not very good - even if we have an empty
1360  * list or the wrong list here, we can start from some node and traverse all
1361  * nodes based on the zonelist. So update the list loosely once per 10 secs.
1362  *
1363  */
1364 static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
1365 {
1366         int nid;
1367         /*
1368          * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET
1369          * pagein/pageout changes since the last update.
1370          */
1371         if (!atomic_read(&memcg->numainfo_events))
1372                 return;
1373         if (atomic_inc_return(&memcg->numainfo_updating) > 1)
1374                 return;
1375
1376         /* make a nodemask where this memcg uses memory from */
1377         memcg->scan_nodes = node_states[N_MEMORY];
1378
1379         for_each_node_mask(nid, node_states[N_MEMORY]) {
1380
1381                 if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
1382                         node_clear(nid, memcg->scan_nodes);
1383         }
1384
1385         atomic_set(&memcg->numainfo_events, 0);
1386         atomic_set(&memcg->numainfo_updating, 0);
1387 }
1388
1389 /*
1390  * Select a node to start reclaim from. Because all we need is to reduce the
1391  * usage counter, starting from anywhere is OK. Reclaiming from the current
1392  * node has both pros and cons:
1393  *
1394  * Freeing memory from the current node means freeing memory from a node we
1395  * use or have used, so it may degrade the LRU; and if several threads hit
1396  * their limits, they will contend on that node. But freeing from a remote
1397  * node costs more to reclaim because of memory latency.
1398  *
1399  * For now we use round-robin. A better algorithm is welcome.
1400  */
1401 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1402 {
1403         int node;
1404
1405         mem_cgroup_may_update_nodemask(memcg);
1406         node = memcg->last_scanned_node;
1407
1408         node = next_node_in(node, memcg->scan_nodes);
1409         /*
1410          * mem_cgroup_may_update_nodemask might have seen no reclaimable pages
1411          * last time it really checked all the LRUs due to rate limiting.
1412          * Fallback to the current node in that case for simplicity.
1413          */
1414         if (unlikely(node == MAX_NUMNODES))
1415                 node = numa_node_id();
1416
1417         memcg->last_scanned_node = node;
1418         return node;
1419 }
1420 #else
1421 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1422 {
1423         return 0;
1424 }
1425 #endif
1426
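/*
 * Reclaim from the memcgs in @root_memcg's hierarchy until @root_memcg no
 * longer exceeds its soft limit (or the loop limits are hit).  Returns the
 * number of pages reclaimed; *total_scanned is bumped by the pages scanned.
 */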
1427 static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1428                                    struct zone *zone,
1429                                    gfp_t gfp_mask,
1430                                    unsigned long *total_scanned)
1431 {
1432         struct mem_cgroup *victim = NULL;
1433         int total = 0;
1434         int loop = 0;
1435         unsigned long excess;
1436         unsigned long nr_scanned;
1437         struct mem_cgroup_reclaim_cookie reclaim = {
1438                 .zone = zone,
1439                 .priority = 0,
1440         };
1441
1442         excess = soft_limit_excess(root_memcg);
1443
1444         while (1) {
1445                 victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1446                 if (!victim) {
1447                         loop++;
1448                         if (loop >= 2) {
1449                                 /*
1450                                  * If we have not been able to reclaim
1451                                  * anything, it might be because there are
1452                                  * no reclaimable pages under this hierarchy.
1453                                  */
1454                                 if (!total)
1455                                         break;
1456                                 /*
1457                                  * We want to do more targeted reclaim.
1458                                  * excess >> 2 is not too much, so we do not
1459                                  * reclaim excessively, nor too little, which
1460                                  * would keep us coming back to this cgroup.
1461                                  */
1462                                 if (total >= (excess >> 2) ||
1463                                         (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1464                                         break;
1465                         }
1466                         continue;
1467                 }
1468                 total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false,
1469                                                      zone, &nr_scanned);
1470                 *total_scanned += nr_scanned;
1471                 if (!soft_limit_excess(root_memcg))
1472                         break;
1473         }
1474         mem_cgroup_iter_break(root_memcg, victim);
1475         return total;
1476 }
1477
1478 #ifdef CONFIG_LOCKDEP
1479 static struct lockdep_map memcg_oom_lock_dep_map = {
1480         .name = "memcg_oom_lock",
1481 };
1482 #endif
1483
1484 static DEFINE_SPINLOCK(memcg_oom_lock);
1485
1486 /*
1487  * Check whether the OOM killer is already running under our hierarchy.
1488  * If someone is running it, return false.
1489  */
1490 static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
1491 {
1492         struct mem_cgroup *iter, *failed = NULL;
1493
1494         spin_lock(&memcg_oom_lock);
1495
1496         for_each_mem_cgroup_tree(iter, memcg) {
1497                 if (iter->oom_lock) {
1498                         /*
1499                          * this subtree of our hierarchy is already locked
1500                          * so we cannot grant the lock.
1501                          */
1502                         failed = iter;
1503                         mem_cgroup_iter_break(memcg, iter);
1504                         break;
1505                 } else
1506                         iter->oom_lock = true;
1507         }
1508
1509         if (failed) {
1510                 /*
1511                  * OK, we failed to lock the whole subtree so we have
1512                  * to clean up what we set up, up to the failing subtree
1513                  */
1514                 for_each_mem_cgroup_tree(iter, memcg) {
1515                         if (iter == failed) {
1516                                 mem_cgroup_iter_break(memcg, iter);
1517                                 break;
1518                         }
1519                         iter->oom_lock = false;
1520                 }
1521         } else
1522                 mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
1523
1524         spin_unlock(&memcg_oom_lock);
1525
1526         return !failed;
1527 }
1528
1529 static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
1530 {
1531         struct mem_cgroup *iter;
1532
1533         spin_lock(&memcg_oom_lock);
1534         mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_);
1535         for_each_mem_cgroup_tree(iter, memcg)
1536                 iter->oom_lock = false;
1537         spin_unlock(&memcg_oom_lock);
1538 }
1539
1540 static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
1541 {
1542         struct mem_cgroup *iter;
1543
1544         spin_lock(&memcg_oom_lock);
1545         for_each_mem_cgroup_tree(iter, memcg)
1546                 iter->under_oom++;
1547         spin_unlock(&memcg_oom_lock);
1548 }
1549
1550 static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
1551 {
1552         struct mem_cgroup *iter;
1553
1554         /*
1555          * When a new child is created while the hierarchy is under oom,
1556          * mem_cgroup_mark_under_oom() may not have been called. Watch for underflow.
1557          */
1558         spin_lock(&memcg_oom_lock);
1559         for_each_mem_cgroup_tree(iter, memcg)
1560                 if (iter->under_oom > 0)
1561                         iter->under_oom--;
1562         spin_unlock(&memcg_oom_lock);
1563 }
1564
1565 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1566
1567 struct oom_wait_info {
1568         struct mem_cgroup *memcg;
1569         wait_queue_t    wait;
1570 };
1571
1572 static int memcg_oom_wake_function(wait_queue_t *wait,
1573         unsigned mode, int sync, void *arg)
1574 {
1575         struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
1576         struct mem_cgroup *oom_wait_memcg;
1577         struct oom_wait_info *oom_wait_info;
1578
1579         oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1580         oom_wait_memcg = oom_wait_info->memcg;
1581
1582         if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
1583             !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
1584                 return 0;
1585         return autoremove_wake_function(wait, mode, sync, arg);
1586 }
1587
1588 static void memcg_oom_recover(struct mem_cgroup *memcg)
1589 {
1590         /*
1591          * For the following lockless ->under_oom test, the only required
1592          * guarantee is that it must see the state asserted by an OOM when
1593          * this function is called as a result of userland actions
1594          * triggered by the notification of the OOM.  This is trivially
1595          * achieved by invoking mem_cgroup_mark_under_oom() before
1596          * triggering notification.
1597          */
1598         if (memcg && memcg->under_oom)
1599                 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
1600 }
1601
1602 static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
1603 {
1604         if (!current->memcg_may_oom)
1605                 return;
1606         /*
1607          * We are in the middle of the charge context here, so we
1608          * don't want to block when potentially sitting on a callstack
1609          * that holds all kinds of filesystem and mm locks.
1610          *
1611          * Also, the caller may handle a failed allocation gracefully
1612          * (like optional page cache readahead) and so an OOM killer
1613          * invocation might not even be necessary.
1614          *
1615          * That's why we don't do anything here except remember the
1616          * OOM context and then deal with it at the end of the page
1617          * fault when the stack is unwound, the locks are released,
1618          * and when we know whether the fault was overall successful.
1619          */
1620         css_get(&memcg->css);
1621         current->memcg_in_oom = memcg;
1622         current->memcg_oom_gfp_mask = mask;
1623         current->memcg_oom_order = order;
1624 }
1625
1626 /**
1627  * mem_cgroup_oom_synchronize - complete memcg OOM handling
1628  * @handle: actually kill/wait or just clean up the OOM state
1629  *
1630  * This has to be called at the end of a page fault if the memcg OOM
1631  * handler was enabled.
1632  *
1633  * Memcg supports userspace OOM handling where failed allocations must
1634  * sleep on a waitqueue until the userspace task resolves the
1635  * situation.  Sleeping directly in the charge context with all kinds
1636  * of locks held is not a good idea, instead we remember an OOM state
1637  * in the task and mem_cgroup_oom_synchronize() has to be called at
1638  * the end of the page fault to complete the OOM handling.
1639  *
1640  * Returns %true if an ongoing memcg OOM situation was detected and
1641  * completed, %false otherwise.
1642  */
1643 bool mem_cgroup_oom_synchronize(bool handle)
1644 {
1645         struct mem_cgroup *memcg = current->memcg_in_oom;
1646         struct oom_wait_info owait;
1647         bool locked;
1648
1649         /* OOM is global, do not handle */
1650         if (!memcg)
1651                 return false;
1652
1653         if (!handle || oom_killer_disabled)
1654                 goto cleanup;
1655
1656         owait.memcg = memcg;
1657         owait.wait.flags = 0;
1658         owait.wait.func = memcg_oom_wake_function;
1659         owait.wait.private = current;
1660         INIT_LIST_HEAD(&owait.wait.task_list);
1661
1662         prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
1663         mem_cgroup_mark_under_oom(memcg);
1664
1665         locked = mem_cgroup_oom_trylock(memcg);
1666
1667         if (locked)
1668                 mem_cgroup_oom_notify(memcg);
1669
1670         if (locked && !memcg->oom_kill_disable) {
1671                 mem_cgroup_unmark_under_oom(memcg);
1672                 finish_wait(&memcg_oom_waitq, &owait.wait);
1673                 mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
1674                                          current->memcg_oom_order);
1675         } else {
1676                 schedule();
1677                 mem_cgroup_unmark_under_oom(memcg);
1678                 finish_wait(&memcg_oom_waitq, &owait.wait);
1679         }
1680
1681         if (locked) {
1682                 mem_cgroup_oom_unlock(memcg);
1683                 /*
1684                  * There is no guarantee that an OOM-lock contender
1685                  * sees the wakeups triggered by the OOM kill
1686                  * uncharges.  Wake any sleepers explicitly.
1687                  */
1688                 memcg_oom_recover(memcg);
1689         }
1690 cleanup:
1691         current->memcg_in_oom = NULL;
1692         css_put(&memcg->css);
1693         return true;
1694 }
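
/*
 * Illustrative sketch (an assumption about the callers, not part of this
 * file): at the end of a page fault, the fault paths are expected to do
 * roughly
 *
 *	if (task_in_memcg_oom(current))
 *		mem_cgroup_oom_synchronize(fault_returned_VM_FAULT_OOM);
 *
 * i.e. pass %true to actually kill or wait on the memcg OOM recorded by
 * mem_cgroup_oom() above, and %false to merely clean up the per-task OOM
 * state when the fault succeeded anyway.
 */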
1695
1696 /**
1697  * lock_page_memcg - lock a page->mem_cgroup binding
1698  * @page: the page
1699  *
1700  * This function protects unlocked LRU pages from being moved to
1701  * another cgroup and stabilizes their page->mem_cgroup binding.
1702  */
1703 void lock_page_memcg(struct page *page)
1704 {
1705         struct mem_cgroup *memcg;
1706         unsigned long flags;
1707
1708         /*
1709          * The RCU lock is held throughout the transaction.  The fast
1710          * path can get away without acquiring the memcg->move_lock
1711          * because page moving starts with an RCU grace period.
1712          */
1713         rcu_read_lock();
1714
1715         if (mem_cgroup_disabled())
1716                 return;
1717 again:
1718         memcg = page->mem_cgroup;
1719         if (unlikely(!memcg))
1720                 return;
1721
1722         if (atomic_read(&memcg->moving_account) <= 0)
1723                 return;
1724
1725         spin_lock_irqsave(&memcg->move_lock, flags);
1726         if (memcg != page->mem_cgroup) {
1727                 spin_unlock_irqrestore(&memcg->move_lock, flags);
1728                 goto again;
1729         }
1730
1731         /*
1732          * When charge migration first begins, we can have locked and
1733          * unlocked page stat updates happening concurrently.  Track
1734                 * the task that holds the lock for unlock_page_memcg().
1735          */
1736         memcg->move_lock_task = current;
1737         memcg->move_lock_flags = flags;
1738
1739         return;
1740 }
1741 EXPORT_SYMBOL(lock_page_memcg);
1742
1743 /**
1744  * unlock_page_memcg - unlock a page->mem_cgroup binding
1745  * @page: the page
1746  */
1747 void unlock_page_memcg(struct page *page)
1748 {
1749         struct mem_cgroup *memcg = page->mem_cgroup;
1750
1751         if (memcg && memcg->move_lock_task == current) {
1752                 unsigned long flags = memcg->move_lock_flags;
1753
1754                 memcg->move_lock_task = NULL;
1755                 memcg->move_lock_flags = 0;
1756
1757                 spin_unlock_irqrestore(&memcg->move_lock, flags);
1758         }
1759
1760         rcu_read_unlock();
1761 }
1762 EXPORT_SYMBOL(unlock_page_memcg);
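
/*
 * Usage sketch (an assumption about the callers, not part of this file):
 * code updating memcg page statistics for pages that could concurrently be
 * moved to another cgroup is expected to bracket the update with this pair,
 * e.g.
 *
 *	lock_page_memcg(page);
 *	if (TestClearPageDirty(page))
 *		mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY);
 *	unlock_page_memcg(page);
 *
 * so that page->mem_cgroup cannot change between the flag test and the
 * accounting.
 */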
1763
1764 /*
1765  * size of first charge trial. "32" comes from vmscan.c's magic value.
1766  * TODO: it may be necessary to use bigger numbers on big-iron machines.
1767  */
1768 #define CHARGE_BATCH    32U
1769 struct memcg_stock_pcp {
1770         struct mem_cgroup *cached; /* this is never the root cgroup */
1771         unsigned int nr_pages;
1772         struct work_struct work;
1773         unsigned long flags;
1774 #define FLUSHING_CACHED_CHARGE  0
1775 };
1776 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
1777 static DEFINE_MUTEX(percpu_charge_mutex);
1778
1779 /**
1780  * consume_stock: Try to consume stocked charge on this cpu.
1781  * @memcg: memcg to consume from.
1782  * @nr_pages: how many pages to charge.
1783  *
1784  * The charges will only happen if @memcg matches the current cpu's memcg
1785  * stock, and at least @nr_pages are available in that stock.  Failure to
1786  * service an allocation will refill the stock.
1787  *
1788  * returns true if successful, false otherwise.
1789  */
1790 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
1791 {
1792         struct memcg_stock_pcp *stock;
1793         bool ret = false;
1794
1795         if (nr_pages > CHARGE_BATCH)
1796                 return ret;
1797
1798         stock = &get_cpu_var(memcg_stock);
1799         if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
1800                 stock->nr_pages -= nr_pages;
1801                 ret = true;
1802         }
1803         put_cpu_var(memcg_stock);
1804         return ret;
1805 }
1806
1807 /*
1808  * Return the percpu-cached stock to its memcg and reset the cached information.
1809  */
1810 static void drain_stock(struct memcg_stock_pcp *stock)
1811 {
1812         struct mem_cgroup *old = stock->cached;
1813
1814         if (stock->nr_pages) {
1815                 page_counter_uncharge(&old->memory, stock->nr_pages);
1816                 if (do_memsw_account())
1817                         page_counter_uncharge(&old->memsw, stock->nr_pages);
1818                 css_put_many(&old->css, stock->nr_pages);
1819                 stock->nr_pages = 0;
1820         }
1821         stock->cached = NULL;
1822 }
1823
1824 /*
1825  * This must be called with preemption disabled or by a thread
1826  * pinned to the local cpu.
1827  */
1828 static void drain_local_stock(struct work_struct *dummy)
1829 {
1830         struct memcg_stock_pcp *stock = this_cpu_ptr(&memcg_stock);
1831         drain_stock(stock);
1832         clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
1833 }
1834
1835 /*
1836  * Cache @nr_pages charges in the local per-cpu area.
1837  * They will be consumed by consume_stock() later.
1838  */
1839 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
1840 {
1841         struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
1842
1843         if (stock->cached != memcg) { /* reset if necessary */
1844                 drain_stock(stock);
1845                 stock->cached = memcg;
1846         }
1847         stock->nr_pages += nr_pages;
1848         put_cpu_var(memcg_stock);
1849 }
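
/*
 * Illustrative note (not from the original source): the stock life cycle is
 * roughly
 *
 *	try_charge() -> consume_stock()        per-cpu fast path, no counter ops
 *	try_charge() -> refill_stock()         park the surplus of a batched charge
 *	drain_local_stock()/drain_all_stock()  return the surplus to the counters
 *
 * so most single-page charges never have to touch the page counters at all.
 */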
1850
1851 /*
1852  * Drain all per-CPU charge caches for the given root_memcg and the
1853  * subtree of the hierarchy under it.
1854  */
1855 static void drain_all_stock(struct mem_cgroup *root_memcg)
1856 {
1857         int cpu, curcpu;
1858
1859         /* If someone's already draining, avoid adding more workers. */
1860         if (!mutex_trylock(&percpu_charge_mutex))
1861                 return;
1862         /* Notify other cpus that system-wide "drain" is running */
1863         get_online_cpus();
1864         curcpu = get_cpu();
1865         for_each_online_cpu(cpu) {
1866                 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
1867                 struct mem_cgroup *memcg;
1868
1869                 memcg = stock->cached;
1870                 if (!memcg || !stock->nr_pages)
1871                         continue;
1872                 if (!mem_cgroup_is_descendant(memcg, root_memcg))
1873                         continue;
1874                 if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
1875                         if (cpu == curcpu)
1876                                 drain_local_stock(&stock->work);
1877                         else
1878                                 schedule_work_on(cpu, &stock->work);
1879                 }
1880         }
1881         put_cpu();
1882         put_online_cpus();
1883         mutex_unlock(&percpu_charge_mutex);
1884 }
1885
1886 static int memcg_cpu_hotplug_callback(struct notifier_block *nb,
1887                                         unsigned long action,
1888                                         void *hcpu)
1889 {
1890         int cpu = (unsigned long)hcpu;
1891         struct memcg_stock_pcp *stock;
1892
1893         if (action == CPU_ONLINE)
1894                 return NOTIFY_OK;
1895
1896         if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
1897                 return NOTIFY_OK;
1898
1899         stock = &per_cpu(memcg_stock, cpu);
1900         drain_stock(stock);
1901         return NOTIFY_OK;
1902 }
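
/*
 * Illustrative note (an assumption, the registration is not shown in this
 * excerpt): this callback is expected to be hooked up at init time, e.g.
 *
 *	hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
 *
 * so that the charge stock of a CPU going offline is returned to the page
 * counters instead of being stranded.
 */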
1903
1904 static void reclaim_high(struct mem_cgroup *memcg,
1905                          unsigned int nr_pages,
1906                          gfp_t gfp_mask)
1907 {
1908         do {
1909                 if (page_counter_read(&memcg->memory) <= memcg->high)
1910                         continue;
1911                 mem_cgroup_events(memcg, MEMCG_HIGH, 1);
1912                 try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true);
1913         } while ((memcg = parent_mem_cgroup(memcg)));
1914 }
1915
1916 static void high_work_func(struct work_struct *work)
1917 {
1918         struct mem_cgroup *memcg;
1919
1920         memcg = container_of(work, struct mem_cgroup, high_work);
1921         reclaim_high(memcg, CHARGE_BATCH, GFP_KERNEL);
1922 }
1923
1924 /*
1925  * Scheduled by try_charge() to be executed from the userland return path
1926  * and reclaims memory over the high limit.
1927  */
1928 void mem_cgroup_handle_over_high(void)
1929 {
1930         unsigned int nr_pages = current->memcg_nr_pages_over_high;
1931         struct mem_cgroup *memcg;
1932
1933         if (likely(!nr_pages))
1934                 return;
1935
1936         memcg = get_mem_cgroup_from_mm(current->mm);
1937         reclaim_high(memcg, nr_pages, GFP_KERNEL);
1938         css_put(&memcg->css);
1939         current->memcg_nr_pages_over_high = 0;
1940 }
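
/*
 * Illustrative note (an assumption about the caller, not part of this
 * file): mem_cgroup_handle_over_high() is expected to be invoked from the
 * return-to-userland path (hence the tracehook include above), after
 * try_charge() has accumulated current->memcg_nr_pages_over_high and
 * called set_notify_resume().
 */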
1941
1942 static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
1943                       unsigned int nr_pages)
1944 {
1945         unsigned int batch = max(CHARGE_BATCH, nr_pages);
1946         int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
1947         struct mem_cgroup *mem_over_limit;
1948         struct page_counter *counter;
1949         unsigned long nr_reclaimed;
1950         bool may_swap = true;
1951         bool drained = false;
1952
1953         if (mem_cgroup_is_root(memcg))
1954                 return 0;
1955 retry:
1956         if (consume_stock(memcg, nr_pages))
1957                 return 0;
1958
1959         if (!do_memsw_account() ||
1960             page_counter_try_charge(&memcg->memsw, batch, &counter)) {
1961                 if (page_counter_try_charge(&memcg->memory, batch, &counter))
1962                         goto done_restock;
1963                 if (do_memsw_account())
1964                         page_counter_uncharge(&memcg->memsw, batch);
1965                 mem_over_limit = mem_cgroup_from_counter(counter, memory);
1966         } else {
1967                 mem_over_limit = mem_cgroup_from_counter(counter, memsw);
1968                 may_swap = false;
1969         }
1970
1971         if (batch > nr_pages) {
1972                 batch = nr_pages;
1973                 goto retry;
1974         }
1975
1976         /*
1977          * Unlike in global OOM situations, memcg is not in a physical
1978          * memory shortage.  Allow dying and OOM-killed tasks to
1979          * bypass the last charges so that they can exit quickly and
1980          * free their memory.
1981          */
1982         if (unlikely(test_thread_flag(TIF_MEMDIE) ||
1983                      fatal_signal_pending(current) ||
1984                      current->flags & PF_EXITING))
1985                 goto force;
1986
1987         if (unlikely(task_in_memcg_oom(current)))
1988                 goto nomem;
1989
1990         if (!gfpflags_allow_blocking(gfp_mask))
1991                 goto nomem;
1992
1993         mem_cgroup_events(mem_over_limit, MEMCG_MAX, 1);
1994
1995         nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
1996                                                     gfp_mask, may_swap);
1997
1998         if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
1999                 goto retry;
2000
2001         if (!drained) {
2002                 drain_all_stock(mem_over_limit);
2003                 drained = true;
2004                 goto retry;
2005         }
2006
2007         if (gfp_mask & __GFP_NORETRY)
2008                 goto nomem;
2009         /*
2010          * Even though the limit is exceeded at this point, reclaim
2011          * may have been able to free some pages.  Retry the charge
2012          * before killing the task.
2013          *
2014          * Only for regular pages, though: huge pages are rather
2015          * unlikely to succeed so close to the limit, and we fall back
2016          * to regular pages anyway in case of failure.
2017          */
2018         if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
2019                 goto retry;
2020         /*
2021          * During a task move, charges can be counted twice, so it's
2022          * better to wait until the move has finished if one is in progress.
2023          */
2024         if (mem_cgroup_wait_acct_move(mem_over_limit))
2025                 goto retry;
2026
2027         if (nr_retries--)
2028                 goto retry;
2029
2030         if (gfp_mask & __GFP_NOFAIL)
2031                 goto force;
2032
2033         if (fatal_signal_pending(current))
2034                 goto force;
2035
2036         mem_cgroup_events(mem_over_limit, MEMCG_OOM, 1);
2037
2038         mem_cgroup_oom(mem_over_limit, gfp_mask,
2039                        get_order(nr_pages * PAGE_SIZE));
2040 nomem:
2041         if (!(gfp_mask & __GFP_NOFAIL))
2042                 return -ENOMEM;
2043 force:
2044         /*
2045          * The allocation either can't fail or will lead to more memory
2046          * being freed very soon.  Allow memory usage to go over the limit
2047          * temporarily by force charging it.
2048          */
2049         page_counter_charge(&memcg->memory, nr_pages);
2050         if (do_memsw_account())
2051                 page_counter_charge(&memcg->memsw, nr_pages);
2052         css_get_many(&memcg->css, nr_pages);
2053
2054         return 0;
2055
2056 done_restock:
2057         css_get_many(&memcg->css, batch);
2058         if (batch > nr_pages)
2059                 refill_stock(memcg, batch - nr_pages);
2060
2061         /*
2062          * If the hierarchy is above the normal consumption range, schedule
2063          * reclaim on returning to userland.  We can perform reclaim here
2064          * if __GFP_RECLAIM but let's always punt for simplicity and so that
2065          * GFP_KERNEL can consistently be used during reclaim.  @memcg is
2066          * not recorded as it most likely matches current's and won't
2067          * change in the meantime.  As high limit is checked again before
2068          * reclaim, the cost of mismatch is negligible.
2069          */
2070         do {
2071                 if (page_counter_read(&memcg->memory) > memcg->high) {
2072                         /* Don't bother a random interrupted task */
2073                         if (in_interrupt()) {
2074                                 schedule_work(&memcg->high_work);
2075                                 break;
2076                         }
2077                         current->memcg_nr_pages_over_high += batch;
2078                         set_notify_resume(current);
2079                         break;
2080                 }
2081         } while ((memcg = parent_mem_cgroup(memcg)));
2082
2083         return 0;
2084 }
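
/*
 * Worked example (illustrative, not from the original source): charging a
 * single page with an empty per-cpu stock charges a full CHARGE_BATCH (32)
 * pages against the counters in one page_counter_try_charge() call; the 31
 * surplus pages are then parked in the stock via refill_stock(), so the
 * next 31 single-page charges on this CPU are served by consume_stock()
 * without touching the counters.
 */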
2085
2086 static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
2087 {
2088         if (mem_cgroup_is_root(memcg))
2089                 return;
2090
2091         page_counter_uncharge(&memcg->memory, nr_pages);
2092         if (do_memsw_account())
2093                 page_counter_uncharge(&memcg->memsw, nr_pages);
2094
2095         css_put_many(&memcg->css, nr_pages);
2096 }
2097
2098 static void lock_page_lru(struct page *page, int *isolated)
2099 {
2100         struct zone *zone = page_zone(page);
2101
2102         spin_lock_irq(&zone->lru_lock);
2103         if (PageLRU(page)) {
2104                 struct lruvec *lruvec;
2105
2106                 lruvec = mem_cgroup_page_lruvec(page, zone);
2107                 ClearPageLRU(page);
2108                 del_page_from_lru_list(page, lruvec, page_lru(page));
2109                 *isolated = 1;
2110         } else
2111                 *isolated = 0;
2112 }
2113
2114 static void unlock_page_lru(struct page *page, int isolated)
2115 {
2116         struct zone *zone = page_zone(page);
2117
2118         if (isolated) {
2119                 struct lruvec *lruvec;
2120
2121                 lruvec = mem_cgroup_page_lruvec(page, zone);
2122                 VM_BUG_ON_PAGE(PageLRU(page), page);
2123                 SetPageLRU(page);
2124                 add_page_to_lru_list(page, lruvec, page_lru(page));
2125         }
2126         spin_unlock_irq(&zone->lru_lock);
2127 }
2128
2129 static void commit_charge(struct page *page, struct mem_cgroup *memcg,
2130                           bool lrucare)
2131 {
2132         int isolated;
2133
2134         VM_BUG_ON_PAGE(page->mem_cgroup, page);
2135
2136         /*
2137          * In some cases (SwapCache and FUSE's splice_buf->radixtree), the page
2138          * may already be on some other mem_cgroup's LRU.  Take care of it.
2139          */
2140         if (lrucare)
2141                 lock_page_lru(page, &isolated);
2142
2143         /*
2144          * Nobody should be changing or seriously looking at
2145          * page->mem_cgroup at this point:
2146          *
2147          * - the page is uncharged
2148          *
2149          * - the page is off-LRU
2150          *
2151          * - an anonymous fault has exclusive page access, except for
2152          *   a locked page table
2153          *
2154          * - a page cache insertion, a swapin fault, or a migration
2155          *   have the page locked
2156          */
2157         page->mem_cgroup = memcg;
2158
2159         if (lrucare)
2160                 unlock_page_lru(page, isolated);
2161 }
2162
2163 #ifndef CONFIG_SLOB
2164 static int memcg_alloc_cache_id(void)
2165 {
2166         int id, size;
2167         int err;
2168
2169         id = ida_simple_get(&memcg_cache_ida,
2170                             0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
2171         if (id < 0)
2172                 return id;
2173
2174         if (id < memcg_nr_cache_ids)
2175                 return id;
2176
2177         /*
2178          * There's no space for the new id in memcg_caches arrays,
2179          * so we have to grow them.
2180          */
2181         down_write(&memcg_cache_ids_sem);
2182
2183         size = 2 * (id + 1);
2184         if (size < MEMCG_CACHES_MIN_SIZE)
2185                 size = MEMCG_CACHES_MIN_SIZE;
2186         else if (size > MEMCG_CACHES_MAX_SIZE)
2187                 size = MEMCG_CACHES_MAX_SIZE;
2188
2189         err = memcg_update_all_caches(size);
2190         if (!err)
2191                 err = memcg_update_all_list_lrus(size);
2192         if (!err)
2193                 memcg_nr_cache_ids = size;
2194
2195         up_write(&memcg_cache_ids_sem);
2196
2197         if (err) {
2198                 ida_simple_remove(&memcg_cache_ida, id);
2199                 return err;
2200         }
2201         return id;
2202 }
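
/*
 * Worked example (illustrative): if the ida hands out id 8 while
 * memcg_nr_cache_ids is only 8, the per-memcg cache arrays are grown to
 * size = 2 * (8 + 1) = 18 entries (clamped between MEMCG_CACHES_MIN_SIZE
 * and MEMCG_CACHES_MAX_SIZE) before the id is returned to the caller.
 */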
2203
2204 static void memcg_free_cache_id(int id)
2205 {
2206         ida_simple_remove(&memcg_cache_ida, id);
2207 }
2208
2209 struct memcg_kmem_cache_create_work {
2210         struct mem_cgroup *memcg;
2211         struct kmem_cache *cachep;
2212         struct work_struct work;
2213 };
2214
2215 static void memcg_kmem_cache_create_func(struct work_struct *w)
2216 {
2217         struct memcg_kmem_cache_create_work *cw =
2218                 container_of(w, struct memcg_kmem_cache_create_work, work);
2219         struct mem_cgroup *memcg = cw->memcg;
2220         struct kmem_cache *cachep = cw->cachep;
2221
2222         memcg_create_kmem_cache(memcg, cachep);
2223
2224         css_put(&memcg->css);
2225         kfree(cw);
2226 }
2227
2228 /*
2229  * Enqueue the creation of a per-memcg kmem_cache.
2230  */
2231 static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
2232                                                struct kmem_cache *cachep)
2233 {
2234         struct memcg_kmem_cache_create_work *cw;
2235
2236         cw = kmalloc(sizeof(*cw), GFP_NOWAIT);
2237         if (!cw)
2238                 return;
2239
2240         css_get(&memcg->css);
2241
2242         cw->memcg = memcg;
2243         cw->cachep = cachep;
2244         INIT_WORK(&cw->work, memcg_kmem_cache_create_func);
2245
2246         schedule_work(&cw->work);
2247 }
2248
2249 static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
2250                                              struct kmem_cache *cachep)
2251 {
2252         /*
2253          * We need to stop accounting when we kmalloc, because if the
2254          * corresponding kmalloc cache is not yet created, the first allocation
2255          * in __memcg_schedule_kmem_cache_create will recurse.
2256          *
2257          * However, it is better to enclose the whole function. Depending on
2258          * the debugging options enabled, INIT_WORK(), for instance, can
2259          * trigger an allocation. This too, will make us recurse. Because at
2260          * this point we can't allow ourselves back into memcg_kmem_get_cache,
2261          * the safest choice is to do it like this, wrapping the whole function.
2262          */
2263         current->memcg_kmem_skip_account = 1;
2264         __memcg_schedule_kmem_cache_create(memcg, cachep);
2265         current->memcg_kmem_skip_account = 0;
2266 }
2267
2268 /*
2269  * Return the kmem_cache we're supposed to use for a slab allocation.
2270  * We try to use the current memcg's version of the cache.
2271  *
2272  * If the cache does not exist yet and we are the first user of it,
2273  * we either create it immediately, if possible, or create it asynchronously
2274  * in a workqueue.
2275  * In the latter case, we will let the current allocation go through with
2276  * the original cache.
2277  *
2278  * Can't be called in interrupt context or from kernel threads.
2279  * This function needs to be called with rcu_read_lock() held.
2280  */
2281 struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
2282 {
2283         struct mem_cgroup *memcg;
2284         struct kmem_cache *memcg_cachep;
2285         int kmemcg_id;
2286
2287         VM_BUG_ON(!is_root_cache(cachep));
2288
2289         if (cachep->flags & SLAB_ACCOUNT)
2290                 gfp |= __GFP_ACCOUNT;
2291
2292         if (!(gfp & __GFP_ACCOUNT))
2293                 return cachep;
2294
2295         if (current->memcg_kmem_skip_account)
2296                 return cachep;
2297
2298         memcg = get_mem_cgroup_from_mm(current->mm);
2299         kmemcg_id = READ_ONCE(memcg->kmemcg_id);
2300         if (kmemcg_id < 0)
2301                 goto out;
2302
2303         memcg_cachep = cache_from_memcg_idx(cachep, kmemcg_id);
2304         if (likely(memcg_cachep))
2305                 return memcg_cachep;
2306
2307         /*
2308          * If we are in a safe context (can wait, and not in interrupt
2309          * context), we could be predictable and return right away.
2310          * This would guarantee that the allocation being performed
2311          * already belongs in the new cache.
2312          *
2313          * However, there are some clashes that can arise from locking.
2314          * For instance, because we acquire the slab_mutex while doing
2315          * memcg_create_kmem_cache, this means no further allocation
2316          * could happen with the slab_mutex held. So it's better to
2317          * defer everything.
2318          */
2319         memcg_schedule_kmem_cache_create(memcg, cachep);
2320 out:
2321         css_put(&memcg->css);
2322         return cachep;
2323 }
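
/*
 * Usage sketch (an assumption about the callers, not part of this file):
 * the slab allocators are expected to route accounted allocations through
 * the memcg_kmem_get_cache()/memcg_kmem_put_cache() wrappers, roughly
 *
 *	cachep = memcg_kmem_get_cache(cachep, gfpflags);
 *	... allocate the object from cachep ...
 *	memcg_kmem_put_cache(cachep);
 *
 * falling back to the root cache while the per-memcg copy is still being
 * created asynchronously.
 */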
2324
2325 void __memcg_kmem_put_cache(struct kmem_cache *cachep)
2326 {
2327         if (!is_root_cache(cachep))
2328                 css_put(&cachep->memcg_params.memcg->css);
2329 }
2330
2331 int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
2332                               struct mem_cgroup *memcg)
2333 {
2334         unsigned int nr_pages = 1 << order;
2335         struct page_counter *counter;
2336         int ret;
2337
2338         ret = try_charge(memcg, gfp, nr_pages);
2339         if (ret)
2340                 return ret;
2341
2342         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
2343             !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) {
2344                 cancel_charge(memcg, nr_pages);
2345                 return -ENOMEM;
2346         }
2347
2348         page->mem_cgroup = memcg;
2349
2350         return 0;
2351 }
2352
2353 int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
2354 {
2355         struct mem_cgroup *memcg;
2356         int ret = 0;
2357
2358         memcg = get_mem_cgroup_from_mm(current->mm);
2359         if (!mem_cgroup_is_root(memcg))
2360                 ret = __memcg_kmem_charge_memcg(page, gfp, order, memcg);
2361         css_put(&memcg->css);
2362         return ret;
2363 }
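
/*
 * Usage sketch (an assumption, not shown in this excerpt): page-level
 * kernel allocations that want to be accounted are expected to go through
 * the memcg_kmem_charge() wrapper, which only calls into this function for
 * __GFP_ACCOUNT allocations while kmem accounting is enabled, e.g.
 *
 *	page = alloc_pages(gfp_mask | __GFP_ACCOUNT, order);
 *	if (page && memcg_kmem_charge(page, gfp_mask, order)) {
 *		__free_pages(page, order);
 *		page = NULL;
 *	}
 */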
2364
2365 void __memcg_kmem_uncharge(struct page *page, int order)
2366 {
2367         struct mem_cgroup *memcg = page->mem_cgroup;
2368         unsigned int nr_pages = 1 << order;
2369
2370         if (!memcg)
2371                 return;
2372
2373         VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
2374
2375         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
2376                 page_counter_uncharge(&memcg->kmem, nr_pages);
2377
2378         page_counter_uncharge(&memcg->memory, nr_pages);
2379         if (do_memsw_account())
2380                 page_counter_uncharge(&memcg->memsw, nr_pages);
2381
2382         page->mem_cgroup = NULL;
2383         css_put_many(&memcg->css, nr_pages);
2384 }
2385 #endif /* !CONFIG_SLOB */
2386
2387 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2388
2389 /*
2390  * Because tail pages are not marked as "used", set them up here. We're under
2391  * zone->lru_lock and migration entries are set up in all page mappings.
2392  */
2393 void mem_cgroup_split_huge_fixup(struct page *head)
2394 {
2395         int i;
2396
2397         if (mem_cgroup_disabled())
2398                 return;
2399
2400         for (i = 1; i < HPAGE_PMD_NR; i++)
2401                 head[i].mem_cgroup = head->mem_cgroup;
2402
2403         __this_cpu_sub(head->mem_cgroup->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
2404                        HPAGE_PMD_NR);
2405 }
2406 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
2407
2408 #ifdef CONFIG_MEMCG_SWAP
2409 static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
2410                                          bool charge)
2411 {
2412         int val = (charge) ? 1 : -1;
2413         this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAP], val);
2414 }
2415
2416 /**
2417  * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
2418  * @entry: swap entry to be moved
2419  * @from:  mem_cgroup which the entry is moved from
2420  * @to:  mem_cgroup which the entry is moved to
2421  *
2422  * It succeeds only when the swap_cgroup's record for this entry is the same
2423  * as the mem_cgroup's id of @from.
2424  *
2425  * Returns 0 on success, -EINVAL on failure.
2426  *
2427  * The caller must have charged to @to, IOW, called page_counter_charge() about
2428  * both res and memsw, and called css_get().
2429  */
2430 static int mem_cgroup_move_swap_account(swp_entry_t entry,
2431                                 struct mem_cgroup *from, struct mem_cgroup *to)
2432 {
2433         unsigned short old_id, new_id;
2434
2435         old_id = mem_cgroup_id(from);
2436         new_id = mem_cgroup_id(to);
2437
2438         if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
2439                 mem_cgroup_swap_statistics(from, false);
2440                 mem_cgroup_swap_statistics(to, true);
2441                 return 0;
2442         }
2443         return -EINVAL;
2444 }
2445 #else
2446 static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
2447                                 struct mem_cgroup *from, struct mem_cgroup *to)
2448 {
2449         return -EINVAL;
2450 }
2451 #endif
2452
2453 static DEFINE_MUTEX(memcg_limit_mutex);
2454
2455 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
2456                                    unsigned long limit)
2457 {
2458         unsigned long curusage;
2459         unsigned long oldusage;
2460         bool enlarge = false;
2461         int retry_count;
2462         int ret;
2463
2464         /*
2465          * To keep hierarchical reclaim simple, how long we should retry
2466          * depends on the caller. We set our retry count to be a function
2467          * of the number of children we should visit in this loop.
2468          */
2469         retry_count = MEM_CGROUP_RECLAIM_RETRIES *
2470                       mem_cgroup_count_children(memcg);
2471
2472         oldusage = page_counter_read(&memcg->memory);
2473
2474         do {
2475                 if (signal_pending(current)) {
2476                         ret = -EINTR;
2477                         break;
2478                 }
2479
2480                 mutex_lock(&memcg_limit_mutex);
2481                 if (limit > memcg->memsw.limit) {
2482                         mutex_unlock(&memcg_limit_mutex);
2483                         ret = -EINVAL;
2484                         break;
2485                 }
2486                 if (limit > memcg->memory.limit)
2487                         enlarge = true;
2488                 ret = page_counter_limit(&memcg->memory, limit);
2489                 mutex_unlock(&memcg_limit_mutex);
2490
2491                 if (!ret)
2492                         break;
2493
2494                 try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, true);
2495
2496                 curusage = page_counter_read(&memcg->memory);
2497                 /* Was the usage reduced? */
2498                 if (curusage >= oldusage)
2499                         retry_count--;
2500                 else
2501                         oldusage = curusage;
2502         } while (retry_count);
2503
2504         if (!ret && enlarge)
2505                 memcg_oom_recover(memcg);
2506
2507         return ret;
2508 }
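
/*
 * Illustrative note: this is the backend for writes to the legacy
 * memory.limit_in_bytes control file, e.g.
 *
 *	echo 512M > memory.limit_in_bytes
 *
 * which reaches this function through mem_cgroup_write() further below,
 * after page_counter_memparse() has converted the string to pages.
 */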
2509
2510 static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
2511                                          unsigned long limit)
2512 {
2513         unsigned long curusage;
2514         unsigned long oldusage;
2515         bool enlarge = false;
2516         int retry_count;
2517         int ret;
2518
2519         /* see mem_cgroup_resize_limit */
2520         retry_count = MEM_CGROUP_RECLAIM_RETRIES *
2521                       mem_cgroup_count_children(memcg);
2522
2523         oldusage = page_counter_read(&memcg->memsw);
2524
2525         do {
2526                 if (signal_pending(current)) {
2527                         ret = -EINTR;
2528                         break;
2529                 }
2530
2531                 mutex_lock(&memcg_limit_mutex);
2532                 if (limit < memcg->memory.limit) {
2533                         mutex_unlock(&memcg_limit_mutex);
2534                         ret = -EINVAL;
2535                         break;
2536                 }
2537                 if (limit > memcg->memsw.limit)
2538                         enlarge = true;
2539                 ret = page_counter_limit(&memcg->memsw, limit);
2540                 mutex_unlock(&memcg_limit_mutex);
2541
2542                 if (!ret)
2543                         break;
2544
2545                 try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, false);
2546
2547                 curusage = page_counter_read(&memcg->memsw);
2548                 /* Was the usage reduced? */
2549                 if (curusage >= oldusage)
2550                         retry_count--;
2551                 else
2552                         oldusage = curusage;
2553         } while (retry_count);
2554
2555         if (!ret && enlarge)
2556                 memcg_oom_recover(memcg);
2557
2558         return ret;
2559 }
2560
2561 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
2562                                             gfp_t gfp_mask,
2563                                             unsigned long *total_scanned)
2564 {
2565         unsigned long nr_reclaimed = 0;
2566         struct mem_cgroup_per_zone *mz, *next_mz = NULL;
2567         unsigned long reclaimed;
2568         int loop = 0;
2569         struct mem_cgroup_tree_per_zone *mctz;
2570         unsigned long excess;
2571         unsigned long nr_scanned;
2572
2573         if (order > 0)
2574                 return 0;
2575
2576         mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
2577         /*
2578          * This loop can run for a while, especially if mem_cgroups
2579          * continuously keep exceeding their soft limit and putting the
2580          * system under pressure.
2581          */
2582         do {
2583                 if (next_mz)
2584                         mz = next_mz;
2585                 else
2586                         mz = mem_cgroup_largest_soft_limit_node(mctz);
2587                 if (!mz)
2588                         break;
2589
2590                 nr_scanned = 0;
2591                 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, zone,
2592                                                     gfp_mask, &nr_scanned);
2593                 nr_reclaimed += reclaimed;
2594                 *total_scanned += nr_scanned;
2595                 spin_lock_irq(&mctz->lock);
2596                 __mem_cgroup_remove_exceeded(mz, mctz);
2597
2598                 /*
2599                  * If we failed to reclaim anything from this memory cgroup
2600                  * it is time to move on to the next cgroup
2601                  */
2602                 next_mz = NULL;
2603                 if (!reclaimed)
2604                         next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
2605
2606                 excess = soft_limit_excess(mz->memcg);
2607                 /*
2608                  * One school of thought says that we should not add
2609                  * back the node to the tree if reclaim returns 0.
2610                  * But our reclaim could return 0 simply because, due
2611                  * to priority, we are exposing a smaller subset of
2612                  * memory to reclaim from. Consider this as a longer
2613                  * term TODO.
2614                  */
2615                 /* If excess == 0, no tree ops */
2616                 __mem_cgroup_insert_exceeded(mz, mctz, excess);
2617                 spin_unlock_irq(&mctz->lock);
2618                 css_put(&mz->memcg->css);
2619                 loop++;
2620                 /*
2621                  * Could not reclaim anything and there are no more
2622                  * mem cgroups to try or we seem to be looping without
2623                  * reclaiming anything.
2624                  */
2625                 if (!nr_reclaimed &&
2626                         (next_mz == NULL ||
2627                         loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
2628                         break;
2629         } while (!nr_reclaimed);
2630         if (next_mz)
2631                 css_put(&next_mz->memcg->css);
2632         return nr_reclaimed;
2633 }
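
/*
 * Illustrative note (an assumption about the caller, not part of this
 * file): global reclaim in vmscan.c is expected to call
 * mem_cgroup_soft_limit_reclaim() on a zone before regular reclaim and to
 * credit whatever this soft-limit pass freed against its reclaim target.
 */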
2634
2635 /*
2636  * Test whether @memcg has children, dead or alive.  Note that this
2637  * function doesn't care whether @memcg has use_hierarchy enabled and
2638  * returns %true if there are child csses according to the cgroup
2639  * hierarchy.  Testing use_hierarchy is the caller's responsibility.
2640  */
2641 static inline bool memcg_has_children(struct mem_cgroup *memcg)
2642 {
2643         bool ret;
2644
2645         rcu_read_lock();
2646         ret = css_next_child(NULL, &memcg->css);
2647         rcu_read_unlock();
2648         return ret;
2649 }
2650
2651 /*
2652  * Reclaims as many pages from the given memcg as possible and moves
2653  * the rest to the parent.
2654  *
2655  * Caller is responsible for holding css reference for memcg.
2656  */
2657 static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
2658 {
2659         int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
2660
2661         /* we call try-to-free pages to make this cgroup empty */
2662         lru_add_drain_all();
2663         /* try to free all pages in this cgroup */
2664         while (nr_retries && page_counter_read(&memcg->memory)) {
2665                 int progress;
2666
2667                 if (signal_pending(current))
2668                         return -EINTR;
2669
2670                 progress = try_to_free_mem_cgroup_pages(memcg, 1,
2671                                                         GFP_KERNEL, true);
2672                 if (!progress) {
2673                         nr_retries--;
2674                         /* maybe some writeback is necessary */
2675                         congestion_wait(BLK_RW_ASYNC, HZ/10);
2676                 }
2677
2678         }
2679
2680         return 0;
2681 }
2682
2683 static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
2684                                             char *buf, size_t nbytes,
2685                                             loff_t off)
2686 {
2687         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
2688
2689         if (mem_cgroup_is_root(memcg))
2690                 return -EINVAL;
2691         return mem_cgroup_force_empty(memcg) ?: nbytes;
2692 }
2693
2694 static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
2695                                      struct cftype *cft)
2696 {
2697         return mem_cgroup_from_css(css)->use_hierarchy;
2698 }
2699
2700 static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
2701                                       struct cftype *cft, u64 val)
2702 {
2703         int retval = 0;
2704         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
2705         struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent);
2706
2707         if (memcg->use_hierarchy == val)
2708                 return 0;
2709
2710         /*
2711          * If parent's use_hierarchy is set, we can't make any modifications
2712          * in the child subtrees. If it is unset, then the change can
2713          * occur, provided the current cgroup has no children.
2714          *
2715          * For the root cgroup, parent_memcg is NULL, so we allow the value
2716          * to be set if there are no children.
2717          */
2718         if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
2719                                 (val == 1 || val == 0)) {
2720                 if (!memcg_has_children(memcg))
2721                         memcg->use_hierarchy = val;
2722                 else
2723                         retval = -EBUSY;
2724         } else
2725                 retval = -EINVAL;
2726
2727         return retval;
2728 }
2729
2730 static void tree_stat(struct mem_cgroup *memcg, unsigned long *stat)
2731 {
2732         struct mem_cgroup *iter;
2733         int i;
2734
2735         memset(stat, 0, sizeof(*stat) * MEMCG_NR_STAT);
2736
2737         for_each_mem_cgroup_tree(iter, memcg) {
2738                 for (i = 0; i < MEMCG_NR_STAT; i++)
2739                         stat[i] += mem_cgroup_read_stat(iter, i);
2740         }
2741 }
2742
2743 static void tree_events(struct mem_cgroup *memcg, unsigned long *events)
2744 {
2745         struct mem_cgroup *iter;
2746         int i;
2747
2748         memset(events, 0, sizeof(*events) * MEMCG_NR_EVENTS);
2749
2750         for_each_mem_cgroup_tree(iter, memcg) {
2751                 for (i = 0; i < MEMCG_NR_EVENTS; i++)
2752                         events[i] += mem_cgroup_read_events(iter, i);
2753         }
2754 }
2755
2756 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
2757 {
2758         unsigned long val = 0;
2759
2760         if (mem_cgroup_is_root(memcg)) {
2761                 struct mem_cgroup *iter;
2762
2763                 for_each_mem_cgroup_tree(iter, memcg) {
2764                         val += mem_cgroup_read_stat(iter,
2765                                         MEM_CGROUP_STAT_CACHE);
2766                         val += mem_cgroup_read_stat(iter,
2767                                         MEM_CGROUP_STAT_RSS);
2768                         if (swap)
2769                                 val += mem_cgroup_read_stat(iter,
2770                                                 MEM_CGROUP_STAT_SWAP);
2771                 }
2772         } else {
2773                 if (!swap)
2774                         val = page_counter_read(&memcg->memory);
2775                 else
2776                         val = page_counter_read(&memcg->memsw);
2777         }
2778         return val;
2779 }
2780
2781 enum {
2782         RES_USAGE,
2783         RES_LIMIT,
2784         RES_MAX_USAGE,
2785         RES_FAILCNT,
2786         RES_SOFT_LIMIT,
2787 };
2788
2789 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
2790                                struct cftype *cft)
2791 {
2792         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
2793         struct page_counter *counter;
2794
2795         switch (MEMFILE_TYPE(cft->private)) {
2796         case _MEM:
2797                 counter = &memcg->memory;
2798                 break;
2799         case _MEMSWAP:
2800                 counter = &memcg->memsw;
2801                 break;
2802         case _KMEM:
2803                 counter = &memcg->kmem;
2804                 break;
2805         case _TCP:
2806                 counter = &memcg->tcpmem;
2807                 break;
2808         default:
2809                 BUG();
2810         }
2811
2812         switch (MEMFILE_ATTR(cft->private)) {
2813         case RES_USAGE:
2814                 if (counter == &memcg->memory)
2815                         return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
2816                 if (counter == &memcg->memsw)
2817                         return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
2818                 return (u64)page_counter_read(counter) * PAGE_SIZE;
2819         case RES_LIMIT:
2820                 return (u64)counter->limit * PAGE_SIZE;
2821         case RES_MAX_USAGE:
2822                 return (u64)counter->watermark * PAGE_SIZE;
2823         case RES_FAILCNT:
2824                 return counter->failcnt;
2825         case RES_SOFT_LIMIT:
2826                 return (u64)memcg->soft_limit * PAGE_SIZE;
2827         default:
2828                 BUG();
2829         }
2830 }
2831
2832 #ifndef CONFIG_SLOB
2833 static int memcg_online_kmem(struct mem_cgroup *memcg)
2834 {
2835         int memcg_id;
2836
2837         if (cgroup_memory_nokmem)
2838                 return 0;
2839
2840         BUG_ON(memcg->kmemcg_id >= 0);
2841         BUG_ON(memcg->kmem_state);
2842
2843         memcg_id = memcg_alloc_cache_id();
2844         if (memcg_id < 0)
2845                 return memcg_id;
2846
2847         static_branch_inc(&memcg_kmem_enabled_key);
2848         /*
2849          * A memory cgroup is considered kmem-online as soon as it gets
2850          * kmemcg_id. Setting the id after enabling static branching will
2851          * guarantee no one starts accounting before all call sites are
2852          * patched.
2853          */
2854         memcg->kmemcg_id = memcg_id;
2855         memcg->kmem_state = KMEM_ONLINE;
2856
2857         return 0;
2858 }
2859
2860 static void memcg_offline_kmem(struct mem_cgroup *memcg)
2861 {
2862         struct cgroup_subsys_state *css;
2863         struct mem_cgroup *parent, *child;
2864         int kmemcg_id;
2865
2866         if (memcg->kmem_state != KMEM_ONLINE)
2867                 return;
2868         /*
2869          * Clear the online state before clearing memcg_caches array
2870          * entries. The slab_mutex in memcg_deactivate_kmem_caches()
2871          * guarantees that no cache will be created for this cgroup
2872          * after we are done (see memcg_create_kmem_cache()).
2873          */
2874         memcg->kmem_state = KMEM_ALLOCATED;
2875
2876         memcg_deactivate_kmem_caches(memcg);
2877
2878         kmemcg_id = memcg->kmemcg_id;
2879         BUG_ON(kmemcg_id < 0);
2880
2881         parent = parent_mem_cgroup(memcg);
2882         if (!parent)
2883                 parent = root_mem_cgroup;
2884
2885         /*
2886          * Change kmemcg_id of this cgroup and all its descendants to the
2887          * parent's id, and then move all entries from this cgroup's list_lrus
2888          * to ones of the parent. After we have finished, all list_lrus
2889          * corresponding to this cgroup are guaranteed to remain empty. The
2890          * ordering is imposed by list_lru_node->lock taken by
2891          * memcg_drain_all_list_lrus().
2892          */
2893         css_for_each_descendant_pre(css, &memcg->css) {
2894                 child = mem_cgroup_from_css(css);
2895                 BUG_ON(child->kmemcg_id != kmemcg_id);
2896                 child->kmemcg_id = parent->kmemcg_id;
2897                 if (!memcg->use_hierarchy)
2898                         break;
2899         }
2900         memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id);
2901
2902         memcg_free_cache_id(kmemcg_id);
2903 }
2904
2905 static void memcg_free_kmem(struct mem_cgroup *memcg)
2906 {
2907         /* css_alloc() failed, offlining didn't happen */
2908         if (unlikely(memcg->kmem_state == KMEM_ONLINE))
2909                 memcg_offline_kmem(memcg);
2910
2911         if (memcg->kmem_state == KMEM_ALLOCATED) {
2912                 memcg_destroy_kmem_caches(memcg);
2913                 static_branch_dec(&memcg_kmem_enabled_key);
2914                 WARN_ON(page_counter_read(&memcg->kmem));
2915         }
2916 }
2917 #else
2918 static int memcg_online_kmem(struct mem_cgroup *memcg)
2919 {
2920         return 0;
2921 }
2922 static void memcg_offline_kmem(struct mem_cgroup *memcg)
2923 {
2924 }
2925 static void memcg_free_kmem(struct mem_cgroup *memcg)
2926 {
2927 }
2928 #endif /* !CONFIG_SLOB */
2929
2930 static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
2931                                    unsigned long limit)
2932 {
2933         int ret;
2934
2935         mutex_lock(&memcg_limit_mutex);
2936         ret = page_counter_limit(&memcg->kmem, limit);
2937         mutex_unlock(&memcg_limit_mutex);
2938         return ret;
2939 }
2940
2941 static int memcg_update_tcp_limit(struct mem_cgroup *memcg, unsigned long limit)
2942 {
2943         int ret;
2944
2945         mutex_lock(&memcg_limit_mutex);
2946
2947         ret = page_counter_limit(&memcg->tcpmem, limit);
2948         if (ret)
2949                 goto out;
2950
2951         if (!memcg->tcpmem_active) {
2952                 /*
2953                  * The active flag needs to be written after the static_key
2954                  * update. This is what guarantees that the socket activation
2955                  * function is the last one to run. See sock_update_memcg() for
2956                  * details, and note that we don't mark any socket as belonging
2957                  * to this memcg until that flag is up.
2958                  *
2959                  * We need to do this, because static_keys will span multiple
2960                  * sites, but we can't control their order. If we mark a socket
2961                  * as accounted, but the accounting functions are not patched in
2962                  * yet, we'll lose accounting.
2963                  *
2964                  * We never race with the readers in sock_update_memcg(),
2965                  * because when this value change, the code to process it is not
2966                  * patched in yet.
2967                  */
2968                 static_branch_inc(&memcg_sockets_enabled_key);
2969                 memcg->tcpmem_active = true;
2970         }
2971 out:
2972         mutex_unlock(&memcg_limit_mutex);
2973         return ret;
2974 }
2975
2976 /*
2977  * The users of this function are the RES_LIMIT and
2978  * RES_SOFT_LIMIT files.
2979  */
2980 static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
2981                                 char *buf, size_t nbytes, loff_t off)
2982 {
2983         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
2984         unsigned long nr_pages;
2985         int ret;
2986
2987         buf = strstrip(buf);
2988         ret = page_counter_memparse(buf, "-1", &nr_pages);
2989         if (ret)
2990                 return ret;
2991
2992         switch (MEMFILE_ATTR(of_cft(of)->private)) {
2993         case RES_LIMIT:
2994                 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
2995                         ret = -EINVAL;
2996                         break;
2997                 }
2998                 switch (MEMFILE_TYPE(of_cft(of)->private)) {
2999                 case _MEM:
3000                         ret = mem_cgroup_resize_limit(memcg, nr_pages);
3001                         break;
3002                 case _MEMSWAP:
3003                         ret = mem_cgroup_resize_memsw_limit(memcg, nr_pages);
3004                         break;
3005                 case _KMEM:
3006                         ret = memcg_update_kmem_limit(memcg, nr_pages);
3007                         break;
3008                 case _TCP:
3009                         ret = memcg_update_tcp_limit(memcg, nr_pages);
3010                         break;
3011                 }
3012                 break;
3013         case RES_SOFT_LIMIT:
3014                 memcg->soft_limit = nr_pages;
3015                 ret = 0;
3016                 break;
3017         }
3018         return ret ?: nbytes;
3019 }
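
/*
 * Worked example (illustrative): writing "100M" to one of the limit files
 * is turned into 25600 pages by page_counter_memparse() (with 4K pages),
 * while writing "-1" yields PAGE_COUNTER_MAX, i.e. "unlimited".
 */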
3020
3021 static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
3022                                 size_t nbytes, loff_t off)
3023 {
3024         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3025         struct page_counter *counter;
3026
3027         switch (MEMFILE_TYPE(of_cft(of)->private)) {
3028         case _MEM:
3029                 counter = &memcg->memory;
3030                 break;
3031         case _MEMSWAP:
3032                 counter = &memcg->memsw;
3033                 break;
3034         case _KMEM:
3035                 counter = &memcg->kmem;
3036                 break;
3037         case _TCP:
3038                 counter = &memcg->tcpmem;
3039                 break;
3040         default:
3041                 BUG();
3042         }
3043
3044         switch (MEMFILE_ATTR(of_cft(of)->private)) {
3045         case RES_MAX_USAGE:
3046                 page_counter_reset_watermark(counter);
3047                 break;
3048         case RES_FAILCNT:
3049                 counter->failcnt = 0;
3050                 break;
3051         default:
3052                 BUG();
3053         }
3054
3055         return nbytes;
3056 }
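
/*
 * Illustrative usage sketch (annotation, not part of the original file; the
 * mount point is an assumption): any write resets the counter named by the
 * file, e.g.
 *
 *   echo 0 > /sys/fs/cgroup/memory/mygroup/memory.max_usage_in_bytes
 *   echo 0 > /sys/fs/cgroup/memory/mygroup/memory.failcnt
 *
 * resets the usage watermark and the failure counter respectively.
 */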
3057
3058 static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
3059                                         struct cftype *cft)
3060 {
3061         return mem_cgroup_from_css(css)->move_charge_at_immigrate;
3062 }
3063
3064 #ifdef CONFIG_MMU
3065 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3066                                         struct cftype *cft, u64 val)
3067 {
3068         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3069
3070         if (val & ~MOVE_MASK)
3071                 return -EINVAL;
3072
3073         /*
3074          * No kind of locking is needed in here, because ->can_attach() will
3075          * check this value once at the beginning of the process, and then carry
3076          * on with stale data. This means that changes to this value will only
3077          * affect task migrations starting after the change.
3078          */
3079         memcg->move_charge_at_immigrate = val;
3080         return 0;
3081 }
3082 #else
3083 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3084                                         struct cftype *cft, u64 val)
3085 {
3086         return -ENOSYS;
3087 }
3088 #endif
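
/*
 * Illustrative usage sketch (annotation, not part of the original file; the
 * mount point is an assumption, and the bit layout follows the mainline
 * MOVE_ANON/MOVE_FILE definitions):
 *
 *   echo 1 > /sys/fs/cgroup/memory/mygroup/memory.move_charge_at_immigrate
 *
 * moves charges for anonymous pages when a task migrates into the group;
 * writing 3 also moves file page charges, and writing 0 disables moving.
 */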
3089
3090 #ifdef CONFIG_NUMA
3091 static int memcg_numa_stat_show(struct seq_file *m, void *v)
3092 {
3093         struct numa_stat {
3094                 const char *name;
3095                 unsigned int lru_mask;
3096         };
3097
3098         static const struct numa_stat stats[] = {
3099                 { "total", LRU_ALL },
3100                 { "file", LRU_ALL_FILE },
3101                 { "anon", LRU_ALL_ANON },
3102                 { "unevictable", BIT(LRU_UNEVICTABLE) },
3103         };
3104         const struct numa_stat *stat;
3105         int nid;
3106         unsigned long nr;
3107         struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
3108
3109         for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3110                 nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask);
3111                 seq_printf(m, "%s=%lu", stat->name, nr);
3112                 for_each_node_state(nid, N_MEMORY) {
3113                         nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
3114                                                           stat->lru_mask);
3115                         seq_printf(m, " N%d=%lu", nid, nr);
3116                 }
3117                 seq_putc(m, '\n');
3118         }
3119
3120         for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3121                 struct mem_cgroup *iter;
3122
3123                 nr = 0;
3124                 for_each_mem_cgroup_tree(iter, memcg)
3125                         nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask);
3126                 seq_printf(m, "hierarchical_%s=%lu", stat->name, nr);
3127                 for_each_node_state(nid, N_MEMORY) {
3128                         nr = 0;
3129                         for_each_mem_cgroup_tree(iter, memcg)
3130                                 nr += mem_cgroup_node_nr_lru_pages(
3131                                         iter, nid, stat->lru_mask);
3132                         seq_printf(m, " N%d=%lu", nid, nr);
3133                 }
3134                 seq_putc(m, '\n');
3135         }
3136
3137         return 0;
3138 }
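
/*
 * Illustrative output sketch (annotation, not part of the original file):
 * memory.numa_stat emits, for each statistic, a total followed by one
 * "N<node>=" pair per node with memory, then the same aggregated over the
 * hierarchy, e.g. on a two-node machine (values in pages, made up):
 *
 *   total=52000 N0=34000 N1=18000
 *   file=44000 N0=30000 N1=14000
 *   anon=8000 N0=4000 N1=4000
 *   unevictable=0 N0=0 N1=0
 *   hierarchical_total=120000 N0=70000 N1=50000
 *   ...
 */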
3139 #endif /* CONFIG_NUMA */
3140
3141 static int memcg_stat_show(struct seq_file *m, void *v)
3142 {
3143         struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
3144         unsigned long memory, memsw;
3145         struct mem_cgroup *mi;
3146         unsigned int i;
3147
3148         BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_stat_names) !=
3149                      MEM_CGROUP_STAT_NSTATS);
3150         BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_events_names) !=
3151                      MEM_CGROUP_EVENTS_NSTATS);
3152         BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
3153
3154         for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
3155                 if (i == MEM_CGROUP_STAT_SWAP && !do_memsw_account())
3156                         continue;
3157                 seq_printf(m, "%s %lu\n", mem_cgroup_stat_names[i],
3158                            mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
3159         }
3160
3161         for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++)
3162                 seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i],
3163                            mem_cgroup_read_events(memcg, i));
3164
3165         for (i = 0; i < NR_LRU_LISTS; i++)
3166                 seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
3167                            mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE);
3168
3169         /* Hierarchical information */
3170         memory = memsw = PAGE_COUNTER_MAX;
3171         for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
3172                 memory = min(memory, mi->memory.limit);
3173                 memsw = min(memsw, mi->memsw.limit);
3174         }
3175         seq_printf(m, "hierarchical_memory_limit %llu\n",
3176                    (u64)memory * PAGE_SIZE);
3177         if (do_memsw_account())
3178                 seq_printf(m, "hierarchical_memsw_limit %llu\n",
3179                            (u64)memsw * PAGE_SIZE);
3180
3181         for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
3182                 unsigned long long val = 0;
3183
3184                 if (i == MEM_CGROUP_STAT_SWAP && !do_memsw_account())
3185                         continue;
3186                 for_each_mem_cgroup_tree(mi, memcg)
3187                         val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
3188                 seq_printf(m, "total_%s %llu\n", mem_cgroup_stat_names[i], val);
3189         }
3190
3191         for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
3192                 unsigned long long val = 0;
3193
3194                 for_each_mem_cgroup_tree(mi, memcg)
3195                         val += mem_cgroup_read_events(mi, i);
3196                 seq_printf(m, "total_%s %llu\n",
3197                            mem_cgroup_events_names[i], val);
3198         }
3199
3200         for (i = 0; i < NR_LRU_LISTS; i++) {
3201                 unsigned long long val = 0;
3202
3203                 for_each_mem_cgroup_tree(mi, memcg)
3204                         val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE;
3205                 seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val);
3206         }
3207
3208 #ifdef CONFIG_DEBUG_VM
3209         {
3210                 int nid, zid;
3211                 struct mem_cgroup_per_zone *mz;
3212                 struct zone_reclaim_stat *rstat;
3213                 unsigned long recent_rotated[2] = {0, 0};
3214                 unsigned long recent_scanned[2] = {0, 0};
3215
3216                 for_each_online_node(nid)
3217                         for (zid = 0; zid < MAX_NR_ZONES; zid++) {
3218                                 mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
3219                                 rstat = &mz->lruvec.reclaim_stat;
3220
3221                                 recent_rotated[0] += rstat->recent_rotated[0];
3222                                 recent_rotated[1] += rstat->recent_rotated[1];
3223                                 recent_scanned[0] += rstat->recent_scanned[0];
3224                                 recent_scanned[1] += rstat->recent_scanned[1];
3225                         }
3226                 seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
3227                 seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
3228                 seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
3229                 seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
3230         }
3231 #endif
3232
3233         return 0;
3234 }
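
/*
 * Illustrative output sketch (annotation, not part of the original file):
 * memory.stat prints local values, then hierarchical limits, then "total_*"
 * values aggregated over the subtree, e.g. (values made up):
 *
 *   cache 1234567168
 *   rss 360448
 *   ...
 *   hierarchical_memory_limit 9223372036854771712
 *   total_cache 1234567168
 *   ...
 */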
3235
3236 static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
3237                                       struct cftype *cft)
3238 {
3239         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3240
3241         return mem_cgroup_swappiness(memcg);
3242 }
3243
3244 static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
3245                                        struct cftype *cft, u64 val)
3246 {
3247         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3248
3249         if (val > 100)
3250                 return -EINVAL;
3251
3252         if (css->parent)
3253                 memcg->swappiness = val;
3254         else
3255                 vm_swappiness = val;
3256
3257         return 0;
3258 }
3259
3260 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
3261 {
3262         struct mem_cgroup_threshold_ary *t;
3263         unsigned long usage;
3264         int i;
3265
3266         rcu_read_lock();
3267         if (!swap)
3268                 t = rcu_dereference(memcg->thresholds.primary);
3269         else
3270                 t = rcu_dereference(memcg->memsw_thresholds.primary);
3271
3272         if (!t)
3273                 goto unlock;
3274
3275         usage = mem_cgroup_usage(memcg, swap);
3276
3277         /*
3278          * current_threshold points to the threshold just below or equal to
3279          * usage. If that is no longer true, a threshold was crossed after
3280          * the last call of __mem_cgroup_threshold().
3281          */
3282         i = t->current_threshold;
3283
3284         /*
3285          * Iterate backward over the array of thresholds starting from
3286          * current_threshold and check if a threshold was crossed.
3287          * If none of the thresholds below usage was crossed, we read
3288          * only one element of the array here.
3289          */
3290         for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
3291                 eventfd_signal(t->entries[i].eventfd, 1);
3292
3293         /* i = current_threshold + 1 */
3294         i++;
3295
3296         /*
3297          * Iterate forward over the array of thresholds starting from
3298          * current_threshold+1 and check if a threshold was crossed.
3299          * If none of the thresholds above usage was crossed, we read
3300          * only one element of the array here.
3301          */
3302         for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
3303                 eventfd_signal(t->entries[i].eventfd, 1);
3304
3305         /* Update current_threshold */
3306         t->current_threshold = i - 1;
3307 unlock:
3308         rcu_read_unlock();
3309 }
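
/*
 * Worked example (annotation, not part of the original file): with registered
 * thresholds {4M, 8M, 16M} and current_threshold == 1 (8M was the highest
 * threshold <= usage at the previous check):
 *
 *  - usage drops to 3M: the backward loop signals the 8M and 4M eventfds and
 *    current_threshold ends up as -1.
 *  - usage grows to 20M: the forward loop signals the 16M eventfd and
 *    current_threshold ends up as 2.
 */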
3310
3311 static void mem_cgroup_threshold(struct mem_cgroup *memcg)
3312 {
3313         while (memcg) {
3314                 __mem_cgroup_threshold(memcg, false);
3315                 if (do_memsw_account())
3316                         __mem_cgroup_threshold(memcg, true);
3317
3318                 memcg = parent_mem_cgroup(memcg);
3319         }
3320 }
3321
3322 static int compare_thresholds(const void *a, const void *b)
3323 {
3324         const struct mem_cgroup_threshold *_a = a;
3325         const struct mem_cgroup_threshold *_b = b;
3326
3327         if (_a->threshold > _b->threshold)
3328                 return 1;
3329
3330         if (_a->threshold < _b->threshold)
3331                 return -1;
3332
3333         return 0;
3334 }
3335
3336 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
3337 {
3338         struct mem_cgroup_eventfd_list *ev;
3339
3340         spin_lock(&memcg_oom_lock);
3341
3342         list_for_each_entry(ev, &memcg->oom_notify, list)
3343                 eventfd_signal(ev->eventfd, 1);
3344
3345         spin_unlock(&memcg_oom_lock);
3346         return 0;
3347 }
3348
3349 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
3350 {
3351         struct mem_cgroup *iter;
3352
3353         for_each_mem_cgroup_tree(iter, memcg)
3354                 mem_cgroup_oom_notify_cb(iter);
3355 }
3356
3357 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
3358         struct eventfd_ctx *eventfd, const char *args, enum res_type type)
3359 {
3360         struct mem_cgroup_thresholds *thresholds;
3361         struct mem_cgroup_threshold_ary *new;
3362         unsigned long threshold;
3363         unsigned long usage;
3364         int i, size, ret;
3365
3366         ret = page_counter_memparse(args, "-1", &threshold);
3367         if (ret)
3368                 return ret;
3369
3370         mutex_lock(&memcg->thresholds_lock);
3371
3372         if (type == _MEM) {
3373                 thresholds = &memcg->thresholds;
3374                 usage = mem_cgroup_usage(memcg, false);
3375         } else if (type == _MEMSWAP) {
3376                 thresholds = &memcg->memsw_thresholds;
3377                 usage = mem_cgroup_usage(memcg, true);
3378         } else
3379                 BUG();
3380
3381         /* Check if a threshold was crossed before adding a new one */
3382         if (thresholds->primary)
3383                 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3384
3385         size = thresholds->primary ? thresholds->primary->size + 1 : 1;
3386
3387         /* Allocate memory for new array of thresholds */
3388         new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
3389                         GFP_KERNEL);
3390         if (!new) {
3391                 ret = -ENOMEM;
3392                 goto unlock;
3393         }
3394         new->size = size;
3395
3396         /* Copy thresholds (if any) to new array */
3397         if (thresholds->primary) {
3398                 memcpy(new->entries, thresholds->primary->entries, (size - 1) *
3399                                 sizeof(struct mem_cgroup_threshold));
3400         }
3401
3402         /* Add new threshold */
3403         new->entries[size - 1].eventfd = eventfd;
3404         new->entries[size - 1].threshold = threshold;
3405
3406         /* Sort the thresholds. Registering a new threshold isn't time-critical */
3407         sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
3408                         compare_thresholds, NULL);
3409
3410         /* Find current threshold */
3411         new->current_threshold = -1;
3412         for (i = 0; i < size; i++) {
3413                 if (new->entries[i].threshold <= usage) {
3414                         /*
3415                          * new->current_threshold will not be used until
3416                          * rcu_assign_pointer(), so it's safe to increment
3417                          * it here.
3418                          */
3419                         ++new->current_threshold;
3420                 } else
3421                         break;
3422         }
3423
3424         /* Free old spare buffer and save old primary buffer as spare */
3425         kfree(thresholds->spare);
3426         thresholds->spare = thresholds->primary;
3427
3428         rcu_assign_pointer(thresholds->primary, new);
3429
3430         /* Make sure that nobody still uses the old thresholds array */
3431         synchronize_rcu();
3432
3433 unlock:
3434         mutex_unlock(&memcg->thresholds_lock);
3435
3436         return ret;
3437 }
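
/*
 * Update-scheme sketch (annotation, not part of the original file): the
 * thresholds array is RCU-protected and updated copy-on-write.  A new array
 * is built from the old entries plus the new one, sorted, and published with
 * rcu_assign_pointer(); the old primary array is kept as ->spare so that
 * unregistration can reuse it without allocating.
 */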
3438
3439 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
3440         struct eventfd_ctx *eventfd, const char *args)
3441 {
3442         return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
3443 }
3444
3445 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
3446         struct eventfd_ctx *eventfd, const char *args)
3447 {
3448         return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
3449 }
3450
3451 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
3452         struct eventfd_ctx *eventfd, enum res_type type)
3453 {
3454         struct mem_cgroup_thresholds *thresholds;
3455         struct mem_cgroup_threshold_ary *new;
3456         unsigned long usage;
3457         int i, j, size;
3458
3459         mutex_lock(&memcg->thresholds_lock);
3460
3461         if (type == _MEM) {
3462                 thresholds = &memcg->thresholds;
3463                 usage = mem_cgroup_usage(memcg, false);
3464         } else if (type == _MEMSWAP) {
3465                 thresholds = &memcg->memsw_thresholds;
3466                 usage = mem_cgroup_usage(memcg, true);
3467         } else
3468                 BUG();
3469
3470         if (!thresholds->primary)
3471                 goto unlock;
3472
3473         /* Check if a threshold was crossed before removing */
3474         __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3475
3476         /* Calculate the new number of thresholds */
3477         size = 0;
3478         for (i = 0; i < thresholds->primary->size; i++) {
3479                 if (thresholds->primary->entries[i].eventfd != eventfd)
3480                         size++;
3481         }
3482
3483         new = thresholds->spare;
3484
3485         /* Set the thresholds array to NULL if no thresholds remain */
3486         if (!size) {
3487                 kfree(new);
3488                 new = NULL;
3489                 goto swap_buffers;
3490         }
3491
3492         new->size = size;
3493
3494         /* Copy thresholds and find current threshold */
3495         new->current_threshold = -1;
3496         for (i = 0, j = 0; i < thresholds->primary->size; i++) {
3497                 if (thresholds->primary->entries[i].eventfd == eventfd)
3498                         continue;
3499
3500                 new->entries[j] = thresholds->primary->entries[i];
3501                 if (new->entries[j].threshold <= usage) {
3502                         /*
3503                          * new->current_threshold will not be used
3504                          * until rcu_assign_pointer(), so it's safe to increment
3505                          * it here.
3506                          */
3507                         ++new->current_threshold;
3508                 }
3509                 j++;
3510         }
3511
3512 swap_buffers:
3513         /* Swap primary and spare array */
3514         thresholds->spare = thresholds->primary;
3515
3516         rcu_assign_pointer(thresholds->primary, new);
3517
3518         /* Make sure that nobody still uses the old thresholds array */
3519         synchronize_rcu();
3520
3521         /* If all events are unregistered, free the spare array */
3522         if (!new) {
3523                 kfree(thresholds->spare);
3524                 thresholds->spare = NULL;
3525         }
3526 unlock:
3527         mutex_unlock(&memcg->thresholds_lock);
3528 }
3529
3530 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
3531         struct eventfd_ctx *eventfd)
3532 {
3533         return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
3534 }
3535
3536 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
3537         struct eventfd_ctx *eventfd)
3538 {
3539         return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
3540 }
3541
3542 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
3543         struct eventfd_ctx *eventfd, const char *args)
3544 {
3545         struct mem_cgroup_eventfd_list *event;
3546
3547         event = kmalloc(sizeof(*event), GFP_KERNEL);
3548         if (!event)
3549                 return -ENOMEM;
3550
3551         spin_lock(&memcg_oom_lock);
3552
3553         event->eventfd = eventfd;
3554         list_add(&event->list, &memcg->oom_notify);
3555
3556         /* already in OOM? */
3557         if (memcg->under_oom)
3558                 eventfd_signal(eventfd, 1);
3559         spin_unlock(&memcg_oom_lock);
3560
3561         return 0;
3562 }
3563
3564 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
3565         struct eventfd_ctx *eventfd)
3566 {
3567         struct mem_cgroup_eventfd_list *ev, *tmp;
3568
3569         spin_lock(&memcg_oom_lock);
3570
3571         list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
3572                 if (ev->eventfd == eventfd) {
3573                         list_del(&ev->list);
3574                         kfree(ev);
3575                 }
3576         }
3577
3578         spin_unlock(&memcg_oom_lock);
3579 }
3580
3581 static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
3582 {
3583         struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
3584
3585         seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
3586         seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
3587         return 0;
3588 }
3589
3590 static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
3591         struct cftype *cft, u64 val)
3592 {
3593         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3594
3595         /* cannot be set on the root cgroup, and only 0 and 1 are allowed */
3596         if (!css->parent || !((val == 0) || (val == 1)))
3597                 return -EINVAL;
3598
3599         memcg->oom_kill_disable = val;
3600         if (!val)
3601                 memcg_oom_recover(memcg);
3602
3603         return 0;
3604 }
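
/*
 * Illustrative usage sketch (annotation, not part of the original file; the
 * mount point is an assumption):
 *
 *   echo 1 > /sys/fs/cgroup/memory/mygroup/memory.oom_control
 *
 * disables the OOM killer for the group; writing 0 re-enables it and wakes
 * any waiters via memcg_oom_recover() above.
 */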
3605
3606 #ifdef CONFIG_CGROUP_WRITEBACK
3607
3608 struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg)
3609 {
3610         return &memcg->cgwb_list;
3611 }
3612
3613 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3614 {
3615         return wb_domain_init(&memcg->cgwb_domain, gfp);
3616 }
3617
3618 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3619 {
3620         wb_domain_exit(&memcg->cgwb_domain);
3621 }
3622
3623 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3624 {
3625         wb_domain_size_changed(&memcg->cgwb_domain);
3626 }
3627
3628 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
3629 {
3630         struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3631
3632         if (!memcg->css.parent)
3633                 return NULL;
3634
3635         return &memcg->cgwb_domain;
3636 }
3637
3638 /**
3639  * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
3640  * @wb: bdi_writeback in question
3641  * @pfilepages: out parameter for number of file pages
3642  * @pheadroom: out parameter for number of allocatable pages according to memcg
3643  * @pdirty: out parameter for number of dirty pages
3644  * @pwriteback: out parameter for number of pages under writeback
3645  *
3646  * Determine the numbers of file, headroom, dirty, and writeback pages in
3647  * @wb's memcg.  File, dirty and writeback are self-explanatory.  Headroom
3648  * is a bit more involved.
3649  *
3650  * A memcg's headroom is "min(max, high) - used".  In the hierarchy, the
3651  * headroom is calculated as the lowest headroom of itself and the
3652  * ancestors.  Note that this doesn't consider the actual amount of
3653  * available memory in the system.  The caller should further cap
3654  * *@pheadroom accordingly.
3655  */
3656 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
3657                          unsigned long *pheadroom, unsigned long *pdirty,
3658                          unsigned long *pwriteback)
3659 {
3660         struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3661         struct mem_cgroup *parent;
3662
3663         *pdirty = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_DIRTY);
3664
3665         /* this should eventually include NR_UNSTABLE_NFS */
3666         *pwriteback = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);
3667         *pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) |
3668                                                      (1 << LRU_ACTIVE_FILE));
3669         *pheadroom = PAGE_COUNTER_MAX;
3670
3671         while ((parent = parent_mem_cgroup(memcg))) {
3672                 unsigned long ceiling = min(memcg->memory.limit, memcg->high);
3673                 unsigned long used = page_counter_read(&memcg->memory);
3674
3675                 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
3676                 memcg = parent;
3677         }
3678 }
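
/*
 * Worked example (annotation, not part of the original file): for a memcg
 * with min(limit, high) == 100 pages and 70 pages used, whose non-root
 * ancestor has min(limit, high) == 1000 pages and 990 pages used, the loop
 * above computes *pheadroom = min(100 - 70, 1000 - 990) = 10 pages.
 */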
3679
3680 #else   /* CONFIG_CGROUP_WRITEBACK */
3681
3682 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3683 {
3684         return 0;
3685 }
3686
3687 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3688 {
3689 }
3690
3691 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3692 {
3693 }
3694
3695 #endif  /* CONFIG_CGROUP_WRITEBACK */
3696
3697 /*
3698  * DO NOT USE IN NEW FILES.
3699  *
3700  * "cgroup.event_control" implementation.
3701  *
3702  * This is way over-engineered.  It tries to support fully configurable
3703  * events for each user.  Such a level of flexibility is completely
3704  * unnecessary, especially in light of the planned unified hierarchy.
3705  *
3706  * Please deprecate this and replace with something simpler if at all
3707  * possible.
3708  */
3709
3710 /*
3711  * Unregister event and free resources.
3712  *
3713  * Gets called from workqueue.
3714  */
3715 static void memcg_event_remove(struct work_struct *work)
3716 {
3717         struct mem_cgroup_event *event =
3718                 container_of(work, struct mem_cgroup_event, remove);
3719         struct mem_cgroup *memcg = event->memcg;
3720
3721         remove_wait_queue(event->wqh, &event->wait);
3722
3723         event->unregister_event(memcg, event->eventfd);
3724
3725         /* Notify userspace the event is going away. */
3726         eventfd_signal(event->eventfd, 1);
3727
3728         eventfd_ctx_put(event->eventfd);
3729         kfree(event);
3730         css_put(&memcg->css);
3731 }
3732
3733 /*
3734  * Gets called on POLLHUP on eventfd when user closes it.
3735  *
3736  * Called with wqh->lock held and interrupts disabled.
3737  */
3738 static int memcg_event_wake(wait_queue_t *wait, unsigned mode,
3739                             int sync, void *key)
3740 {
3741         struct mem_cgroup_event *event =
3742                 container_of(wait, struct mem_cgroup_event, wait);
3743         struct mem_cgroup *memcg = event->memcg;
3744         unsigned long flags = (unsigned long)key;
3745
3746         if (flags & POLLHUP) {
3747                 /*
3748                  * If the event has been detached at cgroup removal, we
3749                  * can simply return knowing the other side will clean up
3750                  * for us.
3751                  *
3752                  * We can't race against event freeing since the other
3753                  * side will require wqh->lock via remove_wait_queue(),
3754                  * which we hold.
3755                  */
3756                 spin_lock(&memcg->event_list_lock);
3757                 if (!list_empty(&event->list)) {
3758                         list_del_init(&event->list);
3759                         /*
3760                          * We are in atomic context, but memcg_event_remove()
3761                          * may sleep, so we have to call it from a workqueue.
3762                          */
3763                         schedule_work(&event->remove);
3764                 }
3765                 spin_unlock(&memcg->event_list_lock);
3766         }
3767
3768         return 0;
3769 }
3770
3771 static void memcg_event_ptable_queue_proc(struct file *file,
3772                 wait_queue_head_t *wqh, poll_table *pt)
3773 {
3774         struct mem_cgroup_event *event =
3775                 container_of(pt, struct mem_cgroup_event, pt);
3776
3777         event->wqh = wqh;
3778         add_wait_queue(wqh, &event->wait);
3779 }
3780
3781 /*
3782  * DO NOT USE IN NEW FILES.
3783  *
3784  * Parse input and register new cgroup event handler.
3785  *
3786  * Input must be in format '<event_fd> <control_fd> <args>'.
3787  * Interpretation of args is defined by control file implementation.
3788  */
3789 static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
3790                                          char *buf, size_t nbytes, loff_t off)
3791 {
3792         struct cgroup_subsys_state *css = of_css(of);
3793         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3794         struct mem_cgroup_event *event;
3795         struct cgroup_subsys_state *cfile_css;
3796         unsigned int efd, cfd;
3797         struct fd efile;
3798         struct fd cfile;
3799         const char *name;
3800         char *endp;
3801         int ret;
3802
3803         buf = strstrip(buf);
3804
3805         efd = simple_strtoul(buf, &endp, 10);
3806         if (*endp != ' ')
3807                 return -EINVAL;
3808         buf = endp + 1;
3809
3810         cfd = simple_strtoul(buf, &endp, 10);
3811         if ((*endp != ' ') && (*endp != '\0'))
3812                 return -EINVAL;
3813         buf = endp + 1;
3814
3815         event = kzalloc(sizeof(*event), GFP_KERNEL);
3816         if (!event)
3817                 return -ENOMEM;
3818
3819         event->memcg = memcg;
3820         INIT_LIST_HEAD(&event->list);
3821         init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
3822         init_waitqueue_func_entry(&event->wait, memcg_event_wake);
3823         INIT_WORK(&event->remove, memcg_event_remove);
3824
3825         efile = fdget(efd);
3826         if (!efile.file) {
3827                 ret = -EBADF;
3828                 goto out_kfree;
3829         }
3830
3831         event->eventfd = eventfd_ctx_fileget(efile.file);
3832         if (IS_ERR(event->eventfd)) {
3833                 ret = PTR_ERR(event->eventfd);
3834                 goto out_put_efile;
3835         }
3836
3837         cfile = fdget(cfd);
3838         if (!cfile.file) {
3839                 ret = -EBADF;
3840                 goto out_put_eventfd;
3841         }
3842
3843         /* the process needs read permission on the control file */
3844         /* AV: shouldn't we check that it's been opened for read instead? */
3845         ret = inode_permission(file_inode(cfile.file), MAY_READ);
3846         if (ret < 0)
3847                 goto out_put_cfile;
3848
3849         /*
3850          * Determine the event callbacks and set them in @event.  This used
3851          * to be done via struct cftype but cgroup core no longer knows
3852          * about these events.  The following is crude but the whole thing
3853          * is for compatibility anyway.
3854          *
3855          * DO NOT ADD NEW FILES.
3856          */
3857         name = cfile.file->f_path.dentry->d_name.name;
3858
3859         if (!strcmp(name, "memory.usage_in_bytes")) {
3860                 event->register_event = mem_cgroup_usage_register_event;
3861                 event->unregister_event = mem_cgroup_usage_unregister_event;
3862         } else if (!strcmp(name, "memory.oom_control")) {
3863                 event->register_event = mem_cgroup_oom_register_event;
3864                 event->unregister_event = mem_cgroup_oom_unregister_event;
3865         } else if (!strcmp(name, "memory.pressure_level")) {
3866                 event->register_event = vmpressure_register_event;
3867                 event->unregister_event = vmpressure_unregister_event;
3868         } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
3869                 event->register_event = memsw_cgroup_usage_register_event;
3870                 event->unregister_event = memsw_cgroup_usage_unregister_event;
3871         } else {
3872                 ret = -EINVAL;
3873                 goto out_put_cfile;
3874         }
3875
3876         /*
3877          * Verify that @cfile belongs to @css.  Also, remaining events are
3878          * automatically removed on cgroup destruction but the removal is
3879          * asynchronous, so take an extra ref on @css.
3880          */
3881         cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
3882                                                &memory_cgrp_subsys);
3883         ret = -EINVAL;
3884         if (IS_ERR(cfile_css))
3885                 goto out_put_cfile;
3886         if (cfile_css != css) {
3887                 css_put(cfile_css);
3888                 goto out_put_cfile;
3889         }
3890
3891         ret = event->register_event(memcg, event->eventfd, buf);
3892         if (ret)
3893                 goto out_put_css;
3894
3895         efile.file->f_op->poll(efile.file, &event->pt);
3896
3897         spin_lock(&memcg->event_list_lock);
3898         list_add(&event->list, &memcg->event_list);
3899         spin_unlock(&memcg->event_list_lock);
3900
3901         fdput(cfile);
3902         fdput(efile);
3903
3904         return nbytes;
3905
3906 out_put_css:
3907         css_put(css);
3908 out_put_cfile:
3909         fdput(cfile);
3910 out_put_eventfd:
3911         eventfd_ctx_put(event->eventfd);
3912 out_put_efile:
3913         fdput(efile);
3914 out_kfree:
3915         kfree(event);
3916
3917         return ret;
3918 }
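
/*
 * Illustrative userspace sketch (annotation, not part of the original file;
 * the cgroup mount point and the 4M threshold are assumptions; error handling
 * and cleanup are omitted): register a usage-threshold event through the
 * legacy cgroup.event_control interface described above.
 */
#if 0	/* userspace example, not kernel code */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/eventfd.h>

int main(void)
{
	int efd = eventfd(0, 0);
	int cfd = open("/sys/fs/cgroup/memory/mygroup/memory.usage_in_bytes",
		       O_RDONLY);
	int ecfd = open("/sys/fs/cgroup/memory/mygroup/cgroup.event_control",
			O_WRONLY);
	char buf[64];
	uint64_t ticks;

	/* "<event_fd> <control_fd> <args>": args is the threshold here */
	snprintf(buf, sizeof(buf), "%d %d 4M", efd, cfd);
	write(ecfd, buf, strlen(buf));

	/* blocks until the 4M usage threshold is crossed */
	read(efd, &ticks, sizeof(ticks));
	printf("memory.usage_in_bytes threshold crossed\n");
	return 0;
}
#endif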
3919
3920 static struct cftype mem_cgroup_legacy_files[] = {
3921         {
3922                 .name = "usage_in_bytes",
3923                 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
3924                 .read_u64 = mem_cgroup_read_u64,
3925         },
3926         {
3927                 .name = "max_usage_in_bytes",
3928                 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
3929                 .write = mem_cgroup_reset,
3930                 .read_u64 = mem_cgroup_read_u64,
3931         },
3932         {
3933                 .name = "limit_in_bytes",
3934                 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
3935                 .write = mem_cgroup_write,
3936                 .read_u64 = mem_cgroup_read_u64,
3937         },
3938         {
3939                 .name = "soft_limit_in_bytes",
3940                 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
3941                 .write = mem_cgroup_write,
3942                 .read_u64 = mem_cgroup_read_u64,
3943         },
3944         {
3945                 .name = "failcnt",
3946                 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
3947                 .write = mem_cgroup_reset,
3948                 .read_u64 = mem_cgroup_read_u64,
3949         },
3950         {
3951                 .name = "stat",
3952                 .seq_show = memcg_stat_show,
3953         },
3954         {
3955                 .name = "force_empty",
3956                 .write = mem_cgroup_force_empty_write,
3957         },
3958         {
3959                 .name = "use_hierarchy",
3960                 .write_u64 = mem_cgroup_hierarchy_write,
3961                 .read_u64 = mem_cgroup_hierarchy_read,
3962         },
3963         {
3964                 .name = "cgroup.event_control",         /* XXX: for compat */
3965                 .write = memcg_write_event_control,
3966                 .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
3967         },
3968         {
3969                 .name = "swappiness",
3970                 .read_u64 = mem_cgroup_swappiness_read,
3971                 .write_u64 = mem_cgroup_swappiness_write,
3972         },
3973         {
3974                 .name = "move_charge_at_immigrate",
3975                 .read_u64 = mem_cgroup_move_charge_read,
3976                 .write_u64 = mem_cgroup_move_charge_write,
3977         },
3978         {
3979                 .name = "oom_control",
3980                 .seq_show = mem_cgroup_oom_control_read,
3981                 .write_u64 = mem_cgroup_oom_control_write,
3982                 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
3983         },
3984         {
3985                 .name = "pressure_level",
3986         },
3987 #ifdef CONFIG_NUMA
3988         {
3989                 .name = "numa_stat",
3990                 .seq_show = memcg_numa_stat_show,
3991         },
3992 #endif
3993         {
3994                 .name = "kmem.limit_in_bytes",
3995                 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
3996                 .write = mem_cgroup_write,
3997                 .read_u64 = mem_cgroup_read_u64,
3998         },
3999         {
4000                 .name = "kmem.usage_in_bytes",
4001                 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
4002                 .read_u64 = mem_cgroup_read_u64,
4003         },
4004         {
4005                 .name = "kmem.failcnt",
4006                 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
4007                 .write = mem_cgroup_reset,
4008                 .read_u64 = mem_cgroup_read_u64,
4009         },
4010         {
4011                 .name = "kmem.max_usage_in_bytes",
4012                 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
4013                 .write = mem_cgroup_reset,
4014                 .read_u64 = mem_cgroup_read_u64,
4015         },
4016 #ifdef CONFIG_SLABINFO
4017         {
4018                 .name = "kmem.slabinfo",
4019                 .seq_start = slab_start,
4020                 .seq_next = slab_next,
4021                 .seq_stop = slab_stop,
4022                 .seq_show = memcg_slab_show,
4023         },
4024 #endif
4025         {
4026                 .name = "kmem.tcp.limit_in_bytes",
4027                 .private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
4028                 .write = mem_cgroup_write,
4029                 .read_u64 = mem_cgroup_read_u64,
4030         },
4031         {
4032                 .name = "kmem.tcp.usage_in_bytes",
4033                 .private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
4034                 .read_u64 = mem_cgroup_read_u64,
4035         },
4036         {
4037                 .name = "kmem.tcp.failcnt",
4038                 .private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
4039                 .write = mem_cgroup_reset,
4040                 .read_u64 = mem_cgroup_read_u64,
4041         },
4042         {
4043                 .name = "kmem.tcp.max_usage_in_bytes",
4044                 .private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
4045                 .write = mem_cgroup_reset,
4046                 .read_u64 = mem_cgroup_read_u64,
4047         },
4048         { },    /* terminate */
4049 };
4050
4051 static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
4052 {
4053         struct mem_cgroup_per_node *pn;
4054         struct mem_cgroup_per_zone *mz;
4055         int zone, tmp = node;
4056         /*
4057          * This routine is called against all possible nodes, but it is a
4058          * BUG to call kmalloc() against an offline node.
4059          *
4060          * TODO: this routine can waste a lot of memory for nodes which will
4061          *       never be onlined. It would be better to use a memory hotplug
4062          *       callback function.
4063          */
4064         if (!node_state(node, N_NORMAL_MEMORY))
4065                 tmp = -1;
4066         pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
4067         if (!pn)
4068                 return 1;
4069
4070         for (zone = 0; zone < MAX_NR_ZONES; zone++) {
4071                 mz = &pn->zoneinfo[zone];
4072                 lruvec_init(&mz->lruvec);
4073                 mz->usage_in_excess = 0;
4074                 mz->on_tree = false;
4075                 mz->memcg = memcg;
4076         }
4077         memcg->nodeinfo[node] = pn;
4078         return 0;
4079 }
4080
4081 static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
4082 {
4083         kfree(memcg->nodeinfo[node]);
4084 }
4085
4086 static void mem_cgroup_free(struct mem_cgroup *memcg)
4087 {
4088         int node;
4089
4090         memcg_wb_domain_exit(memcg);
4091         for_each_node(node)
4092                 free_mem_cgroup_per_zone_info(memcg, node);
4093         free_percpu(memcg->stat);
4094         kfree(memcg);
4095 }
4096
4097 static struct mem_cgroup *mem_cgroup_alloc(void)
4098 {
4099         struct mem_cgroup *memcg;
4100         size_t size;
4101         int node;
4102
4103         size = sizeof(struct mem_cgroup);
4104         size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
4105
4106         memcg = kzalloc(size, GFP_KERNEL);
4107         if (!memcg)
4108                 return NULL;
4109
4110         memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
4111         if (!memcg->stat)
4112                 goto fail;
4113
4114         for_each_node(node)
4115                 if (alloc_mem_cgroup_per_zone_info(memcg, node))
4116                         goto fail;
4117
4118         if (memcg_wb_domain_init(memcg, GFP_KERNEL))
4119                 goto fail;
4120
4121         INIT_WORK(&memcg->high_work, high_work_func);
4122         memcg->last_scanned_node = MAX_NUMNODES;
4123         INIT_LIST_HEAD(&memcg->oom_notify);
4124         mutex_init(&memcg->thresholds_lock);
4125         spin_lock_init(&memcg->move_lock);
4126         vmpressure_init(&memcg->vmpressure);
4127         INIT_LIST_HEAD(&memcg->event_list);
4128         spin_lock_init(&memcg->event_list_lock);
4129         memcg->socket_pressure = jiffies;
4130 #ifndef CONFIG_SLOB
4131         memcg->kmemcg_id = -1;
4132 #endif
4133 #ifdef CONFIG_CGROUP_WRITEBACK
4134         INIT_LIST_HEAD(&memcg->cgwb_list);
4135 #endif
4136         return memcg;
4137 fail:
4138         mem_cgroup_free(memcg);
4139         return NULL;
4140 }
4141
4142 static struct cgroup_subsys_state * __ref
4143 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
4144 {
4145         struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
4146         struct mem_cgroup *memcg;
4147         long error = -ENOMEM;
4148
4149         memcg = mem_cgroup_alloc();
4150         if (!memcg)
4151                 return ERR_PTR(error);
4152
4153         memcg->high = PAGE_COUNTER_MAX;
4154         memcg->soft_limit = PAGE_COUNTER_MAX;
4155         if (parent) {
4156                 memcg->swappiness = mem_cgroup_swappiness(parent);
4157                 memcg->oom_kill_disable = parent->oom_kill_disable;
4158         }
4159         if (parent && parent->use_hierarchy) {
4160                 memcg->use_hierarchy = true;
4161                 page_counter_init(&memcg->memory, &parent->memory);
4162                 page_counter_init(&memcg->swap, &parent->swap);
4163                 page_counter_init(&memcg->memsw, &parent->memsw);
4164                 page_counter_init(&memcg->kmem, &parent->kmem);
4165                 page_counter_init(&memcg->tcpmem, &parent->tcpmem);
4166         } else {
4167                 page_counter_init(&memcg->memory, NULL);
4168                 page_counter_init(&memcg->swap, NULL);
4169                 page_counter_init(&memcg->memsw, NULL);
4170                 page_counter_init(&memcg->kmem, NULL);
4171                 page_counter_init(&memcg->tcpmem, NULL);
4172                 /*
4173                  * A deeper hierarchy with use_hierarchy == false doesn't make
4174                  * much sense, so let the cgroup subsystem know about this
4175                  * unfortunate state in our controller.
4176                  */
4177                 if (parent != root_mem_cgroup)
4178                         memory_cgrp_subsys.broken_hierarchy = true;
4179         }
4180
4181         /* The following stuff does not apply to the root */
4182         if (!parent) {
4183                 root_mem_cgroup = memcg;
4184                 return &memcg->css;
4185         }
4186
4187         error = memcg_online_kmem(memcg);
4188         if (error)
4189                 goto fail;
4190
4191         if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
4192                 static_branch_inc(&memcg_sockets_enabled_key);
4193
4194         return &memcg->css;
4195 fail:
4196         mem_cgroup_free(memcg);
4197         return NULL;
4198 }
4199
4200 static int
4201 mem_cgroup_css_online(struct cgroup_subsys_state *css)
4202 {
4203         if (css->id > MEM_CGROUP_ID_MAX)
4204                 return -ENOSPC;
4205
4206         return 0;
4207 }
4208
4209 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
4210 {
4211         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4212         struct mem_cgroup_event *event, *tmp;
4213
4214         /*
4215          * Unregister events and notify userspace.
4216          * Notify userspace about cgroup removal only after rmdir of the cgroup
4217          * directory to avoid a race between userspace and kernel space.
4218          */
4219         spin_lock(&memcg->event_list_lock);
4220         list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
4221                 list_del_init(&event->list);
4222                 schedule_work(&event->remove);
4223         }
4224         spin_unlock(&memcg->event_list_lock);
4225
4226         memcg_offline_kmem(memcg);
4227         wb_memcg_offline(memcg);
4228 }
4229
4230 static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
4231 {
4232         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4233
4234         invalidate_reclaim_iterators(memcg);
4235 }
4236
4237 static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
4238 {
4239         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4240
4241         if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
4242                 static_branch_dec(&memcg_sockets_enabled_key);
4243
4244         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
4245                 static_branch_dec(&memcg_sockets_enabled_key);
4246
4247         vmpressure_cleanup(&memcg->vmpressure);
4248         cancel_work_sync(&memcg->high_work);
4249         mem_cgroup_remove_from_trees(memcg);
4250         memcg_free_kmem(memcg);
4251         mem_cgroup_free(memcg);
4252 }
4253
4254 /**
4255  * mem_cgroup_css_reset - reset the states of a mem_cgroup
4256  * @css: the target css
4257  *
4258  * Reset the states of the mem_cgroup associated with @css.  This is
4259  * invoked when the userland requests disabling on the default hierarchy
4260  * but the memcg is pinned through dependency.  The memcg should stop
4261  * applying policies and should revert to the vanilla state as it may be
4262  * made visible again.
4263  *
4264  * The current implementation only resets the essential configurations.
4265  * This needs to be expanded to cover all the visible parts.
4266  */
4267 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
4268 {
4269         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4270
4271         page_counter_limit(&memcg->memory, PAGE_COUNTER_MAX);
4272         page_counter_limit(&memcg->swap, PAGE_COUNTER_MAX);
4273         page_counter_limit(&memcg->memsw, PAGE_COUNTER_MAX);
4274         page_counter_limit(&memcg->kmem, PAGE_COUNTER_MAX);
4275         page_counter_limit(&memcg->tcpmem, PAGE_COUNTER_MAX);
4276         memcg->low = 0;
4277         memcg->high = PAGE_COUNTER_MAX;
4278         memcg->soft_limit = PAGE_COUNTER_MAX;
4279         memcg_wb_domain_size_changed(memcg);
4280 }
4281
4282 #ifdef CONFIG_MMU
4283 /* Handlers for move charge at task migration. */
4284 static int mem_cgroup_do_precharge(unsigned long count)
4285 {
4286         int ret;
4287
4288         /* Try a single bulk charge without reclaim first, kswapd may wake */
4289         ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
4290         if (!ret) {
4291                 mc.precharge += count;
4292                 return ret;
4293         }
4294
4295         /* Try charges one by one with reclaim */
4296         while (count--) {
4297                 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_NORETRY, 1);
4298                 if (ret)
4299                         return ret;
4300                 mc.precharge++;
4301                 cond_resched();
4302         }
4303         return 0;
4304 }
4305
4306 /**
4307  * get_mctgt_type - get target type of moving charge
4308  * @vma: the vma to which the pte to be checked belongs
4309  * @addr: the address corresponding to the pte to be checked
4310  * @ptent: the pte to be checked
4311  * @target: the pointer where the target page or swap entry will be stored (can be NULL)
4312  *
4313  * Returns
4314  *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
4315  *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
4316  *     move charge. If @target is not NULL, the page is stored in target->page
4317  *     with an extra refcount taken (callers should handle it).
4318  *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
4319  *     target for charge migration. If @target is not NULL, the entry is stored
4320  *     in target->ent.
4321  *
4322  * Called with pte lock held.
4323  */
4324 union mc_target {
4325         struct page     *page;
4326         swp_entry_t     ent;
4327 };
4328
4329 enum mc_target_type {
4330         MC_TARGET_NONE = 0,
4331         MC_TARGET_PAGE,
4332         MC_TARGET_SWAP,
4333 };
4334
4335 static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
4336                                                 unsigned long addr, pte_t ptent)
4337 {
4338         struct page *page = vm_normal_page(vma, addr, ptent);
4339
4340         if (!page || !page_mapped(page))
4341                 return NULL;
4342         if (PageAnon(page)) {
4343                 if (!(mc.flags & MOVE_ANON))
4344                         return NULL;
4345         } else {
4346                 if (!(mc.flags & MOVE_FILE))
4347                         return NULL;
4348         }
4349         if (!get_page_unless_zero(page))
4350                 return NULL;
4351
4352         return page;
4353 }
4354
4355 #ifdef CONFIG_SWAP
4356 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
4357                         unsigned long addr, pte_t ptent, swp_entry_t *entry)
4358 {
4359         struct page *page = NULL;
4360         swp_entry_t ent = pte_to_swp_entry(ptent);
4361
4362         if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent))
4363                 return NULL;
4364          * Because lookup_swap_cache() updates some statistics counters,
4365          * Because lookup_swap_cache() updates some statistics counter,
4366          * we call find_get_page() with swapper_space directly.
4367          */
4368         page = find_get_page(swap_address_space(ent), ent.val);
4369         if (do_memsw_account())
4370                 entry->val = ent.val;
4371
4372         return page;
4373 }
4374 #else
4375 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
4376                         unsigned long addr, pte_t ptent, swp_entry_t *entry)
4377 {
4378         return NULL;
4379 }
4380 #endif
4381
4382 static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
4383                         unsigned long addr, pte_t ptent, swp_entry_t *entry)
4384 {
4385         struct page *page = NULL;
4386         struct address_space *mapping;
4387         pgoff_t pgoff;
4388
4389         if (!vma->vm_file) /* anonymous vma */
4390                 return NULL;
4391         if (!(mc.flags & MOVE_FILE))
4392                 return NULL;
4393
4394         mapping = vma->vm_file->f_mapping;
4395         pgoff = linear_page_index(vma, addr);
4396
4397         /* page is moved even if it's not RSS of this task (page-faulted). */
4398 #ifdef CONFIG_SWAP
4399         /* shmem/tmpfs may report page out on swap: account for that too. */
4400         if (shmem_mapping(mapping)) {
4401                 page = find_get_entry(mapping, pgoff);
4402                 if (radix_tree_exceptional_entry(page)) {
4403                         swp_entry_t swp = radix_to_swp_entry(page);
4404                         if (do_memsw_account())
4405                                 *entry = swp;
4406                         page = find_get_page(swap_address_space(swp), swp.val);
4407                 }
4408         } else
4409                 page = find_get_page(mapping, pgoff);
4410 #else
4411         page = find_get_page(mapping, pgoff);
4412 #endif
4413         return page;
4414 }
4415
4416 /**
4417  * mem_cgroup_move_account - move account of the page
4418  * @page: the page
4419  * @nr_pages: number of regular pages (>1 for huge pages)
4420  * @from: mem_cgroup which the page is moved from.
4421  * @to: mem_cgroup which the page is moved to. @from != @to.
4422  *
4423  * The caller must make sure the page is not on the LRU (isolate_page() is useful).
4424  *
4425  * This function doesn't "charge" the new cgroup and doesn't "uncharge"
4426  * the old cgroup.
4427  */
4428 static int mem_cgroup_move_account(struct page *page,
4429                                    bool compound,
4430                                    struct mem_cgroup *from,
4431                                    struct mem_cgroup *to)
4432 {
4433         unsigned long flags;
4434         unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
4435         int ret;
4436         bool anon;
4437
4438         VM_BUG_ON(from == to);
4439         VM_BUG_ON_PAGE(PageLRU(page), page);
4440         VM_BUG_ON(compound && !PageTransHuge(page));
4441
4442         /*
4443          * Prevent mem_cgroup_migrate() from looking at
4444          * page->mem_cgroup of its source page while we change it.
4445          */
4446         ret = -EBUSY;
4447         if (!trylock_page(page))
4448                 goto out;
4449
4450         ret = -EINVAL;
4451         if (page->mem_cgroup != from)
4452                 goto out_unlock;
4453
4454         anon = PageAnon(page);
4455
4456         spin_lock_irqsave(&from->move_lock, flags);
4457
4458         if (!anon && page_mapped(page)) {
4459                 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
4460                                nr_pages);
4461                 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
4462                                nr_pages);
4463         }
4464
4465         /*
4466          * move_lock is grabbed above and the caller set from->moving_account, so
4467          * mem_cgroup_update_page_stat() will serialize updates to PageDirty.
4468          * So the mapping should be stable for dirty pages.
4469          */
4470         if (!anon && PageDirty(page)) {
4471                 struct address_space *mapping = page_mapping(page);
4472
4473                 if (mapping_cap_account_dirty(mapping)) {
4474                         __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_DIRTY],
4475                                        nr_pages);
4476                         __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_DIRTY],
4477                                        nr_pages);
4478                 }
4479         }
4480
4481         if (PageWriteback(page)) {
4482                 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_WRITEBACK],
4483                                nr_pages);
4484                 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_WRITEBACK],
4485                                nr_pages);
4486         }
4487
4488         /*
4489          * It is safe to change page->mem_cgroup here because the page
4490          * is referenced, charged, and isolated - we can't race with
4491          * uncharging, charging, migration, or LRU putback.
4492          */
4493
4494         /* caller should have done css_get */
4495         page->mem_cgroup = to;
4496         spin_unlock_irqrestore(&from->move_lock, flags);
4497
4498         ret = 0;
4499
4500         local_irq_disable();
4501         mem_cgroup_charge_statistics(to, page, compound, nr_pages);
4502         memcg_check_events(to, page);
4503         mem_cgroup_charge_statistics(from, page, compound, -nr_pages);
4504         memcg_check_events(from, page);
4505         local_irq_enable();
4506 out_unlock:
4507         unlock_page(page);
4508 out:
4509         return ret;
4510 }
4511
4512 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
4513                 unsigned long addr, pte_t ptent, union mc_target *target)
4514 {
4515         struct page *page = NULL;
4516         enum mc_target_type ret = MC_TARGET_NONE;
4517         swp_entry_t ent = { .val = 0 };
4518
4519         if (pte_present(ptent))
4520                 page = mc_handle_present_pte(vma, addr, ptent);
4521         else if (is_swap_pte(ptent))
4522                 page = mc_handle_swap_pte(vma, addr, ptent, &ent);
4523         else if (pte_none(ptent))
4524                 page = mc_handle_file_pte(vma, addr, ptent, &ent);
4525
4526         if (!page && !ent.val)
4527                 return ret;
4528         if (page) {
4529                 /*
4530                  * Do only a loose check, without serialization.
4531                  * mem_cgroup_move_account() checks whether the page is valid
4532                  * under LRU exclusion.
4533                  */
4534                 if (page->mem_cgroup == mc.from) {
4535                         ret = MC_TARGET_PAGE;
4536                         if (target)
4537                                 target->page = page;
4538                 }
4539                 if (!ret || !target)
4540                         put_page(page);
4541         }
4542         /* There is a swap entry and a page doesn't exist or isn't charged */
4543         if (ent.val && !ret &&
4544             mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
4545                 ret = MC_TARGET_SWAP;
4546                 if (target)
4547                         target->ent = ent;
4548         }
4549         return ret;
4550 }
4551
4552 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4553 /*
4554  * We don't consider swapped-out or file-mapped pages because THP does not
4555  * support them for now.
4556  * Caller should make sure that pmd_trans_huge(pmd) is true.
4557  */
4558 static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
4559                 unsigned long addr, pmd_t pmd, union mc_target *target)
4560 {
4561         struct page *page = NULL;
4562         enum mc_target_type ret = MC_TARGET_NONE;
4563
4564         page = pmd_page(pmd);
4565         VM_BUG_ON_PAGE(!page || !PageHead(page), page);
4566         if (!(mc.flags & MOVE_ANON))
4567                 return ret;
4568         if (page->mem_cgroup == mc.from) {
4569                 ret = MC_TARGET_PAGE;
4570                 if (target) {
4571                         get_page(page);
4572                         target->page = page;
4573                 }
4574         }
4575         return ret;
4576 }
4577 #else
4578 static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
4579                 unsigned long addr, pmd_t pmd, union mc_target *target)
4580 {
4581         return MC_TARGET_NONE;
4582 }
4583 #endif
4584
4585 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
4586                                         unsigned long addr, unsigned long end,
4587                                         struct mm_walk *walk)
4588 {
4589         struct vm_area_struct *vma = walk->vma;
4590         pte_t *pte;
4591         spinlock_t *ptl;
4592
4593         ptl = pmd_trans_huge_lock(pmd, vma);
4594         if (ptl) {
4595                 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
4596                         mc.precharge += HPAGE_PMD_NR;
4597                 spin_unlock(ptl);
4598                 return 0;
4599         }
4600
4601         if (pmd_trans_unstable(pmd))
4602                 return 0;
4603         pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4604         for (; addr != end; pte++, addr += PAGE_SIZE)
4605                 if (get_mctgt_type(vma, addr, *pte, NULL))
4606                         mc.precharge++; /* increment precharge temporarily */
4607         pte_unmap_unlock(pte - 1, ptl);
4608         cond_resched();
4609
4610         return 0;
4611 }
4612
4613 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
4614 {
4615         unsigned long precharge;
4616
4617         struct mm_walk mem_cgroup_count_precharge_walk = {
4618                 .pmd_entry = mem_cgroup_count_precharge_pte_range,
4619                 .mm = mm,
4620         };
4621         down_read(&mm->mmap_sem);
4622         walk_page_range(0, ~0UL, &mem_cgroup_count_precharge_walk);
4623         up_read(&mm->mmap_sem);
4624
4625         precharge = mc.precharge;
4626         mc.precharge = 0;
4627
4628         return precharge;
4629 }
4630
4631 static int mem_cgroup_precharge_mc(struct mm_struct *mm)
4632 {
4633         unsigned long precharge = mem_cgroup_count_precharge(mm);
4634
4635         VM_BUG_ON(mc.moving_task);
4636         mc.moving_task = current;
4637         return mem_cgroup_do_precharge(precharge);
4638 }
4639
4640 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
4641 static void __mem_cgroup_clear_mc(void)
4642 {
4643         struct mem_cgroup *from = mc.from;
4644         struct mem_cgroup *to = mc.to;
4645
4646         /* we must uncharge all the leftover precharges from mc.to */
4647         if (mc.precharge) {
4648                 cancel_charge(mc.to, mc.precharge);
4649                 mc.precharge = 0;
4650         }
4651         /*
4652          * we didn't uncharge from mc.from in mem_cgroup_move_account(), so
4653          * we must uncharge it here.
4654          */
4655         if (mc.moved_charge) {
4656                 cancel_charge(mc.from, mc.moved_charge);
4657                 mc.moved_charge = 0;
4658         }
4659         /* we must fixup refcnts and charges */
4660         if (mc.moved_swap) {
4661                 /* uncharge swap account from the old cgroup */
4662                 if (!mem_cgroup_is_root(mc.from))
4663                         page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
4664
4665                 /*
4666                  * we charged both to->memory and to->memsw, so we
4667                  * should uncharge to->memory.
4668                  */
4669                 if (!mem_cgroup_is_root(mc.to))
4670                         page_counter_uncharge(&mc.to->memory, mc.moved_swap);
4671
4672                 css_put_many(&mc.from->css, mc.moved_swap);
4673
4674                 /* we've already done css_get(mc.to) */
4675                 mc.moved_swap = 0;
4676         }
4677         memcg_oom_recover(from);
4678         memcg_oom_recover(to);
4679         wake_up_all(&mc.waitq);
4680 }
4681
4682 static void mem_cgroup_clear_mc(void)
4683 {
4684         struct mm_struct *mm = mc.mm;
4685
4686         /*
4687          * we must clear moving_task before waking up waiters at the end of
4688          * task migration.
4689          */
4690         mc.moving_task = NULL;
4691         __mem_cgroup_clear_mc();
4692         spin_lock(&mc.lock);
4693         mc.from = NULL;
4694         mc.to = NULL;
4695         mc.mm = NULL;
4696         spin_unlock(&mc.lock);
4697
4698         mmput(mm);
4699 }
4700
4701 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
4702 {
4703         struct cgroup_subsys_state *css;
4704         struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
4705         struct mem_cgroup *from;
4706         struct task_struct *leader, *p;
4707         struct mm_struct *mm;
4708         unsigned long move_flags;
4709         int ret = 0;
4710
4711         /* charge immigration isn't supported on the default hierarchy */
4712         if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
4713                 return 0;
4714
4715         /*
4716          * Multi-process migrations only happen on the default hierarchy
4717          * where charge immigration is not used.  Perform charge
4718          * immigration if @tset contains a leader and whine if there are
4719          * multiple.
4720          */
4721         p = NULL;
4722         cgroup_taskset_for_each_leader(leader, css, tset) {
4723                 WARN_ON_ONCE(p);
4724                 p = leader;
4725                 memcg = mem_cgroup_from_css(css);
4726         }
4727         if (!p)
4728                 return 0;
4729
4730         /*
4731          * We are now committed to this value, whatever it is. Changes in this
4732          * tunable will only affect upcoming migrations, not the current one.
4733          * So we need to save it, and keep it going.
4734          */
4735         move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
4736         if (!move_flags)
4737                 return 0;
4738
4739         from = mem_cgroup_from_task(p);
4740
4741         VM_BUG_ON(from == memcg);
4742
4743         mm = get_task_mm(p);
4744         if (!mm)
4745                 return 0;
4746         /* We move charges only when we move the owner of the mm */
4747         if (mm->owner == p) {
4748                 VM_BUG_ON(mc.from);
4749                 VM_BUG_ON(mc.to);
4750                 VM_BUG_ON(mc.precharge);
4751                 VM_BUG_ON(mc.moved_charge);
4752                 VM_BUG_ON(mc.moved_swap);
4753
4754                 spin_lock(&mc.lock);
4755                 mc.mm = mm;
4756                 mc.from = from;
4757                 mc.to = memcg;
4758                 mc.flags = move_flags;
4759                 spin_unlock(&mc.lock);
4760                 /* We set mc.moving_task later */
4761
4762                 ret = mem_cgroup_precharge_mc(mm);
4763                 if (ret)
4764                         mem_cgroup_clear_mc();
4765         } else {
4766                 mmput(mm);
4767         }
4768         return ret;
4769 }
4770
4771 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
4772 {
4773         if (mc.to)
4774                 mem_cgroup_clear_mc();
4775 }
4776
4777 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
4778                                 unsigned long addr, unsigned long end,
4779                                 struct mm_walk *walk)
4780 {
4781         int ret = 0;
4782         struct vm_area_struct *vma = walk->vma;
4783         pte_t *pte;
4784         spinlock_t *ptl;
4785         enum mc_target_type target_type;
4786         union mc_target target;
4787         struct page *page;
4788
4789         ptl = pmd_trans_huge_lock(pmd, vma);
4790         if (ptl) {
4791                 if (mc.precharge < HPAGE_PMD_NR) {
4792                         spin_unlock(ptl);
4793                         return 0;
4794                 }
4795                 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
4796                 if (target_type == MC_TARGET_PAGE) {
4797                         page = target.page;
4798                         if (!isolate_lru_page(page)) {
4799                                 if (!mem_cgroup_move_account(page, true,
4800                                                              mc.from, mc.to)) {
4801                                         mc.precharge -= HPAGE_PMD_NR;
4802                                         mc.moved_charge += HPAGE_PMD_NR;
4803                                 }
4804                                 putback_lru_page(page);
4805                         }
4806                         put_page(page);
4807                 }
4808                 spin_unlock(ptl);
4809                 return 0;
4810         }
4811
4812         if (pmd_trans_unstable(pmd))
4813                 return 0;
4814 retry:
4815         pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4816         for (; addr != end; addr += PAGE_SIZE) {
4817                 pte_t ptent = *(pte++);
4818                 swp_entry_t ent;
4819
4820                 if (!mc.precharge)
4821                         break;
4822
4823                 switch (get_mctgt_type(vma, addr, ptent, &target)) {
4824                 case MC_TARGET_PAGE:
4825                         page = target.page;
4826                         /*
4827                          * We can have a part of a split pmd here. Moving it
4828                          * could be done, but it would be too convoluted, so simply
4829                          * ignore such a partial THP and keep it in the original
4830                          * memcg. There should be somebody mapping the head.
4831                          */
4832                         if (PageTransCompound(page))
4833                                 goto put;
4834                         if (isolate_lru_page(page))
4835                                 goto put;
4836                         if (!mem_cgroup_move_account(page, false,
4837                                                 mc.from, mc.to)) {
4838                                 mc.precharge--;
4839                                 /* we uncharge from mc.from later. */
4840                                 mc.moved_charge++;
4841                         }
4842                         putback_lru_page(page);
4843 put:                    /* get_mctgt_type() took a reference on the page */
4844                         put_page(page);
4845                         break;
4846                 case MC_TARGET_SWAP:
4847                         ent = target.ent;
4848                         if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
4849                                 mc.precharge--;
4850                                 /* we fixup refcnts and charges later. */
4851                                 mc.moved_swap++;
4852                         }
4853                         break;
4854                 default:
4855                         break;
4856                 }
4857         }
4858         pte_unmap_unlock(pte - 1, ptl);
4859         cond_resched();
4860
4861         if (addr != end) {
4862                 /*
4863                  * We have consumed all the precharges we got in can_attach().
4864                  * Try to charge one page at a time, but don't make any further
4865                  * charges to mc.to once a charge has failed during this attach
4866                  * phase.
4867                  */
4868                 ret = mem_cgroup_do_precharge(1);
4869                 if (!ret)
4870                         goto retry;
4871         }
4872
4873         return ret;
4874 }
4875
4876 static void mem_cgroup_move_charge(void)
4877 {
4878         struct mm_walk mem_cgroup_move_charge_walk = {
4879                 .pmd_entry = mem_cgroup_move_charge_pte_range,
4880                 .mm = mc.mm,
4881         };
4882
4883         lru_add_drain_all();
4884         /*
4885          * Signal lock_page_memcg() to take the memcg's move_lock
4886          * while we're moving its pages to another memcg. Then wait
4887          * for already started RCU-only updates to finish.
4888          */
4889         atomic_inc(&mc.from->moving_account);
4890         synchronize_rcu();
4891 retry:
4892         if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) {
4893                 /*
4894                  * Someone who is holding the mmap_sem might be waiting on our
4895                  * waitq. So we cancel all extra charges, wake up all waiters,
4896                  * and retry. Because we cancel precharges, we might not be able
4897                  * to move enough charges, but moving charge is a best-effort
4898                  * feature anyway, so it wouldn't be a big problem.
4899                  */
4900                 __mem_cgroup_clear_mc();
4901                 cond_resched();
4902                 goto retry;
4903         }
4904         /*
4905          * When we have consumed all precharges and fail to make any
4906          * additional charge, the page walk just aborts.
4907          */
4908         walk_page_range(0, ~0UL, &mem_cgroup_move_charge_walk);
4909         up_read(&mc.mm->mmap_sem);
4910         atomic_dec(&mc.from->moving_account);
4911 }
4912
4913 static void mem_cgroup_move_task(void)
4914 {
4915         if (mc.to) {
4916                 mem_cgroup_move_charge();
4917                 mem_cgroup_clear_mc();
4918         }
4919 }
4920 #else   /* !CONFIG_MMU */
4921 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
4922 {
4923         return 0;
4924 }
4925 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
4926 {
4927 }
4928 static void mem_cgroup_move_task(void)
4929 {
4930 }
4931 #endif
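
/*
 * Illustration (a sketch; the cgroupfs mount point and values are assumed,
 * not taken from this file): on the legacy hierarchy, charge moving is opted
 * into per memcg before moving a task.  MOVE_ANON is bit 0 and MOVE_FILE is
 * bit 1 of memory.move_charge_at_immigrate, so writing 3 requests both:
 *
 *	# echo 3 > /sys/fs/cgroup/memory/B/memory.move_charge_at_immigrate
 *	# echo $PID > /sys/fs/cgroup/memory/B/cgroup.procs
 *
 * can_attach() then precharges against B, post_attach() walks the mm and
 * moves matching pages, and cancel_attach() drops the precharge if the
 * migration is aborted.
 */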
4932
4933 /*
4934  * Cgroup retains root cgroups across [un]mount cycles, making it necessary
4935  * to verify whether we're attached to the default hierarchy on each mount
4936  * attempt.
4937  */
4938 static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
4939 {
4940         /*
4941          * use_hierarchy is forced on the default hierarchy.  cgroup core
4942          * guarantees that @root doesn't have any children, so turning it
4943          * on for the root memcg is enough.
4944          */
4945         if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
4946                 root_mem_cgroup->use_hierarchy = true;
4947         else
4948                 root_mem_cgroup->use_hierarchy = false;
4949 }
4950
4951 static u64 memory_current_read(struct cgroup_subsys_state *css,
4952                                struct cftype *cft)
4953 {
4954         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4955
4956         return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
4957 }
4958
4959 static int memory_low_show(struct seq_file *m, void *v)
4960 {
4961         struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
4962         unsigned long low = READ_ONCE(memcg->low);
4963
4964         if (low == PAGE_COUNTER_MAX)
4965                 seq_puts(m, "max\n");
4966         else
4967                 seq_printf(m, "%llu\n", (u64)low * PAGE_SIZE);
4968
4969         return 0;
4970 }
4971
4972 static ssize_t memory_low_write(struct kernfs_open_file *of,
4973                                 char *buf, size_t nbytes, loff_t off)
4974 {
4975         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4976         unsigned long low;
4977         int err;
4978
4979         buf = strstrip(buf);
4980         err = page_counter_memparse(buf, "max", &low);
4981         if (err)
4982                 return err;
4983
4984         memcg->low = low;
4985
4986         return nbytes;
4987 }
4988
4989 static int memory_high_show(struct seq_file *m, void *v)
4990 {
4991         struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
4992         unsigned long high = READ_ONCE(memcg->high);
4993
4994         if (high == PAGE_COUNTER_MAX)
4995                 seq_puts(m, "max\n");
4996         else
4997                 seq_printf(m, "%llu\n", (u64)high * PAGE_SIZE);
4998
4999         return 0;
5000 }
5001
5002 static ssize_t memory_high_write(struct kernfs_open_file *of,
5003                                  char *buf, size_t nbytes, loff_t off)
5004 {
5005         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5006         unsigned long nr_pages;
5007         unsigned long high;
5008         int err;
5009
5010         buf = strstrip(buf);
5011         err = page_counter_memparse(buf, "max", &high);
5012         if (err)
5013                 return err;
5014
5015         memcg->high = high;
5016
5017         nr_pages = page_counter_read(&memcg->memory);
5018         if (nr_pages > high)
5019                 try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
5020                                              GFP_KERNEL, true);
5021
5022         memcg_wb_domain_size_changed(memcg);
5023         return nbytes;
5024 }
5025
5026 static int memory_max_show(struct seq_file *m, void *v)
5027 {
5028         struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5029         unsigned long max = READ_ONCE(memcg->memory.limit);
5030
5031         if (max == PAGE_COUNTER_MAX)
5032                 seq_puts(m, "max\n");
5033         else
5034                 seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);
5035
5036         return 0;
5037 }
5038
5039 static ssize_t memory_max_write(struct kernfs_open_file *of,
5040                                 char *buf, size_t nbytes, loff_t off)
5041 {
5042         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5043         unsigned int nr_reclaims = MEM_CGROUP_RECLAIM_RETRIES;
5044         bool drained = false;
5045         unsigned long max;
5046         int err;
5047
5048         buf = strstrip(buf);
5049         err = page_counter_memparse(buf, "max", &max);
5050         if (err)
5051                 return err;
5052
5053         xchg(&memcg->memory.limit, max);
5054
5055         for (;;) {
5056                 unsigned long nr_pages = page_counter_read(&memcg->memory);
5057
5058                 if (nr_pages <= max)
5059                         break;
5060
5061                 if (signal_pending(current)) {
5062                         err = -EINTR;
5063                         break;
5064                 }
5065
5066                 if (!drained) {
5067                         drain_all_stock(memcg);
5068                         drained = true;
5069                         continue;
5070                 }
5071
5072                 if (nr_reclaims) {
5073                         if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
5074                                                           GFP_KERNEL, true))
5075                                 nr_reclaims--;
5076                         continue;
5077                 }
5078
5079                 mem_cgroup_events(memcg, MEMCG_OOM, 1);
5080                 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
5081                         break;
5082         }
5083
5084         memcg_wb_domain_size_changed(memcg);
5085         return nbytes;
5086 }
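
/*
 * Interface sketch (paths assumed, not taken from this file): on the unified
 * hierarchy, writing memory.max applies the new limit immediately and then
 * reclaims the excess, e.g.
 *
 *	# echo 200M > /sys/fs/cgroup/foo/memory.max
 *
 * As implemented above, reclaim is retried MEM_CGROUP_RECLAIM_RETRIES times,
 * draining the per-cpu charge stock once in between; if usage still exceeds
 * the new limit, a MEMCG_OOM event is recorded and the OOM killer is invoked
 * within the group.
 */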
5087
5088 static int memory_events_show(struct seq_file *m, void *v)
5089 {
5090         struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5091
5092         seq_printf(m, "low %lu\n", mem_cgroup_read_events(memcg, MEMCG_LOW));
5093         seq_printf(m, "high %lu\n", mem_cgroup_read_events(memcg, MEMCG_HIGH));
5094         seq_printf(m, "max %lu\n", mem_cgroup_read_events(memcg, MEMCG_MAX));
5095         seq_printf(m, "oom %lu\n", mem_cgroup_read_events(memcg, MEMCG_OOM));
5096
5097         return 0;
5098 }
5099
5100 static int memory_stat_show(struct seq_file *m, void *v)
5101 {
5102         struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5103         unsigned long stat[MEMCG_NR_STAT];
5104         unsigned long events[MEMCG_NR_EVENTS];
5105         int i;
5106
5107         /*
5108          * Provide statistics on the state of the memory subsystem as
5109          * well as cumulative event counters that show past behavior.
5110          *
5111          * This list is ordered following a combination of these gradients:
5112          * 1) generic big picture -> specifics and details
5113          * 2) reflecting userspace activity -> reflecting kernel heuristics
5114          *
5115          * Current memory state:
5116          */
5117
5118         tree_stat(memcg, stat);
5119         tree_events(memcg, events);
5120
5121         seq_printf(m, "anon %llu\n",
5122                    (u64)stat[MEM_CGROUP_STAT_RSS] * PAGE_SIZE);
5123         seq_printf(m, "file %llu\n",
5124                    (u64)stat[MEM_CGROUP_STAT_CACHE] * PAGE_SIZE);
5125         seq_printf(m, "kernel_stack %llu\n",
5126                    (u64)stat[MEMCG_KERNEL_STACK] * PAGE_SIZE);
5127         seq_printf(m, "slab %llu\n",
5128                    (u64)(stat[MEMCG_SLAB_RECLAIMABLE] +
5129                          stat[MEMCG_SLAB_UNRECLAIMABLE]) * PAGE_SIZE);
5130         seq_printf(m, "sock %llu\n",
5131                    (u64)stat[MEMCG_SOCK] * PAGE_SIZE);
5132
5133         seq_printf(m, "file_mapped %llu\n",
5134                    (u64)stat[MEM_CGROUP_STAT_FILE_MAPPED] * PAGE_SIZE);
5135         seq_printf(m, "file_dirty %llu\n",
5136                    (u64)stat[MEM_CGROUP_STAT_DIRTY] * PAGE_SIZE);
5137         seq_printf(m, "file_writeback %llu\n",
5138                    (u64)stat[MEM_CGROUP_STAT_WRITEBACK] * PAGE_SIZE);
5139
5140         for (i = 0; i < NR_LRU_LISTS; i++) {
5141                 struct mem_cgroup *mi;
5142                 unsigned long val = 0;
5143
5144                 for_each_mem_cgroup_tree(mi, memcg)
5145                         val += mem_cgroup_nr_lru_pages(mi, BIT(i));
5146                 seq_printf(m, "%s %llu\n",
5147                            mem_cgroup_lru_names[i], (u64)val * PAGE_SIZE);
5148         }
5149
5150         seq_printf(m, "slab_reclaimable %llu\n",
5151                    (u64)stat[MEMCG_SLAB_RECLAIMABLE] * PAGE_SIZE);
5152         seq_printf(m, "slab_unreclaimable %llu\n",
5153                    (u64)stat[MEMCG_SLAB_UNRECLAIMABLE] * PAGE_SIZE);
5154
5155         /* Accumulated memory events */
5156
5157         seq_printf(m, "pgfault %lu\n",
5158                    events[MEM_CGROUP_EVENTS_PGFAULT]);
5159         seq_printf(m, "pgmajfault %lu\n",
5160                    events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
5161
5162         return 0;
5163 }
5164
5165 static struct cftype memory_files[] = {
5166         {
5167                 .name = "current",
5168                 .flags = CFTYPE_NOT_ON_ROOT,
5169                 .read_u64 = memory_current_read,
5170         },
5171         {
5172                 .name = "low",
5173                 .flags = CFTYPE_NOT_ON_ROOT,
5174                 .seq_show = memory_low_show,
5175                 .write = memory_low_write,
5176         },
5177         {
5178                 .name = "high",
5179                 .flags = CFTYPE_NOT_ON_ROOT,
5180                 .seq_show = memory_high_show,
5181                 .write = memory_high_write,
5182         },
5183         {
5184                 .name = "max",
5185                 .flags = CFTYPE_NOT_ON_ROOT,
5186                 .seq_show = memory_max_show,
5187                 .write = memory_max_write,
5188         },
5189         {
5190                 .name = "events",
5191                 .flags = CFTYPE_NOT_ON_ROOT,
5192                 .file_offset = offsetof(struct mem_cgroup, events_file),
5193                 .seq_show = memory_events_show,
5194         },
5195         {
5196                 .name = "stat",
5197                 .flags = CFTYPE_NOT_ON_ROOT,
5198                 .seq_show = memory_stat_show,
5199         },
5200         { }     /* terminate */
5201 };
5202
5203 struct cgroup_subsys memory_cgrp_subsys = {
5204         .css_alloc = mem_cgroup_css_alloc,
5205         .css_online = mem_cgroup_css_online,
5206         .css_offline = mem_cgroup_css_offline,
5207         .css_released = mem_cgroup_css_released,
5208         .css_free = mem_cgroup_css_free,
5209         .css_reset = mem_cgroup_css_reset,
5210         .can_attach = mem_cgroup_can_attach,
5211         .cancel_attach = mem_cgroup_cancel_attach,
5212         .post_attach = mem_cgroup_move_task,
5213         .bind = mem_cgroup_bind,
5214         .dfl_cftypes = memory_files,
5215         .legacy_cftypes = mem_cgroup_legacy_files,
5216         .early_init = 0,
5217 };
5218
5219 /**
5220  * mem_cgroup_low - check if memory consumption is below the normal range
5221  * @root: the highest ancestor to consider
5222  * @memcg: the memory cgroup to check
5223  *
5224  * Returns %true if memory consumption of @memcg, and that of all
5225  * configurable ancestors up to @root, is below the normal range.
5226  */
5227 bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg)
5228 {
5229         if (mem_cgroup_disabled())
5230                 return false;
5231
5232         /*
5233          * The toplevel group doesn't have a configurable range, so
5234          * it's never low when looked at directly, and it is not
5235          * considered an ancestor when assessing the hierarchy.
5236          */
5237
5238         if (memcg == root_mem_cgroup)
5239                 return false;
5240
5241         if (page_counter_read(&memcg->memory) >= memcg->low)
5242                 return false;
5243
5244         while (memcg != root) {
5245                 memcg = parent_mem_cgroup(memcg);
5246
5247                 if (memcg == root_mem_cgroup)
5248                         break;
5249
5250                 if (page_counter_read(&memcg->memory) >= memcg->low)
5251                         return false;
5252         }
5253         return true;
5254 }
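
/*
 * Usage sketch (simplified; the surrounding reclaim loop and the may_thrash
 * flag are assumed, not taken from this file): a memcg-aware reclaim pass can
 * use mem_cgroup_low() to skip groups that are still within their memory.low
 * protection and only record a MEMCG_LOW event when it has to dip into them:
 *
 *	for_each_mem_cgroup_tree(memcg, root) {
 *		if (mem_cgroup_low(root, memcg)) {
 *			if (!may_thrash)
 *				continue;
 *			mem_cgroup_events(memcg, MEMCG_LOW, 1);
 *		}
 *		... reclaim from memcg ...
 *	}
 */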
5255
5256 /**
5257  * mem_cgroup_try_charge - try charging a page
5258  * @page: page to charge
5259  * @mm: mm context of the victim
5260  * @gfp_mask: reclaim mode
5261  * @memcgp: charged memcg return
 * @compound: charge the page as compound or small page
5262  *
5263  * Try to charge @page to the memcg that @mm belongs to, reclaiming
5264  * pages according to @gfp_mask if necessary.
5265  *
5266  * Returns 0 on success, with *@memcgp pointing to the charged memcg.
5267  * Otherwise, an error code is returned.
5268  *
5269  * After page->mapping has been set up, the caller must finalize the
5270  * charge with mem_cgroup_commit_charge().  Or abort the transaction
5271  * with mem_cgroup_cancel_charge() in case page instantiation fails.
5272  */
5273 int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
5274                           gfp_t gfp_mask, struct mem_cgroup **memcgp,
5275                           bool compound)
5276 {
5277         struct mem_cgroup *memcg = NULL;
5278         unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
5279         int ret = 0;
5280
5281         if (mem_cgroup_disabled())
5282                 goto out;
5283
5284         if (PageSwapCache(page)) {
5285                 /*
5286                  * Every swap fault against a single page tries to charge the
5287                  * page, bail as early as possible.  shmem_unuse() encounters
5288                  * already charged pages, too.  The USED bit is protected by
5289                  * the page lock, which serializes swap cache removal, which
5290                  * in turn serializes uncharging.
5291                  */
5292                 VM_BUG_ON_PAGE(!PageLocked(page), page);
5293                 if (page->mem_cgroup)
5294                         goto out;
5295
5296                 if (do_swap_account) {
5297                         swp_entry_t ent = { .val = page_private(page), };
5298                         unsigned short id = lookup_swap_cgroup_id(ent);
5299
5300                         rcu_read_lock();
5301                         memcg = mem_cgroup_from_id(id);
5302                         if (memcg && !css_tryget_online(&memcg->css))
5303                                 memcg = NULL;
5304                         rcu_read_unlock();
5305                 }
5306         }
5307
5308         if (!memcg)
5309                 memcg = get_mem_cgroup_from_mm(mm);
5310
5311         ret = try_charge(memcg, gfp_mask, nr_pages);
5312
5313         css_put(&memcg->css);
5314 out:
5315         *memcgp = memcg;
5316         return ret;
5317 }
5318
5319 /**
5320  * mem_cgroup_commit_charge - commit a page charge
5321  * @page: page to charge
5322  * @memcg: memcg to charge the page to
5323  * @lrucare: page might be on LRU already
 * @compound: charge the page as compound or small page
5324  *
5325  * Finalize a charge transaction started by mem_cgroup_try_charge(),
5326  * after page->mapping has been set up.  This must happen atomically
5327  * as part of the page instantiation, i.e. under the page table lock
5328  * for anonymous pages, under the page lock for page and swap cache.
5329  *
5330  * In addition, the page must not be on the LRU during the commit, to
5331  * prevent racing with task migration.  If it might be, use @lrucare.
5332  *
5333  * Use mem_cgroup_cancel_charge() to cancel the transaction instead.
5334  */
5335 void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
5336                               bool lrucare, bool compound)
5337 {
5338         unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
5339
5340         VM_BUG_ON_PAGE(!page->mapping, page);
5341         VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page);
5342
5343         if (mem_cgroup_disabled())
5344                 return;
5345         /*
5346          * Swap faults will attempt to charge the same page multiple
5347          * times.  But reuse_swap_page() might have removed the page
5348          * from swapcache already, so we can't check PageSwapCache().
5349          */
5350         if (!memcg)
5351                 return;
5352
5353         commit_charge(page, memcg, lrucare);
5354
5355         local_irq_disable();
5356         mem_cgroup_charge_statistics(memcg, page, compound, nr_pages);
5357         memcg_check_events(memcg, page);
5358         local_irq_enable();
5359
5360         if (do_memsw_account() && PageSwapCache(page)) {
5361                 swp_entry_t entry = { .val = page_private(page) };
5362                 /*
5363                  * The swap entry might not get freed for a long time,
5364                  * so let's not wait for it.  The page already received a
5365                  * memory+swap charge; drop the swap entry duplicate.
5366                  */
5367                 mem_cgroup_uncharge_swap(entry);
5368         }
5369 }
5370
5371 /**
5372  * mem_cgroup_cancel_charge - cancel a page charge
5373  * @page: page whose charge to cancel
5374  * @memcg: memcg the charge was made against
 * @compound: whether the cancelled charge was for a compound page
5375  *
5376  * Cancel a charge transaction started by mem_cgroup_try_charge().
5377  */
5378 void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
5379                 bool compound)
5380 {
5381         unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
5382
5383         if (mem_cgroup_disabled())
5384                 return;
5385         /*
5386          * Swap faults will attempt to charge the same page multiple
5387          * times.  But reuse_swap_page() might have removed the page
5388          * from swapcache already, so we can't check PageSwapCache().
5389          */
5390         if (!memcg)
5391                 return;
5392
5393         cancel_charge(memcg, nr_pages);
5394 }
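
/*
 * Charge transaction sketch (modeled on a typical anonymous fault path; the
 * surrounding fault handling is elided and assumed): a new page is charged
 * with mem_cgroup_try_charge(), mapped, and then committed; if instantiation
 * fails after the charge was taken, it is cancelled instead:
 *
 *	struct mem_cgroup *memcg;
 *
 *	if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg, false))
 *		goto oom;
 *	...
 *	page_add_new_anon_rmap(page, vma, address, false);
 *	mem_cgroup_commit_charge(page, memcg, false, false);
 *	lru_cache_add_active_or_unevictable(page, vma);
 *	...
 * fail:
 *	mem_cgroup_cancel_charge(page, memcg, false);
 */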
5395
5396 static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
5397                            unsigned long nr_anon, unsigned long nr_file,
5398                            unsigned long nr_huge, struct page *dummy_page)
5399 {
5400         unsigned long nr_pages = nr_anon + nr_file;
5401         unsigned long flags;
5402
5403         if (!mem_cgroup_is_root(memcg)) {
5404                 page_counter_uncharge(&memcg->memory, nr_pages);
5405                 if (do_memsw_account())
5406                         page_counter_uncharge(&memcg->memsw, nr_pages);
5407                 memcg_oom_recover(memcg);
5408         }
5409
5410         local_irq_save(flags);
5411         __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon);
5412         __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file);
5413         __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge);
5414         __this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT], pgpgout);
5415         __this_cpu_add(memcg->stat->nr_page_events, nr_pages);
5416         memcg_check_events(memcg, dummy_page);
5417         local_irq_restore(flags);
5418
5419         if (!mem_cgroup_is_root(memcg))
5420                 css_put_many(&memcg->css, nr_pages);
5421 }
5422
5423 static void uncharge_list(struct list_head *page_list)
5424 {
5425         struct mem_cgroup *memcg = NULL;
5426         unsigned long nr_anon = 0;
5427         unsigned long nr_file = 0;
5428         unsigned long nr_huge = 0;
5429         unsigned long pgpgout = 0;
5430         struct list_head *next;
5431         struct page *page;
5432
5433         /*
5434          * Note that the list can be a single page->lru; hence the
5435          * do-while loop instead of a simple list_for_each_entry().
5436          */
5437         next = page_list->next;
5438         do {
5439                 unsigned int nr_pages = 1;
5440
5441                 page = list_entry(next, struct page, lru);
5442                 next = page->lru.next;
5443
5444                 VM_BUG_ON_PAGE(PageLRU(page), page);
5445                 VM_BUG_ON_PAGE(page_count(page), page);
5446
5447                 if (!page->mem_cgroup)
5448                         continue;
5449
5450                 /*
5451                  * Nobody should be changing or seriously looking at
5452          * page->mem_cgroup at this point; we have fully
5453                  * exclusive access to the page.
5454                  */
5455
5456                 if (memcg != page->mem_cgroup) {
5457                         if (memcg) {
5458                                 uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
5459                                                nr_huge, page);
5460                                 pgpgout = nr_anon = nr_file = nr_huge = 0;
5461                         }
5462                         memcg = page->mem_cgroup;
5463                 }
5464
5465                 if (PageTransHuge(page)) {
5466                         nr_pages <<= compound_order(page);
5467                         VM_BUG_ON_PAGE(!PageTransHuge(page), page);
5468                         nr_huge += nr_pages;
5469                 }
5470
5471                 if (PageAnon(page))
5472                         nr_anon += nr_pages;
5473                 else
5474                         nr_file += nr_pages;
5475
5476                 page->mem_cgroup = NULL;
5477
5478                 pgpgout++;
5479         } while (next != page_list);
5480
5481         if (memcg)
5482                 uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
5483                                nr_huge, page);
5484 }
5485
5486 /**
5487  * mem_cgroup_uncharge - uncharge a page
5488  * @page: page to uncharge
5489  *
5490  * Uncharge a page previously charged with mem_cgroup_try_charge() and
5491  * mem_cgroup_commit_charge().
5492  */
5493 void mem_cgroup_uncharge(struct page *page)
5494 {
5495         if (mem_cgroup_disabled())
5496                 return;
5497
5498         /* Don't touch page->lru of any random page, pre-check: */
5499         if (!page->mem_cgroup)
5500                 return;
5501
5502         INIT_LIST_HEAD(&page->lru);
5503         uncharge_list(&page->lru);
5504 }
5505
5506 /**
5507  * mem_cgroup_uncharge_list - uncharge a list of pages
5508  * @page_list: list of pages to uncharge
5509  *
5510  * Uncharge a list of pages previously charged with
5511  * mem_cgroup_try_charge() and mem_cgroup_commit_charge().
5512  */
5513 void mem_cgroup_uncharge_list(struct list_head *page_list)
5514 {
5515         if (mem_cgroup_disabled())
5516                 return;
5517
5518         if (!list_empty(page_list))
5519                 uncharge_list(page_list);
5520 }
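
/*
 * Usage sketch (assumed caller, similar to the page freeing paths): pages
 * dropping their last reference are gathered on a local list and uncharged
 * in one batch before being handed back to the page allocator:
 *
 *	LIST_HEAD(pages_to_free);
 *	...
 *	list_add(&page->lru, &pages_to_free);
 *	...
 *	mem_cgroup_uncharge_list(&pages_to_free);
 *	free_hot_cold_page_list(&pages_to_free, cold);
 */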
5521
5522 /**
5523  * mem_cgroup_migrate - charge a page's replacement
5524  * @oldpage: currently circulating page
5525  * @newpage: replacement page
5526  *
5527  * Charge @newpage as a replacement page for @oldpage. @oldpage will
5528  * be uncharged upon free.
5529  *
5530  * Both pages must be locked, @newpage->mapping must be set up.
5531  */
5532 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
5533 {
5534         struct mem_cgroup *memcg;
5535         unsigned int nr_pages;
5536         bool compound;
5537
5538         VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
5539         VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
5540         VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage);
5541         VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage),
5542                        newpage);
5543
5544         if (mem_cgroup_disabled())
5545                 return;
5546
5547         /* Page cache replacement: new page already charged? */
5548         if (newpage->mem_cgroup)
5549                 return;
5550
5551         /* Swapcache readahead pages can get replaced before being charged */
5552         memcg = oldpage->mem_cgroup;
5553         if (!memcg)
5554                 return;
5555
5556         /* Force-charge the new page. The old one will be freed soon */
5557         compound = PageTransHuge(newpage);
5558         nr_pages = compound ? hpage_nr_pages(newpage) : 1;
5559
5560         page_counter_charge(&memcg->memory, nr_pages);
5561         if (do_memsw_account())
5562                 page_counter_charge(&memcg->memsw, nr_pages);
5563         css_get_many(&memcg->css, nr_pages);
5564
5565         commit_charge(newpage, memcg, false);
5566
5567         local_irq_disable();
5568         mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages);
5569         memcg_check_events(memcg, newpage);
5570         local_irq_enable();
5571 }
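
/*
 * Usage sketch (assumed caller): a replacement path such as page migration
 * or page cache replacement, with both pages locked and newpage->mapping
 * already set up, transfers the memcg binding with:
 *
 *	mem_cgroup_migrate(oldpage, newpage);
 *
 * @newpage is charged against @oldpage's memcg, and @oldpage is uncharged
 * normally when its last reference is dropped.
 */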
5572
5573 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
5574 EXPORT_SYMBOL(memcg_sockets_enabled_key);
5575
5576 void sock_update_memcg(struct sock *sk)
5577 {
5578         struct mem_cgroup *memcg;
5579
5580         /* Socket cloning can throw us here with sk->sk_memcg already
5581          * filled. It won't, however, necessarily happen from
5582          * process context. So the test for root memcg given
5583          * the current task's memcg won't help us in this case.
5584          *
5585          * Respecting the original socket's memcg is a better
5586          * decision in this case.
5587          */
5588         if (sk->sk_memcg) {
5589                 BUG_ON(mem_cgroup_is_root(sk->sk_memcg));
5590                 css_get(&sk->sk_memcg->css);
5591                 return;
5592         }
5593
5594         rcu_read_lock();
5595         memcg = mem_cgroup_from_task(current);
5596         if (memcg == root_mem_cgroup)
5597                 goto out;
5598         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
5599                 goto out;
5600         if (css_tryget_online(&memcg->css))
5601                 sk->sk_memcg = memcg;
5602 out:
5603         rcu_read_unlock();
5604 }
5605 EXPORT_SYMBOL(sock_update_memcg);
5606
5607 void sock_release_memcg(struct sock *sk)
5608 {
5609         WARN_ON(!sk->sk_memcg);
5610         css_put(&sk->sk_memcg->css);
5611 }
5612
5613 /**
5614  * mem_cgroup_charge_skmem - charge socket memory
5615  * @memcg: memcg to charge
5616  * @nr_pages: number of pages to charge
5617  *
5618  * Charges @nr_pages to @memcg. Returns %true if the charge fit within
5619  * @memcg's configured limit, %false if the charge had to be forced.
5620  */
5621 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
5622 {
5623         gfp_t gfp_mask = GFP_KERNEL;
5624
5625         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
5626                 struct page_counter *fail;
5627
5628                 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
5629                         memcg->tcpmem_pressure = 0;
5630                         return true;
5631                 }
5632                 page_counter_charge(&memcg->tcpmem, nr_pages);
5633                 memcg->tcpmem_pressure = 1;
5634                 return false;
5635         }
5636
5637         /* Don't block in the packet receive path */
5638         if (in_softirq())
5639                 gfp_mask = GFP_NOWAIT;
5640
5641         this_cpu_add(memcg->stat->count[MEMCG_SOCK], nr_pages);
5642
5643         if (try_charge(memcg, gfp_mask, nr_pages) == 0)
5644                 return true;
5645
5646         try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages);
5647         return false;
5648 }
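
/*
 * Usage sketch (simplified from the socket memory accounting path; the
 * surrounding variable names are assumed): a protocol that wants to grow a
 * socket's buffers by @amt pages checks the owning memcg first:
 *
 *	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
 *	    !mem_cgroup_charge_skmem(sk->sk_memcg, amt))
 *		goto suppress_allocation;
 *
 * and gives the pages back with mem_cgroup_uncharge_skmem() when the
 * buffers shrink.
 */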
5649
5650 /**
5651  * mem_cgroup_uncharge_skmem - uncharge socket memory
5652  * @memcg: memcg to uncharge
5653  * @nr_pages: number of pages to uncharge
5654  */
5655 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
5656 {
5657         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
5658                 page_counter_uncharge(&memcg->tcpmem, nr_pages);
5659                 return;
5660         }
5661
5662         this_cpu_sub(memcg->stat->count[MEMCG_SOCK], nr_pages);
5663
5664         page_counter_uncharge(&memcg->memory, nr_pages);
5665         css_put_many(&memcg->css, nr_pages);
5666 }
5667
5668 static int __init cgroup_memory(char *s)
5669 {
5670         char *token;
5671
5672         while ((token = strsep(&s, ",")) != NULL) {
5673                 if (!*token)
5674                         continue;
5675                 if (!strcmp(token, "nosocket"))
5676                         cgroup_memory_nosocket = true;
5677                 if (!strcmp(token, "nokmem"))
5678                         cgroup_memory_nokmem = true;
5679         }
5680         return 0;
5681 }
5682 __setup("cgroup.memory=", cgroup_memory);
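
/*
 * Example (kernel command line): "cgroup.memory=nosocket,nokmem" disables
 * socket memory accounting and kernel memory accounting, respectively.
 */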
5683
5684 /*
5685  * subsys_initcall() for memory controller.
5686  *
5687  * Some parts, like hotcpu_notifier(), have to be initialized from this context
5688  * because of lock dependencies (cgroup_lock -> cpu hotplug), but basically
5689  * everything that doesn't depend on a specific mem_cgroup structure should
5690  * be initialized from here.
5691  */
5692 static int __init mem_cgroup_init(void)
5693 {
5694         int cpu, node;
5695
5696         hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
5697
5698         for_each_possible_cpu(cpu)
5699                 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
5700                           drain_local_stock);
5701
5702         for_each_node(node) {
5703                 struct mem_cgroup_tree_per_node *rtpn;
5704                 int zone;
5705
5706                 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
5707                                     node_online(node) ? node : NUMA_NO_NODE);
5708
5709                 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
5710                         struct mem_cgroup_tree_per_zone *rtpz;
5711
5712                         rtpz = &rtpn->rb_tree_per_zone[zone];
5713                         rtpz->rb_root = RB_ROOT;
5714                         spin_lock_init(&rtpz->lock);
5715                 }
5716                 soft_limit_tree.rb_tree_per_node[node] = rtpn;
5717         }
5718
5719         return 0;
5720 }
5721 subsys_initcall(mem_cgroup_init);
5722
5723 #ifdef CONFIG_MEMCG_SWAP
5724 /**
5725  * mem_cgroup_swapout - transfer a memsw charge to swap
5726  * @page: page whose memsw charge to transfer
5727  * @entry: swap entry to move the charge to
5728  *
5729  * Transfer the memsw charge of @page to @entry.
5730  */
5731 void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
5732 {
5733         struct mem_cgroup *memcg;
5734         unsigned short oldid;
5735
5736         VM_BUG_ON_PAGE(PageLRU(page), page);
5737         VM_BUG_ON_PAGE(page_count(page), page);
5738
5739         if (!do_memsw_account())
5740                 return;
5741
5742         memcg = page->mem_cgroup;
5743
5744         /* Readahead page, never charged */
5745         if (!memcg)
5746                 return;
5747
5748         oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
5749         VM_BUG_ON_PAGE(oldid, page);
5750         mem_cgroup_swap_statistics(memcg, true);
5751
5752         page->mem_cgroup = NULL;
5753
5754         if (!mem_cgroup_is_root(memcg))
5755                 page_counter_uncharge(&memcg->memory, 1);
5756
5757         /*
5758          * Interrupts should be disabled here because the caller holds
5759          * mapping->tree_lock, which is taken with interrupts off. It is
5760          * important to have interrupts disabled here because it is the
5761          * only synchronisation we have for updating the per-CPU variables.
5762          */
5763         VM_BUG_ON(!irqs_disabled());
5764         mem_cgroup_charge_statistics(memcg, page, false, -1);
5765         memcg_check_events(memcg, page);
5766 }
5767
5768 /**
5769  * mem_cgroup_try_charge_swap - try charging a swap entry
5770  * @page: page being added to swap
5771  * @entry: swap entry to charge
5772  *
5773  * Try to charge @entry to the memcg that @page belongs to.
5774  *
5775  * Returns 0 on success, -ENOMEM on failure.
5776  */
5777 int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
5778 {
5779         struct mem_cgroup *memcg;
5780         struct page_counter *counter;
5781         unsigned short oldid;
5782
5783         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) || !do_swap_account)
5784                 return 0;
5785
5786         memcg = page->mem_cgroup;
5787
5788         /* Readahead page, never charged */
5789         if (!memcg)
5790                 return 0;
5791
5792         if (!mem_cgroup_is_root(memcg) &&
5793             !page_counter_try_charge(&memcg->swap, 1, &counter))
5794                 return -ENOMEM;
5795
5796         oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
5797         VM_BUG_ON_PAGE(oldid, page);
5798         mem_cgroup_swap_statistics(memcg, true);
5799
5800         css_get(&memcg->css);
5801         return 0;
5802 }
5803
5804 /**
5805  * mem_cgroup_uncharge_swap - uncharge a swap entry
5806  * @entry: swap entry to uncharge
5807  *
5808  * Drop the swap charge associated with @entry.
5809  */
5810 void mem_cgroup_uncharge_swap(swp_entry_t entry)
5811 {
5812         struct mem_cgroup *memcg;
5813         unsigned short id;
5814
5815         if (!do_swap_account)
5816                 return;
5817
5818         id = swap_cgroup_record(entry, 0);
5819         rcu_read_lock();
5820         memcg = mem_cgroup_from_id(id);
5821         if (memcg) {
5822                 if (!mem_cgroup_is_root(memcg)) {
5823                         if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
5824                                 page_counter_uncharge(&memcg->swap, 1);
5825                         else
5826                                 page_counter_uncharge(&memcg->memsw, 1);
5827                 }
5828                 mem_cgroup_swap_statistics(memcg, false);
5829                 css_put(&memcg->css);
5830         }
5831         rcu_read_unlock();
5832 }
5833
5834 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
5835 {
5836         long nr_swap_pages = get_nr_swap_pages();
5837
5838         if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
5839                 return nr_swap_pages;
5840         for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
5841                 nr_swap_pages = min_t(long, nr_swap_pages,
5842                                       READ_ONCE(memcg->swap.limit) -
5843                                       page_counter_read(&memcg->swap));
5844         return nr_swap_pages;
5845 }
5846
5847 bool mem_cgroup_swap_full(struct page *page)
5848 {
5849         struct mem_cgroup *memcg;
5850
5851         VM_BUG_ON_PAGE(!PageLocked(page), page);
5852
5853         if (vm_swap_full())
5854                 return true;
5855         if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
5856                 return false;
5857
5858         memcg = page->mem_cgroup;
5859         if (!memcg)
5860                 return false;
5861
5862         for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
5863                 if (page_counter_read(&memcg->swap) * 2 >= memcg->swap.limit)
5864                         return true;
5865
5866         return false;
5867 }
5868
5869 /* to remember the boot option */
5870 #ifdef CONFIG_MEMCG_SWAP_ENABLED
5871 static int really_do_swap_account __initdata = 1;
5872 #else
5873 static int really_do_swap_account __initdata;
5874 #endif
5875
5876 static int __init enable_swap_account(char *s)
5877 {
5878         if (!strcmp(s, "1"))
5879                 really_do_swap_account = 1;
5880         else if (!strcmp(s, "0"))
5881                 really_do_swap_account = 0;
5882         return 1;
5883 }
5884 __setup("swapaccount=", enable_swap_account);
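
/*
 * Example (kernel command line): "swapaccount=0" disables swap accounting
 * even when CONFIG_MEMCG_SWAP_ENABLED is set; "swapaccount=1" enables it
 * when the build-time default is off.
 */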
5885
5886 static u64 swap_current_read(struct cgroup_subsys_state *css,
5887                              struct cftype *cft)
5888 {
5889         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5890
5891         return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
5892 }
5893
5894 static int swap_max_show(struct seq_file *m, void *v)
5895 {
5896         struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5897         unsigned long max = READ_ONCE(memcg->swap.limit);
5898
5899         if (max == PAGE_COUNTER_MAX)
5900                 seq_puts(m, "max\n");
5901         else
5902                 seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);
5903
5904         return 0;
5905 }
5906
5907 static ssize_t swap_max_write(struct kernfs_open_file *of,
5908                               char *buf, size_t nbytes, loff_t off)
5909 {
5910         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5911         unsigned long max;
5912         int err;
5913
5914         buf = strstrip(buf);
5915         err = page_counter_memparse(buf, "max", &max);
5916         if (err)
5917                 return err;
5918
5919         mutex_lock(&memcg_limit_mutex);
5920         err = page_counter_limit(&memcg->swap, max);
5921         mutex_unlock(&memcg_limit_mutex);
5922         if (err)
5923                 return err;
5924
5925         return nbytes;
5926 }
5927
5928 static struct cftype swap_files[] = {
5929         {
5930                 .name = "swap.current",
5931                 .flags = CFTYPE_NOT_ON_ROOT,
5932                 .read_u64 = swap_current_read,
5933         },
5934         {
5935                 .name = "swap.max",
5936                 .flags = CFTYPE_NOT_ON_ROOT,
5937                 .seq_show = swap_max_show,
5938                 .write = swap_max_write,
5939         },
5940         { }     /* terminate */
5941 };
5942
5943 static struct cftype memsw_cgroup_files[] = {
5944         {
5945                 .name = "memsw.usage_in_bytes",
5946                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
5947                 .read_u64 = mem_cgroup_read_u64,
5948         },
5949         {
5950                 .name = "memsw.max_usage_in_bytes",
5951                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
5952                 .write = mem_cgroup_reset,
5953                 .read_u64 = mem_cgroup_read_u64,
5954         },
5955         {
5956                 .name = "memsw.limit_in_bytes",
5957                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
5958                 .write = mem_cgroup_write,
5959                 .read_u64 = mem_cgroup_read_u64,
5960         },
5961         {
5962                 .name = "memsw.failcnt",
5963                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
5964                 .write = mem_cgroup_reset,
5965                 .read_u64 = mem_cgroup_read_u64,
5966         },
5967         { },    /* terminate */
5968 };
5969
5970 static int __init mem_cgroup_swap_init(void)
5971 {
5972         if (!mem_cgroup_disabled() && really_do_swap_account) {
5973                 do_swap_account = 1;
5974                 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys,
5975                                                swap_files));
5976                 WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys,
5977                                                   memsw_cgroup_files));
5978         }
5979         return 0;
5980 }
5981 subsys_initcall(mem_cgroup_swap_init);
5982
5983 #endif /* CONFIG_MEMCG_SWAP */