1 /* memcontrol.c - Memory Controller
3 * Copyright IBM Corporation, 2007
4 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
6 * Copyright 2007 OpenVZ SWsoft Inc
7 * Author: Pavel Emelianov <xemul@openvz.org>
10 * Copyright (C) 2009 Nokia Corporation
11 * Author: Kirill A. Shutemov
13 * Kernel Memory Controller
14 * Copyright (C) 2012 Parallels Inc. and Google Inc.
15 * Authors: Glauber Costa and Suleiman Souhlal
18 * Charge lifetime sanitation
19 * Lockless page tracking & accounting
20 * Unified hierarchy configuration model
21 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
23 * This program is free software; you can redistribute it and/or modify
24 * it under the terms of the GNU General Public License as published by
25 * the Free Software Foundation; either version 2 of the License, or
26 * (at your option) any later version.
28 * This program is distributed in the hope that it will be useful,
29 * but WITHOUT ANY WARRANTY; without even the implied warranty of
30 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
31 * GNU General Public License for more details.
34 #include <linux/page_counter.h>
35 #include <linux/memcontrol.h>
36 #include <linux/cgroup.h>
38 #include <linux/hugetlb.h>
39 #include <linux/pagemap.h>
40 #include <linux/smp.h>
41 #include <linux/page-flags.h>
42 #include <linux/backing-dev.h>
43 #include <linux/bit_spinlock.h>
44 #include <linux/rcupdate.h>
45 #include <linux/limits.h>
46 #include <linux/export.h>
47 #include <linux/mutex.h>
48 #include <linux/rbtree.h>
49 #include <linux/slab.h>
50 #include <linux/swap.h>
51 #include <linux/swapops.h>
52 #include <linux/spinlock.h>
53 #include <linux/eventfd.h>
54 #include <linux/poll.h>
55 #include <linux/sort.h>
57 #include <linux/seq_file.h>
58 #include <linux/vmpressure.h>
59 #include <linux/mm_inline.h>
60 #include <linux/swap_cgroup.h>
61 #include <linux/cpu.h>
62 #include <linux/oom.h>
63 #include <linux/lockdep.h>
64 #include <linux/file.h>
65 #include <linux/tracehook.h>
69 #include <net/tcp_memcontrol.h>
72 #include <asm/uaccess.h>
74 #include <trace/events/vmscan.h>
76 struct cgroup_subsys memory_cgrp_subsys __read_mostly;
77 EXPORT_SYMBOL(memory_cgrp_subsys);
79 struct mem_cgroup *root_mem_cgroup __read_mostly;
81 #define MEM_CGROUP_RECLAIM_RETRIES 5
83 /* Whether the swap controller is active */
84 #ifdef CONFIG_MEMCG_SWAP
85 int do_swap_account __read_mostly;
87 #define do_swap_account 0
90 static const char * const mem_cgroup_stat_names[] = {
100 static const char * const mem_cgroup_events_names[] = {
107 static const char * const mem_cgroup_lru_names[] = {
115 #define THRESHOLDS_EVENTS_TARGET 128
116 #define SOFTLIMIT_EVENTS_TARGET 1024
117 #define NUMAINFO_EVENTS_TARGET 1024
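/*
 * Example: with THRESHOLDS_EVENTS_TARGET == 128, threshold
 * notifications are re-evaluated at most once per 128 page events on
 * a given cpu; see mem_cgroup_event_ratelimit() below for the check.
 */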
120 * Cgroups above their limits are maintained in an RB-tree, independent of
121 * their hierarchy representation
124 struct mem_cgroup_tree_per_zone {
125 struct rb_root rb_root;
129 struct mem_cgroup_tree_per_node {
130 struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
133 struct mem_cgroup_tree {
134 struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
137 static struct mem_cgroup_tree soft_limit_tree __read_mostly;
140 struct mem_cgroup_eventfd_list {
141 struct list_head list;
142 struct eventfd_ctx *eventfd;
146 * cgroup_event represents events which userspace wants to receive.
148 struct mem_cgroup_event {
150 * memcg which the event belongs to.
152 struct mem_cgroup *memcg;
154 * eventfd to signal userspace about the event.
156 struct eventfd_ctx *eventfd;
158 * Each of these is stored in a list by the cgroup.
160 struct list_head list;
162 * The register_event() callback will be used to add a new userspace
163 * waiter for changes related to this event. Use eventfd_signal()
164 * on the eventfd to send a notification to userspace.
166 int (*register_event)(struct mem_cgroup *memcg,
167 struct eventfd_ctx *eventfd, const char *args);
169 * The unregister_event() callback will be called when userspace closes
170 * the eventfd or when the cgroup is removed. This callback must be set
171 * if you want to provide notification functionality.
173 void (*unregister_event)(struct mem_cgroup *memcg,
174 struct eventfd_ctx *eventfd);
176 * All fields below are needed to unregister the event when
177 * userspace closes the eventfd.
180 wait_queue_head_t *wqh;
182 struct work_struct remove;
185 static void mem_cgroup_threshold(struct mem_cgroup *memcg);
186 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
188 /* Stuff for moving charges at task migration. */
190 * Types of charges to be moved.
192 #define MOVE_ANON 0x1U
193 #define MOVE_FILE 0x2U
194 #define MOVE_MASK (MOVE_ANON | MOVE_FILE)
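/*
 * Example (a sketch; move_flags is a hypothetical local holding the
 * configured move_charge_at_immigrate bits): test which charge types
 * a move request covers:
 *
 *	if (move_flags & MOVE_ANON)
 *		... move mapped anonymous pages ...
 *	if (move_flags & MOVE_FILE)
 *		... move file and shmem pages ...
 */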
196 /* "mc" and its members are protected by cgroup_mutex */
197 static struct move_charge_struct {
198 spinlock_t lock; /* for from, to */
199 struct mem_cgroup *from;
200 struct mem_cgroup *to;
202 unsigned long precharge;
203 unsigned long moved_charge;
204 unsigned long moved_swap;
205 struct task_struct *moving_task; /* a task moving charges */
206 wait_queue_head_t waitq; /* a waitq for other context */
208 .lock = __SPIN_LOCK_UNLOCKED(mc.lock),
209 .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
213 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
214 * limit reclaim to prevent infinite loops, if they ever occur.
216 #define MEM_CGROUP_MAX_RECLAIM_LOOPS 100
217 #define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS 2
220 MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
221 MEM_CGROUP_CHARGE_TYPE_ANON,
222 MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */
223 MEM_CGROUP_CHARGE_TYPE_DROP, /* a page was unused swap cache */
227 /* for encoding cft->private value on file */
235 #define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val))
236 #define MEMFILE_TYPE(val) ((val) >> 16 & 0xffff)
237 #define MEMFILE_ATTR(val) ((val) & 0xffff)
238 /* Used for the OOM notifier */
239 #define OOM_CONTROL (0)
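/*
 * A minimal sketch of how the MEMFILE_* helpers round-trip. The _MEM
 * and RES_LIMIT values are assumed from elsewhere in this file and
 * are illustrative only:
 *
 *	unsigned long priv = MEMFILE_PRIVATE(_MEM, RES_LIMIT);
 *
 *	MEMFILE_TYPE(priv) == _MEM;
 *	MEMFILE_ATTR(priv) == RES_LIMIT;
 */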
242 * The memcg_create_mutex will be held whenever a new cgroup is created.
243 * As a consequence, any change that needs to protect against new child cgroups
244 * appearing has to hold it as well.
246 static DEFINE_MUTEX(memcg_create_mutex);
248 /* Some nice accessors for the vmpressure. */
249 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
252 memcg = root_mem_cgroup;
253 return &memcg->vmpressure;
256 struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
258 return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
261 static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
263 return (memcg == root_mem_cgroup);
267 * We restrict the id to the range [1, 65535], so it can fit into
270 #define MEM_CGROUP_ID_MAX USHRT_MAX
272 static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
274 return memcg->css.id;
278 * A helper function to get a mem_cgroup from an ID. It must be called
279 * under rcu_read_lock(). The caller is responsible for calling
280 * css_tryget_online() if the mem_cgroup is used for charging. (Dropping
281 * a refcnt from swap can happen against an already removed memcg.)
283 static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
285 struct cgroup_subsys_state *css;
287 css = css_from_id(id, &memory_cgrp_subsys);
288 return mem_cgroup_from_css(css);
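/*
 * Illustrative use only (a sketch, not an exported API): look up a
 * memcg by id and try to pin it for charging:
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_id(id);
 *	if (memcg && !css_tryget_online(&memcg->css))
 *		memcg = NULL;
 *	rcu_read_unlock();
 */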
291 /* Writing them here to avoid exposing memcg's inner layout */
292 #if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)
294 struct static_key memcg_sockets_enabled_key;
295 EXPORT_SYMBOL(memcg_sockets_enabled_key);
297 void sock_update_memcg(struct sock *sk)
299 struct mem_cgroup *memcg;
301 /* Socket cloning can throw us here with sk_cgrp already
302 * filled. It won't, however, necessarily happen from
303 * process context. So the test for root memcg given
304 * the current task's memcg won't help us in this case.
306 * Respecting the original socket's memcg is a better
307 * decision in this case.
310 BUG_ON(mem_cgroup_is_root(sk->sk_memcg));
311 css_get(&sk->sk_memcg->css);
316 memcg = mem_cgroup_from_task(current);
317 if (memcg != root_mem_cgroup &&
318 memcg->tcp_mem.active &&
319 css_tryget_online(&memcg->css))
320 sk->sk_memcg = memcg;
323 EXPORT_SYMBOL(sock_update_memcg);
325 void sock_release_memcg(struct sock *sk)
327 WARN_ON(!sk->sk_memcg);
328 css_put(&sk->sk_memcg->css);
332 * mem_cgroup_charge_skmem - charge socket memory
333 * @memcg: memcg to charge
334 * @nr_pages: number of pages to charge
336 * Charges @nr_pages to @memcg. Returns %true if the charge fit within
337 * @memcg's configured limit, %false if the charge had to be forced.
339 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
341 struct page_counter *counter;
343 if (page_counter_try_charge(&memcg->tcp_mem.memory_allocated,
344 nr_pages, &counter)) {
345 memcg->tcp_mem.memory_pressure = 0;
348 page_counter_charge(&memcg->tcp_mem.memory_allocated, nr_pages);
349 memcg->tcp_mem.memory_pressure = 1;
354 * mem_cgroup_uncharge_skmem - uncharge socket memory
355 * @memcg: memcg to uncharge
356 * @nr_pages: number of pages to uncharge
358 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
360 page_counter_uncharge(&memcg->tcp_mem.memory_allocated, nr_pages);
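/*
 * Expected pairing (a sketch): every charge, successful or forced, is
 * matched by an uncharge of the same nr_pages when the socket memory
 * is released:
 *
 *	if (!mem_cgroup_charge_skmem(memcg, nr_pages))
 *		... the charge was forced; the memcg is over its limit ...
 *	...
 *	mem_cgroup_uncharge_skmem(memcg, nr_pages);
 */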
365 #ifdef CONFIG_MEMCG_KMEM
367 * This will be the memcg's index in each cache's ->memcg_params.memcg_caches.
368 * The main reason for not using the cgroup id for this:
369 * this works better in sparse environments, where we have a lot of memcgs
370 * but only a few of them kmem-limited. If we had, for instance, 200
371 * memcgs, and none but the 200th were kmem-limited, we'd still need a
372 * 200 entry array for that.
374 * The current size of the caches array is stored in memcg_nr_cache_ids. It
375 * will double each time we have to increase it.
377 static DEFINE_IDA(memcg_cache_ida);
378 int memcg_nr_cache_ids;
380 /* Protects memcg_nr_cache_ids */
381 static DECLARE_RWSEM(memcg_cache_ids_sem);
383 void memcg_get_cache_ids(void)
385 down_read(&memcg_cache_ids_sem);
388 void memcg_put_cache_ids(void)
390 up_read(&memcg_cache_ids_sem);
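/*
 * Callers that walk the per-memcg cache arrays are expected to
 * bracket the walk so that memcg_nr_cache_ids stays stable, e.g.
 * (a sketch):
 *
 *	memcg_get_cache_ids();
 *	for (i = 0; i < memcg_nr_cache_ids; i++)
 *		... inspect cache slot i ...
 *	memcg_put_cache_ids();
 */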
394 * MIN_SIZE is different from 1, because we would like to avoid going through
395 * the alloc/free process all the time. In a small machine, 4 kmem-limited
396 * cgroups is a reasonable guess. In the future, it could be a parameter or
397 * tunable, but that is strictly not necessary.
399 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
400 * this constant directly from cgroup, but it is understandable that this is
401 * better kept as an internal representation in cgroup.c. In any case, the
402 * cgrp_id space is not getting any smaller, and we don't have to necessarily
403 * increase ours as well if it increases.
405 #define MEMCG_CACHES_MIN_SIZE 4
406 #define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX
409 * A lot of the calls to the cache allocation functions are expected to be
410 * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
411 * conditional on this static branch, we have to allow modules that do
412 * kmem_cache_alloc and the like to see this symbol as well.
414 struct static_key memcg_kmem_enabled_key;
415 EXPORT_SYMBOL(memcg_kmem_enabled_key);
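/*
 * A sketch of how the key is consulted on the allocation fast path;
 * the actual helper lives in include/linux/memcontrol.h:
 *
 *	if (static_key_false(&memcg_kmem_enabled_key))
 *		... account the allocation to the current memcg ...
 */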
417 #endif /* CONFIG_MEMCG_KMEM */
419 static struct mem_cgroup_per_zone *
420 mem_cgroup_zone_zoneinfo(struct mem_cgroup *memcg, struct zone *zone)
422 int nid = zone_to_nid(zone);
423 int zid = zone_idx(zone);
425 return &memcg->nodeinfo[nid]->zoneinfo[zid];
429 * mem_cgroup_css_from_page - css of the memcg associated with a page
430 * @page: page of interest
432 * If memcg is bound to the default hierarchy, css of the memcg associated
433 * with @page is returned. The returned css remains associated with @page
434 * until it is released.
436 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
439 * XXX: The above description of behavior on the default hierarchy isn't
440 * strictly true yet as replace_page_cache_page() can modify the
441 * association before @page is released even on the default hierarchy;
442 * however, the current and planned usages don't mix the two functions
443 * and replace_page_cache_page() will soon be updated to make the invariant
446 struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
448 struct mem_cgroup *memcg;
452 memcg = page->mem_cgroup;
454 if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
455 memcg = root_mem_cgroup;
462 * page_cgroup_ino - return inode number of the memcg a page is charged to
465 * Look up the closest online ancestor of the memory cgroup @page is charged to
466 * and return its inode number or 0 if @page is not charged to any cgroup. It
467 * is safe to call this function without holding a reference to @page.
469 * Note, this function is inherently racy, because there is nothing to prevent
470 * the cgroup inode from getting torn down and potentially reallocated a moment
471 * after page_cgroup_ino() returns, so it only should be used by callers that
472 * do not care (such as procfs interfaces).
474 ino_t page_cgroup_ino(struct page *page)
476 struct mem_cgroup *memcg;
477 unsigned long ino = 0;
480 memcg = READ_ONCE(page->mem_cgroup);
481 while (memcg && !(memcg->css.flags & CSS_ONLINE))
482 memcg = parent_mem_cgroup(memcg);
484 ino = cgroup_ino(memcg->css.cgroup);
489 static struct mem_cgroup_per_zone *
490 mem_cgroup_page_zoneinfo(struct mem_cgroup *memcg, struct page *page)
492 int nid = page_to_nid(page);
493 int zid = page_zonenum(page);
495 return &memcg->nodeinfo[nid]->zoneinfo[zid];
498 static struct mem_cgroup_tree_per_zone *
499 soft_limit_tree_node_zone(int nid, int zid)
501 return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
504 static struct mem_cgroup_tree_per_zone *
505 soft_limit_tree_from_page(struct page *page)
507 int nid = page_to_nid(page);
508 int zid = page_zonenum(page);
510 return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
513 static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_zone *mz,
514 struct mem_cgroup_tree_per_zone *mctz,
515 unsigned long new_usage_in_excess)
517 struct rb_node **p = &mctz->rb_root.rb_node;
518 struct rb_node *parent = NULL;
519 struct mem_cgroup_per_zone *mz_node;
524 mz->usage_in_excess = new_usage_in_excess;
525 if (!mz->usage_in_excess)
529 mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
531 if (mz->usage_in_excess < mz_node->usage_in_excess)
534 * We can't avoid mem cgroups that are over their soft
535 * limit by the same amount
537 else if (mz->usage_in_excess >= mz_node->usage_in_excess)
540 rb_link_node(&mz->tree_node, parent, p);
541 rb_insert_color(&mz->tree_node, &mctz->rb_root);
545 static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_zone *mz,
546 struct mem_cgroup_tree_per_zone *mctz)
550 rb_erase(&mz->tree_node, &mctz->rb_root);
554 static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_zone *mz,
555 struct mem_cgroup_tree_per_zone *mctz)
559 spin_lock_irqsave(&mctz->lock, flags);
560 __mem_cgroup_remove_exceeded(mz, mctz);
561 spin_unlock_irqrestore(&mctz->lock, flags);
564 static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
566 unsigned long nr_pages = page_counter_read(&memcg->memory);
567 unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
568 unsigned long excess = 0;
570 if (nr_pages > soft_limit)
571 excess = nr_pages - soft_limit;
576 static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
578 unsigned long excess;
579 struct mem_cgroup_per_zone *mz;
580 struct mem_cgroup_tree_per_zone *mctz;
582 mctz = soft_limit_tree_from_page(page);
584 * Necessary to update all ancestors when hierarchy is used,
585 * because their event counters are not touched.
587 for (; memcg; memcg = parent_mem_cgroup(memcg)) {
588 mz = mem_cgroup_page_zoneinfo(memcg, page);
589 excess = soft_limit_excess(memcg);
591 * We have to update the tree if mz is on the RB-tree or
592 * memcg is over its soft limit.
594 if (excess || mz->on_tree) {
597 spin_lock_irqsave(&mctz->lock, flags);
598 /* if on-tree, remove it */
600 __mem_cgroup_remove_exceeded(mz, mctz);
602 * Insert again. mz->usage_in_excess will be updated.
603 * If excess is 0, no tree ops.
605 __mem_cgroup_insert_exceeded(mz, mctz, excess);
606 spin_unlock_irqrestore(&mctz->lock, flags);
611 static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
613 struct mem_cgroup_tree_per_zone *mctz;
614 struct mem_cgroup_per_zone *mz;
618 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
619 mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
620 mctz = soft_limit_tree_node_zone(nid, zid);
621 mem_cgroup_remove_exceeded(mz, mctz);
626 static struct mem_cgroup_per_zone *
627 __mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
629 struct rb_node *rightmost = NULL;
630 struct mem_cgroup_per_zone *mz;
634 rightmost = rb_last(&mctz->rb_root);
636 goto done; /* Nothing to reclaim from */
638 mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
640 * Remove the node now but someone else can add it back;
641 * we will add it back at the end of reclaim to its correct
642 * position in the tree.
644 __mem_cgroup_remove_exceeded(mz, mctz);
645 if (!soft_limit_excess(mz->memcg) ||
646 !css_tryget_online(&mz->memcg->css))
652 static struct mem_cgroup_per_zone *
653 mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
655 struct mem_cgroup_per_zone *mz;
657 spin_lock_irq(&mctz->lock);
658 mz = __mem_cgroup_largest_soft_limit_node(mctz);
659 spin_unlock_irq(&mctz->lock);
664 * Return the page count for a single (non-recursive) @memcg.
666 * Implementation note: reading percpu statistics for memcg.
668 * Both vmstat[] and percpu_counter use thresholds and do periodic
669 * synchronization to implement a "quick" read. There is a trade-off between
670 * reading cost and precision of the value, and we may eventually implement
671 * such periodic synchronization for memcg's counters as well.
673 * But this _read() function is used for the user interface now. The user
674 * accounts memory usage by memory cgroup and _always_ requires an exact
675 * value, because the numbers are used for accounting. Even with a
676 * quick-and-fuzzy read, we would still have to visit all online cpus and
677 * compute the sum. So, for now, the extra synchronization is not implemented (it is only done for cpu hotplug).
679 * If there are kernel-internal users which can make use of a not-exact
680 * value, and reading all cpu values becomes a performance bottleneck in some
681 * common workload, thresholds and synchronization as in vmstat[] should be
685 mem_cgroup_read_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx)
690 /* Per-cpu values can be negative, use a signed accumulator */
691 for_each_possible_cpu(cpu)
692 val += per_cpu(memcg->stat->count[idx], cpu);
694 * Summing races with updates, so val may be negative. Avoid exposing
695 * transient negative values.
702 static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
703 enum mem_cgroup_events_index idx)
705 unsigned long val = 0;
708 for_each_possible_cpu(cpu)
709 val += per_cpu(memcg->stat->events[idx], cpu);
713 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
718 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
719 * counted as CACHE even if it's on ANON LRU.
722 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
725 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
728 if (PageTransHuge(page))
729 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
732 /* pagein of a huge page is one event, so ignore the page size */
734 __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
736 __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
737 nr_pages = -nr_pages; /* for event */
740 __this_cpu_add(memcg->stat->nr_page_events, nr_pages);
743 static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
745 unsigned int lru_mask)
747 unsigned long nr = 0;
750 VM_BUG_ON((unsigned)nid >= nr_node_ids);
752 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
753 struct mem_cgroup_per_zone *mz;
757 if (!(BIT(lru) & lru_mask))
759 mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
760 nr += mz->lru_size[lru];
766 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
767 unsigned int lru_mask)
769 unsigned long nr = 0;
772 for_each_node_state(nid, N_MEMORY)
773 nr += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
777 static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
778 enum mem_cgroup_events_target target)
780 unsigned long val, next;
782 val = __this_cpu_read(memcg->stat->nr_page_events);
783 next = __this_cpu_read(memcg->stat->targets[target]);
784 /* from time_after() in jiffies.h */
785 if ((long)next - (long)val < 0) {
787 case MEM_CGROUP_TARGET_THRESH:
788 next = val + THRESHOLDS_EVENTS_TARGET;
790 case MEM_CGROUP_TARGET_SOFTLIMIT:
791 next = val + SOFTLIMIT_EVENTS_TARGET;
793 case MEM_CGROUP_TARGET_NUMAINFO:
794 next = val + NUMAINFO_EVENTS_TARGET;
799 __this_cpu_write(memcg->stat->targets[target], next);
806 * Check events in order.
809 static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
811 /* threshold events are triggered at a finer grain than the soft limit */
812 if (unlikely(mem_cgroup_event_ratelimit(memcg,
813 MEM_CGROUP_TARGET_THRESH))) {
815 bool do_numainfo __maybe_unused;
817 do_softlimit = mem_cgroup_event_ratelimit(memcg,
818 MEM_CGROUP_TARGET_SOFTLIMIT);
820 do_numainfo = mem_cgroup_event_ratelimit(memcg,
821 MEM_CGROUP_TARGET_NUMAINFO);
823 mem_cgroup_threshold(memcg);
824 if (unlikely(do_softlimit))
825 mem_cgroup_update_tree(memcg, page);
827 if (unlikely(do_numainfo))
828 atomic_inc(&memcg->numainfo_events);
833 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
836 * mm_update_next_owner() may clear mm->owner to NULL
837 * if it races with swapoff, page migration, etc.
838 * So this can be called with p == NULL.
843 return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
845 EXPORT_SYMBOL(mem_cgroup_from_task);
847 static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
849 struct mem_cgroup *memcg = NULL;
854 * Page cache insertions can happen without an
855 * actual mm context, e.g. during disk probing
856 * on boot, loopback IO, acct() writes etc.
859 memcg = root_mem_cgroup;
861 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
862 if (unlikely(!memcg))
863 memcg = root_mem_cgroup;
865 } while (!css_tryget_online(&memcg->css));
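/*
 * Typical caller pattern (a sketch): pin the mm's memcg for the
 * duration of an operation, then drop the reference:
 *
 *	memcg = get_mem_cgroup_from_mm(mm);
 *	... charge against or inspect memcg ...
 *	css_put(&memcg->css);
 */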
871 * mem_cgroup_iter - iterate over memory cgroup hierarchy
872 * @root: hierarchy root
873 * @prev: previously returned memcg, NULL on first invocation
874 * @reclaim: cookie for shared reclaim walks, NULL for full walks
876 * Returns references to children of the hierarchy below @root, or
877 * @root itself, or %NULL after a full round-trip.
879 * Caller must pass the return value in @prev on subsequent
880 * invocations for reference counting, or use mem_cgroup_iter_break()
881 * to cancel a hierarchy walk before the round-trip is complete.
883 * Reclaimers can specify a zone and a priority level in @reclaim to
884 * divide up the memcgs in the hierarchy among all concurrent
885 * reclaimers operating on the same zone and priority.
887 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
888 struct mem_cgroup *prev,
889 struct mem_cgroup_reclaim_cookie *reclaim)
891 struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
892 struct cgroup_subsys_state *css = NULL;
893 struct mem_cgroup *memcg = NULL;
894 struct mem_cgroup *pos = NULL;
896 if (mem_cgroup_disabled())
900 root = root_mem_cgroup;
902 if (prev && !reclaim)
905 if (!root->use_hierarchy && root != root_mem_cgroup) {
914 struct mem_cgroup_per_zone *mz;
916 mz = mem_cgroup_zone_zoneinfo(root, reclaim->zone);
917 iter = &mz->iter[reclaim->priority];
919 if (prev && reclaim->generation != iter->generation)
923 pos = READ_ONCE(iter->position);
924 if (!pos || css_tryget(&pos->css))
927 * css reference reached zero, so iter->position will
928 * be cleared by ->css_released. However, we should not
929 * rely on this happening soon, because ->css_released
930 * is called from a work queue, and by busy-waiting we
931 * might block it. So we clear iter->position right
934 (void)cmpxchg(&iter->position, pos, NULL);
942 css = css_next_descendant_pre(css, &root->css);
945 * Reclaimers share the hierarchy walk, and a
946 * new one might jump in right at the end of
947 * the hierarchy - make sure they see at least
948 * one group and restart from the beginning.
956 * Verify the css and acquire a reference. The root
957 * is provided by the caller, so we know it's alive
958 * and kicking, and don't take an extra reference.
960 memcg = mem_cgroup_from_css(css);
962 if (css == &root->css)
965 if (css_tryget(css)) {
967 * Make sure the memcg is initialized:
968 * mem_cgroup_css_online() orders the
969 * initialization against setting the flag.
971 if (smp_load_acquire(&memcg->initialized))
982 * The position could have already been updated by a competing
983 * thread, so check that the value hasn't changed since we read
984 * it to avoid reclaiming from the same cgroup twice.
986 (void)cmpxchg(&iter->position, pos, memcg);
994 reclaim->generation = iter->generation;
1000 if (prev && prev != root)
1001 css_put(&prev->css);
1007 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1008 * @root: hierarchy root
1009 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
1011 void mem_cgroup_iter_break(struct mem_cgroup *root,
1012 struct mem_cgroup *prev)
1015 root = root_mem_cgroup;
1016 if (prev && prev != root)
1017 css_put(&prev->css);
1020 static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
1022 struct mem_cgroup *memcg = dead_memcg;
1023 struct mem_cgroup_reclaim_iter *iter;
1024 struct mem_cgroup_per_zone *mz;
1028 while ((memcg = parent_mem_cgroup(memcg))) {
1029 for_each_node(nid) {
1030 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1031 mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
1032 for (i = 0; i <= DEF_PRIORITY; i++) {
1033 iter = &mz->iter[i];
1034 cmpxchg(&iter->position,
1043 * Iteration constructs for visiting all cgroups (under a tree). If
1044 * loops are exited prematurely (break), mem_cgroup_iter_break() must
1045 * be used for reference counting.
1047 #define for_each_mem_cgroup_tree(iter, root) \
1048 for (iter = mem_cgroup_iter(root, NULL, NULL); \
1050 iter = mem_cgroup_iter(root, iter, NULL))
1052 #define for_each_mem_cgroup(iter) \
1053 for (iter = mem_cgroup_iter(NULL, NULL, NULL); \
1055 iter = mem_cgroup_iter(NULL, iter, NULL))
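/*
 * Example (a sketch; should_stop() is hypothetical): abort a tree
 * walk early without leaking the iterator's css reference:
 *
 *	for_each_mem_cgroup_tree(iter, root) {
 *		if (should_stop(iter)) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 */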
1058 * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg
1059 * @zone: zone of the wanted lruvec
1060 * @memcg: memcg of the wanted lruvec
1062 * Returns the lru list vector holding pages for the given @zone and
1063 * @memcg. This can be the global zone lruvec, if the memory controller
1066 struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
1067 struct mem_cgroup *memcg)
1069 struct mem_cgroup_per_zone *mz;
1070 struct lruvec *lruvec;
1072 if (mem_cgroup_disabled()) {
1073 lruvec = &zone->lruvec;
1077 mz = mem_cgroup_zone_zoneinfo(memcg, zone);
1078 lruvec = &mz->lruvec;
1081 * Since a node can be onlined after the mem_cgroup was created,
1082 * we have to be prepared to initialize lruvec->zone here;
1083 * and if offlined then reonlined, we need to reinitialize it.
1085 if (unlikely(lruvec->zone != zone))
1086 lruvec->zone = zone;
1091 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
1093 * @zone: zone of the page
1095 * This function is only safe when following the LRU page isolation
1096 * and putback protocol: the LRU lock must be held, and the page must
1097 * either be PageLRU() or the caller must have isolated/allocated it.
1099 struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
1101 struct mem_cgroup_per_zone *mz;
1102 struct mem_cgroup *memcg;
1103 struct lruvec *lruvec;
1105 if (mem_cgroup_disabled()) {
1106 lruvec = &zone->lruvec;
1110 memcg = page->mem_cgroup;
1112 * Swapcache readahead pages are added to the LRU - and
1113 * possibly migrated - before they are charged.
1116 memcg = root_mem_cgroup;
1118 mz = mem_cgroup_page_zoneinfo(memcg, page);
1119 lruvec = &mz->lruvec;
1122 * Since a node can be onlined after the mem_cgroup was created,
1123 * we have to be prepared to initialize lruvec->zone here;
1124 * and if offlined then reonlined, we need to reinitialize it.
1126 if (unlikely(lruvec->zone != zone))
1127 lruvec->zone = zone;
1132 * mem_cgroup_update_lru_size - account for adding or removing an lru page
1133 * @lruvec: mem_cgroup per zone lru vector
1134 * @lru: index of lru list the page is sitting on
1135 * @nr_pages: positive when adding or negative when removing
1137 * This function must be called when a page is added to or removed from an
1140 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1143 struct mem_cgroup_per_zone *mz;
1144 unsigned long *lru_size;
1146 if (mem_cgroup_disabled())
1149 mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
1150 lru_size = mz->lru_size + lru;
1151 *lru_size += nr_pages;
1152 VM_BUG_ON((long)(*lru_size) < 0);
1155 bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg)
1157 struct mem_cgroup *task_memcg;
1158 struct task_struct *p;
1161 p = find_lock_task_mm(task);
1163 task_memcg = get_mem_cgroup_from_mm(p->mm);
1167 * All threads may have already detached their mm's, but the oom
1168 * killer still needs to detect if they have already been oom
1169 * killed to prevent needlessly killing additional tasks.
1172 task_memcg = mem_cgroup_from_task(task);
1173 css_get(&task_memcg->css);
1176 ret = mem_cgroup_is_descendant(task_memcg, memcg);
1177 css_put(&task_memcg->css);
1181 #define mem_cgroup_from_counter(counter, member) \
1182 container_of(counter, struct mem_cgroup, member)
1185 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1186 * @memcg: the memory cgroup
1188 * Returns the maximum amount of memory @memcg can be charged with, in
1191 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1193 unsigned long margin = 0;
1194 unsigned long count;
1195 unsigned long limit;
1197 count = page_counter_read(&memcg->memory);
1198 limit = READ_ONCE(memcg->memory.limit);
1200 margin = limit - count;
1202 if (do_swap_account) {
1203 count = page_counter_read(&memcg->memsw);
1204 limit = READ_ONCE(memcg->memsw.limit);
1206 margin = min(margin, limit - count);
1213 * A routine for checking whether "memcg" is under move_account() or not.
1215 * Checking whether a cgroup is mc.from or mc.to or under the hierarchy of
1216 * a moving cgroup. This is for waiting at high memory pressure
1219 static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
1221 struct mem_cgroup *from;
1222 struct mem_cgroup *to;
1225 * Unlike the task_move routines, we access mc.to and mc.from not under
1226 * mutual exclusion by cgroup_mutex. Here, we take the spinlock instead.
1228 spin_lock(&mc.lock);
1234 ret = mem_cgroup_is_descendant(from, memcg) ||
1235 mem_cgroup_is_descendant(to, memcg);
1237 spin_unlock(&mc.lock);
1241 static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
1243 if (mc.moving_task && current != mc.moving_task) {
1244 if (mem_cgroup_under_move(memcg)) {
1246 prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1247 /* moving charge context might have finished. */
1250 finish_wait(&mc.waitq, &wait);
1257 #define K(x) ((x) << (PAGE_SHIFT-10))
1259 * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller.
1260 * @memcg: The memory cgroup that went over limit
1261 * @p: Task that is going to be killed
1263 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1266 void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
1268 /* oom_info_lock ensures that parallel ooms do not interleave */
1269 static DEFINE_MUTEX(oom_info_lock);
1270 struct mem_cgroup *iter;
1273 mutex_lock(&oom_info_lock);
1277 pr_info("Task in ");
1278 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1279 pr_cont(" killed as a result of limit of ");
1281 pr_info("Memory limit reached of cgroup ");
1284 pr_cont_cgroup_path(memcg->css.cgroup);
1289 pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1290 K((u64)page_counter_read(&memcg->memory)),
1291 K((u64)memcg->memory.limit), memcg->memory.failcnt);
1292 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1293 K((u64)page_counter_read(&memcg->memsw)),
1294 K((u64)memcg->memsw.limit), memcg->memsw.failcnt);
1295 pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1296 K((u64)page_counter_read(&memcg->kmem)),
1297 K((u64)memcg->kmem.limit), memcg->kmem.failcnt);
1299 for_each_mem_cgroup_tree(iter, memcg) {
1300 pr_info("Memory cgroup stats for ");
1301 pr_cont_cgroup_path(iter->css.cgroup);
1304 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
1305 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
1307 pr_cont(" %s:%luKB", mem_cgroup_stat_names[i],
1308 K(mem_cgroup_read_stat(iter, i)));
1311 for (i = 0; i < NR_LRU_LISTS; i++)
1312 pr_cont(" %s:%luKB", mem_cgroup_lru_names[i],
1313 K(mem_cgroup_nr_lru_pages(iter, BIT(i))));
1317 mutex_unlock(&oom_info_lock);
1321 * This function returns the number of memcgs in the hierarchy tree. It
1322 * returns 1 (self count) if there are no children.
1324 static int mem_cgroup_count_children(struct mem_cgroup *memcg)
1327 struct mem_cgroup *iter;
1329 for_each_mem_cgroup_tree(iter, memcg)
1335 * Return the memory (and swap, if configured) limit for a memcg.
1337 static unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
1339 unsigned long limit;
1341 limit = memcg->memory.limit;
1342 if (mem_cgroup_swappiness(memcg)) {
1343 unsigned long memsw_limit;
1345 memsw_limit = memcg->memsw.limit;
1346 limit = min(limit + total_swap_pages, memsw_limit);
1351 static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1354 struct oom_control oc = {
1357 .gfp_mask = gfp_mask,
1360 struct mem_cgroup *iter;
1361 unsigned long chosen_points = 0;
1362 unsigned long totalpages;
1363 unsigned int points = 0;
1364 struct task_struct *chosen = NULL;
1366 mutex_lock(&oom_lock);
1369 * If current has a pending SIGKILL or is exiting, then automatically
1370 * select it. The goal is to allow it to allocate so that it may
1371 * quickly exit and free its memory.
1373 if (fatal_signal_pending(current) || task_will_free_mem(current)) {
1374 mark_oom_victim(current);
1378 check_panic_on_oom(&oc, CONSTRAINT_MEMCG, memcg);
1379 totalpages = mem_cgroup_get_limit(memcg) ? : 1;
1380 for_each_mem_cgroup_tree(iter, memcg) {
1381 struct css_task_iter it;
1382 struct task_struct *task;
1384 css_task_iter_start(&iter->css, &it);
1385 while ((task = css_task_iter_next(&it))) {
1386 switch (oom_scan_process_thread(&oc, task, totalpages)) {
1387 case OOM_SCAN_SELECT:
1389 put_task_struct(chosen);
1391 chosen_points = ULONG_MAX;
1392 get_task_struct(chosen);
1394 case OOM_SCAN_CONTINUE:
1396 case OOM_SCAN_ABORT:
1397 css_task_iter_end(&it);
1398 mem_cgroup_iter_break(memcg, iter);
1400 put_task_struct(chosen);
1405 points = oom_badness(task, memcg, NULL, totalpages);
1406 if (!points || points < chosen_points)
1408 /* Prefer thread group leaders for display purposes */
1409 if (points == chosen_points &&
1410 thread_group_leader(chosen))
1414 put_task_struct(chosen);
1416 chosen_points = points;
1417 get_task_struct(chosen);
1419 css_task_iter_end(&it);
1423 points = chosen_points * 1000 / totalpages;
1424 oom_kill_process(&oc, chosen, points, totalpages, memcg,
1425 "Memory cgroup out of memory");
1428 mutex_unlock(&oom_lock);
1431 #if MAX_NUMNODES > 1
1434 * test_mem_cgroup_node_reclaimable
1435 * @memcg: the target memcg
1436 * @nid: the node ID to be checked.
1437 * @noswap: specify true here if the user wants file-only information.
1439 * This function returns whether the specified memcg contains any
1440 * reclaimable pages on a node. Returns true if there are any reclaimable
1441 * pages in the node.
1443 static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
1444 int nid, bool noswap)
1446 if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
1448 if (noswap || !total_swap_pages)
1450 if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
1457 * Always updating the nodemask is not very good - even if we have an empty
1458 * list or the wrong list here, we can start from some node and traverse all
1459 * nodes based on the zonelist. So update the list loosely once per 10 secs.
1462 static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
1466 * numainfo_events > 0 means there were at least NUMAINFO_EVENTS_TARGET
1467 * pagein/pageout changes since the last update.
1469 if (!atomic_read(&memcg->numainfo_events))
1471 if (atomic_inc_return(&memcg->numainfo_updating) > 1)
1474 /* make a nodemask where this memcg uses memory from */
1475 memcg->scan_nodes = node_states[N_MEMORY];
1477 for_each_node_mask(nid, node_states[N_MEMORY]) {
1479 if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
1480 node_clear(nid, memcg->scan_nodes);
1483 atomic_set(&memcg->numainfo_events, 0);
1484 atomic_set(&memcg->numainfo_updating, 0);
1488 * Select a node to start reclaim from. Because all we need is to
1489 * reduce the usage counter, starting anywhere is OK. Reclaiming
1490 * memory from the current node has both pros and cons.
1492 * Freeing memory from the current node means freeing memory from a node
1493 * which we'll use or have used, so it may degrade the LRU. And if several
1494 * threads hit their limits, they will contend on one node. But freeing from
1495 * a remote node costs more for memory reclaim because of memory latency.
1497 * For now, we use round-robin. A better algorithm is welcome.
1499 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1503 mem_cgroup_may_update_nodemask(memcg);
1504 node = memcg->last_scanned_node;
1506 node = next_node(node, memcg->scan_nodes);
1507 if (node == MAX_NUMNODES)
1508 node = first_node(memcg->scan_nodes);
1510 * We call this when we hit the limit, not when pages are added to the LRU.
1511 * No LRU may hold pages, because all pages are UNEVICTABLE or because the
1512 * memcg is too small and no pages are on the LRU. In that case,
1513 * we use the current node.
1515 if (unlikely(node == MAX_NUMNODES))
1516 node = numa_node_id();
1518 memcg->last_scanned_node = node;
1522 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1528 static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1531 unsigned long *total_scanned)
1533 struct mem_cgroup *victim = NULL;
1536 unsigned long excess;
1537 unsigned long nr_scanned;
1538 struct mem_cgroup_reclaim_cookie reclaim = {
1543 excess = soft_limit_excess(root_memcg);
1546 victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1551 * If we have not been able to reclaim
1552 * anything, it might be because there are
1553 * no reclaimable pages under this hierarchy
1558 * We want to do more targeted reclaim.
1559 * excess >> 2 is not so excessive that we
1560 * reclaim too much, nor so small that we keep
1561 * coming back to reclaim from this cgroup
1563 if (total >= (excess >> 2) ||
1564 (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1569 total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false,
1571 *total_scanned += nr_scanned;
1572 if (!soft_limit_excess(root_memcg))
1575 mem_cgroup_iter_break(root_memcg, victim);
1579 #ifdef CONFIG_LOCKDEP
1580 static struct lockdep_map memcg_oom_lock_dep_map = {
1581 .name = "memcg_oom_lock",
1585 static DEFINE_SPINLOCK(memcg_oom_lock);
1588 * Check whether the OOM killer is already running under our hierarchy.
1589 * If someone is running, return false.
1591 static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
1593 struct mem_cgroup *iter, *failed = NULL;
1595 spin_lock(&memcg_oom_lock);
1597 for_each_mem_cgroup_tree(iter, memcg) {
1598 if (iter->oom_lock) {
1600 * this subtree of our hierarchy is already locked,
1601 * so we cannot grant the lock.
1604 mem_cgroup_iter_break(memcg, iter);
1607 iter->oom_lock = true;
1612 * OK, we failed to lock the whole subtree, so we have
1613 * to clean up what we have set up so far, up to the failing subtree
1615 for_each_mem_cgroup_tree(iter, memcg) {
1616 if (iter == failed) {
1617 mem_cgroup_iter_break(memcg, iter);
1620 iter->oom_lock = false;
1623 mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
1625 spin_unlock(&memcg_oom_lock);
1630 static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
1632 struct mem_cgroup *iter;
1634 spin_lock(&memcg_oom_lock);
1635 mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_);
1636 for_each_mem_cgroup_tree(iter, memcg)
1637 iter->oom_lock = false;
1638 spin_unlock(&memcg_oom_lock);
1641 static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
1643 struct mem_cgroup *iter;
1645 spin_lock(&memcg_oom_lock);
1646 for_each_mem_cgroup_tree(iter, memcg)
1648 spin_unlock(&memcg_oom_lock);
1651 static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
1653 struct mem_cgroup *iter;
1656 * When a new child is created while the hierarchy is under oom,
1657 * mem_cgroup_oom_lock() may not be called. Watch for underflow.
1659 spin_lock(&memcg_oom_lock);
1660 for_each_mem_cgroup_tree(iter, memcg)
1661 if (iter->under_oom > 0)
1663 spin_unlock(&memcg_oom_lock);
1666 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1668 struct oom_wait_info {
1669 struct mem_cgroup *memcg;
1673 static int memcg_oom_wake_function(wait_queue_t *wait,
1674 unsigned mode, int sync, void *arg)
1676 struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
1677 struct mem_cgroup *oom_wait_memcg;
1678 struct oom_wait_info *oom_wait_info;
1680 oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1681 oom_wait_memcg = oom_wait_info->memcg;
1683 if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
1684 !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
1686 return autoremove_wake_function(wait, mode, sync, arg);
1689 static void memcg_oom_recover(struct mem_cgroup *memcg)
1692 * For the following lockless ->under_oom test, the only required
1693 * guarantee is that it must see the state asserted by an OOM when
1694 * this function is called as a result of userland actions
1695 * triggered by the notification of the OOM. This is trivially
1696 * achieved by invoking mem_cgroup_mark_under_oom() before
1697 * triggering notification.
1699 if (memcg && memcg->under_oom)
1700 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
1703 static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
1705 if (!current->memcg_may_oom)
1708 * We are in the middle of the charge context here, so we
1709 * don't want to block when potentially sitting on a callstack
1710 * that holds all kinds of filesystem and mm locks.
1712 * Also, the caller may handle a failed allocation gracefully
1713 * (like optional page cache readahead) and so an OOM killer
1714 * invocation might not even be necessary.
1716 * That's why we don't do anything here except remember the
1717 * OOM context and then deal with it at the end of the page
1718 * fault when the stack is unwound, the locks are released,
1719 * and when we know whether the fault was overall successful.
1721 css_get(&memcg->css);
1722 current->memcg_in_oom = memcg;
1723 current->memcg_oom_gfp_mask = mask;
1724 current->memcg_oom_order = order;
1728 * mem_cgroup_oom_synchronize - complete memcg OOM handling
1729 * @handle: actually kill/wait or just clean up the OOM state
1731 * This has to be called at the end of a page fault if the memcg OOM
1732 * handler was enabled.
1734 * Memcg supports userspace OOM handling where failed allocations must
1735 * sleep on a waitqueue until the userspace task resolves the
1736 * situation. Sleeping directly in the charge context with all kinds
1737 * of locks held is not a good idea, instead we remember an OOM state
1738 * in the task and mem_cgroup_oom_synchronize() has to be called at
1739 * the end of the page fault to complete the OOM handling.
1741 * Returns %true if an ongoing memcg OOM situation was detected and
1742 * completed, %false otherwise.
1744 bool mem_cgroup_oom_synchronize(bool handle)
1746 struct mem_cgroup *memcg = current->memcg_in_oom;
1747 struct oom_wait_info owait;
1750 /* OOM is global, do not handle */
1754 if (!handle || oom_killer_disabled)
1757 owait.memcg = memcg;
1758 owait.wait.flags = 0;
1759 owait.wait.func = memcg_oom_wake_function;
1760 owait.wait.private = current;
1761 INIT_LIST_HEAD(&owait.wait.task_list);
1763 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
1764 mem_cgroup_mark_under_oom(memcg);
1766 locked = mem_cgroup_oom_trylock(memcg);
1769 mem_cgroup_oom_notify(memcg);
1771 if (locked && !memcg->oom_kill_disable) {
1772 mem_cgroup_unmark_under_oom(memcg);
1773 finish_wait(&memcg_oom_waitq, &owait.wait);
1774 mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
1775 current->memcg_oom_order);
1778 mem_cgroup_unmark_under_oom(memcg);
1779 finish_wait(&memcg_oom_waitq, &owait.wait);
1783 mem_cgroup_oom_unlock(memcg);
1785 * There is no guarantee that an OOM-lock contender
1786 * sees the wakeups triggered by the OOM kill
1787 * uncharges. Wake any sleepers explicitly.
1789 memcg_oom_recover(memcg);
1792 current->memcg_in_oom = NULL;
1793 css_put(&memcg->css);
1798 * mem_cgroup_begin_page_stat - begin a page state statistics transaction
1799 * @page: page that is going to change accounted state
1801 * This function must mark the beginning of an accounted page state
1802 * change to prevent double accounting when the page is concurrently
1803 * being moved to another memcg:
1805 * memcg = mem_cgroup_begin_page_stat(page);
1806 * if (TestClearPageState(page))
1807 * mem_cgroup_update_page_stat(memcg, state, -1);
1808 * mem_cgroup_end_page_stat(memcg);
1810 struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page)
1812 struct mem_cgroup *memcg;
1813 unsigned long flags;
1816 * The RCU lock is held throughout the transaction. The fast
1817 * path can get away without acquiring the memcg->move_lock
1818 * because page moving starts with an RCU grace period.
1820 * The RCU lock also protects the memcg from being freed when
1821 * the page state that is going to change is the only thing
1822 * preventing the page from being uncharged.
1823 * E.g. end-writeback clearing PageWriteback(), which allows
1824 * migration to go ahead and uncharge the page before the
1825 * account transaction might be complete.
1829 if (mem_cgroup_disabled())
1832 memcg = page->mem_cgroup;
1833 if (unlikely(!memcg))
1836 if (atomic_read(&memcg->moving_account) <= 0)
1839 spin_lock_irqsave(&memcg->move_lock, flags);
1840 if (memcg != page->mem_cgroup) {
1841 spin_unlock_irqrestore(&memcg->move_lock, flags);
1846 * When charge migration first begins, we can have locked and
1847 * unlocked page stat updates happening concurrently. Track
1848 * the task that holds the lock for mem_cgroup_end_page_stat().
1850 memcg->move_lock_task = current;
1851 memcg->move_lock_flags = flags;
1855 EXPORT_SYMBOL(mem_cgroup_begin_page_stat);
1858 * mem_cgroup_end_page_stat - finish a page state statistics transaction
1859 * @memcg: the memcg that was accounted against
1861 void mem_cgroup_end_page_stat(struct mem_cgroup *memcg)
1863 if (memcg && memcg->move_lock_task == current) {
1864 unsigned long flags = memcg->move_lock_flags;
1866 memcg->move_lock_task = NULL;
1867 memcg->move_lock_flags = 0;
1869 spin_unlock_irqrestore(&memcg->move_lock, flags);
1874 EXPORT_SYMBOL(mem_cgroup_end_page_stat);
1877 * size of the first charge trial. "32" comes from vmscan.c's magic value.
1878 * TODO: it may be necessary to use bigger numbers on big iron.
1880 #define CHARGE_BATCH 32U
1881 struct memcg_stock_pcp {
1882 struct mem_cgroup *cached; /* this is never the root cgroup */
1883 unsigned int nr_pages;
1884 struct work_struct work;
1885 unsigned long flags;
1886 #define FLUSHING_CACHED_CHARGE 0
1888 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
1889 static DEFINE_MUTEX(percpu_charge_mutex);
1892 * consume_stock: Try to consume stocked charge on this cpu.
1893 * @memcg: memcg to consume from.
1894 * @nr_pages: how many pages to charge.
1896 * The charges will only happen if @memcg matches the current cpu's memcg
1897 * stock, and at least @nr_pages are available in that stock. Failure to
1898 * service an allocation will refill the stock.
1900 * Returns true if successful, false otherwise.
1902 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
1904 struct memcg_stock_pcp *stock;
1907 if (nr_pages > CHARGE_BATCH)
1910 stock = &get_cpu_var(memcg_stock);
1911 if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
1912 stock->nr_pages -= nr_pages;
1915 put_cpu_var(memcg_stock);
1920 * Return stock cached in the percpu area to the counters and reset cached information.
1922 static void drain_stock(struct memcg_stock_pcp *stock)
1924 struct mem_cgroup *old = stock->cached;
1926 if (stock->nr_pages) {
1927 page_counter_uncharge(&old->memory, stock->nr_pages);
1928 if (do_swap_account)
1929 page_counter_uncharge(&old->memsw, stock->nr_pages);
1930 css_put_many(&old->css, stock->nr_pages);
1931 stock->nr_pages = 0;
1933 stock->cached = NULL;
1937 * This must be called with preemption disabled, or by
1938 * a thread which is pinned to the local cpu.
1940 static void drain_local_stock(struct work_struct *dummy)
1942 struct memcg_stock_pcp *stock = this_cpu_ptr(&memcg_stock);
1944 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
1948 * Cache charges (nr_pages) in the local per-cpu area.
1949 * They will be consumed by the consume_stock() function later.
1951 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
1953 struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
1955 if (stock->cached != memcg) { /* reset if necessary */
1957 stock->cached = memcg;
1959 stock->nr_pages += nr_pages;
1960 put_cpu_var(memcg_stock);
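/*
 * How the stock is used by the charge path (a sketch of what
 * try_charge() below does): consume_stock() is the fast path; on a
 * slow-path success a full batch is charged and the surplus is
 * parked for the next caller:
 *
 *	if (consume_stock(memcg, nr_pages))
 *		return 0;
 *	... charge 'batch' pages to the page counters ...
 *	if (batch > nr_pages)
 *		refill_stock(memcg, batch - nr_pages);
 */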
1964 * Drain all per-CPU charge caches for the given root_memcg and the
1965 * subtree of the hierarchy under it.
1967 static void drain_all_stock(struct mem_cgroup *root_memcg)
1971 /* If someone's already draining, avoid running more workers. */
1972 if (!mutex_trylock(&percpu_charge_mutex))
1974 /* Notify other cpus that system-wide "drain" is running */
1977 for_each_online_cpu(cpu) {
1978 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
1979 struct mem_cgroup *memcg;
1981 memcg = stock->cached;
1982 if (!memcg || !stock->nr_pages)
1984 if (!mem_cgroup_is_descendant(memcg, root_memcg))
1986 if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
1988 drain_local_stock(&stock->work);
1990 schedule_work_on(cpu, &stock->work);
1995 mutex_unlock(&percpu_charge_mutex);
1998 static int memcg_cpu_hotplug_callback(struct notifier_block *nb,
1999 unsigned long action,
2002 int cpu = (unsigned long)hcpu;
2003 struct memcg_stock_pcp *stock;
2005 if (action == CPU_ONLINE)
2008 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
2011 stock = &per_cpu(memcg_stock, cpu);
2017 * Scheduled by try_charge() to be executed from the userland return path
2018 * in order to reclaim memory over the high limit.
2020 void mem_cgroup_handle_over_high(void)
2022 unsigned int nr_pages = current->memcg_nr_pages_over_high;
2023 struct mem_cgroup *memcg, *pos;
2025 if (likely(!nr_pages))
2028 pos = memcg = get_mem_cgroup_from_mm(current->mm);
2031 if (page_counter_read(&pos->memory) <= pos->high)
2033 mem_cgroup_events(pos, MEMCG_HIGH, 1);
2034 try_to_free_mem_cgroup_pages(pos, nr_pages, GFP_KERNEL, true);
2035 } while ((pos = parent_mem_cgroup(pos)));
2037 css_put(&memcg->css);
2038 current->memcg_nr_pages_over_high = 0;
2041 static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2042 unsigned int nr_pages)
2044 unsigned int batch = max(CHARGE_BATCH, nr_pages);
2045 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
2046 struct mem_cgroup *mem_over_limit;
2047 struct page_counter *counter;
2048 unsigned long nr_reclaimed;
2049 bool may_swap = true;
2050 bool drained = false;
2052 if (mem_cgroup_is_root(memcg))
2055 if (consume_stock(memcg, nr_pages))
2058 if (!do_swap_account ||
2059 page_counter_try_charge(&memcg->memsw, batch, &counter)) {
2060 if (page_counter_try_charge(&memcg->memory, batch, &counter))
2062 if (do_swap_account)
2063 page_counter_uncharge(&memcg->memsw, batch);
2064 mem_over_limit = mem_cgroup_from_counter(counter, memory);
2066 mem_over_limit = mem_cgroup_from_counter(counter, memsw);
2070 if (batch > nr_pages) {
2076 * Unlike in global OOM situations, memcg is not in a physical
2077 * memory shortage. Allow dying and OOM-killed tasks to
2078 * bypass the last charges so that they can exit quickly and
2079 * free their memory.
2081 if (unlikely(test_thread_flag(TIF_MEMDIE) ||
2082 fatal_signal_pending(current) ||
2083 current->flags & PF_EXITING))
2086 if (unlikely(task_in_memcg_oom(current)))
2089 if (!gfpflags_allow_blocking(gfp_mask))
2092 mem_cgroup_events(mem_over_limit, MEMCG_MAX, 1);
2094 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
2095 gfp_mask, may_swap);
2097 if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2101 drain_all_stock(mem_over_limit);
2106 if (gfp_mask & __GFP_NORETRY)
2109 * Even though the limit is exceeded at this point, reclaim
2110 * may have been able to free some pages. Retry the charge
2111 * before killing the task.
2113 * Only for regular pages, though: huge pages are rather
2114 * unlikely to succeed so close to the limit, and we fall back
2115 * to regular pages anyway in case of failure.
2117 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
2120 * During task move, charges can be double-counted. So, it's
2121 * better to wait until the end of task_move if something is going on.
2123 if (mem_cgroup_wait_acct_move(mem_over_limit))
2129 if (gfp_mask & __GFP_NOFAIL)
2132 if (fatal_signal_pending(current))
2135 mem_cgroup_events(mem_over_limit, MEMCG_OOM, 1);
2137 mem_cgroup_oom(mem_over_limit, gfp_mask,
2138 get_order(nr_pages * PAGE_SIZE));
2140 if (!(gfp_mask & __GFP_NOFAIL))
2144 * The allocation either can't fail or will lead to more memory
2145 * being freed very soon. Allow memory usage to go over the limit
2146 * temporarily by force-charging it.
2148 page_counter_charge(&memcg->memory, nr_pages);
2149 if (do_swap_account)
2150 page_counter_charge(&memcg->memsw, nr_pages);
2151 css_get_many(&memcg->css, nr_pages);
2156 css_get_many(&memcg->css, batch);
2157 if (batch > nr_pages)
2158 refill_stock(memcg, batch - nr_pages);
2161 * If the hierarchy is above the normal consumption range, schedule
2162 * reclaim on returning to userland. We can perform reclaim here
2163 * if __GFP_RECLAIM but let's always punt for simplicity and so that
2164 * GFP_KERNEL can consistently be used during reclaim. @memcg is
2165 * not recorded as it most likely matches current's and won't
2166 * change in the meantime. As high limit is checked again before
2167 * reclaim, the cost of mismatch is negligible.
2170 if (page_counter_read(&memcg->memory) > memcg->high) {
2171 current->memcg_nr_pages_over_high += batch;
2172 set_notify_resume(current);
2175 } while ((memcg = parent_mem_cgroup(memcg)));
2180 static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
2182 if (mem_cgroup_is_root(memcg))
2185 page_counter_uncharge(&memcg->memory, nr_pages);
2186 if (do_swap_account)
2187 page_counter_uncharge(&memcg->memsw, nr_pages);
2189 css_put_many(&memcg->css, nr_pages);
2192 static void lock_page_lru(struct page *page, int *isolated)
2194 struct zone *zone = page_zone(page);
2196 spin_lock_irq(&zone->lru_lock);
2197 if (PageLRU(page)) {
2198 struct lruvec *lruvec;
2200 lruvec = mem_cgroup_page_lruvec(page, zone);
2202 del_page_from_lru_list(page, lruvec, page_lru(page));
2208 static void unlock_page_lru(struct page *page, int isolated)
2210 struct zone *zone = page_zone(page);
2213 struct lruvec *lruvec;
2215 lruvec = mem_cgroup_page_lruvec(page, zone);
2216 VM_BUG_ON_PAGE(PageLRU(page), page);
2218 add_page_to_lru_list(page, lruvec, page_lru(page));
2220 spin_unlock_irq(&zone->lru_lock);
2223 static void commit_charge(struct page *page, struct mem_cgroup *memcg,
2228 VM_BUG_ON_PAGE(page->mem_cgroup, page);
2231 * In some cases (SwapCache, and FUSE via splice_buf->radixtree), the page
2232 * may already be on some other mem_cgroup's LRU. Take care of it.
2235 lock_page_lru(page, &isolated);
2238 * Nobody should be changing or seriously looking at
2239 * page->mem_cgroup at this point:
2241 * - the page is uncharged
2243 * - the page is off-LRU
2245 * - an anonymous fault has exclusive page access, except for
2246 * a locked page table
2248 * - a page cache insertion, a swapin fault, or a migration
2249 * have the page locked
2251 page->mem_cgroup = memcg;
2254 unlock_page_lru(page, isolated);
2257 #ifdef CONFIG_MEMCG_KMEM
2258 static int memcg_alloc_cache_id(void)
2263 id = ida_simple_get(&memcg_cache_ida,
2264 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
2268 if (id < memcg_nr_cache_ids)
2272 * There's no space for the new id in memcg_caches arrays,
2273 * so we have to grow them.
2275 down_write(&memcg_cache_ids_sem);
2277 size = 2 * (id + 1);
2278 if (size < MEMCG_CACHES_MIN_SIZE)
2279 size = MEMCG_CACHES_MIN_SIZE;
2280 else if (size > MEMCG_CACHES_MAX_SIZE)
2281 size = MEMCG_CACHES_MAX_SIZE;
2283 err = memcg_update_all_caches(size);
2285 err = memcg_update_all_list_lrus(size);
2287 memcg_nr_cache_ids = size;
2289 up_write(&memcg_cache_ids_sem);
2292 ida_simple_remove(&memcg_cache_ida, id);
2298 static void memcg_free_cache_id(int id)
2300 ida_simple_remove(&memcg_cache_ida, id);
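/*
 * The growth rule in memcg_alloc_cache_id() above, written out as a
 * stand-alone helper purely for illustration (a sketch, not kernel
 * code): e.g. id 3 yields 2 * (3 + 1) = 8 array slots, clamped to
 * [MEMCG_CACHES_MIN_SIZE, MEMCG_CACHES_MAX_SIZE].
 */
static int __maybe_unused example_cache_array_size(int id)
{
	int size = 2 * (id + 1);

	return clamp(size, (int)MEMCG_CACHES_MIN_SIZE,
		     (int)MEMCG_CACHES_MAX_SIZE);
}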
2303 struct memcg_kmem_cache_create_work {
2304 struct mem_cgroup *memcg;
2305 struct kmem_cache *cachep;
2306 struct work_struct work;
2309 static void memcg_kmem_cache_create_func(struct work_struct *w)
2311 struct memcg_kmem_cache_create_work *cw =
2312 container_of(w, struct memcg_kmem_cache_create_work, work);
2313 struct mem_cgroup *memcg = cw->memcg;
2314 struct kmem_cache *cachep = cw->cachep;
2316 memcg_create_kmem_cache(memcg, cachep);
2318 css_put(&memcg->css);
2323 * Enqueue the creation of a per-memcg kmem_cache.
2325 static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
2326 struct kmem_cache *cachep)
2328 struct memcg_kmem_cache_create_work *cw;
2330 cw = kmalloc(sizeof(*cw), GFP_NOWAIT);
2334 css_get(&memcg->css);
2337 cw->cachep = cachep;
2338 INIT_WORK(&cw->work, memcg_kmem_cache_create_func);
2340 schedule_work(&cw->work);
2343 static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
2344 struct kmem_cache *cachep)
2347 * We need to stop accounting when we kmalloc, because if the
2348 * corresponding kmalloc cache is not yet created, the first allocation
2349 * in __memcg_schedule_kmem_cache_create will recurse.
2351 * However, it is better to enclose the whole function. Depending on
2352 * the debugging options enabled, INIT_WORK(), for instance, can
2353 * trigger an allocation. This, too, will make us recurse. Because at
2354 * this point we can't allow ourselves back into memcg_kmem_get_cache,
2355 * the safest choice is to do it like this, wrapping the whole function.
2357 current->memcg_kmem_skip_account = 1;
2358 __memcg_schedule_kmem_cache_create(memcg, cachep);
2359 current->memcg_kmem_skip_account = 0;
2363 * Return the kmem_cache we're supposed to use for a slab allocation.
2364 * We try to use the current memcg's version of the cache.
2366 * If the cache does not exist yet and we are its first user,
2367 * we either create it immediately, if possible, or create it asynchronously
2369 * In the latter case, we will let the current allocation go through with
2370 * the original cache.
2372 * Can't be called in interrupt context or from kernel threads.
2373 * This function needs to be called with rcu_read_lock() held.
2375 struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
2377 struct mem_cgroup *memcg;
2378 struct kmem_cache *memcg_cachep;
2381 VM_BUG_ON(!is_root_cache(cachep));
2383 if (cachep->flags & SLAB_ACCOUNT)
2384 gfp |= __GFP_ACCOUNT;
2386 if (!(gfp & __GFP_ACCOUNT))
2389 if (current->memcg_kmem_skip_account)
2392 memcg = get_mem_cgroup_from_mm(current->mm);
2393 kmemcg_id = READ_ONCE(memcg->kmemcg_id);
2397 memcg_cachep = cache_from_memcg_idx(cachep, kmemcg_id);
2398 if (likely(memcg_cachep))
2399 return memcg_cachep;
2402 * If we are in a safe context (can wait, and not in interrupt
2403 * context), we could be predictable and return right away.
2404 * This would guarantee that the allocation being performed
2405 * already belongs in the new cache.
2407 * However, there are some clashes that can arise from locking.
2408 * For instance, because we acquire the slab_mutex while doing
2409 * memcg_create_kmem_cache, this means no further allocation
2410 * could happen with the slab_mutex held. So it's better to defer the cache creation to a workqueue.
2413 memcg_schedule_kmem_cache_create(memcg, cachep);
2415 css_put(&memcg->css);
2419 void __memcg_kmem_put_cache(struct kmem_cache *cachep)
2421 if (!is_root_cache(cachep))
2422 css_put(&cachep->memcg_params.memcg->css);
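/*
 * Illustrative bracket around a slab allocation (simplified; the real
 * call sites live in the slab allocator, and the wrapper names
 * memcg_kmem_get_cache()/memcg_kmem_put_cache() are assumed from
 * memcontrol.h):
 *
 *	cachep = memcg_kmem_get_cache(cachep, gfp);
 *	objp = allocate from cachep (allocator internal);
 *	memcg_kmem_put_cache(cachep);
 *
 * i.e. the css reference taken in __memcg_kmem_get_cache() is held
 * only for the duration of the allocation.
 */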
2425 int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
2426 struct mem_cgroup *memcg)
2428 unsigned int nr_pages = 1 << order;
2429 struct page_counter *counter;
2432 if (!memcg_kmem_is_active(memcg))
2435 if (!page_counter_try_charge(&memcg->kmem, nr_pages, &counter))
2438 ret = try_charge(memcg, gfp, nr_pages);
2440 page_counter_uncharge(&memcg->kmem, nr_pages);
2444 page->mem_cgroup = memcg;
2449 int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
2451 struct mem_cgroup *memcg;
2454 memcg = get_mem_cgroup_from_mm(current->mm);
2455 ret = __memcg_kmem_charge_memcg(page, gfp, order, memcg);
2456 css_put(&memcg->css);
2460 void __memcg_kmem_uncharge(struct page *page, int order)
2462 struct mem_cgroup *memcg = page->mem_cgroup;
2463 unsigned int nr_pages = 1 << order;
2468 VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
2470 page_counter_uncharge(&memcg->kmem, nr_pages);
2471 page_counter_uncharge(&memcg->memory, nr_pages);
2472 if (do_swap_account)
2473 page_counter_uncharge(&memcg->memsw, nr_pages);
2475 page->mem_cgroup = NULL;
2476 css_put_many(&memcg->css, nr_pages);
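/*
 * Illustrative lifetime of a kmem page charge (a sketch, not a real
 * call site; the pairing with alloc_pages()/__free_pages() here is an
 * assumption for illustration):
 */
static struct page * __maybe_unused example_kmem_page_alloc(gfp_t gfp,
							    int order)
{
	struct page *page = alloc_pages(gfp, order);

	if (page && __memcg_kmem_charge(page, gfp, order)) {
		__free_pages(page, order);	/* charge failed: back out */
		page = NULL;
	}
	/* the eventual free path calls __memcg_kmem_uncharge(page, order) */
	return page;
}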
2478 #endif /* CONFIG_MEMCG_KMEM */
2480 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2483 * Because tail pages are not marked as "used", set it. We're under
2484 * zone->lru_lock, 'splitting on pmd' and compound_lock.
2485 * charge/uncharge will never happen and move_account() is done under
2486 * compound_lock(), so we don't have to take care of races.
2488 void mem_cgroup_split_huge_fixup(struct page *head)
2492 if (mem_cgroup_disabled())
2495 for (i = 1; i < HPAGE_PMD_NR; i++)
2496 head[i].mem_cgroup = head->mem_cgroup;
2498 __this_cpu_sub(head->mem_cgroup->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
2501 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
2503 #ifdef CONFIG_MEMCG_SWAP
2504 static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
2507 int val = (charge) ? 1 : -1;
2508 this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAP], val);
2512 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
2513 * @entry: swap entry to be moved
2514 * @from: mem_cgroup which the entry is moved from
2515 * @to: mem_cgroup which the entry is moved to
2517 * It succeeds only when the swap_cgroup's record for this entry is the same
2518 * as the mem_cgroup's id of @from.
2520 * Returns 0 on success, -EINVAL on failure.
2522 * The caller must have charged to @to, IOW, called page_counter_charge() for
2523 * both res and memsw, and called css_get().
2525 static int mem_cgroup_move_swap_account(swp_entry_t entry,
2526 struct mem_cgroup *from, struct mem_cgroup *to)
2528 unsigned short old_id, new_id;
2530 old_id = mem_cgroup_id(from);
2531 new_id = mem_cgroup_id(to);
2533 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
2534 mem_cgroup_swap_statistics(from, false);
2535 mem_cgroup_swap_statistics(to, true);
2541 static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
2542 struct mem_cgroup *from, struct mem_cgroup *to)
2548 static DEFINE_MUTEX(memcg_limit_mutex);
2550 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
2551 unsigned long limit)
2553 unsigned long curusage;
2554 unsigned long oldusage;
2555 bool enlarge = false;
2560 * To keep hierarchical_reclaim simple, how long we should retry
2561 * depends on the caller. We set our retry-count to be a function
2562 * of the number of children we should visit in this loop.
2564 retry_count = MEM_CGROUP_RECLAIM_RETRIES *
2565 mem_cgroup_count_children(memcg);
2567 oldusage = page_counter_read(&memcg->memory);
2570 if (signal_pending(current)) {
2575 mutex_lock(&memcg_limit_mutex);
2576 if (limit > memcg->memsw.limit) {
2577 mutex_unlock(&memcg_limit_mutex);
2581 if (limit > memcg->memory.limit)
2583 ret = page_counter_limit(&memcg->memory, limit);
2584 mutex_unlock(&memcg_limit_mutex);
2589 try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, true);
2591 curusage = page_counter_read(&memcg->memory);
2592 /* Usage is reduced ? */
2593 if (curusage >= oldusage)
2596 oldusage = curusage;
2597 } while (retry_count);
2599 if (!ret && enlarge)
2600 memcg_oom_recover(memcg);
2605 static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
2606 unsigned long limit)
2608 unsigned long curusage;
2609 unsigned long oldusage;
2610 bool enlarge = false;
2614 /* see mem_cgroup_resize_limit */
2615 retry_count = MEM_CGROUP_RECLAIM_RETRIES *
2616 mem_cgroup_count_children(memcg);
2618 oldusage = page_counter_read(&memcg->memsw);
2621 if (signal_pending(current)) {
2626 mutex_lock(&memcg_limit_mutex);
2627 if (limit < memcg->memory.limit) {
2628 mutex_unlock(&memcg_limit_mutex);
2632 if (limit > memcg->memsw.limit)
2634 ret = page_counter_limit(&memcg->memsw, limit);
2635 mutex_unlock(&memcg_limit_mutex);
2640 try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, false);
2642 curusage = page_counter_read(&memcg->memsw);
2643 /* Usage is reduced ? */
2644 if (curusage >= oldusage)
2647 oldusage = curusage;
2648 } while (retry_count);
2650 if (!ret && enlarge)
2651 memcg_oom_recover(memcg);
2656 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
2658 unsigned long *total_scanned)
2660 unsigned long nr_reclaimed = 0;
2661 struct mem_cgroup_per_zone *mz, *next_mz = NULL;
2662 unsigned long reclaimed;
2664 struct mem_cgroup_tree_per_zone *mctz;
2665 unsigned long excess;
2666 unsigned long nr_scanned;
2671 mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
2673 * This loop can run for a while, especially if mem_cgroups continuously
2674 * keep exceeding their soft limit and putting the system under pressure.
2681 mz = mem_cgroup_largest_soft_limit_node(mctz);
2686 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, zone,
2687 gfp_mask, &nr_scanned);
2688 nr_reclaimed += reclaimed;
2689 *total_scanned += nr_scanned;
2690 spin_lock_irq(&mctz->lock);
2691 __mem_cgroup_remove_exceeded(mz, mctz);
2694 * If we failed to reclaim anything from this memory cgroup
2695 * it is time to move on to the next cgroup
2699 next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
2701 excess = soft_limit_excess(mz->memcg);
2703 * One school of thought says that we should not add
2704 * the node back to the tree if reclaim returns 0.
2705 * But our reclaim could return 0 simply because, due
2706 * to priority, we are exposing a smaller subset of
2707 * memory to reclaim from. Consider this a longer-term strategy.
2710 /* If excess == 0, no tree ops */
2711 __mem_cgroup_insert_exceeded(mz, mctz, excess);
2712 spin_unlock_irq(&mctz->lock);
2713 css_put(&mz->memcg->css);
2716 * Could not reclaim anything and there are no more
2717 * mem cgroups to try or we seem to be looping without
2718 * reclaiming anything.
2720 if (!nr_reclaimed &&
2722 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
2724 } while (!nr_reclaimed);
2726 css_put(&next_mz->memcg->css);
2727 return nr_reclaimed;
2731 * Test whether @memcg has children, dead or alive. Note that this
2732 * function doesn't care whether @memcg has use_hierarchy enabled and
2733 * returns %true if there are child csses according to the cgroup
2734 * hierarchy. Testing use_hierarchy is the caller's responsibility.
2736 static inline bool memcg_has_children(struct mem_cgroup *memcg)
2741 * The lock does not prevent addition or deletion of children, but
2742 * it prevents a new child from being initialized based on this
2743 * parent in css_online(), so it's enough to decide whether
2744 * hierarchically inherited attributes can still be changed or not.
2746 lockdep_assert_held(&memcg_create_mutex);
2749 ret = css_next_child(NULL, &memcg->css);
2755 * Reclaims as many pages from the given memcg as possible and moves
2756 * the rest to the parent.
2758 * The caller is responsible for holding a css reference on @memcg.
2760 static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
2762 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
2764 /* we call try-to-free pages to make this cgroup empty */
2765 lru_add_drain_all();
2766 /* try to free all pages in this cgroup */
2767 while (nr_retries && page_counter_read(&memcg->memory)) {
2770 if (signal_pending(current))
2773 progress = try_to_free_mem_cgroup_pages(memcg, 1,
2777 /* maybe some writeback is necessary */
2778 congestion_wait(BLK_RW_ASYNC, HZ/10);
2786 static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
2787 char *buf, size_t nbytes,
2790 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
2792 if (mem_cgroup_is_root(memcg))
2794 return mem_cgroup_force_empty(memcg) ?: nbytes;
2797 static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
2800 return mem_cgroup_from_css(css)->use_hierarchy;
2803 static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
2804 struct cftype *cft, u64 val)
2807 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
2808 struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent);
2810 mutex_lock(&memcg_create_mutex);
2812 if (memcg->use_hierarchy == val)
2816 * If the parent's use_hierarchy is set, we can't make any modifications
2817 * in the child subtrees. If it is unset, then the change can
2818 * occur, provided the current cgroup has no children.
2820 * For the root cgroup, parent_memcg is NULL; we allow the value to be
2821 * set if there are no children.
2823 if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
2824 (val == 1 || val == 0)) {
2825 if (!memcg_has_children(memcg))
2826 memcg->use_hierarchy = val;
2833 mutex_unlock(&memcg_create_mutex);
2838 static unsigned long tree_stat(struct mem_cgroup *memcg,
2839 enum mem_cgroup_stat_index idx)
2841 struct mem_cgroup *iter;
2842 unsigned long val = 0;
2844 for_each_mem_cgroup_tree(iter, memcg)
2845 val += mem_cgroup_read_stat(iter, idx);
2850 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
2854 if (mem_cgroup_is_root(memcg)) {
2855 val = tree_stat(memcg, MEM_CGROUP_STAT_CACHE);
2856 val += tree_stat(memcg, MEM_CGROUP_STAT_RSS);
2858 val += tree_stat(memcg, MEM_CGROUP_STAT_SWAP);
2861 val = page_counter_read(&memcg->memory);
2863 val = page_counter_read(&memcg->memsw);
2876 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
2879 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
2880 struct page_counter *counter;
2882 switch (MEMFILE_TYPE(cft->private)) {
2884 counter = &memcg->memory;
2887 counter = &memcg->memsw;
2890 counter = &memcg->kmem;
2896 switch (MEMFILE_ATTR(cft->private)) {
2898 if (counter == &memcg->memory)
2899 return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
2900 if (counter == &memcg->memsw)
2901 return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
2902 return (u64)page_counter_read(counter) * PAGE_SIZE;
2904 return (u64)counter->limit * PAGE_SIZE;
2906 return (u64)counter->watermark * PAGE_SIZE;
2908 return counter->failcnt;
2909 case RES_SOFT_LIMIT:
2910 return (u64)memcg->soft_limit * PAGE_SIZE;
2916 #ifdef CONFIG_MEMCG_KMEM
2917 static int memcg_activate_kmem(struct mem_cgroup *memcg,
2918 unsigned long nr_pages)
2923 BUG_ON(memcg->kmemcg_id >= 0);
2924 BUG_ON(memcg->kmem_acct_activated);
2925 BUG_ON(memcg->kmem_acct_active);
2928 * For simplicity, we won't allow this to be disabled. It also can't
2929 * be changed if the cgroup has children already, or if tasks had already joined.
2932 * If tasks join before we set the limit, a person looking at
2933 * kmem.usage_in_bytes will have no way to determine when it took
2934 * place, which makes the value quite meaningless.
2936 * After it first became limited, changes in the value of the limit are
2937 * of course permitted.
2939 mutex_lock(&memcg_create_mutex);
2940 if (cgroup_is_populated(memcg->css.cgroup) ||
2941 (memcg->use_hierarchy && memcg_has_children(memcg)))
2943 mutex_unlock(&memcg_create_mutex);
2947 memcg_id = memcg_alloc_cache_id();
2954 * We couldn't have accounted to this cgroup, because it hasn't been
2955 * activated yet, so this should succeed.
2957 err = page_counter_limit(&memcg->kmem, nr_pages);
2960 static_key_slow_inc(&memcg_kmem_enabled_key);
2962 * A memory cgroup is considered kmem-active as soon as it gets
2963 * kmemcg_id. Setting the id after enabling static branching will
2964 * guarantee no one starts accounting before all call sites are patched.
2967 memcg->kmemcg_id = memcg_id;
2968 memcg->kmem_acct_activated = true;
2969 memcg->kmem_acct_active = true;
2974 static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
2975 unsigned long limit)
2979 mutex_lock(&memcg_limit_mutex);
2980 if (!memcg_kmem_is_active(memcg))
2981 ret = memcg_activate_kmem(memcg, limit);
2983 ret = page_counter_limit(&memcg->kmem, limit);
2984 mutex_unlock(&memcg_limit_mutex);
2988 static int memcg_propagate_kmem(struct mem_cgroup *memcg)
2991 struct mem_cgroup *parent = parent_mem_cgroup(memcg);
2996 mutex_lock(&memcg_limit_mutex);
2998 * If the parent cgroup is not kmem-active now, it cannot be activated
2999 * after this point, because it has at least one child already.
3001 if (memcg_kmem_is_active(parent))
3002 ret = memcg_activate_kmem(memcg, PAGE_COUNTER_MAX);
3003 mutex_unlock(&memcg_limit_mutex);
3007 static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
3008 unsigned long limit)
3012 #endif /* CONFIG_MEMCG_KMEM */
3015 * The user of this function is...
3018 static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
3019 char *buf, size_t nbytes, loff_t off)
3021 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3022 unsigned long nr_pages;
3025 buf = strstrip(buf);
3026 ret = page_counter_memparse(buf, "-1", &nr_pages);
3030 switch (MEMFILE_ATTR(of_cft(of)->private)) {
3032 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
3036 switch (MEMFILE_TYPE(of_cft(of)->private)) {
3038 ret = mem_cgroup_resize_limit(memcg, nr_pages);
3041 ret = mem_cgroup_resize_memsw_limit(memcg, nr_pages);
3044 ret = memcg_update_kmem_limit(memcg, nr_pages);
3048 case RES_SOFT_LIMIT:
3049 memcg->soft_limit = nr_pages;
3053 return ret ?: nbytes;
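/*
 * Illustrative userspace view of this handler (hypothetical cgroup
 * path, v1 hierarchy assumed; error handling omitted):
 *
 *	int fd = open("/sys/fs/cgroup/memory/grp/memory.limit_in_bytes",
 *		      O_WRONLY);
 *	dprintf(fd, "%llu", 512ULL << 20);	sets the limit to 512M
 *	write(fd, "-1", 2);			"-1" removes the limit
 *
 * page_counter_memparse() above accepts plain byte counts, memparse()
 * suffixes such as k/m/g, and the special string "-1" for "no limit".
 */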
3056 static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
3057 size_t nbytes, loff_t off)
3059 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3060 struct page_counter *counter;
3062 switch (MEMFILE_TYPE(of_cft(of)->private)) {
3064 counter = &memcg->memory;
3067 counter = &memcg->memsw;
3070 counter = &memcg->kmem;
3076 switch (MEMFILE_ATTR(of_cft(of)->private)) {
3078 page_counter_reset_watermark(counter);
3081 counter->failcnt = 0;
3090 static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
3093 return mem_cgroup_from_css(css)->move_charge_at_immigrate;
3097 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3098 struct cftype *cft, u64 val)
3100 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3102 if (val & ~MOVE_MASK)
3106 * No kind of locking is needed in here, because ->can_attach() will
3107 * check this value once in the beginning of the process, and then carry
3108 * on with stale data. This means that changes to this value will only
3109 * affect task migrations starting after the change.
3111 memcg->move_charge_at_immigrate = val;
3115 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3116 struct cftype *cft, u64 val)
3123 static int memcg_numa_stat_show(struct seq_file *m, void *v)
3127 unsigned int lru_mask;
3130 static const struct numa_stat stats[] = {
3131 { "total", LRU_ALL },
3132 { "file", LRU_ALL_FILE },
3133 { "anon", LRU_ALL_ANON },
3134 { "unevictable", BIT(LRU_UNEVICTABLE) },
3136 const struct numa_stat *stat;
3139 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
3141 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3142 nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask);
3143 seq_printf(m, "%s=%lu", stat->name, nr);
3144 for_each_node_state(nid, N_MEMORY) {
3145 nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
3147 seq_printf(m, " N%d=%lu", nid, nr);
3152 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3153 struct mem_cgroup *iter;
3156 for_each_mem_cgroup_tree(iter, memcg)
3157 nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask);
3158 seq_printf(m, "hierarchical_%s=%lu", stat->name, nr);
3159 for_each_node_state(nid, N_MEMORY) {
3161 for_each_mem_cgroup_tree(iter, memcg)
3162 nr += mem_cgroup_node_nr_lru_pages(
3163 iter, nid, stat->lru_mask);
3164 seq_printf(m, " N%d=%lu", nid, nr);
3171 #endif /* CONFIG_NUMA */
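/*
 * Sample memory.numa_stat output from the numa_stat handler above
 * (page counts illustrative, two-node machine assumed):
 *
 *	total=279 N0=200 N1=79
 *	file=105 N0=80 N1=25
 *	anon=173 N0=119 N1=54
 *	unevictable=1 N0=1 N1=0
 *	hierarchical_total=... (same layout, summed over the subtree)
 */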
3173 static int memcg_stat_show(struct seq_file *m, void *v)
3175 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
3176 unsigned long memory, memsw;
3177 struct mem_cgroup *mi;
3180 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_stat_names) !=
3181 MEM_CGROUP_STAT_NSTATS);
3182 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_events_names) !=
3183 MEM_CGROUP_EVENTS_NSTATS);
3184 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
3186 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
3187 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
3189 seq_printf(m, "%s %lu\n", mem_cgroup_stat_names[i],
3190 mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
3193 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++)
3194 seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i],
3195 mem_cgroup_read_events(memcg, i));
3197 for (i = 0; i < NR_LRU_LISTS; i++)
3198 seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
3199 mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE);
3201 /* Hierarchical information */
3202 memory = memsw = PAGE_COUNTER_MAX;
3203 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
3204 memory = min(memory, mi->memory.limit);
3205 memsw = min(memsw, mi->memsw.limit);
3207 seq_printf(m, "hierarchical_memory_limit %llu\n",
3208 (u64)memory * PAGE_SIZE);
3209 if (do_swap_account)
3210 seq_printf(m, "hierarchical_memsw_limit %llu\n",
3211 (u64)memsw * PAGE_SIZE);
3213 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
3214 unsigned long long val = 0;
3216 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
3218 for_each_mem_cgroup_tree(mi, memcg)
3219 val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
3220 seq_printf(m, "total_%s %llu\n", mem_cgroup_stat_names[i], val);
3223 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
3224 unsigned long long val = 0;
3226 for_each_mem_cgroup_tree(mi, memcg)
3227 val += mem_cgroup_read_events(mi, i);
3228 seq_printf(m, "total_%s %llu\n",
3229 mem_cgroup_events_names[i], val);
3232 for (i = 0; i < NR_LRU_LISTS; i++) {
3233 unsigned long long val = 0;
3235 for_each_mem_cgroup_tree(mi, memcg)
3236 val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE;
3237 seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val);
3240 #ifdef CONFIG_DEBUG_VM
3243 struct mem_cgroup_per_zone *mz;
3244 struct zone_reclaim_stat *rstat;
3245 unsigned long recent_rotated[2] = {0, 0};
3246 unsigned long recent_scanned[2] = {0, 0};
3248 for_each_online_node(nid)
3249 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
3250 mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
3251 rstat = &mz->lruvec.reclaim_stat;
3253 recent_rotated[0] += rstat->recent_rotated[0];
3254 recent_rotated[1] += rstat->recent_rotated[1];
3255 recent_scanned[0] += rstat->recent_scanned[0];
3256 recent_scanned[1] += rstat->recent_scanned[1];
3258 seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
3259 seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
3260 seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
3261 seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
3268 static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
3271 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3273 return mem_cgroup_swappiness(memcg);
3276 static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
3277 struct cftype *cft, u64 val)
3279 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3285 memcg->swappiness = val;
3287 vm_swappiness = val;
3292 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
3294 struct mem_cgroup_threshold_ary *t;
3295 unsigned long usage;
3300 t = rcu_dereference(memcg->thresholds.primary);
3302 t = rcu_dereference(memcg->memsw_thresholds.primary);
3307 usage = mem_cgroup_usage(memcg, swap);
3310 * current_threshold points to the threshold just below or equal to usage.
3311 * If that is no longer true, a threshold was crossed after the last
3312 * call of __mem_cgroup_threshold().
3314 i = t->current_threshold;
3317 * Iterate backward over array of thresholds starting from
3318 * current_threshold and check if a threshold is crossed.
3319 * If none of the thresholds below usage is crossed, we read
3320 * only one element of the array here.
3322 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
3323 eventfd_signal(t->entries[i].eventfd, 1);
3325 /* i = current_threshold + 1 */
3329 * Iterate forward over array of thresholds starting from
3330 * current_threshold+1 and check if a threshold is crossed.
3331 * If none of the thresholds above usage is crossed, we read
3332 * only one element of the array here.
3334 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
3335 eventfd_signal(t->entries[i].eventfd, 1);
3337 /* Update current_threshold */
3338 t->current_threshold = i - 1;
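/*
 * Worked example with illustrative numbers: given sorted thresholds
 * {4M, 8M, 16M} and a previous usage of 6M, current_threshold points
 * at the 4M entry. If usage has grown to 12M by this call, the
 * backward scan signals nothing, the forward scan signals the 8M
 * entry, and current_threshold ends up on 8M. Had usage instead
 * dropped to 2M, the backward scan would signal 4M and
 * current_threshold would become -1 (no threshold at or below usage).
 */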
3343 static void mem_cgroup_threshold(struct mem_cgroup *memcg)
3346 __mem_cgroup_threshold(memcg, false);
3347 if (do_swap_account)
3348 __mem_cgroup_threshold(memcg, true);
3350 memcg = parent_mem_cgroup(memcg);
3354 static int compare_thresholds(const void *a, const void *b)
3356 const struct mem_cgroup_threshold *_a = a;
3357 const struct mem_cgroup_threshold *_b = b;
3359 if (_a->threshold > _b->threshold)
3362 if (_a->threshold < _b->threshold)
3368 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
3370 struct mem_cgroup_eventfd_list *ev;
3372 spin_lock(&memcg_oom_lock);
3374 list_for_each_entry(ev, &memcg->oom_notify, list)
3375 eventfd_signal(ev->eventfd, 1);
3377 spin_unlock(&memcg_oom_lock);
3381 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
3383 struct mem_cgroup *iter;
3385 for_each_mem_cgroup_tree(iter, memcg)
3386 mem_cgroup_oom_notify_cb(iter);
3389 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
3390 struct eventfd_ctx *eventfd, const char *args, enum res_type type)
3392 struct mem_cgroup_thresholds *thresholds;
3393 struct mem_cgroup_threshold_ary *new;
3394 unsigned long threshold;
3395 unsigned long usage;
3398 ret = page_counter_memparse(args, "-1", &threshold);
3402 mutex_lock(&memcg->thresholds_lock);
3405 thresholds = &memcg->thresholds;
3406 usage = mem_cgroup_usage(memcg, false);
3407 } else if (type == _MEMSWAP) {
3408 thresholds = &memcg->memsw_thresholds;
3409 usage = mem_cgroup_usage(memcg, true);
3413 /* Check whether a threshold was crossed before adding a new one */
3414 if (thresholds->primary)
3415 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3417 size = thresholds->primary ? thresholds->primary->size + 1 : 1;
3419 /* Allocate memory for new array of thresholds */
3420 new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
3428 /* Copy thresholds (if any) to new array */
3429 if (thresholds->primary) {
3430 memcpy(new->entries, thresholds->primary->entries, (size - 1) *
3431 sizeof(struct mem_cgroup_threshold));
3434 /* Add new threshold */
3435 new->entries[size - 1].eventfd = eventfd;
3436 new->entries[size - 1].threshold = threshold;
3438 /* Sort thresholds. Registering of new threshold isn't time-critical */
3439 sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
3440 compare_thresholds, NULL);
3442 /* Find current threshold */
3443 new->current_threshold = -1;
3444 for (i = 0; i < size; i++) {
3445 if (new->entries[i].threshold <= usage) {
3447 * new->current_threshold will not be used until
3448 * rcu_assign_pointer(), so it's safe to increment
3451 ++new->current_threshold;
3456 /* Free old spare buffer and save old primary buffer as spare */
3457 kfree(thresholds->spare);
3458 thresholds->spare = thresholds->primary;
3460 rcu_assign_pointer(thresholds->primary, new);
3462 /* To be sure that nobody uses thresholds */
3466 mutex_unlock(&memcg->thresholds_lock);
3471 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
3472 struct eventfd_ctx *eventfd, const char *args)
3474 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
3477 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
3478 struct eventfd_ctx *eventfd, const char *args)
3480 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
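/*
 * Illustrative userspace registration of a usage threshold through the
 * legacy cgroup.event_control interface (hypothetical paths, error
 * handling omitted):
 *
 *	int efd = eventfd(0, 0);
 *	int ufd = open("/sys/fs/cgroup/memory/grp/memory.usage_in_bytes",
 *		       O_RDONLY);
 *	int cfd = open("/sys/fs/cgroup/memory/grp/cgroup.event_control",
 *		       O_WRONLY);
 *	dprintf(cfd, "%d %d %llu", efd, ufd, 8ULL << 20);
 *
 * A subsequent read(efd, ...) blocks until usage crosses the 8M
 * threshold in either direction.
 */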
3483 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
3484 struct eventfd_ctx *eventfd, enum res_type type)
3486 struct mem_cgroup_thresholds *thresholds;
3487 struct mem_cgroup_threshold_ary *new;
3488 unsigned long usage;
3491 mutex_lock(&memcg->thresholds_lock);
3494 thresholds = &memcg->thresholds;
3495 usage = mem_cgroup_usage(memcg, false);
3496 } else if (type == _MEMSWAP) {
3497 thresholds = &memcg->memsw_thresholds;
3498 usage = mem_cgroup_usage(memcg, true);
3502 if (!thresholds->primary)
3505 /* Check whether a threshold was crossed before removing */
3506 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3508 /* Calculate the new number of thresholds */
3510 for (i = 0; i < thresholds->primary->size; i++) {
3511 if (thresholds->primary->entries[i].eventfd != eventfd)
3515 new = thresholds->spare;
3517 /* Set thresholds array to NULL if we don't have thresholds */
3526 /* Copy thresholds and find current threshold */
3527 new->current_threshold = -1;
3528 for (i = 0, j = 0; i < thresholds->primary->size; i++) {
3529 if (thresholds->primary->entries[i].eventfd == eventfd)
3532 new->entries[j] = thresholds->primary->entries[i];
3533 if (new->entries[j].threshold <= usage) {
3535 * new->current_threshold will not be used
3536 * until rcu_assign_pointer(), so it's safe to increment
3539 ++new->current_threshold;
3545 /* Swap primary and spare array */
3546 thresholds->spare = thresholds->primary;
3547 /* If all events are unregistered, free the spare array */
3549 kfree(thresholds->spare);
3550 thresholds->spare = NULL;
3553 rcu_assign_pointer(thresholds->primary, new);
3555 /* To be sure that nobody uses thresholds */
3558 mutex_unlock(&memcg->thresholds_lock);
3561 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
3562 struct eventfd_ctx *eventfd)
3564 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
3567 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
3568 struct eventfd_ctx *eventfd)
3570 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
3573 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
3574 struct eventfd_ctx *eventfd, const char *args)
3576 struct mem_cgroup_eventfd_list *event;
3578 event = kmalloc(sizeof(*event), GFP_KERNEL);
3582 spin_lock(&memcg_oom_lock);
3584 event->eventfd = eventfd;
3585 list_add(&event->list, &memcg->oom_notify);
3587 /* already under OOM? */
3588 if (memcg->under_oom)
3589 eventfd_signal(eventfd, 1);
3590 spin_unlock(&memcg_oom_lock);
3595 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
3596 struct eventfd_ctx *eventfd)
3598 struct mem_cgroup_eventfd_list *ev, *tmp;
3600 spin_lock(&memcg_oom_lock);
3602 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
3603 if (ev->eventfd == eventfd) {
3604 list_del(&ev->list);
3609 spin_unlock(&memcg_oom_lock);
3612 static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
3614 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
3616 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
3617 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
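/*
 * Sample memory.oom_control output from the handler above (values
 * illustrative):
 *
 *	oom_kill_disable 0
 *	under_oom 0
 */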
3621 static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
3622 struct cftype *cft, u64 val)
3624 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3626 /* cannot be set on the root cgroup; only 0 and 1 are allowed */
3627 if (!css->parent || !((val == 0) || (val == 1)))
3630 memcg->oom_kill_disable = val;
3632 memcg_oom_recover(memcg);
3637 #ifdef CONFIG_MEMCG_KMEM
3638 static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
3642 ret = memcg_propagate_kmem(memcg);
3646 return tcp_init_cgroup(memcg, ss);
3649 static void memcg_deactivate_kmem(struct mem_cgroup *memcg)
3651 struct cgroup_subsys_state *css;
3652 struct mem_cgroup *parent, *child;
3655 if (!memcg->kmem_acct_active)
3659 * Clear the 'active' flag before clearing memcg_caches array entries.
3660 * Since we take the slab_mutex in memcg_deactivate_kmem_caches(), it
3661 * guarantees no cache will be created for this cgroup after we are
3662 * done (see memcg_create_kmem_cache()).
3664 memcg->kmem_acct_active = false;
3666 memcg_deactivate_kmem_caches(memcg);
3668 kmemcg_id = memcg->kmemcg_id;
3669 BUG_ON(kmemcg_id < 0);
3671 parent = parent_mem_cgroup(memcg);
3673 parent = root_mem_cgroup;
3676 * Change kmemcg_id of this cgroup and all its descendants to the
3677 * parent's id, and then move all entries from this cgroup's list_lrus
3678 * to those of the parent. After we have finished, all list_lrus
3679 * corresponding to this cgroup are guaranteed to remain empty. The
3680 * ordering is imposed by list_lru_node->lock taken by
3681 * memcg_drain_all_list_lrus().
3683 css_for_each_descendant_pre(css, &memcg->css) {
3684 child = mem_cgroup_from_css(css);
3685 BUG_ON(child->kmemcg_id != kmemcg_id);
3686 child->kmemcg_id = parent->kmemcg_id;
3687 if (!memcg->use_hierarchy)
3690 memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id);
3692 memcg_free_cache_id(kmemcg_id);
3695 static void memcg_destroy_kmem(struct mem_cgroup *memcg)
3697 if (memcg->kmem_acct_activated) {
3698 memcg_destroy_kmem_caches(memcg);
3699 static_key_slow_dec(&memcg_kmem_enabled_key);
3700 WARN_ON(page_counter_read(&memcg->kmem));
3702 tcp_destroy_cgroup(memcg);
3705 static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
3710 static void memcg_deactivate_kmem(struct mem_cgroup *memcg)
3714 static void memcg_destroy_kmem(struct mem_cgroup *memcg)
3719 #ifdef CONFIG_CGROUP_WRITEBACK
3721 struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg)
3723 return &memcg->cgwb_list;
3726 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3728 return wb_domain_init(&memcg->cgwb_domain, gfp);
3731 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3733 wb_domain_exit(&memcg->cgwb_domain);
3736 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3738 wb_domain_size_changed(&memcg->cgwb_domain);
3741 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
3743 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3745 if (!memcg->css.parent)
3748 return &memcg->cgwb_domain;
3752 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
3753 * @wb: bdi_writeback in question
3754 * @pfilepages: out parameter for number of file pages
3755 * @pheadroom: out parameter for number of allocatable pages according to memcg
3756 * @pdirty: out parameter for number of dirty pages
3757 * @pwriteback: out parameter for number of pages under writeback
3759 * Determine the numbers of file, headroom, dirty, and writeback pages in
3760 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom
3761 * is a bit more involved.
3763 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the
3764 * headroom is calculated as the lowest headroom of itself and the
3765 * ancestors. Note that this doesn't consider the actual amount of
3766 * available memory in the system. The caller should further cap
3767 * *@pheadroom accordingly.
3769 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
3770 unsigned long *pheadroom, unsigned long *pdirty,
3771 unsigned long *pwriteback)
3773 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3774 struct mem_cgroup *parent;
3776 *pdirty = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_DIRTY);
3778 /* this should eventually include NR_UNSTABLE_NFS */
3779 *pwriteback = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);
3780 *pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) |
3781 (1 << LRU_ACTIVE_FILE));
3782 *pheadroom = PAGE_COUNTER_MAX;
3784 while ((parent = parent_mem_cgroup(memcg))) {
3785 unsigned long ceiling = min(memcg->memory.limit, memcg->high);
3786 unsigned long used = page_counter_read(&memcg->memory);
3788 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
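/*
 * Worked example with illustrative numbers: a child with limit 2G,
 * high unset (so its ceiling is 2G) and 300M used has 1748M of
 * headroom; its parent with limit 1G and 600M used has 424M. The loop
 * above reports the smaller value, 424M, since writeback can only use
 * what every ancestor can still accommodate.
 */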
3793 #else /* CONFIG_CGROUP_WRITEBACK */
3795 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3800 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3804 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3808 #endif /* CONFIG_CGROUP_WRITEBACK */
3811 * DO NOT USE IN NEW FILES.
3813 * "cgroup.event_control" implementation.
3815 * This is way over-engineered. It tries to support fully configurable
3816 * events for each user. Such a level of flexibility is completely
3817 * unnecessary, especially in light of the planned unified hierarchy.
3819 * Please deprecate this and replace it with something simpler if at all possible.
3824 * Unregister event and free resources.
3826 * Gets called from workqueue.
3828 static void memcg_event_remove(struct work_struct *work)
3830 struct mem_cgroup_event *event =
3831 container_of(work, struct mem_cgroup_event, remove);
3832 struct mem_cgroup *memcg = event->memcg;
3834 remove_wait_queue(event->wqh, &event->wait);
3836 event->unregister_event(memcg, event->eventfd);
3838 /* Notify userspace the event is going away. */
3839 eventfd_signal(event->eventfd, 1);
3841 eventfd_ctx_put(event->eventfd);
3843 css_put(&memcg->css);
3847 * Gets called on POLLHUP on eventfd when user closes it.
3849 * Called with wqh->lock held and interrupts disabled.
3851 static int memcg_event_wake(wait_queue_t *wait, unsigned mode,
3852 int sync, void *key)
3854 struct mem_cgroup_event *event =
3855 container_of(wait, struct mem_cgroup_event, wait);
3856 struct mem_cgroup *memcg = event->memcg;
3857 unsigned long flags = (unsigned long)key;
3859 if (flags & POLLHUP) {
3861 * If the event has been detached at cgroup removal, we
3862 * can simply return knowing the other side will cleanup
3865 * We can't race against event freeing since the other
3866 * side will require wqh->lock via remove_wait_queue(),
3869 spin_lock(&memcg->event_list_lock);
3870 if (!list_empty(&event->list)) {
3871 list_del_init(&event->list);
3873 * We are in atomic context, but memcg_event_remove()
3874 * may sleep, so we have to call it in a workqueue.
3876 schedule_work(&event->remove);
3878 spin_unlock(&memcg->event_list_lock);
3884 static void memcg_event_ptable_queue_proc(struct file *file,
3885 wait_queue_head_t *wqh, poll_table *pt)
3887 struct mem_cgroup_event *event =
3888 container_of(pt, struct mem_cgroup_event, pt);
3891 add_wait_queue(wqh, &event->wait);
3895 * DO NOT USE IN NEW FILES.
3897 * Parse input and register new cgroup event handler.
3899 * Input must be in format '<event_fd> <control_fd> <args>'.
3900 * Interpretation of args is defined by control file implementation.
3902 static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
3903 char *buf, size_t nbytes, loff_t off)
3905 struct cgroup_subsys_state *css = of_css(of);
3906 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3907 struct mem_cgroup_event *event;
3908 struct cgroup_subsys_state *cfile_css;
3909 unsigned int efd, cfd;
3916 buf = strstrip(buf);
3918 efd = simple_strtoul(buf, &endp, 10);
3923 cfd = simple_strtoul(buf, &endp, 10);
3924 if ((*endp != ' ') && (*endp != '\0'))
3928 event = kzalloc(sizeof(*event), GFP_KERNEL);
3932 event->memcg = memcg;
3933 INIT_LIST_HEAD(&event->list);
3934 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
3935 init_waitqueue_func_entry(&event->wait, memcg_event_wake);
3936 INIT_WORK(&event->remove, memcg_event_remove);
3944 event->eventfd = eventfd_ctx_fileget(efile.file);
3945 if (IS_ERR(event->eventfd)) {
3946 ret = PTR_ERR(event->eventfd);
3953 goto out_put_eventfd;
3956 /* the process needs read permission on the control file */
3957 /* AV: shouldn't we check that it's been opened for read instead? */
3958 ret = inode_permission(file_inode(cfile.file), MAY_READ);
3963 * Determine the event callbacks and set them in @event. This used
3964 * to be done via struct cftype but cgroup core no longer knows
3965 * about these events. The following is crude but the whole thing
3966 * is for compatibility anyway.
3968 * DO NOT ADD NEW FILES.
3970 name = cfile.file->f_path.dentry->d_name.name;
3972 if (!strcmp(name, "memory.usage_in_bytes")) {
3973 event->register_event = mem_cgroup_usage_register_event;
3974 event->unregister_event = mem_cgroup_usage_unregister_event;
3975 } else if (!strcmp(name, "memory.oom_control")) {
3976 event->register_event = mem_cgroup_oom_register_event;
3977 event->unregister_event = mem_cgroup_oom_unregister_event;
3978 } else if (!strcmp(name, "memory.pressure_level")) {
3979 event->register_event = vmpressure_register_event;
3980 event->unregister_event = vmpressure_unregister_event;
3981 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
3982 event->register_event = memsw_cgroup_usage_register_event;
3983 event->unregister_event = memsw_cgroup_usage_unregister_event;
3990 * Verify that @cfile belongs to @css. Also, remaining events are
3991 * automatically removed on cgroup destruction but the removal is
3992 * asynchronous, so take an extra ref on @css.
3994 cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
3995 &memory_cgrp_subsys);
3997 if (IS_ERR(cfile_css))
3999 if (cfile_css != css) {
4004 ret = event->register_event(memcg, event->eventfd, buf);
4008 efile.file->f_op->poll(efile.file, &event->pt);
4010 spin_lock(&memcg->event_list_lock);
4011 list_add(&event->list, &memcg->event_list);
4012 spin_unlock(&memcg->event_list_lock);
4024 eventfd_ctx_put(event->eventfd);
4033 static struct cftype mem_cgroup_legacy_files[] = {
4035 .name = "usage_in_bytes",
4036 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
4037 .read_u64 = mem_cgroup_read_u64,
4040 .name = "max_usage_in_bytes",
4041 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
4042 .write = mem_cgroup_reset,
4043 .read_u64 = mem_cgroup_read_u64,
4046 .name = "limit_in_bytes",
4047 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
4048 .write = mem_cgroup_write,
4049 .read_u64 = mem_cgroup_read_u64,
4052 .name = "soft_limit_in_bytes",
4053 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
4054 .write = mem_cgroup_write,
4055 .read_u64 = mem_cgroup_read_u64,
4059 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
4060 .write = mem_cgroup_reset,
4061 .read_u64 = mem_cgroup_read_u64,
4065 .seq_show = memcg_stat_show,
4068 .name = "force_empty",
4069 .write = mem_cgroup_force_empty_write,
4072 .name = "use_hierarchy",
4073 .write_u64 = mem_cgroup_hierarchy_write,
4074 .read_u64 = mem_cgroup_hierarchy_read,
4077 .name = "cgroup.event_control", /* XXX: for compat */
4078 .write = memcg_write_event_control,
4079 .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
4082 .name = "swappiness",
4083 .read_u64 = mem_cgroup_swappiness_read,
4084 .write_u64 = mem_cgroup_swappiness_write,
4087 .name = "move_charge_at_immigrate",
4088 .read_u64 = mem_cgroup_move_charge_read,
4089 .write_u64 = mem_cgroup_move_charge_write,
4092 .name = "oom_control",
4093 .seq_show = mem_cgroup_oom_control_read,
4094 .write_u64 = mem_cgroup_oom_control_write,
4095 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
4098 .name = "pressure_level",
4102 .name = "numa_stat",
4103 .seq_show = memcg_numa_stat_show,
4106 #ifdef CONFIG_MEMCG_KMEM
4108 .name = "kmem.limit_in_bytes",
4109 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
4110 .write = mem_cgroup_write,
4111 .read_u64 = mem_cgroup_read_u64,
4114 .name = "kmem.usage_in_bytes",
4115 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
4116 .read_u64 = mem_cgroup_read_u64,
4119 .name = "kmem.failcnt",
4120 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
4121 .write = mem_cgroup_reset,
4122 .read_u64 = mem_cgroup_read_u64,
4125 .name = "kmem.max_usage_in_bytes",
4126 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
4127 .write = mem_cgroup_reset,
4128 .read_u64 = mem_cgroup_read_u64,
4130 #ifdef CONFIG_SLABINFO
4132 .name = "kmem.slabinfo",
4133 .seq_start = slab_start,
4134 .seq_next = slab_next,
4135 .seq_stop = slab_stop,
4136 .seq_show = memcg_slab_show,
4140 { }, /* terminate */
4143 static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
4145 struct mem_cgroup_per_node *pn;
4146 struct mem_cgroup_per_zone *mz;
4147 int zone, tmp = node;
4149 * This routine is called against possible nodes.
4150 * But it's a BUG to call kmalloc() against an offline node.
4152 * TODO: this routine can waste much memory for nodes which will
4153 * never be onlined. It's better to use a memory hotplug callback function.
4156 if (!node_state(node, N_NORMAL_MEMORY))
4158 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
4162 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
4163 mz = &pn->zoneinfo[zone];
4164 lruvec_init(&mz->lruvec);
4165 mz->usage_in_excess = 0;
4166 mz->on_tree = false;
4169 memcg->nodeinfo[node] = pn;
4173 static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
4175 kfree(memcg->nodeinfo[node]);
4178 static struct mem_cgroup *mem_cgroup_alloc(void)
4180 struct mem_cgroup *memcg;
4183 size = sizeof(struct mem_cgroup);
4184 size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
4186 memcg = kzalloc(size, GFP_KERNEL);
4190 memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
4194 if (memcg_wb_domain_init(memcg, GFP_KERNEL))
4200 free_percpu(memcg->stat);
4207 * When destroying a mem_cgroup, references from swap_cgroup can remain
4208 * (scanning them all at force_empty is too costly...).
4210 * Instead of clearing all references at force_empty, we remember
4211 * the number of references from swap_cgroup and free the mem_cgroup when
4212 * it goes down to 0.
4214 * Removal of the cgroup itself succeeds regardless of refs from swap.
4217 static void __mem_cgroup_free(struct mem_cgroup *memcg)
4221 mem_cgroup_remove_from_trees(memcg);
4224 free_mem_cgroup_per_zone_info(memcg, node);
4226 free_percpu(memcg->stat);
4227 memcg_wb_domain_exit(memcg);
4232 * Returns the parent mem_cgroup in the memcg hierarchy (with hierarchy enabled).
4234 struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
4236 if (!memcg->memory.parent)
4238 return mem_cgroup_from_counter(memcg->memory.parent, memory);
4240 EXPORT_SYMBOL(parent_mem_cgroup);
4242 static struct cgroup_subsys_state * __ref
4243 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
4245 struct mem_cgroup *memcg;
4246 long error = -ENOMEM;
4249 memcg = mem_cgroup_alloc();
4251 return ERR_PTR(error);
4254 if (alloc_mem_cgroup_per_zone_info(memcg, node))
4258 if (parent_css == NULL) {
4259 root_mem_cgroup = memcg;
4260 page_counter_init(&memcg->memory, NULL);
4261 memcg->high = PAGE_COUNTER_MAX;
4262 memcg->soft_limit = PAGE_COUNTER_MAX;
4263 page_counter_init(&memcg->memsw, NULL);
4264 page_counter_init(&memcg->kmem, NULL);
4267 memcg->last_scanned_node = MAX_NUMNODES;
4268 INIT_LIST_HEAD(&memcg->oom_notify);
4269 memcg->move_charge_at_immigrate = 0;
4270 mutex_init(&memcg->thresholds_lock);
4271 spin_lock_init(&memcg->move_lock);
4272 vmpressure_init(&memcg->vmpressure);
4273 INIT_LIST_HEAD(&memcg->event_list);
4274 spin_lock_init(&memcg->event_list_lock);
4275 #ifdef CONFIG_MEMCG_KMEM
4276 memcg->kmemcg_id = -1;
4278 #ifdef CONFIG_CGROUP_WRITEBACK
4279 INIT_LIST_HEAD(&memcg->cgwb_list);
4284 __mem_cgroup_free(memcg);
4285 return ERR_PTR(error);
4289 mem_cgroup_css_online(struct cgroup_subsys_state *css)
4291 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4292 struct mem_cgroup *parent = mem_cgroup_from_css(css->parent);
4295 if (css->id > MEM_CGROUP_ID_MAX)
4301 mutex_lock(&memcg_create_mutex);
4303 memcg->use_hierarchy = parent->use_hierarchy;
4304 memcg->oom_kill_disable = parent->oom_kill_disable;
4305 memcg->swappiness = mem_cgroup_swappiness(parent);
4307 if (parent->use_hierarchy) {
4308 page_counter_init(&memcg->memory, &parent->memory);
4309 memcg->high = PAGE_COUNTER_MAX;
4310 memcg->soft_limit = PAGE_COUNTER_MAX;
4311 page_counter_init(&memcg->memsw, &parent->memsw);
4312 page_counter_init(&memcg->kmem, &parent->kmem);
4315 * No need to take a reference to the parent because cgroup
4316 * core guarantees its existence.
4319 page_counter_init(&memcg->memory, NULL);
4320 memcg->high = PAGE_COUNTER_MAX;
4321 memcg->soft_limit = PAGE_COUNTER_MAX;
4322 page_counter_init(&memcg->memsw, NULL);
4323 page_counter_init(&memcg->kmem, NULL);
4325 * A deeper hierarchy with use_hierarchy == false doesn't make
4326 * much sense, so let the cgroup subsystem know about this
4327 * unfortunate state in our controller.
4329 if (parent != root_mem_cgroup)
4330 memory_cgrp_subsys.broken_hierarchy = true;
4332 mutex_unlock(&memcg_create_mutex);
4334 ret = memcg_init_kmem(memcg, &memory_cgrp_subsys);
4339 * Make sure the memcg is initialized: mem_cgroup_iter()
4340 * orders reading memcg->initialized against its callers
4341 * reading the memcg members.
4343 smp_store_release(&memcg->initialized, 1);
4348 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
4350 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4351 struct mem_cgroup_event *event, *tmp;
4354 * Unregister events and notify userspace.
4355 * Notify userspace about cgroup removing only after rmdir of cgroup
4356 * directory to avoid race between userspace and kernelspace.
4358 spin_lock(&memcg->event_list_lock);
4359 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
4360 list_del_init(&event->list);
4361 schedule_work(&event->remove);
4363 spin_unlock(&memcg->event_list_lock);
4365 vmpressure_cleanup(&memcg->vmpressure);
4367 memcg_deactivate_kmem(memcg);
4369 wb_memcg_offline(memcg);
4372 static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
4374 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4376 invalidate_reclaim_iterators(memcg);
4379 static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
4381 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4383 memcg_destroy_kmem(memcg);
4384 __mem_cgroup_free(memcg);
4388 * mem_cgroup_css_reset - reset the states of a mem_cgroup
4389 * @css: the target css
4391 * Reset the states of the mem_cgroup associated with @css. This is
4392 * invoked when the userland requests disabling on the default hierarchy
4393 * but the memcg is pinned through dependency. The memcg should stop
4394 * applying policies and should revert to the vanilla state as it may be
4395 * made visible again.
4397 * The current implementation only resets the essential configurations.
4398 * This needs to be expanded to cover all the visible parts.
4400 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
4402 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4404 mem_cgroup_resize_limit(memcg, PAGE_COUNTER_MAX);
4405 mem_cgroup_resize_memsw_limit(memcg, PAGE_COUNTER_MAX);
4406 memcg_update_kmem_limit(memcg, PAGE_COUNTER_MAX);
4408 memcg->high = PAGE_COUNTER_MAX;
4409 memcg->soft_limit = PAGE_COUNTER_MAX;
4410 memcg_wb_domain_size_changed(memcg);
4414 /* Handlers for move charge at task migration. */
4415 static int mem_cgroup_do_precharge(unsigned long count)
4419 /* Try a single bulk charge without reclaim first, kswapd may wake */
4420 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
4422 mc.precharge += count;
4426 /* Try charges one by one with reclaim */
4428 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_NORETRY, 1);
4438 * get_mctgt_type - get target type of moving charge
4439 * @vma: the vma the pte to be checked belongs
4440 * @addr: the address corresponding to the pte to be checked
4441 * @ptent: the pte to be checked
4442 * @target: the pointer where the target page or swap entry will be stored (can be NULL)
4445 * 0(MC_TARGET_NONE): if the pte is not a target for move charge.
4446 * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
4447 * move charge. if @target is not NULL, the page is stored in target->page
4448 * with extra refcnt got(Callers should handle it).
4449 * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
4450 * target for charge migration. if @target is not NULL, the entry is stored
4453 * Called with pte lock held.
4460 enum mc_target_type {
4466 static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
4467 unsigned long addr, pte_t ptent)
4469 struct page *page = vm_normal_page(vma, addr, ptent);
4471 if (!page || !page_mapped(page))
4473 if (PageAnon(page)) {
4474 if (!(mc.flags & MOVE_ANON))
4477 if (!(mc.flags & MOVE_FILE))
4480 if (!get_page_unless_zero(page))
4487 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
4488 unsigned long addr, pte_t ptent, swp_entry_t *entry)
4490 struct page *page = NULL;
4491 swp_entry_t ent = pte_to_swp_entry(ptent);
4493 if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent))
4496 * Because lookup_swap_cache() updates some statistics counters,
4497 * we call find_get_page() with swapper_space directly.
4499 page = find_get_page(swap_address_space(ent), ent.val);
4500 if (do_swap_account)
4501 entry->val = ent.val;
4506 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
4507 unsigned long addr, pte_t ptent, swp_entry_t *entry)
4513 static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
4514 unsigned long addr, pte_t ptent, swp_entry_t *entry)
4516 struct page *page = NULL;
4517 struct address_space *mapping;
4520 if (!vma->vm_file) /* anonymous vma */
4522 if (!(mc.flags & MOVE_FILE))
4525 mapping = vma->vm_file->f_mapping;
4526 pgoff = linear_page_index(vma, addr);
4528 /* the page is moved even if it's not RSS of this task (page-faulted). */
4530 /* shmem/tmpfs may report page out on swap: account for that too. */
4531 if (shmem_mapping(mapping)) {
4532 page = find_get_entry(mapping, pgoff);
4533 if (radix_tree_exceptional_entry(page)) {
4534 swp_entry_t swp = radix_to_swp_entry(page);
4535 if (do_swap_account)
4537 page = find_get_page(swap_address_space(swp), swp.val);
4540 page = find_get_page(mapping, pgoff);
4542 page = find_get_page(mapping, pgoff);
4548 * mem_cgroup_move_account - move account of the page
4550 * @nr_pages: number of regular pages (>1 for huge pages)
4551 * @from: mem_cgroup which the page is moved from.
4552 * @to: mem_cgroup which the page is moved to. @from != @to.
4554 * The caller must confirm the following.
4555 * - page is not on LRU (isolate_page() is useful.)
4556 * - compound_lock is held when nr_pages > 1
4558 * This function doesn't do "charge" to the new cgroup and doesn't do "uncharge" from the old cgroup.
4561 static int mem_cgroup_move_account(struct page *page,
4562 unsigned int nr_pages,
4563 struct mem_cgroup *from,
4564 struct mem_cgroup *to)
4566 unsigned long flags;
4570 VM_BUG_ON(from == to);
4571 VM_BUG_ON_PAGE(PageLRU(page), page);
4573 * The page is isolated from the LRU, so the collapse function
4574 * will not handle this page. But page splitting can happen.
4575 * Do this check under compound_page_lock(); the caller should hold it.
4579 if (nr_pages > 1 && !PageTransHuge(page))
4583 * Prevent mem_cgroup_replace_page() from looking at
4584 * page->mem_cgroup of its source page while we change it.
4586 if (!trylock_page(page))
4590 if (page->mem_cgroup != from)
4593 anon = PageAnon(page);
4595 spin_lock_irqsave(&from->move_lock, flags);
4597 if (!anon && page_mapped(page)) {
4598 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
4600 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
4605 * move_lock is held (grabbed above) and the caller set from->moving_account,
4606 * so mem_cgroup_update_page_stat() will serialize updates to PageDirty.
4607 * The mapping should therefore be stable for dirty pages.
4609 if (!anon && PageDirty(page)) {
4610 struct address_space *mapping = page_mapping(page);
4612 if (mapping_cap_account_dirty(mapping)) {
4613 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_DIRTY],
4615 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_DIRTY],
4620 if (PageWriteback(page)) {
4621 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_WRITEBACK],
4623 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_WRITEBACK],
4628 * It is safe to change page->mem_cgroup here because the page
4629 * is referenced, charged, and isolated - we can't race with
4630 * uncharging, charging, migration, or LRU putback.
4633 /* caller should have done css_get */
4634 page->mem_cgroup = to;
4635 spin_unlock_irqrestore(&from->move_lock, flags);
4639 local_irq_disable();
4640 mem_cgroup_charge_statistics(to, page, nr_pages);
4641 memcg_check_events(to, page);
4642 mem_cgroup_charge_statistics(from, page, -nr_pages);
4643 memcg_check_events(from, page);
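/*
 * Illustrative caller pattern for mem_cgroup_move_account() (a sketch,
 * not part of this file): the page must be taken off the LRU first and
 * put back afterwards, mirroring mem_cgroup_move_charge_pte_range()
 * below. isolate_lru_page() and mem_cgroup_move_account() both return
 * 0 on success; "moved" is hypothetical bookkeeping:
 *
 *	if (!isolate_lru_page(page)) {
 *		if (!mem_cgroup_move_account(page, 1, from, to))
 *			moved++;
 *		putback_lru_page(page);
 *	}
 */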
4651 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
4652 unsigned long addr, pte_t ptent, union mc_target *target)
4654 struct page *page = NULL;
4655 enum mc_target_type ret = MC_TARGET_NONE;
4656 swp_entry_t ent = { .val = 0 };
4658 if (pte_present(ptent))
4659 page = mc_handle_present_pte(vma, addr, ptent);
4660 else if (is_swap_pte(ptent))
4661 page = mc_handle_swap_pte(vma, addr, ptent, &ent);
4662 else if (pte_none(ptent))
4663 page = mc_handle_file_pte(vma, addr, ptent, &ent);
4665 if (!page && !ent.val)
4669 * Do only a loose check, without serialization.
4670 * mem_cgroup_move_account() checks whether the page is valid
4671 * under LRU exclusion.
4673 if (page->mem_cgroup == mc.from) {
4674 ret = MC_TARGET_PAGE;
4675 if (target)
4676 target->page = page;
4678 if (!ret || !target)
4679 put_page(page);
4681 /* There is a swap entry and a page doesn't exist or isn't charged */
4682 if (ent.val && !ret &&
4683 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
4684 ret = MC_TARGET_SWAP;
4685 if (target)
4686 target->ent = ent;
4691 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4693 * We don't consider swapping or file mapped pages because THP does not
4694 * support them for now.
4695 * Caller should make sure that pmd_trans_huge(pmd) is true.
4697 static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
4698 unsigned long addr, pmd_t pmd, union mc_target *target)
4700 struct page *page = NULL;
4701 enum mc_target_type ret = MC_TARGET_NONE;
4703 page = pmd_page(pmd);
4704 VM_BUG_ON_PAGE(!page || !PageHead(page), page);
4705 if (!(mc.flags & MOVE_ANON))
4707 if (page->mem_cgroup == mc.from) {
4708 ret = MC_TARGET_PAGE;
4709 if (target) {
4710 get_page(page);
4711 target->page = page;
4712 }
4717 static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
4718 unsigned long addr, pmd_t pmd, union mc_target *target)
4720 return MC_TARGET_NONE;
4724 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
4725 unsigned long addr, unsigned long end,
4726 struct mm_walk *walk)
4728 struct vm_area_struct *vma = walk->vma;
4732 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
4733 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
4734 mc.precharge += HPAGE_PMD_NR;
4739 if (pmd_trans_unstable(pmd))
4741 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4742 for (; addr != end; pte++, addr += PAGE_SIZE)
4743 if (get_mctgt_type(vma, addr, *pte, NULL))
4744 mc.precharge++; /* increment precharge temporarily */
4745 pte_unmap_unlock(pte - 1, ptl);
4751 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
4753 unsigned long precharge;
4755 struct mm_walk mem_cgroup_count_precharge_walk = {
4756 .pmd_entry = mem_cgroup_count_precharge_pte_range,
4757 .mm = mm,
4759 down_read(&mm->mmap_sem);
4760 walk_page_range(0, ~0UL, &mem_cgroup_count_precharge_walk);
4761 up_read(&mm->mmap_sem);
4763 precharge = mc.precharge;
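/*
 * A minimal sketch of the page-table-walk pattern used above (assumes
 * kernel context; my_pte_handler is a hypothetical callback with the
 * same signature as mem_cgroup_count_precharge_pte_range()):
 *
 *	struct mm_walk walk = {
 *		.pmd_entry = my_pte_handler,
 *		.mm = mm,
 *	};
 *
 *	down_read(&mm->mmap_sem);
 *	walk_page_range(0, ~0UL, &walk);
 *	up_read(&mm->mmap_sem);
 *
 * Returning nonzero from the callback aborts the walk, which is how
 * mem_cgroup_move_charge_pte_range() below bails out when it cannot
 * get another precharge.
 */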
4769 static int mem_cgroup_precharge_mc(struct mm_struct *mm)
4771 unsigned long precharge = mem_cgroup_count_precharge(mm);
4773 VM_BUG_ON(mc.moving_task);
4774 mc.moving_task = current;
4775 return mem_cgroup_do_precharge(precharge);
4778 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
4779 static void __mem_cgroup_clear_mc(void)
4781 struct mem_cgroup *from = mc.from;
4782 struct mem_cgroup *to = mc.to;
4784 /* we must uncharge all the leftover precharges from mc.to */
4785 if (mc.precharge) {
4786 cancel_charge(mc.to, mc.precharge);
4787 mc.precharge = 0;
4788 }
4790 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
4791 * we must uncharge here.
4793 if (mc.moved_charge) {
4794 cancel_charge(mc.from, mc.moved_charge);
4795 mc.moved_charge = 0;
4797 /* we must fixup refcnts and charges */
4798 if (mc.moved_swap) {
4799 /* uncharge swap account from the old cgroup */
4800 if (!mem_cgroup_is_root(mc.from))
4801 page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
4804 * we charged both to->memory and to->memsw, so we
4805 * should uncharge to->memory.
4807 if (!mem_cgroup_is_root(mc.to))
4808 page_counter_uncharge(&mc.to->memory, mc.moved_swap);
4810 css_put_many(&mc.from->css, mc.moved_swap);
4812 /* we've already done css_get(mc.to) */
4815 memcg_oom_recover(from);
4816 memcg_oom_recover(to);
4817 wake_up_all(&mc.waitq);
4820 static void mem_cgroup_clear_mc(void)
4823 * we must clear moving_task before waking up waiters at the end of
4824 * task migration.
4826 mc.moving_task = NULL;
4827 __mem_cgroup_clear_mc();
4828 spin_lock(&mc.lock);
4829 mc.from = NULL;
4830 mc.to = NULL;
4831 spin_unlock(&mc.lock);
4834 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
4836 struct cgroup_subsys_state *css;
4837 struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
4838 struct mem_cgroup *from;
4839 struct task_struct *leader, *p;
4840 struct mm_struct *mm;
4841 unsigned long move_flags;
4844 /* charge immigration isn't supported on the default hierarchy */
4845 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
4849 * Multi-process migrations only happen on the default hierarchy
4850 * where charge immigration is not used. Perform charge
4851 * immigration if @tset contains a leader and whine if there are
4852 * multiple.
4855 cgroup_taskset_for_each_leader(leader, css, tset) {
4858 memcg = mem_cgroup_from_css(css);
4864 * We are now committed to this value whatever it is. Changes in this
4865 * tunable will only affect upcoming migrations, not the current one.
4866 * So we need to save it, and keep it going.
4868 move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
4872 from = mem_cgroup_from_task(p);
4874 VM_BUG_ON(from == memcg);
4876 mm = get_task_mm(p);
4879 /* We move charges only when we move the owner of the mm */
4880 if (mm->owner == p) {
4883 VM_BUG_ON(mc.precharge);
4884 VM_BUG_ON(mc.moved_charge);
4885 VM_BUG_ON(mc.moved_swap);
4887 spin_lock(&mc.lock);
4888 mc.from = from;
4889 mc.to = memcg;
4890 mc.flags = move_flags;
4891 spin_unlock(&mc.lock);
4892 /* We set mc.moving_task later */
4894 ret = mem_cgroup_precharge_mc(mm);
4895 if (ret)
4896 mem_cgroup_clear_mc();
4902 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
4905 mem_cgroup_clear_mc();
4908 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
4909 unsigned long addr, unsigned long end,
4910 struct mm_walk *walk)
4913 struct vm_area_struct *vma = walk->vma;
4916 enum mc_target_type target_type;
4917 union mc_target target;
4921 * We don't take compound_lock() here, but there is no race with
4922 * splitting THP because:
4923 * - if pmd_trans_huge_lock() returns 1, the relevant thp is not
4924 * under splitting, which means there's no concurrent thp split,
4925 * - if another thread runs into split_huge_page() just after we
4926 * entered this if-block, the thread must wait for page table lock
4927 * to be unlocked in __split_huge_page_splitting(), where the main
4928 * part of thp split is not executed yet.
4930 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
4931 if (mc.precharge < HPAGE_PMD_NR) {
4935 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
4936 if (target_type == MC_TARGET_PAGE) {
4937 page = target.page;
4938 if (!isolate_lru_page(page)) {
4939 if (!mem_cgroup_move_account(page, HPAGE_PMD_NR,
4941 mc.precharge -= HPAGE_PMD_NR;
4942 mc.moved_charge += HPAGE_PMD_NR;
4944 putback_lru_page(page);
4952 if (pmd_trans_unstable(pmd))
4955 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4956 for (; addr != end; addr += PAGE_SIZE) {
4957 pte_t ptent = *(pte++);
4963 switch (get_mctgt_type(vma, addr, ptent, &target)) {
4964 case MC_TARGET_PAGE:
4965 page = target.page;
4966 if (isolate_lru_page(page))
4967 goto put;
4968 if (!mem_cgroup_move_account(page, 1, mc.from, mc.to)) {
4970 /* we uncharge from mc.from later. */
4973 putback_lru_page(page);
4974 put: /* get_mctgt_type() gets the page */
4977 case MC_TARGET_SWAP:
4978 ent = target.ent;
4979 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
4981 /* we fixup refcnts and charges later. */
4989 pte_unmap_unlock(pte - 1, ptl);
4994 * We have consumed all precharges we got in can_attach().
4995 * We try to charge one by one, but don't do any additional
4996 * charges to mc.to if we have failed to charge once in the attach()
4997 * phase.
4999 ret = mem_cgroup_do_precharge(1);
5007 static void mem_cgroup_move_charge(struct mm_struct *mm)
5009 struct mm_walk mem_cgroup_move_charge_walk = {
5010 .pmd_entry = mem_cgroup_move_charge_pte_range,
5011 .mm = mm,
5014 lru_add_drain_all();
5016 * Signal mem_cgroup_begin_page_stat() to take the memcg's
5017 * move_lock while we're moving its pages to another memcg.
5018 * Then wait for already started RCU-only updates to finish.
5020 atomic_inc(&mc.from->moving_account);
5021 synchronize_rcu();
5022 retry:
5023 if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
5025 * Someone holding the mmap_sem may be waiting in the
5026 * waitq. So we cancel all extra charges, wake up all waiters,
5027 * and retry. Because we cancel precharges, we might not be able
5028 * to move enough charges, but moving charge is a best-effort
5029 * feature anyway, so it wouldn't be a big problem.
5031 __mem_cgroup_clear_mc();
5032 cond_resched();
5033 goto retry;
5036 * When we have consumed all precharges and failed in doing
5037 * additional charge, the page walk just aborts.
5039 walk_page_range(0, ~0UL, &mem_cgroup_move_charge_walk);
5040 up_read(&mm->mmap_sem);
5041 atomic_dec(&mc.from->moving_account);
5044 static void mem_cgroup_move_task(struct cgroup_taskset *tset)
5046 struct cgroup_subsys_state *css;
5047 struct task_struct *p = cgroup_taskset_first(tset, &css);
5048 struct mm_struct *mm = get_task_mm(p);
5052 mem_cgroup_move_charge(mm);
5056 mem_cgroup_clear_mc();
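/*
 * How the pieces above fit together during a task migration (a sketch
 * of the cgroup core callback ordering, cf. memory_cgrp_subsys below):
 *
 *	can_attach()    - set up "mc" and precharge the whole mm
 *	attach()        - mem_cgroup_move_task(): walk the page tables
 *	                  and move charges page by page
 *	cancel_attach() - mem_cgroup_cancel_attach(): drop the
 *	                  precharges if the migration is aborted
 */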
5058 #else /* !CONFIG_MMU */
5059 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
5063 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
5066 static void mem_cgroup_move_task(struct cgroup_taskset *tset)
5072 * Cgroup retains root cgroups across [un]mount cycles making it necessary
5073 * to verify whether we're attached to the default hierarchy on each mount
5074 * attempt.
5076 static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
5079 * use_hierarchy is forced on the default hierarchy. cgroup core
5080 * guarantees that @root doesn't have any children, so turning it
5081 * on for the root memcg is enough.
5083 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
5084 root_mem_cgroup->use_hierarchy = true;
5085 else
5086 root_mem_cgroup->use_hierarchy = false;
5089 static u64 memory_current_read(struct cgroup_subsys_state *css,
5092 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5094 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
5097 static int memory_low_show(struct seq_file *m, void *v)
5099 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5100 unsigned long low = READ_ONCE(memcg->low);
5102 if (low == PAGE_COUNTER_MAX)
5103 seq_puts(m, "max\n");
5104 else
5105 seq_printf(m, "%llu\n", (u64)low * PAGE_SIZE);
5110 static ssize_t memory_low_write(struct kernfs_open_file *of,
5111 char *buf, size_t nbytes, loff_t off)
5113 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5117 buf = strstrip(buf);
5118 err = page_counter_memparse(buf, "max", &low);
5127 static int memory_high_show(struct seq_file *m, void *v)
5129 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5130 unsigned long high = READ_ONCE(memcg->high);
5132 if (high == PAGE_COUNTER_MAX)
5133 seq_puts(m, "max\n");
5134 else
5135 seq_printf(m, "%llu\n", (u64)high * PAGE_SIZE);
5140 static ssize_t memory_high_write(struct kernfs_open_file *of,
5141 char *buf, size_t nbytes, loff_t off)
5143 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5147 buf = strstrip(buf);
5148 err = page_counter_memparse(buf, "max", &high);
5154 memcg_wb_domain_size_changed(memcg);
5158 static int memory_max_show(struct seq_file *m, void *v)
5160 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5161 unsigned long max = READ_ONCE(memcg->memory.limit);
5163 if (max == PAGE_COUNTER_MAX)
5164 seq_puts(m, "max\n");
5165 else
5166 seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);
5171 static ssize_t memory_max_write(struct kernfs_open_file *of,
5172 char *buf, size_t nbytes, loff_t off)
5174 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5178 buf = strstrip(buf);
5179 err = page_counter_memparse(buf, "max", &max);
5183 err = mem_cgroup_resize_limit(memcg, max);
5187 memcg_wb_domain_size_changed(memcg);
5191 static int memory_events_show(struct seq_file *m, void *v)
5193 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5195 seq_printf(m, "low %lu\n", mem_cgroup_read_events(memcg, MEMCG_LOW));
5196 seq_printf(m, "high %lu\n", mem_cgroup_read_events(memcg, MEMCG_HIGH));
5197 seq_printf(m, "max %lu\n", mem_cgroup_read_events(memcg, MEMCG_MAX));
5198 seq_printf(m, "oom %lu\n", mem_cgroup_read_events(memcg, MEMCG_OOM));
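/*
 * Illustrative memory.events output as produced above (the values are
 * made up):
 *
 *	low 0
 *	high 1234
 *	max 56
 *	oom 0
 *
 * Each line is the number of times the corresponding boundary event
 * has fired over the lifetime of the group.
 */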
5203 static struct cftype memory_files[] = {
5206 .flags = CFTYPE_NOT_ON_ROOT,
5207 .read_u64 = memory_current_read,
5211 .flags = CFTYPE_NOT_ON_ROOT,
5212 .seq_show = memory_low_show,
5213 .write = memory_low_write,
5217 .flags = CFTYPE_NOT_ON_ROOT,
5218 .seq_show = memory_high_show,
5219 .write = memory_high_write,
5223 .flags = CFTYPE_NOT_ON_ROOT,
5224 .seq_show = memory_max_show,
5225 .write = memory_max_write,
5229 .flags = CFTYPE_NOT_ON_ROOT,
5230 .file_offset = offsetof(struct mem_cgroup, events_file),
5231 .seq_show = memory_events_show,
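/*
 * Illustrative userspace use of the files above (a sketch; assumes
 * cgroup2 is mounted at /sys/fs/cgroup and "mygroup" already exists):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/sys/fs/cgroup/mygroup/memory.high", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "104857600", 9);	(100M high boundary)
 *		close(fd);
 *	}
 *
 * Writing the string "max" instead restores the no-limit default, as
 * parsed by page_counter_memparse() in memory_high_write().
 */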
5236 struct cgroup_subsys memory_cgrp_subsys = {
5237 .css_alloc = mem_cgroup_css_alloc,
5238 .css_online = mem_cgroup_css_online,
5239 .css_offline = mem_cgroup_css_offline,
5240 .css_released = mem_cgroup_css_released,
5241 .css_free = mem_cgroup_css_free,
5242 .css_reset = mem_cgroup_css_reset,
5243 .can_attach = mem_cgroup_can_attach,
5244 .cancel_attach = mem_cgroup_cancel_attach,
5245 .attach = mem_cgroup_move_task,
5246 .bind = mem_cgroup_bind,
5247 .dfl_cftypes = memory_files,
5248 .legacy_cftypes = mem_cgroup_legacy_files,
5253 * mem_cgroup_low - check if memory consumption is below the normal range
5254 * @root: the highest ancestor to consider
5255 * @memcg: the memory cgroup to check
5257 * Returns %true if memory consumption of @memcg, and that of all
5258 * configurable ancestors up to @root, is below the normal range.
5260 bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg)
5262 if (mem_cgroup_disabled())
5266 * The toplevel group doesn't have a configurable range, so
5267 * it's never low when looked at directly, and it is not
5268 * considered an ancestor when assessing the hierarchy.
5271 if (memcg == root_mem_cgroup)
5272 return false;
5274 if (page_counter_read(&memcg->memory) >= memcg->low)
5275 return false;
5277 while (memcg != root) {
5278 memcg = parent_mem_cgroup(memcg);
5280 if (memcg == root_mem_cgroup)
5281 break;
5283 if (page_counter_read(&memcg->memory) >= memcg->low)
5284 return false;
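/*
 * Worked example: in a hierarchy root -> A (low=2G) -> B (low=1G),
 * mem_cgroup_low(root, B) returns true only if B's usage is below 1G
 * and A's usage is below 2G. The root group itself has no configurable
 * range and is skipped by the checks above.
 */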
5290 * mem_cgroup_try_charge - try charging a page
5291 * @page: page to charge
5292 * @mm: mm context of the victim
5293 * @gfp_mask: reclaim mode
5294 * @memcgp: charged memcg return
5296 * Try to charge @page to the memcg that @mm belongs to, reclaiming
5297 * pages according to @gfp_mask if necessary.
5299 * Returns 0 on success, with *@memcgp pointing to the charged memcg.
5300 * Otherwise, an error code is returned.
5302 * After page->mapping has been set up, the caller must finalize the
5303 * charge with mem_cgroup_commit_charge(). Or abort the transaction
5304 * with mem_cgroup_cancel_charge() in case page instantiation fails.
5306 int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
5307 gfp_t gfp_mask, struct mem_cgroup **memcgp)
5309 struct mem_cgroup *memcg = NULL;
5310 unsigned int nr_pages = 1;
5313 if (mem_cgroup_disabled())
5316 if (PageSwapCache(page)) {
5318 * Every swap fault against a single page tries to charge the
5319 * page, bail as early as possible. shmem_unuse() encounters
5320 * already charged pages, too. The USED bit is protected by
5321 * the page lock, which serializes swap cache removal, which
5322 * in turn serializes uncharging.
5324 VM_BUG_ON_PAGE(!PageLocked(page), page);
5325 if (page->mem_cgroup)
5328 if (do_swap_account) {
5329 swp_entry_t ent = { .val = page_private(page), };
5330 unsigned short id = lookup_swap_cgroup_id(ent);
5333 memcg = mem_cgroup_from_id(id);
5334 if (memcg && !css_tryget_online(&memcg->css))
5335 memcg = NULL;
5340 if (PageTransHuge(page)) {
5341 nr_pages <<= compound_order(page);
5342 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
5346 memcg = get_mem_cgroup_from_mm(mm);
5348 ret = try_charge(memcg, gfp_mask, nr_pages);
5350 css_put(&memcg->css);
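/*
 * The full charge transaction, as seen from a hypothetical caller
 * instantiating a page; install_page() is a stand-in for whatever sets
 * up page->mapping:
 *
 *	struct mem_cgroup *memcg;
 *	int err;
 *
 *	err = mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg);
 *	if (err)
 *		return err;
 *	err = install_page(page);
 *	if (err) {
 *		mem_cgroup_cancel_charge(page, memcg);
 *		return err;
 *	}
 *	mem_cgroup_commit_charge(page, memcg, false);
 */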
5357 * mem_cgroup_commit_charge - commit a page charge
5358 * @page: page to charge
5359 * @memcg: memcg to charge the page to
5360 * @lrucare: page might be on LRU already
5362 * Finalize a charge transaction started by mem_cgroup_try_charge(),
5363 * after page->mapping has been set up. This must happen atomically
5364 * as part of the page instantiation, i.e. under the page table lock
5365 * for anonymous pages, under the page lock for page and swap cache.
5367 * In addition, the page must not be on the LRU during the commit, to
5368 * prevent racing with task migration. If it might be, use @lrucare.
5370 * Use mem_cgroup_cancel_charge() to cancel the transaction instead.
5372 void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
5375 unsigned int nr_pages = 1;
5377 VM_BUG_ON_PAGE(!page->mapping, page);
5378 VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page);
5380 if (mem_cgroup_disabled())
5383 * Swap faults will attempt to charge the same page multiple
5384 * times. But reuse_swap_page() might have removed the page
5385 * from swapcache already, so we can't check PageSwapCache().
5390 commit_charge(page, memcg, lrucare);
5392 if (PageTransHuge(page)) {
5393 nr_pages <<= compound_order(page);
5394 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
5397 local_irq_disable();
5398 mem_cgroup_charge_statistics(memcg, page, nr_pages);
5399 memcg_check_events(memcg, page);
5402 if (do_swap_account && PageSwapCache(page)) {
5403 swp_entry_t entry = { .val = page_private(page) };
5405 * The swap entry might not get freed for a long time,
5406 * let's not wait for it. The page already received a
5407 * memory+swap charge, drop the swap entry duplicate.
5409 mem_cgroup_uncharge_swap(entry);
5414 * mem_cgroup_cancel_charge - cancel a page charge
5415 * @page: page to charge
5416 * @memcg: memcg to charge the page to
5418 * Cancel a charge transaction started by mem_cgroup_try_charge().
5420 void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg)
5422 unsigned int nr_pages = 1;
5424 if (mem_cgroup_disabled())
5427 * Swap faults will attempt to charge the same page multiple
5428 * times. But reuse_swap_page() might have removed the page
5429 * from swapcache already, so we can't check PageSwapCache().
5434 if (PageTransHuge(page)) {
5435 nr_pages <<= compound_order(page);
5436 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
5439 cancel_charge(memcg, nr_pages);
5442 static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
5443 unsigned long nr_anon, unsigned long nr_file,
5444 unsigned long nr_huge, struct page *dummy_page)
5446 unsigned long nr_pages = nr_anon + nr_file;
5447 unsigned long flags;
5449 if (!mem_cgroup_is_root(memcg)) {
5450 page_counter_uncharge(&memcg->memory, nr_pages);
5451 if (do_swap_account)
5452 page_counter_uncharge(&memcg->memsw, nr_pages);
5453 memcg_oom_recover(memcg);
5456 local_irq_save(flags);
5457 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon);
5458 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file);
5459 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge);
5460 __this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT], pgpgout);
5461 __this_cpu_add(memcg->stat->nr_page_events, nr_pages);
5462 memcg_check_events(memcg, dummy_page);
5463 local_irq_restore(flags);
5465 if (!mem_cgroup_is_root(memcg))
5466 css_put_many(&memcg->css, nr_pages);
5469 static void uncharge_list(struct list_head *page_list)
5471 struct mem_cgroup *memcg = NULL;
5472 unsigned long nr_anon = 0;
5473 unsigned long nr_file = 0;
5474 unsigned long nr_huge = 0;
5475 unsigned long pgpgout = 0;
5476 struct list_head *next;
5479 next = page_list->next;
5480 do {
5481 unsigned int nr_pages = 1;
5483 page = list_entry(next, struct page, lru);
5484 next = page->lru.next;
5486 VM_BUG_ON_PAGE(PageLRU(page), page);
5487 VM_BUG_ON_PAGE(page_count(page), page);
5489 if (!page->mem_cgroup)
5493 * Nobody should be changing or seriously looking at
5494 * page->mem_cgroup at this point, we have fully
5495 * exclusive access to the page.
5498 if (memcg != page->mem_cgroup) {
5499 if (memcg) {
5500 uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
5501 nr_huge, page);
5502 pgpgout = nr_anon = nr_file = nr_huge = 0;
5503 }
5504 memcg = page->mem_cgroup;
5507 if (PageTransHuge(page)) {
5508 nr_pages <<= compound_order(page);
5509 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
5510 nr_huge += nr_pages;
5511 }
5513 if (PageAnon(page))
5514 nr_anon += nr_pages;
5515 else
5516 nr_file += nr_pages;
5518 page->mem_cgroup = NULL;
5520 pgpgout++;
5521 } while (next != page_list);
5523 if (memcg)
5524 uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
5525 nr_huge, page);
5529 * mem_cgroup_uncharge - uncharge a page
5530 * @page: page to uncharge
5532 * Uncharge a page previously charged with mem_cgroup_try_charge() and
5533 * mem_cgroup_commit_charge().
5535 void mem_cgroup_uncharge(struct page *page)
5537 if (mem_cgroup_disabled())
5540 /* Don't touch page->lru of any random page, pre-check: */
5541 if (!page->mem_cgroup)
5544 INIT_LIST_HEAD(&page->lru);
5545 uncharge_list(&page->lru);
5549 * mem_cgroup_uncharge_list - uncharge a list of page
5550 * @page_list: list of pages to uncharge
5552 * Uncharge a list of pages previously charged with
5553 * mem_cgroup_try_charge() and mem_cgroup_commit_charge().
5555 void mem_cgroup_uncharge_list(struct list_head *page_list)
5557 if (mem_cgroup_disabled())
5560 if (!list_empty(page_list))
5561 uncharge_list(page_list);
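/*
 * Usage sketch for the uncharge API: batch-free pages that are off the
 * LRU and have dropped their last reference, e.g. at the end of a
 * (hypothetical) reclaim pass:
 *
 *	LIST_HEAD(pages_to_free);
 *
 *	list_add(&page->lru, &pages_to_free);
 *	...
 *	mem_cgroup_uncharge_list(&pages_to_free);
 *
 * Batching lets uncharge_batch() above update the page counters and
 * statistics once per run of same-memcg pages instead of once per page.
 */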
5565 * mem_cgroup_replace_page - migrate a charge to another page
5566 * @oldpage: currently charged page
5567 * @newpage: page to transfer the charge to
5569 * Migrate the charge from @oldpage to @newpage.
5571 * Both pages must be locked, @newpage->mapping must be set up.
5572 * Either or both pages might be on the LRU already.
5574 void mem_cgroup_replace_page(struct page *oldpage, struct page *newpage)
5576 struct mem_cgroup *memcg;
5579 VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
5580 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
5581 VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage);
5582 VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage),
5585 if (mem_cgroup_disabled())
5588 /* Page cache replacement: new page already charged? */
5589 if (newpage->mem_cgroup)
5592 /* Swapcache readahead pages can get replaced before being charged */
5593 memcg = oldpage->mem_cgroup;
5597 lock_page_lru(oldpage, &isolated);
5598 oldpage->mem_cgroup = NULL;
5599 unlock_page_lru(oldpage, isolated);
5601 commit_charge(newpage, memcg, true);
5605 * subsys_initcall() for memory controller.
5607 * Some parts like hotcpu_notifier() have to be initialized from this context
5608 * because of lock dependencies (cgroup_lock -> cpu hotplug) but basically
5609 * everything that doesn't depend on a specific mem_cgroup structure should
5610 * be initialized from here.
5612 static int __init mem_cgroup_init(void)
5616 hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
5618 for_each_possible_cpu(cpu)
5619 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
5620 drain_local_stock);
5622 for_each_node(node) {
5623 struct mem_cgroup_tree_per_node *rtpn;
5626 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
5627 node_online(node) ? node : NUMA_NO_NODE);
5629 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
5630 struct mem_cgroup_tree_per_zone *rtpz;
5632 rtpz = &rtpn->rb_tree_per_zone[zone];
5633 rtpz->rb_root = RB_ROOT;
5634 spin_lock_init(&rtpz->lock);
5636 soft_limit_tree.rb_tree_per_node[node] = rtpn;
5641 subsys_initcall(mem_cgroup_init);
5643 #ifdef CONFIG_MEMCG_SWAP
5645 * mem_cgroup_swapout - transfer a memsw charge to swap
5646 * @page: page whose memsw charge to transfer
5647 * @entry: swap entry to move the charge to
5649 * Transfer the memsw charge of @page to @entry.
5651 void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
5653 struct mem_cgroup *memcg;
5654 unsigned short oldid;
5656 VM_BUG_ON_PAGE(PageLRU(page), page);
5657 VM_BUG_ON_PAGE(page_count(page), page);
5659 if (!do_swap_account)
5662 memcg = page->mem_cgroup;
5664 /* Readahead page, never charged */
5668 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
5669 VM_BUG_ON_PAGE(oldid, page);
5670 mem_cgroup_swap_statistics(memcg, true);
5672 page->mem_cgroup = NULL;
5674 if (!mem_cgroup_is_root(memcg))
5675 page_counter_uncharge(&memcg->memory, 1);
5678 * Interrupts should be disabled here because the caller holds the
5679 * mapping->tree_lock lock which is taken with interrupts-off. It is
5680 * important here to have the interrupts disabled because it is the
5681 * only synchronisation we have for updating the per-CPU variables.
5683 VM_BUG_ON(!irqs_disabled());
5684 mem_cgroup_charge_statistics(memcg, page, -1);
5685 memcg_check_events(memcg, page);
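/*
 * Worked example: with usage memory=100 pages and memsw=100 pages,
 * swapping one page out via mem_cgroup_swapout() leaves memory=99 but
 * memsw=100; the memsw charge is only dropped later, by
 * mem_cgroup_uncharge_swap() below, once the swap entry itself is
 * freed.
 */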
5689 * mem_cgroup_uncharge_swap - uncharge a swap entry
5690 * @entry: swap entry to uncharge
5692 * Drop the memsw charge associated with @entry.
5694 void mem_cgroup_uncharge_swap(swp_entry_t entry)
5696 struct mem_cgroup *memcg;
5699 if (!do_swap_account)
5702 id = swap_cgroup_record(entry, 0);
5703 rcu_read_lock();
5704 memcg = mem_cgroup_from_id(id);
5705 if (memcg) {
5706 if (!mem_cgroup_is_root(memcg))
5707 page_counter_uncharge(&memcg->memsw, 1);
5708 mem_cgroup_swap_statistics(memcg, false);
5709 css_put(&memcg->css);
5714 /* to remember the boot option */
5715 #ifdef CONFIG_MEMCG_SWAP_ENABLED
5716 static int really_do_swap_account __initdata = 1;
5718 static int really_do_swap_account __initdata;
5721 static int __init enable_swap_account(char *s)
5723 if (!strcmp(s, "1"))
5724 really_do_swap_account = 1;
5725 else if (!strcmp(s, "0"))
5726 really_do_swap_account = 0;
5729 __setup("swapaccount=", enable_swap_account);
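/*
 * Usage example: "swapaccount=0" on the kernel command line boots with
 * the memsw counters disabled even if CONFIG_MEMCG_SWAP_ENABLED is set,
 * while "swapaccount=1" enables them on a kernel built without it.
 */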
5731 static struct cftype memsw_cgroup_files[] = {
5733 .name = "memsw.usage_in_bytes",
5734 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
5735 .read_u64 = mem_cgroup_read_u64,
5738 .name = "memsw.max_usage_in_bytes",
5739 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
5740 .write = mem_cgroup_reset,
5741 .read_u64 = mem_cgroup_read_u64,
5744 .name = "memsw.limit_in_bytes",
5745 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
5746 .write = mem_cgroup_write,
5747 .read_u64 = mem_cgroup_read_u64,
5750 .name = "memsw.failcnt",
5751 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
5752 .write = mem_cgroup_reset,
5753 .read_u64 = mem_cgroup_read_u64,
5755 { }, /* terminate */
5758 static int __init mem_cgroup_swap_init(void)
5760 if (!mem_cgroup_disabled() && really_do_swap_account) {
5761 do_swap_account = 1;
5762 WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys,
5763 memsw_cgroup_files));
5767 subsys_initcall(mem_cgroup_swap_init);
5769 #endif /* CONFIG_MEMCG_SWAP */