mm: memcontrol - uninitialised return value

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index c8569bc298ffc77c5a2627b5a4d2fd2a42b91742..8c200e86da4cbc818b72db6e7ff3c72faf0480a9 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -149,16 +149,23 @@ struct mem_cgroup_threshold {
        u64 threshold;
 };
 
+/* For threshold notifications */
 struct mem_cgroup_threshold_ary {
        /* An array index points to threshold just below usage. */
-       atomic_t current_threshold;
+       int current_threshold;
        /* Size of entries[] */
        unsigned int size;
        /* Array of thresholds */
        struct mem_cgroup_threshold entries[0];
 };
+/* For OOM notifications */
+struct mem_cgroup_eventfd_list {
+       struct list_head list;
+       struct eventfd_ctx *eventfd;
+};
 
 static void mem_cgroup_threshold(struct mem_cgroup *mem);
+static void mem_cgroup_oom_notify(struct mem_cgroup *mem);
 
 /*
  * The memory controller data structure. The memory controller controls both
@@ -207,6 +214,8 @@ struct mem_cgroup {
        atomic_t        refcnt;
 
        unsigned int    swappiness;
+       /* OOM-Killer disable */
+       int             oom_kill_disable;
 
        /* set when res.limit == memsw.limit */
        bool            memsw_is_minimum;
@@ -220,12 +229,14 @@ struct mem_cgroup {
        /* thresholds for mem+swap usage. RCU-protected */
        struct mem_cgroup_threshold_ary *memsw_thresholds;
 
+       /* For oom notifier event fd */
+       struct list_head oom_notify;
+
        /*
         * Should we move charges of a task when a task is moved into this
         * mem_cgroup ? And what type of charges should we move ?
         */
        unsigned long   move_charge_at_immigrate;
-
        /*
         * percpu counter.
         */
@@ -239,6 +250,7 @@ struct mem_cgroup {
  */
 enum move_type {
        MOVE_CHARGE_TYPE_ANON,  /* private anonymous page and swap of it */
+       MOVE_CHARGE_TYPE_FILE,  /* file page(including tmpfs) and swap of it */
        NR_MOVE_TYPE,
 };
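
Aside (not part of the patch): userspace selects these move types through the
memory.move_charge_at_immigrate file, where bit 0 is MOVE_CHARGE_TYPE_ANON and
the new bit 1 is MOVE_CHARGE_TYPE_FILE. A minimal sketch of enabling both
follows; the cgroup mount point and the group name "mygroup" are assumptions
made only for illustration.

#include <stdio.h>

int main(void)
{
	/* 3 == (1 << MOVE_CHARGE_TYPE_ANON) | (1 << MOVE_CHARGE_TYPE_FILE) */
	FILE *f = fopen("/sys/fs/cgroup/memory/mygroup/"
			"memory.move_charge_at_immigrate", "w");

	if (!f)
		return 1;
	fprintf(f, "3\n");
	return fclose(f) ? 1 : 0;
}

As the move_anon()/move_file() helpers below show, these bits are read from the
destination cgroup's move_charge_at_immigrate when a task is moved in.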
 
@@ -255,6 +267,18 @@ static struct move_charge_struct {
        .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
 };
 
+static bool move_anon(void)
+{
+       return test_bit(MOVE_CHARGE_TYPE_ANON,
+                                       &mc.to->move_charge_at_immigrate);
+}
+
+static bool move_file(void)
+{
+       return test_bit(MOVE_CHARGE_TYPE_FILE,
+                                       &mc.to->move_charge_at_immigrate);
+}
+
 /*
  * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
  * limit reclaim to prevent infinite loops, if they ever occur.
@@ -282,9 +306,12 @@ enum charge_type {
 /* for encoding cft->private value on file */
 #define _MEM                   (0)
 #define _MEMSWAP               (1)
+#define _OOM_TYPE              (2)
 #define MEMFILE_PRIVATE(x, val)        (((x) << 16) | (val))
 #define MEMFILE_TYPE(val)      (((val) >> 16) & 0xffff)
 #define MEMFILE_ATTR(val)      ((val) & 0xffff)
+/* Used for OOM notifier */
+#define OOM_CONTROL            (0)
 
 /*
  * Reclaim flags for mem_cgroup_hierarchical_reclaim
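
Aside (not part of the patch): cft->private packs a resource type in the upper
16 bits and an attribute in the lower 16 bits. A standalone sketch of how the
new _OOM_TYPE value round-trips through these macros (they are copied here only
so the snippet compiles on its own):

#include <assert.h>

#define _OOM_TYPE		(2)
#define OOM_CONTROL		(0)
#define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
#define MEMFILE_TYPE(val)	(((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)

int main(void)
{
	int private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL);	/* 0x20000 */

	assert(MEMFILE_TYPE(private) == _OOM_TYPE);
	assert(MEMFILE_ATTR(private) == OOM_CONTROL);
	return 0;
}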
@@ -1293,14 +1320,62 @@ static void mem_cgroup_oom_unlock(struct mem_cgroup *mem)
 static DEFINE_MUTEX(memcg_oom_mutex);
 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
 
+struct oom_wait_info {
+       struct mem_cgroup *mem;
+       wait_queue_t    wait;
+};
+
+static int memcg_oom_wake_function(wait_queue_t *wait,
+       unsigned mode, int sync, void *arg)
+{
+       struct mem_cgroup *wake_mem = (struct mem_cgroup *)arg;
+       struct oom_wait_info *oom_wait_info;
+
+       oom_wait_info = container_of(wait, struct oom_wait_info, wait);
+
+       if (oom_wait_info->mem == wake_mem)
+               goto wakeup;
+       /* if no hierarchy, no match */
+       if (!oom_wait_info->mem->use_hierarchy || !wake_mem->use_hierarchy)
+               return 0;
+       /*
+        * Both oom_wait_info->mem and wake_mem are stable under us,
+        * so we can use css_is_ancestor() without worrying about RCU.
+        */
+       if (!css_is_ancestor(&oom_wait_info->mem->css, &wake_mem->css) &&
+           !css_is_ancestor(&wake_mem->css, &oom_wait_info->mem->css))
+               return 0;
+
+wakeup:
+       return autoremove_wake_function(wait, mode, sync, arg);
+}
+
+static void memcg_wakeup_oom(struct mem_cgroup *mem)
+{
+       /* for filtering, pass "mem" as argument. */
+       __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, mem);
+}
+
+static void memcg_oom_recover(struct mem_cgroup *mem)
+{
+       if (mem->oom_kill_disable && atomic_read(&mem->oom_lock))
+               memcg_wakeup_oom(mem);
+}
+
 /*
  * try to call OOM killer. returns false if we should exit memory-reclaim loop.
  */
 bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask)
 {
-       DEFINE_WAIT(wait);
-       bool locked;
+       struct oom_wait_info owait;
+       bool locked, need_to_kill;
 
+       owait.mem = mem;
+       owait.wait.flags = 0;
+       owait.wait.func = memcg_oom_wake_function;
+       owait.wait.private = current;
+       INIT_LIST_HEAD(&owait.wait.task_list);
+       need_to_kill = true;
        /* At first, try to OOM lock hierarchy under mem.*/
        mutex_lock(&memcg_oom_mutex);
        locked = mem_cgroup_oom_lock(mem);
@@ -1309,32 +1384,23 @@ bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask)
         * accounting. So, UNINTERRUPTIBLE is appropriate. But SIGKILL
         * under OOM is always welcomed, use TASK_KILLABLE here.
         */
-       if (!locked)
-               prepare_to_wait(&memcg_oom_waitq, &wait, TASK_KILLABLE);
+       prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
+       if (!locked || mem->oom_kill_disable)
+               need_to_kill = false;
+       if (locked)
+               mem_cgroup_oom_notify(mem);
        mutex_unlock(&memcg_oom_mutex);
 
-       if (locked)
+       if (need_to_kill) {
+               finish_wait(&memcg_oom_waitq, &owait.wait);
                mem_cgroup_out_of_memory(mem, mask);
-       else {
+       } else {
                schedule();
-               finish_wait(&memcg_oom_waitq, &wait);
+               finish_wait(&memcg_oom_waitq, &owait.wait);
        }
        mutex_lock(&memcg_oom_mutex);
        mem_cgroup_oom_unlock(mem);
-       /*
-        * Here, we use global waitq .....more fine grained waitq ?
-        * Assume following hierarchy.
-        * A/
-        *   01
-        *   02
-        * assume OOM happens both in A and 01 at the same time. Tthey are
-        * mutually exclusive by lock. (kill in 01 helps A.)
-        * When we use per memcg waitq, we have to wake up waiters on A and 02
-        * in addtion to waiters on 01. We use global waitq for avoiding mess.
-        * It will not be a big problem.
-        * (And a task may be moved to other groups while it's waiting for OOM.)
-        */
-       wake_up_all(&memcg_oom_waitq);
+       memcg_wakeup_oom(mem);
        mutex_unlock(&memcg_oom_mutex);
 
        if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current))
@@ -2118,15 +2184,6 @@ __do_uncharge(struct mem_cgroup *mem, const enum charge_type ctype)
        /* If swapout, usage of swap doesn't decrease */
        if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
                uncharge_memsw = false;
-       /*
-        * do_batch > 0 when unmapping pages or inode invalidate/truncate.
-        * In those cases, all pages freed continously can be expected to be in
-        * the same cgroup and we have chance to coalesce uncharges.
-        * But we do uncharge one by one if this is killed by OOM(TIF_MEMDIE)
-        * because we want to do uncharge as soon as possible.
-        */
-       if (!current->memcg_batch.do_batch || test_thread_flag(TIF_MEMDIE))
-               goto direct_uncharge;
 
        batch = &current->memcg_batch;
        /*
@@ -2136,6 +2193,17 @@ __do_uncharge(struct mem_cgroup *mem, const enum charge_type ctype)
         */
        if (!batch->memcg)
                batch->memcg = mem;
+       /*
+        * do_batch > 0 when unmapping pages or inode invalidate/truncate.
+        * In those cases, all pages freed continuously can be expected to be
+        * in the same cgroup and we have a chance to coalesce uncharges.
+        * But we do uncharge one by one if this is killed by OOM (TIF_MEMDIE)
+        * because we want to do uncharge as soon as possible.
+        */
+
+       if (!batch->do_batch || test_thread_flag(TIF_MEMDIE))
+               goto direct_uncharge;
+
        /*
         * In typical case, batch->memcg == mem. This means we can
         * merge a series of uncharges to an uncharge of res_counter.
@@ -2152,6 +2220,8 @@ direct_uncharge:
        res_counter_uncharge(&mem->res, PAGE_SIZE);
        if (uncharge_memsw)
                res_counter_uncharge(&mem->memsw, PAGE_SIZE);
+       if (unlikely(batch->memcg != mem))
+               memcg_oom_recover(mem);
        return;
 }
 
@@ -2288,6 +2358,7 @@ void mem_cgroup_uncharge_end(void)
                res_counter_uncharge(&batch->memcg->res, batch->bytes);
        if (batch->memsw_bytes)
                res_counter_uncharge(&batch->memcg->memsw, batch->memsw_bytes);
+       memcg_oom_recover(batch->memcg);
        /* forget this pointer (for sanity check) */
        batch->memcg = NULL;
 }
@@ -2524,10 +2595,11 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
                                unsigned long long val)
 {
        int retry_count;
-       u64 memswlimit;
+       u64 memswlimit, memlimit;
        int ret = 0;
        int children = mem_cgroup_count_children(memcg);
        u64 curusage, oldusage;
+       int enlarge;
 
        /*
         * For keeping hierarchical_reclaim simple, how long we should retry
@@ -2538,6 +2610,7 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
 
        oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
 
+       enlarge = 0;
        while (retry_count) {
                if (signal_pending(current)) {
                        ret = -EINTR;
@@ -2555,6 +2628,11 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
                        mutex_unlock(&set_limit_mutex);
                        break;
                }
+
+               memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
+               if (memlimit < val)
+                       enlarge = 1;
+
                ret = res_counter_set_limit(&memcg->res, val);
                if (!ret) {
                        if (memswlimit == val)
@@ -2576,6 +2654,8 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
                else
                        oldusage = curusage;
        }
+       if (!ret && enlarge)
+               memcg_oom_recover(memcg);
 
        return ret;
 }
@@ -2584,9 +2664,10 @@ static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
                                        unsigned long long val)
 {
        int retry_count;
-       u64 memlimit, oldusage, curusage;
+       u64 memlimit, memswlimit, oldusage, curusage;
        int children = mem_cgroup_count_children(memcg);
        int ret = -EBUSY;
+       int enlarge = 0;
 
        /* see mem_cgroup_resize_res_limit */
        retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
@@ -2608,6 +2689,9 @@ static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
                        mutex_unlock(&set_limit_mutex);
                        break;
                }
+               memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
+               if (memswlimit < val)
+                       enlarge = 1;
                ret = res_counter_set_limit(&memcg->memsw, val);
                if (!ret) {
                        if (memlimit == val)
@@ -2630,6 +2714,8 @@ static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
                else
                        oldusage = curusage;
        }
+       if (!ret && enlarge)
+               memcg_oom_recover(memcg);
        return ret;
 }
 
@@ -2821,6 +2907,7 @@ move_account:
                        if (ret)
                                break;
                }
+               memcg_oom_recover(mem);
                /* it seems parent cgroup doesn't have enough mem */
                if (ret == -ENOMEM)
                        goto try_to_free;
@@ -3325,7 +3412,7 @@ static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
         * If it's not true, a threshold was crossed after last
         * call of __mem_cgroup_threshold().
         */
-       i = atomic_read(&t->current_threshold);
+       i = t->current_threshold;
 
        /*
         * Iterate backward over array of thresholds starting from
@@ -3349,7 +3436,7 @@ static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
                eventfd_signal(t->entries[i].eventfd, 1);
 
        /* Update current_threshold */
-       atomic_set(&t->current_threshold, i - 1);
+       t->current_threshold = i - 1;
 unlock:
        rcu_read_unlock();
 }
@@ -3369,8 +3456,22 @@ static int compare_thresholds(const void *a, const void *b)
        return _a->threshold - _b->threshold;
 }
 
-static int mem_cgroup_register_event(struct cgroup *cgrp, struct cftype *cft,
-               struct eventfd_ctx *eventfd, const char *args)
+static int mem_cgroup_oom_notify_cb(struct mem_cgroup *mem, void *data)
+{
+       struct mem_cgroup_eventfd_list *ev;
+
+       list_for_each_entry(ev, &mem->oom_notify, list)
+               eventfd_signal(ev->eventfd, 1);
+       return 0;
+}
+
+static void mem_cgroup_oom_notify(struct mem_cgroup *mem)
+{
+       mem_cgroup_walk_tree(mem, NULL, mem_cgroup_oom_notify_cb);
+}
+
+static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
+       struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
 {
        struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
        struct mem_cgroup_threshold_ary *thresholds, *thresholds_new;
@@ -3427,7 +3528,7 @@ static int mem_cgroup_register_event(struct cgroup *cgrp, struct cftype *cft,
                        compare_thresholds, NULL);
 
        /* Find current threshold */
-       atomic_set(&thresholds_new->current_threshold, -1);
+       thresholds_new->current_threshold = -1;
        for (i = 0; i < size; i++) {
                if (thresholds_new->entries[i].threshold < usage) {
                        /*
@@ -3435,7 +3536,7 @@ static int mem_cgroup_register_event(struct cgroup *cgrp, struct cftype *cft,
                         * until rcu_assign_pointer(), so it's safe to increment
                         * it here.
                         */
-                       atomic_inc(&thresholds_new->current_threshold);
+                       ++thresholds_new->current_threshold;
                }
        }
 
@@ -3454,15 +3555,15 @@ unlock:
        return ret;
 }
 
-static int mem_cgroup_unregister_event(struct cgroup *cgrp, struct cftype *cft,
-               struct eventfd_ctx *eventfd)
+static int mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
+       struct cftype *cft, struct eventfd_ctx *eventfd)
 {
        struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
        struct mem_cgroup_threshold_ary *thresholds, *thresholds_new;
        int type = MEMFILE_TYPE(cft->private);
        u64 usage;
        int size = 0;
-       int i, j, ret;
+       int i, j, ret = 0;
 
        mutex_lock(&memcg->thresholds_lock);
        if (type == _MEM)
@@ -3506,7 +3607,7 @@ static int mem_cgroup_unregister_event(struct cgroup *cgrp, struct cftype *cft,
        thresholds_new->size = size;
 
        /* Copy thresholds and find current threshold */
-       atomic_set(&thresholds_new->current_threshold, -1);
+       thresholds_new->current_threshold = -1;
        for (i = 0, j = 0; i < thresholds->size; i++) {
                if (thresholds->entries[i].eventfd == eventfd)
                        continue;
@@ -3518,7 +3619,7 @@ static int mem_cgroup_unregister_event(struct cgroup *cgrp, struct cftype *cft,
                         * until rcu_assign_pointer(), so it's safe to increment
                         * it here.
                         */
-                       atomic_inc(&thresholds_new->current_threshold);
+                       ++thresholds_new->current_threshold;
                }
                j++;
        }
@@ -3539,13 +3640,101 @@ unlock:
        return ret;
 }
 
+static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
+       struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
+{
+       struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
+       struct mem_cgroup_eventfd_list *event;
+       int type = MEMFILE_TYPE(cft->private);
+
+       BUG_ON(type != _OOM_TYPE);
+       event = kmalloc(sizeof(*event), GFP_KERNEL);
+       if (!event)
+               return -ENOMEM;
+
+       mutex_lock(&memcg_oom_mutex);
+
+       event->eventfd = eventfd;
+       list_add(&event->list, &memcg->oom_notify);
+
+       /* already in OOM ? */
+       if (atomic_read(&memcg->oom_lock))
+               eventfd_signal(eventfd, 1);
+       mutex_unlock(&memcg_oom_mutex);
+
+       return 0;
+}
+
+static int mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
+       struct cftype *cft, struct eventfd_ctx *eventfd)
+{
+       struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
+       struct mem_cgroup_eventfd_list *ev, *tmp;
+       int type = MEMFILE_TYPE(cft->private);
+
+       BUG_ON(type != _OOM_TYPE);
+
+       mutex_lock(&memcg_oom_mutex);
+
+       list_for_each_entry_safe(ev, tmp, &mem->oom_notify, list) {
+               if (ev->eventfd == eventfd) {
+                       list_del(&ev->list);
+                       kfree(ev);
+               }
+       }
+
+       mutex_unlock(&memcg_oom_mutex);
+
+       return 0;
+}
+
+static int mem_cgroup_oom_control_read(struct cgroup *cgrp,
+       struct cftype *cft,  struct cgroup_map_cb *cb)
+{
+       struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
+
+       cb->fill(cb, "oom_kill_disable", mem->oom_kill_disable);
+
+       if (atomic_read(&mem->oom_lock))
+               cb->fill(cb, "under_oom", 1);
+       else
+               cb->fill(cb, "under_oom", 0);
+       return 0;
+}
+
+/*
+ * Enable/disable the OOM killer for this memcg. When disabled, a task that
+ * hits the limit sleeps on the OOM waitqueue (and userspace is notified)
+ * instead of invoking the OOM killer.
+ */
+static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
+       struct cftype *cft, u64 val)
+{
+       struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
+       struct mem_cgroup *parent;
+
+       /* the root cgroup cannot be changed; only 0 and 1 are allowed */
+       if (!cgrp->parent || !((val == 0) || (val == 1)))
+               return -EINVAL;
+
+       parent = mem_cgroup_from_cont(cgrp->parent);
+
+       cgroup_lock();
+       /* oom-kill-disable applies to the whole sub-hierarchy. */
+       if ((parent->use_hierarchy) ||
+           (mem->use_hierarchy && !list_empty(&cgrp->children))) {
+               cgroup_unlock();
+               return -EINVAL;
+       }
+       mem->oom_kill_disable = val;
+       cgroup_unlock();
+       return 0;
+}
+
 static struct cftype mem_cgroup_files[] = {
        {
                .name = "usage_in_bytes",
                .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
                .read_u64 = mem_cgroup_read,
-               .register_event = mem_cgroup_register_event,
-               .unregister_event = mem_cgroup_unregister_event,
+               .register_event = mem_cgroup_usage_register_event,
+               .unregister_event = mem_cgroup_usage_unregister_event,
        },
        {
                .name = "max_usage_in_bytes",
@@ -3594,6 +3783,14 @@ static struct cftype mem_cgroup_files[] = {
                .read_u64 = mem_cgroup_move_charge_read,
                .write_u64 = mem_cgroup_move_charge_write,
        },
+       {
+               .name = "oom_control",
+               .read_map = mem_cgroup_oom_control_read,
+               .write_u64 = mem_cgroup_oom_control_write,
+               .register_event = mem_cgroup_oom_register_event,
+               .unregister_event = mem_cgroup_oom_unregister_event,
+               .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
+       },
 };
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
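
Aside (not part of the patch): the new memory.oom_control file plugs into the
existing cgroup eventfd notification mechanism via cgroup.event_control, and a
write of 0 or 1 toggles oom_kill_disable (see mem_cgroup_oom_control_write()
above). A hedged userspace sketch of arming an OOM notification; the cgroup
mount point and the group name "mygroup" are assumptions made only for
illustration.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/eventfd.h>
#include <unistd.h>

int main(void)
{
	char buf[64];
	uint64_t cnt;
	int efd = eventfd(0, 0);
	int cfd = open("/sys/fs/cgroup/memory/mygroup/memory.oom_control",
		       O_RDONLY);
	int ecfd = open("/sys/fs/cgroup/memory/mygroup/cgroup.event_control",
			O_WRONLY);

	if (efd < 0 || cfd < 0 || ecfd < 0)
		return 1;
	/* "<eventfd fd> <memory.oom_control fd>" arms the OOM notifier. */
	snprintf(buf, sizeof(buf), "%d %d", efd, cfd);
	if (write(ecfd, buf, strlen(buf)) < 0)
		return 1;
	/* Blocks until the group enters OOM; cnt counts delivered events. */
	if (read(efd, &cnt, sizeof(cnt)) == sizeof(cnt))
		printf("OOM notified %llu time(s)\n", (unsigned long long)cnt);
	return 0;
}

Reading memory.oom_control afterwards reports both oom_kill_disable and
under_oom, matching mem_cgroup_oom_control_read() above.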
@@ -3602,8 +3799,8 @@ static struct cftype memsw_cgroup_files[] = {
                .name = "memsw.usage_in_bytes",
                .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
                .read_u64 = mem_cgroup_read,
-               .register_event = mem_cgroup_register_event,
-               .unregister_event = mem_cgroup_unregister_event,
+               .register_event = mem_cgroup_usage_register_event,
+               .unregister_event = mem_cgroup_usage_unregister_event,
        },
        {
                .name = "memsw.max_usage_in_bytes",
@@ -3831,6 +4028,7 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
        } else {
                parent = mem_cgroup_from_cont(cont->parent);
                mem->use_hierarchy = parent->use_hierarchy;
+               mem->oom_kill_disable = parent->oom_kill_disable;
        }
 
        if (parent && parent->use_hierarchy) {
@@ -3849,6 +4047,7 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
        }
        mem->last_scanned_child = 0;
        spin_lock_init(&mem->reclaim_param_lock);
+       INIT_LIST_HEAD(&mem->oom_notify);
 
        if (parent)
                mem->swappiness = get_swappiness(parent);
@@ -3976,6 +4175,80 @@ enum mc_target_type {
        MC_TARGET_SWAP,
 };
 
+static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
+                                               unsigned long addr, pte_t ptent)
+{
+       struct page *page = vm_normal_page(vma, addr, ptent);
+
+       if (!page || !page_mapped(page))
+               return NULL;
+       if (PageAnon(page)) {
+               /* we don't move shared anon */
+               if (!move_anon() || page_mapcount(page) > 1)
+                       return NULL;
+       } else if (!move_file())
+               /* we ignore mapcount for file pages */
+               return NULL;
+       if (!get_page_unless_zero(page))
+               return NULL;
+
+       return page;
+}
+
+static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
+                       unsigned long addr, pte_t ptent, swp_entry_t *entry)
+{
+       int usage_count;
+       struct page *page = NULL;
+       swp_entry_t ent = pte_to_swp_entry(ptent);
+
+       if (!move_anon() || non_swap_entry(ent))
+               return NULL;
+       usage_count = mem_cgroup_count_swap_user(ent, &page);
+       if (usage_count > 1) { /* we don't move shared anon */
+               if (page)
+                       put_page(page);
+               return NULL;
+       }
+       if (do_swap_account)
+               entry->val = ent.val;
+
+       return page;
+}
+
+static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
+                       unsigned long addr, pte_t ptent, swp_entry_t *entry)
+{
+       struct page *page = NULL;
+       struct inode *inode;
+       struct address_space *mapping;
+       pgoff_t pgoff;
+
+       if (!vma->vm_file) /* anonymous vma */
+               return NULL;
+       if (!move_file())
+               return NULL;
+
+       inode = vma->vm_file->f_path.dentry->d_inode;
+       mapping = vma->vm_file->f_mapping;
+       if (pte_none(ptent))
+               pgoff = linear_page_index(vma, addr);
+       else /* pte_file(ptent) is true */
+               pgoff = pte_to_pgoff(ptent);
+
+       /* page is moved even if it's not RSS of this task (not faulted in). */
+       if (!mapping_cap_swap_backed(mapping)) { /* normal file */
+               page = find_get_page(mapping, pgoff);
+       } else { /* shmem/tmpfs file. we should take account of swap too. */
+               swp_entry_t ent;
+               mem_cgroup_get_shmem_target(inode, pgoff, &page, &ent);
+               if (do_swap_account)
+                       entry->val = ent.val;
+       }
+
+       return page;
+}
+
 static int is_target_pte_for_mc(struct vm_area_struct *vma,
                unsigned long addr, pte_t ptent, union mc_target *target)
 {
@@ -3983,43 +4256,16 @@ static int is_target_pte_for_mc(struct vm_area_struct *vma,
        struct page_cgroup *pc;
        int ret = 0;
        swp_entry_t ent = { .val = 0 };
-       int usage_count = 0;
-       bool move_anon = test_bit(MOVE_CHARGE_TYPE_ANON,
-                                       &mc.to->move_charge_at_immigrate);
 
-       if (!pte_present(ptent)) {
-               /* TODO: handle swap of shmes/tmpfs */
-               if (pte_none(ptent) || pte_file(ptent))
-                       return 0;
-               else if (is_swap_pte(ptent)) {
-                       ent = pte_to_swp_entry(ptent);
-                       if (!move_anon || non_swap_entry(ent))
-                               return 0;
-                       usage_count = mem_cgroup_count_swap_user(ent, &page);
-               }
-       } else {
-               page = vm_normal_page(vma, addr, ptent);
-               if (!page || !page_mapped(page))
-                       return 0;
-               /*
-                * TODO: We don't move charges of file(including shmem/tmpfs)
-                * pages for now.
-                */
-               if (!move_anon || !PageAnon(page))
-                       return 0;
-               if (!get_page_unless_zero(page))
-                       return 0;
-               usage_count = page_mapcount(page);
-       }
-       if (usage_count > 1) {
-               /*
-                * TODO: We don't move charges of shared(used by multiple
-                * processes) pages for now.
-                */
-               if (page)
-                       put_page(page);
+       if (pte_present(ptent))
+               page = mc_handle_present_pte(vma, addr, ptent);
+       else if (is_swap_pte(ptent))
+               page = mc_handle_swap_pte(vma, addr, ptent, &ent);
+       else if (pte_none(ptent) || pte_file(ptent))
+               page = mc_handle_file_pte(vma, addr, ptent, &ent);
+
+       if (!page && !ent.val)
                return 0;
-       }
        if (page) {
                pc = lookup_page_cgroup(page);
                /*
@@ -4035,8 +4281,8 @@ static int is_target_pte_for_mc(struct vm_area_struct *vma,
                if (!ret || !target)
                        put_page(page);
        }
-       /* throught */
-       if (ent.val && do_swap_account && !ret &&
+       /* There is a swap entry and a page doesn't exist or isn't charged */
+       if (ent.val && !ret &&
                        css_id(&mc.from->css) == lookup_swap_cgroup(ent)) {
                ret = MC_TARGET_SWAP;
                if (target)
@@ -4077,9 +4323,6 @@ static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
                };
                if (is_vm_hugetlb_page(vma))
                        continue;
-               /* TODO: We don't move charges of shmem/tmpfs pages for now. */
-               if (vma->vm_flags & VM_SHARED)
-                       continue;
                walk_page_range(vma->vm_start, vma->vm_end,
                                        &mem_cgroup_count_precharge_walk);
        }
@@ -4102,6 +4345,7 @@ static void mem_cgroup_clear_mc(void)
        if (mc.precharge) {
                __mem_cgroup_cancel_charge(mc.to, mc.precharge);
                mc.precharge = 0;
+               memcg_oom_recover(mc.to);
        }
        /*
         * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
@@ -4110,6 +4354,7 @@ static void mem_cgroup_clear_mc(void)
        if (mc.moved_charge) {
                __mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
                mc.moved_charge = 0;
+               memcg_oom_recover(mc.from);
        }
        /* we must fixup refcnts and charges */
        if (mc.moved_swap) {
@@ -4274,9 +4519,6 @@ static void mem_cgroup_move_charge(struct mm_struct *mm)
                };
                if (is_vm_hugetlb_page(vma))
                        continue;
-               /* TODO: We don't move charges of shmem/tmpfs pages for now. */
-               if (vma->vm_flags & VM_SHARED)
-                       continue;
                ret = walk_page_range(vma->vm_start, vma->vm_end,
                                                &mem_cgroup_move_charge_walk);
                if (ret)