mm: memcg: rework and document OOM waiting and wakeup
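
The memcg OOM path open-codes a sleeping lock: a task tries to take the
per-hierarchy OOM lock and, on failure, sleeps on memcg_oom_waitq until
the holder unlocks and wakes it up.  Make that construct recognizable
and get the ordering right:

- define the memcg_oom_lock spinlock next to the lock helpers and take
  it inside them, so that callers no longer have to hold it themselves

- rename mem_cgroup_oom_lock() to mem_cgroup_oom_trylock() and make
  mem_cgroup_oom_unlock() return void

- in mem_cgroup_handle_oom(), register on the waitqueue with
  prepare_to_wait() before attempting the trylock, so that a contender
  cannot miss the wakeup from the unlock path and sleep indefinitely

- after unlocking, wake any remaining sleepers explicitly through
  memcg_oom_recover(), since there is no guarantee that a contender
  sees the wakeups triggered by the OOM kill uncharges

The waitqueue-before-trylock ordering is the general contract for any
open-coded sleeping lock.  A minimal, self-contained sketch of that
pattern, using generic waitqueue primitives and purely hypothetical
names (demo_lock, demo_locked, demo_waitq) rather than the memcg
helpers themselves:

	#include <linux/spinlock.h>
	#include <linux/wait.h>
	#include <linux/sched.h>

	static DEFINE_SPINLOCK(demo_lock);	/* protects demo_locked */
	static bool demo_locked;
	static DECLARE_WAIT_QUEUE_HEAD(demo_waitq);

	static bool demo_trylock(void)
	{
		bool success;

		spin_lock(&demo_lock);
		success = !demo_locked;
		if (success)
			demo_locked = true;
		spin_unlock(&demo_lock);

		return success;
	}

	static void demo_unlock(void)
	{
		spin_lock(&demo_lock);
		demo_locked = false;
		spin_unlock(&demo_lock);

		/* Wake anybody who lost the trylock and went to sleep. */
		wake_up(&demo_waitq);
	}

	static void demo_lock_sleeping(void)
	{
		DEFINE_WAIT(wait);

		for (;;) {
			/*
			 * Get onto the waitqueue before the trylock: a
			 * wakeup from demo_unlock() issued after this
			 * point is not lost, it merely makes the
			 * schedule() below return right away.
			 */
			prepare_to_wait(&demo_waitq, &wait, TASK_UNINTERRUPTIBLE);

			if (demo_trylock())
				break;

			schedule();
		}
		finish_wait(&demo_waitq, &wait);
	}

The memcg version differs in that the loser sleeps only once and then
returns so the charge attempt can be retried, but the ordering
requirement is the same.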
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 0980bbf6438d8c4667494299760f78b5c731a7c8..04250cbf46c6112acbc07a7634bcb6a67b98cbbf 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1916,15 +1916,18 @@ mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg,
        return SKIP;
 }
 
+static DEFINE_SPINLOCK(memcg_oom_lock);
+
 /*
  * Check OOM-Killer is already running under our hierarchy.
  * If someone is running, return false.
- * Has to be called with memcg_oom_lock
  */
-static bool mem_cgroup_oom_lock(struct mem_cgroup *memcg)
+static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
 {
        struct mem_cgroup *iter, *failed = NULL;
 
+       spin_lock(&memcg_oom_lock);
+
        for_each_mem_cgroup_tree(iter, memcg) {
                if (iter->oom_lock) {
                        /*
@@ -1938,33 +1941,33 @@ static bool mem_cgroup_oom_lock(struct mem_cgroup *memcg)
                        iter->oom_lock = true;
        }
 
-       if (!failed)
-               return true;
-
-       /*
-        * OK, we failed to lock the whole subtree so we have to clean up
-        * what we set up to the failing subtree
-        */
-       for_each_mem_cgroup_tree(iter, memcg) {
-               if (iter == failed) {
-                       mem_cgroup_iter_break(memcg, iter);
-                       break;
+       if (failed) {
+               /*
+                * OK, we failed to lock the whole subtree so we have
+                * to clean up what we set up to the failing subtree
+                */
+               for_each_mem_cgroup_tree(iter, memcg) {
+                       if (iter == failed) {
+                               mem_cgroup_iter_break(memcg, iter);
+                               break;
+                       }
+                       iter->oom_lock = false;
                }
-               iter->oom_lock = false;
        }
-       return false;
+
+       spin_unlock(&memcg_oom_lock);
+
+       return !failed;
 }
 
-/*
- * Has to be called with memcg_oom_lock
- */
-static int mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
+static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
 {
        struct mem_cgroup *iter;
 
+       spin_lock(&memcg_oom_lock);
        for_each_mem_cgroup_tree(iter, memcg)
                iter->oom_lock = false;
-       return 0;
+       spin_unlock(&memcg_oom_lock);
 }
 
 static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
@@ -1988,7 +1991,6 @@ static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
                atomic_add_unless(&iter->under_oom, -1, 0);
 }
 
-static DEFINE_SPINLOCK(memcg_oom_lock);
 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
 
 struct oom_wait_info {
@@ -2035,45 +2037,52 @@ static bool mem_cgroup_handle_oom(struct mem_cgroup *memcg, gfp_t mask,
                                  int order)
 {
        struct oom_wait_info owait;
-       bool locked, need_to_kill;
+       bool locked;
 
        owait.memcg = memcg;
        owait.wait.flags = 0;
        owait.wait.func = memcg_oom_wake_function;
        owait.wait.private = current;
        INIT_LIST_HEAD(&owait.wait.task_list);
-       need_to_kill = true;
-       mem_cgroup_mark_under_oom(memcg);
 
-       /* At first, try to OOM lock hierarchy under memcg.*/
-       spin_lock(&memcg_oom_lock);
-       locked = mem_cgroup_oom_lock(memcg);
        /*
+        * As with any blocking lock, a contender needs to start
+        * listening for wakeups before attempting the trylock,
+        * otherwise it can miss the wakeup from the unlock and sleep
+        * indefinitely.  This is just open-coded because our locking
+        * is so particular to memcg hierarchies.
+        *
         * Even if signal_pending(), we can't quit charge() loop without
         * accounting. So, UNINTERRUPTIBLE is appropriate. But SIGKILL
         * under OOM is always welcomed, use TASK_KILLABLE here.
         */
        prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
-       if (!locked || memcg->oom_kill_disable)
-               need_to_kill = false;
+       mem_cgroup_mark_under_oom(memcg);
+
+       locked = mem_cgroup_oom_trylock(memcg);
+
        if (locked)
                mem_cgroup_oom_notify(memcg);
-       spin_unlock(&memcg_oom_lock);
 
-       if (need_to_kill) {
+       if (locked && !memcg->oom_kill_disable) {
+               mem_cgroup_unmark_under_oom(memcg);
                finish_wait(&memcg_oom_waitq, &owait.wait);
                mem_cgroup_out_of_memory(memcg, mask, order);
        } else {
                schedule();
+               mem_cgroup_unmark_under_oom(memcg);
                finish_wait(&memcg_oom_waitq, &owait.wait);
        }
-       spin_lock(&memcg_oom_lock);
-       if (locked)
-               mem_cgroup_oom_unlock(memcg);
-       memcg_wakeup_oom(memcg);
-       spin_unlock(&memcg_oom_lock);
 
-       mem_cgroup_unmark_under_oom(memcg);
+       if (locked) {
+               mem_cgroup_oom_unlock(memcg);
+               /*
+                * There is no guarantee that an OOM-lock contender
+                * sees the wakeups triggered by the OOM kill
+                * uncharges.  Wake any sleepers explicitly.
+                */
+               memcg_oom_recover(memcg);
+       }
 
        if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current))
                return false;