oom: remove oom_disable_count
author David Rientjes <rientjes@google.com>
Wed, 28 Sep 2011 00:50:02 +0000 (10:50 +1000)
committer Stephen Rothwell <sfr@canb.auug.org.au>
Thu, 29 Sep 2011 06:08:00 +0000 (16:08 +1000)
This removes mm->oom_disable_count entirely since it's unnecessary and
currently buggy.  The counter was intended to be per-process but it's
currently decremented in the exit path for each thread that exits, causing
it to underflow.
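
To make the underflow concrete, here is a minimal userspace model of the
broken accounting (illustrative only, not kernel code; struct fake_mm and
the thread count are made up, and the real counter is an atomic_t on the
mm):

#include <stdio.h>

/*
 * Models mm->oom_disable_count: bumped once when the process is marked
 * OOM_SCORE_ADJ_MIN, but the old exit path dropped it once for every
 * thread sharing the mm that exits.
 */
struct fake_mm {
	int oom_disable_count;
};

int main(void)
{
	struct fake_mm mm = { .oom_disable_count = 0 };
	int nthreads = 4;
	int i;

	/* Process is marked oom-disabled once: a single increment. */
	mm.oom_disable_count++;

	/* Old exit path: every exiting thread decrements. */
	for (i = 0; i < nthreads; i++)
		mm.oom_disable_count--;

	/* Prints -3: the per-process counter has gone negative. */
	printf("oom_disable_count = %d\n", mm.oom_disable_count);
	return 0;
}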

The count was originally intended to prevent oom killing threads that
share memory with threads that cannot be killed, since killing them does
not lead to future memory freeing.  The counter could be fixed to
represent all threads sharing the same mm, but it's better to remove the
count (the kill-time check that replaces it is sketched after this list)
since:

 - it is possible that the OOM_DISABLE thread sharing memory with the
   victim is waiting on that thread to exit and will actually cause
   future memory freeing, and

 - there is no guarantee that a thread is disabled from oom killing just
   because another thread sharing its mm is oom disabled.
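
With the counter gone, the unkillable-sharer case is handled at kill time
instead.  A condensed, slightly restructured sketch of the resulting loop
in oom_kill_task() (locking of the surrounding code and error handling
omitted; see the mm/oom_kill.c hunk below):

for_each_process(q) {
	/* Only tasks sharing the victim's mm, excluding the victim's
	 * own thread group and kernel threads. */
	if (q->mm != mm || same_thread_group(q, p) ||
	    (q->flags & PF_KTHREAD))
		continue;
	/* Replaces the old counter: skip oom-disabled sharers. */
	if (q->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
		continue;

	task_lock(q);	/* Protect ->comm from prctl() */
	pr_err("Kill process %d (%s) sharing same memory\n",
	       task_pid_nr(q), q->comm);
	task_unlock(q);
	force_sig(SIGKILL, q);
}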

Signed-off-by: David Rientjes <rientjes@google.com>
Reported-by: Oleg Nesterov <oleg@redhat.com>
Cc: Ying Han <yinghan@google.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <>
fs/exec.c
fs/proc/base.c
include/linux/mm_types.h
kernel/exit.c
kernel/fork.c
mm/oom_kill.c

index 8433e5de1c700b2b24fe8b52aeb858309684da2e..8002e7d1ae91e7cd67a7cf30d789d8cf16276678 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -841,10 +841,6 @@ static int exec_mmap(struct mm_struct *mm)
        tsk->mm = mm;
        tsk->active_mm = mm;
        activate_mm(active_mm, mm);
-       if (old_mm && tsk->signal->oom_score_adj == OOM_SCORE_ADJ_MIN) {
-               atomic_dec(&old_mm->oom_disable_count);
-               atomic_inc(&tsk->mm->oom_disable_count);
-       }
        task_unlock(tsk);
        arch_pick_mmap_layout(mm);
        if (old_mm) {
index 5eb02069e1b817730050b7b46304a9244c064bde..8f0087e20e168ee04e6b14e7ef05a68f13e4b58e 100644 (file)
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1107,13 +1107,6 @@ static ssize_t oom_adjust_write(struct file *file, const char __user *buf,
                goto err_sighand;
        }
 
-       if (oom_adjust != task->signal->oom_adj) {
-               if (oom_adjust == OOM_DISABLE)
-                       atomic_inc(&task->mm->oom_disable_count);
-               if (task->signal->oom_adj == OOM_DISABLE)
-                       atomic_dec(&task->mm->oom_disable_count);
-       }
-
        /*
         * Warn that /proc/pid/oom_adj is deprecated, see
         * Documentation/feature-removal-schedule.txt.
@@ -1215,12 +1208,6 @@ static ssize_t oom_score_adj_write(struct file *file, const char __user *buf,
                goto err_sighand;
        }
 
-       if (oom_score_adj != task->signal->oom_score_adj) {
-               if (oom_score_adj == OOM_SCORE_ADJ_MIN)
-                       atomic_inc(&task->mm->oom_disable_count);
-               if (task->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
-                       atomic_dec(&task->mm->oom_disable_count);
-       }
        task->signal->oom_score_adj = oom_score_adj;
        if (has_capability_noaudit(current, CAP_SYS_RESOURCE))
                task->signal->oom_score_adj_min = oom_score_adj;
index 7870e473033cacb50bb931450616b285c1b027fd..707c2b1999e2a7f50e6015c45d8ed8d66c27eab3 100644 (file)
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -325,9 +325,6 @@ struct mm_struct {
        unsigned int token_priority;
        unsigned int last_interval;
 
-       /* How many tasks sharing this mm are OOM_DISABLE */
-       atomic_t oom_disable_count;
-
        unsigned long flags; /* Must use atomic bitops to access the bits */
 
        struct core_state *core_state; /* coredumping support */
index af36b06305b2645aca887a6e5bbb2191e6a439ce..7c8624c06efcc3ccbccb9253afe6aba93dcdd9bf 100644 (file)
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -682,8 +682,6 @@ static void exit_mm(struct task_struct * tsk)
        enter_lazy_tlb(mm, current);
        /* We don't want this task to be frozen prematurely */
        clear_freeze_flag(tsk);
-       if (tsk->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
-               atomic_dec(&mm->oom_disable_count);
        task_unlock(tsk);
        mm_update_next_owner(mm);
        mmput(mm);
index 2b21cf9ec61ab2b8eadc7c6837c02eb1f6a52465..d386c48738a96706f5bb8244b7b70b51d93b2510 100644 (file)
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -502,7 +502,6 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
        mm->cached_hole_size = ~0UL;
        mm_init_aio(mm);
        mm_init_owner(mm, p);
-       atomic_set(&mm->oom_disable_count, 0);
 
        if (likely(!mm_alloc_pgd(mm))) {
                mm->def_flags = 0;
@@ -845,8 +844,6 @@ good_mm:
        /* Initializing for Swap token stuff */
        mm->token_priority = 0;
        mm->last_interval = 0;
-       if (tsk->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
-               atomic_inc(&mm->oom_disable_count);
 
        tsk->mm = mm;
        tsk->active_mm = mm;
@@ -1419,13 +1416,8 @@ bad_fork_cleanup_io:
 bad_fork_cleanup_namespaces:
        exit_task_namespaces(p);
 bad_fork_cleanup_mm:
-       if (p->mm) {
-               task_lock(p);
-               if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
-                       atomic_dec(&p->mm->oom_disable_count);
-               task_unlock(p);
+       if (p->mm)
                mmput(p->mm);
-       }
 bad_fork_cleanup_signal:
        if (!(clone_flags & CLONE_THREAD))
                free_signal_struct(p->signal);
index 64bccffb848e8c5a5cdc8c8180de858269e9746b..d25a3da9a8a7323f558d22b6c99725984a15334f 100644 (file)
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -53,13 +53,7 @@ int test_set_oom_score_adj(int new_val)
 
        spin_lock_irq(&sighand->siglock);
        old_val = current->signal->oom_score_adj;
-       if (new_val != old_val) {
-               if (new_val == OOM_SCORE_ADJ_MIN)
-                       atomic_inc(&current->mm->oom_disable_count);
-               else if (old_val == OOM_SCORE_ADJ_MIN)
-                       atomic_dec(&current->mm->oom_disable_count);
-               current->signal->oom_score_adj = new_val;
-       }
+       current->signal->oom_score_adj = new_val;
        spin_unlock_irq(&sighand->siglock);
 
        return old_val;
@@ -171,16 +165,6 @@ unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
        if (!p)
                return 0;
 
-       /*
-        * Shortcut check for a thread sharing p->mm that is OOM_SCORE_ADJ_MIN
-        * so the entire heuristic doesn't need to be executed for something
-        * that cannot be killed.
-        */
-       if (atomic_read(&p->mm->oom_disable_count)) {
-               task_unlock(p);
-               return 0;
-       }
-
        /*
         * The memory controller may have a limit of 0 bytes, so avoid a divide
         * by zero, if necessary.
@@ -447,6 +431,9 @@ static int oom_kill_task(struct task_struct *p, struct mem_cgroup *mem)
        for_each_process(q)
                if (q->mm == mm && !same_thread_group(q, p) &&
                    !(q->flags & PF_KTHREAD)) {
+                       if (q->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
+                               continue;
+
                        task_lock(q);   /* Protect ->comm from prctl() */
                        pr_err("Kill process %d (%s) sharing same memory\n",
                                task_pid_nr(q), q->comm);
@@ -723,7 +710,7 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
        read_lock(&tasklist_lock);
        if (sysctl_oom_kill_allocating_task &&
            !oom_unkillable_task(current, NULL, nodemask) &&
-           current->mm && !atomic_read(&current->mm->oom_disable_count)) {
+           current->mm) {
                /*
                 * oom_kill_process() needs tasklist_lock held.  If it returns
                 * non-zero, current could not be killed so we must fallback to