tsk->mm = mm;
tsk->active_mm = mm;
activate_mm(active_mm, mm);
- if (old_mm && tsk->signal->oom_score_adj == OOM_SCORE_ADJ_MIN) {
- atomic_dec(&old_mm->oom_disable_count);
- atomic_inc(&tsk->mm->oom_disable_count);
- }
task_unlock(tsk);
arch_pick_mmap_layout(mm);
if (old_mm) {
goto err_sighand;
}
- if (oom_adjust != task->signal->oom_adj) {
- if (oom_adjust == OOM_DISABLE)
- atomic_inc(&task->mm->oom_disable_count);
- if (task->signal->oom_adj == OOM_DISABLE)
- atomic_dec(&task->mm->oom_disable_count);
- }
-
/*
* Warn that /proc/pid/oom_adj is deprecated, see
* Documentation/feature-removal-schedule.txt.
goto err_sighand;
}
- if (oom_score_adj != task->signal->oom_score_adj) {
- if (oom_score_adj == OOM_SCORE_ADJ_MIN)
- atomic_inc(&task->mm->oom_disable_count);
- if (task->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
- atomic_dec(&task->mm->oom_disable_count);
- }
task->signal->oom_score_adj = oom_score_adj;
if (has_capability_noaudit(current, CAP_SYS_RESOURCE))
task->signal->oom_score_adj_min = oom_score_adj;
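/*
 * Illustrative userspace sketch (assumptions: a kernel exposing
 * /proc/<pid>/oom_score_adj, and CAP_SYS_RESOURCE if the value is being
 * lowered; the helper name is hypothetical). After this change, the
 * oom_score_adj value stored by the write handler above is what marks a
 * process OOM-disabled, with no separate per-mm counter left to update;
 * OOM_SCORE_ADJ_MIN corresponds to writing -1000.
 */
#include <stdio.h>

static int oom_disable_self(void)
{
	FILE *f = fopen("/proc/self/oom_score_adj", "w");

	if (!f)
		return -1;
	fprintf(f, "%d\n", -1000);	/* OOM_SCORE_ADJ_MIN */
	return fclose(f);
}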
unsigned int token_priority;
unsigned int last_interval;
- /* How many tasks sharing this mm are OOM_DISABLE */
- atomic_t oom_disable_count;
-
unsigned long flags; /* Must use atomic bitops to access the bits */
struct core_state *core_state; /* coredumping support */
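/*
 * A rough sketch of the tradeoff behind removing the field above
 * (mm_has_oom_disabled_user() is a hypothetical helper, not something this
 * patch adds): without a cached count in mm_struct, answering "is any task
 * using this mm OOM-disabled?" means walking the tasks that share it, which
 * is what the oom_kill_process() hunk further down does at kill time.
 */
#include <linux/sched.h>
#include <linux/oom.h>

static bool mm_has_oom_disabled_user(struct mm_struct *mm)
{
	struct task_struct *p;
	bool found = false;

	read_lock(&tasklist_lock);
	for_each_process(p)
		if (p->mm == mm &&
		    p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN) {
			found = true;
			break;
		}
	read_unlock(&tasklist_lock);

	return found;
}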
enter_lazy_tlb(mm, current);
/* We don't want this task to be frozen prematurely */
clear_freeze_flag(tsk);
- if (tsk->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
- atomic_dec(&mm->oom_disable_count);
task_unlock(tsk);
mm_update_next_owner(mm);
mmput(mm);
mm->cached_hole_size = ~0UL;
mm_init_aio(mm);
mm_init_owner(mm, p);
- atomic_set(&mm->oom_disable_count, 0);
if (likely(!mm_alloc_pgd(mm))) {
mm->def_flags = 0;
/* Initializing for Swap token stuff */
mm->token_priority = 0;
mm->last_interval = 0;
- if (tsk->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
- atomic_inc(&mm->oom_disable_count);
tsk->mm = mm;
tsk->active_mm = mm;
bad_fork_cleanup_namespaces:
exit_task_namespaces(p);
bad_fork_cleanup_mm:
- if (p->mm) {
- task_lock(p);
- if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
- atomic_dec(&p->mm->oom_disable_count);
- task_unlock(p);
+ if (p->mm)
mmput(p->mm);
- }
bad_fork_cleanup_signal:
if (!(clone_flags & CLONE_THREAD))
free_signal_struct(p->signal);
spin_lock_irq(&sighand->siglock);
old_val = current->signal->oom_score_adj;
- if (new_val != old_val) {
- if (new_val == OOM_SCORE_ADJ_MIN)
- atomic_inc(&current->mm->oom_disable_count);
- else if (old_val == OOM_SCORE_ADJ_MIN)
- atomic_dec(&current->mm->oom_disable_count);
- current->signal->oom_score_adj = new_val;
- }
+ current->signal->oom_score_adj = new_val;
spin_unlock_irq(&sighand->siglock);
return old_val;
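/*
 * Minimal usage sketch, assuming the simplified test_set_oom_score_adj()
 * above (the wrapper name is illustrative): a caller that wants to be the
 * preferred OOM victim for a bounded stretch of work saves the old value and
 * restores it afterwards; neither write has a per-mm counter to keep in sync
 * any more.
 */
static void run_as_preferred_oom_victim(void)
{
	int old_adj = test_set_oom_score_adj(OOM_SCORE_ADJ_MAX);

	/* ... memory-intensive work that should be sacrificed first ... */

	test_set_oom_score_adj(old_adj);
}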
if (!p)
return 0;
- /*
- * Shortcut check for a thread sharing p->mm that is OOM_SCORE_ADJ_MIN
- * so the entire heuristic doesn't need to be executed for something
- * that cannot be killed.
- */
- if (atomic_read(&p->mm->oom_disable_count)) {
- task_unlock(p);
- return 0;
- }
-
/*
* The memory controller may have a limit of 0 bytes, so avoid a divide
* by zero, if necessary.
for_each_process(q)
if (q->mm == mm && !same_thread_group(q, p) &&
!(q->flags & PF_KTHREAD)) {
+ if (q->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
+ continue;
+
task_lock(q); /* Protect ->comm from prctl() */
pr_err("Kill process %d (%s) sharing same memory\n",
task_pid_nr(q), q->comm);
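/*
 * Note on the check added above: the shortcut removed from oom_badness()
 * earlier in this patch gave a score of 0 to every task whose mm was shared
 * with an OOM_SCORE_ADJ_MIN task, making the whole group unkillable. With
 * the counter gone, victim selection runs normally and the exemption is
 * enforced here instead, by skipping OOM-disabled tasks when the other users
 * of the victim's mm are killed.
 */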
read_lock(&tasklist_lock);
if (sysctl_oom_kill_allocating_task &&
!oom_unkillable_task(current, NULL, nodemask) &&
- current->mm && !atomic_read(&current->mm->oom_disable_count)) {
+ current->mm) {
/*
* oom_kill_process() needs tasklist_lock held. If it returns
* non-zero, current could not be killed so we must fallback to