x86,mm: make pagefault killable
author KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
	Wed, 25 May 2011 00:11:30 +0000 (17:11 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
	Wed, 25 May 2011 15:39:08 +0000 (08:39 -0700)
When an oom killing occurs, almost all processes get stuck at one of the
following two points:

1) __alloc_pages_nodemask
2) __lock_page_or_retry

1) is not very problematic because TIF_MEMDIE leads to an allocation
failure, and the task gets out of the page allocator.

2) is more problematic.  In an OOM situation, zones typically don't have
any page cache, and memory starvation might lead to greatly reduced IO
performance.  During a fork bomb, TIF_MEMDIE tasks don't die quickly, so
the fork bomb may create new processes faster than the oom-killer can
kill them.  The system may then become livelocked.

This patch makes the pagefault interruptible by SIGKILL.
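
For reference, the resulting control flow on the x86 fault path can be
summarized as in the sketch below.  It is condensed from the hunks that
follow and reuses their flag and helper names; PF_WRITE, PF_USER and
no_context() are local to arch/x86/mm/fault.c, so this fragment is
illustrative only, not a drop-in replacement for do_page_fault().

#include <linux/mm.h>
#include <linux/sched.h>

/* Sketch only: condensed from the arch/x86/mm/fault.c hunk below. */
static void page_fault_sketch(struct pt_regs *regs, unsigned long error_code,
			      unsigned long address,
			      struct vm_area_struct *vma)
{
	int write = error_code & PF_WRITE;
	/* The fault is now flagged killable in addition to retryable. */
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
				(write ? FAULT_FLAG_WRITE : 0);
	int fault;

	fault = handle_mm_fault(vma->vm_mm, vma, address, flags);

	/*
	 * VM_FAULT_RETRY together with a pending SIGKILL means the task
	 * was killed while waiting for the page lock and mmap_sem has
	 * already been dropped by __lock_page_or_retry().  Do not retry;
	 * let the task die.  Kernel-mode faults still go to no_context().
	 */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
		if (!(error_code & PF_USER))
			no_context(regs, error_code, address);
		return;
	}

	/* ...the usual retry and accounting path continues here... */
}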

Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: Matthew Wilcox <willy@linux.intel.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
arch/x86/mm/fault.c
include/linux/mm.h
mm/filemap.c

diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index bcb394dfbb3587f8b4d40cc096b6ff7334ce6b12..f7a2a054a3c08f2a7ea9c013fe16cc3421a4b135 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -965,7 +965,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
        struct mm_struct *mm;
        int fault;
        int write = error_code & PF_WRITE;
-       unsigned int flags = FAULT_FLAG_ALLOW_RETRY |
+       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
                                        (write ? FAULT_FLAG_WRITE : 0);
 
        tsk = current;
@@ -1138,6 +1138,16 @@ good_area:
                return;
        }
 
+       /*
+        * Pagefault was interrupted by SIGKILL. We have no reason to
+        * continue pagefault.
+        */
+       if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
+               if (!(error_code & PF_USER))
+                       no_context(regs, error_code, address);
+               return;
+       }
+
        /*
         * Major/minor page fault accounting is only done on the
         * initial attempt. If we go through a retry, it is extremely
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 1746f67c33de925b572bc99db4503d99950eaf1e..57d3d5fade168f3a0f6f6940b15e993aafa56e2a 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -153,6 +153,7 @@ extern pgprot_t protection_map[16];
 #define FAULT_FLAG_MKWRITE     0x04    /* Fault was mkwrite of existing pte */
 #define FAULT_FLAG_ALLOW_RETRY 0x08    /* Retry fault if blocking */
 #define FAULT_FLAG_RETRY_NOWAIT        0x10    /* Don't drop mmap_sem and wait when retrying */
+#define FAULT_FLAG_KILLABLE    0x20    /* The fault task is in SIGKILL killable region */
 
 /*
  * This interface is used by x86 PAT code to identify a pfn mapping that is
diff --git a/mm/filemap.c b/mm/filemap.c
index dea8a38bb2bb7d6ed442121367e90d103d698105..8144f87dcbb4a5b410d521e51d7e86ed1c7d86c1 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -654,15 +654,32 @@ EXPORT_SYMBOL_GPL(__lock_page_killable);
 int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
                         unsigned int flags)
 {
-       if (!(flags & FAULT_FLAG_ALLOW_RETRY)) {
-               __lock_page(page);
-               return 1;
-       } else {
-               if (!(flags & FAULT_FLAG_RETRY_NOWAIT)) {
-                       up_read(&mm->mmap_sem);
+       if (flags & FAULT_FLAG_ALLOW_RETRY) {
+               /*
+                * CAUTION! In this case, mmap_sem is not released
+                * even though return 0.
+                */
+               if (flags & FAULT_FLAG_RETRY_NOWAIT)
+                       return 0;
+
+               up_read(&mm->mmap_sem);
+               if (flags & FAULT_FLAG_KILLABLE)
+                       wait_on_page_locked_killable(page);
+               else
                        wait_on_page_locked(page);
-               }
                return 0;
+       } else {
+               if (flags & FAULT_FLAG_KILLABLE) {
+                       int ret;
+
+                       ret = __lock_page_killable(page);
+                       if (ret) {
+                               up_read(&mm->mmap_sem);
+                               return 0;
+                       }
+               } else
+                       __lock_page(page);
+               return 1;
        }
 }
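
For completeness, the sketch below shows how a fault path such as
filemap_fault() consumes the return value of lock_page_or_retry(), the
trylock wrapper around the __lock_page_or_retry() changed above.  It is
paraphrased from the existing call site in mm/filemap.c and is not part
of this patch; the wrapper name fault_lock_sketch() is made up for
illustration.

#include <linux/mm.h>
#include <linux/pagemap.h>

static int fault_lock_sketch(struct page *page, struct vm_area_struct *vma,
			     struct vm_fault *vmf)
{
	/*
	 * A return of 0 means the page was not locked.  Unless
	 * FAULT_FLAG_RETRY_NOWAIT was set, mmap_sem has already been
	 * dropped, so the caller must back out and report VM_FAULT_RETRY.
	 * With FAULT_FLAG_KILLABLE this now also happens when the waiting
	 * task receives SIGKILL, which is the case the new
	 * fatal_signal_pending() check in do_page_fault() handles.
	 */
	if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
		page_cache_release(page);
		return VM_FAULT_RETRY;
	}

	/* Page is locked; the normal fault path continues from here. */
	return 0;
}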