]> git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
thp, memcg: split hugepage for memcg oom on cow
author David Rientjes <rientjes@google.com>
Thu, 3 May 2012 05:43:47 +0000 (15:43 +1000)
committer Stephen Rothwell <sfr@canb.auug.org.au>
Thu, 3 May 2012 05:46:28 +0000 (15:46 +1000)
On COW, a new hugepage is allocated and charged to the memcg.  If the
system is oom or the charge to the memcg fails, however, the fault handler
will return VM_FAULT_OOM which results in an oom kill.

Instead, it's possible to fall back to splitting the hugepage so that the
COW results only in an order-0 page being allocated and charged to the
memcg which has a higher likelihood to succeed.  This is expensive because
the hugepage must be split in the page fault handler, but it is much
better than unnecessarily oom killing a process.

Signed-off-by: David Rientjes <rientjes@google.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Johannes Weiner <jweiner@redhat.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/huge_memory.c
mm/memory.c

index d7d7165156ca27b3bfe6bc3f149dc9aaad71ea06..edfeb8cb23df27ad77716408962687ceed4ba342 100644 (file)
@@ -952,6 +952,8 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                count_vm_event(THP_FAULT_FALLBACK);
                ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
                                                   pmd, orig_pmd, page, haddr);
+               if (ret & VM_FAULT_OOM)
+                       split_huge_page(page);
                put_page(page);
                goto out;
        }
@@ -959,6 +961,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
        if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
                put_page(new_page);
+               split_huge_page(page);
                put_page(page);
                ret |= VM_FAULT_OOM;
                goto out;
index bf3ccf13bd3661f6b38ce1cdd28540f2de262e30..aede6b091c3df964e3f9c4c4095362ecf17f48e3 100644 (file)
@@ -3492,6 +3492,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        if (unlikely(is_vm_hugetlb_page(vma)))
                return hugetlb_fault(mm, vma, address, flags);
 
+retry:
        pgd = pgd_offset(mm, address);
        pud = pud_alloc(mm, pgd, address);
        if (!pud)
@@ -3505,13 +3506,24 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                                                          pmd, flags);
        } else {
                pmd_t orig_pmd = *pmd;
+               int ret;
+
                barrier();
                if (pmd_trans_huge(orig_pmd)) {
                        if (flags & FAULT_FLAG_WRITE &&
                            !pmd_write(orig_pmd) &&
-                           !pmd_trans_splitting(orig_pmd))
-                               return do_huge_pmd_wp_page(mm, vma, address,
-                                                          pmd, orig_pmd);
+                           !pmd_trans_splitting(orig_pmd)) {
+                               ret = do_huge_pmd_wp_page(mm, vma, address, pmd,
+                                                         orig_pmd);
+                               /*
+                                * If COW results in an oom, the huge pmd will
+                                * have been split, so retry the fault on the
+                                * pte for a smaller charge.
+                                */
+                               if (unlikely(ret & VM_FAULT_OOM))
+                                       goto retry;
+                               return ret;
+                       }
                        return 0;
                }
        }