thp: avoid VM_BUG_ON page_count(page) false positives in __collapse_huge_page_copy
author	Andrea Arcangeli <aarcange@redhat.com>
	Wed, 26 Sep 2012 01:32:55 +0000 (11:32 +1000)
committer	Stephen Rothwell <sfr@canb.auug.org.au>
	Wed, 26 Sep 2012 05:41:22 +0000 (15:41 +1000)
Some time ago Petr reproduced a false positive VM_BUG_ON in khugepaged
while running the autonuma-benchmark on a large 8-node system.  All
production kernels out there have DEBUG_VM=n, so it was only noticeable
on self-built kernels.  It's not easily reproducible even on the 8-node
system.

Use page_freeze_refs to prevent speculative pagecache lookups from
triggering the false positives, so we're still able to check that the
page_count is exact.
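
For reference, the freeze/unfreeze helpers this relies on looked roughly
like the sketch below in the pagemap.h/mm.h of that era (simplified, for
illustration only; not part of this patch).  Once the refcount has been
frozen to 0, a speculative lookup's get_page_unless_zero() fails, so no
transient reference can appear and the page_count() check stays exact:

/*
 * Sketch of the ~3.6-era refcount freeze helpers (simplified,
 * illustrative only).
 */
static inline int page_freeze_refs(struct page *page, int count)
{
	/* Succeeds only if the refcount is exactly 'count'; freezes it to 0. */
	return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
}

static inline void page_unfreeze_refs(struct page *page, int count)
{
	VM_BUG_ON(page_count(page) != 0);
	VM_BUG_ON(count == 0);
	/* Restore the frozen refcount to its previous value. */
	atomic_set(&page->_count, count);
}

/*
 * Speculative pagecache lookups take their reference through
 * get_page_unless_zero(), which fails while the count is frozen to 0.
 */
static inline int get_page_unless_zero(struct page *page)
{
	return atomic_inc_not_zero(&page->_count);
}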

This patch removes the false positive; it has been tested for a while,
and it's a good idea to queue it for upstream too.  It's not urgent and
probably not worth it for -stable, though it wouldn't hurt.  On smaller
systems it's not reproducible AFAIK.

Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Johannes Weiner <jweiner@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Petr Holasek <pholasek@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/huge_memory.c

index 57c4b93090151f2acbc1271b7b214fe5bc96478c..15228c7c101bda934d24d40f83f5dc0c3a85f8e9 100644
@@ -1701,6 +1701,9 @@ void __khugepaged_exit(struct mm_struct *mm)
 
 static void release_pte_page(struct page *page)
 {
+#ifdef CONFIG_DEBUG_VM
+       page_unfreeze_refs(page, 2);
+#endif
        /* 0 stands for page_is_file_cache(page) == false */
        dec_zone_page_state(page, NR_ISOLATED_ANON + 0);
        unlock_page(page);
@@ -1781,6 +1784,20 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
                VM_BUG_ON(!PageLocked(page));
                VM_BUG_ON(PageLRU(page));
 
+#ifdef CONFIG_DEBUG_VM
+               /*
+                * For the VM_BUG_ON check on page_count(page) in
+                * __collapse_huge_page_copy not to trigger false
+                * positives we've to prevent the speculative
+                * pagecache lookups too with page_freeze_refs. We
+                * could check for >= 2 instead but this provides for
+                * a more strict debugging behavior.
+                */
+               if (!page_freeze_refs(page, 2)) {
+                       release_pte_pages(pte, _pte+1);
+                       goto out;
+               }
+#endif
                /* If there is no mapped pte young don't collapse the page */
                if (pte_young(pteval) || PageReferenced(page) ||
                    mmu_notifier_test_young(vma->vm_mm, address))
@@ -1811,7 +1828,7 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
                        src_page = pte_page(pteval);
                        copy_user_highpage(page, src_page, address, vma);
                        VM_BUG_ON(page_mapcount(src_page) != 1);
-                       VM_BUG_ON(page_count(src_page) != 2);
+                       VM_BUG_ON(page_count(src_page) != 0);
                        release_pte_page(src_page);
                        /*
                         * ptl mostly unnecessary, but preempt has to