]> git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
mm: sanitize page->mapping for tail pages
authorKirill A. Shutemov <kirill.shutemov@linux.intel.com>
Tue, 7 Apr 2015 23:44:26 +0000 (09:44 +1000)
committerStephen Rothwell <sfr@canb.auug.org.au>
Tue, 7 Apr 2015 23:44:26 +0000 (09:44 +1000)
We don't define meaning of page->mapping for tail pages.  Currently it's
always NULL, which can be inconsistent with head page and potentially lead
to problems.

Let's poison the pointer to catch all illegal uses.

page_rmapping(), page_mapping() and page_anon_vma() are changed to look on
head page.

The only illegal use I've caught so far is __GFP_COMP pages from sound
subsystem, mapped with PTEs.  do_shared_fault() is changed to use
page_rmapping() instead of direct access to fault_page->mapping.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Christoph Lameter <cl@linux.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Steve Capper <steve.capper@linaro.org>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Jerome Marchand <jmarchan@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/mm.h
include/linux/poison.h
mm/huge_memory.c
mm/memory.c
mm/page_alloc.c
mm/util.c

index 9287fffd9f0d9395d70dfac945565e87797c23e4..16fe322b66eaf8a6f933d45f04bb39586deb89d2 100644 (file)
@@ -915,6 +915,7 @@ extern struct address_space *page_mapping(struct page *page);
 /* Neutral page->mapping pointer to address_space or anon_vma or other */
 static inline void *page_rmapping(struct page *page)
 {
+       page = compound_head(page);
        return (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
 }
 
index 2110a81c5e2afaab47ec5cb107cf17503d731317..7b2a7fcde6a320dd805be9f7bdd1bdd1c6d09a3b 100644 (file)
 /********** mm/debug-pagealloc.c **********/
 #define PAGE_POISON 0xaa
 
+/********** mm/page_alloc.c ************/
+
+#define TAIL_MAPPING   ((void *) 0x01014A11 + POISON_POINTER_DELTA)
+
 /********** mm/slab.c **********/
 /*
  * Magic nums for obj red zoning.
index 3afb5cbe13128b51428d812e9b2d251fa3e8e4e1..7ce6b6c2d1f4a815f74307d6cf3361c8619fd847 100644 (file)
@@ -1703,7 +1703,7 @@ static void __split_huge_page_refcount(struct page *page,
                */
                page_tail->_mapcount = page->_mapcount;
 
-               BUG_ON(page_tail->mapping);
+               BUG_ON(page_tail->mapping != TAIL_MAPPING);
                page_tail->mapping = page->mapping;
 
                page_tail->index = page->index + i;
index ac20b2a6a0c35124da70aa2cbbaf5f5ba571f235..fc91fcb407ccd59341db4feb13a0c8cc8aa060b5 100644 (file)
@@ -3033,7 +3033,7 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
         * pinned by vma->vm_file's reference.  We rely on unlock_page()'s
         * release semantics to prevent the compiler from undoing this copying.
         */
-       mapping = fault_page->mapping;
+       mapping = page_rmapping(fault_page);
        unlock_page(fault_page);
        if ((dirtied || vma->vm_ops->page_mkwrite) && mapping) {
                /*
index 1b849500640c6c0fd109473f518d7a61306b3b11..e73ecbbfa69f33901a6ca5311811c3b69fed274d 100644 (file)
@@ -373,6 +373,7 @@ void prep_compound_page(struct page *page, unsigned long order)
        for (i = 1; i < nr_pages; i++) {
                struct page *p = page + i;
                set_page_count(p, 0);
+               p->mapping = TAIL_MAPPING;
                p->first_page = page;
                /* Make sure p->first_page is always valid for PageTail() */
                smp_wmb();
@@ -765,6 +766,12 @@ static void free_one_page(struct zone *zone,
 
 static int free_tail_pages_check(struct page *head_page, struct page *page)
 {
+       if (page->mapping != TAIL_MAPPING) {
+               bad_page(page, "corrupted mapping in tail page", 0);
+               page->mapping = NULL;
+               return 1;
+       }
+       page->mapping = NULL;
        if (!IS_ENABLED(CONFIG_DEBUG_VM))
                return 0;
        if (unlikely(!PageTail(page))) {
index 3981ae9d1b15a2fcc4e4d46e561d3ee7517c6b78..e78968bd11e734184c2ac78ed32208124eefe725 100644 (file)
--- a/mm/util.c
+++ b/mm/util.c
@@ -327,7 +327,10 @@ EXPORT_SYMBOL(kvfree);
 
 struct address_space *page_mapping(struct page *page)
 {
-       struct address_space *mapping = page->mapping;
+       struct address_space *mapping;
+
+       page = compound_head(page);
+       mapping = page->mapping;
 
        /* This happens if someone calls flush_dcache_page on slab page */
        if (unlikely(PageSlab(page)))