mm: thp: tail page refcounting fix
author    Andrea Arcangeli <aarcange@redhat.com>
          Wed, 28 Sep 2011 00:50:22 +0000 (10:50 +1000)
committer Stephen Rothwell <sfr@canb.auug.org.au>
          Thu, 29 Sep 2011 06:08:10 +0000 (16:08 +1000)
Michel, while working on the working set estimation code, noticed that
calling get_page_unless_zero() on a random pfn_to_page(random_pfn)
wasn't safe if the pfn ended up being a tail page of a transparent
hugepage under splitting by __split_huge_page_refcount().  He then
found that the problem could also theoretically materialize with
page_cache_get_speculative() during the speculative radix tree lookups
that use get_page_unless_zero() in SMP, if the radix tree page is freed
and reallocated and get_user_pages is called on it before
page_cache_get_speculative has a chance to call get_page_unless_zero().
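
To make the race concrete, here is a minimal sketch of the speculative
pattern in question (the helper name try_pin_pfn is made up purely for
illustration; this is not code from the patch):

#include <linux/mm.h>

/* Hypothetical: speculatively pin a page known only by its pfn. */
static bool try_pin_pfn(unsigned long pfn, struct page **pagep)
{
        struct page *page;

        if (!pfn_valid(pfn))
                return false;
        page = pfn_to_page(pfn);

        /*
         * get_page_unless_zero() is atomic_inc_not_zero(&page->_count).
         * Before this fix "page" could be a THP tail page whose _count
         * had been transiently elevated by get_user_pages(), so the
         * increment could succeed here and then race with
         * __split_huge_page_refcount().  With tail page _count pinned
         * at zero, it is guaranteed to fail on tail pages instead.
         */
        if (!get_page_unless_zero(page))
                return false;

        *pagep = page;
        return true;
}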

So the best way to fix the problem is to keep page_tail->_count zero at
all times.  This will guarantee that get_page_unless_zero() can never
succeed on any tail page.  page_tail->_mapcount is guaranteed zero and is
unused for all tail pages of a compound page, so we can simply account the
tail page references there and transfer them to tail_page->_count in
__split_huge_page_refcount() (in addition to the head_page->_mapcount).
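
The invariant this buys can be spelled out as a small assertion helper
(check_thp_tail_invariant is hypothetical, written only to illustrate,
and assumes CONFIG_TRANSPARENT_HUGEPAGE):

#include <linux/mm.h>
#include <linux/huge_mm.h>

/* Hypothetical debug helper, not part of this patch. */
static void check_thp_tail_invariant(struct page *head)
{
        int i;

        for (i = 1; i < HPAGE_PMD_NR; i++) {
                struct page *tail = head + i;

                /* always zero, so get_page_unless_zero() can never succeed */
                VM_BUG_ON(atomic_read(&tail->_count) != 0);
                /* gup pins on this tail are parked here until the split */
                VM_BUG_ON(page_mapcount(tail) < 0);
        }
}

At split time __split_huge_page_refcount() then adds
page_mapcount(head) + page_mapcount(tail) + 1 to each tail's _count and
subtracts the accumulated tail pins from the head's _count once, after
the loop, as the mm/huge_memory.c hunk below shows.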

While debugging this s/_count/_mapcount/ change, I also noticed that
get_page() is called by direct-io.c on pages returned by
get_user_pages().  That wasn't entirely safe because the two
atomic_inc()s in get_page() (one on the tail page and one on the head
page) weren't atomic as a pair.  Other get_user_pages() users, by
contrast, such as the secondary-MMU page faults that establish the
shadow pagetables, never call any superfluous get_page() after
get_user_pages() returns.  It's safer to make get_page() universally
safe for tail pages and to use get_page_foll() within follow_page()
(inside get_user_pages()).  get_page_foll() can do the refcounting for
tail pages without taking any locks because it runs within PT-lock
protected critical sections (the PT lock for pte and page_table_lock
for pmd_trans_huge).  The standard get_page(), as invoked by direct-io,
will instead now take the compound_lock, but still only for tail pages.
The direct-io paths are usually I/O bound, and the compound_lock is per
THP and so very fine-grained, so there's no risk of scalability issues
with it.  A simple direct-io benchmark with all the lockdep
prove-locking and spinlock debugging infrastructure enabled shows
identical performance and no overhead.  So it's worth it.  Ideally
direct-io should stop calling get_page() on pages returned by
get_user_pages(); the spinlock in get_page() is already optimized away
for no-THP builds, but doing get_page() on tail pages returned by GUP
is generally a rare operation and usually only run in I/O paths anyway.
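
For completeness, a hedged sketch of that direct-io style usage (the
helper name pin_user_page_twice is made up; this is not the real
fs/direct-io.c code, and the two references must eventually be dropped
with two put_page() calls):

#include <linux/mm.h>
#include <linux/sched.h>

/* Hypothetical: pin one user page via gup, then reference it again. */
static int pin_user_page_twice(unsigned long addr, struct page **pagep)
{
        struct page *page;
        int ret;

        down_read(&current->mm->mmap_sem);
        /* follow_page() underneath takes the tail ref via get_page_foll() */
        ret = get_user_pages(current, current->mm, addr, 1, 1, 0,
                             &page, NULL);
        up_read(&current->mm->mmap_sem);
        if (ret != 1)
                return ret < 0 ? ret : -EFAULT;

        /*
         * Second reference, as direct-io does: tail pages go through
         * __get_page_tail() and the per-THP compound_lock.
         */
        get_page(page);

        *pagep = page;
        return 0;
}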

This new refcounting on page_tail->_mapcount, in addition to avoiding
the need for new RCU critical sections, will also allow the working set
estimation code to work without any further complexity associated with
the tail page refcounting of THP.

Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Reported-by: Michel Lespinasse <walken@google.com>
Reviewed-by: Michel Lespinasse <walken@google.com>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Signed-off-by: Andrew Morton <>
arch/powerpc/mm/gup.c
arch/x86/mm/gup.c
include/linux/mm.h
include/linux/mm_types.h
mm/huge_memory.c
mm/internal.h
mm/memory.c
mm/swap.c

diff --git a/arch/powerpc/mm/gup.c b/arch/powerpc/mm/gup.c
index fec13200868f8e6ab40fd834abe7ed4bf6dc89f8..b9e1c7ff5f6d33d8a111c10ab9b3551f6df6f335 100644
--- a/arch/powerpc/mm/gup.c
+++ b/arch/powerpc/mm/gup.c
@@ -22,8 +22,9 @@ static inline void get_huge_page_tail(struct page *page)
         * __split_huge_page_refcount() cannot run
         * from under us.
         */
-       VM_BUG_ON(atomic_read(&page->_count) < 0);
-       atomic_inc(&page->_count);
+       VM_BUG_ON(page_mapcount(page) < 0);
+       VM_BUG_ON(atomic_read(&page->_count) != 0);
+       atomic_inc(&page->_mapcount);
 }
 
 /*
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index dbe34b9313743f1cae72ac28aee5c99e5d2c9369..3b5032a62b0f3e41c98b98c0842500215e9e3e18 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -114,8 +114,9 @@ static inline void get_huge_page_tail(struct page *page)
         * __split_huge_page_refcount() cannot run
         * from under us.
         */
-       VM_BUG_ON(atomic_read(&page->_count) < 0);
-       atomic_inc(&page->_count);
+       VM_BUG_ON(page_mapcount(page) < 0);
+       VM_BUG_ON(atomic_read(&page->_count) != 0);
+       atomic_inc(&page->_mapcount);
 }
 
 static noinline int gup_huge_pmd(pmd_t pmd, unsigned long addr,
diff --git a/include/linux/mm.h b/include/linux/mm.h
index e2a92a67fa5b187d2fe338c658623307d0a5918f..527c5bb4188a4b2d74c547f1b05887f71acf4d7e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -357,36 +357,39 @@ static inline struct page *compound_head(struct page *page)
        return page;
 }
 
+/*
+ * The atomic page->_mapcount, starts from -1: so that transitions
+ * both from it and to it can be tracked, using atomic_inc_and_test
+ * and atomic_add_negative(-1).
+ */
+static inline void reset_page_mapcount(struct page *page)
+{
+       atomic_set(&(page)->_mapcount, -1);
+}
+
+static inline int page_mapcount(struct page *page)
+{
+       return atomic_read(&(page)->_mapcount) + 1;
+}
+
 static inline int page_count(struct page *page)
 {
        return atomic_read(&compound_head(page)->_count);
 }
 
+extern int __get_page_tail(struct page *page);
+
 static inline void get_page(struct page *page)
 {
+       if (unlikely(PageTail(page)))
+               if (likely(__get_page_tail(page)))
+                       return;
        /*
         * Getting a normal page or the head of a compound page
-        * requires to already have an elevated page->_count. Only if
-        * we're getting a tail page, the elevated page->_count is
-        * required only in the head page, so for tail pages the
-        * bugcheck only verifies that the page->_count isn't
-        * negative.
+        * requires to already have an elevated page->_count.
         */
-       VM_BUG_ON(atomic_read(&page->_count) < !PageTail(page));
+       VM_BUG_ON(atomic_read(&page->_count) <= 0);
        atomic_inc(&page->_count);
-       /*
-        * Getting a tail page will elevate both the head and tail
-        * page->_count(s).
-        */
-       if (unlikely(PageTail(page))) {
-               /*
-                * This is safe only because
-                * __split_huge_page_refcount can't run under
-                * get_page().
-                */
-               VM_BUG_ON(atomic_read(&page->first_page->_count) <= 0);
-               atomic_inc(&page->first_page->_count);
-       }
 }
 
 static inline struct page *virt_to_head_page(const void *x)
@@ -804,21 +807,6 @@ static inline pgoff_t page_index(struct page *page)
        return page->index;
 }
 
-/*
- * The atomic page->_mapcount, like _count, starts from -1:
- * so that transitions both from it and to it can be tracked,
- * using atomic_inc_and_test and atomic_add_negative(-1).
- */
-static inline void reset_page_mapcount(struct page *page)
-{
-       atomic_set(&(page)->_mapcount, -1);
-}
-
-static inline int page_mapcount(struct page *page)
-{
-       return atomic_read(&(page)->_mapcount) + 1;
-}
-
 /*
  * Return true if this page is mapped into pagetables.
  */
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index ac5508e2836791b22703af11771a9eccefedde5a..da501e5febcddd5b4a1b27d7c778a40f5d69692d 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -62,10 +62,23 @@ struct page {
                        struct {
 
                                union {
-                                       atomic_t _mapcount;     /* Count of ptes mapped in mms,
-                                                        * to show when page is mapped
-                                                        * & limit reverse map searches.
-                                                        */
+                                       /*
+                                        * Count of ptes mapped in
+                                        * mms, to show when page is
+                                        * mapped & limit reverse map
+                                        * searches.
+                                        *
+                                        * Used also for tail pages
+                                        * refcounting instead of
+                                        * _count. Tail pages cannot
+                                        * be mapped and keeping the
+                                        * tail page _count zero at
+                                        * all times guarantees
+                                        * get_page_unless_zero() will
+                                        * never succeed on tail
+                                        * pages.
+                                        */
+                                       atomic_t _mapcount;
 
                                        struct {
                                                unsigned inuse:16;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 6b072bdccf817d1ec223a96ba0209ff2eab3cd51..a175d20097874962420478cfe7935e76addc4888 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -989,7 +989,7 @@ struct page *follow_trans_huge_pmd(struct mm_struct *mm,
        page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
        VM_BUG_ON(!PageCompound(page));
        if (flags & FOLL_GET)
-               get_page(page);
+               get_page_foll(page);
 
 out:
        return page;
@@ -1201,6 +1201,7 @@ static void __split_huge_page_refcount(struct page *page)
        unsigned long head_index = page->index;
        struct zone *zone = page_zone(page);
        int zonestat;
+       int tail_count = 0;
 
        /* prevent PageLRU to go away from under us, and freeze lru stats */
        spin_lock_irq(&zone->lru_lock);
@@ -1209,11 +1210,27 @@ static void __split_huge_page_refcount(struct page *page)
        for (i = 1; i < HPAGE_PMD_NR; i++) {
                struct page *page_tail = page + i;
 
-               /* tail_page->_count cannot change */
-               atomic_sub(atomic_read(&page_tail->_count), &page->_count);
-               BUG_ON(page_count(page) <= 0);
-               atomic_add(page_mapcount(page) + 1, &page_tail->_count);
-               BUG_ON(atomic_read(&page_tail->_count) <= 0);
+               /* tail_page->_mapcount cannot change */
+               BUG_ON(page_mapcount(page_tail) < 0);
+               tail_count += page_mapcount(page_tail);
+               /* check for overflow */
+               BUG_ON(tail_count < 0);
+               BUG_ON(atomic_read(&page_tail->_count) != 0);
+               /*
+                * tail_page->_count is zero and not changing from
+                * under us. But get_page_unless_zero() may be running
+                * from under us on the tail_page. If we used
+                * atomic_set() below instead of atomic_add(), we
+                * would then run atomic_set() concurrently with
+                * get_page_unless_zero(), and atomic_set() is
+                * implemented in C not using locked ops. spin_unlock
+                * on x86 sometime uses locked ops because of PPro
+                * errata 66, 92, so unless somebody can guarantee
+                * atomic_set() here would be safe on all archs (and
+                * not only on x86), it's safer to use atomic_add().
+                */
+               atomic_add(page_mapcount(page) + page_mapcount(page_tail) + 1,
+                          &page_tail->_count);
 
                /* after clearing PageTail the gup refcount can be released */
                smp_mb();
@@ -1231,10 +1248,7 @@ static void __split_huge_page_refcount(struct page *page)
                                      (1L << PG_uptodate)));
                page_tail->flags |= (1L << PG_dirty);
 
-               /*
-                * 1) clear PageTail before overwriting first_page
-                * 2) clear PageTail before clearing PageHead for VM_BUG_ON
-                */
+               /* clear PageTail before overwriting first_page */
                smp_wmb();
 
                /*
@@ -1251,7 +1265,6 @@ static void __split_huge_page_refcount(struct page *page)
                 * status is achieved setting a reserved bit in the
                 * pmd, not by clearing the present bit.
                */
-               BUG_ON(page_mapcount(page_tail));
                page_tail->_mapcount = page->_mapcount;
 
                BUG_ON(page_tail->mapping);
@@ -1268,6 +1281,8 @@ static void __split_huge_page_refcount(struct page *page)
 
                lru_add_page_tail(zone, page, page_tail);
        }
+       atomic_sub(tail_count, &page->_count);
+       BUG_ON(atomic_read(&page->_count) <= 0);
 
        __dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
        __mod_zone_page_state(zone, NR_ANON_PAGES, HPAGE_PMD_NR);
diff --git a/mm/internal.h b/mm/internal.h
index d071d380fb498ab36ac6700343bf86485463bdbe..cd09cca7042379f1f383ada467fa6fd4261060f4 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -37,6 +37,47 @@ static inline void __put_page(struct page *page)
        atomic_dec(&page->_count);
 }
 
+static inline void __get_page_tail_foll(struct page *page,
+                                       bool get_page_head)
+{
+       /*
+        * If we're getting a tail page, the elevated page->_count is
+        * required only in the head page and we will elevate the head
+        * page->_count and tail page->_mapcount.
+        *
+        * We elevate page_tail->_mapcount for tail pages to force
+        * page_tail->_count to be zero at all times to avoid getting
+        * false positives from get_page_unless_zero() with
+        * speculative page access (like in
+        * page_cache_get_speculative()) on tail pages.
+        */
+       VM_BUG_ON(atomic_read(&page->first_page->_count) <= 0);
+       VM_BUG_ON(atomic_read(&page->_count) != 0);
+       VM_BUG_ON(page_mapcount(page) < 0);
+       if (get_page_head)
+               atomic_inc(&page->first_page->_count);
+       atomic_inc(&page->_mapcount);
+}
+
+static inline void get_page_foll(struct page *page)
+{
+       if (unlikely(PageTail(page)))
+               /*
+                * This is safe only because
+                * __split_huge_page_refcount() can't run under
+                * get_page_foll() because we hold the proper PT lock.
+                */
+               __get_page_tail_foll(page, true);
+       else {
+               /*
+                * Getting a normal page or the head of a compound page
+                * requires to already have an elevated page->_count.
+                */
+               VM_BUG_ON(atomic_read(&page->_count) <= 0);
+               atomic_inc(&page->_count);
+       }
+}
+
 extern unsigned long highest_memmap_pfn;
 
 /*
diff --git a/mm/memory.c b/mm/memory.c
index 6f422180c0885db9ce4c7b225f8f8a1e7e0eeca9..0d75c103f56cedd2d193d0219486a50111c078c2 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1503,7 +1503,7 @@ split_fallthrough:
        }
 
        if (flags & FOLL_GET)
-               get_page(page);
+               get_page_foll(page);
        if (flags & FOLL_TOUCH) {
                if ((flags & FOLL_WRITE) &&
                    !pte_dirty(pte) && !PageDirty(page))
diff --git a/mm/swap.c b/mm/swap.c
index b9138c70e37b0a3fe49a32201ef9dabcf40b7f22..93c0b582ee06359e9ba6db034146910bbf8e2d4b 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -78,39 +78,21 @@ static void put_compound_page(struct page *page)
 {
        if (unlikely(PageTail(page))) {
                /* __split_huge_page_refcount can run under us */
-               struct page *page_head = page->first_page;
-               smp_rmb();
-               /*
-                * If PageTail is still set after smp_rmb() we can be sure
-                * that the page->first_page we read wasn't a dangling pointer.
-                * See __split_huge_page_refcount() smp_wmb().
-                */
-               if (likely(PageTail(page) && get_page_unless_zero(page_head))) {
+               struct page *page_head = compound_trans_head(page);
+               if (likely(page != page_head &&
+                          get_page_unless_zero(page_head))) {
                        unsigned long flags;
                        /*
-                        * Verify that our page_head wasn't converted
-                        * to a a regular page before we got a
-                        * reference on it.
+                        * page_head wasn't a dangling pointer but it
+                        * may not be a head page anymore by the time
+                        * we obtain the lock. That is ok as long as it
+                        * can't be freed from under us.
                         */
-                       if (unlikely(!PageHead(page_head))) {
-                               /* PageHead is cleared after PageTail */
-                               smp_rmb();
-                               VM_BUG_ON(PageTail(page));
-                               goto out_put_head;
-                       }
-                       /*
-                        * Only run compound_lock on a valid PageHead,
-                        * after having it pinned with
-                        * get_page_unless_zero() above.
-                        */
-                       smp_mb();
-                       /* page_head wasn't a dangling pointer */
                        flags = compound_lock_irqsave(page_head);
                        if (unlikely(!PageTail(page))) {
                                /* __split_huge_page_refcount run before us */
                                compound_unlock_irqrestore(page_head, flags);
                                VM_BUG_ON(PageHead(page_head));
-                       out_put_head:
                                if (put_page_testzero(page_head))
                                        __put_single_page(page_head);
                        out_put_single:
@@ -121,16 +103,17 @@ static void put_compound_page(struct page *page)
                        VM_BUG_ON(page_head != page->first_page);
                        /*
                         * We can release the refcount taken by
-                        * get_page_unless_zero now that
-                        * split_huge_page_refcount is blocked on the
-                        * compound_lock.
+                        * get_page_unless_zero() now that
+                        * __split_huge_page_refcount() is blocked on
+                        * the compound_lock.
                         */
                        if (put_page_testzero(page_head))
                                VM_BUG_ON(1);
                        /* __split_huge_page_refcount will wait now */
-                       VM_BUG_ON(atomic_read(&page->_count) <= 0);
-                       atomic_dec(&page->_count);
+                       VM_BUG_ON(page_mapcount(page) <= 0);
+                       atomic_dec(&page->_mapcount);
                        VM_BUG_ON(atomic_read(&page_head->_count) <= 0);
+                       VM_BUG_ON(atomic_read(&page->_count) != 0);
                        compound_unlock_irqrestore(page_head, flags);
                        if (put_page_testzero(page_head)) {
                                if (PageHead(page_head))
@@ -160,6 +143,40 @@ void put_page(struct page *page)
 }
 EXPORT_SYMBOL(put_page);
 
+int __get_page_tail(struct page *page)
+{
+       /*
+        * This takes care of get_page() if run on a tail page
+        * returned by one of the get_user_pages/follow_page variants.
+        * get_user_pages/follow_page itself doesn't need the compound
+        * lock because it runs __get_page_tail_foll() under the
+        * proper PT lock that already serializes against
+        * split_huge_page().
+        */
+       unsigned long flags;
+       int got = 0;
+       struct page *page_head = compound_trans_head(page);
+       if (likely(page != page_head && get_page_unless_zero(page_head))) {
+               /*
+                * page_head wasn't a dangling pointer but it
+                * may not be a head page anymore by the time
+                * we obtain the lock. That is ok as long as it
+                * can't be freed from under us.
+                */
+               flags = compound_lock_irqsave(page_head);
+               /* here __split_huge_page_refcount won't run anymore */
+               if (likely(PageTail(page))) {
+                       __get_page_tail_foll(page, false);
+                       got = 1;
+               }
+               compound_unlock_irqrestore(page_head, flags);
+               if (unlikely(!got))
+                       put_page(page_head);
+       }
+       return got;
+}
+EXPORT_SYMBOL(__get_page_tail);
+
 /**
  * put_pages_list() - release a list of pages
  * @pages: list of pages threaded on page->lru