page-flags: define PG_locked behavior on compound pages
author	Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
	Wed, 21 Oct 2015 22:03:32 +0000 (09:03 +1100)
committer	Stephen Rothwell <sfr@canb.auug.org.au>
	Wed, 21 Oct 2015 22:03:32 +0000 (09:03 +1100)
lock_page() must operate on the whole compound page.  It doesn't make much
sense to lock only part of a compound page.  Change the code to use the head
page's PG_locked if a tail page is passed.
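
For reference, compound_head() resolves a possibly-tail page to its head
page.  A minimal sketch of the idea (not part of this patch; the exact
field and encoding differ across kernel versions):

	/* Sketch: map a (possibly tail) page back to its compound head.
	 * Kernels of this era kept a pointer to the head page in each
	 * tail page; the representation has since changed. */
	static inline struct page *compound_head(struct page *page)
	{
		if (unlikely(PageTail(page)))
			return page->first_page;
		return page;
	}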

This patch also gets rid of the custom helper functions -- __set_page_locked()
and __clear_page_locked().  They are replaced with helpers generated by
__SETPAGEFLAG/__CLEARPAGEFLAG.  Passing a tail page to these helpers would
trigger VM_BUG_ON().
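
Roughly, the macro-generated replacements look like the sketch below (an
approximation of the __SETPAGEFLAG/__CLEARPAGEFLAG expansion with the
PF_NO_TAIL policy; the real macros live in include/linux/page-flags.h):

	/* Sketch of the generated helpers: non-atomic bit ops, with the
	 * PF_NO_TAIL policy rejecting tail pages outright. */
	static __always_inline void __SetPageLocked(struct page *page)
	{
		VM_BUG_ON_PAGE(PageTail(page), page);
		__set_bit(PG_locked, &page->flags);
	}

	static __always_inline void __ClearPageLocked(struct page *page)
	{
		VM_BUG_ON_PAGE(PageTail(page), page);
		__clear_bit(PG_locked, &page->flags);
	}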

SLUB uses PG_locked as a bit spin lock.  IIUC, tail pages should never
appear there.  A VM_BUG_ON() is added to make sure that this assumption is
correct.
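
For context, a bit spin lock spins on a single bit in page->flags rather
than taking the sleeping page lock.  A simplified sketch of the idea behind
bit_spin_lock() (see include/linux/bit_spinlock.h; the real code also
handles preemption):

	/* Simplified: spin until the PG_locked bit is acquired. */
	static inline void sketch_bit_spin_lock(int bitnum, unsigned long *addr)
	{
		while (test_and_set_bit_lock(bitnum, addr))
			while (test_bit(bitnum, addr))
				cpu_relax();
	}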

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Christoph Lameter <cl@linux.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Steve Capper <steve.capper@linaro.org>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Jerome Marchand <jmarchan@redhat.com>
Cc: Jérôme Glisse <jglisse@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/page-flags.h
include/linux/pagemap.h
mm/filemap.c
mm/ksm.c
mm/memory-failure.c
mm/migrate.c
mm/shmem.c
mm/slub.c
mm/swap_state.c
mm/vmscan.c

index 12ab023b67f284054056584938bc0c223cfffbaf..32c87eb470cb56a6a0fd9a8e562ce27d17111fe1 100644 (file)
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -256,7 +256,7 @@ static inline int __TestClearPage##uname(struct page *page) { return 0; }
 #define TESTSCFLAG_FALSE(uname)                                                \
        TESTSETFLAG_FALSE(uname) TESTCLEARFLAG_FALSE(uname)
 
-TESTPAGEFLAG(Locked, locked, PF_ANY)
+__PAGEFLAG(Locked, locked, PF_NO_TAIL)
 PAGEFLAG(Error, error, PF_ANY) TESTCLEARFLAG(Error, error, PF_ANY)
 PAGEFLAG(Referenced, referenced, PF_ANY) TESTCLEARFLAG(Referenced, referenced, PF_ANY)
        __SETPAGEFLAG(Referenced, referenced, PF_ANY)
index 26eabf5ec718a457eb9cef5635f5c475cca0c48d..df214a4b886d2e816bee2cf007a8485f02b348f9 100644 (file)
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -433,18 +433,9 @@ extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
                                unsigned int flags);
 extern void unlock_page(struct page *page);
 
-static inline void __set_page_locked(struct page *page)
-{
-       __set_bit(PG_locked, &page->flags);
-}
-
-static inline void __clear_page_locked(struct page *page)
-{
-       __clear_bit(PG_locked, &page->flags);
-}
-
 static inline int trylock_page(struct page *page)
 {
+       page = compound_head(page);
        return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
 }
 
@@ -497,9 +488,9 @@ extern int wait_on_page_bit_killable_timeout(struct page *page,
 
 static inline int wait_on_page_locked_killable(struct page *page)
 {
-       if (PageLocked(page))
-               return wait_on_page_bit_killable(page, PG_locked);
-       return 0;
+       if (!PageLocked(page))
+               return 0;
+       return wait_on_page_bit_killable(compound_head(page), PG_locked);
 }
 
 extern wait_queue_head_t *page_waitqueue(struct page *page);
@@ -518,7 +509,7 @@ static inline void wake_up_page(struct page *page, int bit)
 static inline void wait_on_page_locked(struct page *page)
 {
        if (PageLocked(page))
-               wait_on_page_bit(page, PG_locked);
+               wait_on_page_bit(compound_head(page), PG_locked);
 }
 
 /* 
@@ -664,17 +655,17 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
 
 /*
  * Like add_to_page_cache_locked, but used to add newly allocated pages:
- * the page is new, so we can just run __set_page_locked() against it.
+ * the page is new, so we can just run __SetPageLocked() against it.
  */
 static inline int add_to_page_cache(struct page *page,
                struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
 {
        int error;
 
-       __set_page_locked(page);
+       __SetPageLocked(page);
        error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
        if (unlikely(error))
-               __clear_page_locked(page);
+               __ClearPageLocked(page);
        return error;
 }
 
index 1bb007624b53e1cc086ae26d91238d6c3683d6b2..2b5dcfbfc40e413d061a113e3a05e2810e8566f9 100644 (file)
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -682,11 +682,11 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
        void *shadow = NULL;
        int ret;
 
-       __set_page_locked(page);
+       __SetPageLocked(page);
        ret = __add_to_page_cache_locked(page, mapping, offset,
                                         gfp_mask, &shadow);
        if (unlikely(ret))
-               __clear_page_locked(page);
+               __ClearPageLocked(page);
        else {
                /*
                 * The page might have been evicted from cache only
@@ -809,6 +809,7 @@ EXPORT_SYMBOL_GPL(add_page_wait_queue);
  */
 void unlock_page(struct page *page)
 {
+       page = compound_head(page);
        VM_BUG_ON_PAGE(!PageLocked(page), page);
        clear_bit_unlock(PG_locked, &page->flags);
        smp_mb__after_atomic();
@@ -873,18 +874,20 @@ EXPORT_SYMBOL_GPL(page_endio);
  */
 void __lock_page(struct page *page)
 {
-       DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
+       struct page *page_head = compound_head(page);
+       DEFINE_WAIT_BIT(wait, &page_head->flags, PG_locked);
 
-       __wait_on_bit_lock(page_waitqueue(page), &wait, bit_wait_io,
+       __wait_on_bit_lock(page_waitqueue(page_head), &wait, bit_wait_io,
                                                        TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(__lock_page);
 
 int __lock_page_killable(struct page *page)
 {
-       DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
+       struct page *page_head = compound_head(page);
+       DEFINE_WAIT_BIT(wait, &page_head->flags, PG_locked);
 
-       return __wait_on_bit_lock(page_waitqueue(page), &wait,
+       return __wait_on_bit_lock(page_waitqueue(page_head), &wait,
                                        bit_wait_io, TASK_KILLABLE);
 }
 EXPORT_SYMBOL_GPL(__lock_page_killable);
index dcefc371fc628bdd6f9f50528e254a4d785b3854..14329661f4535a7d2cb8bba84147964b18026022 100644 (file)
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1953,7 +1953,7 @@ struct page *ksm_might_need_to_copy(struct page *page,
 
                SetPageDirty(new_page);
                __SetPageUptodate(new_page);
-               __set_page_locked(new_page);
+               __SetPageLocked(new_page);
        }
 
        return new_page;
index 8424b64711ac35955772078804b2e19f7cd99620..5b965e27aaaed4b97d101a823176d3a6762d2b9a 100644 (file)
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1166,7 +1166,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
        /*
         * We ignore non-LRU pages for good reasons.
         * - PG_locked is only well defined for LRU pages and a few others
-        * - to avoid races with __set_page_locked()
+        * - to avoid races with __SetPageLocked()
         * - to avoid races with __SetPageSlab*() (and more non-atomic ops)
         * The check (unnecessarily) ignores LRU pages being isolated and
         * walked by the page reclaim code, however that's not a big loss.
index 7890d0bb5e23c3db75cc682878a2c5ec89b0e513..f7f345ddc9aedbeca142b4ebbe5f5e654c1eda19 100644 (file)
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1767,7 +1767,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
                flush_tlb_range(vma, mmun_start, mmun_end);
 
        /* Prepare a page as a migration target */
-       __set_page_locked(new_page);
+       __SetPageLocked(new_page);
        SetPageSwapBacked(new_page);
 
        /* anon mapping, we can simply copy page->mapping to the new page: */
index 1f80ea63c09f750f57840ff1cb8066da2c78f1ee..d098fda700781b4b4deabd2295ab9a42ea62d6a5 100644 (file)
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -998,7 +998,7 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
        copy_highpage(newpage, oldpage);
        flush_dcache_page(newpage);
 
-       __set_page_locked(newpage);
+       __SetPageLocked(newpage);
        SetPageUptodate(newpage);
        SetPageSwapBacked(newpage);
        set_page_private(newpage, swap_index);
@@ -1190,7 +1190,7 @@ repeat:
                }
 
                __SetPageSwapBacked(page);
-               __set_page_locked(page);
+               __SetPageLocked(page);
                if (sgp == SGP_WRITE)
                        __SetPageReferenced(page);
 
index c17c5202864d9f091b210eab4dd2eba2e426fcd4..438ebf8bbab1ddaba0e25db37a8f2b1913859b96 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -338,11 +338,13 @@ static inline int oo_objects(struct kmem_cache_order_objects x)
  */
 static __always_inline void slab_lock(struct page *page)
 {
+       VM_BUG_ON_PAGE(PageTail(page), page);
        bit_spin_lock(PG_locked, &page->flags);
 }
 
 static __always_inline void slab_unlock(struct page *page)
 {
+       VM_BUG_ON_PAGE(PageTail(page), page);
        __bit_spin_unlock(PG_locked, &page->flags);
 }
 
index d504adb7fa5f08ced98eeb2a285976c0db64a9ae..d783872d746caa810bf4bb68e94b7086dcff26ee 100644 (file)
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -353,7 +353,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                }
 
                /* May fail (-ENOMEM) if radix-tree node allocation failed. */
-               __set_page_locked(new_page);
+               __SetPageLocked(new_page);
                SetPageSwapBacked(new_page);
                err = __add_to_swap_cache(new_page, entry);
                if (likely(!err)) {
@@ -367,7 +367,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                }
                radix_tree_preload_end();
                ClearPageSwapBacked(new_page);
-               __clear_page_locked(new_page);
+               __ClearPageLocked(new_page);
                /*
                 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
                 * clear SWAP_HAS_CACHE flag.
index 1b9eed7b08cfc8b0eb53e0a420c04a7731d3bbfe..f01c00d2c6cd63a21cc7008f24e22cb6c8461653 100644 (file)
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1184,7 +1184,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                 * we obviously don't have to worry about waking up a process
                 * waiting on the page lock, because there are no references.
                 */
-               __clear_page_locked(page);
+               __ClearPageLocked(page);
 free_it:
                nr_reclaimed++;