git.karo-electronics.de Git - karo-tx-linux.git/blobdiff - mm/rmap.c
sched/headers, timekeeping: Move the timer tick function prototypes to <linux/timekee...
index 91619fd709399a428a5a65fff6367d14cbef3db3..2da487d6cea83b4f51db93bcbd05feaad31b927c 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -46,6 +46,8 @@
  */
 
 #include <linux/mm.h>
+#include <linux/sched/mm.h>
+#include <linux/sched/task.h>
 #include <linux/pagemap.h>
 #include <linux/swap.h>
 #include <linux/swapops.h>
@@ -607,8 +609,7 @@ void try_to_unmap_flush_dirty(void)
                try_to_unmap_flush();
 }
 
-static void set_tlb_ubc_flush_pending(struct mm_struct *mm,
-               struct page *page, bool writable)
+static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
 {
        struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
 
@@ -643,8 +644,7 @@ static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
        return should_defer;
 }
 #else
-static void set_tlb_ubc_flush_pending(struct mm_struct *mm,
-               struct page *page, bool writable)
+static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
 {
 }
 
@@ -710,170 +710,6 @@ out:
        return pmd;
 }
 
-/*
- * Check that @page is mapped at @address into @mm.
- *
- * If @sync is false, page_check_address may perform a racy check to avoid
- * the page table lock when the pte is not present (helpful when reclaiming
- * highly shared pages).
- *
- * On success returns with pte mapped and locked.
- */
-pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
-                         unsigned long address, spinlock_t **ptlp, int sync)
-{
-       pmd_t *pmd;
-       pte_t *pte;
-       spinlock_t *ptl;
-
-       if (unlikely(PageHuge(page))) {
-               /* when pud is not present, pte will be NULL */
-               pte = huge_pte_offset(mm, address);
-               if (!pte)
-                       return NULL;
-
-               ptl = huge_pte_lockptr(page_hstate(page), mm, pte);
-               goto check;
-       }
-
-       pmd = mm_find_pmd(mm, address);
-       if (!pmd)
-               return NULL;
-
-       pte = pte_offset_map(pmd, address);
-       /* Make a quick check before getting the lock */
-       if (!sync && !pte_present(*pte)) {
-               pte_unmap(pte);
-               return NULL;
-       }
-
-       ptl = pte_lockptr(mm, pmd);
-check:
-       spin_lock(ptl);
-       if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
-               *ptlp = ptl;
-               return pte;
-       }
-       pte_unmap_unlock(pte, ptl);
-       return NULL;
-}
-
-/**
- * page_mapped_in_vma - check whether a page is really mapped in a VMA
- * @page: the page to test
- * @vma: the VMA to test
- *
- * Returns 1 if the page is mapped into the page tables of the VMA, 0
- * if the page is not mapped into the page tables of this VMA.  Only
- * valid for normal file or anonymous VMAs.
- */
-int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
-{
-       unsigned long address;
-       pte_t *pte;
-       spinlock_t *ptl;
-
-       address = __vma_address(page, vma);
-       if (unlikely(address < vma->vm_start || address >= vma->vm_end))
-               return 0;
-       pte = page_check_address(page, vma->vm_mm, address, &ptl, 1);
-       if (!pte)                       /* the page is not in this mm */
-               return 0;
-       pte_unmap_unlock(pte, ptl);
-
-       return 1;
-}
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-/*
- * Check that @page is mapped at @address into @mm. In contrast to
- * page_check_address(), this function can handle transparent huge pages.
- *
- * On success returns true with pte mapped and locked. For PMD-mapped
- * transparent huge pages *@ptep is set to NULL.
- */
-bool page_check_address_transhuge(struct page *page, struct mm_struct *mm,
-                                 unsigned long address, pmd_t **pmdp,
-                                 pte_t **ptep, spinlock_t **ptlp)
-{
-       pgd_t *pgd;
-       pud_t *pud;
-       pmd_t *pmd;
-       pte_t *pte;
-       spinlock_t *ptl;
-
-       if (unlikely(PageHuge(page))) {
-               /* when pud is not present, pte will be NULL */
-               pte = huge_pte_offset(mm, address);
-               if (!pte)
-                       return false;
-
-               ptl = huge_pte_lockptr(page_hstate(page), mm, pte);
-               pmd = NULL;
-               goto check_pte;
-       }
-
-       pgd = pgd_offset(mm, address);
-       if (!pgd_present(*pgd))
-               return false;
-       pud = pud_offset(pgd, address);
-       if (!pud_present(*pud))
-               return false;
-       pmd = pmd_offset(pud, address);
-
-       if (pmd_trans_huge(*pmd)) {
-               ptl = pmd_lock(mm, pmd);
-               if (!pmd_present(*pmd))
-                       goto unlock_pmd;
-               if (unlikely(!pmd_trans_huge(*pmd))) {
-                       spin_unlock(ptl);
-                       goto map_pte;
-               }
-
-               if (pmd_page(*pmd) != page)
-                       goto unlock_pmd;
-
-               pte = NULL;
-               goto found;
-unlock_pmd:
-               spin_unlock(ptl);
-               return false;
-       } else {
-               pmd_t pmde = *pmd;
-
-               barrier();
-               if (!pmd_present(pmde) || pmd_trans_huge(pmde))
-                       return false;
-       }
-map_pte:
-       pte = pte_offset_map(pmd, address);
-       if (!pte_present(*pte)) {
-               pte_unmap(pte);
-               return false;
-       }
-
-       ptl = pte_lockptr(mm, pmd);
-check_pte:
-       spin_lock(ptl);
-
-       if (!pte_present(*pte)) {
-               pte_unmap_unlock(pte, ptl);
-               return false;
-       }
-
-       /* THP can be referenced by any subpage */
-       if (pte_pfn(*pte) - page_to_pfn(page) >= hpage_nr_pages(page)) {
-               pte_unmap_unlock(pte, ptl);
-               return false;
-       }
-found:
-       *ptep = pte;
-       *pmdp = pmd;
-       *ptlp = ptl;
-       return true;
-}
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-
 struct page_referenced_arg {
        int mapcount;
        int referenced;
@@ -886,45 +722,48 @@ struct page_referenced_arg {
 static int page_referenced_one(struct page *page, struct vm_area_struct *vma,
                        unsigned long address, void *arg)
 {
-       struct mm_struct *mm = vma->vm_mm;
        struct page_referenced_arg *pra = arg;
-       pmd_t *pmd;
-       pte_t *pte;
-       spinlock_t *ptl;
+       struct page_vma_mapped_walk pvmw = {
+               .page = page,
+               .vma = vma,
+               .address = address,
+       };
        int referenced = 0;
 
-       if (!page_check_address_transhuge(page, mm, address, &pmd, &pte, &ptl))
-               return SWAP_AGAIN;
+       while (page_vma_mapped_walk(&pvmw)) {
+               address = pvmw.address;
 
-       if (vma->vm_flags & VM_LOCKED) {
-               if (pte)
-                       pte_unmap(pte);
-               spin_unlock(ptl);
-               pra->vm_flags |= VM_LOCKED;
-               return SWAP_FAIL; /* To break the loop */
-       }
+               if (vma->vm_flags & VM_LOCKED) {
+                       page_vma_mapped_walk_done(&pvmw);
+                       pra->vm_flags |= VM_LOCKED;
+                       return SWAP_FAIL; /* To break the loop */
+               }
 
-       if (pte) {
-               if (ptep_clear_flush_young_notify(vma, address, pte)) {
-                       /*
-                        * Don't treat a reference through a sequentially read
-                        * mapping as such.  If the page has been used in
-                        * another mapping, we will catch it; if this other
-                        * mapping is already gone, the unmap path will have
-                        * set PG_referenced or activated the page.
-                        */
-                       if (likely(!(vma->vm_flags & VM_SEQ_READ)))
+               if (pvmw.pte) {
+                       if (ptep_clear_flush_young_notify(vma, address,
+                                               pvmw.pte)) {
+                               /*
+                                * Don't treat a reference through
+                                * a sequentially read mapping as such.
+                                * If the page has been used in another mapping,
+                                * we will catch it; if this other mapping is
+                                * already gone, the unmap path will have set
+                                * PG_referenced or activated the page.
+                                */
+                               if (likely(!(vma->vm_flags & VM_SEQ_READ)))
+                                       referenced++;
+                       }
+               } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
+                       if (pmdp_clear_flush_young_notify(vma, address,
+                                               pvmw.pmd))
                                referenced++;
+               } else {
+                       /* unexpected pmd-mapped page? */
+                       WARN_ON_ONCE(1);
                }
-               pte_unmap(pte);
-       } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
-               if (pmdp_clear_flush_young_notify(vma, address, pmd))
-                       referenced++;
-       } else {
-               /* unexpected pmd-mapped page? */
-               WARN_ON_ONCE(1);
+
+               pra->mapcount--;
        }
-       spin_unlock(ptl);
 
        if (referenced)
                clear_page_idle(page);
@@ -936,7 +775,6 @@ static int page_referenced_one(struct page *page, struct vm_area_struct *vma,
                pra->vm_flags |= vma->vm_flags;
        }
 
-       pra->mapcount--;
        if (!pra->mapcount)
                return SWAP_SUCCESS; /* To break the loop */
 
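The loop above is the caller pattern that every walker converted in this diff follows: a struct page_vma_mapped_walk is initialized on the stack, page_vma_mapped_walk() iterates over each mapping of the page within the VMA (taking the matching pte or pmd lock as it goes), and page_vma_mapped_walk_done() must be called before any early return from inside the loop. Below is a minimal sketch of that skeleton, distilled from the hunks in this diff rather than copied from any one function; example_rmap_one is a made-up name, while page_vma_mapped_walk(), page_vma_mapped_walk_done(), VM_LOCKED, SWAP_FAIL and SWAP_AGAIN all appear in the diff itself.

	static int example_rmap_one(struct page *page, struct vm_area_struct *vma,
				    unsigned long address, void *arg)
	{
		struct page_vma_mapped_walk pvmw = {
			.page = page,
			.vma = vma,
			.address = address,
		};

		while (page_vma_mapped_walk(&pvmw)) {
			/* pvmw.pte is set for a PTE mapping, NULL for a PMD-mapped THP */
			if (vma->vm_flags & VM_LOCKED) {
				/* early exit: drop the pte/pmd lock taken by the walk */
				page_vma_mapped_walk_done(&pvmw);
				return SWAP_FAIL;
			}
			/* ... per-mapping work, done under pvmw.ptl ... */
		}
		/* falling out of the loop: the walk has already released its locks */
		return SWAP_AGAIN;
	}
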
@@ -1015,34 +853,56 @@ int page_referenced(struct page *page,
 static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
                            unsigned long address, void *arg)
 {
-       struct mm_struct *mm = vma->vm_mm;
-       pte_t *pte;
-       spinlock_t *ptl;
-       int ret = 0;
+       struct page_vma_mapped_walk pvmw = {
+               .page = page,
+               .vma = vma,
+               .address = address,
+               .flags = PVMW_SYNC,
+       };
        int *cleaned = arg;
 
-       pte = page_check_address(page, mm, address, &ptl, 1);
-       if (!pte)
-               goto out;
-
-       if (pte_dirty(*pte) || pte_write(*pte)) {
-               pte_t entry;
+       while (page_vma_mapped_walk(&pvmw)) {
+               int ret = 0;
+               address = pvmw.address;
+               if (pvmw.pte) {
+                       pte_t entry;
+                       pte_t *pte = pvmw.pte;
+
+                       if (!pte_dirty(*pte) && !pte_write(*pte))
+                               continue;
+
+                       flush_cache_page(vma, address, pte_pfn(*pte));
+                       entry = ptep_clear_flush(vma, address, pte);
+                       entry = pte_wrprotect(entry);
+                       entry = pte_mkclean(entry);
+                       set_pte_at(vma->vm_mm, address, pte, entry);
+                       ret = 1;
+               } else {
+#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
+                       pmd_t *pmd = pvmw.pmd;
+                       pmd_t entry;
+
+                       if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
+                               continue;
+
+                       flush_cache_page(vma, address, page_to_pfn(page));
+                       entry = pmdp_huge_clear_flush(vma, address, pmd);
+                       entry = pmd_wrprotect(entry);
+                       entry = pmd_mkclean(entry);
+                       set_pmd_at(vma->vm_mm, address, pmd, entry);
+                       ret = 1;
+#else
+                       /* unexpected pmd-mapped page? */
+                       WARN_ON_ONCE(1);
+#endif
+               }
 
-               flush_cache_page(vma, address, pte_pfn(*pte));
-               entry = ptep_clear_flush(vma, address, pte);
-               entry = pte_wrprotect(entry);
-               entry = pte_mkclean(entry);
-               set_pte_at(mm, address, pte, entry);
-               ret = 1;
+               if (ret) {
+                       mmu_notifier_invalidate_page(vma->vm_mm, address);
+                       (*cleaned)++;
+               }
        }
 
-       pte_unmap_unlock(pte, ptl);
-
-       if (ret) {
-               mmu_notifier_invalidate_page(mm, address);
-               (*cleaned)++;
-       }
-out:
        return SWAP_AGAIN;
 }
 
@@ -1435,155 +1295,163 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                     unsigned long address, void *arg)
 {
        struct mm_struct *mm = vma->vm_mm;
-       pte_t *pte;
+       struct page_vma_mapped_walk pvmw = {
+               .page = page,
+               .vma = vma,
+               .address = address,
+       };
        pte_t pteval;
-       spinlock_t *ptl;
+       struct page *subpage;
        int ret = SWAP_AGAIN;
        struct rmap_private *rp = arg;
        enum ttu_flags flags = rp->flags;
 
        /* munlock has nothing to gain from examining un-locked vmas */
        if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))
-               goto out;
+               return SWAP_AGAIN;
 
        if (flags & TTU_SPLIT_HUGE_PMD) {
                split_huge_pmd_address(vma, address,
                                flags & TTU_MIGRATION, page);
-               /* check if we have anything to do after split */
-               if (page_mapcount(page) == 0)
-                       goto out;
        }
 
-       pte = page_check_address(page, mm, address, &ptl,
-                                PageTransCompound(page));
-       if (!pte)
-               goto out;
+       while (page_vma_mapped_walk(&pvmw)) {
+               subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
+               address = pvmw.address;
 
-       /*
-        * If the page is mlock()d, we cannot swap it out.
-        * If it's recently referenced (perhaps page_referenced
-        * skipped over this mm) then we should reactivate it.
-        */
-       if (!(flags & TTU_IGNORE_MLOCK)) {
-               if (vma->vm_flags & VM_LOCKED) {
-                       /* PTE-mapped THP are never mlocked */
-                       if (!PageTransCompound(page)) {
-                               /*
-                                * Holding pte lock, we do *not* need
-                                * mmap_sem here
-                                */
-                               mlock_vma_page(page);
-                       }
-                       ret = SWAP_MLOCK;
-                       goto out_unmap;
-               }
-               if (flags & TTU_MUNLOCK)
-                       goto out_unmap;
-       }
-       if (!(flags & TTU_IGNORE_ACCESS)) {
-               if (ptep_clear_flush_young_notify(vma, address, pte)) {
-                       ret = SWAP_FAIL;
-                       goto out_unmap;
-               }
-       }
+               /* Unexpected PMD-mapped THP? */
+               VM_BUG_ON_PAGE(!pvmw.pte, page);
 
-       /* Nuke the page table entry. */
-       flush_cache_page(vma, address, page_to_pfn(page));
-       if (should_defer_flush(mm, flags)) {
                /*
-                * We clear the PTE but do not flush so potentially a remote
-                * CPU could still be writing to the page. If the entry was
-                * previously clean then the architecture must guarantee that
-                * a clear->dirty transition on a cached TLB entry is written
-                * through and traps if the PTE is unmapped.
+                * If the page is mlock()d, we cannot swap it out.
+                * If it's recently referenced (perhaps page_referenced
+                * skipped over this mm) then we should reactivate it.
                 */
-               pteval = ptep_get_and_clear(mm, address, pte);
-
-               set_tlb_ubc_flush_pending(mm, page, pte_dirty(pteval));
-       } else {
-               pteval = ptep_clear_flush(vma, address, pte);
-       }
+               if (!(flags & TTU_IGNORE_MLOCK)) {
+                       if (vma->vm_flags & VM_LOCKED) {
+                               /* PTE-mapped THP are never mlocked */
+                               if (!PageTransCompound(page)) {
+                                       /*
+                                        * Holding pte lock, we do *not* need
+                                        * mmap_sem here
+                                        */
+                                       mlock_vma_page(page);
+                               }
+                               ret = SWAP_MLOCK;
+                               page_vma_mapped_walk_done(&pvmw);
+                               break;
+                       }
+                       if (flags & TTU_MUNLOCK)
+                               continue;
+               }
 
-       /* Move the dirty bit to the physical page now the pte is gone. */
-       if (pte_dirty(pteval))
-               set_page_dirty(page);
+               if (!(flags & TTU_IGNORE_ACCESS)) {
+                       if (ptep_clear_flush_young_notify(vma, address,
+                                               pvmw.pte)) {
+                               ret = SWAP_FAIL;
+                               page_vma_mapped_walk_done(&pvmw);
+                               break;
+                       }
+               }
 
-       /* Update high watermark before we lower rss */
-       update_hiwater_rss(mm);
+               /* Nuke the page table entry. */
+               flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
+               if (should_defer_flush(mm, flags)) {
+                       /*
+                        * We clear the PTE but do not flush so potentially
+                        * a remote CPU could still be writing to the page.
+                        * If the entry was previously clean then the
+                        * architecture must guarantee that a clear->dirty
+                        * transition on a cached TLB entry is written through
+                        * and traps if the PTE is unmapped.
+                        */
+                       pteval = ptep_get_and_clear(mm, address, pvmw.pte);
 
-       if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
-               if (PageHuge(page)) {
-                       hugetlb_count_sub(1 << compound_order(page), mm);
+                       set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
                } else {
-                       dec_mm_counter(mm, mm_counter(page));
+                       pteval = ptep_clear_flush(vma, address, pvmw.pte);
                }
-               set_pte_at(mm, address, pte,
-                          swp_entry_to_pte(make_hwpoison_entry(page)));
-       } else if (pte_unused(pteval)) {
-               /*
-                * The guest indicated that the page content is of no
-                * interest anymore. Simply discard the pte, vmscan
-                * will take care of the rest.
-                */
-               dec_mm_counter(mm, mm_counter(page));
-       } else if (IS_ENABLED(CONFIG_MIGRATION) && (flags & TTU_MIGRATION)) {
-               swp_entry_t entry;
-               pte_t swp_pte;
-               /*
-                * Store the pfn of the page in a special migration
-                * pte. do_swap_page() will wait until the migration
-                * pte is removed and then restart fault handling.
-                */
-               entry = make_migration_entry(page, pte_write(pteval));
-               swp_pte = swp_entry_to_pte(entry);
-               if (pte_soft_dirty(pteval))
-                       swp_pte = pte_swp_mksoft_dirty(swp_pte);
-               set_pte_at(mm, address, pte, swp_pte);
-       } else if (PageAnon(page)) {
-               swp_entry_t entry = { .val = page_private(page) };
-               pte_t swp_pte;
-               /*
-                * Store the swap location in the pte.
-                * See handle_pte_fault() ...
-                */
-               VM_BUG_ON_PAGE(!PageSwapCache(page), page);
 
-               if (!PageDirty(page) && (flags & TTU_LZFREE)) {
-                       /* It's a freeable page by MADV_FREE */
-                       dec_mm_counter(mm, MM_ANONPAGES);
-                       rp->lazyfreed++;
-                       goto discard;
-               }
+               /* Move the dirty bit to the page. Now the pte is gone. */
+               if (pte_dirty(pteval))
+                       set_page_dirty(page);
 
-               if (swap_duplicate(entry) < 0) {
-                       set_pte_at(mm, address, pte, pteval);
-                       ret = SWAP_FAIL;
-                       goto out_unmap;
-               }
-               if (list_empty(&mm->mmlist)) {
-                       spin_lock(&mmlist_lock);
-                       if (list_empty(&mm->mmlist))
-                               list_add(&mm->mmlist, &init_mm.mmlist);
-                       spin_unlock(&mmlist_lock);
-               }
-               dec_mm_counter(mm, MM_ANONPAGES);
-               inc_mm_counter(mm, MM_SWAPENTS);
-               swp_pte = swp_entry_to_pte(entry);
-               if (pte_soft_dirty(pteval))
-                       swp_pte = pte_swp_mksoft_dirty(swp_pte);
-               set_pte_at(mm, address, pte, swp_pte);
-       } else
-               dec_mm_counter(mm, mm_counter_file(page));
+               /* Update high watermark before we lower rss */
+               update_hiwater_rss(mm);
 
-discard:
-       page_remove_rmap(page, PageHuge(page));
-       put_page(page);
+               if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
+                       if (PageHuge(page)) {
+                               int nr = 1 << compound_order(page);
+                               hugetlb_count_sub(nr, mm);
+                       } else {
+                               dec_mm_counter(mm, mm_counter(page));
+                       }
+
+                       pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
+                       set_pte_at(mm, address, pvmw.pte, pteval);
+               } else if (pte_unused(pteval)) {
+                       /*
+                        * The guest indicated that the page content is of no
+                        * interest anymore. Simply discard the pte, vmscan
+                        * will take care of the rest.
+                        */
+                       dec_mm_counter(mm, mm_counter(page));
+               } else if (IS_ENABLED(CONFIG_MIGRATION) &&
+                               (flags & TTU_MIGRATION)) {
+                       swp_entry_t entry;
+                       pte_t swp_pte;
+                       /*
+                        * Store the pfn of the page in a special migration
+                        * pte. do_swap_page() will wait until the migration
+                        * pte is removed and then restart fault handling.
+                        */
+                       entry = make_migration_entry(subpage,
+                                       pte_write(pteval));
+                       swp_pte = swp_entry_to_pte(entry);
+                       if (pte_soft_dirty(pteval))
+                               swp_pte = pte_swp_mksoft_dirty(swp_pte);
+                       set_pte_at(mm, address, pvmw.pte, swp_pte);
+               } else if (PageAnon(page)) {
+                       swp_entry_t entry = { .val = page_private(subpage) };
+                       pte_t swp_pte;
+                       /*
+                        * Store the swap location in the pte.
+                        * See handle_pte_fault() ...
+                        */
+                       VM_BUG_ON_PAGE(!PageSwapCache(page), page);
+
+                       if (!PageDirty(page) && (flags & TTU_LZFREE)) {
+                               /* It's a freeable page by MADV_FREE */
+                               dec_mm_counter(mm, MM_ANONPAGES);
+                               rp->lazyfreed++;
+                               goto discard;
+                       }
 
-out_unmap:
-       pte_unmap_unlock(pte, ptl);
-       if (ret != SWAP_FAIL && ret != SWAP_MLOCK && !(flags & TTU_MUNLOCK))
+                       if (swap_duplicate(entry) < 0) {
+                               set_pte_at(mm, address, pvmw.pte, pteval);
+                               ret = SWAP_FAIL;
+                               page_vma_mapped_walk_done(&pvmw);
+                               break;
+                       }
+                       if (list_empty(&mm->mmlist)) {
+                               spin_lock(&mmlist_lock);
+                               if (list_empty(&mm->mmlist))
+                                       list_add(&mm->mmlist, &init_mm.mmlist);
+                               spin_unlock(&mmlist_lock);
+                       }
+                       dec_mm_counter(mm, MM_ANONPAGES);
+                       inc_mm_counter(mm, MM_SWAPENTS);
+                       swp_pte = swp_entry_to_pte(entry);
+                       if (pte_soft_dirty(pteval))
+                               swp_pte = pte_swp_mksoft_dirty(swp_pte);
+                       set_pte_at(mm, address, pvmw.pte, swp_pte);
+               } else
+                       dec_mm_counter(mm, mm_counter_file(page));
+discard:
+               page_remove_rmap(subpage, PageHuge(page));
+               put_page(page);
                mmu_notifier_invalidate_page(mm, address);
-out:
+       }
        return ret;
 }
 
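One detail worth noting in the hunk above is the subpage computation: page_vma_mapped_walk() can return a PTE that maps any tail page of a THP, so try_to_unmap_one() now builds its hwpoison, migration and swap entries for the exact subpage hit by the PTE rather than for the compound head. A worked example of the pointer arithmetic, with made-up pfn values purely for illustration:

	/*
	 * Suppose the compound head sits at pfn 0x1000 and the PTE being
	 * walked maps pfn 0x1003. Then:
	 *
	 *	subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte)
	 *	        = page - 0x1000 + 0x1003
	 *	        = page + 3
	 *
	 * i.e. the fourth subpage of the THP, which is what ends up in
	 * make_hwpoison_entry()/make_migration_entry() above.
	 */
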
@@ -1608,7 +1476,7 @@ static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
 
 static int page_mapcount_is_zero(struct page *page)
 {
-       return !page_mapcount(page);
+       return !total_mapcount(page);
 }
 
 /**
@@ -1755,7 +1623,7 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
                bool locked)
 {
        struct anon_vma *anon_vma;
-       pgoff_t pgoff;
+       pgoff_t pgoff_start, pgoff_end;
        struct anon_vma_chain *avc;
        int ret = SWAP_AGAIN;
 
@@ -1769,8 +1637,10 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
        if (!anon_vma)
                return ret;
 
-       pgoff = page_to_pgoff(page);
-       anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
+       pgoff_start = page_to_pgoff(page);
+       pgoff_end = pgoff_start + hpage_nr_pages(page) - 1;
+       anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
+                       pgoff_start, pgoff_end) {
                struct vm_area_struct *vma = avc->vma;
                unsigned long address = vma_address(page, vma);
 
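The widened interval-tree lookup above matters for THP: a transparent huge page spans hpage_nr_pages(page) consecutive page offsets, so a VMA may map only some of its subpages, and the old single-offset lookup at the head's pgoff would miss such VMAs. A quick numeric sketch, assuming an x86-64 2MiB THP (512 base pages) and an arbitrary starting offset:

	/* Assuming hpage_nr_pages(page) == 512: */
	pgoff_t pgoff_start = page_to_pgoff(page);                   /* say, 1024 */
	pgoff_t pgoff_end = pgoff_start + hpage_nr_pages(page) - 1;  /* 1024 + 512 - 1 = 1535 */
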
@@ -1808,7 +1678,7 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
                bool locked)
 {
        struct address_space *mapping = page_mapping(page);
-       pgoff_t pgoff;
+       pgoff_t pgoff_start, pgoff_end;
        struct vm_area_struct *vma;
        int ret = SWAP_AGAIN;
 
@@ -1823,10 +1693,12 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
        if (!mapping)
                return ret;
 
-       pgoff = page_to_pgoff(page);
+       pgoff_start = page_to_pgoff(page);
+       pgoff_end = pgoff_start + hpage_nr_pages(page) - 1;
        if (!locked)
                i_mmap_lock_read(mapping);
-       vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
+       vma_interval_tree_foreach(vma, &mapping->i_mmap,
+                       pgoff_start, pgoff_end) {
                unsigned long address = vma_address(page, vma);
 
                cond_resched();