mm: MADV_FREE trivial clean up
Author:     Minchan Kim <minchan@kernel.org>
AuthorDate: Wed, 21 Oct 2015 22:03:45 +0000 (09:03 +1100)
Commit:     Stephen Rothwell <sfr@canb.auug.org.au>
CommitDate: Wed, 21 Oct 2015 22:03:45 +0000 (09:03 +1100)
1. The page table walker already passes the vma it is processing,
so we don't need to pass the vma ourselves.
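
As a minimal sketch of point 1 (against the 4.3-era pagewalk API; the
example_* names are illustrative, not from this patch):
walk_page_range() records the vma being walked in walk->vma before
invoking ->pmd_entry, so the callback can read it directly and only the
mmu_gather has to travel through ->private.

	#include <linux/mm.h>
	#include <asm/tlb.h>

	static int example_pte_range(pmd_t *pmd, unsigned long addr,
				     unsigned long end, struct mm_walk *walk)
	{
		struct mmu_gather *tlb = walk->private;	/* caller-supplied */
		struct vm_area_struct *vma = walk->vma;	/* set by the walker */

		(void)tlb;
		(void)vma;
		return 0;
	}

	static void example_walk(struct mmu_gather *tlb,
				 struct vm_area_struct *vma,
				 unsigned long addr, unsigned long end)
	{
		struct mm_walk walk = {
			.pmd_entry	= example_pte_range,
			.mm		= vma->vm_mm,
			.private	= tlb,	/* just the tlb now */
		};

		walk_page_range(addr, end, &walk);
	}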

2. If the page table entry was dirty in try_to_unmap_one(), that
dirtiness is propagated to the page's PG_dirty flag by
set_page_dirty() before the MADV_FREE check runs. So it's enough to
test PageDirty alone; keeping a separate pte dirty flag is redundant.
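
Point 2, condensed into an illustrative helper (a sketch of the flow
inside try_to_unmap_one() after this patch, not actual kernel code):
any pte dirtiness is folded into PG_dirty first, so the later
freeability test only needs the page flag.

	#include <linux/mm.h>

	static bool example_anon_page_freeable(struct page *page, pte_t pteval)
	{
		/* Move the dirty bit to the page now the pte is gone. */
		if (pte_dirty(pteval))
			set_page_dirty(page);

		/* A still-clean anon page is discardable under MADV_FREE. */
		return !PageDirty(page);
	}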

Signed-off-by: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/madvise.c
mm/rmap.c

diff --git a/mm/madvise.c b/mm/madvise.c
index 113dc021182bb7dcbaf0d83b01c55f17c7fb5f00..f6fd3dcfb471f705020fd0c60f8975e22b5f602c 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
 
 #include <asm/tlb.h>
 
-struct madvise_free_private {
-       struct vm_area_struct *vma;
-       struct mmu_gather *tlb;
-};
-
 /*
  * Any behaviour which results in changes to the vma->vm_flags needs to
  * take mmap_sem for writing. Others, which simply traverse vmas, need
@@ -269,10 +264,9 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
                                unsigned long end, struct mm_walk *walk)
 
 {
-       struct madvise_free_private *fp = walk->private;
-       struct mmu_gather *tlb = fp->tlb;
+       struct mmu_gather *tlb = walk->private;
        struct mm_struct *mm = tlb->mm;
-       struct vm_area_struct *vma = fp->vma;
+       struct vm_area_struct *vma = walk->vma;
        spinlock_t *ptl;
        pte_t *pte, ptent;
        struct page *page;
@@ -329,15 +323,10 @@ static void madvise_free_page_range(struct mmu_gather *tlb,
                             struct vm_area_struct *vma,
                             unsigned long addr, unsigned long end)
 {
-       struct madvise_free_private fp = {
-               .vma = vma,
-               .tlb = tlb,
-       };
-
        struct mm_walk free_walk = {
                .pmd_entry = madvise_free_pte_range,
                .mm = vma->vm_mm,
-               .private = &fp,
+               .private = tlb,
        };
 
        BUG_ON(addr >= end);
diff --git a/mm/rmap.c b/mm/rmap.c
index c9b0c47cbd9f1d6cb069710cc5f48b1274fdd4dc..311eedb143271c8fa0800f6516e133b74ddb3011 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1376,7 +1376,6 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
        spinlock_t *ptl;
        int ret = SWAP_AGAIN;
        enum ttu_flags flags = (enum ttu_flags)arg;
-       int dirty = 0;
 
        /* munlock has nothing to gain from examining un-locked vmas */
        if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))
@@ -1426,8 +1425,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
        }
 
        /* Move the dirty bit to the physical page now the pte is gone. */
-       dirty = pte_dirty(pteval);
-       if (dirty)
+       if (pte_dirty(pteval))
                set_page_dirty(page);
 
        /* Update high watermark before we lower rss */
@@ -1473,7 +1471,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 
                if (flags & TTU_FREE) {
                        VM_BUG_ON_PAGE(PageSwapCache(page), page);
-                       if (!dirty && !PageDirty(page)) {
+                       if (!PageDirty(page)) {
                                /* It's a freeable page by MADV_FREE */
                                dec_mm_counter(mm, MM_ANONPAGES);
                                goto discard;