From b97d46de2ac990fe888116fc46d68226d7dee360 Mon Sep 17 00:00:00 2001 From: Minchan Kim Date: Thu, 22 Oct 2015 09:03:45 +1100 Subject: [PATCH] mm: MADV_FREE trivial clean up 1. Page table walker already passes the vma it is processing so we don't need to pass vma. 2. If page table entry is dirty in try_to_unmap_one, the dirtiness should propagate to PG_dirty of the page. So, it's enough to check only PageDirty without other pte dirty bit checking. Signed-off-by: Minchan Kim Signed-off-by: Andrew Morton --- mm/madvise.c | 17 +++-------------- mm/rmap.c | 6 ++---- 2 files changed, 5 insertions(+), 18 deletions(-) diff --git a/mm/madvise.c b/mm/madvise.c index 113dc021182b..f6fd3dcfb471 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -24,11 +24,6 @@ #include -struct madvise_free_private { - struct vm_area_struct *vma; - struct mmu_gather *tlb; -}; - /* * Any behaviour which results in changes to the vma->vm_flags needs to * take mmap_sem for writing. Others, which simply traverse vmas, need @@ -269,10 +264,9 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, struct mm_walk *walk) { - struct madvise_free_private *fp = walk->private; - struct mmu_gather *tlb = fp->tlb; + struct mmu_gather *tlb = walk->private; struct mm_struct *mm = tlb->mm; - struct vm_area_struct *vma = fp->vma; + struct vm_area_struct *vma = walk->vma; spinlock_t *ptl; pte_t *pte, ptent; struct page *page; @@ -329,15 +323,10 @@ static void madvise_free_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long addr, unsigned long end) { - struct madvise_free_private fp = { - .vma = vma, - .tlb = tlb, - }; - struct mm_walk free_walk = { .pmd_entry = madvise_free_pte_range, .mm = vma->vm_mm, - .private = &fp, + .private = tlb, }; BUG_ON(addr >= end); diff --git a/mm/rmap.c b/mm/rmap.c index c9b0c47cbd9f..311eedb14327 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1376,7 +1376,6 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, 
spinlock_t *ptl; int ret = SWAP_AGAIN; enum ttu_flags flags = (enum ttu_flags)arg; - int dirty = 0; /* munlock has nothing to gain from examining un-locked vmas */ if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED)) @@ -1426,8 +1425,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, } /* Move the dirty bit to the physical page now the pte is gone. */ - dirty = pte_dirty(pteval); - if (dirty) + if (pte_dirty(pteval)) set_page_dirty(page); /* Update high watermark before we lower rss */ @@ -1473,7 +1471,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, if (flags & TTU_FREE) { VM_BUG_ON_PAGE(PageSwapCache(page), page); - if (!dirty && !PageDirty(page)) { + if (!PageDirty(page)) { /* It's a freeable page by MADV_FREE */ dec_mm_counter(mm, MM_ANONPAGES); goto discard; -- 2.39.5