From e571e09cd0edadc0e3584b5a7ba79f0f5e00711b Mon Sep 17 00:00:00 2001
From: Andrew Morton
Date: Wed, 5 Oct 2011 11:43:01 +1100
Subject: [PATCH] thp-mremap-support-and-tlb-optimization-fix

coding-style nitpicking

Cc: Andrea Arcangeli
Cc: Hugh Dickins
Cc: Johannes Weiner
Cc: Mel Gorman
Cc: Rik van Riel
Signed-off-by: Andrew Morton
---
 mm/huge_memory.c | 9 ++++-----
 mm/mremap.c      | 6 +++---
 2 files changed, 7 insertions(+), 8 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 7c9ab4da1e6a..b975dc1db24f 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1065,15 +1065,14 @@ int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
 	if ((old_addr & ~HPAGE_PMD_MASK) ||
 	    (new_addr & ~HPAGE_PMD_MASK) ||
 	    (old_addr + HPAGE_PMD_SIZE) > old_end ||
-	    new_vma->vm_flags & VM_NOHUGEPAGE)
+	    (new_vma->vm_flags & VM_NOHUGEPAGE))
 		goto out;
 
 	/*
 	 * The destination pmd shouldn't be established, free_pgtables()
 	 * should have release it.
 	 */
-	if (!pmd_none(*new_pmd)) {
-		WARN_ON(1);
+	if (WARN_ON(!pmd_none(*new_pmd))) {
 		VM_BUG_ON(pmd_trans_huge(*new_pmd));
 		goto out;
 	}
@@ -1091,9 +1090,9 @@ int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
 			spin_unlock(&mm->page_table_lock);
 			ret = 1;
 		}
-	} else
+	} else {
 		spin_unlock(&mm->page_table_lock);
-
+	}
 out:
 	return ret;
 }
diff --git a/mm/mremap.c b/mm/mremap.c
index f0c4fcdbb4c6..d6959cb4df58 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -155,13 +155,13 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 			if (err > 0) {
 				need_flush = true;
 				continue;
-			} else if (!err)
+			} else if (!err) {
 				split_huge_page_pmd(vma->vm_mm, old_pmd);
+			}
 			VM_BUG_ON(pmd_trans_huge(*old_pmd));
 		}
 		if (pmd_none(*new_pmd) && __pte_alloc(new_vma->vm_mm, new_vma,
-						      new_pmd,
-						      new_addr))
+						      new_pmd, new_addr))
 			break;
 		next = (new_addr + PMD_SIZE) & PMD_MASK;
 		if (extent > next - new_addr)
-- 
2.39.5