thp-mremap-support-and-tlb-optimization-fix
author Andrew Morton <akpm@linux-foundation.org>
Wed, 5 Oct 2011 00:43:01 +0000 (11:43 +1100)
committer Stephen Rothwell <sfr@canb.auug.org.au>
Wed, 12 Oct 2011 06:32:07 +0000 (17:32 +1100)
coding-style nitpicking

Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Johannes Weiner <jweiner@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/huge_memory.c
mm/mremap.c

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 7c9ab4da1e6a498e2734887522da95f6e812c6ea..b975dc1db24f814c6749644e22b3d8a139df5f34 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1065,15 +1065,14 @@ int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
        if ((old_addr & ~HPAGE_PMD_MASK) ||
            (new_addr & ~HPAGE_PMD_MASK) ||
            (old_addr + HPAGE_PMD_SIZE) > old_end ||
-           new_vma->vm_flags & VM_NOHUGEPAGE)
+           (new_vma->vm_flags & VM_NOHUGEPAGE))
                goto out;
 
        /*
         * The destination pmd shouldn't be established, free_pgtables()
         * should have released it.
         */
-       if (!pmd_none(*new_pmd)) {
-               WARN_ON(1);
+       if (WARN_ON(!pmd_none(*new_pmd))) {
                VM_BUG_ON(pmd_trans_huge(*new_pmd));
                goto out;
        }
@@ -1091,9 +1090,9 @@ int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
                        spin_unlock(&mm->page_table_lock);
                        ret = 1;
                }
-       } else
+       } else {
                spin_unlock(&mm->page_table_lock);
-
+       }
 out:
        return ret;
 }
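
The rewrite above relies on WARN_ON() evaluating to the truth value of its
condition, which lets the warning sit directly in the if() instead of a
separate WARN_ON(1) statement. Below is a minimal userspace sketch of that
idiom, assuming GNU C statement expressions; warn_on(), pmd_populated and
main() are made-up stand-ins for illustration, not kernel code.

#include <stdbool.h>
#include <stdio.h>

/*
 * Like the kernel's WARN_ON(), this evaluates the condition once, complains
 * when it is true, and returns its truth value so it can be tested inline.
 * It prints to stderr instead of dumping a backtrace.
 */
#define warn_on(cond) ({						\
	bool __ret_warn_on = !!(cond);					\
	if (__ret_warn_on)						\
		fprintf(stderr, "warning: %s at %s:%d\n",		\
			#cond, __FILE__, __LINE__);			\
	__ret_warn_on;							\
})

int main(void)
{
	int pmd_populated = 1;	/* pretend the destination pmd is not empty */

	/* Mirrors "if (WARN_ON(!pmd_none(*new_pmd))) goto out;" above. */
	if (warn_on(pmd_populated))
		return 1;	/* unexpected state: warn and bail out */

	return 0;
}

The condition is written exactly once, and the warning fires only in the
unexpected case that the comment in the hunk describes.
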
diff --git a/mm/mremap.c b/mm/mremap.c
index f0c4fcdbb4c6a6e2fe6a2dc5b06cde37487c1f5a..d6959cb4df58f1d694c179898553bf7e3150cc49 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -155,13 +155,13 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
                        if (err > 0) {
                                need_flush = true;
                                continue;
-                       } else if (!err)
+                       } else if (!err) {
                                split_huge_page_pmd(vma->vm_mm, old_pmd);
+                       }
                        VM_BUG_ON(pmd_trans_huge(*old_pmd));
                }
                if (pmd_none(*new_pmd) && __pte_alloc(new_vma->vm_mm, new_vma,
-                                                     new_pmd,
-                                                     new_addr))
+                                                     new_pmd, new_addr))
                        break;
                next = (new_addr + PMD_SIZE) & PMD_MASK;
                if (extent > next - new_addr)
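
This hunk, like the "} else {" change in mm/huge_memory.c above, adds braces
to single-statement branches that sit opposite a braced branch. The tiny,
purely illustrative program below (nothing in it comes from the kernel
sources) shows the pitfall that convention guards against: with an unbraced
branch, a statement added later only looks as if it belongs to the branch.

#include <stdio.h>

int main(void)
{
	int moved_as_huge = 0;

	if (moved_as_huge)
		printf("flush deferred\n");
	else
		printf("splitting\n");
		printf("falling back to ptes\n");	/* runs unconditionally:
							   not part of the else */

	return 0;
}

Writing the braces up front, as the patch does, keeps the branch boundaries
explicit and makes this kind of later edit safe.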