mm/memory.c: squash unused variable warning
author Andrew Morton <akpm@linux-foundation.org>
Fri, 28 Sep 2012 00:19:02 +0000 (10:19 +1000)
committer Stephen Rothwell <sfr@canb.auug.org.au>
Fri, 28 Sep 2012 06:06:41 +0000 (16:06 +1000)
CONFIG_NUMA=n:

mm/memory.c: In function 'do_prot_none':
mm/memory.c:3447: warning: unused variable 'node'

Fix this by eliminating a rather gratuitous goto, and nuke a second
gratuitous goto while we're there.
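
For context, a minimal sketch of the pattern (a hypothetical reduction,
not the kernel code; pick_node() is a stand-in for mpol_misplaced()):
with 'node' declared at function scope but only used inside the
#ifdef CONFIG_NUMA block, a CONFIG_NUMA=n build is left with an unused
variable; scoping the declaration to the block that uses it, as the
hunk below does, removes both the warning and the need for the gotos.

/* Hypothetical reduction; not the actual do_prot_none() code. */
static int pick_node(void) { return -1; }

static int before(void)
{
	int node;	/* unused when CONFIG_NUMA=n: every use is compiled out */

#ifdef CONFIG_NUMA
	node = pick_node();
	if (node == -1)
		goto fixup;
	/* ... migrate ... */
fixup:
	/* ... restore protections ... */
#endif
	return 0;
}

static int after(void)
{
#ifdef CONFIG_NUMA
	{
		int node = pick_node();	/* declaration scoped to its only users */

		if (node != -1) {
			/* ... migrate ... */
		}
		/* ... restore protections ... */
	}
#endif
	return 0;
}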

Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/memory.c

index d896a2438eaed99a5efcf4e79939d38510c9382b..95dfbb4c5837a4639bbf61488660cfb98a27b788 100644 (file)
@@ -3444,7 +3444,6 @@ static int do_prot_none(struct mm_struct *mm, struct vm_area_struct *vma,
 {
        struct page *page = NULL;
        spinlock_t *ptl;
-       int node;
 
        ptl = pte_lockptr(mm, pmd);
        spin_lock(ptl);
@@ -3457,35 +3456,32 @@ static int do_prot_none(struct mm_struct *mm, struct vm_area_struct *vma,
         * lazy page migration, see MPOL_MF_LAZY and related.
         */
        page = vm_normal_page(vma, address, entry);
-       if (!page)
-               goto do_fixup_locked;
-
-       get_page(page);
-       pte_unmap_unlock(ptep, ptl);
+       if (page) {
+               int node;
 
-       node = mpol_misplaced(page, vma, address);
-       if (node == -1)
-               goto do_fixup;
+               get_page(page);
+               pte_unmap_unlock(ptep, ptl);
 
-       /*
-        * Page migration will install a new pte with vma->vm_page_prot,
-        * otherwise fall-through to the fixup. Next time,.. perhaps.
-        */
-       if (!migrate_misplaced_page(mm, page, node)) {
-               put_page(page);
-               return 0;
+               node = mpol_misplaced(page, vma, address);
+               if (node != -1) {
+                       /*
+                        * Page migration will install a new pte with
+                        * vma->vm_page_prot, otherwise fall-through to the
+                        * fixup. Next time,.. perhaps.
+                        */
+                       if (!migrate_misplaced_page(mm, page, node)) {
+                               put_page(page);
+                               return 0;
+                       }
+               }
+               /*
+                * OK, nothing to do,.. change the protection back to what it
+                * ought to be.
+                */
+               ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
+               if (unlikely(!pte_same(*ptep, entry)))
+                       goto unlock;
        }
-
-do_fixup:
-       /*
-        * OK, nothing to do,.. change the protection back to what it
-        * ought to be.
-        */
-       ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
-       if (unlikely(!pte_same(*ptep, entry)))
-               goto unlock;
-
-do_fixup_locked:
 #endif /* CONFIG_NUMA */
 
        flush_cache_page(vma, address, pte_pfn(entry));