 {
 	struct page *page = NULL;
 	spinlock_t *ptl;
-	int node;
 
 	ptl = pte_lockptr(mm, pmd);
 	spin_lock(ptl);
 	/*
 	 * For NUMA systems we use the special PROT_NONE maps to drive
 	 * lazy page migration, see MPOL_MF_LAZY and related.
 	 */
 	page = vm_normal_page(vma, address, entry);
-	if (!page)
-		goto do_fixup_locked;
-
-	get_page(page);
-	pte_unmap_unlock(ptep, ptl);
+	if (page) {
+		int node;
 
-	node = mpol_misplaced(page, vma, address);
-	if (node == -1)
-		goto do_fixup;
+		get_page(page);
+		pte_unmap_unlock(ptep, ptl);
 
-	/*
-	 * Page migration will install a new pte with vma->vm_page_prot,
-	 * otherwise fall-through to the fixup. Next time,.. perhaps.
-	 */
-	if (!migrate_misplaced_page(mm, page, node)) {
-		put_page(page);
-		return 0;
+		node = mpol_misplaced(page, vma, address);
+		if (node != -1) {
+			/*
+			 * Page migration will install a new pte with
+			 * vma->vm_page_prot, otherwise fall-through to the
+			 * fixup. Next time,.. perhaps.
+			 */
+			if (!migrate_misplaced_page(mm, page, node)) {
+				put_page(page);
+				return 0;
+			}
+		}
+		/*
+		 * OK, nothing to do,.. change the protection back to what it
+		 * ought to be.
+		 */
+		ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
+		if (unlikely(!pte_same(*ptep, entry)))
+			goto unlock;
 	}
-
-do_fixup:
-	/*
-	 * OK, nothing to do,.. change the protection back to what it
-	 * ought to be.
-	 */
-	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
-	if (unlikely(!pte_same(*ptep, entry)))
-		goto unlock;
-
-do_fixup_locked:
 #endif /* CONFIG_NUMA */
 	flush_cache_page(vma, address, pte_pfn(entry));