static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, unsigned long end, pgprot_t newprot,
- int dirty_accountable, int prot_numa, bool *ret_all_same_nidpid)
+ int dirty_accountable, int prot_numa)
{
struct mm_struct *mm = vma->vm_mm;
pte_t *pte, oldpte;
spinlock_t *ptl;
unsigned long pages = 0;
- bool all_same_nidpid = true;
- int last_nid = -1;
- int last_pid = -1;
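/* Map the PTE page and take its page-table lock; updates below run under ptl */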
pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
arch_enter_lazy_mmu_mode();
page = vm_normal_page(vma, addr, oldpte);
if (page) {
- int nidpid = page_nidpid_last(page);
- int this_nid = nidpid_to_nid(nidpid);
- int this_pid = nidpid_to_pid(nidpid);
-
- if (last_nid == -1)
- last_nid = this_nid;
- if (last_pid == -1)
- last_pid = this_pid;
- if (last_nid != this_nid ||
- last_pid != this_pid) {
- all_same_nidpid = false;
- }
-
if (!pte_numa(oldpte)) {
ptent = pte_mknuma(ptent);
updated = true;
}
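/* A non-present PTE holds a swap entry, possibly a migration entry */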
swp_entry_t entry = pte_to_swp_entry(oldpte);
if (is_write_migration_entry(entry)) {
+ pte_t newpte;
/*
* A protection check is difficult so
* just be safe and disable write
*/
make_migration_entry_read(&entry);
- set_pte_at(mm, addr, pte,
- swp_entry_to_pte(entry));
+ newpte = swp_entry_to_pte(entry);
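+ /* Preserve the soft-dirty bit carried by the old entry */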
+ if (pte_swp_soft_dirty(oldpte))
+ newpte = pte_swp_mksoft_dirty(newpte);
+ set_pte_at(mm, addr, pte, newpte);
pages++;
}
arch_leave_lazy_mmu_mode();
pte_unmap_unlock(pte - 1, ptl);
- *ret_all_same_nidpid = all_same_nidpid;
return pages;
}
-#ifdef CONFIG_NUMA_BALANCING
-static inline void change_pmd_protnuma(struct mm_struct *mm, unsigned long addr,
- pmd_t *pmd)
-{
- spin_lock(&mm->page_table_lock);
- set_pmd_at(mm, addr & PMD_MASK, pmd, pmd_mknuma(*pmd));
- spin_unlock(&mm->page_table_lock);
-}
-#else
-static inline void change_pmd_protnuma(struct mm_struct *mm, unsigned long addr,
- pmd_t *pmd)
-{
- BUG();
-}
-#endif /* CONFIG_NUMA_BALANCING */
-
static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
pud_t *pud, unsigned long addr, unsigned long end,
pgprot_t newprot, int dirty_accountable, int prot_numa)
{
pmd_t *pmd;
unsigned long next;
unsigned long pages = 0;
- bool all_same_nidpid;
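/* Walk each PMD covering [addr, end), updating the PTEs it maps */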
pmd = pmd_offset(pud, addr);
do {
if (pmd_none_or_clear_bad(pmd))
continue;
this_pages = change_pte_range(vma, pmd, addr, next, newprot,
- dirty_accountable, prot_numa, &all_same_nidpid);
+ dirty_accountable, prot_numa);
pages += this_pages;
-
- /*
- * If we are changing protections for NUMA hinting faults then
- * set pmd_numa if the examined pages were all on the same
- * node. This allows a regular PMD to be handled as one fault
- * and effectively batches the taking of the PTL
- */
- if (prot_numa && this_pages && all_same_nidpid)
- change_pmd_protnuma(vma->vm_mm, addr, pmd);
} while (pmd++, addr = next, addr != end);
return pages;
}