diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index f3c4f9d22821f889104340332eee93c5e124df4d..a84909cf20d36b3d84f00d8529127f78f6b5981d 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -715,7 +715,8 @@ int do_huge_pmd_anonymous_page(struct vm_fault *vmf)
 }
 
 static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
-               pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write)
+               pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write,
+               pgtable_t pgtable)
 {
        struct mm_struct *mm = vma->vm_mm;
        pmd_t entry;
@@ -729,6 +730,12 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
                entry = pmd_mkyoung(pmd_mkdirty(entry));
                entry = maybe_pmd_mkwrite(entry, vma);
        }
+
+       if (pgtable) {
+               pgtable_trans_huge_deposit(mm, pmd, pgtable);
+               atomic_long_inc(&mm->nr_ptes);
+       }
+
        set_pmd_at(mm, addr, pmd, entry);
        update_mmu_cache_pmd(vma, addr, pmd);
        spin_unlock(ptl);
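
The two hunks above teach insert_pfn_pmd() to optionally deposit a preallocated page table for the huge pmd and to account for it in mm->nr_ptes. A minimal sketch of the deposit/withdraw lifecycle, using only the generic helpers that appear in this diff (illustrative flow, not a literal kernel excerpt):

static void pgtable_deposit_lifecycle_sketch(struct mm_struct *mm,
					     unsigned long addr, pmd_t *pmd,
					     spinlock_t *ptl)
{
	pgtable_t pgtable = pte_alloc_one(mm, addr);	/* preallocate */

	spin_lock(ptl);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);	/* stash under ptl */
	atomic_long_inc(&mm->nr_ptes);			/* account for it */
	spin_unlock(ptl);

	/* ... later, when the huge pmd is zapped or split ... */
	spin_lock(ptl);
	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
	pte_free(mm, pgtable);				/* release the stash */
	atomic_long_dec(&mm->nr_ptes);
	spin_unlock(ptl);
}
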
@@ -738,6 +745,7 @@ int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
                        pmd_t *pmd, pfn_t pfn, bool write)
 {
        pgprot_t pgprot = vma->vm_page_prot;
+       pgtable_t pgtable = NULL;
        /*
         * If we had pmd_special, we could avoid all these restrictions,
         * but we need to be consistent with PTEs and architectures that
@@ -752,9 +760,15 @@ int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
        if (addr < vma->vm_start || addr >= vma->vm_end)
                return VM_FAULT_SIGBUS;
 
+       if (arch_needs_pgtable_deposit()) {
+               pgtable = pte_alloc_one(vma->vm_mm, addr);
+               if (!pgtable)
+                       return VM_FAULT_OOM;
+       }
+
        track_pfn_insert(vma, &pgprot, pfn);
 
-       insert_pfn_pmd(vma, addr, pmd, pfn, pgprot, write);
+       insert_pfn_pmd(vma, addr, pmd, pfn, pgprot, write, pgtable);
        return VM_FAULT_NOPAGE;
 }
 EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);
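
With the preallocation folded into vmf_insert_pfn_pmd(), DAX fault handlers need no arch-specific handling of the deposit requirement. A hedged sketch of a caller, assuming the vm_fault layout of this era (the handler name and pfn argument are illustrative):

static int example_dax_pmd_fault(struct vm_fault *vmf, pfn_t pfn)
{
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	/*
	 * vmf_insert_pfn_pmd() now preallocates and deposits a page
	 * table itself when arch_needs_pgtable_deposit() is true, and
	 * returns VM_FAULT_OOM if that allocation fails.
	 */
	return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
				  pfn, write);
}
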
@@ -1564,9 +1578,6 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                ClearPageDirty(page);
        unlock_page(page);
 
-       if (PageActive(page))
-               deactivate_page(page);
-
        if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
                pmdp_invalidate(vma, addr, pmd);
                orig_pmd = pmd_mkold(orig_pmd);
@@ -1575,6 +1586,8 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                set_pmd_at(mm, addr, pmd, orig_pmd);
                tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
        }
+
+       mark_page_lazyfree(page);
        ret = true;
 out:
        spin_unlock(ptl);
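
madvise_free_huge_pmd() no longer merely deactivates an active page; it calls mark_page_lazyfree(), which moves a clean anonymous page to the inactive file LRU so reclaim can discard it without swap I/O. Rough shape of that helper from the companion mm/swap.c patch in this series (the per-CPU pagevec names come from that patch; treat this as a sketch):

void mark_page_lazyfree(struct page *page)
{
	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
	    !PageSwapCache(page) && !PageUnevictable(page)) {
		struct pagevec *pvec = &get_cpu_var(lru_lazyfree_pvecs);

		get_page(page);
		if (!pagevec_add(pvec, page) || PageCompound(page))
			pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
		put_cpu_var(lru_lazyfree_pvecs);
	}
}
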
@@ -1612,12 +1625,13 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                        tlb->fullmm);
        tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
        if (vma_is_dax(vma)) {
+               if (arch_needs_pgtable_deposit())
+                       zap_deposited_table(tlb->mm, pmd);
                spin_unlock(ptl);
                if (is_huge_zero_pmd(orig_pmd))
                        tlb_remove_page_size(tlb, pmd_page(orig_pmd), HPAGE_PMD_SIZE);
        } else if (is_huge_zero_pmd(orig_pmd)) {
-               pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd));
-               atomic_long_dec(&tlb->mm->nr_ptes);
+               zap_deposited_table(tlb->mm, pmd);
                spin_unlock(ptl);
                tlb_remove_page_size(tlb, pmd_page(orig_pmd), HPAGE_PMD_SIZE);
        } else {
@@ -1626,10 +1640,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
                VM_BUG_ON_PAGE(!PageHead(page), page);
                if (PageAnon(page)) {
-                       pgtable_t pgtable;
-                       pgtable = pgtable_trans_huge_withdraw(tlb->mm, pmd);
-                       pte_free(tlb->mm, pgtable);
-                       atomic_long_dec(&tlb->mm->nr_ptes);
+                       zap_deposited_table(tlb->mm, pmd);
                        add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
                } else {
                        if (arch_needs_pgtable_deposit())
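
zap_deposited_table(), used three times in the two hunks above, factors out the withdraw/free/unaccount sequence that the removed lines open-coded. Reconstructed here from those removed lines (the upstream helper is a static function earlier in mm/huge_memory.c):

static void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd)
{
	pgtable_t pgtable;

	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
	pte_free(mm, pgtable);
	atomic_long_dec(&mm->nr_ptes);
}
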
@@ -2145,15 +2156,15 @@ static void freeze_page(struct page *page)
 {
        enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS |
                TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD;
-       int ret;
+       bool unmap_success;
 
        VM_BUG_ON_PAGE(!PageHead(page), page);
 
        if (PageAnon(page))
                ttu_flags |= TTU_MIGRATION;
 
-       ret = try_to_unmap(page, ttu_flags);
-       VM_BUG_ON_PAGE(ret, page);
+       unmap_success = try_to_unmap(page, ttu_flags);
+       VM_BUG_ON_PAGE(!unmap_success, page);
 }
 
 static void unfreeze_page(struct page *page)
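
The freeze_page() hunk tracks try_to_unmap()'s new calling convention: it now returns bool, true when the page was fully unmapped, instead of an int status where SWAP_SUCCESS (0) meant success, so the sense of the assertion flips. Side by side:

	/* Before: int status, 0 (SWAP_SUCCESS) meant fully unmapped */
	int ret = try_to_unmap(page, ttu_flags);
	VM_BUG_ON_PAGE(ret, page);

	/* After: boolean, true means fully unmapped */
	bool unmap_success = try_to_unmap(page, ttu_flags);
	VM_BUG_ON_PAGE(!unmap_success, page);
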
@@ -2399,7 +2410,6 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 
        VM_BUG_ON_PAGE(is_huge_zero_page(page), page);
        VM_BUG_ON_PAGE(!PageLocked(page), page);
-       VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
        VM_BUG_ON_PAGE(!PageCompound(page), page);
 
        if (PageAnon(head)) {
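
The dropped PageSwapBacked assertion follows from the lazyfree change above: after MADV_FREE, a clean anonymous THP is left PageAnon() but !PageSwapBacked() (see the mark_page_lazyfree() sketch), and split_huge_page_to_list() must now accept such pages rather than trip a VM_BUG_ON.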