diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 09460955e81839d0ea48ba6f036ed46672986ea2..77ae3239c3de17bfbf7ba29b56a5cb270611cfd8 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -875,13 +875,13 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
                                        unsigned long address, pmd_t *pmd,
                                        int referenced)
 {
-       pte_t pteval;
        int swapped_in = 0, ret = 0;
-       struct fault_env fe = {
+       struct vm_fault vmf = {
                .vma = vma,
                .address = address,
                .flags = FAULT_FLAG_ALLOW_RETRY,
                .pmd = pmd,
+               .pgoff = linear_page_index(vma, address),
        };
 
        /* we only decide to swapin, if there is enough young ptes */
@@ -889,19 +889,19 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
                trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
                return false;
        }
-       fe.pte = pte_offset_map(pmd, address);
-       for (; fe.address < address + HPAGE_PMD_NR*PAGE_SIZE;
-                       fe.pte++, fe.address += PAGE_SIZE) {
-               pteval = *fe.pte;
-               if (!is_swap_pte(pteval))
+       vmf.pte = pte_offset_map(pmd, address);
+       for (; vmf.address < address + HPAGE_PMD_NR*PAGE_SIZE;
+                       vmf.pte++, vmf.address += PAGE_SIZE) {
+               vmf.orig_pte = *vmf.pte;
+               if (!is_swap_pte(vmf.orig_pte))
                        continue;
                swapped_in++;
-               ret = do_swap_page(&fe, pteval);
+               ret = do_swap_page(&vmf);
 
                /* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */
                if (ret & VM_FAULT_RETRY) {
                        down_read(&mm->mmap_sem);
-                       if (hugepage_vma_revalidate(mm, address, &fe.vma)) {
+                       if (hugepage_vma_revalidate(mm, address, &vmf.vma)) {
                                /* vma is no longer available, don't continue to swapin */
                                trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
                                return false;
@@ -915,10 +915,10 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
                        return false;
                }
                /* pte is unmapped now, we need to map it */
-               fe.pte = pte_offset_map(pmd, fe.address);
+               vmf.pte = pte_offset_map(pmd, vmf.address);
        }
-       fe.pte--;
-       pte_unmap(fe.pte);
+       vmf.pte--;
+       pte_unmap(vmf.pte);
        trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
        return true;
 }
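
These first hunks are khugepaged's side of the fault_env -> vm_fault unification: do_swap_page() now takes only the vm_fault pointer, the faulting pte value travels in vmf.orig_pte instead of a separate argument, and .pgoff has to be filled in because fault handlers consult it. A minimal caller-side sketch of the new convention (illustrative only, not a complete kernel function; names follow the hunks above):

	struct vm_fault vmf = {
		.vma     = vma,
		.address = address,
		.flags   = FAULT_FLAG_ALLOW_RETRY,
		.pmd     = pmd,
		/* fault handlers look at pgoff, so it must be valid */
		.pgoff   = linear_page_index(vma, address),
	};

	vmf.pte = pte_offset_map(pmd, address);
	vmf.orig_pte = *vmf.pte;          /* pte snapshot now lives in vmf */
	if (is_swap_pte(vmf.orig_pte))
		ret = do_swap_page(&vmf); /* no separate pte argument */
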
@@ -943,7 +943,7 @@ static void collapse_huge_page(struct mm_struct *mm,
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 
        /* Only allocate from the target node */
-       gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_OTHER_NODE | __GFP_THISNODE;
+       gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
 
        /*
         * Before allocating the hugepage, release the mmap_sem read lock.
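
This hunk, and the matching one in collapse_shmem() further down, drop __GFP_OTHER_NODE from the allocation mask. As far as I can tell the flag only ever steered NUMA hit/miss vmstat accounting (marking an allocation done on behalf of another node), so removing it does not change placement; __GFP_THISNODE is what actually pins the allocation to the target node.
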
@@ -1242,7 +1242,6 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
        struct vm_area_struct *vma;
        unsigned long addr;
        pmd_t *pmd, _pmd;
-       bool deposited = false;
 
        i_mmap_lock_write(mapping);
        vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
@@ -1267,26 +1266,10 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
                        spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd);
                        /* assume page table is clear */
                        _pmd = pmdp_collapse_flush(vma, addr, pmd);
-                       /*
-                        * now deposit the pgtable for arch that need it
-                        * otherwise free it.
-                        */
-                       if (arch_needs_pgtable_deposit()) {
-                               /*
-                                * The deposit should be visibile only after
-                                * collapse is seen by others.
-                                */
-                               smp_wmb();
-                               pgtable_trans_huge_deposit(vma->vm_mm, pmd,
-                                                          pmd_pgtable(_pmd));
-                               deposited = true;
-                       }
                        spin_unlock(ptl);
                        up_write(&vma->vm_mm->mmap_sem);
-                       if (!deposited) {
-                               atomic_long_dec(&vma->vm_mm->nr_ptes);
-                               pte_free(vma->vm_mm, pmd_pgtable(_pmd));
-                       }
+                       atomic_long_dec(&vma->vm_mm->nr_ptes);
+                       pte_free(vma->vm_mm, pmd_pgtable(_pmd));
                }
        }
        i_mmap_unlock_write(mapping);
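
retract_page_tables() backs out the deposit-on-collapse logic here. No huge pmd is installed at this point, the pmd is merely cleared, and the huge page is only mapped later at fault time, which presumably performs its own pgtable deposit where the architecture needs one. With nothing to deposit against, the table can always be freed and nr_ptes decremented; the surviving path boils down to (sketch, names as in the hunk):

	spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd);
	_pmd = pmdp_collapse_flush(vma, addr, pmd);  /* detach + TLB flush */
	spin_unlock(ptl);
	up_write(&vma->vm_mm->mmap_sem);

	/* no huge pmd was installed, so nothing to deposit against */
	atomic_long_dec(&vma->vm_mm->nr_ptes);
	pte_free(vma->vm_mm, pmd_pgtable(_pmd));
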
@@ -1326,8 +1309,7 @@ static void collapse_shmem(struct mm_struct *mm,
        VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
 
        /* Only allocate from the target node */
-       gfp = alloc_hugepage_khugepaged_gfpmask() |
-               __GFP_OTHER_NODE | __GFP_THISNODE;
+       gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
 
        new_page = khugepaged_alloc_page(hpage, gfp, node);
        if (!new_page) {
@@ -1446,7 +1428,7 @@ static void collapse_shmem(struct mm_struct *mm,
                radix_tree_replace_slot(&mapping->page_tree, slot,
                                new_page + (index % HPAGE_PMD_NR));
 
-               slot = radix_tree_iter_next(&iter);
+               slot = radix_tree_iter_resume(slot, &iter);
                index++;
                continue;
 out_lru:
@@ -1546,7 +1528,6 @@ tree_unlocked:
                                /* Put holes back where they were */
                                radix_tree_delete(&mapping->page_tree,
                                                  iter.index);
-                               slot = radix_tree_iter_next(&iter);
                                continue;
                        }
 
@@ -1557,11 +1538,11 @@ tree_unlocked:
                        page_ref_unfreeze(page, 2);
                        radix_tree_replace_slot(&mapping->page_tree,
                                                slot, page);
+                       slot = radix_tree_iter_resume(slot, &iter);
                        spin_unlock_irq(&mapping->tree_lock);
                        putback_lru_page(page);
                        unlock_page(page);
                        spin_lock_irq(&mapping->tree_lock);
-                       slot = radix_tree_iter_next(&iter);
                }
                VM_BUG_ON(nr_none);
                spin_unlock_irq(&mapping->tree_lock);
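
The three hunks above move collapse_shmem() to the reworked radix tree iterator API: radix_tree_iter_next(&iter) becomes radix_tree_iter_resume(slot, &iter), which takes and returns the slot pointer and must be called while the slot is still valid, which is why the call now sits before tree_lock is dropped in the putback path. The hole-restoring radix_tree_delete() branch loses its resume call entirely, presumably because deleting the current entry no longer invalidates the iterator under the new API.
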
@@ -1641,8 +1622,8 @@ static void khugepaged_scan_shmem(struct mm_struct *mm,
                present++;
 
                if (need_resched()) {
+                       slot = radix_tree_iter_resume(slot, &iter);
                        cond_resched_rcu();
-                       slot = radix_tree_iter_next(&iter);
                }
        }
        rcu_read_unlock();
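
Same conversion in the scan loop, same ordering rule: record the resume point while slot is still valid, then let the scheduler in (cond_resched_rcu() may drop the RCU read lock). A minimal sketch of the pattern these last hunks converge on (assuming the 4.10-era radix tree API; mapping and start are stand-ins for the caller's context):

	void **slot;
	struct radix_tree_iter iter;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
		/* ... inspect *slot ... */
		if (need_resched()) {
			/* mark the resume point first: once the RCU lock
			 * is dropped, slot may be freed under us */
			slot = radix_tree_iter_resume(slot, &iter);
			cond_resched_rcu();
		}
	}
	rcu_read_unlock();
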