diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 40f17c34b4153fab93b4f1a2685dee0b8cac4da8..5f902e20e8c03eb70cd11d569e36a8697ed5d28a 100644
@@ -606,6 +606,15 @@ static inline pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
        return pmd;
 }
 
+static inline pmd_t mk_huge_pmd(struct page *page, struct vm_area_struct *vma)
+{
+       pmd_t entry;
+       entry = mk_pmd(page, vma->vm_page_prot);
+       entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
+       entry = pmd_mkhuge(entry);
+       return entry;
+}
+
 static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
                                        struct vm_area_struct *vma,
                                        unsigned long haddr, pmd_t *pmd,
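Note: the new mk_huge_pmd() helper folds the repeated mk_pmd() -> pmd_mkdirty()/maybe_pmd_mkwrite() -> pmd_mkhuge() sequence into one place; the hunks below replace the three open-coded copies in __do_huge_pmd_anonymous_page(), do_huge_pmd_wp_page() and collapse_huge_page() with calls to it.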
@@ -629,9 +638,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
                pte_free(mm, pgtable);
        } else {
                pmd_t entry;
-               entry = mk_pmd(page, vma->vm_page_prot);
-               entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
-               entry = pmd_mkhuge(entry);
+               entry = mk_huge_pmd(page, vma);
                /*
                 * The spinlocking to take the lru_lock inside
                 * page_add_new_anon_rmap() acts as a full memory
@@ -777,6 +784,28 @@ out:
        return ret;
 }
 
+void huge_pmd_set_accessed(struct mm_struct *mm,
+                          struct vm_area_struct *vma,
+                          unsigned long address,
+                          pmd_t *pmd, pmd_t orig_pmd,
+                          int dirty)
+{
+       pmd_t entry;
+       unsigned long haddr;
+
+       spin_lock(&mm->page_table_lock);
+       if (unlikely(!pmd_same(*pmd, orig_pmd)))
+               goto unlock;
+
+       entry = pmd_mkyoung(orig_pmd);
+       haddr = address & HPAGE_PMD_MASK;
+       if (pmdp_set_access_flags(vma, haddr, pmd, entry, dirty))
+               update_mmu_cache_pmd(vma, address, pmd);
+
+unlock:
+       spin_unlock(&mm->page_table_lock);
+}
+
 static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
                                        struct vm_area_struct *vma,
                                        unsigned long address,
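huge_pmd_set_accessed() retakes page_table_lock, re-validates the pmd against the value seen at fault time (pmd_same()), and only then marks it young, flushing solely when pmdp_set_access_flags() reports a change. A rough sketch of the intended caller, modelled on the huge-pmd dispatch in handle_mm_fault() of this era (the exact control flow lives in mm/memory.c and is not part of this diff):

	if (pmd_trans_huge(orig_pmd) && !pmd_trans_splitting(orig_pmd)) {
		unsigned int dirty = flags & FAULT_FLAG_WRITE;

		if (dirty && !pmd_write(orig_pmd))
			/* write fault on a read-only huge pmd: COW */
			return do_huge_pmd_wp_page(mm, vma, address,
						   pmd, orig_pmd);
		/*
		 * Read fault, or already writable: just set the
		 * accessed (and possibly dirty) bit.
		 */
		huge_pmd_set_accessed(mm, vma, address, pmd,
				      orig_pmd, dirty);
		return 0;
	}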
@@ -951,9 +980,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
        } else {
                pmd_t entry;
                VM_BUG_ON(!PageHead(page));
-               entry = mk_pmd(new_page, vma->vm_page_prot);
-               entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
-               entry = pmd_mkhuge(entry);
+               entry = mk_huge_pmd(new_page, vma);
                pmdp_clear_flush(vma, haddr, pmd);
                page_add_new_anon_rmap(new_page, vma, haddr);
                set_pmd_at(mm, haddr, pmd, entry);
@@ -1146,22 +1173,14 @@ pmd_t *page_check_address_pmd(struct page *page,
                              unsigned long address,
                              enum page_check_address_pmd_flag flag)
 {
-       pgd_t *pgd;
-       pud_t *pud;
        pmd_t *pmd, *ret = NULL;
 
        if (address & ~HPAGE_PMD_MASK)
                goto out;
 
-       pgd = pgd_offset(mm, address);
-       if (!pgd_present(*pgd))
-               goto out;
-
-       pud = pud_offset(pgd, address);
-       if (!pud_present(*pud))
+       pmd = mm_find_pmd(mm, address);
+       if (!pmd)
                goto out;
-
-       pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd))
                goto out;
        if (pmd_page(*pmd) != page)
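This hunk, and the three below it, replace the open-coded pgd/pud/pmd walk with mm_find_pmd(). The helper is introduced elsewhere in this series (presumably in mm/rmap.c, not shown here); judging from the code it replaces, it amounts to:

	/* sketch reconstructed from the walks this diff removes */
	pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
	{
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd = NULL;

		pgd = pgd_offset(mm, address);
		if (!pgd_present(*pgd))
			goto out;

		pud = pud_offset(pgd, address);
		if (!pud_present(*pud))
			goto out;

		pmd = pmd_offset(pud, address);
		if (!pmd_present(*pmd))
			pmd = NULL;
	out:
		return pmd;
	}

Because mm_find_pmd() must subsume the pmd_present() test (the collapse_huge_page() hunk below drops that check), the callers keep only the extra pmd_none()/pmd_trans_huge() tests they still need.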
@@ -1701,64 +1720,49 @@ static void release_pte_pages(pte_t *pte, pte_t *_pte)
        }
 }
 
-static void release_all_pte_pages(pte_t *pte)
-{
-       release_pte_pages(pte, pte + HPAGE_PMD_NR);
-}
-
 static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
                                        unsigned long address,
                                        pte_t *pte)
 {
        struct page *page;
        pte_t *_pte;
-       int referenced = 0, isolated = 0, none = 0;
+       int referenced = 0, none = 0;
        for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
             _pte++, address += PAGE_SIZE) {
                pte_t pteval = *_pte;
                if (pte_none(pteval)) {
                        if (++none <= khugepaged_max_ptes_none)
                                continue;
-                       else {
-                               release_pte_pages(pte, _pte);
+                       else
                                goto out;
-                       }
                }
-               if (!pte_present(pteval) || !pte_write(pteval)) {
-                       release_pte_pages(pte, _pte);
+               if (!pte_present(pteval) || !pte_write(pteval))
                        goto out;
-               }
                page = vm_normal_page(vma, address, pteval);
-               if (unlikely(!page)) {
-                       release_pte_pages(pte, _pte);
+               if (unlikely(!page))
                        goto out;
-               }
+
                VM_BUG_ON(PageCompound(page));
                BUG_ON(!PageAnon(page));
                VM_BUG_ON(!PageSwapBacked(page));
 
                /* cannot use mapcount: can't collapse if there's a gup pin */
-               if (page_count(page) != 1) {
-                       release_pte_pages(pte, _pte);
+               if (page_count(page) != 1)
                        goto out;
-               }
                /*
                 * We can do it before isolate_lru_page because the
                 * page can't be freed from under us. NOTE: PG_lock
                 * is needed to serialize against split_huge_page
                 * when invoked from the VM.
                 */
-               if (!trylock_page(page)) {
-                       release_pte_pages(pte, _pte);
+               if (!trylock_page(page))
                        goto out;
-               }
                /*
                 * Isolate the page to avoid collapsing an hugepage
                 * currently in use by the VM.
                 */
                if (isolate_lru_page(page)) {
                        unlock_page(page);
-                       release_pte_pages(pte, _pte);
                        goto out;
                }
                /* 0 stands for page_is_file_cache(page) == false */
@@ -1771,12 +1775,11 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
                    mmu_notifier_test_young(vma->vm_mm, address))
                        referenced = 1;
        }
-       if (unlikely(!referenced))
-               release_all_pte_pages(pte);
-       else
-               isolated = 1;
+       if (likely(referenced))
+               return 1;
 out:
-       return isolated;
+       release_pte_pages(pte, _pte);
+       return 0;
 }
 
 static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
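The rework above removes release_all_pte_pages() and the per-branch cleanup calls: every failure path now jumps to the single out: label, where release_pte_pages(pte, _pte) unwinds exactly the entries processed so far. This works because _pte is declared at function scope, so after a fully completed loop it equals pte + HPAGE_PMD_NR, reproducing the old release_all_pte_pages() behaviour. Only the tail of release_pte_pages() is visible in the hunk above; presumably the full helper is along these lines:

	static void release_pte_pages(pte_t *pte, pte_t *_pte)
	{
		/* walk backwards, excluding *_pte itself */
		while (--_pte >= pte) {
			pte_t pteval = *_pte;
			if (!pte_none(pteval))
				release_pte_page(pte_page(pteval));
		}
	}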
@@ -1918,14 +1921,26 @@ static struct page
 }
 #endif
 
+static bool hugepage_vma_check(struct vm_area_struct *vma)
+{
+       if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
+           (vma->vm_flags & VM_NOHUGEPAGE))
+               return false;
+
+       if (!vma->anon_vma || vma->vm_ops)
+               return false;
+       if (is_vma_temporary_stack(vma))
+               return false;
+       VM_BUG_ON(vma->vm_flags & VM_NO_THP);
+       return true;
+}
+
 static void collapse_huge_page(struct mm_struct *mm,
                                   unsigned long address,
                                   struct page **hpage,
                                   struct vm_area_struct *vma,
                                   int node)
 {
-       pgd_t *pgd;
-       pud_t *pud;
        pmd_t *pmd, _pmd;
        pte_t *pte;
        pgtable_t pgtable;
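hugepage_vma_check() centralizes the VMA eligibility test (hugepage advice or always-on mode, no VM_NOHUGEPAGE, anonymous memory only, not a temporary stack); the hunks below switch collapse_huge_page() and khugepaged_scan_mm_slot() over to it, so the two copies of the logic can no longer drift apart.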
@@ -1960,28 +1975,12 @@ static void collapse_huge_page(struct mm_struct *mm,
        hend = vma->vm_end & HPAGE_PMD_MASK;
        if (address < hstart || address + HPAGE_PMD_SIZE > hend)
                goto out;
-
-       if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
-           (vma->vm_flags & VM_NOHUGEPAGE))
-               goto out;
-
-       if (!vma->anon_vma || vma->vm_ops)
-               goto out;
-       if (is_vma_temporary_stack(vma))
+       if (!hugepage_vma_check(vma))
                goto out;
-       VM_BUG_ON(vma->vm_flags & VM_NO_THP);
-
-       pgd = pgd_offset(mm, address);
-       if (!pgd_present(*pgd))
+       pmd = mm_find_pmd(mm, address);
+       if (!pmd)
                goto out;
-
-       pud = pud_offset(pgd, address);
-       if (!pud_present(*pud))
-               goto out;
-
-       pmd = pmd_offset(pud, address);
-       /* pmd can't go away or become huge under us */
-       if (!pmd_present(*pmd) || pmd_trans_huge(*pmd))
+       if (pmd_trans_huge(*pmd))
                goto out;
 
        anon_vma_lock(vma->anon_vma);
@@ -2028,9 +2027,7 @@ static void collapse_huge_page(struct mm_struct *mm,
        __SetPageUptodate(new_page);
        pgtable = pmd_pgtable(_pmd);
 
-       _pmd = mk_pmd(new_page, vma->vm_page_prot);
-       _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
-       _pmd = pmd_mkhuge(_pmd);
+       _pmd = mk_huge_pmd(new_page, vma);
 
        /*
         * spin_lock() below is not the equivalent of smp_wmb(), so
@@ -2064,8 +2061,6 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
                               unsigned long address,
                               struct page **hpage)
 {
-       pgd_t *pgd;
-       pud_t *pud;
        pmd_t *pmd;
        pte_t *pte, *_pte;
        int ret = 0, referenced = 0, none = 0;
@@ -2076,16 +2071,10 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
 
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 
-       pgd = pgd_offset(mm, address);
-       if (!pgd_present(*pgd))
-               goto out;
-
-       pud = pud_offset(pgd, address);
-       if (!pud_present(*pud))
+       pmd = mm_find_pmd(mm, address);
+       if (!pmd)
                goto out;
-
-       pmd = pmd_offset(pud, address);
-       if (!pmd_present(*pmd) || pmd_trans_huge(*pmd))
+       if (pmd_trans_huge(*pmd))
                goto out;
 
        pte = pte_offset_map_lock(mm, pmd, address, &ptl);
@@ -2193,20 +2182,11 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
                        progress++;
                        break;
                }
-
-               if ((!(vma->vm_flags & VM_HUGEPAGE) &&
-                    !khugepaged_always()) ||
-                   (vma->vm_flags & VM_NOHUGEPAGE)) {
-               skip:
+               if (!hugepage_vma_check(vma)) {
+skip:
                        progress++;
                        continue;
                }
-               if (!vma->anon_vma || vma->vm_ops)
-                       goto skip;
-               if (is_vma_temporary_stack(vma))
-                       goto skip;
-               VM_BUG_ON(vma->vm_flags & VM_NO_THP);
-
                hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
                hend = vma->vm_end & HPAGE_PMD_MASK;
                if (hstart >= hend)
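For the two alignment lines above: hstart rounds vm_start up to the next huge-page boundary while hend rounds vm_end down, so khugepaged only scans ranges that can hold a whole huge page. With a 2MB HPAGE_PMD_SIZE, for example, vm_start = 0x2ff000 yields hstart = (0x2ff000 + 0x1fffff) & ~0x1fffff = 0x400000.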
@@ -2379,22 +2359,12 @@ void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd)
 static void split_huge_page_address(struct mm_struct *mm,
                                    unsigned long address)
 {
-       pgd_t *pgd;
-       pud_t *pud;
        pmd_t *pmd;
 
        VM_BUG_ON(!(address & ~HPAGE_PMD_MASK));
 
-       pgd = pgd_offset(mm, address);
-       if (!pgd_present(*pgd))
-               return;
-
-       pud = pud_offset(pgd, address);
-       if (!pud_present(*pud))
-               return;
-
-       pmd = pmd_offset(pud, address);
-       if (!pmd_present(*pmd))
+       pmd = mm_find_pmd(mm, address);
+       if (!pmd)
                return;
        /*
         * Caller holds the mmap_sem write mode, so a huge pmd cannot