diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 1e21b4cf4c75c0bb9d2cf35329637779ff63a1c6..010d32944d14d64f7b6174bc1bed9a38c159dc9d 100644
@@ -102,10 +102,7 @@ static int set_recommended_min_free_kbytes(void)
        unsigned long recommended_min;
        extern int min_free_kbytes;
 
-       if (!test_bit(TRANSPARENT_HUGEPAGE_FLAG,
-                     &transparent_hugepage_flags) &&
-           !test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
-                     &transparent_hugepage_flags))
+       if (!khugepaged_enabled())
                return 0;
 
        for_each_populated_zone(zone)
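
For reference, khugepaged_enabled() (include/linux/khugepaged.h) collapses the two flag tests removed above into one helper, so the guard is semantically unchanged, just no longer open-coded. It amounts to roughly:

/* Sketch of the helper assumed by the hunk above. */
#define khugepaged_enabled()					\
	(transparent_hugepage_flags &				\
	 ((1 << TRANSPARENT_HUGEPAGE_FLAG) |			\
	  (1 << TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)))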
@@ -228,13 +225,6 @@ static ssize_t enabled_store(struct kobject *kobj,
                        ret = err;
        }
 
-       if (ret > 0 &&
-           (test_bit(TRANSPARENT_HUGEPAGE_FLAG,
-                     &transparent_hugepage_flags) ||
-            test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
-                     &transparent_hugepage_flags)))
-               set_recommended_min_free_kbytes();
-
        return ret;
 }
 static struct kobj_attribute enabled_attr =
@@ -569,8 +559,6 @@ static int __init hugepage_init(void)
 
        start_khugepaged();
 
-       set_recommended_min_free_kbytes();
-
        return 0;
 out:
        hugepage_exit_sysfs(hugepage_kobj);
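
The two preceding hunks drop set_recommended_min_free_kbytes() from enabled_store() and hugepage_init(); both call paths reach start_khugepaged(), which in this series is assumed to perform the min_free_kbytes update itself once khugepaged is actually enabled. A sketch of that shape (assumed from the surrounding series, not shown in these hunks):

/* Sketch: set_recommended_min_free_kbytes() moves into the common
 * start_khugepaged() path, so both hugepage_init() and enabled_store()
 * still trigger it when khugepaged is (re)enabled.
 */
static int start_khugepaged(void)
{
	int err = 0;

	if (khugepaged_enabled()) {
		if (!khugepaged_thread)
			khugepaged_thread = kthread_run(khugepaged, NULL,
							"khugepaged");
		if (unlikely(IS_ERR(khugepaged_thread))) {
			printk(KERN_ERR
			       "khugepaged: kthread_run(khugepaged) failed\n");
			err = PTR_ERR(khugepaged_thread);
			khugepaged_thread = NULL;
		}

		if (!list_empty(&khugepaged_scan.mm_head))
			wake_up_interruptible(&khugepaged_wait);

		set_recommended_min_free_kbytes();
	} else if (khugepaged_thread) {
		kthread_stop(khugepaged_thread);
		khugepaged_thread = NULL;
	}

	return err;
}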
@@ -610,19 +598,6 @@ out:
 }
 __setup("transparent_hugepage=", setup_transparent_hugepage);
 
-static void prepare_pmd_huge_pte(pgtable_t pgtable,
-                                struct mm_struct *mm)
-{
-       assert_spin_locked(&mm->page_table_lock);
-
-       /* FIFO */
-       if (!mm->pmd_huge_pte)
-               INIT_LIST_HEAD(&pgtable->lru);
-       else
-               list_add(&pgtable->lru, &mm->pmd_huge_pte->lru);
-       mm->pmd_huge_pte = pgtable;
-}
-
 static inline pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
 {
        if (likely(vma->vm_flags & VM_WRITE))
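
The removed prepare_pmd_huge_pte() open-coded what is now the generic pgtable_trans_huge_deposit() helper (mm/pgtable-generic.c, overridable per architecture via __HAVE_ARCH_PGTABLE_DEPOSIT). Its default version is essentially the same FIFO bookkeeping; a sketch:

/* Generic deposit helper assumed by this patch; sketch based on
 * mm/pgtable-generic.c.
 */
void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable)
{
	assert_spin_locked(&mm->page_table_lock);

	/* FIFO: queue the preallocated pte page behind earlier deposits */
	if (!mm->pmd_huge_pte)
		INIT_LIST_HEAD(&pgtable->lru);
	else
		list_add(&pgtable->lru, &mm->pmd_huge_pte->lru);
	mm->pmd_huge_pte = pgtable;
}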
@@ -664,7 +639,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
                 */
                page_add_new_anon_rmap(page, vma, haddr);
                set_pmd_at(mm, haddr, pmd, entry);
-               prepare_pmd_huge_pte(pgtable, mm);
+               pgtable_trans_huge_deposit(mm, pgtable);
                add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
                mm->nr_ptes++;
                spin_unlock(&mm->page_table_lock);
@@ -790,7 +765,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        pmdp_set_wrprotect(src_mm, addr, src_pmd);
        pmd = pmd_mkold(pmd_wrprotect(pmd));
        set_pmd_at(dst_mm, addr, dst_pmd, pmd);
-       prepare_pmd_huge_pte(pgtable, dst_mm);
+       pgtable_trans_huge_deposit(dst_mm, pgtable);
        dst_mm->nr_ptes++;
 
        ret = 0;
@@ -801,25 +776,6 @@ out:
        return ret;
 }
 
-/* no "address" argument so destroys page coloring of some arch */
-pgtable_t get_pmd_huge_pte(struct mm_struct *mm)
-{
-       pgtable_t pgtable;
-
-       assert_spin_locked(&mm->page_table_lock);
-
-       /* FIFO */
-       pgtable = mm->pmd_huge_pte;
-       if (list_empty(&pgtable->lru))
-               mm->pmd_huge_pte = NULL;
-       else {
-               mm->pmd_huge_pte = list_entry(pgtable->lru.next,
-                                             struct page, lru);
-               list_del(&pgtable->lru);
-       }
-       return pgtable;
-}
-
 static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
                                        struct vm_area_struct *vma,
                                        unsigned long address,
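
Likewise, get_pmd_huge_pte() is replaced by the generic pgtable_trans_huge_withdraw() (overridable via __HAVE_ARCH_PGTABLE_WITHDRAW); the default implementation mirrors the removed code, roughly:

/* Generic withdraw helper that replaces get_pmd_huge_pte(); sketch based
 * on mm/pgtable-generic.c.
 */
/* no "address" argument so destroys page coloring of some arch */
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm)
{
	pgtable_t pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO: hand back the most recently deposited pte page */
	pgtable = mm->pmd_huge_pte;
	if (list_empty(&pgtable->lru))
		mm->pmd_huge_pte = NULL;
	else {
		mm->pmd_huge_pte = list_entry(pgtable->lru.next,
					      struct page, lru);
		list_del(&pgtable->lru);
	}
	return pgtable;
}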
@@ -875,7 +831,7 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
        pmdp_clear_flush_notify(vma, haddr, pmd);
        /* leave pmd empty until pte is filled */
 
-       pgtable = get_pmd_huge_pte(mm);
+       pgtable = pgtable_trans_huge_withdraw(mm);
        pmd_populate(mm, &_pmd, pgtable);
 
        for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
@@ -1040,7 +996,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
        if (__pmd_trans_huge_lock(pmd, vma) == 1) {
                struct page *page;
                pgtable_t pgtable;
-               pgtable = get_pmd_huge_pte(tlb->mm);
+               pgtable = pgtable_trans_huge_withdraw(tlb->mm);
                page = pmd_page(*pmd);
                pmd_clear(pmd);
                tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
@@ -1357,11 +1313,11 @@ static int __split_huge_page_map(struct page *page,
        pmd = page_check_address_pmd(page, mm, address,
                                     PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG);
        if (pmd) {
-               pgtable = get_pmd_huge_pte(mm);
+               pgtable = pgtable_trans_huge_withdraw(mm);
                pmd_populate(mm, &_pmd, pgtable);
 
-               for (i = 0, haddr = address; i < HPAGE_PMD_NR;
-                    i++, haddr += PAGE_SIZE) {
+               haddr = address;
+               for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
                        pte_t *pte, entry;
                        BUG_ON(PageCompound(page+i));
                        entry = mk_pte(page + i, vma->vm_page_prot);
@@ -1405,8 +1361,7 @@ static int __split_huge_page_map(struct page *page,
                 * SMP TLB and finally we write the non-huge version
                 * of the pmd entry with pmd_populate.
                 */
-               set_pmd_at(mm, address, pmd, pmd_mknotpresent(*pmd));
-               flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+               pmdp_invalidate(vma, address, pmd);
                pmd_populate(mm, pmd, pgtable);
                ret = 1;
        }
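
pmdp_invalidate() folds the removed set_pmd_at()/flush_tlb_range() pair into one helper; the generic fallback does exactly that, so behaviour is unchanged wherever no architecture override exists. A sketch:

/* Generic pmdp_invalidate() fallback (mm/pgtable-generic.c); archs can
 * provide their own via __HAVE_ARCH_PMDP_INVALIDATE. Matches the
 * open-coded sequence removed above.
 */
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		     pmd_t *pmdp)
{
	/* Mark the huge pmd not-present, then flush the huge-page range */
	set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(*pmdp));
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}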
@@ -1495,6 +1450,8 @@ out:
 int hugepage_madvise(struct vm_area_struct *vma,
                     unsigned long *vm_flags, int advice)
 {
+       struct mm_struct *mm = vma->vm_mm;
+
        switch (advice) {
        case MADV_HUGEPAGE:
                /*
@@ -1502,6 +1459,8 @@ int hugepage_madvise(struct vm_area_struct *vma,
                 */
                if (*vm_flags & (VM_HUGEPAGE | VM_NO_THP))
                        return -EINVAL;
+               if (mm->def_flags & VM_NOHUGEPAGE)
+                       return -EINVAL;
                *vm_flags &= ~VM_NOHUGEPAGE;
                *vm_flags |= VM_HUGEPAGE;
                /*
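
The new mm->def_flags check makes MADV_HUGEPAGE fail outright when the whole mm has been forced to VM_NOHUGEPAGE. As a hypothetical illustration of the situation being guarded against (disable_thp_for_mm() is not a real kernel function; on s390 the equivalent happens when an mm is prepared for KVM use):

/* Hypothetical example: some code path forces VM_NOHUGEPAGE as a per-mm
 * default, so every new vma in this mm inherits the flag.
 */
static void disable_thp_for_mm(struct mm_struct *mm)
{
	down_write(&mm->mmap_sem);
	mm->def_flags |= VM_NOHUGEPAGE;	/* new vmas inherit this flag */
	up_write(&mm->mmap_sem);
}

With def_flags set this way, letting MADV_HUGEPAGE clear VM_NOHUGEPAGE per vma would contradict the per-mm policy, so returning -EINVAL keeps the two consistent.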
@@ -1827,28 +1786,34 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
        }
 }
 
-static void collapse_huge_page(struct mm_struct *mm,
-                              unsigned long address,
-                              struct page **hpage,
-                              struct vm_area_struct *vma,
-                              int node)
+static void khugepaged_alloc_sleep(void)
 {
-       pgd_t *pgd;
-       pud_t *pud;
-       pmd_t *pmd, _pmd;
-       pte_t *pte;
-       pgtable_t pgtable;
-       struct page *new_page;
-       spinlock_t *ptl;
-       int isolated;
-       unsigned long hstart, hend;
+       wait_event_freezable_timeout(khugepaged_wait, false,
+                       msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
+}
 
-       VM_BUG_ON(address & ~HPAGE_PMD_MASK);
-#ifndef CONFIG_NUMA
-       up_read(&mm->mmap_sem);
-       VM_BUG_ON(!*hpage);
-       new_page = *hpage;
-#else
+#ifdef CONFIG_NUMA
+static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
+{
+       if (IS_ERR(*hpage)) {
+               if (!*wait)
+                       return false;
+
+               *wait = false;
+               khugepaged_alloc_sleep();
+       } else if (*hpage) {
+               put_page(*hpage);
+               *hpage = NULL;
+       }
+
+       return true;
+}
+
+static struct page
+*khugepaged_alloc_page(struct page **hpage, struct mm_struct *mm,
+                      struct vm_area_struct *vma, unsigned long address,
+                      int node)
+{
        VM_BUG_ON(*hpage);
        /*
         * Allocate the page while the vma is still valid and under
@@ -1860,7 +1825,7 @@ static void collapse_huge_page(struct mm_struct *mm,
         * mmap_sem in read mode is good idea also to allow greater
         * scalability.
         */
-       new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
+       *hpage = alloc_hugepage_vma(khugepaged_defrag(), vma, address,

                                      node, __GFP_OTHER_NODE);
 
        /*
@@ -1868,20 +1833,83 @@ static void collapse_huge_page(struct mm_struct *mm,
         * preparation for taking it in write mode.
         */
        up_read(&mm->mmap_sem);
-       if (unlikely(!new_page)) {
+       if (unlikely(!*hpage)) {
                count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
                *hpage = ERR_PTR(-ENOMEM);
-               return;
+               return NULL;
        }
+
        count_vm_event(THP_COLLAPSE_ALLOC);
-#endif
+       return *hpage;
+}
+#else
+static struct page *khugepaged_alloc_hugepage(bool *wait)
+{
+       struct page *hpage;
 
-       if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
-#ifdef CONFIG_NUMA
-               put_page(new_page);
+       do {
+               hpage = alloc_hugepage(khugepaged_defrag());
+               if (!hpage) {
+                       count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
+                       if (!*wait)
+                               return NULL;
+
+                       *wait = false;
+                       khugepaged_alloc_sleep();
+               } else
+                       count_vm_event(THP_COLLAPSE_ALLOC);
+       } while (unlikely(!hpage) && likely(khugepaged_enabled()));
+
+       return hpage;
+}
+
+static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
+{
+       if (!*hpage)
+               *hpage = khugepaged_alloc_hugepage(wait);
+
+       if (unlikely(!*hpage))
+               return false;
+
+       return true;
+}
+
+static struct page
+*khugepaged_alloc_page(struct page **hpage, struct mm_struct *mm,
+                      struct vm_area_struct *vma, unsigned long address,
+                      int node)
+{
+       up_read(&mm->mmap_sem);
+       VM_BUG_ON(!*hpage);
+       return *hpage;
+}
 #endif
+
+static void collapse_huge_page(struct mm_struct *mm,
+                                  unsigned long address,
+                                  struct page **hpage,
+                                  struct vm_area_struct *vma,
+                                  int node)
+{
+       pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd, _pmd;
+       pte_t *pte;
+       pgtable_t pgtable;
+       struct page *new_page;
+       spinlock_t *ptl;
+       int isolated;
+       unsigned long hstart, hend;
+
+       VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+
+       /* release the mmap_sem read lock. */
+       new_page = khugepaged_alloc_page(hpage, mm, vma, address, node);
+       if (!new_page)
+               return;
+
+       if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL)))
                return;
-       }
 
        /*
         * Prevent all access to pagetables with the exception of
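
khugepaged_alloc_hugepage() above depends on the !NUMA alloc_hugepage() helper, which is not visible in this hunk; it is essentially a single HPAGE_PMD_ORDER allocation, roughly:

#ifndef CONFIG_NUMA
/* Sketch of the !NUMA helper used by khugepaged_alloc_hugepage() above;
 * alloc_hugepage_gfpmask() selects the THP allocation gfp mask, with
 * "defrag" controlling whether the allocation may block for reclaim and
 * compaction.
 */
static inline struct page *alloc_hugepage(int defrag)
{
	return alloc_pages(alloc_hugepage_gfpmask(defrag, 0),
			   HPAGE_PMD_ORDER);
}
#endif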
@@ -1960,8 +1988,6 @@ static void collapse_huge_page(struct mm_struct *mm,
        pte_unmap(pte);
        __SetPageUptodate(new_page);
        pgtable = pmd_pgtable(_pmd);
-       VM_BUG_ON(page_count(pgtable) != 1);
-       VM_BUG_ON(page_mapcount(pgtable) != 0);
 
        _pmd = mk_pmd(new_page, vma->vm_page_prot);
        _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
@@ -1979,12 +2005,11 @@ static void collapse_huge_page(struct mm_struct *mm,
        page_add_new_anon_rmap(new_page, vma, address);
        set_pmd_at(mm, address, pmd, _pmd);
        update_mmu_cache(vma, address, _pmd);
-       prepare_pmd_huge_pte(pgtable, mm);
+       pgtable_trans_huge_deposit(mm, pgtable);
        spin_unlock(&mm->page_table_lock);
 
-#ifndef CONFIG_NUMA
        *hpage = NULL;
-#endif
+
        khugepaged_pages_collapsed++;
 out_up_write:
        up_write(&mm->mmap_sem);
@@ -1992,9 +2017,6 @@ out_up_write:
 
 out:
        mem_cgroup_uncharge_page(new_page);
-#ifdef CONFIG_NUMA
-       put_page(new_page);
-#endif
        goto out_up_write;
 }
 
@@ -2222,29 +2244,20 @@ static int khugepaged_wait_event(void)
                kthread_should_stop();
 }
 
-static void khugepaged_do_scan(struct page **hpage)
+static void khugepaged_do_scan(void)
 {
+       struct page *hpage = NULL;
        unsigned int progress = 0, pass_through_head = 0;
        unsigned int pages = khugepaged_pages_to_scan;
+       bool wait = true;
 
        barrier(); /* write khugepaged_pages_to_scan to local stack */
 
        while (progress < pages) {
-               cond_resched();
-
-#ifndef CONFIG_NUMA
-               if (!*hpage) {
-                       *hpage = alloc_hugepage(khugepaged_defrag());
-                       if (unlikely(!*hpage)) {
-                               count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
-                               break;
-                       }
-                       count_vm_event(THP_COLLAPSE_ALLOC);
-               }
-#else
-               if (IS_ERR(*hpage))
+               if (!khugepaged_prealloc_page(&hpage, &wait))
                        break;
-#endif
+
+               cond_resched();
 
                if (unlikely(kthread_should_stop() || freezing(current)))
                        break;
@@ -2255,36 +2268,15 @@ static void khugepaged_do_scan(struct page **hpage)
                if (khugepaged_has_work() &&
                    pass_through_head < 2)
                        progress += khugepaged_scan_mm_slot(pages - progress,
-                                                           hpage);
+                                                           &hpage);
                else
                        progress = pages;
                spin_unlock(&khugepaged_mm_lock);
        }
-}
-
-static void khugepaged_alloc_sleep(void)
-{
-       wait_event_freezable_timeout(khugepaged_wait, false,
-                       msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
-}
 
-#ifndef CONFIG_NUMA
-static struct page *khugepaged_alloc_hugepage(void)
-{
-       struct page *hpage;
-
-       do {
-               hpage = alloc_hugepage(khugepaged_defrag());
-               if (!hpage) {
-                       count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
-                       khugepaged_alloc_sleep();
-               } else
-                       count_vm_event(THP_COLLAPSE_ALLOC);
-       } while (unlikely(!hpage) &&
-                likely(khugepaged_enabled()));
-       return hpage;
+       if (!IS_ERR_OR_NULL(hpage))
+               put_page(hpage);
 }
-#endif
 
 static void khugepaged_wait_work(void)
 {
@@ -2304,31 +2296,6 @@ static void khugepaged_wait_work(void)
                wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
 }
 
-static void khugepaged_loop(void)
-{
-       struct page *hpage = NULL;
-
-       while (likely(khugepaged_enabled())) {
-#ifndef CONFIG_NUMA
-               hpage = khugepaged_alloc_hugepage();
-               if (unlikely(!hpage))
-                       break;
-#else
-               if (IS_ERR(hpage)) {
-                       khugepaged_alloc_sleep();
-                       hpage = NULL;
-               }
-#endif
-
-               khugepaged_do_scan(&hpage);
-
-               if (!IS_ERR_OR_NULL(hpage))
-                       put_page(hpage);
-
-               khugepaged_wait_work();
-       }
-}
-
 static int khugepaged(void *none)
 {
        struct mm_slot *mm_slot;
@@ -2336,8 +2303,10 @@ static int khugepaged(void *none)
        set_freezable();
        set_user_nice(current, 19);
 
-       while (!kthread_should_stop())
-               khugepaged_loop();
+       while (!kthread_should_stop()) {
+               khugepaged_do_scan();
+               khugepaged_wait_work();
+       }
 
        spin_lock(&khugepaged_mm_lock);
        mm_slot = khugepaged_scan.mm_slot;
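
With khugepaged_loop() folded away, the thread body is a plain scan/wait cycle. For completeness, khugepaged_wait_work(), which appears above only as context, behaves roughly like this sketch:

/* Sketch of khugepaged_wait_work(): if there is pending work, sleep for
 * scan_sleep_millisecs (0 means "keep scanning immediately"); otherwise
 * block until khugepaged_wait_event() reports new work or the thread is
 * asked to stop.
 */
static void khugepaged_wait_work(void)
{
	if (khugepaged_has_work()) {
		if (!khugepaged_scan_sleep_millisecs)
			return;

		wait_event_freezable_timeout(khugepaged_wait,
					     kthread_should_stop(),
			msecs_to_jiffies(khugepaged_scan_sleep_millisecs));
		return;
	}

	if (khugepaged_enabled())
		wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
}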