1 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
4 #include <linux/sched.h>
5 #include <linux/sched/mm.h>
6 #include <linux/sched/coredump.h>
7 #include <linux/mmu_notifier.h>
8 #include <linux/rmap.h>
9 #include <linux/swap.h>
10 #include <linux/mm_inline.h>
11 #include <linux/kthread.h>
12 #include <linux/khugepaged.h>
13 #include <linux/freezer.h>
14 #include <linux/mman.h>
15 #include <linux/hashtable.h>
16 #include <linux/userfaultfd_k.h>
17 #include <linux/page_idle.h>
18 #include <linux/swapops.h>
19 #include <linux/shmem_fs.h>
22 #include <asm/pgalloc.h>
32 SCAN_LACK_REFERENCED_PAGE,
46 SCAN_ALLOC_HUGE_PAGE_FAIL,
47 SCAN_CGROUP_CHARGE_FAIL,
52 #define CREATE_TRACE_POINTS
53 #include <trace/events/huge_memory.h>
55 /* default: scan 8*512 ptes (or vmas) every 10 seconds */
56 static unsigned int khugepaged_pages_to_scan __read_mostly;
57 static unsigned int khugepaged_pages_collapsed;
58 static unsigned int khugepaged_full_scans;
59 static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
60 /* during fragmentation poll the hugepage allocator once every minute */
61 static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
62 static unsigned long khugepaged_sleep_expire;
63 static DEFINE_SPINLOCK(khugepaged_mm_lock);
64 static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
66 * By default, collapse hugepages if there is at least one pte mapped like
67 * it would have been if the vma had been large enough during the page
68 * fault.
70 static unsigned int khugepaged_max_ptes_none __read_mostly;
71 static unsigned int khugepaged_max_ptes_swap __read_mostly;
73 #define MM_SLOTS_HASH_BITS 10
74 static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
76 static struct kmem_cache *mm_slot_cache __read_mostly;
79 * struct mm_slot - hash lookup from mm to mm_slot
80 * @hash: hash collision list
81 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
82 * @mm: the mm that this information is valid for
85 struct hlist_node hash;
86 struct list_head mm_node;
91 * struct khugepaged_scan - cursor for scanning
92 * @mm_head: the head of the mm list to scan
93 * @mm_slot: the current mm_slot we are scanning
94 * @address: the next address inside that mm to be scanned
96 * There is only one khugepaged_scan instance of this cursor structure.
98 struct khugepaged_scan {
99 struct list_head mm_head;
100 struct mm_slot *mm_slot;
101 unsigned long address;
104 static struct khugepaged_scan khugepaged_scan = {
105 .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
109 static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
110 struct kobj_attribute *attr,
113 return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
116 static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
117 struct kobj_attribute *attr,
118 const char *buf, size_t count)
123 err = kstrtoul(buf, 10, &msecs);
124 if (err || msecs > UINT_MAX)
127 khugepaged_scan_sleep_millisecs = msecs;
128 khugepaged_sleep_expire = 0;
129 wake_up_interruptible(&khugepaged_wait);
133 static struct kobj_attribute scan_sleep_millisecs_attr =
134 __ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
135 scan_sleep_millisecs_store);
137 static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
138 struct kobj_attribute *attr,
141 return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
144 static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
145 struct kobj_attribute *attr,
146 const char *buf, size_t count)
151 err = kstrtoul(buf, 10, &msecs);
152 if (err || msecs > UINT_MAX)
155 khugepaged_alloc_sleep_millisecs = msecs;
156 khugepaged_sleep_expire = 0;
157 wake_up_interruptible(&khugepaged_wait);
161 static struct kobj_attribute alloc_sleep_millisecs_attr =
162 __ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
163 alloc_sleep_millisecs_store);
165 static ssize_t pages_to_scan_show(struct kobject *kobj,
166 struct kobj_attribute *attr,
169 return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
171 static ssize_t pages_to_scan_store(struct kobject *kobj,
172 struct kobj_attribute *attr,
173 const char *buf, size_t count)
178 err = kstrtoul(buf, 10, &pages);
179 if (err || !pages || pages > UINT_MAX)
182 khugepaged_pages_to_scan = pages;
186 static struct kobj_attribute pages_to_scan_attr =
187 __ATTR(pages_to_scan, 0644, pages_to_scan_show,
188 pages_to_scan_store);
190 static ssize_t pages_collapsed_show(struct kobject *kobj,
191 struct kobj_attribute *attr,
194 return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
196 static struct kobj_attribute pages_collapsed_attr =
197 __ATTR_RO(pages_collapsed);
199 static ssize_t full_scans_show(struct kobject *kobj,
200 struct kobj_attribute *attr,
203 return sprintf(buf, "%u\n", khugepaged_full_scans);
205 static struct kobj_attribute full_scans_attr =
206 __ATTR_RO(full_scans);
208 static ssize_t khugepaged_defrag_show(struct kobject *kobj,
209 struct kobj_attribute *attr, char *buf)
211 return single_hugepage_flag_show(kobj, attr, buf,
212 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
214 static ssize_t khugepaged_defrag_store(struct kobject *kobj,
215 struct kobj_attribute *attr,
216 const char *buf, size_t count)
218 return single_hugepage_flag_store(kobj, attr, buf, count,
219 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
221 static struct kobj_attribute khugepaged_defrag_attr =
222 __ATTR(defrag, 0644, khugepaged_defrag_show,
223 khugepaged_defrag_store);
226 * max_ptes_none controls whether khugepaged should collapse hugepages over
227 * any unmapped ptes, in turn potentially increasing the memory
228 * footprint of the vmas. When max_ptes_none is 0, khugepaged will not
229 * reduce the available free memory in the system as it
230 * runs. Increasing max_ptes_none will instead potentially reduce the
231 * free memory in the system during the khugepaged scan.
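/*
 * Worked example, assuming 2 MiB THP (HPAGE_PMD_NR == 512): with
 * max_ptes_none == 511 a range with a single present pte may be collapsed,
 * instantiating up to 511 previously unmapped pages behind the huge pmd;
 * with max_ptes_none == 0 only fully populated ranges are collapsed.
 */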
233 static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
234 struct kobj_attribute *attr,
237 return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
239 static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
240 struct kobj_attribute *attr,
241 const char *buf, size_t count)
244 unsigned long max_ptes_none;
246 err = kstrtoul(buf, 10, &max_ptes_none);
247 if (err || max_ptes_none > HPAGE_PMD_NR-1)
250 khugepaged_max_ptes_none = max_ptes_none;
254 static struct kobj_attribute khugepaged_max_ptes_none_attr =
255 __ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
256 khugepaged_max_ptes_none_store);
258 static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
259 struct kobj_attribute *attr,
262 return sprintf(buf, "%u\n", khugepaged_max_ptes_swap);
265 static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
266 struct kobj_attribute *attr,
267 const char *buf, size_t count)
270 unsigned long max_ptes_swap;
272 err = kstrtoul(buf, 10, &max_ptes_swap);
273 if (err || max_ptes_swap > HPAGE_PMD_NR-1)
276 khugepaged_max_ptes_swap = max_ptes_swap;
281 static struct kobj_attribute khugepaged_max_ptes_swap_attr =
282 __ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
283 khugepaged_max_ptes_swap_store);
285 static struct attribute *khugepaged_attr[] = {
286 &khugepaged_defrag_attr.attr,
287 &khugepaged_max_ptes_none_attr.attr,
288 &pages_to_scan_attr.attr,
289 &pages_collapsed_attr.attr,
290 &full_scans_attr.attr,
291 &scan_sleep_millisecs_attr.attr,
292 &alloc_sleep_millisecs_attr.attr,
293 &khugepaged_max_ptes_swap_attr.attr,
297 struct attribute_group khugepaged_attr_group = {
298 .attrs = khugepaged_attr,
299 .name = "khugepaged",
301 #endif /* CONFIG_SYSFS */
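/*
 * Illustrative usage, assuming a typical sysfs layout where this attribute
 * group is registered under the transparent_hugepage kobject:
 *
 *   # cat /sys/kernel/mm/transparent_hugepage/khugepaged/pages_to_scan
 *   # echo 30000 > /sys/kernel/mm/transparent_hugepage/khugepaged/scan_sleep_millisecs
 */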
303 #define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)
305 int hugepage_madvise(struct vm_area_struct *vma,
306 unsigned long *vm_flags, int advice)
312 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
313 * can't handle this properly after s390_enable_sie, so we simply
314 * ignore the madvise to prevent qemu from causing a SIGSEGV.
316 if (mm_has_pgste(vma->vm_mm))
319 *vm_flags &= ~VM_NOHUGEPAGE;
320 *vm_flags |= VM_HUGEPAGE;
322 * If the vma becomes good for khugepaged to scan,
323 * register it here without waiting for a page fault that
324 * may not happen any time soon.
326 if (!(*vm_flags & VM_NO_KHUGEPAGED) &&
327 khugepaged_enter_vma_merge(vma, *vm_flags))
330 case MADV_NOHUGEPAGE:
331 *vm_flags &= ~VM_HUGEPAGE;
332 *vm_flags |= VM_NOHUGEPAGE;
334 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
335 * this vma even if we leave the mm registered in khugepaged if
336 * it got registered before VM_NOHUGEPAGE was set.
344 int __init khugepaged_init(void)
346 mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
347 sizeof(struct mm_slot),
348 __alignof__(struct mm_slot), 0, NULL);
352 khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
353 khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
354 khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
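	/*
	 * Illustrative arithmetic, assuming 4 KiB base pages and 2 MiB
	 * PMD-sized huge pages (HPAGE_PMD_NR == 512): the defaults above are
	 * pages_to_scan == 4096 ptes per scan pass, max_ptes_none == 511 and
	 * max_ptes_swap == 64.
	 */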
359 void __init khugepaged_destroy(void)
361 kmem_cache_destroy(mm_slot_cache);
364 static inline struct mm_slot *alloc_mm_slot(void)
366 if (!mm_slot_cache) /* initialization failed */
368 return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
371 static inline void free_mm_slot(struct mm_slot *mm_slot)
373 kmem_cache_free(mm_slot_cache, mm_slot);
376 static struct mm_slot *get_mm_slot(struct mm_struct *mm)
378 struct mm_slot *mm_slot;
380 hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
381 if (mm == mm_slot->mm)
387 static void insert_to_mm_slots_hash(struct mm_struct *mm,
388 struct mm_slot *mm_slot)
391 hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
394 static inline int khugepaged_test_exit(struct mm_struct *mm)
396 return atomic_read(&mm->mm_users) == 0;
399 int __khugepaged_enter(struct mm_struct *mm)
401 struct mm_slot *mm_slot;
404 mm_slot = alloc_mm_slot();
408 /* __khugepaged_exit() must not run from under us */
409 VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
410 if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
411 free_mm_slot(mm_slot);
415 spin_lock(&khugepaged_mm_lock);
416 insert_to_mm_slots_hash(mm, mm_slot);
418 * Insert just behind the scanning cursor, to let the area settle
419 * down.
421 wakeup = list_empty(&khugepaged_scan.mm_head);
422 list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
423 spin_unlock(&khugepaged_mm_lock);
427 wake_up_interruptible(&khugepaged_wait);
432 int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
433 unsigned long vm_flags)
435 unsigned long hstart, hend;
438 * Not yet faulted in so we will register later in the
439 * page fault if needed.
442 if (vma->vm_ops || (vm_flags & VM_NO_KHUGEPAGED))
443 /* khugepaged not yet working on file or special mappings */
445 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
446 hend = vma->vm_end & HPAGE_PMD_MASK;
448 return khugepaged_enter(vma, vm_flags);
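/*
 * Worked example for the rounding above, assuming HPAGE_PMD_SIZE == 2 MiB:
 * a vma spanning [0x1ff000, 0x601000) gives hstart == 0x200000 and
 * hend == 0x600000, i.e. registration only makes sense when the vma covers
 * at least one fully aligned, huge-page-sized range.
 */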
452 void __khugepaged_exit(struct mm_struct *mm)
454 struct mm_slot *mm_slot;
457 spin_lock(&khugepaged_mm_lock);
458 mm_slot = get_mm_slot(mm);
459 if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
460 hash_del(&mm_slot->hash);
461 list_del(&mm_slot->mm_node);
464 spin_unlock(&khugepaged_mm_lock);
467 clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
468 free_mm_slot(mm_slot);
470 } else if (mm_slot) {
472 * This is required to serialize against
473 * khugepaged_test_exit() (which is guaranteed to run
474 * under mmap_sem read mode). Stop here (after we
475 * return, all pagetables will be destroyed) until
476 * khugepaged has finished working on the pagetables
477 * under the mmap_sem.
479 down_write(&mm->mmap_sem);
480 up_write(&mm->mmap_sem);
484 static void release_pte_page(struct page *page)
486 dec_node_page_state(page, NR_ISOLATED_ANON + page_is_file_cache(page));
488 putback_lru_page(page);
491 static void release_pte_pages(pte_t *pte, pte_t *_pte)
493 while (--_pte >= pte) {
494 pte_t pteval = *_pte;
495 if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)))
496 release_pte_page(pte_page(pteval));
500 static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
501 unsigned long address,
504 struct page *page = NULL;
506 int none_or_zero = 0, result = 0, referenced = 0;
507 bool writable = false;
509 for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
510 _pte++, address += PAGE_SIZE) {
511 pte_t pteval = *_pte;
512 if (pte_none(pteval) || (pte_present(pteval) &&
513 is_zero_pfn(pte_pfn(pteval)))) {
514 if (!userfaultfd_armed(vma) &&
515 ++none_or_zero <= khugepaged_max_ptes_none) {
518 result = SCAN_EXCEED_NONE_PTE;
522 if (!pte_present(pteval)) {
523 result = SCAN_PTE_NON_PRESENT;
526 page = vm_normal_page(vma, address, pteval);
527 if (unlikely(!page)) {
528 result = SCAN_PAGE_NULL;
532 VM_BUG_ON_PAGE(PageCompound(page), page);
533 VM_BUG_ON_PAGE(!PageAnon(page), page);
536 * We can do it before isolate_lru_page because the
537 * page can't be freed from under us. NOTE: PG_lock
538 * is needed to serialize against split_huge_page
539 * when invoked from the VM.
541 if (!trylock_page(page)) {
542 result = SCAN_PAGE_LOCK;
547 * cannot use mapcount: can't collapse if there's a gup pin.
548 * The page must only be referenced by the scanned process
549 * and page swap cache.
551 if (page_count(page) != 1 + PageSwapCache(page)) {
553 result = SCAN_PAGE_COUNT;
556 if (pte_write(pteval)) {
559 if (PageSwapCache(page) &&
560 !reuse_swap_page(page, NULL)) {
562 result = SCAN_SWAP_CACHE_PAGE;
566 * Page is not in the swap cache. It can be collapsed
567 * into a THP.
572 * Isolate the page to avoid collapsing a hugepage
573 * currently in use by the VM.
575 if (isolate_lru_page(page)) {
577 result = SCAN_DEL_PAGE_LRU;
580 inc_node_page_state(page,
581 NR_ISOLATED_ANON + page_is_file_cache(page));
582 VM_BUG_ON_PAGE(!PageLocked(page), page);
583 VM_BUG_ON_PAGE(PageLRU(page), page);
585 /* There should be enough young ptes to collapse the page */
586 if (pte_young(pteval) ||
587 page_is_young(page) || PageReferenced(page) ||
588 mmu_notifier_test_young(vma->vm_mm, address))
591 if (likely(writable)) {
592 if (likely(referenced)) {
593 result = SCAN_SUCCEED;
594 trace_mm_collapse_huge_page_isolate(page, none_or_zero,
595 referenced, writable, result);
599 result = SCAN_PAGE_RO;
603 release_pte_pages(pte, _pte);
604 trace_mm_collapse_huge_page_isolate(page, none_or_zero,
605 referenced, writable, result);
609 static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
610 struct vm_area_struct *vma,
611 unsigned long address,
615 for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
616 _pte++, page++, address += PAGE_SIZE) {
617 pte_t pteval = *_pte;
618 struct page *src_page;
620 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
621 clear_user_highpage(page, address);
622 add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
623 if (is_zero_pfn(pte_pfn(pteval))) {
625 * ptl mostly unnecessary.
629 * paravirt calls inside pte_clear here are
630 * superfluous.
632 pte_clear(vma->vm_mm, address, _pte);
636 src_page = pte_page(pteval);
637 copy_user_highpage(page, src_page, address, vma);
638 VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page);
639 release_pte_page(src_page);
641 * ptl mostly unnecessary, but preempt has to
642 * be disabled to update the per-cpu stats
643 * inside page_remove_rmap().
647 * paravirt calls inside pte_clear here are
648 * superfluous.
650 pte_clear(vma->vm_mm, address, _pte);
651 page_remove_rmap(src_page, false);
653 free_page_and_swap_cache(src_page);
658 static void khugepaged_alloc_sleep(void)
662 add_wait_queue(&khugepaged_wait, &wait);
663 freezable_schedule_timeout_interruptible(
664 msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
665 remove_wait_queue(&khugepaged_wait, &wait);
668 static int khugepaged_node_load[MAX_NUMNODES];
670 static bool khugepaged_scan_abort(int nid)
675 * If node_reclaim_mode is disabled, then no extra effort is made to
676 * allocate memory locally.
678 if (!node_reclaim_mode)
681 /* If there is a count for this node already, it must be acceptable */
682 if (khugepaged_node_load[nid])
685 for (i = 0; i < MAX_NUMNODES; i++) {
686 if (!khugepaged_node_load[i])
688 if (node_distance(nid, i) > RECLAIM_DISTANCE)
694 /* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
695 static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
697 return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
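/*
 * Note (as of this kernel generation): GFP_TRANSHUGE is GFP_TRANSHUGE_LIGHT
 * plus __GFP_DIRECT_RECLAIM, so enabling khugepaged defrag only adds the
 * willingness to enter direct reclaim/compaction for the hugepage allocation.
 */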
701 static int khugepaged_find_target_node(void)
703 static int last_khugepaged_target_node = NUMA_NO_NODE;
704 int nid, target_node = 0, max_value = 0;
706 /* find first node with max normal pages hit */
707 for (nid = 0; nid < MAX_NUMNODES; nid++)
708 if (khugepaged_node_load[nid] > max_value) {
709 max_value = khugepaged_node_load[nid];
713 /* do some balancing if several nodes have the same hit record */
714 if (target_node <= last_khugepaged_target_node)
715 for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
717 if (max_value == khugepaged_node_load[nid]) {
722 last_khugepaged_target_node = target_node;
726 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
728 if (IS_ERR(*hpage)) {
734 khugepaged_alloc_sleep();
744 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
746 VM_BUG_ON_PAGE(*hpage, *hpage);
748 *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
749 if (unlikely(!*hpage)) {
750 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
751 *hpage = ERR_PTR(-ENOMEM);
755 prep_transhuge_page(*hpage);
756 count_vm_event(THP_COLLAPSE_ALLOC);
760 static int khugepaged_find_target_node(void)
765 static inline struct page *alloc_khugepaged_hugepage(void)
769 page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
772 prep_transhuge_page(page);
776 static struct page *khugepaged_alloc_hugepage(bool *wait)
781 hpage = alloc_khugepaged_hugepage();
783 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
788 khugepaged_alloc_sleep();
790 count_vm_event(THP_COLLAPSE_ALLOC);
791 } while (unlikely(!hpage) && likely(khugepaged_enabled()));
796 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
799 *hpage = khugepaged_alloc_hugepage(wait);
801 if (unlikely(!*hpage))
808 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
816 static bool hugepage_vma_check(struct vm_area_struct *vma)
818 if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
819 (vma->vm_flags & VM_NOHUGEPAGE))
821 if (shmem_file(vma->vm_file)) {
822 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
824 return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
827 if (!vma->anon_vma || vma->vm_ops)
829 if (is_vma_temporary_stack(vma))
831 return !(vma->vm_flags & VM_NO_KHUGEPAGED);
835 * If the mmap_sem was temporarily dropped, revalidate the vma
836 * after taking the mmap_sem again.
837 * Return 0 on success, otherwise return a non-zero
838 * value (scan code).
841 static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
842 struct vm_area_struct **vmap)
844 struct vm_area_struct *vma;
845 unsigned long hstart, hend;
847 if (unlikely(khugepaged_test_exit(mm)))
848 return SCAN_ANY_PROCESS;
850 *vmap = vma = find_vma(mm, address);
852 return SCAN_VMA_NULL;
854 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
855 hend = vma->vm_end & HPAGE_PMD_MASK;
856 if (address < hstart || address + HPAGE_PMD_SIZE > hend)
857 return SCAN_ADDRESS_RANGE;
858 if (!hugepage_vma_check(vma))
859 return SCAN_VMA_CHECK;
864 * Bring missing pages in from swap, to complete THP collapse.
865 * Only done if khugepaged_scan_pmd believes it is worthwhile.
867 * Called and returns without pte mapped or spinlocks held,
868 * but with mmap_sem held to protect against vma changes.
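/*
 * Illustrative threshold: with 2 MiB THP (HPAGE_PMD_NR == 512) the swap-in
 * below is only attempted when at least 256 of the 512 ptes were found
 * referenced by the scan.
 */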
871 static bool __collapse_huge_page_swapin(struct mm_struct *mm,
872 struct vm_area_struct *vma,
873 unsigned long address, pmd_t *pmd,
876 int swapped_in = 0, ret = 0;
877 struct vm_fault vmf = {
880 .flags = FAULT_FLAG_ALLOW_RETRY,
882 .pgoff = linear_page_index(vma, address),
885 /* we only decide to swap in if there are enough young ptes */
886 if (referenced < HPAGE_PMD_NR/2) {
887 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
890 vmf.pte = pte_offset_map(pmd, address);
891 for (; vmf.address < address + HPAGE_PMD_NR*PAGE_SIZE;
892 vmf.pte++, vmf.address += PAGE_SIZE) {
893 vmf.orig_pte = *vmf.pte;
894 if (!is_swap_pte(vmf.orig_pte))
897 ret = do_swap_page(&vmf);
899 /* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */
900 if (ret & VM_FAULT_RETRY) {
901 down_read(&mm->mmap_sem);
902 if (hugepage_vma_revalidate(mm, address, &vmf.vma)) {
903 /* vma is no longer available, don't continue to swapin */
904 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
907 /* check if the pmd is still valid */
908 if (mm_find_pmd(mm, address) != pmd) {
909 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
913 if (ret & VM_FAULT_ERROR) {
914 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
917 /* pte is unmapped now, we need to map it */
918 vmf.pte = pte_offset_map(pmd, vmf.address);
922 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
926 static void collapse_huge_page(struct mm_struct *mm,
927 unsigned long address,
929 int node, int referenced)
934 struct page *new_page;
935 spinlock_t *pmd_ptl, *pte_ptl;
936 int isolated = 0, result = 0;
937 struct mem_cgroup *memcg;
938 struct vm_area_struct *vma;
939 unsigned long mmun_start; /* For mmu_notifiers */
940 unsigned long mmun_end; /* For mmu_notifiers */
943 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
945 /* Only allocate from the target node */
946 gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
949 * Before allocating the hugepage, release the mmap_sem read lock.
950 * The allocation can take potentially a long time if it involves
951 * sync compaction, and we do not need to hold the mmap_sem during
952 * that. We will recheck the vma after taking it again in write mode.
954 up_read(&mm->mmap_sem);
955 new_page = khugepaged_alloc_page(hpage, gfp, node);
957 result = SCAN_ALLOC_HUGE_PAGE_FAIL;
961 if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
962 result = SCAN_CGROUP_CHARGE_FAIL;
966 down_read(&mm->mmap_sem);
967 result = hugepage_vma_revalidate(mm, address, &vma);
969 mem_cgroup_cancel_charge(new_page, memcg, true);
970 up_read(&mm->mmap_sem);
974 pmd = mm_find_pmd(mm, address);
976 result = SCAN_PMD_NULL;
977 mem_cgroup_cancel_charge(new_page, memcg, true);
978 up_read(&mm->mmap_sem);
983 * __collapse_huge_page_swapin always returns with mmap_sem locked.
984 * If it fails, we release mmap_sem and jump out_nolock.
985 * Continuing to collapse causes inconsistency.
987 if (!__collapse_huge_page_swapin(mm, vma, address, pmd, referenced)) {
988 mem_cgroup_cancel_charge(new_page, memcg, true);
989 up_read(&mm->mmap_sem);
993 up_read(&mm->mmap_sem);
995 * Prevent all access to the pagetables, with the exception of
996 * gup_fast (handled later by the ptep_clear_flush) and of the VM
997 * (handled by the anon_vma lock + PG_lock).
999 down_write(&mm->mmap_sem);
1000 result = hugepage_vma_revalidate(mm, address, &vma);
1003 /* check if the pmd is still valid */
1004 if (mm_find_pmd(mm, address) != pmd)
1007 anon_vma_lock_write(vma->anon_vma);
1009 pte = pte_offset_map(pmd, address);
1010 pte_ptl = pte_lockptr(mm, pmd);
1012 mmun_start = address;
1013 mmun_end = address + HPAGE_PMD_SIZE;
1014 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
1015 pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
1017 * After this gup_fast can't run anymore. This also removes
1018 * any huge TLB entry from the CPU so we won't allow
1019 * huge and small TLB entries for the same virtual address
1020 * to avoid the risk of CPU bugs in that area.
1022 _pmd = pmdp_collapse_flush(vma, address, pmd);
1023 spin_unlock(pmd_ptl);
1024 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1027 isolated = __collapse_huge_page_isolate(vma, address, pte);
1028 spin_unlock(pte_ptl);
1030 if (unlikely(!isolated)) {
1033 BUG_ON(!pmd_none(*pmd));
1035 * We can only use set_pmd_at when establishing
1036 * hugepmds and never for establishing regular pmds that
1037 * point to regular pagetables. Use pmd_populate for that.
1039 pmd_populate(mm, pmd, pmd_pgtable(_pmd));
1040 spin_unlock(pmd_ptl);
1041 anon_vma_unlock_write(vma->anon_vma);
1047 * All pages are isolated and locked so anon_vma rmap
1048 * can't run anymore.
1050 anon_vma_unlock_write(vma->anon_vma);
1052 __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl);
1054 __SetPageUptodate(new_page);
1055 pgtable = pmd_pgtable(_pmd);
1057 _pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
1058 _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1061 * spin_lock() below is not the equivalent of smp_wmb(), so
1062 * this is needed to prevent the copy_huge_page writes from becoming
1063 * visible after the set_pmd_at() write.
1068 BUG_ON(!pmd_none(*pmd));
1069 page_add_new_anon_rmap(new_page, vma, address, true);
1070 mem_cgroup_commit_charge(new_page, memcg, false, true);
1071 lru_cache_add_active_or_unevictable(new_page, vma);
1072 pgtable_trans_huge_deposit(mm, pmd, pgtable);
1073 set_pmd_at(mm, address, pmd, _pmd);
1074 update_mmu_cache_pmd(vma, address, pmd);
1075 spin_unlock(pmd_ptl);
1079 khugepaged_pages_collapsed++;
1080 result = SCAN_SUCCEED;
1082 up_write(&mm->mmap_sem);
1084 trace_mm_collapse_huge_page(mm, isolated, result);
1087 mem_cgroup_cancel_charge(new_page, memcg, true);
1091 static int khugepaged_scan_pmd(struct mm_struct *mm,
1092 struct vm_area_struct *vma,
1093 unsigned long address,
1094 struct page **hpage)
1098 int ret = 0, none_or_zero = 0, result = 0, referenced = 0;
1099 struct page *page = NULL;
1100 unsigned long _address;
1102 int node = NUMA_NO_NODE, unmapped = 0;
1103 bool writable = false;
1105 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1107 pmd = mm_find_pmd(mm, address);
1109 result = SCAN_PMD_NULL;
1113 memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1114 pte = pte_offset_map_lock(mm, pmd, address, &ptl);
1115 for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
1116 _pte++, _address += PAGE_SIZE) {
1117 pte_t pteval = *_pte;
1118 if (is_swap_pte(pteval)) {
1119 if (++unmapped <= khugepaged_max_ptes_swap) {
1122 result = SCAN_EXCEED_SWAP_PTE;
1126 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
1127 if (!userfaultfd_armed(vma) &&
1128 ++none_or_zero <= khugepaged_max_ptes_none) {
1131 result = SCAN_EXCEED_NONE_PTE;
1135 if (!pte_present(pteval)) {
1136 result = SCAN_PTE_NON_PRESENT;
1139 if (pte_write(pteval))
1142 page = vm_normal_page(vma, _address, pteval);
1143 if (unlikely(!page)) {
1144 result = SCAN_PAGE_NULL;
1148 /* TODO: teach khugepaged to collapse THP mapped with pte */
1149 if (PageCompound(page)) {
1150 result = SCAN_PAGE_COMPOUND;
1155 * Record which node the original page is from and save this
1156 * information to khugepaged_node_load[].
1157 * Khugepaged will allocate a hugepage from the node that has the max
1158 * hit record.
1160 node = page_to_nid(page);
1161 if (khugepaged_scan_abort(node)) {
1162 result = SCAN_SCAN_ABORT;
1165 khugepaged_node_load[node]++;
1166 if (!PageLRU(page)) {
1167 result = SCAN_PAGE_LRU;
1170 if (PageLocked(page)) {
1171 result = SCAN_PAGE_LOCK;
1174 if (!PageAnon(page)) {
1175 result = SCAN_PAGE_ANON;
1180 * cannot use mapcount: can't collapse if there's a gup pin.
1181 * The page must only be referenced by the scanned process
1182 * and page swap cache.
1184 if (page_count(page) != 1 + PageSwapCache(page)) {
1185 result = SCAN_PAGE_COUNT;
1188 if (pte_young(pteval) ||
1189 page_is_young(page) || PageReferenced(page) ||
1190 mmu_notifier_test_young(vma->vm_mm, address))
1195 result = SCAN_SUCCEED;
1198 result = SCAN_LACK_REFERENCED_PAGE;
1201 result = SCAN_PAGE_RO;
1204 pte_unmap_unlock(pte, ptl);
1206 node = khugepaged_find_target_node();
1207 /* collapse_huge_page will return with the mmap_sem released */
1208 collapse_huge_page(mm, address, hpage, node, referenced);
1211 trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
1212 none_or_zero, result, unmapped);
1216 static void collect_mm_slot(struct mm_slot *mm_slot)
1218 struct mm_struct *mm = mm_slot->mm;
1220 VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
1222 if (khugepaged_test_exit(mm)) {
1224 hash_del(&mm_slot->hash);
1225 list_del(&mm_slot->mm_node);
1228 * Not strictly needed because the mm exited already.
1230 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1233 /* khugepaged_mm_lock actually not necessary for the below */
1234 free_mm_slot(mm_slot);
1239 #if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE)
1240 static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1242 struct vm_area_struct *vma;
1246 i_mmap_lock_write(mapping);
1247 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
1248 /* probably overkill */
1251 addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1252 if (addr & ~HPAGE_PMD_MASK)
1254 if (vma->vm_end < addr + HPAGE_PMD_SIZE)
1256 pmd = mm_find_pmd(vma->vm_mm, addr);
1260 * We need exclusive mmap_sem to retract page table.
1261 * If trylock fails we would end up with pte-mapped THP after
1262 * re-fault. Not ideal, but it's more important to not disturb
1263 * the system too much.
1265 if (down_write_trylock(&vma->vm_mm->mmap_sem)) {
1266 spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd);
1267 /* assume page table is clear */
1268 _pmd = pmdp_collapse_flush(vma, addr, pmd);
1270 up_write(&vma->vm_mm->mmap_sem);
1271 atomic_long_dec(&vma->vm_mm->nr_ptes);
1272 pte_free(vma->vm_mm, pmd_pgtable(_pmd));
1275 i_mmap_unlock_write(mapping);
1279 * collapse_shmem - collapse small tmpfs/shmem pages into a huge one.
1281 * The basic scheme is simple, the details are more complex (the refcount freezing used throughout is sketched in the note below):
1282 *  - allocate and freeze a new huge page;
1283 *  - scan over the radix tree, replacing old pages with the new one
1284 *    + swap in pages if necessary;
1285 *    + fill in gaps;
1286 *    + keep old pages around in case rollback is required;
1287 *  - if replacing succeeded:
1288 *    + copy data over;
1289 *    + free old pages;
1290 *    + unfreeze the huge page;
1291 *  - if replacing failed:
1292 *    + put all pages back and unfreeze them;
1293 *    + restore gaps in the radix-tree;
1294 *    + free the huge page;
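/*
 * A minimal sketch of the refcount freezing relied on below (behaviour of
 * the page_ref helpers in this kernel generation): page_ref_freeze(page, n)
 * atomically drops the refcount from exactly n to 0 and fails if any other
 * reference exists; while frozen, lookups cannot take a new reference, so
 * the radix tree slot can be switched safely. page_ref_unfreeze(page, n)
 * publishes the result by setting the refcount back to n.
 */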
1296 static void collapse_shmem(struct mm_struct *mm,
1297 struct address_space *mapping, pgoff_t start,
1298 struct page **hpage, int node)
1301 struct page *page, *new_page, *tmp;
1302 struct mem_cgroup *memcg;
1303 pgoff_t index, end = start + HPAGE_PMD_NR;
1304 LIST_HEAD(pagelist);
1305 struct radix_tree_iter iter;
1307 int nr_none = 0, result = SCAN_SUCCEED;
1309 VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
1311 /* Only allocate from the target node */
1312 gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
1314 new_page = khugepaged_alloc_page(hpage, gfp, node);
1316 result = SCAN_ALLOC_HUGE_PAGE_FAIL;
1320 if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
1321 result = SCAN_CGROUP_CHARGE_FAIL;
1325 new_page->index = start;
1326 new_page->mapping = mapping;
1327 __SetPageSwapBacked(new_page);
1328 __SetPageLocked(new_page);
1329 BUG_ON(!page_ref_freeze(new_page, 1));
1333 * At this point the new_page is 'frozen' (page_count() is zero), locked
1334 * and not up-to-date. It's safe to insert it into the radix tree, because
1335 * nobody would be able to map it or use it in any other way until we
1336 * unfreeze it.
1340 spin_lock_irq(&mapping->tree_lock);
1341 radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
1342 int n = min(iter.index, end) - index;
1345 * Handle holes in the radix tree: charge it from shmem and
1346 * insert relevant subpage of new_page into the radix-tree.
1348 if (n && !shmem_charge(mapping->host, n)) {
1353 for (; index < min(iter.index, end); index++) {
1354 radix_tree_insert(&mapping->page_tree, index,
1355 new_page + (index % HPAGE_PMD_NR));
1362 page = radix_tree_deref_slot_protected(slot,
1363 &mapping->tree_lock);
1364 if (radix_tree_exceptional_entry(page) || !PageUptodate(page)) {
1365 spin_unlock_irq(&mapping->tree_lock);
1366 /* swap in or instantiate fallocated page */
1367 if (shmem_getpage(mapping->host, index, &page,
1372 spin_lock_irq(&mapping->tree_lock);
1373 } else if (trylock_page(page)) {
1376 result = SCAN_PAGE_LOCK;
1381 * The page must be locked, so we can drop the tree_lock
1382 * without racing with truncate.
1384 VM_BUG_ON_PAGE(!PageLocked(page), page);
1385 VM_BUG_ON_PAGE(!PageUptodate(page), page);
1386 VM_BUG_ON_PAGE(PageTransCompound(page), page);
1388 if (page_mapping(page) != mapping) {
1389 result = SCAN_TRUNCATED;
1392 spin_unlock_irq(&mapping->tree_lock);
1394 if (isolate_lru_page(page)) {
1395 result = SCAN_DEL_PAGE_LRU;
1396 goto out_isolate_failed;
1399 if (page_mapped(page))
1400 unmap_mapping_range(mapping, index << PAGE_SHIFT,
1403 spin_lock_irq(&mapping->tree_lock);
1405 slot = radix_tree_lookup_slot(&mapping->page_tree, index);
1406 VM_BUG_ON_PAGE(page != radix_tree_deref_slot_protected(slot,
1407 &mapping->tree_lock), page);
1408 VM_BUG_ON_PAGE(page_mapped(page), page);
1411 * The page is expected to have page_count() == 3:
1412 * - we hold a pin on it;
1413 * - one reference from radix tree;
1414 * - one from isolate_lru_page;
1416 if (!page_ref_freeze(page, 3)) {
1417 result = SCAN_PAGE_COUNT;
1422 * Add the page to the list to be able to undo the collapse if
1423 * something goes wrong.
1425 list_add_tail(&page->lru, &pagelist);
1427 /* Finally, replace with the new page. */
1428 radix_tree_replace_slot(&mapping->page_tree, slot,
1429 new_page + (index % HPAGE_PMD_NR));
1431 slot = radix_tree_iter_resume(slot, &iter);
1435 spin_unlock_irq(&mapping->tree_lock);
1436 putback_lru_page(page);
1448 * Handle a hole in the radix tree at the end of the range.
1449 * This code only triggers if there's nothing in the radix tree
1450 * beyond 'end'.
1452 if (result == SCAN_SUCCEED && index < end) {
1453 int n = end - index;
1455 if (!shmem_charge(mapping->host, n)) {
1460 for (; index < end; index++) {
1461 radix_tree_insert(&mapping->page_tree, index,
1462 new_page + (index % HPAGE_PMD_NR));
1468 spin_unlock_irq(&mapping->tree_lock);
1471 if (result == SCAN_SUCCEED) {
1472 unsigned long flags;
1473 struct zone *zone = page_zone(new_page);
1476 * Replacing old pages with the new one has succeeded, now we need to
1477 * copy the content and free the old pages.
1479 list_for_each_entry_safe(page, tmp, &pagelist, lru) {
1480 copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
1482 list_del(&page->lru);
1484 page_ref_unfreeze(page, 1);
1485 page->mapping = NULL;
1486 ClearPageActive(page);
1487 ClearPageUnevictable(page);
1491 local_irq_save(flags);
1492 __inc_node_page_state(new_page, NR_SHMEM_THPS);
1494 __mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
1495 __mod_node_page_state(zone->zone_pgdat, NR_SHMEM, nr_none);
1497 local_irq_restore(flags);
1500 * Remove pte page tables, so we can re-fault
1501 * the page as huge.
1503 retract_page_tables(mapping, start);
1505 /* Everything is ready, let's unfreeze the new_page */
1506 set_page_dirty(new_page);
1507 SetPageUptodate(new_page);
1508 page_ref_unfreeze(new_page, HPAGE_PMD_NR);
1509 mem_cgroup_commit_charge(new_page, memcg, false, true);
1510 lru_cache_add_anon(new_page);
1511 unlock_page(new_page);
1515 /* Something went wrong: roll back changes to the radix-tree */
1516 shmem_uncharge(mapping->host, nr_none);
1517 spin_lock_irq(&mapping->tree_lock);
1518 radix_tree_for_each_slot(slot, &mapping->page_tree, &iter,
1520 if (iter.index >= end)
1522 page = list_first_entry_or_null(&pagelist,
1524 if (!page || iter.index < page->index) {
1528 /* Put holes back where they were */
1529 radix_tree_delete(&mapping->page_tree,
1534 VM_BUG_ON_PAGE(page->index != iter.index, page);
1536 /* Unfreeze the page. */
1537 list_del(&page->lru);
1538 page_ref_unfreeze(page, 2);
1539 radix_tree_replace_slot(&mapping->page_tree,
1541 slot = radix_tree_iter_resume(slot, &iter);
1542 spin_unlock_irq(&mapping->tree_lock);
1543 putback_lru_page(page);
1545 spin_lock_irq(&mapping->tree_lock);
1548 spin_unlock_irq(&mapping->tree_lock);
1550 /* Unfreeze new_page; the caller will take care of freeing it */
1551 page_ref_unfreeze(new_page, 1);
1552 mem_cgroup_cancel_charge(new_page, memcg, true);
1553 unlock_page(new_page);
1554 new_page->mapping = NULL;
1557 VM_BUG_ON(!list_empty(&pagelist));
1558 /* TODO: tracepoints */
1561 static void khugepaged_scan_shmem(struct mm_struct *mm,
1562 struct address_space *mapping,
1563 pgoff_t start, struct page **hpage)
1565 struct page *page = NULL;
1566 struct radix_tree_iter iter;
1569 int node = NUMA_NO_NODE;
1570 int result = SCAN_SUCCEED;
1574 memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1576 radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
1577 if (iter.index >= start + HPAGE_PMD_NR)
1580 page = radix_tree_deref_slot(slot);
1581 if (radix_tree_deref_retry(page)) {
1582 slot = radix_tree_iter_retry(&iter);
1586 if (radix_tree_exception(page)) {
1587 if (++swap > khugepaged_max_ptes_swap) {
1588 result = SCAN_EXCEED_SWAP_PTE;
1594 if (PageTransCompound(page)) {
1595 result = SCAN_PAGE_COMPOUND;
1599 node = page_to_nid(page);
1600 if (khugepaged_scan_abort(node)) {
1601 result = SCAN_SCAN_ABORT;
1604 khugepaged_node_load[node]++;
1606 if (!PageLRU(page)) {
1607 result = SCAN_PAGE_LRU;
1611 if (page_count(page) != 1 + page_mapcount(page)) {
1612 result = SCAN_PAGE_COUNT;
1617 * We probably should check if the page is referenced here, but
1618 * nobody would transfer pte_young() to PageReferenced() for us.
1619 * And rmap walk here is just too costly...
1624 if (need_resched()) {
1625 slot = radix_tree_iter_resume(slot, &iter);
1631 if (result == SCAN_SUCCEED) {
1632 if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
1633 result = SCAN_EXCEED_NONE_PTE;
1635 node = khugepaged_find_target_node();
1636 collapse_shmem(mm, mapping, start, hpage, node);
1640 /* TODO: tracepoints */
1643 static void khugepaged_scan_shmem(struct mm_struct *mm,
1644 struct address_space *mapping,
1645 pgoff_t start, struct page **hpage)
1651 static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
1652 struct page **hpage)
1653 __releases(&khugepaged_mm_lock)
1654 __acquires(&khugepaged_mm_lock)
1656 struct mm_slot *mm_slot;
1657 struct mm_struct *mm;
1658 struct vm_area_struct *vma;
1662 VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
1664 if (khugepaged_scan.mm_slot)
1665 mm_slot = khugepaged_scan.mm_slot;
1667 mm_slot = list_entry(khugepaged_scan.mm_head.next,
1668 struct mm_slot, mm_node);
1669 khugepaged_scan.address = 0;
1670 khugepaged_scan.mm_slot = mm_slot;
1672 spin_unlock(&khugepaged_mm_lock);
1675 down_read(&mm->mmap_sem);
1676 if (unlikely(khugepaged_test_exit(mm)))
1679 vma = find_vma(mm, khugepaged_scan.address);
1682 for (; vma; vma = vma->vm_next) {
1683 unsigned long hstart, hend;
1686 if (unlikely(khugepaged_test_exit(mm))) {
1690 if (!hugepage_vma_check(vma)) {
1695 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
1696 hend = vma->vm_end & HPAGE_PMD_MASK;
1699 if (khugepaged_scan.address > hend)
1701 if (khugepaged_scan.address < hstart)
1702 khugepaged_scan.address = hstart;
1703 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
1705 while (khugepaged_scan.address < hend) {
1708 if (unlikely(khugepaged_test_exit(mm)))
1709 goto breakouterloop;
1711 VM_BUG_ON(khugepaged_scan.address < hstart ||
1712 khugepaged_scan.address + HPAGE_PMD_SIZE >
1714 if (shmem_file(vma->vm_file)) {
1716 pgoff_t pgoff = linear_page_index(vma,
1717 khugepaged_scan.address);
1718 if (!shmem_huge_enabled(vma))
1720 file = get_file(vma->vm_file);
1721 up_read(&mm->mmap_sem);
1723 khugepaged_scan_shmem(mm, file->f_mapping,
1727 ret = khugepaged_scan_pmd(mm, vma,
1728 khugepaged_scan.address,
1731 /* move to next address */
1732 khugepaged_scan.address += HPAGE_PMD_SIZE;
1733 progress += HPAGE_PMD_NR;
1735 /* we released mmap_sem so break loop */
1736 goto breakouterloop_mmap_sem;
1737 if (progress >= pages)
1738 goto breakouterloop;
1742 up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
1743 breakouterloop_mmap_sem:
1745 spin_lock(&khugepaged_mm_lock);
1746 VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
1748 * Release the current mm_slot if this mm is about to die, or
1749 * if we scanned all vmas of this mm.
1751 if (khugepaged_test_exit(mm) || !vma) {
1753 * Make sure that if mm_users is reaching zero while
1754 * khugepaged runs here, khugepaged_exit will find
1755 * mm_slot not pointing to the exiting mm.
1757 if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
1758 khugepaged_scan.mm_slot = list_entry(
1759 mm_slot->mm_node.next,
1760 struct mm_slot, mm_node);
1761 khugepaged_scan.address = 0;
1763 khugepaged_scan.mm_slot = NULL;
1764 khugepaged_full_scans++;
1767 collect_mm_slot(mm_slot);
1773 static int khugepaged_has_work(void)
1775 return !list_empty(&khugepaged_scan.mm_head) &&
1776 khugepaged_enabled();
1779 static int khugepaged_wait_event(void)
1781 return !list_empty(&khugepaged_scan.mm_head) ||
1782 kthread_should_stop();
1785 static void khugepaged_do_scan(void)
1787 struct page *hpage = NULL;
1788 unsigned int progress = 0, pass_through_head = 0;
1789 unsigned int pages = khugepaged_pages_to_scan;
1792 barrier(); /* write khugepaged_pages_to_scan to local stack */
1794 while (progress < pages) {
1795 if (!khugepaged_prealloc_page(&hpage, &wait))
1800 if (unlikely(kthread_should_stop() || try_to_freeze()))
1803 spin_lock(&khugepaged_mm_lock);
1804 if (!khugepaged_scan.mm_slot)
1805 pass_through_head++;
1806 if (khugepaged_has_work() &&
1807 pass_through_head < 2)
1808 progress += khugepaged_scan_mm_slot(pages - progress,
1812 spin_unlock(&khugepaged_mm_lock);
1815 if (!IS_ERR_OR_NULL(hpage))
1819 static bool khugepaged_should_wakeup(void)
1821 return kthread_should_stop() ||
1822 time_after_eq(jiffies, khugepaged_sleep_expire);
1825 static void khugepaged_wait_work(void)
1827 if (khugepaged_has_work()) {
1828 const unsigned long scan_sleep_jiffies =
1829 msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
1831 if (!scan_sleep_jiffies)
1834 khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
1835 wait_event_freezable_timeout(khugepaged_wait,
1836 khugepaged_should_wakeup(),
1837 scan_sleep_jiffies);
1841 if (khugepaged_enabled())
1842 wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
1845 static int khugepaged(void *none)
1847 struct mm_slot *mm_slot;
1850 set_user_nice(current, MAX_NICE);
1852 while (!kthread_should_stop()) {
1853 khugepaged_do_scan();
1854 khugepaged_wait_work();
1857 spin_lock(&khugepaged_mm_lock);
1858 mm_slot = khugepaged_scan.mm_slot;
1859 khugepaged_scan.mm_slot = NULL;
1861 collect_mm_slot(mm_slot);
1862 spin_unlock(&khugepaged_mm_lock);
1866 static void set_recommended_min_free_kbytes(void)
1870 unsigned long recommended_min;
1872 for_each_populated_zone(zone)
1875 /* Ensure 2 pageblocks are free to assist fragmentation avoidance */
1876 recommended_min = pageblock_nr_pages * nr_zones * 2;
1879 * Make sure that on average at least two pageblocks are almost free
1880 * of another type, one for a migratetype to fall back to and a
1881 * second to avoid subsequent fallbacks of other types. There are 3
1882 * MIGRATE_TYPES we care about.
1884 recommended_min += pageblock_nr_pages * nr_zones *
1885 MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
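	/*
	 * Worked example, assuming 4 KiB pages, 512-page pageblocks, two
	 * populated zones and MIGRATE_PCPTYPES == 3:
	 * 512 * 2 * 2 + 512 * 2 * 3 * 3 == 11264 pages (about 44 MiB)
	 * before the 5%-of-lowmem clamp below.
	 */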
1887 /* never allow reserving more than 5% of the lowmem */
1888 recommended_min = min(recommended_min,
1889 (unsigned long) nr_free_buffer_pages() / 20);
1890 recommended_min <<= (PAGE_SHIFT-10);
1892 if (recommended_min > min_free_kbytes) {
1893 if (user_min_free_kbytes >= 0)
1894 pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
1895 min_free_kbytes, recommended_min);
1897 min_free_kbytes = recommended_min;
1899 setup_per_zone_wmarks();
1902 int start_stop_khugepaged(void)
1904 static struct task_struct *khugepaged_thread __read_mostly;
1905 static DEFINE_MUTEX(khugepaged_mutex);
1908 mutex_lock(&khugepaged_mutex);
1909 if (khugepaged_enabled()) {
1910 if (!khugepaged_thread)
1911 khugepaged_thread = kthread_run(khugepaged, NULL,
1913 if (IS_ERR(khugepaged_thread)) {
1914 pr_err("khugepaged: kthread_run(khugepaged) failed\n");
1915 err = PTR_ERR(khugepaged_thread);
1916 khugepaged_thread = NULL;
1920 if (!list_empty(&khugepaged_scan.mm_head))
1921 wake_up_interruptible(&khugepaged_wait);
1923 set_recommended_min_free_kbytes();
1924 } else if (khugepaged_thread) {
1925 kthread_stop(khugepaged_thread);
1926 khugepaged_thread = NULL;
1929 mutex_unlock(&khugepaged_mutex);