2 * Copyright (C) 2009 Red Hat, Inc.
4 * This work is licensed under the terms of the GNU GPL, version 2. See
5 * the COPYING file in the top-level directory.
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11 #include <linux/sched.h>
12 #include <linux/highmem.h>
13 #include <linux/hugetlb.h>
14 #include <linux/mmu_notifier.h>
15 #include <linux/rmap.h>
16 #include <linux/swap.h>
17 #include <linux/shrinker.h>
18 #include <linux/mm_inline.h>
19 #include <linux/swapops.h>
20 #include <linux/dax.h>
21 #include <linux/kthread.h>
22 #include <linux/khugepaged.h>
23 #include <linux/freezer.h>
24 #include <linux/pfn_t.h>
25 #include <linux/mman.h>
26 #include <linux/memremap.h>
27 #include <linux/pagemap.h>
28 #include <linux/debugfs.h>
29 #include <linux/migrate.h>
30 #include <linux/hashtable.h>
31 #include <linux/userfaultfd_k.h>
32 #include <linux/page_idle.h>
33 #include <linux/swapops.h>
36 #include <asm/pgalloc.h>
46 SCAN_NO_REFERENCED_PAGE,
60 SCAN_ALLOC_HUGE_PAGE_FAIL,
61 SCAN_CGROUP_CHARGE_FAIL,
65 #define CREATE_TRACE_POINTS
66 #include <trace/events/huge_memory.h>
69 * By default transparent hugepage support is disabled so as not to risk
70 * increasing the memory footprint of applications without a guaranteed
71 * benefit. When transparent hugepage support is enabled, it applies to all
72 * mappings, and khugepaged scans all mappings.
73 * Defrag is invoked by khugepaged hugepage allocations and by page faults
74 * for all hugepage allocations.
76 unsigned long transparent_hugepage_flags __read_mostly =
77 #ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
78 (1<<TRANSPARENT_HUGEPAGE_FLAG)|
80 #ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
81 (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
83 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)|
84 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
85 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
87 /* default: scan 8*512 ptes (or vmas) every 10 seconds */
88 static unsigned int khugepaged_pages_to_scan __read_mostly = HPAGE_PMD_NR*8;
89 static unsigned int khugepaged_pages_collapsed;
90 static unsigned int khugepaged_full_scans;
91 static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
92 /* during fragmentation, poll the hugepage allocator once every minute */
93 static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
94 static struct task_struct *khugepaged_thread __read_mostly;
95 static DEFINE_MUTEX(khugepaged_mutex);
96 static DEFINE_SPINLOCK(khugepaged_mm_lock);
97 static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
99 * by default, collapse hugepages if there is at least one pte mapped as it
100 * would have been had the vma been large enough at page fault time.
103 static unsigned int khugepaged_max_ptes_none __read_mostly = HPAGE_PMD_NR-1;
104 static unsigned int khugepaged_max_ptes_swap __read_mostly = HPAGE_PMD_NR/8;
106 static int khugepaged(void *none);
107 static int khugepaged_slab_init(void);
108 static void khugepaged_slab_exit(void);
110 #define MM_SLOTS_HASH_BITS 10
111 static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
113 static struct kmem_cache *mm_slot_cache __read_mostly;
116 * struct mm_slot - hash lookup from mm to mm_slot
117 * @hash: hash collision list
118 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
119 * @mm: the mm that this information is valid for
122 struct hlist_node hash;
123 struct list_head mm_node;
124 struct mm_struct *mm;
128 * struct khugepaged_scan - cursor for scanning
129 * @mm_head: the head of the mm list to scan
130 * @mm_slot: the current mm_slot we are scanning
131 * @address: the next address inside that to be scanned
133 * There is only one khugepaged_scan instance of this cursor structure.
135 struct khugepaged_scan {
136 struct list_head mm_head;
137 struct mm_slot *mm_slot;
138 unsigned long address;
140 static struct khugepaged_scan khugepaged_scan = {
141 .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
144 static struct shrinker deferred_split_shrinker;
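/*
 * Raise min_free_kbytes (and hence the zone watermarks) so that enough whole
 * pageblocks stay free for fragmentation avoidance to work, which makes huge
 * page allocations more likely to succeed. The value is only ever raised,
 * never lowered, and is capped at 5% of lowmem.
 */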
146 static void set_recommended_min_free_kbytes(void)
150 unsigned long recommended_min;
152 for_each_populated_zone(zone)
155 /* Ensure 2 pageblocks are free to assist fragmentation avoidance */
156 recommended_min = pageblock_nr_pages * nr_zones * 2;
159 * Make sure that on average at least two pageblocks are almost free
160 * of another type, one for a migratetype to fall back to and a
161 * second to avoid subsequent fallbacks of other types. There are 3
162 * MIGRATE_TYPES we care about.
164 recommended_min += pageblock_nr_pages * nr_zones *
165 MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
167 /* never allow reserving more than 5% of the lowmem */
168 recommended_min = min(recommended_min,
169 (unsigned long) nr_free_buffer_pages() / 20);
170 recommended_min <<= (PAGE_SHIFT-10);
172 if (recommended_min > min_free_kbytes) {
173 if (user_min_free_kbytes >= 0)
174 pr_info("raising min_free_kbytes from %d to %lu "
175 "to help transparent hugepage allocations\n",
176 min_free_kbytes, recommended_min);
178 min_free_kbytes = recommended_min;
180 setup_per_zone_wmarks();
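/*
 * Start the khugepaged kernel thread when transparent hugepages are enabled,
 * or stop a running thread when they are disabled. Called at init time and
 * when the sysfs "enabled" setting changes.
 */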
183 static int start_stop_khugepaged(void)
186 if (khugepaged_enabled()) {
187 if (!khugepaged_thread)
188 khugepaged_thread = kthread_run(khugepaged, NULL,
190 if (IS_ERR(khugepaged_thread)) {
191 pr_err("khugepaged: kthread_run(khugepaged) failed\n");
192 err = PTR_ERR(khugepaged_thread);
193 khugepaged_thread = NULL;
197 if (!list_empty(&khugepaged_scan.mm_head))
198 wake_up_interruptible(&khugepaged_wait);
200 set_recommended_min_free_kbytes();
201 } else if (khugepaged_thread) {
202 kthread_stop(khugepaged_thread);
203 khugepaged_thread = NULL;
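/*
 * The huge zero page is a PMD-sized page of zeroes shared by read-only
 * anonymous huge page faults when use_zero_page is enabled. It is allocated
 * lazily and reference counted; the shrinker below frees it once only the
 * extra reference taken at allocation time remains.
 */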
209 static atomic_t huge_zero_refcount;
210 struct page *huge_zero_page __read_mostly;
212 struct page *get_huge_zero_page(void)
214 struct page *zero_page;
216 if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
217 return READ_ONCE(huge_zero_page);
219 zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
222 count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
225 count_vm_event(THP_ZERO_PAGE_ALLOC);
227 if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
229 __free_pages(zero_page, compound_order(zero_page));
233 /* We take an additional reference here. It will be put back by the shrinker */
234 atomic_set(&huge_zero_refcount, 2);
236 return READ_ONCE(huge_zero_page);
239 static void put_huge_zero_page(void)
242 * The counter should never reach zero here. Only the shrinker can put the last reference.
245 BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
248 static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
249 struct shrink_control *sc)
251 /* we can free the zero page only if the last reference remains */
252 return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
255 static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
256 struct shrink_control *sc)
258 if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
259 struct page *zero_page = xchg(&huge_zero_page, NULL);
260 BUG_ON(zero_page == NULL);
261 __free_pages(zero_page, compound_order(zero_page));
268 static struct shrinker huge_zero_page_shrinker = {
269 .count_objects = shrink_huge_zero_page_count,
270 .scan_objects = shrink_huge_zero_page_scan,
271 .seeks = DEFAULT_SEEKS,
276 static ssize_t double_flag_show(struct kobject *kobj,
277 struct kobj_attribute *attr, char *buf,
278 enum transparent_hugepage_flag enabled,
279 enum transparent_hugepage_flag req_madv)
281 if (test_bit(enabled, &transparent_hugepage_flags)) {
282 VM_BUG_ON(test_bit(req_madv, &transparent_hugepage_flags));
283 return sprintf(buf, "[always] madvise never\n");
284 } else if (test_bit(req_madv, &transparent_hugepage_flags))
285 return sprintf(buf, "always [madvise] never\n");
287 return sprintf(buf, "always madvise [never]\n");
289 static ssize_t double_flag_store(struct kobject *kobj,
290 struct kobj_attribute *attr,
291 const char *buf, size_t count,
292 enum transparent_hugepage_flag enabled,
293 enum transparent_hugepage_flag req_madv)
295 if (!memcmp("always", buf,
296 min(sizeof("always")-1, count))) {
297 set_bit(enabled, &transparent_hugepage_flags);
298 clear_bit(req_madv, &transparent_hugepage_flags);
299 } else if (!memcmp("madvise", buf,
300 min(sizeof("madvise")-1, count))) {
301 clear_bit(enabled, &transparent_hugepage_flags);
302 set_bit(req_madv, &transparent_hugepage_flags);
303 } else if (!memcmp("never", buf,
304 min(sizeof("never")-1, count))) {
305 clear_bit(enabled, &transparent_hugepage_flags);
306 clear_bit(req_madv, &transparent_hugepage_flags);
313 static ssize_t enabled_show(struct kobject *kobj,
314 struct kobj_attribute *attr, char *buf)
316 return double_flag_show(kobj, attr, buf,
317 TRANSPARENT_HUGEPAGE_FLAG,
318 TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
320 static ssize_t enabled_store(struct kobject *kobj,
321 struct kobj_attribute *attr,
322 const char *buf, size_t count)
326 ret = double_flag_store(kobj, attr, buf, count,
327 TRANSPARENT_HUGEPAGE_FLAG,
328 TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
333 mutex_lock(&khugepaged_mutex);
334 err = start_stop_khugepaged();
335 mutex_unlock(&khugepaged_mutex);
343 static struct kobj_attribute enabled_attr =
344 __ATTR(enabled, 0644, enabled_show, enabled_store);
346 static ssize_t single_flag_show(struct kobject *kobj,
347 struct kobj_attribute *attr, char *buf,
348 enum transparent_hugepage_flag flag)
350 return sprintf(buf, "%d\n",
351 !!test_bit(flag, &transparent_hugepage_flags));
354 static ssize_t single_flag_store(struct kobject *kobj,
355 struct kobj_attribute *attr,
356 const char *buf, size_t count,
357 enum transparent_hugepage_flag flag)
362 ret = kstrtoul(buf, 10, &value);
369 set_bit(flag, &transparent_hugepage_flags);
371 clear_bit(flag, &transparent_hugepage_flags);
377 * Currently defrag only disables __GFP_NOWAIT for allocation. A blind
378 * __GFP_REPEAT is too aggressive; it's never worth swapping tons of
379 * memory just to allocate one more hugepage.
381 static ssize_t defrag_show(struct kobject *kobj,
382 struct kobj_attribute *attr, char *buf)
384 return double_flag_show(kobj, attr, buf,
385 TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
386 TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
388 static ssize_t defrag_store(struct kobject *kobj,
389 struct kobj_attribute *attr,
390 const char *buf, size_t count)
392 return double_flag_store(kobj, attr, buf, count,
393 TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
394 TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
396 static struct kobj_attribute defrag_attr =
397 __ATTR(defrag, 0644, defrag_show, defrag_store);
399 static ssize_t use_zero_page_show(struct kobject *kobj,
400 struct kobj_attribute *attr, char *buf)
402 return single_flag_show(kobj, attr, buf,
403 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
405 static ssize_t use_zero_page_store(struct kobject *kobj,
406 struct kobj_attribute *attr, const char *buf, size_t count)
408 return single_flag_store(kobj, attr, buf, count,
409 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
411 static struct kobj_attribute use_zero_page_attr =
412 __ATTR(use_zero_page, 0644, use_zero_page_show, use_zero_page_store);
413 #ifdef CONFIG_DEBUG_VM
414 static ssize_t debug_cow_show(struct kobject *kobj,
415 struct kobj_attribute *attr, char *buf)
417 return single_flag_show(kobj, attr, buf,
418 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
420 static ssize_t debug_cow_store(struct kobject *kobj,
421 struct kobj_attribute *attr,
422 const char *buf, size_t count)
424 return single_flag_store(kobj, attr, buf, count,
425 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
427 static struct kobj_attribute debug_cow_attr =
428 __ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store);
429 #endif /* CONFIG_DEBUG_VM */
431 static struct attribute *hugepage_attr[] = {
434 &use_zero_page_attr.attr,
435 #ifdef CONFIG_DEBUG_VM
436 &debug_cow_attr.attr,
441 static struct attribute_group hugepage_attr_group = {
442 .attrs = hugepage_attr,
445 static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
446 struct kobj_attribute *attr,
449 return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
452 static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
453 struct kobj_attribute *attr,
454 const char *buf, size_t count)
459 err = kstrtoul(buf, 10, &msecs);
460 if (err || msecs > UINT_MAX)
463 khugepaged_scan_sleep_millisecs = msecs;
464 wake_up_interruptible(&khugepaged_wait);
468 static struct kobj_attribute scan_sleep_millisecs_attr =
469 __ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
470 scan_sleep_millisecs_store);
472 static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
473 struct kobj_attribute *attr,
476 return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
479 static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
480 struct kobj_attribute *attr,
481 const char *buf, size_t count)
486 err = kstrtoul(buf, 10, &msecs);
487 if (err || msecs > UINT_MAX)
490 khugepaged_alloc_sleep_millisecs = msecs;
491 wake_up_interruptible(&khugepaged_wait);
495 static struct kobj_attribute alloc_sleep_millisecs_attr =
496 __ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
497 alloc_sleep_millisecs_store);
499 static ssize_t pages_to_scan_show(struct kobject *kobj,
500 struct kobj_attribute *attr,
503 return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
505 static ssize_t pages_to_scan_store(struct kobject *kobj,
506 struct kobj_attribute *attr,
507 const char *buf, size_t count)
512 err = kstrtoul(buf, 10, &pages);
513 if (err || !pages || pages > UINT_MAX)
516 khugepaged_pages_to_scan = pages;
520 static struct kobj_attribute pages_to_scan_attr =
521 __ATTR(pages_to_scan, 0644, pages_to_scan_show,
522 pages_to_scan_store);
524 static ssize_t pages_collapsed_show(struct kobject *kobj,
525 struct kobj_attribute *attr,
528 return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
530 static struct kobj_attribute pages_collapsed_attr =
531 __ATTR_RO(pages_collapsed);
533 static ssize_t full_scans_show(struct kobject *kobj,
534 struct kobj_attribute *attr,
537 return sprintf(buf, "%u\n", khugepaged_full_scans);
539 static struct kobj_attribute full_scans_attr =
540 __ATTR_RO(full_scans);
542 static ssize_t khugepaged_defrag_show(struct kobject *kobj,
543 struct kobj_attribute *attr, char *buf)
545 return single_flag_show(kobj, attr, buf,
546 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
548 static ssize_t khugepaged_defrag_store(struct kobject *kobj,
549 struct kobj_attribute *attr,
550 const char *buf, size_t count)
552 return single_flag_store(kobj, attr, buf, count,
553 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
555 static struct kobj_attribute khugepaged_defrag_attr =
556 __ATTR(defrag, 0644, khugepaged_defrag_show,
557 khugepaged_defrag_store);
560 * max_ptes_none controls whether khugepaged should collapse hugepages
561 * over any unmapped ptes, in turn potentially increasing the memory
562 * footprint of the vmas. When max_ptes_none is 0, khugepaged will not
563 * reduce the available free memory in the system as it
564 * runs. Increasing max_ptes_none will instead potentially reduce the
565 * free memory in the system during the khugepaged scan.
567 static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
568 struct kobj_attribute *attr,
571 return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
573 static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
574 struct kobj_attribute *attr,
575 const char *buf, size_t count)
578 unsigned long max_ptes_none;
580 err = kstrtoul(buf, 10, &max_ptes_none);
581 if (err || max_ptes_none > HPAGE_PMD_NR-1)
584 khugepaged_max_ptes_none = max_ptes_none;
588 static struct kobj_attribute khugepaged_max_ptes_none_attr =
589 __ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
590 khugepaged_max_ptes_none_store);
592 static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
593 struct kobj_attribute *attr,
596 return sprintf(buf, "%u\n", khugepaged_max_ptes_swap);
599 static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
600 struct kobj_attribute *attr,
601 const char *buf, size_t count)
604 unsigned long max_ptes_swap;
606 err = kstrtoul(buf, 10, &max_ptes_swap);
607 if (err || max_ptes_swap > HPAGE_PMD_NR-1)
610 khugepaged_max_ptes_swap = max_ptes_swap;
615 static struct kobj_attribute khugepaged_max_ptes_swap_attr =
616 __ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
617 khugepaged_max_ptes_swap_store);
619 static struct attribute *khugepaged_attr[] = {
620 &khugepaged_defrag_attr.attr,
621 &khugepaged_max_ptes_none_attr.attr,
622 &pages_to_scan_attr.attr,
623 &pages_collapsed_attr.attr,
624 &full_scans_attr.attr,
625 &scan_sleep_millisecs_attr.attr,
626 &alloc_sleep_millisecs_attr.attr,
627 &khugepaged_max_ptes_swap_attr.attr,
631 static struct attribute_group khugepaged_attr_group = {
632 .attrs = khugepaged_attr,
633 .name = "khugepaged",
636 static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
640 *hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
641 if (unlikely(!*hugepage_kobj)) {
642 pr_err("failed to create transparent hugepage kobject\n");
646 err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
648 pr_err("failed to register transparent hugepage group\n");
652 err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
654 pr_err("failed to register transparent hugepage group\n");
655 goto remove_hp_group;
661 sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
663 kobject_put(*hugepage_kobj);
667 static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
669 sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
670 sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
671 kobject_put(hugepage_kobj);
674 static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
679 static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
682 #endif /* CONFIG_SYSFS */
684 static int __init hugepage_init(void)
687 struct kobject *hugepage_kobj;
689 if (!has_transparent_hugepage()) {
690 transparent_hugepage_flags = 0;
694 err = hugepage_init_sysfs(&hugepage_kobj);
698 err = khugepaged_slab_init();
702 err = register_shrinker(&huge_zero_page_shrinker);
704 goto err_hzp_shrinker;
705 err = register_shrinker(&deferred_split_shrinker);
707 goto err_split_shrinker;
710 * By default disable transparent hugepages on smaller systems,
711 * where the extra memory used could hurt more than TLB overhead
712 * is likely to save. The admin can still enable it through /sys.
714 if (totalram_pages < (512 << (20 - PAGE_SHIFT))) {
715 transparent_hugepage_flags = 0;
719 err = start_stop_khugepaged();
725 unregister_shrinker(&deferred_split_shrinker);
727 unregister_shrinker(&huge_zero_page_shrinker);
729 khugepaged_slab_exit();
731 hugepage_exit_sysfs(hugepage_kobj);
735 subsys_initcall(hugepage_init);
737 static int __init setup_transparent_hugepage(char *str)
742 if (!strcmp(str, "always")) {
743 set_bit(TRANSPARENT_HUGEPAGE_FLAG,
744 &transparent_hugepage_flags);
745 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
746 &transparent_hugepage_flags);
748 } else if (!strcmp(str, "madvise")) {
749 clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
750 &transparent_hugepage_flags);
751 set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
752 &transparent_hugepage_flags);
754 } else if (!strcmp(str, "never")) {
755 clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
756 &transparent_hugepage_flags);
757 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
758 &transparent_hugepage_flags);
763 pr_warn("transparent_hugepage= cannot parse, ignored\n");
766 __setup("transparent_hugepage=", setup_transparent_hugepage);
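/* Make the pmd writable only if the vma itself is writable. */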
768 pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
770 if (likely(vma->vm_flags & VM_WRITE))
771 pmd = pmd_mkwrite(pmd);
775 static inline pmd_t mk_huge_pmd(struct page *page, pgprot_t prot)
778 entry = mk_pmd(page, prot);
779 entry = pmd_mkhuge(entry);
783 static inline struct list_head *page_deferred_list(struct page *page)
786 * ->lru in the tail pages is occupied by compound_head.
787 * Let's use ->mapping + ->index in the second tail page as list_head.
789 return (struct list_head *)&page[2].mapping;
792 void prep_transhuge_page(struct page *page)
795 * we use page->mapping and page->index in the second tail page
796 * as list_head: assuming THP order >= 2
798 BUILD_BUG_ON(HPAGE_PMD_ORDER < 2);
800 INIT_LIST_HEAD(page_deferred_list(page));
801 set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
804 static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
805 struct vm_area_struct *vma,
806 unsigned long address, pmd_t *pmd,
807 struct page *page, gfp_t gfp,
810 struct mem_cgroup *memcg;
813 unsigned long haddr = address & HPAGE_PMD_MASK;
815 VM_BUG_ON_PAGE(!PageCompound(page), page);
817 if (mem_cgroup_try_charge(page, mm, gfp, &memcg, true)) {
819 count_vm_event(THP_FAULT_FALLBACK);
820 return VM_FAULT_FALLBACK;
823 pgtable = pte_alloc_one(mm, haddr);
824 if (unlikely(!pgtable)) {
825 mem_cgroup_cancel_charge(page, memcg, true);
830 clear_huge_page(page, haddr, HPAGE_PMD_NR);
832 * The memory barrier inside __SetPageUptodate makes sure that
833 * clear_huge_page writes become visible before the set_pmd_at()
836 __SetPageUptodate(page);
838 ptl = pmd_lock(mm, pmd);
839 if (unlikely(!pmd_none(*pmd))) {
841 mem_cgroup_cancel_charge(page, memcg, true);
843 pte_free(mm, pgtable);
847 /* Deliver the page fault to userland */
848 if (userfaultfd_missing(vma)) {
852 mem_cgroup_cancel_charge(page, memcg, true);
854 pte_free(mm, pgtable);
855 ret = handle_userfault(vma, address, flags,
857 VM_BUG_ON(ret & VM_FAULT_FALLBACK);
861 entry = mk_huge_pmd(page, vma->vm_page_prot);
862 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
863 page_add_new_anon_rmap(page, vma, haddr, true);
864 mem_cgroup_commit_charge(page, memcg, false, true);
865 lru_cache_add_active_or_unevictable(page, vma);
866 pgtable_trans_huge_deposit(mm, pmd, pgtable);
867 set_pmd_at(mm, haddr, pmd, entry);
868 add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
869 atomic_long_inc(&mm->nr_ptes);
871 count_vm_event(THP_FAULT_ALLOC);
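/*
 * Build the gfp mask for a transparent hugepage allocation: when defrag is
 * not in effect for this allocation, the __GFP_RECLAIM bits are dropped from
 * GFP_TRANSHUGE so the allocation fails fast rather than reclaiming or
 * compacting memory.
 */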
877 static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
879 return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_RECLAIM)) | extra_gfp;
882 /* Caller must hold page table lock. */
883 static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
884 struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
885 struct page *zero_page)
890 entry = mk_pmd(zero_page, vma->vm_page_prot);
891 entry = pmd_mkhuge(entry);
893 pgtable_trans_huge_deposit(mm, pmd, pgtable);
894 set_pmd_at(mm, haddr, pmd, entry);
895 atomic_long_inc(&mm->nr_ptes);
899 int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
900 unsigned long address, pmd_t *pmd,
905 unsigned long haddr = address & HPAGE_PMD_MASK;
907 if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
908 return VM_FAULT_FALLBACK;
909 if (unlikely(anon_vma_prepare(vma)))
911 if (unlikely(khugepaged_enter(vma, vma->vm_flags)))
913 if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm) &&
914 transparent_hugepage_use_zero_page()) {
917 struct page *zero_page;
920 pgtable = pte_alloc_one(mm, haddr);
921 if (unlikely(!pgtable))
923 zero_page = get_huge_zero_page();
924 if (unlikely(!zero_page)) {
925 pte_free(mm, pgtable);
926 count_vm_event(THP_FAULT_FALLBACK);
927 return VM_FAULT_FALLBACK;
929 ptl = pmd_lock(mm, pmd);
932 if (pmd_none(*pmd)) {
933 if (userfaultfd_missing(vma)) {
935 ret = handle_userfault(vma, address, flags,
937 VM_BUG_ON(ret & VM_FAULT_FALLBACK);
939 set_huge_zero_page(pgtable, mm, vma,
948 pte_free(mm, pgtable);
949 put_huge_zero_page();
953 gfp = alloc_hugepage_gfpmask(transparent_hugepage_defrag(vma), 0);
954 page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
955 if (unlikely(!page)) {
956 count_vm_event(THP_FAULT_FALLBACK);
957 return VM_FAULT_FALLBACK;
959 prep_transhuge_page(page);
960 return __do_huge_pmd_anonymous_page(mm, vma, address, pmd, page, gfp,
964 static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
965 pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write)
967 struct mm_struct *mm = vma->vm_mm;
971 ptl = pmd_lock(mm, pmd);
972 entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
973 if (pfn_t_devmap(pfn))
974 entry = pmd_mkdevmap(entry);
976 entry = pmd_mkyoung(pmd_mkdirty(entry));
977 entry = maybe_pmd_mkwrite(entry, vma);
979 set_pmd_at(mm, addr, pmd, entry);
980 update_mmu_cache_pmd(vma, addr, pmd);
984 int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
985 pmd_t *pmd, pfn_t pfn, bool write)
987 pgprot_t pgprot = vma->vm_page_prot;
989 * If we had pmd_special, we could avoid all these restrictions,
990 * but we need to be consistent with PTEs and architectures that
991 * can't support a 'special' bit.
993 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
994 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
995 (VM_PFNMAP|VM_MIXEDMAP));
996 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
997 BUG_ON(!pfn_t_devmap(pfn));
999 if (addr < vma->vm_start || addr >= vma->vm_end)
1000 return VM_FAULT_SIGBUS;
1001 if (track_pfn_insert(vma, &pgprot, pfn))
1002 return VM_FAULT_SIGBUS;
1003 insert_pfn_pmd(vma, addr, pmd, pfn, pgprot, write);
1004 return VM_FAULT_NOPAGE;
1007 static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
1013 * We should set the dirty bit only for FOLL_WRITE, but for now
1014 * the dirty bit in the pmd is meaningless. If the dirty
1015 * bit ever becomes meaningful and we only set it for
1016 * FOLL_WRITE, an atomic set_bit will be required on the pmd to
1017 * set the young bit, instead of the current set_pmd_at.
1019 _pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
1020 if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
1022 update_mmu_cache_pmd(vma, addr, pmd);
1025 struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
1026 pmd_t *pmd, int flags)
1028 unsigned long pfn = pmd_pfn(*pmd);
1029 struct mm_struct *mm = vma->vm_mm;
1030 struct dev_pagemap *pgmap;
1033 assert_spin_locked(pmd_lockptr(mm, pmd));
1035 if (flags & FOLL_WRITE && !pmd_write(*pmd))
1038 if (pmd_present(*pmd) && pmd_devmap(*pmd))
1043 if (flags & FOLL_TOUCH)
1044 touch_pmd(vma, addr, pmd);
1047 * device mapped pages can only be returned if the
1048 * caller will manage the page reference count.
1050 if (!(flags & FOLL_GET))
1051 return ERR_PTR(-EEXIST);
1053 pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT;
1054 pgmap = get_dev_pagemap(pfn, NULL);
1056 return ERR_PTR(-EFAULT);
1057 page = pfn_to_page(pfn);
1059 put_dev_pagemap(pgmap);
1064 int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1065 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
1066 struct vm_area_struct *vma)
1068 spinlock_t *dst_ptl, *src_ptl;
1069 struct page *src_page;
1071 pgtable_t pgtable = NULL;
1074 if (!vma_is_dax(vma)) {
1076 pgtable = pte_alloc_one(dst_mm, addr);
1077 if (unlikely(!pgtable))
1081 dst_ptl = pmd_lock(dst_mm, dst_pmd);
1082 src_ptl = pmd_lockptr(src_mm, src_pmd);
1083 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
1087 if (unlikely(!pmd_trans_huge(pmd) && !pmd_devmap(pmd))) {
1088 pte_free(dst_mm, pgtable);
1092 * When page table lock is held, the huge zero pmd should not be
1093 * under splitting since we don't split the page itself, only pmd to
1096 if (is_huge_zero_pmd(pmd)) {
1097 struct page *zero_page;
1099 * get_huge_zero_page() will never allocate a new page here,
1100 * since we already have a zero page to copy. It just takes a
1103 zero_page = get_huge_zero_page();
1104 set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd,
1110 if (!vma_is_dax(vma)) {
1111 /* thp accounting separate from pmd_devmap accounting */
1112 src_page = pmd_page(pmd);
1113 VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
1115 page_dup_rmap(src_page, true);
1116 add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
1117 atomic_long_inc(&dst_mm->nr_ptes);
1118 pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
1121 pmdp_set_wrprotect(src_mm, addr, src_pmd);
1122 pmd = pmd_mkold(pmd_wrprotect(pmd));
1123 set_pmd_at(dst_mm, addr, dst_pmd, pmd);
1127 spin_unlock(src_ptl);
1128 spin_unlock(dst_ptl);
1133 void huge_pmd_set_accessed(struct mm_struct *mm,
1134 struct vm_area_struct *vma,
1135 unsigned long address,
1136 pmd_t *pmd, pmd_t orig_pmd,
1141 unsigned long haddr;
1143 ptl = pmd_lock(mm, pmd);
1144 if (unlikely(!pmd_same(*pmd, orig_pmd)))
1147 entry = pmd_mkyoung(orig_pmd);
1148 haddr = address & HPAGE_PMD_MASK;
1149 if (pmdp_set_access_flags(vma, haddr, pmd, entry, dirty))
1150 update_mmu_cache_pmd(vma, address, pmd);
1156 static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
1157 struct vm_area_struct *vma,
1158 unsigned long address,
1159 pmd_t *pmd, pmd_t orig_pmd,
1161 unsigned long haddr)
1163 struct mem_cgroup *memcg;
1168 struct page **pages;
1169 unsigned long mmun_start; /* For mmu_notifiers */
1170 unsigned long mmun_end; /* For mmu_notifiers */
1172 pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR,
1174 if (unlikely(!pages)) {
1175 ret |= VM_FAULT_OOM;
1179 for (i = 0; i < HPAGE_PMD_NR; i++) {
1180 pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE |
1182 vma, address, page_to_nid(page));
1183 if (unlikely(!pages[i] ||
1184 mem_cgroup_try_charge(pages[i], mm, GFP_KERNEL,
1189 memcg = (void *)page_private(pages[i]);
1190 set_page_private(pages[i], 0);
1191 mem_cgroup_cancel_charge(pages[i], memcg,
1196 ret |= VM_FAULT_OOM;
1199 set_page_private(pages[i], (unsigned long)memcg);
1202 for (i = 0; i < HPAGE_PMD_NR; i++) {
1203 copy_user_highpage(pages[i], page + i,
1204 haddr + PAGE_SIZE * i, vma);
1205 __SetPageUptodate(pages[i]);
1210 mmun_end = haddr + HPAGE_PMD_SIZE;
1211 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
1213 ptl = pmd_lock(mm, pmd);
1214 if (unlikely(!pmd_same(*pmd, orig_pmd)))
1215 goto out_free_pages;
1216 VM_BUG_ON_PAGE(!PageHead(page), page);
1218 pmdp_huge_clear_flush_notify(vma, haddr, pmd);
1219 /* leave pmd empty until pte is filled */
1221 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
1222 pmd_populate(mm, &_pmd, pgtable);
1224 for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
1226 entry = mk_pte(pages[i], vma->vm_page_prot);
1227 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
1228 memcg = (void *)page_private(pages[i]);
1229 set_page_private(pages[i], 0);
1230 page_add_new_anon_rmap(pages[i], vma, haddr, false);
1231 mem_cgroup_commit_charge(pages[i], memcg, false, false);
1232 lru_cache_add_active_or_unevictable(pages[i], vma);
1233 pte = pte_offset_map(&_pmd, haddr);
1234 VM_BUG_ON(!pte_none(*pte));
1235 set_pte_at(mm, haddr, pte, entry);
1240 smp_wmb(); /* make pte visible before pmd */
1241 pmd_populate(mm, pmd, pgtable);
1242 page_remove_rmap(page, true);
1245 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1247 ret |= VM_FAULT_WRITE;
1255 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1256 for (i = 0; i < HPAGE_PMD_NR; i++) {
1257 memcg = (void *)page_private(pages[i]);
1258 set_page_private(pages[i], 0);
1259 mem_cgroup_cancel_charge(pages[i], memcg, false);
1266 int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
1267 unsigned long address, pmd_t *pmd, pmd_t orig_pmd)
1271 struct page *page = NULL, *new_page;
1272 struct mem_cgroup *memcg;
1273 unsigned long haddr;
1274 unsigned long mmun_start; /* For mmu_notifiers */
1275 unsigned long mmun_end; /* For mmu_notifiers */
1276 gfp_t huge_gfp; /* for allocation and charge */
1278 ptl = pmd_lockptr(mm, pmd);
1279 VM_BUG_ON_VMA(!vma->anon_vma, vma);
1280 haddr = address & HPAGE_PMD_MASK;
1281 if (is_huge_zero_pmd(orig_pmd))
1284 if (unlikely(!pmd_same(*pmd, orig_pmd)))
1287 page = pmd_page(orig_pmd);
1288 VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page);
1290 * We can only reuse the page if nobody else maps the huge page or its
1291 * part. We could do that by checking page_mapcount() on each sub-page,
1293 * but that is expensive. The cheaper way is to check that page_count() is 1:
1294 * every mapcount takes a page reference, so this way we can
1295 * guarantee that the PMD is the only mapping.
1296 * This can give a false negative if somebody pinned the page, but that's fine.
1299 if (page_mapcount(page) == 1 && page_count(page) == 1) {
1301 entry = pmd_mkyoung(orig_pmd);
1302 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
1303 if (pmdp_set_access_flags(vma, haddr, pmd, entry, 1))
1304 update_mmu_cache_pmd(vma, address, pmd);
1305 ret |= VM_FAULT_WRITE;
1311 if (transparent_hugepage_enabled(vma) &&
1312 !transparent_hugepage_debug_cow()) {
1313 huge_gfp = alloc_hugepage_gfpmask(transparent_hugepage_defrag(vma), 0);
1314 new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER);
1318 if (likely(new_page)) {
1319 prep_transhuge_page(new_page);
1322 split_huge_pmd(vma, pmd, address);
1323 ret |= VM_FAULT_FALLBACK;
1325 ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
1326 pmd, orig_pmd, page, haddr);
1327 if (ret & VM_FAULT_OOM) {
1328 split_huge_pmd(vma, pmd, address);
1329 ret |= VM_FAULT_FALLBACK;
1333 count_vm_event(THP_FAULT_FALLBACK);
1337 if (unlikely(mem_cgroup_try_charge(new_page, mm, huge_gfp, &memcg,
1341 split_huge_pmd(vma, pmd, address);
1344 split_huge_pmd(vma, pmd, address);
1345 ret |= VM_FAULT_FALLBACK;
1346 count_vm_event(THP_FAULT_FALLBACK);
1350 count_vm_event(THP_FAULT_ALLOC);
1353 clear_huge_page(new_page, haddr, HPAGE_PMD_NR);
1355 copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
1356 __SetPageUptodate(new_page);
1359 mmun_end = haddr + HPAGE_PMD_SIZE;
1360 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
1365 if (unlikely(!pmd_same(*pmd, orig_pmd))) {
1367 mem_cgroup_cancel_charge(new_page, memcg, true);
1372 entry = mk_huge_pmd(new_page, vma->vm_page_prot);
1373 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
1374 pmdp_huge_clear_flush_notify(vma, haddr, pmd);
1375 page_add_new_anon_rmap(new_page, vma, haddr, true);
1376 mem_cgroup_commit_charge(new_page, memcg, false, true);
1377 lru_cache_add_active_or_unevictable(new_page, vma);
1378 set_pmd_at(mm, haddr, pmd, entry);
1379 update_mmu_cache_pmd(vma, address, pmd);
1381 add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
1382 put_huge_zero_page();
1384 VM_BUG_ON_PAGE(!PageHead(page), page);
1385 page_remove_rmap(page, true);
1388 ret |= VM_FAULT_WRITE;
1392 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1400 struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
1405 struct mm_struct *mm = vma->vm_mm;
1406 struct page *page = NULL;
1408 assert_spin_locked(pmd_lockptr(mm, pmd));
1410 if (flags & FOLL_WRITE && !pmd_write(*pmd))
1413 /* Avoid dumping huge zero page */
1414 if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
1415 return ERR_PTR(-EFAULT);
1417 /* Full NUMA hinting faults to serialise migration in fault paths */
1418 if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
1421 page = pmd_page(*pmd);
1422 VM_BUG_ON_PAGE(!PageHead(page), page);
1423 if (flags & FOLL_TOUCH)
1424 touch_pmd(vma, addr, pmd);
1425 if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
1427 * We don't mlock() pte-mapped THPs. This way we can avoid
1428 * leaking mlocked pages into non-VM_LOCKED VMAs.
1430 * In most cases the pmd is the only mapping of the page as we
1431 * break COW for the mlock() -- see gup_flags |= FOLL_WRITE for
1432 * writable private mappings in populate_vma_page_range().
1434 * The only scenario when we have the page shared here is if we are
1435 * mlocking a read-only mapping shared over fork(). We skip
1436 * mlocking such pages.
1438 if (compound_mapcount(page) == 1 && !PageDoubleMap(page) &&
1439 page->mapping && trylock_page(page)) {
1442 mlock_vma_page(page);
1446 page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
1447 VM_BUG_ON_PAGE(!PageCompound(page), page);
1448 if (flags & FOLL_GET)
1455 /* NUMA hinting page fault entry point for trans huge pmds */
1456 int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
1457 unsigned long addr, pmd_t pmd, pmd_t *pmdp)
1460 struct anon_vma *anon_vma = NULL;
1462 unsigned long haddr = addr & HPAGE_PMD_MASK;
1463 int page_nid = -1, this_nid = numa_node_id();
1464 int target_nid, last_cpupid = -1;
1466 bool migrated = false;
1470 /* A PROT_NONE fault should not end up here */
1471 BUG_ON(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)));
1473 ptl = pmd_lock(mm, pmdp);
1474 if (unlikely(!pmd_same(pmd, *pmdp)))
1478 * If there are potential migrations, wait for completion and retry
1479 * without disrupting NUMA hinting information. Do not relock and
1480 * check_same as the page may no longer be mapped.
1482 if (unlikely(pmd_trans_migrating(*pmdp))) {
1483 page = pmd_page(*pmdp);
1485 wait_on_page_locked(page);
1489 page = pmd_page(pmd);
1490 BUG_ON(is_huge_zero_page(page));
1491 page_nid = page_to_nid(page);
1492 last_cpupid = page_cpupid_last(page);
1493 count_vm_numa_event(NUMA_HINT_FAULTS);
1494 if (page_nid == this_nid) {
1495 count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
1496 flags |= TNF_FAULT_LOCAL;
1499 /* See similar comment in do_numa_page for explanation */
1500 if (!(vma->vm_flags & VM_WRITE))
1501 flags |= TNF_NO_GROUP;
1504 * Acquire the page lock to serialise THP migrations but avoid dropping
1505 * page_table_lock if at all possible
1507 page_locked = trylock_page(page);
1508 target_nid = mpol_misplaced(page, vma, haddr);
1509 if (target_nid == -1) {
1510 /* If the page was locked, there are no parallel migrations */
1515 /* Migration could have started since the pmd_trans_migrating check */
1518 wait_on_page_locked(page);
1524 * Page is misplaced. Page lock serialises migrations. Acquire anon_vma
1525 * to serialise splits
1529 anon_vma = page_lock_anon_vma_read(page);
1531 /* Confirm the PMD did not change while page_table_lock was released */
1533 if (unlikely(!pmd_same(pmd, *pmdp))) {
1540 /* Bail if we fail to protect against THP splits for any reason */
1541 if (unlikely(!anon_vma)) {
1548 * Migrate the THP to the requested node; this returns with the page unlocked
1549 * and access rights restored.
1552 migrated = migrate_misplaced_transhuge_page(mm, vma,
1553 pmdp, pmd, addr, page, target_nid);
1555 flags |= TNF_MIGRATED;
1556 page_nid = target_nid;
1558 flags |= TNF_MIGRATE_FAIL;
1562 BUG_ON(!PageLocked(page));
1563 was_writable = pmd_write(pmd);
1564 pmd = pmd_modify(pmd, vma->vm_page_prot);
1565 pmd = pmd_mkyoung(pmd);
1567 pmd = pmd_mkwrite(pmd);
1568 set_pmd_at(mm, haddr, pmdp, pmd);
1569 update_mmu_cache_pmd(vma, addr, pmdp);
1576 page_unlock_anon_vma_read(anon_vma);
1579 task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, flags);
1584 int madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
1585 pmd_t *pmd, unsigned long addr, unsigned long next)
1591 struct mm_struct *mm = tlb->mm;
1594 ptl = pmd_trans_huge_lock(pmd, vma);
1599 if (is_huge_zero_pmd(orig_pmd)) {
1604 page = pmd_page(orig_pmd);
1606 * If other processes are mapping this page, we can't discard
1607 * the page unless they all do MADV_FREE, so let's skip the page.
1609 if (page_mapcount(page) != 1)
1612 if (!trylock_page(page))
1616 * If the user wants to discard part of the THP's pages, split it so
1617 * MADV_FREE will deactivate only those pages.
1619 if (next - addr != HPAGE_PMD_SIZE) {
1622 if (split_huge_page(page)) {
1633 if (PageDirty(page))
1634 ClearPageDirty(page);
1637 if (PageActive(page))
1638 deactivate_page(page);
1640 if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
1641 orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd,
1643 orig_pmd = pmd_mkold(orig_pmd);
1644 orig_pmd = pmd_mkclean(orig_pmd);
1646 set_pmd_at(mm, addr, pmd, orig_pmd);
1647 tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
1656 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
1657 pmd_t *pmd, unsigned long addr)
1662 ptl = __pmd_trans_huge_lock(pmd, vma);
1666 * For architectures like ppc64 we look at deposited pgtable
1667 * when calling pmdp_huge_get_and_clear. So do the
1668 * pgtable_trans_huge_withdraw after finishing pmdp related
1671 orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd,
1673 tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
1674 if (vma_is_dax(vma)) {
1676 if (is_huge_zero_pmd(orig_pmd))
1677 put_huge_zero_page();
1678 } else if (is_huge_zero_pmd(orig_pmd)) {
1679 pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd));
1680 atomic_long_dec(&tlb->mm->nr_ptes);
1682 put_huge_zero_page();
1684 struct page *page = pmd_page(orig_pmd);
1685 page_remove_rmap(page, true);
1686 VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
1687 add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
1688 VM_BUG_ON_PAGE(!PageHead(page), page);
1689 pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd));
1690 atomic_long_dec(&tlb->mm->nr_ptes);
1692 tlb_remove_page(tlb, page);
1697 bool move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
1698 unsigned long old_addr,
1699 unsigned long new_addr, unsigned long old_end,
1700 pmd_t *old_pmd, pmd_t *new_pmd)
1702 spinlock_t *old_ptl, *new_ptl;
1705 struct mm_struct *mm = vma->vm_mm;
1707 if ((old_addr & ~HPAGE_PMD_MASK) ||
1708 (new_addr & ~HPAGE_PMD_MASK) ||
1709 old_end - old_addr < HPAGE_PMD_SIZE ||
1710 (new_vma->vm_flags & VM_NOHUGEPAGE))
1714 * The destination pmd shouldn't be established; free_pgtables()
1715 * should have released it.
1717 if (WARN_ON(!pmd_none(*new_pmd))) {
1718 VM_BUG_ON(pmd_trans_huge(*new_pmd));
1723 * We don't have to worry about the ordering of src and dst
1724 * ptlocks because exclusive mmap_sem prevents deadlock.
1726 old_ptl = __pmd_trans_huge_lock(old_pmd, vma);
1728 new_ptl = pmd_lockptr(mm, new_pmd);
1729 if (new_ptl != old_ptl)
1730 spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
1731 pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
1732 VM_BUG_ON(!pmd_none(*new_pmd));
1734 if (pmd_move_must_withdraw(new_ptl, old_ptl)) {
1736 pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
1737 pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
1739 set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd));
1740 if (new_ptl != old_ptl)
1741 spin_unlock(new_ptl);
1742 spin_unlock(old_ptl);
1750 * - 0 if PMD could not be locked
1751 * - 1 if PMD was locked but protections are unchanged and TLB flush is unnecessary
1752 * - HPAGE_PMD_NR if protections changed and a TLB flush is necessary
1754 int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1755 unsigned long addr, pgprot_t newprot, int prot_numa)
1757 struct mm_struct *mm = vma->vm_mm;
1761 ptl = __pmd_trans_huge_lock(pmd, vma);
1764 bool preserve_write = prot_numa && pmd_write(*pmd);
1768 * Avoid trapping faults against the zero page. The read-only
1769 * data is likely to be read-cached on the local CPU and
1770 * local/remote hits to the zero page are not interesting.
1772 if (prot_numa && is_huge_zero_pmd(*pmd)) {
1777 if (!prot_numa || !pmd_protnone(*pmd)) {
1778 entry = pmdp_huge_get_and_clear_notify(mm, addr, pmd);
1779 entry = pmd_modify(entry, newprot);
1781 entry = pmd_mkwrite(entry);
1783 set_pmd_at(mm, addr, pmd, entry);
1784 BUG_ON(!preserve_write && pmd_write(entry));
1793 * Returns the page table lock pointer if a given pmd maps a thp, NULL otherwise.
1795 * Note that if it returns the lock pointer, this routine returns without
1796 * unlocking the page table lock. So callers must unlock it.
1798 spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
1801 ptl = pmd_lock(vma->vm_mm, pmd);
1802 if (likely(pmd_trans_huge(*pmd) || pmd_devmap(*pmd)))
1808 #define VM_NO_THP (VM_SPECIAL | VM_HUGETLB | VM_SHARED | VM_MAYSHARE)
1810 int hugepage_madvise(struct vm_area_struct *vma,
1811 unsigned long *vm_flags, int advice)
1817 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
1818 * can't handle this properly after s390_enable_sie, so we simply
1819 * ignore the madvise to prevent qemu from causing a SIGSEGV.
1821 if (mm_has_pgste(vma->vm_mm))
1825 * Be somewhat over-protective like KSM for now!
1827 if (*vm_flags & VM_NO_THP)
1829 *vm_flags &= ~VM_NOHUGEPAGE;
1830 *vm_flags |= VM_HUGEPAGE;
1832 * If the vma becomes good for khugepaged to scan,
1833 * register it here without waiting for a page fault that
1834 * may not happen any time soon.
1836 if (unlikely(khugepaged_enter_vma_merge(vma, *vm_flags)))
1839 case MADV_NOHUGEPAGE:
1841 * Be somewhat over-protective like KSM for now!
1843 if (*vm_flags & VM_NO_THP)
1845 *vm_flags &= ~VM_HUGEPAGE;
1846 *vm_flags |= VM_NOHUGEPAGE;
1848 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
1849 * this vma even if we leave the mm registered in khugepaged,
1850 * in case it got registered before VM_NOHUGEPAGE was set.
1858 static int __init khugepaged_slab_init(void)
1860 mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
1861 sizeof(struct mm_slot),
1862 __alignof__(struct mm_slot), 0, NULL);
1869 static void __init khugepaged_slab_exit(void)
1871 kmem_cache_destroy(mm_slot_cache);
1874 static inline struct mm_slot *alloc_mm_slot(void)
1876 if (!mm_slot_cache) /* initialization failed */
1878 return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
1881 static inline void free_mm_slot(struct mm_slot *mm_slot)
1883 kmem_cache_free(mm_slot_cache, mm_slot);
1886 static struct mm_slot *get_mm_slot(struct mm_struct *mm)
1888 struct mm_slot *mm_slot;
1890 hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
1891 if (mm == mm_slot->mm)
1897 static void insert_to_mm_slots_hash(struct mm_struct *mm,
1898 struct mm_slot *mm_slot)
1901 hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
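/* True once the last user of the mm is gone and its address space is being torn down. */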
1904 static inline int khugepaged_test_exit(struct mm_struct *mm)
1906 return atomic_read(&mm->mm_users) == 0;
1909 int __khugepaged_enter(struct mm_struct *mm)
1911 struct mm_slot *mm_slot;
1914 mm_slot = alloc_mm_slot();
1918 /* __khugepaged_exit() must not run from under us */
1919 VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
1920 if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
1921 free_mm_slot(mm_slot);
1925 spin_lock(&khugepaged_mm_lock);
1926 insert_to_mm_slots_hash(mm, mm_slot);
1928 * Insert just behind the scanning cursor, to let the area settle
1931 wakeup = list_empty(&khugepaged_scan.mm_head);
1932 list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
1933 spin_unlock(&khugepaged_mm_lock);
1935 atomic_inc(&mm->mm_count);
1937 wake_up_interruptible(&khugepaged_wait);
1942 int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
1943 unsigned long vm_flags)
1945 unsigned long hstart, hend;
1948 * Not yet faulted in so we will register later in the
1949 * page fault if needed.
1953 /* khugepaged not yet working on file or special mappings */
1955 VM_BUG_ON_VMA(vm_flags & VM_NO_THP, vma);
1956 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
1957 hend = vma->vm_end & HPAGE_PMD_MASK;
1959 return khugepaged_enter(vma, vm_flags);
1963 void __khugepaged_exit(struct mm_struct *mm)
1965 struct mm_slot *mm_slot;
1968 spin_lock(&khugepaged_mm_lock);
1969 mm_slot = get_mm_slot(mm);
1970 if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
1971 hash_del(&mm_slot->hash);
1972 list_del(&mm_slot->mm_node);
1975 spin_unlock(&khugepaged_mm_lock);
1978 clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1979 free_mm_slot(mm_slot);
1981 } else if (mm_slot) {
1983 * This is required to serialize against
1984 * khugepaged_test_exit() (which is guaranteed to run
1985 * under mmap sem read mode). Stop here (after we
1986 * return all pagetables will be destroyed) until
1987 * khugepaged has finished working on the pagetables
1988 * under the mmap_sem.
1990 down_write(&mm->mmap_sem);
1991 up_write(&mm->mmap_sem);
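/* Put a page isolated by __collapse_huge_page_isolate() back on the LRU. */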
1995 static void release_pte_page(struct page *page)
1997 /* 0 stands for page_is_file_cache(page) == false */
1998 dec_zone_page_state(page, NR_ISOLATED_ANON + 0);
2000 putback_lru_page(page);
2003 static void release_pte_pages(pte_t *pte, pte_t *_pte)
2005 while (--_pte >= pte) {
2006 pte_t pteval = *_pte;
2007 if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)))
2008 release_pte_page(pte_page(pteval));
2012 static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
2013 unsigned long address,
2016 struct page *page = NULL;
2018 int none_or_zero = 0, result = 0;
2019 bool referenced = false, writable = false;
2021 for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
2022 _pte++, address += PAGE_SIZE) {
2023 pte_t pteval = *_pte;
2024 if (pte_none(pteval) || (pte_present(pteval) &&
2025 is_zero_pfn(pte_pfn(pteval)))) {
2026 if (!userfaultfd_armed(vma) &&
2027 ++none_or_zero <= khugepaged_max_ptes_none) {
2030 result = SCAN_EXCEED_NONE_PTE;
2034 if (!pte_present(pteval)) {
2035 result = SCAN_PTE_NON_PRESENT;
2038 page = vm_normal_page(vma, address, pteval);
2039 if (unlikely(!page)) {
2040 result = SCAN_PAGE_NULL;
2044 VM_BUG_ON_PAGE(PageCompound(page), page);
2045 VM_BUG_ON_PAGE(!PageAnon(page), page);
2046 VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
2049 * We can do it before isolate_lru_page because the
2050 * page can't be freed from under us. NOTE: PG_lock
2051 * is needed to serialize against split_huge_page
2052 * when invoked from the VM.
2054 if (!trylock_page(page)) {
2055 result = SCAN_PAGE_LOCK;
2060 * cannot use mapcount: can't collapse if there's a gup pin.
2061 * The page must only be referenced by the scanned process
2062 * and page swap cache.
2064 if (page_count(page) != 1 + !!PageSwapCache(page)) {
2066 result = SCAN_PAGE_COUNT;
2069 if (pte_write(pteval)) {
2072 if (PageSwapCache(page) && !reuse_swap_page(page)) {
2074 result = SCAN_SWAP_CACHE_PAGE;
2078 * Page is not in the swap cache. It can be collapsed
2084 * Isolate the page to avoid collapsing a hugepage
2085 * currently in use by the VM.
2087 if (isolate_lru_page(page)) {
2089 result = SCAN_DEL_PAGE_LRU;
2092 /* 0 stands for page_is_file_cache(page) == false */
2093 inc_zone_page_state(page, NR_ISOLATED_ANON + 0);
2094 VM_BUG_ON_PAGE(!PageLocked(page), page);
2095 VM_BUG_ON_PAGE(PageLRU(page), page);
2097 /* If no mapped pte is young, don't collapse the page */
2098 if (pte_young(pteval) ||
2099 page_is_young(page) || PageReferenced(page) ||
2100 mmu_notifier_test_young(vma->vm_mm, address))
2103 if (likely(writable)) {
2104 if (likely(referenced)) {
2105 result = SCAN_SUCCEED;
2106 trace_mm_collapse_huge_page_isolate(page, none_or_zero,
2107 referenced, writable, result);
2111 result = SCAN_PAGE_RO;
2115 release_pte_pages(pte, _pte);
2116 trace_mm_collapse_huge_page_isolate(page, none_or_zero,
2117 referenced, writable, result);
2121 static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
2122 struct vm_area_struct *vma,
2123 unsigned long address,
2127 for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) {
2128 pte_t pteval = *_pte;
2129 struct page *src_page;
2131 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
2132 clear_user_highpage(page, address);
2133 add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
2134 if (is_zero_pfn(pte_pfn(pteval))) {
2136 * ptl mostly unnecessary.
2140 * paravirt calls inside pte_clear here are
2143 pte_clear(vma->vm_mm, address, _pte);
2147 src_page = pte_page(pteval);
2148 copy_user_highpage(page, src_page, address, vma);
2149 VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page);
2150 release_pte_page(src_page);
2152 * ptl mostly unnecessary, but preempt has to
2153 * be disabled to update the per-cpu stats
2154 * inside page_remove_rmap().
2158 * paravirt calls inside pte_clear here are
2161 pte_clear(vma->vm_mm, address, _pte);
2162 page_remove_rmap(src_page, false);
2164 free_page_and_swap_cache(src_page);
2167 address += PAGE_SIZE;
2172 static void khugepaged_alloc_sleep(void)
2176 add_wait_queue(&khugepaged_wait, &wait);
2177 freezable_schedule_timeout_interruptible(
2178 msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
2179 remove_wait_queue(&khugepaged_wait, &wait);
2182 static int khugepaged_node_load[MAX_NUMNODES];
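/*
 * khugepaged_node_load[] records how many of the scanned ptes map pages on
 * each node. With zone_reclaim_mode enabled, abort the scan rather than
 * collapse across nodes that are further apart than RECLAIM_DISTANCE.
 */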
2184 static bool khugepaged_scan_abort(int nid)
2189 * If zone_reclaim_mode is disabled, then no extra effort is made to
2190 * allocate memory locally.
2192 if (!zone_reclaim_mode)
2195 /* If there is a count for this node already, it must be acceptable */
2196 if (khugepaged_node_load[nid])
2199 for (i = 0; i < MAX_NUMNODES; i++) {
2200 if (!khugepaged_node_load[i])
2202 if (node_distance(nid, i) > RECLAIM_DISTANCE)
2209 static int khugepaged_find_target_node(void)
2211 static int last_khugepaged_target_node = NUMA_NO_NODE;
2212 int nid, target_node = 0, max_value = 0;
2214 /* find first node with max normal pages hit */
2215 for (nid = 0; nid < MAX_NUMNODES; nid++)
2216 if (khugepaged_node_load[nid] > max_value) {
2217 max_value = khugepaged_node_load[nid];
2221 /* do some balancing if several nodes have the same hit count */
2222 if (target_node <= last_khugepaged_target_node)
2223 for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
2225 if (max_value == khugepaged_node_load[nid]) {
2230 last_khugepaged_target_node = target_node;
2234 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
2236 if (IS_ERR(*hpage)) {
2242 khugepaged_alloc_sleep();
2243 } else if (*hpage) {
2251 static struct page *
2252 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm,
2253 unsigned long address, int node)
2255 VM_BUG_ON_PAGE(*hpage, *hpage);
2258 * Before allocating the hugepage, release the mmap_sem read lock.
2259 * The allocation can take potentially a long time if it involves
2260 * sync compaction, and we do not need to hold the mmap_sem during
2261 * that. We will recheck the vma after taking it again in write mode.
2263 up_read(&mm->mmap_sem);
2265 *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
2266 if (unlikely(!*hpage)) {
2267 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
2268 *hpage = ERR_PTR(-ENOMEM);
2272 prep_transhuge_page(*hpage);
2273 count_vm_event(THP_COLLAPSE_ALLOC);
2277 static int khugepaged_find_target_node(void)
2282 static inline struct page *alloc_hugepage(int defrag)
2286 page = alloc_pages(alloc_hugepage_gfpmask(defrag, 0), HPAGE_PMD_ORDER);
2288 prep_transhuge_page(page);
2292 static struct page *khugepaged_alloc_hugepage(bool *wait)
2297 hpage = alloc_hugepage(khugepaged_defrag());
2299 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
2304 khugepaged_alloc_sleep();
2306 count_vm_event(THP_COLLAPSE_ALLOC);
2307 } while (unlikely(!hpage) && likely(khugepaged_enabled()));
2312 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
2315 *hpage = khugepaged_alloc_hugepage(wait);
2317 if (unlikely(!*hpage))
2323 static struct page *
2324 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm,
2325 unsigned long address, int node)
2327 up_read(&mm->mmap_sem);
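/*
 * Check whether khugepaged may collapse pages in this vma at all: THP must be
 * enabled for it, it must be anonymous (no vm_ops), and it must not be a
 * temporary stack.
 */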
2334 static bool hugepage_vma_check(struct vm_area_struct *vma)
2336 if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
2337 (vma->vm_flags & VM_NOHUGEPAGE))
2339 if (!vma->anon_vma || vma->vm_ops)
2341 if (is_vma_temporary_stack(vma))
2343 VM_BUG_ON_VMA(vma->vm_flags & VM_NO_THP, vma);
2348 * Bring missing pages in from swap, to complete THP collapse.
2349 * Only done if khugepaged_scan_pmd believes it is worthwhile.
2351 * Called and returns without pte mapped or spinlocks held,
2352 * but with mmap_sem held to protect against vma changes.
2355 static void __collapse_huge_page_swapin(struct mm_struct *mm,
2356 struct vm_area_struct *vma,
2357 unsigned long address, pmd_t *pmd)
2359 unsigned long _address;
2361 int swapped_in = 0, ret = 0;
2363 pte = pte_offset_map(pmd, address);
2364 for (_address = address; _address < address + HPAGE_PMD_NR*PAGE_SIZE;
2365 pte++, _address += PAGE_SIZE) {
2367 if (!is_swap_pte(pteval))
2370 ret = do_swap_page(mm, vma, _address, pte, pmd,
2371 FAULT_FLAG_ALLOW_RETRY|FAULT_FLAG_RETRY_NOWAIT,
2373 if (ret & VM_FAULT_ERROR) {
2374 trace_mm_collapse_huge_page_swapin(mm, swapped_in, 0);
2377 /* pte is unmapped now, we need to map it */
2378 pte = pte_offset_map(pmd, _address);
2382 trace_mm_collapse_huge_page_swapin(mm, swapped_in, 1);
2385 static void collapse_huge_page(struct mm_struct *mm,
2386 unsigned long address,
2387 struct page **hpage,
2388 struct vm_area_struct *vma,
2394 struct page *new_page;
2395 spinlock_t *pmd_ptl, *pte_ptl;
2396 int isolated = 0, result = 0;
2397 unsigned long hstart, hend;
2398 struct mem_cgroup *memcg;
2399 unsigned long mmun_start; /* For mmu_notifiers */
2400 unsigned long mmun_end; /* For mmu_notifiers */
2403 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
2405 /* Only allocate from the target node */
2406 gfp = alloc_hugepage_gfpmask(khugepaged_defrag(), __GFP_OTHER_NODE) |
2409 /* release the mmap_sem read lock. */
2410 new_page = khugepaged_alloc_page(hpage, gfp, mm, address, node);
2412 result = SCAN_ALLOC_HUGE_PAGE_FAIL;
2416 if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
2417 result = SCAN_CGROUP_CHARGE_FAIL;
2422 * Prevent all access to the pagetables with the exception of
2423 * gup_fast, later handled by the ptep_clear_flush and the VM
2424 * handled by the anon_vma lock + PG_lock.
2426 down_write(&mm->mmap_sem);
2427 if (unlikely(khugepaged_test_exit(mm))) {
2428 result = SCAN_ANY_PROCESS;
2432 vma = find_vma(mm, address);
2434 result = SCAN_VMA_NULL;
2437 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2438 hend = vma->vm_end & HPAGE_PMD_MASK;
2439 if (address < hstart || address + HPAGE_PMD_SIZE > hend) {
2440 result = SCAN_ADDRESS_RANGE;
2443 if (!hugepage_vma_check(vma)) {
2444 result = SCAN_VMA_CHECK;
2447 pmd = mm_find_pmd(mm, address);
2449 result = SCAN_PMD_NULL;
2453 __collapse_huge_page_swapin(mm, vma, address, pmd);
2455 anon_vma_lock_write(vma->anon_vma);
2457 pte = pte_offset_map(pmd, address);
2458 pte_ptl = pte_lockptr(mm, pmd);
2460 mmun_start = address;
2461 mmun_end = address + HPAGE_PMD_SIZE;
2462 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
2463 pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
2465 * After this gup_fast can't run anymore. This also removes
2466 * any huge TLB entry from the CPU so we won't allow
2467 * huge and small TLB entries for the same virtual address
2468 * to avoid the risk of CPU bugs in that area.
2470 _pmd = pmdp_collapse_flush(vma, address, pmd);
2471 spin_unlock(pmd_ptl);
2472 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
2475 isolated = __collapse_huge_page_isolate(vma, address, pte);
2476 spin_unlock(pte_ptl);
2478 if (unlikely(!isolated)) {
2481 BUG_ON(!pmd_none(*pmd));
2483 * We can only use set_pmd_at when establishing
2484 * hugepmds and never for establishing regular pmds that
2485 * point to regular pagetables. Use pmd_populate for that
2487 pmd_populate(mm, pmd, pmd_pgtable(_pmd));
2488 spin_unlock(pmd_ptl);
2489 anon_vma_unlock_write(vma->anon_vma);
2495 * All pages are isolated and locked so anon_vma rmap
2496 * can't run anymore.
2498 anon_vma_unlock_write(vma->anon_vma);
2500 __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl);
2502 __SetPageUptodate(new_page);
2503 pgtable = pmd_pgtable(_pmd);
2505 _pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
2506 _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
2509 * spin_lock() below is not the equivalent of smp_wmb(), so
2510 * this is needed to prevent the copy_huge_page writes from becoming
2511 * visible after the set_pmd_at() write.
2516 BUG_ON(!pmd_none(*pmd));
2517 page_add_new_anon_rmap(new_page, vma, address, true);
2518 mem_cgroup_commit_charge(new_page, memcg, false, true);
2519 lru_cache_add_active_or_unevictable(new_page, vma);
2520 pgtable_trans_huge_deposit(mm, pmd, pgtable);
2521 set_pmd_at(mm, address, pmd, _pmd);
2522 update_mmu_cache_pmd(vma, address, pmd);
2523 spin_unlock(pmd_ptl);
2527 khugepaged_pages_collapsed++;
2528 result = SCAN_SUCCEED;
2530 up_write(&mm->mmap_sem);
2532 trace_mm_collapse_huge_page(mm, isolated, result);
2535 mem_cgroup_cancel_charge(new_page, memcg, true);
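/*
 * Scan the ptes under the pmd that maps @address and decide whether the
 * range is worth collapsing (enough present, referenced, writable
 * anonymous pages on a suitable node, not too many empty or swapped-out
 * ptes). If so, call collapse_huge_page() and return 1 with mmap_sem
 * released; otherwise return 0.
 */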
2539 static int khugepaged_scan_pmd(struct mm_struct *mm,
2540 struct vm_area_struct *vma,
2541 unsigned long address,
2542 struct page **hpage)
2546 int ret = 0, none_or_zero = 0, result = 0;
2547 struct page *page = NULL;
2548 unsigned long _address;
2550 int node = NUMA_NO_NODE, unmapped = 0;
2551 bool writable = false, referenced = false;
2553 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
2555 pmd = mm_find_pmd(mm, address);
2557 result = SCAN_PMD_NULL;
2561 memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
2562 pte = pte_offset_map_lock(mm, pmd, address, &ptl);
2563 for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
2564 _pte++, _address += PAGE_SIZE) {
2565 pte_t pteval = *_pte;
2566 if (is_swap_pte(pteval)) {
2567 if (++unmapped <= khugepaged_max_ptes_swap) {
2570 result = SCAN_EXCEED_SWAP_PTE;
2574 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
2575 if (!userfaultfd_armed(vma) &&
2576 ++none_or_zero <= khugepaged_max_ptes_none) {
2579 result = SCAN_EXCEED_NONE_PTE;
2583 if (!pte_present(pteval)) {
2584 result = SCAN_PTE_NON_PRESENT;
2587 if (pte_write(pteval))
2590 page = vm_normal_page(vma, _address, pteval);
2591 if (unlikely(!page)) {
2592 result = SCAN_PAGE_NULL;
2596 /* TODO: teach khugepaged to collapse THP mapped with pte */
2597 if (PageCompound(page)) {
2598 result = SCAN_PAGE_COMPOUND;
2603 * Record which node the original page is from and save this
2604 * information to khugepaged_node_load[].
2605 * Khugepaged will allocate the hugepage from the node with the most hits.
2608 node = page_to_nid(page);
2609 if (khugepaged_scan_abort(node)) {
2610 result = SCAN_SCAN_ABORT;
2613 khugepaged_node_load[node]++;
2614 if (!PageLRU(page)) {
2615 result = SCAN_PAGE_LRU;
2618 if (PageLocked(page)) {
2619 result = SCAN_PAGE_LOCK;
2622 if (!PageAnon(page)) {
2623 result = SCAN_PAGE_ANON;
2628 * cannot use mapcount: can't collapse if there's a gup pin.
2629 * The page must only be referenced by the scanned process
2630 * and page swap cache.
2632 if (page_count(page) != 1 + !!PageSwapCache(page)) {
2633 result = SCAN_PAGE_COUNT;
2636 if (pte_young(pteval) ||
2637 page_is_young(page) || PageReferenced(page) ||
2638 mmu_notifier_test_young(vma->vm_mm, address))
2643 result = SCAN_SUCCEED;
2646 result = SCAN_NO_REFERENCED_PAGE;
2649 result = SCAN_PAGE_RO;
2652 pte_unmap_unlock(pte, ptl);
2654 node = khugepaged_find_target_node();
2655 /* collapse_huge_page will return with the mmap_sem released */
2656 collapse_huge_page(mm, address, hpage, vma, node);
2659 trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
2660 none_or_zero, result, unmapped);
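/*
 * Drop an mm from khugepaged's scan state once it has exited: unhash its
 * slot, remove it from the scan list and free the slot. Called with
 * khugepaged_mm_lock held.
 */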
2664 static void collect_mm_slot(struct mm_slot *mm_slot)
2666 struct mm_struct *mm = mm_slot->mm;
2668 VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
2670 if (khugepaged_test_exit(mm)) {
2672 hash_del(&mm_slot->hash);
2673 list_del(&mm_slot->mm_node);
2676 * Not strictly needed because the mm exited already.
2678 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
2681 /* khugepaged_mm_lock actually not necessary for the below */
2682 free_mm_slot(mm_slot);
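/*
 * Advance the khugepaged_scan cursor across the registered mm_slots,
 * scanning up to @pages ptes with khugepaged_scan_pmd() and collapsing
 * where possible. Called and returns with khugepaged_mm_lock held, but
 * drops it (and takes mmap_sem) while scanning an mm. Returns the amount
 * of progress made, in ptes.
 */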
2687 static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
2688 struct page **hpage)
2689 __releases(&khugepaged_mm_lock)
2690 __acquires(&khugepaged_mm_lock)
2692 struct mm_slot *mm_slot;
2693 struct mm_struct *mm;
2694 struct vm_area_struct *vma;
2698 VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
2700 if (khugepaged_scan.mm_slot)
2701 mm_slot = khugepaged_scan.mm_slot;
2703 mm_slot = list_entry(khugepaged_scan.mm_head.next,
2704 struct mm_slot, mm_node);
2705 khugepaged_scan.address = 0;
2706 khugepaged_scan.mm_slot = mm_slot;
2708 spin_unlock(&khugepaged_mm_lock);
2711 down_read(&mm->mmap_sem);
2712 if (unlikely(khugepaged_test_exit(mm)))
2715 vma = find_vma(mm, khugepaged_scan.address);
2718 for (; vma; vma = vma->vm_next) {
2719 unsigned long hstart, hend;
2722 if (unlikely(khugepaged_test_exit(mm))) {
2726 if (!hugepage_vma_check(vma)) {
2731 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2732 hend = vma->vm_end & HPAGE_PMD_MASK;
2735 if (khugepaged_scan.address > hend)
2737 if (khugepaged_scan.address < hstart)
2738 khugepaged_scan.address = hstart;
2739 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2741 while (khugepaged_scan.address < hend) {
2744 if (unlikely(khugepaged_test_exit(mm)))
2745 goto breakouterloop;
2747 VM_BUG_ON(khugepaged_scan.address < hstart ||
2748 khugepaged_scan.address + HPAGE_PMD_SIZE >
2750 ret = khugepaged_scan_pmd(mm, vma,
2751 khugepaged_scan.address,
2753 /* move to next address */
2754 khugepaged_scan.address += HPAGE_PMD_SIZE;
2755 progress += HPAGE_PMD_NR;
2757 /* we released mmap_sem so break loop */
2758 goto breakouterloop_mmap_sem;
2759 if (progress >= pages)
2760 goto breakouterloop;
2764 up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
2765 breakouterloop_mmap_sem:
2767 spin_lock(&khugepaged_mm_lock);
2768 VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
2770 * Release the current mm_slot if this mm is about to die, or
2771 * if we scanned all vmas of this mm.
2773 if (khugepaged_test_exit(mm) || !vma) {
2775 * Make sure that if mm_users is reaching zero while
2776 * khugepaged runs here, khugepaged_exit will find
2777 * mm_slot not pointing to the exiting mm.
2779 if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
2780 khugepaged_scan.mm_slot = list_entry(
2781 mm_slot->mm_node.next,
2782 struct mm_slot, mm_node);
2783 khugepaged_scan.address = 0;
2785 khugepaged_scan.mm_slot = NULL;
2786 khugepaged_full_scans++;
2789 collect_mm_slot(mm_slot);
2795 static int khugepaged_has_work(void)
2797 return !list_empty(&khugepaged_scan.mm_head) &&
2798 khugepaged_enabled();
2801 static int khugepaged_wait_event(void)
2803 return !list_empty(&khugepaged_scan.mm_head) ||
2804 kthread_should_stop();
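/*
 * One pass of the khugepaged main loop: keep a preallocated hugepage
 * handy and call khugepaged_scan_mm_slot() until khugepaged_pages_to_scan
 * ptes have been scanned, the mm list is exhausted, or the thread should
 * stop or freeze.
 */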
2807 static void khugepaged_do_scan(void)
2809 struct page *hpage = NULL;
2810 unsigned int progress = 0, pass_through_head = 0;
2811 unsigned int pages = khugepaged_pages_to_scan;
2814 barrier(); /* write khugepaged_pages_to_scan to local stack */
2816 while (progress < pages) {
2817 if (!khugepaged_prealloc_page(&hpage, &wait))
2822 if (unlikely(kthread_should_stop() || try_to_freeze()))
2825 spin_lock(&khugepaged_mm_lock);
2826 if (!khugepaged_scan.mm_slot)
2827 pass_through_head++;
2828 if (khugepaged_has_work() &&
2829 pass_through_head < 2)
2830 progress += khugepaged_scan_mm_slot(pages - progress,
2834 spin_unlock(&khugepaged_mm_lock);
2837 if (!IS_ERR_OR_NULL(hpage))
2841 static void khugepaged_wait_work(void)
2843 if (khugepaged_has_work()) {
2844 if (!khugepaged_scan_sleep_millisecs)
2847 wait_event_freezable_timeout(khugepaged_wait,
2848 kthread_should_stop(),
2849 msecs_to_jiffies(khugepaged_scan_sleep_millisecs));
2853 if (khugepaged_enabled())
2854 wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
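/*
 * Main loop of the khugepaged kernel thread: scan, then sleep, until the
 * thread is stopped; on exit, release the mm_slot the scan cursor was
 * pointing at.
 */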
2857 static int khugepaged(void *none)
2859 struct mm_slot *mm_slot;
2862 set_user_nice(current, MAX_NICE);
2864 while (!kthread_should_stop()) {
2865 khugepaged_do_scan();
2866 khugepaged_wait_work();
2869 spin_lock(&khugepaged_mm_lock);
2870 mm_slot = khugepaged_scan.mm_slot;
2871 khugepaged_scan.mm_slot = NULL;
2873 collect_mm_slot(mm_slot);
2874 spin_unlock(&khugepaged_mm_lock);
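/*
 * Replace a huge zero page pmd with a page table that maps each of the
 * HPAGE_PMD_NR subpages to the (small) zero page. Called with the pmd
 * lock held.
 */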
2878 static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
2879 unsigned long haddr, pmd_t *pmd)
2881 struct mm_struct *mm = vma->vm_mm;
2886 /* leave pmd empty until pte is filled */
2887 pmdp_huge_clear_flush_notify(vma, haddr, pmd);
2889 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2890 pmd_populate(mm, &_pmd, pgtable);
2892 for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
2894 entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
2895 entry = pte_mkspecial(entry);
2896 pte = pte_offset_map(&_pmd, haddr);
2897 VM_BUG_ON(!pte_none(*pte));
2898 set_pte_at(mm, haddr, pte, entry);
2901 smp_wmb(); /* make pte visible before pmd */
2902 pmd_populate(mm, pmd, pgtable);
2903 put_huge_zero_page();
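/*
 * Split a huge pmd in place: withdraw the deposited page table, map every
 * subpage with ptes carrying the pmd's young/write/dirty state (or with
 * migration entries when @freeze is set), move the mapping accounting
 * from the compound mapcount to the per-subpage _mapcounts, and only then
 * replace the pmd, so huge and small TLB entries never coexist.
 */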
2906 static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
2907 unsigned long haddr, bool freeze)
2909 struct mm_struct *mm = vma->vm_mm;
2913 bool young, write, dirty;
2916 VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
2917 VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
2918 VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);
2919 VM_BUG_ON(!pmd_trans_huge(*pmd) && !pmd_devmap(*pmd));
2921 count_vm_event(THP_SPLIT_PMD);
2923 if (vma_is_dax(vma)) {
2924 pmd_t _pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
2925 if (is_huge_zero_pmd(_pmd))
2926 put_huge_zero_page();
2928 } else if (is_huge_zero_pmd(*pmd)) {
2929 return __split_huge_zero_page_pmd(vma, haddr, pmd);
2932 page = pmd_page(*pmd);
2933 VM_BUG_ON_PAGE(!page_count(page), page);
2934 atomic_add(HPAGE_PMD_NR - 1, &page->_count);
2935 write = pmd_write(*pmd);
2936 young = pmd_young(*pmd);
2937 dirty = pmd_dirty(*pmd);
2939 pmdp_huge_split_prepare(vma, haddr, pmd);
2940 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2941 pmd_populate(mm, &_pmd, pgtable);
2943 for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
2946 * Note that NUMA hinting access restrictions are not
2947 * transferred to avoid any possibility of altering
2948 * permissions across VMAs.
2951 swp_entry_t swp_entry;
2952 swp_entry = make_migration_entry(page + i, write);
2953 entry = swp_entry_to_pte(swp_entry);
2955 entry = mk_pte(page + i, vma->vm_page_prot);
2956 entry = maybe_mkwrite(entry, vma);
2958 entry = pte_wrprotect(entry);
2960 entry = pte_mkold(entry);
2963 SetPageDirty(page + i);
2964 pte = pte_offset_map(&_pmd, haddr);
2965 BUG_ON(!pte_none(*pte));
2966 set_pte_at(mm, haddr, pte, entry);
2967 atomic_inc(&page[i]._mapcount);
2972 * Set PG_double_map before dropping compound_mapcount to avoid
2973 * false-negative page_mapped().
2975 if (compound_mapcount(page) > 1 && !TestSetPageDoubleMap(page)) {
2976 for (i = 0; i < HPAGE_PMD_NR; i++)
2977 atomic_inc(&page[i]._mapcount);
2980 if (atomic_add_negative(-1, compound_mapcount_ptr(page))) {
2981 /* Last compound_mapcount is gone. */
2982 __dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
2983 if (TestClearPageDoubleMap(page)) {
2984 /* No need for the mapcount reference anymore */
2985 for (i = 0; i < HPAGE_PMD_NR; i++)
2986 atomic_dec(&page[i]._mapcount);
2990 smp_wmb(); /* make pte visible before pmd */
2992 * Up to this point the pmd is present and huge and userland has full
2993 * access to the hugepage during the split (which happens in
2994 * place). If we overwrite the pmd with the not-huge version pointing
2995 * to the pte here (which of course we could if all CPUs were bug
2996 * free), userland could trigger a small page size TLB miss on the
2997 * small sized TLB while the hugepage TLB entry is still established in
2998 * the huge TLB. Some CPUs don't like that.
2999 * See http://support.amd.com/us/Processor_TechDocs/41322.pdf, Erratum
3000 * 383 on page 93. Intel should be safe but it also warns that it's
3001 * only safe if the permission and cache attributes of the two entries
3002 * loaded in the two TLBs are identical (which should be the case here).
3003 * But it is generally safer to never allow small and huge TLB entries
3004 * for the same virtual address to be loaded simultaneously. So instead
3005 * of doing "pmd_populate(); flush_pmd_tlb_range();" we first mark the
3006 * current pmd notpresent (atomically because here the pmd_trans_huge
3007 * and pmd_trans_splitting must remain set at all times on the pmd
3008 * until the split is complete for this pmd), then we flush the SMP TLB
3009 * and finally we write the non-huge version of the pmd entry with
3012 pmdp_invalidate(vma, haddr, pmd);
3013 pmd_populate(mm, pmd, pgtable);
3016 for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
3017 page_remove_rmap(page + i, false);
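/*
 * Split the huge pmd mapping @address: take the pmd lock, do the in-place
 * split via __split_huge_pmd_locked() and, if the pmd mapped an mlocked
 * page, fix up its mlock state once the lock is dropped.
 */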
3023 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
3024 unsigned long address)
3027 struct mm_struct *mm = vma->vm_mm;
3028 struct page *page = NULL;
3029 unsigned long haddr = address & HPAGE_PMD_MASK;
3031 mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PMD_SIZE);
3032 ptl = pmd_lock(mm, pmd);
3033 if (pmd_trans_huge(*pmd)) {
3034 page = pmd_page(*pmd);
3035 if (PageMlocked(page))
3039 } else if (!pmd_devmap(*pmd))
3041 __split_huge_pmd_locked(vma, pmd, haddr, false);
3044 mmu_notifier_invalidate_range_end(mm, haddr, haddr + HPAGE_PMD_SIZE);
3047 munlock_vma_page(page);
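/*
 * Split the huge pmd covering @address, if any. Used by
 * vma_adjust_trans_huge() when a vma boundary moves into the middle of a
 * THP mapping.
 */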
3053 static void split_huge_pmd_address(struct vm_area_struct *vma,
3054 unsigned long address)
3060 VM_BUG_ON(!(address & ~HPAGE_PMD_MASK));
3062 pgd = pgd_offset(vma->vm_mm, address);
3063 if (!pgd_present(*pgd))
3066 pud = pud_offset(pgd, address);
3067 if (!pud_present(*pud))
3070 pmd = pmd_offset(pud, address);
3071 if (!pmd_present(*pmd) || (!pmd_trans_huge(*pmd) && !pmd_devmap(*pmd)))
3074 * Caller holds the mmap_sem in write mode, so a huge pmd cannot
3075 * materialize from under us.
3077 split_huge_pmd(vma, pmd, address);
3080 void vma_adjust_trans_huge(struct vm_area_struct *vma,
3081 unsigned long start,
3086 * If the new start address isn't hpage-aligned and it could
3087 * previously contain a hugepage: check if we need to split
3090 if (start & ~HPAGE_PMD_MASK &&
3091 (start & HPAGE_PMD_MASK) >= vma->vm_start &&
3092 (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
3093 split_huge_pmd_address(vma, start);
3096 * If the new end address isn't hpage-aligned and it could
3097 * previously contain a hugepage: check if we need to split
3100 if (end & ~HPAGE_PMD_MASK &&
3101 (end & HPAGE_PMD_MASK) >= vma->vm_start &&
3102 (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
3103 split_huge_pmd_address(vma, end);
3106 * If we're also updating vma->vm_next->vm_start: if the new
3107 * vm_next->vm_start isn't page-aligned and it could previously
3108 * contain a hugepage, check if we need to split a huge pmd.
3110 if (adjust_next > 0) {
3111 struct vm_area_struct *next = vma->vm_next;
3112 unsigned long nstart = next->vm_start;
3113 nstart += adjust_next << PAGE_SHIFT;
3114 if (nstart & ~HPAGE_PMD_MASK &&
3115 (nstart & HPAGE_PMD_MASK) >= next->vm_start &&
3116 (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
3117 split_huge_pmd_address(next, nstart);
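/*
 * Unmap @page from one vma in preparation for the split: if it is still
 * mapped by a huge pmd, split that pmd in freeze mode; then replace every
 * pte that maps a subpage with a migration entry so the page cannot be
 * faulted back in while it is being torn down.
 */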
3121 static void freeze_page_vma(struct vm_area_struct *vma, struct page *page,
3122 unsigned long address)
3124 unsigned long haddr = address & HPAGE_PMD_MASK;
3130 int i, nr = HPAGE_PMD_NR;
3132 /* Skip pages which don't belong to the VMA */
3133 if (address < vma->vm_start) {
3134 int off = (vma->vm_start - address) >> PAGE_SHIFT;
3137 address = vma->vm_start;
3140 pgd = pgd_offset(vma->vm_mm, address);
3141 if (!pgd_present(*pgd))
3143 pud = pud_offset(pgd, address);
3144 if (!pud_present(*pud))
3146 pmd = pmd_offset(pud, address);
3147 ptl = pmd_lock(vma->vm_mm, pmd);
3148 if (!pmd_present(*pmd)) {
3152 if (pmd_trans_huge(*pmd)) {
3153 if (page == pmd_page(*pmd))
3154 __split_huge_pmd_locked(vma, pmd, haddr, true);
3160 pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
3161 for (i = 0; i < nr; i++, address += PAGE_SIZE, page++, pte++) {
3162 pte_t entry, swp_pte;
3163 swp_entry_t swp_entry;
3166 * We've just crossed a page table boundary: need to map the next one.
3167 * It can happen if the THP was mremapped to a non-PMD-aligned address.
3169 if (unlikely(address == haddr + HPAGE_PMD_SIZE)) {
3170 pte_unmap_unlock(pte - 1, ptl);
3171 pmd = mm_find_pmd(vma->vm_mm, address);
3174 pte = pte_offset_map_lock(vma->vm_mm, pmd,
3178 if (!pte_present(*pte))
3180 if (page_to_pfn(page) != pte_pfn(*pte))
3182 flush_cache_page(vma, address, page_to_pfn(page));
3183 entry = ptep_clear_flush(vma, address, pte);
3184 if (pte_dirty(entry))
3186 swp_entry = make_migration_entry(page, pte_write(entry));
3187 swp_pte = swp_entry_to_pte(swp_entry);
3188 if (pte_soft_dirty(entry))
3189 swp_pte = pte_swp_mksoft_dirty(swp_pte);
3190 set_pte_at(vma->vm_mm, address, pte, swp_pte);
3191 page_remove_rmap(page, false);
3194 pte_unmap_unlock(pte - 1, ptl);
3197 static void freeze_page(struct anon_vma *anon_vma, struct page *page)
3199 struct anon_vma_chain *avc;
3200 pgoff_t pgoff = page_to_pgoff(page);
3202 VM_BUG_ON_PAGE(!PageHead(page), page);
3204 anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff,
3205 pgoff + HPAGE_PMD_NR - 1) {
3206 unsigned long address = __vma_address(page, avc->vma);
3208 mmu_notifier_invalidate_range_start(avc->vma->vm_mm,
3209 address, address + HPAGE_PMD_SIZE);
3210 freeze_page_vma(avc->vma, page, address);
3211 mmu_notifier_invalidate_range_end(avc->vma->vm_mm,
3212 address, address + HPAGE_PMD_SIZE);
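/*
 * Undo the freeze for one vma after the split: convert the temporary
 * migration entries back into present ptes pointing at the now
 * independent subpages and re-establish the rmap for each of them.
 */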
3216 static void unfreeze_page_vma(struct vm_area_struct *vma, struct page *page,
3217 unsigned long address)
3222 swp_entry_t swp_entry;
3223 unsigned long haddr = address & HPAGE_PMD_MASK;
3224 int i, nr = HPAGE_PMD_NR;
3226 /* Skip pages which don't belong to the VMA */
3227 if (address < vma->vm_start) {
3228 int off = (vma->vm_start - address) >> PAGE_SHIFT;
3231 address = vma->vm_start;
3234 pmd = mm_find_pmd(vma->vm_mm, address);
3238 pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
3239 for (i = 0; i < nr; i++, address += PAGE_SIZE, page++, pte++) {
3241 * We've just crossed a page table boundary: need to map the next one.
3242 * It can happen if the THP was mremapped to a non-PMD-aligned address.
3244 if (unlikely(address == haddr + HPAGE_PMD_SIZE)) {
3245 pte_unmap_unlock(pte - 1, ptl);
3246 pmd = mm_find_pmd(vma->vm_mm, address);
3249 pte = pte_offset_map_lock(vma->vm_mm, pmd,
3253 if (!is_swap_pte(*pte))
3256 swp_entry = pte_to_swp_entry(*pte);
3257 if (!is_migration_entry(swp_entry))
3259 if (migration_entry_to_page(swp_entry) != page)
3263 page_add_anon_rmap(page, vma, address, false);
3265 entry = pte_mkold(mk_pte(page, vma->vm_page_prot));
3266 if (PageDirty(page))
3267 entry = pte_mkdirty(entry);
3268 if (is_write_migration_entry(swp_entry))
3269 entry = maybe_mkwrite(entry, vma);
3271 flush_dcache_page(page);
3272 set_pte_at(vma->vm_mm, address, pte, entry);
3274 /* No need to invalidate - it was non-present before */
3275 update_mmu_cache(vma, address, pte);
3277 pte_unmap_unlock(pte - 1, ptl);
3280 static void unfreeze_page(struct anon_vma *anon_vma, struct page *page)
3282 struct anon_vma_chain *avc;
3283 pgoff_t pgoff = page_to_pgoff(page);
3285 anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
3286 pgoff, pgoff + HPAGE_PMD_NR - 1) {
3287 unsigned long address = __vma_address(page, avc->vma);
3289 mmu_notifier_invalidate_range_start(avc->vma->vm_mm,
3290 address, address + HPAGE_PMD_SIZE);
3291 unfreeze_page_vma(avc->vma, page, address);
3292 mmu_notifier_invalidate_range_end(avc->vma->vm_mm,
3293 address, address + HPAGE_PMD_SIZE);
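/*
 * Turn one tail page of the compound page into an independent page: raise
 * its refcount, inherit the relevant flags, mapping and index from the
 * head, clear its compound linkage and add it to the LRU next to the
 * head.
 */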
3297 static void __split_huge_page_tail(struct page *head, int tail,
3298 struct lruvec *lruvec, struct list_head *list)
3300 struct page *page_tail = head + tail;
3302 VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail);
3303 VM_BUG_ON_PAGE(atomic_read(&page_tail->_count) != 0, page_tail);
3306 * tail_page->_count is zero and not changing from under us. But
3307 * get_page_unless_zero() may be running from under us on the
3308 * tail_page. If we used atomic_set() below instead of atomic_inc(), we
3309 * would then run atomic_set() concurrently with
3310 * get_page_unless_zero(), and atomic_set() is implemented in C not
3311 * using locked ops. spin_unlock on x86 sometimes uses locked ops
3312 * because of PPro errata 66, 92, so unless somebody can guarantee
3313 * atomic_set() here would be safe on all archs (and not only on x86),
3314 * it's safer to use atomic_inc().
3316 atomic_inc(&page_tail->_count);
3318 page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
3319 page_tail->flags |= (head->flags &
3320 ((1L << PG_referenced) |
3321 (1L << PG_swapbacked) |
3322 (1L << PG_mlocked) |
3323 (1L << PG_uptodate) |
3326 (1L << PG_unevictable) |
3330 * After clearing PageTail the gup refcount can be released.
3331 * Page flags also must be visible before we make the page non-compound.
3335 clear_compound_head(page_tail);
3337 if (page_is_young(head))
3338 set_page_young(page_tail);
3339 if (page_is_idle(head))
3340 set_page_idle(page_tail);
3342 /* ->mapping in first tail page is compound_mapcount */
3343 VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
3345 page_tail->mapping = head->mapping;
3347 page_tail->index = head->index + tail;
3348 page_cpupid_xchg_last(page_tail, page_cpupid_last(head));
3349 lru_add_page_tail(head, page_tail, lruvec, list);
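/*
 * Second half of the split, called with the page already frozen
 * (unmapped): under zone->lru_lock convert every tail page with
 * __split_huge_page_tail(), clear the compound state of the head, then
 * map the subpages back in with unfreeze_page() and unlock them.
 */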
3352 static void __split_huge_page(struct page *page, struct list_head *list)
3354 struct page *head = compound_head(page);
3355 struct zone *zone = page_zone(head);
3356 struct lruvec *lruvec;
3359 /* prevent PageLRU from going away from under us, and freeze lru stats */
3360 spin_lock_irq(&zone->lru_lock);
3361 lruvec = mem_cgroup_page_lruvec(head, zone);
3363 /* complete memcg work before adding pages to the LRU */
3364 mem_cgroup_split_huge_fixup(head);
3366 for (i = HPAGE_PMD_NR - 1; i >= 1; i--)
3367 __split_huge_page_tail(head, i, lruvec, list);
3369 ClearPageCompound(head);
3370 spin_unlock_irq(&zone->lru_lock);
3372 unfreeze_page(page_anon_vma(head), head);
3374 for (i = 0; i < HPAGE_PMD_NR; i++) {
3375 struct page *subpage = head + i;
3376 if (subpage == page)
3378 unlock_page(subpage);
3381 * Subpages may be freed if there was no mapping left,
3382 * e.g. if add_to_swap() is running on an LRU page that
3383 * had its mapping zapped. Freeing these pages requires
3384 * taking the lru_lock, so we do the put_page of the
3385 * tail pages after the split is complete.
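/*
 * Total number of mappings of a page: for a compound page this is the
 * compound (pmd) mapcount plus the per-subpage pte mapcounts, with the
 * PageDoubleMap() double accounting subtracted out.
 */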
3391 int total_mapcount(struct page *page)
3395 VM_BUG_ON_PAGE(PageTail(page), page);
3397 if (likely(!PageCompound(page)))
3398 return atomic_read(&page->_mapcount) + 1;
3400 ret = compound_mapcount(page);
3403 for (i = 0; i < HPAGE_PMD_NR; i++)
3404 ret += atomic_read(&page[i]._mapcount) + 1;
3405 if (PageDoubleMap(page))
3406 ret -= HPAGE_PMD_NR;
3411 * This function splits a huge page into normal pages. @page can point to any
3412 * subpage of the huge page to split. The split doesn't change the position of @page.
3414 * The caller must hold a pin on the @page; otherwise the split fails with -EBUSY.
3415 * The huge page must be locked.
3417 * If @list is null, tail pages will be added to the LRU list; otherwise, to @list.
3419 * Both head page and tail pages will inherit mapping, flags, and so on from the huge page.
3422 * The GUP pin and PG_locked are transferred to @page. The rest of the subpages can be freed if
3423 * they are not mapped.
3425 * Returns 0 if the hugepage is split successfully.
3426 * Returns -EBUSY if the page is pinned or if the anon_vma disappeared from under us.
3429 int split_huge_page_to_list(struct page *page, struct list_head *list)
3431 struct page *head = compound_head(page);
3432 struct pglist_data *pgdata = NODE_DATA(page_to_nid(head));
3433 struct anon_vma *anon_vma;
3434 int count, mapcount, ret;
3436 unsigned long flags;
3438 VM_BUG_ON_PAGE(is_huge_zero_page(page), page);
3439 VM_BUG_ON_PAGE(!PageAnon(page), page);
3440 VM_BUG_ON_PAGE(!PageLocked(page), page);
3441 VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
3442 VM_BUG_ON_PAGE(!PageCompound(page), page);
3445 * The caller does not necessarily hold an mmap_sem that would prevent
3446 * the anon_vma disappearing, so we first take a reference to it
3447 * and then lock the anon_vma for write. This is similar to
3448 * page_lock_anon_vma_read except the write lock is taken to serialise
3449 * against parallel split or collapse operations.
3451 anon_vma = page_get_anon_vma(head);
3456 anon_vma_lock_write(anon_vma);
3459 * Racy check if we can split the page, before freeze_page() will
3462 if (total_mapcount(head) != page_count(head) - 1) {
3467 mlocked = PageMlocked(page);
3468 freeze_page(anon_vma, head);
3469 VM_BUG_ON_PAGE(compound_mapcount(head), head);
3471 /* Make sure the page is not on per-CPU pagevec as it takes pin */
3475 /* Prevent deferred_split_scan() from touching ->_count */
3476 spin_lock_irqsave(&pgdata->split_queue_lock, flags);
3477 count = page_count(head);
3478 mapcount = total_mapcount(head);
3479 if (!mapcount && count == 1) {
3480 if (!list_empty(page_deferred_list(head))) {
3481 pgdata->split_queue_len--;
3482 list_del(page_deferred_list(head));
3484 spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
3485 __split_huge_page(page, list);
3487 } else if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount) {
3488 spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
3489 pr_alert("total_mapcount: %u, page_count(): %u\n",
3492 dump_page(head, NULL);
3493 dump_page(page, "total_mapcount(head) > 0");
3496 spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
3497 unfreeze_page(anon_vma, head);
3502 anon_vma_unlock_write(anon_vma);
3503 put_anon_vma(anon_vma);
3505 count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
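/*
 * Compound page destructor for THPs: take the page off the deferred split
 * queue, if it is still queued, before freeing the compound page.
 */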
3509 void free_transhuge_page(struct page *page)
3511 struct pglist_data *pgdata = NODE_DATA(page_to_nid(page));
3512 unsigned long flags;
3514 spin_lock_irqsave(&pgdata->split_queue_lock, flags);
3515 if (!list_empty(page_deferred_list(page))) {
3516 pgdata->split_queue_len--;
3517 list_del(page_deferred_list(page));
3519 spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
3520 free_compound_page(page);
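/*
 * Queue a THP on its node's deferred split queue; the actual split is
 * left to the shrinker, which runs under memory pressure.
 */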
3523 void deferred_split_huge_page(struct page *page)
3525 struct pglist_data *pgdata = NODE_DATA(page_to_nid(page));
3526 unsigned long flags;
3528 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
3530 spin_lock_irqsave(&pgdata->split_queue_lock, flags);
3531 if (list_empty(page_deferred_list(page))) {
3532 count_vm_event(THP_DEFERRED_SPLIT_PAGE);
3533 list_add_tail(page_deferred_list(page), &pgdata->split_queue);
3534 pgdata->split_queue_len++;
3536 spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
3539 static unsigned long deferred_split_count(struct shrinker *shrink,
3540 struct shrink_control *sc)
3542 struct pglist_data *pgdata = NODE_DATA(sc->nid);
3543 return ACCESS_ONCE(pgdata->split_queue_len);
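/*
 * Shrinker scan callback: take a reference on up to sc->nr_to_scan pages
 * from this node's deferred split queue, try split_huge_page() on each,
 * and put back on the queue whatever could not be split.
 */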
3546 static unsigned long deferred_split_scan(struct shrinker *shrink,
3547 struct shrink_control *sc)
3549 struct pglist_data *pgdata = NODE_DATA(sc->nid);
3550 unsigned long flags;
3551 LIST_HEAD(list), *pos, *next;
3555 spin_lock_irqsave(&pgdata->split_queue_lock, flags);
3556 /* Take pin on all head pages to avoid freeing them under us */
3557 list_for_each_safe(pos, next, &pgdata->split_queue) {
3558 page = list_entry((void *)pos, struct page, mapping);
3559 page = compound_head(page);
3560 if (get_page_unless_zero(page)) {
3561 list_move(page_deferred_list(page), &list);
3563 /* We lost race with put_compound_page() */
3564 list_del_init(page_deferred_list(page));
3565 pgdata->split_queue_len--;
3567 if (!--sc->nr_to_scan)
3570 spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
3572 list_for_each_safe(pos, next, &list) {
3573 page = list_entry((void *)pos, struct page, mapping);
3575 /* split_huge_page() removes page from list on success */
3576 if (!split_huge_page(page))
3582 spin_lock_irqsave(&pgdata->split_queue_lock, flags);
3583 list_splice_tail(&list, &pgdata->split_queue);
3584 spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
3587 * Stop the shrinker if we didn't split any page and the queue is empty.
3588 * This can happen if pages were freed under us.
3590 if (!split && list_empty(&pgdata->split_queue))
3595 static struct shrinker deferred_split_shrinker = {
3596 .count_objects = deferred_split_count,
3597 .scan_objects = deferred_split_scan,
3598 .seeks = DEFAULT_SEEKS,
3599 .flags = SHRINKER_NUMA_AWARE,
3602 #ifdef CONFIG_DEBUG_FS
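/*
 * Write handler for the debugfs file created below: writing to
 * /sys/kernel/debug/split_huge_pages walks every populated zone and
 * attempts to split each anonymous THP it finds, reporting the totals via
 * pr_info(). Intended for testing only, e.g.:
 *
 *	echo 1 > /sys/kernel/debug/split_huge_pages
 */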
3603 static int split_huge_pages_set(void *data, u64 val)
3607 unsigned long pfn, max_zone_pfn;
3608 unsigned long total = 0, split = 0;
3613 for_each_populated_zone(zone) {
3614 max_zone_pfn = zone_end_pfn(zone);
3615 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
3616 if (!pfn_valid(pfn))
3619 page = pfn_to_page(pfn);
3620 if (!get_page_unless_zero(page))
3623 if (zone != page_zone(page))
3626 if (!PageHead(page) || !PageAnon(page) ||
3632 if (!split_huge_page(page))
3640 pr_info("%lu of %lu THP split\n", split, total);
3644 DEFINE_SIMPLE_ATTRIBUTE(split_huge_pages_fops, NULL, split_huge_pages_set,
3647 static int __init split_huge_pages_debugfs(void)
3651 ret = debugfs_create_file("split_huge_pages", 0644, NULL, NULL,
3652 &split_huge_pages_fops);
3654 pr_warn("Failed to create split_huge_pages in debugfs\n");
3657 late_initcall(split_huge_pages_debugfs);