]> git.karo-electronics.de Git - karo-tx-linux.git/blob - mm/huge_memory.c
thp: fix split_huge_page() after mremap() of THP
[karo-tx-linux.git] / mm / huge_memory.c
1 /*
2  *  Copyright (C) 2009  Red Hat, Inc.
3  *
4  *  This work is licensed under the terms of the GNU GPL, version 2. See
5  *  the COPYING file in the top-level directory.
6  */
7
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9
10 #include <linux/mm.h>
11 #include <linux/sched.h>
12 #include <linux/highmem.h>
13 #include <linux/hugetlb.h>
14 #include <linux/mmu_notifier.h>
15 #include <linux/rmap.h>
16 #include <linux/swap.h>
17 #include <linux/shrinker.h>
18 #include <linux/mm_inline.h>
19 #include <linux/swapops.h>
20 #include <linux/dax.h>
21 #include <linux/kthread.h>
22 #include <linux/khugepaged.h>
23 #include <linux/freezer.h>
24 #include <linux/mman.h>
25 #include <linux/pagemap.h>
26 #include <linux/debugfs.h>
27 #include <linux/migrate.h>
28 #include <linux/hashtable.h>
29 #include <linux/userfaultfd_k.h>
30 #include <linux/page_idle.h>
31
32 #include <asm/tlb.h>
33 #include <asm/pgalloc.h>
34 #include "internal.h"
35
36 enum scan_result {
37         SCAN_FAIL,
38         SCAN_SUCCEED,
39         SCAN_PMD_NULL,
40         SCAN_EXCEED_NONE_PTE,
41         SCAN_PTE_NON_PRESENT,
42         SCAN_PAGE_RO,
43         SCAN_NO_REFERENCED_PAGE,
44         SCAN_PAGE_NULL,
45         SCAN_SCAN_ABORT,
46         SCAN_PAGE_COUNT,
47         SCAN_PAGE_LRU,
48         SCAN_PAGE_LOCK,
49         SCAN_PAGE_ANON,
50         SCAN_PAGE_COMPOUND,
51         SCAN_ANY_PROCESS,
52         SCAN_VMA_NULL,
53         SCAN_VMA_CHECK,
54         SCAN_ADDRESS_RANGE,
55         SCAN_SWAP_CACHE_PAGE,
56         SCAN_DEL_PAGE_LRU,
57         SCAN_ALLOC_HUGE_PAGE_FAIL,
58         SCAN_CGROUP_CHARGE_FAIL
59 };
60
61 #define CREATE_TRACE_POINTS
62 #include <trace/events/huge_memory.h>
63
64 /*
65  * By default transparent hugepage support is disabled in order that avoid
66  * to risk increase the memory footprint of applications without a guaranteed
67  * benefit. When transparent hugepage support is enabled, is for all mappings,
68  * and khugepaged scans all mappings.
69  * Defrag is invoked by khugepaged hugepage allocations and by page faults
70  * for all hugepage allocations.
71  */
72 unsigned long transparent_hugepage_flags __read_mostly =
73 #ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
74         (1<<TRANSPARENT_HUGEPAGE_FLAG)|
75 #endif
76 #ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
77         (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
78 #endif
79         (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)|
80         (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
81         (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
82
83 /* default scan 8*512 pte (or vmas) every 30 second */
84 static unsigned int khugepaged_pages_to_scan __read_mostly = HPAGE_PMD_NR*8;
85 static unsigned int khugepaged_pages_collapsed;
86 static unsigned int khugepaged_full_scans;
87 static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
88 /* during fragmentation poll the hugepage allocator once every minute */
89 static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
90 static struct task_struct *khugepaged_thread __read_mostly;
91 static DEFINE_MUTEX(khugepaged_mutex);
92 static DEFINE_SPINLOCK(khugepaged_mm_lock);
93 static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
94 /*
95  * default collapse hugepages if there is at least one pte mapped like
96  * it would have happened if the vma was large enough during page
97  * fault.
98  */
99 static unsigned int khugepaged_max_ptes_none __read_mostly = HPAGE_PMD_NR-1;
100
101 static int khugepaged(void *none);
102 static int khugepaged_slab_init(void);
103 static void khugepaged_slab_exit(void);
104
105 #define MM_SLOTS_HASH_BITS 10
106 static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
107
108 static struct kmem_cache *mm_slot_cache __read_mostly;
109
110 /**
111  * struct mm_slot - hash lookup from mm to mm_slot
112  * @hash: hash collision list
113  * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
114  * @mm: the mm that this information is valid for
115  */
116 struct mm_slot {
117         struct hlist_node hash;
118         struct list_head mm_node;
119         struct mm_struct *mm;
120 };
121
122 /**
123  * struct khugepaged_scan - cursor for scanning
124  * @mm_head: the head of the mm list to scan
125  * @mm_slot: the current mm_slot we are scanning
126  * @address: the next address inside that to be scanned
127  *
128  * There is only the one khugepaged_scan instance of this cursor structure.
129  */
130 struct khugepaged_scan {
131         struct list_head mm_head;
132         struct mm_slot *mm_slot;
133         unsigned long address;
134 };
135 static struct khugepaged_scan khugepaged_scan = {
136         .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
137 };
138
139 static DEFINE_SPINLOCK(split_queue_lock);
140 static LIST_HEAD(split_queue);
141 static unsigned long split_queue_len;
142 static struct shrinker deferred_split_shrinker;
143
144 static void set_recommended_min_free_kbytes(void)
145 {
146         struct zone *zone;
147         int nr_zones = 0;
148         unsigned long recommended_min;
149
150         for_each_populated_zone(zone)
151                 nr_zones++;
152
153         /* Ensure 2 pageblocks are free to assist fragmentation avoidance */
154         recommended_min = pageblock_nr_pages * nr_zones * 2;
155
156         /*
157          * Make sure that on average at least two pageblocks are almost free
158          * of another type, one for a migratetype to fall back to and a
159          * second to avoid subsequent fallbacks of other types There are 3
160          * MIGRATE_TYPES we care about.
161          */
162         recommended_min += pageblock_nr_pages * nr_zones *
163                            MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
164
165         /* don't ever allow to reserve more than 5% of the lowmem */
166         recommended_min = min(recommended_min,
167                               (unsigned long) nr_free_buffer_pages() / 20);
168         recommended_min <<= (PAGE_SHIFT-10);
169
170         if (recommended_min > min_free_kbytes) {
171                 if (user_min_free_kbytes >= 0)
172                         pr_info("raising min_free_kbytes from %d to %lu "
173                                 "to help transparent hugepage allocations\n",
174                                 min_free_kbytes, recommended_min);
175
176                 min_free_kbytes = recommended_min;
177         }
178         setup_per_zone_wmarks();
179 }
180
181 static int start_stop_khugepaged(void)
182 {
183         int err = 0;
184         if (khugepaged_enabled()) {
185                 if (!khugepaged_thread)
186                         khugepaged_thread = kthread_run(khugepaged, NULL,
187                                                         "khugepaged");
188                 if (IS_ERR(khugepaged_thread)) {
189                         pr_err("khugepaged: kthread_run(khugepaged) failed\n");
190                         err = PTR_ERR(khugepaged_thread);
191                         khugepaged_thread = NULL;
192                         goto fail;
193                 }
194
195                 if (!list_empty(&khugepaged_scan.mm_head))
196                         wake_up_interruptible(&khugepaged_wait);
197
198                 set_recommended_min_free_kbytes();
199         } else if (khugepaged_thread) {
200                 kthread_stop(khugepaged_thread);
201                 khugepaged_thread = NULL;
202         }
203 fail:
204         return err;
205 }
206
207 static atomic_t huge_zero_refcount;
208 struct page *huge_zero_page __read_mostly;
209
210 struct page *get_huge_zero_page(void)
211 {
212         struct page *zero_page;
213 retry:
214         if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
215                 return READ_ONCE(huge_zero_page);
216
217         zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
218                         HPAGE_PMD_ORDER);
219         if (!zero_page) {
220                 count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
221                 return NULL;
222         }
223         count_vm_event(THP_ZERO_PAGE_ALLOC);
224         preempt_disable();
225         if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
226                 preempt_enable();
227                 __free_pages(zero_page, compound_order(zero_page));
228                 goto retry;
229         }
230
231         /* We take additional reference here. It will be put back by shrinker */
232         atomic_set(&huge_zero_refcount, 2);
233         preempt_enable();
234         return READ_ONCE(huge_zero_page);
235 }
236
237 static void put_huge_zero_page(void)
238 {
239         /*
240          * Counter should never go to zero here. Only shrinker can put
241          * last reference.
242          */
243         BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
244 }
245
246 static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
247                                         struct shrink_control *sc)
248 {
249         /* we can free zero page only if last reference remains */
250         return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
251 }
252
253 static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
254                                        struct shrink_control *sc)
255 {
256         if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
257                 struct page *zero_page = xchg(&huge_zero_page, NULL);
258                 BUG_ON(zero_page == NULL);
259                 __free_pages(zero_page, compound_order(zero_page));
260                 return HPAGE_PMD_NR;
261         }
262
263         return 0;
264 }
265
266 static struct shrinker huge_zero_page_shrinker = {
267         .count_objects = shrink_huge_zero_page_count,
268         .scan_objects = shrink_huge_zero_page_scan,
269         .seeks = DEFAULT_SEEKS,
270 };
271
272 #ifdef CONFIG_SYSFS
273
274 static ssize_t double_flag_show(struct kobject *kobj,
275                                 struct kobj_attribute *attr, char *buf,
276                                 enum transparent_hugepage_flag enabled,
277                                 enum transparent_hugepage_flag req_madv)
278 {
279         if (test_bit(enabled, &transparent_hugepage_flags)) {
280                 VM_BUG_ON(test_bit(req_madv, &transparent_hugepage_flags));
281                 return sprintf(buf, "[always] madvise never\n");
282         } else if (test_bit(req_madv, &transparent_hugepage_flags))
283                 return sprintf(buf, "always [madvise] never\n");
284         else
285                 return sprintf(buf, "always madvise [never]\n");
286 }
287 static ssize_t double_flag_store(struct kobject *kobj,
288                                  struct kobj_attribute *attr,
289                                  const char *buf, size_t count,
290                                  enum transparent_hugepage_flag enabled,
291                                  enum transparent_hugepage_flag req_madv)
292 {
293         if (!memcmp("always", buf,
294                     min(sizeof("always")-1, count))) {
295                 set_bit(enabled, &transparent_hugepage_flags);
296                 clear_bit(req_madv, &transparent_hugepage_flags);
297         } else if (!memcmp("madvise", buf,
298                            min(sizeof("madvise")-1, count))) {
299                 clear_bit(enabled, &transparent_hugepage_flags);
300                 set_bit(req_madv, &transparent_hugepage_flags);
301         } else if (!memcmp("never", buf,
302                            min(sizeof("never")-1, count))) {
303                 clear_bit(enabled, &transparent_hugepage_flags);
304                 clear_bit(req_madv, &transparent_hugepage_flags);
305         } else
306                 return -EINVAL;
307
308         return count;
309 }
310
311 static ssize_t enabled_show(struct kobject *kobj,
312                             struct kobj_attribute *attr, char *buf)
313 {
314         return double_flag_show(kobj, attr, buf,
315                                 TRANSPARENT_HUGEPAGE_FLAG,
316                                 TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
317 }
318 static ssize_t enabled_store(struct kobject *kobj,
319                              struct kobj_attribute *attr,
320                              const char *buf, size_t count)
321 {
322         ssize_t ret;
323
324         ret = double_flag_store(kobj, attr, buf, count,
325                                 TRANSPARENT_HUGEPAGE_FLAG,
326                                 TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
327
328         if (ret > 0) {
329                 int err;
330
331                 mutex_lock(&khugepaged_mutex);
332                 err = start_stop_khugepaged();
333                 mutex_unlock(&khugepaged_mutex);
334
335                 if (err)
336                         ret = err;
337         }
338
339         return ret;
340 }
341 static struct kobj_attribute enabled_attr =
342         __ATTR(enabled, 0644, enabled_show, enabled_store);
343
344 static ssize_t single_flag_show(struct kobject *kobj,
345                                 struct kobj_attribute *attr, char *buf,
346                                 enum transparent_hugepage_flag flag)
347 {
348         return sprintf(buf, "%d\n",
349                        !!test_bit(flag, &transparent_hugepage_flags));
350 }
351
352 static ssize_t single_flag_store(struct kobject *kobj,
353                                  struct kobj_attribute *attr,
354                                  const char *buf, size_t count,
355                                  enum transparent_hugepage_flag flag)
356 {
357         unsigned long value;
358         int ret;
359
360         ret = kstrtoul(buf, 10, &value);
361         if (ret < 0)
362                 return ret;
363         if (value > 1)
364                 return -EINVAL;
365
366         if (value)
367                 set_bit(flag, &transparent_hugepage_flags);
368         else
369                 clear_bit(flag, &transparent_hugepage_flags);
370
371         return count;
372 }
373
374 /*
375  * Currently defrag only disables __GFP_NOWAIT for allocation. A blind
376  * __GFP_REPEAT is too aggressive, it's never worth swapping tons of
377  * memory just to allocate one more hugepage.
378  */
379 static ssize_t defrag_show(struct kobject *kobj,
380                            struct kobj_attribute *attr, char *buf)
381 {
382         return double_flag_show(kobj, attr, buf,
383                                 TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
384                                 TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
385 }
386 static ssize_t defrag_store(struct kobject *kobj,
387                             struct kobj_attribute *attr,
388                             const char *buf, size_t count)
389 {
390         return double_flag_store(kobj, attr, buf, count,
391                                  TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
392                                  TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
393 }
394 static struct kobj_attribute defrag_attr =
395         __ATTR(defrag, 0644, defrag_show, defrag_store);
396
397 static ssize_t use_zero_page_show(struct kobject *kobj,
398                 struct kobj_attribute *attr, char *buf)
399 {
400         return single_flag_show(kobj, attr, buf,
401                                 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
402 }
403 static ssize_t use_zero_page_store(struct kobject *kobj,
404                 struct kobj_attribute *attr, const char *buf, size_t count)
405 {
406         return single_flag_store(kobj, attr, buf, count,
407                                  TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
408 }
409 static struct kobj_attribute use_zero_page_attr =
410         __ATTR(use_zero_page, 0644, use_zero_page_show, use_zero_page_store);
411 #ifdef CONFIG_DEBUG_VM
412 static ssize_t debug_cow_show(struct kobject *kobj,
413                                 struct kobj_attribute *attr, char *buf)
414 {
415         return single_flag_show(kobj, attr, buf,
416                                 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
417 }
418 static ssize_t debug_cow_store(struct kobject *kobj,
419                                struct kobj_attribute *attr,
420                                const char *buf, size_t count)
421 {
422         return single_flag_store(kobj, attr, buf, count,
423                                  TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
424 }
425 static struct kobj_attribute debug_cow_attr =
426         __ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store);
427 #endif /* CONFIG_DEBUG_VM */
428
429 static struct attribute *hugepage_attr[] = {
430         &enabled_attr.attr,
431         &defrag_attr.attr,
432         &use_zero_page_attr.attr,
433 #ifdef CONFIG_DEBUG_VM
434         &debug_cow_attr.attr,
435 #endif
436         NULL,
437 };
438
439 static struct attribute_group hugepage_attr_group = {
440         .attrs = hugepage_attr,
441 };
442
443 static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
444                                          struct kobj_attribute *attr,
445                                          char *buf)
446 {
447         return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
448 }
449
450 static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
451                                           struct kobj_attribute *attr,
452                                           const char *buf, size_t count)
453 {
454         unsigned long msecs;
455         int err;
456
457         err = kstrtoul(buf, 10, &msecs);
458         if (err || msecs > UINT_MAX)
459                 return -EINVAL;
460
461         khugepaged_scan_sleep_millisecs = msecs;
462         wake_up_interruptible(&khugepaged_wait);
463
464         return count;
465 }
466 static struct kobj_attribute scan_sleep_millisecs_attr =
467         __ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
468                scan_sleep_millisecs_store);
469
470 static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
471                                           struct kobj_attribute *attr,
472                                           char *buf)
473 {
474         return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
475 }
476
477 static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
478                                            struct kobj_attribute *attr,
479                                            const char *buf, size_t count)
480 {
481         unsigned long msecs;
482         int err;
483
484         err = kstrtoul(buf, 10, &msecs);
485         if (err || msecs > UINT_MAX)
486                 return -EINVAL;
487
488         khugepaged_alloc_sleep_millisecs = msecs;
489         wake_up_interruptible(&khugepaged_wait);
490
491         return count;
492 }
493 static struct kobj_attribute alloc_sleep_millisecs_attr =
494         __ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
495                alloc_sleep_millisecs_store);
496
497 static ssize_t pages_to_scan_show(struct kobject *kobj,
498                                   struct kobj_attribute *attr,
499                                   char *buf)
500 {
501         return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
502 }
503 static ssize_t pages_to_scan_store(struct kobject *kobj,
504                                    struct kobj_attribute *attr,
505                                    const char *buf, size_t count)
506 {
507         int err;
508         unsigned long pages;
509
510         err = kstrtoul(buf, 10, &pages);
511         if (err || !pages || pages > UINT_MAX)
512                 return -EINVAL;
513
514         khugepaged_pages_to_scan = pages;
515
516         return count;
517 }
518 static struct kobj_attribute pages_to_scan_attr =
519         __ATTR(pages_to_scan, 0644, pages_to_scan_show,
520                pages_to_scan_store);
521
522 static ssize_t pages_collapsed_show(struct kobject *kobj,
523                                     struct kobj_attribute *attr,
524                                     char *buf)
525 {
526         return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
527 }
528 static struct kobj_attribute pages_collapsed_attr =
529         __ATTR_RO(pages_collapsed);
530
531 static ssize_t full_scans_show(struct kobject *kobj,
532                                struct kobj_attribute *attr,
533                                char *buf)
534 {
535         return sprintf(buf, "%u\n", khugepaged_full_scans);
536 }
537 static struct kobj_attribute full_scans_attr =
538         __ATTR_RO(full_scans);
539
540 static ssize_t khugepaged_defrag_show(struct kobject *kobj,
541                                       struct kobj_attribute *attr, char *buf)
542 {
543         return single_flag_show(kobj, attr, buf,
544                                 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
545 }
546 static ssize_t khugepaged_defrag_store(struct kobject *kobj,
547                                        struct kobj_attribute *attr,
548                                        const char *buf, size_t count)
549 {
550         return single_flag_store(kobj, attr, buf, count,
551                                  TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
552 }
553 static struct kobj_attribute khugepaged_defrag_attr =
554         __ATTR(defrag, 0644, khugepaged_defrag_show,
555                khugepaged_defrag_store);
556
557 /*
558  * max_ptes_none controls if khugepaged should collapse hugepages over
559  * any unmapped ptes in turn potentially increasing the memory
560  * footprint of the vmas. When max_ptes_none is 0 khugepaged will not
561  * reduce the available free memory in the system as it
562  * runs. Increasing max_ptes_none will instead potentially reduce the
563  * free memory in the system during the khugepaged scan.
564  */
565 static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
566                                              struct kobj_attribute *attr,
567                                              char *buf)
568 {
569         return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
570 }
571 static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
572                                               struct kobj_attribute *attr,
573                                               const char *buf, size_t count)
574 {
575         int err;
576         unsigned long max_ptes_none;
577
578         err = kstrtoul(buf, 10, &max_ptes_none);
579         if (err || max_ptes_none > HPAGE_PMD_NR-1)
580                 return -EINVAL;
581
582         khugepaged_max_ptes_none = max_ptes_none;
583
584         return count;
585 }
586 static struct kobj_attribute khugepaged_max_ptes_none_attr =
587         __ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
588                khugepaged_max_ptes_none_store);
589
590 static struct attribute *khugepaged_attr[] = {
591         &khugepaged_defrag_attr.attr,
592         &khugepaged_max_ptes_none_attr.attr,
593         &pages_to_scan_attr.attr,
594         &pages_collapsed_attr.attr,
595         &full_scans_attr.attr,
596         &scan_sleep_millisecs_attr.attr,
597         &alloc_sleep_millisecs_attr.attr,
598         NULL,
599 };
600
601 static struct attribute_group khugepaged_attr_group = {
602         .attrs = khugepaged_attr,
603         .name = "khugepaged",
604 };
605
606 static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
607 {
608         int err;
609
610         *hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
611         if (unlikely(!*hugepage_kobj)) {
612                 pr_err("failed to create transparent hugepage kobject\n");
613                 return -ENOMEM;
614         }
615
616         err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
617         if (err) {
618                 pr_err("failed to register transparent hugepage group\n");
619                 goto delete_obj;
620         }
621
622         err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
623         if (err) {
624                 pr_err("failed to register transparent hugepage group\n");
625                 goto remove_hp_group;
626         }
627
628         return 0;
629
630 remove_hp_group:
631         sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
632 delete_obj:
633         kobject_put(*hugepage_kobj);
634         return err;
635 }
636
637 static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
638 {
639         sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
640         sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
641         kobject_put(hugepage_kobj);
642 }
643 #else
644 static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
645 {
646         return 0;
647 }
648
649 static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
650 {
651 }
652 #endif /* CONFIG_SYSFS */
653
654 static int __init hugepage_init(void)
655 {
656         int err;
657         struct kobject *hugepage_kobj;
658
659         if (!has_transparent_hugepage()) {
660                 transparent_hugepage_flags = 0;
661                 return -EINVAL;
662         }
663
664         err = hugepage_init_sysfs(&hugepage_kobj);
665         if (err)
666                 goto err_sysfs;
667
668         err = khugepaged_slab_init();
669         if (err)
670                 goto err_slab;
671
672         err = register_shrinker(&huge_zero_page_shrinker);
673         if (err)
674                 goto err_hzp_shrinker;
675         err = register_shrinker(&deferred_split_shrinker);
676         if (err)
677                 goto err_split_shrinker;
678
679         /*
680          * By default disable transparent hugepages on smaller systems,
681          * where the extra memory used could hurt more than TLB overhead
682          * is likely to save.  The admin can still enable it through /sys.
683          */
684         if (totalram_pages < (512 << (20 - PAGE_SHIFT))) {
685                 transparent_hugepage_flags = 0;
686                 return 0;
687         }
688
689         err = start_stop_khugepaged();
690         if (err)
691                 goto err_khugepaged;
692
693         return 0;
694 err_khugepaged:
695         unregister_shrinker(&deferred_split_shrinker);
696 err_split_shrinker:
697         unregister_shrinker(&huge_zero_page_shrinker);
698 err_hzp_shrinker:
699         khugepaged_slab_exit();
700 err_slab:
701         hugepage_exit_sysfs(hugepage_kobj);
702 err_sysfs:
703         return err;
704 }
705 subsys_initcall(hugepage_init);
706
707 static int __init setup_transparent_hugepage(char *str)
708 {
709         int ret = 0;
710         if (!str)
711                 goto out;
712         if (!strcmp(str, "always")) {
713                 set_bit(TRANSPARENT_HUGEPAGE_FLAG,
714                         &transparent_hugepage_flags);
715                 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
716                           &transparent_hugepage_flags);
717                 ret = 1;
718         } else if (!strcmp(str, "madvise")) {
719                 clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
720                           &transparent_hugepage_flags);
721                 set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
722                         &transparent_hugepage_flags);
723                 ret = 1;
724         } else if (!strcmp(str, "never")) {
725                 clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
726                           &transparent_hugepage_flags);
727                 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
728                           &transparent_hugepage_flags);
729                 ret = 1;
730         }
731 out:
732         if (!ret)
733                 pr_warn("transparent_hugepage= cannot parse, ignored\n");
734         return ret;
735 }
736 __setup("transparent_hugepage=", setup_transparent_hugepage);
737
738 pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
739 {
740         if (likely(vma->vm_flags & VM_WRITE))
741                 pmd = pmd_mkwrite(pmd);
742         return pmd;
743 }
744
745 static inline pmd_t mk_huge_pmd(struct page *page, pgprot_t prot)
746 {
747         pmd_t entry;
748         entry = mk_pmd(page, prot);
749         entry = pmd_mkhuge(entry);
750         return entry;
751 }
752
753 static inline struct list_head *page_deferred_list(struct page *page)
754 {
755         /*
756          * ->lru in the tail pages is occupied by compound_head.
757          * Let's use ->mapping + ->index in the second tail page as list_head.
758          */
759         return (struct list_head *)&page[2].mapping;
760 }
761
762 void prep_transhuge_page(struct page *page)
763 {
764         /*
765          * we use page->mapping and page->indexlru in second tail page
766          * as list_head: assuming THP order >= 2
767          */
768         BUILD_BUG_ON(HPAGE_PMD_ORDER < 2);
769
770         INIT_LIST_HEAD(page_deferred_list(page));
771         set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
772 }
773
774 static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
775                                         struct vm_area_struct *vma,
776                                         unsigned long address, pmd_t *pmd,
777                                         struct page *page, gfp_t gfp,
778                                         unsigned int flags)
779 {
780         struct mem_cgroup *memcg;
781         pgtable_t pgtable;
782         spinlock_t *ptl;
783         unsigned long haddr = address & HPAGE_PMD_MASK;
784
785         VM_BUG_ON_PAGE(!PageCompound(page), page);
786
787         if (mem_cgroup_try_charge(page, mm, gfp, &memcg, true)) {
788                 put_page(page);
789                 count_vm_event(THP_FAULT_FALLBACK);
790                 return VM_FAULT_FALLBACK;
791         }
792
793         pgtable = pte_alloc_one(mm, haddr);
794         if (unlikely(!pgtable)) {
795                 mem_cgroup_cancel_charge(page, memcg, true);
796                 put_page(page);
797                 return VM_FAULT_OOM;
798         }
799
800         clear_huge_page(page, haddr, HPAGE_PMD_NR);
801         /*
802          * The memory barrier inside __SetPageUptodate makes sure that
803          * clear_huge_page writes become visible before the set_pmd_at()
804          * write.
805          */
806         __SetPageUptodate(page);
807
808         ptl = pmd_lock(mm, pmd);
809         if (unlikely(!pmd_none(*pmd))) {
810                 spin_unlock(ptl);
811                 mem_cgroup_cancel_charge(page, memcg, true);
812                 put_page(page);
813                 pte_free(mm, pgtable);
814         } else {
815                 pmd_t entry;
816
817                 /* Deliver the page fault to userland */
818                 if (userfaultfd_missing(vma)) {
819                         int ret;
820
821                         spin_unlock(ptl);
822                         mem_cgroup_cancel_charge(page, memcg, true);
823                         put_page(page);
824                         pte_free(mm, pgtable);
825                         ret = handle_userfault(vma, address, flags,
826                                                VM_UFFD_MISSING);
827                         VM_BUG_ON(ret & VM_FAULT_FALLBACK);
828                         return ret;
829                 }
830
831                 entry = mk_huge_pmd(page, vma->vm_page_prot);
832                 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
833                 page_add_new_anon_rmap(page, vma, haddr, true);
834                 mem_cgroup_commit_charge(page, memcg, false, true);
835                 lru_cache_add_active_or_unevictable(page, vma);
836                 pgtable_trans_huge_deposit(mm, pmd, pgtable);
837                 set_pmd_at(mm, haddr, pmd, entry);
838                 add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
839                 atomic_long_inc(&mm->nr_ptes);
840                 spin_unlock(ptl);
841                 count_vm_event(THP_FAULT_ALLOC);
842         }
843
844         return 0;
845 }
846
847 static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
848 {
849         return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_RECLAIM)) | extra_gfp;
850 }
851
852 /* Caller must hold page table lock. */
853 static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
854                 struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
855                 struct page *zero_page)
856 {
857         pmd_t entry;
858         if (!pmd_none(*pmd))
859                 return false;
860         entry = mk_pmd(zero_page, vma->vm_page_prot);
861         entry = pmd_mkhuge(entry);
862         pgtable_trans_huge_deposit(mm, pmd, pgtable);
863         set_pmd_at(mm, haddr, pmd, entry);
864         atomic_long_inc(&mm->nr_ptes);
865         return true;
866 }
867
868 int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
869                                unsigned long address, pmd_t *pmd,
870                                unsigned int flags)
871 {
872         gfp_t gfp;
873         struct page *page;
874         unsigned long haddr = address & HPAGE_PMD_MASK;
875
876         if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
877                 return VM_FAULT_FALLBACK;
878         if (unlikely(anon_vma_prepare(vma)))
879                 return VM_FAULT_OOM;
880         if (unlikely(khugepaged_enter(vma, vma->vm_flags)))
881                 return VM_FAULT_OOM;
882         if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm) &&
883                         transparent_hugepage_use_zero_page()) {
884                 spinlock_t *ptl;
885                 pgtable_t pgtable;
886                 struct page *zero_page;
887                 bool set;
888                 int ret;
889                 pgtable = pte_alloc_one(mm, haddr);
890                 if (unlikely(!pgtable))
891                         return VM_FAULT_OOM;
892                 zero_page = get_huge_zero_page();
893                 if (unlikely(!zero_page)) {
894                         pte_free(mm, pgtable);
895                         count_vm_event(THP_FAULT_FALLBACK);
896                         return VM_FAULT_FALLBACK;
897                 }
898                 ptl = pmd_lock(mm, pmd);
899                 ret = 0;
900                 set = false;
901                 if (pmd_none(*pmd)) {
902                         if (userfaultfd_missing(vma)) {
903                                 spin_unlock(ptl);
904                                 ret = handle_userfault(vma, address, flags,
905                                                        VM_UFFD_MISSING);
906                                 VM_BUG_ON(ret & VM_FAULT_FALLBACK);
907                         } else {
908                                 set_huge_zero_page(pgtable, mm, vma,
909                                                    haddr, pmd,
910                                                    zero_page);
911                                 spin_unlock(ptl);
912                                 set = true;
913                         }
914                 } else
915                         spin_unlock(ptl);
916                 if (!set) {
917                         pte_free(mm, pgtable);
918                         put_huge_zero_page();
919                 }
920                 return ret;
921         }
922         gfp = alloc_hugepage_gfpmask(transparent_hugepage_defrag(vma), 0);
923         page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
924         if (unlikely(!page)) {
925                 count_vm_event(THP_FAULT_FALLBACK);
926                 return VM_FAULT_FALLBACK;
927         }
928         prep_transhuge_page(page);
929         return __do_huge_pmd_anonymous_page(mm, vma, address, pmd, page, gfp,
930                                             flags);
931 }
932
933 static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
934                 pmd_t *pmd, unsigned long pfn, pgprot_t prot, bool write)
935 {
936         struct mm_struct *mm = vma->vm_mm;
937         pmd_t entry;
938         spinlock_t *ptl;
939
940         ptl = pmd_lock(mm, pmd);
941         if (pmd_none(*pmd)) {
942                 entry = pmd_mkhuge(pfn_pmd(pfn, prot));
943                 if (write) {
944                         entry = pmd_mkyoung(pmd_mkdirty(entry));
945                         entry = maybe_pmd_mkwrite(entry, vma);
946                 }
947                 set_pmd_at(mm, addr, pmd, entry);
948                 update_mmu_cache_pmd(vma, addr, pmd);
949         }
950         spin_unlock(ptl);
951 }
952
953 int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
954                         pmd_t *pmd, unsigned long pfn, bool write)
955 {
956         pgprot_t pgprot = vma->vm_page_prot;
957         /*
958          * If we had pmd_special, we could avoid all these restrictions,
959          * but we need to be consistent with PTEs and architectures that
960          * can't support a 'special' bit.
961          */
962         BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
963         BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
964                                                 (VM_PFNMAP|VM_MIXEDMAP));
965         BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
966         BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
967
968         if (addr < vma->vm_start || addr >= vma->vm_end)
969                 return VM_FAULT_SIGBUS;
970         if (track_pfn_insert(vma, &pgprot, pfn))
971                 return VM_FAULT_SIGBUS;
972         insert_pfn_pmd(vma, addr, pmd, pfn, pgprot, write);
973         return VM_FAULT_NOPAGE;
974 }
975
976 int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
977                   pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
978                   struct vm_area_struct *vma)
979 {
980         spinlock_t *dst_ptl, *src_ptl;
981         struct page *src_page;
982         pmd_t pmd;
983         pgtable_t pgtable;
984         int ret;
985
986         ret = -ENOMEM;
987         pgtable = pte_alloc_one(dst_mm, addr);
988         if (unlikely(!pgtable))
989                 goto out;
990
991         dst_ptl = pmd_lock(dst_mm, dst_pmd);
992         src_ptl = pmd_lockptr(src_mm, src_pmd);
993         spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
994
995         ret = -EAGAIN;
996         pmd = *src_pmd;
997         if (unlikely(!pmd_trans_huge(pmd))) {
998                 pte_free(dst_mm, pgtable);
999                 goto out_unlock;
1000         }
1001         /*
1002          * When page table lock is held, the huge zero pmd should not be
1003          * under splitting since we don't split the page itself, only pmd to
1004          * a page table.
1005          */
1006         if (is_huge_zero_pmd(pmd)) {
1007                 struct page *zero_page;
1008                 /*
1009                  * get_huge_zero_page() will never allocate a new page here,
1010                  * since we already have a zero page to copy. It just takes a
1011                  * reference.
1012                  */
1013                 zero_page = get_huge_zero_page();
1014                 set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd,
1015                                 zero_page);
1016                 ret = 0;
1017                 goto out_unlock;
1018         }
1019
1020         src_page = pmd_page(pmd);
1021         VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
1022         get_page(src_page);
1023         page_dup_rmap(src_page, true);
1024         add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
1025
1026         pmdp_set_wrprotect(src_mm, addr, src_pmd);
1027         pmd = pmd_mkold(pmd_wrprotect(pmd));
1028         pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
1029         set_pmd_at(dst_mm, addr, dst_pmd, pmd);
1030         atomic_long_inc(&dst_mm->nr_ptes);
1031
1032         ret = 0;
1033 out_unlock:
1034         spin_unlock(src_ptl);
1035         spin_unlock(dst_ptl);
1036 out:
1037         return ret;
1038 }
1039
1040 void huge_pmd_set_accessed(struct mm_struct *mm,
1041                            struct vm_area_struct *vma,
1042                            unsigned long address,
1043                            pmd_t *pmd, pmd_t orig_pmd,
1044                            int dirty)
1045 {
1046         spinlock_t *ptl;
1047         pmd_t entry;
1048         unsigned long haddr;
1049
1050         ptl = pmd_lock(mm, pmd);
1051         if (unlikely(!pmd_same(*pmd, orig_pmd)))
1052                 goto unlock;
1053
1054         entry = pmd_mkyoung(orig_pmd);
1055         haddr = address & HPAGE_PMD_MASK;
1056         if (pmdp_set_access_flags(vma, haddr, pmd, entry, dirty))
1057                 update_mmu_cache_pmd(vma, address, pmd);
1058
1059 unlock:
1060         spin_unlock(ptl);
1061 }
1062
1063 static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
1064                                         struct vm_area_struct *vma,
1065                                         unsigned long address,
1066                                         pmd_t *pmd, pmd_t orig_pmd,
1067                                         struct page *page,
1068                                         unsigned long haddr)
1069 {
1070         struct mem_cgroup *memcg;
1071         spinlock_t *ptl;
1072         pgtable_t pgtable;
1073         pmd_t _pmd;
1074         int ret = 0, i;
1075         struct page **pages;
1076         unsigned long mmun_start;       /* For mmu_notifiers */
1077         unsigned long mmun_end;         /* For mmu_notifiers */
1078
1079         pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR,
1080                         GFP_KERNEL);
1081         if (unlikely(!pages)) {
1082                 ret |= VM_FAULT_OOM;
1083                 goto out;
1084         }
1085
1086         for (i = 0; i < HPAGE_PMD_NR; i++) {
1087                 pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE |
1088                                                __GFP_OTHER_NODE,
1089                                                vma, address, page_to_nid(page));
1090                 if (unlikely(!pages[i] ||
1091                              mem_cgroup_try_charge(pages[i], mm, GFP_KERNEL,
1092                                                    &memcg, false))) {
1093                         if (pages[i])
1094                                 put_page(pages[i]);
1095                         while (--i >= 0) {
1096                                 memcg = (void *)page_private(pages[i]);
1097                                 set_page_private(pages[i], 0);
1098                                 mem_cgroup_cancel_charge(pages[i], memcg,
1099                                                 false);
1100                                 put_page(pages[i]);
1101                         }
1102                         kfree(pages);
1103                         ret |= VM_FAULT_OOM;
1104                         goto out;
1105                 }
1106                 set_page_private(pages[i], (unsigned long)memcg);
1107         }
1108
1109         for (i = 0; i < HPAGE_PMD_NR; i++) {
1110                 copy_user_highpage(pages[i], page + i,
1111                                    haddr + PAGE_SIZE * i, vma);
1112                 __SetPageUptodate(pages[i]);
1113                 cond_resched();
1114         }
1115
1116         mmun_start = haddr;
1117         mmun_end   = haddr + HPAGE_PMD_SIZE;
1118         mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
1119
1120         ptl = pmd_lock(mm, pmd);
1121         if (unlikely(!pmd_same(*pmd, orig_pmd)))
1122                 goto out_free_pages;
1123         VM_BUG_ON_PAGE(!PageHead(page), page);
1124
1125         pmdp_huge_clear_flush_notify(vma, haddr, pmd);
1126         /* leave pmd empty until pte is filled */
1127
1128         pgtable = pgtable_trans_huge_withdraw(mm, pmd);
1129         pmd_populate(mm, &_pmd, pgtable);
1130
1131         for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
1132                 pte_t *pte, entry;
1133                 entry = mk_pte(pages[i], vma->vm_page_prot);
1134                 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
1135                 memcg = (void *)page_private(pages[i]);
1136                 set_page_private(pages[i], 0);
1137                 page_add_new_anon_rmap(pages[i], vma, haddr, false);
1138                 mem_cgroup_commit_charge(pages[i], memcg, false, false);
1139                 lru_cache_add_active_or_unevictable(pages[i], vma);
1140                 pte = pte_offset_map(&_pmd, haddr);
1141                 VM_BUG_ON(!pte_none(*pte));
1142                 set_pte_at(mm, haddr, pte, entry);
1143                 pte_unmap(pte);
1144         }
1145         kfree(pages);
1146
1147         smp_wmb(); /* make pte visible before pmd */
1148         pmd_populate(mm, pmd, pgtable);
1149         page_remove_rmap(page, true);
1150         spin_unlock(ptl);
1151
1152         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1153
1154         ret |= VM_FAULT_WRITE;
1155         put_page(page);
1156
1157 out:
1158         return ret;
1159
1160 out_free_pages:
1161         spin_unlock(ptl);
1162         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1163         for (i = 0; i < HPAGE_PMD_NR; i++) {
1164                 memcg = (void *)page_private(pages[i]);
1165                 set_page_private(pages[i], 0);
1166                 mem_cgroup_cancel_charge(pages[i], memcg, false);
1167                 put_page(pages[i]);
1168         }
1169         kfree(pages);
1170         goto out;
1171 }
1172
1173 int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
1174                         unsigned long address, pmd_t *pmd, pmd_t orig_pmd)
1175 {
1176         spinlock_t *ptl;
1177         int ret = 0;
1178         struct page *page = NULL, *new_page;
1179         struct mem_cgroup *memcg;
1180         unsigned long haddr;
1181         unsigned long mmun_start;       /* For mmu_notifiers */
1182         unsigned long mmun_end;         /* For mmu_notifiers */
1183         gfp_t huge_gfp;                 /* for allocation and charge */
1184
1185         ptl = pmd_lockptr(mm, pmd);
1186         VM_BUG_ON_VMA(!vma->anon_vma, vma);
1187         haddr = address & HPAGE_PMD_MASK;
1188         if (is_huge_zero_pmd(orig_pmd))
1189                 goto alloc;
1190         spin_lock(ptl);
1191         if (unlikely(!pmd_same(*pmd, orig_pmd)))
1192                 goto out_unlock;
1193
1194         page = pmd_page(orig_pmd);
1195         VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page);
1196         /*
1197          * We can only reuse the page if nobody else maps the huge page or it's
1198          * part. We can do it by checking page_mapcount() on each sub-page, but
1199          * it's expensive.
1200          * The cheaper way is to check page_count() to be equal 1: every
1201          * mapcount takes page reference reference, so this way we can
1202          * guarantee, that the PMD is the only mapping.
1203          * This can give false negative if somebody pinned the page, but that's
1204          * fine.
1205          */
1206         if (page_mapcount(page) == 1 && page_count(page) == 1) {
1207                 pmd_t entry;
1208                 entry = pmd_mkyoung(orig_pmd);
1209                 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
1210                 if (pmdp_set_access_flags(vma, haddr, pmd, entry,  1))
1211                         update_mmu_cache_pmd(vma, address, pmd);
1212                 ret |= VM_FAULT_WRITE;
1213                 goto out_unlock;
1214         }
1215         get_page(page);
1216         spin_unlock(ptl);
1217 alloc:
1218         if (transparent_hugepage_enabled(vma) &&
1219             !transparent_hugepage_debug_cow()) {
1220                 huge_gfp = alloc_hugepage_gfpmask(transparent_hugepage_defrag(vma), 0);
1221                 new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER);
1222         } else
1223                 new_page = NULL;
1224
1225         if (likely(new_page)) {
1226                 prep_transhuge_page(new_page);
1227         } else {
1228                 if (!page) {
1229                         split_huge_pmd(vma, pmd, address);
1230                         ret |= VM_FAULT_FALLBACK;
1231                 } else {
1232                         ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
1233                                         pmd, orig_pmd, page, haddr);
1234                         if (ret & VM_FAULT_OOM) {
1235                                 split_huge_pmd(vma, pmd, address);
1236                                 ret |= VM_FAULT_FALLBACK;
1237                         }
1238                         put_page(page);
1239                 }
1240                 count_vm_event(THP_FAULT_FALLBACK);
1241                 goto out;
1242         }
1243
1244         if (unlikely(mem_cgroup_try_charge(new_page, mm, huge_gfp, &memcg,
1245                                            true))) {
1246                 put_page(new_page);
1247                 if (page) {
1248                         split_huge_pmd(vma, pmd, address);
1249                         put_page(page);
1250                 } else
1251                         split_huge_pmd(vma, pmd, address);
1252                 ret |= VM_FAULT_FALLBACK;
1253                 count_vm_event(THP_FAULT_FALLBACK);
1254                 goto out;
1255         }
1256
1257         count_vm_event(THP_FAULT_ALLOC);
1258
1259         if (!page)
1260                 clear_huge_page(new_page, haddr, HPAGE_PMD_NR);
1261         else
1262                 copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
1263         __SetPageUptodate(new_page);
1264
1265         mmun_start = haddr;
1266         mmun_end   = haddr + HPAGE_PMD_SIZE;
1267         mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
1268
1269         spin_lock(ptl);
1270         if (page)
1271                 put_page(page);
1272         if (unlikely(!pmd_same(*pmd, orig_pmd))) {
1273                 spin_unlock(ptl);
1274                 mem_cgroup_cancel_charge(new_page, memcg, true);
1275                 put_page(new_page);
1276                 goto out_mn;
1277         } else {
1278                 pmd_t entry;
1279                 entry = mk_huge_pmd(new_page, vma->vm_page_prot);
1280                 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
1281                 pmdp_huge_clear_flush_notify(vma, haddr, pmd);
1282                 page_add_new_anon_rmap(new_page, vma, haddr, true);
1283                 mem_cgroup_commit_charge(new_page, memcg, false, true);
1284                 lru_cache_add_active_or_unevictable(new_page, vma);
1285                 set_pmd_at(mm, haddr, pmd, entry);
1286                 update_mmu_cache_pmd(vma, address, pmd);
1287                 if (!page) {
1288                         add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
1289                         put_huge_zero_page();
1290                 } else {
1291                         VM_BUG_ON_PAGE(!PageHead(page), page);
1292                         page_remove_rmap(page, true);
1293                         put_page(page);
1294                 }
1295                 ret |= VM_FAULT_WRITE;
1296         }
1297         spin_unlock(ptl);
1298 out_mn:
1299         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1300 out:
1301         return ret;
1302 out_unlock:
1303         spin_unlock(ptl);
1304         return ret;
1305 }
1306
1307 struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
1308                                    unsigned long addr,
1309                                    pmd_t *pmd,
1310                                    unsigned int flags)
1311 {
1312         struct mm_struct *mm = vma->vm_mm;
1313         struct page *page = NULL;
1314
1315         assert_spin_locked(pmd_lockptr(mm, pmd));
1316
1317         if (flags & FOLL_WRITE && !pmd_write(*pmd))
1318                 goto out;
1319
1320         /* Avoid dumping huge zero page */
1321         if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
1322                 return ERR_PTR(-EFAULT);
1323
1324         /* Full NUMA hinting faults to serialise migration in fault paths */
1325         if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
1326                 goto out;
1327
1328         page = pmd_page(*pmd);
1329         VM_BUG_ON_PAGE(!PageHead(page), page);
1330         if (flags & FOLL_TOUCH) {
1331                 pmd_t _pmd;
1332                 /*
1333                  * We should set the dirty bit only for FOLL_WRITE but
1334                  * for now the dirty bit in the pmd is meaningless.
1335                  * And if the dirty bit will become meaningful and
1336                  * we'll only set it with FOLL_WRITE, an atomic
1337                  * set_bit will be required on the pmd to set the
1338                  * young bit, instead of the current set_pmd_at.
1339                  */
1340                 _pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
1341                 if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
1342                                           pmd, _pmd,  1))
1343                         update_mmu_cache_pmd(vma, addr, pmd);
1344         }
1345         if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
1346                 /*
1347                  * We don't mlock() pte-mapped THPs. This way we can avoid
1348                  * leaking mlocked pages into non-VM_LOCKED VMAs.
1349                  *
1350                  * In most cases the pmd is the only mapping of the page as we
1351                  * break COW for the mlock() -- see gup_flags |= FOLL_WRITE for
1352                  * writable private mappings in populate_vma_page_range().
1353                  *
1354                  * The only scenario when we have the page shared here is if we
1355                  * mlocking read-only mapping shared over fork(). We skip
1356                  * mlocking such pages.
1357                  */
1358                 if (compound_mapcount(page) == 1 && !PageDoubleMap(page) &&
1359                                 page->mapping && trylock_page(page)) {
1360                         lru_add_drain();
1361                         if (page->mapping)
1362                                 mlock_vma_page(page);
1363                         unlock_page(page);
1364                 }
1365         }
1366         page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
1367         VM_BUG_ON_PAGE(!PageCompound(page), page);
1368         if (flags & FOLL_GET)
1369                 get_page(page);
1370
1371 out:
1372         return page;
1373 }
1374
1375 /* NUMA hinting page fault entry point for trans huge pmds */
1376 int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
1377                                 unsigned long addr, pmd_t pmd, pmd_t *pmdp)
1378 {
1379         spinlock_t *ptl;
1380         struct anon_vma *anon_vma = NULL;
1381         struct page *page;
1382         unsigned long haddr = addr & HPAGE_PMD_MASK;
1383         int page_nid = -1, this_nid = numa_node_id();
1384         int target_nid, last_cpupid = -1;
1385         bool page_locked;
1386         bool migrated = false;
1387         bool was_writable;
1388         int flags = 0;
1389
1390         /* A PROT_NONE fault should not end up here */
1391         BUG_ON(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)));
1392
1393         ptl = pmd_lock(mm, pmdp);
1394         if (unlikely(!pmd_same(pmd, *pmdp)))
1395                 goto out_unlock;
1396
1397         /*
1398          * If there are potential migrations, wait for completion and retry
1399          * without disrupting NUMA hinting information. Do not relock and
1400          * check_same as the page may no longer be mapped.
1401          */
1402         if (unlikely(pmd_trans_migrating(*pmdp))) {
1403                 page = pmd_page(*pmdp);
1404                 spin_unlock(ptl);
1405                 wait_on_page_locked(page);
1406                 goto out;
1407         }
1408
1409         page = pmd_page(pmd);
1410         BUG_ON(is_huge_zero_page(page));
1411         page_nid = page_to_nid(page);
1412         last_cpupid = page_cpupid_last(page);
1413         count_vm_numa_event(NUMA_HINT_FAULTS);
1414         if (page_nid == this_nid) {
1415                 count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
1416                 flags |= TNF_FAULT_LOCAL;
1417         }
1418
1419         /* See similar comment in do_numa_page for explanation */
1420         if (!(vma->vm_flags & VM_WRITE))
1421                 flags |= TNF_NO_GROUP;
1422
1423         /*
1424          * Acquire the page lock to serialise THP migrations but avoid dropping
1425          * page_table_lock if at all possible
1426          */
1427         page_locked = trylock_page(page);
1428         target_nid = mpol_misplaced(page, vma, haddr);
1429         if (target_nid == -1) {
1430                 /* If the page was locked, there are no parallel migrations */
1431                 if (page_locked)
1432                         goto clear_pmdnuma;
1433         }
1434
1435         /* Migration could have started since the pmd_trans_migrating check */
1436         if (!page_locked) {
1437                 spin_unlock(ptl);
1438                 wait_on_page_locked(page);
1439                 page_nid = -1;
1440                 goto out;
1441         }
1442
1443         /*
1444          * Page is misplaced. Page lock serialises migrations. Acquire anon_vma
1445          * to serialises splits
1446          */
1447         get_page(page);
1448         spin_unlock(ptl);
1449         anon_vma = page_lock_anon_vma_read(page);
1450
1451         /* Confirm the PMD did not change while page_table_lock was released */
1452         spin_lock(ptl);
1453         if (unlikely(!pmd_same(pmd, *pmdp))) {
1454                 unlock_page(page);
1455                 put_page(page);
1456                 page_nid = -1;
1457                 goto out_unlock;
1458         }
1459
1460         /* Bail if we fail to protect against THP splits for any reason */
1461         if (unlikely(!anon_vma)) {
1462                 put_page(page);
1463                 page_nid = -1;
1464                 goto clear_pmdnuma;
1465         }
1466
1467         /*
1468          * Migrate the THP to the requested node, returns with page unlocked
1469          * and access rights restored.
1470          */
1471         spin_unlock(ptl);
1472         migrated = migrate_misplaced_transhuge_page(mm, vma,
1473                                 pmdp, pmd, addr, page, target_nid);
1474         if (migrated) {
1475                 flags |= TNF_MIGRATED;
1476                 page_nid = target_nid;
1477         } else
1478                 flags |= TNF_MIGRATE_FAIL;
1479
1480         goto out;
1481 clear_pmdnuma:
1482         BUG_ON(!PageLocked(page));
1483         was_writable = pmd_write(pmd);
1484         pmd = pmd_modify(pmd, vma->vm_page_prot);
1485         pmd = pmd_mkyoung(pmd);
1486         if (was_writable)
1487                 pmd = pmd_mkwrite(pmd);
1488         set_pmd_at(mm, haddr, pmdp, pmd);
1489         update_mmu_cache_pmd(vma, addr, pmdp);
1490         unlock_page(page);
1491 out_unlock:
1492         spin_unlock(ptl);
1493
1494 out:
1495         if (anon_vma)
1496                 page_unlock_anon_vma_read(anon_vma);
1497
1498         if (page_nid != -1)
1499                 task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, flags);
1500
1501         return 0;
1502 }
1503
1504 int madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
1505                 pmd_t *pmd, unsigned long addr, unsigned long next)
1506
1507 {
1508         spinlock_t *ptl;
1509         pmd_t orig_pmd;
1510         struct page *page;
1511         struct mm_struct *mm = tlb->mm;
1512         int ret = 0;
1513
1514         if (!pmd_trans_huge_lock(pmd, vma, &ptl))
1515                 goto out;
1516
1517         orig_pmd = *pmd;
1518         if (is_huge_zero_pmd(orig_pmd)) {
1519                 ret = 1;
1520                 goto out;
1521         }
1522
1523         page = pmd_page(orig_pmd);
1524         /*
1525          * If other processes are mapping this page, we couldn't discard
1526          * the page unless they all do MADV_FREE so let's skip the page.
1527          */
1528         if (page_mapcount(page) != 1)
1529                 goto out;
1530
1531         if (!trylock_page(page))
1532                 goto out;
1533
1534         /*
1535          * If user want to discard part-pages of THP, split it so MADV_FREE
1536          * will deactivate only them.
1537          */
1538         if (next - addr != HPAGE_PMD_SIZE) {
1539                 get_page(page);
1540                 spin_unlock(ptl);
1541                 if (split_huge_page(page)) {
1542                         put_page(page);
1543                         unlock_page(page);
1544                         goto out_unlocked;
1545                 }
1546                 put_page(page);
1547                 unlock_page(page);
1548                 ret = 1;
1549                 goto out_unlocked;
1550         }
1551
1552         if (PageDirty(page))
1553                 ClearPageDirty(page);
1554         unlock_page(page);
1555
1556         if (PageActive(page))
1557                 deactivate_page(page);
1558
1559         if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
1560                 orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd,
1561                         tlb->fullmm);
1562                 orig_pmd = pmd_mkold(orig_pmd);
1563                 orig_pmd = pmd_mkclean(orig_pmd);
1564
1565                 set_pmd_at(mm, addr, pmd, orig_pmd);
1566                 tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
1567         }
1568         ret = 1;
1569 out:
1570         spin_unlock(ptl);
1571 out_unlocked:
1572         return ret;
1573 }
1574
1575 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
1576                  pmd_t *pmd, unsigned long addr)
1577 {
1578         pmd_t orig_pmd;
1579         spinlock_t *ptl;
1580
1581         if (!__pmd_trans_huge_lock(pmd, vma, &ptl))
1582                 return 0;
1583         /*
1584          * For architectures like ppc64 we look at deposited pgtable
1585          * when calling pmdp_huge_get_and_clear. So do the
1586          * pgtable_trans_huge_withdraw after finishing pmdp related
1587          * operations.
1588          */
1589         orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd,
1590                         tlb->fullmm);
1591         tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
1592         if (vma_is_dax(vma)) {
1593                 spin_unlock(ptl);
1594                 if (is_huge_zero_pmd(orig_pmd))
1595                         put_huge_zero_page();
1596         } else if (is_huge_zero_pmd(orig_pmd)) {
1597                 pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd));
1598                 atomic_long_dec(&tlb->mm->nr_ptes);
1599                 spin_unlock(ptl);
1600                 put_huge_zero_page();
1601         } else {
1602                 struct page *page = pmd_page(orig_pmd);
1603                 page_remove_rmap(page, true);
1604                 VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
1605                 add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
1606                 VM_BUG_ON_PAGE(!PageHead(page), page);
1607                 pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd));
1608                 atomic_long_dec(&tlb->mm->nr_ptes);
1609                 spin_unlock(ptl);
1610                 tlb_remove_page(tlb, page);
1611         }
1612         return 1;
1613 }
1614
1615 bool move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
1616                   unsigned long old_addr,
1617                   unsigned long new_addr, unsigned long old_end,
1618                   pmd_t *old_pmd, pmd_t *new_pmd)
1619 {
1620         spinlock_t *old_ptl, *new_ptl;
1621         pmd_t pmd;
1622
1623         struct mm_struct *mm = vma->vm_mm;
1624
1625         if ((old_addr & ~HPAGE_PMD_MASK) ||
1626             (new_addr & ~HPAGE_PMD_MASK) ||
1627             old_end - old_addr < HPAGE_PMD_SIZE ||
1628             (new_vma->vm_flags & VM_NOHUGEPAGE))
1629                 return false;
1630
1631         /*
1632          * The destination pmd shouldn't be established, free_pgtables()
1633          * should have release it.
1634          */
1635         if (WARN_ON(!pmd_none(*new_pmd))) {
1636                 VM_BUG_ON(pmd_trans_huge(*new_pmd));
1637                 return false;
1638         }
1639
1640         /*
1641          * We don't have to worry about the ordering of src and dst
1642          * ptlocks because exclusive mmap_sem prevents deadlock.
1643          */
1644         if (__pmd_trans_huge_lock(old_pmd, vma, &old_ptl)) {
1645                 new_ptl = pmd_lockptr(mm, new_pmd);
1646                 if (new_ptl != old_ptl)
1647                         spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
1648                 pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
1649                 VM_BUG_ON(!pmd_none(*new_pmd));
1650
1651                 if (pmd_move_must_withdraw(new_ptl, old_ptl)) {
1652                         pgtable_t pgtable;
1653                         pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
1654                         pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
1655                 }
1656                 set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd));
1657                 if (new_ptl != old_ptl)
1658                         spin_unlock(new_ptl);
1659                 spin_unlock(old_ptl);
1660                 return true;
1661         }
1662         return false;
1663 }
1664
1665 /*
1666  * Returns
1667  *  - 0 if PMD could not be locked
1668  *  - 1 if PMD was locked but protections unchange and TLB flush unnecessary
1669  *  - HPAGE_PMD_NR is protections changed and TLB flush necessary
1670  */
1671 int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1672                 unsigned long addr, pgprot_t newprot, int prot_numa)
1673 {
1674         struct mm_struct *mm = vma->vm_mm;
1675         spinlock_t *ptl;
1676         int ret = 0;
1677
1678         if (__pmd_trans_huge_lock(pmd, vma, &ptl)) {
1679                 pmd_t entry;
1680                 bool preserve_write = prot_numa && pmd_write(*pmd);
1681                 ret = 1;
1682
1683                 /*
1684                  * Avoid trapping faults against the zero page. The read-only
1685                  * data is likely to be read-cached on the local CPU and
1686                  * local/remote hits to the zero page are not interesting.
1687                  */
1688                 if (prot_numa && is_huge_zero_pmd(*pmd)) {
1689                         spin_unlock(ptl);
1690                         return ret;
1691                 }
1692
1693                 if (!prot_numa || !pmd_protnone(*pmd)) {
1694                         entry = pmdp_huge_get_and_clear_notify(mm, addr, pmd);
1695                         entry = pmd_modify(entry, newprot);
1696                         if (preserve_write)
1697                                 entry = pmd_mkwrite(entry);
1698                         ret = HPAGE_PMD_NR;
1699                         set_pmd_at(mm, addr, pmd, entry);
1700                         BUG_ON(!preserve_write && pmd_write(entry));
1701                 }
1702                 spin_unlock(ptl);
1703         }
1704
1705         return ret;
1706 }
1707
1708 /*
1709  * Returns true if a given pmd maps a thp, false otherwise.
1710  *
1711  * Note that if it returns true, this routine returns without unlocking page
1712  * table lock. So callers must unlock it.
1713  */
1714 bool __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
1715                 spinlock_t **ptl)
1716 {
1717         *ptl = pmd_lock(vma->vm_mm, pmd);
1718         if (likely(pmd_trans_huge(*pmd)))
1719                 return true;
1720         spin_unlock(*ptl);
1721         return false;
1722 }
1723
1724 #define VM_NO_THP (VM_SPECIAL | VM_HUGETLB | VM_SHARED | VM_MAYSHARE)
1725
1726 int hugepage_madvise(struct vm_area_struct *vma,
1727                      unsigned long *vm_flags, int advice)
1728 {
1729         switch (advice) {
1730         case MADV_HUGEPAGE:
1731 #ifdef CONFIG_S390
1732                 /*
1733                  * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
1734                  * can't handle this properly after s390_enable_sie, so we simply
1735                  * ignore the madvise to prevent qemu from causing a SIGSEGV.
1736                  */
1737                 if (mm_has_pgste(vma->vm_mm))
1738                         return 0;
1739 #endif
1740                 /*
1741                  * Be somewhat over-protective like KSM for now!
1742                  */
1743                 if (*vm_flags & VM_NO_THP)
1744                         return -EINVAL;
1745                 *vm_flags &= ~VM_NOHUGEPAGE;
1746                 *vm_flags |= VM_HUGEPAGE;
1747                 /*
1748                  * If the vma become good for khugepaged to scan,
1749                  * register it here without waiting a page fault that
1750                  * may not happen any time soon.
1751                  */
1752                 if (unlikely(khugepaged_enter_vma_merge(vma, *vm_flags)))
1753                         return -ENOMEM;
1754                 break;
1755         case MADV_NOHUGEPAGE:
1756                 /*
1757                  * Be somewhat over-protective like KSM for now!
1758                  */
1759                 if (*vm_flags & VM_NO_THP)
1760                         return -EINVAL;
1761                 *vm_flags &= ~VM_HUGEPAGE;
1762                 *vm_flags |= VM_NOHUGEPAGE;
1763                 /*
1764                  * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
1765                  * this vma even if we leave the mm registered in khugepaged if
1766                  * it got registered before VM_NOHUGEPAGE was set.
1767                  */
1768                 break;
1769         }
1770
1771         return 0;
1772 }
1773
1774 static int __init khugepaged_slab_init(void)
1775 {
1776         mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
1777                                           sizeof(struct mm_slot),
1778                                           __alignof__(struct mm_slot), 0, NULL);
1779         if (!mm_slot_cache)
1780                 return -ENOMEM;
1781
1782         return 0;
1783 }
1784
1785 static void __init khugepaged_slab_exit(void)
1786 {
1787         kmem_cache_destroy(mm_slot_cache);
1788 }
1789
1790 static inline struct mm_slot *alloc_mm_slot(void)
1791 {
1792         if (!mm_slot_cache)     /* initialization failed */
1793                 return NULL;
1794         return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
1795 }
1796
1797 static inline void free_mm_slot(struct mm_slot *mm_slot)
1798 {
1799         kmem_cache_free(mm_slot_cache, mm_slot);
1800 }
1801
1802 static struct mm_slot *get_mm_slot(struct mm_struct *mm)
1803 {
1804         struct mm_slot *mm_slot;
1805
1806         hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
1807                 if (mm == mm_slot->mm)
1808                         return mm_slot;
1809
1810         return NULL;
1811 }
1812
1813 static void insert_to_mm_slots_hash(struct mm_struct *mm,
1814                                     struct mm_slot *mm_slot)
1815 {
1816         mm_slot->mm = mm;
1817         hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
1818 }
1819
1820 static inline int khugepaged_test_exit(struct mm_struct *mm)
1821 {
1822         return atomic_read(&mm->mm_users) == 0;
1823 }
1824
1825 int __khugepaged_enter(struct mm_struct *mm)
1826 {
1827         struct mm_slot *mm_slot;
1828         int wakeup;
1829
1830         mm_slot = alloc_mm_slot();
1831         if (!mm_slot)
1832                 return -ENOMEM;
1833
1834         /* __khugepaged_exit() must not run from under us */
1835         VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
1836         if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
1837                 free_mm_slot(mm_slot);
1838                 return 0;
1839         }
1840
1841         spin_lock(&khugepaged_mm_lock);
1842         insert_to_mm_slots_hash(mm, mm_slot);
1843         /*
1844          * Insert just behind the scanning cursor, to let the area settle
1845          * down a little.
1846          */
1847         wakeup = list_empty(&khugepaged_scan.mm_head);
1848         list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
1849         spin_unlock(&khugepaged_mm_lock);
1850
1851         atomic_inc(&mm->mm_count);
1852         if (wakeup)
1853                 wake_up_interruptible(&khugepaged_wait);
1854
1855         return 0;
1856 }
1857
1858 int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
1859                                unsigned long vm_flags)
1860 {
1861         unsigned long hstart, hend;
1862         if (!vma->anon_vma)
1863                 /*
1864                  * Not yet faulted in so we will register later in the
1865                  * page fault if needed.
1866                  */
1867                 return 0;
1868         if (vma->vm_ops)
1869                 /* khugepaged not yet working on file or special mappings */
1870                 return 0;
1871         VM_BUG_ON_VMA(vm_flags & VM_NO_THP, vma);
1872         hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
1873         hend = vma->vm_end & HPAGE_PMD_MASK;
1874         if (hstart < hend)
1875                 return khugepaged_enter(vma, vm_flags);
1876         return 0;
1877 }
1878
1879 void __khugepaged_exit(struct mm_struct *mm)
1880 {
1881         struct mm_slot *mm_slot;
1882         int free = 0;
1883
1884         spin_lock(&khugepaged_mm_lock);
1885         mm_slot = get_mm_slot(mm);
1886         if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
1887                 hash_del(&mm_slot->hash);
1888                 list_del(&mm_slot->mm_node);
1889                 free = 1;
1890         }
1891         spin_unlock(&khugepaged_mm_lock);
1892
1893         if (free) {
1894                 clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1895                 free_mm_slot(mm_slot);
1896                 mmdrop(mm);
1897         } else if (mm_slot) {
1898                 /*
1899                  * This is required to serialize against
1900                  * khugepaged_test_exit() (which is guaranteed to run
1901                  * under mmap sem read mode). Stop here (after we
1902                  * return all pagetables will be destroyed) until
1903                  * khugepaged has finished working on the pagetables
1904                  * under the mmap_sem.
1905                  */
1906                 down_write(&mm->mmap_sem);
1907                 up_write(&mm->mmap_sem);
1908         }
1909 }
1910
1911 static void release_pte_page(struct page *page)
1912 {
1913         /* 0 stands for page_is_file_cache(page) == false */
1914         dec_zone_page_state(page, NR_ISOLATED_ANON + 0);
1915         unlock_page(page);
1916         putback_lru_page(page);
1917 }
1918
1919 static void release_pte_pages(pte_t *pte, pte_t *_pte)
1920 {
1921         while (--_pte >= pte) {
1922                 pte_t pteval = *_pte;
1923                 if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)))
1924                         release_pte_page(pte_page(pteval));
1925         }
1926 }
1927
1928 static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
1929                                         unsigned long address,
1930                                         pte_t *pte)
1931 {
1932         struct page *page = NULL;
1933         pte_t *_pte;
1934         int none_or_zero = 0, result = 0;
1935         bool referenced = false, writable = false;
1936
1937         for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
1938              _pte++, address += PAGE_SIZE) {
1939                 pte_t pteval = *_pte;
1940                 if (pte_none(pteval) || (pte_present(pteval) &&
1941                                 is_zero_pfn(pte_pfn(pteval)))) {
1942                         if (!userfaultfd_armed(vma) &&
1943                             ++none_or_zero <= khugepaged_max_ptes_none) {
1944                                 continue;
1945                         } else {
1946                                 result = SCAN_EXCEED_NONE_PTE;
1947                                 goto out;
1948                         }
1949                 }
1950                 if (!pte_present(pteval)) {
1951                         result = SCAN_PTE_NON_PRESENT;
1952                         goto out;
1953                 }
1954                 page = vm_normal_page(vma, address, pteval);
1955                 if (unlikely(!page)) {
1956                         result = SCAN_PAGE_NULL;
1957                         goto out;
1958                 }
1959
1960                 VM_BUG_ON_PAGE(PageCompound(page), page);
1961                 VM_BUG_ON_PAGE(!PageAnon(page), page);
1962                 VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
1963
1964                 /*
1965                  * We can do it before isolate_lru_page because the
1966                  * page can't be freed from under us. NOTE: PG_lock
1967                  * is needed to serialize against split_huge_page
1968                  * when invoked from the VM.
1969                  */
1970                 if (!trylock_page(page)) {
1971                         result = SCAN_PAGE_LOCK;
1972                         goto out;
1973                 }
1974
1975                 /*
1976                  * cannot use mapcount: can't collapse if there's a gup pin.
1977                  * The page must only be referenced by the scanned process
1978                  * and page swap cache.
1979                  */
1980                 if (page_count(page) != 1 + !!PageSwapCache(page)) {
1981                         unlock_page(page);
1982                         result = SCAN_PAGE_COUNT;
1983                         goto out;
1984                 }
1985                 if (pte_write(pteval)) {
1986                         writable = true;
1987                 } else {
1988                         if (PageSwapCache(page) && !reuse_swap_page(page)) {
1989                                 unlock_page(page);
1990                                 result = SCAN_SWAP_CACHE_PAGE;
1991                                 goto out;
1992                         }
1993                         /*
1994                          * Page is not in the swap cache. It can be collapsed
1995                          * into a THP.
1996                          */
1997                 }
1998
1999                 /*
2000                  * Isolate the page to avoid collapsing an hugepage
2001                  * currently in use by the VM.
2002                  */
2003                 if (isolate_lru_page(page)) {
2004                         unlock_page(page);
2005                         result = SCAN_DEL_PAGE_LRU;
2006                         goto out;
2007                 }
2008                 /* 0 stands for page_is_file_cache(page) == false */
2009                 inc_zone_page_state(page, NR_ISOLATED_ANON + 0);
2010                 VM_BUG_ON_PAGE(!PageLocked(page), page);
2011                 VM_BUG_ON_PAGE(PageLRU(page), page);
2012
2013                 /* If there is no mapped pte young don't collapse the page */
2014                 if (pte_young(pteval) ||
2015                     page_is_young(page) || PageReferenced(page) ||
2016                     mmu_notifier_test_young(vma->vm_mm, address))
2017                         referenced = true;
2018         }
2019         if (likely(writable)) {
2020                 if (likely(referenced)) {
2021                         result = SCAN_SUCCEED;
2022                         trace_mm_collapse_huge_page_isolate(page_to_pfn(page), none_or_zero,
2023                                                             referenced, writable, result);
2024                         return 1;
2025                 }
2026         } else {
2027                 result = SCAN_PAGE_RO;
2028         }
2029
2030 out:
2031         release_pte_pages(pte, _pte);
2032         trace_mm_collapse_huge_page_isolate(page_to_pfn(page), none_or_zero,
2033                                             referenced, writable, result);
2034         return 0;
2035 }
2036
2037 static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
2038                                       struct vm_area_struct *vma,
2039                                       unsigned long address,
2040                                       spinlock_t *ptl)
2041 {
2042         pte_t *_pte;
2043         for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) {
2044                 pte_t pteval = *_pte;
2045                 struct page *src_page;
2046
2047                 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
2048                         clear_user_highpage(page, address);
2049                         add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
2050                         if (is_zero_pfn(pte_pfn(pteval))) {
2051                                 /*
2052                                  * ptl mostly unnecessary.
2053                                  */
2054                                 spin_lock(ptl);
2055                                 /*
2056                                  * paravirt calls inside pte_clear here are
2057                                  * superfluous.
2058                                  */
2059                                 pte_clear(vma->vm_mm, address, _pte);
2060                                 spin_unlock(ptl);
2061                         }
2062                 } else {
2063                         src_page = pte_page(pteval);
2064                         copy_user_highpage(page, src_page, address, vma);
2065                         VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page);
2066                         release_pte_page(src_page);
2067                         /*
2068                          * ptl mostly unnecessary, but preempt has to
2069                          * be disabled to update the per-cpu stats
2070                          * inside page_remove_rmap().
2071                          */
2072                         spin_lock(ptl);
2073                         /*
2074                          * paravirt calls inside pte_clear here are
2075                          * superfluous.
2076                          */
2077                         pte_clear(vma->vm_mm, address, _pte);
2078                         page_remove_rmap(src_page, false);
2079                         spin_unlock(ptl);
2080                         free_page_and_swap_cache(src_page);
2081                 }
2082
2083                 address += PAGE_SIZE;
2084                 page++;
2085         }
2086 }
2087
2088 static void khugepaged_alloc_sleep(void)
2089 {
2090         DEFINE_WAIT(wait);
2091
2092         add_wait_queue(&khugepaged_wait, &wait);
2093         freezable_schedule_timeout_interruptible(
2094                 msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
2095         remove_wait_queue(&khugepaged_wait, &wait);
2096 }
2097
2098 static int khugepaged_node_load[MAX_NUMNODES];
2099
2100 static bool khugepaged_scan_abort(int nid)
2101 {
2102         int i;
2103
2104         /*
2105          * If zone_reclaim_mode is disabled, then no extra effort is made to
2106          * allocate memory locally.
2107          */
2108         if (!zone_reclaim_mode)
2109                 return false;
2110
2111         /* If there is a count for this node already, it must be acceptable */
2112         if (khugepaged_node_load[nid])
2113                 return false;
2114
2115         for (i = 0; i < MAX_NUMNODES; i++) {
2116                 if (!khugepaged_node_load[i])
2117                         continue;
2118                 if (node_distance(nid, i) > RECLAIM_DISTANCE)
2119                         return true;
2120         }
2121         return false;
2122 }
2123
2124 #ifdef CONFIG_NUMA
2125 static int khugepaged_find_target_node(void)
2126 {
2127         static int last_khugepaged_target_node = NUMA_NO_NODE;
2128         int nid, target_node = 0, max_value = 0;
2129
2130         /* find first node with max normal pages hit */
2131         for (nid = 0; nid < MAX_NUMNODES; nid++)
2132                 if (khugepaged_node_load[nid] > max_value) {
2133                         max_value = khugepaged_node_load[nid];
2134                         target_node = nid;
2135                 }
2136
2137         /* do some balance if several nodes have the same hit record */
2138         if (target_node <= last_khugepaged_target_node)
2139                 for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
2140                                 nid++)
2141                         if (max_value == khugepaged_node_load[nid]) {
2142                                 target_node = nid;
2143                                 break;
2144                         }
2145
2146         last_khugepaged_target_node = target_node;
2147         return target_node;
2148 }
2149
2150 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
2151 {
2152         if (IS_ERR(*hpage)) {
2153                 if (!*wait)
2154                         return false;
2155
2156                 *wait = false;
2157                 *hpage = NULL;
2158                 khugepaged_alloc_sleep();
2159         } else if (*hpage) {
2160                 put_page(*hpage);
2161                 *hpage = NULL;
2162         }
2163
2164         return true;
2165 }
2166
2167 static struct page *
2168 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm,
2169                        unsigned long address, int node)
2170 {
2171         VM_BUG_ON_PAGE(*hpage, *hpage);
2172
2173         /*
2174          * Before allocating the hugepage, release the mmap_sem read lock.
2175          * The allocation can take potentially a long time if it involves
2176          * sync compaction, and we do not need to hold the mmap_sem during
2177          * that. We will recheck the vma after taking it again in write mode.
2178          */
2179         up_read(&mm->mmap_sem);
2180
2181         *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
2182         if (unlikely(!*hpage)) {
2183                 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
2184                 *hpage = ERR_PTR(-ENOMEM);
2185                 return NULL;
2186         }
2187
2188         prep_transhuge_page(*hpage);
2189         count_vm_event(THP_COLLAPSE_ALLOC);
2190         return *hpage;
2191 }
2192 #else
2193 static int khugepaged_find_target_node(void)
2194 {
2195         return 0;
2196 }
2197
2198 static inline struct page *alloc_hugepage(int defrag)
2199 {
2200         struct page *page;
2201
2202         page = alloc_pages(alloc_hugepage_gfpmask(defrag, 0), HPAGE_PMD_ORDER);
2203         if (page)
2204                 prep_transhuge_page(page);
2205         return page;
2206 }
2207
2208 static struct page *khugepaged_alloc_hugepage(bool *wait)
2209 {
2210         struct page *hpage;
2211
2212         do {
2213                 hpage = alloc_hugepage(khugepaged_defrag());
2214                 if (!hpage) {
2215                         count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
2216                         if (!*wait)
2217                                 return NULL;
2218
2219                         *wait = false;
2220                         khugepaged_alloc_sleep();
2221                 } else
2222                         count_vm_event(THP_COLLAPSE_ALLOC);
2223         } while (unlikely(!hpage) && likely(khugepaged_enabled()));
2224
2225         return hpage;
2226 }
2227
2228 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
2229 {
2230         if (!*hpage)
2231                 *hpage = khugepaged_alloc_hugepage(wait);
2232
2233         if (unlikely(!*hpage))
2234                 return false;
2235
2236         return true;
2237 }
2238
2239 static struct page *
2240 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm,
2241                        unsigned long address, int node)
2242 {
2243         up_read(&mm->mmap_sem);
2244         VM_BUG_ON(!*hpage);
2245
2246         return  *hpage;
2247 }
2248 #endif
2249
2250 static bool hugepage_vma_check(struct vm_area_struct *vma)
2251 {
2252         if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
2253             (vma->vm_flags & VM_NOHUGEPAGE))
2254                 return false;
2255         if (!vma->anon_vma || vma->vm_ops)
2256                 return false;
2257         if (is_vma_temporary_stack(vma))
2258                 return false;
2259         VM_BUG_ON_VMA(vma->vm_flags & VM_NO_THP, vma);
2260         return true;
2261 }
2262
2263 static void collapse_huge_page(struct mm_struct *mm,
2264                                    unsigned long address,
2265                                    struct page **hpage,
2266                                    struct vm_area_struct *vma,
2267                                    int node)
2268 {
2269         pmd_t *pmd, _pmd;
2270         pte_t *pte;
2271         pgtable_t pgtable;
2272         struct page *new_page;
2273         spinlock_t *pmd_ptl, *pte_ptl;
2274         int isolated, result = 0;
2275         unsigned long hstart, hend;
2276         struct mem_cgroup *memcg;
2277         unsigned long mmun_start;       /* For mmu_notifiers */
2278         unsigned long mmun_end;         /* For mmu_notifiers */
2279         gfp_t gfp;
2280
2281         VM_BUG_ON(address & ~HPAGE_PMD_MASK);
2282
2283         /* Only allocate from the target node */
2284         gfp = alloc_hugepage_gfpmask(khugepaged_defrag(), __GFP_OTHER_NODE) |
2285                 __GFP_THISNODE;
2286
2287         /* release the mmap_sem read lock. */
2288         new_page = khugepaged_alloc_page(hpage, gfp, mm, address, node);
2289         if (!new_page) {
2290                 result = SCAN_ALLOC_HUGE_PAGE_FAIL;
2291                 goto out_nolock;
2292         }
2293
2294         if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
2295                 result = SCAN_CGROUP_CHARGE_FAIL;
2296                 goto out_nolock;
2297         }
2298
2299         /*
2300          * Prevent all access to pagetables with the exception of
2301          * gup_fast later hanlded by the ptep_clear_flush and the VM
2302          * handled by the anon_vma lock + PG_lock.
2303          */
2304         down_write(&mm->mmap_sem);
2305         if (unlikely(khugepaged_test_exit(mm))) {
2306                 result = SCAN_ANY_PROCESS;
2307                 goto out;
2308         }
2309
2310         vma = find_vma(mm, address);
2311         if (!vma) {
2312                 result = SCAN_VMA_NULL;
2313                 goto out;
2314         }
2315         hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2316         hend = vma->vm_end & HPAGE_PMD_MASK;
2317         if (address < hstart || address + HPAGE_PMD_SIZE > hend) {
2318                 result = SCAN_ADDRESS_RANGE;
2319                 goto out;
2320         }
2321         if (!hugepage_vma_check(vma)) {
2322                 result = SCAN_VMA_CHECK;
2323                 goto out;
2324         }
2325         pmd = mm_find_pmd(mm, address);
2326         if (!pmd) {
2327                 result = SCAN_PMD_NULL;
2328                 goto out;
2329         }
2330
2331         anon_vma_lock_write(vma->anon_vma);
2332
2333         pte = pte_offset_map(pmd, address);
2334         pte_ptl = pte_lockptr(mm, pmd);
2335
2336         mmun_start = address;
2337         mmun_end   = address + HPAGE_PMD_SIZE;
2338         mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
2339         pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
2340         /*
2341          * After this gup_fast can't run anymore. This also removes
2342          * any huge TLB entry from the CPU so we won't allow
2343          * huge and small TLB entries for the same virtual address
2344          * to avoid the risk of CPU bugs in that area.
2345          */
2346         _pmd = pmdp_collapse_flush(vma, address, pmd);
2347         spin_unlock(pmd_ptl);
2348         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
2349
2350         spin_lock(pte_ptl);
2351         isolated = __collapse_huge_page_isolate(vma, address, pte);
2352         spin_unlock(pte_ptl);
2353
2354         if (unlikely(!isolated)) {
2355                 pte_unmap(pte);
2356                 spin_lock(pmd_ptl);
2357                 BUG_ON(!pmd_none(*pmd));
2358                 /*
2359                  * We can only use set_pmd_at when establishing
2360                  * hugepmds and never for establishing regular pmds that
2361                  * points to regular pagetables. Use pmd_populate for that
2362                  */
2363                 pmd_populate(mm, pmd, pmd_pgtable(_pmd));
2364                 spin_unlock(pmd_ptl);
2365                 anon_vma_unlock_write(vma->anon_vma);
2366                 result = SCAN_FAIL;
2367                 goto out;
2368         }
2369
2370         /*
2371          * All pages are isolated and locked so anon_vma rmap
2372          * can't run anymore.
2373          */
2374         anon_vma_unlock_write(vma->anon_vma);
2375
2376         __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl);
2377         pte_unmap(pte);
2378         __SetPageUptodate(new_page);
2379         pgtable = pmd_pgtable(_pmd);
2380
2381         _pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
2382         _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
2383
2384         /*
2385          * spin_lock() below is not the equivalent of smp_wmb(), so
2386          * this is needed to avoid the copy_huge_page writes to become
2387          * visible after the set_pmd_at() write.
2388          */
2389         smp_wmb();
2390
2391         spin_lock(pmd_ptl);
2392         BUG_ON(!pmd_none(*pmd));
2393         page_add_new_anon_rmap(new_page, vma, address, true);
2394         mem_cgroup_commit_charge(new_page, memcg, false, true);
2395         lru_cache_add_active_or_unevictable(new_page, vma);
2396         pgtable_trans_huge_deposit(mm, pmd, pgtable);
2397         set_pmd_at(mm, address, pmd, _pmd);
2398         update_mmu_cache_pmd(vma, address, pmd);
2399         spin_unlock(pmd_ptl);
2400
2401         *hpage = NULL;
2402
2403         khugepaged_pages_collapsed++;
2404         result = SCAN_SUCCEED;
2405 out_up_write:
2406         up_write(&mm->mmap_sem);
2407         trace_mm_collapse_huge_page(mm, isolated, result);
2408         return;
2409
2410 out_nolock:
2411         trace_mm_collapse_huge_page(mm, isolated, result);
2412         return;
2413 out:
2414         mem_cgroup_cancel_charge(new_page, memcg, true);
2415         goto out_up_write;
2416 }
2417
2418 static int khugepaged_scan_pmd(struct mm_struct *mm,
2419                                struct vm_area_struct *vma,
2420                                unsigned long address,
2421                                struct page **hpage)
2422 {
2423         pmd_t *pmd;
2424         pte_t *pte, *_pte;
2425         int ret = 0, none_or_zero = 0, result = 0;
2426         struct page *page = NULL;
2427         unsigned long _address;
2428         spinlock_t *ptl;
2429         int node = NUMA_NO_NODE;
2430         bool writable = false, referenced = false;
2431
2432         VM_BUG_ON(address & ~HPAGE_PMD_MASK);
2433
2434         pmd = mm_find_pmd(mm, address);
2435         if (!pmd) {
2436                 result = SCAN_PMD_NULL;
2437                 goto out;
2438         }
2439
2440         memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
2441         pte = pte_offset_map_lock(mm, pmd, address, &ptl);
2442         for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
2443              _pte++, _address += PAGE_SIZE) {
2444                 pte_t pteval = *_pte;
2445                 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
2446                         if (!userfaultfd_armed(vma) &&
2447                             ++none_or_zero <= khugepaged_max_ptes_none) {
2448                                 continue;
2449                         } else {
2450                                 result = SCAN_EXCEED_NONE_PTE;
2451                                 goto out_unmap;
2452                         }
2453                 }
2454                 if (!pte_present(pteval)) {
2455                         result = SCAN_PTE_NON_PRESENT;
2456                         goto out_unmap;
2457                 }
2458                 if (pte_write(pteval))
2459                         writable = true;
2460
2461                 page = vm_normal_page(vma, _address, pteval);
2462                 if (unlikely(!page)) {
2463                         result = SCAN_PAGE_NULL;
2464                         goto out_unmap;
2465                 }
2466
2467                 /* TODO: teach khugepaged to collapse THP mapped with pte */
2468                 if (PageCompound(page)) {
2469                         result = SCAN_PAGE_COMPOUND;
2470                         goto out_unmap;
2471                 }
2472
2473                 /*
2474                  * Record which node the original page is from and save this
2475                  * information to khugepaged_node_load[].
2476                  * Khupaged will allocate hugepage from the node has the max
2477                  * hit record.
2478                  */
2479                 node = page_to_nid(page);
2480                 if (khugepaged_scan_abort(node)) {
2481                         result = SCAN_SCAN_ABORT;
2482                         goto out_unmap;
2483                 }
2484                 khugepaged_node_load[node]++;
2485                 if (!PageLRU(page)) {
2486                         result = SCAN_SCAN_ABORT;
2487                         goto out_unmap;
2488                 }
2489                 if (PageLocked(page)) {
2490                         result = SCAN_PAGE_LOCK;
2491                         goto out_unmap;
2492                 }
2493                 if (!PageAnon(page)) {
2494                         result = SCAN_PAGE_ANON;
2495                         goto out_unmap;
2496                 }
2497
2498                 /*
2499                  * cannot use mapcount: can't collapse if there's a gup pin.
2500                  * The page must only be referenced by the scanned process
2501                  * and page swap cache.
2502                  */
2503                 if (page_count(page) != 1 + !!PageSwapCache(page)) {
2504                         result = SCAN_PAGE_COUNT;
2505                         goto out_unmap;
2506                 }
2507                 if (pte_young(pteval) ||
2508                     page_is_young(page) || PageReferenced(page) ||
2509                     mmu_notifier_test_young(vma->vm_mm, address))
2510                         referenced = true;
2511         }
2512         if (writable) {
2513                 if (referenced) {
2514                         result = SCAN_SUCCEED;
2515                         ret = 1;
2516                 } else {
2517                         result = SCAN_NO_REFERENCED_PAGE;
2518                 }
2519         } else {
2520                 result = SCAN_PAGE_RO;
2521         }
2522 out_unmap:
2523         pte_unmap_unlock(pte, ptl);
2524         if (ret) {
2525                 node = khugepaged_find_target_node();
2526                 /* collapse_huge_page will return with the mmap_sem released */
2527                 collapse_huge_page(mm, address, hpage, vma, node);
2528         }
2529 out:
2530         trace_mm_khugepaged_scan_pmd(mm, page_to_pfn(page), writable, referenced,
2531                                      none_or_zero, result);
2532         return ret;
2533 }
2534
2535 static void collect_mm_slot(struct mm_slot *mm_slot)
2536 {
2537         struct mm_struct *mm = mm_slot->mm;
2538
2539         VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
2540
2541         if (khugepaged_test_exit(mm)) {
2542                 /* free mm_slot */
2543                 hash_del(&mm_slot->hash);
2544                 list_del(&mm_slot->mm_node);
2545
2546                 /*
2547                  * Not strictly needed because the mm exited already.
2548                  *
2549                  * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
2550                  */
2551
2552                 /* khugepaged_mm_lock actually not necessary for the below */
2553                 free_mm_slot(mm_slot);
2554                 mmdrop(mm);
2555         }
2556 }
2557
2558 static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
2559                                             struct page **hpage)
2560         __releases(&khugepaged_mm_lock)
2561         __acquires(&khugepaged_mm_lock)
2562 {
2563         struct mm_slot *mm_slot;
2564         struct mm_struct *mm;
2565         struct vm_area_struct *vma;
2566         int progress = 0;
2567
2568         VM_BUG_ON(!pages);
2569         VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
2570
2571         if (khugepaged_scan.mm_slot)
2572                 mm_slot = khugepaged_scan.mm_slot;
2573         else {
2574                 mm_slot = list_entry(khugepaged_scan.mm_head.next,
2575                                      struct mm_slot, mm_node);
2576                 khugepaged_scan.address = 0;
2577                 khugepaged_scan.mm_slot = mm_slot;
2578         }
2579         spin_unlock(&khugepaged_mm_lock);
2580
2581         mm = mm_slot->mm;
2582         down_read(&mm->mmap_sem);
2583         if (unlikely(khugepaged_test_exit(mm)))
2584                 vma = NULL;
2585         else
2586                 vma = find_vma(mm, khugepaged_scan.address);
2587
2588         progress++;
2589         for (; vma; vma = vma->vm_next) {
2590                 unsigned long hstart, hend;
2591
2592                 cond_resched();
2593                 if (unlikely(khugepaged_test_exit(mm))) {
2594                         progress++;
2595                         break;
2596                 }
2597                 if (!hugepage_vma_check(vma)) {
2598 skip:
2599                         progress++;
2600                         continue;
2601                 }
2602                 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2603                 hend = vma->vm_end & HPAGE_PMD_MASK;
2604                 if (hstart >= hend)
2605                         goto skip;
2606                 if (khugepaged_scan.address > hend)
2607                         goto skip;
2608                 if (khugepaged_scan.address < hstart)
2609                         khugepaged_scan.address = hstart;
2610                 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2611
2612                 while (khugepaged_scan.address < hend) {
2613                         int ret;
2614                         cond_resched();
2615                         if (unlikely(khugepaged_test_exit(mm)))
2616                                 goto breakouterloop;
2617
2618                         VM_BUG_ON(khugepaged_scan.address < hstart ||
2619                                   khugepaged_scan.address + HPAGE_PMD_SIZE >
2620                                   hend);
2621                         ret = khugepaged_scan_pmd(mm, vma,
2622                                                   khugepaged_scan.address,
2623                                                   hpage);
2624                         /* move to next address */
2625                         khugepaged_scan.address += HPAGE_PMD_SIZE;
2626                         progress += HPAGE_PMD_NR;
2627                         if (ret)
2628                                 /* we released mmap_sem so break loop */
2629                                 goto breakouterloop_mmap_sem;
2630                         if (progress >= pages)
2631                                 goto breakouterloop;
2632                 }
2633         }
2634 breakouterloop:
2635         up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
2636 breakouterloop_mmap_sem:
2637
2638         spin_lock(&khugepaged_mm_lock);
2639         VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
2640         /*
2641          * Release the current mm_slot if this mm is about to die, or
2642          * if we scanned all vmas of this mm.
2643          */
2644         if (khugepaged_test_exit(mm) || !vma) {
2645                 /*
2646                  * Make sure that if mm_users is reaching zero while
2647                  * khugepaged runs here, khugepaged_exit will find
2648                  * mm_slot not pointing to the exiting mm.
2649                  */
2650                 if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
2651                         khugepaged_scan.mm_slot = list_entry(
2652                                 mm_slot->mm_node.next,
2653                                 struct mm_slot, mm_node);
2654                         khugepaged_scan.address = 0;
2655                 } else {
2656                         khugepaged_scan.mm_slot = NULL;
2657                         khugepaged_full_scans++;
2658                 }
2659
2660                 collect_mm_slot(mm_slot);
2661         }
2662
2663         return progress;
2664 }
2665
2666 static int khugepaged_has_work(void)
2667 {
2668         return !list_empty(&khugepaged_scan.mm_head) &&
2669                 khugepaged_enabled();
2670 }
2671
2672 static int khugepaged_wait_event(void)
2673 {
2674         return !list_empty(&khugepaged_scan.mm_head) ||
2675                 kthread_should_stop();
2676 }
2677
2678 static void khugepaged_do_scan(void)
2679 {
2680         struct page *hpage = NULL;
2681         unsigned int progress = 0, pass_through_head = 0;
2682         unsigned int pages = khugepaged_pages_to_scan;
2683         bool wait = true;
2684
2685         barrier(); /* write khugepaged_pages_to_scan to local stack */
2686
2687         while (progress < pages) {
2688                 if (!khugepaged_prealloc_page(&hpage, &wait))
2689                         break;
2690
2691                 cond_resched();
2692
2693                 if (unlikely(kthread_should_stop() || try_to_freeze()))
2694                         break;
2695
2696                 spin_lock(&khugepaged_mm_lock);
2697                 if (!khugepaged_scan.mm_slot)
2698                         pass_through_head++;
2699                 if (khugepaged_has_work() &&
2700                     pass_through_head < 2)
2701                         progress += khugepaged_scan_mm_slot(pages - progress,
2702                                                             &hpage);
2703                 else
2704                         progress = pages;
2705                 spin_unlock(&khugepaged_mm_lock);
2706         }
2707
2708         if (!IS_ERR_OR_NULL(hpage))
2709                 put_page(hpage);
2710 }
2711
2712 static void khugepaged_wait_work(void)
2713 {
2714         if (khugepaged_has_work()) {
2715                 if (!khugepaged_scan_sleep_millisecs)
2716                         return;
2717
2718                 wait_event_freezable_timeout(khugepaged_wait,
2719                                              kthread_should_stop(),
2720                         msecs_to_jiffies(khugepaged_scan_sleep_millisecs));
2721                 return;
2722         }
2723
2724         if (khugepaged_enabled())
2725                 wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
2726 }
2727
2728 static int khugepaged(void *none)
2729 {
2730         struct mm_slot *mm_slot;
2731
2732         set_freezable();
2733         set_user_nice(current, MAX_NICE);
2734
2735         while (!kthread_should_stop()) {
2736                 khugepaged_do_scan();
2737                 khugepaged_wait_work();
2738         }
2739
2740         spin_lock(&khugepaged_mm_lock);
2741         mm_slot = khugepaged_scan.mm_slot;
2742         khugepaged_scan.mm_slot = NULL;
2743         if (mm_slot)
2744                 collect_mm_slot(mm_slot);
2745         spin_unlock(&khugepaged_mm_lock);
2746         return 0;
2747 }
2748
2749 static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
2750                 unsigned long haddr, pmd_t *pmd)
2751 {
2752         struct mm_struct *mm = vma->vm_mm;
2753         pgtable_t pgtable;
2754         pmd_t _pmd;
2755         int i;
2756
2757         /* leave pmd empty until pte is filled */
2758         pmdp_huge_clear_flush_notify(vma, haddr, pmd);
2759
2760         pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2761         pmd_populate(mm, &_pmd, pgtable);
2762
2763         for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
2764                 pte_t *pte, entry;
2765                 entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
2766                 entry = pte_mkspecial(entry);
2767                 pte = pte_offset_map(&_pmd, haddr);
2768                 VM_BUG_ON(!pte_none(*pte));
2769                 set_pte_at(mm, haddr, pte, entry);
2770                 pte_unmap(pte);
2771         }
2772         smp_wmb(); /* make pte visible before pmd */
2773         pmd_populate(mm, pmd, pgtable);
2774         put_huge_zero_page();
2775 }
2776
2777 static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
2778                 unsigned long haddr, bool freeze)
2779 {
2780         struct mm_struct *mm = vma->vm_mm;
2781         struct page *page;
2782         pgtable_t pgtable;
2783         pmd_t _pmd;
2784         bool young, write, dirty;
2785         int i;
2786
2787         VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
2788         VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
2789         VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);
2790         VM_BUG_ON(!pmd_trans_huge(*pmd));
2791
2792         count_vm_event(THP_SPLIT_PMD);
2793
2794         if (vma_is_dax(vma)) {
2795                 pmd_t _pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
2796                 if (is_huge_zero_pmd(_pmd))
2797                         put_huge_zero_page();
2798                 return;
2799         } else if (is_huge_zero_pmd(*pmd)) {
2800                 return __split_huge_zero_page_pmd(vma, haddr, pmd);
2801         }
2802
2803         page = pmd_page(*pmd);
2804         VM_BUG_ON_PAGE(!page_count(page), page);
2805         atomic_add(HPAGE_PMD_NR - 1, &page->_count);
2806         write = pmd_write(*pmd);
2807         young = pmd_young(*pmd);
2808         dirty = pmd_dirty(*pmd);
2809
2810         pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2811         pmd_populate(mm, &_pmd, pgtable);
2812
2813         for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
2814                 pte_t entry, *pte;
2815                 /*
2816                  * Note that NUMA hinting access restrictions are not
2817                  * transferred to avoid any possibility of altering
2818                  * permissions across VMAs.
2819                  */
2820                 if (freeze) {
2821                         swp_entry_t swp_entry;
2822                         swp_entry = make_migration_entry(page + i, write);
2823                         entry = swp_entry_to_pte(swp_entry);
2824                 } else {
2825                         entry = mk_pte(page + i, vma->vm_page_prot);
2826                         entry = maybe_mkwrite(entry, vma);
2827                         if (!write)
2828                                 entry = pte_wrprotect(entry);
2829                         if (!young)
2830                                 entry = pte_mkold(entry);
2831                 }
2832                 if (dirty)
2833                         SetPageDirty(page + i);
2834                 pte = pte_offset_map(&_pmd, haddr);
2835                 BUG_ON(!pte_none(*pte));
2836                 set_pte_at(mm, haddr, pte, entry);
2837                 atomic_inc(&page[i]._mapcount);
2838                 pte_unmap(pte);
2839         }
2840
2841         /*
2842          * Set PG_double_map before dropping compound_mapcount to avoid
2843          * false-negative page_mapped().
2844          */
2845         if (compound_mapcount(page) > 1 && !TestSetPageDoubleMap(page)) {
2846                 for (i = 0; i < HPAGE_PMD_NR; i++)
2847                         atomic_inc(&page[i]._mapcount);
2848         }
2849
2850         if (atomic_add_negative(-1, compound_mapcount_ptr(page))) {
2851                 /* Last compound_mapcount is gone. */
2852                 __dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
2853                 if (TestClearPageDoubleMap(page)) {
2854                         /* No need in mapcount reference anymore */
2855                         for (i = 0; i < HPAGE_PMD_NR; i++)
2856                                 atomic_dec(&page[i]._mapcount);
2857                 }
2858         }
2859
2860         smp_wmb(); /* make pte visible before pmd */
2861         /*
2862          * Up to this point the pmd is present and huge and userland has the
2863          * whole access to the hugepage during the split (which happens in
2864          * place). If we overwrite the pmd with the not-huge version pointing
2865          * to the pte here (which of course we could if all CPUs were bug
2866          * free), userland could trigger a small page size TLB miss on the
2867          * small sized TLB while the hugepage TLB entry is still established in
2868          * the huge TLB. Some CPU doesn't like that.
2869          * See http://support.amd.com/us/Processor_TechDocs/41322.pdf, Erratum
2870          * 383 on page 93. Intel should be safe but is also warns that it's
2871          * only safe if the permission and cache attributes of the two entries
2872          * loaded in the two TLB is identical (which should be the case here).
2873          * But it is generally safer to never allow small and huge TLB entries
2874          * for the same virtual address to be loaded simultaneously. So instead
2875          * of doing "pmd_populate(); flush_pmd_tlb_range();" we first mark the
2876          * current pmd notpresent (atomically because here the pmd_trans_huge
2877          * and pmd_trans_splitting must remain set at all times on the pmd
2878          * until the split is complete for this pmd), then we flush the SMP TLB
2879          * and finally we write the non-huge version of the pmd entry with
2880          * pmd_populate.
2881          */
2882         pmdp_invalidate(vma, haddr, pmd);
2883         pmd_populate(mm, pmd, pgtable);
2884
2885         if (freeze) {
2886                 for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
2887                         page_remove_rmap(page + i, false);
2888                         put_page(page + i);
2889                 }
2890         }
2891 }
2892
2893 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
2894                 unsigned long address)
2895 {
2896         spinlock_t *ptl;
2897         struct mm_struct *mm = vma->vm_mm;
2898         struct page *page = NULL;
2899         unsigned long haddr = address & HPAGE_PMD_MASK;
2900
2901         mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PMD_SIZE);
2902         ptl = pmd_lock(mm, pmd);
2903         if (unlikely(!pmd_trans_huge(*pmd)))
2904                 goto out;
2905         page = pmd_page(*pmd);
2906         __split_huge_pmd_locked(vma, pmd, haddr, false);
2907         if (PageMlocked(page))
2908                 get_page(page);
2909         else
2910                 page = NULL;
2911 out:
2912         spin_unlock(ptl);
2913         mmu_notifier_invalidate_range_end(mm, haddr, haddr + HPAGE_PMD_SIZE);
2914         if (page) {
2915                 lock_page(page);
2916                 munlock_vma_page(page);
2917                 unlock_page(page);
2918                 put_page(page);
2919         }
2920 }
2921
2922 static void split_huge_pmd_address(struct vm_area_struct *vma,
2923                                     unsigned long address)
2924 {
2925         pgd_t *pgd;
2926         pud_t *pud;
2927         pmd_t *pmd;
2928
2929         VM_BUG_ON(!(address & ~HPAGE_PMD_MASK));
2930
2931         pgd = pgd_offset(vma->vm_mm, address);
2932         if (!pgd_present(*pgd))
2933                 return;
2934
2935         pud = pud_offset(pgd, address);
2936         if (!pud_present(*pud))
2937                 return;
2938
2939         pmd = pmd_offset(pud, address);
2940         if (!pmd_present(*pmd) || !pmd_trans_huge(*pmd))
2941                 return;
2942         /*
2943          * Caller holds the mmap_sem write mode, so a huge pmd cannot
2944          * materialize from under us.
2945          */
2946         split_huge_pmd(vma, pmd, address);
2947 }
2948
2949 void vma_adjust_trans_huge(struct vm_area_struct *vma,
2950                              unsigned long start,
2951                              unsigned long end,
2952                              long adjust_next)
2953 {
2954         /*
2955          * If the new start address isn't hpage aligned and it could
2956          * previously contain an hugepage: check if we need to split
2957          * an huge pmd.
2958          */
2959         if (start & ~HPAGE_PMD_MASK &&
2960             (start & HPAGE_PMD_MASK) >= vma->vm_start &&
2961             (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
2962                 split_huge_pmd_address(vma, start);
2963
2964         /*
2965          * If the new end address isn't hpage aligned and it could
2966          * previously contain an hugepage: check if we need to split
2967          * an huge pmd.
2968          */
2969         if (end & ~HPAGE_PMD_MASK &&
2970             (end & HPAGE_PMD_MASK) >= vma->vm_start &&
2971             (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
2972                 split_huge_pmd_address(vma, end);
2973
2974         /*
2975          * If we're also updating the vma->vm_next->vm_start, if the new
2976          * vm_next->vm_start isn't page aligned and it could previously
2977          * contain an hugepage: check if we need to split an huge pmd.
2978          */
2979         if (adjust_next > 0) {
2980                 struct vm_area_struct *next = vma->vm_next;
2981                 unsigned long nstart = next->vm_start;
2982                 nstart += adjust_next << PAGE_SHIFT;
2983                 if (nstart & ~HPAGE_PMD_MASK &&
2984                     (nstart & HPAGE_PMD_MASK) >= next->vm_start &&
2985                     (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
2986                         split_huge_pmd_address(next, nstart);
2987         }
2988 }
2989
2990 static void freeze_page_vma(struct vm_area_struct *vma, struct page *page,
2991                 unsigned long address)
2992 {
2993         unsigned long haddr = address & HPAGE_PMD_MASK;
2994         spinlock_t *ptl;
2995         pgd_t *pgd;
2996         pud_t *pud;
2997         pmd_t *pmd;
2998         pte_t *pte;
2999         int i, nr = HPAGE_PMD_NR;
3000
3001         /* Skip pages which doesn't belong to the VMA */
3002         if (address < vma->vm_start) {
3003                 int off = (vma->vm_start - address) >> PAGE_SHIFT;
3004                 page += off;
3005                 nr -= off;
3006                 address = vma->vm_start;
3007         }
3008
3009         pgd = pgd_offset(vma->vm_mm, address);
3010         if (!pgd_present(*pgd))
3011                 return;
3012         pud = pud_offset(pgd, address);
3013         if (!pud_present(*pud))
3014                 return;
3015         pmd = pmd_offset(pud, address);
3016         ptl = pmd_lock(vma->vm_mm, pmd);
3017         if (!pmd_present(*pmd)) {
3018                 spin_unlock(ptl);
3019                 return;
3020         }
3021         if (pmd_trans_huge(*pmd)) {
3022                 if (page == pmd_page(*pmd))
3023                         __split_huge_pmd_locked(vma, pmd, haddr, true);
3024                 spin_unlock(ptl);
3025                 return;
3026         }
3027         spin_unlock(ptl);
3028
3029         pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
3030         for (i = 0; i < nr; i++, address += PAGE_SIZE, page++, pte++) {
3031                 pte_t entry, swp_pte;
3032                 swp_entry_t swp_entry;
3033
3034                 /*
3035                  * We've just crossed page table boundary: need to map next one.
3036                  * It can happen if THP was mremaped to non PMD-aligned address.
3037                  */
3038                 if (unlikely(address == haddr + HPAGE_PMD_SIZE)) {
3039                         pte_unmap_unlock(pte - 1, ptl);
3040                         pmd = mm_find_pmd(vma->vm_mm, address);
3041                         if (!pmd)
3042                                 return;
3043                         pte = pte_offset_map_lock(vma->vm_mm, pmd,
3044                                         address, &ptl);
3045                 }
3046
3047                 if (!pte_present(*pte))
3048                         continue;
3049                 if (page_to_pfn(page) != pte_pfn(*pte))
3050                         continue;
3051                 flush_cache_page(vma, address, page_to_pfn(page));
3052                 entry = ptep_clear_flush(vma, address, pte);
3053                 if (pte_dirty(entry))
3054                         SetPageDirty(page);
3055                 swp_entry = make_migration_entry(page, pte_write(entry));
3056                 swp_pte = swp_entry_to_pte(swp_entry);
3057                 if (pte_soft_dirty(entry))
3058                         swp_pte = pte_swp_mksoft_dirty(swp_pte);
3059                 set_pte_at(vma->vm_mm, address, pte, swp_pte);
3060                 page_remove_rmap(page, false);
3061                 put_page(page);
3062         }
3063         pte_unmap_unlock(pte - 1, ptl);
3064 }
3065
3066 static void freeze_page(struct anon_vma *anon_vma, struct page *page)
3067 {
3068         struct anon_vma_chain *avc;
3069         pgoff_t pgoff = page_to_pgoff(page);
3070
3071         VM_BUG_ON_PAGE(!PageHead(page), page);
3072
3073         anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff,
3074                         pgoff + HPAGE_PMD_NR - 1) {
3075                 unsigned long address = __vma_address(page, avc->vma);
3076
3077                 mmu_notifier_invalidate_range_start(avc->vma->vm_mm,
3078                                 address, address + HPAGE_PMD_SIZE);
3079                 freeze_page_vma(avc->vma, page, address);
3080                 mmu_notifier_invalidate_range_end(avc->vma->vm_mm,
3081                                 address, address + HPAGE_PMD_SIZE);
3082         }
3083 }
3084
3085 static void unfreeze_page_vma(struct vm_area_struct *vma, struct page *page,
3086                 unsigned long address)
3087 {
3088         spinlock_t *ptl;
3089         pmd_t *pmd;
3090         pte_t *pte, entry;
3091         swp_entry_t swp_entry;
3092         unsigned long haddr = address & HPAGE_PMD_MASK;
3093         int i, nr = HPAGE_PMD_NR;
3094
3095         /* Skip pages which doesn't belong to the VMA */
3096         if (address < vma->vm_start) {
3097                 int off = (vma->vm_start - address) >> PAGE_SHIFT;
3098                 page += off;
3099                 nr -= off;
3100                 address = vma->vm_start;
3101         }
3102
3103         pmd = mm_find_pmd(vma->vm_mm, address);
3104         if (!pmd)
3105                 return;
3106
3107         pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
3108         for (i = 0; i < nr; i++, address += PAGE_SIZE, page++, pte++) {
3109                 /*
3110                  * We've just crossed page table boundary: need to map next one.
3111                  * It can happen if THP was mremaped to non-PMD aligned address.
3112                  */
3113                 if (unlikely(address == haddr + HPAGE_PMD_SIZE)) {
3114                         pte_unmap_unlock(pte - 1, ptl);
3115                         pmd = mm_find_pmd(vma->vm_mm, address);
3116                         if (!pmd)
3117                                 return;
3118                         pte = pte_offset_map_lock(vma->vm_mm, pmd,
3119                                         address, &ptl);
3120                 }
3121
3122                 if (!is_swap_pte(*pte))
3123                         continue;
3124
3125                 swp_entry = pte_to_swp_entry(*pte);
3126                 if (!is_migration_entry(swp_entry))
3127                         continue;
3128                 if (migration_entry_to_page(swp_entry) != page)
3129                         continue;
3130
3131                 get_page(page);
3132                 page_add_anon_rmap(page, vma, address, false);
3133
3134                 entry = pte_mkold(mk_pte(page, vma->vm_page_prot));
3135                 if (PageDirty(page))
3136                         entry = pte_mkdirty(entry);
3137                 if (is_write_migration_entry(swp_entry))
3138                         entry = maybe_mkwrite(entry, vma);
3139
3140                 flush_dcache_page(page);
3141                 set_pte_at(vma->vm_mm, address, pte, entry);
3142
3143                 /* No need to invalidate - it was non-present before */
3144                 update_mmu_cache(vma, address, pte);
3145         }
3146         pte_unmap_unlock(pte - 1, ptl);
3147 }
3148
3149 static void unfreeze_page(struct anon_vma *anon_vma, struct page *page)
3150 {
3151         struct anon_vma_chain *avc;
3152         pgoff_t pgoff = page_to_pgoff(page);
3153
3154         anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
3155                         pgoff, pgoff + HPAGE_PMD_NR - 1) {
3156                 unsigned long address = __vma_address(page, avc->vma);
3157
3158                 mmu_notifier_invalidate_range_start(avc->vma->vm_mm,
3159                                 address, address + HPAGE_PMD_SIZE);
3160                 unfreeze_page_vma(avc->vma, page, address);
3161                 mmu_notifier_invalidate_range_end(avc->vma->vm_mm,
3162                                 address, address + HPAGE_PMD_SIZE);
3163         }
3164 }
3165
3166 static int __split_huge_page_tail(struct page *head, int tail,
3167                 struct lruvec *lruvec, struct list_head *list)
3168 {
3169         int mapcount;
3170         struct page *page_tail = head + tail;
3171
3172         mapcount = atomic_read(&page_tail->_mapcount) + 1;
3173         VM_BUG_ON_PAGE(atomic_read(&page_tail->_count) != 0, page_tail);
3174
3175         /*
3176          * tail_page->_count is zero and not changing from under us. But
3177          * get_page_unless_zero() may be running from under us on the
3178          * tail_page. If we used atomic_set() below instead of atomic_add(), we
3179          * would then run atomic_set() concurrently with
3180          * get_page_unless_zero(), and atomic_set() is implemented in C not
3181          * using locked ops. spin_unlock on x86 sometime uses locked ops
3182          * because of PPro errata 66, 92, so unless somebody can guarantee
3183          * atomic_set() here would be safe on all archs (and not only on x86),
3184          * it's safer to use atomic_add().
3185          */
3186         atomic_add(mapcount + 1, &page_tail->_count);
3187
3188
3189         page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
3190         page_tail->flags |= (head->flags &
3191                         ((1L << PG_referenced) |
3192                          (1L << PG_swapbacked) |
3193                          (1L << PG_mlocked) |
3194                          (1L << PG_uptodate) |
3195                          (1L << PG_active) |
3196                          (1L << PG_locked) |
3197                          (1L << PG_unevictable) |
3198                          (1L << PG_dirty)));
3199
3200         /*
3201          * After clearing PageTail the gup refcount can be released.
3202          * Page flags also must be visible before we make the page non-compound.
3203          */
3204         smp_wmb();
3205
3206         clear_compound_head(page_tail);
3207
3208         if (page_is_young(head))
3209                 set_page_young(page_tail);
3210         if (page_is_idle(head))
3211                 set_page_idle(page_tail);
3212
3213         /* ->mapping in first tail page is compound_mapcount */
3214         VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
3215                         page_tail);
3216         page_tail->mapping = head->mapping;
3217
3218         page_tail->index = head->index + tail;
3219         page_cpupid_xchg_last(page_tail, page_cpupid_last(head));
3220         lru_add_page_tail(head, page_tail, lruvec, list);
3221
3222         return mapcount;
3223 }
3224
3225 static void __split_huge_page(struct page *page, struct list_head *list)
3226 {
3227         struct page *head = compound_head(page);
3228         struct zone *zone = page_zone(head);
3229         struct lruvec *lruvec;
3230         int i, tail_mapcount;
3231
3232         /* prevent PageLRU to go away from under us, and freeze lru stats */
3233         spin_lock_irq(&zone->lru_lock);
3234         lruvec = mem_cgroup_page_lruvec(head, zone);
3235
3236         /* complete memcg works before add pages to LRU */
3237         mem_cgroup_split_huge_fixup(head);
3238
3239         tail_mapcount = 0;
3240         for (i = HPAGE_PMD_NR - 1; i >= 1; i--)
3241                 tail_mapcount += __split_huge_page_tail(head, i, lruvec, list);
3242         atomic_sub(tail_mapcount, &head->_count);
3243
3244         ClearPageCompound(head);
3245         spin_unlock_irq(&zone->lru_lock);
3246
3247         unfreeze_page(page_anon_vma(head), head);
3248
3249         for (i = 0; i < HPAGE_PMD_NR; i++) {
3250                 struct page *subpage = head + i;
3251                 if (subpage == page)
3252                         continue;
3253                 unlock_page(subpage);
3254
3255                 /*
3256                  * Subpages may be freed if there wasn't any mapping
3257                  * like if add_to_swap() is running on a lru page that
3258                  * had its mapping zapped. And freeing these pages
3259                  * requires taking the lru_lock so we do the put_page
3260                  * of the tail pages after the split is complete.
3261                  */
3262                 put_page(subpage);
3263         }
3264 }
3265
3266 int total_mapcount(struct page *page)
3267 {
3268         int i, ret;
3269
3270         VM_BUG_ON_PAGE(PageTail(page), page);
3271
3272         if (likely(!PageCompound(page)))
3273                 return atomic_read(&page->_mapcount) + 1;
3274
3275         ret = compound_mapcount(page);
3276         if (PageHuge(page))
3277                 return ret;
3278         for (i = 0; i < HPAGE_PMD_NR; i++)
3279                 ret += atomic_read(&page[i]._mapcount) + 1;
3280         if (PageDoubleMap(page))
3281                 ret -= HPAGE_PMD_NR;
3282         return ret;
3283 }
3284
3285 /*
3286  * This function splits huge page into normal pages. @page can point to any
3287  * subpage of huge page to split. Split doesn't change the position of @page.
3288  *
3289  * Only caller must hold pin on the @page, otherwise split fails with -EBUSY.
3290  * The huge page must be locked.
3291  *
3292  * If @list is null, tail pages will be added to LRU list, otherwise, to @list.
3293  *
3294  * Both head page and tail pages will inherit mapping, flags, and so on from
3295  * the hugepage.
3296  *
3297  * GUP pin and PG_locked transferred to @page. Rest subpages can be freed if
3298  * they are not mapped.
3299  *
3300  * Returns 0 if the hugepage is split successfully.
3301  * Returns -EBUSY if the page is pinned or if anon_vma disappeared from under
3302  * us.
3303  */
3304 int split_huge_page_to_list(struct page *page, struct list_head *list)
3305 {
3306         struct page *head = compound_head(page);
3307         struct anon_vma *anon_vma;
3308         int count, mapcount, ret;
3309         bool mlocked;
3310
3311         VM_BUG_ON_PAGE(is_huge_zero_page(page), page);
3312         VM_BUG_ON_PAGE(!PageAnon(page), page);
3313         VM_BUG_ON_PAGE(!PageLocked(page), page);
3314         VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
3315         VM_BUG_ON_PAGE(!PageCompound(page), page);
3316
3317         /*
3318          * The caller does not necessarily hold an mmap_sem that would prevent
3319          * the anon_vma disappearing so we first we take a reference to it
3320          * and then lock the anon_vma for write. This is similar to
3321          * page_lock_anon_vma_read except the write lock is taken to serialise
3322          * against parallel split or collapse operations.
3323          */
3324         anon_vma = page_get_anon_vma(head);
3325         if (!anon_vma) {
3326                 ret = -EBUSY;
3327                 goto out;
3328         }
3329         anon_vma_lock_write(anon_vma);
3330
3331         /*
3332          * Racy check if we can split the page, before freeze_page() will
3333          * split PMDs
3334          */
3335         if (total_mapcount(head) != page_count(head) - 1) {
3336                 ret = -EBUSY;
3337                 goto out_unlock;
3338         }
3339
3340         mlocked = PageMlocked(page);
3341         freeze_page(anon_vma, head);
3342         VM_BUG_ON_PAGE(compound_mapcount(head), head);
3343
3344         /* Make sure the page is not on per-CPU pagevec as it takes pin */
3345         if (mlocked)
3346                 lru_add_drain();
3347
3348         /* Prevent deferred_split_scan() touching ->_count */
3349         spin_lock(&split_queue_lock);
3350         count = page_count(head);
3351         mapcount = total_mapcount(head);
3352         if (!mapcount && count == 1) {
3353                 if (!list_empty(page_deferred_list(head))) {
3354                         split_queue_len--;
3355                         list_del(page_deferred_list(head));
3356                 }
3357                 spin_unlock(&split_queue_lock);
3358                 __split_huge_page(page, list);
3359                 ret = 0;
3360         } else if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount) {
3361                 spin_unlock(&split_queue_lock);
3362                 pr_alert("total_mapcount: %u, page_count(): %u\n",
3363                                 mapcount, count);
3364                 if (PageTail(page))
3365                         dump_page(head, NULL);
3366                 dump_page(page, "total_mapcount(head) > 0");
3367                 BUG();
3368         } else {
3369                 spin_unlock(&split_queue_lock);
3370                 unfreeze_page(anon_vma, head);
3371                 ret = -EBUSY;
3372         }
3373
3374 out_unlock:
3375         anon_vma_unlock_write(anon_vma);
3376         put_anon_vma(anon_vma);
3377 out:
3378         count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
3379         return ret;
3380 }
3381
3382 void free_transhuge_page(struct page *page)
3383 {
3384         unsigned long flags;
3385
3386         spin_lock_irqsave(&split_queue_lock, flags);
3387         if (!list_empty(page_deferred_list(page))) {
3388                 split_queue_len--;
3389                 list_del(page_deferred_list(page));
3390         }
3391         spin_unlock_irqrestore(&split_queue_lock, flags);
3392         free_compound_page(page);
3393 }
3394
3395 void deferred_split_huge_page(struct page *page)
3396 {
3397         unsigned long flags;
3398
3399         VM_BUG_ON_PAGE(!PageTransHuge(page), page);
3400
3401         spin_lock_irqsave(&split_queue_lock, flags);
3402         if (list_empty(page_deferred_list(page))) {
3403                 list_add_tail(page_deferred_list(page), &split_queue);
3404                 split_queue_len++;
3405         }
3406         spin_unlock_irqrestore(&split_queue_lock, flags);
3407 }
3408
3409 static unsigned long deferred_split_count(struct shrinker *shrink,
3410                 struct shrink_control *sc)
3411 {
3412         /*
3413          * Split a page from split_queue will free up at least one page,
3414          * at most HPAGE_PMD_NR - 1. We don't track exact number.
3415          * Let's use HPAGE_PMD_NR / 2 as ballpark.
3416          */
3417         return ACCESS_ONCE(split_queue_len) * HPAGE_PMD_NR / 2;
3418 }
3419
3420 static unsigned long deferred_split_scan(struct shrinker *shrink,
3421                 struct shrink_control *sc)
3422 {
3423         unsigned long flags;
3424         LIST_HEAD(list), *pos, *next;
3425         struct page *page;
3426         int split = 0;
3427
3428         spin_lock_irqsave(&split_queue_lock, flags);
3429         list_splice_init(&split_queue, &list);
3430
3431         /* Take pin on all head pages to avoid freeing them under us */
3432         list_for_each_safe(pos, next, &list) {
3433                 page = list_entry((void *)pos, struct page, mapping);
3434                 page = compound_head(page);
3435                 /* race with put_compound_page() */
3436                 if (!get_page_unless_zero(page)) {
3437                         list_del_init(page_deferred_list(page));
3438                         split_queue_len--;
3439                 }
3440         }
3441         spin_unlock_irqrestore(&split_queue_lock, flags);
3442
3443         list_for_each_safe(pos, next, &list) {
3444                 page = list_entry((void *)pos, struct page, mapping);
3445                 lock_page(page);
3446                 /* split_huge_page() removes page from list on success */
3447                 if (!split_huge_page(page))
3448                         split++;
3449                 unlock_page(page);
3450                 put_page(page);
3451         }
3452
3453         spin_lock_irqsave(&split_queue_lock, flags);
3454         list_splice_tail(&list, &split_queue);
3455         spin_unlock_irqrestore(&split_queue_lock, flags);
3456
3457         return split * HPAGE_PMD_NR / 2;
3458 }
3459
3460 static struct shrinker deferred_split_shrinker = {
3461         .count_objects = deferred_split_count,
3462         .scan_objects = deferred_split_scan,
3463         .seeks = DEFAULT_SEEKS,
3464 };
3465
3466 #ifdef CONFIG_DEBUG_FS
3467 static int split_huge_pages_set(void *data, u64 val)
3468 {
3469         struct zone *zone;
3470         struct page *page;
3471         unsigned long pfn, max_zone_pfn;
3472         unsigned long total = 0, split = 0;
3473
3474         if (val != 1)
3475                 return -EINVAL;
3476
3477         for_each_populated_zone(zone) {
3478                 max_zone_pfn = zone_end_pfn(zone);
3479                 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
3480                         if (!pfn_valid(pfn))
3481                                 continue;
3482
3483                         page = pfn_to_page(pfn);
3484                         if (!get_page_unless_zero(page))
3485                                 continue;
3486
3487                         if (zone != page_zone(page))
3488                                 goto next;
3489
3490                         if (!PageHead(page) || !PageAnon(page) ||
3491                                         PageHuge(page))
3492                                 goto next;
3493
3494                         total++;
3495                         lock_page(page);
3496                         if (!split_huge_page(page))
3497                                 split++;
3498                         unlock_page(page);
3499 next:
3500                         put_page(page);
3501                 }
3502         }
3503
3504         pr_info("%lu of %lu THP split", split, total);
3505
3506         return 0;
3507 }
3508 DEFINE_SIMPLE_ATTRIBUTE(split_huge_pages_fops, NULL, split_huge_pages_set,
3509                 "%llu\n");
3510
3511 static int __init split_huge_pages_debugfs(void)
3512 {
3513         void *ret;
3514
3515         ret = debugfs_create_file("split_huge_pages", 0644, NULL, NULL,
3516                         &split_huge_pages_fops);
3517         if (!ret)
3518                 pr_warn("Failed to create split_huge_pages in debugfs");
3519         return 0;
3520 }
3521 late_initcall(split_huge_pages_debugfs);
3522 #endif