mv-sheeva.git: mm/huge_memory.c
memcg: make mem_cgroup_split_huge_fixup() more efficient
1 /*
2  *  Copyright (C) 2009  Red Hat, Inc.
3  *
4  *  This work is licensed under the terms of the GNU GPL, version 2. See
5  *  the COPYING file in the top-level directory.
6  */
7
8 #include <linux/mm.h>
9 #include <linux/sched.h>
10 #include <linux/highmem.h>
11 #include <linux/hugetlb.h>
12 #include <linux/mmu_notifier.h>
13 #include <linux/rmap.h>
14 #include <linux/swap.h>
15 #include <linux/mm_inline.h>
16 #include <linux/kthread.h>
17 #include <linux/khugepaged.h>
18 #include <linux/freezer.h>
19 #include <linux/mman.h>
20 #include <asm/tlb.h>
21 #include <asm/pgalloc.h>
22 #include "internal.h"
23
24 /*
25  * By default transparent hugepage support is enabled for all mappings
26  * and khugepaged scans all mappings. Defrag is only invoked by
27  * khugepaged hugepage allocations and by page faults inside
28  * MADV_HUGEPAGE regions to avoid the risk of slowing down short lived
29  * allocations.
30  */
31 unsigned long transparent_hugepage_flags __read_mostly =
32 #ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
33         (1<<TRANSPARENT_HUGEPAGE_FLAG)|
34 #endif
35 #ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
36         (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
37 #endif
38         (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)|
39         (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
40
41 /* default: scan 8*512 ptes (or vmas) every 10 seconds */
42 static unsigned int khugepaged_pages_to_scan __read_mostly = HPAGE_PMD_NR*8;
43 static unsigned int khugepaged_pages_collapsed;
44 static unsigned int khugepaged_full_scans;
45 static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
46 /* during fragmentation, poll the hugepage allocator once every minute */
47 static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
48 static struct task_struct *khugepaged_thread __read_mostly;
49 static DEFINE_MUTEX(khugepaged_mutex);
50 static DEFINE_SPINLOCK(khugepaged_mm_lock);
51 static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
52 /*
53  * By default, collapse into a hugepage if at least one pte is mapped,
54  * mirroring what would have happened at page-fault time had the vma
55  * been large enough.
56  */
57 static unsigned int khugepaged_max_ptes_none __read_mostly = HPAGE_PMD_NR-1;
58
59 static int khugepaged(void *none);
60 static int mm_slots_hash_init(void);
61 static int khugepaged_slab_init(void);
62 static void khugepaged_slab_free(void);
63
64 #define MM_SLOTS_HASH_HEADS 1024
65 static struct hlist_head *mm_slots_hash __read_mostly;
66 static struct kmem_cache *mm_slot_cache __read_mostly;
67
68 /**
69  * struct mm_slot - hash lookup from mm to mm_slot
70  * @hash: hash collision list
71  * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
72  * @mm: the mm that this information is valid for
73  */
74 struct mm_slot {
75         struct hlist_node hash;
76         struct list_head mm_node;
77         struct mm_struct *mm;
78 };
79
80 /**
81  * struct khugepaged_scan - cursor for scanning
82  * @mm_head: the head of the mm list to scan
83  * @mm_slot: the current mm_slot we are scanning
84  * @address: the next address inside that mm_slot to be scanned
85  *
86  * There is only one instance of this cursor structure: khugepaged_scan.
87  */
88 struct khugepaged_scan {
89         struct list_head mm_head;
90         struct mm_slot *mm_slot;
91         unsigned long address;
92 };
93 static struct khugepaged_scan khugepaged_scan = {
94         .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
95 };
96
97
98 static int set_recommended_min_free_kbytes(void)
99 {
100         struct zone *zone;
101         int nr_zones = 0;
102         unsigned long recommended_min;
103         extern int min_free_kbytes;
104
105         if (!test_bit(TRANSPARENT_HUGEPAGE_FLAG,
106                       &transparent_hugepage_flags) &&
107             !test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
108                       &transparent_hugepage_flags))
109                 return 0;
110
111         for_each_populated_zone(zone)
112                 nr_zones++;
113
114         /* Make sure at least 2 hugepages are free for MIGRATE_RESERVE */
115         recommended_min = pageblock_nr_pages * nr_zones * 2;
116
117         /*
118          * Make sure that on average at least two pageblocks are almost free
119          * of another type, one for a migratetype to fall back to and a
120          * second to avoid subsequent fallbacks of other types. There are 3
121          * MIGRATE_TYPES we care about.
122          */
123         recommended_min += pageblock_nr_pages * nr_zones *
124                            MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
125
126         /* never allow reserving more than 5% of lowmem */
127         recommended_min = min(recommended_min,
128                               (unsigned long) nr_free_buffer_pages() / 20);
129         recommended_min <<= (PAGE_SHIFT-10);
130
131         if (recommended_min > min_free_kbytes)
132                 min_free_kbytes = recommended_min;
133         setup_per_zone_wmarks();
134         return 0;
135 }
136 late_initcall(set_recommended_min_free_kbytes);
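/*
 * Worked example (illustrative figures only; assumes x86-64 defaults
 * with 4k base pages and 2M pageblocks, i.e. pageblock_nr_pages == 512
 * and MIGRATE_PCPTYPES == 3): with three populated zones,
 *
 *   recommended_min = 512 * 3 * 2          =  3072 pages
 *                   + 512 * 3 * 3 * 3      = 13824 pages
 *                                    total = 16896 pages
 *
 * which the << (PAGE_SHIFT-10) conversion turns into 67584 kB (~66MB),
 * subject to the 5%-of-lowmem cap above.
 */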
137
138 static int start_khugepaged(void)
139 {
140         int err = 0;
141         if (khugepaged_enabled()) {
142                 int wakeup;
143                 if (unlikely(!mm_slot_cache || !mm_slots_hash)) {
144                         err = -ENOMEM;
145                         goto out;
146                 }
147                 mutex_lock(&khugepaged_mutex);
148                 if (!khugepaged_thread)
149                         khugepaged_thread = kthread_run(khugepaged, NULL,
150                                                         "khugepaged");
151                 if (unlikely(IS_ERR(khugepaged_thread))) {
152                         printk(KERN_ERR
153                                "khugepaged: kthread_run(khugepaged) failed\n");
154                         err = PTR_ERR(khugepaged_thread);
155                         khugepaged_thread = NULL;
156                 }
157                 wakeup = !list_empty(&khugepaged_scan.mm_head);
158                 mutex_unlock(&khugepaged_mutex);
159                 if (wakeup)
160                         wake_up_interruptible(&khugepaged_wait);
161
162                 set_recommended_min_free_kbytes();
163         } else
164                 /* wakeup to exit */
165                 wake_up_interruptible(&khugepaged_wait);
166 out:
167         return err;
168 }
169
170 #ifdef CONFIG_SYSFS
171
172 static ssize_t double_flag_show(struct kobject *kobj,
173                                 struct kobj_attribute *attr, char *buf,
174                                 enum transparent_hugepage_flag enabled,
175                                 enum transparent_hugepage_flag req_madv)
176 {
177         if (test_bit(enabled, &transparent_hugepage_flags)) {
178                 VM_BUG_ON(test_bit(req_madv, &transparent_hugepage_flags));
179                 return sprintf(buf, "[always] madvise never\n");
180         } else if (test_bit(req_madv, &transparent_hugepage_flags))
181                 return sprintf(buf, "always [madvise] never\n");
182         else
183                 return sprintf(buf, "always madvise [never]\n");
184 }
185 static ssize_t double_flag_store(struct kobject *kobj,
186                                  struct kobj_attribute *attr,
187                                  const char *buf, size_t count,
188                                  enum transparent_hugepage_flag enabled,
189                                  enum transparent_hugepage_flag req_madv)
190 {
191         if (!memcmp("always", buf,
192                     min(sizeof("always")-1, count))) {
193                 set_bit(enabled, &transparent_hugepage_flags);
194                 clear_bit(req_madv, &transparent_hugepage_flags);
195         } else if (!memcmp("madvise", buf,
196                            min(sizeof("madvise")-1, count))) {
197                 clear_bit(enabled, &transparent_hugepage_flags);
198                 set_bit(req_madv, &transparent_hugepage_flags);
199         } else if (!memcmp("never", buf,
200                            min(sizeof("never")-1, count))) {
201                 clear_bit(enabled, &transparent_hugepage_flags);
202                 clear_bit(req_madv, &transparent_hugepage_flags);
203         } else
204                 return -EINVAL;
205
206         return count;
207 }
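/*
 * Note the prefix-match semantics above: memcmp() compares only
 * min(strlen(keyword), count) bytes, so the trailing newline written by
 * e.g.
 *
 *   $ echo always > /sys/kernel/mm/transparent_hugepage/enabled
 *
 * (count == 7, "always\n") still matches, as would any longer string
 * that merely begins with a valid keyword.
 */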
208
209 static ssize_t enabled_show(struct kobject *kobj,
210                             struct kobj_attribute *attr, char *buf)
211 {
212         return double_flag_show(kobj, attr, buf,
213                                 TRANSPARENT_HUGEPAGE_FLAG,
214                                 TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
215 }
216 static ssize_t enabled_store(struct kobject *kobj,
217                              struct kobj_attribute *attr,
218                              const char *buf, size_t count)
219 {
220         ssize_t ret;
221
222         ret = double_flag_store(kobj, attr, buf, count,
223                                 TRANSPARENT_HUGEPAGE_FLAG,
224                                 TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
225
226         if (ret > 0) {
227                 int err = start_khugepaged();
228                 if (err)
229                         ret = err;
230         }
231
232         if (ret > 0 &&
233             (test_bit(TRANSPARENT_HUGEPAGE_FLAG,
234                       &transparent_hugepage_flags) ||
235              test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
236                       &transparent_hugepage_flags)))
237                 set_recommended_min_free_kbytes();
238
239         return ret;
240 }
241 static struct kobj_attribute enabled_attr =
242         __ATTR(enabled, 0644, enabled_show, enabled_store);
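/*
 * Example usage (a sketch; paths assume the sysfs group registered by
 * hugepage_init() below, on a default sysfs mount):
 *
 *   $ cat /sys/kernel/mm/transparent_hugepage/enabled
 *   [always] madvise never
 *   $ echo madvise > /sys/kernel/mm/transparent_hugepage/enabled
 */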
243
244 static ssize_t single_flag_show(struct kobject *kobj,
245                                 struct kobj_attribute *attr, char *buf,
246                                 enum transparent_hugepage_flag flag)
247 {
248         return sprintf(buf, "%d\n",
249                        !!test_bit(flag, &transparent_hugepage_flags));
250 }
251
252 static ssize_t single_flag_store(struct kobject *kobj,
253                                  struct kobj_attribute *attr,
254                                  const char *buf, size_t count,
255                                  enum transparent_hugepage_flag flag)
256 {
257         unsigned long value;
258         int ret;
259
260         ret = kstrtoul(buf, 10, &value);
261         if (ret < 0)
262                 return ret;
263         if (value > 1)
264                 return -EINVAL;
265
266         if (value)
267                 set_bit(flag, &transparent_hugepage_flags);
268         else
269                 clear_bit(flag, &transparent_hugepage_flags);
270
271         return count;
272 }
273
274 /*
275  * Currently the defrag setting only controls __GFP_WAIT (cleared when
276  * defrag is off). A blind __GFP_REPEAT would be too aggressive: it's
277  * never worth swapping tons of memory just to allocate one more hugepage.
278  */
279 static ssize_t defrag_show(struct kobject *kobj,
280                            struct kobj_attribute *attr, char *buf)
281 {
282         return double_flag_show(kobj, attr, buf,
283                                 TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
284                                 TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
285 }
286 static ssize_t defrag_store(struct kobject *kobj,
287                             struct kobj_attribute *attr,
288                             const char *buf, size_t count)
289 {
290         return double_flag_store(kobj, attr, buf, count,
291                                  TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
292                                  TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
293 }
294 static struct kobj_attribute defrag_attr =
295         __ATTR(defrag, 0644, defrag_show, defrag_store);
296
297 #ifdef CONFIG_DEBUG_VM
298 static ssize_t debug_cow_show(struct kobject *kobj,
299                                 struct kobj_attribute *attr, char *buf)
300 {
301         return single_flag_show(kobj, attr, buf,
302                                 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
303 }
304 static ssize_t debug_cow_store(struct kobject *kobj,
305                                struct kobj_attribute *attr,
306                                const char *buf, size_t count)
307 {
308         return single_flag_store(kobj, attr, buf, count,
309                                  TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
310 }
311 static struct kobj_attribute debug_cow_attr =
312         __ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store);
313 #endif /* CONFIG_DEBUG_VM */
314
315 static struct attribute *hugepage_attr[] = {
316         &enabled_attr.attr,
317         &defrag_attr.attr,
318 #ifdef CONFIG_DEBUG_VM
319         &debug_cow_attr.attr,
320 #endif
321         NULL,
322 };
323
324 static struct attribute_group hugepage_attr_group = {
325         .attrs = hugepage_attr,
326 };
327
328 static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
329                                          struct kobj_attribute *attr,
330                                          char *buf)
331 {
332         return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
333 }
334
335 static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
336                                           struct kobj_attribute *attr,
337                                           const char *buf, size_t count)
338 {
339         unsigned long msecs;
340         int err;
341
342         err = strict_strtoul(buf, 10, &msecs);
343         if (err || msecs > UINT_MAX)
344                 return -EINVAL;
345
346         khugepaged_scan_sleep_millisecs = msecs;
347         wake_up_interruptible(&khugepaged_wait);
348
349         return count;
350 }
351 static struct kobj_attribute scan_sleep_millisecs_attr =
352         __ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
353                scan_sleep_millisecs_store);
354
355 static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
356                                           struct kobj_attribute *attr,
357                                           char *buf)
358 {
359         return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
360 }
361
362 static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
363                                            struct kobj_attribute *attr,
364                                            const char *buf, size_t count)
365 {
366         unsigned long msecs;
367         int err;
368
369         err = strict_strtoul(buf, 10, &msecs);
370         if (err || msecs > UINT_MAX)
371                 return -EINVAL;
372
373         khugepaged_alloc_sleep_millisecs = msecs;
374         wake_up_interruptible(&khugepaged_wait);
375
376         return count;
377 }
378 static struct kobj_attribute alloc_sleep_millisecs_attr =
379         __ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
380                alloc_sleep_millisecs_store);
381
382 static ssize_t pages_to_scan_show(struct kobject *kobj,
383                                   struct kobj_attribute *attr,
384                                   char *buf)
385 {
386         return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
387 }
388 static ssize_t pages_to_scan_store(struct kobject *kobj,
389                                    struct kobj_attribute *attr,
390                                    const char *buf, size_t count)
391 {
392         int err;
393         unsigned long pages;
394
395         err = strict_strtoul(buf, 10, &pages);
396         if (err || !pages || pages > UINT_MAX)
397                 return -EINVAL;
398
399         khugepaged_pages_to_scan = pages;
400
401         return count;
402 }
403 static struct kobj_attribute pages_to_scan_attr =
404         __ATTR(pages_to_scan, 0644, pages_to_scan_show,
405                pages_to_scan_store);
406
407 static ssize_t pages_collapsed_show(struct kobject *kobj,
408                                     struct kobj_attribute *attr,
409                                     char *buf)
410 {
411         return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
412 }
413 static struct kobj_attribute pages_collapsed_attr =
414         __ATTR_RO(pages_collapsed);
415
416 static ssize_t full_scans_show(struct kobject *kobj,
417                                struct kobj_attribute *attr,
418                                char *buf)
419 {
420         return sprintf(buf, "%u\n", khugepaged_full_scans);
421 }
422 static struct kobj_attribute full_scans_attr =
423         __ATTR_RO(full_scans);
424
425 static ssize_t khugepaged_defrag_show(struct kobject *kobj,
426                                       struct kobj_attribute *attr, char *buf)
427 {
428         return single_flag_show(kobj, attr, buf,
429                                 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
430 }
431 static ssize_t khugepaged_defrag_store(struct kobject *kobj,
432                                        struct kobj_attribute *attr,
433                                        const char *buf, size_t count)
434 {
435         return single_flag_store(kobj, attr, buf, count,
436                                  TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
437 }
438 static struct kobj_attribute khugepaged_defrag_attr =
439         __ATTR(defrag, 0644, khugepaged_defrag_show,
440                khugepaged_defrag_store);
441
442 /*
443  * max_ptes_none controls whether khugepaged may collapse a hugepage
444  * over unmapped ptes, potentially increasing the memory footprint of
445  * the vmas it scans. When max_ptes_none is 0, khugepaged never
446  * reduces the free memory available in the system as it runs.
447  * Increasing max_ptes_none instead lets each collapse consume up to
448  * that many extra pages during the khugepaged scan.
449  */
450 static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
451                                              struct kobj_attribute *attr,
452                                              char *buf)
453 {
454         return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
455 }
456 static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
457                                               struct kobj_attribute *attr,
458                                               const char *buf, size_t count)
459 {
460         int err;
461         unsigned long max_ptes_none;
462
463         err = strict_strtoul(buf, 10, &max_ptes_none);
464         if (err || max_ptes_none > HPAGE_PMD_NR-1)
465                 return -EINVAL;
466
467         khugepaged_max_ptes_none = max_ptes_none;
468
469         return count;
470 }
471 static struct kobj_attribute khugepaged_max_ptes_none_attr =
472         __ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
473                khugepaged_max_ptes_none_store);
474
475 static struct attribute *khugepaged_attr[] = {
476         &khugepaged_defrag_attr.attr,
477         &khugepaged_max_ptes_none_attr.attr,
478         &pages_to_scan_attr.attr,
479         &pages_collapsed_attr.attr,
480         &full_scans_attr.attr,
481         &scan_sleep_millisecs_attr.attr,
482         &alloc_sleep_millisecs_attr.attr,
483         NULL,
484 };
485
486 static struct attribute_group khugepaged_attr_group = {
487         .attrs = khugepaged_attr,
488         .name = "khugepaged",
489 };
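/*
 * Because of the .name above, these knobs land in a "khugepaged"
 * subdirectory, e.g. (paths assume a default sysfs mount):
 *
 *   $ echo 4096 > /sys/kernel/mm/transparent_hugepage/khugepaged/pages_to_scan
 *   $ cat /sys/kernel/mm/transparent_hugepage/khugepaged/full_scans
 */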
490 #endif /* CONFIG_SYSFS */
491
492 static int __init hugepage_init(void)
493 {
494         int err;
495 #ifdef CONFIG_SYSFS
496         static struct kobject *hugepage_kobj;
497 #endif
498
499         err = -EINVAL;
500         if (!has_transparent_hugepage()) {
501                 transparent_hugepage_flags = 0;
502                 goto out;
503         }
504
505 #ifdef CONFIG_SYSFS
506         err = -ENOMEM;
507         hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
508         if (unlikely(!hugepage_kobj)) {
509                 printk(KERN_ERR "hugepage: failed kobject create\n");
510                 goto out;
511         }
512
513         err = sysfs_create_group(hugepage_kobj, &hugepage_attr_group);
514         if (err) {
515                 printk(KERN_ERR "hugepage: failed to register hugepage group\n");
516                 goto out;
517         }
518
519         err = sysfs_create_group(hugepage_kobj, &khugepaged_attr_group);
520         if (err) {
521                 printk(KERN_ERR "hugepage: failed to register khugepaged group\n");
522                 goto out;
523         }
524 #endif
525
526         err = khugepaged_slab_init();
527         if (err)
528                 goto out;
529
530         err = mm_slots_hash_init();
531         if (err) {
532                 khugepaged_slab_free();
533                 goto out;
534         }
535
536         /*
537          * By default disable transparent hugepages on smaller systems,
538          * where the extra memory used could hurt more than TLB overhead
539          * is likely to save.  The admin can still enable it through /sys.
540          */
541         if (totalram_pages < (512 << (20 - PAGE_SHIFT)))
542                 transparent_hugepage_flags = 0;
543
544         start_khugepaged();
545
546         set_recommended_min_free_kbytes();
547
548 out:
549         return err;
550 }
551 module_init(hugepage_init)
552
553 static int __init setup_transparent_hugepage(char *str)
554 {
555         int ret = 0;
556         if (!str)
557                 goto out;
558         if (!strcmp(str, "always")) {
559                 set_bit(TRANSPARENT_HUGEPAGE_FLAG,
560                         &transparent_hugepage_flags);
561                 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
562                           &transparent_hugepage_flags);
563                 ret = 1;
564         } else if (!strcmp(str, "madvise")) {
565                 clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
566                           &transparent_hugepage_flags);
567                 set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
568                         &transparent_hugepage_flags);
569                 ret = 1;
570         } else if (!strcmp(str, "never")) {
571                 clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
572                           &transparent_hugepage_flags);
573                 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
574                           &transparent_hugepage_flags);
575                 ret = 1;
576         }
577 out:
578         if (!ret)
579                 printk(KERN_WARNING
580                        "transparent_hugepage= cannot parse, ignored\n");
581         return ret;
582 }
583 __setup("transparent_hugepage=", setup_transparent_hugepage);
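/*
 * Example (kernel boot command line): booting with
 *
 *   transparent_hugepage=madvise
 *
 * restricts hugepage faults to MADV_HUGEPAGE regions from early boot,
 * equivalent to later writing "madvise" into the sysfs "enabled" file.
 */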
584
585 static void prepare_pmd_huge_pte(pgtable_t pgtable,
586                                  struct mm_struct *mm)
587 {
588         assert_spin_locked(&mm->page_table_lock);
589
590         /* FIFO */
591         if (!mm->pmd_huge_pte)
592                 INIT_LIST_HEAD(&pgtable->lru);
593         else
594                 list_add(&pgtable->lru, &mm->pmd_huge_pte->lru);
595         mm->pmd_huge_pte = pgtable;
596 }
597
598 static inline pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
599 {
600         if (likely(vma->vm_flags & VM_WRITE))
601                 pmd = pmd_mkwrite(pmd);
602         return pmd;
603 }
604
605 static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
606                                         struct vm_area_struct *vma,
607                                         unsigned long haddr, pmd_t *pmd,
608                                         struct page *page)
609 {
610         int ret = 0;
611         pgtable_t pgtable;
612
613         VM_BUG_ON(!PageCompound(page));
614         pgtable = pte_alloc_one(mm, haddr);
615         if (unlikely(!pgtable)) {
616                 mem_cgroup_uncharge_page(page);
617                 put_page(page);
618                 return VM_FAULT_OOM;
619         }
620
621         clear_huge_page(page, haddr, HPAGE_PMD_NR);
622         __SetPageUptodate(page);
623
624         spin_lock(&mm->page_table_lock);
625         if (unlikely(!pmd_none(*pmd))) {
626                 spin_unlock(&mm->page_table_lock);
627                 mem_cgroup_uncharge_page(page);
628                 put_page(page);
629                 pte_free(mm, pgtable);
630         } else {
631                 pmd_t entry;
632                 entry = mk_pmd(page, vma->vm_page_prot);
633                 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
634                 entry = pmd_mkhuge(entry);
635                 /*
636                  * The spinlocking to take the lru_lock inside
637                  * page_add_new_anon_rmap() acts as a full memory
638                  * barrier to be sure clear_huge_page writes become
639                  * visible after the set_pmd_at() write.
640                  */
641                 page_add_new_anon_rmap(page, vma, haddr);
642                 set_pmd_at(mm, haddr, pmd, entry);
643                 prepare_pmd_huge_pte(pgtable, mm);
644                 add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
645                 spin_unlock(&mm->page_table_lock);
646         }
647
648         return ret;
649 }
650
651 static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
652 {
653         return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT)) | extra_gfp;
654 }
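/*
 * Illustrative expansion of the expression above (no new behaviour,
 * just the two cases spelled out):
 *
 *   alloc_hugepage_gfpmask(1, 0) == GFP_TRANSHUGE
 *   alloc_hugepage_gfpmask(0, 0) == (GFP_TRANSHUGE & ~__GFP_WAIT)
 *
 * i.e. with defrag disabled the allocation may not wait (no reclaim or
 * compaction), so under fragmentation the fault fails fast and falls
 * back to regular pages instead of stalling.
 */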
655
656 static inline struct page *alloc_hugepage_vma(int defrag,
657                                               struct vm_area_struct *vma,
658                                               unsigned long haddr, int nd,
659                                               gfp_t extra_gfp)
660 {
661         return alloc_pages_vma(alloc_hugepage_gfpmask(defrag, extra_gfp),
662                                HPAGE_PMD_ORDER, vma, haddr, nd);
663 }
664
665 #ifndef CONFIG_NUMA
666 static inline struct page *alloc_hugepage(int defrag)
667 {
668         return alloc_pages(alloc_hugepage_gfpmask(defrag, 0),
669                            HPAGE_PMD_ORDER);
670 }
671 #endif
672
673 int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
674                                unsigned long address, pmd_t *pmd,
675                                unsigned int flags)
676 {
677         struct page *page;
678         unsigned long haddr = address & HPAGE_PMD_MASK;
679         pte_t *pte;
680
681         if (haddr >= vma->vm_start && haddr + HPAGE_PMD_SIZE <= vma->vm_end) {
682                 if (unlikely(anon_vma_prepare(vma)))
683                         return VM_FAULT_OOM;
684                 if (unlikely(khugepaged_enter(vma)))
685                         return VM_FAULT_OOM;
686                 page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
687                                           vma, haddr, numa_node_id(), 0);
688                 if (unlikely(!page)) {
689                         count_vm_event(THP_FAULT_FALLBACK);
690                         goto out;
691                 }
692                 count_vm_event(THP_FAULT_ALLOC);
693                 if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
694                         put_page(page);
695                         goto out;
696                 }
697
698                 return __do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page);
699         }
700 out:
701         /*
702          * Use __pte_alloc instead of pte_alloc_map, because we can't
703          * run pte_offset_map on the pmd if a huge pmd could
704          * materialize from under us from a different thread.
705          */
706         if (unlikely(__pte_alloc(mm, vma, pmd, address)))
707                 return VM_FAULT_OOM;
708         /* if a huge pmd materialized from under us, just retry later */
709         if (unlikely(pmd_trans_huge(*pmd)))
710                 return 0;
711         /*
712          * A regular pmd is established and it can't morph into a huge pmd
713          * from under us anymore at this point because we hold the mmap_sem
714          * in read mode and khugepaged takes it in write mode. So now it's
715          * safe to run pte_offset_map().
716          */
717         pte = pte_offset_map(pmd, address);
718         return handle_pte_fault(mm, vma, address, pte, pmd, flags);
719 }
720
721 int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
722                   pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
723                   struct vm_area_struct *vma)
724 {
725         struct page *src_page;
726         pmd_t pmd;
727         pgtable_t pgtable;
728         int ret;
729
730         ret = -ENOMEM;
731         pgtable = pte_alloc_one(dst_mm, addr);
732         if (unlikely(!pgtable))
733                 goto out;
734
735         spin_lock(&dst_mm->page_table_lock);
736         spin_lock_nested(&src_mm->page_table_lock, SINGLE_DEPTH_NESTING);
737
738         ret = -EAGAIN;
739         pmd = *src_pmd;
740         if (unlikely(!pmd_trans_huge(pmd))) {
741                 pte_free(dst_mm, pgtable);
742                 goto out_unlock;
743         }
744         if (unlikely(pmd_trans_splitting(pmd))) {
745                 /* split huge page running from under us */
746                 spin_unlock(&src_mm->page_table_lock);
747                 spin_unlock(&dst_mm->page_table_lock);
748                 pte_free(dst_mm, pgtable);
749
750                 wait_split_huge_page(vma->anon_vma, src_pmd); /* src_vma */
751                 goto out;
752         }
753         src_page = pmd_page(pmd);
754         VM_BUG_ON(!PageHead(src_page));
755         get_page(src_page);
756         page_dup_rmap(src_page);
757         add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
758
759         pmdp_set_wrprotect(src_mm, addr, src_pmd);
760         pmd = pmd_mkold(pmd_wrprotect(pmd));
761         set_pmd_at(dst_mm, addr, dst_pmd, pmd);
762         prepare_pmd_huge_pte(pgtable, dst_mm);
763
764         ret = 0;
765 out_unlock:
766         spin_unlock(&src_mm->page_table_lock);
767         spin_unlock(&dst_mm->page_table_lock);
768 out:
769         return ret;
770 }
771
772 /* no "address" argument so destroys page coloring of some arch */
773 pgtable_t get_pmd_huge_pte(struct mm_struct *mm)
774 {
775         pgtable_t pgtable;
776
777         assert_spin_locked(&mm->page_table_lock);
778
779         /* FIFO */
780         pgtable = mm->pmd_huge_pte;
781         if (list_empty(&pgtable->lru))
782                 mm->pmd_huge_pte = NULL;
783         else {
784                 mm->pmd_huge_pte = list_entry(pgtable->lru.next,
785                                               struct page, lru);
786                 list_del(&pgtable->lru);
787         }
788         return pgtable;
789 }
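/*
 * Sketch of how the deposit/withdraw pair is used elsewhere in this
 * file (both callers hold mm->page_table_lock):
 *
 *   pgtable = pte_alloc_one(mm, haddr);
 *   ...
 *   set_pmd_at(mm, haddr, pmd, entry);
 *   prepare_pmd_huge_pte(pgtable, mm);   -- deposit (FIFO)
 *   ...
 *   pgtable = get_pmd_huge_pte(mm);      -- withdraw at zap/split time
 *   pmd_populate(mm, &_pmd, pgtable);
 *
 * One pte page stays preallocated per huge pmd, so a later split
 * cannot fail for lack of memory.
 */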
790
791 static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
792                                         struct vm_area_struct *vma,
793                                         unsigned long address,
794                                         pmd_t *pmd, pmd_t orig_pmd,
795                                         struct page *page,
796                                         unsigned long haddr)
797 {
798         pgtable_t pgtable;
799         pmd_t _pmd;
800         int ret = 0, i;
801         struct page **pages;
802
803         pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR,
804                         GFP_KERNEL);
805         if (unlikely(!pages)) {
806                 ret |= VM_FAULT_OOM;
807                 goto out;
808         }
809
810         for (i = 0; i < HPAGE_PMD_NR; i++) {
811                 pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE |
812                                                __GFP_OTHER_NODE,
813                                                vma, address, page_to_nid(page));
814                 if (unlikely(!pages[i] ||
815                              mem_cgroup_newpage_charge(pages[i], mm,
816                                                        GFP_KERNEL))) {
817                         if (pages[i])
818                                 put_page(pages[i]);
819                         mem_cgroup_uncharge_start();
820                         while (--i >= 0) {
821                                 mem_cgroup_uncharge_page(pages[i]);
822                                 put_page(pages[i]);
823                         }
824                         mem_cgroup_uncharge_end();
825                         kfree(pages);
826                         ret |= VM_FAULT_OOM;
827                         goto out;
828                 }
829         }
830
831         for (i = 0; i < HPAGE_PMD_NR; i++) {
832                 copy_user_highpage(pages[i], page + i,
833                                    haddr + PAGE_SIZE * i, vma);
834                 __SetPageUptodate(pages[i]);
835                 cond_resched();
836         }
837
838         spin_lock(&mm->page_table_lock);
839         if (unlikely(!pmd_same(*pmd, orig_pmd)))
840                 goto out_free_pages;
841         VM_BUG_ON(!PageHead(page));
842
843         pmdp_clear_flush_notify(vma, haddr, pmd);
844         /* leave pmd empty until pte is filled */
845
846         pgtable = get_pmd_huge_pte(mm);
847         pmd_populate(mm, &_pmd, pgtable);
848
849         for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
850                 pte_t *pte, entry;
851                 entry = mk_pte(pages[i], vma->vm_page_prot);
852                 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
853                 page_add_new_anon_rmap(pages[i], vma, haddr);
854                 pte = pte_offset_map(&_pmd, haddr);
855                 VM_BUG_ON(!pte_none(*pte));
856                 set_pte_at(mm, haddr, pte, entry);
857                 pte_unmap(pte);
858         }
859         kfree(pages);
860
861         mm->nr_ptes++;
862         smp_wmb(); /* make pte visible before pmd */
863         pmd_populate(mm, pmd, pgtable);
864         page_remove_rmap(page);
865         spin_unlock(&mm->page_table_lock);
866
867         ret |= VM_FAULT_WRITE;
868         put_page(page);
869
870 out:
871         return ret;
872
873 out_free_pages:
874         spin_unlock(&mm->page_table_lock);
875         mem_cgroup_uncharge_start();
876         for (i = 0; i < HPAGE_PMD_NR; i++) {
877                 mem_cgroup_uncharge_page(pages[i]);
878                 put_page(pages[i]);
879         }
880         mem_cgroup_uncharge_end();
881         kfree(pages);
882         goto out;
883 }
884
885 int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
886                         unsigned long address, pmd_t *pmd, pmd_t orig_pmd)
887 {
888         int ret = 0;
889         struct page *page, *new_page;
890         unsigned long haddr;
891
892         VM_BUG_ON(!vma->anon_vma);
893         spin_lock(&mm->page_table_lock);
894         if (unlikely(!pmd_same(*pmd, orig_pmd)))
895                 goto out_unlock;
896
897         page = pmd_page(orig_pmd);
898         VM_BUG_ON(!PageCompound(page) || !PageHead(page));
899         haddr = address & HPAGE_PMD_MASK;
900         if (page_mapcount(page) == 1) {
901                 pmd_t entry;
902                 entry = pmd_mkyoung(orig_pmd);
903                 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
904                 if (pmdp_set_access_flags(vma, haddr, pmd, entry, 1))
905                         update_mmu_cache(vma, address, entry);
906                 ret |= VM_FAULT_WRITE;
907                 goto out_unlock;
908         }
909         get_page(page);
910         spin_unlock(&mm->page_table_lock);
911
912         if (transparent_hugepage_enabled(vma) &&
913             !transparent_hugepage_debug_cow())
914                 new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
915                                               vma, haddr, numa_node_id(), 0);
916         else
917                 new_page = NULL;
918
919         if (unlikely(!new_page)) {
920                 count_vm_event(THP_FAULT_FALLBACK);
921                 ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
922                                                    pmd, orig_pmd, page, haddr);
923                 put_page(page);
924                 goto out;
925         }
926         count_vm_event(THP_FAULT_ALLOC);
927
928         if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
929                 put_page(new_page);
930                 put_page(page);
931                 ret |= VM_FAULT_OOM;
932                 goto out;
933         }
934
935         copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
936         __SetPageUptodate(new_page);
937
938         spin_lock(&mm->page_table_lock);
939         put_page(page);
940         if (unlikely(!pmd_same(*pmd, orig_pmd))) {
941                 mem_cgroup_uncharge_page(new_page);
942                 put_page(new_page);
943         } else {
944                 pmd_t entry;
945                 VM_BUG_ON(!PageHead(page));
946                 entry = mk_pmd(new_page, vma->vm_page_prot);
947                 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
948                 entry = pmd_mkhuge(entry);
949                 pmdp_clear_flush_notify(vma, haddr, pmd);
950                 page_add_new_anon_rmap(new_page, vma, haddr);
951                 set_pmd_at(mm, haddr, pmd, entry);
952                 update_mmu_cache(vma, address, entry);
953                 page_remove_rmap(page);
954                 put_page(page);
955                 ret |= VM_FAULT_WRITE;
956         }
957 out_unlock:
958         spin_unlock(&mm->page_table_lock);
959 out:
960         return ret;
961 }
962
963 struct page *follow_trans_huge_pmd(struct mm_struct *mm,
964                                    unsigned long addr,
965                                    pmd_t *pmd,
966                                    unsigned int flags)
967 {
968         struct page *page = NULL;
969
970         assert_spin_locked(&mm->page_table_lock);
971
972         if (flags & FOLL_WRITE && !pmd_write(*pmd))
973                 goto out;
974
975         page = pmd_page(*pmd);
976         VM_BUG_ON(!PageHead(page));
977         if (flags & FOLL_TOUCH) {
978                 pmd_t _pmd;
979                 /*
980                  * We should set the dirty bit only for FOLL_WRITE but
981                  * for now the dirty bit in the pmd is meaningless.
982                  * And if the dirty bit will become meaningful and
983                  * we'll only set it with FOLL_WRITE, an atomic
984                  * set_bit will be required on the pmd to set the
985                  * young bit, instead of the current set_pmd_at.
986                  */
987                 _pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
988                 set_pmd_at(mm, addr & HPAGE_PMD_MASK, pmd, _pmd);
989         }
990         page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
991         VM_BUG_ON(!PageCompound(page));
992         if (flags & FOLL_GET)
993                 get_page_foll(page);
994
995 out:
996         return page;
997 }
998
999 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
1000                  pmd_t *pmd)
1001 {
1002         int ret = 0;
1003
1004         spin_lock(&tlb->mm->page_table_lock);
1005         if (likely(pmd_trans_huge(*pmd))) {
1006                 if (unlikely(pmd_trans_splitting(*pmd))) {
1007                         spin_unlock(&tlb->mm->page_table_lock);
1008                         wait_split_huge_page(vma->anon_vma,
1009                                              pmd);
1010                 } else {
1011                         struct page *page;
1012                         pgtable_t pgtable;
1013                         pgtable = get_pmd_huge_pte(tlb->mm);
1014                         page = pmd_page(*pmd);
1015                         pmd_clear(pmd);
1016                         page_remove_rmap(page);
1017                         VM_BUG_ON(page_mapcount(page) < 0);
1018                         add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
1019                         VM_BUG_ON(!PageHead(page));
1020                         spin_unlock(&tlb->mm->page_table_lock);
1021                         tlb_remove_page(tlb, page);
1022                         pte_free(tlb->mm, pgtable);
1023                         ret = 1;
1024                 }
1025         } else
1026                 spin_unlock(&tlb->mm->page_table_lock);
1027
1028         return ret;
1029 }
1030
1031 int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1032                 unsigned long addr, unsigned long end,
1033                 unsigned char *vec)
1034 {
1035         int ret = 0;
1036
1037         spin_lock(&vma->vm_mm->page_table_lock);
1038         if (likely(pmd_trans_huge(*pmd))) {
1039                 ret = !pmd_trans_splitting(*pmd);
1040                 spin_unlock(&vma->vm_mm->page_table_lock);
1041                 if (unlikely(!ret))
1042                         wait_split_huge_page(vma->anon_vma, pmd);
1043                 else {
1044                         /*
1045                          * All logical pages in the range are present
1046                          * if backed by a huge page.
1047                          */
1048                         memset(vec, 1, (end - addr) >> PAGE_SHIFT);
1049                 }
1050         } else
1051                 spin_unlock(&vma->vm_mm->page_table_lock);
1052
1053         return ret;
1054 }
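/*
 * Example: on x86-64 (4k base pages, 2M huge pmd) a fully covered pmd
 * has (end - addr) >> PAGE_SHIFT == HPAGE_PMD_NR == 512, so the memset
 * marks all 512 bytes of the mincore vector as present.
 */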
1055
1056 int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
1057                   unsigned long old_addr,
1058                   unsigned long new_addr, unsigned long old_end,
1059                   pmd_t *old_pmd, pmd_t *new_pmd)
1060 {
1061         int ret = 0;
1062         pmd_t pmd;
1063
1064         struct mm_struct *mm = vma->vm_mm;
1065
1066         if ((old_addr & ~HPAGE_PMD_MASK) ||
1067             (new_addr & ~HPAGE_PMD_MASK) ||
1068             old_end - old_addr < HPAGE_PMD_SIZE ||
1069             (new_vma->vm_flags & VM_NOHUGEPAGE))
1070                 goto out;
1071
1072         /*
1073          * The destination pmd shouldn't be established; free_pgtables()
1074          * should have released it.
1075          */
1076         if (WARN_ON(!pmd_none(*new_pmd))) {
1077                 VM_BUG_ON(pmd_trans_huge(*new_pmd));
1078                 goto out;
1079         }
1080
1081         spin_lock(&mm->page_table_lock);
1082         if (likely(pmd_trans_huge(*old_pmd))) {
1083                 if (pmd_trans_splitting(*old_pmd)) {
1084                         spin_unlock(&mm->page_table_lock);
1085                         wait_split_huge_page(vma->anon_vma, old_pmd);
1086                         ret = -1;
1087                 } else {
1088                         pmd = pmdp_get_and_clear(mm, old_addr, old_pmd);
1089                         VM_BUG_ON(!pmd_none(*new_pmd));
1090                         set_pmd_at(mm, new_addr, new_pmd, pmd);
1091                         spin_unlock(&mm->page_table_lock);
1092                         ret = 1;
1093                 }
1094         } else {
1095                 spin_unlock(&mm->page_table_lock);
1096         }
1097 out:
1098         return ret;
1099 }
1100
1101 int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1102                 unsigned long addr, pgprot_t newprot)
1103 {
1104         struct mm_struct *mm = vma->vm_mm;
1105         int ret = 0;
1106
1107         spin_lock(&mm->page_table_lock);
1108         if (likely(pmd_trans_huge(*pmd))) {
1109                 if (unlikely(pmd_trans_splitting(*pmd))) {
1110                         spin_unlock(&mm->page_table_lock);
1111                         wait_split_huge_page(vma->anon_vma, pmd);
1112                 } else {
1113                         pmd_t entry;
1114
1115                         entry = pmdp_get_and_clear(mm, addr, pmd);
1116                         entry = pmd_modify(entry, newprot);
1117                         set_pmd_at(mm, addr, pmd, entry);
1118                         spin_unlock(&vma->vm_mm->page_table_lock);
1119                         flush_tlb_range(vma, addr, addr + HPAGE_PMD_SIZE);
1120                         ret = 1;
1121                 }
1122         } else
1123                 spin_unlock(&vma->vm_mm->page_table_lock);
1124
1125         return ret;
1126 }
1127
1128 pmd_t *page_check_address_pmd(struct page *page,
1129                               struct mm_struct *mm,
1130                               unsigned long address,
1131                               enum page_check_address_pmd_flag flag)
1132 {
1133         pgd_t *pgd;
1134         pud_t *pud;
1135         pmd_t *pmd, *ret = NULL;
1136
1137         if (address & ~HPAGE_PMD_MASK)
1138                 goto out;
1139
1140         pgd = pgd_offset(mm, address);
1141         if (!pgd_present(*pgd))
1142                 goto out;
1143
1144         pud = pud_offset(pgd, address);
1145         if (!pud_present(*pud))
1146                 goto out;
1147
1148         pmd = pmd_offset(pud, address);
1149         if (pmd_none(*pmd))
1150                 goto out;
1151         if (pmd_page(*pmd) != page)
1152                 goto out;
1153         /*
1154          * split_vma() may create temporary aliased mappings. There is
1155          * no risk as long as all huge pmd are found and have their
1156          * splitting bit set before __split_huge_page_refcount
1157          * runs. Finding the same huge pmd more than once during the
1158          * same rmap walk is not a problem.
1159          */
1160         if (flag == PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG &&
1161             pmd_trans_splitting(*pmd))
1162                 goto out;
1163         if (pmd_trans_huge(*pmd)) {
1164                 VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG &&
1165                           !pmd_trans_splitting(*pmd));
1166                 ret = pmd;
1167         }
1168 out:
1169         return ret;
1170 }
1171
1172 static int __split_huge_page_splitting(struct page *page,
1173                                        struct vm_area_struct *vma,
1174                                        unsigned long address)
1175 {
1176         struct mm_struct *mm = vma->vm_mm;
1177         pmd_t *pmd;
1178         int ret = 0;
1179
1180         spin_lock(&mm->page_table_lock);
1181         pmd = page_check_address_pmd(page, mm, address,
1182                                      PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG);
1183         if (pmd) {
1184                 /*
1185                  * We can't temporarily set the pmd to null in order
1186                  * to split it; the pmd must remain marked huge at all
1187                  * times or the VM won't take the pmd_trans_huge paths
1188                  * and it won't wait on the anon_vma->root->mutex to
1189                  * serialize against split_huge_page*.
1190                  */
1191                 pmdp_splitting_flush_notify(vma, address, pmd);
1192                 ret = 1;
1193         }
1194         spin_unlock(&mm->page_table_lock);
1195
1196         return ret;
1197 }
1198
1199 static void __split_huge_page_refcount(struct page *page)
1200 {
1201         int i;
1202         unsigned long head_index = page->index;
1203         struct zone *zone = page_zone(page);
1204         int zonestat;
1205         int tail_count = 0;
1206
1207         /* prevent PageLRU from going away from under us, and freeze lru stats */
1208         spin_lock_irq(&zone->lru_lock);
1209         compound_lock(page);
1210         /* complete memcg work before adding pages to the LRU */
1211         mem_cgroup_split_huge_fixup(page);
1212
1213         for (i = 1; i < HPAGE_PMD_NR; i++) {
1214                 struct page *page_tail = page + i;
1215
1216                 /* tail_page->_mapcount cannot change */
1217                 BUG_ON(page_mapcount(page_tail) < 0);
1218                 tail_count += page_mapcount(page_tail);
1219                 /* check for overflow */
1220                 BUG_ON(tail_count < 0);
1221                 BUG_ON(atomic_read(&page_tail->_count) != 0);
1222                 /*
1223                  * tail_page->_count is zero and not changing from
1224                  * under us. But get_page_unless_zero() may be running
1225                  * from under us on the tail_page. If we used
1226                  * atomic_set() below instead of atomic_add(), we
1227                  * would then run atomic_set() concurrently with
1228                  * get_page_unless_zero(), and atomic_set() is
1229                  * implemented in C not using locked ops. spin_unlock
1230                  * on x86 sometimes uses locked ops because of PPro
1231                  * errata 66, 92, so unless somebody can guarantee
1232                  * atomic_set() here would be safe on all archs (and
1233                  * not only on x86), it's safer to use atomic_add().
1234                  */
1235                 atomic_add(page_mapcount(page) + page_mapcount(page_tail) + 1,
1236                            &page_tail->_count);
1237
1238                 /* after clearing PageTail the gup refcount can be released */
1239                 smp_mb();
1240
1241                 /*
1242                  * Retain the hwpoison flag of the poisoned tail page:
1243                  * without it, memory-failure could end up killing the
1244                  * wrong process on a guest machine (KVM).
1245                  */
1246                 page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP | __PG_HWPOISON;
1247                 page_tail->flags |= (page->flags &
1248                                      ((1L << PG_referenced) |
1249                                       (1L << PG_swapbacked) |
1250                                       (1L << PG_mlocked) |
1251                                       (1L << PG_uptodate)));
1252                 page_tail->flags |= (1L << PG_dirty);
1253
1254                 /* clear PageTail before overwriting first_page */
1255                 smp_wmb();
1256
1257                 /*
1258                  * __split_huge_page_splitting() already set the
1259                  * splitting bit in all pmd that could map this
1260                  * hugepage, that will ensure no CPU can alter the
1261                  * mapcount on the head page. The mapcount is only
1262                  * accounted in the head page and it has to be
1263                  * transferred to all tail pages in the below code. So
1264                  * for this code to be safe, the mapcount can't change
1265                  * across the split. But that doesn't mean userland can't
1266                  * keep changing and reading the page contents while
1267                  * we transfer the mapcount, so the pmd splitting
1268                  * status is achieved by setting a reserved bit in the
1269                  * pmd, not by clearing the present bit.
1270                  */
1271                 page_tail->_mapcount = page->_mapcount;
1272
1273                 BUG_ON(page_tail->mapping);
1274                 page_tail->mapping = page->mapping;
1275
1276                 page_tail->index = ++head_index;
1277
1278                 BUG_ON(!PageAnon(page_tail));
1279                 BUG_ON(!PageUptodate(page_tail));
1280                 BUG_ON(!PageDirty(page_tail));
1281                 BUG_ON(!PageSwapBacked(page_tail));
1282
1283
1284                 lru_add_page_tail(zone, page, page_tail);
1285         }
1286         atomic_sub(tail_count, &page->_count);
1287         BUG_ON(atomic_read(&page->_count) <= 0);
1288
1289         __dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
1290         __mod_zone_page_state(zone, NR_ANON_PAGES, HPAGE_PMD_NR);
1291
1292         /*
1293          * A hugepage counts for HPAGE_PMD_NR pages on the LRU statistics,
1294          * so adjust those appropriately if this page is on the LRU.
1295          */
1296         if (PageLRU(page)) {
1297                 zonestat = NR_LRU_BASE + page_lru(page);
1298                 __mod_zone_page_state(zone, zonestat, -(HPAGE_PMD_NR-1));
1299         }
1300
1301         ClearPageCompound(page);
1302         compound_unlock(page);
1303         spin_unlock_irq(&zone->lru_lock);
1304
1305         for (i = 1; i < HPAGE_PMD_NR; i++) {
1306                 struct page *page_tail = page + i;
1307                 BUG_ON(page_count(page_tail) <= 0);
1308                 /*
1309                  * Tail pages may be freed if there wasn't any mapping
1310                  * left, e.g. if add_to_swap() ran on an lru page whose
1311                  * mapping had been zapped. Freeing these pages requires
1312                  * taking the lru_lock, so we do the put_page of the
1313                  * tail pages after the split is complete.
1314                  */
1315                 put_page(page_tail);
1316         }
1317
1318         /*
1319          * Only the head page (now become a regular page) is required
1320          * to be pinned by the caller.
1321          */
1322         BUG_ON(page_count(page) <= 0);
1323 }
1324
1325 static int __split_huge_page_map(struct page *page,
1326                                  struct vm_area_struct *vma,
1327                                  unsigned long address)
1328 {
1329         struct mm_struct *mm = vma->vm_mm;
1330         pmd_t *pmd, _pmd;
1331         int ret = 0, i;
1332         pgtable_t pgtable;
1333         unsigned long haddr;
1334
1335         spin_lock(&mm->page_table_lock);
1336         pmd = page_check_address_pmd(page, mm, address,
1337                                      PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG);
1338         if (pmd) {
1339                 pgtable = get_pmd_huge_pte(mm);
1340                 pmd_populate(mm, &_pmd, pgtable);
1341
1342                 for (i = 0, haddr = address; i < HPAGE_PMD_NR;
1343                      i++, haddr += PAGE_SIZE) {
1344                         pte_t *pte, entry;
1345                         BUG_ON(PageCompound(page+i));
1346                         entry = mk_pte(page + i, vma->vm_page_prot);
1347                         entry = maybe_mkwrite(pte_mkdirty(entry), vma);
1348                         if (!pmd_write(*pmd))
1349                                 entry = pte_wrprotect(entry);
1350                         else
1351                                 BUG_ON(page_mapcount(page) != 1);
1352                         if (!pmd_young(*pmd))
1353                                 entry = pte_mkold(entry);
1354                         pte = pte_offset_map(&_pmd, haddr);
1355                         BUG_ON(!pte_none(*pte));
1356                         set_pte_at(mm, haddr, pte, entry);
1357                         pte_unmap(pte);
1358                 }
1359
1360                 mm->nr_ptes++;
1361                 smp_wmb(); /* make pte visible before pmd */
1362                 /*
1363                  * Up to this point the pmd is present and huge and
1364                  * userland has the whole access to the hugepage
1365                  * during the split (which happens in place). If we
1366                  * overwrite the pmd with the not-huge version
1367                  * pointing to the pte here (which of course we could
1368                  * if all CPUs were bug free), userland could trigger
1369                  * a small page size TLB miss on the small sized TLB
1370                  * while the hugepage TLB entry is still established
1371                  * in the huge TLB. Some CPUs don't like that. See
1372                  * http://support.amd.com/us/Processor_TechDocs/41322.pdf,
1373                  * Erratum 383 on page 93. Intel should be safe but
1374                  * also warns that it's only safe if the permission
1375                  * and cache attributes of the two entries loaded in
1376                  * the two TLBs are identical (which should be the case
1377                  * here). But it is generally safer to never allow
1378                  * small and huge TLB entries for the same virtual
1379                  * address to be loaded simultaneously. So instead of
1380                  * doing "pmd_populate(); flush_tlb_range();" we first
1381                  * mark the current pmd notpresent (atomically because
1382                  * here the pmd_trans_huge and pmd_trans_splitting
1383                  * must remain set at all times on the pmd until the
1384                  * split is complete for this pmd), then we flush the
1385                  * SMP TLB and finally we write the non-huge version
1386                  * of the pmd entry with pmd_populate.
1387                  */
1388                 set_pmd_at(mm, address, pmd, pmd_mknotpresent(*pmd));
1389                 flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
1390                 pmd_populate(mm, pmd, pgtable);
1391                 ret = 1;
1392         }
1393         spin_unlock(&mm->page_table_lock);
1394
1395         return ret;
1396 }
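
The loop above fans a single huge pmd out into HPAGE_PMD_NR regular ptes, one per PAGE_SIZE step starting from the hugepage-aligned address. A minimal userspace sketch of just that address arithmetic, assuming the x86-64 4 KiB page / 2 MiB hugepage geometry (the _EX names are local to the example, not kernel symbols):

#include <stdio.h>

#define PAGE_SIZE_EX	4096UL
#define HPAGE_PMD_NR_EX	512UL	/* one pmd maps 512 4 KiB subpages */

int main(void)
{
	unsigned long address = 0x700000200000UL; /* hugepage aligned */
	unsigned long haddr = address;

	/* mirror the i/haddr iteration of __split_huge_page_map() */
	for (unsigned long i = 0; i < HPAGE_PMD_NR_EX; i++, haddr += PAGE_SIZE_EX) {
		if (i < 2 || i == HPAGE_PMD_NR_EX - 1)
			printf("pte %3lu -> subpage at %#lx\n", i, haddr);
	}
	printf("covered %lu bytes (2 MiB)\n", haddr - address);
	return 0;
}
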
1397
1398 /* must be called with anon_vma->root->mutex held */
1399 static void __split_huge_page(struct page *page,
1400                               struct anon_vma *anon_vma)
1401 {
1402         int mapcount, mapcount2;
1403         struct anon_vma_chain *avc;
1404
1405         BUG_ON(!PageHead(page));
1406         BUG_ON(PageTail(page));
1407
1408         mapcount = 0;
1409         list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
1410                 struct vm_area_struct *vma = avc->vma;
1411                 unsigned long addr = vma_address(page, vma);
1412                 BUG_ON(is_vma_temporary_stack(vma));
1413                 if (addr == -EFAULT)
1414                         continue;
1415                 mapcount += __split_huge_page_splitting(page, vma, addr);
1416         }
1417         /*
1418          * It is critical that new vmas are added to the tail of the
1419          * anon_vma list. This guarantees that if copy_huge_pmd() runs
1420          * and establishes a child pmd before
1421          * __split_huge_page_splitting() freezes the parent pmd (i.e. if
1422          * we fail to prevent copy_huge_pmd() from running until the
1423          * whole __split_huge_page() is complete), we will still see
1424          * the newly established pmd of the child later during the
1425          * walk, and will be able to mark it pmd_trans_splitting too.
1426          */
1427         if (mapcount != page_mapcount(page))
1428                 printk(KERN_ERR "mapcount %d page_mapcount %d\n",
1429                        mapcount, page_mapcount(page));
1430         BUG_ON(mapcount != page_mapcount(page));
1431
1432         __split_huge_page_refcount(page);
1433
1434         mapcount2 = 0;
1435         list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
1436                 struct vm_area_struct *vma = avc->vma;
1437                 unsigned long addr = vma_address(page, vma);
1438                 BUG_ON(is_vma_temporary_stack(vma));
1439                 if (addr == -EFAULT)
1440                         continue;
1441                 mapcount2 += __split_huge_page_map(page, vma, addr);
1442         }
1443         if (mapcount != mapcount2)
1444                 printk(KERN_ERR "mapcount %d mapcount2 %d page_mapcount %d\n",
1445                        mapcount, mapcount2, page_mapcount(page));
1446         BUG_ON(mapcount != mapcount2);
1447 }
1448
1449 int split_huge_page(struct page *page)
1450 {
1451         struct anon_vma *anon_vma;
1452         int ret = 1;
1453
1454         BUG_ON(!PageAnon(page));
1455         anon_vma = page_lock_anon_vma(page);
1456         if (!anon_vma)
1457                 goto out;
1458         ret = 0;
1459         if (!PageCompound(page))
1460                 goto out_unlock;
1461
1462         BUG_ON(!PageSwapBacked(page));
1463         __split_huge_page(page, anon_vma);
1464         count_vm_event(THP_SPLIT);
1465
1466         BUG_ON(PageCompound(page));
1467 out_unlock:
1468         page_unlock_anon_vma(anon_vma);
1469 out:
1470         return ret;
1471 }
1472
1473 #define VM_NO_THP (VM_SPECIAL|VM_INSERTPAGE|VM_MIXEDMAP|VM_SAO| \
1474                    VM_HUGETLB|VM_SHARED|VM_MAYSHARE)
1475
1476 int hugepage_madvise(struct vm_area_struct *vma,
1477                      unsigned long *vm_flags, int advice)
1478 {
1479         switch (advice) {
1480         case MADV_HUGEPAGE:
1481                 /*
1482                  * Be somewhat over-protective like KSM for now!
1483                  */
1484                 if (*vm_flags & (VM_HUGEPAGE | VM_NO_THP))
1485                         return -EINVAL;
1486                 *vm_flags &= ~VM_NOHUGEPAGE;
1487                 *vm_flags |= VM_HUGEPAGE;
1488                 /*
1489                  * If the vma becomes suitable for khugepaged to scan,
1490                  * register it here without waiting for a page fault
1491                  * that may not happen any time soon.
1492                  */
1493                 if (unlikely(khugepaged_enter_vma_merge(vma)))
1494                         return -ENOMEM;
1495                 break;
1496         case MADV_NOHUGEPAGE:
1497                 /*
1498                  * Be somewhat over-protective like KSM for now!
1499                  */
1500                 if (*vm_flags & (VM_NOHUGEPAGE | VM_NO_THP))
1501                         return -EINVAL;
1502                 *vm_flags &= ~VM_HUGEPAGE;
1503                 *vm_flags |= VM_NOHUGEPAGE;
1504                 /*
1505                  * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
1506                  * this vma even if we leave the mm registered in khugepaged if
1507                  * it got registered before VM_NOHUGEPAGE was set.
1508                  */
1509                 break;
1510         }
1511
1512         return 0;
1513 }
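
From userland, the two branches above are reached through madvise(2). A minimal usage sketch, assuming a kernel built with CONFIG_TRANSPARENT_HUGEPAGE and a libc that exposes MADV_HUGEPAGE/MADV_NOHUGEPAGE:

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4UL << 20; /* room for two 2 MiB hugepages */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) { perror("mmap"); return 1; }

	/* clears VM_NOHUGEPAGE, sets VM_HUGEPAGE, registers with khugepaged */
	if (madvise(p, len, MADV_HUGEPAGE))
		perror("madvise(MADV_HUGEPAGE)"); /* EINVAL on VM_NO_THP vmas */

	/* clears VM_HUGEPAGE, sets VM_NOHUGEPAGE again */
	if (madvise(p, len, MADV_NOHUGEPAGE))
		perror("madvise(MADV_NOHUGEPAGE)");

	munmap(p, len);
	return 0;
}
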
1514
1515 static int __init khugepaged_slab_init(void)
1516 {
1517         mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
1518                                           sizeof(struct mm_slot),
1519                                           __alignof__(struct mm_slot), 0, NULL);
1520         if (!mm_slot_cache)
1521                 return -ENOMEM;
1522
1523         return 0;
1524 }
1525
1526 static void __init khugepaged_slab_free(void)
1527 {
1528         kmem_cache_destroy(mm_slot_cache);
1529         mm_slot_cache = NULL;
1530 }
1531
1532 static inline struct mm_slot *alloc_mm_slot(void)
1533 {
1534         if (!mm_slot_cache)     /* initialization failed */
1535                 return NULL;
1536         return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
1537 }
1538
1539 static inline void free_mm_slot(struct mm_slot *mm_slot)
1540 {
1541         kmem_cache_free(mm_slot_cache, mm_slot);
1542 }
1543
1544 static int __init mm_slots_hash_init(void)
1545 {
1546         mm_slots_hash = kzalloc(MM_SLOTS_HASH_HEADS * sizeof(struct hlist_head),
1547                                 GFP_KERNEL);
1548         if (!mm_slots_hash)
1549                 return -ENOMEM;
1550         return 0;
1551 }
1552
1553 #if 0
1554 static void __init mm_slots_hash_free(void)
1555 {
1556         kfree(mm_slots_hash);
1557         mm_slots_hash = NULL;
1558 }
1559 #endif
1560
1561 static struct mm_slot *get_mm_slot(struct mm_struct *mm)
1562 {
1563         struct mm_slot *mm_slot;
1564         struct hlist_head *bucket;
1565         struct hlist_node *node;
1566
1567         bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
1568                                 % MM_SLOTS_HASH_HEADS];
1569         hlist_for_each_entry(mm_slot, node, bucket, hash) {
1570                 if (mm == mm_slot->mm)
1571                         return mm_slot;
1572         }
1573         return NULL;
1574 }
1575
1576 static void insert_to_mm_slots_hash(struct mm_struct *mm,
1577                                     struct mm_slot *mm_slot)
1578 {
1579         struct hlist_head *bucket;
1580
1581         bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
1582                                 % MM_SLOTS_HASH_HEADS];
1583         mm_slot->mm = mm;
1584         hlist_add_head(&mm_slot->hash, bucket);
1585 }
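
Both get_mm_slot() and insert_to_mm_slots_hash() derive the bucket from the mm pointer itself: dividing by sizeof(struct mm_struct) roughly turns the pointer into a slab-object index, and the modulo folds that into the 1024 heads. A standalone sketch of the index computation for a 64-bit build; the 896-byte struct size is a hypothetical stand-in, not the real sizeof(struct mm_struct):

#include <stdio.h>

#define MM_SLOTS_HASH_HEADS 1024
#define MM_STRUCT_SIZE 896UL	/* hypothetical sizeof(struct mm_struct) */

/* same index computation as get_mm_slot()/insert_to_mm_slots_hash() */
static unsigned long mm_hash_bucket(unsigned long mm_ptr)
{
	return (mm_ptr / MM_STRUCT_SIZE) % MM_SLOTS_HASH_HEADS;
}

int main(void)
{
	/* consecutive slab objects spread across consecutive buckets */
	unsigned long base = 0xffff880012340000UL;
	for (int i = 0; i < 4; i++) {
		unsigned long mm = base + i * MM_STRUCT_SIZE;
		printf("mm %#lx -> bucket %lu\n", mm, mm_hash_bucket(mm));
	}
	return 0;
}
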
1586
1587 static inline int khugepaged_test_exit(struct mm_struct *mm)
1588 {
1589         return atomic_read(&mm->mm_users) == 0;
1590 }
1591
1592 int __khugepaged_enter(struct mm_struct *mm)
1593 {
1594         struct mm_slot *mm_slot;
1595         int wakeup;
1596
1597         mm_slot = alloc_mm_slot();
1598         if (!mm_slot)
1599                 return -ENOMEM;
1600
1601         /* __khugepaged_exit() must not run from under us */
1602         VM_BUG_ON(khugepaged_test_exit(mm));
1603         if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
1604                 free_mm_slot(mm_slot);
1605                 return 0;
1606         }
1607
1608         spin_lock(&khugepaged_mm_lock);
1609         insert_to_mm_slots_hash(mm, mm_slot);
1610         /*
1611          * Insert just behind the scanning cursor, to let the area settle
1612          * down a little.
1613          */
1614         wakeup = list_empty(&khugepaged_scan.mm_head);
1615         list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
1616         spin_unlock(&khugepaged_mm_lock);
1617
1618         atomic_inc(&mm->mm_count);
1619         if (wakeup)
1620                 wake_up_interruptible(&khugepaged_wait);
1621
1622         return 0;
1623 }
1624
1625 int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
1626 {
1627         unsigned long hstart, hend;
1628         if (!vma->anon_vma)
1629                 /*
1630                  * Not yet faulted in, so we will register it later in
1631                  * the page fault path if needed.
1632                  */
1633                 return 0;
1634         if (vma->vm_ops)
1635                 /* khugepaged not yet working on file or special mappings */
1636                 return 0;
1637         /*
1638          * If is_pfn_mapping() is true, is_linear_pfn_mapping() must be
1639          * true too; verify it here.
1640          */
1641         VM_BUG_ON(is_linear_pfn_mapping(vma) || vma->vm_flags & VM_NO_THP);
1642         hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
1643         hend = vma->vm_end & HPAGE_PMD_MASK;
1644         if (hstart < hend)
1645                 return khugepaged_enter(vma);
1646         return 0;
1647 }
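
The hstart/hend computation rounds vm_start up and vm_end down to the hugepage boundary, and the vma is registered only if at least one full HPAGE_PMD_SIZE region survives the rounding. A runnable sketch of the same arithmetic, assuming the x86-64 2 MiB geometry (the _EX names are illustrative, not kernel symbols):

#include <stdio.h>

#define HPAGE_PMD_SIZE_EX (2UL << 20)            /* 2 MiB */
#define HPAGE_PMD_MASK_EX (~(HPAGE_PMD_SIZE_EX - 1))

int main(void)
{
	unsigned long vm_start = 0x400000 + 0x1000;  /* not hugepage aligned */
	unsigned long vm_end   = 0x400000 + (6UL << 20);

	/* same expressions as khugepaged_enter_vma_merge() */
	unsigned long hstart = (vm_start + ~HPAGE_PMD_MASK_EX) & HPAGE_PMD_MASK_EX;
	unsigned long hend   = vm_end & HPAGE_PMD_MASK_EX;

	printf("hstart=%#lx hend=%#lx\n", hstart, hend);
	if (hstart < hend)
		printf("%lu hugepage(s) fit\n",
		       (hend - hstart) / HPAGE_PMD_SIZE_EX);
	else
		printf("vma too small or misaligned for a hugepage\n");
	return 0;
}
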
1648
1649 void __khugepaged_exit(struct mm_struct *mm)
1650 {
1651         struct mm_slot *mm_slot;
1652         int free = 0;
1653
1654         spin_lock(&khugepaged_mm_lock);
1655         mm_slot = get_mm_slot(mm);
1656         if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
1657                 hlist_del(&mm_slot->hash);
1658                 list_del(&mm_slot->mm_node);
1659                 free = 1;
1660         }
1661         spin_unlock(&khugepaged_mm_lock);
1662
1663         if (free) {
1664                 clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1665                 free_mm_slot(mm_slot);
1666                 mmdrop(mm);
1667         } else if (mm_slot) {
1668                 /*
1669                  * This is required to serialize against
1670                  * khugepaged_test_exit() (which is guaranteed to run
1671                  * under mmap_sem read mode). Stop here (after we
1672                  * return, all pagetables will be destroyed) until
1673                  * khugepaged has finished working on the pagetables
1674                  * under the mmap_sem.
1675                  */
1676                 down_write(&mm->mmap_sem);
1677                 up_write(&mm->mmap_sem);
1678         }
1679 }
1680
1681 static void release_pte_page(struct page *page)
1682 {
1683         /* 0 stands for page_is_file_cache(page) == false */
1684         dec_zone_page_state(page, NR_ISOLATED_ANON + 0);
1685         unlock_page(page);
1686         putback_lru_page(page);
1687 }
1688
1689 static void release_pte_pages(pte_t *pte, pte_t *_pte)
1690 {
1691         while (--_pte >= pte) {
1692                 pte_t pteval = *_pte;
1693                 if (!pte_none(pteval))
1694                         release_pte_page(pte_page(pteval));
1695         }
1696 }
1697
1698 static void release_all_pte_pages(pte_t *pte)
1699 {
1700         release_pte_pages(pte, pte + HPAGE_PMD_NR);
1701 }
1702
1703 static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
1704                                         unsigned long address,
1705                                         pte_t *pte)
1706 {
1707         struct page *page;
1708         pte_t *_pte;
1709         int referenced = 0, isolated = 0, none = 0;
1710         for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
1711              _pte++, address += PAGE_SIZE) {
1712                 pte_t pteval = *_pte;
1713                 if (pte_none(pteval)) {
1714                         if (++none <= khugepaged_max_ptes_none)
1715                                 continue;
1716                         else {
1717                                 release_pte_pages(pte, _pte);
1718                                 goto out;
1719                         }
1720                 }
1721                 if (!pte_present(pteval) || !pte_write(pteval)) {
1722                         release_pte_pages(pte, _pte);
1723                         goto out;
1724                 }
1725                 page = vm_normal_page(vma, address, pteval);
1726                 if (unlikely(!page)) {
1727                         release_pte_pages(pte, _pte);
1728                         goto out;
1729                 }
1730                 VM_BUG_ON(PageCompound(page));
1731                 BUG_ON(!PageAnon(page));
1732                 VM_BUG_ON(!PageSwapBacked(page));
1733
1734                 /* cannot use mapcount: can't collapse if there's a gup pin */
1735                 if (page_count(page) != 1) {
1736                         release_pte_pages(pte, _pte);
1737                         goto out;
1738                 }
1739                 /*
1740                  * We can take the page lock before isolate_lru_page
1741                  * because the page can't be freed from under us. NOTE:
1742                  * PG_locked is needed to serialize against split_huge_page
1743                  * when invoked from the VM.
1744                  */
1745                 if (!trylock_page(page)) {
1746                         release_pte_pages(pte, _pte);
1747                         goto out;
1748                 }
1749                 /*
1750                  * Isolate the page to avoid collapsing a hugepage
1751                  * currently in use by the VM.
1752                  */
1753                 if (isolate_lru_page(page)) {
1754                         unlock_page(page);
1755                         release_pte_pages(pte, _pte);
1756                         goto out;
1757                 }
1758                 /* 0 stands for page_is_file_cache(page) == false */
1759                 inc_zone_page_state(page, NR_ISOLATED_ANON + 0);
1760                 VM_BUG_ON(!PageLocked(page));
1761                 VM_BUG_ON(PageLRU(page));
1762
1763                 /* If no mapped pte is young, don't collapse the page */
1764                 if (pte_young(pteval) || PageReferenced(page) ||
1765                     mmu_notifier_test_young(vma->vm_mm, address))
1766                         referenced = 1;
1767         }
1768         if (unlikely(!referenced))
1769                 release_all_pte_pages(pte);
1770         else
1771                 isolated = 1;
1772 out:
1773         return isolated;
1774 }
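
The pte_none accounting above implements the khugepaged_max_ptes_none policy: up to HPAGE_PMD_NR-1 empty ptes may be filled with zeroed subpages, but at least one pte must be mapped, and at least one must be young or referenced, for the collapse to proceed. A toy model of that accept/reject decision, with presence and youth encoded as plain flags; the real function additionally demands write permission, anon pages, LRU isolation and refcount checks:

#include <stdio.h>

#define HPAGE_PMD_NR_EX 512
#define MAX_PTES_NONE_EX (HPAGE_PMD_NR_EX - 1) /* default threshold */

struct fake_pte { int present; int young; };

/* mirrors the counting logic of __collapse_huge_page_isolate() */
static int would_isolate(const struct fake_pte *ptes)
{
	int none = 0, referenced = 0;

	for (int i = 0; i < HPAGE_PMD_NR_EX; i++) {
		if (!ptes[i].present) {
			if (++none > MAX_PTES_NONE_EX)
				return 0; /* too many holes */
			continue;
		}
		if (ptes[i].young)
			referenced = 1;
	}
	return referenced; /* no young pte -> don't collapse */
}

int main(void)
{
	static struct fake_pte ptes[HPAGE_PMD_NR_EX]; /* all-none by default */

	ptes[0].present = 1;
	ptes[0].young = 1; /* one mapped, referenced pte is enough */
	printf("collapse? %s\n", would_isolate(ptes) ? "yes" : "no");
	return 0;
}
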
1775
1776 static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
1777                                       struct vm_area_struct *vma,
1778                                       unsigned long address,
1779                                       spinlock_t *ptl)
1780 {
1781         pte_t *_pte;
1782         for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) {
1783                 pte_t pteval = *_pte;
1784                 struct page *src_page;
1785
1786                 if (pte_none(pteval)) {
1787                         clear_user_highpage(page, address);
1788                         add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
1789                 } else {
1790                         src_page = pte_page(pteval);
1791                         copy_user_highpage(page, src_page, address, vma);
1792                         VM_BUG_ON(page_mapcount(src_page) != 1);
1793                         VM_BUG_ON(page_count(src_page) != 2);
1794                         release_pte_page(src_page);
1795                         /*
1796                          * ptl mostly unnecessary, but preempt has to
1797                          * be disabled to update the per-cpu stats
1798                          * inside page_remove_rmap().
1799                          */
1800                         spin_lock(ptl);
1801                         /*
1802                          * paravirt calls inside pte_clear here are
1803                          * superfluous.
1804                          */
1805                         pte_clear(vma->vm_mm, address, _pte);
1806                         page_remove_rmap(src_page);
1807                         spin_unlock(ptl);
1808                         free_page_and_swap_cache(src_page);
1809                 }
1810
1811                 address += PAGE_SIZE;
1812                 page++;
1813         }
1814 }
1815
1816 static void collapse_huge_page(struct mm_struct *mm,
1817                                unsigned long address,
1818                                struct page **hpage,
1819                                struct vm_area_struct *vma,
1820                                int node)
1821 {
1822         pgd_t *pgd;
1823         pud_t *pud;
1824         pmd_t *pmd, _pmd;
1825         pte_t *pte;
1826         pgtable_t pgtable;
1827         struct page *new_page;
1828         spinlock_t *ptl;
1829         int isolated;
1830         unsigned long hstart, hend;
1831
1832         VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1833 #ifndef CONFIG_NUMA
1834         up_read(&mm->mmap_sem);
1835         VM_BUG_ON(!*hpage);
1836         new_page = *hpage;
1837 #else
1838         VM_BUG_ON(*hpage);
1839         /*
1840          * Allocate the page while the vma is still valid and under
1841          * the mmap_sem read mode so there is no memory allocation
1842          * later when we take the mmap_sem in write mode. This is
1843          * friendlier behavior (OTOH it may actually hide bugs) for
1844          * userland filesystems whose daemons allocate memory in
1845          * the userland I/O paths. Allocating memory with the
1846          * mmap_sem held in read mode is also a good idea for greater
1847          * scalability.
1848          */
1849         new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
1850                                       node, __GFP_OTHER_NODE);
1851
1852         /*
1853          * After allocating the hugepage, release the mmap_sem read lock in
1854          * preparation for taking it in write mode.
1855          */
1856         up_read(&mm->mmap_sem);
1857         if (unlikely(!new_page)) {
1858                 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
1859                 *hpage = ERR_PTR(-ENOMEM);
1860                 return;
1861         }
1862 #endif
1863
1864         count_vm_event(THP_COLLAPSE_ALLOC);
1865         if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
1866 #ifdef CONFIG_NUMA
1867                 put_page(new_page);
1868 #endif
1869                 return;
1870         }
1871
1872         /*
1873          * Prevent all access to the pagetables, with the exception of
1874          * gup_fast (handled later by the ptep_clear_flush) and the VM
1875          * (handled by the anon_vma lock + PG_locked).
1876          */
1877         down_write(&mm->mmap_sem);
1878         if (unlikely(khugepaged_test_exit(mm)))
1879                 goto out;
1880
1881         vma = find_vma(mm, address);
1882         hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
1883         hend = vma->vm_end & HPAGE_PMD_MASK;
1884         if (address < hstart || address + HPAGE_PMD_SIZE > hend)
1885                 goto out;
1886
1887         if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
1888             (vma->vm_flags & VM_NOHUGEPAGE))
1889                 goto out;
1890
1891         if (!vma->anon_vma || vma->vm_ops)
1892                 goto out;
1893         if (is_vma_temporary_stack(vma))
1894                 goto out;
1895         /*
1896          * If is_pfn_mapping() is true, is_linear_pfn_mapping() must be
1897          * true too; verify it here.
1898          */
1899         VM_BUG_ON(is_linear_pfn_mapping(vma) || vma->vm_flags & VM_NO_THP);
1900
1901         pgd = pgd_offset(mm, address);
1902         if (!pgd_present(*pgd))
1903                 goto out;
1904
1905         pud = pud_offset(pgd, address);
1906         if (!pud_present(*pud))
1907                 goto out;
1908
1909         pmd = pmd_offset(pud, address);
1910         /* pmd can't go away or become huge under us */
1911         if (!pmd_present(*pmd) || pmd_trans_huge(*pmd))
1912                 goto out;
1913
1914         anon_vma_lock(vma->anon_vma);
1915
1916         pte = pte_offset_map(pmd, address);
1917         ptl = pte_lockptr(mm, pmd);
1918
1919         spin_lock(&mm->page_table_lock); /* probably unnecessary */
1920         /*
1921          * After this gup_fast can't run anymore. This also removes
1922          * any huge TLB entry from the CPU so we won't allow
1923          * huge and small TLB entries for the same virtual address
1924          * to avoid the risk of CPU bugs in that area.
1925          */
1926         _pmd = pmdp_clear_flush_notify(vma, address, pmd);
1927         spin_unlock(&mm->page_table_lock);
1928
1929         spin_lock(ptl);
1930         isolated = __collapse_huge_page_isolate(vma, address, pte);
1931         spin_unlock(ptl);
1932
1933         if (unlikely(!isolated)) {
1934                 pte_unmap(pte);
1935                 spin_lock(&mm->page_table_lock);
1936                 BUG_ON(!pmd_none(*pmd));
1937                 set_pmd_at(mm, address, pmd, _pmd);
1938                 spin_unlock(&mm->page_table_lock);
1939                 anon_vma_unlock(vma->anon_vma);
1940                 goto out;
1941         }
1942
1943         /*
1944          * All pages are isolated and locked so anon_vma rmap
1945          * can't run anymore.
1946          */
1947         anon_vma_unlock(vma->anon_vma);
1948
1949         __collapse_huge_page_copy(pte, new_page, vma, address, ptl);
1950         pte_unmap(pte);
1951         __SetPageUptodate(new_page);
1952         pgtable = pmd_pgtable(_pmd);
1953         VM_BUG_ON(page_count(pgtable) != 1);
1954         VM_BUG_ON(page_mapcount(pgtable) != 0);
1955
1956         _pmd = mk_pmd(new_page, vma->vm_page_prot);
1957         _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1958         _pmd = pmd_mkhuge(_pmd);
1959
1960         /*
1961          * spin_lock() below is not the equivalent of smp_wmb(), so
1962          * this is needed to prevent the copy_huge_page writes from
1963          * becoming visible after the set_pmd_at() write.
1964          */
1965         smp_wmb();
1966
1967         spin_lock(&mm->page_table_lock);
1968         BUG_ON(!pmd_none(*pmd));
1969         page_add_new_anon_rmap(new_page, vma, address);
1970         set_pmd_at(mm, address, pmd, _pmd);
1971         update_mmu_cache(vma, address, _pmd);
1972         prepare_pmd_huge_pte(pgtable, mm);
1973         mm->nr_ptes--;
1974         spin_unlock(&mm->page_table_lock);
1975
1976 #ifndef CONFIG_NUMA
1977         *hpage = NULL;
1978 #endif
1979         khugepaged_pages_collapsed++;
1980 out_up_write:
1981         up_write(&mm->mmap_sem);
1982         return;
1983
1984 out:
1985         mem_cgroup_uncharge_page(new_page);
1986 #ifdef CONFIG_NUMA
1987         put_page(new_page);
1988 #endif
1989         goto out_up_write;
1990 }
1991
1992 static int khugepaged_scan_pmd(struct mm_struct *mm,
1993                                struct vm_area_struct *vma,
1994                                unsigned long address,
1995                                struct page **hpage)
1996 {
1997         pgd_t *pgd;
1998         pud_t *pud;
1999         pmd_t *pmd;
2000         pte_t *pte, *_pte;
2001         int ret = 0, referenced = 0, none = 0;
2002         struct page *page;
2003         unsigned long _address;
2004         spinlock_t *ptl;
2005         int node = -1;
2006
2007         VM_BUG_ON(address & ~HPAGE_PMD_MASK);
2008
2009         pgd = pgd_offset(mm, address);
2010         if (!pgd_present(*pgd))
2011                 goto out;
2012
2013         pud = pud_offset(pgd, address);
2014         if (!pud_present(*pud))
2015                 goto out;
2016
2017         pmd = pmd_offset(pud, address);
2018         if (!pmd_present(*pmd) || pmd_trans_huge(*pmd))
2019                 goto out;
2020
2021         pte = pte_offset_map_lock(mm, pmd, address, &ptl);
2022         for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
2023              _pte++, _address += PAGE_SIZE) {
2024                 pte_t pteval = *_pte;
2025                 if (pte_none(pteval)) {
2026                         if (++none <= khugepaged_max_ptes_none)
2027                                 continue;
2028                         else
2029                                 goto out_unmap;
2030                 }
2031                 if (!pte_present(pteval) || !pte_write(pteval))
2032                         goto out_unmap;
2033                 page = vm_normal_page(vma, _address, pteval);
2034                 if (unlikely(!page))
2035                         goto out_unmap;
2036                 /*
2037                  * Choose the node of the first page. This could be
2038                  * made more sophisticated by looking at more pages,
2039                  * but it isn't for now.
2040                  */
2041                 if (node == -1)
2042                         node = page_to_nid(page);
2043                 VM_BUG_ON(PageCompound(page));
2044                 if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
2045                         goto out_unmap;
2046                 /* cannot use mapcount: can't collapse if there's a gup pin */
2047                 if (page_count(page) != 1)
2048                         goto out_unmap;
2049                 if (pte_young(pteval) || PageReferenced(page) ||
2050                     mmu_notifier_test_young(vma->vm_mm, address))
2051                         referenced = 1;
2052         }
2053         if (referenced)
2054                 ret = 1;
2055 out_unmap:
2056         pte_unmap_unlock(pte, ptl);
2057         if (ret)
2058                 /* collapse_huge_page will return with the mmap_sem released */
2059                 collapse_huge_page(mm, address, hpage, vma, node);
2060 out:
2061         return ret;
2062 }
2063
2064 static void collect_mm_slot(struct mm_slot *mm_slot)
2065 {
2066         struct mm_struct *mm = mm_slot->mm;
2067
2068         VM_BUG_ON(!spin_is_locked(&khugepaged_mm_lock));
2069
2070         if (khugepaged_test_exit(mm)) {
2071                 /* free mm_slot */
2072                 hlist_del(&mm_slot->hash);
2073                 list_del(&mm_slot->mm_node);
2074
2075                 /*
2076                  * Not strictly needed because the mm exited already.
2077                  *
2078                  * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
2079                  */
2080
2081                 /* khugepaged_mm_lock actually not necessary for the below */
2082                 free_mm_slot(mm_slot);
2083                 mmdrop(mm);
2084         }
2085 }
2086
2087 static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
2088                                             struct page **hpage)
2089         __releases(&khugepaged_mm_lock)
2090         __acquires(&khugepaged_mm_lock)
2091 {
2092         struct mm_slot *mm_slot;
2093         struct mm_struct *mm;
2094         struct vm_area_struct *vma;
2095         int progress = 0;
2096
2097         VM_BUG_ON(!pages);
2098         VM_BUG_ON(!spin_is_locked(&khugepaged_mm_lock));
2099
2100         if (khugepaged_scan.mm_slot)
2101                 mm_slot = khugepaged_scan.mm_slot;
2102         else {
2103                 mm_slot = list_entry(khugepaged_scan.mm_head.next,
2104                                      struct mm_slot, mm_node);
2105                 khugepaged_scan.address = 0;
2106                 khugepaged_scan.mm_slot = mm_slot;
2107         }
2108         spin_unlock(&khugepaged_mm_lock);
2109
2110         mm = mm_slot->mm;
2111         down_read(&mm->mmap_sem);
2112         if (unlikely(khugepaged_test_exit(mm)))
2113                 vma = NULL;
2114         else
2115                 vma = find_vma(mm, khugepaged_scan.address);
2116
2117         progress++;
2118         for (; vma; vma = vma->vm_next) {
2119                 unsigned long hstart, hend;
2120
2121                 cond_resched();
2122                 if (unlikely(khugepaged_test_exit(mm))) {
2123                         progress++;
2124                         break;
2125                 }
2126
2127                 if ((!(vma->vm_flags & VM_HUGEPAGE) &&
2128                      !khugepaged_always()) ||
2129                     (vma->vm_flags & VM_NOHUGEPAGE)) {
2130                 skip:
2131                         progress++;
2132                         continue;
2133                 }
2134                 if (!vma->anon_vma || vma->vm_ops)
2135                         goto skip;
2136                 if (is_vma_temporary_stack(vma))
2137                         goto skip;
2138                 /*
2139                  * If is_pfn_mapping() is true, is_linear_pfn_mapping()
2140                  * must be true too; verify it here.
2141                  */
2142                 VM_BUG_ON(is_linear_pfn_mapping(vma) ||
2143                           vma->vm_flags & VM_NO_THP);
2144
2145                 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2146                 hend = vma->vm_end & HPAGE_PMD_MASK;
2147                 if (hstart >= hend)
2148                         goto skip;
2149                 if (khugepaged_scan.address > hend)
2150                         goto skip;
2151                 if (khugepaged_scan.address < hstart)
2152                         khugepaged_scan.address = hstart;
2153                 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2154
2155                 while (khugepaged_scan.address < hend) {
2156                         int ret;
2157                         cond_resched();
2158                         if (unlikely(khugepaged_test_exit(mm)))
2159                                 goto breakouterloop;
2160
2161                         VM_BUG_ON(khugepaged_scan.address < hstart ||
2162                                   khugepaged_scan.address + HPAGE_PMD_SIZE >
2163                                   hend);
2164                         ret = khugepaged_scan_pmd(mm, vma,
2165                                                   khugepaged_scan.address,
2166                                                   hpage);
2167                         /* move to next address */
2168                         khugepaged_scan.address += HPAGE_PMD_SIZE;
2169                         progress += HPAGE_PMD_NR;
2170                         if (ret)
2171                                 /* we released mmap_sem so break loop */
2172                                 goto breakouterloop_mmap_sem;
2173                         if (progress >= pages)
2174                                 goto breakouterloop;
2175                 }
2176         }
2177 breakouterloop:
2178         up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
2179 breakouterloop_mmap_sem:
2180
2181         spin_lock(&khugepaged_mm_lock);
2182         VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
2183         /*
2184          * Release the current mm_slot if this mm is about to die, or
2185          * if we scanned all vmas of this mm.
2186          */
2187         if (khugepaged_test_exit(mm) || !vma) {
2188                 /*
2189                  * Make sure that if mm_users reaches zero while
2190                  * khugepaged runs here, khugepaged_exit will find
2191                  * mm_slot no longer pointing to the exiting mm.
2192                  */
2193                 if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
2194                         khugepaged_scan.mm_slot = list_entry(
2195                                 mm_slot->mm_node.next,
2196                                 struct mm_slot, mm_node);
2197                         khugepaged_scan.address = 0;
2198                 } else {
2199                         khugepaged_scan.mm_slot = NULL;
2200                         khugepaged_full_scans++;
2201                 }
2202
2203                 collect_mm_slot(mm_slot);
2204         }
2205
2206         return progress;
2207 }
2208
2209 static int khugepaged_has_work(void)
2210 {
2211         return !list_empty(&khugepaged_scan.mm_head) &&
2212                 khugepaged_enabled();
2213 }
2214
2215 static int khugepaged_wait_event(void)
2216 {
2217         return !list_empty(&khugepaged_scan.mm_head) ||
2218                 !khugepaged_enabled();
2219 }
2220
2221 static void khugepaged_do_scan(struct page **hpage)
2222 {
2223         unsigned int progress = 0, pass_through_head = 0;
2224         unsigned int pages = khugepaged_pages_to_scan;
2225
2226         barrier(); /* read khugepaged_pages_to_scan just once into the local */
2227
2228         while (progress < pages) {
2229                 cond_resched();
2230
2231 #ifndef CONFIG_NUMA
2232                 if (!*hpage) {
2233                         *hpage = alloc_hugepage(khugepaged_defrag());
2234                         if (unlikely(!*hpage)) {
2235                                 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
2236                                 break;
2237                         }
2238                         count_vm_event(THP_COLLAPSE_ALLOC);
2239                 }
2240 #else
2241                 if (IS_ERR(*hpage))
2242                         break;
2243 #endif
2244
2245                 if (unlikely(kthread_should_stop() || freezing(current)))
2246                         break;
2247
2248                 spin_lock(&khugepaged_mm_lock);
2249                 if (!khugepaged_scan.mm_slot)
2250                         pass_through_head++;
2251                 if (khugepaged_has_work() &&
2252                     pass_through_head < 2)
2253                         progress += khugepaged_scan_mm_slot(pages - progress,
2254                                                             hpage);
2255                 else
2256                         progress = pages;
2257                 spin_unlock(&khugepaged_mm_lock);
2258         }
2259 }
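
khugepaged_do_scan() above repeatedly calls khugepaged_scan_mm_slot() with whatever budget remains until pages_to_scan worth of ptes has been examined or the mm list has been passed through twice. A toy model of just the budgeting, with the scanner replaced by a stub and the constants chosen arbitrarily for illustration:

#include <stdio.h>

/* stand-in for khugepaged_scan_mm_slot(): report how much progress
 * a single call makes against the remaining budget */
static unsigned int scan_mm_slot_stub(unsigned int budget)
{
	unsigned int done = 150; /* arbitrary per-call progress */
	return done < budget ? done : budget;
}

int main(void)
{
	unsigned int pages = 512; /* stand-in for khugepaged_pages_to_scan */
	unsigned int progress = 0;

	while (progress < pages) {
		progress += scan_mm_slot_stub(pages - progress);
		printf("progress %u/%u\n", progress, pages);
	}
	return 0;
}
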
2260
2261 static void khugepaged_alloc_sleep(void)
2262 {
2263         wait_event_freezable_timeout(khugepaged_wait, false,
2264                         msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
2265 }
2266
2267 #ifndef CONFIG_NUMA
2268 static struct page *khugepaged_alloc_hugepage(void)
2269 {
2270         struct page *hpage;
2271
2272         do {
2273                 hpage = alloc_hugepage(khugepaged_defrag());
2274                 if (!hpage) {
2275                         count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
2276                         khugepaged_alloc_sleep();
2277                 } else
2278                         count_vm_event(THP_COLLAPSE_ALLOC);
2279         } while (unlikely(!hpage) &&
2280                  likely(khugepaged_enabled()));
2281         return hpage;
2282 }
2283 #endif
2284
2285 static void khugepaged_loop(void)
2286 {
2287         struct page *hpage;
2288
2289 #ifdef CONFIG_NUMA
2290         hpage = NULL;
2291 #endif
2292         while (likely(khugepaged_enabled())) {
2293 #ifndef CONFIG_NUMA
2294                 hpage = khugepaged_alloc_hugepage();
2295                 if (unlikely(!hpage))
2296                         break;
2297 #else
2298                 if (IS_ERR(hpage)) {
2299                         khugepaged_alloc_sleep();
2300                         hpage = NULL;
2301                 }
2302 #endif
2303
2304                 khugepaged_do_scan(&hpage);
2305 #ifndef CONFIG_NUMA
2306                 if (hpage)
2307                         put_page(hpage);
2308 #endif
2309                 try_to_freeze();
2310                 if (unlikely(kthread_should_stop()))
2311                         break;
2312                 if (khugepaged_has_work()) {
2313                         if (!khugepaged_scan_sleep_millisecs)
2314                                 continue;
2315                         wait_event_freezable_timeout(khugepaged_wait, false,
2316                             msecs_to_jiffies(khugepaged_scan_sleep_millisecs));
2317                 } else if (khugepaged_enabled())
2318                         wait_event_freezable(khugepaged_wait,
2319                                              khugepaged_wait_event());
2320         }
2321 }
2322
2323 static int khugepaged(void *none)
2324 {
2325         struct mm_slot *mm_slot;
2326
2327         set_freezable();
2328         set_user_nice(current, 19);
2329
2330         /* serialize with start_khugepaged() */
2331         mutex_lock(&khugepaged_mutex);
2332
2333         for (;;) {
2334                 mutex_unlock(&khugepaged_mutex);
2335                 VM_BUG_ON(khugepaged_thread != current);
2336                 khugepaged_loop();
2337                 VM_BUG_ON(khugepaged_thread != current);
2338
2339                 mutex_lock(&khugepaged_mutex);
2340                 if (!khugepaged_enabled())
2341                         break;
2342                 if (unlikely(kthread_should_stop()))
2343                         break;
2344         }
2345
2346         spin_lock(&khugepaged_mm_lock);
2347         mm_slot = khugepaged_scan.mm_slot;
2348         khugepaged_scan.mm_slot = NULL;
2349         if (mm_slot)
2350                 collect_mm_slot(mm_slot);
2351         spin_unlock(&khugepaged_mm_lock);
2352
2353         khugepaged_thread = NULL;
2354         mutex_unlock(&khugepaged_mutex);
2355
2356         return 0;
2357 }
2358
2359 void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd)
2360 {
2361         struct page *page;
2362
2363         spin_lock(&mm->page_table_lock);
2364         if (unlikely(!pmd_trans_huge(*pmd))) {
2365                 spin_unlock(&mm->page_table_lock);
2366                 return;
2367         }
2368         page = pmd_page(*pmd);
2369         VM_BUG_ON(!page_count(page));
2370         get_page(page);
2371         spin_unlock(&mm->page_table_lock);
2372
2373         split_huge_page(page);
2374
2375         put_page(page);
2376         BUG_ON(pmd_trans_huge(*pmd));
2377 }
2378
2379 static void split_huge_page_address(struct mm_struct *mm,
2380                                     unsigned long address)
2381 {
2382         pgd_t *pgd;
2383         pud_t *pud;
2384         pmd_t *pmd;
2385
2386         VM_BUG_ON(!(address & ~HPAGE_PMD_MASK));
2387
2388         pgd = pgd_offset(mm, address);
2389         if (!pgd_present(*pgd))
2390                 return;
2391
2392         pud = pud_offset(pgd, address);
2393         if (!pud_present(*pud))
2394                 return;
2395
2396         pmd = pmd_offset(pud, address);
2397         if (!pmd_present(*pmd))
2398                 return;
2399         /*
2400          * Caller holds the mmap_sem in write mode, so a huge pmd cannot
2401          * materialize from under us.
2402          */
2403         split_huge_page_pmd(mm, pmd);
2404 }
2405
2406 void __vma_adjust_trans_huge(struct vm_area_struct *vma,
2407                              unsigned long start,
2408                              unsigned long end,
2409                              long adjust_next)
2410 {
2411         /*
2412          * If the new start address isn't hpage aligned and it could
2413          * previously contain a hugepage: check if we need to split
2414          * a huge pmd.
2415          */
2416         if (start & ~HPAGE_PMD_MASK &&
2417             (start & HPAGE_PMD_MASK) >= vma->vm_start &&
2418             (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
2419                 split_huge_page_address(vma->vm_mm, start);
2420
2421         /*
2422          * If the new end address isn't hpage aligned and it could
2423          * previously contain a hugepage: check if we need to split
2424          * a huge pmd.
2425          */
2426         if (end & ~HPAGE_PMD_MASK &&
2427             (end & HPAGE_PMD_MASK) >= vma->vm_start &&
2428             (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
2429                 split_huge_page_address(vma->vm_mm, end);
2430
2431         /*
2432          * If we're also updating vma->vm_next->vm_start, and the new
2433          * vm_next->vm_start isn't hpage aligned and it could previously
2434          * contain a hugepage: check if we need to split a huge pmd.
2435          */
2436         if (adjust_next > 0) {
2437                 struct vm_area_struct *next = vma->vm_next;
2438                 unsigned long nstart = next->vm_start;
2439                 nstart += adjust_next << PAGE_SHIFT;
2440                 if (nstart & ~HPAGE_PMD_MASK &&
2441                     (nstart & HPAGE_PMD_MASK) >= next->vm_start &&
2442                     (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
2443                         split_huge_page_address(next->vm_mm, nstart);
2444         }
2445 }
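
Each of the three checks above fires only when a boundary is not hugepage aligned yet the hugepage-aligned region around it still lies wholly inside the vma, i.e. when a huge pmd could straddle the new boundary. A small runnable sketch of that predicate, again assuming the 2 MiB x86-64 geometry with illustrative _EX names:

#include <stdio.h>

#define HPAGE_PMD_SIZE_EX (2UL << 20)
#define HPAGE_PMD_MASK_EX (~(HPAGE_PMD_SIZE_EX - 1))

/* same condition applied to start, end and next->vm_start above */
static int needs_split(unsigned long addr,
		       unsigned long vm_start, unsigned long vm_end)
{
	return (addr & ~HPAGE_PMD_MASK_EX) &&
	       (addr & HPAGE_PMD_MASK_EX) >= vm_start &&
	       (addr & HPAGE_PMD_MASK_EX) + HPAGE_PMD_SIZE_EX <= vm_end;
}

int main(void)
{
	unsigned long vm_start = 0x600000, vm_end = 0xe00000;

	/* aligned boundary: nothing to split */
	printf("%d\n", needs_split(0x800000, vm_start, vm_end)); /* 0 */
	/* unaligned boundary inside a fully covered hugepage: split */
	printf("%d\n", needs_split(0x801000, vm_start, vm_end)); /* 1 */
	return 0;
}
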