1 /*
2  *  Copyright (C) 2009  Red Hat, Inc.
3  *
4  *  This work is licensed under the terms of the GNU GPL, version 2. See
5  *  the COPYING file in the top-level directory.
6  */
7
8 #include <linux/mm.h>
9 #include <linux/sched.h>
10 #include <linux/highmem.h>
11 #include <linux/hugetlb.h>
12 #include <linux/mmu_notifier.h>
13 #include <linux/rmap.h>
14 #include <linux/swap.h>
15 #include <linux/mm_inline.h>
16 #include <linux/kthread.h>
17 #include <linux/khugepaged.h>
18 #include <asm/tlb.h>
19 #include <asm/pgalloc.h>
20 #include "internal.h"
21
22 /*
23  * By default transparent hugepage support is enabled for all mappings
24  * and khugepaged scans all mappings. Defrag is only invoked by
25  * khugepaged hugepage allocations and by page faults inside
26  * MADV_HUGEPAGE regions to avoid the risk of slowing down short lived
27  * allocations.
28  */
29 unsigned long transparent_hugepage_flags __read_mostly =
30         (1<<TRANSPARENT_HUGEPAGE_FLAG)|
31         (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)|
32         (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
33
34 /* default scan 8*512 ptes (or vmas) every 10 seconds */
35 static unsigned int khugepaged_pages_to_scan __read_mostly = HPAGE_PMD_NR*8;
36 static unsigned int khugepaged_pages_collapsed;
37 static unsigned int khugepaged_full_scans;
38 static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
39 /* during fragmentation poll the hugepage allocator once every minute */
40 static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
41 static struct task_struct *khugepaged_thread __read_mostly;
42 static DEFINE_MUTEX(khugepaged_mutex);
43 static DEFINE_SPINLOCK(khugepaged_mm_lock);
44 static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
45 /*
46  * By default, collapse the hugepage if at least one pte is mapped,
47  * just as would have happened had the vma been large enough at
48  * page fault time.
49  */
50 static unsigned int khugepaged_max_ptes_none __read_mostly = HPAGE_PMD_NR-1;
51
52 static int khugepaged(void *none);
53 static int mm_slots_hash_init(void);
54 static int khugepaged_slab_init(void);
55 static void khugepaged_slab_free(void);
56
57 #define MM_SLOTS_HASH_HEADS 1024
58 static struct hlist_head *mm_slots_hash __read_mostly;
59 static struct kmem_cache *mm_slot_cache __read_mostly;
60
61 /**
62  * struct mm_slot - hash lookup from mm to mm_slot
63  * @hash: hash collision list
64  * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
65  * @mm: the mm that this information is valid for
66  */
67 struct mm_slot {
68         struct hlist_node hash;
69         struct list_head mm_node;
70         struct mm_struct *mm;
71 };
72
73 /**
74  * struct khugepaged_scan - cursor for scanning
75  * @mm_head: the head of the mm list to scan
76  * @mm_slot: the current mm_slot we are scanning
77  * @address: the next address inside that to be scanned
78  *
79  * There is only one khugepaged_scan instance of this cursor structure.
80  */
81 struct khugepaged_scan {
82         struct list_head mm_head;
83         struct mm_slot *mm_slot;
84         unsigned long address;
85 } khugepaged_scan = {
86         .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
87 };
88
89
90 static int set_recommended_min_free_kbytes(void)
91 {
92         struct zone *zone;
93         int nr_zones = 0;
94         unsigned long recommended_min;
95         extern int min_free_kbytes;
96
97         if (!test_bit(TRANSPARENT_HUGEPAGE_FLAG,
98                       &transparent_hugepage_flags) &&
99             !test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
100                       &transparent_hugepage_flags))
101                 return 0;
102
103         for_each_populated_zone(zone)
104                 nr_zones++;
105
106         /* Make sure at least 2 hugepages are free for MIGRATE_RESERVE */
107         recommended_min = pageblock_nr_pages * nr_zones * 2;
108
109         /*
110          * Make sure that on average at least two pageblocks are almost free
111          * of another type, one for a migratetype to fall back to and a
112          * second to avoid subsequent fallbacks of other types. There are 3
113          * MIGRATE_TYPES we care about.
114          */
115         recommended_min += pageblock_nr_pages * nr_zones *
116                            MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
117
118         /* don't ever allow reserving more than 5% of the lowmem */
119         recommended_min = min(recommended_min,
120                               (unsigned long) nr_free_buffer_pages() / 20);
121         recommended_min <<= (PAGE_SHIFT-10);
122
123         if (recommended_min > min_free_kbytes)
124                 min_free_kbytes = recommended_min;
125         setup_per_zone_wmarks();
126         return 0;
127 }
128 late_initcall(set_recommended_min_free_kbytes);
129
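/*
 * Start the khugepaged kernel thread when hugepages are enabled;
 * otherwise wake it up so it can notice it should exit.
 */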
130 static int start_khugepaged(void)
131 {
132         int err = 0;
133         if (khugepaged_enabled()) {
134                 int wakeup;
135                 if (unlikely(!mm_slot_cache || !mm_slots_hash)) {
136                         err = -ENOMEM;
137                         goto out;
138                 }
139                 mutex_lock(&khugepaged_mutex);
140                 if (!khugepaged_thread)
141                         khugepaged_thread = kthread_run(khugepaged, NULL,
142                                                         "khugepaged");
143                 if (unlikely(IS_ERR(khugepaged_thread))) {
144                         printk(KERN_ERR
145                                "khugepaged: kthread_run(khugepaged) failed\n");
146                         err = PTR_ERR(khugepaged_thread);
147                         khugepaged_thread = NULL;
148                 }
149                 wakeup = !list_empty(&khugepaged_scan.mm_head);
150                 mutex_unlock(&khugepaged_mutex);
151                 if (wakeup)
152                         wake_up_interruptible(&khugepaged_wait);
153
154                 set_recommended_min_free_kbytes();
155         } else
156                 /* wakeup to exit */
157                 wake_up_interruptible(&khugepaged_wait);
158 out:
159         return err;
160 }
161
162 #ifdef CONFIG_SYSFS
163
164 static ssize_t double_flag_show(struct kobject *kobj,
165                                 struct kobj_attribute *attr, char *buf,
166                                 enum transparent_hugepage_flag enabled,
167                                 enum transparent_hugepage_flag req_madv)
168 {
169         if (test_bit(enabled, &transparent_hugepage_flags)) {
170                 VM_BUG_ON(test_bit(req_madv, &transparent_hugepage_flags));
171                 return sprintf(buf, "[always] madvise never\n");
172         } else if (test_bit(req_madv, &transparent_hugepage_flags))
173                 return sprintf(buf, "always [madvise] never\n");
174         else
175                 return sprintf(buf, "always madvise [never]\n");
176 }
177 static ssize_t double_flag_store(struct kobject *kobj,
178                                  struct kobj_attribute *attr,
179                                  const char *buf, size_t count,
180                                  enum transparent_hugepage_flag enabled,
181                                  enum transparent_hugepage_flag req_madv)
182 {
183         if (!memcmp("always", buf,
184                     min(sizeof("always")-1, count))) {
185                 set_bit(enabled, &transparent_hugepage_flags);
186                 clear_bit(req_madv, &transparent_hugepage_flags);
187         } else if (!memcmp("madvise", buf,
188                            min(sizeof("madvise")-1, count))) {
189                 clear_bit(enabled, &transparent_hugepage_flags);
190                 set_bit(req_madv, &transparent_hugepage_flags);
191         } else if (!memcmp("never", buf,
192                            min(sizeof("never")-1, count))) {
193                 clear_bit(enabled, &transparent_hugepage_flags);
194                 clear_bit(req_madv, &transparent_hugepage_flags);
195         } else
196                 return -EINVAL;
197
198         return count;
199 }
200
201 static ssize_t enabled_show(struct kobject *kobj,
202                             struct kobj_attribute *attr, char *buf)
203 {
204         return double_flag_show(kobj, attr, buf,
205                                 TRANSPARENT_HUGEPAGE_FLAG,
206                                 TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
207 }
208 static ssize_t enabled_store(struct kobject *kobj,
209                              struct kobj_attribute *attr,
210                              const char *buf, size_t count)
211 {
212         ssize_t ret;
213
214         ret = double_flag_store(kobj, attr, buf, count,
215                                 TRANSPARENT_HUGEPAGE_FLAG,
216                                 TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
217
218         if (ret > 0) {
219                 int err = start_khugepaged();
220                 if (err)
221                         ret = err;
222         }
223
224         if (ret > 0 &&
225             (test_bit(TRANSPARENT_HUGEPAGE_FLAG,
226                       &transparent_hugepage_flags) ||
227              test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
228                       &transparent_hugepage_flags)))
229                 set_recommended_min_free_kbytes();
230
231         return ret;
232 }
233 static struct kobj_attribute enabled_attr =
234         __ATTR(enabled, 0644, enabled_show, enabled_store);
235
236 static ssize_t single_flag_show(struct kobject *kobj,
237                                 struct kobj_attribute *attr, char *buf,
238                                 enum transparent_hugepage_flag flag)
239 {
240         if (test_bit(flag, &transparent_hugepage_flags))
241                 return sprintf(buf, "[yes] no\n");
242         else
243                 return sprintf(buf, "yes [no]\n");
244 }
245 static ssize_t single_flag_store(struct kobject *kobj,
246                                  struct kobj_attribute *attr,
247                                  const char *buf, size_t count,
248                                  enum transparent_hugepage_flag flag)
249 {
250         if (!memcmp("yes", buf,
251                     min(sizeof("yes")-1, count))) {
252                 set_bit(flag, &transparent_hugepage_flags);
253         } else if (!memcmp("no", buf,
254                            min(sizeof("no")-1, count))) {
255                 clear_bit(flag, &transparent_hugepage_flags);
256         } else
257                 return -EINVAL;
258
259         return count;
260 }
261
262 /*
263  * Currently defrag only controls whether the allocation may wait
264  * (__GFP_WAIT). A blind __GFP_REPEAT would be too aggressive: it's never
265  * worth swapping tons of memory just to allocate one more hugepage.
266  */
267 static ssize_t defrag_show(struct kobject *kobj,
268                            struct kobj_attribute *attr, char *buf)
269 {
270         return double_flag_show(kobj, attr, buf,
271                                 TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
272                                 TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
273 }
274 static ssize_t defrag_store(struct kobject *kobj,
275                             struct kobj_attribute *attr,
276                             const char *buf, size_t count)
277 {
278         return double_flag_store(kobj, attr, buf, count,
279                                  TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
280                                  TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
281 }
282 static struct kobj_attribute defrag_attr =
283         __ATTR(defrag, 0644, defrag_show, defrag_store);
284
285 #ifdef CONFIG_DEBUG_VM
286 static ssize_t debug_cow_show(struct kobject *kobj,
287                                 struct kobj_attribute *attr, char *buf)
288 {
289         return single_flag_show(kobj, attr, buf,
290                                 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
291 }
292 static ssize_t debug_cow_store(struct kobject *kobj,
293                                struct kobj_attribute *attr,
294                                const char *buf, size_t count)
295 {
296         return single_flag_store(kobj, attr, buf, count,
297                                  TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
298 }
299 static struct kobj_attribute debug_cow_attr =
300         __ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store);
301 #endif /* CONFIG_DEBUG_VM */
302
303 static struct attribute *hugepage_attr[] = {
304         &enabled_attr.attr,
305         &defrag_attr.attr,
306 #ifdef CONFIG_DEBUG_VM
307         &debug_cow_attr.attr,
308 #endif
309         NULL,
310 };
311
312 static struct attribute_group hugepage_attr_group = {
313         .attrs = hugepage_attr,
314 };
315
316 static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
317                                          struct kobj_attribute *attr,
318                                          char *buf)
319 {
320         return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
321 }
322
323 static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
324                                           struct kobj_attribute *attr,
325                                           const char *buf, size_t count)
326 {
327         unsigned long msecs;
328         int err;
329
330         err = strict_strtoul(buf, 10, &msecs);
331         if (err || msecs > UINT_MAX)
332                 return -EINVAL;
333
334         khugepaged_scan_sleep_millisecs = msecs;
335         wake_up_interruptible(&khugepaged_wait);
336
337         return count;
338 }
339 static struct kobj_attribute scan_sleep_millisecs_attr =
340         __ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
341                scan_sleep_millisecs_store);
342
343 static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
344                                           struct kobj_attribute *attr,
345                                           char *buf)
346 {
347         return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
348 }
349
350 static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
351                                            struct kobj_attribute *attr,
352                                            const char *buf, size_t count)
353 {
354         unsigned long msecs;
355         int err;
356
357         err = strict_strtoul(buf, 10, &msecs);
358         if (err || msecs > UINT_MAX)
359                 return -EINVAL;
360
361         khugepaged_alloc_sleep_millisecs = msecs;
362         wake_up_interruptible(&khugepaged_wait);
363
364         return count;
365 }
366 static struct kobj_attribute alloc_sleep_millisecs_attr =
367         __ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
368                alloc_sleep_millisecs_store);
369
370 static ssize_t pages_to_scan_show(struct kobject *kobj,
371                                   struct kobj_attribute *attr,
372                                   char *buf)
373 {
374         return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
375 }
376 static ssize_t pages_to_scan_store(struct kobject *kobj,
377                                    struct kobj_attribute *attr,
378                                    const char *buf, size_t count)
379 {
380         int err;
381         unsigned long pages;
382
383         err = strict_strtoul(buf, 10, &pages);
384         if (err || !pages || pages > UINT_MAX)
385                 return -EINVAL;
386
387         khugepaged_pages_to_scan = pages;
388
389         return count;
390 }
391 static struct kobj_attribute pages_to_scan_attr =
392         __ATTR(pages_to_scan, 0644, pages_to_scan_show,
393                pages_to_scan_store);
394
395 static ssize_t pages_collapsed_show(struct kobject *kobj,
396                                     struct kobj_attribute *attr,
397                                     char *buf)
398 {
399         return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
400 }
401 static struct kobj_attribute pages_collapsed_attr =
402         __ATTR_RO(pages_collapsed);
403
404 static ssize_t full_scans_show(struct kobject *kobj,
405                                struct kobj_attribute *attr,
406                                char *buf)
407 {
408         return sprintf(buf, "%u\n", khugepaged_full_scans);
409 }
410 static struct kobj_attribute full_scans_attr =
411         __ATTR_RO(full_scans);
412
413 static ssize_t khugepaged_defrag_show(struct kobject *kobj,
414                                       struct kobj_attribute *attr, char *buf)
415 {
416         return single_flag_show(kobj, attr, buf,
417                                 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
418 }
419 static ssize_t khugepaged_defrag_store(struct kobject *kobj,
420                                        struct kobj_attribute *attr,
421                                        const char *buf, size_t count)
422 {
423         return single_flag_store(kobj, attr, buf, count,
424                                  TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
425 }
426 static struct kobj_attribute khugepaged_defrag_attr =
427         __ATTR(defrag, 0644, khugepaged_defrag_show,
428                khugepaged_defrag_store);
429
430 /*
431  * max_ptes_none controls whether khugepaged should collapse hugepages over
432  * any unmapped ptes, in turn potentially increasing the memory
433  * footprint of the vmas. When max_ptes_none is 0 khugepaged will not
434  * reduce the available free memory in the system as it
435  * runs. Increasing max_ptes_none will instead potentially reduce the
436  * free memory in the system during the khugepaged scan.
437  */
438 static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
439                                              struct kobj_attribute *attr,
440                                              char *buf)
441 {
442         return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
443 }
444 static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
445                                               struct kobj_attribute *attr,
446                                               const char *buf, size_t count)
447 {
448         int err;
449         unsigned long max_ptes_none;
450
451         err = strict_strtoul(buf, 10, &max_ptes_none);
452         if (err || max_ptes_none > HPAGE_PMD_NR-1)
453                 return -EINVAL;
454
455         khugepaged_max_ptes_none = max_ptes_none;
456
457         return count;
458 }
459 static struct kobj_attribute khugepaged_max_ptes_none_attr =
460         __ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
461                khugepaged_max_ptes_none_store);
462
463 static struct attribute *khugepaged_attr[] = {
464         &khugepaged_defrag_attr.attr,
465         &khugepaged_max_ptes_none_attr.attr,
466         &pages_to_scan_attr.attr,
467         &pages_collapsed_attr.attr,
468         &full_scans_attr.attr,
469         &scan_sleep_millisecs_attr.attr,
470         &alloc_sleep_millisecs_attr.attr,
471         NULL,
472 };
473
474 static struct attribute_group khugepaged_attr_group = {
475         .attrs = khugepaged_attr,
476         .name = "khugepaged",
477 };
478 #endif /* CONFIG_SYSFS */
479
480 static int __init hugepage_init(void)
481 {
482         int err;
483 #ifdef CONFIG_SYSFS
484         static struct kobject *hugepage_kobj;
485
486         err = -ENOMEM;
487         hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
488         if (unlikely(!hugepage_kobj)) {
489                 printk(KERN_ERR "hugepage: failed kobject create\n");
490                 goto out;
491         }
492
493         err = sysfs_create_group(hugepage_kobj, &hugepage_attr_group);
494         if (err) {
495                 printk(KERN_ERR "hugepage: failed to register hugepage group\n");
496                 goto out;
497         }
498
499         err = sysfs_create_group(hugepage_kobj, &khugepaged_attr_group);
500         if (err) {
501                 printk(KERN_ERR "hugepage: failed to register khugepaged group\n");
502                 goto out;
503         }
504 #endif
505
506         err = khugepaged_slab_init();
507         if (err)
508                 goto out;
509
510         err = mm_slots_hash_init();
511         if (err) {
512                 khugepaged_slab_free();
513                 goto out;
514         }
515
516         start_khugepaged();
517
518         set_recommended_min_free_kbytes();
519
520 out:
521         return err;
522 }
523 module_init(hugepage_init)
524
525 static int __init setup_transparent_hugepage(char *str)
526 {
527         int ret = 0;
528         if (!str)
529                 goto out;
530         if (!strcmp(str, "always")) {
531                 set_bit(TRANSPARENT_HUGEPAGE_FLAG,
532                         &transparent_hugepage_flags);
533                 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
534                           &transparent_hugepage_flags);
535                 ret = 1;
536         } else if (!strcmp(str, "madvise")) {
537                 clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
538                           &transparent_hugepage_flags);
539                 set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
540                         &transparent_hugepage_flags);
541                 ret = 1;
542         } else if (!strcmp(str, "never")) {
543                 clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
544                           &transparent_hugepage_flags);
545                 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
546                           &transparent_hugepage_flags);
547                 ret = 1;
548         }
549 out:
550         if (!ret)
551                 printk(KERN_WARNING
552                        "transparent_hugepage= cannot parse, ignored\n");
553         return ret;
554 }
555 __setup("transparent_hugepage=", setup_transparent_hugepage);
556
557 static void prepare_pmd_huge_pte(pgtable_t pgtable,
558                                  struct mm_struct *mm)
559 {
560         assert_spin_locked(&mm->page_table_lock);
561
562         /* FIFO */
563         if (!mm->pmd_huge_pte)
564                 INIT_LIST_HEAD(&pgtable->lru);
565         else
566                 list_add(&pgtable->lru, &mm->pmd_huge_pte->lru);
567         mm->pmd_huge_pte = pgtable;
568 }
569
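/* Only mark the huge pmd writable if the vma permits writes. */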
570 static inline pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
571 {
572         if (likely(vma->vm_flags & VM_WRITE))
573                 pmd = pmd_mkwrite(pmd);
574         return pmd;
575 }
576
577 static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
578                                         struct vm_area_struct *vma,
579                                         unsigned long haddr, pmd_t *pmd,
580                                         struct page *page)
581 {
582         int ret = 0;
583         pgtable_t pgtable;
584
585         VM_BUG_ON(!PageCompound(page));
586         pgtable = pte_alloc_one(mm, haddr);
587         if (unlikely(!pgtable)) {
588                 mem_cgroup_uncharge_page(page);
589                 put_page(page);
590                 return VM_FAULT_OOM;
591         }
592
593         clear_huge_page(page, haddr, HPAGE_PMD_NR);
594         __SetPageUptodate(page);
595
596         spin_lock(&mm->page_table_lock);
597         if (unlikely(!pmd_none(*pmd))) {
598                 spin_unlock(&mm->page_table_lock);
599                 mem_cgroup_uncharge_page(page);
600                 put_page(page);
601                 pte_free(mm, pgtable);
602         } else {
603                 pmd_t entry;
604                 entry = mk_pmd(page, vma->vm_page_prot);
605                 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
606                 entry = pmd_mkhuge(entry);
607                 /*
608                  * The spinlocking to take the lru_lock inside
609                  * page_add_new_anon_rmap() acts as a full memory
610                  * barrier to be sure clear_huge_page writes become
611                  * visible after the set_pmd_at() write.
612                  */
613                 page_add_new_anon_rmap(page, vma, haddr);
614                 set_pmd_at(mm, haddr, pmd, entry);
615                 prepare_pmd_huge_pte(pgtable, mm);
616                 add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
617                 spin_unlock(&mm->page_table_lock);
618         }
619
620         return ret;
621 }
622
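/*
 * Without "defrag", drop __GFP_WAIT so the hugepage allocation never
 * sleeps (no direct reclaim/compaction) and simply fails fast instead.
 */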
623 static inline gfp_t alloc_hugepage_gfpmask(int defrag)
624 {
625         return GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT);
626 }
627
628 static inline struct page *alloc_hugepage_vma(int defrag,
629                                               struct vm_area_struct *vma,
630                                               unsigned long haddr)
631 {
632         return alloc_pages_vma(alloc_hugepage_gfpmask(defrag),
633                                HPAGE_PMD_ORDER, vma, haddr);
634 }
635
636 #ifndef CONFIG_NUMA
637 static inline struct page *alloc_hugepage(int defrag)
638 {
639         return alloc_pages(alloc_hugepage_gfpmask(defrag),
640                            HPAGE_PMD_ORDER);
641 }
642 #endif
643
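/*
 * Anonymous fault on a pmd-sized hole: try to install a hugepage if the
 * vma covers the whole aligned range, otherwise (or on allocation
 * failure) fall back to handle_pte_fault() on regular ptes.
 */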
644 int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
645                                unsigned long address, pmd_t *pmd,
646                                unsigned int flags)
647 {
648         struct page *page;
649         unsigned long haddr = address & HPAGE_PMD_MASK;
650         pte_t *pte;
651
652         if (haddr >= vma->vm_start && haddr + HPAGE_PMD_SIZE <= vma->vm_end) {
653                 if (unlikely(anon_vma_prepare(vma)))
654                         return VM_FAULT_OOM;
655                 if (unlikely(khugepaged_enter(vma)))
656                         return VM_FAULT_OOM;
657                 page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
658                                           vma, haddr);
659                 if (unlikely(!page))
660                         goto out;
661                 if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
662                         put_page(page);
663                         goto out;
664                 }
665
666                 return __do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page);
667         }
668 out:
669         /*
670          * Use __pte_alloc instead of pte_alloc_map, because we can't
671          * run pte_offset_map on the pmd, if a huge pmd could
672          * materialize from under us from a different thread.
673          */
674         if (unlikely(__pte_alloc(mm, vma, pmd, address)))
675                 return VM_FAULT_OOM;
676         /* if a huge pmd materialized from under us just retry later */
677         if (unlikely(pmd_trans_huge(*pmd)))
678                 return 0;
679         /*
680          * A regular pmd is established and it can't morph into a huge pmd
681          * from under us anymore at this point because we hold the mmap_sem
682          * read mode and khugepaged takes it in write mode. So now it's
683          * safe to run pte_offset_map().
684          */
685         pte = pte_offset_map(pmd, address);
686         return handle_pte_fault(mm, vma, address, pte, pmd, flags);
687 }
688
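/*
 * Duplicate a huge pmd into the child mm at fork time, write protecting
 * both copies for COW. Returns -EAGAIN if the source pmd is no longer
 * huge or is being split.
 */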
689 int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
690                   pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
691                   struct vm_area_struct *vma)
692 {
693         struct page *src_page;
694         pmd_t pmd;
695         pgtable_t pgtable;
696         int ret;
697
698         ret = -ENOMEM;
699         pgtable = pte_alloc_one(dst_mm, addr);
700         if (unlikely(!pgtable))
701                 goto out;
702
703         spin_lock(&dst_mm->page_table_lock);
704         spin_lock_nested(&src_mm->page_table_lock, SINGLE_DEPTH_NESTING);
705
706         ret = -EAGAIN;
707         pmd = *src_pmd;
708         if (unlikely(!pmd_trans_huge(pmd))) {
709                 pte_free(dst_mm, pgtable);
710                 goto out_unlock;
711         }
712         if (unlikely(pmd_trans_splitting(pmd))) {
713                 /* split huge page running from under us */
714                 spin_unlock(&src_mm->page_table_lock);
715                 spin_unlock(&dst_mm->page_table_lock);
716                 pte_free(dst_mm, pgtable);
717
718                 wait_split_huge_page(vma->anon_vma, src_pmd); /* src_vma */
719                 goto out;
720         }
721         src_page = pmd_page(pmd);
722         VM_BUG_ON(!PageHead(src_page));
723         get_page(src_page);
724         page_dup_rmap(src_page);
725         add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
726
727         pmdp_set_wrprotect(src_mm, addr, src_pmd);
728         pmd = pmd_mkold(pmd_wrprotect(pmd));
729         set_pmd_at(dst_mm, addr, dst_pmd, pmd);
730         prepare_pmd_huge_pte(pgtable, dst_mm);
731
732         ret = 0;
733 out_unlock:
734         spin_unlock(&src_mm->page_table_lock);
735         spin_unlock(&dst_mm->page_table_lock);
736 out:
737         return ret;
738 }
739
740 /* no "address" argument, so this destroys page coloring on some archs */
741 pgtable_t get_pmd_huge_pte(struct mm_struct *mm)
742 {
743         pgtable_t pgtable;
744
745         assert_spin_locked(&mm->page_table_lock);
746
747         /* FIFO */
748         pgtable = mm->pmd_huge_pte;
749         if (list_empty(&pgtable->lru))
750                 mm->pmd_huge_pte = NULL;
751         else {
752                 mm->pmd_huge_pte = list_entry(pgtable->lru.next,
753                                               struct page, lru);
754                 list_del(&pgtable->lru);
755         }
756         return pgtable;
757 }
758
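/*
 * COW fallback used when no new hugepage could be allocated: copy the
 * data into HPAGE_PMD_NR regular pages and map them with normal ptes.
 */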
759 static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
760                                         struct vm_area_struct *vma,
761                                         unsigned long address,
762                                         pmd_t *pmd, pmd_t orig_pmd,
763                                         struct page *page,
764                                         unsigned long haddr)
765 {
766         pgtable_t pgtable;
767         pmd_t _pmd;
768         int ret = 0, i;
769         struct page **pages;
770
771         pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR,
772                         GFP_KERNEL);
773         if (unlikely(!pages)) {
774                 ret |= VM_FAULT_OOM;
775                 goto out;
776         }
777
778         for (i = 0; i < HPAGE_PMD_NR; i++) {
779                 pages[i] = alloc_page_vma(GFP_HIGHUSER_MOVABLE,
780                                           vma, address);
781                 if (unlikely(!pages[i] ||
782                              mem_cgroup_newpage_charge(pages[i], mm,
783                                                        GFP_KERNEL))) {
784                         if (pages[i])
785                                 put_page(pages[i]);
786                         mem_cgroup_uncharge_start();
787                         while (--i >= 0) {
788                                 mem_cgroup_uncharge_page(pages[i]);
789                                 put_page(pages[i]);
790                         }
791                         mem_cgroup_uncharge_end();
792                         kfree(pages);
793                         ret |= VM_FAULT_OOM;
794                         goto out;
795                 }
796         }
797
798         for (i = 0; i < HPAGE_PMD_NR; i++) {
799                 copy_user_highpage(pages[i], page + i,
800                                    haddr + PAGE_SHIFT*i, vma);
801                 __SetPageUptodate(pages[i]);
802                 cond_resched();
803         }
804
805         spin_lock(&mm->page_table_lock);
806         if (unlikely(!pmd_same(*pmd, orig_pmd)))
807                 goto out_free_pages;
808         VM_BUG_ON(!PageHead(page));
809
810         pmdp_clear_flush_notify(vma, haddr, pmd);
811         /* leave pmd empty until pte is filled */
812
813         pgtable = get_pmd_huge_pte(mm);
814         pmd_populate(mm, &_pmd, pgtable);
815
816         for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
817                 pte_t *pte, entry;
818                 entry = mk_pte(pages[i], vma->vm_page_prot);
819                 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
820                 page_add_new_anon_rmap(pages[i], vma, haddr);
821                 pte = pte_offset_map(&_pmd, haddr);
822                 VM_BUG_ON(!pte_none(*pte));
823                 set_pte_at(mm, haddr, pte, entry);
824                 pte_unmap(pte);
825         }
826         kfree(pages);
827
828         mm->nr_ptes++;
829         smp_wmb(); /* make pte visible before pmd */
830         pmd_populate(mm, pmd, pgtable);
831         page_remove_rmap(page);
832         spin_unlock(&mm->page_table_lock);
833
834         ret |= VM_FAULT_WRITE;
835         put_page(page);
836
837 out:
838         return ret;
839
840 out_free_pages:
841         spin_unlock(&mm->page_table_lock);
842         mem_cgroup_uncharge_start();
843         for (i = 0; i < HPAGE_PMD_NR; i++) {
844                 mem_cgroup_uncharge_page(pages[i]);
845                 put_page(pages[i]);
846         }
847         mem_cgroup_uncharge_end();
848         kfree(pages);
849         goto out;
850 }
851
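/*
 * Write fault on a huge pmd: reuse the page in place if we are the only
 * mapper, otherwise COW into a freshly allocated hugepage (or fall back
 * to copying into regular pages).
 */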
852 int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
853                         unsigned long address, pmd_t *pmd, pmd_t orig_pmd)
854 {
855         int ret = 0;
856         struct page *page, *new_page;
857         unsigned long haddr;
858
859         VM_BUG_ON(!vma->anon_vma);
860         spin_lock(&mm->page_table_lock);
861         if (unlikely(!pmd_same(*pmd, orig_pmd)))
862                 goto out_unlock;
863
864         page = pmd_page(orig_pmd);
865         VM_BUG_ON(!PageCompound(page) || !PageHead(page));
866         haddr = address & HPAGE_PMD_MASK;
867         if (page_mapcount(page) == 1) {
868                 pmd_t entry;
869                 entry = pmd_mkyoung(orig_pmd);
870                 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
871                 if (pmdp_set_access_flags(vma, haddr, pmd, entry,  1))
872                         update_mmu_cache(vma, address, entry);
873                 ret |= VM_FAULT_WRITE;
874                 goto out_unlock;
875         }
876         get_page(page);
877         spin_unlock(&mm->page_table_lock);
878
879         if (transparent_hugepage_enabled(vma) &&
880             !transparent_hugepage_debug_cow())
881                 new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
882                                               vma, haddr);
883         else
884                 new_page = NULL;
885
886         if (unlikely(!new_page)) {
887                 ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
888                                                    pmd, orig_pmd, page, haddr);
889                 put_page(page);
890                 goto out;
891         }
892
893         if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
894                 put_page(new_page);
895                 put_page(page);
896                 ret |= VM_FAULT_OOM;
897                 goto out;
898         }
899
900         copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
901         __SetPageUptodate(new_page);
902
903         spin_lock(&mm->page_table_lock);
904         put_page(page);
905         if (unlikely(!pmd_same(*pmd, orig_pmd))) {
906                 mem_cgroup_uncharge_page(new_page);
907                 put_page(new_page);
908         } else {
909                 pmd_t entry;
910                 VM_BUG_ON(!PageHead(page));
911                 entry = mk_pmd(new_page, vma->vm_page_prot);
912                 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
913                 entry = pmd_mkhuge(entry);
914                 pmdp_clear_flush_notify(vma, haddr, pmd);
915                 page_add_new_anon_rmap(new_page, vma, haddr);
916                 set_pmd_at(mm, haddr, pmd, entry);
917                 update_mmu_cache(vma, address, entry);
918                 page_remove_rmap(page);
919                 put_page(page);
920                 ret |= VM_FAULT_WRITE;
921         }
922 out_unlock:
923         spin_unlock(&mm->page_table_lock);
924 out:
925         return ret;
926 }
927
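/*
 * Return the page mapped by the huge pmd at @addr, honouring FOLL_WRITE,
 * FOLL_TOUCH and FOLL_GET. The caller must hold mm->page_table_lock.
 */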
928 struct page *follow_trans_huge_pmd(struct mm_struct *mm,
929                                    unsigned long addr,
930                                    pmd_t *pmd,
931                                    unsigned int flags)
932 {
933         struct page *page = NULL;
934
935         assert_spin_locked(&mm->page_table_lock);
936
937         if (flags & FOLL_WRITE && !pmd_write(*pmd))
938                 goto out;
939
940         page = pmd_page(*pmd);
941         VM_BUG_ON(!PageHead(page));
942         if (flags & FOLL_TOUCH) {
943                 pmd_t _pmd;
944                 /*
945                  * We should set the dirty bit only for FOLL_WRITE but
946                  * for now the dirty bit in the pmd is meaningless.
947                  * And if the dirty bit ever becomes meaningful and
948                  * we only set it with FOLL_WRITE, an atomic
949                  * set_bit will be required on the pmd to set the
950                  * young bit, instead of the current set_pmd_at.
951                  */
952                 _pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
953                 set_pmd_at(mm, addr & HPAGE_PMD_MASK, pmd, _pmd);
954         }
955         page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
956         VM_BUG_ON(!PageCompound(page));
957         if (flags & FOLL_GET)
958                 get_page(page);
959
960 out:
961         return page;
962 }
963
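/*
 * Tear down a huge pmd: clear it, drop the rmap, free the preallocated
 * page table and hand the hugepage to the mmu_gather for freeing.
 */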
964 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
965                  pmd_t *pmd)
966 {
967         int ret = 0;
968
969         spin_lock(&tlb->mm->page_table_lock);
970         if (likely(pmd_trans_huge(*pmd))) {
971                 if (unlikely(pmd_trans_splitting(*pmd))) {
972                         spin_unlock(&tlb->mm->page_table_lock);
973                         wait_split_huge_page(vma->anon_vma,
974                                              pmd);
975                 } else {
976                         struct page *page;
977                         pgtable_t pgtable;
978                         pgtable = get_pmd_huge_pte(tlb->mm);
979                         page = pmd_page(*pmd);
980                         pmd_clear(pmd);
981                         page_remove_rmap(page);
982                         VM_BUG_ON(page_mapcount(page) < 0);
983                         add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
984                         VM_BUG_ON(!PageHead(page));
985                         spin_unlock(&tlb->mm->page_table_lock);
986                         tlb_remove_page(tlb, page);
987                         pte_free(tlb->mm, pgtable);
988                         ret = 1;
989                 }
990         } else
991                 spin_unlock(&tlb->mm->page_table_lock);
992
993         return ret;
994 }
995
996 int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
997                 unsigned long addr, unsigned long end,
998                 unsigned char *vec)
999 {
1000         int ret = 0;
1001
1002         spin_lock(&vma->vm_mm->page_table_lock);
1003         if (likely(pmd_trans_huge(*pmd))) {
1004                 ret = !pmd_trans_splitting(*pmd);
1005                 spin_unlock(&vma->vm_mm->page_table_lock);
1006                 if (unlikely(!ret))
1007                         wait_split_huge_page(vma->anon_vma, pmd);
1008                 else {
1009                         /*
1010                          * All logical pages in the range are present
1011                          * if backed by a huge page.
1012                          */
1013                         memset(vec, 1, (end - addr) >> PAGE_SHIFT);
1014                 }
1015         } else
1016                 spin_unlock(&vma->vm_mm->page_table_lock);
1017
1018         return ret;
1019 }
1020
1021 int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1022                 unsigned long addr, pgprot_t newprot)
1023 {
1024         struct mm_struct *mm = vma->vm_mm;
1025         int ret = 0;
1026
1027         spin_lock(&mm->page_table_lock);
1028         if (likely(pmd_trans_huge(*pmd))) {
1029                 if (unlikely(pmd_trans_splitting(*pmd))) {
1030                         spin_unlock(&mm->page_table_lock);
1031                         wait_split_huge_page(vma->anon_vma, pmd);
1032                 } else {
1033                         pmd_t entry;
1034
1035                         entry = pmdp_get_and_clear(mm, addr, pmd);
1036                         entry = pmd_modify(entry, newprot);
1037                         set_pmd_at(mm, addr, pmd, entry);
1038                         spin_unlock(&vma->vm_mm->page_table_lock);
1039                         flush_tlb_range(vma, addr, addr + HPAGE_PMD_SIZE);
1040                         ret = 1;
1041                 }
1042         } else
1043                 spin_unlock(&vma->vm_mm->page_table_lock);
1044
1045         return ret;
1046 }
1047
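/*
 * Return the huge pmd mapping @page at @address, or NULL if there is
 * none. @flag only asserts the expected splitting state of the pmd.
 */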
1048 pmd_t *page_check_address_pmd(struct page *page,
1049                               struct mm_struct *mm,
1050                               unsigned long address,
1051                               enum page_check_address_pmd_flag flag)
1052 {
1053         pgd_t *pgd;
1054         pud_t *pud;
1055         pmd_t *pmd, *ret = NULL;
1056
1057         if (address & ~HPAGE_PMD_MASK)
1058                 goto out;
1059
1060         pgd = pgd_offset(mm, address);
1061         if (!pgd_present(*pgd))
1062                 goto out;
1063
1064         pud = pud_offset(pgd, address);
1065         if (!pud_present(*pud))
1066                 goto out;
1067
1068         pmd = pmd_offset(pud, address);
1069         if (pmd_none(*pmd))
1070                 goto out;
1071         if (pmd_page(*pmd) != page)
1072                 goto out;
1073         VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG &&
1074                   pmd_trans_splitting(*pmd));
1075         if (pmd_trans_huge(*pmd)) {
1076                 VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG &&
1077                           !pmd_trans_splitting(*pmd));
1078                 ret = pmd;
1079         }
1080 out:
1081         return ret;
1082 }
1083
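/*
 * Mark the pmd mapping this hugepage in @vma as splitting and flush it.
 * Returns 1 if a huge pmd was found, 0 otherwise.
 */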
1084 static int __split_huge_page_splitting(struct page *page,
1085                                        struct vm_area_struct *vma,
1086                                        unsigned long address)
1087 {
1088         struct mm_struct *mm = vma->vm_mm;
1089         pmd_t *pmd;
1090         int ret = 0;
1091
1092         spin_lock(&mm->page_table_lock);
1093         pmd = page_check_address_pmd(page, mm, address,
1094                                      PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG);
1095         if (pmd) {
1096                 /*
1097                  * We can't temporarily set the pmd to null in order
1098                  * to split it; the pmd must remain marked huge at all
1099                  * times or the VM won't take the pmd_trans_huge paths
1100                  * and it won't wait on the anon_vma->root->lock to
1101                  * serialize against split_huge_page*.
1102                  */
1103                 pmdp_splitting_flush_notify(vma, address, pmd);
1104                 ret = 1;
1105         }
1106         spin_unlock(&mm->page_table_lock);
1107
1108         return ret;
1109 }
1110
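/*
 * Transfer the head page's mapcount, refcounts and relevant flags to the
 * tail pages and break up the compound page, under the lru lock.
 */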
1111 static void __split_huge_page_refcount(struct page *page)
1112 {
1113         int i;
1114         unsigned long head_index = page->index;
1115         struct zone *zone = page_zone(page);
1116
1117         /* prevent PageLRU from going away from under us, and freeze lru stats */
1118         spin_lock_irq(&zone->lru_lock);
1119         compound_lock(page);
1120
1121         for (i = 1; i < HPAGE_PMD_NR; i++) {
1122                 struct page *page_tail = page + i;
1123
1124                 /* tail_page->_count cannot change */
1125                 atomic_sub(atomic_read(&page_tail->_count), &page->_count);
1126                 BUG_ON(page_count(page) <= 0);
1127                 atomic_add(page_mapcount(page) + 1, &page_tail->_count);
1128                 BUG_ON(atomic_read(&page_tail->_count) <= 0);
1129
1130                 /* after clearing PageTail the gup refcount can be released */
1131                 smp_mb();
1132
1133                 page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1134                 page_tail->flags |= (page->flags &
1135                                      ((1L << PG_referenced) |
1136                                       (1L << PG_swapbacked) |
1137                                       (1L << PG_mlocked) |
1138                                       (1L << PG_uptodate)));
1139                 page_tail->flags |= (1L << PG_dirty);
1140
1141                 /*
1142                  * 1) clear PageTail before overwriting first_page
1143                  * 2) clear PageTail before clearing PageHead for VM_BUG_ON
1144                  */
1145                 smp_wmb();
1146
1147                 /*
1148                  * __split_huge_page_splitting() already set the
1149                  * splitting bit in all pmd that could map this
1150                  * hugepage, that will ensure no CPU can alter the
1151                  * mapcount on the head page. The mapcount is only
1152                  * accounted in the head page and it has to be
1153                  * transferred to all tail pages in the below code. So
1154                  * for this code to be safe, the split the mapcount
1155                  * for this code to be safe, during the split the mapcount
1156                  * keep changing and reading the page contents while
1157                  * we transfer the mapcount, so the pmd splitting
1158                  * status is achieved setting a reserved bit in the
1159                  * pmd, not by clearing the present bit.
1160                  */
1161                 BUG_ON(page_mapcount(page_tail));
1162                 page_tail->_mapcount = page->_mapcount;
1163
1164                 BUG_ON(page_tail->mapping);
1165                 page_tail->mapping = page->mapping;
1166
1167                 page_tail->index = ++head_index;
1168
1169                 BUG_ON(!PageAnon(page_tail));
1170                 BUG_ON(!PageUptodate(page_tail));
1171                 BUG_ON(!PageDirty(page_tail));
1172                 BUG_ON(!PageSwapBacked(page_tail));
1173
1174                 lru_add_page_tail(zone, page, page_tail);
1175         }
1176
1177         __dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
1178         __mod_zone_page_state(zone, NR_ANON_PAGES, HPAGE_PMD_NR);
1179
1180         ClearPageCompound(page);
1181         compound_unlock(page);
1182         spin_unlock_irq(&zone->lru_lock);
1183
1184         for (i = 1; i < HPAGE_PMD_NR; i++) {
1185                 struct page *page_tail = page + i;
1186                 BUG_ON(page_count(page_tail) <= 0);
1187                 /*
1188                  * Tail pages may be freed if there wasn't any mapping,
1189                  * e.g. if add_to_swap() is running on a lru page that
1190                  * had its mapping zapped. And freeing these pages
1191                  * requires taking the lru_lock so we do the put_page
1192                  * of the tail pages after the split is complete.
1193                  */
1194                 put_page(page_tail);
1195         }
1196
1197         /*
1198          * Only the head page (now become a regular page) is required
1199          * to be pinned by the caller.
1200          */
1201         BUG_ON(page_count(page) <= 0);
1202 }
1203
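/*
 * Replace the (frozen, splitting) huge pmd in @vma with a page table of
 * regular ptes mapping the same subpages.
 */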
1204 static int __split_huge_page_map(struct page *page,
1205                                  struct vm_area_struct *vma,
1206                                  unsigned long address)
1207 {
1208         struct mm_struct *mm = vma->vm_mm;
1209         pmd_t *pmd, _pmd;
1210         int ret = 0, i;
1211         pgtable_t pgtable;
1212         unsigned long haddr;
1213
1214         spin_lock(&mm->page_table_lock);
1215         pmd = page_check_address_pmd(page, mm, address,
1216                                      PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG);
1217         if (pmd) {
1218                 pgtable = get_pmd_huge_pte(mm);
1219                 pmd_populate(mm, &_pmd, pgtable);
1220
1221                 for (i = 0, haddr = address; i < HPAGE_PMD_NR;
1222                      i++, haddr += PAGE_SIZE) {
1223                         pte_t *pte, entry;
1224                         BUG_ON(PageCompound(page+i));
1225                         entry = mk_pte(page + i, vma->vm_page_prot);
1226                         entry = maybe_mkwrite(pte_mkdirty(entry), vma);
1227                         if (!pmd_write(*pmd))
1228                                 entry = pte_wrprotect(entry);
1229                         else
1230                                 BUG_ON(page_mapcount(page) != 1);
1231                         if (!pmd_young(*pmd))
1232                                 entry = pte_mkold(entry);
1233                         pte = pte_offset_map(&_pmd, haddr);
1234                         BUG_ON(!pte_none(*pte));
1235                         set_pte_at(mm, haddr, pte, entry);
1236                         pte_unmap(pte);
1237                 }
1238
1239                 mm->nr_ptes++;
1240                 smp_wmb(); /* make pte visible before pmd */
1241                 /*
1242                  * Up to this point the pmd is present and huge and
1243                  * userland has the whole access to the hugepage
1244                  * during the split (which happens in place). If we
1245                  * overwrite the pmd with the not-huge version
1246                  * pointing to the pte here (which of course we could
1247                  * if all CPUs were bug free), userland could trigger
1248                  * a small page size TLB miss on the small sized TLB
1249                  * while the hugepage TLB entry is still established
1250                  * in the huge TLB. Some CPU doesn't like that. See
1251                  * http://support.amd.com/us/Processor_TechDocs/41322.pdf,
1252                  * Erratum 383 on page 93. Intel should be safe, but it
1253                  * also warns that it's only safe if the permission
1254                  * and cache attributes of the two entries loaded in
1255                  * the TLB are identical (which should be the case
1256                  * here). But it is generally safer to never allow
1257                  * small and huge TLB entries for the same virtual
1258                  * address to be loaded simultaneously. So instead of
1259                  * doing "pmd_populate(); flush_tlb_range();" we first
1260                  * mark the current pmd notpresent (atomically because
1261                  * here the pmd_trans_huge and pmd_trans_splitting
1262                  * must remain set at all times on the pmd until the
1263                  * split is complete for this pmd), then we flush the
1264                  * SMP TLB and finally we write the non-huge version
1265                  * of the pmd entry with pmd_populate.
1266                  */
1267                 set_pmd_at(mm, address, pmd, pmd_mknotpresent(*pmd));
1268                 flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
1269                 pmd_populate(mm, pmd, pgtable);
1270                 ret = 1;
1271         }
1272         spin_unlock(&mm->page_table_lock);
1273
1274         return ret;
1275 }
1276
1277 /* must be called with anon_vma->root->lock held */
1278 static void __split_huge_page(struct page *page,
1279                               struct anon_vma *anon_vma)
1280 {
1281         int mapcount, mapcount2;
1282         struct anon_vma_chain *avc;
1283
1284         BUG_ON(!PageHead(page));
1285         BUG_ON(PageTail(page));
1286
1287         mapcount = 0;
1288         list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
1289                 struct vm_area_struct *vma = avc->vma;
1290                 unsigned long addr = vma_address(page, vma);
1291                 BUG_ON(is_vma_temporary_stack(vma));
1292                 if (addr == -EFAULT)
1293                         continue;
1294                 mapcount += __split_huge_page_splitting(page, vma, addr);
1295         }
1296         /*
1297          * It is critical that new vmas are added to the tail of the
1298          * anon_vma list. This guarantees that if copy_huge_pmd() runs
1299          * and establishes a child pmd before
1300          * __split_huge_page_splitting() freezes the parent pmd (so if
1301          * we fail to prevent copy_huge_pmd() from running until the
1302          * whole __split_huge_page() is complete), we will still see
1303          * the newly established pmd of the child later during the
1304          * walk, to be able to set it as pmd_trans_splitting too.
1305          */
1306         if (mapcount != page_mapcount(page))
1307                 printk(KERN_ERR "mapcount %d page_mapcount %d\n",
1308                        mapcount, page_mapcount(page));
1309         BUG_ON(mapcount != page_mapcount(page));
1310
1311         __split_huge_page_refcount(page);
1312
1313         mapcount2 = 0;
1314         list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
1315                 struct vm_area_struct *vma = avc->vma;
1316                 unsigned long addr = vma_address(page, vma);
1317                 BUG_ON(is_vma_temporary_stack(vma));
1318                 if (addr == -EFAULT)
1319                         continue;
1320                 mapcount2 += __split_huge_page_map(page, vma, addr);
1321         }
1322         if (mapcount != mapcount2)
1323                 printk(KERN_ERR "mapcount %d mapcount2 %d page_mapcount %d\n",
1324                        mapcount, mapcount2, page_mapcount(page));
1325         BUG_ON(mapcount != mapcount2);
1326 }
1327
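/*
 * Split a transparent hugepage into regular pages. Returns 0 on success
 * (or if the page was already split), non-zero if the anon_vma could not
 * be looked up.
 */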
1328 int split_huge_page(struct page *page)
1329 {
1330         struct anon_vma *anon_vma;
1331         int ret = 1;
1332
1333         BUG_ON(!PageAnon(page));
1334         anon_vma = page_lock_anon_vma(page);
1335         if (!anon_vma)
1336                 goto out;
1337         ret = 0;
1338         if (!PageCompound(page))
1339                 goto out_unlock;
1340
1341         BUG_ON(!PageSwapBacked(page));
1342         __split_huge_page(page, anon_vma);
1343
1344         BUG_ON(PageCompound(page));
1345 out_unlock:
1346         page_unlock_anon_vma(anon_vma);
1347 out:
1348         return ret;
1349 }
1350
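/*
 * madvise() support: set VM_HUGEPAGE on the vma's flags unless the
 * mapping is shared, special or otherwise unsuitable for transparent
 * hugepages.
 */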
1351 int hugepage_madvise(unsigned long *vm_flags)
1352 {
1353         /*
1354          * Be somewhat over-protective like KSM for now!
1355          */
1356         if (*vm_flags & (VM_HUGEPAGE | VM_SHARED  | VM_MAYSHARE   |
1357                          VM_PFNMAP   | VM_IO      | VM_DONTEXPAND |
1358                          VM_RESERVED | VM_HUGETLB | VM_INSERTPAGE |
1359                          VM_MIXEDMAP | VM_SAO))
1360                 return -EINVAL;
1361
1362         *vm_flags |= VM_HUGEPAGE;
1363
1364         return 0;
1365 }
1366
1367 static int __init khugepaged_slab_init(void)
1368 {
1369         mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
1370                                           sizeof(struct mm_slot),
1371                                           __alignof__(struct mm_slot), 0, NULL);
1372         if (!mm_slot_cache)
1373                 return -ENOMEM;
1374
1375         return 0;
1376 }
1377
1378 static void __init khugepaged_slab_free(void)
1379 {
1380         kmem_cache_destroy(mm_slot_cache);
1381         mm_slot_cache = NULL;
1382 }
1383
1384 static inline struct mm_slot *alloc_mm_slot(void)
1385 {
1386         if (!mm_slot_cache)     /* initialization failed */
1387                 return NULL;
1388         return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
1389 }
1390
1391 static inline void free_mm_slot(struct mm_slot *mm_slot)
1392 {
1393         kmem_cache_free(mm_slot_cache, mm_slot);
1394 }
1395
1396 static int __init mm_slots_hash_init(void)
1397 {
1398         mm_slots_hash = kzalloc(MM_SLOTS_HASH_HEADS * sizeof(struct hlist_head),
1399                                 GFP_KERNEL);
1400         if (!mm_slots_hash)
1401                 return -ENOMEM;
1402         return 0;
1403 }
1404
1405 #if 0
1406 static void __init mm_slots_hash_free(void)
1407 {
1408         kfree(mm_slots_hash);
1409         mm_slots_hash = NULL;
1410 }
1411 #endif
1412
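/*
 * Look up the mm_slot of an mm, hashing on the mm_struct address.
 * The hash is only modified under khugepaged_mm_lock.
 */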
1413 static struct mm_slot *get_mm_slot(struct mm_struct *mm)
1414 {
1415         struct mm_slot *mm_slot;
1416         struct hlist_head *bucket;
1417         struct hlist_node *node;
1418
1419         bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
1420                                 % MM_SLOTS_HASH_HEADS];
1421         hlist_for_each_entry(mm_slot, node, bucket, hash) {
1422                 if (mm == mm_slot->mm)
1423                         return mm_slot;
1424         }
1425         return NULL;
1426 }
1427
1428 static void insert_to_mm_slots_hash(struct mm_struct *mm,
1429                                     struct mm_slot *mm_slot)
1430 {
1431         struct hlist_head *bucket;
1432
1433         bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
1434                                 % MM_SLOTS_HASH_HEADS];
1435         mm_slot->mm = mm;
1436         hlist_add_head(&mm_slot->hash, bucket);
1437 }
1438
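/* True once the last user of the mm is gone and the mm is exiting. */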
1439 static inline int khugepaged_test_exit(struct mm_struct *mm)
1440 {
1441         return atomic_read(&mm->mm_users) == 0;
1442 }
1443
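/*
 * Register an mm with khugepaged: allocate an mm_slot, hash it, queue
 * it at the tail of the scan list and pin the mm with mm_count.  If
 * MMF_VM_HUGEPAGE was already set the mm is registered already and the
 * slot is freed again.  Wakes khugepaged when the scan list was empty.
 */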
1444 int __khugepaged_enter(struct mm_struct *mm)
1445 {
1446         struct mm_slot *mm_slot;
1447         int wakeup;
1448
1449         mm_slot = alloc_mm_slot();
1450         if (!mm_slot)
1451                 return -ENOMEM;
1452
1453         /* __khugepaged_exit() must not run from under us */
1454         VM_BUG_ON(khugepaged_test_exit(mm));
1455         if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
1456                 free_mm_slot(mm_slot);
1457                 return 0;
1458         }
1459
1460         spin_lock(&khugepaged_mm_lock);
1461         insert_to_mm_slots_hash(mm, mm_slot);
1462         /*
1463          * Insert just behind the scanning cursor, to let the area settle
1464          * down a little.
1465          */
1466         wakeup = list_empty(&khugepaged_scan.mm_head);
1467         list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
1468         spin_unlock(&khugepaged_mm_lock);
1469
1470         atomic_inc(&mm->mm_count);
1471         if (wakeup)
1472                 wake_up_interruptible(&khugepaged_wait);
1473
1474         return 0;
1475 }
1476
1477 int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
1478 {
1479         unsigned long hstart, hend;
1480         if (!vma->anon_vma)
1481                 /*
1482                  * Not yet faulted in so we will register later in the
1483                  * page fault if needed.
1484                  */
1485                 return 0;
1486         if (vma->vm_file || vma->vm_ops)
1487                 /* khugepaged not yet working on file or special mappings */
1488                 return 0;
1489         VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma));
1490         hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
1491         hend = vma->vm_end & HPAGE_PMD_MASK;
1492         if (hstart < hend)
1493                 return khugepaged_enter(vma);
1494         return 0;
1495 }
1496
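/*
 * Unregister an exiting mm.  If khugepaged is not currently scanning
 * this mm, the slot is unhashed and freed right away and the mm_count
 * pin is dropped.  If it is the slot under scan, take and release
 * mmap_sem for writing so we don't return (and free the pagetables)
 * before khugepaged is done with them; the slot is collected later by
 * khugepaged itself.
 */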
1497 void __khugepaged_exit(struct mm_struct *mm)
1498 {
1499         struct mm_slot *mm_slot;
1500         int free = 0;
1501
1502         spin_lock(&khugepaged_mm_lock);
1503         mm_slot = get_mm_slot(mm);
1504         if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
1505                 hlist_del(&mm_slot->hash);
1506                 list_del(&mm_slot->mm_node);
1507                 free = 1;
1508         }
1509
1510         if (free) {
1511                 spin_unlock(&khugepaged_mm_lock);
1512                 clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1513                 free_mm_slot(mm_slot);
1514                 mmdrop(mm);
1515         } else if (mm_slot) {
1516                 spin_unlock(&khugepaged_mm_lock);
1517                 /*
1518                  * This is required to serialize against
1519                  * khugepaged_test_exit() (which is guaranteed to run
1520                  * under mmap_sem read mode). Stop here (after we
1521                  * return, all pagetables will be destroyed) until
1522                  * khugepaged has finished working on the pagetables
1523                  * under the mmap_sem.
1524                  */
1525                 down_write(&mm->mmap_sem);
1526                 up_write(&mm->mmap_sem);
1527         } else
1528                 spin_unlock(&khugepaged_mm_lock);
1529 }
1530
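/*
 * Undo the isolation of one small page: drop the NR_ISOLATED_ANON
 * accounting, unlock it and put it back on the LRU.
 */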
1531 static void release_pte_page(struct page *page)
1532 {
1533         /* 0 stands for page_is_file_cache(page) == false */
1534         dec_zone_page_state(page, NR_ISOLATED_ANON + 0);
1535         unlock_page(page);
1536         putback_lru_page(page);
1537 }
1538
1539 static void release_pte_pages(pte_t *pte, pte_t *_pte)
1540 {
1541         while (--_pte >= pte) {
1542                 pte_t pteval = *_pte;
1543                 if (!pte_none(pteval))
1544                         release_pte_page(pte_page(pteval));
1545         }
1546 }
1547
1548 static void release_all_pte_pages(pte_t *pte)
1549 {
1550         release_pte_pages(pte, pte + HPAGE_PMD_NR);
1551 }
1552
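/*
 * Check and isolate the HPAGE_PMD_NR small pages that are about to be
 * collapsed: at most khugepaged_max_ptes_none ptes may be none, every
 * other pte must be present, writable and map a normal anonymous page
 * that is not pinned by gup, can be locked and can be isolated from
 * the LRU.  Returns 1 if everything was isolated and at least one pte
 * was young, 0 otherwise (with all pages released again).
 */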
1553 static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
1554                                         unsigned long address,
1555                                         pte_t *pte)
1556 {
1557         struct page *page;
1558         pte_t *_pte;
1559         int referenced = 0, isolated = 0, none = 0;
1560         for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
1561              _pte++, address += PAGE_SIZE) {
1562                 pte_t pteval = *_pte;
1563                 if (pte_none(pteval)) {
1564                         if (++none <= khugepaged_max_ptes_none)
1565                                 continue;
1566                         else {
1567                                 release_pte_pages(pte, _pte);
1568                                 goto out;
1569                         }
1570                 }
1571                 if (!pte_present(pteval) || !pte_write(pteval)) {
1572                         release_pte_pages(pte, _pte);
1573                         goto out;
1574                 }
1575                 page = vm_normal_page(vma, address, pteval);
1576                 if (unlikely(!page)) {
1577                         release_pte_pages(pte, _pte);
1578                         goto out;
1579                 }
1580                 VM_BUG_ON(PageCompound(page));
1581                 BUG_ON(!PageAnon(page));
1582                 VM_BUG_ON(!PageSwapBacked(page));
1583
1584                 /* cannot use mapcount: can't collapse if there's a gup pin */
1585                 if (page_count(page) != 1) {
1586                         release_pte_pages(pte, _pte);
1587                         goto out;
1588                 }
1589                 /*
1590                  * We can do it before isolate_lru_page because the
1591                  * page can't be freed from under us. NOTE: PG_lock
1592                  * is needed to serialize against split_huge_page
1593                  * when invoked from the VM.
1594                  */
1595                 if (!trylock_page(page)) {
1596                         release_pte_pages(pte, _pte);
1597                         goto out;
1598                 }
1599                 /*
1600                  * Isolate the page to avoid collapsing a hugepage
1601                  * currently in use by the VM.
1602                  */
1603                 if (isolate_lru_page(page)) {
1604                         unlock_page(page);
1605                         release_pte_pages(pte, _pte);
1606                         goto out;
1607                 }
1608                 /* 0 stands for page_is_file_cache(page) == false */
1609                 inc_zone_page_state(page, NR_ISOLATED_ANON + 0);
1610                 VM_BUG_ON(!PageLocked(page));
1611                 VM_BUG_ON(PageLRU(page));
1612
1613                 /* If no mapped pte is young, don't collapse the page */
1614                 if (pte_young(pteval))
1615                         referenced = 1;
1616         }
1617         if (unlikely(!referenced))
1618                 release_all_pte_pages(pte);
1619         else
1620                 isolated = 1;
1621 out:
1622         return isolated;
1623 }
1624
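/*
 * Copy the isolated small pages into the new hugepage: pte_none
 * entries become zeroed subpages, mapped pages are copied, their pte
 * and rmap torn down and the old pages freed.
 */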
1625 static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
1626                                       struct vm_area_struct *vma,
1627                                       unsigned long address,
1628                                       spinlock_t *ptl)
1629 {
1630         pte_t *_pte;
1631         for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) {
1632                 pte_t pteval = *_pte;
1633                 struct page *src_page;
1634
1635                 if (pte_none(pteval)) {
1636                         clear_user_highpage(page, address);
1637                         add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
1638                 } else {
1639                         src_page = pte_page(pteval);
1640                         copy_user_highpage(page, src_page, address, vma);
1641                         VM_BUG_ON(page_mapcount(src_page) != 1);
1642                         VM_BUG_ON(page_count(src_page) != 2);
1643                         release_pte_page(src_page);
1644                         /*
1645                          * ptl mostly unnecessary, but preempt has to
1646                          * be disabled to update the per-cpu stats
1647                          * inside page_remove_rmap().
1648                          */
1649                         spin_lock(ptl);
1650                         /*
1651                          * paravirt calls inside pte_clear here are
1652                          * superfluous.
1653                          */
1654                         pte_clear(vma->vm_mm, address, _pte);
1655                         page_remove_rmap(src_page);
1656                         spin_unlock(ptl);
1657                         free_page_and_swap_cache(src_page);
1658                 }
1659
1660                 address += PAGE_SIZE;
1661                 page++;
1662         }
1663 }
1664
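/*
 * Collapse the HPAGE_PMD_NR small pages at "address" into a hugepage.
 * Takes mmap_sem for writing, rechecks that the vma is still suitable,
 * clears and flushes the pmd (stopping gup_fast), isolates and copies
 * the small pages into the new page and finally installs a huge pmd
 * mapping it.  If isolation fails the original pmd is restored; the
 * new page is only allocated (and freed on error) here in the NUMA
 * case, otherwise it is the preallocated *hpage.
 */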
1665 static void collapse_huge_page(struct mm_struct *mm,
1666                                unsigned long address,
1667                                struct page **hpage)
1668 {
1669         struct vm_area_struct *vma;
1670         pgd_t *pgd;
1671         pud_t *pud;
1672         pmd_t *pmd, _pmd;
1673         pte_t *pte;
1674         pgtable_t pgtable;
1675         struct page *new_page;
1676         spinlock_t *ptl;
1677         int isolated;
1678         unsigned long hstart, hend;
1679
1680         VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1681 #ifndef CONFIG_NUMA
1682         VM_BUG_ON(!*hpage);
1683 #else
1684         VM_BUG_ON(*hpage);
1685 #endif
1686
1687         /*
1688          * Prevent all access to the pagetables, with the exception of
1689          * gup_fast (handled later by the ptep_clear_flush) and of the
1690          * VM (handled by the anon_vma lock + PG_lock).
1691          */
1692         down_write(&mm->mmap_sem);
1693         if (unlikely(khugepaged_test_exit(mm)))
1694                 goto out;
1695
1696         vma = find_vma(mm, address);
1697         hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
1698         hend = vma->vm_end & HPAGE_PMD_MASK;
1699         if (address < hstart || address + HPAGE_PMD_SIZE > hend)
1700                 goto out;
1701
1702         if (!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always())
1703                 goto out;
1704
1705         /* VM_PFNMAP vmas may have vm_ops null but vm_file set */
1706         if (!vma->anon_vma || vma->vm_ops || vma->vm_file)
1707                 goto out;
1708         VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma));
1709
1710         pgd = pgd_offset(mm, address);
1711         if (!pgd_present(*pgd))
1712                 goto out;
1713
1714         pud = pud_offset(pgd, address);
1715         if (!pud_present(*pud))
1716                 goto out;
1717
1718         pmd = pmd_offset(pud, address);
1719         /* pmd can't go away or become huge under us */
1720         if (!pmd_present(*pmd) || pmd_trans_huge(*pmd))
1721                 goto out;
1722
1723 #ifndef CONFIG_NUMA
1724         new_page = *hpage;
1725 #else
1726         new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address);
1727         if (unlikely(!new_page)) {
1728                 *hpage = ERR_PTR(-ENOMEM);
1729                 goto out;
1730         }
1731 #endif
1732         if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL)))
1733                 goto out_put_page;
1734
1735         anon_vma_lock(vma->anon_vma);
1736
1737         pte = pte_offset_map(pmd, address);
1738         ptl = pte_lockptr(mm, pmd);
1739
1740         spin_lock(&mm->page_table_lock); /* probably unnecessary */
1741         /*
1742          * After this gup_fast can't run anymore. This also flushes
1743          * any huge TLB entry from the CPU, so that huge and small
1744          * TLB entries never coexist for the same virtual address,
1745          * avoiding the risk of CPU bugs in that area.
1746          */
1747         _pmd = pmdp_clear_flush_notify(vma, address, pmd);
1748         spin_unlock(&mm->page_table_lock);
1749
1750         spin_lock(ptl);
1751         isolated = __collapse_huge_page_isolate(vma, address, pte);
1752         spin_unlock(ptl);
1753         pte_unmap(pte);
1754
1755         if (unlikely(!isolated)) {
1756                 spin_lock(&mm->page_table_lock);
1757                 BUG_ON(!pmd_none(*pmd));
1758                 set_pmd_at(mm, address, pmd, _pmd);
1759                 spin_unlock(&mm->page_table_lock);
1760                 anon_vma_unlock(vma->anon_vma);
1761                 mem_cgroup_uncharge_page(new_page);
1762                 goto out_put_page;
1763         }
1764
1765         /*
1766          * All pages are isolated and locked so anon_vma rmap
1767          * can't run anymore.
1768          */
1769         anon_vma_unlock(vma->anon_vma);
1770
1771         __collapse_huge_page_copy(pte, new_page, vma, address, ptl);
1772         __SetPageUptodate(new_page);
1773         pgtable = pmd_pgtable(_pmd);
1774         VM_BUG_ON(page_count(pgtable) != 1);
1775         VM_BUG_ON(page_mapcount(pgtable) != 0);
1776
1777         _pmd = mk_pmd(new_page, vma->vm_page_prot);
1778         _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1779         _pmd = pmd_mkhuge(_pmd);
1780
1781         /*
1782          * spin_lock() below is not the equivalent of smp_wmb(), so
1783          * this is needed to prevent the copy_huge_page writes from
1784          * becoming visible after the set_pmd_at() write.
1785          */
1786         smp_wmb();
1787
1788         spin_lock(&mm->page_table_lock);
1789         BUG_ON(!pmd_none(*pmd));
1790         page_add_new_anon_rmap(new_page, vma, address);
1791         set_pmd_at(mm, address, pmd, _pmd);
1792         update_mmu_cache(vma, address, _pmd);
1793         prepare_pmd_huge_pte(pgtable, mm);
1794         mm->nr_ptes--;
1795         spin_unlock(&mm->page_table_lock);
1796
1797 #ifndef CONFIG_NUMA
1798         *hpage = NULL;
1799 #endif
1800         khugepaged_pages_collapsed++;
1801 out:
1802         up_write(&mm->mmap_sem);
1803         return;
1804
1805 out_put_page:
1806 #ifdef CONFIG_NUMA
1807         put_page(new_page);
1808 #endif
1809         goto out;
1810 }
1811
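/*
 * Scan the ptes of one pmd-aligned range and decide whether it is
 * worth collapsing, using the same criteria as the isolation pass but
 * without locking or isolating anything.  If it qualifies, mmap_sem
 * (read) is dropped and collapse_huge_page() is called; returns 1 in
 * that case, 0 otherwise.
 */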
1812 static int khugepaged_scan_pmd(struct mm_struct *mm,
1813                                struct vm_area_struct *vma,
1814                                unsigned long address,
1815                                struct page **hpage)
1816 {
1817         pgd_t *pgd;
1818         pud_t *pud;
1819         pmd_t *pmd;
1820         pte_t *pte, *_pte;
1821         int ret = 0, referenced = 0, none = 0;
1822         struct page *page;
1823         unsigned long _address;
1824         spinlock_t *ptl;
1825
1826         VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1827
1828         pgd = pgd_offset(mm, address);
1829         if (!pgd_present(*pgd))
1830                 goto out;
1831
1832         pud = pud_offset(pgd, address);
1833         if (!pud_present(*pud))
1834                 goto out;
1835
1836         pmd = pmd_offset(pud, address);
1837         if (!pmd_present(*pmd) || pmd_trans_huge(*pmd))
1838                 goto out;
1839
1840         pte = pte_offset_map_lock(mm, pmd, address, &ptl);
1841         for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
1842              _pte++, _address += PAGE_SIZE) {
1843                 pte_t pteval = *_pte;
1844                 if (pte_none(pteval)) {
1845                         if (++none <= khugepaged_max_ptes_none)
1846                                 continue;
1847                         else
1848                                 goto out_unmap;
1849                 }
1850                 if (!pte_present(pteval) || !pte_write(pteval))
1851                         goto out_unmap;
1852                 page = vm_normal_page(vma, _address, pteval);
1853                 if (unlikely(!page))
1854                         goto out_unmap;
1855                 VM_BUG_ON(PageCompound(page));
1856                 if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
1857                         goto out_unmap;
1858                 /* cannot use mapcount: can't collapse if there's a gup pin */
1859                 if (page_count(page) != 1)
1860                         goto out_unmap;
1861                 if (pte_young(pteval))
1862                         referenced = 1;
1863         }
1864         if (referenced)
1865                 ret = 1;
1866 out_unmap:
1867         pte_unmap_unlock(pte, ptl);
1868         if (ret) {
1869                 up_read(&mm->mmap_sem);
1870                 collapse_huge_page(mm, address, hpage);
1871         }
1872 out:
1873         return ret;
1874 }
1875
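/*
 * Drop an mm_slot whose mm has exited: unhash it, unlink it from the
 * scan list, free it and mmdrop() the reference taken in
 * __khugepaged_enter().  Called with khugepaged_mm_lock held.
 */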
1876 static void collect_mm_slot(struct mm_slot *mm_slot)
1877 {
1878         struct mm_struct *mm = mm_slot->mm;
1879
1880         VM_BUG_ON(!spin_is_locked(&khugepaged_mm_lock));
1881
1882         if (khugepaged_test_exit(mm)) {
1883                 /* free mm_slot */
1884                 hlist_del(&mm_slot->hash);
1885                 list_del(&mm_slot->mm_node);
1886
1887                 /*
1888                  * Not strictly needed because the mm exited already.
1889                  *
1890                  * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1891                  */
1892
1893                 /* khugepaged_mm_lock actually not necessary for the below */
1894                 free_mm_slot(mm_slot);
1895                 mmdrop(mm);
1896         }
1897 }
1898
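/*
 * Scan up to "pages" worth of ptes starting at the saved cursor,
 * walking the vmas of the current mm and calling khugepaged_scan_pmd()
 * on each suitable pmd-aligned range.  Entered and exited with
 * khugepaged_mm_lock held (dropped while scanning).  When the mm is
 * exhausted or exiting, the cursor is advanced to the next mm and the
 * old slot is collected.  Returns the progress made.
 */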
1899 static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
1900                                             struct page **hpage)
1901 {
1902         struct mm_slot *mm_slot;
1903         struct mm_struct *mm;
1904         struct vm_area_struct *vma;
1905         int progress = 0;
1906
1907         VM_BUG_ON(!pages);
1908         VM_BUG_ON(!spin_is_locked(&khugepaged_mm_lock));
1909
1910         if (khugepaged_scan.mm_slot)
1911                 mm_slot = khugepaged_scan.mm_slot;
1912         else {
1913                 mm_slot = list_entry(khugepaged_scan.mm_head.next,
1914                                      struct mm_slot, mm_node);
1915                 khugepaged_scan.address = 0;
1916                 khugepaged_scan.mm_slot = mm_slot;
1917         }
1918         spin_unlock(&khugepaged_mm_lock);
1919
1920         mm = mm_slot->mm;
1921         down_read(&mm->mmap_sem);
1922         if (unlikely(khugepaged_test_exit(mm)))
1923                 vma = NULL;
1924         else
1925                 vma = find_vma(mm, khugepaged_scan.address);
1926
1927         progress++;
1928         for (; vma; vma = vma->vm_next) {
1929                 unsigned long hstart, hend;
1930
1931                 cond_resched();
1932                 if (unlikely(khugepaged_test_exit(mm))) {
1933                         progress++;
1934                         break;
1935                 }
1936
1937                 if (!(vma->vm_flags & VM_HUGEPAGE) &&
1938                     !khugepaged_always()) {
1939                         progress++;
1940                         continue;
1941                 }
1942
1943                 /* VM_PFNMAP vmas may have vm_ops null but vm_file set */
1944                 if (!vma->anon_vma || vma->vm_ops || vma->vm_file) {
1945                         khugepaged_scan.address = vma->vm_end;
1946                         progress++;
1947                         continue;
1948                 }
1949                 VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma));
1950
1951                 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
1952                 hend = vma->vm_end & HPAGE_PMD_MASK;
1953                 if (hstart >= hend) {
1954                         progress++;
1955                         continue;
1956                 }
1957                 if (khugepaged_scan.address < hstart)
1958                         khugepaged_scan.address = hstart;
1959                 if (khugepaged_scan.address > hend) {
1960                         khugepaged_scan.address = hend + HPAGE_PMD_SIZE;
1961                         progress++;
1962                         continue;
1963                 }
1964                 BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
1965
1966                 while (khugepaged_scan.address < hend) {
1967                         int ret;
1968                         cond_resched();
1969                         if (unlikely(khugepaged_test_exit(mm)))
1970                                 goto breakouterloop;
1971
1972                         VM_BUG_ON(khugepaged_scan.address < hstart ||
1973                                   khugepaged_scan.address + HPAGE_PMD_SIZE >
1974                                   hend);
1975                         ret = khugepaged_scan_pmd(mm, vma,
1976                                                   khugepaged_scan.address,
1977                                                   hpage);
1978                         /* move to next address */
1979                         khugepaged_scan.address += HPAGE_PMD_SIZE;
1980                         progress += HPAGE_PMD_NR;
1981                         if (ret)
1982                                 /* we released mmap_sem so break loop */
1983                                 goto breakouterloop_mmap_sem;
1984                         if (progress >= pages)
1985                                 goto breakouterloop;
1986                 }
1987         }
1988 breakouterloop:
1989         up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
1990 breakouterloop_mmap_sem:
1991
1992         spin_lock(&khugepaged_mm_lock);
1993         BUG_ON(khugepaged_scan.mm_slot != mm_slot);
1994         /*
1995          * Release the current mm_slot if this mm is about to die, or
1996          * if we scanned all vmas of this mm.
1997          */
1998         if (khugepaged_test_exit(mm) || !vma) {
1999                 /*
2000                  * Make sure that if mm_users is reaching zero while
2001                  * khugepaged runs here, khugepaged_exit will find
2002                  * mm_slot not pointing to the exiting mm.
2003                  */
2004                 if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
2005                         khugepaged_scan.mm_slot = list_entry(
2006                                 mm_slot->mm_node.next,
2007                                 struct mm_slot, mm_node);
2008                         khugepaged_scan.address = 0;
2009                 } else {
2010                         khugepaged_scan.mm_slot = NULL;
2011                         khugepaged_full_scans++;
2012                 }
2013
2014                 collect_mm_slot(mm_slot);
2015         }
2016
2017         return progress;
2018 }
2019
2020 static int khugepaged_has_work(void)
2021 {
2022         return !list_empty(&khugepaged_scan.mm_head) &&
2023                 khugepaged_enabled();
2024 }
2025
2026 static int khugepaged_wait_event(void)
2027 {
2028         return !list_empty(&khugepaged_scan.mm_head) ||
2029                 !khugepaged_enabled();
2030 }
2031
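/*
 * One scan pass: keep calling khugepaged_scan_mm_slot() until
 * khugepaged_pages_to_scan worth of progress has been made or there is
 * no more work.  Without CONFIG_NUMA a hugepage is preallocated here
 * and an allocation failure ends the pass; with CONFIG_NUMA a previous
 * allocation error (*hpage == ERR_PTR) ends it.
 */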
2032 static void khugepaged_do_scan(struct page **hpage)
2033 {
2034         unsigned int progress = 0, pass_through_head = 0;
2035         unsigned int pages = khugepaged_pages_to_scan;
2036
2037         barrier(); /* make sure khugepaged_pages_to_scan is read only once into 'pages' */
2038
2039         while (progress < pages) {
2040                 cond_resched();
2041
2042 #ifndef CONFIG_NUMA
2043                 if (!*hpage) {
2044                         *hpage = alloc_hugepage(khugepaged_defrag());
2045                         if (unlikely(!*hpage))
2046                                 break;
2047                 }
2048 #else
2049                 if (IS_ERR(*hpage))
2050                         break;
2051 #endif
2052
2053                 spin_lock(&khugepaged_mm_lock);
2054                 if (!khugepaged_scan.mm_slot)
2055                         pass_through_head++;
2056                 if (khugepaged_has_work() &&
2057                     pass_through_head < 2)
2058                         progress += khugepaged_scan_mm_slot(pages - progress,
2059                                                             hpage);
2060                 else
2061                         progress = pages;
2062                 spin_unlock(&khugepaged_mm_lock);
2063         }
2064 }
2065
2066 static void khugepaged_alloc_sleep(void)
2067 {
2068         DEFINE_WAIT(wait);
2069         add_wait_queue(&khugepaged_wait, &wait);
2070         schedule_timeout_interruptible(
2071                 msecs_to_jiffies(
2072                         khugepaged_alloc_sleep_millisecs));
2073         remove_wait_queue(&khugepaged_wait, &wait);
2074 }
2075
2076 #ifndef CONFIG_NUMA
2077 static struct page *khugepaged_alloc_hugepage(void)
2078 {
2079         struct page *hpage;
2080
2081         do {
2082                 hpage = alloc_hugepage(khugepaged_defrag());
2083                 if (!hpage)
2084                         khugepaged_alloc_sleep();
2085         } while (unlikely(!hpage) &&
2086                  likely(khugepaged_enabled()));
2087         return hpage;
2088 }
2089 #endif
2090
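/*
 * Main loop of the khugepaged thread: preallocate a hugepage (non-NUMA
 * only), run one scan pass, then nap for
 * khugepaged_scan_sleep_millisecs if there is more work, or sleep on
 * khugepaged_wait until new mms are registered or khugepaged is
 * disabled.
 */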
2091 static void khugepaged_loop(void)
2092 {
2093         struct page *hpage;
2094
2095 #ifdef CONFIG_NUMA
2096         hpage = NULL;
2097 #endif
2098         while (likely(khugepaged_enabled())) {
2099 #ifndef CONFIG_NUMA
2100                 hpage = khugepaged_alloc_hugepage();
2101                 if (unlikely(!hpage))
2102                         break;
2103 #else
2104                 if (IS_ERR(hpage)) {
2105                         khugepaged_alloc_sleep();
2106                         hpage = NULL;
2107                 }
2108 #endif
2109
2110                 khugepaged_do_scan(&hpage);
2111 #ifndef CONFIG_NUMA
2112                 if (hpage)
2113                         put_page(hpage);
2114 #endif
2115                 if (khugepaged_has_work()) {
2116                         DEFINE_WAIT(wait);
2117                         if (!khugepaged_scan_sleep_millisecs)
2118                                 continue;
2119                         add_wait_queue(&khugepaged_wait, &wait);
2120                         schedule_timeout_interruptible(
2121                                 msecs_to_jiffies(
2122                                         khugepaged_scan_sleep_millisecs));
2123                         remove_wait_queue(&khugepaged_wait, &wait);
2124                 } else if (khugepaged_enabled())
2125                         wait_event_interruptible(khugepaged_wait,
2126                                                  khugepaged_wait_event());
2127         }
2128 }
2129
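/*
 * Kernel thread entry point: run khugepaged_loop() until khugepaged is
 * disabled, then collect the current mm_slot cursor and clear
 * khugepaged_thread under khugepaged_mutex.
 */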
2130 static int khugepaged(void *none)
2131 {
2132         struct mm_slot *mm_slot;
2133
2134         set_user_nice(current, 19);
2135
2136         /* serialize with start_khugepaged() */
2137         mutex_lock(&khugepaged_mutex);
2138
2139         for (;;) {
2140                 mutex_unlock(&khugepaged_mutex);
2141                 BUG_ON(khugepaged_thread != current);
2142                 khugepaged_loop();
2143                 BUG_ON(khugepaged_thread != current);
2144
2145                 mutex_lock(&khugepaged_mutex);
2146                 if (!khugepaged_enabled())
2147                         break;
2148         }
2149
2150         spin_lock(&khugepaged_mm_lock);
2151         mm_slot = khugepaged_scan.mm_slot;
2152         khugepaged_scan.mm_slot = NULL;
2153         if (mm_slot)
2154                 collect_mm_slot(mm_slot);
2155         spin_unlock(&khugepaged_mm_lock);
2156
2157         khugepaged_thread = NULL;
2158         mutex_unlock(&khugepaged_mutex);
2159
2160         return 0;
2161 }
2162
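/*
 * Split the hugepage mapped by *pmd: recheck pmd_trans_huge() and grab
 * a reference to the compound page under page_table_lock, then split
 * it with split_huge_page() and drop the reference.
 */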
2163 void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd)
2164 {
2165         struct page *page;
2166
2167         spin_lock(&mm->page_table_lock);
2168         if (unlikely(!pmd_trans_huge(*pmd))) {
2169                 spin_unlock(&mm->page_table_lock);
2170                 return;
2171         }
2172         page = pmd_page(*pmd);
2173         VM_BUG_ON(!page_count(page));
2174         get_page(page);
2175         spin_unlock(&mm->page_table_lock);
2176
2177         split_huge_page(page);
2178
2179         put_page(page);
2180         BUG_ON(pmd_trans_huge(*pmd));
2181 }