1 /*
2  * Generic hugetlb support.
3  * (C) Nadia Yvette Chambers, April 2004
4  */
5 #include <linux/list.h>
6 #include <linux/init.h>
7 #include <linux/mm.h>
8 #include <linux/seq_file.h>
9 #include <linux/sysctl.h>
10 #include <linux/highmem.h>
11 #include <linux/mmu_notifier.h>
12 #include <linux/nodemask.h>
13 #include <linux/pagemap.h>
14 #include <linux/mempolicy.h>
15 #include <linux/compiler.h>
16 #include <linux/cpuset.h>
17 #include <linux/mutex.h>
18 #include <linux/bootmem.h>
19 #include <linux/sysfs.h>
20 #include <linux/slab.h>
21 #include <linux/rmap.h>
22 #include <linux/swap.h>
23 #include <linux/swapops.h>
24 #include <linux/page-isolation.h>
25 #include <linux/jhash.h>
26
27 #include <asm/page.h>
28 #include <asm/pgtable.h>
29 #include <asm/tlb.h>
30
31 #include <linux/io.h>
32 #include <linux/hugetlb.h>
33 #include <linux/hugetlb_cgroup.h>
34 #include <linux/node.h>
35 #include "internal.h"
36
37 int hugepages_treat_as_movable;
38
39 int hugetlb_max_hstate __read_mostly;
40 unsigned int default_hstate_idx;
41 struct hstate hstates[HUGE_MAX_HSTATE];
42 /*
43  * Minimum page order among possible hugepage sizes, set to a proper value
44  * at boot time.
45  */
46 static unsigned int minimum_order __read_mostly = UINT_MAX;
47
48 __initdata LIST_HEAD(huge_boot_pages);
49
50 /* for command line parsing */
51 static struct hstate * __initdata parsed_hstate;
52 static unsigned long __initdata default_hstate_max_huge_pages;
53 static unsigned long __initdata default_hstate_size;
54 static bool __initdata parsed_valid_hugepagesz = true;
55
56 /*
57  * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
58  * free_huge_pages, and surplus_huge_pages.
59  */
60 DEFINE_SPINLOCK(hugetlb_lock);
61
62 /*
63  * Serializes faults on the same logical page.  This is used to
64  * prevent spurious OOMs when the hugepage pool is fully utilized.
65  */
66 static int num_fault_mutexes;
67 struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
68
69 /* Forward declaration */
70 static int hugetlb_acct_memory(struct hstate *h, long delta);
71
72 static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
73 {
74         bool free = (spool->count == 0) && (spool->used_hpages == 0);
75
76         spin_unlock(&spool->lock);
77
78         /* If no pages are used, and no other handles to the subpool
79          * remain, give up any reservations based on minimum size and
80          * free the subpool */
81         if (free) {
82                 if (spool->min_hpages != -1)
83                         hugetlb_acct_memory(spool->hstate,
84                                                 -spool->min_hpages);
85                 kfree(spool);
86         }
87 }
88
89 struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
90                                                 long min_hpages)
91 {
92         struct hugepage_subpool *spool;
93
94         spool = kzalloc(sizeof(*spool), GFP_KERNEL);
95         if (!spool)
96                 return NULL;
97
98         spin_lock_init(&spool->lock);
99         spool->count = 1;
100         spool->max_hpages = max_hpages;
101         spool->hstate = h;
102         spool->min_hpages = min_hpages;
103
104         if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
105                 kfree(spool);
106                 return NULL;
107         }
108         spool->rsv_hpages = min_hpages;
109
110         return spool;
111 }
112
113 void hugepage_put_subpool(struct hugepage_subpool *spool)
114 {
115         spin_lock(&spool->lock);
116         BUG_ON(!spool->count);
117         spool->count--;
118         unlock_or_release_subpool(spool);
119 }
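/*
 * Usage sketch (illustrative only, not part of the original file): a
 * caller wanting a subpool capped at 16 huge pages with no minimum
 * would pair the constructor with the final put like this.  The
 * function name below is hypothetical and error handling is elided.
 */
static inline void subpool_usage_sketch(struct hstate *h)
{
	struct hugepage_subpool *spool;

	spool = hugepage_new_subpool(h, 16, -1);	/* max 16, no minimum */
	if (spool)
		hugepage_put_subpool(spool);	/* last ref: subpool is freed */
}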
120
121 /*
122  * Subpool accounting for allocating and reserving pages.
123  * Return -ENOMEM if there are not enough resources to satisfy
124  * the request.  Otherwise, return the number of pages by which the
125  * global pools must be adjusted (upward).  The returned value may
126  * only be different than the passed value (delta) in the case where
127  * a subpool minimum size must be maintained.
128  */
129 static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
130                                       long delta)
131 {
132         long ret = delta;
133
134         if (!spool)
135                 return ret;
136
137         spin_lock(&spool->lock);
138
139         if (spool->max_hpages != -1) {          /* maximum size accounting */
140                 if ((spool->used_hpages + delta) <= spool->max_hpages)
141                         spool->used_hpages += delta;
142                 else {
143                         ret = -ENOMEM;
144                         goto unlock_ret;
145                 }
146         }
147
148         /* minimum size accounting */
149         if (spool->min_hpages != -1 && spool->rsv_hpages) {
150                 if (delta > spool->rsv_hpages) {
151                         /*
152                          * Asking for more reserves than those already taken on
153                          * behalf of subpool.  Return difference.
154                          */
155                         ret = delta - spool->rsv_hpages;
156                         spool->rsv_hpages = 0;
157                 } else {
158                         ret = 0;        /* reserves already accounted for */
159                         spool->rsv_hpages -= delta;
160                 }
161         }
162
163 unlock_ret:
164         spin_unlock(&spool->lock);
165         return ret;
166 }
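/*
 * Worked example (illustrative): assume max_hpages == -1 (no cap),
 * min_hpages == 4 and all four reserves still unused (rsv_hpages == 4).
 * A request for delta == 6 returns 6 - 4 == 2: only two pages must be
 * newly accounted against the global pool, the other four were set
 * aside when the subpool was created.
 */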
167
168 /*
169  * Subpool accounting for freeing and unreserving pages.
170  * Return the number of global page reservations that must be dropped.
171  * The return value may only be different than the passed value (delta)
172  * in the case where a subpool minimum size must be maintained.
173  */
174 static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
175                                        long delta)
176 {
177         long ret = delta;
178
179         if (!spool)
180                 return delta;
181
182         spin_lock(&spool->lock);
183
184         if (spool->max_hpages != -1)            /* maximum size accounting */
185                 spool->used_hpages -= delta;
186
187          /* minimum size accounting */
188         if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
189                 if (spool->rsv_hpages + delta <= spool->min_hpages)
190                         ret = 0;
191                 else
192                         ret = spool->rsv_hpages + delta - spool->min_hpages;
193
194                 spool->rsv_hpages += delta;
195                 if (spool->rsv_hpages > spool->min_hpages)
196                         spool->rsv_hpages = spool->min_hpages;
197         }
198
199         /*
200          * If hugetlbfs_put_super couldn't free spool due to an outstanding
201          * quota reference, free it now.
202          */
203         unlock_or_release_subpool(spool);
204
205         return ret;
206 }
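/*
 * Worked example (illustrative): with min_hpages == 4, rsv_hpages == 3
 * and used_hpages dropping below the minimum, putting back delta == 2
 * pages returns 3 + 2 - 4 == 1 global reservation to release, and
 * rsv_hpages is topped back up to (and clamped at) the minimum of 4.
 */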
207
208 static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
209 {
210         return HUGETLBFS_SB(inode->i_sb)->spool;
211 }
212
213 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
214 {
215         return subpool_inode(file_inode(vma->vm_file));
216 }
217
218 /*
219  * Region tracking -- allows tracking of reservations and instantiated pages
220  *                    across the pages in a mapping.
221  *
222  * The region data structures are embedded into a resv_map and protected
223  * by a resv_map's lock.  The set of regions within the resv_map represent
224  * reservations for huge pages, or huge pages that have already been
225  * instantiated within the map.  The from and to elements are huge page
226  * indices into the associated mapping.  from indicates the starting index
227  * of the region.  to represents the first index past the end of the region.
228  *
229  * For example, a file region structure with from == 0 and to == 4 represents
230  * four huge pages in a mapping.  It is important to note that the to element
231  * represents the first element past the end of the region. This is used in
232  * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
233  *
234  * Interval notation of the form [from, to) will be used to indicate that
235  * the endpoint from is inclusive and to is exclusive.
236  */
237 struct file_region {
238         struct list_head link;
239         long from;
240         long to;
241 };
242
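/*
 * Illustrative helper (hypothetical, not in the original source): with
 * the [from, to) convention above, the number of huge pages covered by
 * a region is simply to - from.
 */
static inline long region_pages(const struct file_region *rg)
{
	return rg->to - rg->from;	/* e.g. [0, 4) covers 4 huge pages */
}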
243 /*
244  * Add the huge page range represented by [f, t) to the reserve
245  * map.  In the normal case, existing regions will be expanded
246  * to accommodate the specified range.  Sufficient regions should
247  * exist for expansion due to the previous call to region_chg
248  * with the same range.  However, it is possible that region_del
249  * could have been called after region_chg and modified the map
250  * in such a way that no region exists to be expanded.  In this
251  * case, pull a region descriptor from the cache associated with
252  * the map and use that for the new range.
253  *
254  * Return the number of new huge pages added to the map.  This
255  * number is greater than or equal to zero.
256  */
257 static long region_add(struct resv_map *resv, long f, long t)
258 {
259         struct list_head *head = &resv->regions;
260         struct file_region *rg, *nrg, *trg;
261         long add = 0;
262
263         spin_lock(&resv->lock);
264         /* Locate the region we are either in or before. */
265         list_for_each_entry(rg, head, link)
266                 if (f <= rg->to)
267                         break;
268
269         /*
270          * If no region exists which can be expanded to include the
271          * specified range, the list must have been modified by an
272  * interleaving call to region_del().  Pull a region descriptor
273          * from the cache and use it for this range.
274          */
275         if (&rg->link == head || t < rg->from) {
276                 VM_BUG_ON(resv->region_cache_count <= 0);
277
278                 resv->region_cache_count--;
279                 nrg = list_first_entry(&resv->region_cache, struct file_region,
280                                         link);
281                 list_del(&nrg->link);
282
283                 nrg->from = f;
284                 nrg->to = t;
285                 list_add(&nrg->link, rg->link.prev);
286
287                 add += t - f;
288                 goto out_locked;
289         }
290
291         /* Round our left edge to the current segment if it encloses us. */
292         if (f > rg->from)
293                 f = rg->from;
294
295         /* Check for and consume any regions we now overlap with. */
296         nrg = rg;
297         list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
298                 if (&rg->link == head)
299                         break;
300                 if (rg->from > t)
301                         break;
302
303                 /* If this area reaches higher, then extend our area to
304                  * include it completely.  If this is not the first area
305                  * which we intend to reuse, free it. */
306                 if (rg->to > t)
307                         t = rg->to;
308                 if (rg != nrg) {
309                         /* Decrement return value by the deleted range.
310                          * Another range will span this area so that by
311                          * the end of the routine add will be >= zero
312                          */
313                         add -= (rg->to - rg->from);
314                         list_del(&rg->link);
315                         kfree(rg);
316                 }
317         }
318
319         add += (nrg->from - f);         /* Added to beginning of region */
320         nrg->from = f;
321         add += t - nrg->to;             /* Added to end of region */
322         nrg->to = t;
323
324 out_locked:
325         resv->adds_in_progress--;
326         spin_unlock(&resv->lock);
327         VM_BUG_ON(add < 0);
328         return add;
329 }
330
331 /*
332  * Examine the existing reserve map and determine how many
333  * huge pages in the specified range [f, t) are NOT currently
334  * represented.  This routine is called before a subsequent
335  * call to region_add that will actually modify the reserve
336  * map to add the specified range [f, t).  region_chg does
337  * not change the number of huge pages represented by the
338  * map.  However, if the existing regions in the map can not
339  * be expanded to represent the new range, a new file_region
340  * structure is added to the map as a placeholder.  This is
341  * so that the subsequent region_add call will have all the
342  * regions it needs and will not fail.
343  *
344  * Upon entry, region_chg will also examine the cache of region descriptors
345  * associated with the map.  If there are not enough descriptors cached, one
346  * will be allocated for the in progress add operation.
347  *
348  * Returns the number of huge pages that need to be added to the existing
349  * reservation map for the range [f, t).  This number is greater or equal to
350  * zero.  -ENOMEM is returned if a new file_region structure or cache entry
351  * is needed and can not be allocated.
352  */
353 static long region_chg(struct resv_map *resv, long f, long t)
354 {
355         struct list_head *head = &resv->regions;
356         struct file_region *rg, *nrg = NULL;
357         long chg = 0;
358
359 retry:
360         spin_lock(&resv->lock);
361 retry_locked:
362         resv->adds_in_progress++;
363
364         /*
365          * Check for sufficient descriptors in the cache to accommodate
366          * the number of in progress add operations.
367          */
368         if (resv->adds_in_progress > resv->region_cache_count) {
369                 struct file_region *trg;
370
371                 VM_BUG_ON(resv->adds_in_progress - resv->region_cache_count > 1);
372                 /* Must drop lock to allocate a new descriptor. */
373                 resv->adds_in_progress--;
374                 spin_unlock(&resv->lock);
375
376                 trg = kmalloc(sizeof(*trg), GFP_KERNEL);
377                 if (!trg) {
378                         kfree(nrg);
379                         return -ENOMEM;
380                 }
381
382                 spin_lock(&resv->lock);
383                 list_add(&trg->link, &resv->region_cache);
384                 resv->region_cache_count++;
385                 goto retry_locked;
386         }
387
388         /* Locate the region we are before or in. */
389         list_for_each_entry(rg, head, link)
390                 if (f <= rg->to)
391                         break;
392
393         /* If we are below the current region then a new region is required.
394          * Subtle: allocate a new region at the position but make it zero
395          * size such that we can guarantee to record the reservation. */
396         if (&rg->link == head || t < rg->from) {
397                 if (!nrg) {
398                         resv->adds_in_progress--;
399                         spin_unlock(&resv->lock);
400                         nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
401                         if (!nrg)
402                                 return -ENOMEM;
403
404                         nrg->from = f;
405                         nrg->to   = f;
406                         INIT_LIST_HEAD(&nrg->link);
407                         goto retry;
408                 }
409
410                 list_add(&nrg->link, rg->link.prev);
411                 chg = t - f;
412                 goto out_nrg;
413         }
414
415         /* Round our left edge to the current segment if it encloses us. */
416         if (f > rg->from)
417                 f = rg->from;
418         chg = t - f;
419
420         /* Check for and consume any regions we now overlap with. */
421         list_for_each_entry(rg, rg->link.prev, link) {
422                 if (&rg->link == head)
423                         break;
424                 if (rg->from > t)
425                         goto out;
426
427                 /* We overlap with this area, if it extends further than
428                  * us then we must extend ourselves.  Account for its
429                  * existing reservation. */
430                 if (rg->to > t) {
431                         chg += rg->to - t;
432                         t = rg->to;
433                 }
434                 chg -= rg->to - rg->from;
435         }
436
437 out:
438         spin_unlock(&resv->lock);
439         /*  We already know we raced and no longer need the new region */
440         kfree(nrg);
441         return chg;
442 out_nrg:
443         spin_unlock(&resv->lock);
444         return chg;
445 }
446
447 /*
448  * Abort the in progress add operation.  The adds_in_progress field
449  * of the resv_map keeps track of the operations in progress between
450  * calls to region_chg and region_add.  Operations are sometimes
451  * aborted after the call to region_chg.  In such cases, region_abort
452  * is called to decrement the adds_in_progress counter.
453  *
454  * NOTE: The range arguments [f, t) are not needed or used in this
455  * routine.  They are kept to make reading the calling code easier as
456  * arguments will match the associated region_chg call.
457  */
458 static void region_abort(struct resv_map *resv, long f, long t)
459 {
460         spin_lock(&resv->lock);
461         VM_BUG_ON(!resv->region_cache_count);
462         resv->adds_in_progress--;
463         spin_unlock(&resv->lock);
464 }
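/*
 * Typical calling pattern (sketch mirroring the callers later in this
 * file): reserve descriptors with region_chg(), then either commit the
 * range with region_add() or back out with region_abort().  The
 * "allocation_failed" condition below is hypothetical.
 *
 *	chg = region_chg(resv, f, t);
 *	if (chg < 0)
 *		return chg;
 *	if (allocation_failed)
 *		region_abort(resv, f, t);
 *	else
 *		region_add(resv, f, t);
 */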
465
466 /*
467  * Delete the specified range [f, t) from the reserve map.  If the
468  * t parameter is LONG_MAX, this indicates that ALL regions after f
469  * should be deleted.  Locate the regions which intersect [f, t)
470  * and either trim, delete or split the existing regions.
471  *
472  * Returns the number of huge pages deleted from the reserve map.
473  * In the normal case, the return value is zero or more.  In the
474  * case where a region must be split, a new region descriptor must
475  * be allocated.  If the allocation fails, -ENOMEM will be returned.
476  * NOTE: If the parameter t == LONG_MAX, then we will never split
477  * a region and possibly return -ENOMEM.  Callers specifying
478  * t == LONG_MAX do not need to check for -ENOMEM error.
479  */
480 static long region_del(struct resv_map *resv, long f, long t)
481 {
482         struct list_head *head = &resv->regions;
483         struct file_region *rg, *trg;
484         struct file_region *nrg = NULL;
485         long del = 0;
486
487 retry:
488         spin_lock(&resv->lock);
489         list_for_each_entry_safe(rg, trg, head, link) {
490                 /*
491                  * Skip regions before the range to be deleted.  file_region
492                  * ranges are normally of the form [from, to).  However, there
493                  * may be a "placeholder" entry in the map which is of the form
494                  * (from, to) with from == to.  Check for placeholder entries
495                  * at the beginning of the range to be deleted.
496                  */
497                 if (rg->to <= f && (rg->to != rg->from || rg->to != f))
498                         continue;
499
500                 if (rg->from >= t)
501                         break;
502
503                 if (f > rg->from && t < rg->to) { /* Must split region */
504                         /*
505                          * Check for an entry in the cache before dropping
506                          * lock and attempting allocation.
507                          */
508                         if (!nrg &&
509                             resv->region_cache_count > resv->adds_in_progress) {
510                                 nrg = list_first_entry(&resv->region_cache,
511                                                         struct file_region,
512                                                         link);
513                                 list_del(&nrg->link);
514                                 resv->region_cache_count--;
515                         }
516
517                         if (!nrg) {
518                                 spin_unlock(&resv->lock);
519                                 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
520                                 if (!nrg)
521                                         return -ENOMEM;
522                                 goto retry;
523                         }
524
525                         del += t - f;
526
527                         /* New entry for end of split region */
528                         nrg->from = t;
529                         nrg->to = rg->to;
530                         INIT_LIST_HEAD(&nrg->link);
531
532                         /* Original entry is trimmed */
533                         rg->to = f;
534
535                         list_add(&nrg->link, &rg->link);
536                         nrg = NULL;
537                         break;
538                 }
539
540                 if (f <= rg->from && t >= rg->to) { /* Remove entire region */
541                         del += rg->to - rg->from;
542                         list_del(&rg->link);
543                         kfree(rg);
544                         continue;
545                 }
546
547                 if (f <= rg->from) {    /* Trim beginning of region */
548                         del += t - rg->from;
549                         rg->from = t;
550                 } else {                /* Trim end of region */
551                         del += rg->to - f;
552                         rg->to = f;
553                 }
554         }
555
556         spin_unlock(&resv->lock);
557         kfree(nrg);
558         return del;
559 }
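/*
 * Worked example (illustrative): deleting [2, 3) from a map holding the
 * single region [0, 4) takes the "must split" path, leaving [0, 2) and
 * [3, 4) behind and returning 1; the new right-hand region consumes one
 * cached (or freshly allocated) descriptor.
 */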
560
561 /*
562  * A rare out of memory error was encountered which prevented removal of
563  * the reserve map region for a page.  The huge page itself was freed
564  * and removed from the page cache.  This routine will adjust the subpool
565  * usage count, and the global reserve count if needed.  By incrementing
566  * these counts, the reserve map entry which could not be deleted will
567  * appear as a "reserved" entry instead of simply dangling with incorrect
568  * counts.
569  */
570 void hugetlb_fix_reserve_counts(struct inode *inode, bool restore_reserve)
571 {
572         struct hugepage_subpool *spool = subpool_inode(inode);
573         long rsv_adjust;
574
575         rsv_adjust = hugepage_subpool_get_pages(spool, 1);
576         if (restore_reserve && rsv_adjust) {
577                 struct hstate *h = hstate_inode(inode);
578
579                 hugetlb_acct_memory(h, 1);
580         }
581 }
582
583 /*
584  * Count and return the number of huge pages in the reserve map
585  * that intersect with the range [f, t).
586  */
587 static long region_count(struct resv_map *resv, long f, long t)
588 {
589         struct list_head *head = &resv->regions;
590         struct file_region *rg;
591         long chg = 0;
592
593         spin_lock(&resv->lock);
594         /* Locate each segment we overlap with, and count that overlap. */
595         list_for_each_entry(rg, head, link) {
596                 long seg_from;
597                 long seg_to;
598
599                 if (rg->to <= f)
600                         continue;
601                 if (rg->from >= t)
602                         break;
603
604                 seg_from = max(rg->from, f);
605                 seg_to = min(rg->to, t);
606
607                 chg += seg_to - seg_from;
608         }
609         spin_unlock(&resv->lock);
610
611         return chg;
612 }
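/*
 * Worked example (illustrative): for a map holding [0, 4) and [6, 8),
 * region_count(resv, 2, 7) sums the overlaps (4 - 2) + (7 - 6) == 3.
 */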
613
614 /*
615  * Convert the address within this vma to the page offset within
616  * the mapping, in pagecache page units; huge pages here.
617  */
618 static pgoff_t vma_hugecache_offset(struct hstate *h,
619                         struct vm_area_struct *vma, unsigned long address)
620 {
621         return ((address - vma->vm_start) >> huge_page_shift(h)) +
622                         (vma->vm_pgoff >> huge_page_order(h));
623 }
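/*
 * Worked example (illustrative): for 2MB huge pages (huge_page_shift ==
 * 21) and vm_pgoff == 0, a fault at vma->vm_start + 4MB maps to
 * (4MB >> 21) + 0 == huge page index 2 in the underlying file.
 */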
624
625 pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
626                                      unsigned long address)
627 {
628         return vma_hugecache_offset(hstate_vma(vma), vma, address);
629 }
630 EXPORT_SYMBOL_GPL(linear_hugepage_index);
631
632 /*
633  * Return the size of the pages allocated when backing a VMA.  In most
634  * cases this will be the same size as used by the page table entries.
635  */
636 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
637 {
638         struct hstate *hstate;
639
640         if (!is_vm_hugetlb_page(vma))
641                 return PAGE_SIZE;
642
643         hstate = hstate_vma(vma);
644
645         return 1UL << huge_page_shift(hstate);
646 }
647 EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
648
649 /*
650  * Return the page size being used by the MMU to back a VMA. In the majority
651  * of cases, the page size used by the kernel matches the MMU size. On
652  * architectures where it differs, an architecture-specific version of this
653  * function is required.
654  */
655 #ifndef vma_mmu_pagesize
656 unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
657 {
658         return vma_kernel_pagesize(vma);
659 }
660 #endif
661
662 /*
663  * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
664  * bits of the reservation map pointer, which are always clear due to
665  * alignment.
666  */
667 #define HPAGE_RESV_OWNER    (1UL << 0)
668 #define HPAGE_RESV_UNMAPPED (1UL << 1)
669 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
670
671 /*
672  * These helpers are used to track how many pages are reserved for
673  * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
674  * is guaranteed to have its future faults succeed.
675  *
676  * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
677  * the reserve counters are updated with the hugetlb_lock held. It is safe
678  * to reset the VMA at fork() time as it is not in use yet and there is no
679  * chance of the global counters getting corrupted as a result of the values.
680  *
681  * The private mapping reservation is represented in a subtly different
682  * manner to a shared mapping.  A shared mapping has a region map associated
683  * with the underlying file; this region map represents the backing file
684  * pages which have ever had a reservation assigned, and this persists even
685  * after the page is instantiated.  A private mapping has a region map
686  * associated with the original mmap which is attached to all VMAs which
687  * reference it; this region map represents those offsets which have consumed
688  * a reservation, i.e. where pages have been instantiated.
689  */
690 static unsigned long get_vma_private_data(struct vm_area_struct *vma)
691 {
692         return (unsigned long)vma->vm_private_data;
693 }
694
695 static void set_vma_private_data(struct vm_area_struct *vma,
696                                                         unsigned long value)
697 {
698         vma->vm_private_data = (void *)value;
699 }
700
701 struct resv_map *resv_map_alloc(void)
702 {
703         struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
704         struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);
705
706         if (!resv_map || !rg) {
707                 kfree(resv_map);
708                 kfree(rg);
709                 return NULL;
710         }
711
712         kref_init(&resv_map->refs);
713         spin_lock_init(&resv_map->lock);
714         INIT_LIST_HEAD(&resv_map->regions);
715
716         resv_map->adds_in_progress = 0;
717
718         INIT_LIST_HEAD(&resv_map->region_cache);
719         list_add(&rg->link, &resv_map->region_cache);
720         resv_map->region_cache_count = 1;
721
722         return resv_map;
723 }
724
725 void resv_map_release(struct kref *ref)
726 {
727         struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
728         struct list_head *head = &resv_map->region_cache;
729         struct file_region *rg, *trg;
730
731         /* Clear out any active regions before we release the map. */
732         region_del(resv_map, 0, LONG_MAX);
733
734         /* ... and any entries left in the cache */
735         list_for_each_entry_safe(rg, trg, head, link) {
736                 list_del(&rg->link);
737                 kfree(rg);
738         }
739
740         VM_BUG_ON(resv_map->adds_in_progress);
741
742         kfree(resv_map);
743 }
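/*
 * Usage sketch (illustrative): the map is reference counted, so owners
 * drop it through the kref machinery rather than calling kfree()
 * directly:
 *
 *	struct resv_map *map = resv_map_alloc();
 *	...
 *	kref_put(&map->refs, resv_map_release);
 */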
744
745 static inline struct resv_map *inode_resv_map(struct inode *inode)
746 {
747         return inode->i_mapping->private_data;
748 }
749
750 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
751 {
752         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
753         if (vma->vm_flags & VM_MAYSHARE) {
754                 struct address_space *mapping = vma->vm_file->f_mapping;
755                 struct inode *inode = mapping->host;
756
757                 return inode_resv_map(inode);
758
759         } else {
760                 return (struct resv_map *)(get_vma_private_data(vma) &
761                                                         ~HPAGE_RESV_MASK);
762         }
763 }
764
765 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
766 {
767         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
768         VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
769
770         set_vma_private_data(vma, (get_vma_private_data(vma) &
771                                 HPAGE_RESV_MASK) | (unsigned long)map);
772 }
773
774 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
775 {
776         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
777         VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
778
779         set_vma_private_data(vma, get_vma_private_data(vma) | flags);
780 }
781
782 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
783 {
784         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
785
786         return (get_vma_private_data(vma) & flag) != 0;
787 }
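/*
 * Illustrative note: resv_map pointers come from kmalloc() and are at
 * least word-aligned, so their low two bits are always clear and can
 * carry the HPAGE_RESV_* flags.  Packing and unpacking then compose as:
 *
 *	set_vma_resv_map(vma, map);
 *	set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
 *	vma_resv_map(vma) == map;			flags masked back off
 *	is_vma_resv_set(vma, HPAGE_RESV_OWNER);		returns 1
 */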
788
789 /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
790 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
791 {
792         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
793         if (!(vma->vm_flags & VM_MAYSHARE))
794                 vma->vm_private_data = (void *)0;
795 }
796
797 /* Returns true if the VMA has associated reserve pages */
798 static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
799 {
800         if (vma->vm_flags & VM_NORESERVE) {
801                 /*
802                  * This address is already reserved by other process(chg == 0),
803                  * so, we should decrement reserved count. Without decrementing,
804                  * reserve count remains after releasing inode, because this
805                  * allocated page will go into page cache and is regarded as
806                  * coming from reserved pool in releasing step.  Currently, we
807                  * don't have any other solution to deal with this situation
808                  * properly, so add work-around here.
809                  */
810                 if (vma->vm_flags & VM_MAYSHARE && chg == 0)
811                         return true;
812                 else
813                         return false;
814         }
815
816         /* Shared mappings always use reserves */
817         if (vma->vm_flags & VM_MAYSHARE) {
818                 /*
819                  * We know VM_NORESERVE is not set.  Therefore, there SHOULD
820                  * be a region map for all pages.  The only situation where
821                  * there is no region map is if a hole was punched via
822                  * fallocate.  In this case, there really are no reserves to
823                  * use.  This situation is indicated if chg != 0.
824                  */
825                 if (chg)
826                         return false;
827                 else
828                         return true;
829         }
830
831         /*
832          * Only the process that called mmap() has reserves for
833          * private mappings.
834          */
835         if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
836                 return true;
837
838         return false;
839 }
840
841 static void enqueue_huge_page(struct hstate *h, struct page *page)
842 {
843         int nid = page_to_nid(page);
844         list_move(&page->lru, &h->hugepage_freelists[nid]);
845         h->free_huge_pages++;
846         h->free_huge_pages_node[nid]++;
847 }
848
849 static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
850 {
851         struct page *page;
852
853         list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
854                 if (!is_migrate_isolate_page(page))
855                         break;
856         /*
857          * If a non-isolated free hugepage is not found on the list,
858          * the allocation fails.
859          */
860         if (&h->hugepage_freelists[nid] == &page->lru)
861                 return NULL;
862         list_move(&page->lru, &h->hugepage_activelist);
863         set_page_refcounted(page);
864         h->free_huge_pages--;
865         h->free_huge_pages_node[nid]--;
866         return page;
867 }
868
869 /* Movability of hugepages depends on migration support. */
870 static inline gfp_t htlb_alloc_mask(struct hstate *h)
871 {
872         if (hugepages_treat_as_movable || hugepage_migration_supported(h))
873                 return GFP_HIGHUSER_MOVABLE;
874         else
875                 return GFP_HIGHUSER;
876 }
877
878 static struct page *dequeue_huge_page_vma(struct hstate *h,
879                                 struct vm_area_struct *vma,
880                                 unsigned long address, int avoid_reserve,
881                                 long chg)
882 {
883         struct page *page = NULL;
884         struct mempolicy *mpol;
885         nodemask_t *nodemask;
886         struct zonelist *zonelist;
887         struct zone *zone;
888         struct zoneref *z;
889         unsigned int cpuset_mems_cookie;
890
891         /*
892          * A child process with MAP_PRIVATE mappings created by its parent
893          * has no page reserves. This check ensures that reservations are
894          * not "stolen". The child may still get SIGKILLed.
895          */
896         if (!vma_has_reserves(vma, chg) &&
897                         h->free_huge_pages - h->resv_huge_pages == 0)
898                 goto err;
899
900         /* If reserves cannot be used, ensure enough pages are in the pool */
901         if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
902                 goto err;
903
904 retry_cpuset:
905         cpuset_mems_cookie = read_mems_allowed_begin();
906         zonelist = huge_zonelist(vma, address,
907                                         htlb_alloc_mask(h), &mpol, &nodemask);
908
909         for_each_zone_zonelist_nodemask(zone, z, zonelist,
910                                                 MAX_NR_ZONES - 1, nodemask) {
911                 if (cpuset_zone_allowed(zone, htlb_alloc_mask(h))) {
912                         page = dequeue_huge_page_node(h, zone_to_nid(zone));
913                         if (page) {
914                                 if (avoid_reserve)
915                                         break;
916                                 if (!vma_has_reserves(vma, chg))
917                                         break;
918
919                                 SetPagePrivate(page);
920                                 h->resv_huge_pages--;
921                                 break;
922                         }
923                 }
924         }
925
926         mpol_cond_put(mpol);
927         if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
928                 goto retry_cpuset;
929         return page;
930
931 err:
932         return NULL;
933 }
934
935 /*
936  * common helper functions for hstate_next_node_to_{alloc|free}.
937  * We may have allocated or freed a huge page based on a different
938  * nodes_allowed previously, so h->next_node_to_{alloc|free} might
939  * be outside of *nodes_allowed.  Ensure that we use an allowed
940  * node for alloc or free.
941  */
942 static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
943 {
944         nid = next_node_in(nid, *nodes_allowed);
945         VM_BUG_ON(nid >= MAX_NUMNODES);
946
947         return nid;
948 }
949
950 static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
951 {
952         if (!node_isset(nid, *nodes_allowed))
953                 nid = next_node_allowed(nid, nodes_allowed);
954         return nid;
955 }
956
957 /*
958  * returns the previously saved node ["this node"] from which to
959  * allocate a persistent huge page for the pool and advance the
960  * next node from which to allocate, handling wrap at end of node
961  * mask.
962  */
963 static int hstate_next_node_to_alloc(struct hstate *h,
964                                         nodemask_t *nodes_allowed)
965 {
966         int nid;
967
968         VM_BUG_ON(!nodes_allowed);
969
970         nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
971         h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
972
973         return nid;
974 }
975
976 /*
977  * helper for free_pool_huge_page() - return the previously saved
978  * node ["this node"] from which to free a huge page.  Advance the
979  * next node id whether or not we find a free huge page to free so
980  * that the next attempt to free addresses the next node.
981  */
982 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
983 {
984         int nid;
985
986         VM_BUG_ON(!nodes_allowed);
987
988         nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
989         h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
990
991         return nid;
992 }
993
994 #define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)           \
995         for (nr_nodes = nodes_weight(*mask);                            \
996                 nr_nodes > 0 &&                                         \
997                 ((node = hstate_next_node_to_alloc(hs, mask)) || 1);    \
998                 nr_nodes--)
999
1000 #define for_each_node_mask_to_free(hs, nr_nodes, node, mask)            \
1001         for (nr_nodes = nodes_weight(*mask);                            \
1002                 nr_nodes > 0 &&                                         \
1003                 ((node = hstate_next_node_to_free(hs, mask)) || 1);     \
1004                 nr_nodes--)
1005
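/*
 * Worked example (illustrative): with *mask spanning nodes {0, 2} and
 * h->next_nid_to_alloc == 2, for_each_node_mask_to_alloc() makes
 * nodes_weight() == 2 passes, visiting node 2 and then node 0 while
 * advancing next_nid_to_alloc with wrap-around across the mask.
 */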
1006 #if defined(CONFIG_X86_64) && ((defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA))
1007 static void destroy_compound_gigantic_page(struct page *page,
1008                                         unsigned int order)
1009 {
1010         int i;
1011         int nr_pages = 1 << order;
1012         struct page *p = page + 1;
1013
1014         for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1015                 clear_compound_head(p);
1016                 set_page_refcounted(p);
1017         }
1018
1019         set_compound_order(page, 0);
1020         __ClearPageHead(page);
1021 }
1022
1023 static void free_gigantic_page(struct page *page, unsigned int order)
1024 {
1025         free_contig_range(page_to_pfn(page), 1 << order);
1026 }
1027
1028 static int __alloc_gigantic_page(unsigned long start_pfn,
1029                                 unsigned long nr_pages)
1030 {
1031         unsigned long end_pfn = start_pfn + nr_pages;
1032         return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
1033 }
1034
1035 static bool pfn_range_valid_gigantic(struct zone *z,
1036                         unsigned long start_pfn, unsigned long nr_pages)
1037 {
1038         unsigned long i, end_pfn = start_pfn + nr_pages;
1039         struct page *page;
1040
1041         for (i = start_pfn; i < end_pfn; i++) {
1042                 if (!pfn_valid(i))
1043                         return false;
1044
1045                 page = pfn_to_page(i);
1046
1047                 if (page_zone(page) != z)
1048                         return false;
1049
1050                 if (PageReserved(page))
1051                         return false;
1052
1053                 if (page_count(page) > 0)
1054                         return false;
1055
1056                 if (PageHuge(page))
1057                         return false;
1058         }
1059
1060         return true;
1061 }
1062
1063 static bool zone_spans_last_pfn(const struct zone *zone,
1064                         unsigned long start_pfn, unsigned long nr_pages)
1065 {
1066         unsigned long last_pfn = start_pfn + nr_pages - 1;
1067         return zone_spans_pfn(zone, last_pfn);
1068 }
1069
1070 static struct page *alloc_gigantic_page(int nid, unsigned int order)
1071 {
1072         unsigned long nr_pages = 1 << order;
1073         unsigned long ret, pfn, flags;
1074         struct zone *z;
1075
1076         z = NODE_DATA(nid)->node_zones;
1077         for (; z - NODE_DATA(nid)->node_zones < MAX_NR_ZONES; z++) {
1078                 spin_lock_irqsave(&z->lock, flags);
1079
1080                 pfn = ALIGN(z->zone_start_pfn, nr_pages);
1081                 while (zone_spans_last_pfn(z, pfn, nr_pages)) {
1082                         if (pfn_range_valid_gigantic(z, pfn, nr_pages)) {
1083                                 /*
1084                                  * We release the zone lock here because
1085                                  * alloc_contig_range() will also lock the zone
1086                                  * at some point. If there's an allocation
1087                                  * spinning on this lock, it may win the race
1088                                  * and cause alloc_contig_range() to fail...
1089                                  */
1090                                 spin_unlock_irqrestore(&z->lock, flags);
1091                                 ret = __alloc_gigantic_page(pfn, nr_pages);
1092                                 if (!ret)
1093                                         return pfn_to_page(pfn);
1094                                 spin_lock_irqsave(&z->lock, flags);
1095                         }
1096                         pfn += nr_pages;
1097                 }
1098
1099                 spin_unlock_irqrestore(&z->lock, flags);
1100         }
1101
1102         return NULL;
1103 }
1104
1105 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
1106 static void prep_compound_gigantic_page(struct page *page, unsigned int order);
1107
1108 static struct page *alloc_fresh_gigantic_page_node(struct hstate *h, int nid)
1109 {
1110         struct page *page;
1111
1112         page = alloc_gigantic_page(nid, huge_page_order(h));
1113         if (page) {
1114                 prep_compound_gigantic_page(page, huge_page_order(h));
1115                 prep_new_huge_page(h, page, nid);
1116         }
1117
1118         return page;
1119 }
1120
1121 static int alloc_fresh_gigantic_page(struct hstate *h,
1122                                 nodemask_t *nodes_allowed)
1123 {
1124         struct page *page = NULL;
1125         int nr_nodes, node;
1126
1127         for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1128                 page = alloc_fresh_gigantic_page_node(h, node);
1129                 if (page)
1130                         return 1;
1131         }
1132
1133         return 0;
1134 }
1135
1136 static inline bool gigantic_page_supported(void) { return true; }
1137 #else
1138 static inline bool gigantic_page_supported(void) { return false; }
1139 static inline void free_gigantic_page(struct page *page, unsigned int order) { }
1140 static inline void destroy_compound_gigantic_page(struct page *page,
1141                                                 unsigned int order) { }
1142 static inline int alloc_fresh_gigantic_page(struct hstate *h,
1143                                         nodemask_t *nodes_allowed) { return 0; }
1144 #endif
1145
1146 static void update_and_free_page(struct hstate *h, struct page *page)
1147 {
1148         int i;
1149
1150         if (hstate_is_gigantic(h) && !gigantic_page_supported())
1151                 return;
1152
1153         h->nr_huge_pages--;
1154         h->nr_huge_pages_node[page_to_nid(page)]--;
1155         for (i = 0; i < pages_per_huge_page(h); i++) {
1156                 page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
1157                                 1 << PG_referenced | 1 << PG_dirty |
1158                                 1 << PG_active | 1 << PG_private |
1159                                 1 << PG_writeback);
1160         }
1161         VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
1162         set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
1163         set_page_refcounted(page);
1164         if (hstate_is_gigantic(h)) {
1165                 destroy_compound_gigantic_page(page, huge_page_order(h));
1166                 free_gigantic_page(page, huge_page_order(h));
1167         } else {
1168                 __free_pages(page, huge_page_order(h));
1169         }
1170 }
1171
1172 struct hstate *size_to_hstate(unsigned long size)
1173 {
1174         struct hstate *h;
1175
1176         for_each_hstate(h) {
1177                 if (huge_page_size(h) == size)
1178                         return h;
1179         }
1180         return NULL;
1181 }
1182
1183 /*
1184  * Test to determine whether the hugepage is "active/in-use" (i.e. being linked
1185  * to hstate->hugepage_activelist.)
1186  *
1187  * This function can be called for tail pages, but never returns true for them.
1188  */
1189 bool page_huge_active(struct page *page)
1190 {
1191         VM_BUG_ON_PAGE(!PageHuge(page), page);
1192         return PageHead(page) && PagePrivate(&page[1]);
1193 }
1194
1195 /* never called for tail page */
1196 static void set_page_huge_active(struct page *page)
1197 {
1198         VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1199         SetPagePrivate(&page[1]);
1200 }
1201
1202 static void clear_page_huge_active(struct page *page)
1203 {
1204         VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1205         ClearPagePrivate(&page[1]);
1206 }
1207
1208 void free_huge_page(struct page *page)
1209 {
1210         /*
1211          * Can't pass hstate in here because it is called from the
1212          * compound page destructor.
1213          */
1214         struct hstate *h = page_hstate(page);
1215         int nid = page_to_nid(page);
1216         struct hugepage_subpool *spool =
1217                 (struct hugepage_subpool *)page_private(page);
1218         bool restore_reserve;
1219
1220         set_page_private(page, 0);
1221         page->mapping = NULL;
1222         VM_BUG_ON_PAGE(page_count(page), page);
1223         VM_BUG_ON_PAGE(page_mapcount(page), page);
1224         restore_reserve = PagePrivate(page);
1225         ClearPagePrivate(page);
1226
1227         /*
1228          * A return code of zero implies that the subpool will be under its
1229          * minimum size if the reservation is not restored after the page is freed.
1230          * Therefore, force restore_reserve operation.
1231          */
1232         if (hugepage_subpool_put_pages(spool, 1) == 0)
1233                 restore_reserve = true;
1234
1235         spin_lock(&hugetlb_lock);
1236         clear_page_huge_active(page);
1237         hugetlb_cgroup_uncharge_page(hstate_index(h),
1238                                      pages_per_huge_page(h), page);
1239         if (restore_reserve)
1240                 h->resv_huge_pages++;
1241
1242         if (h->surplus_huge_pages_node[nid]) {
1243                 /* remove the page from active list */
1244                 list_del(&page->lru);
1245                 update_and_free_page(h, page);
1246                 h->surplus_huge_pages--;
1247                 h->surplus_huge_pages_node[nid]--;
1248         } else {
1249                 arch_clear_hugepage_flags(page);
1250                 enqueue_huge_page(h, page);
1251         }
1252         spin_unlock(&hugetlb_lock);
1253 }
1254
1255 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
1256 {
1257         INIT_LIST_HEAD(&page->lru);
1258         set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1259         spin_lock(&hugetlb_lock);
1260         set_hugetlb_cgroup(page, NULL);
1261         h->nr_huge_pages++;
1262         h->nr_huge_pages_node[nid]++;
1263         spin_unlock(&hugetlb_lock);
1264         put_page(page); /* free it into the hugepage allocator */
1265 }
1266
1267 static void prep_compound_gigantic_page(struct page *page, unsigned int order)
1268 {
1269         int i;
1270         int nr_pages = 1 << order;
1271         struct page *p = page + 1;
1272
1273         /* we rely on prep_new_huge_page to set the destructor */
1274         set_compound_order(page, order);
1275         __ClearPageReserved(page);
1276         __SetPageHead(page);
1277         for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1278                 /*
1279                  * For gigantic hugepages allocated through bootmem at
1280                  * boot, it's safer to be consistent with the not-gigantic
1281                  * hugepages and clear the PG_reserved bit from all tail pages
1282                  * too.  Otherwise drivers using get_user_pages() to access tail
1283                  * pages may get the reference counting wrong if they see
1284                  * PG_reserved set on a tail page (despite the head page not
1285                  * having PG_reserved set).  Enforcing this consistency between
1286                  * head and tail pages allows drivers to optimize away a check
1287                  * on the head page when they need to know if put_page() is needed
1288                  * after get_user_pages().
1289                  */
1290                 __ClearPageReserved(p);
1291                 set_page_count(p, 0);
1292                 set_compound_head(p, page);
1293         }
1294         atomic_set(compound_mapcount_ptr(page), -1);
1295 }
1296
1297 /*
1298  * PageHuge() only returns true for hugetlbfs pages, but not for normal or
1299  * transparent huge pages.  See the PageTransHuge() documentation for more
1300  * details.
1301  */
1302 int PageHuge(struct page *page)
1303 {
1304         if (!PageCompound(page))
1305                 return 0;
1306
1307         page = compound_head(page);
1308         return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
1309 }
1310 EXPORT_SYMBOL_GPL(PageHuge);
1311
1312 /*
1313  * PageHeadHuge() only returns true for hugetlbfs head page, but not for
1314  * normal or transparent huge pages.
1315  */
1316 int PageHeadHuge(struct page *page_head)
1317 {
1318         if (!PageHead(page_head))
1319                 return 0;
1320
1321         return get_compound_page_dtor(page_head) == free_huge_page;
1322 }
1323
1324 pgoff_t __basepage_index(struct page *page)
1325 {
1326         struct page *page_head = compound_head(page);
1327         pgoff_t index = page_index(page_head);
1328         unsigned long compound_idx;
1329
1330         if (!PageHuge(page_head))
1331                 return page_index(page);
1332
1333         if (compound_order(page_head) >= MAX_ORDER)
1334                 compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
1335         else
1336                 compound_idx = page - page_head;
1337
1338         return (index << compound_order(page_head)) + compound_idx;
1339 }
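/*
 * Worked example (illustrative): for a 2MB huge page (compound order 9)
 * at huge page index 3, the tail page five base pages into the compound
 * page has base page index (3 << 9) + 5 == 1541.
 */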
1340
1341 static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
1342 {
1343         struct page *page;
1344
1345         page = __alloc_pages_node(nid,
1346                 htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
1347                                                 __GFP_REPEAT|__GFP_NOWARN,
1348                 huge_page_order(h));
1349         if (page) {
1350                 prep_new_huge_page(h, page, nid);
1351         }
1352
1353         return page;
1354 }
1355
1356 static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
1357 {
1358         struct page *page;
1359         int nr_nodes, node;
1360         int ret = 0;
1361
1362         for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1363                 page = alloc_fresh_huge_page_node(h, node);
1364                 if (page) {
1365                         ret = 1;
1366                         break;
1367                 }
1368         }
1369
1370         if (ret)
1371                 count_vm_event(HTLB_BUDDY_PGALLOC);
1372         else
1373                 count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1374
1375         return ret;
1376 }
1377
1378 /*
1379  * Free huge page from pool from next node to free.
1380  * Attempt to keep persistent huge pages more or less
1381  * balanced over allowed nodes.
1382  * Called with hugetlb_lock locked.
1383  */
1384 static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
1385                                                          bool acct_surplus)
1386 {
1387         int nr_nodes, node;
1388         int ret = 0;
1389
1390         for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
1391                 /*
1392                  * If we're returning unused surplus pages, only examine
1393                  * nodes with surplus pages.
1394                  */
1395                 if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
1396                     !list_empty(&h->hugepage_freelists[node])) {
1397                         struct page *page =
1398                                 list_entry(h->hugepage_freelists[node].next,
1399                                           struct page, lru);
1400                         list_del(&page->lru);
1401                         h->free_huge_pages--;
1402                         h->free_huge_pages_node[node]--;
1403                         if (acct_surplus) {
1404                                 h->surplus_huge_pages--;
1405                                 h->surplus_huge_pages_node[node]--;
1406                         }
1407                         update_and_free_page(h, page);
1408                         ret = 1;
1409                         break;
1410                 }
1411         }
1412
1413         return ret;
1414 }
1415
1416 /*
1417  * Dissolve a given free hugepage into free buddy pages. This function does
1418  * nothing for in-use (including surplus) hugepages.
1419  */
1420 static void dissolve_free_huge_page(struct page *page)
1421 {
1422         spin_lock(&hugetlb_lock);
1423         if (PageHuge(page) && !page_count(page)) {
1424                 struct hstate *h = page_hstate(page);
1425                 int nid = page_to_nid(page);
1426                 list_del(&page->lru);
1427                 h->free_huge_pages--;
1428                 h->free_huge_pages_node[nid]--;
1429                 update_and_free_page(h, page);
1430         }
1431         spin_unlock(&hugetlb_lock);
1432 }
1433
1434 /*
1435  * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
1436  * make specified memory blocks removable from the system.
1437  * Note that start_pfn should be aligned to the (minimum) hugepage size.
1438  */
1439 void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
1440 {
1441         unsigned long pfn;
1442
1443         if (!hugepages_supported())
1444                 return;
1445
1446         VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << minimum_order));
1447         for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order)
1448                 dissolve_free_huge_page(pfn_to_page(pfn));
1449 }
1450
1451 /*
1452  * There are 3 ways this can get called:
1453  * 1. With vma+addr: we use the VMA's memory policy
1454  * 2. With !vma, but nid=NUMA_NO_NODE:  We try to allocate a huge
1455  *    page from any node, and let the buddy allocator itself figure
1456  *    it out.
1457  * 3. With !vma, but nid!=NUMA_NO_NODE.  We allocate a huge page
1458  *    strictly from 'nid'
1459  */
1460 static struct page *__hugetlb_alloc_buddy_huge_page(struct hstate *h,
1461                 struct vm_area_struct *vma, unsigned long addr, int nid)
1462 {
1463         int order = huge_page_order(h);
1464         gfp_t gfp = htlb_alloc_mask(h)|__GFP_COMP|__GFP_REPEAT|__GFP_NOWARN;
1465         unsigned int cpuset_mems_cookie;
1466
1467         /*
1468          * We need a VMA to get a memory policy.  If we do not
1469          * have one, we use the 'nid' argument.
1470          *
1471          * The mempolicy stuff below has some non-inlined bits
1472          * and calls ->vm_ops.  That makes it hard to optimize at
1473          * compile-time, even when NUMA is off and it does nothing.
1474          * The IS_ENABLED() check below helps the compiler optimize it out.
1475          */
1476         if (!IS_ENABLED(CONFIG_NUMA) || !vma) {
1477                 /*
1478                  * If a specific node was explicitly requested,
1479                  * make sure to get memory from there by adding
1480                  * __GFP_THISNODE.
1481                  */
1482                 if (nid != NUMA_NO_NODE)
1483                         gfp |= __GFP_THISNODE;
1484                 /*
1485                  * Make sure to call something that can handle
1486                  * nid=NUMA_NO_NODE
1487                  */
1488                 return alloc_pages_node(nid, gfp, order);
1489         }
1490
1491         /*
1492          * OK, so we have a VMA.  Fetch the mempolicy and try to
1493          * allocate a huge page with it.  We will only reach this
1494          * when CONFIG_NUMA=y.
1495          */
1496         do {
1497                 struct page *page;
1498                 struct mempolicy *mpol;
1499                 struct zonelist *zl;
1500                 nodemask_t *nodemask;
1501
1502                 cpuset_mems_cookie = read_mems_allowed_begin();
1503                 zl = huge_zonelist(vma, addr, gfp, &mpol, &nodemask);
1504                 mpol_cond_put(mpol);
1505                 page = __alloc_pages_nodemask(gfp, order, zl, nodemask);
1506                 if (page)
1507                         return page;
1508         } while (read_mems_allowed_retry(cpuset_mems_cookie));
1509
1510         return NULL;
1511 }
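/*
 * For illustration, the three calling conventions described above
 * correspond to these sketched (hypothetical) call sites:
 *
 *	1) vma+addr: honor the VMA's memory policy
 *	page = __hugetlb_alloc_buddy_huge_page(h, vma, addr, NUMA_NO_NODE);
 *
 *	2) no vma, nid == NUMA_NO_NODE: any node, buddy decides
 *	page = __hugetlb_alloc_buddy_huge_page(h, NULL, -1, NUMA_NO_NODE);
 *
 *	3) no vma, explicit nid: __GFP_THISNODE pins the node
 *	page = __hugetlb_alloc_buddy_huge_page(h, NULL, -1, nid);
 */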
1512
1513 /*
1514  * There are two ways to allocate a huge page:
1515  * 1. When you have a VMA and an address (like a fault)
1516  * 2. When you have no VMA (like when setting /proc/.../nr_hugepages)
1517  *
1518  * 'vma' and 'addr' are only for (1).  'nid' is always NUMA_NO_NODE in
1519  * this case which signifies that the allocation should be done with
1520  * respect for the VMA's memory policy.
1521  *
1522  * For (2), we ignore 'vma' and 'addr' and use 'nid' exclusively. This
1523  * implies that memory policies will not be taken into account.
1524  */
1525 static struct page *__alloc_buddy_huge_page(struct hstate *h,
1526                 struct vm_area_struct *vma, unsigned long addr, int nid)
1527 {
1528         struct page *page;
1529         unsigned int r_nid;
1530
1531         if (hstate_is_gigantic(h))
1532                 return NULL;
1533
1534         /*
1535          * Make sure that anyone specifying 'nid' is not also specifying a VMA.
1536          * This ensures the caller is picking _one_ of the modes with which
1537          * we can call this function, not both.
1538          */
1539         if (vma || (addr != -1)) {
1540                 VM_WARN_ON_ONCE(addr == -1);
1541                 VM_WARN_ON_ONCE(nid != NUMA_NO_NODE);
1542         }
1543         /*
1544          * Assume we will successfully allocate the surplus page to
1545          * prevent racing processes from causing the surplus to exceed
1546          * overcommit.
1547          *
1548          * This however introduces a different race, where a process B
1549          * tries to grow the static hugepage pool while alloc_pages() is
1550          * called by process A. B will only examine the per-node
1551          * counters in determining if surplus huge pages can be
1552          * converted to normal huge pages in adjust_pool_surplus(). A
1553          * won't be able to increment the per-node counter, until the
1554          * lock is dropped by B, but B doesn't drop hugetlb_lock until
1555          * no more huge pages can be converted from surplus to normal
1556          * state (and doesn't try to convert again). Thus, we have a
1557          * case where a surplus huge page exists, the pool is grown, and
1558          * the surplus huge page still exists after, even though it
1559          * should just have been converted to a normal huge page. This
1560          * does not leak memory, though, as the hugepage will be freed
1561          * once it is out of use. It also does not allow the counters to
1562          * go out of whack in adjust_pool_surplus() as we don't modify
1563          * the node values until we've gotten the hugepage and only the
1564          * per-node value is checked there.
1565          */
1566         spin_lock(&hugetlb_lock);
1567         if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
1568                 spin_unlock(&hugetlb_lock);
1569                 return NULL;
1570         } else {
1571                 h->nr_huge_pages++;
1572                 h->surplus_huge_pages++;
1573         }
1574         spin_unlock(&hugetlb_lock);
1575
1576         page = __hugetlb_alloc_buddy_huge_page(h, vma, addr, nid);
1577
1578         spin_lock(&hugetlb_lock);
1579         if (page) {
1580                 INIT_LIST_HEAD(&page->lru);
1581                 r_nid = page_to_nid(page);
1582                 set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1583                 set_hugetlb_cgroup(page, NULL);
1584                 /*
1585                  * We incremented the global counters already
1586                  */
1587                 h->nr_huge_pages_node[r_nid]++;
1588                 h->surplus_huge_pages_node[r_nid]++;
1589                 __count_vm_event(HTLB_BUDDY_PGALLOC);
1590         } else {
1591                 h->nr_huge_pages--;
1592                 h->surplus_huge_pages--;
1593                 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1594         }
1595         spin_unlock(&hugetlb_lock);
1596
1597         return page;
1598 }
1599
1600 /*
1601  * Allocate a huge page from 'nid'.  Note, 'nid' may be
1602  * NUMA_NO_NODE, which means that it may be allocated
1603  * anywhere.
1604  */
1605 static
1606 struct page *__alloc_buddy_huge_page_no_mpol(struct hstate *h, int nid)
1607 {
1608         unsigned long addr = -1;
1609
1610         return __alloc_buddy_huge_page(h, NULL, addr, nid);
1611 }
1612
1613 /*
1614  * Use the VMA's mpolicy to allocate a huge page from the buddy.
1615  */
1616 static
1617 struct page *__alloc_buddy_huge_page_with_mpol(struct hstate *h,
1618                 struct vm_area_struct *vma, unsigned long addr)
1619 {
1620         return __alloc_buddy_huge_page(h, vma, addr, NUMA_NO_NODE);
1621 }
1622
1623 /*
1624  * This allocation function is useful in contexts where the vma is
1625  * irrelevant.  E.g. soft-offlining uses it because it only cares about
1626  * the physical address of the error page.
1627  */
1628 struct page *alloc_huge_page_node(struct hstate *h, int nid)
1629 {
1630         struct page *page = NULL;
1631
1632         spin_lock(&hugetlb_lock);
1633         if (h->free_huge_pages - h->resv_huge_pages > 0)
1634                 page = dequeue_huge_page_node(h, nid);
1635         spin_unlock(&hugetlb_lock);
1636
1637         if (!page)
1638                 page = __alloc_buddy_huge_page_no_mpol(h, nid);
1639
1640         return page;
1641 }
1642
1643 /*
1644  * Increase the hugetlb pool such that it can accommodate a reservation
1645  * of size 'delta'.
1646  */
1647 static int gather_surplus_pages(struct hstate *h, int delta)
1648 {
1649         struct list_head surplus_list;
1650         struct page *page, *tmp;
1651         int ret, i;
1652         int needed, allocated;
1653         bool alloc_ok = true;
1654
1655         needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
1656         if (needed <= 0) {
1657                 h->resv_huge_pages += delta;
1658                 return 0;
1659         }
1660
1661         allocated = 0;
1662         INIT_LIST_HEAD(&surplus_list);
1663
1664         ret = -ENOMEM;
1665 retry:
1666         spin_unlock(&hugetlb_lock);
1667         for (i = 0; i < needed; i++) {
1668                 page = __alloc_buddy_huge_page_no_mpol(h, NUMA_NO_NODE);
1669                 if (!page) {
1670                         alloc_ok = false;
1671                         break;
1672                 }
1673                 list_add(&page->lru, &surplus_list);
1674         }
1675         allocated += i;
1676
1677         /*
1678          * After retaking hugetlb_lock, we need to recalculate 'needed'
1679          * because either resv_huge_pages or free_huge_pages may have changed.
1680          */
1681         spin_lock(&hugetlb_lock);
1682         needed = (h->resv_huge_pages + delta) -
1683                         (h->free_huge_pages + allocated);
1684         if (needed > 0) {
1685                 if (alloc_ok)
1686                         goto retry;
1687                 /*
1688                  * We were not able to allocate enough pages to
1689                  * satisfy the entire reservation so we free what
1690                  * we've allocated so far.
1691                  */
1692                 goto free;
1693         }
1694         /*
1695          * The surplus_list now contains _at_least_ the number of extra pages
1696          * needed to accommodate the reservation.  Add the appropriate number
1697          * of pages to the hugetlb pool and free the extras back to the buddy
1698          * allocator.  Commit the entire reservation here to prevent another
1699          * process from stealing the pages as they are added to the pool but
1700          * before they are reserved.
1701          */
1702         needed += allocated;
1703         h->resv_huge_pages += delta;
1704         ret = 0;
1705
1706         /* Free the needed pages to the hugetlb pool */
1707         list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
1708                 if ((--needed) < 0)
1709                         break;
1710                 /*
1711                  * This page is now managed by the hugetlb allocator and has
1712                  * no users -- drop the buddy allocator's reference.
1713                  */
1714                 put_page_testzero(page);
1715                 VM_BUG_ON_PAGE(page_count(page), page);
1716                 enqueue_huge_page(h, page);
1717         }
1718 free:
1719         spin_unlock(&hugetlb_lock);
1720
1721         /* Free unnecessary surplus pages to the buddy allocator */
1722         list_for_each_entry_safe(page, tmp, &surplus_list, lru)
1723                 put_page(page);
1724         spin_lock(&hugetlb_lock);
1725
1726         return ret;
1727 }
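/*
 * Worked example with hypothetical numbers: resv_huge_pages = 10,
 * free_huge_pages = 4 and delta = 8 give needed = (10 + 8) - 4 = 14, so
 * 14 surplus pages are allocated.  If another task freed 3 pages while
 * hugetlb_lock was dropped (free_huge_pages = 7), the recheck gives
 * needed = (10 + 8) - (7 + 14) = -3; after needed += allocated, 11 of
 * the 14 pages are enqueued to the pool and the remaining 3 are handed
 * back to the buddy allocator in the "free:" path.
 */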
1728
1729 /*
1730  * When releasing a hugetlb pool reservation, any surplus pages that were
1731  * allocated to satisfy the reservation must be explicitly freed if they were
1732  * never used.
1733  * Called with hugetlb_lock held.
1734  */
1735 static void return_unused_surplus_pages(struct hstate *h,
1736                                         unsigned long unused_resv_pages)
1737 {
1738         unsigned long nr_pages;
1739
1740         /* Uncommit the reservation */
1741         h->resv_huge_pages -= unused_resv_pages;
1742
1743         /* Cannot return gigantic pages currently */
1744         if (hstate_is_gigantic(h))
1745                 return;
1746
1747         nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
1748
1749         /*
1750          * We want to release as many surplus pages as possible, spread
1751          * evenly across all nodes with memory. Iterate across these nodes
1752          * until we can no longer free unreserved surplus pages. This occurs
1753          * when the nodes with surplus pages have no free pages.
1754          * free_pool_huge_page() will balance the freed pages across the
1755          * on-line nodes with memory and will handle the hstate accounting.
1756          */
1757         while (nr_pages--) {
1758                 if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
1759                         break;
1760                 cond_resched_lock(&hugetlb_lock);
1761         }
1762 }
1763
1764
1765 /*
1766  * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
1767  * are used by the huge page allocation routines to manage reservations.
1768  *
1769  * vma_needs_reservation is called to determine if the huge page at addr
1770  * within the vma has an associated reservation.  If a reservation is
1771  * needed, the value 1 is returned.  The caller is then responsible for
1772  * managing the global reservation and subpool usage counts.  After
1773  * the huge page has been allocated, vma_commit_reservation is called
1774  * to add the page to the reservation map.  If the page allocation fails,
1775  * the reservation must be ended instead of committed.  vma_end_reservation
1776  * is called in such cases.
1777  *
1778  * In the normal case, vma_commit_reservation returns the same value
1779  * as the preceding vma_needs_reservation call.  The only time this
1780  * is not the case is if a reserve map was changed between calls.  It
1781  * is the responsibility of the caller to notice the difference and
1782  * take appropriate action.
1783  */
1784 enum vma_resv_mode {
1785         VMA_NEEDS_RESV,
1786         VMA_COMMIT_RESV,
1787         VMA_END_RESV,
1788 };
1789 static long __vma_reservation_common(struct hstate *h,
1790                                 struct vm_area_struct *vma, unsigned long addr,
1791                                 enum vma_resv_mode mode)
1792 {
1793         struct resv_map *resv;
1794         pgoff_t idx;
1795         long ret;
1796
1797         resv = vma_resv_map(vma);
1798         if (!resv)
1799                 return 1;
1800
1801         idx = vma_hugecache_offset(h, vma, addr);
1802         switch (mode) {
1803         case VMA_NEEDS_RESV:
1804                 ret = region_chg(resv, idx, idx + 1);
1805                 break;
1806         case VMA_COMMIT_RESV:
1807                 ret = region_add(resv, idx, idx + 1);
1808                 break;
1809         case VMA_END_RESV:
1810                 region_abort(resv, idx, idx + 1);
1811                 ret = 0;
1812                 break;
1813         default:
1814                 BUG();
1815         }
1816
1817         if (vma->vm_flags & VM_MAYSHARE)
1818                 return ret;
1819         else
1820                 return ret < 0 ? ret : 0;
1821 }
1822
1823 static long vma_needs_reservation(struct hstate *h,
1824                         struct vm_area_struct *vma, unsigned long addr)
1825 {
1826         return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
1827 }
1828
1829 static long vma_commit_reservation(struct hstate *h,
1830                         struct vm_area_struct *vma, unsigned long addr)
1831 {
1832         return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
1833 }
1834
1835 static void vma_end_reservation(struct hstate *h,
1836                         struct vm_area_struct *vma, unsigned long addr)
1837 {
1838         (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
1839 }
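/*
 * Sketch of the protocol above from a hypothetical caller's point of
 * view (alloc_huge_page() below follows this shape):
 *
 *	chg = vma_needs_reservation(h, vma, addr);
 *	if (chg < 0)
 *		return ERR_PTR(-ENOMEM);
 *	page = ...allocate the huge page...;
 *	if (!page) {
 *		vma_end_reservation(h, vma, addr);
 *		return ERR_PTR(-ENOSPC);
 *	}
 *	commit = vma_commit_reservation(h, vma, addr);
 *	if (chg > commit)
 *		...reserve map changed between the calls; adjust counts...
 */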
1840
1841 struct page *alloc_huge_page(struct vm_area_struct *vma,
1842                                     unsigned long addr, int avoid_reserve)
1843 {
1844         struct hugepage_subpool *spool = subpool_vma(vma);
1845         struct hstate *h = hstate_vma(vma);
1846         struct page *page;
1847         long map_chg, map_commit;
1848         long gbl_chg;
1849         int ret, idx;
1850         struct hugetlb_cgroup *h_cg;
1851
1852         idx = hstate_index(h);
1853         /*
1854          * Examine the region/reserve map to determine if the process
1855          * has a reservation for the page to be allocated.  A return
1856          * code of zero indicates a reservation exists (no change).
1857          */
1858         map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
1859         if (map_chg < 0)
1860                 return ERR_PTR(-ENOMEM);
1861
1862         /*
1863          * Processes that did not create the mapping will have no
1864          * reserves as indicated by the region/reserve map. Check
1865          * that the allocation will not exceed the subpool limit.
1866          * Allocations for MAP_NORESERVE mappings also need to be
1867          * checked against any subpool limit.
1868          */
1869         if (map_chg || avoid_reserve) {
1870                 gbl_chg = hugepage_subpool_get_pages(spool, 1);
1871                 if (gbl_chg < 0) {
1872                         vma_end_reservation(h, vma, addr);
1873                         return ERR_PTR(-ENOSPC);
1874                 }
1875
1876                 /*
1877                  * Even though there was no reservation in the region/reserve
1878                  * map, there could be reservations associated with the
1879                  * subpool that can be used.  This would be indicated if the
1880                  * return value of hugepage_subpool_get_pages() is zero.
1881                  * However, if avoid_reserve is specified we still avoid even
1882                  * the subpool reservations.
1883                  */
1884                 if (avoid_reserve)
1885                         gbl_chg = 1;
1886         }
1887
1888         ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
1889         if (ret)
1890                 goto out_subpool_put;
1891
1892         spin_lock(&hugetlb_lock);
1893         /*
1894          * gbl_chg is passed to indicate whether or not a page must be taken
1895          * from the global free pool (global change).  gbl_chg == 0 indicates
1896          * a reservation exists for the allocation.
1897          */
1898         page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
1899         if (!page) {
1900                 spin_unlock(&hugetlb_lock);
1901                 page = __alloc_buddy_huge_page_with_mpol(h, vma, addr);
1902                 if (!page)
1903                         goto out_uncharge_cgroup;
1904                 if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
1905                         SetPagePrivate(page);
1906                         h->resv_huge_pages--;
1907                 }
1908                 spin_lock(&hugetlb_lock);
1909                 list_move(&page->lru, &h->hugepage_activelist);
1910                 /* Fall through */
1911         }
1912         hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
1913         spin_unlock(&hugetlb_lock);
1914
1915         set_page_private(page, (unsigned long)spool);
1916
1917         map_commit = vma_commit_reservation(h, vma, addr);
1918         if (unlikely(map_chg > map_commit)) {
1919                 /*
1920                  * The page was added to the reservation map between
1921                  * vma_needs_reservation and vma_commit_reservation.
1922                  * This indicates a race with hugetlb_reserve_pages.
1923                  * Adjust for the subpool count incremented above AND
1924                  * in hugetlb_reserve_pages for the same page.  Also,
1925                  * the reservation count added in hugetlb_reserve_pages
1926                  * no longer applies.
1927                  */
1928                 long rsv_adjust;
1929
1930                 rsv_adjust = hugepage_subpool_put_pages(spool, 1);
1931                 hugetlb_acct_memory(h, -rsv_adjust);
1932         }
1933         return page;
1934
1935 out_uncharge_cgroup:
1936         hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
1937 out_subpool_put:
1938         if (map_chg || avoid_reserve)
1939                 hugepage_subpool_put_pages(spool, 1);
1940         vma_end_reservation(h, vma, addr);
1941         return ERR_PTR(-ENOSPC);
1942 }
1943
1944 /*
1945  * alloc_huge_page()'s wrapper which simply returns the page if allocation
1946  * succeeds, otherwise NULL. This function is called from new_vma_page(),
1947  * where no ERR_PTR()-encoded error value is expected to be returned.
1948  */
1949 struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
1950                                 unsigned long addr, int avoid_reserve)
1951 {
1952         struct page *page = alloc_huge_page(vma, addr, avoid_reserve);
1953         if (IS_ERR(page))
1954                 page = NULL;
1955         return page;
1956 }
1957
1958 int __weak alloc_bootmem_huge_page(struct hstate *h)
1959 {
1960         struct huge_bootmem_page *m;
1961         int nr_nodes, node;
1962
1963         for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
1964                 void *addr;
1965
1966                 addr = memblock_virt_alloc_try_nid_nopanic(
1967                                 huge_page_size(h), huge_page_size(h),
1968                                 0, BOOTMEM_ALLOC_ACCESSIBLE, node);
1969                 if (addr) {
1970                         /*
1971                          * Use the beginning of the huge page to store the
1972                          * huge_bootmem_page struct (until gather_bootmem
1973                          * puts them into the mem_map).
1974                          */
1975                         m = addr;
1976                         goto found;
1977                 }
1978         }
1979         return 0;
1980
1981 found:
1982         BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h)));
1983         /* Put them into a private list first because mem_map is not up yet */
1984         list_add(&m->list, &huge_boot_pages);
1985         m->hstate = h;
1986         return 1;
1987 }
1988
1989 static void __init prep_compound_huge_page(struct page *page,
1990                 unsigned int order)
1991 {
1992         if (unlikely(order > (MAX_ORDER - 1)))
1993                 prep_compound_gigantic_page(page, order);
1994         else
1995                 prep_compound_page(page, order);
1996 }
1997
1998 /* Put bootmem huge pages into the standard lists after mem_map is up */
1999 static void __init gather_bootmem_prealloc(void)
2000 {
2001         struct huge_bootmem_page *m;
2002
2003         list_for_each_entry(m, &huge_boot_pages, list) {
2004                 struct hstate *h = m->hstate;
2005                 struct page *page;
2006
2007 #ifdef CONFIG_HIGHMEM
2008                 page = pfn_to_page(m->phys >> PAGE_SHIFT);
2009                 memblock_free_late(__pa(m),
2010                                    sizeof(struct huge_bootmem_page));
2011 #else
2012                 page = virt_to_page(m);
2013 #endif
2014                 WARN_ON(page_count(page) != 1);
2015                 prep_compound_huge_page(page, h->order);
2016                 WARN_ON(PageReserved(page));
2017                 prep_new_huge_page(h, page, page_to_nid(page));
2018                 /*
2019                  * If we had gigantic hugepages allocated at boot time, we need
2020                  * to restore the 'stolen' pages to totalram_pages in order to
2021  * fix confusing memory reports from free(1) and other
2022  * side effects, like CommitLimit going negative.
2023                  */
2024                 if (hstate_is_gigantic(h))
2025                         adjust_managed_page_count(page, 1 << h->order);
2026         }
2027 }
2028
2029 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
2030 {
2031         unsigned long i;
2032
2033         for (i = 0; i < h->max_huge_pages; ++i) {
2034                 if (hstate_is_gigantic(h)) {
2035                         if (!alloc_bootmem_huge_page(h))
2036                                 break;
2037                 } else if (!alloc_fresh_huge_page(h,
2038                                          &node_states[N_MEMORY]))
2039                         break;
2040         }
2041         h->max_huge_pages = i;
2042 }
2043
2044 static void __init hugetlb_init_hstates(void)
2045 {
2046         struct hstate *h;
2047
2048         for_each_hstate(h) {
2049                 if (minimum_order > huge_page_order(h))
2050                         minimum_order = huge_page_order(h);
2051
2052                 /* gigantic hugepages were already initialized in early boot */
2053                 if (!hstate_is_gigantic(h))
2054                         hugetlb_hstate_alloc_pages(h);
2055         }
2056         VM_BUG_ON(minimum_order == UINT_MAX);
2057 }
2058
2059 static char * __init memfmt(char *buf, unsigned long n)
2060 {
2061         if (n >= (1UL << 30))
2062                 sprintf(buf, "%lu GB", n >> 30);
2063         else if (n >= (1UL << 20))
2064                 sprintf(buf, "%lu MB", n >> 20);
2065         else
2066                 sprintf(buf, "%lu KB", n >> 10);
2067         return buf;
2068 }
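/*
 * For example, memfmt(buf, 2UL << 20) yields "2 MB" and
 * memfmt(buf, 1UL << 30) yields "1 GB"; callers supply a buffer large
 * enough for the longest result (report_hugepages() below uses 32
 * bytes).
 */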
2069
2070 static void __init report_hugepages(void)
2071 {
2072         struct hstate *h;
2073
2074         for_each_hstate(h) {
2075                 char buf[32];
2076                 pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
2077                         memfmt(buf, huge_page_size(h)),
2078                         h->free_huge_pages);
2079         }
2080 }
2081
2082 #ifdef CONFIG_HIGHMEM
2083 static void try_to_free_low(struct hstate *h, unsigned long count,
2084                                                 nodemask_t *nodes_allowed)
2085 {
2086         int i;
2087
2088         if (hstate_is_gigantic(h))
2089                 return;
2090
2091         for_each_node_mask(i, *nodes_allowed) {
2092                 struct page *page, *next;
2093                 struct list_head *freel = &h->hugepage_freelists[i];
2094                 list_for_each_entry_safe(page, next, freel, lru) {
2095                         if (count >= h->nr_huge_pages)
2096                                 return;
2097                         if (PageHighMem(page))
2098                                 continue;
2099                         list_del(&page->lru);
2100                         update_and_free_page(h, page);
2101                         h->free_huge_pages--;
2102                         h->free_huge_pages_node[page_to_nid(page)]--;
2103                 }
2104         }
2105 }
2106 #else
2107 static inline void try_to_free_low(struct hstate *h, unsigned long count,
2108                                                 nodemask_t *nodes_allowed)
2109 {
2110 }
2111 #endif
2112
2113 /*
2114  * Increment or decrement surplus_huge_pages.  Keep node-specific counters
2115  * balanced by operating on them in a round-robin fashion.
2116  * Returns 1 if an adjustment was made.
2117  */
2118 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
2119                                 int delta)
2120 {
2121         int nr_nodes, node;
2122
2123         VM_BUG_ON(delta != -1 && delta != 1);
2124
2125         if (delta < 0) {
2126                 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
2127                         if (h->surplus_huge_pages_node[node])
2128                                 goto found;
2129                 }
2130         } else {
2131                 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
2132                         if (h->surplus_huge_pages_node[node] <
2133                                         h->nr_huge_pages_node[node])
2134                                 goto found;
2135                 }
2136         }
2137         return 0;
2138
2139 found:
2140         h->surplus_huge_pages += delta;
2141         h->surplus_huge_pages_node[node] += delta;
2142         return 1;
2143 }
2144
2145 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
2146 static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
2147                                                 nodemask_t *nodes_allowed)
2148 {
2149         unsigned long min_count, ret;
2150
2151         if (hstate_is_gigantic(h) && !gigantic_page_supported())
2152                 return h->max_huge_pages;
2153
2154         /*
2155          * Increase the pool size
2156          * First take pages out of surplus state.  Then make up the
2157          * remaining difference by allocating fresh huge pages.
2158          *
2159          * We might race with __alloc_buddy_huge_page() here and be unable
2160          * to convert a surplus huge page to a normal huge page. That is
2161          * not critical, though, it just means the overall size of the
2162          * pool might be one hugepage larger than it needs to be, but
2163          * within all the constraints specified by the sysctls.
2164          */
2165         spin_lock(&hugetlb_lock);
2166         while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
2167                 if (!adjust_pool_surplus(h, nodes_allowed, -1))
2168                         break;
2169         }
2170
2171         while (count > persistent_huge_pages(h)) {
2172                 /*
2173                  * If this allocation races such that we no longer need the
2174                  * page, free_huge_page will handle it by freeing the page
2175                  * and reducing the surplus.
2176                  */
2177                 spin_unlock(&hugetlb_lock);
2178                 if (hstate_is_gigantic(h))
2179                         ret = alloc_fresh_gigantic_page(h, nodes_allowed);
2180                 else
2181                         ret = alloc_fresh_huge_page(h, nodes_allowed);
2182                 spin_lock(&hugetlb_lock);
2183                 if (!ret)
2184                         goto out;
2185
2186                 /* Bail for signals. Probably ctrl-c from user */
2187                 if (signal_pending(current))
2188                         goto out;
2189         }
2190
2191         /*
2192          * Decrease the pool size
2193          * First return free pages to the buddy allocator (being careful
2194          * to keep enough around to satisfy reservations).  Then place
2195          * pages into surplus state as needed so the pool will shrink
2196          * to the desired size as pages become free.
2197          *
2198          * By placing pages into the surplus state independent of the
2199          * overcommit value, we are allowing the surplus pool size to
2200          * exceed overcommit. There are few sane options here. Since
2201          * __alloc_buddy_huge_page() is checking the global counter,
2202          * though, we'll note that we're not allowed to exceed surplus
2203          * and won't grow the pool anywhere else. Not until one of the
2204          * sysctls is changed, or the surplus pages go out of use.
2205          */
2206         min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
2207         min_count = max(count, min_count);
2208         try_to_free_low(h, min_count, nodes_allowed);
2209         while (min_count < persistent_huge_pages(h)) {
2210                 if (!free_pool_huge_page(h, nodes_allowed, 0))
2211                         break;
2212                 cond_resched_lock(&hugetlb_lock);
2213         }
2214         while (count < persistent_huge_pages(h)) {
2215                 if (!adjust_pool_surplus(h, nodes_allowed, 1))
2216                         break;
2217         }
2218 out:
2219         ret = persistent_huge_pages(h);
2220         spin_unlock(&hugetlb_lock);
2221         return ret;
2222 }
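/*
 * Shrink example (hypothetical numbers): with nr_huge_pages = 100,
 * free_huge_pages = 40 and resv_huge_pages = 10, a request for
 * count = 20 clamps to min_count = 10 + 100 - 40 = 70 pages that must
 * stay in the pool; 30 free pages are released to the buddy allocator
 * and the remaining 70 - 20 = 50 in-use pages are flagged as surplus so
 * they are freed rather than re-pooled as they fall out of use.
 */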
2223
2224 #define HSTATE_ATTR_RO(_name) \
2225         static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
2226
2227 #define HSTATE_ATTR(_name) \
2228         static struct kobj_attribute _name##_attr = \
2229                 __ATTR(_name, 0644, _name##_show, _name##_store)
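/*
 * For example, HSTATE_ATTR(nr_hugepages) below expands to:
 *
 *	static struct kobj_attribute nr_hugepages_attr =
 *		__ATTR(nr_hugepages, 0644, nr_hugepages_show,
 *		       nr_hugepages_store);
 *
 * wiring the sysfs file to its show/store handlers.
 */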
2230
2231 static struct kobject *hugepages_kobj;
2232 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
2233
2234 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
2235
2236 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
2237 {
2238         int i;
2239
2240         for (i = 0; i < HUGE_MAX_HSTATE; i++)
2241                 if (hstate_kobjs[i] == kobj) {
2242                         if (nidp)
2243                                 *nidp = NUMA_NO_NODE;
2244                         return &hstates[i];
2245                 }
2246
2247         return kobj_to_node_hstate(kobj, nidp);
2248 }
2249
2250 static ssize_t nr_hugepages_show_common(struct kobject *kobj,
2251                                         struct kobj_attribute *attr, char *buf)
2252 {
2253         struct hstate *h;
2254         unsigned long nr_huge_pages;
2255         int nid;
2256
2257         h = kobj_to_hstate(kobj, &nid);
2258         if (nid == NUMA_NO_NODE)
2259                 nr_huge_pages = h->nr_huge_pages;
2260         else
2261                 nr_huge_pages = h->nr_huge_pages_node[nid];
2262
2263         return sprintf(buf, "%lu\n", nr_huge_pages);
2264 }
2265
2266 static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
2267                                            struct hstate *h, int nid,
2268                                            unsigned long count, size_t len)
2269 {
2270         int err;
2271         NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
2272
2273         if (hstate_is_gigantic(h) && !gigantic_page_supported()) {
2274                 err = -EINVAL;
2275                 goto out;
2276         }
2277
2278         if (nid == NUMA_NO_NODE) {
2279                 /*
2280                  * global hstate attribute
2281                  */
2282                 if (!(obey_mempolicy &&
2283                                 init_nodemask_of_mempolicy(nodes_allowed))) {
2284                         NODEMASK_FREE(nodes_allowed);
2285                         nodes_allowed = &node_states[N_MEMORY];
2286                 }
2287         } else if (nodes_allowed) {
2288                 /*
2289                  * per node hstate attribute: adjust count to global,
2290                  * but restrict alloc/free to the specified node.
2291                  */
2292                 count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
2293                 init_nodemask_of_node(nodes_allowed, nid);
2294         } else
2295                 nodes_allowed = &node_states[N_MEMORY];
2296
2297         h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
2298
2299         if (nodes_allowed != &node_states[N_MEMORY])
2300                 NODEMASK_FREE(nodes_allowed);
2301
2302         return len;
2303 out:
2304         NODEMASK_FREE(nodes_allowed);
2305         return err;
2306 }
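/*
 * Example of the per node adjustment above (hypothetical numbers): with
 * nr_huge_pages = 100 globally, of which nr_huge_pages_node[1] = 30,
 * writing count = 50 to node 1's attribute becomes a global target of
 * 50 + (100 - 30) = 120, while nodes_allowed restricts the resulting
 * alloc/free activity to node 1.
 */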
2307
2308 static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
2309                                          struct kobject *kobj, const char *buf,
2310                                          size_t len)
2311 {
2312         struct hstate *h;
2313         unsigned long count;
2314         int nid;
2315         int err;
2316
2317         err = kstrtoul(buf, 10, &count);
2318         if (err)
2319                 return err;
2320
2321         h = kobj_to_hstate(kobj, &nid);
2322         return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
2323 }
2324
2325 static ssize_t nr_hugepages_show(struct kobject *kobj,
2326                                        struct kobj_attribute *attr, char *buf)
2327 {
2328         return nr_hugepages_show_common(kobj, attr, buf);
2329 }
2330
2331 static ssize_t nr_hugepages_store(struct kobject *kobj,
2332                struct kobj_attribute *attr, const char *buf, size_t len)
2333 {
2334         return nr_hugepages_store_common(false, kobj, buf, len);
2335 }
2336 HSTATE_ATTR(nr_hugepages);
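/*
 * Typical userspace usage of the attribute above (path assumes a 2 MB
 * hstate):
 *
 *	# echo 512 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 *	# cat /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 *	512
 */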
2337
2338 #ifdef CONFIG_NUMA
2339
2340 /*
2341  * hstate attribute for optionally mempolicy-based constraint on persistent
2342  * huge page alloc/free.
2343  */
2344 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
2345                                        struct kobj_attribute *attr, char *buf)
2346 {
2347         return nr_hugepages_show_common(kobj, attr, buf);
2348 }
2349
2350 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
2351                struct kobj_attribute *attr, const char *buf, size_t len)
2352 {
2353         return nr_hugepages_store_common(true, kobj, buf, len);
2354 }
2355 HSTATE_ATTR(nr_hugepages_mempolicy);
2356 #endif
2357
2358
2359 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
2360                                         struct kobj_attribute *attr, char *buf)
2361 {
2362         struct hstate *h = kobj_to_hstate(kobj, NULL);
2363         return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
2364 }
2365
2366 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
2367                 struct kobj_attribute *attr, const char *buf, size_t count)
2368 {
2369         int err;
2370         unsigned long input;
2371         struct hstate *h = kobj_to_hstate(kobj, NULL);
2372
2373         if (hstate_is_gigantic(h))
2374                 return -EINVAL;
2375
2376         err = kstrtoul(buf, 10, &input);
2377         if (err)
2378                 return err;
2379
2380         spin_lock(&hugetlb_lock);
2381         h->nr_overcommit_huge_pages = input;
2382         spin_unlock(&hugetlb_lock);
2383
2384         return count;
2385 }
2386 HSTATE_ATTR(nr_overcommit_hugepages);
2387
2388 static ssize_t free_hugepages_show(struct kobject *kobj,
2389                                         struct kobj_attribute *attr, char *buf)
2390 {
2391         struct hstate *h;
2392         unsigned long free_huge_pages;
2393         int nid;
2394
2395         h = kobj_to_hstate(kobj, &nid);
2396         if (nid == NUMA_NO_NODE)
2397                 free_huge_pages = h->free_huge_pages;
2398         else
2399                 free_huge_pages = h->free_huge_pages_node[nid];
2400
2401         return sprintf(buf, "%lu\n", free_huge_pages);
2402 }
2403 HSTATE_ATTR_RO(free_hugepages);
2404
2405 static ssize_t resv_hugepages_show(struct kobject *kobj,
2406                                         struct kobj_attribute *attr, char *buf)
2407 {
2408         struct hstate *h = kobj_to_hstate(kobj, NULL);
2409         return sprintf(buf, "%lu\n", h->resv_huge_pages);
2410 }
2411 HSTATE_ATTR_RO(resv_hugepages);
2412
2413 static ssize_t surplus_hugepages_show(struct kobject *kobj,
2414                                         struct kobj_attribute *attr, char *buf)
2415 {
2416         struct hstate *h;
2417         unsigned long surplus_huge_pages;
2418         int nid;
2419
2420         h = kobj_to_hstate(kobj, &nid);
2421         if (nid == NUMA_NO_NODE)
2422                 surplus_huge_pages = h->surplus_huge_pages;
2423         else
2424                 surplus_huge_pages = h->surplus_huge_pages_node[nid];
2425
2426         return sprintf(buf, "%lu\n", surplus_huge_pages);
2427 }
2428 HSTATE_ATTR_RO(surplus_hugepages);
2429
2430 static struct attribute *hstate_attrs[] = {
2431         &nr_hugepages_attr.attr,
2432         &nr_overcommit_hugepages_attr.attr,
2433         &free_hugepages_attr.attr,
2434         &resv_hugepages_attr.attr,
2435         &surplus_hugepages_attr.attr,
2436 #ifdef CONFIG_NUMA
2437         &nr_hugepages_mempolicy_attr.attr,
2438 #endif
2439         NULL,
2440 };
2441
2442 static struct attribute_group hstate_attr_group = {
2443         .attrs = hstate_attrs,
2444 };
2445
2446 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
2447                                     struct kobject **hstate_kobjs,
2448                                     struct attribute_group *hstate_attr_group)
2449 {
2450         int retval;
2451         int hi = hstate_index(h);
2452
2453         hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
2454         if (!hstate_kobjs[hi])
2455                 return -ENOMEM;
2456
2457         retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
2458         if (retval)
2459                 kobject_put(hstate_kobjs[hi]);
2460
2461         return retval;
2462 }
2463
2464 static void __init hugetlb_sysfs_init(void)
2465 {
2466         struct hstate *h;
2467         int err;
2468
2469         hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
2470         if (!hugepages_kobj)
2471                 return;
2472
2473         for_each_hstate(h) {
2474                 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
2475                                          hstate_kobjs, &hstate_attr_group);
2476                 if (err)
2477                         pr_err("Hugetlb: Unable to add hstate %s\n", h->name);
2478         }
2479 }
2480
2481 #ifdef CONFIG_NUMA
2482
2483 /*
2484  * node_hstate/s - associate per node hstate attributes, via their kobjects,
2485  * with node devices in node_devices[] using a parallel array.  The array
2486  * index of a node device (or its node_hstate) equals the node id.
2487  * This is here to avoid any static dependency of the node device driver, in
2488  * the base kernel, on the hugetlb module.
2489  */
2490 struct node_hstate {
2491         struct kobject          *hugepages_kobj;
2492         struct kobject          *hstate_kobjs[HUGE_MAX_HSTATE];
2493 };
2494 static struct node_hstate node_hstates[MAX_NUMNODES];
2495
2496 /*
2497  * A subset of global hstate attributes for node devices
2498  */
2499 static struct attribute *per_node_hstate_attrs[] = {
2500         &nr_hugepages_attr.attr,
2501         &free_hugepages_attr.attr,
2502         &surplus_hugepages_attr.attr,
2503         NULL,
2504 };
2505
2506 static struct attribute_group per_node_hstate_attr_group = {
2507         .attrs = per_node_hstate_attrs,
2508 };
2509
2510 /*
2511  * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
2512  * Returns node id via non-NULL nidp.
2513  */
2514 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2515 {
2516         int nid;
2517
2518         for (nid = 0; nid < nr_node_ids; nid++) {
2519                 struct node_hstate *nhs = &node_hstates[nid];
2520                 int i;
2521                 for (i = 0; i < HUGE_MAX_HSTATE; i++)
2522                         if (nhs->hstate_kobjs[i] == kobj) {
2523                                 if (nidp)
2524                                         *nidp = nid;
2525                                 return &hstates[i];
2526                         }
2527         }
2528
2529         BUG();
2530         return NULL;
2531 }
2532
2533 /*
2534  * Unregister hstate attributes from a single node device.
2535  * No-op if no hstate attributes attached.
2536  */
2537 static void hugetlb_unregister_node(struct node *node)
2538 {
2539         struct hstate *h;
2540         struct node_hstate *nhs = &node_hstates[node->dev.id];
2541
2542         if (!nhs->hugepages_kobj)
2543                 return;         /* no hstate attributes */
2544
2545         for_each_hstate(h) {
2546                 int idx = hstate_index(h);
2547                 if (nhs->hstate_kobjs[idx]) {
2548                         kobject_put(nhs->hstate_kobjs[idx]);
2549                         nhs->hstate_kobjs[idx] = NULL;
2550                 }
2551         }
2552
2553         kobject_put(nhs->hugepages_kobj);
2554         nhs->hugepages_kobj = NULL;
2555 }
2556
2557
2558 /*
2559  * Register hstate attributes for a single node device.
2560  * No-op if attributes already registered.
2561  */
2562 static void hugetlb_register_node(struct node *node)
2563 {
2564         struct hstate *h;
2565         struct node_hstate *nhs = &node_hstates[node->dev.id];
2566         int err;
2567
2568         if (nhs->hugepages_kobj)
2569                 return;         /* already allocated */
2570
2571         nhs->hugepages_kobj = kobject_create_and_add("hugepages",
2572                                                         &node->dev.kobj);
2573         if (!nhs->hugepages_kobj)
2574                 return;
2575
2576         for_each_hstate(h) {
2577                 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
2578                                                 nhs->hstate_kobjs,
2579                                                 &per_node_hstate_attr_group);
2580                 if (err) {
2581                         pr_err("Hugetlb: Unable to add hstate %s for node %d\n",
2582                                 h->name, node->dev.id);
2583                         hugetlb_unregister_node(node);
2584                         break;
2585                 }
2586         }
2587 }
2588
2589 /*
2590  * hugetlb init time:  register hstate attributes for all registered node
2591  * devices of nodes that have memory.  All on-line nodes should have
2592  * registered their associated device by this time.
2593  */
2594 static void __init hugetlb_register_all_nodes(void)
2595 {
2596         int nid;
2597
2598         for_each_node_state(nid, N_MEMORY) {
2599                 struct node *node = node_devices[nid];
2600                 if (node->dev.id == nid)
2601                         hugetlb_register_node(node);
2602         }
2603
2604         /*
2605          * Let the node device driver know we're here so it can
2606          * [un]register hstate attributes on node hotplug.
2607          */
2608         register_hugetlbfs_with_node(hugetlb_register_node,
2609                                      hugetlb_unregister_node);
2610 }
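/*
 * The per node attributes registered above appear under the node
 * devices, e.g. for a 2 MB hstate on node 0:
 *
 *	# echo 64 > /sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages
 *	# cat /sys/devices/system/node/node0/hugepages/hugepages-2048kB/free_hugepages
 */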
2611 #else   /* !CONFIG_NUMA */
2612
2613 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2614 {
2615         BUG();
2616         if (nidp)
2617                 *nidp = -1;
2618         return NULL;
2619 }
2620
2621 static void hugetlb_register_all_nodes(void) { }
2622
2623 #endif
2624
2625 static int __init hugetlb_init(void)
2626 {
2627         int i;
2628
2629         if (!hugepages_supported())
2630                 return 0;
2631
2632         if (!size_to_hstate(default_hstate_size)) {
2633                 default_hstate_size = HPAGE_SIZE;
2634                 if (!size_to_hstate(default_hstate_size))
2635                         hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
2636         }
2637         default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
2638         if (default_hstate_max_huge_pages) {
2639                 if (!default_hstate.max_huge_pages)
2640                         default_hstate.max_huge_pages = default_hstate_max_huge_pages;
2641         }
2642
2643         hugetlb_init_hstates();
2644         gather_bootmem_prealloc();
2645         report_hugepages();
2646
2647         hugetlb_sysfs_init();
2648         hugetlb_register_all_nodes();
2649         hugetlb_cgroup_file_init();
2650
2651 #ifdef CONFIG_SMP
2652         num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
2653 #else
2654         num_fault_mutexes = 1;
2655 #endif
2656         hugetlb_fault_mutex_table =
2657                 kmalloc(sizeof(struct mutex) * num_fault_mutexes, GFP_KERNEL);
2658         BUG_ON(!hugetlb_fault_mutex_table);
2659
2660         for (i = 0; i < num_fault_mutexes; i++)
2661                 mutex_init(&hugetlb_fault_mutex_table[i]);
2662         return 0;
2663 }
2664 subsys_initcall(hugetlb_init);
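/*
 * Sizing example: on an SMP kernel with 6 possible CPUs the table above
 * gets roundup_pow_of_two(8 * 6) = 64 mutexes; faults on the same
 * logical page hash to the same table slot, so they serialize against
 * each other without a single global lock.
 */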
2665
2666 /* Called when a hugepagesz=... option specifies an unsupported size */
2667 void __init hugetlb_bad_size(void)
2668 {
2669         parsed_valid_hugepagesz = false;
2670 }
2671
2672 void __init hugetlb_add_hstate(unsigned int order)
2673 {
2674         struct hstate *h;
2675         unsigned long i;
2676
2677         if (size_to_hstate(PAGE_SIZE << order)) {
2678                 pr_warn("hugepagesz= specified twice, ignoring\n");
2679                 return;
2680         }
2681         BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
2682         BUG_ON(order == 0);
2683         h = &hstates[hugetlb_max_hstate++];
2684         h->order = order;
2685         h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
2686         h->nr_huge_pages = 0;
2687         h->free_huge_pages = 0;
2688         for (i = 0; i < MAX_NUMNODES; ++i)
2689                 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
2690         INIT_LIST_HEAD(&h->hugepage_activelist);
2691         h->next_nid_to_alloc = first_memory_node;
2692         h->next_nid_to_free = first_memory_node;
2693         snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
2694                                         huge_page_size(h)/1024);
2695
2696         parsed_hstate = h;
2697 }
2698
2699 static int __init hugetlb_nrpages_setup(char *s)
2700 {
2701         unsigned long *mhp;
2702         static unsigned long *last_mhp;
2703
2704         if (!parsed_valid_hugepagesz) {
2705                 pr_warn("hugepages = %s preceded by "
2706                         "an unsupported hugepagesz, ignoring\n", s);
2707                 parsed_valid_hugepagesz = true;
2708                 return 1;
2709         }
2710         /*
2711          * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet,
2712          * so this hugepages= parameter goes to the "default hstate".
2713          */
2714         else if (!hugetlb_max_hstate)
2715                 mhp = &default_hstate_max_huge_pages;
2716         else
2717                 mhp = &parsed_hstate->max_huge_pages;
2718
2719         if (mhp == last_mhp) {
2720                 pr_warn("hugepages= specified twice without interleaving hugepagesz=, ignoring\n");
2721                 return 1;
2722         }
2723
2724         if (sscanf(s, "%lu", mhp) <= 0)
2725                 *mhp = 0;
2726
2727         /*
2728          * Global state is always initialized later in hugetlb_init.
2729  * But we need to allocate the pages for gigantic (>= MAX_ORDER)
2730  * hstates here, early, so we can still use the bootmem allocator.
2731          */
2732         if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
2733                 hugetlb_hstate_alloc_pages(parsed_hstate);
2734
2735         last_mhp = mhp;
2736
2737         return 1;
2738 }
2739 __setup("hugepages=", hugetlb_nrpages_setup);
2740
2741 static int __init hugetlb_default_setup(char *s)
2742 {
2743         default_hstate_size = memparse(s, &s);
2744         return 1;
2745 }
2746 __setup("default_hugepagesz=", hugetlb_default_setup);
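/*
 * Example kernel command line combining these options:
 *
 *	default_hugepagesz=2M hugepagesz=2M hugepages=512 hugepagesz=1G hugepages=4
 *
 * Each hugepages= applies to the hstate created by the most recent
 * hugepagesz=; a hugepages= with no preceding hugepagesz= sizes the
 * default hstate.
 */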
2747
2748 static unsigned int cpuset_mems_nr(unsigned int *array)
2749 {
2750         int node;
2751         unsigned int nr = 0;
2752
2753         for_each_node_mask(node, cpuset_current_mems_allowed)
2754                 nr += array[node];
2755
2756         return nr;
2757 }
2758
2759 #ifdef CONFIG_SYSCTL
2760 static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
2761                          struct ctl_table *table, int write,
2762                          void __user *buffer, size_t *length, loff_t *ppos)
2763 {
2764         struct hstate *h = &default_hstate;
2765         unsigned long tmp = h->max_huge_pages;
2766         int ret;
2767
2768         if (!hugepages_supported())
2769                 return -EOPNOTSUPP;
2770
2771         table->data = &tmp;
2772         table->maxlen = sizeof(unsigned long);
2773         ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2774         if (ret)
2775                 goto out;
2776
2777         if (write)
2778                 ret = __nr_hugepages_store_common(obey_mempolicy, h,
2779                                                   NUMA_NO_NODE, tmp, *length);
2780 out:
2781         return ret;
2782 }
2783
2784 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
2785                           void __user *buffer, size_t *length, loff_t *ppos)
2786 {
2787
2788         return hugetlb_sysctl_handler_common(false, table, write,
2789                                                         buffer, length, ppos);
2790 }
2791
2792 #ifdef CONFIG_NUMA
2793 int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
2794                           void __user *buffer, size_t *length, loff_t *ppos)
2795 {
2796         return hugetlb_sysctl_handler_common(true, table, write,
2797                                                         buffer, length, ppos);
2798 }
2799 #endif /* CONFIG_NUMA */
2800
2801 int hugetlb_overcommit_handler(struct ctl_table *table, int write,
2802                         void __user *buffer,
2803                         size_t *length, loff_t *ppos)
2804 {
2805         struct hstate *h = &default_hstate;
2806         unsigned long tmp;
2807         int ret;
2808
2809         if (!hugepages_supported())
2810                 return -EOPNOTSUPP;
2811
2812         tmp = h->nr_overcommit_huge_pages;
2813
2814         if (write && hstate_is_gigantic(h))
2815                 return -EINVAL;
2816
2817         table->data = &tmp;
2818         table->maxlen = sizeof(unsigned long);
2819         ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2820         if (ret)
2821                 goto out;
2822
2823         if (write) {
2824                 spin_lock(&hugetlb_lock);
2825                 h->nr_overcommit_huge_pages = tmp;
2826                 spin_unlock(&hugetlb_lock);
2827         }
2828 out:
2829         return ret;
2830 }
2831
2832 #endif /* CONFIG_SYSCTL */
2833
2834 void hugetlb_report_meminfo(struct seq_file *m)
2835 {
2836         struct hstate *h = &default_hstate;
2837         if (!hugepages_supported())
2838                 return;
2839         seq_printf(m,
2840                         "HugePages_Total:   %5lu\n"
2841                         "HugePages_Free:    %5lu\n"
2842                         "HugePages_Rsvd:    %5lu\n"
2843                         "HugePages_Surp:    %5lu\n"
2844                         "Hugepagesize:   %8lu kB\n",
2845                         h->nr_huge_pages,
2846                         h->free_huge_pages,
2847                         h->resv_huge_pages,
2848                         h->surplus_huge_pages,
2849                         1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2850 }
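/*
 * This emits the /proc/meminfo block, e.g. (illustrative values):
 *
 *	HugePages_Total:     512
 *	HugePages_Free:      504
 *	HugePages_Rsvd:        8
 *	HugePages_Surp:        0
 *	Hugepagesize:       2048 kB
 */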
2851
2852 int hugetlb_report_node_meminfo(int nid, char *buf)
2853 {
2854         struct hstate *h = &default_hstate;
2855         if (!hugepages_supported())
2856                 return 0;
2857         return sprintf(buf,
2858                 "Node %d HugePages_Total: %5u\n"
2859                 "Node %d HugePages_Free:  %5u\n"
2860                 "Node %d HugePages_Surp:  %5u\n",
2861                 nid, h->nr_huge_pages_node[nid],
2862                 nid, h->free_huge_pages_node[nid],
2863                 nid, h->surplus_huge_pages_node[nid]);
2864 }
2865
2866 void hugetlb_show_meminfo(void)
2867 {
2868         struct hstate *h;
2869         int nid;
2870
2871         if (!hugepages_supported())
2872                 return;
2873
2874         for_each_node_state(nid, N_MEMORY)
2875                 for_each_hstate(h)
2876                         pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
2877                                 nid,
2878                                 h->nr_huge_pages_node[nid],
2879                                 h->free_huge_pages_node[nid],
2880                                 h->surplus_huge_pages_node[nid],
2881                                 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2882 }
2883
2884 void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
2885 {
2886         seq_printf(m, "HugetlbPages:\t%8lu kB\n",
2887                    atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10));
2888 }
2889
2890 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
2891 unsigned long hugetlb_total_pages(void)
2892 {
2893         struct hstate *h;
2894         unsigned long nr_total_pages = 0;
2895
2896         for_each_hstate(h)
2897                 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
2898         return nr_total_pages;
2899 }
2900
2901 static int hugetlb_acct_memory(struct hstate *h, long delta)
2902 {
2903         int ret = -ENOMEM;
2904
2905         spin_lock(&hugetlb_lock);
2906         /*
2907          * When cpuset is configured, it breaks the strict hugetlb page
2908          * reservation as the accounting is done on a global variable. Such
2909          * reservation is completely rubbish in the presence of cpuset because
2910          * the reservation is not checked against page availability for the
2911          * current cpuset. An application can still potentially be OOM-killed by
2912          * the kernel due to a lack of free hugetlb pages in its cpuset.
2913          * Attempting to enforce strict accounting with cpusets is almost
2914          * impossible (or too ugly) because cpusets are so fluid that
2915          * tasks or memory nodes can be dynamically moved between cpusets.
2916          *
2917          * The change of semantics for shared hugetlb mapping with cpuset is
2918          * undesirable. However, in order to preserve some of the semantics,
2919          * we fall back to checking against current free page availability,
2920          * as a best-effort attempt that hopefully minimizes the impact of
2921          * cpuset's changed semantics.
2922          */
2923         if (delta > 0) {
2924                 if (gather_surplus_pages(h, delta) < 0)
2925                         goto out;
2926
2927                 if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
2928                         return_unused_surplus_pages(h, delta);
2929                         goto out;
2930                 }
2931         }
2932
2933         ret = 0;
2934         if (delta < 0)
2935                 return_unused_surplus_pages(h, (unsigned long) -delta);
2936
2937 out:
2938         spin_unlock(&hugetlb_lock);
2939         return ret;
2940 }
2941
2942 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
2943 {
2944         struct resv_map *resv = vma_resv_map(vma);
2945
2946         /*
2947          * This new VMA should share its sibling's reservation map if present.
2948          * The VMA will only ever have a valid reservation map pointer where
2949          * it is being copied for another still existing VMA.  As that VMA
2950          * has a reference to the reservation map it cannot disappear until
2951          * after this open call completes.  It is therefore safe to take a
2952          * new reference here without additional locking.
2953          */
2954         if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
2955                 kref_get(&resv->refs);
2956 }
2957
2958 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
2959 {
2960         struct hstate *h = hstate_vma(vma);
2961         struct resv_map *resv = vma_resv_map(vma);
2962         struct hugepage_subpool *spool = subpool_vma(vma);
2963         unsigned long reserve, start, end;
2964         long gbl_reserve;
2965
2966         if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
2967                 return;
2968
2969         start = vma_hugecache_offset(h, vma, vma->vm_start);
2970         end = vma_hugecache_offset(h, vma, vma->vm_end);
2971
2972         reserve = (end - start) - region_count(resv, start, end);
2973
2974         kref_put(&resv->refs, resv_map_release);
2975
2976         if (reserve) {
2977                 /*
2978                  * Decrement reserve counts.  The global reserve count may be
2979                  * adjusted if the subpool has a minimum size.
2980                  */
2981                 gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
2982                 hugetlb_acct_memory(h, -gbl_reserve);
2983         }
2984 }
2985
2986 /*
2987  * We cannot handle page faults against hugetlb pages at all.  They cause
2988  * handle_mm_fault() to try to instantiate regular-sized pages in the
2989  * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we
2990  * get this far.
2991  */
2992 static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2993 {
2994         BUG();
2995         return 0;
2996 }
2997
2998 const struct vm_operations_struct hugetlb_vm_ops = {
2999         .fault = hugetlb_vm_op_fault,
3000         .open = hugetlb_vm_op_open,
3001         .close = hugetlb_vm_op_close,
3002 };
3003
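/*
 * Construct a huge PTE for @page using the VMA's protections.  Writable
 * mappings get a dirty, writable entry up front; read-only ones are
 * write-protected so a later write faults and triggers COW.
 */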
3004 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
3005                                 int writable)
3006 {
3007         pte_t entry;
3008
3009         if (writable) {
3010                 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
3011                                          vma->vm_page_prot)));
3012         } else {
3013                 entry = huge_pte_wrprotect(mk_huge_pte(page,
3014                                            vma->vm_page_prot));
3015         }
3016         entry = pte_mkyoung(entry);
3017         entry = pte_mkhuge(entry);
3018         entry = arch_make_huge_pte(entry, vma, page, writable);
3019
3020         return entry;
3021 }
3022
3023 static void set_huge_ptep_writable(struct vm_area_struct *vma,
3024                                    unsigned long address, pte_t *ptep)
3025 {
3026         pte_t entry;
3027
3028         entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
3029         if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
3030                 update_mmu_cache(vma, address, ptep);
3031 }
3032
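/*
 * The two helpers below decode a non-present huge PTE in swap-entry
 * format: it may describe either a page under migration or a page that
 * has been hardware poisoned.
 */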
3033 static int is_hugetlb_entry_migration(pte_t pte)
3034 {
3035         swp_entry_t swp;
3036
3037         if (huge_pte_none(pte) || pte_present(pte))
3038                 return 0;
3039         swp = pte_to_swp_entry(pte);
3040         if (non_swap_entry(swp) && is_migration_entry(swp))
3041                 return 1;
3042         else
3043                 return 0;
3044 }
3045
3046 static int is_hugetlb_entry_hwpoisoned(pte_t pte)
3047 {
3048         swp_entry_t swp;
3049
3050         if (huge_pte_none(pte) || pte_present(pte))
3051                 return 0;
3052         swp = pte_to_swp_entry(pte);
3053         if (non_swap_entry(swp) && is_hwpoison_entry(swp))
3054                 return 1;
3055         else
3056                 return 0;
3057 }
3058
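/*
 * Copy the hugetlb page-table entries of @vma from @src to @dst at
 * fork().  For private COW mappings both parent and child entries are
 * write-protected; shared page tables are simply reused.
 */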
3059 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
3060                             struct vm_area_struct *vma)
3061 {
3062         pte_t *src_pte, *dst_pte, entry;
3063         struct page *ptepage;
3064         unsigned long addr;
3065         int cow;
3066         struct hstate *h = hstate_vma(vma);
3067         unsigned long sz = huge_page_size(h);
3068         unsigned long mmun_start;       /* For mmu_notifiers */
3069         unsigned long mmun_end;         /* For mmu_notifiers */
3070         int ret = 0;
3071
3072         cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
3073
3074         mmun_start = vma->vm_start;
3075         mmun_end = vma->vm_end;
3076         if (cow)
3077                 mmu_notifier_invalidate_range_start(src, mmun_start, mmun_end);
3078
3079         for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
3080                 spinlock_t *src_ptl, *dst_ptl;
3081                 src_pte = huge_pte_offset(src, addr);
3082                 if (!src_pte)
3083                         continue;
3084                 dst_pte = huge_pte_alloc(dst, addr, sz);
3085                 if (!dst_pte) {
3086                         ret = -ENOMEM;
3087                         break;
3088                 }
3089
3090                 /* If the page tables are shared, don't copy or take references */
3091                 if (dst_pte == src_pte)
3092                         continue;
3093
3094                 dst_ptl = huge_pte_lock(h, dst, dst_pte);
3095                 src_ptl = huge_pte_lockptr(h, src, src_pte);
3096                 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
3097                 entry = huge_ptep_get(src_pte);
3098                 if (huge_pte_none(entry)) { /* skip none entry */
3099                         ;
3100                 } else if (unlikely(is_hugetlb_entry_migration(entry) ||
3101                                     is_hugetlb_entry_hwpoisoned(entry))) {
3102                         swp_entry_t swp_entry = pte_to_swp_entry(entry);
3103
3104                         if (is_write_migration_entry(swp_entry) && cow) {
3105                                 /*
3106                                  * COW mappings require pages in both
3107                                  * parent and child to be set read-only.
3108                                  */
3109                                 make_migration_entry_read(&swp_entry);
3110                                 entry = swp_entry_to_pte(swp_entry);
3111                                 set_huge_pte_at(src, addr, src_pte, entry);
3112                         }
3113                         set_huge_pte_at(dst, addr, dst_pte, entry);
3114                 } else {
3115                         if (cow) {
3116                                 huge_ptep_set_wrprotect(src, addr, src_pte);
3117                                 mmu_notifier_invalidate_range(src, mmun_start,
3118                                                                    mmun_end);
3119                         }
3120                         entry = huge_ptep_get(src_pte);
3121                         ptepage = pte_page(entry);
3122                         get_page(ptepage);
3123                         page_dup_rmap(ptepage, true);
3124                         set_huge_pte_at(dst, addr, dst_pte, entry);
3125                         hugetlb_count_add(pages_per_huge_page(h), dst);
3126                 }
3127                 spin_unlock(src_ptl);
3128                 spin_unlock(dst_ptl);
3129         }
3130
3131         if (cow)
3132                 mmu_notifier_invalidate_range_end(src, mmun_start, mmun_end);
3133
3134         return ret;
3135 }
3136
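/*
 * Tear down the huge PTEs in [start, end).  If @ref_page is non-NULL,
 * only that specific page is unmapped and the walk stops early.  The
 * loop restarts whenever the mmu_gather batch fills up and has to be
 * flushed mid-walk.
 */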
3137 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
3138                             unsigned long start, unsigned long end,
3139                             struct page *ref_page)
3140 {
3141         int force_flush = 0;
3142         struct mm_struct *mm = vma->vm_mm;
3143         unsigned long address;
3144         pte_t *ptep;
3145         pte_t pte;
3146         spinlock_t *ptl;
3147         struct page *page;
3148         struct hstate *h = hstate_vma(vma);
3149         unsigned long sz = huge_page_size(h);
3150         const unsigned long mmun_start = start; /* For mmu_notifiers */
3151         const unsigned long mmun_end   = end;   /* For mmu_notifiers */
3152
3153         WARN_ON(!is_vm_hugetlb_page(vma));
3154         BUG_ON(start & ~huge_page_mask(h));
3155         BUG_ON(end & ~huge_page_mask(h));
3156
3157         tlb_start_vma(tlb, vma);
3158         mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
3159         address = start;
3160 again:
3161         for (; address < end; address += sz) {
3162                 ptep = huge_pte_offset(mm, address);
3163                 if (!ptep)
3164                         continue;
3165
3166                 ptl = huge_pte_lock(h, mm, ptep);
3167                 if (huge_pmd_unshare(mm, &address, ptep))
3168                         goto unlock;
3169
3170                 pte = huge_ptep_get(ptep);
3171                 if (huge_pte_none(pte))
3172                         goto unlock;
3173
3174                 /*
3175                  * A migrating or HWPoisoned hugepage is already
3176                  * unmapped and its refcount is dropped, so just clear the pte here.
3177                  */
3178                 if (unlikely(!pte_present(pte))) {
3179                         huge_pte_clear(mm, address, ptep);
3180                         goto unlock;
3181                 }
3182
3183                 page = pte_page(pte);
3184                 /*
3185                  * If a reference page is supplied, it is because a specific
3186                  * page is being unmapped, not a range. Ensure the page we
3187                  * are about to unmap is the actual page of interest.
3188                  */
3189                 if (ref_page) {
3190                         if (page != ref_page)
3191                                 goto unlock;
3192
3193                         /*
3194                          * Mark the VMA as having unmapped its page so that
3195                          * future faults in this VMA will fail rather than
3196                          * looking like data was lost
3197                          */
3198                         set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
3199                 }
3200
3201                 pte = huge_ptep_get_and_clear(mm, address, ptep);
3202                 tlb_remove_tlb_entry(tlb, ptep, address);
3203                 if (huge_pte_dirty(pte))
3204                         set_page_dirty(page);
3205
3206                 hugetlb_count_sub(pages_per_huge_page(h), mm);
3207                 page_remove_rmap(page, true);
3208                 force_flush = !__tlb_remove_page(tlb, page);
3209                 if (force_flush) {
3210                         address += sz;
3211                         spin_unlock(ptl);
3212                         break;
3213                 }
3214                 /* Bail out after unmapping reference page if supplied */
3215                 if (ref_page) {
3216                         spin_unlock(ptl);
3217                         break;
3218                 }
3219 unlock:
3220                 spin_unlock(ptl);
3221         }
3222         /*
3223          * mmu_gather ran out of room to batch pages; we break out of
3224          * the PTE lock to avoid doing the potentially expensive TLB invalidate
3225          * and page-free while holding it.
3226          */
3227         if (force_flush) {
3228                 force_flush = 0;
3229                 tlb_flush_mmu(tlb);
3230                 if (address < end && !ref_page)
3231                         goto again;
3232         }
3233         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
3234         tlb_end_vma(tlb, vma);
3235 }
3236
3237 void __unmap_hugepage_range_final(struct mmu_gather *tlb,
3238                           struct vm_area_struct *vma, unsigned long start,
3239                           unsigned long end, struct page *ref_page)
3240 {
3241         __unmap_hugepage_range(tlb, vma, start, end, ref_page);
3242
3243         /*
3244          * Clear this flag so that x86's huge_pmd_share page_table_shareable
3245          * test will fail on a vma being torn down, and not grab a page table
3246          * on its way out.  We're lucky that the flag has such an appropriate
3247          * name, and can in fact be safely cleared here. We could clear it
3248          * before the __unmap_hugepage_range above, but all that's necessary
3249          * is to clear it before releasing the i_mmap_rwsem. This works
3250          * because in the context this is called, the VMA is about to be
3251          * destroyed and the i_mmap_rwsem is held.
3252          */
3253         vma->vm_flags &= ~VM_MAYSHARE;
3254 }
3255
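/*
 * Convenience wrapper that sets up and tears down its own mmu_gather
 * around __unmap_hugepage_range().
 */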
3256 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
3257                           unsigned long end, struct page *ref_page)
3258 {
3259         struct mm_struct *mm;
3260         struct mmu_gather tlb;
3261
3262         mm = vma->vm_mm;
3263
3264         tlb_gather_mmu(&tlb, mm, start, end);
3265         __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
3266         tlb_finish_mmu(&tlb, start, end);
3267 }
3268
3269 /*
3270  * This is called when the original mapper fails to COW a MAP_PRIVATE
3271  * mapping it owns the reserve page for. The intention is to unmap the page
3272  * from other VMAs and let the children be SIGKILLed if they fault on the
3273  * same region.
3274  */
3275 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
3276                               struct page *page, unsigned long address)
3277 {
3278         struct hstate *h = hstate_vma(vma);
3279         struct vm_area_struct *iter_vma;
3280         struct address_space *mapping;
3281         pgoff_t pgoff;
3282
3283         /*
3284          * vm_pgoff is in PAGE_SIZE units, hence the different calculation
3285          * from page cache lookup which is in HPAGE_SIZE units.
3286          */
3287         address = address & huge_page_mask(h);
3288         pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
3289                         vma->vm_pgoff;
3290         mapping = file_inode(vma->vm_file)->i_mapping;
3291
3292         /*
3293          * Take the mapping lock for the duration of the table walk. As
3294          * this mapping should be shared between all the VMAs,
3295          * __unmap_hugepage_range() is called with the lock already held.
3296          */
3297         i_mmap_lock_write(mapping);
3298         vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
3299                 /* Do not unmap the current VMA */
3300                 if (iter_vma == vma)
3301                         continue;
3302
3303                 /*
3304                  * Shared VMAs have their own reserves and do not affect
3305                  * MAP_PRIVATE accounting but it is possible that a shared
3306                  * VMA is using the same page so check and skip such VMAs.
3307                  */
3308                 if (iter_vma->vm_flags & VM_MAYSHARE)
3309                         continue;
3310
3311                 /*
3312                  * Unmap the page from other VMAs without their own reserves.
3313                  * They get marked to be SIGKILLed if they fault in these
3314                  * areas. This is because a future no-page fault on this VMA
3315                  * could insert a zeroed page instead of the data existing
3316                  * from the time of fork. This would look like data corruption
3317                  */
3318                 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
3319                         unmap_hugepage_range(iter_vma, address,
3320                                              address + huge_page_size(h), page);
3321         }
3322         i_mmap_unlock_write(mapping);
3323 }
3324
3325 /*
3326  * Hugetlb_cow() should be called with page lock of the original hugepage held.
3327  * Called with the hugetlb fault mutex held and pte_page locked so we
3328  * cannot race with other handlers or page migration.
3329  * Keep the pte_same checks anyway to make transition from the mutex easier.
3330  */
3331 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
3332                         unsigned long address, pte_t *ptep, pte_t pte,
3333                         struct page *pagecache_page, spinlock_t *ptl)
3334 {
3335         struct hstate *h = hstate_vma(vma);
3336         struct page *old_page, *new_page;
3337         int ret = 0, outside_reserve = 0;
3338         unsigned long mmun_start;       /* For mmu_notifiers */
3339         unsigned long mmun_end;         /* For mmu_notifiers */
3340
3341         old_page = pte_page(pte);
3342
3343 retry_avoidcopy:
3344         /* If no-one else is actually using this page, avoid the copy
3345          * and just make the page writable */
3346         if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
3347                 page_move_anon_rmap(old_page, vma, address);
3348                 set_huge_ptep_writable(vma, address, ptep);
3349                 return 0;
3350         }
3351
3352         /*
3353          * If the process that created a MAP_PRIVATE mapping is about to
3354          * perform a COW due to a shared page count, attempt to satisfy
3355          * the allocation without using the existing reserves. The pagecache
3356          * page is used to determine if the reserve at this address was
3357          * consumed or not. If reserves were used, a partially faulted mapping
3358          * at the time of fork() could consume its reserves on COW instead
3359          * of the full address range.
3360          */
3361         if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
3362                         old_page != pagecache_page)
3363                 outside_reserve = 1;
3364
3365         get_page(old_page);
3366
3367         /*
3368          * Drop page table lock as buddy allocator may be called. It will
3369          * be acquired again before returning to the caller, as expected.
3370          */
3371         spin_unlock(ptl);
3372         new_page = alloc_huge_page(vma, address, outside_reserve);
3373
3374         if (IS_ERR(new_page)) {
3375                 /*
3376                  * If a process owning a MAP_PRIVATE mapping fails to COW,
3377                  * it is due to references held by a child and an insufficient
3378                  * huge page pool. To guarantee the original mappers
3379                  * huge page pool. To guarantee the original mapper's
3380                  * may get SIGKILLed if it later faults.
3381                  */
3382                 if (outside_reserve) {
3383                         put_page(old_page);
3384                         BUG_ON(huge_pte_none(pte));
3385                         unmap_ref_private(mm, vma, old_page, address);
3386                         BUG_ON(huge_pte_none(pte));
3387                         spin_lock(ptl);
3388                         ptep = huge_pte_offset(mm, address & huge_page_mask(h));
3389                         if (likely(ptep &&
3390                                    pte_same(huge_ptep_get(ptep), pte)))
3391                                 goto retry_avoidcopy;
3392                         /*
3393                          * A race occurred while re-acquiring the page
3394                          * table lock, and our job is done.
3395                          */
3396                         return 0;
3397                 }
3398
3399                 ret = (PTR_ERR(new_page) == -ENOMEM) ?
3400                         VM_FAULT_OOM : VM_FAULT_SIGBUS;
3401                 goto out_release_old;
3402         }
3403
3404         /*
3405          * When the original hugepage is a shared one, it does not have
3406          * anon_vma prepared.
3407          */
3408         if (unlikely(anon_vma_prepare(vma))) {
3409                 ret = VM_FAULT_OOM;
3410                 goto out_release_all;
3411         }
3412
3413         copy_user_huge_page(new_page, old_page, address, vma,
3414                             pages_per_huge_page(h));
3415         __SetPageUptodate(new_page);
3416         set_page_huge_active(new_page);
3417
3418         mmun_start = address & huge_page_mask(h);
3419         mmun_end = mmun_start + huge_page_size(h);
3420         mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
3421
3422         /*
3423          * Retake the page table lock to check for racing updates
3424          * before the page tables are altered
3425          */
3426         spin_lock(ptl);
3427         ptep = huge_pte_offset(mm, address & huge_page_mask(h));
3428         if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
3429                 ClearPagePrivate(new_page);
3430
3431                 /* Break COW */
3432                 huge_ptep_clear_flush(vma, address, ptep);
3433                 mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
3434                 set_huge_pte_at(mm, address, ptep,
3435                                 make_huge_pte(vma, new_page, 1));
3436                 page_remove_rmap(old_page, true);
3437                 hugepage_add_new_anon_rmap(new_page, vma, address);
3438                 /* Make the old page be freed below */
3439                 new_page = old_page;
3440         }
3441         spin_unlock(ptl);
3442         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
3443 out_release_all:
3444         put_page(new_page);
3445 out_release_old:
3446         put_page(old_page);
3447
3448         spin_lock(ptl); /* Caller expects lock to be held */
3449         return ret;
3450 }
3451
3452 /* Return the pagecache page at a given address within a VMA */
3453 static struct page *hugetlbfs_pagecache_page(struct hstate *h,
3454                         struct vm_area_struct *vma, unsigned long address)
3455 {
3456         struct address_space *mapping;
3457         pgoff_t idx;
3458
3459         mapping = vma->vm_file->f_mapping;
3460         idx = vma_hugecache_offset(h, vma, address);
3461
3462         return find_lock_page(mapping, idx);
3463 }
3464
3465 /*
3466  * Return whether there is a pagecache page to back the given address within the VMA.
3467  * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
3468  */
3469 static bool hugetlbfs_pagecache_present(struct hstate *h,
3470                         struct vm_area_struct *vma, unsigned long address)
3471 {
3472         struct address_space *mapping;
3473         pgoff_t idx;
3474         struct page *page;
3475
3476         mapping = vma->vm_file->f_mapping;
3477         idx = vma_hugecache_offset(h, vma, address);
3478
3479         page = find_get_page(mapping, idx);
3480         if (page)
3481                 put_page(page);
3482         return page != NULL;
3483 }
3484
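/*
 * Insert a freshly allocated huge page into the hugetlbfs page cache
 * and charge the inode's block count for it.
 */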
3485 int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
3486                            pgoff_t idx)
3487 {
3488         struct inode *inode = mapping->host;
3489         struct hstate *h = hstate_inode(inode);
3490         int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
3491
3492         if (err)
3493                 return err;
3494         ClearPagePrivate(page);
3495
3496         spin_lock(&inode->i_lock);
3497         inode->i_blocks += blocks_per_huge_page(h);
3498         spin_unlock(&inode->i_lock);
3499         return 0;
3500 }
3501
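/*
 * Handle a fault on a huge PTE that has no entry yet: look the page up
 * in the page cache, allocating (and, for shared mappings, inserting) a
 * new page if needed, then install the PTE under the page-table lock.
 * i_size is re-checked under the lock to guard against truncation.
 */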
3502 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
3503                            struct address_space *mapping, pgoff_t idx,
3504                            unsigned long address, pte_t *ptep, unsigned int flags)
3505 {
3506         struct hstate *h = hstate_vma(vma);
3507         int ret = VM_FAULT_SIGBUS;
3508         int anon_rmap = 0;
3509         unsigned long size;
3510         struct page *page;
3511         pte_t new_pte;
3512         spinlock_t *ptl;
3513
3514         /*
3515          * Currently, we are forced to kill the process in the event the
3516          * original mapper has unmapped pages from the child due to a failed
3517          * COW. Warn that such a situation has occurred as it may not be obvious
3518          */
3519         if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
3520                 pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
3521                            current->pid);
3522                 return ret;
3523         }
3524
3525         /*
3526          * Use page lock to guard against racing truncation
3527          * before we get page_table_lock.
3528          */
3529 retry:
3530         page = find_lock_page(mapping, idx);
3531         if (!page) {
3532                 size = i_size_read(mapping->host) >> huge_page_shift(h);
3533                 if (idx >= size)
3534                         goto out;
3535                 page = alloc_huge_page(vma, address, 0);
3536                 if (IS_ERR(page)) {
3537                         ret = PTR_ERR(page);
3538                         if (ret == -ENOMEM)
3539                                 ret = VM_FAULT_OOM;
3540                         else
3541                                 ret = VM_FAULT_SIGBUS;
3542                         goto out;
3543                 }
3544                 clear_huge_page(page, address, pages_per_huge_page(h));
3545                 __SetPageUptodate(page);
3546                 set_page_huge_active(page);
3547
3548                 if (vma->vm_flags & VM_MAYSHARE) {
3549                         int err = huge_add_to_page_cache(page, mapping, idx);
3550                         if (err) {
3551                                 put_page(page);
3552                                 if (err == -EEXIST)
3553                                         goto retry;
3554                                 goto out;
3555                         }
3556                 } else {
3557                         lock_page(page);
3558                         if (unlikely(anon_vma_prepare(vma))) {
3559                                 ret = VM_FAULT_OOM;
3560                                 goto backout_unlocked;
3561                         }
3562                         anon_rmap = 1;
3563                 }
3564         } else {
3565                 /*
3566                  * If a memory error occurs between mmap() and fault, a process
3567                  * may not have a hwpoisoned swap entry for the errored virtual
3568                  * address, so block the hugepage fault with a PG_hwpoison check.
3569                  */
3570                 if (unlikely(PageHWPoison(page))) {
3571                         ret = VM_FAULT_HWPOISON |
3572                                 VM_FAULT_SET_HINDEX(hstate_index(h));
3573                         goto backout_unlocked;
3574                 }
3575         }
3576
3577         /*
3578          * If we are going to COW a private mapping later, we examine the
3579          * pending reservations for this page now. This will ensure that
3580          * any allocations necessary to record that reservation occur outside
3581          * the spinlock.
3582          */
3583         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3584                 if (vma_needs_reservation(h, vma, address) < 0) {
3585                         ret = VM_FAULT_OOM;
3586                         goto backout_unlocked;
3587                 }
3588                 /* Just decrements count, does not deallocate */
3589                 vma_end_reservation(h, vma, address);
3590         }
3591
3592         ptl = huge_pte_lockptr(h, mm, ptep);
3593         spin_lock(ptl);
3594         size = i_size_read(mapping->host) >> huge_page_shift(h);
3595         if (idx >= size)
3596                 goto backout;
3597
3598         ret = 0;
3599         if (!huge_pte_none(huge_ptep_get(ptep)))
3600                 goto backout;
3601
3602         if (anon_rmap) {
3603                 ClearPagePrivate(page);
3604                 hugepage_add_new_anon_rmap(page, vma, address);
3605         } else
3606                 page_dup_rmap(page, true);
3607         new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
3608                                 && (vma->vm_flags & VM_SHARED)));
3609         set_huge_pte_at(mm, address, ptep, new_pte);
3610
3611         hugetlb_count_add(pages_per_huge_page(h), mm);
3612         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3613                 /* Optimization, do the COW without a second fault */
3614                 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
3615         }
3616
3617         spin_unlock(ptl);
3618         unlock_page(page);
3619 out:
3620         return ret;
3621
3622 backout:
3623         spin_unlock(ptl);
3624 backout_unlocked:
3625         unlock_page(page);
3626         put_page(page);
3627         goto out;
3628 }
3629
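/*
 * Pick the mutex that serializes faults on one logical page: shared
 * mappings hash on (mapping, index) so every process faulting the same
 * file page contends on the same mutex, while private mappings hash on
 * (mm, address).
 */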
3630 #ifdef CONFIG_SMP
3631 u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3632                             struct vm_area_struct *vma,
3633                             struct address_space *mapping,
3634                             pgoff_t idx, unsigned long address)
3635 {
3636         unsigned long key[2];
3637         u32 hash;
3638
3639         if (vma->vm_flags & VM_SHARED) {
3640                 key[0] = (unsigned long) mapping;
3641                 key[1] = idx;
3642         } else {
3643                 key[0] = (unsigned long) mm;
3644                 key[1] = address >> huge_page_shift(h);
3645         }
3646
3647         hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);
3648
3649         return hash & (num_fault_mutexes - 1);
3650 }
3651 #else
3652 /*
3653  * For uniprocessor systems we always use a single mutex, so just
3654  * return 0 and avoid the hashing overhead.
3655  */
3656 u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3657                             struct vm_area_struct *vma,
3658                             struct address_space *mapping,
3659                             pgoff_t idx, unsigned long address)
3660 {
3661         return 0;
3662 }
3663 #endif
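/*
 * A minimal sketch of the locking pattern callers are expected to
 * follow (exactly what hugetlb_fault() does below):
 *
 *	u32 hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	...fault handling...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */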
3664
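/*
 * Top-level hugetlb fault handler.  Serializes against racing faults on
 * the same logical page via the fault mutex table, dispatches missing
 * entries to hugetlb_no_page(), waits on migration entries, and handles
 * write faults on read-only entries with hugetlb_cow().
 */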
3665 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3666                         unsigned long address, unsigned int flags)
3667 {
3668         pte_t *ptep, entry;
3669         spinlock_t *ptl;
3670         int ret;
3671         u32 hash;
3672         pgoff_t idx;
3673         struct page *page = NULL;
3674         struct page *pagecache_page = NULL;
3675         struct hstate *h = hstate_vma(vma);
3676         struct address_space *mapping;
3677         int need_wait_lock = 0;
3678
3679         address &= huge_page_mask(h);
3680
3681         ptep = huge_pte_offset(mm, address);
3682         if (ptep) {
3683                 entry = huge_ptep_get(ptep);
3684                 if (unlikely(is_hugetlb_entry_migration(entry))) {
3685                         migration_entry_wait_huge(vma, mm, ptep);
3686                         return 0;
3687                 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
3688                         return VM_FAULT_HWPOISON_LARGE |
3689                                 VM_FAULT_SET_HINDEX(hstate_index(h));
3690         } else {
3691                 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
3692                 if (!ptep)
3693                         return VM_FAULT_OOM;
3694         }
3695
3696         mapping = vma->vm_file->f_mapping;
3697         idx = vma_hugecache_offset(h, vma, address);
3698
3699         /*
3700          * Serialize hugepage allocation and instantiation, so that we don't
3701          * get spurious allocation failures if two CPUs race to instantiate
3702          * the same page in the page cache.
3703          */
3704         hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);
3705         mutex_lock(&hugetlb_fault_mutex_table[hash]);
3706
3707         entry = huge_ptep_get(ptep);
3708         if (huge_pte_none(entry)) {
3709                 ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
3710                 goto out_mutex;
3711         }
3712
3713         ret = 0;
3714
3715         /*
3716          * entry could be a migration/hwpoison entry at this point, so this
3717          * check prevents the kernel from going below assuming that we have
3718          * an active hugepage in the pagecache. This goto expects the 2nd page
3719          * fault, where the is_hugetlb_entry_(migration|hwpoisoned) check will
3720          * properly handle it.
3721          */
3722         if (!pte_present(entry))
3723                 goto out_mutex;
3724
3725         /*
3726          * If we are going to COW the mapping later, we examine the pending
3727          * reservations for this page now. This will ensure that any
3728          * allocations necessary to record that reservation occur outside the
3729          * spinlock. For private mappings, we also lookup the pagecache
3730          * page now as it is used to determine if a reservation has been
3731          * consumed.
3732          */
3733         if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
3734                 if (vma_needs_reservation(h, vma, address) < 0) {
3735                         ret = VM_FAULT_OOM;
3736                         goto out_mutex;
3737                 }
3738                 /* Just decrements count, does not deallocate */
3739                 vma_end_reservation(h, vma, address);
3740
3741                 if (!(vma->vm_flags & VM_MAYSHARE))
3742                         pagecache_page = hugetlbfs_pagecache_page(h,
3743                                                                 vma, address);
3744         }
3745
3746         ptl = huge_pte_lock(h, mm, ptep);
3747
3748         /* Check for a racing update before calling hugetlb_cow */
3749         if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
3750                 goto out_ptl;
3751
3752         /*
3753          * pagecache_page, so here we need to take the former one
3754          * pagecache_page, so here we need take the former one
3755          * when page != pagecache_page or !pagecache_page.
3756          */
3757         page = pte_page(entry);
3758         if (page != pagecache_page)
3759                 if (!trylock_page(page)) {
3760                         need_wait_lock = 1;
3761                         goto out_ptl;
3762                 }
3763
3764         get_page(page);
3765
3766         if (flags & FAULT_FLAG_WRITE) {
3767                 if (!huge_pte_write(entry)) {
3768                         ret = hugetlb_cow(mm, vma, address, ptep, entry,
3769                                         pagecache_page, ptl);
3770                         goto out_put_page;
3771                 }
3772                 entry = huge_pte_mkdirty(entry);
3773         }
3774         entry = pte_mkyoung(entry);
3775         if (huge_ptep_set_access_flags(vma, address, ptep, entry,
3776                                                 flags & FAULT_FLAG_WRITE))
3777                 update_mmu_cache(vma, address, ptep);
3778 out_put_page:
3779         if (page != pagecache_page)
3780                 unlock_page(page);
3781         put_page(page);
3782 out_ptl:
3783         spin_unlock(ptl);
3784
3785         if (pagecache_page) {
3786                 unlock_page(pagecache_page);
3787                 put_page(pagecache_page);
3788         }
3789 out_mutex:
3790         mutex_unlock(&hugetlb_fault_mutex_table[hash]);
3791         /*
3792          * Generally it's safe to hold a refcount while waiting on a page
3793          * lock. But here we only wait to defer the next page fault and avoid
3794          * a busy loop; the page is not used after it is unlocked and before
3795          * returning from the current page fault. So we are safe from
3796          * accessing a freed page, even though we wait here without a refcount.
3797          */
3798         if (need_wait_lock)
3799                 wait_on_page_locked(page);
3800         return ret;
3801 }
3802
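/*
 * get_user_pages() worker for hugetlb VMAs: walk the huge PTEs from
 * *position, faulting in pages as needed, and fill @pages with the
 * constituent base pages (using pfn_offset to index into each huge
 * page).  Returns the number of pages processed, or -EFAULT.
 */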
3803 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
3804                          struct page **pages, struct vm_area_struct **vmas,
3805                          unsigned long *position, unsigned long *nr_pages,
3806                          long i, unsigned int flags)
3807 {
3808         unsigned long pfn_offset;
3809         unsigned long vaddr = *position;
3810         unsigned long remainder = *nr_pages;
3811         struct hstate *h = hstate_vma(vma);
3812
3813         while (vaddr < vma->vm_end && remainder) {
3814                 pte_t *pte;
3815                 spinlock_t *ptl = NULL;
3816                 int absent;
3817                 struct page *page;
3818
3819                 /*
3820                  * If we have a pending SIGKILL, don't keep faulting pages and
3821                  * potentially allocating memory.
3822                  */
3823                 if (unlikely(fatal_signal_pending(current))) {
3824                         remainder = 0;
3825                         break;
3826                 }
3827
3828                 /*
3829                  * Some archs (sparc64, sh*) have multiple pte_ts to
3830                  * each hugepage.  We have to make sure we get the
3831                  * first, for the page indexing below to work.
3832                  *
3833                  * Note that page table lock is not held when pte is null.
3834                  */
3835                 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
3836                 if (pte)
3837                         ptl = huge_pte_lock(h, mm, pte);
3838                 absent = !pte || huge_pte_none(huge_ptep_get(pte));
3839
3840                 /*
3841                  * When coredumping, it suits get_dump_page if we just return
3842                  * an error where there's an empty slot with no huge pagecache
3843                  * to back it.  This way, we avoid allocating a hugepage, and
3844                  * the sparse dumpfile avoids allocating disk blocks, but its
3845                  * huge holes still show up with zeroes where they need to be.
3846                  */
3847                 if (absent && (flags & FOLL_DUMP) &&
3848                     !hugetlbfs_pagecache_present(h, vma, vaddr)) {
3849                         if (pte)
3850                                 spin_unlock(ptl);
3851                         remainder = 0;
3852                         break;
3853                 }
3854
3855                 /*
3856                  * We need to call hugetlb_fault for both hugepages under
3857                  * migration (in which case hugetlb_fault waits for the
3858                  * migration) and hwpoisoned hugepages (in which case we need
3859                  * to prevent the caller from accessing them). To do this, we
3860                  * use is_swap_pte here instead of is_hugetlb_entry_migration
3861                  * and is_hugetlb_entry_hwpoisoned: it simply covers both
3862                  * cases, and we can't follow correct pages directly from any
3863                  * kind of swap entry.
3864                  */
3865                 if (absent || is_swap_pte(huge_ptep_get(pte)) ||
3866                     ((flags & FOLL_WRITE) &&
3867                       !huge_pte_write(huge_ptep_get(pte)))) {
3868                         int ret;
3869
3870                         if (pte)
3871                                 spin_unlock(ptl);
3872                         ret = hugetlb_fault(mm, vma, vaddr,
3873                                 (flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
3874                         if (!(ret & VM_FAULT_ERROR))
3875                                 continue;
3876
3877                         remainder = 0;
3878                         break;
3879                 }
3880
3881                 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
3882                 page = pte_page(huge_ptep_get(pte));
3883 same_page:
3884                 if (pages) {
3885                         pages[i] = mem_map_offset(page, pfn_offset);
3886                         get_page(pages[i]);
3887                 }
3888
3889                 if (vmas)
3890                         vmas[i] = vma;
3891
3892                 vaddr += PAGE_SIZE;
3893                 ++pfn_offset;
3894                 --remainder;
3895                 ++i;
3896                 if (vaddr < vma->vm_end && remainder &&
3897                                 pfn_offset < pages_per_huge_page(h)) {
3898                         /*
3899                          * We use pfn_offset to avoid touching the pageframes
3900                          * of this compound page.
3901                          */
3902                         goto same_page;
3903                 }
3904                 spin_unlock(ptl);
3905         }
3906         *nr_pages = remainder;
3907         *position = vaddr;
3908
3909         return i ? i : -EFAULT;
3910 }
3911
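/*
 * mprotect() worker for hugetlb VMAs.  Rewrites the protection bits of
 * each huge PTE in [address, end) and returns the number of base pages
 * whose entries were changed (note the `pages << h->order` at the end).
 */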
3912 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
3913                 unsigned long address, unsigned long end, pgprot_t newprot)
3914 {
3915         struct mm_struct *mm = vma->vm_mm;
3916         unsigned long start = address;
3917         pte_t *ptep;
3918         pte_t pte;
3919         struct hstate *h = hstate_vma(vma);
3920         unsigned long pages = 0;
3921
3922         BUG_ON(address >= end);
3923         flush_cache_range(vma, address, end);
3924
3925         mmu_notifier_invalidate_range_start(mm, start, end);
3926         i_mmap_lock_write(vma->vm_file->f_mapping);
3927         for (; address < end; address += huge_page_size(h)) {
3928                 spinlock_t *ptl;
3929                 ptep = huge_pte_offset(mm, address);
3930                 if (!ptep)
3931                         continue;
3932                 ptl = huge_pte_lock(h, mm, ptep);
3933                 if (huge_pmd_unshare(mm, &address, ptep)) {
3934                         pages++;
3935                         spin_unlock(ptl);
3936                         continue;
3937                 }
3938                 pte = huge_ptep_get(ptep);
3939                 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
3940                         spin_unlock(ptl);
3941                         continue;
3942                 }
3943                 if (unlikely(is_hugetlb_entry_migration(pte))) {
3944                         swp_entry_t entry = pte_to_swp_entry(pte);
3945
3946                         if (is_write_migration_entry(entry)) {
3947                                 pte_t newpte;
3948
3949                                 make_migration_entry_read(&entry);
3950                                 newpte = swp_entry_to_pte(entry);
3951                                 set_huge_pte_at(mm, address, ptep, newpte);
3952                                 pages++;
3953                         }
3954                         spin_unlock(ptl);
3955                         continue;
3956                 }
3957                 if (!huge_pte_none(pte)) {
3958                         pte = huge_ptep_get_and_clear(mm, address, ptep);
3959                         pte = pte_mkhuge(huge_pte_modify(pte, newprot));
3960                         pte = arch_make_huge_pte(pte, vma, NULL, 0);
3961                         set_huge_pte_at(mm, address, ptep, pte);
3962                         pages++;
3963                 }
3964                 spin_unlock(ptl);
3965         }
3966         /*
3967          * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
3968          * may have cleared our pud entry and done put_page on the page table:
3969          * once we release i_mmap_rwsem, another task can do the final put_page
3970          * and that page table be reused and filled with junk.
3971          */
3972         flush_tlb_range(vma, start, end);
3973         mmu_notifier_invalidate_range(mm, start, end);
3974         i_mmap_unlock_write(vma->vm_file->f_mapping);
3975         mmu_notifier_invalidate_range_end(mm, start, end);
3976
3977         return pages << h->order;
3978 }
3979
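/*
 * Reserve huge pages for the file range [from, to).  Shared mappings
 * charge the shared reserve map attached to the inode; private mappings
 * get a fresh reserve map hung off the VMA, with the VMA marked as the
 * reservation owner.
 */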
3980 int hugetlb_reserve_pages(struct inode *inode,
3981                                         long from, long to,
3982                                         struct vm_area_struct *vma,
3983                                         vm_flags_t vm_flags)
3984 {
3985         long ret, chg;
3986         struct hstate *h = hstate_inode(inode);
3987         struct hugepage_subpool *spool = subpool_inode(inode);
3988         struct resv_map *resv_map;
3989         long gbl_reserve;
3990
3991         /*
3992          * Only apply hugepage reservation if asked. At fault time, an
3993          * attempt will be made for VM_NORESERVE to allocate a page
3994          * without using reserves
3995          */
3996         if (vm_flags & VM_NORESERVE)
3997                 return 0;
3998
3999         /*
4000          * Shared mappings base their reservation on the number of pages that
4001          * are already allocated on behalf of the file. Private mappings need
4002          * to reserve the full area even if read-only as mprotect() may be
4003          * called to make the mapping read-write. Assume !vma is a shm mapping
4004          */
4005         if (!vma || vma->vm_flags & VM_MAYSHARE) {
4006                 resv_map = inode_resv_map(inode);
4007
4008                 chg = region_chg(resv_map, from, to);
4009
4010         } else {
4011                 resv_map = resv_map_alloc();
4012                 if (!resv_map)
4013                         return -ENOMEM;
4014
4015                 chg = to - from;
4016
4017                 set_vma_resv_map(vma, resv_map);
4018                 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
4019         }
4020
4021         if (chg < 0) {
4022                 ret = chg;
4023                 goto out_err;
4024         }
4025
4026         /*
4027          * There must be enough pages in the subpool for the mapping. If
4028          * the subpool has a minimum size, there may be some global
4029          * reservations already in place (gbl_reserve).
4030          */
4031         gbl_reserve = hugepage_subpool_get_pages(spool, chg);
4032         if (gbl_reserve < 0) {
4033                 ret = -ENOSPC;
4034                 goto out_err;
4035         }
4036
4037         /*
4038          * Check that enough hugepages are available for the reservation.
4039          * Hand the pages back to the subpool if there are not.
4040          */
4041         ret = hugetlb_acct_memory(h, gbl_reserve);
4042         if (ret < 0) {
4043                 /* put back original number of pages, chg */
4044                 (void)hugepage_subpool_put_pages(spool, chg);
4045                 goto out_err;
4046         }
4047
4048         /*
4049          * Account for the reservations made. Shared mappings record regions
4050          * that have reservations as they are shared by multiple VMAs.
4051          * When the last VMA disappears, the region map says how much
4052          * the reservation was and the page cache tells how much of
4053          * the reservation was consumed. Private mappings are per-VMA and
4054          * only the consumed reservations are tracked. When the VMA
4055          * disappears, the original reservation is the VMA size and the
4056          * consumed reservations are stored in the map. Hence, nothing
4057          * else has to be done for private mappings here
4058          */
4059         if (!vma || vma->vm_flags & VM_MAYSHARE) {
4060                 long add = region_add(resv_map, from, to);
4061
4062                 if (unlikely(chg > add)) {
4063                         /*
4064                          * pages in this range were added to the reserve
4065                          * map between region_chg and region_add.  This
4066                          * indicates a race with alloc_huge_page.  Adjust
4067                          * the subpool and reserve counts modified above
4068                          * based on the difference.
4069                          */
4070                         long rsv_adjust;
4071
4072                         rsv_adjust = hugepage_subpool_put_pages(spool,
4073                                                                 chg - add);
4074                         hugetlb_acct_memory(h, -rsv_adjust);
4075                 }
4076         }
4077         return 0;
4078 out_err:
4079         if (!vma || vma->vm_flags & VM_MAYSHARE)
4080                 region_abort(resv_map, from, to);
4081         if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
4082                 kref_put(&resv_map->refs, resv_map_release);
4083         return ret;
4084 }
4085
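/*
 * Release reservations when pages are removed from a hugetlbfs file:
 * drop [start, end) from the reserve map, uncharge the inode's block
 * count for the @freed pages, and hand the difference back to the
 * subpool and global pool.
 */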
4086 long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
4087                                                                 long freed)
4088 {
4089         struct hstate *h = hstate_inode(inode);
4090         struct resv_map *resv_map = inode_resv_map(inode);
4091         long chg = 0;
4092         struct hugepage_subpool *spool = subpool_inode(inode);
4093         long gbl_reserve;
4094
4095         if (resv_map) {
4096                 chg = region_del(resv_map, start, end);
4097                 /*
4098                  * region_del() can fail in the rare case where a region
4099                  * must be split and another region descriptor cannot be
4100                  * allocated.  If end == LONG_MAX, it will not fail.
4101                  */
4102                 if (chg < 0)
4103                         return chg;
4104         }
4105
4106         spin_lock(&inode->i_lock);
4107         inode->i_blocks -= (blocks_per_huge_page(h) * freed);
4108         spin_unlock(&inode->i_lock);
4109
4110         /*
4111          * If the subpool has a minimum size, the number of global
4112          * reservations to be released may be adjusted.
4113          */
4114         gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
4115         hugetlb_acct_memory(h, -gbl_reserve);
4116
4117         return 0;
4118 }
4119
4120 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
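/*
 * PMD sharing: when several processes map the same file range with
 * suitably sized and aligned VMAs, they can share the page-table page
 * holding the huge PMDs instead of each allocating their own.  The
 * helpers below decide when sharing is safe.
 */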
4121 static unsigned long page_table_shareable(struct vm_area_struct *svma,
4122                                 struct vm_area_struct *vma,
4123                                 unsigned long addr, pgoff_t idx)
4124 {
4125         unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
4126                                 svma->vm_start;
4127         unsigned long sbase = saddr & PUD_MASK;
4128         unsigned long s_end = sbase + PUD_SIZE;
4129
4130         /* Allow segments to share if only one is marked locked */
4131         unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
4132         unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK;
4133
4134         /*
4135          * match the virtual addresses, permission and the alignment of the
4136          * page table page.
4137          */
4138         if (pmd_index(addr) != pmd_index(saddr) ||
4139             vm_flags != svm_flags ||
4140             sbase < svma->vm_start || svma->vm_end < s_end)
4141                 return 0;
4142
4143         return saddr;
4144 }
4145
4146 static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
4147 {
4148         unsigned long base = addr & PUD_MASK;
4149         unsigned long end = base + PUD_SIZE;
4150
4151         /*
4152          * check on proper vm_flags and page table alignment
4153          */
4154         if (vma->vm_flags & VM_MAYSHARE &&
4155             vma->vm_start <= base && end <= vma->vm_end)
4156                 return true;
4157         return false;
4158 }
4159
4160 /*
4161  * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
4162  * and returns the corresponding pte. While this is not necessary for the
4163  * !shared pmd case because we can allocate the pmd later as well, it makes the
4164  * code much cleaner. pmd allocation is essential for the shared case because
4165  * pud has to be populated inside the same i_mmap_rwsem section - otherwise
4166  * racing tasks could either miss the sharing (see huge_pte_offset) or select a
4167  * bad pmd for sharing.
4168  */
4169 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4170 {
4171         struct vm_area_struct *vma = find_vma(mm, addr);
4172         struct address_space *mapping = vma->vm_file->f_mapping;
4173         pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
4174                         vma->vm_pgoff;
4175         struct vm_area_struct *svma;
4176         unsigned long saddr;
4177         pte_t *spte = NULL;
4178         pte_t *pte;
4179         spinlock_t *ptl;
4180
4181         if (!vma_shareable(vma, addr))
4182                 return (pte_t *)pmd_alloc(mm, pud, addr);
4183
4184         i_mmap_lock_write(mapping);
4185         vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
4186                 if (svma == vma)
4187                         continue;
4188
4189                 saddr = page_table_shareable(svma, vma, addr, idx);
4190                 if (saddr) {
4191                         spte = huge_pte_offset(svma->vm_mm, saddr);
4192                         if (spte) {
4193                                 mm_inc_nr_pmds(mm);
4194                                 get_page(virt_to_page(spte));
4195                                 break;
4196                         }
4197                 }
4198         }
4199
4200         if (!spte)
4201                 goto out;
4202
4203         ptl = huge_pte_lockptr(hstate_vma(vma), mm, spte);
4204         spin_lock(ptl);
4205         if (pud_none(*pud)) {
4206                 pud_populate(mm, pud,
4207                                 (pmd_t *)((unsigned long)spte & PAGE_MASK));
4208         } else {
4209                 put_page(virt_to_page(spte));
4210                 mm_inc_nr_pmds(mm);
4211         }
4212         spin_unlock(ptl);
4213 out:
4214         pte = (pte_t *)pmd_alloc(mm, pud, addr);
4215         i_mmap_unlock_write(mapping);
4216         return pte;
4217 }
4218
4219 /*
4220  * unmap huge page backed by shared pte.
4221  *
4222  * The hugetlb pte page is refcounted at the time of mapping.  If the pte is
4223  * shared, indicated by page_count > 1, unmap is achieved by clearing the pud
4224  * and decrementing the refcount. If count == 1, the pte page is not shared.
4225  *
4226  * called with page table lock held.
4227  *
4228  * returns: 1 successfully unmapped a shared pte page
4229  *          0 the underlying pte page is not shared, or it is the last user
4230  */
4231 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4232 {
4233         pgd_t *pgd = pgd_offset(mm, *addr);
4234         pud_t *pud = pud_offset(pgd, *addr);
4235
4236         BUG_ON(page_count(virt_to_page(ptep)) == 0);
4237         if (page_count(virt_to_page(ptep)) == 1)
4238                 return 0;
4239
4240         pud_clear(pud);
4241         put_page(virt_to_page(ptep));
4242         mm_dec_nr_pmds(mm);
4243         *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
4244         return 1;
4245 }
4246 #define want_pmd_share()        (1)
4247 #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
4248 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4249 {
4250         return NULL;
4251 }
4252
4253 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4254 {
4255         return 0;
4256 }
4257 #define want_pmd_share()        (0)
4258 #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
4259
4260 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
4261 pte_t *huge_pte_alloc(struct mm_struct *mm,
4262                         unsigned long addr, unsigned long sz)
4263 {
4264         pgd_t *pgd;
4265         pud_t *pud;
4266         pte_t *pte = NULL;
4267
4268         pgd = pgd_offset(mm, addr);
4269         pud = pud_alloc(mm, pgd, addr);
4270         if (pud) {
4271                 if (sz == PUD_SIZE) {
4272                         pte = (pte_t *)pud;
4273                 } else {
4274                         BUG_ON(sz != PMD_SIZE);
4275                         if (want_pmd_share() && pud_none(*pud))
4276                                 pte = huge_pmd_share(mm, addr, pud);
4277                         else
4278                                 pte = (pte_t *)pmd_alloc(mm, pud, addr);
4279                 }
4280         }
4281         BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
4282
4283         return pte;
4284 }
4285
4286 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
4287 {
4288         pgd_t *pgd;
4289         pud_t *pud;
4290         pmd_t *pmd = NULL;
4291
4292         pgd = pgd_offset(mm, addr);
4293         if (pgd_present(*pgd)) {
4294                 pud = pud_offset(pgd, addr);
4295                 if (pud_present(*pud)) {
4296                         if (pud_huge(*pud))
4297                                 return (pte_t *)pud;
4298                         pmd = pmd_offset(pud, addr);
4299                 }
4300         }
4301         return (pte_t *)pmd;
4302 }
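/*
 * Condensed caller sketch (illustrative, cf. hugetlb_fault()): the fault
 * path probes with huge_pte_offset() first, so a migration entry can be
 * waited on without allocating page tables, and only falls back to
 * huge_pte_alloc() when no entry exists yet.
 */
#if 0
	ptep = huge_pte_offset(mm, address);
	if (ptep) {
		entry = huge_ptep_get(ptep);
		if (unlikely(is_hugetlb_entry_migration(entry))) {
			migration_entry_wait_huge(vma, mm, ptep);
			return 0;
		}
	} else {
		ptep = huge_pte_alloc(mm, address, huge_page_size(h));
		if (!ptep)
			return VM_FAULT_OOM;
	}
#endif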
4303
4304 #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
4305
4306 /*
4307  * These functions are overridable if your architecture needs its own
4308  * behavior.
4309  */
4310 struct page * __weak
4311 follow_huge_addr(struct mm_struct *mm, unsigned long address,
4312                               int write)
4313 {
4314         return ERR_PTR(-EINVAL);
4315 }
4316
4317 struct page * __weak
4318 follow_huge_pmd(struct mm_struct *mm, unsigned long address,
4319                 pmd_t *pmd, int flags)
4320 {
4321         struct page *page = NULL;
4322         spinlock_t *ptl;
4323 retry:
4324         ptl = pmd_lockptr(mm, pmd);
4325         spin_lock(ptl);
4326         /*
4327          * make sure that the address range covered by this pmd has not been
4328          * unmapped by another thread.
4329          */
4330         if (!pmd_huge(*pmd))
4331                 goto out;
4332         if (pmd_present(*pmd)) {
4333                 page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
4334                 if (flags & FOLL_GET)
4335                         get_page(page);
4336         } else {
4337                 if (is_hugetlb_entry_migration(huge_ptep_get((pte_t *)pmd))) {
4338                         spin_unlock(ptl);
4339                         __migration_entry_wait(mm, (pte_t *)pmd, ptl);
4340                         goto retry;
4341                 }
4342                 /*
4343                  * hwpoisoned entry is treated as no_page_table in
4344                  * follow_page_mask().
4345                  */
4346         }
4347 out:
4348         spin_unlock(ptl);
4349         return page;
4350 }
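/*
 * Condensed caller sketch (illustrative, cf. follow_page_mask() in
 * mm/gup.c): the generic GUP walk hands hugetlb pmds to follow_huge_pmd(),
 * and a NULL return (pmd gone, or a hwpoisoned entry) becomes the usual
 * no-page result.
 */
#if 0
	if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
		page = follow_huge_pmd(mm, address, pmd, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
#endif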
4351
4352 struct page * __weak
4353 follow_huge_pud(struct mm_struct *mm, unsigned long address,
4354                 pud_t *pud, int flags)
4355 {
4356         if (flags & FOLL_GET)
4357                 return NULL;
4358
4359         return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
4360 }
4361
4362 #ifdef CONFIG_MEMORY_FAILURE
4363
4364 /*
4365  * This function is called from memory failure code.
4366  * Assume the caller holds page lock of the head page.
4367  */
4368 int dequeue_hwpoisoned_huge_page(struct page *hpage)
4369 {
4370         struct hstate *h = page_hstate(hpage);
4371         int nid = page_to_nid(hpage);
4372         int ret = -EBUSY;
4373
4374         spin_lock(&hugetlb_lock);
4375         /*
4376          * Just checking !page_huge_active is not enough, because that could be
4377          * an isolated/hwpoisoned hugepage (which has a >0 refcount).
4378          */
4379         if (!page_huge_active(hpage) && !page_count(hpage)) {
4380                 /*
4381                  * Hwpoisoned hugepage isn't linked to activelist or freelist,
4382                  * but dangling hpage->lru can trigger list-debug warnings
4383                  * (this happens when we call unpoison_memory() on it),
4384                  * so let it point to itself with list_del_init().
4385                  */
4386                 list_del_init(&hpage->lru);
4387                 set_page_refcounted(hpage);
4388                 h->free_huge_pages--;
4389                 h->free_huge_pages_node[nid]--;
4390                 ret = 0;
4391         }
4392         spin_unlock(&hugetlb_lock);
4393         return ret;
4394 }
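/*
 * Condensed caller sketch (illustrative; the real call sites live in
 * mm/memory-failure.c): the caller locks the head page, and a zero return
 * means the poisoned page has been pulled out of the free pool for good.
 */
#if 0
	lock_page(hpage);
	if (!dequeue_hwpoisoned_huge_page(hpage))
		pr_info("hwpoison: %#lx: free huge page dequeued\n",
			page_to_pfn(hpage));	/* hypothetical message */
	unlock_page(hpage);
#endif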
4395 #endif
4396
4397 bool isolate_huge_page(struct page *page, struct list_head *list)
4398 {
4399         bool ret = true;
4400
4401         VM_BUG_ON_PAGE(!PageHead(page), page);
4402         spin_lock(&hugetlb_lock);
4403         if (!page_huge_active(page) || !get_page_unless_zero(page)) {
4404                 ret = false;
4405                 goto unlock;
4406         }
4407         clear_page_huge_active(page);
4408         list_move_tail(&page->lru, list);
4409 unlock:
4410         spin_unlock(&hugetlb_lock);
4411         return ret;
4412 }
4413
4414 void putback_active_hugepage(struct page *page)
4415 {
4416         VM_BUG_ON_PAGE(!PageHead(page), page);
4417         spin_lock(&hugetlb_lock);
4418         set_page_huge_active(page);
4419         list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
4420         spin_unlock(&hugetlb_lock);
4421         put_page(page);
4422 }