/*
 * Generic hugetlb support.
 * (C) William Irwin, April 2004
 */
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include <linux/hugetlb.h>
#include "internal.h"
const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static unsigned long nr_huge_pages, free_huge_pages, resv_huge_pages;
static unsigned long surplus_huge_pages;
unsigned long max_huge_pages;
static struct list_head hugepage_freelists[MAX_NUMNODES];
static unsigned int nr_huge_pages_node[MAX_NUMNODES];
static unsigned int free_huge_pages_node[MAX_NUMNODES];
static unsigned int surplus_huge_pages_node[MAX_NUMNODES];
static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
unsigned long hugepages_treat_as_movable;
int hugetlb_dynamic_pool;
static int hugetlb_next_nid;
/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
static DEFINE_SPINLOCK(hugetlb_lock);
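/*
 * Zero a huge page one base page at a time rather than in a single pass,
 * so the loop can reschedule between base pages.
 */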
static void clear_huge_page(struct page *page, unsigned long addr)
{
	int i;

	might_sleep();
	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
		cond_resched();
		clear_user_highpage(page + i, addr + i * PAGE_SIZE);
	}
}
static void copy_huge_page(struct page *dst, struct page *src,
			   unsigned long addr, struct vm_area_struct *vma)
{
	int i;

	might_sleep();
	for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) {
		cond_resched();
		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
	}
}
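/*
 * Place a free huge page back on its node's free list.  The caller must
 * hold hugetlb_lock, which protects the free lists and counters.
 */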
static void enqueue_huge_page(struct page *page)
{
	int nid = page_to_nid(page);
	list_add(&page->lru, &hugepage_freelists[nid]);
	free_huge_pages++;
	free_huge_pages_node[nid]++;
}
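/*
 * Take a free huge page off the free list of a node allowed by the VMA's
 * memory policy and cpuset.  Called with hugetlb_lock held.
 */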
static struct page *dequeue_huge_page(struct vm_area_struct *vma,
				unsigned long address)
{
	int nid;
	struct page *page = NULL;
	struct mempolicy *mpol;
	struct zonelist *zonelist = huge_zonelist(vma, address,
					htlb_alloc_mask, &mpol);
	struct zone **z;

	for (z = zonelist->zones; *z; z++) {
		nid = zone_to_nid(*z);
		if (cpuset_zone_allowed_softwall(*z, htlb_alloc_mask) &&
		    !list_empty(&hugepage_freelists[nid])) {
			page = list_entry(hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			free_huge_pages--;
			free_huge_pages_node[nid]--;
			if (vma && vma->vm_flags & VM_MAYSHARE)
				resv_huge_pages--;
			break;
		}
	}
	mpol_free(mpol);	/* unref if mpol !NULL */
	return page;
}
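/*
 * Return a huge page to the buddy allocator: clear the page flags the
 * allocator cares about, drop the compound destructor and free the block
 * at HUGETLB_PAGE_ORDER.  Called with hugetlb_lock held.
 */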
static void update_and_free_page(struct page *page)
{
	int i;

	nr_huge_pages--;
	nr_huge_pages_node[page_to_nid(page)]--;
	for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
				1 << PG_referenced | 1 << PG_dirty |
				1 << PG_active | 1 << PG_reserved |
				1 << PG_private | 1 << PG_writeback);
	}
	set_compound_page_dtor(page, NULL);
	set_page_refcounted(page);
	__free_pages(page, HUGETLB_PAGE_ORDER);
}
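/*
 * Compound page destructor, invoked when the last reference to a huge
 * page is dropped: surplus pages go straight back to the buddy
 * allocator, everything else returns to the free pool.
 */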
static void free_huge_page(struct page *page)
{
	int nid = page_to_nid(page);

	BUG_ON(page_count(page));
	INIT_LIST_HEAD(&page->lru);

	spin_lock(&hugetlb_lock);
	if (surplus_huge_pages_node[nid]) {
		update_and_free_page(page);
		surplus_huge_pages--;
		surplus_huge_pages_node[nid]--;
	} else {
		enqueue_huge_page(page);
	}
	spin_unlock(&hugetlb_lock);
}
/*
 * Increment or decrement surplus_huge_pages.  Keep node-specific counters
 * balanced by operating on them in a round-robin fashion.
 * Returns 1 if an adjustment was made.
 */
static int adjust_pool_surplus(int delta)
{
	static int prev_nid;
	int nid = prev_nid;
	int ret = 0;

	VM_BUG_ON(delta != -1 && delta != 1);
	do {
		nid = next_node(nid, node_online_map);
		if (nid == MAX_NUMNODES)
			nid = first_node(node_online_map);

		/* To shrink on this node, there must be a surplus page */
		if (delta < 0 && !surplus_huge_pages_node[nid])
			continue;
		/* Surplus cannot exceed the total number of pages */
		if (delta > 0 && surplus_huge_pages_node[nid] >=
						nr_huge_pages_node[nid])
			continue;

		surplus_huge_pages += delta;
		surplus_huge_pages_node[nid] += delta;
		ret = 1;
		break;
	} while (nid != prev_nid);

	prev_nid = nid;
	return ret;
}
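/*
 * Allocate a fresh huge page on the given node and hand it to the pool:
 * dropping the initial reference with put_page() runs free_huge_page(),
 * which enqueues the page on the free list.
 */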
static struct page *alloc_fresh_huge_page_node(int nid)
{
	struct page *page;

	page = alloc_pages_node(nid,
		htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|__GFP_NOWARN,
		HUGETLB_PAGE_ORDER);
	if (page) {
		set_compound_page_dtor(page, free_huge_page);
		spin_lock(&hugetlb_lock);
		nr_huge_pages++;
		nr_huge_pages_node[nid]++;
		spin_unlock(&hugetlb_lock);
		put_page(page); /* free it into the hugepage allocator */
	}

	return page;
}
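/*
 * Try each online node in turn, starting at hugetlb_next_nid, until a
 * fresh huge page is allocated.  Returns 1 on success, 0 on failure.
 */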
static int alloc_fresh_huge_page(void)
{
	struct page *page;
	int start_nid;
	int next_nid;
	int ret = 0;

	start_nid = hugetlb_next_nid;

	do {
		page = alloc_fresh_huge_page_node(hugetlb_next_nid);
		if (page)
			ret = 1;
		/*
		 * Use a helper variable to find the next node and then
		 * copy it back to hugetlb_next_nid afterwards:
		 * otherwise there's a window in which a racer might
		 * pass invalid nid MAX_NUMNODES to alloc_pages_node.
		 * But we don't need to use a spin_lock here: it really
		 * doesn't matter if occasionally a racer chooses the
		 * same nid as we do.  Move nid forward in the mask even
		 * if we just successfully allocated a hugepage so that
		 * the next caller gets hugepages on the next node.
		 */
		next_nid = next_node(hugetlb_next_nid, node_online_map);
		if (next_nid == MAX_NUMNODES)
			next_nid = first_node(node_online_map);
		hugetlb_next_nid = next_nid;
	} while (!page && hugetlb_next_nid != start_nid);

	return ret;
}
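/*
 * Allocate a surplus huge page directly from the buddy allocator.  Only
 * used when the dynamic pool is enabled; the page is accounted as
 * surplus so it goes back to the buddy allocator when it is freed.
 */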
static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma,
						unsigned long address)
{
	struct page *page;

	/* Check if the dynamic pool is enabled */
	if (!hugetlb_dynamic_pool)
		return NULL;

	page = alloc_pages(htlb_alloc_mask|__GFP_COMP|__GFP_NOWARN,
					HUGETLB_PAGE_ORDER);
	if (page) {
		set_compound_page_dtor(page, free_huge_page);
		spin_lock(&hugetlb_lock);
		nr_huge_pages++;
		nr_huge_pages_node[page_to_nid(page)]++;
		surplus_huge_pages++;
		surplus_huge_pages_node[page_to_nid(page)]++;
		spin_unlock(&hugetlb_lock);
	}

	return page;
}
/*
 * Increase the hugetlb pool such that it can accommodate a reservation
 * of size 'delta'.
 */
static int gather_surplus_pages(int delta)
{
	struct list_head surplus_list;
	struct page *page, *tmp;
	int ret, i;
	int needed, allocated;

	needed = (resv_huge_pages + delta) - free_huge_pages;
	if (needed <= 0)
		return 0;

	allocated = 0;
	INIT_LIST_HEAD(&surplus_list);

	ret = -ENOMEM;
retry:
	spin_unlock(&hugetlb_lock);
	for (i = 0; i < needed; i++) {
		page = alloc_buddy_huge_page(NULL, 0);
		if (!page) {
			/*
			 * We were not able to allocate enough pages to
			 * satisfy the entire reservation so we free what
			 * we've allocated so far.
			 */
			spin_lock(&hugetlb_lock);
			needed = 0;
			goto free;
		}
		list_add(&page->lru, &surplus_list);
	}
	allocated += needed;

	/*
	 * After retaking hugetlb_lock, we need to recalculate 'needed'
	 * because either resv_huge_pages or free_huge_pages may have changed.
	 */
	spin_lock(&hugetlb_lock);
	needed = (resv_huge_pages + delta) - (free_huge_pages + allocated);
	if (needed > 0)
		goto retry;

	/*
	 * The surplus_list now contains _at_least_ the number of extra pages
	 * needed to accommodate the reservation.  Add the appropriate number
	 * of pages to the hugetlb pool and free the extras back to the buddy
	 * allocator.
	 */
	needed += allocated;
	ret = 0;
free:
	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
		list_del(&page->lru);
		if ((--needed) >= 0)
			enqueue_huge_page(page);
		else {
			/*
			 * Decrement the refcount and free the page using its
			 * destructor.  This must be done with hugetlb_lock
			 * unlocked which is safe because free_huge_page takes
			 * hugetlb_lock before deciding how to free the page.
			 */
			spin_unlock(&hugetlb_lock);
			put_page(page);
			spin_lock(&hugetlb_lock);
		}
	}

	return ret;
}
/*
 * When releasing a hugetlb pool reservation, any surplus pages that were
 * allocated to satisfy the reservation must be explicitly freed if they
 * were never used.
 */
void return_unused_surplus_pages(unsigned long unused_resv_pages)
{
	static int nid = -1;
	struct page *page;
	unsigned long nr_pages;

	nr_pages = min(unused_resv_pages, surplus_huge_pages);

	while (nr_pages) {
		nid = next_node(nid, node_online_map);
		if (nid == MAX_NUMNODES)
			nid = first_node(node_online_map);

		if (!surplus_huge_pages_node[nid])
			continue;

		if (!list_empty(&hugepage_freelists[nid])) {
			page = list_entry(hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			update_and_free_page(page);
			free_huge_pages--;
			free_huge_pages_node[nid]--;
			surplus_huge_pages--;
			surplus_huge_pages_node[nid]--;
			nr_pages--;
		}
	}
}
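/*
 * Huge page allocation for a fault.  Shared mappings draw from the free
 * pool, whose pages were reserved up front; private mappings may only
 * use free pages in excess of the reserve, and otherwise fall back to a
 * surplus page from the buddy allocator.
 */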
static struct page *alloc_huge_page_shared(struct vm_area_struct *vma,
						unsigned long addr)
{
	struct page *page;

	spin_lock(&hugetlb_lock);
	page = dequeue_huge_page(vma, addr);
	spin_unlock(&hugetlb_lock);
	return page;
}
static struct page *alloc_huge_page_private(struct vm_area_struct *vma,
						unsigned long addr)
{
	struct page *page = NULL;

	spin_lock(&hugetlb_lock);
	if (free_huge_pages > resv_huge_pages)
		page = dequeue_huge_page(vma, addr);
	spin_unlock(&hugetlb_lock);
	if (!page)
		page = alloc_buddy_huge_page(vma, addr);
	return page;
}
static struct page *alloc_huge_page(struct vm_area_struct *vma,
				    unsigned long addr)
{
	struct page *page;

	if (vma->vm_flags & VM_MAYSHARE)
		page = alloc_huge_page_shared(vma, addr);
	else
		page = alloc_huge_page_private(vma, addr);

	if (page)
		set_page_refcounted(page);
	return page;
}
static int __init hugetlb_init(void)
{
	unsigned long i;

	if (HPAGE_SHIFT == 0)
		return 0;

	for (i = 0; i < MAX_NUMNODES; ++i)
		INIT_LIST_HEAD(&hugepage_freelists[i]);

	hugetlb_next_nid = first_node(node_online_map);

	for (i = 0; i < max_huge_pages; ++i) {
		if (!alloc_fresh_huge_page())
			break;
	}
	max_huge_pages = free_huge_pages = nr_huge_pages = i;
	printk("Total HugeTLB memory allocated, %ld\n", free_huge_pages);
	return 0;
}
module_init(hugetlb_init);
static int __init hugetlb_setup(char *s)
{
	if (sscanf(s, "%lu", &max_huge_pages) <= 0)
		max_huge_pages = 0;
	return 1;
}
__setup("hugepages=", hugetlb_setup);
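/*
 * Usage note: the boot-time pool size is requested with the "hugepages=N"
 * kernel parameter parsed above; at run time the pool is resized via the
 * vm.nr_hugepages sysctl, serviced by hugetlb_sysctl_handler() below.
 */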
static unsigned int cpuset_mems_nr(unsigned int *array)
{
	int node;
	unsigned int nr = 0;

	for_each_node_mask(node, cpuset_current_mems_allowed)
		nr += array[node];

	return nr;
}
#ifdef CONFIG_SYSCTL
#ifdef CONFIG_HIGHMEM
static void try_to_free_low(unsigned long count)
{
	int i;

	for (i = 0; i < MAX_NUMNODES; ++i) {
		struct page *page, *next;
		list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) {
			if (count >= nr_huge_pages)
				return;
			if (PageHighMem(page))
				continue;
			list_del(&page->lru);
			update_and_free_page(page);
			free_huge_pages--;
			free_huge_pages_node[page_to_nid(page)]--;
		}
	}
}
#else
static inline void try_to_free_low(unsigned long count)
{
}
#endif
#define persistent_huge_pages (nr_huge_pages - surplus_huge_pages)
static unsigned long set_max_huge_pages(unsigned long count)
{
	unsigned long min_count, ret;

	/*
	 * Increase the pool size
	 * First take pages out of surplus state.  Then make up the
	 * remaining difference by allocating fresh huge pages.
	 */
	spin_lock(&hugetlb_lock);
	while (surplus_huge_pages && count > persistent_huge_pages) {
		if (!adjust_pool_surplus(-1))
			break;
	}

	while (count > persistent_huge_pages) {
		/*
		 * If this allocation races such that we no longer need the
		 * page, free_huge_page will handle it by freeing the page
		 * and reducing the surplus.
		 */
		spin_unlock(&hugetlb_lock);
		ret = alloc_fresh_huge_page();
		spin_lock(&hugetlb_lock);
		if (!ret)
			goto out;
	}

	/*
	 * Decrease the pool size
	 * First return free pages to the buddy allocator (being careful
	 * to keep enough around to satisfy reservations).  Then place
	 * pages into surplus state as needed so the pool will shrink
	 * to the desired size as pages become free.
	 */
	min_count = resv_huge_pages + nr_huge_pages - free_huge_pages;
	min_count = max(count, min_count);
	try_to_free_low(min_count);
	while (min_count < persistent_huge_pages) {
		struct page *page = dequeue_huge_page(NULL, 0);
		if (!page)
			break;
		update_and_free_page(page);
	}
	while (count < persistent_huge_pages) {
		if (!adjust_pool_surplus(1))
			break;
	}
out:
	ret = persistent_huge_pages;
	spin_unlock(&hugetlb_lock);
	return ret;
}
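/*
 * Handler for the vm.nr_hugepages sysctl (/proc/sys/vm/nr_hugepages):
 * resize the persistent huge page pool to the value just written.
 */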
int hugetlb_sysctl_handler(struct ctl_table *table, int write,
			   struct file *file, void __user *buffer,
			   size_t *length, loff_t *ppos)
{
	proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
	max_huge_pages = set_max_huge_pages(max_huge_pages);
	return 0;
}

int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
			struct file *file, void __user *buffer,
			size_t *length, loff_t *ppos)
{
	proc_dointvec(table, write, file, buffer, length, ppos);
	if (hugepages_treat_as_movable)
		htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
	else
		htlb_alloc_mask = GFP_HIGHUSER;
	return 0;
}

#endif /* CONFIG_SYSCTL */
int hugetlb_report_meminfo(char *buf)
{
	return sprintf(buf,
			"HugePages_Total: %5lu\n"
			"HugePages_Free:  %5lu\n"
			"HugePages_Rsvd:  %5lu\n"
			"HugePages_Surp:  %5lu\n"
			"Hugepagesize:    %5lu kB\n",
			nr_huge_pages,
			free_huge_pages,
			resv_huge_pages,
			surplus_huge_pages,
			HPAGE_SIZE/1024);
}

int hugetlb_report_node_meminfo(int nid, char *buf)
{
	return sprintf(buf,
		"Node %d HugePages_Total: %5u\n"
		"Node %d HugePages_Free:  %5u\n",
		nid, nr_huge_pages_node[nid],
		nid, free_huge_pages_node[nid]);
}
/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
	return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE);
}
/*
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we
 * get this far.
 */
static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	BUG();
	return 0;
}

struct vm_operations_struct hugetlb_vm_ops = {
	.fault = hugetlb_vm_op_fault,
};
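/*
 * Construct the huge PTE for a page: writable mappings get a dirty,
 * writable entry, others a write-protected one; the entry is always
 * marked young and huge.
 */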
static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
				int writable)
{
	pte_t entry;

	if (writable) {
		entry =
		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
	} else {
		entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
	}
	entry = pte_mkyoung(entry);
	entry = pte_mkhuge(entry);

	return entry;
}
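/*
 * Make an existing huge PTE writable and dirty, updating the MMU cache
 * if the access flags actually changed.
 */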
static void set_huge_ptep_writable(struct vm_area_struct *vma,
				   unsigned long address, pte_t *ptep)
{
	pte_t entry;

	entry = pte_mkwrite(pte_mkdirty(*ptep));
	if (ptep_set_access_flags(vma, address, ptep, entry, 1)) {
		update_mmu_cache(vma, address, entry);
	}
}
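/*
 * Copy the huge PTEs of a source mm into a destination mm at fork time.
 * For private copy-on-write mappings the source entries are also write
 * protected, so the next write from either side goes through hugetlb_cow().
 */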
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr;
	int cow;

	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
		src_pte = huge_pte_offset(src, addr);
		if (!src_pte)
			continue;
		dst_pte = huge_pte_alloc(dst, addr);
		if (!dst_pte)
			goto nomem;
		spin_lock(&dst->page_table_lock);
		spin_lock(&src->page_table_lock);
		if (!pte_none(*src_pte)) {
			if (cow)
				ptep_set_wrprotect(src, addr, src_pte);
			entry = *src_pte;
			ptepage = pte_page(entry);
			get_page(ptepage);
			set_huge_pte_at(dst, addr, dst_pte, entry);
		}
		spin_unlock(&src->page_table_lock);
		spin_unlock(&dst->page_table_lock);
	}
	return 0;

nomem:
	return -ENOMEM;
}
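/*
 * Unmap the huge pages in [start, end).  Unmapped pages are gathered on
 * a local list and released only after the TLB flush; the caller is
 * expected to hold the file's i_mmap_lock to keep that list consistent
 * (see unmap_hugepage_range() below).
 */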
void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *ptep;
	pte_t pte;
	struct page *page;
	struct page *tmp;
	/*
	 * A page gathering list, protected by per file i_mmap_lock. The
	 * lock is used to avoid list corruption from multiple unmapping
	 * of the same page since we are using page->lru.
	 */
	LIST_HEAD(page_list);

	WARN_ON(!is_vm_hugetlb_page(vma));
	BUG_ON(start & ~HPAGE_MASK);
	BUG_ON(end & ~HPAGE_MASK);

	spin_lock(&mm->page_table_lock);
	for (address = start; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;

		if (huge_pmd_unshare(mm, &address, ptep))
			continue;

		pte = huge_ptep_get_and_clear(mm, address, ptep);
		if (pte_none(pte))
			continue;

		page = pte_page(pte);
		if (pte_dirty(pte))
			set_page_dirty(page);
		list_add(&page->lru, &page_list);
	}
	spin_unlock(&mm->page_table_lock);
	flush_tlb_range(vma, start, end);
	list_for_each_entry_safe(page, tmp, &page_list, lru) {
		list_del(&page->lru);
		put_page(page);
	}
}
void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end)
{
	/*
	 * It is undesirable to test vma->vm_file as it should be non-null
	 * for a valid hugetlb area. However, vm_file will be NULL in the
	 * error cleanup path of do_mmap_pgoff. When the hugetlbfs ->mmap
	 * method fails, do_mmap_pgoff() nullifies vma->vm_file before
	 * calling this function to clean up. Since no pte has actually
	 * been set up, it is safe to do nothing in this case.
	 */
	if (vma->vm_file) {
		spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
		__unmap_hugepage_range(vma, start, end);
		spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
	}
}
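/*
 * Handle a copy-on-write fault on a huge page: if this mapping holds the
 * only reference, just make the PTE writable; otherwise allocate a new
 * huge page, copy the contents and swap it into the PTE.
 */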
static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, pte_t pte)
{
	struct page *old_page, *new_page;
	int avoidcopy;

	old_page = pte_page(pte);

	/* If no-one else is actually using this page, avoid the copy
	 * and just make the page writable */
	avoidcopy = (page_count(old_page) == 1);
	if (avoidcopy) {
		set_huge_ptep_writable(vma, address, ptep);
		return 0;
	}

	page_cache_get(old_page);
	new_page = alloc_huge_page(vma, address);

	if (!new_page) {
		page_cache_release(old_page);
		return VM_FAULT_OOM;
	}

	spin_unlock(&mm->page_table_lock);
	copy_huge_page(new_page, old_page, address, vma);
	spin_lock(&mm->page_table_lock);

	ptep = huge_pte_offset(mm, address & HPAGE_MASK);
	if (likely(pte_same(*ptep, pte))) {
		/* Break COW */
		set_huge_pte_at(mm, address, ptep,
				make_huge_pte(vma, new_page, 1));
		/* Make the old page be freed below */
		new_page = old_page;
	}
	page_cache_release(new_page);
	page_cache_release(old_page);
	return 0;
}
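/*
 * Fault in a huge page with no PTE yet: look the page up in the page
 * cache or allocate and (for shared mappings) insert it, using the page
 * lock to guard against racing truncation, then install the PTE under
 * page_table_lock.
 */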
static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, int write_access)
{
	int ret = VM_FAULT_SIGBUS;
	unsigned long idx;
	unsigned long size;
	struct page *page;
	struct address_space *mapping;
	pte_t new_pte;

	mapping = vma->vm_file->f_mapping;
	idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
		+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));

	/*
	 * Use page lock to guard against racing truncation
	 * before we get page_table_lock.
	 */
retry:
	page = find_lock_page(mapping, idx);
	if (!page) {
		size = i_size_read(mapping->host) >> HPAGE_SHIFT;
		if (idx >= size)
			goto out;
		if (hugetlb_get_quota(mapping))
			goto out;
		page = alloc_huge_page(vma, address);
		if (!page) {
			hugetlb_put_quota(mapping);
			ret = VM_FAULT_OOM;
			goto out;
		}
		clear_huge_page(page, address);

		if (vma->vm_flags & VM_SHARED) {
			int err;

			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
			if (err) {
				put_page(page);
				hugetlb_put_quota(mapping);
				if (err == -EEXIST)
					goto retry;
				goto out;
			}
		} else
			lock_page(page);
	}

	spin_lock(&mm->page_table_lock);
	size = i_size_read(mapping->host) >> HPAGE_SHIFT;
	if (idx >= size)
		goto backout;

	ret = 0;
	if (!pte_none(*ptep))
		goto backout;

	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
				&& (vma->vm_flags & VM_SHARED)));
	set_huge_pte_at(mm, address, ptep, new_pte);

	if (write_access && !(vma->vm_flags & VM_SHARED)) {
		/* Optimization, do the COW without a second fault */
		ret = hugetlb_cow(mm, vma, address, ptep, new_pte);
	}

	spin_unlock(&mm->page_table_lock);
	unlock_page(page);
out:
	return ret;

backout:
	spin_unlock(&mm->page_table_lock);
	hugetlb_put_quota(mapping);
	unlock_page(page);
	put_page(page);
	goto out;
}
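/*
 * Top-level hugetlb fault handler.  A single mutex serializes huge page
 * instantiation, so two CPUs racing to fault the same page don't both
 * try to allocate one and spuriously fail.
 */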
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, int write_access)
{
	pte_t *ptep;
	pte_t entry;
	int ret;
	static DEFINE_MUTEX(hugetlb_instantiation_mutex);

	ptep = huge_pte_alloc(mm, address);
	if (!ptep)
		return VM_FAULT_OOM;

	/*
	 * Serialize hugepage allocation and instantiation, so that we don't
	 * get spurious allocation failures if two CPUs race to instantiate
	 * the same page in the page cache.
	 */
	mutex_lock(&hugetlb_instantiation_mutex);
	entry = *ptep;
	if (pte_none(entry)) {
		ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
		mutex_unlock(&hugetlb_instantiation_mutex);
		return ret;
	}

	ret = 0;

	spin_lock(&mm->page_table_lock);
	/* Check for a racing update before calling hugetlb_cow */
	if (likely(pte_same(entry, *ptep)))
		if (write_access && !pte_write(entry))
			ret = hugetlb_cow(mm, vma, address, ptep, entry);
	spin_unlock(&mm->page_table_lock);
	mutex_unlock(&hugetlb_instantiation_mutex);

	return ret;
}
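/*
 * get_user_pages() back end for hugetlb VMAs: walk the range one huge
 * page at a time, faulting pages in as needed, and fill the pages[] and
 * vmas[] arrays one base page at a time.
 */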
int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
			struct page **pages, struct vm_area_struct **vmas,
			unsigned long *position, int *length, int i,
			int write)
{
	unsigned long pfn_offset;
	unsigned long vaddr = *position;
	int remainder = *length;

	spin_lock(&mm->page_table_lock);
	while (vaddr < vma->vm_end && remainder) {
		pte_t *pte;
		struct page *page;

		/*
		 * Some archs (sparc64, sh*) have multiple pte_ts to
		 * each hugepage.  We have to make sure we get the
		 * first, for the page indexing below to work.
		 */
		pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);

		if (!pte || pte_none(*pte)) {
			int ret;

			spin_unlock(&mm->page_table_lock);
			ret = hugetlb_fault(mm, vma, vaddr, write);
			spin_lock(&mm->page_table_lock);
			if (!(ret & VM_FAULT_ERROR))
				continue;

			remainder = 0;
			if (!i)
				i = -EFAULT;
			break;
		}

		pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT;
		page = pte_page(*pte);
same_page:
		if (pages) {
			get_page(page);
			pages[i] = page + pfn_offset;
		}

		if (vmas)
			vmas[i] = vma;

		vaddr += PAGE_SIZE;
		++pfn_offset;
		--remainder;
		++i;
		if (vaddr < vma->vm_end && remainder &&
				pfn_offset < HPAGE_SIZE/PAGE_SIZE) {
			/*
			 * We use pfn_offset to avoid touching the pageframes
			 * of this compound page.
			 */
			goto same_page;
		}
	}
	spin_unlock(&mm->page_table_lock);
	*length = remainder;
	*position = vaddr;

	return i;
}
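/*
 * Change the protection of every huge PTE in [address, end), e.g. for
 * mprotect() of a hugetlb VMA: each PTE is cleared and rewritten under
 * page_table_lock, then the whole range is TLB flushed.
 */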
void hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long start = address;
	pte_t *ptep;
	pte_t pte;

	BUG_ON(address >= end);
	flush_cache_range(vma, address, end);

	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
	spin_lock(&mm->page_table_lock);
	for (; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;
		if (huge_pmd_unshare(mm, &address, ptep))
			continue;
		if (!pte_none(*ptep)) {
			pte = huge_ptep_get_and_clear(mm, address, ptep);
			pte = pte_mkhuge(pte_modify(pte, newprot));
			set_huge_pte_at(mm, address, ptep, pte);
		}
	}
	spin_unlock(&mm->page_table_lock);
	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);

	flush_tlb_range(vma, start, end);
}
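/*
 * File regions track which offsets of a hugetlbfs file already carry a
 * reservation, as a sorted list of [from, to) ranges hanging off the
 * inode's private_list.  region_chg() reports how many pages a new range
 * would add, region_add() commits the range, and region_truncate() drops
 * everything past 'end'.  Worked example: with an existing region [0, 4),
 * region_chg(head, 2, 6) returns 2 (only pages 4 and 5 are new), and a
 * subsequent region_add(head, 2, 6) merges the ranges into [0, 6).
 */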
struct file_region {
	struct list_head link;
	long from;
	long to;
};
static long region_add(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg, *trg;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;

	/* Check for and consume any regions we now overlap with. */
	nrg = rg;
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			break;

		/* If this area reaches higher then extend our area to
		 * include it completely.  If this is not the first area
		 * which we intend to reuse, free it. */
		if (rg->to > t)
			t = rg->to;
		if (rg != nrg) {
			list_del(&rg->link);
			kfree(rg);
		}
	}
	nrg->from = f;
	nrg->to = t;
	return 0;
}
static long region_chg(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg;
	long chg = 0;

	/* Locate the region we are before or in. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* If we are below the current region then a new region is required.
	 * Subtle, allocate a new region at the position but make it zero
	 * size such that we can guarantee to record the reservation. */
	if (&rg->link == head || t < rg->from) {
		nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
		if (!nrg)
			return -ENOMEM;
		nrg->from = f;
		nrg->to = f;
		INIT_LIST_HEAD(&nrg->link);
		list_add(&nrg->link, rg->link.prev);

		return t - f;
	}

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;
	chg = t - f;

	/* Check for and consume any regions we now overlap with. */
	list_for_each_entry(rg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			return chg;

		/* We overlap with this area, if it extends further than
		 * us then we must extend ourselves.  Account for its
		 * existing reservation. */
		if (rg->to > t) {
			chg += rg->to - t;
			t = rg->to;
		}
		chg -= rg->to - rg->from;
	}
	return chg;
}
static long region_truncate(struct list_head *head, long end)
{
	struct file_region *rg, *trg;
	long chg = 0;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (end <= rg->to)
			break;
	if (&rg->link == head)
		return 0;

	/* If we are in the middle of a region then adjust it. */
	if (end > rg->from) {
		chg = rg->to - end;
		rg->to = end;
		rg = list_entry(rg->link.next, typeof(*rg), link);
	}

	/* Drop any remaining regions. */
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		chg += rg->to - rg->from;
		list_del(&rg->link);
		kfree(rg);
	}
	return chg;
}
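/*
 * Charge or uncharge 'delta' huge pages against the global reservation:
 * a positive delta grows the pool with surplus pages as needed, a
 * negative delta hands back surplus pages that are no longer needed.
 */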
static int hugetlb_acct_memory(long delta)
{
	int ret = -ENOMEM;

	spin_lock(&hugetlb_lock);
	/*
	 * When cpuset is configured, it breaks the strict hugetlb page
	 * reservation as the accounting is done on a global variable. Such
	 * a reservation is completely rubbish in the presence of cpuset
	 * because the reservation is not checked against page availability
	 * for the current cpuset. An application can still potentially be
	 * OOM'ed by the kernel for lack of free htlb pages in the cpuset
	 * that the task is in. Attempting to enforce strict accounting with
	 * cpuset is almost impossible (or too ugly) because cpuset is so
	 * fluid that a task or memory node can be dynamically moved between
	 * cpusets.
	 *
	 * The change of semantics for shared hugetlb mapping with cpuset is
	 * undesirable. However, in order to preserve some of the semantics,
	 * we fall back to checking against current free page availability as
	 * a best attempt and hopefully to minimize the impact of changing
	 * semantics that cpuset has.
	 */
	if (delta > 0) {
		if (gather_surplus_pages(delta) < 0)
			goto out;

		if (delta > cpuset_mems_nr(free_huge_pages_node))
			goto out;
	}

	ret = 0;
	resv_huge_pages += delta;
	if (delta < 0)
		return_unused_surplus_pages((unsigned long) -delta);

out:
	spin_unlock(&hugetlb_lock);
	return ret;
}
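/*
 * Reserve huge pages for the file range [from, to): region_chg() reports
 * how many pages are not yet covered, those are charged, and only then is
 * the region recorded, so a failed charge leaves the map untouched.
 */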
int hugetlb_reserve_pages(struct inode *inode, long from, long to)
{
	long ret, chg;

	chg = region_chg(&inode->i_mapping->private_list, from, to);
	if (chg < 0)
		return chg;

	ret = hugetlb_acct_memory(chg);
	if (ret < 0)
		return ret;
	region_add(&inode->i_mapping->private_list, from, to);
	return 0;
}
void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
{
	long chg = region_truncate(&inode->i_mapping->private_list, offset);
	hugetlb_acct_memory(freed - chg);
}