/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * TILE Huge TLB Page Support for Kernel.
 * Taken from i386 hugetlb implementation:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <linux/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
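
/*
 * Walk (and if necessary allocate) the page-table levels covering @addr.
 * On TILE a huge page is mapped by a single PMD entry, so the walk stops
 * at the PMD level and the PMD slot is returned cast to a pte_t pointer.
 */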
pte_t *huge_pte_alloc(struct mm_struct *mm,
		      unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pte_t *pte = NULL;

	/* We do not yet support multiple huge page sizes. */
	BUG_ON(sz != PMD_SIZE);

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud)
		pte = (pte_t *) pmd_alloc(mm, pud, addr);
	BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));

	return pte;
}
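
/*
 * Look up the PMD entry for an existing huge mapping at @addr without
 * allocating any page-table pages; returns NULL if any level of the
 * walk is not present.
 */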
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd = NULL;

	pgd = pgd_offset(mm, addr);
	if (pgd_present(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (pud_present(*pud))
			pmd = pmd_offset(pud, addr);
	}
	return (pte_t *) pmd;
}

#ifdef HUGETLB_TEST
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write)
{
	unsigned long start = address;
	struct page *page;
	struct vm_area_struct *vma;
	pte_t *pte;

	vma = find_vma(mm, address);
	if (!vma || !is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	pte = huge_pte_offset(mm, address);

	/* hugetlb should be locked, and hence, prefaulted */
	WARN_ON(!pte || pte_none(*pte));

	/* Index to the base page within the huge page that maps @address. */
	page = &pte_page(*pte)[(start >> PAGE_SHIFT) % (HPAGE_SIZE/PAGE_SIZE)];

	WARN_ON(!PageHead(page));

	return page;
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmd, int write)
{
	return NULL;
}

#else

struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write)
{
	return ERR_PTR(-EINVAL);
}
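
/*
 * A PMD or PUD maps a huge page when the _PAGE_HUGE_PAGE bit is set in
 * the entry; these predicates simply test that bit.
 */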
int pmd_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_HUGE_PAGE);
}

int pud_huge(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_HUGE_PAGE);
}
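
/*
 * Return the struct page for the base page within the huge page that
 * maps @address: start from the head page named by the PMD (or PUD)
 * entry and add the page index of @address within the huge region.
 */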
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmd, int write)
{
	struct page *page;

	page = pte_page(*(pte_t *)pmd);
	if (page)
		page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
	return page;
}

struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
			     pud_t *pud, int write)
{
	struct page *page;

	page = pte_page(*(pte_t *)pud);
	if (page)
		page += ((address & ~PUD_MASK) >> PAGE_SHIFT);
	return page;
}

int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}

#endif

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
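
/*
 * Bottom-up search for a free, huge-page-aligned region: starting from
 * the cached free-area hint (or TASK_UNMAPPED_BASE), walk the VMA list
 * upward until a gap of at least @len bytes is found, restarting once
 * from the base in case a hole below the hint was missed.
 */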
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;

	if (len > mm->cached_hole_size) {
		start_addr = mm->free_area_cache;
	} else {
		start_addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
	}

full_search:
	addr = ALIGN(start_addr, huge_page_size(h));

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (TASK_SIZE - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;
		addr = ALIGN(vma->vm_end, huge_page_size(h));
	}
}
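
/*
 * Top-down variant: search downward from mm->mmap_base for a
 * huge-page-aligned hole, caching the largest hole seen so later
 * searches can start from the hint.  Falls back to the bottom-up
 * search if no space is found below the base.
 */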
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr0, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *prev_vma;
	unsigned long base = mm->mmap_base, addr = addr0;
	unsigned long largest_hole = mm->cached_hole_size;
	int first_time = 1;

	/* don't allow allocations above current base */
	if (mm->free_area_cache > base)
		mm->free_area_cache = base;

	if (len <= largest_hole) {
		largest_hole = 0;
		mm->free_area_cache = base;
	}
try_again:
	/* make sure it can fit in the remaining address space */
	if (mm->free_area_cache < len)
		goto fail;

	/* either no address requested or can't fit in requested address hole */
	addr = (mm->free_area_cache - len) & huge_page_mask(h);
	do {
		/*
		 * Lookup failure means no vma is above this address,
		 * i.e. return with success:
		 */
		vma = find_vma_prev(mm, addr, &prev_vma);
		if (!vma)
			return addr;

		/*
		 * new region fits between prev_vma->vm_end and
		 * vma->vm_start, use it:
		 */
		if (addr + len <= vma->vm_start &&
		    (!prev_vma || (addr >= prev_vma->vm_end))) {
			/* remember the address as a hint for next time */
			mm->cached_hole_size = largest_hole;
			mm->free_area_cache = addr;
			return addr;
		} else {
			/* pull free_area_cache down to the first hole */
			if (mm->free_area_cache == vma->vm_end) {
				mm->free_area_cache = vma->vm_start;
				mm->cached_hole_size = largest_hole;
			}
		}

		/* remember the largest hole we saw so far */
		if (addr + largest_hole < vma->vm_start)
			largest_hole = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = (vma->vm_start - len) & huge_page_mask(h);
	} while (len <= vma->vm_start);

fail:
	/*
	 * if hint left us with no space for the requested
	 * mapping then try again:
	 */
	if (first_time) {
		mm->free_area_cache = base;
		largest_hole = 0;
		first_time = 0;
		goto try_again;
	}

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here.  This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	mm->cached_hole_size = ~0UL;
	addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
			len, pgoff, flags);

	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = base;
	mm->cached_hole_size = ~0UL;

	return addr;
}
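
/*
 * Arch hook for hugetlb mmap(): validate length and alignment, honor
 * MAP_FIXED and explicit address hints, then dispatch to the bottom-up
 * or top-down search to match the mm's normal mmap layout.
 */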
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (current->mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}
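
/*
 * Parse the "hugepagesz=" boot argument and register an extra huge page
 * size (for example "hugepagesz=16M" on the kernel command line, if that
 * matches PMD_SIZE for the running kernel).  Only PMD_SIZE and PUD_SIZE
 * are accepted; anything else is rejected with an error message.
 */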
static __init int setup_hugepagesz(char *opt)
{
	unsigned long ps = memparse(opt, &opt);
	if (ps == PMD_SIZE) {
		hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
	} else if (ps == PUD_SIZE) {
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	} else {
		pr_err("hugepagesz: Unsupported page size %lu M\n",
			ps >> 20);
		return 0;
	}
	return 1;
}

__setup("hugepagesz=", setup_hugepagesz);

#endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/