/*
 * address space "slices" (meta-segments) support
 *
 * Copyright (C) 2007 Benjamin Herrenschmidt, IBM Corporation.
 *
 * Based on hugetlb implementation
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <asm/mman.h>
#include <asm/mmu.h>
#include <asm/spu.h>

/* some sanity checks */
#if (PGTABLE_RANGE >> 43) > SLICE_MASK_SIZE
#error PGTABLE_RANGE exceeds slice_mask high_slices size
#endif

static DEFINE_SPINLOCK(slice_convert_lock);
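
/*
 * A "slice" is a fixed-size chunk of the user address space whose page
 * size can be set independently of its neighbours.  struct slice_mask
 * (defined elsewhere in the arch headers) carries one bit per low slice
 * in ->low_slices and one bit per high slice in ->high_slices; the
 * helpers below build, print and compare such masks.  The exact slice
 * geometry comes from the SLICE_* constants in those headers.
 */
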
#ifdef DEBUG
int _slice_debug = 1;

static void slice_print_mask(const char *label, struct slice_mask mask)
{
	char	*p, buf[16 + 3 + 64 + 1];
	int	i;

	if (!_slice_debug)
		return;
	p = buf;
	for (i = 0; i < SLICE_NUM_LOW; i++)
		*(p++) = (mask.low_slices & (1 << i)) ? '1' : '0';
	*(p++) = ' ';
	*(p++) = '-';
	*(p++) = ' ';
	for (i = 0; i < SLICE_NUM_HIGH; i++)
		*(p++) = (mask.high_slices & (1ul << i)) ? '1' : '0';
	*(p++) = 0;

	printk(KERN_DEBUG "%s:%s\n", label, buf);
}

#define slice_dbg(fmt...) do { if (_slice_debug) pr_debug(fmt); } while(0)

#else

static void slice_print_mask(const char *label, struct slice_mask mask) {}
#define slice_dbg(fmt...)

#endif
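
/*
 * Build a slice_mask covering the address range [start, start + len).
 * The bit trick below sets every mask bit from the first to the last
 * slice touched by the range: (1 << (last + 1)) - (1 << first) is a
 * run of ones from bit 'first' up to and including bit 'last'.  For
 * example, first = 1 and last = 3 gives 0b10000 - 0b00010 = 0b01110.
 */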
static struct slice_mask slice_range_to_mask(unsigned long start,
					     unsigned long len)
{
	unsigned long end = start + len - 1;
	struct slice_mask ret = { 0, 0 };

	if (start < SLICE_LOW_TOP) {
		unsigned long mend = min(end, SLICE_LOW_TOP);
		unsigned long mstart = min(start, SLICE_LOW_TOP);

		ret.low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
			- (1u << GET_LOW_SLICE_INDEX(mstart));
	}

	if ((start + len) > SLICE_LOW_TOP)
		ret.high_slices = (1ul << (GET_HIGH_SLICE_INDEX(end) + 1))
			- (1ul << GET_HIGH_SLICE_INDEX(start));

	return ret;
}
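
/*
 * slice_area_is_free() reports whether [addr, addr + len) lies within
 * the task size and does not overlap any existing VMA.  The two
 * *_has_vma() helpers below apply it to a single low or high slice.
 */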
static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
			      unsigned long len)
{
	struct vm_area_struct *vma;

	if ((mm->task_size - len) < addr)
		return 0;
	vma = find_vma(mm, addr);
	return (!vma || (addr + len) <= vma->vm_start);
}

static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
{
	return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT,
				   1ul << SLICE_LOW_SHIFT);
}

static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
{
	unsigned long start = slice << SLICE_HIGH_SHIFT;
	unsigned long end = start + (1ul << SLICE_HIGH_SHIFT);

	/* Hack, so that each address is controlled by exactly one
	 * of the high or low area bitmaps, the first high area starts
	 * at 4GB, not 0 */
	if (start == 0)
		start = SLICE_LOW_TOP;

	return !slice_area_is_free(mm, start, end - start);
}

static struct slice_mask slice_mask_for_free(struct mm_struct *mm)
{
	struct slice_mask ret = { 0, 0 };
	unsigned long i;

	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (!slice_low_has_vma(mm, i))
			ret.low_slices |= 1u << i;

	if (mm->task_size <= SLICE_LOW_TOP)
		return ret;

	for (i = 0; i < SLICE_NUM_HIGH; i++)
		if (!slice_high_has_vma(mm, i))
			ret.high_slices |= 1ul << i;

	return ret;
}
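
/*
 * Each slice's page-size index is stored as a 4-bit field: the low
 * slices are packed into the single low_slices_psize word (slice i in
 * bits 4*i..4*i+3), while the high slices live two per byte in the
 * high_slices_psize array (i & 1 selects the nibble, i >> 1 the byte).
 * slice_mask_for_size() collects every slice whose field equals the
 * requested psize.
 */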
static struct slice_mask slice_mask_for_size(struct mm_struct *mm, int psize)
{
	unsigned char *hpsizes;
	int index, mask_index;
	struct slice_mask ret = { 0, 0 };
	unsigned long i;
	u64 lpsizes;

	lpsizes = mm->context.low_slices_psize;
	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (((lpsizes >> (i * 4)) & 0xf) == psize)
			ret.low_slices |= 1u << i;

	hpsizes = mm->context.high_slices_psize;
	for (i = 0; i < SLICE_NUM_HIGH; i++) {
		mask_index = i & 0x1;
		index = i >> 1;
		if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == psize)
			ret.high_slices |= 1ul << i;
	}

	return ret;
}

static int slice_check_fit(struct slice_mask mask, struct slice_mask available)
{
	return (mask.low_slices & available.low_slices) == mask.low_slices &&
		(mask.high_slices & available.high_slices) == mask.high_slices;
}
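
/*
 * slice_flush_segments() is meant to run on every CPU (see the
 * on_each_cpu() call in slice_get_unmapped_area() below): CPUs that are
 * not currently running the target mm simply return, the others refresh
 * the PACA copy of the context and rebuild their bolted SLB entries so
 * that stale segments with the old page size are dropped.
 */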
static void slice_flush_segments(void *parm)
{
	struct mm_struct *mm = parm;
	unsigned long flags;

	if (mm != current->active_mm)
		return;

	/* update the paca copy of the context struct */
	get_paca()->context = current->active_mm->context;

	local_irq_save(flags);
	slb_flush_and_rebolt();
	local_irq_restore(flags);
}
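
/*
 * Re-label every slice selected in @mask with the page-size index
 * @psize.  Only the {low,high}_slices_psize bookkeeping is rewritten
 * here, under slice_convert_lock; SPU SLBs are flushed at the end of
 * this function, while CPU SLBs are flushed by the callers (see
 * slice_get_unmapped_area() below).
 */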
static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psize)
{
	int index, mask_index;
	/* Write the new slice psize bits */
	unsigned char *hpsizes;
	u64 lpsizes;
	unsigned long i, flags;

	slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize);
	slice_print_mask(" mask", mask);

	/* We need to use a spinlock here to protect against
	 * concurrent 64k -> 4k demotion ...
	 */
	spin_lock_irqsave(&slice_convert_lock, flags);

	lpsizes = mm->context.low_slices_psize;
	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (mask.low_slices & (1u << i))
			lpsizes = (lpsizes & ~(0xful << (i * 4))) |
				(((unsigned long)psize) << (i * 4));

	/* Assign the value back */
	mm->context.low_slices_psize = lpsizes;

	hpsizes = mm->context.high_slices_psize;
	for (i = 0; i < SLICE_NUM_HIGH; i++) {
		mask_index = i & 0x1;
		index = i >> 1;
		if (mask.high_slices & (1ul << i))
			hpsizes[index] = (hpsizes[index] &
					  ~(0xf << (mask_index * 4))) |
				(((unsigned long)psize) << (mask_index * 4));
	}

	slice_dbg(" lsps=%lx, hsps=%lx\n",
		  mm->context.low_slices_psize,
		  mm->context.high_slices_psize);

	spin_unlock_irqrestore(&slice_convert_lock, flags);

#ifdef CONFIG_SPU_BASE
	spu_flush_all_slbs(mm);
#endif
}
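
/*
 * Bottom-up search for a free area of @len bytes that fits entirely in
 * slices allowed by @available.  Each candidate address is aligned to
 * the requested page size; when the candidate range touches a slice
 * outside @available we jump straight to the next low/high slice
 * boundary instead of scanning it page by page.
 */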
static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
					      unsigned long len,
					      struct slice_mask available,
					      int psize)
{
	struct vm_area_struct *vma;
	unsigned long addr;
	struct slice_mask mask;
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);

	addr = TASK_UNMAPPED_BASE;

	for (;;) {
		addr = _ALIGN_UP(addr, 1ul << pshift);
		if ((TASK_SIZE - len) < addr)
			break;
		vma = find_vma(mm, addr);
		BUG_ON(vma && (addr >= vma->vm_end));

		mask = slice_range_to_mask(addr, len);
		if (!slice_check_fit(mask, available)) {
			if (addr < SLICE_LOW_TOP)
				addr = _ALIGN_UP(addr + 1, 1ul << SLICE_LOW_SHIFT);
			else
				addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
			continue;
		}
		if (!vma || addr + len <= vma->vm_start)
			return addr;
		addr = vma->vm_end;
	}

	return -ENOMEM;
}

static unsigned long slice_find_area_topdown(struct mm_struct *mm,
					     unsigned long len,
					     struct slice_mask available,
					     int psize)
{
	struct vm_area_struct *vma;
	unsigned long addr;
	struct slice_mask mask;
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);

	addr = mm->mmap_base;
	while (addr > len) {
		/* Go down by chunk size */
		addr = _ALIGN_DOWN(addr - len, 1ul << pshift);

		/* Check for hit with different page size */
		mask = slice_range_to_mask(addr, len);
		if (!slice_check_fit(mask, available)) {
			if (addr < SLICE_LOW_TOP)
				addr = _ALIGN_DOWN(addr, 1ul << SLICE_LOW_SHIFT);
			else if (addr < (1ul << SLICE_HIGH_SHIFT))
				addr = SLICE_LOW_TOP;
			else
				addr = _ALIGN_DOWN(addr, 1ul << SLICE_HIGH_SHIFT);
			continue;
		}

		/*
		 * Lookup failure means no vma is above this address,
		 * else if new region fits below vma->vm_start,
		 * return with success:
		 */
		vma = find_vma(mm, addr);
		if (!vma || (addr + len) <= vma->vm_start)
			return addr;

		/* try just below the current vma->vm_start */
		addr = vma->vm_start;
	}

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	return slice_find_area_bottomup(mm, len, available, psize);
}

static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
				     struct slice_mask mask, int psize,
				     int topdown)
{
	if (topdown)
		return slice_find_area_topdown(mm, len, mask, psize);
	else
		return slice_find_area_bottomup(mm, len, mask, psize);
}

#define or_mask(dst, src)	do {			\
	(dst).low_slices |= (src).low_slices;		\
	(dst).high_slices |= (src).high_slices;		\
} while (0)

#define andnot_mask(dst, src)	do {			\
	(dst).low_slices &= ~(src).low_slices;		\
	(dst).high_slices &= ~(src).high_slices;	\
} while (0)

#ifdef CONFIG_PPC_64K_PAGES
#define MMU_PAGE_BASE	MMU_PAGE_64K
#else
#define MMU_PAGE_BASE	MMU_PAGE_4K
#endif

unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
				      unsigned long flags, unsigned int psize,
				      int topdown)
{
	struct slice_mask mask = {0, 0};
	struct slice_mask good_mask;
	struct slice_mask potential_mask = {0, 0} /* silence stupid warning */;
	struct slice_mask compat_mask = {0, 0};
	int fixed = (flags & MAP_FIXED);
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	struct mm_struct *mm = current->mm;
	unsigned long newaddr;

	/* Sanity checks */
	BUG_ON(mm->task_size == 0);

	slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
	slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d\n",
		  addr, len, flags, topdown);

	if (len > mm->task_size)
		return -ENOMEM;
	if (len & ((1ul << pshift) - 1))
		return -EINVAL;
	if (fixed && (addr & ((1ul << pshift) - 1)))
		return -EINVAL;
	if (fixed && addr > (mm->task_size - len))
		return -ENOMEM;

	/* If hint, make sure it matches our alignment restrictions */
	if (!fixed && addr) {
		addr = _ALIGN_UP(addr, 1ul << pshift);
		slice_dbg(" aligned addr=%lx\n", addr);
		/* Ignore hint if it's too large or overlaps a VMA */
		if (addr > mm->task_size - len ||
		    !slice_area_is_free(mm, addr, len))
			addr = 0;
	}

	/* First make up a "good" mask of slices that have the right size
	 * already
	 */
	good_mask = slice_mask_for_size(mm, psize);
	slice_print_mask(" good_mask", good_mask);

	/*
	 * Here "good" means slices that are already the right page size,
	 * "compat" means slices that have a compatible page size (i.e.
	 * 4k in a 64k pagesize kernel), and "free" means slices without
	 * any VMAs.
	 *
	 * If MAP_FIXED:
	 *	check if fits in good | compat => OK
	 *	check if fits in good | compat | free => convert free
	 *	else bad
	 * If have hint:
	 *	check if hint fits in good => OK
	 *	check if hint fits in good | free => convert free
	 * If not hint:
	 *	search in good, found => OK
	 *	search in good | free, found => convert free
	 *	search in good | compat | free, found => convert free.
	 */

#ifdef CONFIG_PPC_64K_PAGES
	/* If we support combo pages, we can allow 64k pages in 4k slices */
	if (psize == MMU_PAGE_64K) {
		compat_mask = slice_mask_for_size(mm, MMU_PAGE_4K);
		if (fixed)
			or_mask(good_mask, compat_mask);
	}
#endif

	/* First check hint if it's valid or if we have MAP_FIXED */
	if (addr != 0 || fixed) {
		/* Build a mask for the requested range */
		mask = slice_range_to_mask(addr, len);
		slice_print_mask(" mask", mask);

		/* Check if we fit in the good mask. If we do, we just return,
		 * nothing else to do
		 */
		if (slice_check_fit(mask, good_mask)) {
			slice_dbg(" fits good !\n");
			return addr;
		}
	} else {
		/* Now let's see if we can find something in the existing
		 * slices for that size
		 */
		newaddr = slice_find_area(mm, len, good_mask, psize, topdown);
		if (newaddr != -ENOMEM) {
			/* Found within the good mask, we don't have to setup,
			 * we thus return directly
			 */
			slice_dbg(" found area at 0x%lx\n", newaddr);
			return newaddr;
		}
	}

	/* We don't fit in the good mask, check what other slices are
	 * empty and thus can be converted
	 */
	potential_mask = slice_mask_for_free(mm);
	or_mask(potential_mask, good_mask);
	slice_print_mask(" potential", potential_mask);

	if ((addr != 0 || fixed) && slice_check_fit(mask, potential_mask)) {
		slice_dbg(" fits potential !\n");
		goto convert;
	}

	/* If we have MAP_FIXED and failed the above steps, then error out */
	if (fixed)
		return -EBUSY;

	slice_dbg(" search...\n");

	/* If we had a hint that didn't work out, see if we can fit
	 * anywhere in the good area.
	 */
	if (addr) {
		addr = slice_find_area(mm, len, good_mask, psize, topdown);
		if (addr != -ENOMEM) {
			slice_dbg(" found area at 0x%lx\n", addr);
			return addr;
		}
	}

	/* Now let's see if we can find something in the existing slices
	 * for that size plus free slices
	 */
	addr = slice_find_area(mm, len, potential_mask, psize, topdown);

#ifdef CONFIG_PPC_64K_PAGES
	if (addr == -ENOMEM && psize == MMU_PAGE_64K) {
		/* retry the search with 4k-page slices included */
		or_mask(potential_mask, compat_mask);
		addr = slice_find_area(mm, len, potential_mask, psize,
				       topdown);
	}
#endif

	if (addr == -ENOMEM)
		return -ENOMEM;

	mask = slice_range_to_mask(addr, len);
	slice_dbg(" found potential area at 0x%lx\n", addr);
	slice_print_mask(" mask", mask);

 convert:
	andnot_mask(mask, good_mask);
	andnot_mask(mask, compat_mask);
	if (mask.low_slices || mask.high_slices) {
		slice_convert(mm, mask, psize);
		if (psize > MMU_PAGE_BASE)
			on_each_cpu(slice_flush_segments, mm, 1);
	}
	return addr;

}
EXPORT_SYMBOL_GPL(slice_get_unmapped_area);
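
/*
 * These are the hooks the generic mmap code calls to pick an unmapped
 * area; both simply defer to slice_get_unmapped_area() with the mm's
 * default user page size, searching bottom-up or top-down respectively.
 */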
unsigned long arch_get_unmapped_area(struct file *filp,
				     unsigned long addr,
				     unsigned long len,
				     unsigned long pgoff,
				     unsigned long flags)
{
	return slice_get_unmapped_area(addr, len, flags,
				       current->mm->context.user_psize, 0);
}

unsigned long arch_get_unmapped_area_topdown(struct file *filp,
					     const unsigned long addr0,
					     const unsigned long len,
					     const unsigned long pgoff,
					     const unsigned long flags)
{
	return slice_get_unmapped_area(addr0, len, flags,
				       current->mm->context.user_psize, 1);
}
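
/*
 * Return the page-size index currently recorded for the slice that
 * contains @addr, by extracting the relevant 4-bit field from the low
 * or high slice psize arrays.
 */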
unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
{
	unsigned char *hpsizes;
	int index, mask_index;

	if (addr < SLICE_LOW_TOP) {
		u64 lpsizes;
		lpsizes = mm->context.low_slices_psize;
		index = GET_LOW_SLICE_INDEX(addr);
		return (lpsizes >> (index * 4)) & 0xf;
	}
	hpsizes = mm->context.high_slices_psize;
	index = GET_HIGH_SLICE_INDEX(addr);
	mask_index = index & 0x1;
	return (hpsizes[index >> 1] >> (mask_index * 4)) & 0xf;
}
EXPORT_SYMBOL_GPL(get_slice_psize);

/*
 * This is called by hash_page when it needs to do a lazy conversion of
 * an address space from real 64K pages to combo 4K pages (typically
 * when hitting a non cacheable mapping on a processor or hypervisor
 * that won't allow them for 64K pages).
 *
 * This is also called in init_new_context() to change back the user
 * psize from whatever the parent context had it set to.
 * N.B. This may be called before mm->context.id has been set.
 *
 * This function will only change the content of the {low,high}_slices_psize
 * masks, it will not flush SLBs as this shall be handled lazily by the
 * caller.
 */
void slice_set_user_psize(struct mm_struct *mm, unsigned int psize)
{
	int index, mask_index;
	unsigned char *hpsizes;
	unsigned long flags, lpsizes;
	unsigned int old_psize;
	int i;

	slice_dbg("slice_set_user_psize(mm=%p, psize=%d)\n", mm, psize);

	spin_lock_irqsave(&slice_convert_lock, flags);

	old_psize = mm->context.user_psize;
	slice_dbg(" old_psize=%d\n", old_psize);
	if (old_psize == psize)
		goto bail;

	mm->context.user_psize = psize;
	wmb();

	lpsizes = mm->context.low_slices_psize;
	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (((lpsizes >> (i * 4)) & 0xf) == old_psize)
			lpsizes = (lpsizes & ~(0xful << (i * 4))) |
				(((unsigned long)psize) << (i * 4));
	/* Assign the value back */
	mm->context.low_slices_psize = lpsizes;

	hpsizes = mm->context.high_slices_psize;
	for (i = 0; i < SLICE_NUM_HIGH; i++) {
		mask_index = i & 0x1;
		index = i >> 1;
		if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == old_psize)
			hpsizes[index] = (hpsizes[index] &
					  ~(0xf << (mask_index * 4))) |
				(((unsigned long)psize) << (mask_index * 4));
	}

	slice_dbg(" lsps=%lx, hsps=%lx\n",
		  mm->context.low_slices_psize,
		  mm->context.high_slices_psize);

 bail:
	spin_unlock_irqrestore(&slice_convert_lock, flags);
}
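
/*
 * Set the page size of the single slice containing @address.  Like
 * slice_convert() this only rewrites the psize bookkeeping under
 * slice_convert_lock and notifies the SPUs; flushing the CPU SLB
 * entries for that slice is presumably left to the caller, since no
 * CPU SLB flush is done here.
 */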
void slice_set_psize(struct mm_struct *mm, unsigned long address,
		     unsigned int psize)
{
	unsigned char *hpsizes;
	unsigned long i, flags;
	u64 *lpsizes;

	spin_lock_irqsave(&slice_convert_lock, flags);
	if (address < SLICE_LOW_TOP) {
		i = GET_LOW_SLICE_INDEX(address);
		lpsizes = &mm->context.low_slices_psize;
		*lpsizes = (*lpsizes & ~(0xful << (i * 4))) |
			((unsigned long) psize << (i * 4));
	} else {
		int index, mask_index;
		i = GET_HIGH_SLICE_INDEX(address);
		hpsizes = mm->context.high_slices_psize;
		mask_index = i & 0x1;
		index = i >> 1;
		hpsizes[index] = (hpsizes[index] &
				  ~(0xf << (mask_index * 4))) |
			(((unsigned long)psize) << (mask_index * 4));
	}

	spin_unlock_irqrestore(&slice_convert_lock, flags);

#ifdef CONFIG_SPU_BASE
	spu_flush_all_slbs(mm);
#endif
}

void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
			   unsigned long len, unsigned int psize)
{
	struct slice_mask mask = slice_range_to_mask(start, len);

	slice_convert(mm, mask, psize);
}

/*
 * is_hugepage_only_range() is used by generic code to verify whether
 * a normal mmap mapping (non hugetlbfs) is valid on a given area.
 *
 * Until the generic code provides a more generic hook and/or starts
 * calling arch get_unmapped_area for MAP_FIXED (which our implementation
 * here knows how to deal with), we hijack it to keep standard mappings
 * away from hugepage areas.
 *
 * Because of that generic code limitation, MAP_FIXED mapping cannot
 * "convert" back a slice with no VMAs to the standard page size, only
 * get_unmapped_area() can. It would be possible to fix it here but I
 * prefer working on fixing the generic code instead.
 *
 * WARNING: This will not work if hugetlbfs isn't enabled since the
 * generic code will redefine that function as 0 in that case. This is ok
 * for now as we only use slices with hugetlbfs enabled. This should
 * be fixed as the generic code gets fixed.
 */
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
			   unsigned long len)
{
	struct slice_mask mask, available;
	unsigned int psize = mm->context.user_psize;

	mask = slice_range_to_mask(addr, len);
	available = slice_mask_for_size(mm, psize);
#ifdef CONFIG_PPC_64K_PAGES
	/* We need to account for 4k slices too */
	if (psize == MMU_PAGE_64K) {
		struct slice_mask compat_mask;
		compat_mask = slice_mask_for_size(mm, MMU_PAGE_4K);
		or_mask(available, compat_mask);
	}
#endif

#if 0 /* too verbose */
	slice_dbg("is_hugepage_only_range(mm=%p, addr=%lx, len=%lx)\n",
		  mm, addr, len);
	slice_print_mask(" mask", mask);
	slice_print_mask(" available", available);
#endif
	return !slice_check_fit(mask, available);
}