/*
 *  arch/sparc64/mm/init.c
 *
 *  Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
 *  Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */
8 #include <linux/module.h>
9 #include <linux/kernel.h>
10 #include <linux/sched.h>
11 #include <linux/string.h>
12 #include <linux/init.h>
13 #include <linux/bootmem.h>
15 #include <linux/hugetlb.h>
16 #include <linux/initrd.h>
17 #include <linux/swap.h>
18 #include <linux/pagemap.h>
19 #include <linux/poison.h>
21 #include <linux/seq_file.h>
22 #include <linux/kprobes.h>
23 #include <linux/cache.h>
24 #include <linux/sort.h>
25 #include <linux/percpu.h>
26 #include <linux/memblock.h>
27 #include <linux/mmzone.h>
28 #include <linux/gfp.h>
32 #include <asm/pgalloc.h>
33 #include <asm/pgtable.h>
34 #include <asm/oplib.h>
35 #include <asm/iommu.h>
37 #include <asm/uaccess.h>
38 #include <asm/mmu_context.h>
39 #include <asm/tlbflush.h>
41 #include <asm/starfire.h>
43 #include <asm/spitfire.h>
44 #include <asm/sections.h>
46 #include <asm/hypervisor.h>
48 #include <asm/mdesc.h>
49 #include <asm/cpudata.h>
54 unsigned long kern_linear_pte_xor[4] __read_mostly;
/* A bitmap, two bits for every 256MB of physical memory.  These two
 * bits determine what page size we use for kernel linear
 * translations.  They form an index into kern_linear_pte_xor[].  The
 * value in the indexed slot is XOR'd with the TLB miss virtual
 * address to form the resulting TTE.  The mapping is:
 *
 *	0	==>	4MB
 *	1	==>	256MB
 *	2	==>	2GB
 *	3	==>	16GB
 *
 * All sun4v chips support 256MB pages.  Only SPARC-T4 and later
 * support 2GB pages, and hopefully future cpus will support the 16GB
 * pages as well.  For slots 2 and 3, we encode a 256MB TTE xor there
 * if these larger page sizes are not supported by the cpu.
 *
 * It would be nice to determine this from the machine description
 * 'cpu' properties, but we need to have this table setup before the
 * MDESC is initialized.
 */
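
/* Worked example (editorial illustration, not part of the original source):
 * mark_kpte_bitmap()/kpte_set_val() below pack 32 two-bit entries into each
 * 64-bit word, so the entry for a physical address lives at
 * index = paddr >> 28.  A page at paddr 0x80000000 (the ninth 256MB region,
 * index 8) is therefore encoded in bits 16-17 of kpte_linear_bitmap[0];
 * a value of 1 there selects kern_linear_pte_xor[1], i.e. the 256MB TTE xor.
 */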
76 unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];
#ifndef CONFIG_DEBUG_PAGEALLOC
/* A special kernel TSB for 4MB, 256MB, 2GB and 16GB linear mappings.
 * Space is allocated for this right after the trap table in
 * arch/sparc64/kernel/head.S
 */
extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
#endif
86 static unsigned long cpu_pgsz_mask;
90 static struct linux_prom64_registers pavail[MAX_BANKS];
91 static int pavail_ents;
93 static int cmp_p64(const void *a, const void *b)
95 const struct linux_prom64_registers *x = a, *y = b;
97 if (x->phys_addr > y->phys_addr)
99 if (x->phys_addr < y->phys_addr)
104 static void __init read_obp_memory(const char *property,
105 struct linux_prom64_registers *regs,
108 phandle node = prom_finddevice("/memory");
109 int prop_size = prom_getproplen(node, property);
112 ents = prop_size / sizeof(struct linux_prom64_registers);
113 if (ents > MAX_BANKS) {
114 prom_printf("The machine has more %s property entries than "
115 "this kernel can support (%d).\n",
116 property, MAX_BANKS);
120 ret = prom_getproperty(node, property, (char *) regs, prop_size);
122 prom_printf("Couldn't get %s property from /memory.\n",
127 /* Sanitize what we got from the firmware, by page aligning
130 for (i = 0; i < ents; i++) {
131 unsigned long base, size;
133 base = regs[i].phys_addr;
134 size = regs[i].reg_size;
137 if (base & ~PAGE_MASK) {
138 unsigned long new_base = PAGE_ALIGN(base);
140 size -= new_base - base;
141 if ((long) size < 0L)
146 /* If it is empty, simply get rid of it.
147 * This simplifies the logic of the other
148 * functions that process these arrays.
			memmove(&regs[i], &regs[i + 1],
				(ents - i - 1) * sizeof(regs[0]));
156 regs[i].phys_addr = base;
157 regs[i].reg_size = size;
162 sort(regs, ents, sizeof(struct linux_prom64_registers),
166 unsigned long sparc64_valid_addr_bitmap[VALID_ADDR_BITMAP_BYTES /
167 sizeof(unsigned long)];
168 EXPORT_SYMBOL(sparc64_valid_addr_bitmap);
170 /* Kernel physical address base and size in bytes. */
171 unsigned long kern_base __read_mostly;
172 unsigned long kern_size __read_mostly;
174 /* Initial ramdisk setup */
175 extern unsigned long sparc_ramdisk_image64;
176 extern unsigned int sparc_ramdisk_image;
177 extern unsigned int sparc_ramdisk_size;
179 struct page *mem_map_zero __read_mostly;
180 EXPORT_SYMBOL(mem_map_zero);
182 unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;
184 unsigned long sparc64_kern_pri_context __read_mostly;
185 unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
186 unsigned long sparc64_kern_sec_context __read_mostly;
188 int num_kernel_image_mappings;
#ifdef CONFIG_DEBUG_DCFLUSH
atomic_t dcpage_flushes = ATOMIC_INIT(0);
#ifdef CONFIG_SMP
atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
#endif
#endif
197 inline void flush_dcache_page_impl(struct page *page)
199 BUG_ON(tlb_type == hypervisor);
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
205 __flush_dcache_page(page_address(page),
206 ((tlb_type == spitfire) &&
207 page_mapping(page) != NULL));
209 if (page_mapping(page) != NULL &&
210 tlb_type == spitfire)
211 __flush_icache_page(__pa(page_address(page)));
215 #define PG_dcache_dirty PG_arch_1
216 #define PG_dcache_cpu_shift 32UL
217 #define PG_dcache_cpu_mask \
218 ((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)
220 #define dcache_dirty_cpu(page) \
221 (((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
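
/* Editorial note: the layout assumed by the helpers below is that
 * PG_dcache_dirty (PG_arch_1) marks a page with dirty D-cache lines, and
 * the owning cpu number sits in page->flags starting at bit
 * PG_dcache_cpu_shift (32), masked by PG_dcache_cpu_mask.
 * set_dcache_dirty() rewrites both fields with a casx compare-and-swap
 * retry loop so the update is atomic with respect to other flag updates.
 */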
223 static inline void set_dcache_dirty(struct page *page, int this_cpu)
225 unsigned long mask = this_cpu;
226 unsigned long non_cpu_bits;
228 non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
229 mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);
231 __asm__ __volatile__("1:\n\t"
233 "and %%g7, %1, %%g1\n\t"
234 "or %%g1, %0, %%g1\n\t"
235 "casx [%2], %%g7, %%g1\n\t"
237 "bne,pn %%xcc, 1b\n\t"
240 : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
244 static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
246 unsigned long mask = (1UL << PG_dcache_dirty);
248 __asm__ __volatile__("! test_and_clear_dcache_dirty\n"
251 "srlx %%g7, %4, %%g1\n\t"
252 "and %%g1, %3, %%g1\n\t"
254 "bne,pn %%icc, 2f\n\t"
255 " andn %%g7, %1, %%g1\n\t"
256 "casx [%2], %%g7, %%g1\n\t"
258 "bne,pn %%xcc, 1b\n\t"
262 : "r" (cpu), "r" (mask), "r" (&page->flags),
263 "i" (PG_dcache_cpu_mask),
264 "i" (PG_dcache_cpu_shift)
268 static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
270 unsigned long tsb_addr = (unsigned long) ent;
272 if (tlb_type == cheetah_plus || tlb_type == hypervisor)
273 tsb_addr = __pa(tsb_addr);
275 __tsb_insert(tsb_addr, tag, pte);
278 unsigned long _PAGE_ALL_SZ_BITS __read_mostly;
280 static void flush_dcache(unsigned long pfn)
284 page = pfn_to_page(pfn);
286 unsigned long pg_flags;
288 pg_flags = page->flags;
289 if (pg_flags & (1UL << PG_dcache_dirty)) {
290 int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
292 int this_cpu = get_cpu();
294 /* This is just to optimize away some function calls
298 flush_dcache_page_impl(page);
300 smp_flush_dcache_page_impl(page, cpu);
302 clear_dcache_dirty_cpu(page, cpu);
309 /* mm->context.lock must be held */
310 static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_index,
311 unsigned long tsb_hash_shift, unsigned long address,
314 struct tsb *tsb = mm->context.tsb_block[tsb_index].tsb;
320 tsb += ((address >> tsb_hash_shift) &
321 (mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
322 tag = (address >> 22UL);
323 tsb_insert(tsb, tag, tte);
326 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
327 static inline bool is_hugetlb_pte(pte_t pte)
329 if ((tlb_type == hypervisor &&
330 (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
331 (tlb_type != hypervisor &&
332 (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U))
338 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
340 struct mm_struct *mm;
344 if (tlb_type != hypervisor) {
345 unsigned long pfn = pte_pfn(pte);
353 spin_lock_irqsave(&mm->context.lock, flags);
355 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
356 if (mm->context.huge_pte_count && is_hugetlb_pte(pte))
357 __update_mmu_tsb_insert(mm, MM_TSB_HUGE, HPAGE_SHIFT,
358 address, pte_val(pte));
361 __update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
362 address, pte_val(pte));
364 spin_unlock_irqrestore(&mm->context.lock, flags);
367 void flush_dcache_page(struct page *page)
369 struct address_space *mapping;
372 if (tlb_type == hypervisor)
375 /* Do not bother with the expensive D-cache flush if it
376 * is merely the zero page. The 'bigcore' testcase in GDB
377 * causes this case to run millions of times.
379 if (page == ZERO_PAGE(0))
382 this_cpu = get_cpu();
384 mapping = page_mapping(page);
385 if (mapping && !mapping_mapped(mapping)) {
386 int dirty = test_bit(PG_dcache_dirty, &page->flags);
388 int dirty_cpu = dcache_dirty_cpu(page);
390 if (dirty_cpu == this_cpu)
392 smp_flush_dcache_page_impl(page, dirty_cpu);
394 set_dcache_dirty(page, this_cpu);
396 /* We could delay the flush for the !page_mapping
397 * case too. But that case is for exec env/arg
		 * pages and those are 99% certainly going to get
399 * faulted into the tlb (and thus flushed) anyways.
401 flush_dcache_page_impl(page);
407 EXPORT_SYMBOL(flush_dcache_page);
409 void __kprobes flush_icache_range(unsigned long start, unsigned long end)
411 /* Cheetah and Hypervisor platform cpus have coherent I-cache. */
412 if (tlb_type == spitfire) {
415 /* This code only runs on Spitfire cpus so this is
416 * why we can assume _PAGE_PADDR_4U.
418 for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) {
419 unsigned long paddr, mask = _PAGE_PADDR_4U;
421 if (kaddr >= PAGE_OFFSET)
422 paddr = kaddr & mask;
424 pgd_t *pgdp = pgd_offset_k(kaddr);
425 pud_t *pudp = pud_offset(pgdp, kaddr);
426 pmd_t *pmdp = pmd_offset(pudp, kaddr);
427 pte_t *ptep = pte_offset_kernel(pmdp, kaddr);
429 paddr = pte_val(*ptep) & mask;
431 __flush_icache_page(paddr);
435 EXPORT_SYMBOL(flush_icache_range);
437 void mmu_info(struct seq_file *m)
439 static const char *pgsz_strings[] = {
440 "8K", "64K", "512K", "4MB", "32MB",
441 "256MB", "2GB", "16GB",
445 if (tlb_type == cheetah)
446 seq_printf(m, "MMU Type\t: Cheetah\n");
447 else if (tlb_type == cheetah_plus)
448 seq_printf(m, "MMU Type\t: Cheetah+\n");
449 else if (tlb_type == spitfire)
450 seq_printf(m, "MMU Type\t: Spitfire\n");
451 else if (tlb_type == hypervisor)
452 seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n");
454 seq_printf(m, "MMU Type\t: ???\n");
456 seq_printf(m, "MMU PGSZs\t: ");
458 for (i = 0; i < ARRAY_SIZE(pgsz_strings); i++) {
459 if (cpu_pgsz_mask & (1UL << i)) {
460 seq_printf(m, "%s%s",
461 printed ? "," : "", pgsz_strings[i]);
#ifdef CONFIG_DEBUG_DCFLUSH
	seq_printf(m, "DCPageFlushes\t: %d\n",
		   atomic_read(&dcpage_flushes));
#ifdef CONFIG_SMP
	seq_printf(m, "DCPageFlushesXC\t: %d\n",
		   atomic_read(&dcpage_flushes_xcall));
#endif /* CONFIG_SMP */
#endif /* CONFIG_DEBUG_DCFLUSH */
477 struct linux_prom_translation prom_trans[512] __read_mostly;
478 unsigned int prom_trans_ents __read_mostly;
480 unsigned long kern_locked_tte_data;
482 /* The obp translations are saved based on 8k pagesize, since obp can
483 * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
484 * HI_OBP_ADDRESS range are handled in ktlb.S.
486 static inline int in_obp_range(unsigned long vaddr)
488 return (vaddr >= LOW_OBP_ADDRESS &&
489 vaddr < HI_OBP_ADDRESS);
492 static int cmp_ptrans(const void *a, const void *b)
494 const struct linux_prom_translation *x = a, *y = b;
496 if (x->virt > y->virt)
498 if (x->virt < y->virt)
503 /* Read OBP translations property into 'prom_trans[]'. */
504 static void __init read_obp_translations(void)
506 int n, node, ents, first, last, i;
508 node = prom_finddevice("/virtual-memory");
509 n = prom_getproplen(node, "translations");
510 if (unlikely(n == 0 || n == -1)) {
511 prom_printf("prom_mappings: Couldn't get size.\n");
514 if (unlikely(n > sizeof(prom_trans))) {
515 prom_printf("prom_mappings: Size %d is too big.\n", n);
519 if ((n = prom_getproperty(node, "translations",
520 (char *)&prom_trans[0],
521 sizeof(prom_trans))) == -1) {
522 prom_printf("prom_mappings: Couldn't get property.\n");
526 n = n / sizeof(struct linux_prom_translation);
530 sort(prom_trans, ents, sizeof(struct linux_prom_translation),
533 /* Now kick out all the non-OBP entries. */
534 for (i = 0; i < ents; i++) {
535 if (in_obp_range(prom_trans[i].virt))
539 for (; i < ents; i++) {
540 if (!in_obp_range(prom_trans[i].virt))
545 for (i = 0; i < (last - first); i++) {
546 struct linux_prom_translation *src = &prom_trans[i + first];
547 struct linux_prom_translation *dest = &prom_trans[i];
551 for (; i < ents; i++) {
552 struct linux_prom_translation *dest = &prom_trans[i];
553 dest->virt = dest->size = dest->data = 0x0UL;
556 prom_trans_ents = last - first;
558 if (tlb_type == spitfire) {
559 /* Clear diag TTE bits. */
560 for (i = 0; i < prom_trans_ents; i++)
561 prom_trans[i].data &= ~0x0003fe0000000000UL;
564 /* Force execute bit on. */
565 for (i = 0; i < prom_trans_ents; i++)
566 prom_trans[i].data |= (tlb_type == hypervisor ?
567 _PAGE_EXEC_4V : _PAGE_EXEC_4U);
570 static void __init hypervisor_tlb_lock(unsigned long vaddr,
574 unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu);
577 prom_printf("hypervisor_tlb_lock[%lx:%x:%lx:%lx]: "
578 "errors with %lx\n", vaddr, 0, pte, mmu, ret);
583 static unsigned long kern_large_tte(unsigned long paddr);
585 static void __init remap_kernel(void)
587 unsigned long phys_page, tte_vaddr, tte_data;
588 int i, tlb_ent = sparc64_highest_locked_tlbent();
590 tte_vaddr = (unsigned long) KERNBASE;
591 phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
592 tte_data = kern_large_tte(phys_page);
594 kern_locked_tte_data = tte_data;
596 /* Now lock us into the TLBs via Hypervisor or OBP. */
597 if (tlb_type == hypervisor) {
598 for (i = 0; i < num_kernel_image_mappings; i++) {
599 hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
600 hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
601 tte_vaddr += 0x400000;
602 tte_data += 0x400000;
605 for (i = 0; i < num_kernel_image_mappings; i++) {
606 prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr);
607 prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr);
608 tte_vaddr += 0x400000;
609 tte_data += 0x400000;
611 sparc64_highest_unlocked_tlb_ent = tlb_ent - i;
613 if (tlb_type == cheetah_plus) {
614 sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
615 CTX_CHEETAH_PLUS_NUC);
616 sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
617 sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
622 static void __init inherit_prom_mappings(void)
624 /* Now fixup OBP's idea about where we really are mapped. */
625 printk("Remapping the kernel... ");
630 void prom_world(int enter)
635 __asm__ __volatile__("flushw");
638 void __flush_dcache_range(unsigned long start, unsigned long end)
642 if (tlb_type == spitfire) {
645 for (va = start; va < end; va += 32) {
646 spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
650 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
653 for (va = start; va < end; va += 32)
654 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
658 "i" (ASI_DCACHE_INVALIDATE));
661 EXPORT_SYMBOL(__flush_dcache_range);
663 /* get_new_mmu_context() uses "cache + 1". */
664 DEFINE_SPINLOCK(ctx_alloc_lock);
665 unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
666 #define MAX_CTX_NR (1UL << CTX_NR_BITS)
667 #define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR)
668 DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
670 /* Caller does TLB context flushing on local CPU if necessary.
671 * The caller also ensures that CTX_VALID(mm->context) is false.
673 * We must be careful about boundary cases so that we never
674 * let the user have CTX 0 (nucleus) or we ever use a CTX
675 * version of zero (and thus NO_CONTEXT would not be caught
676 * by version mis-match tests in mmu_context.h).
 * Always invoked with interrupts disabled.
 */
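/* Editorial note: the context value is split into a generation number in
 * the high bits (CTX_VERSION_MASK) and a context number in the low
 * CTX_NR_BITS bits.  When the number space wraps, the version is bumped
 * and the allocation bitmap is reset, so mms holding a stale version fail
 * the version check in mmu_context.h and pick up a fresh context on their
 * next switch.
 */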
680 void get_new_mmu_context(struct mm_struct *mm)
682 unsigned long ctx, new_ctx;
683 unsigned long orig_pgsz_bits;
686 spin_lock(&ctx_alloc_lock);
687 orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
688 ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
689 new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
691 if (new_ctx >= (1 << CTX_NR_BITS)) {
692 new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
693 if (new_ctx >= ctx) {
695 new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
698 new_ctx = CTX_FIRST_VERSION;
			/* Don't call memset, for 16 entries that's just
			 * too expensive. */
703 mmu_context_bmap[0] = 3;
704 mmu_context_bmap[1] = 0;
705 mmu_context_bmap[2] = 0;
706 mmu_context_bmap[3] = 0;
707 for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
708 mmu_context_bmap[i + 0] = 0;
709 mmu_context_bmap[i + 1] = 0;
710 mmu_context_bmap[i + 2] = 0;
711 mmu_context_bmap[i + 3] = 0;
717 mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
718 new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
720 tlb_context_cache = new_ctx;
721 mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
722 spin_unlock(&ctx_alloc_lock);
724 if (unlikely(new_version))
725 smp_new_mmu_context_version();
728 static int numa_enabled = 1;
729 static int numa_debug;
731 static int __init early_numa(char *p)
736 if (strstr(p, "off"))
739 if (strstr(p, "debug"))
744 early_param("numa", early_numa);
746 #define numadbg(f, a...) \
747 do { if (numa_debug) \
748 printk(KERN_INFO f, ## a); \
751 static void __init find_ramdisk(unsigned long phys_base)
753 #ifdef CONFIG_BLK_DEV_INITRD
754 if (sparc_ramdisk_image || sparc_ramdisk_image64) {
755 unsigned long ramdisk_image;
757 /* Older versions of the bootloader only supported a
758 * 32-bit physical address for the ramdisk image
759 * location, stored at sparc_ramdisk_image. Newer
760 * SILO versions set sparc_ramdisk_image to zero and
761 * provide a full 64-bit physical address at
762 * sparc_ramdisk_image64.
764 ramdisk_image = sparc_ramdisk_image;
766 ramdisk_image = sparc_ramdisk_image64;
768 /* Another bootloader quirk. The bootloader normalizes
769 * the physical address to KERNBASE, so we have to
770 * factor that back out and add in the lowest valid
771 * physical page address to get the true physical address.
773 ramdisk_image -= KERNBASE;
774 ramdisk_image += phys_base;
776 numadbg("Found ramdisk at physical address 0x%lx, size %u\n",
777 ramdisk_image, sparc_ramdisk_size);
779 initrd_start = ramdisk_image;
780 initrd_end = ramdisk_image + sparc_ramdisk_size;
782 memblock_reserve(initrd_start, sparc_ramdisk_size);
784 initrd_start += PAGE_OFFSET;
785 initrd_end += PAGE_OFFSET;
790 struct node_mem_mask {
794 static struct node_mem_mask node_masks[MAX_NUMNODES];
795 static int num_node_masks;
797 int numa_cpu_lookup_table[NR_CPUS];
798 cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];
800 #ifdef CONFIG_NEED_MULTIPLE_NODES
802 struct mdesc_mblock {
805 u64 offset; /* RA-to-PA */
807 static struct mdesc_mblock *mblocks;
808 static int num_mblocks;
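
/* Editorial note: each mblock from the machine description describes a
 * range of real addresses (RA) together with the RA-to-PA delta taken from
 * the "address-congruence-offset" property; ra_to_pa() below adds that
 * delta for whichever mblock contains the address.
 */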
810 static unsigned long ra_to_pa(unsigned long addr)
814 for (i = 0; i < num_mblocks; i++) {
815 struct mdesc_mblock *m = &mblocks[i];
817 if (addr >= m->base &&
818 addr < (m->base + m->size)) {
826 static int find_node(unsigned long addr)
830 addr = ra_to_pa(addr);
831 for (i = 0; i < num_node_masks; i++) {
832 struct node_mem_mask *p = &node_masks[i];
834 if ((addr & p->mask) == p->val)
840 static u64 memblock_nid_range(u64 start, u64 end, int *nid)
842 *nid = find_node(start);
844 while (start < end) {
845 int n = find_node(start);
859 /* This must be invoked after performing all of the necessary
860 * memblock_set_node() calls for 'nid'. We need to be able to get
861 * correct data from get_pfn_range_for_nid().
863 static void __init allocate_node_data(int nid)
865 struct pglist_data *p;
866 unsigned long start_pfn, end_pfn;
867 #ifdef CONFIG_NEED_MULTIPLE_NODES
870 paddr = memblock_alloc_try_nid(sizeof(struct pglist_data), SMP_CACHE_BYTES, nid);
872 prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
875 NODE_DATA(nid) = __va(paddr);
876 memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
878 NODE_DATA(nid)->node_id = nid;
883 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
884 p->node_start_pfn = start_pfn;
885 p->node_spanned_pages = end_pfn - start_pfn;
888 static void init_node_masks_nonnuma(void)
892 numadbg("Initializing tables for non-numa.\n");
894 node_masks[0].mask = node_masks[0].val = 0;
897 for (i = 0; i < NR_CPUS; i++)
898 numa_cpu_lookup_table[i] = 0;
900 cpumask_setall(&numa_cpumask_lookup_table[0]);
903 #ifdef CONFIG_NEED_MULTIPLE_NODES
904 struct pglist_data *node_data[MAX_NUMNODES];
906 EXPORT_SYMBOL(numa_cpu_lookup_table);
907 EXPORT_SYMBOL(numa_cpumask_lookup_table);
908 EXPORT_SYMBOL(node_data);
910 struct mdesc_mlgroup {
916 static struct mdesc_mlgroup *mlgroups;
917 static int num_mlgroups;
919 static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio,
924 mdesc_for_each_arc(arc, md, pio, MDESC_ARC_TYPE_FWD) {
925 u64 target = mdesc_arc_target(md, arc);
928 val = mdesc_get_property(md, target,
930 if (val && *val == cfg_handle)
936 static int scan_arcs_for_cfg_handle(struct mdesc_handle *md, u64 grp,
939 u64 arc, candidate, best_latency = ~(u64)0;
941 candidate = MDESC_NODE_NULL;
942 mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
943 u64 target = mdesc_arc_target(md, arc);
944 const char *name = mdesc_node_name(md, target);
947 if (strcmp(name, "pio-latency-group"))
950 val = mdesc_get_property(md, target, "latency", NULL);
954 if (*val < best_latency) {
960 if (candidate == MDESC_NODE_NULL)
963 return scan_pio_for_cfg_handle(md, candidate, cfg_handle);
966 int of_node_to_nid(struct device_node *dp)
968 const struct linux_prom64_registers *regs;
969 struct mdesc_handle *md;
974 /* This is the right thing to do on currently supported
975 * SUN4U NUMA platforms as well, as the PCI controller does
976 * not sit behind any particular memory controller.
981 regs = of_get_property(dp, "reg", NULL);
985 cfg_handle = (regs->phys_addr >> 32UL) & 0x0fffffff;
991 mdesc_for_each_node_by_name(md, grp, "group") {
992 if (!scan_arcs_for_cfg_handle(md, grp, cfg_handle)) {
1004 static void __init add_node_ranges(void)
1006 struct memblock_region *reg;
1008 for_each_memblock(memory, reg) {
1009 unsigned long size = reg->size;
1010 unsigned long start, end;
1014 while (start < end) {
1015 unsigned long this_end;
1018 this_end = memblock_nid_range(start, end, &nid);
1020 numadbg("Setting memblock NUMA node nid[%d] "
1021 "start[%lx] end[%lx]\n",
1022 nid, start, this_end);
1024 memblock_set_node(start, this_end - start, nid);
1030 static int __init grab_mlgroups(struct mdesc_handle *md)
1032 unsigned long paddr;
1036 mdesc_for_each_node_by_name(md, node, "memory-latency-group")
1041 paddr = memblock_alloc(count * sizeof(struct mdesc_mlgroup),
1046 mlgroups = __va(paddr);
1047 num_mlgroups = count;
1050 mdesc_for_each_node_by_name(md, node, "memory-latency-group") {
1051 struct mdesc_mlgroup *m = &mlgroups[count++];
1056 val = mdesc_get_property(md, node, "latency", NULL);
1058 val = mdesc_get_property(md, node, "address-match", NULL);
1060 val = mdesc_get_property(md, node, "address-mask", NULL);
1063 numadbg("MLGROUP[%d]: node[%llx] latency[%llx] "
1064 "match[%llx] mask[%llx]\n",
1065 count - 1, m->node, m->latency, m->match, m->mask);
1071 static int __init grab_mblocks(struct mdesc_handle *md)
1073 unsigned long paddr;
1077 mdesc_for_each_node_by_name(md, node, "mblock")
1082 paddr = memblock_alloc(count * sizeof(struct mdesc_mblock),
1087 mblocks = __va(paddr);
1088 num_mblocks = count;
1091 mdesc_for_each_node_by_name(md, node, "mblock") {
1092 struct mdesc_mblock *m = &mblocks[count++];
1095 val = mdesc_get_property(md, node, "base", NULL);
1097 val = mdesc_get_property(md, node, "size", NULL);
1099 val = mdesc_get_property(md, node,
1100 "address-congruence-offset", NULL);
1103 numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n",
1104 count - 1, m->base, m->size, m->offset);
1110 static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md,
1111 u64 grp, cpumask_t *mask)
1115 cpumask_clear(mask);
1117 mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_BACK) {
1118 u64 target = mdesc_arc_target(md, arc);
1119 const char *name = mdesc_node_name(md, target);
1122 if (strcmp(name, "cpu"))
1124 id = mdesc_get_property(md, target, "id", NULL);
1125 if (*id < nr_cpu_ids)
1126 cpumask_set_cpu(*id, mask);
1130 static struct mdesc_mlgroup * __init find_mlgroup(u64 node)
1134 for (i = 0; i < num_mlgroups; i++) {
1135 struct mdesc_mlgroup *m = &mlgroups[i];
1136 if (m->node == node)
1142 static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp,
1145 struct mdesc_mlgroup *candidate = NULL;
1146 u64 arc, best_latency = ~(u64)0;
1147 struct node_mem_mask *n;
1149 mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
1150 u64 target = mdesc_arc_target(md, arc);
1151 struct mdesc_mlgroup *m = find_mlgroup(target);
1154 if (m->latency < best_latency) {
1156 best_latency = m->latency;
1162 if (num_node_masks != index) {
1163 printk(KERN_ERR "Inconsistent NUMA state, "
1164 "index[%d] != num_node_masks[%d]\n",
1165 index, num_node_masks);
1169 n = &node_masks[num_node_masks++];
1171 n->mask = candidate->mask;
1172 n->val = candidate->match;
1174 numadbg("NUMA NODE[%d]: mask[%lx] val[%lx] (latency[%llx])\n",
1175 index, n->mask, n->val, candidate->latency);
1180 static int __init numa_parse_mdesc_group(struct mdesc_handle *md, u64 grp,
1186 numa_parse_mdesc_group_cpus(md, grp, &mask);
1188 for_each_cpu(cpu, &mask)
1189 numa_cpu_lookup_table[cpu] = index;
1190 cpumask_copy(&numa_cpumask_lookup_table[index], &mask);
1193 printk(KERN_INFO "NUMA GROUP[%d]: cpus [ ", index);
1194 for_each_cpu(cpu, &mask)
1199 return numa_attach_mlgroup(md, grp, index);
1202 static int __init numa_parse_mdesc(void)
1204 struct mdesc_handle *md = mdesc_grab();
1208 node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
1209 if (node == MDESC_NODE_NULL) {
1214 err = grab_mblocks(md);
1218 err = grab_mlgroups(md);
1223 mdesc_for_each_node_by_name(md, node, "group") {
1224 err = numa_parse_mdesc_group(md, node, count);
1232 for (i = 0; i < num_node_masks; i++) {
1233 allocate_node_data(i);
1243 static int __init numa_parse_jbus(void)
1245 unsigned long cpu, index;
1247 /* NUMA node id is encoded in bits 36 and higher, and there is
1248 * a 1-to-1 mapping from CPU ID to NUMA node ID.
1251 for_each_present_cpu(cpu) {
1252 numa_cpu_lookup_table[cpu] = index;
1253 cpumask_copy(&numa_cpumask_lookup_table[index], cpumask_of(cpu));
1254 node_masks[index].mask = ~((1UL << 36UL) - 1UL);
1255 node_masks[index].val = cpu << 36UL;
1259 num_node_masks = index;
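
	/* Editorial note: each present cpu gets a node mask that keeps only
	 * physical-address bits 36 and up and a val of (cpu id << 36), so
	 * find_node() assigns a page to the node of whichever cpu id is
	 * encoded in bits >= 36 of its physical address.
	 */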
1263 for (index = 0; index < num_node_masks; index++) {
1264 allocate_node_data(index);
1265 node_set_online(index);
1271 static int __init numa_parse_sun4u(void)
1273 if (tlb_type == cheetah || tlb_type == cheetah_plus) {
1276 __asm__ ("rdpr %%ver, %0" : "=r" (ver));
1277 if ((ver >> 32UL) == __JALAPENO_ID ||
1278 (ver >> 32UL) == __SERRANO_ID)
1279 return numa_parse_jbus();
1284 static int __init bootmem_init_numa(void)
1288 numadbg("bootmem_init_numa()\n");
1291 if (tlb_type == hypervisor)
1292 err = numa_parse_mdesc();
1294 err = numa_parse_sun4u();
1301 static int bootmem_init_numa(void)
1308 static void __init bootmem_init_nonnuma(void)
1310 unsigned long top_of_ram = memblock_end_of_DRAM();
1311 unsigned long total_ram = memblock_phys_mem_size();
1313 numadbg("bootmem_init_nonnuma()\n");
1315 printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
1316 top_of_ram, total_ram);
1317 printk(KERN_INFO "Memory hole size: %ldMB\n",
1318 (top_of_ram - total_ram) >> 20);
1320 init_node_masks_nonnuma();
1321 memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0);
1322 allocate_node_data(0);
1326 static unsigned long __init bootmem_init(unsigned long phys_base)
1328 unsigned long end_pfn;
1330 end_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
1331 max_pfn = max_low_pfn = end_pfn;
1332 min_low_pfn = (phys_base >> PAGE_SHIFT);
1334 if (bootmem_init_numa() < 0)
1335 bootmem_init_nonnuma();
1337 /* Dump memblock with node info. */
1338 memblock_dump_all();
1340 /* XXX cpu notifier XXX */
1342 sparse_memory_present_with_active_regions(MAX_NUMNODES);
1348 static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
1349 static int pall_ents __initdata;
1351 #ifdef CONFIG_DEBUG_PAGEALLOC
1352 static unsigned long __ref kernel_map_range(unsigned long pstart,
1353 unsigned long pend, pgprot_t prot)
1355 unsigned long vstart = PAGE_OFFSET + pstart;
1356 unsigned long vend = PAGE_OFFSET + pend;
1357 unsigned long alloc_bytes = 0UL;
1359 if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
1360 prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
1365 while (vstart < vend) {
1366 unsigned long this_end, paddr = __pa(vstart);
1367 pgd_t *pgd = pgd_offset_k(vstart);
1372 pud = pud_offset(pgd, vstart);
1373 if (pud_none(*pud)) {
1376 new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1377 alloc_bytes += PAGE_SIZE;
1378 pud_populate(&init_mm, pud, new);
1381 pmd = pmd_offset(pud, vstart);
1382 if (!pmd_present(*pmd)) {
1385 new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1386 alloc_bytes += PAGE_SIZE;
1387 pmd_populate_kernel(&init_mm, pmd, new);
1390 pte = pte_offset_kernel(pmd, vstart);
1391 this_end = (vstart + PMD_SIZE) & PMD_MASK;
1392 if (this_end > vend)
1395 while (vstart < this_end) {
1396 pte_val(*pte) = (paddr | pgprot_val(prot));
1398 vstart += PAGE_SIZE;
1407 extern unsigned int kvmap_linear_patch[1];
1408 #endif /* CONFIG_DEBUG_PAGEALLOC */
1410 static void __init kpte_set_val(unsigned long index, unsigned long val)
1412 unsigned long *ptr = kpte_linear_bitmap;
1414 val <<= ((index % (BITS_PER_LONG / 2)) * 2);
1415 ptr += (index / (BITS_PER_LONG / 2));
1420 static const unsigned long kpte_shift_min = 28; /* 256MB */
1421 static const unsigned long kpte_shift_max = 34; /* 16GB */
1422 static const unsigned long kpte_shift_incr = 3;
1424 static unsigned long kpte_mark_using_shift(unsigned long start, unsigned long end,
1425 unsigned long shift)
1427 unsigned long size = (1UL << shift);
1428 unsigned long mask = (size - 1UL);
1429 unsigned long remains = end - start;
1432 if (remains < size || (start & mask))
1437 * shift 28 --> kern_linear_pte_xor index 1
1438 * shift 31 --> kern_linear_pte_xor index 2
1439 * shift 34 --> kern_linear_pte_xor index 3
1441 val = ((shift - kpte_shift_min) / kpte_shift_incr) + 1;
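
	/* Editorial example (not part of the original source): a 2GB-aligned,
	 * 2GB-sized chunk handled with shift 31 gets val 2, and the loop below
	 * records it as eight consecutive 256MB bitmap entries all set to 2,
	 * i.e. kern_linear_pte_xor[2].
	 */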
1444 if (shift != kpte_shift_max)
1448 unsigned long index = start >> kpte_shift_min;
1450 kpte_set_val(index, val);
1452 start += 1UL << kpte_shift_min;
1453 remains -= 1UL << kpte_shift_min;
1459 static void __init mark_kpte_bitmap(unsigned long start, unsigned long end)
1461 unsigned long smallest_size, smallest_mask;
1464 smallest_size = (1UL << kpte_shift_min);
1465 smallest_mask = (smallest_size - 1UL);
1467 while (start < end) {
1468 unsigned long orig_start = start;
1470 for (s = kpte_shift_max; s >= kpte_shift_min; s -= kpte_shift_incr) {
1471 start = kpte_mark_using_shift(start, end, s);
1473 if (start != orig_start)
1477 if (start == orig_start)
1478 start = (start + smallest_size) & ~smallest_mask;
1482 static void __init init_kpte_bitmap(void)
1486 for (i = 0; i < pall_ents; i++) {
1487 unsigned long phys_start, phys_end;
1489 phys_start = pall[i].phys_addr;
1490 phys_end = phys_start + pall[i].reg_size;
1492 mark_kpte_bitmap(phys_start, phys_end);
1496 static void __init kernel_physical_mapping_init(void)
1498 #ifdef CONFIG_DEBUG_PAGEALLOC
1499 unsigned long i, mem_alloced = 0UL;
1501 for (i = 0; i < pall_ents; i++) {
1502 unsigned long phys_start, phys_end;
1504 phys_start = pall[i].phys_addr;
1505 phys_end = phys_start + pall[i].reg_size;
1507 mem_alloced += kernel_map_range(phys_start, phys_end,
1511 printk("Allocated %ld bytes for kernel page tables.\n",
1514 kvmap_linear_patch[0] = 0x01000000; /* nop */
1515 flushi(&kvmap_linear_patch[0]);
1521 #ifdef CONFIG_DEBUG_PAGEALLOC
1522 void kernel_map_pages(struct page *page, int numpages, int enable)
1524 unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
1525 unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);
1527 kernel_map_range(phys_start, phys_end,
1528 (enable ? PAGE_KERNEL : __pgprot(0)));
1530 flush_tsb_kernel_range(PAGE_OFFSET + phys_start,
1531 PAGE_OFFSET + phys_end);
1533 /* we should perform an IPI and flush all tlbs,
1534 * but that can deadlock->flush only current cpu.
1536 __flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
1537 PAGE_OFFSET + phys_end);
1541 unsigned long __init find_ecache_flush_span(unsigned long size)
1545 for (i = 0; i < pavail_ents; i++) {
1546 if (pavail[i].reg_size >= size)
1547 return pavail[i].phys_addr;
1553 static void __init tsb_phys_patch(void)
1555 struct tsb_ldquad_phys_patch_entry *pquad;
1556 struct tsb_phys_patch_entry *p;
1558 pquad = &__tsb_ldquad_phys_patch;
1559 while (pquad < &__tsb_ldquad_phys_patch_end) {
1560 unsigned long addr = pquad->addr;
1562 if (tlb_type == hypervisor)
1563 *(unsigned int *) addr = pquad->sun4v_insn;
1565 *(unsigned int *) addr = pquad->sun4u_insn;
1567 __asm__ __volatile__("flush %0"
1574 p = &__tsb_phys_patch;
1575 while (p < &__tsb_phys_patch_end) {
1576 unsigned long addr = p->addr;
1578 *(unsigned int *) addr = p->insn;
1580 __asm__ __volatile__("flush %0"
1588 /* Don't mark as init, we give this to the Hypervisor. */
1589 #ifndef CONFIG_DEBUG_PAGEALLOC
1590 #define NUM_KTSB_DESCR 2
1592 #define NUM_KTSB_DESCR 1
1594 static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR];
1595 extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
1597 static void patch_one_ktsb_phys(unsigned int *start, unsigned int *end, unsigned long pa)
1599 pa >>= KTSB_PHYS_SHIFT;
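
	/* Editorial note: each patch site is a sethi/or-style instruction
	 * pair.  The first instruction's 22-bit immediate receives the upper
	 * bits of the (already right-shifted) TSB physical address, and the
	 * second instruction's low 10 bits receive the remaining low bits.
	 */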
1601 while (start < end) {
1602 unsigned int *ia = (unsigned int *)(unsigned long)*start;
1604 ia[0] = (ia[0] & ~0x3fffff) | (pa >> 10);
1605 __asm__ __volatile__("flush %0" : : "r" (ia));
1607 ia[1] = (ia[1] & ~0x3ff) | (pa & 0x3ff);
1608 __asm__ __volatile__("flush %0" : : "r" (ia + 1));
1614 static void ktsb_phys_patch(void)
1616 extern unsigned int __swapper_tsb_phys_patch;
1617 extern unsigned int __swapper_tsb_phys_patch_end;
1618 unsigned long ktsb_pa;
1620 ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
1621 patch_one_ktsb_phys(&__swapper_tsb_phys_patch,
1622 &__swapper_tsb_phys_patch_end, ktsb_pa);
1623 #ifndef CONFIG_DEBUG_PAGEALLOC
1625 extern unsigned int __swapper_4m_tsb_phys_patch;
1626 extern unsigned int __swapper_4m_tsb_phys_patch_end;
1627 ktsb_pa = (kern_base +
1628 ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
1629 patch_one_ktsb_phys(&__swapper_4m_tsb_phys_patch,
1630 &__swapper_4m_tsb_phys_patch_end, ktsb_pa);
1635 static void __init sun4v_ktsb_init(void)
1637 unsigned long ktsb_pa;
1639 /* First KTSB for PAGE_SIZE mappings. */
1640 ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
1642 switch (PAGE_SIZE) {
1645 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K;
1646 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K;
1650 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K;
1651 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K;
1655 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K;
1656 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K;
1659 case 4 * 1024 * 1024:
1660 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB;
1661 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB;
1665 ktsb_descr[0].assoc = 1;
1666 ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES;
1667 ktsb_descr[0].ctx_idx = 0;
1668 ktsb_descr[0].tsb_base = ktsb_pa;
1669 ktsb_descr[0].resv = 0;
1671 #ifndef CONFIG_DEBUG_PAGEALLOC
1672 /* Second KTSB for 4MB/256MB/2GB/16GB mappings. */
1673 ktsb_pa = (kern_base +
1674 ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
1676 ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB;
1677 ktsb_descr[1].pgsz_mask = ((HV_PGSZ_MASK_4MB |
1678 HV_PGSZ_MASK_256MB |
1680 HV_PGSZ_MASK_16GB) &
1682 ktsb_descr[1].assoc = 1;
1683 ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES;
1684 ktsb_descr[1].ctx_idx = 0;
1685 ktsb_descr[1].tsb_base = ktsb_pa;
1686 ktsb_descr[1].resv = 0;
1690 void __cpuinit sun4v_ktsb_register(void)
1692 unsigned long pa, ret;
1694 pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);
1696 ret = sun4v_mmu_tsb_ctx0(NUM_KTSB_DESCR, pa);
1698 prom_printf("hypervisor_mmu_tsb_ctx0[%lx]: "
1699 "errors with %lx\n", pa, ret);
1704 static void __init sun4u_linear_pte_xor_finalize(void)
1706 #ifndef CONFIG_DEBUG_PAGEALLOC
1707 /* This is where we would add Panther support for
1708 * 32MB and 256MB pages.
1713 static void __init sun4v_linear_pte_xor_finalize(void)
1715 #ifndef CONFIG_DEBUG_PAGEALLOC
1716 if (cpu_pgsz_mask & HV_PGSZ_MASK_256MB) {
1717 kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
1718 0xfffff80000000000UL;
1719 kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V |
1720 _PAGE_P_4V | _PAGE_W_4V);
1722 kern_linear_pte_xor[1] = kern_linear_pte_xor[0];
1725 if (cpu_pgsz_mask & HV_PGSZ_MASK_2GB) {
1726 kern_linear_pte_xor[2] = (_PAGE_VALID | _PAGE_SZ2GB_4V) ^
1727 0xfffff80000000000UL;
1728 kern_linear_pte_xor[2] |= (_PAGE_CP_4V | _PAGE_CV_4V |
1729 _PAGE_P_4V | _PAGE_W_4V);
1731 kern_linear_pte_xor[2] = kern_linear_pte_xor[1];
1734 if (cpu_pgsz_mask & HV_PGSZ_MASK_16GB) {
1735 kern_linear_pte_xor[3] = (_PAGE_VALID | _PAGE_SZ16GB_4V) ^
1736 0xfffff80000000000UL;
1737 kern_linear_pte_xor[3] |= (_PAGE_CP_4V | _PAGE_CV_4V |
1738 _PAGE_P_4V | _PAGE_W_4V);
1740 kern_linear_pte_xor[3] = kern_linear_pte_xor[2];
1745 /* paging_init() sets up the page tables */
1747 static unsigned long last_valid_pfn;
1748 pgd_t swapper_pg_dir[2048];
1750 static void sun4u_pgprot_init(void);
1751 static void sun4v_pgprot_init(void);
1753 void __init paging_init(void)
1755 unsigned long end_pfn, shift, phys_base;
1756 unsigned long real_end, i;
	/* These build time checks make sure that the dcache_dirty_cpu()
1760 * page->flags usage will work.
1762 * When a page gets marked as dcache-dirty, we store the
1763 * cpu number starting at bit 32 in the page->flags. Also,
1764 * functions like clear_dcache_dirty_cpu use the cpu mask
1765 * in 13-bit signed-immediate instruction fields.
1769 * Page flags must not reach into upper 32 bits that are used
1770 * for the cpu number
1772 BUILD_BUG_ON(NR_PAGEFLAGS > 32);
1775 * The bit fields placed in the high range must not reach below
1776 * the 32 bit boundary. Otherwise we cannot place the cpu field
1777 * at the 32 bit boundary.
1779 BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH +
1780 ilog2(roundup_pow_of_two(NR_CPUS)) > 32);
1782 BUILD_BUG_ON(NR_CPUS > 4096);
1784 kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
1785 kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
1787 /* Invalidate both kernel TSBs. */
1788 memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
1789 #ifndef CONFIG_DEBUG_PAGEALLOC
1790 memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
1793 if (tlb_type == hypervisor)
1794 sun4v_pgprot_init();
1796 sun4u_pgprot_init();
1798 if (tlb_type == cheetah_plus ||
1799 tlb_type == hypervisor) {
1804 if (tlb_type == hypervisor)
1805 sun4v_patch_tlb_handlers();
1807 /* Find available physical memory...
1809 * Read it twice in order to work around a bug in openfirmware.
1810 * The call to grab this table itself can cause openfirmware to
1811 * allocate memory, which in turn can take away some space from
1812 * the list of available memory. Reading it twice makes sure
1813 * we really do get the final value.
1815 read_obp_translations();
1816 read_obp_memory("reg", &pall[0], &pall_ents);
1817 read_obp_memory("available", &pavail[0], &pavail_ents);
1818 read_obp_memory("available", &pavail[0], &pavail_ents);
1820 phys_base = 0xffffffffffffffffUL;
1821 for (i = 0; i < pavail_ents; i++) {
1822 phys_base = min(phys_base, pavail[i].phys_addr);
1823 memblock_add(pavail[i].phys_addr, pavail[i].reg_size);
1826 memblock_reserve(kern_base, kern_size);
1828 find_ramdisk(phys_base);
1830 memblock_enforce_memory_limit(cmdline_memory_size);
1832 memblock_allow_resize();
1833 memblock_dump_all();
1835 set_bit(0, mmu_context_bmap);
1837 shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);
1839 real_end = (unsigned long)_end;
1840 num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << 22);
1841 printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
1842 num_kernel_image_mappings);
1844 /* Set kernel pgd to upper alias so physical page computations
1847 init_mm.pgd += ((shift) / (sizeof(pgd_t)));
1849 memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir));
1851 /* Now can init the kernel/bad page tables. */
1852 pud_set(pud_offset(&swapper_pg_dir[0], 0),
1853 swapper_low_pmd_dir + (shift / sizeof(pgd_t)));
1855 inherit_prom_mappings();
1859 /* Ok, we can use our TLB miss and window trap handlers safely. */
1864 prom_build_devicetree();
1865 of_populate_present_mask();
1867 of_fill_in_cpu_data();
1870 if (tlb_type == hypervisor) {
1872 mdesc_populate_present_mask(cpu_all_mask);
1874 mdesc_fill_in_cpu_data(cpu_all_mask);
1876 mdesc_get_page_sizes(cpu_all_mask, &cpu_pgsz_mask);
1878 sun4v_linear_pte_xor_finalize();
1881 sun4v_ktsb_register();
1883 unsigned long impl, ver;
1885 cpu_pgsz_mask = (HV_PGSZ_MASK_8K | HV_PGSZ_MASK_64K |
1886 HV_PGSZ_MASK_512K | HV_PGSZ_MASK_4MB);
1888 __asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
1889 impl = ((ver >> 32) & 0xffff);
1890 if (impl == PANTHER_IMPL)
1891 cpu_pgsz_mask |= (HV_PGSZ_MASK_32MB |
1892 HV_PGSZ_MASK_256MB);
1894 sun4u_linear_pte_xor_finalize();
1897 /* Flush the TLBs and the 4M TSB so that the updated linear
1898 * pte XOR settings are realized for all mappings.
1901 #ifndef CONFIG_DEBUG_PAGEALLOC
1902 memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
1906 /* Setup bootmem... */
1907 last_valid_pfn = end_pfn = bootmem_init(phys_base);
1909 /* Once the OF device tree and MDESC have been setup, we know
	 * the list of possible cpus.  Therefore we can allocate the
	 * IRQ stacks.
	 */
1913 for_each_possible_cpu(i) {
1914 node = cpu_to_node(i);
1916 softirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
1919 hardirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
1924 kernel_physical_mapping_init();
1927 unsigned long max_zone_pfns[MAX_NR_ZONES];
1929 memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
1931 max_zone_pfns[ZONE_NORMAL] = end_pfn;
1933 free_area_init_nodes(max_zone_pfns);
1936 printk("Booting Linux...\n");
1939 int page_in_phys_avail(unsigned long paddr)
1945 for (i = 0; i < pavail_ents; i++) {
1946 unsigned long start, end;
1948 start = pavail[i].phys_addr;
1949 end = start + pavail[i].reg_size;
1951 if (paddr >= start && paddr < end)
1954 if (paddr >= kern_base && paddr < (kern_base + kern_size))
1956 #ifdef CONFIG_BLK_DEV_INITRD
1957 if (paddr >= __pa(initrd_start) &&
1958 paddr < __pa(PAGE_ALIGN(initrd_end)))
1965 static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata;
1966 static int pavail_rescan_ents __initdata;
1968 /* Certain OBP calls, such as fetching "available" properties, can
1969 * claim physical memory. So, along with initializing the valid
1970 * address bitmap, what we do here is refetch the physical available
1971 * memory list again, and make sure it provides at least as much
1972 * memory as 'pavail' does.
1974 static void __init setup_valid_addr_bitmap_from_pavail(unsigned long *bitmap)
1978 read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents);
1980 for (i = 0; i < pavail_ents; i++) {
1981 unsigned long old_start, old_end;
1983 old_start = pavail[i].phys_addr;
1984 old_end = old_start + pavail[i].reg_size;
1985 while (old_start < old_end) {
1988 for (n = 0; n < pavail_rescan_ents; n++) {
1989 unsigned long new_start, new_end;
1991 new_start = pavail_rescan[n].phys_addr;
1992 new_end = new_start +
1993 pavail_rescan[n].reg_size;
1995 if (new_start <= old_start &&
1996 new_end >= (old_start + PAGE_SIZE)) {
1997 set_bit(old_start >> 22, bitmap);
2002 prom_printf("mem_init: Lost memory in pavail\n");
2003 prom_printf("mem_init: OLD start[%lx] size[%lx]\n",
2004 pavail[i].phys_addr,
2005 pavail[i].reg_size);
2006 prom_printf("mem_init: NEW start[%lx] size[%lx]\n",
2007 pavail_rescan[i].phys_addr,
2008 pavail_rescan[i].reg_size);
2009 prom_printf("mem_init: Cannot continue, aborting.\n");
2013 old_start += PAGE_SIZE;
2018 static void __init patch_tlb_miss_handler_bitmap(void)
2020 extern unsigned int valid_addr_bitmap_insn[];
2021 extern unsigned int valid_addr_bitmap_patch[];
2023 valid_addr_bitmap_insn[1] = valid_addr_bitmap_patch[1];
2025 valid_addr_bitmap_insn[0] = valid_addr_bitmap_patch[0];
2026 flushi(&valid_addr_bitmap_insn[0]);
2029 static void __init register_page_bootmem_info(void)
2031 #ifdef CONFIG_NEED_MULTIPLE_NODES
2034 for_each_online_node(i)
2035 if (NODE_DATA(i)->node_spanned_pages)
2036 register_page_bootmem_info_node(NODE_DATA(i));
2039 void __init mem_init(void)
2041 unsigned long codepages, datapages, initpages;
2042 unsigned long addr, last;
2044 addr = PAGE_OFFSET + kern_base;
2045 last = PAGE_ALIGN(kern_size) + addr;
2046 while (addr < last) {
2047 set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap);
2051 setup_valid_addr_bitmap_from_pavail(sparc64_valid_addr_bitmap);
2052 patch_tlb_miss_handler_bitmap();
2054 high_memory = __va(last_valid_pfn << PAGE_SHIFT);
2056 register_page_bootmem_info();
2057 totalram_pages = free_all_bootmem();
2059 /* We subtract one to account for the mem_map_zero page
2062 totalram_pages -= 1;
2063 num_physpages = totalram_pages;
2066 * Set up the zero page, mark it reserved, so that page count
2067 * is not manipulated when freeing the page from user ptes.
2069 mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
2070 if (mem_map_zero == NULL) {
2071 prom_printf("paging_init: Cannot alloc zero page.\n");
2074 SetPageReserved(mem_map_zero);
2076 codepages = (((unsigned long) _etext) - ((unsigned long) _start));
2077 codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT;
2078 datapages = (((unsigned long) _edata) - ((unsigned long) _etext));
2079 datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT;
2080 initpages = (((unsigned long) __init_end) - ((unsigned long) __init_begin));
2081 initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT;
2083 printk("Memory: %luk available (%ldk kernel code, %ldk data, %ldk init) [%016lx,%016lx]\n",
2084 nr_free_pages() << (PAGE_SHIFT-10),
2085 codepages << (PAGE_SHIFT-10),
2086 datapages << (PAGE_SHIFT-10),
2087 initpages << (PAGE_SHIFT-10),
2088 PAGE_OFFSET, (last_valid_pfn << PAGE_SHIFT));
2090 if (tlb_type == cheetah || tlb_type == cheetah_plus)
2091 cheetah_ecache_flush_init();
2094 void free_initmem(void)
2096 unsigned long addr, initend;
2099 /* If the physical memory maps were trimmed by kernel command
2100 * line options, don't even try freeing this initmem stuff up.
2101 * The kernel image could have been in the trimmed out region
2102 * and if so the freeing below will free invalid page structs.
2104 if (cmdline_memory_size)
2108 * The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes.
2110 addr = PAGE_ALIGN((unsigned long)(__init_begin));
2111 initend = (unsigned long)(__init_end) & PAGE_MASK;
2112 for (; addr < initend; addr += PAGE_SIZE) {
2117 ((unsigned long) __va(kern_base)) -
2118 ((unsigned long) KERNBASE));
2119 memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
2122 p = virt_to_page(page);
2124 ClearPageReserved(p);
2132 #ifdef CONFIG_BLK_DEV_INITRD
2133 void free_initrd_mem(unsigned long start, unsigned long end)
2136 printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
2137 for (; start < end; start += PAGE_SIZE) {
2138 struct page *p = virt_to_page(start);
2140 ClearPageReserved(p);
2148 #define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U)
2149 #define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V)
2150 #define __DIRTY_BITS_4U (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
2151 #define __DIRTY_BITS_4V (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
2152 #define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
2153 #define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)
2155 pgprot_t PAGE_KERNEL __read_mostly;
2156 EXPORT_SYMBOL(PAGE_KERNEL);
2158 pgprot_t PAGE_KERNEL_LOCKED __read_mostly;
2159 pgprot_t PAGE_COPY __read_mostly;
2161 pgprot_t PAGE_SHARED __read_mostly;
2162 EXPORT_SYMBOL(PAGE_SHARED);
2164 unsigned long pg_iobits __read_mostly;
2166 unsigned long _PAGE_IE __read_mostly;
2167 EXPORT_SYMBOL(_PAGE_IE);
2169 unsigned long _PAGE_E __read_mostly;
2170 EXPORT_SYMBOL(_PAGE_E);
2172 unsigned long _PAGE_CACHE __read_mostly;
2173 EXPORT_SYMBOL(_PAGE_CACHE);
2175 #ifdef CONFIG_SPARSEMEM_VMEMMAP
2176 unsigned long vmemmap_table[VMEMMAP_SIZE];
2178 static long __meminitdata addr_start, addr_end;
2179 static int __meminitdata node_start;
2181 int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
2183 unsigned long vstart = (unsigned long) start;
2184 unsigned long vend = (unsigned long) (start + nr);
2185 unsigned long phys_start = (vstart - VMEMMAP_BASE);
2186 unsigned long phys_end = (vend - VMEMMAP_BASE);
2187 unsigned long addr = phys_start & VMEMMAP_CHUNK_MASK;
2188 unsigned long end = VMEMMAP_ALIGN(phys_end);
2189 unsigned long pte_base;
2191 pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
2192 _PAGE_CP_4U | _PAGE_CV_4U |
2193 _PAGE_P_4U | _PAGE_W_4U);
2194 if (tlb_type == hypervisor)
2195 pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
2196 _PAGE_CP_4V | _PAGE_CV_4V |
2197 _PAGE_P_4V | _PAGE_W_4V);
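
	/* Editorial note: vmemmap_table[] holds one TTE per VMEMMAP_CHUNK
	 * (4MB) of the virtual memmap.  The loop below installs a 4MB backing
	 * block for every chunk touched by the requested struct page range,
	 * and the TLB miss handler for the vmemmap region consults this table.
	 */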
2199 for (; addr < end; addr += VMEMMAP_CHUNK) {
2200 unsigned long *vmem_pp =
2201 vmemmap_table + (addr >> VMEMMAP_CHUNK_SHIFT);
2204 if (!(*vmem_pp & _PAGE_VALID)) {
2205 block = vmemmap_alloc_block(1UL << 22, node);
2209 *vmem_pp = pte_base | __pa(block);
2211 /* check to see if we have contiguous blocks */
2212 if (addr_end != addr || node_start != node) {
2214 printk(KERN_DEBUG " [%lx-%lx] on node %d\n",
2215 addr_start, addr_end-1, node_start);
2219 addr_end = addr + VMEMMAP_CHUNK;
2225 void __meminit vmemmap_populate_print_last(void)
2228 printk(KERN_DEBUG " [%lx-%lx] on node %d\n",
2229 addr_start, addr_end-1, node_start);
2236 void vmemmap_free(struct page *memmap, unsigned long nr_pages)
2240 #endif /* CONFIG_SPARSEMEM_VMEMMAP */
2242 static void prot_init_common(unsigned long page_none,
2243 unsigned long page_shared,
2244 unsigned long page_copy,
2245 unsigned long page_readonly,
2246 unsigned long page_exec_bit)
2248 PAGE_COPY = __pgprot(page_copy);
2249 PAGE_SHARED = __pgprot(page_shared);
2251 protection_map[0x0] = __pgprot(page_none);
2252 protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit);
2253 protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit);
2254 protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit);
2255 protection_map[0x4] = __pgprot(page_readonly);
2256 protection_map[0x5] = __pgprot(page_readonly);
2257 protection_map[0x6] = __pgprot(page_copy);
2258 protection_map[0x7] = __pgprot(page_copy);
2259 protection_map[0x8] = __pgprot(page_none);
2260 protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit);
2261 protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit);
2262 protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit);
2263 protection_map[0xc] = __pgprot(page_readonly);
2264 protection_map[0xd] = __pgprot(page_readonly);
2265 protection_map[0xe] = __pgprot(page_shared);
2266 protection_map[0xf] = __pgprot(page_shared);
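
	/* Editorial note: protection_map[] is indexed by the low four VM flag
	 * bits (read=1, write=2, exec=4, shared=8).  For example, index 0x3
	 * is a private read+write mapping and maps to page_copy with the exec
	 * bit cleared, so private writable mappings start out copy-on-write.
	 */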
2269 static void __init sun4u_pgprot_init(void)
2271 unsigned long page_none, page_shared, page_copy, page_readonly;
2272 unsigned long page_exec_bit;
2275 PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
2276 _PAGE_CACHE_4U | _PAGE_P_4U |
2277 __ACCESS_BITS_4U | __DIRTY_BITS_4U |
2279 PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
2280 _PAGE_CACHE_4U | _PAGE_P_4U |
2281 __ACCESS_BITS_4U | __DIRTY_BITS_4U |
2282 _PAGE_EXEC_4U | _PAGE_L_4U);
2284 _PAGE_IE = _PAGE_IE_4U;
2285 _PAGE_E = _PAGE_E_4U;
2286 _PAGE_CACHE = _PAGE_CACHE_4U;
2288 pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U |
2289 __ACCESS_BITS_4U | _PAGE_E_4U);
2291 #ifdef CONFIG_DEBUG_PAGEALLOC
2292 kern_linear_pte_xor[0] = _PAGE_VALID ^ 0xfffff80000000000UL;
2294 kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
2295 0xfffff80000000000UL;
2297 kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U |
2298 _PAGE_P_4U | _PAGE_W_4U);
2300 for (i = 1; i < 4; i++)
2301 kern_linear_pte_xor[i] = kern_linear_pte_xor[0];
2303 _PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U |
2304 _PAGE_SZ64K_4U | _PAGE_SZ8K_4U |
2305 _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U);
2308 page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U;
2309 page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2310 __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U);
2311 page_copy = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2312 __ACCESS_BITS_4U | _PAGE_EXEC_4U);
2313 page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2314 __ACCESS_BITS_4U | _PAGE_EXEC_4U);
2316 page_exec_bit = _PAGE_EXEC_4U;
2318 prot_init_common(page_none, page_shared, page_copy, page_readonly,
2322 static void __init sun4v_pgprot_init(void)
2324 unsigned long page_none, page_shared, page_copy, page_readonly;
2325 unsigned long page_exec_bit;
2328 PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID |
2329 _PAGE_CACHE_4V | _PAGE_P_4V |
2330 __ACCESS_BITS_4V | __DIRTY_BITS_4V |
2332 PAGE_KERNEL_LOCKED = PAGE_KERNEL;
2334 _PAGE_IE = _PAGE_IE_4V;
2335 _PAGE_E = _PAGE_E_4V;
2336 _PAGE_CACHE = _PAGE_CACHE_4V;
2338 #ifdef CONFIG_DEBUG_PAGEALLOC
2339 kern_linear_pte_xor[0] = _PAGE_VALID ^ 0xfffff80000000000UL;
2341 kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
2342 0xfffff80000000000UL;
2344 kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V |
2345 _PAGE_P_4V | _PAGE_W_4V);
2347 for (i = 1; i < 4; i++)
2348 kern_linear_pte_xor[i] = kern_linear_pte_xor[0];
2350 pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V |
2351 __ACCESS_BITS_4V | _PAGE_E_4V);
2353 _PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V |
2354 _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V |
2355 _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V |
2356 _PAGE_SZ64K_4V | _PAGE_SZ8K_4V);
2358 page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | _PAGE_CACHE_4V;
2359 page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
2360 __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V);
2361 page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
2362 __ACCESS_BITS_4V | _PAGE_EXEC_4V);
2363 page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
2364 __ACCESS_BITS_4V | _PAGE_EXEC_4V);
2366 page_exec_bit = _PAGE_EXEC_4V;
2368 prot_init_common(page_none, page_shared, page_copy, page_readonly,
2372 unsigned long pte_sz_bits(unsigned long sz)
2374 if (tlb_type == hypervisor) {
2378 return _PAGE_SZ8K_4V;
2380 return _PAGE_SZ64K_4V;
2382 return _PAGE_SZ512K_4V;
2383 case 4 * 1024 * 1024:
2384 return _PAGE_SZ4MB_4V;
2390 return _PAGE_SZ8K_4U;
2392 return _PAGE_SZ64K_4U;
2394 return _PAGE_SZ512K_4U;
2395 case 4 * 1024 * 1024:
2396 return _PAGE_SZ4MB_4U;
2401 pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size)
2405 pte_val(pte) = page | pgprot_val(pgprot_noncached(prot));
2406 pte_val(pte) |= (((unsigned long)space) << 32);
2407 pte_val(pte) |= pte_sz_bits(page_size);
2412 static unsigned long kern_large_tte(unsigned long paddr)
2416 val = (_PAGE_VALID | _PAGE_SZ4MB_4U |
2417 _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U |
2418 _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U);
2419 if (tlb_type == hypervisor)
2420 val = (_PAGE_VALID | _PAGE_SZ4MB_4V |
2421 _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V |
2422 _PAGE_EXEC_4V | _PAGE_W_4V);
2427 /* If not locked, zap it. */
2428 void __flush_tlb_all(void)
2430 unsigned long pstate;
2433 __asm__ __volatile__("flushw\n\t"
2434 "rdpr %%pstate, %0\n\t"
2435 "wrpr %0, %1, %%pstate"
2438 if (tlb_type == hypervisor) {
2439 sun4v_mmu_demap_all();
2440 } else if (tlb_type == spitfire) {
2441 for (i = 0; i < 64; i++) {
2442 /* Spitfire Errata #32 workaround */
2443 /* NOTE: Always runs on spitfire, so no
2444 * cheetah+ page size encodings.
2446 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
2450 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
2452 if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) {
2453 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
2456 : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
2457 spitfire_put_dtlb_data(i, 0x0UL);
2460 /* Spitfire Errata #32 workaround */
2461 /* NOTE: Always runs on spitfire, so no
2462 * cheetah+ page size encodings.
2464 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
2468 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
2470 if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) {
2471 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
2474 : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
2475 spitfire_put_itlb_data(i, 0x0UL);
2478 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
2479 cheetah_flush_dtlb_all();
2480 cheetah_flush_itlb_all();
2482 __asm__ __volatile__("wrpr %0, 0, %%pstate"
2486 static pte_t *get_from_cache(struct mm_struct *mm)
2491 spin_lock(&mm->page_table_lock);
2492 page = mm->context.pgtable_page;
2495 void *p = page_address(page);
2497 mm->context.pgtable_page = NULL;
2499 ret = (pte_t *) (p + (PAGE_SIZE / 2));
2501 spin_unlock(&mm->page_table_lock);
2506 static struct page *__alloc_for_cache(struct mm_struct *mm)
2508 struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
2509 __GFP_REPEAT | __GFP_ZERO);
2512 spin_lock(&mm->page_table_lock);
2513 if (!mm->context.pgtable_page) {
2514 atomic_set(&page->_count, 2);
2515 mm->context.pgtable_page = page;
2517 spin_unlock(&mm->page_table_lock);
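
/* Editorial note on the two helpers above: each PAGE_SIZE page is split in
 * half and each half is used as a pte table.  __alloc_for_cache() sets the
 * page count to 2 and parks the page in mm->context.pgtable_page so that
 * get_from_cache() can hand out the second half later; the halves are then
 * released independently via put_page_testzero() in the free paths below.
 */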
2522 pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
2523 unsigned long address)
2528 pte = get_from_cache(mm);
2532 page = __alloc_for_cache(mm);
2534 pte = (pte_t *) page_address(page);
2539 pgtable_t pte_alloc_one(struct mm_struct *mm,
2540 unsigned long address)
2545 pte = get_from_cache(mm);
2549 page = __alloc_for_cache(mm);
2551 pgtable_page_ctor(page);
2552 pte = (pte_t *) page_address(page);
2558 void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
2560 struct page *page = virt_to_page(pte);
2561 if (put_page_testzero(page))
2562 free_hot_cold_page(page, 0);
2565 static void __pte_free(pgtable_t pte)
2567 struct page *page = virt_to_page(pte);
2568 if (put_page_testzero(page)) {
2569 pgtable_page_dtor(page);
2570 free_hot_cold_page(page, 0);
2574 void pte_free(struct mm_struct *mm, pgtable_t pte)
2579 void pgtable_free(void *table, bool is_page)
2584 kmem_cache_free(pgtable_cache, table);
2587 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2588 static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot, bool for_modify)
2590 if (pgprot_val(pgprot) & _PAGE_VALID)
2591 pmd_val(pmd) |= PMD_HUGE_PRESENT;
2592 if (tlb_type == hypervisor) {
2593 if (pgprot_val(pgprot) & _PAGE_WRITE_4V)
2594 pmd_val(pmd) |= PMD_HUGE_WRITE;
2595 if (pgprot_val(pgprot) & _PAGE_EXEC_4V)
2596 pmd_val(pmd) |= PMD_HUGE_EXEC;
2599 if (pgprot_val(pgprot) & _PAGE_ACCESSED_4V)
2600 pmd_val(pmd) |= PMD_HUGE_ACCESSED;
2601 if (pgprot_val(pgprot) & _PAGE_MODIFIED_4V)
2602 pmd_val(pmd) |= PMD_HUGE_DIRTY;
2605 if (pgprot_val(pgprot) & _PAGE_WRITE_4U)
2606 pmd_val(pmd) |= PMD_HUGE_WRITE;
2607 if (pgprot_val(pgprot) & _PAGE_EXEC_4U)
2608 pmd_val(pmd) |= PMD_HUGE_EXEC;
2611 if (pgprot_val(pgprot) & _PAGE_ACCESSED_4U)
2612 pmd_val(pmd) |= PMD_HUGE_ACCESSED;
2613 if (pgprot_val(pgprot) & _PAGE_MODIFIED_4U)
2614 pmd_val(pmd) |= PMD_HUGE_DIRTY;
2621 pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
2625 pmd_val(pmd) = (page_nr << ((PAGE_SHIFT - PMD_PADDR_SHIFT)));
2626 pmd_val(pmd) |= PMD_ISHUGE;
2627 pmd = pmd_set_protbits(pmd, pgprot, false);
2631 pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
2633 pmd_val(pmd) &= ~(PMD_HUGE_PRESENT |
2636 pmd = pmd_set_protbits(pmd, newprot, true);
2640 pgprot_t pmd_pgprot(pmd_t entry)
2642 unsigned long pte = 0;
2644 if (pmd_val(entry) & PMD_HUGE_PRESENT)
2647 if (tlb_type == hypervisor) {
2648 if (pmd_val(entry) & PMD_HUGE_PRESENT)
2649 pte |= _PAGE_PRESENT_4V;
2650 if (pmd_val(entry) & PMD_HUGE_EXEC)
2651 pte |= _PAGE_EXEC_4V;
2652 if (pmd_val(entry) & PMD_HUGE_WRITE)
2654 if (pmd_val(entry) & PMD_HUGE_ACCESSED)
2655 pte |= _PAGE_ACCESSED_4V;
2656 if (pmd_val(entry) & PMD_HUGE_DIRTY)
2657 pte |= _PAGE_MODIFIED_4V;
2658 pte |= _PAGE_CP_4V|_PAGE_CV_4V;
2660 if (pmd_val(entry) & PMD_HUGE_PRESENT)
2661 pte |= _PAGE_PRESENT_4U;
2662 if (pmd_val(entry) & PMD_HUGE_EXEC)
2663 pte |= _PAGE_EXEC_4U;
2664 if (pmd_val(entry) & PMD_HUGE_WRITE)
2666 if (pmd_val(entry) & PMD_HUGE_ACCESSED)
2667 pte |= _PAGE_ACCESSED_4U;
2668 if (pmd_val(entry) & PMD_HUGE_DIRTY)
2669 pte |= _PAGE_MODIFIED_4U;
2670 pte |= _PAGE_CP_4U|_PAGE_CV_4U;
2673 return __pgprot(pte);
2676 void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
2679 unsigned long pte, flags;
2680 struct mm_struct *mm;
2684 if (!pmd_large(entry) || !pmd_young(entry))
2687 pte = (pmd_val(entry) & ~PMD_HUGE_PROTBITS);
2688 pte <<= PMD_PADDR_SHIFT;
2691 prot = pmd_pgprot(entry);
2693 if (tlb_type == hypervisor)
2694 pgprot_val(prot) |= _PAGE_SZHUGE_4V;
2696 pgprot_val(prot) |= _PAGE_SZHUGE_4U;
2698 pte |= pgprot_val(prot);
2702 spin_lock_irqsave(&mm->context.lock, flags);
2704 if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL)
2705 __update_mmu_tsb_insert(mm, MM_TSB_HUGE, HPAGE_SHIFT,
2708 spin_unlock_irqrestore(&mm->context.lock, flags);
2710 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
2712 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
2713 static void context_reload(void *__data)
2715 struct mm_struct *mm = __data;
2717 if (mm == current->mm)
2718 load_secondary_context(mm);
2721 void hugetlb_setup(struct pt_regs *regs)
2723 struct mm_struct *mm = current->mm;
2724 struct tsb_config *tp;
2726 if (in_atomic() || !mm) {
2727 const struct exception_table_entry *entry;
2729 entry = search_exception_tables(regs->tpc);
2731 regs->tpc = entry->fixup;
2732 regs->tnpc = regs->tpc + 4;
2735 pr_alert("Unexpected HugeTLB setup in atomic context.\n");
2736 die_if_kernel("HugeTSB in atomic", regs);
2739 tp = &mm->context.tsb_block[MM_TSB_HUGE];
2740 if (likely(tp->tsb == NULL))
2741 tsb_grow(mm, MM_TSB_HUGE, 0);
2743 tsb_context_switch(mm);
2746 /* On UltraSPARC-III+ and later, configure the second half of
2747 * the Data-TLB for huge pages.
2749 if (tlb_type == cheetah_plus) {
2752 spin_lock(&ctx_alloc_lock);
2753 ctx = mm->context.sparc64_ctx_val;
2754 ctx &= ~CTX_PGSZ_MASK;
2755 ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
2756 ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT;
2758 if (ctx != mm->context.sparc64_ctx_val) {
2759 /* When changing the page size fields, we
2760 * must perform a context flush so that no
2761 * stale entries match. This flush must
2762 * occur with the original context register
2765 do_flush_tlb_mm(mm);
2767 /* Reload the context register of all processors
2768 * also executing in this address space.
2770 mm->context.sparc64_ctx_val = ctx;
2771 on_each_cpu(context_reload, mm, 0);
2773 spin_unlock(&ctx_alloc_lock);