sparc64: Fix numa node distance initialization
arch/sparc/mm/init_64.c (karo-tx-linux.git)
/*
 *  arch/sparc64/mm/init.c
 *
 *  Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
 *  Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/poison.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kprobes.h>
#include <linux/cache.h>
#include <linux/sort.h>
#include <linux/ioport.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/gfp.h>

#include <asm/head.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/iommu.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/spitfire.h>
#include <asm/sections.h>
#include <asm/tsb.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>
#include <asm/mdesc.h>
#include <asm/cpudata.h>
#include <asm/setup.h>
#include <asm/irq.h>

#include "init_64.h"

unsigned long kern_linear_pte_xor[4] __read_mostly;
static unsigned long page_cache4v_flag;

/* A bitmap, two bits for every 256MB of physical memory.  These two
 * bits determine what page size we use for kernel linear
 * translations.  They form an index into kern_linear_pte_xor[].  The
 * value in the indexed slot is XOR'd with the TLB miss virtual
 * address to form the resulting TTE.  The mapping is:
 *
 *      0       ==>     4MB
 *      1       ==>     256MB
 *      2       ==>     2GB
 *      3       ==>     16GB
 *
 * All sun4v chips support 256MB pages.  Only SPARC-T4 and later
 * support 2GB pages, and hopefully future cpus will support the 16GB
 * pages as well.  For slots 2 and 3, we encode a 256MB TTE xor there
 * if these larger page sizes are not supported by the cpu.
 *
 * It would be nice to determine this from the machine description
 * 'cpu' properties, but we need to have this table setup before the
 * MDESC is initialized.
 */
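
/* Illustrative sketch, not kernel code: if the two bitmap bits for a
 * 256MB region resolve to slot 1, the TLB miss handler conceptually
 * computes
 *
 *      tte = vaddr ^ kern_linear_pte_xor[1];
 *
 * so each slot's xor value has to fold in both the linear mapping's
 * virtual-to-physical offset and the TTE attribute bits for its page
 * size.  The slot number here is an assumption for the example.
 */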

#ifndef CONFIG_DEBUG_PAGEALLOC
/* A special kernel TSB for 4MB, 256MB, 2GB and 16GB linear mappings.
 * Space is allocated for this right after the trap table in
 * arch/sparc64/kernel/head.S
 */
extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
#endif
extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];

static unsigned long cpu_pgsz_mask;

#define MAX_BANKS       1024

static struct linux_prom64_registers pavail[MAX_BANKS];
static int pavail_ents;

u64 numa_latency[MAX_NUMNODES][MAX_NUMNODES];

static int cmp_p64(const void *a, const void *b)
{
        const struct linux_prom64_registers *x = a, *y = b;

        if (x->phys_addr > y->phys_addr)
                return 1;
        if (x->phys_addr < y->phys_addr)
                return -1;
        return 0;
}

static void __init read_obp_memory(const char *property,
                                   struct linux_prom64_registers *regs,
                                   int *num_ents)
{
        phandle node = prom_finddevice("/memory");
        int prop_size = prom_getproplen(node, property);
        int ents, ret, i;

        ents = prop_size / sizeof(struct linux_prom64_registers);
        if (ents > MAX_BANKS) {
                prom_printf("The machine has more %s property entries than "
                            "this kernel can support (%d).\n",
                            property, MAX_BANKS);
                prom_halt();
        }

        ret = prom_getproperty(node, property, (char *) regs, prop_size);
        if (ret == -1) {
                prom_printf("Couldn't get %s property from /memory.\n",
                                property);
                prom_halt();
        }

        /* Sanitize what we got from the firmware, by page aligning
         * everything.
         */
        for (i = 0; i < ents; i++) {
                unsigned long base, size;

                base = regs[i].phys_addr;
                size = regs[i].reg_size;

                size &= PAGE_MASK;
                if (base & ~PAGE_MASK) {
                        unsigned long new_base = PAGE_ALIGN(base);

                        size -= new_base - base;
                        if ((long) size < 0L)
                                size = 0UL;
                        base = new_base;
                }
                if (size == 0UL) {
                        /* If it is empty, simply get rid of it.
                         * This simplifies the logic of the other
                         * functions that process these arrays.
                         */
                        memmove(&regs[i], &regs[i + 1],
                                (ents - i - 1) * sizeof(regs[0]));
                        i--;
                        ents--;
                        continue;
                }
                regs[i].phys_addr = base;
                regs[i].reg_size = size;
        }

        *num_ents = ents;

        sort(regs, ents, sizeof(struct linux_prom64_registers),
             cmp_p64, NULL);
}

/* Kernel physical address base and size in bytes.  */
unsigned long kern_base __read_mostly;
unsigned long kern_size __read_mostly;

/* Initial ramdisk setup */
extern unsigned long sparc_ramdisk_image64;
extern unsigned int sparc_ramdisk_image;
extern unsigned int sparc_ramdisk_size;

struct page *mem_map_zero __read_mostly;
EXPORT_SYMBOL(mem_map_zero);

unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;

unsigned long sparc64_kern_pri_context __read_mostly;
unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
unsigned long sparc64_kern_sec_context __read_mostly;

int num_kernel_image_mappings;

#ifdef CONFIG_DEBUG_DCFLUSH
atomic_t dcpage_flushes = ATOMIC_INIT(0);
#ifdef CONFIG_SMP
atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
#endif
#endif

inline void flush_dcache_page_impl(struct page *page)
{
        BUG_ON(tlb_type == hypervisor);
#ifdef CONFIG_DEBUG_DCFLUSH
        atomic_inc(&dcpage_flushes);
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
        __flush_dcache_page(page_address(page),
                            ((tlb_type == spitfire) &&
                             page_mapping(page) != NULL));
#else
        if (page_mapping(page) != NULL &&
            tlb_type == spitfire)
                __flush_icache_page(__pa(page_address(page)));
#endif
}

#define PG_dcache_dirty         PG_arch_1
#define PG_dcache_cpu_shift     32UL
#define PG_dcache_cpu_mask      \
        ((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)

#define dcache_dirty_cpu(page) \
        (((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)

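/* Illustrative layout, with assumed numbers: the dirty state lives in
 * the arch-specific page flag PG_arch_1, and the id of the cpu that
 * dirtied the page is stashed in page->flags starting at bit 32,
 * ilog2(roundup_pow_of_two(NR_CPUS)) bits wide.  E.g. with NR_CPUS=64,
 * a page dirtied by cpu 5 has bits 32-37 equal to 5 and
 * PG_dcache_dirty set.
 */
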
static inline void set_dcache_dirty(struct page *page, int this_cpu)
{
        unsigned long mask = this_cpu;
        unsigned long non_cpu_bits;

        non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
        mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);

        __asm__ __volatile__("1:\n\t"
                             "ldx       [%2], %%g7\n\t"
                             "and       %%g7, %1, %%g1\n\t"
                             "or        %%g1, %0, %%g1\n\t"
                             "casx      [%2], %%g7, %%g1\n\t"
                             "cmp       %%g7, %%g1\n\t"
                             "bne,pn    %%xcc, 1b\n\t"
                             " nop"
                             : /* no outputs */
                             : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
                             : "g1", "g7");
}
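
/* The casx loop above is, in spirit, this C (a readability sketch
 * only; the real code must remain one atomic compare-and-swap loop
 * in asm):
 *
 *      unsigned long old, new;
 *      do {
 *              old = page->flags;
 *              new = (old & non_cpu_bits) | mask;
 *      } while (cmpxchg(&page->flags, old, new) != old);
 */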

static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
{
        unsigned long mask = (1UL << PG_dcache_dirty);

        __asm__ __volatile__("! test_and_clear_dcache_dirty\n"
                             "1:\n\t"
                             "ldx       [%2], %%g7\n\t"
                             "srlx      %%g7, %4, %%g1\n\t"
                             "and       %%g1, %3, %%g1\n\t"
                             "cmp       %%g1, %0\n\t"
                             "bne,pn    %%icc, 2f\n\t"
                             " andn     %%g7, %1, %%g1\n\t"
                             "casx      [%2], %%g7, %%g1\n\t"
                             "cmp       %%g7, %%g1\n\t"
                             "bne,pn    %%xcc, 1b\n\t"
                             " nop\n"
                             "2:"
                             : /* no outputs */
                             : "r" (cpu), "r" (mask), "r" (&page->flags),
                               "i" (PG_dcache_cpu_mask),
                               "i" (PG_dcache_cpu_shift)
                             : "g1", "g7");
}

static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
{
        unsigned long tsb_addr = (unsigned long) ent;

        if (tlb_type == cheetah_plus || tlb_type == hypervisor)
                tsb_addr = __pa(tsb_addr);

        __tsb_insert(tsb_addr, tag, pte);
}

unsigned long _PAGE_ALL_SZ_BITS __read_mostly;

static void flush_dcache(unsigned long pfn)
{
        struct page *page;

        page = pfn_to_page(pfn);
        if (page) {
                unsigned long pg_flags;

                pg_flags = page->flags;
                if (pg_flags & (1UL << PG_dcache_dirty)) {
                        int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
                                   PG_dcache_cpu_mask);
                        int this_cpu = get_cpu();

                        /* This is just to optimize away some function calls
                         * in the SMP case.
                         */
                        if (cpu == this_cpu)
                                flush_dcache_page_impl(page);
                        else
                                smp_flush_dcache_page_impl(page, cpu);

                        clear_dcache_dirty_cpu(page, cpu);

                        put_cpu();
                }
        }
}

/* mm->context.lock must be held */
static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_index,
                                    unsigned long tsb_hash_shift, unsigned long address,
                                    unsigned long tte)
{
        struct tsb *tsb = mm->context.tsb_block[tsb_index].tsb;
        unsigned long tag;

        if (unlikely(!tsb))
                return;

        tsb += ((address >> tsb_hash_shift) &
                (mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
        tag = (address >> 22UL);
        tsb_insert(tsb, tag, tte);
}
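
/* Worked example with assumed sizes: for a base-page TSB of 512
 * entries and tsb_hash_shift == PAGE_SHIFT (13 on sparc64), address
 * 0x40006000 selects slot (0x40006000 >> 13) & 511 and is tagged with
 * 0x40006000 >> 22; the address bits below 22 need not live in the
 * tag because the slot index already encodes them.
 */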

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
static inline bool is_hugetlb_pte(pte_t pte)
{
        if ((tlb_type == hypervisor &&
             (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
            (tlb_type != hypervisor &&
             (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U))
                return true;
        return false;
}
#endif

void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
        struct mm_struct *mm;
        unsigned long flags;
        pte_t pte = *ptep;

        if (tlb_type != hypervisor) {
                unsigned long pfn = pte_pfn(pte);

                if (pfn_valid(pfn))
                        flush_dcache(pfn);
        }

        mm = vma->vm_mm;

        /* Don't insert a non-valid PTE into the TSB, we'll deadlock.  */
        if (!pte_accessible(mm, pte))
                return;

        spin_lock_irqsave(&mm->context.lock, flags);

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
        if (mm->context.huge_pte_count && is_hugetlb_pte(pte))
                __update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
                                        address, pte_val(pte));
        else
#endif
                __update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
                                        address, pte_val(pte));

        spin_unlock_irqrestore(&mm->context.lock, flags);
}

void flush_dcache_page(struct page *page)
{
        struct address_space *mapping;
        int this_cpu;

        if (tlb_type == hypervisor)
                return;

        /* Do not bother with the expensive D-cache flush if it
         * is merely the zero page.  The 'bigcore' testcase in GDB
         * causes this case to run millions of times.
         */
        if (page == ZERO_PAGE(0))
                return;

        this_cpu = get_cpu();

        mapping = page_mapping(page);
        if (mapping && !mapping_mapped(mapping)) {
                int dirty = test_bit(PG_dcache_dirty, &page->flags);
                if (dirty) {
                        int dirty_cpu = dcache_dirty_cpu(page);

                        if (dirty_cpu == this_cpu)
                                goto out;
                        smp_flush_dcache_page_impl(page, dirty_cpu);
                }
                set_dcache_dirty(page, this_cpu);
        } else {
                /* We could delay the flush for the !page_mapping
                 * case too.  But that case is for exec env/arg
                 * pages and those are 99% certainly going to get
                 * faulted into the tlb (and thus flushed) anyways.
                 */
                flush_dcache_page_impl(page);
        }

out:
        put_cpu();
}
EXPORT_SYMBOL(flush_dcache_page);

void __kprobes flush_icache_range(unsigned long start, unsigned long end)
{
        /* Cheetah and Hypervisor platform cpus have coherent I-cache. */
        if (tlb_type == spitfire) {
                unsigned long kaddr;

                /* This code only runs on Spitfire cpus so this is
                 * why we can assume _PAGE_PADDR_4U.
                 */
                for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) {
                        unsigned long paddr, mask = _PAGE_PADDR_4U;

                        if (kaddr >= PAGE_OFFSET)
                                paddr = kaddr & mask;
                        else {
                                pgd_t *pgdp = pgd_offset_k(kaddr);
                                pud_t *pudp = pud_offset(pgdp, kaddr);
                                pmd_t *pmdp = pmd_offset(pudp, kaddr);
                                pte_t *ptep = pte_offset_kernel(pmdp, kaddr);

                                paddr = pte_val(*ptep) & mask;
                        }
                        __flush_icache_page(paddr);
                }
        }
}
EXPORT_SYMBOL(flush_icache_range);

void mmu_info(struct seq_file *m)
{
        static const char *pgsz_strings[] = {
                "8K", "64K", "512K", "4MB", "32MB",
                "256MB", "2GB", "16GB",
        };
        int i, printed;

        if (tlb_type == cheetah)
                seq_printf(m, "MMU Type\t: Cheetah\n");
        else if (tlb_type == cheetah_plus)
                seq_printf(m, "MMU Type\t: Cheetah+\n");
        else if (tlb_type == spitfire)
                seq_printf(m, "MMU Type\t: Spitfire\n");
        else if (tlb_type == hypervisor)
                seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n");
        else
                seq_printf(m, "MMU Type\t: ???\n");

        seq_printf(m, "MMU PGSZs\t: ");
        printed = 0;
        for (i = 0; i < ARRAY_SIZE(pgsz_strings); i++) {
                if (cpu_pgsz_mask & (1UL << i)) {
                        seq_printf(m, "%s%s",
                                   printed ? "," : "", pgsz_strings[i]);
                        printed++;
                }
        }
        seq_putc(m, '\n');

#ifdef CONFIG_DEBUG_DCFLUSH
        seq_printf(m, "DCPageFlushes\t: %d\n",
                   atomic_read(&dcpage_flushes));
#ifdef CONFIG_SMP
        seq_printf(m, "DCPageFlushesXC\t: %d\n",
                   atomic_read(&dcpage_flushes_xcall));
#endif /* CONFIG_SMP */
#endif /* CONFIG_DEBUG_DCFLUSH */
}

struct linux_prom_translation prom_trans[512] __read_mostly;
unsigned int prom_trans_ents __read_mostly;

unsigned long kern_locked_tte_data;

/* The obp translations are saved based on 8k pagesize, since obp can
 * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
 * HI_OBP_ADDRESS range are handled in ktlb.S.
 */
static inline int in_obp_range(unsigned long vaddr)
{
        return (vaddr >= LOW_OBP_ADDRESS &&
                vaddr < HI_OBP_ADDRESS);
}

static int cmp_ptrans(const void *a, const void *b)
{
        const struct linux_prom_translation *x = a, *y = b;

        if (x->virt > y->virt)
                return 1;
        if (x->virt < y->virt)
                return -1;
        return 0;
}

/* Read OBP translations property into 'prom_trans[]'.  */
static void __init read_obp_translations(void)
{
        int n, node, ents, first, last, i;

        node = prom_finddevice("/virtual-memory");
        n = prom_getproplen(node, "translations");
        if (unlikely(n == 0 || n == -1)) {
                prom_printf("prom_mappings: Couldn't get size.\n");
                prom_halt();
        }
        if (unlikely(n > sizeof(prom_trans))) {
                prom_printf("prom_mappings: Size %d is too big.\n", n);
                prom_halt();
        }

        if ((n = prom_getproperty(node, "translations",
                                  (char *)&prom_trans[0],
                                  sizeof(prom_trans))) == -1) {
                prom_printf("prom_mappings: Couldn't get property.\n");
                prom_halt();
        }

        n = n / sizeof(struct linux_prom_translation);

        ents = n;

        sort(prom_trans, ents, sizeof(struct linux_prom_translation),
             cmp_ptrans, NULL);

        /* Now kick out all the non-OBP entries.  */
        for (i = 0; i < ents; i++) {
                if (in_obp_range(prom_trans[i].virt))
                        break;
        }
        first = i;
        for (; i < ents; i++) {
                if (!in_obp_range(prom_trans[i].virt))
                        break;
        }
        last = i;

        for (i = 0; i < (last - first); i++) {
                struct linux_prom_translation *src = &prom_trans[i + first];
                struct linux_prom_translation *dest = &prom_trans[i];

                *dest = *src;
        }
        for (; i < ents; i++) {
                struct linux_prom_translation *dest = &prom_trans[i];
                dest->virt = dest->size = dest->data = 0x0UL;
        }

        prom_trans_ents = last - first;

        if (tlb_type == spitfire) {
                /* Clear diag TTE bits. */
                for (i = 0; i < prom_trans_ents; i++)
                        prom_trans[i].data &= ~0x0003fe0000000000UL;
        }

        /* Force execute bit on.  */
        for (i = 0; i < prom_trans_ents; i++)
                prom_trans[i].data |= (tlb_type == hypervisor ?
                                       _PAGE_EXEC_4V : _PAGE_EXEC_4U);
}

static void __init hypervisor_tlb_lock(unsigned long vaddr,
                                       unsigned long pte,
                                       unsigned long mmu)
{
        unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu);

        if (ret != 0) {
                prom_printf("hypervisor_tlb_lock[%lx:%x:%lx:%lx]: "
                            "errors with %lx\n", vaddr, 0, pte, mmu, ret);
                prom_halt();
        }
}

static unsigned long kern_large_tte(unsigned long paddr);

static void __init remap_kernel(void)
{
        unsigned long phys_page, tte_vaddr, tte_data;
        int i, tlb_ent = sparc64_highest_locked_tlbent();

        tte_vaddr = (unsigned long) KERNBASE;
        phys_page = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
        tte_data = kern_large_tte(phys_page);

        kern_locked_tte_data = tte_data;

        /* Now lock us into the TLBs via Hypervisor or OBP. */
        if (tlb_type == hypervisor) {
                for (i = 0; i < num_kernel_image_mappings; i++) {
                        hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
                        hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
                        tte_vaddr += 0x400000;
                        tte_data += 0x400000;
                }
        } else {
                for (i = 0; i < num_kernel_image_mappings; i++) {
                        prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr);
                        prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr);
                        tte_vaddr += 0x400000;
                        tte_data += 0x400000;
                }
                sparc64_highest_unlocked_tlb_ent = tlb_ent - i;
        }
        if (tlb_type == cheetah_plus) {
                sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
                                            CTX_CHEETAH_PLUS_NUC);
                sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
                sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
        }
}


static void __init inherit_prom_mappings(void)
{
        /* Now fixup OBP's idea about where we really are mapped. */
        printk("Remapping the kernel... ");
        remap_kernel();
        printk("done.\n");
}

void prom_world(int enter)
{
        if (!enter)
                set_fs(get_fs());

        __asm__ __volatile__("flushw");
}

void __flush_dcache_range(unsigned long start, unsigned long end)
{
        unsigned long va;

        if (tlb_type == spitfire) {
                int n = 0;

                for (va = start; va < end; va += 32) {
                        spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
                        if (++n >= 512)
                                break;
                }
        } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
                start = __pa(start);
                end = __pa(end);
                for (va = start; va < end; va += 32)
                        __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
                                             "membar #Sync"
                                             : /* no outputs */
                                             : "r" (va),
                                               "i" (ASI_DCACHE_INVALIDATE));
        }
}
EXPORT_SYMBOL(__flush_dcache_range);

/* get_new_mmu_context() uses "cache + 1".  */
DEFINE_SPINLOCK(ctx_alloc_lock);
unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
#define MAX_CTX_NR      (1UL << CTX_NR_BITS)
#define CTX_BMAP_SLOTS  BITS_TO_LONGS(MAX_CTX_NR)
DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);

/* Caller does TLB context flushing on local CPU if necessary.
 * The caller also ensures that CTX_VALID(mm->context) is false.
 *
 * We must be careful about boundary cases so that we never
 * let the user have CTX 0 (nucleus), nor ever use a CTX
 * version of zero (and thus NO_CONTEXT would not be caught
 * by version mis-match tests in mmu_context.h).
 *
 * Always invoked with interrupts disabled.
 */
void get_new_mmu_context(struct mm_struct *mm)
{
        unsigned long ctx, new_ctx;
        unsigned long orig_pgsz_bits;
        int new_version;

        spin_lock(&ctx_alloc_lock);
        orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
        ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
        new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
        new_version = 0;
        if (new_ctx >= (1 << CTX_NR_BITS)) {
                new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
                if (new_ctx >= ctx) {
                        int i;
                        new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
                                CTX_FIRST_VERSION;
                        if (new_ctx == 1)
                                new_ctx = CTX_FIRST_VERSION;

                        /* Don't call memset, for 16 entries that's just
                         * plain silly...
                         */
                        mmu_context_bmap[0] = 3;
                        mmu_context_bmap[1] = 0;
                        mmu_context_bmap[2] = 0;
                        mmu_context_bmap[3] = 0;
                        for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
                                mmu_context_bmap[i + 0] = 0;
                                mmu_context_bmap[i + 1] = 0;
                                mmu_context_bmap[i + 2] = 0;
                                mmu_context_bmap[i + 3] = 0;
                        }
                        new_version = 1;
                        goto out;
                }
        }
        mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
        new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
out:
        tlb_context_cache = new_ctx;
        mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
        spin_unlock(&ctx_alloc_lock);

        if (unlikely(new_version))
                smp_new_mmu_context_version();
}
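
/* Layout sketch, inferred from the masks used above: the low
 * CTX_NR_BITS of the context value hold the context number allocated
 * from mmu_context_bmap, and the bits above them hold the version.
 * When the number space is exhausted, the version is bumped, the
 * bitmap is reset, and smp_new_mmu_context_version() makes the other
 * cpus revalidate their contexts, so a stale context value can never
 * match a live one.
 */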

static int numa_enabled = 1;
static int numa_debug;

static int __init early_numa(char *p)
{
        if (!p)
                return 0;

        if (strstr(p, "off"))
                numa_enabled = 0;

        if (strstr(p, "debug"))
                numa_debug = 1;

        return 0;
}
early_param("numa", early_numa);

#define numadbg(f, a...) \
do {    if (numa_debug) \
                printk(KERN_INFO f, ## a); \
} while (0)

static void __init find_ramdisk(unsigned long phys_base)
{
#ifdef CONFIG_BLK_DEV_INITRD
        if (sparc_ramdisk_image || sparc_ramdisk_image64) {
                unsigned long ramdisk_image;

                /* Older versions of the bootloader only supported a
                 * 32-bit physical address for the ramdisk image
                 * location, stored at sparc_ramdisk_image.  Newer
                 * SILO versions set sparc_ramdisk_image to zero and
                 * provide a full 64-bit physical address at
                 * sparc_ramdisk_image64.
                 */
                ramdisk_image = sparc_ramdisk_image;
                if (!ramdisk_image)
                        ramdisk_image = sparc_ramdisk_image64;

                /* Another bootloader quirk.  The bootloader normalizes
                 * the physical address to KERNBASE, so we have to
                 * factor that back out and add in the lowest valid
                 * physical page address to get the true physical address.
                 */
                ramdisk_image -= KERNBASE;
                ramdisk_image += phys_base;

                numadbg("Found ramdisk at physical address 0x%lx, size %u\n",
                        ramdisk_image, sparc_ramdisk_size);

                initrd_start = ramdisk_image;
                initrd_end = ramdisk_image + sparc_ramdisk_size;

                memblock_reserve(initrd_start, sparc_ramdisk_size);

                initrd_start += PAGE_OFFSET;
                initrd_end += PAGE_OFFSET;
        }
#endif
}
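
/* Worked example, all values assumed: with KERNBASE 0x400000, a
 * phys_base of 0x20000000, and the bootloader reporting the image at
 * 0x6c0000, the true location is 0x6c0000 - 0x400000 + 0x20000000 =
 * 0x202c0000, and that range (not the KERNBASE-relative one) is what
 * gets reserved in memblock.
 */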

struct node_mem_mask {
        unsigned long mask;
        unsigned long val;
};
static struct node_mem_mask node_masks[MAX_NUMNODES];
static int num_node_masks;

#ifdef CONFIG_NEED_MULTIPLE_NODES

int numa_cpu_lookup_table[NR_CPUS];
cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];

struct mdesc_mblock {
        u64     base;
        u64     size;
        u64     offset; /* RA-to-PA */
};
static struct mdesc_mblock *mblocks;
static int num_mblocks;

static unsigned long ra_to_pa(unsigned long addr)
{
        int i;

        for (i = 0; i < num_mblocks; i++) {
                struct mdesc_mblock *m = &mblocks[i];

                if (addr >= m->base &&
                    addr < (m->base + m->size)) {
                        addr += m->offset;
                        break;
                }
        }
        return addr;
}
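
/* Example with assumed numbers: an mblock of base 0x80000000, size
 * 0x40000000 and RA-to-PA offset 0x400000000 translates real address
 * 0x90000000 to physical address 0x490000000; an address covered by
 * no mblock passes through unchanged.
 */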

static int find_node(unsigned long addr)
{
        int i;

        addr = ra_to_pa(addr);
        for (i = 0; i < num_node_masks; i++) {
                struct node_mem_mask *p = &node_masks[i];

                if ((addr & p->mask) == p->val)
                        return i;
        }
        /* The following condition has been observed on LDOM guests. */
        WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node"
                " rule. Some physical memory will be owned by node 0.");
        return 0;
}

static u64 memblock_nid_range(u64 start, u64 end, int *nid)
{
        *nid = find_node(start);
        start += PAGE_SIZE;
        while (start < end) {
                int n = find_node(start);

                if (n != *nid)
                        break;
                start += PAGE_SIZE;
        }

        if (start > end)
                start = end;

        return start;
}
#endif

/* This must be invoked after performing all of the necessary
 * memblock_set_node() calls for 'nid'.  We need to be able to get
 * correct data from get_pfn_range_for_nid().
 */
static void __init allocate_node_data(int nid)
{
        struct pglist_data *p;
        unsigned long start_pfn, end_pfn;
#ifdef CONFIG_NEED_MULTIPLE_NODES
        unsigned long paddr;

        paddr = memblock_alloc_try_nid(sizeof(struct pglist_data), SMP_CACHE_BYTES, nid);
        if (!paddr) {
                prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
                prom_halt();
        }
        NODE_DATA(nid) = __va(paddr);
        memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

        NODE_DATA(nid)->node_id = nid;
#endif

        p = NODE_DATA(nid);

        get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
        p->node_start_pfn = start_pfn;
        p->node_spanned_pages = end_pfn - start_pfn;
}

static void init_node_masks_nonnuma(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
        int i;
#endif

        numadbg("Initializing tables for non-numa.\n");

        node_masks[0].mask = node_masks[0].val = 0;
        num_node_masks = 1;

#ifdef CONFIG_NEED_MULTIPLE_NODES
        for (i = 0; i < NR_CPUS; i++)
                numa_cpu_lookup_table[i] = 0;

        cpumask_setall(&numa_cpumask_lookup_table[0]);
#endif
}

#ifdef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(numa_cpumask_lookup_table);
EXPORT_SYMBOL(node_data);

struct mdesc_mlgroup {
        u64     node;
        u64     latency;
        u64     match;
        u64     mask;
};
static struct mdesc_mlgroup *mlgroups;
static int num_mlgroups;

static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio,
                                   u32 cfg_handle)
{
        u64 arc;

        mdesc_for_each_arc(arc, md, pio, MDESC_ARC_TYPE_FWD) {
                u64 target = mdesc_arc_target(md, arc);
                const u64 *val;

                val = mdesc_get_property(md, target,
                                         "cfg-handle", NULL);
                if (val && *val == cfg_handle)
                        return 0;
        }
        return -ENODEV;
}

static int scan_arcs_for_cfg_handle(struct mdesc_handle *md, u64 grp,
                                    u32 cfg_handle)
{
        u64 arc, candidate, best_latency = ~(u64)0;

        candidate = MDESC_NODE_NULL;
        mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
                u64 target = mdesc_arc_target(md, arc);
                const char *name = mdesc_node_name(md, target);
                const u64 *val;

                if (strcmp(name, "pio-latency-group"))
                        continue;

                val = mdesc_get_property(md, target, "latency", NULL);
                if (!val)
                        continue;

                if (*val < best_latency) {
                        candidate = target;
                        best_latency = *val;
                }
        }

        if (candidate == MDESC_NODE_NULL)
                return -ENODEV;

        return scan_pio_for_cfg_handle(md, candidate, cfg_handle);
}

int of_node_to_nid(struct device_node *dp)
{
        const struct linux_prom64_registers *regs;
        struct mdesc_handle *md;
        u32 cfg_handle;
        int count, nid;
        u64 grp;

        /* This is the right thing to do on currently supported
         * SUN4U NUMA platforms as well, as the PCI controller does
         * not sit behind any particular memory controller.
         */
        if (!mlgroups)
                return -1;

        regs = of_get_property(dp, "reg", NULL);
        if (!regs)
                return -1;

        cfg_handle = (regs->phys_addr >> 32UL) & 0x0fffffff;

        md = mdesc_grab();

        count = 0;
        nid = -1;
        mdesc_for_each_node_by_name(md, grp, "group") {
                if (!scan_arcs_for_cfg_handle(md, grp, cfg_handle)) {
                        nid = count;
                        break;
                }
                count++;
        }

        mdesc_release(md);

        return nid;
}

static void __init add_node_ranges(void)
{
        struct memblock_region *reg;

        for_each_memblock(memory, reg) {
                unsigned long size = reg->size;
                unsigned long start, end;

                start = reg->base;
                end = start + size;
                while (start < end) {
                        unsigned long this_end;
                        int nid;

                        this_end = memblock_nid_range(start, end, &nid);

                        numadbg("Setting memblock NUMA node nid[%d] "
                                "start[%lx] end[%lx]\n",
                                nid, start, this_end);

                        memblock_set_node(start, this_end - start,
                                          &memblock.memory, nid);
                        start = this_end;
                }
        }
}

static int __init grab_mlgroups(struct mdesc_handle *md)
{
        unsigned long paddr;
        int count = 0;
        u64 node;

        mdesc_for_each_node_by_name(md, node, "memory-latency-group")
                count++;
        if (!count)
                return -ENOENT;

        paddr = memblock_alloc(count * sizeof(struct mdesc_mlgroup),
                          SMP_CACHE_BYTES);
        if (!paddr)
                return -ENOMEM;

        mlgroups = __va(paddr);
        num_mlgroups = count;

        count = 0;
        mdesc_for_each_node_by_name(md, node, "memory-latency-group") {
                struct mdesc_mlgroup *m = &mlgroups[count++];
                const u64 *val;

                m->node = node;

                val = mdesc_get_property(md, node, "latency", NULL);
                m->latency = *val;
                val = mdesc_get_property(md, node, "address-match", NULL);
                m->match = *val;
                val = mdesc_get_property(md, node, "address-mask", NULL);
                m->mask = *val;

                numadbg("MLGROUP[%d]: node[%llx] latency[%llx] "
                        "match[%llx] mask[%llx]\n",
                        count - 1, m->node, m->latency, m->match, m->mask);
        }

        return 0;
}

static int __init grab_mblocks(struct mdesc_handle *md)
{
        unsigned long paddr;
        int count = 0;
        u64 node;

        mdesc_for_each_node_by_name(md, node, "mblock")
                count++;
        if (!count)
                return -ENOENT;

        paddr = memblock_alloc(count * sizeof(struct mdesc_mblock),
                          SMP_CACHE_BYTES);
        if (!paddr)
                return -ENOMEM;

        mblocks = __va(paddr);
        num_mblocks = count;

        count = 0;
        mdesc_for_each_node_by_name(md, node, "mblock") {
                struct mdesc_mblock *m = &mblocks[count++];
                const u64 *val;

                val = mdesc_get_property(md, node, "base", NULL);
                m->base = *val;
                val = mdesc_get_property(md, node, "size", NULL);
                m->size = *val;
                val = mdesc_get_property(md, node,
                                         "address-congruence-offset", NULL);

                /* The address-congruence-offset property is optional.
                 * Explicitly zero it to identify this.
                 */
                if (val)
                        m->offset = *val;
                else
                        m->offset = 0UL;

                numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n",
                        count - 1, m->base, m->size, m->offset);
        }

        return 0;
}

static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md,
                                               u64 grp, cpumask_t *mask)
{
        u64 arc;

        cpumask_clear(mask);

        mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_BACK) {
                u64 target = mdesc_arc_target(md, arc);
                const char *name = mdesc_node_name(md, target);
                const u64 *id;

                if (strcmp(name, "cpu"))
                        continue;
                id = mdesc_get_property(md, target, "id", NULL);
                if (*id < nr_cpu_ids)
                        cpumask_set_cpu(*id, mask);
        }
}

static struct mdesc_mlgroup * __init find_mlgroup(u64 node)
{
        int i;

        for (i = 0; i < num_mlgroups; i++) {
                struct mdesc_mlgroup *m = &mlgroups[i];
                if (m->node == node)
                        return m;
        }
        return NULL;
}

int __node_distance(int from, int to)
{
        if ((from >= MAX_NUMNODES) || (to >= MAX_NUMNODES)) {
                pr_warn("Returning default NUMA distance value for %d->%d\n",
                        from, to);
                return (from == to) ? LOCAL_DISTANCE : REMOTE_DISTANCE;
        }
        return numa_latency[from][to];
}

static int find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)
{
        int i;

        for (i = 0; i < MAX_NUMNODES; i++) {
                struct node_mem_mask *n = &node_masks[i];

                if ((grp->mask == n->mask) && (grp->match == n->val))
                        break;
        }
        return i;
}

static void find_numa_latencies_for_group(struct mdesc_handle *md, u64 grp,
                                          int index)
{
        u64 arc;

        mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
                int tnode;
                u64 target = mdesc_arc_target(md, arc);
                struct mdesc_mlgroup *m = find_mlgroup(target);

                if (!m)
                        continue;
                tnode = find_best_numa_node_for_mlgroup(m);
                if (tnode == MAX_NUMNODES)
                        continue;
                numa_latency[index][tnode] = m->latency;
        }
}

static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp,
                                      int index)
{
        struct mdesc_mlgroup *candidate = NULL;
        u64 arc, best_latency = ~(u64)0;
        struct node_mem_mask *n;

        mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
                u64 target = mdesc_arc_target(md, arc);
                struct mdesc_mlgroup *m = find_mlgroup(target);
                if (!m)
                        continue;
                if (m->latency < best_latency) {
                        candidate = m;
                        best_latency = m->latency;
                }
        }
        if (!candidate)
                return -ENOENT;

        if (num_node_masks != index) {
                printk(KERN_ERR "Inconsistent NUMA state, "
                       "index[%d] != num_node_masks[%d]\n",
                       index, num_node_masks);
                return -EINVAL;
        }

        n = &node_masks[num_node_masks++];

        n->mask = candidate->mask;
        n->val = candidate->match;

        numadbg("NUMA NODE[%d]: mask[%lx] val[%lx] (latency[%llx])\n",
                index, n->mask, n->val, candidate->latency);

        return 0;
}

static int __init numa_parse_mdesc_group(struct mdesc_handle *md, u64 grp,
                                         int index)
{
        cpumask_t mask;
        int cpu;

        numa_parse_mdesc_group_cpus(md, grp, &mask);

        for_each_cpu(cpu, &mask)
                numa_cpu_lookup_table[cpu] = index;
        cpumask_copy(&numa_cpumask_lookup_table[index], &mask);

        if (numa_debug) {
                printk(KERN_INFO "NUMA GROUP[%d]: cpus [ ", index);
                for_each_cpu(cpu, &mask)
                        printk("%d ", cpu);
                printk("]\n");
        }

        return numa_attach_mlgroup(md, grp, index);
}

static int __init numa_parse_mdesc(void)
{
        struct mdesc_handle *md = mdesc_grab();
        int i, j, err, count;
        u64 node;

        node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
        if (node == MDESC_NODE_NULL) {
                mdesc_release(md);
                return -ENOENT;
        }

        err = grab_mblocks(md);
        if (err < 0)
                goto out;

        err = grab_mlgroups(md);
        if (err < 0)
                goto out;

        count = 0;
        mdesc_for_each_node_by_name(md, node, "group") {
                err = numa_parse_mdesc_group(md, node, count);
                if (err < 0)
                        break;
                count++;
        }

        count = 0;
        mdesc_for_each_node_by_name(md, node, "group") {
                find_numa_latencies_for_group(md, node, count);
                count++;
        }

        /* Normalize numa latency matrix according to ACPI SLIT spec. */
        for (i = 0; i < MAX_NUMNODES; i++) {
                u64 self_latency = numa_latency[i][i];

                for (j = 0; j < MAX_NUMNODES; j++) {
                        numa_latency[i][j] =
                                (numa_latency[i][j] * LOCAL_DISTANCE) /
                                self_latency;
                }
        }
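
        /* Worked example with assumed raw latencies: if a node measures
         * 40 to itself and 80 to a remote node, the row normalizes to
         * 40 * 10 / 40 = 10 (LOCAL_DISTANCE) and 80 * 10 / 40 = 20,
         * matching the SLIT convention that local distance is 10.
         */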

        add_node_ranges();

        for (i = 0; i < num_node_masks; i++) {
                allocate_node_data(i);
                node_set_online(i);
        }

        err = 0;
out:
        mdesc_release(md);
        return err;
}

static int __init numa_parse_jbus(void)
{
        unsigned long cpu, index;

        /* NUMA node id is encoded in bits 36 and higher, and there is
         * a 1-to-1 mapping from CPU ID to NUMA node ID.
         */
        index = 0;
        for_each_present_cpu(cpu) {
                numa_cpu_lookup_table[cpu] = index;
                cpumask_copy(&numa_cpumask_lookup_table[index], cpumask_of(cpu));
                node_masks[index].mask = ~((1UL << 36UL) - 1UL);
                node_masks[index].val = cpu << 36UL;

                index++;
        }
        num_node_masks = index;

        add_node_ranges();

        for (index = 0; index < num_node_masks; index++) {
                allocate_node_data(index);
                node_set_online(index);
        }

        return 0;
}
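
/* Example of the JBUS encoding (cpu ids assumed): cpu 0 claims
 * physical addresses whose bits 36 and up are 0, cpu 1 those whose
 * bits 36 and up are 1, i.e. node_masks[1].val == 1UL << 36 ==
 * 0x1000000000 under mask ~0xfffffffff.
 */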

static int __init numa_parse_sun4u(void)
{
        if (tlb_type == cheetah || tlb_type == cheetah_plus) {
                unsigned long ver;

                __asm__ ("rdpr %%ver, %0" : "=r" (ver));
                if ((ver >> 32UL) == __JALAPENO_ID ||
                    (ver >> 32UL) == __SERRANO_ID)
                        return numa_parse_jbus();
        }
        return -1;
}

static int __init bootmem_init_numa(void)
{
        int i, j;
        int err = -1;

        numadbg("bootmem_init_numa()\n");

        /* Some sane defaults for numa latency values */
        for (i = 0; i < MAX_NUMNODES; i++) {
                for (j = 0; j < MAX_NUMNODES; j++)
                        numa_latency[i][j] = (i == j) ?
                                LOCAL_DISTANCE : REMOTE_DISTANCE;
        }

        if (numa_enabled) {
                if (tlb_type == hypervisor)
                        err = numa_parse_mdesc();
                else
                        err = numa_parse_sun4u();
        }
        return err;
}

#else

static int bootmem_init_numa(void)
{
        return -1;
}

#endif

static void __init bootmem_init_nonnuma(void)
{
        unsigned long top_of_ram = memblock_end_of_DRAM();
        unsigned long total_ram = memblock_phys_mem_size();

        numadbg("bootmem_init_nonnuma()\n");

        printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
               top_of_ram, total_ram);
        printk(KERN_INFO "Memory hole size: %ldMB\n",
               (top_of_ram - total_ram) >> 20);

        init_node_masks_nonnuma();
        memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
        allocate_node_data(0);
        node_set_online(0);
}

static unsigned long __init bootmem_init(unsigned long phys_base)
{
        unsigned long end_pfn;

        end_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
        max_pfn = max_low_pfn = end_pfn;
        min_low_pfn = (phys_base >> PAGE_SHIFT);

        if (bootmem_init_numa() < 0)
                bootmem_init_nonnuma();

        /* Dump memblock with node info. */
        memblock_dump_all();

        /* XXX cpu notifier XXX */

        sparse_memory_present_with_active_regions(MAX_NUMNODES);
        sparse_init();

        return end_pfn;
}

static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
static int pall_ents __initdata;

static unsigned long max_phys_bits = 40;

bool kern_addr_valid(unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if ((long)addr < 0L) {
                unsigned long pa = __pa(addr);

                if ((addr >> max_phys_bits) != 0UL)
                        return false;

                return pfn_valid(pa >> PAGE_SHIFT);
        }

        if (addr >= (unsigned long) KERNBASE &&
            addr < (unsigned long)&_end)
                return true;

        pgd = pgd_offset_k(addr);
        if (pgd_none(*pgd))
                return 0;

        pud = pud_offset(pgd, addr);
        if (pud_none(*pud))
                return 0;

        if (pud_large(*pud))
                return pfn_valid(pud_pfn(*pud));

        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return 0;

        if (pmd_large(*pmd))
                return pfn_valid(pmd_pfn(*pmd));

        pte = pte_offset_kernel(pmd, addr);
        if (pte_none(*pte))
                return 0;

        return pfn_valid(pte_pfn(*pte));
}
EXPORT_SYMBOL(kern_addr_valid);

static unsigned long __ref kernel_map_hugepud(unsigned long vstart,
                                              unsigned long vend,
                                              pud_t *pud)
{
        const unsigned long mask16gb = (1UL << 34) - 1UL;
        u64 pte_val = vstart;

        /* Each PUD is 8GB */
        if ((vstart & mask16gb) ||
            (vend - vstart <= mask16gb)) {
                pte_val ^= kern_linear_pte_xor[2];
                pud_val(*pud) = pte_val | _PAGE_PUD_HUGE;

                return vstart + PUD_SIZE;
        }

        pte_val ^= kern_linear_pte_xor[3];
        pte_val |= _PAGE_PUD_HUGE;

        vend = vstart + mask16gb + 1UL;
        while (vstart < vend) {
                pud_val(*pud) = pte_val;

                pte_val += PUD_SIZE;
                vstart += PUD_SIZE;
                pud++;
        }
        return vstart;
}
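
/* Sketch of the two cases above: a range that is not 16GB aligned or
 * is shorter than 16GB gets a single 8GB PUD entry using the 2GB TTE
 * xor (slot 2); a 16GB-aligned, >=16GB range gets two PUD entries per
 * 16GB page, both carrying the slot-3 (16GB) TTE and stepped by
 * PUD_SIZE.  Slot numbers follow the table at the top of this file.
 */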

static bool kernel_can_map_hugepud(unsigned long vstart, unsigned long vend,
                                   bool guard)
{
        if (guard && !(vstart & ~PUD_MASK) && (vend - vstart) >= PUD_SIZE)
                return true;

        return false;
}

static unsigned long __ref kernel_map_hugepmd(unsigned long vstart,
                                              unsigned long vend,
                                              pmd_t *pmd)
{
        const unsigned long mask256mb = (1UL << 28) - 1UL;
        const unsigned long mask2gb = (1UL << 31) - 1UL;
        u64 pte_val = vstart;

        /* Each PMD is 8MB */
        if ((vstart & mask256mb) ||
            (vend - vstart <= mask256mb)) {
                pte_val ^= kern_linear_pte_xor[0];
                pmd_val(*pmd) = pte_val | _PAGE_PMD_HUGE;

                return vstart + PMD_SIZE;
        }

        if ((vstart & mask2gb) ||
            (vend - vstart <= mask2gb)) {
                pte_val ^= kern_linear_pte_xor[1];
                pte_val |= _PAGE_PMD_HUGE;
                vend = vstart + mask256mb + 1UL;
        } else {
                pte_val ^= kern_linear_pte_xor[2];
                pte_val |= _PAGE_PMD_HUGE;
                vend = vstart + mask2gb + 1UL;
        }

        while (vstart < vend) {
                pmd_val(*pmd) = pte_val;

                pte_val += PMD_SIZE;
                vstart += PMD_SIZE;
                pmd++;
        }

        return vstart;
}
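
/* Selection sketch (slot numbers per the table at the top of this
 * file): a range that is not 256MB aligned falls back to one 8MB PMD
 * entry with the 4MB TTE xor (slot 0); a 256MB-aligned but not
 * 2GB-aligned range uses the 256MB xor (slot 1) across 32 PMD
 * entries; anything larger and 2GB aligned uses the 2GB xor (slot 2)
 * across 256 PMD entries.
 */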
1563
1564 static bool kernel_can_map_hugepmd(unsigned long vstart, unsigned long vend,
1565                                    bool guard)
1566 {
1567         if (guard && !(vstart & ~PMD_MASK) && (vend - vstart) >= PMD_SIZE)
1568                 return true;
1569
1570         return false;
1571 }
1572
1573 static unsigned long __ref kernel_map_range(unsigned long pstart,
1574                                             unsigned long pend, pgprot_t prot,
1575                                             bool use_huge)
1576 {
1577         unsigned long vstart = PAGE_OFFSET + pstart;
1578         unsigned long vend = PAGE_OFFSET + pend;
1579         unsigned long alloc_bytes = 0UL;
1580
1581         if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
1582                 prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
1583                             vstart, vend);
1584                 prom_halt();
1585         }
1586
1587         while (vstart < vend) {
1588                 unsigned long this_end, paddr = __pa(vstart);
1589                 pgd_t *pgd = pgd_offset_k(vstart);
1590                 pud_t *pud;
1591                 pmd_t *pmd;
1592                 pte_t *pte;
1593
1594                 if (pgd_none(*pgd)) {
1595                         pud_t *new;
1596
1597                         new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1598                         alloc_bytes += PAGE_SIZE;
1599                         pgd_populate(&init_mm, pgd, new);
1600                 }
1601                 pud = pud_offset(pgd, vstart);
1602                 if (pud_none(*pud)) {
1603                         pmd_t *new;
1604
1605                         if (kernel_can_map_hugepud(vstart, vend, use_huge)) {
1606                                 vstart = kernel_map_hugepud(vstart, vend, pud);
1607                                 continue;
1608                         }
1609                         new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1610                         alloc_bytes += PAGE_SIZE;
1611                         pud_populate(&init_mm, pud, new);
1612                 }
1613
1614                 pmd = pmd_offset(pud, vstart);
1615                 if (pmd_none(*pmd)) {
1616                         pte_t *new;
1617
1618                         if (kernel_can_map_hugepmd(vstart, vend, use_huge)) {
1619                                 vstart = kernel_map_hugepmd(vstart, vend, pmd);
1620                                 continue;
1621                         }
1622                         new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1623                         alloc_bytes += PAGE_SIZE;
1624                         pmd_populate_kernel(&init_mm, pmd, new);
1625                 }
1626
1627                 pte = pte_offset_kernel(pmd, vstart);
1628                 this_end = (vstart + PMD_SIZE) & PMD_MASK;
1629                 if (this_end > vend)
1630                         this_end = vend;
1631
1632                 while (vstart < this_end) {
1633                         pte_val(*pte) = (paddr | pgprot_val(prot));
1634
1635                         vstart += PAGE_SIZE;
1636                         paddr += PAGE_SIZE;
1637                         pte++;
1638                 }
1639         }
1640
1641         return alloc_bytes;
1642 }
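/* A rough sketch of the walk above (assuming PAGE_OFFSET is already set
 * up): mapping a single 8K page with
 *
 *	kernel_map_range(0x0UL, 0x2000UL, PAGE_KERNEL, false);
 *
 * descends pgd -> pud -> pmd, allocating one PAGE_SIZE table at each
 * empty level, and then fills in one 8K PTE.  With use_huge set, the
 * kernel_can_map_hugepud/hugepmd checks short-circuit the descent and
 * plant a single huge entry instead.
 */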
1643
1644 static void __init flush_all_kernel_tsbs(void)
1645 {
1646         int i;
1647
1648         for (i = 0; i < KERNEL_TSB_NENTRIES; i++) {
1649                 struct tsb *ent = &swapper_tsb[i];
1650
1651                 ent->tag = (1UL << TSB_TAG_INVALID_BIT);
1652         }
1653 #ifndef CONFIG_DEBUG_PAGEALLOC
1654         for (i = 0; i < KERNEL_TSB4M_NENTRIES; i++) {
1655                 struct tsb *ent = &swapper_4m_tsb[i];
1656
1657                 ent->tag = (1UL << TSB_TAG_INVALID_BIT);
1658         }
1659 #endif
1660 }
1661
1662 extern unsigned int kvmap_linear_patch[1];
1663
1664 static void __init kernel_physical_mapping_init(void)
1665 {
1666         unsigned long i, mem_alloced = 0UL;
1667         bool use_huge = true;
1668
1669 #ifdef CONFIG_DEBUG_PAGEALLOC
1670         use_huge = false;
1671 #endif
1672         for (i = 0; i < pall_ents; i++) {
1673                 unsigned long phys_start, phys_end;
1674
1675                 phys_start = pall[i].phys_addr;
1676                 phys_end = phys_start + pall[i].reg_size;
1677
1678                 mem_alloced += kernel_map_range(phys_start, phys_end,
1679                                                 PAGE_KERNEL, use_huge);
1680         }
1681
1682         printk("Allocated %ld bytes for kernel page tables.\n",
1683                mem_alloced);
1684
1685         kvmap_linear_patch[0] = 0x01000000; /* nop */
1686         flushi(&kvmap_linear_patch[0]);
1687
1688         flush_all_kernel_tsbs();
1689
1690         __flush_tlb_all();
1691 }
1692
1693 #ifdef CONFIG_DEBUG_PAGEALLOC
1694 void __kernel_map_pages(struct page *page, int numpages, int enable)
1695 {
1696         unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
1697         unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);
1698
1699         kernel_map_range(phys_start, phys_end,
1700                          (enable ? PAGE_KERNEL : __pgprot(0)), false);
1701
1702         flush_tsb_kernel_range(PAGE_OFFSET + phys_start,
1703                                PAGE_OFFSET + phys_end);
1704
1705         /* We should perform an IPI and flush all TLBs here,
1706          * but that can deadlock, so we only flush the current cpu.
1707          */
1708         __flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
1709                                  PAGE_OFFSET + phys_end);
1710 }
1711 #endif
1712
1713 unsigned long __init find_ecache_flush_span(unsigned long size)
1714 {
1715         int i;
1716
1717         for (i = 0; i < pavail_ents; i++) {
1718                 if (pavail[i].reg_size >= size)
1719                         return pavail[i].phys_addr;
1720         }
1721
1722         return ~0UL;
1723 }
1724
1725 unsigned long PAGE_OFFSET;
1726 EXPORT_SYMBOL(PAGE_OFFSET);
1727
1728 unsigned long VMALLOC_END   = 0x0000010000000000UL;
1729 EXPORT_SYMBOL(VMALLOC_END);
1730
1731 unsigned long sparc64_va_hole_top =    0xfffff80000000000UL;
1732 unsigned long sparc64_va_hole_bottom = 0x0000080000000000UL;
1733
1734 static void __init setup_page_offset(void)
1735 {
1736         if (tlb_type == cheetah || tlb_type == cheetah_plus) {
1737                 /* Cheetah/Panther support a full 64-bit virtual
1738                  * address, so we can use all that our page tables
1739                  * support.
1740                  */
1741                 sparc64_va_hole_top =    0xfff0000000000000UL;
1742                 sparc64_va_hole_bottom = 0x0010000000000000UL;
1743
1744                 max_phys_bits = 42;
1745         } else if (tlb_type == hypervisor) {
1746                 switch (sun4v_chip_type) {
1747                 case SUN4V_CHIP_NIAGARA1:
1748                 case SUN4V_CHIP_NIAGARA2:
1749                         /* T1 and T2 support 48-bit virtual addresses.  */
1750                         sparc64_va_hole_top =    0xffff800000000000UL;
1751                         sparc64_va_hole_bottom = 0x0000800000000000UL;
1752
1753                         max_phys_bits = 39;
1754                         break;
1755                 case SUN4V_CHIP_NIAGARA3:
1756                         /* T3 supports 48-bit virtual addresses.  */
1757                         sparc64_va_hole_top =    0xffff800000000000UL;
1758                         sparc64_va_hole_bottom = 0x0000800000000000UL;
1759
1760                         max_phys_bits = 43;
1761                         break;
1762                 case SUN4V_CHIP_NIAGARA4:
1763                 case SUN4V_CHIP_NIAGARA5:
1764                 case SUN4V_CHIP_SPARC64X:
1765                 case SUN4V_CHIP_SPARC_M6:
1766                         /* T4 and later support 52-bit virtual addresses.  */
1767                         sparc64_va_hole_top =    0xfff8000000000000UL;
1768                         sparc64_va_hole_bottom = 0x0008000000000000UL;
1769                         max_phys_bits = 47;
1770                         break;
1771                 case SUN4V_CHIP_SPARC_M7:
1772                 default:
1773                         /* M7 and later support 52-bit virtual addresses.  */
1774                         sparc64_va_hole_top =    0xfff8000000000000UL;
1775                         sparc64_va_hole_bottom = 0x0008000000000000UL;
1776                         max_phys_bits = 49;
1777                         break;
1778                 }
1779         }
1780
1781         if (max_phys_bits > MAX_PHYS_ADDRESS_BITS) {
1782                 prom_printf("MAX_PHYS_ADDRESS_BITS is too small, need %lu\n",
1783                             max_phys_bits);
1784                 prom_halt();
1785         }
1786
1787         PAGE_OFFSET = sparc64_va_hole_top;
1788         VMALLOC_END = ((sparc64_va_hole_bottom >> 1) +
1789                        (sparc64_va_hole_bottom >> 2));
1790
1791         pr_info("MM: PAGE_OFFSET is 0x%016lx (max_phys_bits == %lu)\n",
1792                 PAGE_OFFSET, max_phys_bits);
1793         pr_info("MM: VMALLOC [0x%016lx --> 0x%016lx]\n",
1794                 VMALLOC_START, VMALLOC_END);
1795         pr_info("MM: VMEMMAP [0x%016lx --> 0x%016lx]\n",
1796                 VMEMMAP_BASE, VMEMMAP_BASE << 1);
1797 }
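/* Worked example, using the M7 values above:
 *
 *	sparc64_va_hole_bottom	= 0x0008000000000000
 *	PAGE_OFFSET		= sparc64_va_hole_top
 *				= 0xfff8000000000000
 *	VMALLOC_END		= (hole_bottom >> 1) + (hole_bottom >> 2)
 *				= 0x0004000000000000 + 0x0002000000000000
 *				= 0x0006000000000000
 *
 * i.e. the vmalloc area is allowed to consume up to three quarters of
 * the lower half of the virtual address space.
 */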
1798
1799 static void __init tsb_phys_patch(void)
1800 {
1801         struct tsb_ldquad_phys_patch_entry *pquad;
1802         struct tsb_phys_patch_entry *p;
1803
1804         pquad = &__tsb_ldquad_phys_patch;
1805         while (pquad < &__tsb_ldquad_phys_patch_end) {
1806                 unsigned long addr = pquad->addr;
1807
1808                 if (tlb_type == hypervisor)
1809                         *(unsigned int *) addr = pquad->sun4v_insn;
1810                 else
1811                         *(unsigned int *) addr = pquad->sun4u_insn;
1812                 wmb();
1813                 __asm__ __volatile__("flush     %0"
1814                                      : /* no outputs */
1815                                      : "r" (addr));
1816
1817                 pquad++;
1818         }
1819
1820         p = &__tsb_phys_patch;
1821         while (p < &__tsb_phys_patch_end) {
1822                 unsigned long addr = p->addr;
1823
1824                 *(unsigned int *) addr = p->insn;
1825                 wmb();
1826                 __asm__ __volatile__("flush     %0"
1827                                      : /* no outputs */
1828                                      : "r" (addr));
1829
1830                 p++;
1831         }
1832 }
1833
1834 /* Don't mark as init, we give this to the Hypervisor.  */
1835 #ifndef CONFIG_DEBUG_PAGEALLOC
1836 #define NUM_KTSB_DESCR  2
1837 #else
1838 #define NUM_KTSB_DESCR  1
1839 #endif
1840 static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR];
1841
1842 /* The swapper TSBs are loaded with a base sequence of:
1843  *
1844  *      sethi   %uhi(SYMBOL), REG1
1845  *      sethi   %hi(SYMBOL), REG2
1846  *      or      REG1, %ulo(SYMBOL), REG1
1847  *      or      REG2, %lo(SYMBOL), REG2
1848  *      sllx    REG1, 32, REG1
1849  *      or      REG1, REG2, REG1
1850  *
1851  * When we use physical addressing for the TSB accesses, we patch the
1852  * first four instructions in the above sequence.
1853  */
1854
1855 static void patch_one_ktsb_phys(unsigned int *start, unsigned int *end, unsigned long pa)
1856 {
1857         unsigned long high_bits, low_bits;
1858
1859         high_bits = (pa >> 32) & 0xffffffff;
1860         low_bits = (pa >> 0) & 0xffffffff;
1861
1862         while (start < end) {
1863                 unsigned int *ia = (unsigned int *)(unsigned long)*start;
1864
1865                 ia[0] = (ia[0] & ~0x3fffff) | (high_bits >> 10);
1866                 __asm__ __volatile__("flush     %0" : : "r" (ia));
1867
1868                 ia[1] = (ia[1] & ~0x3fffff) | (low_bits >> 10);
1869                 __asm__ __volatile__("flush     %0" : : "r" (ia + 1));
1870
1871                 ia[2] = (ia[2] & ~0x1fff) | (high_bits & 0x3ff);
1872                 __asm__ __volatile__("flush     %0" : : "r" (ia + 2));
1873
1874                 ia[3] = (ia[3] & ~0x1fff) | (low_bits & 0x3ff);
1875                 __asm__ __volatile__("flush     %0" : : "r" (ia + 3));
1876
1877                 start++;
1878         }
1879 }
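/* The masks above mirror the SPARC instruction encodings: sethi carries
 * a 22-bit immediate in bits 21:0 (cleared with ~0x3fffff and refilled
 * with the top 22 bits of each half, hence the >> 10), while or uses a
 * 13-bit simm field (cleared with ~0x1fff), of which only the low 10
 * bits (& 0x3ff) of each half are needed to complete the address.
 */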
1880
1881 static void ktsb_phys_patch(void)
1882 {
1883         extern unsigned int __swapper_tsb_phys_patch;
1884         extern unsigned int __swapper_tsb_phys_patch_end;
1885         unsigned long ktsb_pa;
1886
1887         ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
1888         patch_one_ktsb_phys(&__swapper_tsb_phys_patch,
1889                             &__swapper_tsb_phys_patch_end, ktsb_pa);
1890 #ifndef CONFIG_DEBUG_PAGEALLOC
1891         {
1892         extern unsigned int __swapper_4m_tsb_phys_patch;
1893         extern unsigned int __swapper_4m_tsb_phys_patch_end;
1894         ktsb_pa = (kern_base +
1895                    ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
1896         patch_one_ktsb_phys(&__swapper_4m_tsb_phys_patch,
1897                             &__swapper_4m_tsb_phys_patch_end, ktsb_pa);
1898         }
1899 #endif
1900 }
1901
1902 static void __init sun4v_ktsb_init(void)
1903 {
1904         unsigned long ktsb_pa;
1905
1906         /* First KTSB for PAGE_SIZE mappings.  */
1907         ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
1908
1909         switch (PAGE_SIZE) {
1910         case 8 * 1024:
1911         default:
1912                 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K;
1913                 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K;
1914                 break;
1915
1916         case 64 * 1024:
1917                 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K;
1918                 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K;
1919                 break;
1920
1921         case 512 * 1024:
1922                 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K;
1923                 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K;
1924                 break;
1925
1926         case 4 * 1024 * 1024:
1927                 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB;
1928                 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB;
1929                 break;
1930         }
1931
1932         ktsb_descr[0].assoc = 1;
1933         ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES;
1934         ktsb_descr[0].ctx_idx = 0;
1935         ktsb_descr[0].tsb_base = ktsb_pa;
1936         ktsb_descr[0].resv = 0;
1937
1938 #ifndef CONFIG_DEBUG_PAGEALLOC
1939         /* Second KTSB for 4MB/256MB/2GB/16GB mappings.  */
1940         ktsb_pa = (kern_base +
1941                    ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
1942
1943         ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB;
1944         ktsb_descr[1].pgsz_mask = ((HV_PGSZ_MASK_4MB |
1945                                     HV_PGSZ_MASK_256MB |
1946                                     HV_PGSZ_MASK_2GB |
1947                                     HV_PGSZ_MASK_16GB) &
1948                                    cpu_pgsz_mask);
1949         ktsb_descr[1].assoc = 1;
1950         ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES;
1951         ktsb_descr[1].ctx_idx = 0;
1952         ktsb_descr[1].tsb_base = ktsb_pa;
1953         ktsb_descr[1].resv = 0;
1954 #endif
1955 }
1956
1957 void sun4v_ktsb_register(void)
1958 {
1959         unsigned long pa, ret;
1960
1961         pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);
1962
1963         ret = sun4v_mmu_tsb_ctx0(NUM_KTSB_DESCR, pa);
1964         if (ret != 0) {
1965                 prom_printf("hypervisor_mmu_tsb_ctx0[%lx]: "
1966                             "errors with %lx\n", pa, ret);
1967                 prom_halt();
1968         }
1969 }
1970
1971 static void __init sun4u_linear_pte_xor_finalize(void)
1972 {
1973 #ifndef CONFIG_DEBUG_PAGEALLOC
1974         /* This is where we would add Panther support for
1975          * 32MB and 256MB pages.
1976          */
1977 #endif
1978 }
1979
1980 static void __init sun4v_linear_pte_xor_finalize(void)
1981 {
1982         unsigned long pagecv_flag;
1983
1984         /* Bit 9 of the TTE is no longer the CV bit on the M7 processor;
1985          * it instead enables MCD errors.  Do not set bit 9 on M7.
1986          */
1987         switch (sun4v_chip_type) {
1988         case SUN4V_CHIP_SPARC_M7:
1989                 pagecv_flag = 0x00;
1990                 break;
1991         default:
1992                 pagecv_flag = _PAGE_CV_4V;
1993                 break;
1994         }
1995 #ifndef CONFIG_DEBUG_PAGEALLOC
1996         if (cpu_pgsz_mask & HV_PGSZ_MASK_256MB) {
1997                 kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
1998                         PAGE_OFFSET;
1999                 kern_linear_pte_xor[1] |= (_PAGE_CP_4V | pagecv_flag |
2000                                            _PAGE_P_4V | _PAGE_W_4V);
2001         } else {
2002                 kern_linear_pte_xor[1] = kern_linear_pte_xor[0];
2003         }
2004
2005         if (cpu_pgsz_mask & HV_PGSZ_MASK_2GB) {
2006                 kern_linear_pte_xor[2] = (_PAGE_VALID | _PAGE_SZ2GB_4V) ^
2007                         PAGE_OFFSET;
2008                 kern_linear_pte_xor[2] |= (_PAGE_CP_4V | pagecv_flag |
2009                                            _PAGE_P_4V | _PAGE_W_4V);
2010         } else {
2011                 kern_linear_pte_xor[2] = kern_linear_pte_xor[1];
2012         }
2013
2014         if (cpu_pgsz_mask & HV_PGSZ_MASK_16GB) {
2015                 kern_linear_pte_xor[3] = (_PAGE_VALID | _PAGE_SZ16GB_4V) ^
2016                         PAGE_OFFSET;
2017                 kern_linear_pte_xor[3] |= (_PAGE_CP_4V | pagecv_flag |
2018                                            _PAGE_P_4V | _PAGE_W_4V);
2019         } else {
2020                 kern_linear_pte_xor[3] = kern_linear_pte_xor[2];
2021         }
2022 #endif
2023 }
2024
2025 /* paging_init() sets up the page tables */
2026
2027 static unsigned long last_valid_pfn;
2028
2029 static void sun4u_pgprot_init(void);
2030 static void sun4v_pgprot_init(void);
2031
2032 static phys_addr_t __init available_memory(void)
2033 {
2034         phys_addr_t available = 0ULL;
2035         phys_addr_t pa_start, pa_end;
2036         u64 i;
2037
2038         for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &pa_start,
2039                                 &pa_end, NULL)
2040                 available = available + (pa_end  - pa_start);
2041
2042         return available;
2043 }
2044
2045 #define _PAGE_CACHE_4U  (_PAGE_CP_4U | _PAGE_CV_4U)
2046 #define _PAGE_CACHE_4V  (_PAGE_CP_4V | _PAGE_CV_4V)
2047 #define __DIRTY_BITS_4U  (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
2048 #define __DIRTY_BITS_4V  (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
2049 #define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
2050 #define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)
2051
2052 /* We need to exclude reserved regions; this exclusion covers vmlinux and
2053  * the initrd.  To be more precise, the initrd size could be used to compute
2054  * a new lower limit, because the initrd is freed later during initialization.
2055  */
2056 static void __init reduce_memory(phys_addr_t limit_ram)
2057 {
2058         phys_addr_t avail_ram = available_memory();
2059         phys_addr_t pa_start, pa_end;
2060         u64 i;
2061
2062         if (limit_ram >= avail_ram)
2063                 return;
2064
2065         for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &pa_start,
2066                                 &pa_end, NULL) {
2067                 phys_addr_t region_size = pa_end - pa_start;
2068                 phys_addr_t clip_start = pa_start;
2069
2070                 avail_ram = avail_ram - region_size;
2071                 /* Are we consuming too much? */
2072                 if (avail_ram < limit_ram) {
2073                         phys_addr_t give_back = limit_ram - avail_ram;
2074
2075                         region_size = region_size - give_back;
2076                         clip_start = clip_start + give_back;
2077                 }
2078
2079                 memblock_remove(clip_start, region_size);
2080
2081                 if (avail_ram <= limit_ram)
2082                         break;
2083                 i = 0UL;
2084         }
2085 }
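/* For example (hypothetical numbers): with 8GB of free memory reported
 * by available_memory() and "mem=2G" on the command line, the loop
 * removes whole free ranges from memblock until fewer than 2GB would
 * remain; the last range touched is clipped so that its first give_back
 * bytes survive, leaving exactly limit_ram available.
 */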
2086
2087 void __init paging_init(void)
2088 {
2089         unsigned long end_pfn, shift, phys_base;
2090         unsigned long real_end, i;
2091         int node;
2092
2093         setup_page_offset();
2094
2095         /* These build time checks make sure that the dcache_dirty_cpu()
2096          * page->flags usage will work.
2097          *
2098          * When a page gets marked as dcache-dirty, we store the
2099          * cpu number starting at bit 32 in the page->flags.  Also,
2100          * functions like clear_dcache_dirty_cpu use the cpu mask
2101          * in 13-bit signed-immediate instruction fields.
2102          */
2103
2104         /*
2105          * Page flags must not reach into the upper 32 bits that are used
2106          * for the cpu number
2107          */
2108         BUILD_BUG_ON(NR_PAGEFLAGS > 32);
2109
2110         /*
2111          * The bit fields placed in the high range must not reach below
2112          * the 32 bit boundary. Otherwise we cannot place the cpu field
2113          * at the 32 bit boundary.
2114          */
2115         BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH +
2116                 ilog2(roundup_pow_of_two(NR_CPUS)) > 32);
2117
2118         BUILD_BUG_ON(NR_CPUS > 4096);
2119
2120         kern_base = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
2121         kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
2122
2123         /* Invalidate both kernel TSBs.  */
2124         memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
2125 #ifndef CONFIG_DEBUG_PAGEALLOC
2126         memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
2127 #endif
2128
2129         /* The TTE.cv bit on sparc v9 occupies the same position as the
2130          * TTE.mcde bit on the M7 processor, a conflicting usage of the
2131          * same bit.  Enabling TTE.cv on M7 would turn on Memory Corruption
2132          * Detection errors on all pages, and that would lead to problems
2133          * later.  The kernel does not run with MCD enabled, and hence the
2134          * rest of the steps required to fully configure memory corruption
2135          * detection are not taken.  We must ensure TTE.mcde is never set
2136          * on the M7 processor.  Compute the value of the cacheability
2137          * flag for later use with this in mind.
2138          */
2139         switch (sun4v_chip_type) {
2140         case SUN4V_CHIP_SPARC_M7:
2141                 page_cache4v_flag = _PAGE_CP_4V;
2142                 break;
2143         default:
2144                 page_cache4v_flag = _PAGE_CACHE_4V;
2145                 break;
2146         }
2147
2148         if (tlb_type == hypervisor)
2149                 sun4v_pgprot_init();
2150         else
2151                 sun4u_pgprot_init();
2152
2153         if (tlb_type == cheetah_plus ||
2154             tlb_type == hypervisor) {
2155                 tsb_phys_patch();
2156                 ktsb_phys_patch();
2157         }
2158
2159         if (tlb_type == hypervisor)
2160                 sun4v_patch_tlb_handlers();
2161
2162         /* Find available physical memory...
2163          *
2164          * Read it twice in order to work around a bug in openfirmware.
2165          * The call to grab this table itself can cause openfirmware to
2166          * allocate memory, which in turn can take away some space from
2167          * the list of available memory.  Reading it twice makes sure
2168          * we really do get the final value.
2169          */
2170         read_obp_translations();
2171         read_obp_memory("reg", &pall[0], &pall_ents);
2172         read_obp_memory("available", &pavail[0], &pavail_ents);
2173         read_obp_memory("available", &pavail[0], &pavail_ents);
2174
2175         phys_base = 0xffffffffffffffffUL;
2176         for (i = 0; i < pavail_ents; i++) {
2177                 phys_base = min(phys_base, pavail[i].phys_addr);
2178                 memblock_add(pavail[i].phys_addr, pavail[i].reg_size);
2179         }
2180
2181         memblock_reserve(kern_base, kern_size);
2182
2183         find_ramdisk(phys_base);
2184
2185         if (cmdline_memory_size)
2186                 reduce_memory(cmdline_memory_size);
2187
2188         memblock_allow_resize();
2189         memblock_dump_all();
2190
2191         set_bit(0, mmu_context_bmap);
2192
2193         shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);
2194
2195         real_end = (unsigned long)_end;
2196         num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << ILOG2_4MB);
2197         printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
2198                num_kernel_image_mappings);
2199
2200         /* Set kernel pgd to upper alias so physical page computations
2201          * work.
2202          */
2203         init_mm.pgd += ((shift) / (sizeof(pgd_t)));
2204         
2205         memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
2206
2207         inherit_prom_mappings();
2208         
2209         /* Ok, we can use our TLB miss and window trap handlers safely.  */
2210         setup_tba();
2211
2212         __flush_tlb_all();
2213
2214         prom_build_devicetree();
2215         of_populate_present_mask();
2216 #ifndef CONFIG_SMP
2217         of_fill_in_cpu_data();
2218 #endif
2219
2220         if (tlb_type == hypervisor) {
2221                 sun4v_mdesc_init();
2222                 mdesc_populate_present_mask(cpu_all_mask);
2223 #ifndef CONFIG_SMP
2224                 mdesc_fill_in_cpu_data(cpu_all_mask);
2225 #endif
2226                 mdesc_get_page_sizes(cpu_all_mask, &cpu_pgsz_mask);
2227
2228                 sun4v_linear_pte_xor_finalize();
2229
2230                 sun4v_ktsb_init();
2231                 sun4v_ktsb_register();
2232         } else {
2233                 unsigned long impl, ver;
2234
2235                 cpu_pgsz_mask = (HV_PGSZ_MASK_8K | HV_PGSZ_MASK_64K |
2236                                  HV_PGSZ_MASK_512K | HV_PGSZ_MASK_4MB);
2237
2238                 __asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
2239                 impl = ((ver >> 32) & 0xffff);
2240                 if (impl == PANTHER_IMPL)
2241                         cpu_pgsz_mask |= (HV_PGSZ_MASK_32MB |
2242                                           HV_PGSZ_MASK_256MB);
2243
2244                 sun4u_linear_pte_xor_finalize();
2245         }
2246
2247         /* Flush the TLBs and the 4M TSB so that the updated linear
2248          * pte XOR settings are realized for all mappings.
2249          */
2250         __flush_tlb_all();
2251 #ifndef CONFIG_DEBUG_PAGEALLOC
2252         memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
2253 #endif
2254         __flush_tlb_all();
2255
2256         /* Setup bootmem... */
2257         last_valid_pfn = end_pfn = bootmem_init(phys_base);
2258
2259         /* Once the OF device tree and MDESC have been set up, we know
2260          * the list of possible cpus.  Therefore we can allocate the
2261          * IRQ stacks.
2262          */
2263         for_each_possible_cpu(i) {
2264                 node = cpu_to_node(i);
2265
2266                 softirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
2267                                                         THREAD_SIZE,
2268                                                         THREAD_SIZE, 0);
2269                 hardirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
2270                                                         THREAD_SIZE,
2271                                                         THREAD_SIZE, 0);
2272         }
2273
2274         kernel_physical_mapping_init();
2275
2276         {
2277                 unsigned long max_zone_pfns[MAX_NR_ZONES];
2278
2279                 memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
2280
2281                 max_zone_pfns[ZONE_NORMAL] = end_pfn;
2282
2283                 free_area_init_nodes(max_zone_pfns);
2284         }
2285
2286         printk("Booting Linux...\n");
2287 }
2288
2289 int page_in_phys_avail(unsigned long paddr)
2290 {
2291         int i;
2292
2293         paddr &= PAGE_MASK;
2294
2295         for (i = 0; i < pavail_ents; i++) {
2296                 unsigned long start, end;
2297
2298                 start = pavail[i].phys_addr;
2299                 end = start + pavail[i].reg_size;
2300
2301                 if (paddr >= start && paddr < end)
2302                         return 1;
2303         }
2304         if (paddr >= kern_base && paddr < (kern_base + kern_size))
2305                 return 1;
2306 #ifdef CONFIG_BLK_DEV_INITRD
2307         if (paddr >= __pa(initrd_start) &&
2308             paddr < __pa(PAGE_ALIGN(initrd_end)))
2309                 return 1;
2310 #endif
2311
2312         return 0;
2313 }
2314
2315 static void __init register_page_bootmem_info(void)
2316 {
2317 #ifdef CONFIG_NEED_MULTIPLE_NODES
2318         int i;
2319
2320         for_each_online_node(i)
2321                 if (NODE_DATA(i)->node_spanned_pages)
2322                         register_page_bootmem_info_node(NODE_DATA(i));
2323 #endif
2324 }
2325 void __init mem_init(void)
2326 {
2327         high_memory = __va(last_valid_pfn << PAGE_SHIFT);
2328
2329         register_page_bootmem_info();
2330         free_all_bootmem();
2331
2332         /*
2333          * Set up the zero page and mark it reserved, so that the page
2334          * count is not manipulated when the page is freed from user ptes.
2335          */
2336         mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
2337         if (mem_map_zero == NULL) {
2338                 prom_printf("paging_init: Cannot alloc zero page.\n");
2339                 prom_halt();
2340         }
2341         mark_page_reserved(mem_map_zero);
2342
2343         mem_init_print_info(NULL);
2344
2345         if (tlb_type == cheetah || tlb_type == cheetah_plus)
2346                 cheetah_ecache_flush_init();
2347 }
2348
2349 void free_initmem(void)
2350 {
2351         unsigned long addr, initend;
2352         int do_free = 1;
2353
2354         /* If the physical memory maps were trimmed by kernel command
2355          * line options, don't even try to free this initmem.  The kernel
2356          * image could have been in the trimmed-out region, and if so the
2357          * freeing below would free invalid page structs.
2358          */
2359         if (cmdline_memory_size)
2360                 do_free = 0;
2361
2362         /*
2363          * The init section is aligned to 8k in vmlinux.lds.  Page align for >8k page sizes.
2364          */
2365         addr = PAGE_ALIGN((unsigned long)(__init_begin));
2366         initend = (unsigned long)(__init_end) & PAGE_MASK;
2367         for (; addr < initend; addr += PAGE_SIZE) {
2368                 unsigned long page;
2369
2370                 page = (addr +
2371                         ((unsigned long) __va(kern_base)) -
2372                         ((unsigned long) KERNBASE));
2373                 memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
2374
2375                 if (do_free)
2376                         free_reserved_page(virt_to_page(page));
2377         }
2378 }
2379
2380 #ifdef CONFIG_BLK_DEV_INITRD
2381 void free_initrd_mem(unsigned long start, unsigned long end)
2382 {
2383         free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
2384                            "initrd");
2385 }
2386 #endif
2387
2388 pgprot_t PAGE_KERNEL __read_mostly;
2389 EXPORT_SYMBOL(PAGE_KERNEL);
2390
2391 pgprot_t PAGE_KERNEL_LOCKED __read_mostly;
2392 pgprot_t PAGE_COPY __read_mostly;
2393
2394 pgprot_t PAGE_SHARED __read_mostly;
2395 EXPORT_SYMBOL(PAGE_SHARED);
2396
2397 unsigned long pg_iobits __read_mostly;
2398
2399 unsigned long _PAGE_IE __read_mostly;
2400 EXPORT_SYMBOL(_PAGE_IE);
2401
2402 unsigned long _PAGE_E __read_mostly;
2403 EXPORT_SYMBOL(_PAGE_E);
2404
2405 unsigned long _PAGE_CACHE __read_mostly;
2406 EXPORT_SYMBOL(_PAGE_CACHE);
2407
2408 #ifdef CONFIG_SPARSEMEM_VMEMMAP
2409 int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
2410                                int node)
2411 {
2412         unsigned long pte_base;
2413
2414         pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
2415                     _PAGE_CP_4U | _PAGE_CV_4U |
2416                     _PAGE_P_4U | _PAGE_W_4U);
2417         if (tlb_type == hypervisor)
2418                 pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
2419                             page_cache4v_flag | _PAGE_P_4V | _PAGE_W_4V);
2420
2421         pte_base |= _PAGE_PMD_HUGE;
2422
2423         vstart = vstart & PMD_MASK;
2424         vend = ALIGN(vend, PMD_SIZE);
2425         for (; vstart < vend; vstart += PMD_SIZE) {
2426                 pgd_t *pgd = pgd_offset_k(vstart);
2427                 unsigned long pte;
2428                 pud_t *pud;
2429                 pmd_t *pmd;
2430
2431                 if (pgd_none(*pgd)) {
2432                         pud_t *new = vmemmap_alloc_block(PAGE_SIZE, node);
2433
2434                         if (!new)
2435                                 return -ENOMEM;
2436                         pgd_populate(&init_mm, pgd, new);
2437                 }
2438
2439                 pud = pud_offset(pgd, vstart);
2440                 if (pud_none(*pud)) {
2441                         pmd_t *new = vmemmap_alloc_block(PAGE_SIZE, node);
2442
2443                         if (!new)
2444                                 return -ENOMEM;
2445                         pud_populate(&init_mm, pud, new);
2446                 }
2447
2448                 pmd = pmd_offset(pud, vstart);
2449
2450                 pte = pmd_val(*pmd);
2451                 if (!(pte & _PAGE_VALID)) {
2452                         void *block = vmemmap_alloc_block(PMD_SIZE, node);
2453
2454                         if (!block)
2455                                 return -ENOMEM;
2456
2457                         pmd_val(*pmd) = pte_base | __pa(block);
2458                 }
2459         }
2460
2461         return 0;
2462 }
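/* Back-of-the-envelope (assuming a 64-byte struct page): each 8MB PMD
 * entry planted above holds 128K struct pages, so one huge vmemmap
 * mapping describes 1GB worth of 8K physical pages.
 */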
2463
2464 void vmemmap_free(unsigned long start, unsigned long end)
2465 {
2466 }
2467 #endif /* CONFIG_SPARSEMEM_VMEMMAP */
2468
2469 static void prot_init_common(unsigned long page_none,
2470                              unsigned long page_shared,
2471                              unsigned long page_copy,
2472                              unsigned long page_readonly,
2473                              unsigned long page_exec_bit)
2474 {
2475         PAGE_COPY = __pgprot(page_copy);
2476         PAGE_SHARED = __pgprot(page_shared);
2477
2478         protection_map[0x0] = __pgprot(page_none);
2479         protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit);
2480         protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit);
2481         protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit);
2482         protection_map[0x4] = __pgprot(page_readonly);
2483         protection_map[0x5] = __pgprot(page_readonly);
2484         protection_map[0x6] = __pgprot(page_copy);
2485         protection_map[0x7] = __pgprot(page_copy);
2486         protection_map[0x8] = __pgprot(page_none);
2487         protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit);
2488         protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit);
2489         protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit);
2490         protection_map[0xc] = __pgprot(page_readonly);
2491         protection_map[0xd] = __pgprot(page_readonly);
2492         protection_map[0xe] = __pgprot(page_shared);
2493         protection_map[0xf] = __pgprot(page_shared);
2494 }
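/* The protection_map index is the low four VM_* bits of a vma's
 * vm_flags: bit 0 = VM_READ, bit 1 = VM_WRITE, bit 2 = VM_EXEC and
 * bit 3 = VM_SHARED.  So index 0x3 (private read+write) resolves to
 * page_copy with the exec bit stripped, while 0xb (shared read+write)
 * resolves to page_shared, again without exec.
 */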
2495
2496 static void __init sun4u_pgprot_init(void)
2497 {
2498         unsigned long page_none, page_shared, page_copy, page_readonly;
2499         unsigned long page_exec_bit;
2500         int i;
2501
2502         PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
2503                                 _PAGE_CACHE_4U | _PAGE_P_4U |
2504                                 __ACCESS_BITS_4U | __DIRTY_BITS_4U |
2505                                 _PAGE_EXEC_4U);
2506         PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
2507                                        _PAGE_CACHE_4U | _PAGE_P_4U |
2508                                        __ACCESS_BITS_4U | __DIRTY_BITS_4U |
2509                                        _PAGE_EXEC_4U | _PAGE_L_4U);
2510
2511         _PAGE_IE = _PAGE_IE_4U;
2512         _PAGE_E = _PAGE_E_4U;
2513         _PAGE_CACHE = _PAGE_CACHE_4U;
2514
2515         pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U |
2516                      __ACCESS_BITS_4U | _PAGE_E_4U);
2517
2518 #ifdef CONFIG_DEBUG_PAGEALLOC
2519         kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
2520 #else
2521         kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
2522                 PAGE_OFFSET;
2523 #endif
2524         kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U |
2525                                    _PAGE_P_4U | _PAGE_W_4U);
2526
2527         for (i = 1; i < 4; i++)
2528                 kern_linear_pte_xor[i] = kern_linear_pte_xor[0];
2529
2530         _PAGE_ALL_SZ_BITS =  (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U |
2531                               _PAGE_SZ64K_4U | _PAGE_SZ8K_4U |
2532                               _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U);
2533
2534
2535         page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U;
2536         page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2537                        __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U);
2538         page_copy   = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2539                        __ACCESS_BITS_4U | _PAGE_EXEC_4U);
2540         page_readonly   = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2541                            __ACCESS_BITS_4U | _PAGE_EXEC_4U);
2542
2543         page_exec_bit = _PAGE_EXEC_4U;
2544
2545         prot_init_common(page_none, page_shared, page_copy, page_readonly,
2546                          page_exec_bit);
2547 }
2548
2549 static void __init sun4v_pgprot_init(void)
2550 {
2551         unsigned long page_none, page_shared, page_copy, page_readonly;
2552         unsigned long page_exec_bit;
2553         int i;
2554
2555         PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID |
2556                                 page_cache4v_flag | _PAGE_P_4V |
2557                                 __ACCESS_BITS_4V | __DIRTY_BITS_4V |
2558                                 _PAGE_EXEC_4V);
2559         PAGE_KERNEL_LOCKED = PAGE_KERNEL;
2560
2561         _PAGE_IE = _PAGE_IE_4V;
2562         _PAGE_E = _PAGE_E_4V;
2563         _PAGE_CACHE = page_cache4v_flag;
2564
2565 #ifdef CONFIG_DEBUG_PAGEALLOC
2566         kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
2567 #else
2568         kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
2569                 PAGE_OFFSET;
2570 #endif
2571         kern_linear_pte_xor[0] |= (page_cache4v_flag | _PAGE_P_4V |
2572                                    _PAGE_W_4V);
2573
2574         for (i = 1; i < 4; i++)
2575                 kern_linear_pte_xor[i] = kern_linear_pte_xor[0];
2576
2577         pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V |
2578                      __ACCESS_BITS_4V | _PAGE_E_4V);
2579
2580         _PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V |
2581                              _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V |
2582                              _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V |
2583                              _PAGE_SZ64K_4V | _PAGE_SZ8K_4V);
2584
2585         page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | page_cache4v_flag;
2586         page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
2587                        __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V);
2588         page_copy   = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
2589                        __ACCESS_BITS_4V | _PAGE_EXEC_4V);
2590         page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
2591                          __ACCESS_BITS_4V | _PAGE_EXEC_4V);
2592
2593         page_exec_bit = _PAGE_EXEC_4V;
2594
2595         prot_init_common(page_none, page_shared, page_copy, page_readonly,
2596                          page_exec_bit);
2597 }
2598
2599 unsigned long pte_sz_bits(unsigned long sz)
2600 {
2601         if (tlb_type == hypervisor) {
2602                 switch (sz) {
2603                 case 8 * 1024:
2604                 default:
2605                         return _PAGE_SZ8K_4V;
2606                 case 64 * 1024:
2607                         return _PAGE_SZ64K_4V;
2608                 case 512 * 1024:
2609                         return _PAGE_SZ512K_4V;
2610                 case 4 * 1024 * 1024:
2611                         return _PAGE_SZ4MB_4V;
2612                 }
2613         } else {
2614                 switch (sz) {
2615                 case 8 * 1024:
2616                 default:
2617                         return _PAGE_SZ8K_4U;
2618                 case 64 * 1024:
2619                         return _PAGE_SZ64K_4U;
2620                 case 512 * 1024:
2621                         return _PAGE_SZ512K_4U;
2622                 case 4 * 1024 * 1024:
2623                         return _PAGE_SZ4MB_4U;
2624                 }
2625         }
2626 }
2627
2628 pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size)
2629 {
2630         pte_t pte;
2631
2632         pte_val(pte)  = page | pgprot_val(pgprot_noncached(prot));
2633         pte_val(pte) |= (((unsigned long)space) << 32);
2634         pte_val(pte) |= pte_sz_bits(page_size);
2635
2636         return pte;
2637 }
2638
2639 static unsigned long kern_large_tte(unsigned long paddr)
2640 {
2641         unsigned long val;
2642
2643         val = (_PAGE_VALID | _PAGE_SZ4MB_4U |
2644                _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U |
2645                _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U);
2646         if (tlb_type == hypervisor)
2647                 val = (_PAGE_VALID | _PAGE_SZ4MB_4V |
2648                        page_cache4v_flag | _PAGE_P_4V |
2649                        _PAGE_EXEC_4V | _PAGE_W_4V);
2650
2651         return val | paddr;
2652 }
2653
2654 /* If not locked, zap it. */
2655 void __flush_tlb_all(void)
2656 {
2657         unsigned long pstate;
2658         int i;
2659
2660         __asm__ __volatile__("flushw\n\t"
2661                              "rdpr      %%pstate, %0\n\t"
2662                              "wrpr      %0, %1, %%pstate"
2663                              : "=r" (pstate)
2664                              : "i" (PSTATE_IE));
2665         if (tlb_type == hypervisor) {
2666                 sun4v_mmu_demap_all();
2667         } else if (tlb_type == spitfire) {
2668                 for (i = 0; i < 64; i++) {
2669                         /* Spitfire Errata #32 workaround */
2670                         /* NOTE: Always runs on spitfire, so no
2671                          *       cheetah+ page size encodings.
2672                          */
2673                         __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
2674                                              "flush     %%g6"
2675                                              : /* No outputs */
2676                                              : "r" (0),
2677                                              "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
2678
2679                         if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) {
2680                                 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
2681                                                      "membar #Sync"
2682                                                      : /* no outputs */
2683                                                      : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
2684                                 spitfire_put_dtlb_data(i, 0x0UL);
2685                         }
2686
2687                         /* Spitfire Errata #32 workaround */
2688                         /* NOTE: Always runs on spitfire, so no
2689                          *       cheetah+ page size encodings.
2690                          */
2691                         __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
2692                                              "flush     %%g6"
2693                                              : /* No outputs */
2694                                              : "r" (0),
2695                                              "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
2696
2697                         if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) {
2698                                 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
2699                                                      "membar #Sync"
2700                                                      : /* no outputs */
2701                                                      : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
2702                                 spitfire_put_itlb_data(i, 0x0UL);
2703                         }
2704                 }
2705         } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
2706                 cheetah_flush_dtlb_all();
2707                 cheetah_flush_itlb_all();
2708         }
2709         __asm__ __volatile__("wrpr      %0, 0, %%pstate"
2710                              : : "r" (pstate));
2711 }
2712
2713 pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
2714                             unsigned long address)
2715 {
2716         struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
2717                                        __GFP_REPEAT | __GFP_ZERO);
2718         pte_t *pte = NULL;
2719
2720         if (page)
2721                 pte = (pte_t *) page_address(page);
2722
2723         return pte;
2724 }
2725
2726 pgtable_t pte_alloc_one(struct mm_struct *mm,
2727                         unsigned long address)
2728 {
2729         struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
2730                                        __GFP_REPEAT | __GFP_ZERO);
2731         if (!page)
2732                 return NULL;
2733         if (!pgtable_page_ctor(page)) {
2734                 free_hot_cold_page(page, 0);
2735                 return NULL;
2736         }
2737         return (pte_t *) page_address(page);
2738 }
2739
2740 void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
2741 {
2742         free_page((unsigned long)pte);
2743 }
2744
2745 static void __pte_free(pgtable_t pte)
2746 {
2747         struct page *page = virt_to_page(pte);
2748
2749         pgtable_page_dtor(page);
2750         __free_page(page);
2751 }
2752
2753 void pte_free(struct mm_struct *mm, pgtable_t pte)
2754 {
2755         __pte_free(pte);
2756 }
2757
2758 void pgtable_free(void *table, bool is_page)
2759 {
2760         if (is_page)
2761                 __pte_free(table);
2762         else
2763                 kmem_cache_free(pgtable_cache, table);
2764 }
2765
2766 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2767 void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
2768                           pmd_t *pmd)
2769 {
2770         unsigned long pte, flags;
2771         struct mm_struct *mm;
2772         pmd_t entry = *pmd;
2773
2774         if (!pmd_large(entry) || !pmd_young(entry))
2775                 return;
2776
2777         pte = pmd_val(entry);
2778
2779         /* Don't insert a non-valid PMD into the TSB, we'll deadlock.  */
2780         if (!(pte & _PAGE_VALID))
2781                 return;
2782
2783         /* We are fabricating 8MB pages using 4MB real hw pages.  */
2784         pte |= (addr & (1UL << REAL_HPAGE_SHIFT));
2785
2786         mm = vma->vm_mm;
2787
2788         spin_lock_irqsave(&mm->context.lock, flags);
2789
2790         if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL)
2791                 __update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
2792                                         addr, pte);
2793
2794         spin_unlock_irqrestore(&mm->context.lock, flags);
2795 }
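/* The REAL_HPAGE_SHIFT bit OR'd into the pte above selects which half
 * of the 8MB software huge page this TSB entry describes: the 8MB page
 * is backed by two 4MB hardware TTEs, and that address bit picks the
 * first or the second 4MB half.
 */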
2796 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
2797
2798 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
2799 static void context_reload(void *__data)
2800 {
2801         struct mm_struct *mm = __data;
2802
2803         if (mm == current->mm)
2804                 load_secondary_context(mm);
2805 }
2806
2807 void hugetlb_setup(struct pt_regs *regs)
2808 {
2809         struct mm_struct *mm = current->mm;
2810         struct tsb_config *tp;
2811
2812         if (faulthandler_disabled() || !mm) {
2813                 const struct exception_table_entry *entry;
2814
2815                 entry = search_exception_tables(regs->tpc);
2816                 if (entry) {
2817                         regs->tpc = entry->fixup;
2818                         regs->tnpc = regs->tpc + 4;
2819                         return;
2820                 }
2821                 pr_alert("Unexpected HugeTLB setup in atomic context.\n");
2822                 die_if_kernel("HugeTSB in atomic", regs);
2823         }
2824
2825         tp = &mm->context.tsb_block[MM_TSB_HUGE];
2826         if (likely(tp->tsb == NULL))
2827                 tsb_grow(mm, MM_TSB_HUGE, 0);
2828
2829         tsb_context_switch(mm);
2830         smp_tsb_sync(mm);
2831
2832         /* On UltraSPARC-III+ and later, configure the second half of
2833          * the Data-TLB for huge pages.
2834          */
2835         if (tlb_type == cheetah_plus) {
2836                 unsigned long ctx;
2837
2838                 spin_lock(&ctx_alloc_lock);
2839                 ctx = mm->context.sparc64_ctx_val;
2840                 ctx &= ~CTX_PGSZ_MASK;
2841                 ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
2842                 ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT;
2843
2844                 if (ctx != mm->context.sparc64_ctx_val) {
2845                         /* When changing the page size fields, we
2846                          * must perform a context flush so that no
2847                          * stale entries match.  This flush must
2848                          * occur with the original context register
2849                          * settings.
2850                          */
2851                         do_flush_tlb_mm(mm);
2852
2853                         /* Reload the context register of all processors
2854                          * also executing in this address space.
2855                          */
2856                         mm->context.sparc64_ctx_val = ctx;
2857                         on_each_cpu(context_reload, mm, 0);
2858                 }
2859                 spin_unlock(&ctx_alloc_lock);
2860         }
2861 }
2862 #endif
2863
2864 static struct resource code_resource = {
2865         .name   = "Kernel code",
2866         .flags  = IORESOURCE_BUSY | IORESOURCE_MEM
2867 };
2868
2869 static struct resource data_resource = {
2870         .name   = "Kernel data",
2871         .flags  = IORESOURCE_BUSY | IORESOURCE_MEM
2872 };
2873
2874 static struct resource bss_resource = {
2875         .name   = "Kernel bss",
2876         .flags  = IORESOURCE_BUSY | IORESOURCE_MEM
2877 };
2878
2879 static inline resource_size_t compute_kern_paddr(void *addr)
2880 {
2881         return (resource_size_t) (addr - KERNBASE + kern_base);
2882 }
2883
2884 static void __init kernel_lds_init(void)
2885 {
2886         code_resource.start = compute_kern_paddr(_text);
2887         code_resource.end   = compute_kern_paddr(_etext - 1);
2888         data_resource.start = compute_kern_paddr(_etext);
2889         data_resource.end   = compute_kern_paddr(_edata - 1);
2890         bss_resource.start  = compute_kern_paddr(__bss_start);
2891         bss_resource.end    = compute_kern_paddr(_end - 1);
2892 }
2893
2894 static int __init report_memory(void)
2895 {
2896         int i;
2897         struct resource *res;
2898
2899         kernel_lds_init();
2900
2901         for (i = 0; i < pavail_ents; i++) {
2902                 res = kzalloc(sizeof(struct resource), GFP_KERNEL);
2903
2904                 if (!res) {
2905                         pr_warn("Failed to allocate resource.\n");
2906                         break;
2907                 }
2908
2909                 res->name = "System RAM";
2910                 res->start = pavail[i].phys_addr;
2911                 res->end = pavail[i].phys_addr + pavail[i].reg_size - 1;
2912                 res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
2913
2914                 if (insert_resource(&iomem_resource, res) < 0) {
2915                         pr_warn("Resource insertion failed.\n");
2916                         break;
2917                 }
2918
2919                 insert_resource(res, &code_resource);
2920                 insert_resource(res, &data_resource);
2921                 insert_resource(res, &bss_resource);
2922         }
2923
2924         return 0;
2925 }
2926 arch_initcall(report_memory);
2927
2928 #ifdef CONFIG_SMP
2929 #define do_flush_tlb_kernel_range       smp_flush_tlb_kernel_range
2930 #else
2931 #define do_flush_tlb_kernel_range       __flush_tlb_kernel_range
2932 #endif
2933
2934 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
2935 {
2936         if (start < HI_OBP_ADDRESS && end > LOW_OBP_ADDRESS) {
2937                 if (start < LOW_OBP_ADDRESS) {
2938                         flush_tsb_kernel_range(start, LOW_OBP_ADDRESS);
2939                         do_flush_tlb_kernel_range(start, LOW_OBP_ADDRESS);
2940                 }
2941                 if (end > HI_OBP_ADDRESS) {
2942                         flush_tsb_kernel_range(HI_OBP_ADDRESS, end);
2943                         do_flush_tlb_kernel_range(HI_OBP_ADDRESS, end);
2944                 }
2945         } else {
2946                 flush_tsb_kernel_range(start, end);
2947                 do_flush_tlb_kernel_range(start, end);
2948         }
2949 }
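/* For example, a flush of a range that straddles the OBP window is
 * split in two, [start, LOW_OBP_ADDRESS) and [HI_OBP_ADDRESS, end),
 * so that the firmware's translations in between are left untouched.
 */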