1 /*
2  *  arch/sparc64/mm/init.c
3  *
4  *  Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
5  *  Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
6  */
7  
8 #include <linux/extable.h>
9 #include <linux/kernel.h>
10 #include <linux/sched.h>
11 #include <linux/string.h>
12 #include <linux/init.h>
13 #include <linux/bootmem.h>
14 #include <linux/mm.h>
15 #include <linux/hugetlb.h>
16 #include <linux/initrd.h>
17 #include <linux/swap.h>
18 #include <linux/pagemap.h>
19 #include <linux/poison.h>
20 #include <linux/fs.h>
21 #include <linux/seq_file.h>
22 #include <linux/kprobes.h>
23 #include <linux/cache.h>
24 #include <linux/sort.h>
25 #include <linux/ioport.h>
26 #include <linux/percpu.h>
27 #include <linux/memblock.h>
28 #include <linux/mmzone.h>
29 #include <linux/gfp.h>
30
31 #include <asm/head.h>
32 #include <asm/page.h>
33 #include <asm/pgalloc.h>
34 #include <asm/pgtable.h>
35 #include <asm/oplib.h>
36 #include <asm/iommu.h>
37 #include <asm/io.h>
38 #include <linux/uaccess.h>
39 #include <asm/mmu_context.h>
40 #include <asm/tlbflush.h>
41 #include <asm/dma.h>
42 #include <asm/starfire.h>
43 #include <asm/tlb.h>
44 #include <asm/spitfire.h>
45 #include <asm/sections.h>
46 #include <asm/tsb.h>
47 #include <asm/hypervisor.h>
48 #include <asm/prom.h>
49 #include <asm/mdesc.h>
50 #include <asm/cpudata.h>
51 #include <asm/setup.h>
52 #include <asm/irq.h>
53
54 #include "init_64.h"
55
56 unsigned long kern_linear_pte_xor[4] __read_mostly;
57 static unsigned long page_cache4v_flag;
58
59 /* A bitmap, two bits for every 256MB of physical memory.  These two
60  * bits determine what page size we use for kernel linear
61  * translations.  They form an index into kern_linear_pte_xor[].  The
62  * value in the indexed slot is XOR'd with the TLB miss virtual
63  * address to form the resulting TTE.  The mapping is:
64  *
65  *      0       ==>     4MB
66  *      1       ==>     256MB
67  *      2       ==>     2GB
68  *      3       ==>     16GB
69  *
70  * All sun4v chips support 256MB pages.  Only SPARC-T4 and later
71  * support 2GB pages, and hopefully future cpus will support the 16GB
72  * pages as well.  For slots 2 and 3, we encode a 256MB TTE xor there
73  * if these larger page sizes are not supported by the cpu.
74  *
75  * It would be nice to determine this from the machine description
76  * 'cpu' properties, but we need to have this table set up before the
77  * MDESC is initialized.
78  */
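#if 0	/* Illustrative sketch only, not built: how a linear-mapping TTE is
	 * formed from a TLB-miss virtual address and the slot index selected
	 * by the two bitmap bits described above.  The helper name is
	 * hypothetical and does not exist in this file.
	 */
static unsigned long example_linear_tte(unsigned long fault_vaddr,
					unsigned int slot)
{
	/* slot: 0 => 4MB, 1 => 256MB, 2 => 2GB, 3 => 16GB (see table above) */
	return fault_vaddr ^ kern_linear_pte_xor[slot];
}
#endif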
79
80 #ifndef CONFIG_DEBUG_PAGEALLOC
81 /* A special kernel TSB for 4MB, 256MB, 2GB and 16GB linear mappings.
82  * Space is allocated for this right after the trap table in
83  * arch/sparc64/kernel/head.S
84  */
85 extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
86 #endif
87 extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
88
89 static unsigned long cpu_pgsz_mask;
90
91 #define MAX_BANKS       1024
92
93 static struct linux_prom64_registers pavail[MAX_BANKS];
94 static int pavail_ents;
95
96 u64 numa_latency[MAX_NUMNODES][MAX_NUMNODES];
97
98 static int cmp_p64(const void *a, const void *b)
99 {
100         const struct linux_prom64_registers *x = a, *y = b;
101
102         if (x->phys_addr > y->phys_addr)
103                 return 1;
104         if (x->phys_addr < y->phys_addr)
105                 return -1;
106         return 0;
107 }
108
109 static void __init read_obp_memory(const char *property,
110                                    struct linux_prom64_registers *regs,
111                                    int *num_ents)
112 {
113         phandle node = prom_finddevice("/memory");
114         int prop_size = prom_getproplen(node, property);
115         int ents, ret, i;
116
117         ents = prop_size / sizeof(struct linux_prom64_registers);
118         if (ents > MAX_BANKS) {
119                 prom_printf("The machine has more %s property entries than "
120                             "this kernel can support (%d).\n",
121                             property, MAX_BANKS);
122                 prom_halt();
123         }
124
125         ret = prom_getproperty(node, property, (char *) regs, prop_size);
126         if (ret == -1) {
127                 prom_printf("Couldn't get %s property from /memory.\n",
128                                 property);
129                 prom_halt();
130         }
131
132         /* Sanitize what we got from the firmware by page aligning
133          * everything.
134          */
135         for (i = 0; i < ents; i++) {
136                 unsigned long base, size;
137
138                 base = regs[i].phys_addr;
139                 size = regs[i].reg_size;
140
141                 size &= PAGE_MASK;
142                 if (base & ~PAGE_MASK) {
143                         unsigned long new_base = PAGE_ALIGN(base);
144
145                         size -= new_base - base;
146                         if ((long) size < 0L)
147                                 size = 0UL;
148                         base = new_base;
149                 }
150                 if (size == 0UL) {
151                         /* If it is empty, simply get rid of it.
152                          * This simplifies the logic of the other
153                          * functions that process these arrays.
154                          */
155                         memmove(&regs[i], &regs[i + 1],
156                                 (ents - i - 1) * sizeof(regs[0]));
157                         i--;
158                         ents--;
159                         continue;
160                 }
161                 regs[i].phys_addr = base;
162                 regs[i].reg_size = size;
163         }
164
165         *num_ents = ents;
166
167         sort(regs, ents, sizeof(struct linux_prom64_registers),
168              cmp_p64, NULL);
169 }
170
171 /* Kernel physical address base and size in bytes.  */
172 unsigned long kern_base __read_mostly;
173 unsigned long kern_size __read_mostly;
174
175 /* Initial ramdisk setup */
176 extern unsigned long sparc_ramdisk_image64;
177 extern unsigned int sparc_ramdisk_image;
178 extern unsigned int sparc_ramdisk_size;
179
180 struct page *mem_map_zero __read_mostly;
181 EXPORT_SYMBOL(mem_map_zero);
182
183 unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;
184
185 unsigned long sparc64_kern_pri_context __read_mostly;
186 unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
187 unsigned long sparc64_kern_sec_context __read_mostly;
188
189 int num_kernel_image_mappings;
190
191 #ifdef CONFIG_DEBUG_DCFLUSH
192 atomic_t dcpage_flushes = ATOMIC_INIT(0);
193 #ifdef CONFIG_SMP
194 atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
195 #endif
196 #endif
197
198 inline void flush_dcache_page_impl(struct page *page)
199 {
200         BUG_ON(tlb_type == hypervisor);
201 #ifdef CONFIG_DEBUG_DCFLUSH
202         atomic_inc(&dcpage_flushes);
203 #endif
204
205 #ifdef DCACHE_ALIASING_POSSIBLE
206         __flush_dcache_page(page_address(page),
207                             ((tlb_type == spitfire) &&
208                              page_mapping(page) != NULL));
209 #else
210         if (page_mapping(page) != NULL &&
211             tlb_type == spitfire)
212                 __flush_icache_page(__pa(page_address(page)));
213 #endif
214 }
215
216 #define PG_dcache_dirty         PG_arch_1
217 #define PG_dcache_cpu_shift     32UL
218 #define PG_dcache_cpu_mask      \
219         ((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)
220
221 #define dcache_dirty_cpu(page) \
222         (((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
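#if 0	/* Illustrative sketch only, not built: reading the owning cpu of a
	 * D-cache-dirty page back out of page->flags using the macros above.
	 * With NR_CPUS rounded up to 64, the cpu id occupies bits 32-37 and
	 * PG_dcache_dirty (PG_arch_1) marks the value as valid.  The helper
	 * name is hypothetical.
	 */
static int example_dcache_owner(struct page *page)
{
	if (!(page->flags & (1UL << PG_dcache_dirty)))
		return -1;	/* page is not D-cache dirty */
	return dcache_dirty_cpu(page);
}
#endif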
223
224 static inline void set_dcache_dirty(struct page *page, int this_cpu)
225 {
226         unsigned long mask = this_cpu;
227         unsigned long non_cpu_bits;
228
229         non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
230         mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);
231
232         __asm__ __volatile__("1:\n\t"
233                              "ldx       [%2], %%g7\n\t"
234                              "and       %%g7, %1, %%g1\n\t"
235                              "or        %%g1, %0, %%g1\n\t"
236                              "casx      [%2], %%g7, %%g1\n\t"
237                              "cmp       %%g7, %%g1\n\t"
238                              "bne,pn    %%xcc, 1b\n\t"
239                              " nop"
240                              : /* no outputs */
241                              : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
242                              : "g1", "g7");
243 }
244
245 static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
246 {
247         unsigned long mask = (1UL << PG_dcache_dirty);
248
249         __asm__ __volatile__("! test_and_clear_dcache_dirty\n"
250                              "1:\n\t"
251                              "ldx       [%2], %%g7\n\t"
252                              "srlx      %%g7, %4, %%g1\n\t"
253                              "and       %%g1, %3, %%g1\n\t"
254                              "cmp       %%g1, %0\n\t"
255                              "bne,pn    %%icc, 2f\n\t"
256                              " andn     %%g7, %1, %%g1\n\t"
257                              "casx      [%2], %%g7, %%g1\n\t"
258                              "cmp       %%g7, %%g1\n\t"
259                              "bne,pn    %%xcc, 1b\n\t"
260                              " nop\n"
261                              "2:"
262                              : /* no outputs */
263                              : "r" (cpu), "r" (mask), "r" (&page->flags),
264                                "i" (PG_dcache_cpu_mask),
265                                "i" (PG_dcache_cpu_shift)
266                              : "g1", "g7");
267 }
268
269 static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
270 {
271         unsigned long tsb_addr = (unsigned long) ent;
272
273         if (tlb_type == cheetah_plus || tlb_type == hypervisor)
274                 tsb_addr = __pa(tsb_addr);
275
276         __tsb_insert(tsb_addr, tag, pte);
277 }
278
279 unsigned long _PAGE_ALL_SZ_BITS __read_mostly;
280
281 static void flush_dcache(unsigned long pfn)
282 {
283         struct page *page;
284
285         page = pfn_to_page(pfn);
286         if (page) {
287                 unsigned long pg_flags;
288
289                 pg_flags = page->flags;
290                 if (pg_flags & (1UL << PG_dcache_dirty)) {
291                         int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
292                                    PG_dcache_cpu_mask);
293                         int this_cpu = get_cpu();
294
295                         /* This is just to optimize away some function calls
296                          * in the SMP case.
297                          */
298                         if (cpu == this_cpu)
299                                 flush_dcache_page_impl(page);
300                         else
301                                 smp_flush_dcache_page_impl(page, cpu);
302
303                         clear_dcache_dirty_cpu(page, cpu);
304
305                         put_cpu();
306                 }
307         }
308 }
309
310 /* mm->context.lock must be held */
311 static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_index,
312                                     unsigned long tsb_hash_shift, unsigned long address,
313                                     unsigned long tte)
314 {
315         struct tsb *tsb = mm->context.tsb_block[tsb_index].tsb;
316         unsigned long tag;
317
318         if (unlikely(!tsb))
319                 return;
320
321         tsb += ((address >> tsb_hash_shift) &
322                 (mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
323         tag = (address >> 22UL);
324         tsb_insert(tsb, tag, tte);
325 }
326
327 #ifdef CONFIG_HUGETLB_PAGE
328 static int __init setup_hugepagesz(char *string)
329 {
330         unsigned long long hugepage_size;
331         unsigned int hugepage_shift;
332         unsigned short hv_pgsz_idx;
333         unsigned int hv_pgsz_mask;
334         int rc = 0;
335
336         hugepage_size = memparse(string, &string);
337         hugepage_shift = ilog2(hugepage_size);
338
339         switch (hugepage_shift) {
340         case HPAGE_2GB_SHIFT:
341                 hv_pgsz_mask = HV_PGSZ_MASK_2GB;
342                 hv_pgsz_idx = HV_PGSZ_IDX_2GB;
343                 break;
344         case HPAGE_256MB_SHIFT:
345                 hv_pgsz_mask = HV_PGSZ_MASK_256MB;
346                 hv_pgsz_idx = HV_PGSZ_IDX_256MB;
347                 break;
348         case HPAGE_SHIFT:
349                 hv_pgsz_mask = HV_PGSZ_MASK_4MB;
350                 hv_pgsz_idx = HV_PGSZ_IDX_4MB;
351                 break;
352         case HPAGE_64K_SHIFT:
353                 hv_pgsz_mask = HV_PGSZ_MASK_64K;
354                 hv_pgsz_idx = HV_PGSZ_IDX_64K;
355                 break;
356         default:
357                 hv_pgsz_mask = 0;
358         }
359
360         if ((hv_pgsz_mask & cpu_pgsz_mask) == 0U) {
361                 hugetlb_bad_size();
362                 pr_err("hugepagesz=%llu not supported by MMU.\n",
363                         hugepage_size);
364                 goto out;
365         }
366
367         hugetlb_add_hstate(hugepage_shift - PAGE_SHIFT);
368         rc = 1;
369
370 out:
371         return rc;
372 }
373 __setup("hugepagesz=", setup_hugepagesz);
374 #endif  /* CONFIG_HUGETLB_PAGE */
375
376 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
377 {
378         struct mm_struct *mm;
379         unsigned long flags;
380         pte_t pte = *ptep;
381
382         if (tlb_type != hypervisor) {
383                 unsigned long pfn = pte_pfn(pte);
384
385                 if (pfn_valid(pfn))
386                         flush_dcache(pfn);
387         }
388
389         mm = vma->vm_mm;
390
391         /* Don't insert a non-valid PTE into the TSB, we'll deadlock.  */
392         if (!pte_accessible(mm, pte))
393                 return;
394
395         spin_lock_irqsave(&mm->context.lock, flags);
396
397 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
398         if ((mm->context.hugetlb_pte_count || mm->context.thp_pte_count) &&
399             is_hugetlb_pmd(__pmd(pte_val(pte)))) {
400                 /* We are fabricating 8MB pages using 4MB real hw pages.  */
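		/* Illustrative note (added): REAL_HPAGE_SHIFT is the 4MB
		 * shift, so OR-ing bit (1UL << REAL_HPAGE_SHIFT) of the
		 * faulting address into the TTE selects which 4MB half of
		 * the 8MB page this TSB entry translates.
		 */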
401                 pte_val(pte) |= (address & (1UL << REAL_HPAGE_SHIFT));
402                 __update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
403                                         address, pte_val(pte));
404         } else
405 #endif
406                 __update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
407                                         address, pte_val(pte));
408
409         spin_unlock_irqrestore(&mm->context.lock, flags);
410 }
411
412 void flush_dcache_page(struct page *page)
413 {
414         struct address_space *mapping;
415         int this_cpu;
416
417         if (tlb_type == hypervisor)
418                 return;
419
420         /* Do not bother with the expensive D-cache flush if it
421          * is merely the zero page.  The 'bigcore' testcase in GDB
422          * causes this case to run millions of times.
423          */
424         if (page == ZERO_PAGE(0))
425                 return;
426
427         this_cpu = get_cpu();
428
429         mapping = page_mapping(page);
430         if (mapping && !mapping_mapped(mapping)) {
431                 int dirty = test_bit(PG_dcache_dirty, &page->flags);
432                 if (dirty) {
433                         int dirty_cpu = dcache_dirty_cpu(page);
434
435                         if (dirty_cpu == this_cpu)
436                                 goto out;
437                         smp_flush_dcache_page_impl(page, dirty_cpu);
438                 }
439                 set_dcache_dirty(page, this_cpu);
440         } else {
441                 /* We could delay the flush for the !page_mapping
442                  * case too.  But that case is for exec env/arg
443                  * pages and those are 99% certain to get
444                  * faulted into the TLB (and thus flushed) anyway.
445                  */
446                 flush_dcache_page_impl(page);
447         }
448
449 out:
450         put_cpu();
451 }
452 EXPORT_SYMBOL(flush_dcache_page);
453
454 void __kprobes flush_icache_range(unsigned long start, unsigned long end)
455 {
456         /* Cheetah and Hypervisor platform cpus have coherent I-cache. */
457         if (tlb_type == spitfire) {
458                 unsigned long kaddr;
459
460                 /* This code only runs on Spitfire cpus, which is
461                  * why we can assume _PAGE_PADDR_4U.
462                  */
463                 for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) {
464                         unsigned long paddr, mask = _PAGE_PADDR_4U;
465
466                         if (kaddr >= PAGE_OFFSET)
467                                 paddr = kaddr & mask;
468                         else {
469                                 pgd_t *pgdp = pgd_offset_k(kaddr);
470                                 pud_t *pudp = pud_offset(pgdp, kaddr);
471                                 pmd_t *pmdp = pmd_offset(pudp, kaddr);
472                                 pte_t *ptep = pte_offset_kernel(pmdp, kaddr);
473
474                                 paddr = pte_val(*ptep) & mask;
475                         }
476                         __flush_icache_page(paddr);
477                 }
478         }
479 }
480 EXPORT_SYMBOL(flush_icache_range);
481
482 void mmu_info(struct seq_file *m)
483 {
484         static const char *pgsz_strings[] = {
485                 "8K", "64K", "512K", "4MB", "32MB",
486                 "256MB", "2GB", "16GB",
487         };
488         int i, printed;
489
490         if (tlb_type == cheetah)
491                 seq_printf(m, "MMU Type\t: Cheetah\n");
492         else if (tlb_type == cheetah_plus)
493                 seq_printf(m, "MMU Type\t: Cheetah+\n");
494         else if (tlb_type == spitfire)
495                 seq_printf(m, "MMU Type\t: Spitfire\n");
496         else if (tlb_type == hypervisor)
497                 seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n");
498         else
499                 seq_printf(m, "MMU Type\t: ???\n");
500
501         seq_printf(m, "MMU PGSZs\t: ");
502         printed = 0;
503         for (i = 0; i < ARRAY_SIZE(pgsz_strings); i++) {
504                 if (cpu_pgsz_mask & (1UL << i)) {
505                         seq_printf(m, "%s%s",
506                                    printed ? "," : "", pgsz_strings[i]);
507                         printed++;
508                 }
509         }
510         seq_putc(m, '\n');
511
512 #ifdef CONFIG_DEBUG_DCFLUSH
513         seq_printf(m, "DCPageFlushes\t: %d\n",
514                    atomic_read(&dcpage_flushes));
515 #ifdef CONFIG_SMP
516         seq_printf(m, "DCPageFlushesXC\t: %d\n",
517                    atomic_read(&dcpage_flushes_xcall));
518 #endif /* CONFIG_SMP */
519 #endif /* CONFIG_DEBUG_DCFLUSH */
520 }
521
522 struct linux_prom_translation prom_trans[512] __read_mostly;
523 unsigned int prom_trans_ents __read_mostly;
524
525 unsigned long kern_locked_tte_data;
526
527 /* The OBP translations are saved based on an 8K page size, since OBP can
528  * use a mixture of page sizes. Misses to the LOW_OBP_ADDRESS ->
529  * HI_OBP_ADDRESS range are handled in ktlb.S.
530  */
531 static inline int in_obp_range(unsigned long vaddr)
532 {
533         return (vaddr >= LOW_OBP_ADDRESS &&
534                 vaddr < HI_OBP_ADDRESS);
535 }
536
537 static int cmp_ptrans(const void *a, const void *b)
538 {
539         const struct linux_prom_translation *x = a, *y = b;
540
541         if (x->virt > y->virt)
542                 return 1;
543         if (x->virt < y->virt)
544                 return -1;
545         return 0;
546 }
547
548 /* Read OBP translations property into 'prom_trans[]'.  */
549 static void __init read_obp_translations(void)
550 {
551         int n, node, ents, first, last, i;
552
553         node = prom_finddevice("/virtual-memory");
554         n = prom_getproplen(node, "translations");
555         if (unlikely(n == 0 || n == -1)) {
556                 prom_printf("prom_mappings: Couldn't get size.\n");
557                 prom_halt();
558         }
559         if (unlikely(n > sizeof(prom_trans))) {
560                 prom_printf("prom_mappings: Size %d is too big.\n", n);
561                 prom_halt();
562         }
563
564         if ((n = prom_getproperty(node, "translations",
565                                   (char *)&prom_trans[0],
566                                   sizeof(prom_trans))) == -1) {
567                 prom_printf("prom_mappings: Couldn't get property.\n");
568                 prom_halt();
569         }
570
571         n = n / sizeof(struct linux_prom_translation);
572
573         ents = n;
574
575         sort(prom_trans, ents, sizeof(struct linux_prom_translation),
576              cmp_ptrans, NULL);
577
578         /* Now kick out all the non-OBP entries.  */
579         for (i = 0; i < ents; i++) {
580                 if (in_obp_range(prom_trans[i].virt))
581                         break;
582         }
583         first = i;
584         for (; i < ents; i++) {
585                 if (!in_obp_range(prom_trans[i].virt))
586                         break;
587         }
588         last = i;
589
590         for (i = 0; i < (last - first); i++) {
591                 struct linux_prom_translation *src = &prom_trans[i + first];
592                 struct linux_prom_translation *dest = &prom_trans[i];
593
594                 *dest = *src;
595         }
596         for (; i < ents; i++) {
597                 struct linux_prom_translation *dest = &prom_trans[i];
598                 dest->virt = dest->size = dest->data = 0x0UL;
599         }
600
601         prom_trans_ents = last - first;
602
603         if (tlb_type == spitfire) {
604                 /* Clear diag TTE bits. */
605                 for (i = 0; i < prom_trans_ents; i++)
606                         prom_trans[i].data &= ~0x0003fe0000000000UL;
607         }
608
609         /* Force execute bit on.  */
610         for (i = 0; i < prom_trans_ents; i++)
611                 prom_trans[i].data |= (tlb_type == hypervisor ?
612                                        _PAGE_EXEC_4V : _PAGE_EXEC_4U);
613 }
614
615 static void __init hypervisor_tlb_lock(unsigned long vaddr,
616                                        unsigned long pte,
617                                        unsigned long mmu)
618 {
619         unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu);
620
621         if (ret != 0) {
622                 prom_printf("hypervisor_tlb_lock[%lx:%x:%lx:%lx]: "
623                             "errors with %lx\n", vaddr, 0, pte, mmu, ret);
624                 prom_halt();
625         }
626 }
627
628 static unsigned long kern_large_tte(unsigned long paddr);
629
630 static void __init remap_kernel(void)
631 {
632         unsigned long phys_page, tte_vaddr, tte_data;
633         int i, tlb_ent = sparc64_highest_locked_tlbent();
634
635         tte_vaddr = (unsigned long) KERNBASE;
636         phys_page = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
637         tte_data = kern_large_tte(phys_page);
638
639         kern_locked_tte_data = tte_data;
640
641         /* Now lock us into the TLBs via Hypervisor or OBP. */
642         if (tlb_type == hypervisor) {
643                 for (i = 0; i < num_kernel_image_mappings; i++) {
644                         hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
645                         hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
646                         tte_vaddr += 0x400000;
647                         tte_data += 0x400000;
648                 }
649         } else {
650                 for (i = 0; i < num_kernel_image_mappings; i++) {
651                         prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr);
652                         prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr);
653                         tte_vaddr += 0x400000;
654                         tte_data += 0x400000;
655                 }
656                 sparc64_highest_unlocked_tlb_ent = tlb_ent - i;
657         }
658         if (tlb_type == cheetah_plus) {
659                 sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
660                                             CTX_CHEETAH_PLUS_NUC);
661                 sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
662                 sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
663         }
664 }
665
666
667 static void __init inherit_prom_mappings(void)
668 {
669         /* Now fixup OBP's idea about where we really are mapped. */
670         printk("Remapping the kernel... ");
671         remap_kernel();
672         printk("done.\n");
673 }
674
675 void prom_world(int enter)
676 {
677         if (!enter)
678                 set_fs(get_fs());
679
680         __asm__ __volatile__("flushw");
681 }
682
683 void __flush_dcache_range(unsigned long start, unsigned long end)
684 {
685         unsigned long va;
686
687         if (tlb_type == spitfire) {
688                 int n = 0;
689
690                 for (va = start; va < end; va += 32) {
691                         spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
692                         if (++n >= 512)
693                                 break;
694                 }
695         } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
696                 start = __pa(start);
697                 end = __pa(end);
698                 for (va = start; va < end; va += 32)
699                         __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
700                                              "membar #Sync"
701                                              : /* no outputs */
702                                              : "r" (va),
703                                                "i" (ASI_DCACHE_INVALIDATE));
704         }
705 }
706 EXPORT_SYMBOL(__flush_dcache_range);
707
708 /* get_new_mmu_context() uses "cache + 1".  */
709 DEFINE_SPINLOCK(ctx_alloc_lock);
710 unsigned long tlb_context_cache = CTX_FIRST_VERSION;
711 #define MAX_CTX_NR      (1UL << CTX_NR_BITS)
712 #define CTX_BMAP_SLOTS  BITS_TO_LONGS(MAX_CTX_NR)
713 DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
714 DEFINE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm) = {0};
715
716 /* Caller does TLB context flushing on local CPU if necessary.
717  * The caller also ensures that CTX_VALID(mm->context) is false.
718  *
719  * We must be careful about boundary cases so that we never
720  * let the user have CTX 0 (nucleus), nor ever use a CTX
721  * version of zero (otherwise NO_CONTEXT would not be caught
722  * by version mis-match tests in mmu_context.h).
723  *
724  * Always invoked with interrupts disabled.
725  */
726 void get_new_mmu_context(struct mm_struct *mm)
727 {
728         unsigned long ctx, new_ctx;
729         unsigned long orig_pgsz_bits;
730         int new_version;
731
732         spin_lock(&ctx_alloc_lock);
733         orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
734         ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
735         new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
736         new_version = 0;
737         if (new_ctx >= (1 << CTX_NR_BITS)) {
738                 new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
739                 if (new_ctx >= ctx) {
740                         int i;
741                         new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
742                                 CTX_FIRST_VERSION + 1;
743                         if (new_ctx == 1)
744                                 new_ctx = CTX_FIRST_VERSION + 1;
745
746                         /* Don't call memset, for 16 entries that's just
747                          * plain silly...
748                          */
749                         mmu_context_bmap[0] = 3;
750                         mmu_context_bmap[1] = 0;
751                         mmu_context_bmap[2] = 0;
752                         mmu_context_bmap[3] = 0;
753                         for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
754                                 mmu_context_bmap[i + 0] = 0;
755                                 mmu_context_bmap[i + 1] = 0;
756                                 mmu_context_bmap[i + 2] = 0;
757                                 mmu_context_bmap[i + 3] = 0;
758                         }
759                         new_version = 1;
760                         goto out;
761                 }
762         }
763         if (mm->context.sparc64_ctx_val)
764                 cpumask_clear(mm_cpumask(mm));
765         mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
766         new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
767 out:
768         tlb_context_cache = new_ctx;
769         mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
770         spin_unlock(&ctx_alloc_lock);
771
772         if (unlikely(new_version))
773                 smp_new_mmu_context_version();
774 }
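/* Illustrative note (added): a context value packs a version number in the
 * upper bits (CTX_VERSION_MASK) and a context number in the low CTX_NR_BITS
 * bits.  When the bitmap search above wraps and the version is bumped, every
 * mm holding an old version fails the CTX_VALID() check and is forced back
 * into get_new_mmu_context().
 */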
775
776 static int numa_enabled = 1;
777 static int numa_debug;
778
779 static int __init early_numa(char *p)
780 {
781         if (!p)
782                 return 0;
783
784         if (strstr(p, "off"))
785                 numa_enabled = 0;
786
787         if (strstr(p, "debug"))
788                 numa_debug = 1;
789
790         return 0;
791 }
792 early_param("numa", early_numa);
793
794 #define numadbg(f, a...) \
795 do {    if (numa_debug) \
796                 printk(KERN_INFO f, ## a); \
797 } while (0)
798
799 static void __init find_ramdisk(unsigned long phys_base)
800 {
801 #ifdef CONFIG_BLK_DEV_INITRD
802         if (sparc_ramdisk_image || sparc_ramdisk_image64) {
803                 unsigned long ramdisk_image;
804
805                 /* Older versions of the bootloader only supported a
806                  * 32-bit physical address for the ramdisk image
807                  * location, stored at sparc_ramdisk_image.  Newer
808                  * SILO versions set sparc_ramdisk_image to zero and
809                  * provide a full 64-bit physical address at
810                  * sparc_ramdisk_image64.
811                  */
812                 ramdisk_image = sparc_ramdisk_image;
813                 if (!ramdisk_image)
814                         ramdisk_image = sparc_ramdisk_image64;
815
816                 /* Another bootloader quirk.  The bootloader normalizes
817                  * the physical address to KERNBASE, so we have to
818                  * factor that back out and add in the lowest valid
819                  * physical page address to get the true physical address.
820                  */
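		/* Worked example (hypothetical numbers): if the bootloader
		 * reports sparc_ramdisk_image == KERNBASE + 0x800000 and the
		 * lowest valid physical page is phys_base == 0x20000000, the
		 * true ramdisk physical address computed below is
		 * 0x20000000 + 0x800000 = 0x20800000.
		 */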
821                 ramdisk_image -= KERNBASE;
822                 ramdisk_image += phys_base;
823
824                 numadbg("Found ramdisk at physical address 0x%lx, size %u\n",
825                         ramdisk_image, sparc_ramdisk_size);
826
827                 initrd_start = ramdisk_image;
828                 initrd_end = ramdisk_image + sparc_ramdisk_size;
829
830                 memblock_reserve(initrd_start, sparc_ramdisk_size);
831
832                 initrd_start += PAGE_OFFSET;
833                 initrd_end += PAGE_OFFSET;
834         }
835 #endif
836 }
837
838 struct node_mem_mask {
839         unsigned long mask;
840         unsigned long match;
841 };
842 static struct node_mem_mask node_masks[MAX_NUMNODES];
843 static int num_node_masks;
844
845 #ifdef CONFIG_NEED_MULTIPLE_NODES
846
847 struct mdesc_mlgroup {
848         u64     node;
849         u64     latency;
850         u64     match;
851         u64     mask;
852 };
853
854 static struct mdesc_mlgroup *mlgroups;
855 static int num_mlgroups;
856
857 int numa_cpu_lookup_table[NR_CPUS];
858 cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];
859
860 struct mdesc_mblock {
861         u64     base;
862         u64     size;
863         u64     offset; /* RA-to-PA */
864 };
865 static struct mdesc_mblock *mblocks;
866 static int num_mblocks;
867
868 static struct mdesc_mblock * __init addr_to_mblock(unsigned long addr)
869 {
870         struct mdesc_mblock *m = NULL;
871         int i;
872
873         for (i = 0; i < num_mblocks; i++) {
874                 m = &mblocks[i];
875
876                 if (addr >= m->base &&
877                     addr < (m->base + m->size)) {
878                         break;
879                 }
880         }
881
882         return m;
883 }
884
885 static u64 __init memblock_nid_range_sun4u(u64 start, u64 end, int *nid)
886 {
887         int prev_nid, new_nid;
888
889         prev_nid = -1;
890         for ( ; start < end; start += PAGE_SIZE) {
891                 for (new_nid = 0; new_nid < num_node_masks; new_nid++) {
892                         struct node_mem_mask *p = &node_masks[new_nid];
893
894                         if ((start & p->mask) == p->match) {
895                                 if (prev_nid == -1)
896                                         prev_nid = new_nid;
897                                 break;
898                         }
899                 }
900
901                 if (new_nid == num_node_masks) {
902                         prev_nid = 0;
903                         WARN_ONCE(1, "addr[%Lx] doesn't match a NUMA node rule. Some memory will be owned by node 0.",
904                                   start);
905                         break;
906                 }
907
908                 if (prev_nid != new_nid)
909                         break;
910         }
911         *nid = prev_nid;
912
913         return start > end ? end : start;
914 }
915
916 static u64 __init memblock_nid_range(u64 start, u64 end, int *nid)
917 {
918         u64 ret_end, pa_start, m_mask, m_match, m_end;
919         struct mdesc_mblock *mblock;
920         int _nid, i;
921
922         if (tlb_type != hypervisor)
923                 return memblock_nid_range_sun4u(start, end, nid);
924
925         mblock = addr_to_mblock(start);
926         if (!mblock) {
927                 WARN_ONCE(1, "memblock_nid_range: Can't find mblock addr[%Lx]",
928                           start);
929
930                 _nid = 0;
931                 ret_end = end;
932                 goto done;
933         }
934
935         pa_start = start + mblock->offset;
936         m_match = 0;
937         m_mask = 0;
938
939         for (_nid = 0; _nid < num_node_masks; _nid++) {
940                 struct node_mem_mask *const m = &node_masks[_nid];
941
942                 if ((pa_start & m->mask) == m->match) {
943                         m_match = m->match;
944                         m_mask = m->mask;
945                         break;
946                 }
947         }
948
949         if (num_node_masks == _nid) {
950                 /* We could not find a NUMA group, so default to 0, but
951                  * search for the latency group anyway so we can calculate
952                  * the correct end address to return.
953                  */
954                 _nid = 0;
955
956                 for (i = 0; i < num_mlgroups; i++) {
957                         struct mdesc_mlgroup *const m = &mlgroups[i];
958
959                         if ((pa_start & m->mask) == m->match) {
960                                 m_match = m->match;
961                                 m_mask = m->mask;
962                                 break;
963                         }
964                 }
965
966                 if (i == num_mlgroups) {
967                         WARN_ONCE(1, "memblock_nid_range: Can't find latency group addr[%Lx]",
968                                   start);
969
970                         ret_end = end;
971                         goto done;
972                 }
973         }
974
975         /*
976          * Each latency group has a match and a mask, and each memory block
977          * has an offset.  An address belongs to a latency group if it
978          * satisfies the formula: ((addr + offset) & mask) == match
979          * It is, however, slow to check every single page against a
980          * particular latency group.  As an optimization we calculate the
981          * end value using bit arithmetic.
982          */
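	/* Worked example (illustrative numbers only): with match == 0x0,
	 * mask == 0x7800000000 and a block offset of 0x8000000000, an
	 * address belongs to this group when
	 * ((addr + 0x8000000000) & 0x7800000000) == 0x0.  The computation
	 * below finds the first address past the current run of matching
	 * pages instead of testing each page individually.
	 */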
983         m_end = m_match + (1ul << __ffs(m_mask)) - mblock->offset;
984         m_end += pa_start & ~((1ul << fls64(m_mask)) - 1);
985         ret_end = m_end > end ? end : m_end;
986
987 done:
988         *nid = _nid;
989         return ret_end;
990 }
991 #endif
992
993 /* This must be invoked after performing all of the necessary
994  * memblock_set_node() calls for 'nid'.  We need to be able to get
995  * correct data from get_pfn_range_for_nid().
996  */
997 static void __init allocate_node_data(int nid)
998 {
999         struct pglist_data *p;
1000         unsigned long start_pfn, end_pfn;
1001 #ifdef CONFIG_NEED_MULTIPLE_NODES
1002         unsigned long paddr;
1003
1004         paddr = memblock_alloc_try_nid(sizeof(struct pglist_data), SMP_CACHE_BYTES, nid);
1005         if (!paddr) {
1006                 prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
1007                 prom_halt();
1008         }
1009         NODE_DATA(nid) = __va(paddr);
1010         memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
1011
1012         NODE_DATA(nid)->node_id = nid;
1013 #endif
1014
1015         p = NODE_DATA(nid);
1016
1017         get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
1018         p->node_start_pfn = start_pfn;
1019         p->node_spanned_pages = end_pfn - start_pfn;
1020 }
1021
1022 static void init_node_masks_nonnuma(void)
1023 {
1024 #ifdef CONFIG_NEED_MULTIPLE_NODES
1025         int i;
1026 #endif
1027
1028         numadbg("Initializing tables for non-numa.\n");
1029
1030         node_masks[0].mask = 0;
1031         node_masks[0].match = 0;
1032         num_node_masks = 1;
1033
1034 #ifdef CONFIG_NEED_MULTIPLE_NODES
1035         for (i = 0; i < NR_CPUS; i++)
1036                 numa_cpu_lookup_table[i] = 0;
1037
1038         cpumask_setall(&numa_cpumask_lookup_table[0]);
1039 #endif
1040 }
1041
1042 #ifdef CONFIG_NEED_MULTIPLE_NODES
1043 struct pglist_data *node_data[MAX_NUMNODES];
1044
1045 EXPORT_SYMBOL(numa_cpu_lookup_table);
1046 EXPORT_SYMBOL(numa_cpumask_lookup_table);
1047 EXPORT_SYMBOL(node_data);
1048
1049 static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio,
1050                                    u32 cfg_handle)
1051 {
1052         u64 arc;
1053
1054         mdesc_for_each_arc(arc, md, pio, MDESC_ARC_TYPE_FWD) {
1055                 u64 target = mdesc_arc_target(md, arc);
1056                 const u64 *val;
1057
1058                 val = mdesc_get_property(md, target,
1059                                          "cfg-handle", NULL);
1060                 if (val && *val == cfg_handle)
1061                         return 0;
1062         }
1063         return -ENODEV;
1064 }
1065
1066 static int scan_arcs_for_cfg_handle(struct mdesc_handle *md, u64 grp,
1067                                     u32 cfg_handle)
1068 {
1069         u64 arc, candidate, best_latency = ~(u64)0;
1070
1071         candidate = MDESC_NODE_NULL;
1072         mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
1073                 u64 target = mdesc_arc_target(md, arc);
1074                 const char *name = mdesc_node_name(md, target);
1075                 const u64 *val;
1076
1077                 if (strcmp(name, "pio-latency-group"))
1078                         continue;
1079
1080                 val = mdesc_get_property(md, target, "latency", NULL);
1081                 if (!val)
1082                         continue;
1083
1084                 if (*val < best_latency) {
1085                         candidate = target;
1086                         best_latency = *val;
1087                 }
1088         }
1089
1090         if (candidate == MDESC_NODE_NULL)
1091                 return -ENODEV;
1092
1093         return scan_pio_for_cfg_handle(md, candidate, cfg_handle);
1094 }
1095
1096 int of_node_to_nid(struct device_node *dp)
1097 {
1098         const struct linux_prom64_registers *regs;
1099         struct mdesc_handle *md;
1100         u32 cfg_handle;
1101         int count, nid;
1102         u64 grp;
1103
1104         /* This is the right thing to do on currently supported
1105          * SUN4U NUMA platforms as well, as the PCI controller does
1106          * not sit behind any particular memory controller.
1107          */
1108         if (!mlgroups)
1109                 return -1;
1110
1111         regs = of_get_property(dp, "reg", NULL);
1112         if (!regs)
1113                 return -1;
1114
1115         cfg_handle = (regs->phys_addr >> 32UL) & 0x0fffffff;
1116
1117         md = mdesc_grab();
1118
1119         count = 0;
1120         nid = -1;
1121         mdesc_for_each_node_by_name(md, grp, "group") {
1122                 if (!scan_arcs_for_cfg_handle(md, grp, cfg_handle)) {
1123                         nid = count;
1124                         break;
1125                 }
1126                 count++;
1127         }
1128
1129         mdesc_release(md);
1130
1131         return nid;
1132 }
1133
1134 static void __init add_node_ranges(void)
1135 {
1136         struct memblock_region *reg;
1137         unsigned long prev_max;
1138
1139 memblock_resized:
1140         prev_max = memblock.memory.max;
1141
1142         for_each_memblock(memory, reg) {
1143                 unsigned long size = reg->size;
1144                 unsigned long start, end;
1145
1146                 start = reg->base;
1147                 end = start + size;
1148                 while (start < end) {
1149                         unsigned long this_end;
1150                         int nid;
1151
1152                         this_end = memblock_nid_range(start, end, &nid);
1153
1154                         numadbg("Setting memblock NUMA node nid[%d] "
1155                                 "start[%lx] end[%lx]\n",
1156                                 nid, start, this_end);
1157
1158                         memblock_set_node(start, this_end - start,
1159                                           &memblock.memory, nid);
1160                         if (memblock.memory.max != prev_max)
1161                                 goto memblock_resized;
1162                         start = this_end;
1163                 }
1164         }
1165 }
1166
1167 static int __init grab_mlgroups(struct mdesc_handle *md)
1168 {
1169         unsigned long paddr;
1170         int count = 0;
1171         u64 node;
1172
1173         mdesc_for_each_node_by_name(md, node, "memory-latency-group")
1174                 count++;
1175         if (!count)
1176                 return -ENOENT;
1177
1178         paddr = memblock_alloc(count * sizeof(struct mdesc_mlgroup),
1179                           SMP_CACHE_BYTES);
1180         if (!paddr)
1181                 return -ENOMEM;
1182
1183         mlgroups = __va(paddr);
1184         num_mlgroups = count;
1185
1186         count = 0;
1187         mdesc_for_each_node_by_name(md, node, "memory-latency-group") {
1188                 struct mdesc_mlgroup *m = &mlgroups[count++];
1189                 const u64 *val;
1190
1191                 m->node = node;
1192
1193                 val = mdesc_get_property(md, node, "latency", NULL);
1194                 m->latency = *val;
1195                 val = mdesc_get_property(md, node, "address-match", NULL);
1196                 m->match = *val;
1197                 val = mdesc_get_property(md, node, "address-mask", NULL);
1198                 m->mask = *val;
1199
1200                 numadbg("MLGROUP[%d]: node[%llx] latency[%llx] "
1201                         "match[%llx] mask[%llx]\n",
1202                         count - 1, m->node, m->latency, m->match, m->mask);
1203         }
1204
1205         return 0;
1206 }
1207
1208 static int __init grab_mblocks(struct mdesc_handle *md)
1209 {
1210         unsigned long paddr;
1211         int count = 0;
1212         u64 node;
1213
1214         mdesc_for_each_node_by_name(md, node, "mblock")
1215                 count++;
1216         if (!count)
1217                 return -ENOENT;
1218
1219         paddr = memblock_alloc(count * sizeof(struct mdesc_mblock),
1220                           SMP_CACHE_BYTES);
1221         if (!paddr)
1222                 return -ENOMEM;
1223
1224         mblocks = __va(paddr);
1225         num_mblocks = count;
1226
1227         count = 0;
1228         mdesc_for_each_node_by_name(md, node, "mblock") {
1229                 struct mdesc_mblock *m = &mblocks[count++];
1230                 const u64 *val;
1231
1232                 val = mdesc_get_property(md, node, "base", NULL);
1233                 m->base = *val;
1234                 val = mdesc_get_property(md, node, "size", NULL);
1235                 m->size = *val;
1236                 val = mdesc_get_property(md, node,
1237                                          "address-congruence-offset", NULL);
1238
1239                 /* The address-congruence-offset property is optional.
1240                  * Explicitly zero it to identify this case.
1241                  */
1242                 if (val)
1243                         m->offset = *val;
1244                 else
1245                         m->offset = 0UL;
1246
1247                 numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n",
1248                         count - 1, m->base, m->size, m->offset);
1249         }
1250
1251         return 0;
1252 }
1253
1254 static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md,
1255                                                u64 grp, cpumask_t *mask)
1256 {
1257         u64 arc;
1258
1259         cpumask_clear(mask);
1260
1261         mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_BACK) {
1262                 u64 target = mdesc_arc_target(md, arc);
1263                 const char *name = mdesc_node_name(md, target);
1264                 const u64 *id;
1265
1266                 if (strcmp(name, "cpu"))
1267                         continue;
1268                 id = mdesc_get_property(md, target, "id", NULL);
1269                 if (*id < nr_cpu_ids)
1270                         cpumask_set_cpu(*id, mask);
1271         }
1272 }
1273
1274 static struct mdesc_mlgroup * __init find_mlgroup(u64 node)
1275 {
1276         int i;
1277
1278         for (i = 0; i < num_mlgroups; i++) {
1279                 struct mdesc_mlgroup *m = &mlgroups[i];
1280                 if (m->node == node)
1281                         return m;
1282         }
1283         return NULL;
1284 }
1285
1286 int __node_distance(int from, int to)
1287 {
1288         if ((from >= MAX_NUMNODES) || (to >= MAX_NUMNODES)) {
1289                 pr_warn("Returning default NUMA distance value for %d->%d\n",
1290                         from, to);
1291                 return (from == to) ? LOCAL_DISTANCE : REMOTE_DISTANCE;
1292         }
1293         return numa_latency[from][to];
1294 }
1295
1296 static int __init find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)
1297 {
1298         int i;
1299
1300         for (i = 0; i < MAX_NUMNODES; i++) {
1301                 struct node_mem_mask *n = &node_masks[i];
1302
1303                 if ((grp->mask == n->mask) && (grp->match == n->match))
1304                         break;
1305         }
1306         return i;
1307 }
1308
1309 static void __init find_numa_latencies_for_group(struct mdesc_handle *md,
1310                                                  u64 grp, int index)
1311 {
1312         u64 arc;
1313
1314         mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
1315                 int tnode;
1316                 u64 target = mdesc_arc_target(md, arc);
1317                 struct mdesc_mlgroup *m = find_mlgroup(target);
1318
1319                 if (!m)
1320                         continue;
1321                 tnode = find_best_numa_node_for_mlgroup(m);
1322                 if (tnode == MAX_NUMNODES)
1323                         continue;
1324                 numa_latency[index][tnode] = m->latency;
1325         }
1326 }
1327
1328 static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp,
1329                                       int index)
1330 {
1331         struct mdesc_mlgroup *candidate = NULL;
1332         u64 arc, best_latency = ~(u64)0;
1333         struct node_mem_mask *n;
1334
1335         mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
1336                 u64 target = mdesc_arc_target(md, arc);
1337                 struct mdesc_mlgroup *m = find_mlgroup(target);
1338                 if (!m)
1339                         continue;
1340                 if (m->latency < best_latency) {
1341                         candidate = m;
1342                         best_latency = m->latency;
1343                 }
1344         }
1345         if (!candidate)
1346                 return -ENOENT;
1347
1348         if (num_node_masks != index) {
1349                 printk(KERN_ERR "Inconsistent NUMA state, "
1350                        "index[%d] != num_node_masks[%d]\n",
1351                        index, num_node_masks);
1352                 return -EINVAL;
1353         }
1354
1355         n = &node_masks[num_node_masks++];
1356
1357         n->mask = candidate->mask;
1358         n->match = candidate->match;
1359
1360         numadbg("NUMA NODE[%d]: mask[%lx] match[%lx] (latency[%llx])\n",
1361                 index, n->mask, n->match, candidate->latency);
1362
1363         return 0;
1364 }
1365
1366 static int __init numa_parse_mdesc_group(struct mdesc_handle *md, u64 grp,
1367                                          int index)
1368 {
1369         cpumask_t mask;
1370         int cpu;
1371
1372         numa_parse_mdesc_group_cpus(md, grp, &mask);
1373
1374         for_each_cpu(cpu, &mask)
1375                 numa_cpu_lookup_table[cpu] = index;
1376         cpumask_copy(&numa_cpumask_lookup_table[index], &mask);
1377
1378         if (numa_debug) {
1379                 printk(KERN_INFO "NUMA GROUP[%d]: cpus [ ", index);
1380                 for_each_cpu(cpu, &mask)
1381                         printk("%d ", cpu);
1382                 printk("]\n");
1383         }
1384
1385         return numa_attach_mlgroup(md, grp, index);
1386 }
1387
1388 static int __init numa_parse_mdesc(void)
1389 {
1390         struct mdesc_handle *md = mdesc_grab();
1391         int i, j, err, count;
1392         u64 node;
1393
1394         node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
1395         if (node == MDESC_NODE_NULL) {
1396                 mdesc_release(md);
1397                 return -ENOENT;
1398         }
1399
1400         err = grab_mblocks(md);
1401         if (err < 0)
1402                 goto out;
1403
1404         err = grab_mlgroups(md);
1405         if (err < 0)
1406                 goto out;
1407
1408         count = 0;
1409         mdesc_for_each_node_by_name(md, node, "group") {
1410                 err = numa_parse_mdesc_group(md, node, count);
1411                 if (err < 0)
1412                         break;
1413                 count++;
1414         }
1415
1416         count = 0;
1417         mdesc_for_each_node_by_name(md, node, "group") {
1418                 find_numa_latencies_for_group(md, node, count);
1419                 count++;
1420         }
1421
1422         /* Normalize numa latency matrix according to ACPI SLIT spec. */
1423         for (i = 0; i < MAX_NUMNODES; i++) {
1424                 u64 self_latency = numa_latency[i][i];
1425
1426                 for (j = 0; j < MAX_NUMNODES; j++) {
1427                         numa_latency[i][j] =
1428                                 (numa_latency[i][j] * LOCAL_DISTANCE) /
1429                                 self_latency;
1430                 }
1431         }
1432
1433         add_node_ranges();
1434
1435         for (i = 0; i < num_node_masks; i++) {
1436                 allocate_node_data(i);
1437                 node_set_online(i);
1438         }
1439
1440         err = 0;
1441 out:
1442         mdesc_release(md);
1443         return err;
1444 }
1445
1446 static int __init numa_parse_jbus(void)
1447 {
1448         unsigned long cpu, index;
1449
1450         /* NUMA node id is encoded in bits 36 and higher, and there is
1451          * a 1-to-1 mapping from CPU ID to NUMA node ID.
1452          */
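	/* Worked example (illustrative): with CPUs 0-3 present, CPU 2 is
	 * assigned NUMA node 2, and its memory is the range whose physical
	 * address bits 36 and up equal 2, i.e. match == 2UL << 36 with
	 * mask == ~((1UL << 36) - 1), exactly as set up in the loop below.
	 */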
1453         index = 0;
1454         for_each_present_cpu(cpu) {
1455                 numa_cpu_lookup_table[cpu] = index;
1456                 cpumask_copy(&numa_cpumask_lookup_table[index], cpumask_of(cpu));
1457                 node_masks[index].mask = ~((1UL << 36UL) - 1UL);
1458                 node_masks[index].match = cpu << 36UL;
1459
1460                 index++;
1461         }
1462         num_node_masks = index;
1463
1464         add_node_ranges();
1465
1466         for (index = 0; index < num_node_masks; index++) {
1467                 allocate_node_data(index);
1468                 node_set_online(index);
1469         }
1470
1471         return 0;
1472 }
1473
1474 static int __init numa_parse_sun4u(void)
1475 {
1476         if (tlb_type == cheetah || tlb_type == cheetah_plus) {
1477                 unsigned long ver;
1478
1479                 __asm__ ("rdpr %%ver, %0" : "=r" (ver));
1480                 if ((ver >> 32UL) == __JALAPENO_ID ||
1481                     (ver >> 32UL) == __SERRANO_ID)
1482                         return numa_parse_jbus();
1483         }
1484         return -1;
1485 }
1486
1487 static int __init bootmem_init_numa(void)
1488 {
1489         int i, j;
1490         int err = -1;
1491
1492         numadbg("bootmem_init_numa()\n");
1493
1494         /* Some sane defaults for numa latency values */
1495         for (i = 0; i < MAX_NUMNODES; i++) {
1496                 for (j = 0; j < MAX_NUMNODES; j++)
1497                         numa_latency[i][j] = (i == j) ?
1498                                 LOCAL_DISTANCE : REMOTE_DISTANCE;
1499         }
1500
1501         if (numa_enabled) {
1502                 if (tlb_type == hypervisor)
1503                         err = numa_parse_mdesc();
1504                 else
1505                         err = numa_parse_sun4u();
1506         }
1507         return err;
1508 }
1509
1510 #else
1511
1512 static int bootmem_init_numa(void)
1513 {
1514         return -1;
1515 }
1516
1517 #endif
1518
1519 static void __init bootmem_init_nonnuma(void)
1520 {
1521         unsigned long top_of_ram = memblock_end_of_DRAM();
1522         unsigned long total_ram = memblock_phys_mem_size();
1523
1524         numadbg("bootmem_init_nonnuma()\n");
1525
1526         printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
1527                top_of_ram, total_ram);
1528         printk(KERN_INFO "Memory hole size: %ldMB\n",
1529                (top_of_ram - total_ram) >> 20);
1530
1531         init_node_masks_nonnuma();
1532         memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
1533         allocate_node_data(0);
1534         node_set_online(0);
1535 }
1536
1537 static unsigned long __init bootmem_init(unsigned long phys_base)
1538 {
1539         unsigned long end_pfn;
1540
1541         end_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
1542         max_pfn = max_low_pfn = end_pfn;
1543         min_low_pfn = (phys_base >> PAGE_SHIFT);
1544
1545         if (bootmem_init_numa() < 0)
1546                 bootmem_init_nonnuma();
1547
1548         /* Dump memblock with node info. */
1549         memblock_dump_all();
1550
1551         /* XXX cpu notifier XXX */
1552
1553         sparse_memory_present_with_active_regions(MAX_NUMNODES);
1554         sparse_init();
1555
1556         return end_pfn;
1557 }
1558
1559 static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
1560 static int pall_ents __initdata;
1561
1562 static unsigned long max_phys_bits = 40;
1563
1564 bool kern_addr_valid(unsigned long addr)
1565 {
1566         pgd_t *pgd;
1567         pud_t *pud;
1568         pmd_t *pmd;
1569         pte_t *pte;
1570
1571         if ((long)addr < 0L) {
1572                 unsigned long pa = __pa(addr);
1573
1574                 if ((pa >> max_phys_bits) != 0UL)
1575                         return false;
1576
1577                 return pfn_valid(pa >> PAGE_SHIFT);
1578         }
1579
1580         if (addr >= (unsigned long) KERNBASE &&
1581             addr < (unsigned long)&_end)
1582                 return true;
1583
1584         pgd = pgd_offset_k(addr);
1585         if (pgd_none(*pgd))
1586                 return false;
1587
1588         pud = pud_offset(pgd, addr);
1589         if (pud_none(*pud))
1590                 return false;
1591
1592         if (pud_large(*pud))
1593                 return pfn_valid(pud_pfn(*pud));
1594
1595         pmd = pmd_offset(pud, addr);
1596         if (pmd_none(*pmd))
1597                 return false;
1598
1599         if (pmd_large(*pmd))
1600                 return pfn_valid(pmd_pfn(*pmd));
1601
1602         pte = pte_offset_kernel(pmd, addr);
1603         if (pte_none(*pte))
1604                 return false;
1605
1606         return pfn_valid(pte_pfn(*pte));
1607 }
1608 EXPORT_SYMBOL(kern_addr_valid);
1609
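/* Summary comment (added; derived from the code below): map part of
 * [vstart, vend) with a huge mapping at the PUD level.  A range that is
 * not 16GB aligned, or shorter than 16GB, gets a single PUD entry based
 * on kern_linear_pte_xor[2] (the 2GB-class slot; see
 * sun4v_linear_pte_xor_finalize()); otherwise one naturally aligned
 * 16GB block is filled, PUD by PUD, with kern_linear_pte_xor[3] based
 * entries.  Returns the next unmapped virtual address.
 */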
1610 static unsigned long __ref kernel_map_hugepud(unsigned long vstart,
1611                                               unsigned long vend,
1612                                               pud_t *pud)
1613 {
1614         const unsigned long mask16gb = (1UL << 34) - 1UL;
1615         u64 pte_val = vstart;
1616
1617         /* Each PUD is 8GB */
1618         if ((vstart & mask16gb) ||
1619             (vend - vstart <= mask16gb)) {
1620                 pte_val ^= kern_linear_pte_xor[2];
1621                 pud_val(*pud) = pte_val | _PAGE_PUD_HUGE;
1622
1623                 return vstart + PUD_SIZE;
1624         }
1625
1626         pte_val ^= kern_linear_pte_xor[3];
1627         pte_val |= _PAGE_PUD_HUGE;
1628
1629         vend = vstart + mask16gb + 1UL;
1630         while (vstart < vend) {
1631                 pud_val(*pud) = pte_val;
1632
1633                 pte_val += PUD_SIZE;
1634                 vstart += PUD_SIZE;
1635                 pud++;
1636         }
1637         return vstart;
1638 }
1639
1640 static bool kernel_can_map_hugepud(unsigned long vstart, unsigned long vend,
1641                                    bool guard)
1642 {
1643         if (guard && !(vstart & ~PUD_MASK) && (vend - vstart) >= PUD_SIZE)
1644                 return true;
1645
1646         return false;
1647 }
1648
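/* Summary comment (added; derived from the code below): PMD-level
 * counterpart of kernel_map_hugepud().  A range that is not 256MB
 * aligned, or shorter than 256MB, gets a single PMD entry based on
 * kern_linear_pte_xor[0].  Otherwise one naturally aligned block is
 * written PMD by PMD: a 256MB block using kern_linear_pte_xor[1]
 * (256MB-class slot) when the range is not 2GB aligned or is shorter
 * than 2GB, else a 2GB block using kern_linear_pte_xor[2] (2GB-class
 * slot).  Returns the next unmapped virtual address.
 */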
1649 static unsigned long __ref kernel_map_hugepmd(unsigned long vstart,
1650                                               unsigned long vend,
1651                                               pmd_t *pmd)
1652 {
1653         const unsigned long mask256mb = (1UL << 28) - 1UL;
1654         const unsigned long mask2gb = (1UL << 31) - 1UL;
1655         u64 pte_val = vstart;
1656
1657         /* Each PMD is 8MB */
1658         if ((vstart & mask256mb) ||
1659             (vend - vstart <= mask256mb)) {
1660                 pte_val ^= kern_linear_pte_xor[0];
1661                 pmd_val(*pmd) = pte_val | _PAGE_PMD_HUGE;
1662
1663                 return vstart + PMD_SIZE;
1664         }
1665
1666         if ((vstart & mask2gb) ||
1667             (vend - vstart <= mask2gb)) {
1668                 pte_val ^= kern_linear_pte_xor[1];
1669                 pte_val |= _PAGE_PMD_HUGE;
1670                 vend = vstart + mask256mb + 1UL;
1671         } else {
1672                 pte_val ^= kern_linear_pte_xor[2];
1673                 pte_val |= _PAGE_PMD_HUGE;
1674                 vend = vstart + mask2gb + 1UL;
1675         }
1676
1677         while (vstart < vend) {
1678                 pmd_val(*pmd) = pte_val;
1679
1680                 pte_val += PMD_SIZE;
1681                 vstart += PMD_SIZE;
1682                 pmd++;
1683         }
1684
1685         return vstart;
1686 }
1687
1688 static bool kernel_can_map_hugepmd(unsigned long vstart, unsigned long vend,
1689                                    bool guard)
1690 {
1691         if (guard && !(vstart & ~PMD_MASK) && (vend - vstart) >= PMD_SIZE)
1692                 return true;
1693
1694         return false;
1695 }
1696
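/* Summary comment (added; derived from the code below): build
 * linear-area page tables for the physical range [pstart, pend).
 * Intermediate tables are allocated from bootmem; huge PUD/PMD
 * mappings are used whenever use_huge allows and the alignment checks
 * in kernel_can_map_hugepud()/kernel_can_map_hugepmd() pass, otherwise
 * the range is filled with base-size PTEs.  Returns the number of
 * bytes allocated for page tables.
 */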
1697 static unsigned long __ref kernel_map_range(unsigned long pstart,
1698                                             unsigned long pend, pgprot_t prot,
1699                                             bool use_huge)
1700 {
1701         unsigned long vstart = PAGE_OFFSET + pstart;
1702         unsigned long vend = PAGE_OFFSET + pend;
1703         unsigned long alloc_bytes = 0UL;
1704
1705         if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
1706                 prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
1707                             vstart, vend);
1708                 prom_halt();
1709         }
1710
1711         while (vstart < vend) {
1712                 unsigned long this_end, paddr = __pa(vstart);
1713                 pgd_t *pgd = pgd_offset_k(vstart);
1714                 pud_t *pud;
1715                 pmd_t *pmd;
1716                 pte_t *pte;
1717
1718                 if (pgd_none(*pgd)) {
1719                         pud_t *new;
1720
1721                         new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1722                         alloc_bytes += PAGE_SIZE;
1723                         pgd_populate(&init_mm, pgd, new);
1724                 }
1725                 pud = pud_offset(pgd, vstart);
1726                 if (pud_none(*pud)) {
1727                         pmd_t *new;
1728
1729                         if (kernel_can_map_hugepud(vstart, vend, use_huge)) {
1730                                 vstart = kernel_map_hugepud(vstart, vend, pud);
1731                                 continue;
1732                         }
1733                         new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1734                         alloc_bytes += PAGE_SIZE;
1735                         pud_populate(&init_mm, pud, new);
1736                 }
1737
1738                 pmd = pmd_offset(pud, vstart);
1739                 if (pmd_none(*pmd)) {
1740                         pte_t *new;
1741
1742                         if (kernel_can_map_hugepmd(vstart, vend, use_huge)) {
1743                                 vstart = kernel_map_hugepmd(vstart, vend, pmd);
1744                                 continue;
1745                         }
1746                         new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1747                         alloc_bytes += PAGE_SIZE;
1748                         pmd_populate_kernel(&init_mm, pmd, new);
1749                 }
1750
1751                 pte = pte_offset_kernel(pmd, vstart);
1752                 this_end = (vstart + PMD_SIZE) & PMD_MASK;
1753                 if (this_end > vend)
1754                         this_end = vend;
1755
1756                 while (vstart < this_end) {
1757                         pte_val(*pte) = (paddr | pgprot_val(prot));
1758
1759                         vstart += PAGE_SIZE;
1760                         paddr += PAGE_SIZE;
1761                         pte++;
1762                 }
1763         }
1764
1765         return alloc_bytes;
1766 }
1767
1768 static void __init flush_all_kernel_tsbs(void)
1769 {
1770         int i;
1771
1772         for (i = 0; i < KERNEL_TSB_NENTRIES; i++) {
1773                 struct tsb *ent = &swapper_tsb[i];
1774
1775                 ent->tag = (1UL << TSB_TAG_INVALID_BIT);
1776         }
1777 #ifndef CONFIG_DEBUG_PAGEALLOC
1778         for (i = 0; i < KERNEL_TSB4M_NENTRIES; i++) {
1779                 struct tsb *ent = &swapper_4m_tsb[i];
1780
1781                 ent->tag = (1UL << TSB_TAG_INVALID_BIT);
1782         }
1783 #endif
1784 }
1785
1786 extern unsigned int kvmap_linear_patch[1];
1787
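/* Summary comment (added; derived from the code below): map every
 * physical memory bank listed in pall[] (the PROM "reg" ranges read in
 * paging_init()) into the kernel linear area, then patch a nop into
 * kvmap_linear_patch[0] and flush the kernel TSBs and TLB so the new
 * tables take effect.
 */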
1788 static void __init kernel_physical_mapping_init(void)
1789 {
1790         unsigned long i, mem_alloced = 0UL;
1791         bool use_huge = true;
1792
1793 #ifdef CONFIG_DEBUG_PAGEALLOC
1794         use_huge = false;
1795 #endif
1796         for (i = 0; i < pall_ents; i++) {
1797                 unsigned long phys_start, phys_end;
1798
1799                 phys_start = pall[i].phys_addr;
1800                 phys_end = phys_start + pall[i].reg_size;
1801
1802                 mem_alloced += kernel_map_range(phys_start, phys_end,
1803                                                 PAGE_KERNEL, use_huge);
1804         }
1805
1806         printk("Allocated %ld bytes for kernel page tables.\n",
1807                mem_alloced);
1808
1809         kvmap_linear_patch[0] = 0x01000000; /* nop */
1810         flushi(&kvmap_linear_patch[0]);
1811
1812         flush_all_kernel_tsbs();
1813
1814         __flush_tlb_all();
1815 }
1816
1817 #ifdef CONFIG_DEBUG_PAGEALLOC
1818 void __kernel_map_pages(struct page *page, int numpages, int enable)
1819 {
1820         unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
1821         unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);
1822
1823         kernel_map_range(phys_start, phys_end,
1824                          (enable ? PAGE_KERNEL : __pgprot(0)), false);
1825
1826         flush_tsb_kernel_range(PAGE_OFFSET + phys_start,
1827                                PAGE_OFFSET + phys_end);
1828
1829         /* We should perform an IPI and flush all TLBs,
1830          * but that can deadlock, so flush only the current CPU.
1831          */
1832         __flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
1833                                  PAGE_OFFSET + phys_end);
1834 }
1835 #endif
1836
1837 unsigned long __init find_ecache_flush_span(unsigned long size)
1838 {
1839         int i;
1840
1841         for (i = 0; i < pavail_ents; i++) {
1842                 if (pavail[i].reg_size >= size)
1843                         return pavail[i].phys_addr;
1844         }
1845
1846         return ~0UL;
1847 }
1848
1849 unsigned long PAGE_OFFSET;
1850 EXPORT_SYMBOL(PAGE_OFFSET);
1851
1852 unsigned long VMALLOC_END   = 0x0000010000000000UL;
1853 EXPORT_SYMBOL(VMALLOC_END);
1854
1855 unsigned long sparc64_va_hole_top =    0xfffff80000000000UL;
1856 unsigned long sparc64_va_hole_bottom = 0x0000080000000000UL;
1857
1858 static void __init setup_page_offset(void)
1859 {
1860         if (tlb_type == cheetah || tlb_type == cheetah_plus) {
1861                 /* Cheetah/Panther support a full 64-bit virtual
1862                  * address, so we can use all that our page tables
1863                  * support.
1864                  */
1865                 sparc64_va_hole_top =    0xfff0000000000000UL;
1866                 sparc64_va_hole_bottom = 0x0010000000000000UL;
1867
1868                 max_phys_bits = 42;
1869         } else if (tlb_type == hypervisor) {
1870                 switch (sun4v_chip_type) {
1871                 case SUN4V_CHIP_NIAGARA1:
1872                 case SUN4V_CHIP_NIAGARA2:
1873                         /* T1 and T2 support 48-bit virtual addresses.  */
1874                         sparc64_va_hole_top =    0xffff800000000000UL;
1875                         sparc64_va_hole_bottom = 0x0000800000000000UL;
1876
1877                         max_phys_bits = 39;
1878                         break;
1879                 case SUN4V_CHIP_NIAGARA3:
1880                         /* T3 supports 48-bit virtual addresses.  */
1881                         sparc64_va_hole_top =    0xffff800000000000UL;
1882                         sparc64_va_hole_bottom = 0x0000800000000000UL;
1883
1884                         max_phys_bits = 43;
1885                         break;
1886                 case SUN4V_CHIP_NIAGARA4:
1887                 case SUN4V_CHIP_NIAGARA5:
1888                 case SUN4V_CHIP_SPARC64X:
1889                 case SUN4V_CHIP_SPARC_M6:
1890                         /* T4 and later support 52-bit virtual addresses.  */
1891                         sparc64_va_hole_top =    0xfff8000000000000UL;
1892                         sparc64_va_hole_bottom = 0x0008000000000000UL;
1893                         max_phys_bits = 47;
1894                         break;
1895                 case SUN4V_CHIP_SPARC_M7:
1896                 case SUN4V_CHIP_SPARC_SN:
1897                 default:
1898                         /* M7 and later support 52-bit virtual addresses.  */
1899                         sparc64_va_hole_top =    0xfff8000000000000UL;
1900                         sparc64_va_hole_bottom = 0x0008000000000000UL;
1901                         max_phys_bits = 49;
1902                         break;
1903                 }
1904         }
1905
1906         if (max_phys_bits > MAX_PHYS_ADDRESS_BITS) {
1907                 prom_printf("MAX_PHYS_ADDRESS_BITS is too small, need %lu\n",
1908                             max_phys_bits);
1909                 prom_halt();
1910         }
1911
1912         PAGE_OFFSET = sparc64_va_hole_top;
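        /* Added note: (x >> 1) + (x >> 2) == 3x/4, i.e. VMALLOC_END is
         * placed three quarters of the way up to the bottom of the VA
         * hole.
         */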
1913         VMALLOC_END = ((sparc64_va_hole_bottom >> 1) +
1914                        (sparc64_va_hole_bottom >> 2));
1915
1916         pr_info("MM: PAGE_OFFSET is 0x%016lx (max_phys_bits == %lu)\n",
1917                 PAGE_OFFSET, max_phys_bits);
1918         pr_info("MM: VMALLOC [0x%016lx --> 0x%016lx]\n",
1919                 VMALLOC_START, VMALLOC_END);
1920         pr_info("MM: VMEMMAP [0x%016lx --> 0x%016lx]\n",
1921                 VMEMMAP_BASE, VMEMMAP_BASE << 1);
1922 }
1923
1924 static void __init tsb_phys_patch(void)
1925 {
1926         struct tsb_ldquad_phys_patch_entry *pquad;
1927         struct tsb_phys_patch_entry *p;
1928
1929         pquad = &__tsb_ldquad_phys_patch;
1930         while (pquad < &__tsb_ldquad_phys_patch_end) {
1931                 unsigned long addr = pquad->addr;
1932
1933                 if (tlb_type == hypervisor)
1934                         *(unsigned int *) addr = pquad->sun4v_insn;
1935                 else
1936                         *(unsigned int *) addr = pquad->sun4u_insn;
1937                 wmb();
1938                 __asm__ __volatile__("flush     %0"
1939                                      : /* no outputs */
1940                                      : "r" (addr));
1941
1942                 pquad++;
1943         }
1944
1945         p = &__tsb_phys_patch;
1946         while (p < &__tsb_phys_patch_end) {
1947                 unsigned long addr = p->addr;
1948
1949                 *(unsigned int *) addr = p->insn;
1950                 wmb();
1951                 __asm__ __volatile__("flush     %0"
1952                                      : /* no outputs */
1953                                      : "r" (addr));
1954
1955                 p++;
1956         }
1957 }
1958
1959 /* Don't mark as init, we give this to the Hypervisor.  */
1960 #ifndef CONFIG_DEBUG_PAGEALLOC
1961 #define NUM_KTSB_DESCR  2
1962 #else
1963 #define NUM_KTSB_DESCR  1
1964 #endif
1965 static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR];
1966
1967 /* The swapper TSBs are loaded with a base sequence of:
1968  *
1969  *      sethi   %uhi(SYMBOL), REG1
1970  *      sethi   %hi(SYMBOL), REG2
1971  *      or      REG1, %ulo(SYMBOL), REG1
1972  *      or      REG2, %lo(SYMBOL), REG2
1973  *      sllx    REG1, 32, REG1
1974  *      or      REG1, REG2, REG1
1975  *
1976  * When we use physical addressing for the TSB accesses, we patch the
1977  * first four instructions in the above sequence.
1978  */
1979
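/* Added worked example (derived from patch_one_ktsb_phys() below): each
 * patch-table entry points at the first of the four instructions above.
 * The physical address is split into high_bits (pa[63:32]) and low_bits
 * (pa[31:0]); bits 31:10 of each half go into the 22-bit sethi
 * immediate and bits 9:0 into the matching "or" immediate.  For a
 * made-up pa = 0x000000080ff31000, high_bits = 0x00000008 patches its
 * sethi with 0x8 >> 10 = 0 and its "or" with 0x8, while
 * low_bits = 0x0ff31000 patches its sethi with 0x0ff31000 >> 10 =
 * 0x3fcc4 and its "or" with 0x000.
 */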
1980 static void patch_one_ktsb_phys(unsigned int *start, unsigned int *end, unsigned long pa)
1981 {
1982         unsigned long high_bits, low_bits;
1983
1984         high_bits = (pa >> 32) & 0xffffffff;
1985         low_bits = (pa >> 0) & 0xffffffff;
1986
1987         while (start < end) {
1988                 unsigned int *ia = (unsigned int *)(unsigned long)*start;
1989
1990                 ia[0] = (ia[0] & ~0x3fffff) | (high_bits >> 10);
1991                 __asm__ __volatile__("flush     %0" : : "r" (ia));
1992
1993                 ia[1] = (ia[1] & ~0x3fffff) | (low_bits >> 10);
1994                 __asm__ __volatile__("flush     %0" : : "r" (ia + 1));
1995
1996                 ia[2] = (ia[2] & ~0x1fff) | (high_bits & 0x3ff);
1997                 __asm__ __volatile__("flush     %0" : : "r" (ia + 2));
1998
1999                 ia[3] = (ia[3] & ~0x1fff) | (low_bits & 0x3ff);
2000                 __asm__ __volatile__("flush     %0" : : "r" (ia + 3));
2001
2002                 start++;
2003         }
2004 }
2005
2006 static void ktsb_phys_patch(void)
2007 {
2008         extern unsigned int __swapper_tsb_phys_patch;
2009         extern unsigned int __swapper_tsb_phys_patch_end;
2010         unsigned long ktsb_pa;
2011
2012         ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
2013         patch_one_ktsb_phys(&__swapper_tsb_phys_patch,
2014                             &__swapper_tsb_phys_patch_end, ktsb_pa);
2015 #ifndef CONFIG_DEBUG_PAGEALLOC
2016         {
2017         extern unsigned int __swapper_4m_tsb_phys_patch;
2018         extern unsigned int __swapper_4m_tsb_phys_patch_end;
2019         ktsb_pa = (kern_base +
2020                    ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
2021         patch_one_ktsb_phys(&__swapper_4m_tsb_phys_patch,
2022                             &__swapper_4m_tsb_phys_patch_end, ktsb_pa);
2023         }
2024 #endif
2025 }
2026
2027 static void __init sun4v_ktsb_init(void)
2028 {
2029         unsigned long ktsb_pa;
2030
2031         /* First KTSB for PAGE_SIZE mappings.  */
2032         ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
2033
2034         switch (PAGE_SIZE) {
2035         case 8 * 1024:
2036         default:
2037                 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K;
2038                 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K;
2039                 break;
2040
2041         case 64 * 1024:
2042                 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K;
2043                 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K;
2044                 break;
2045
2046         case 512 * 1024:
2047                 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K;
2048                 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K;
2049                 break;
2050
2051         case 4 * 1024 * 1024:
2052                 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB;
2053                 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB;
2054                 break;
2055         }
2056
2057         ktsb_descr[0].assoc = 1;
2058         ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES;
2059         ktsb_descr[0].ctx_idx = 0;
2060         ktsb_descr[0].tsb_base = ktsb_pa;
2061         ktsb_descr[0].resv = 0;
2062
2063 #ifndef CONFIG_DEBUG_PAGEALLOC
2064         /* Second KTSB for 4MB/256MB/2GB/16GB mappings.  */
2065         ktsb_pa = (kern_base +
2066                    ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
2067
2068         ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB;
2069         ktsb_descr[1].pgsz_mask = ((HV_PGSZ_MASK_4MB |
2070                                     HV_PGSZ_MASK_256MB |
2071                                     HV_PGSZ_MASK_2GB |
2072                                     HV_PGSZ_MASK_16GB) &
2073                                    cpu_pgsz_mask);
2074         ktsb_descr[1].assoc = 1;
2075         ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES;
2076         ktsb_descr[1].ctx_idx = 0;
2077         ktsb_descr[1].tsb_base = ktsb_pa;
2078         ktsb_descr[1].resv = 0;
2079 #endif
2080 }
2081
2082 void sun4v_ktsb_register(void)
2083 {
2084         unsigned long pa, ret;
2085
2086         pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);
2087
2088         ret = sun4v_mmu_tsb_ctx0(NUM_KTSB_DESCR, pa);
2089         if (ret != 0) {
2090                 prom_printf("hypervisor_mmu_tsb_ctx0[%lx]: "
2091                             "errors with %lx\n", pa, ret);
2092                 prom_halt();
2093         }
2094 }
2095
2096 static void __init sun4u_linear_pte_xor_finalize(void)
2097 {
2098 #ifndef CONFIG_DEBUG_PAGEALLOC
2099         /* This is where we would add Panther support for
2100          * 32MB and 256MB pages.
2101          */
2102 #endif
2103 }
2104
2105 static void __init sun4v_linear_pte_xor_finalize(void)
2106 {
2107         unsigned long pagecv_flag;
2108
2109         /* Bit 9 of the TTE is no longer the CV bit on the M7 processor; it
2110          * instead enables MCD errors. Do not set bit 9 on M7.
2111          */
2112         switch (sun4v_chip_type) {
2113         case SUN4V_CHIP_SPARC_M7:
2114         case SUN4V_CHIP_SPARC_SN:
2115                 pagecv_flag = 0x00;
2116                 break;
2117         default:
2118                 pagecv_flag = _PAGE_CV_4V;
2119                 break;
2120         }
2121 #ifndef CONFIG_DEBUG_PAGEALLOC
2122         if (cpu_pgsz_mask & HV_PGSZ_MASK_256MB) {
2123                 kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
2124                         PAGE_OFFSET;
2125                 kern_linear_pte_xor[1] |= (_PAGE_CP_4V | pagecv_flag |
2126                                            _PAGE_P_4V | _PAGE_W_4V);
2127         } else {
2128                 kern_linear_pte_xor[1] = kern_linear_pte_xor[0];
2129         }
2130
2131         if (cpu_pgsz_mask & HV_PGSZ_MASK_2GB) {
2132                 kern_linear_pte_xor[2] = (_PAGE_VALID | _PAGE_SZ2GB_4V) ^
2133                         PAGE_OFFSET;
2134                 kern_linear_pte_xor[2] |= (_PAGE_CP_4V | pagecv_flag |
2135                                            _PAGE_P_4V | _PAGE_W_4V);
2136         } else {
2137                 kern_linear_pte_xor[2] = kern_linear_pte_xor[1];
2138         }
2139
2140         if (cpu_pgsz_mask & HV_PGSZ_MASK_16GB) {
2141                 kern_linear_pte_xor[3] = (_PAGE_VALID | _PAGE_SZ16GB_4V) ^
2142                         PAGE_OFFSET;
2143                 kern_linear_pte_xor[3] |= (_PAGE_CP_4V | pagecv_flag |
2144                                            _PAGE_P_4V | _PAGE_W_4V);
2145         } else {
2146                 kern_linear_pte_xor[3] = kern_linear_pte_xor[2];
2147         }
2148 #endif
2149 }
2150
2151 /* paging_init() sets up the page tables */
2152
2153 static unsigned long last_valid_pfn;
2154
2155 static void sun4u_pgprot_init(void);
2156 static void sun4v_pgprot_init(void);
2157
2158 static phys_addr_t __init available_memory(void)
2159 {
2160         phys_addr_t available = 0ULL;
2161         phys_addr_t pa_start, pa_end;
2162         u64 i;
2163
2164         for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &pa_start,
2165                                 &pa_end, NULL)
2166                 available = available + (pa_end  - pa_start);
2167
2168         return available;
2169 }
2170
2171 #define _PAGE_CACHE_4U  (_PAGE_CP_4U | _PAGE_CV_4U)
2172 #define _PAGE_CACHE_4V  (_PAGE_CP_4V | _PAGE_CV_4V)
2173 #define __DIRTY_BITS_4U  (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
2174 #define __DIRTY_BITS_4V  (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
2175 #define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
2176 #define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)
2177
2178 /* We need to exclude reserved regions. This exclusion will include
2179  * vmlinux and initrd. To be more precise, the initrd size could be used to
2180  * compute a new lower limit because it is freed later during initialization.
2181  */
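/* Added illustrative example (made-up numbers): with limit_ram = 1GB and
 * two free ranges of 768MB each, the first pass removes the tail 512MB
 * of the first range (give_back = 256MB keeps its leading 256MB),
 * leaving 256MB + 768MB = 1GB of free memory, and the loop stops.
 */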
2182 static void __init reduce_memory(phys_addr_t limit_ram)
2183 {
2184         phys_addr_t avail_ram = available_memory();
2185         phys_addr_t pa_start, pa_end;
2186         u64 i;
2187
2188         if (limit_ram >= avail_ram)
2189                 return;
2190
2191         for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &pa_start,
2192                                 &pa_end, NULL) {
2193                 phys_addr_t region_size = pa_end - pa_start;
2194                 phys_addr_t clip_start = pa_start;
2195
2196                 avail_ram = avail_ram - region_size;
2197                 /* Are we consuming too much? */
2198                 if (avail_ram < limit_ram) {
2199                         phys_addr_t give_back = limit_ram - avail_ram;
2200
2201                         region_size = region_size - give_back;
2202                         clip_start = clip_start + give_back;
2203                 }
2204
2205                 memblock_remove(clip_start, region_size);
2206
2207                 if (avail_ram <= limit_ram)
2208                         break;
2209                 i = 0UL;
2210         }
2211 }
2212
2213 void __init paging_init(void)
2214 {
2215         unsigned long end_pfn, shift, phys_base;
2216         unsigned long real_end, i;
2217
2218         setup_page_offset();
2219
2220         /* These build-time checks make sure that the dcache_dirty_cpu()
2221          * page->flags usage will work.
2222          *
2223          * When a page gets marked as dcache-dirty, we store the
2224          * cpu number starting at bit 32 in the page->flags.  Also,
2225          * functions like clear_dcache_dirty_cpu use the cpu mask
2226          * in 13-bit signed-immediate instruction fields.
2227          */
2228
2229         /*
2230          * Page flags must not reach into upper 32 bits that are used
2231          * for the cpu number.
2232          */
2233         BUILD_BUG_ON(NR_PAGEFLAGS > 32);
2234
2235         /*
2236          * The bit fields placed in the high range must not reach below
2237          * the 32 bit boundary. Otherwise we cannot place the cpu field
2238          * at the 32 bit boundary.
2239          */
2240         BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH +
2241                 ilog2(roundup_pow_of_two(NR_CPUS)) > 32);
2242
2243         BUILD_BUG_ON(NR_CPUS > 4096);
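        /* Added note: with NR_CPUS capped at 4096, the stored cpu number
         * is at most 4095, which fits in the non-negative half of a
         * 13-bit signed immediate as required by the helpers noted above.
         */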
2244
2245         kern_base = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
2246         kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
2247
2248         /* Invalidate both kernel TSBs.  */
2249         memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
2250 #ifndef CONFIG_DEBUG_PAGEALLOC
2251         memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
2252 #endif
2253
2254         /* TTE.cv bit on sparc v9 occupies the same position as TTE.mcde
2255          * bit on M7 processor. This is a conflicting usage of the same
2256          * bit. Enabling TTE.cv on M7 would turn on Memory Corruption
2257          * Detection error on all pages and this will lead to problems
2258          * later. The kernel does not run with MCD enabled and hence the rest
2259          * of the required steps to fully configure memory corruption
2260          * detection are not taken. We need to ensure TTE.mcde is not
2261          * set on M7 processor. Compute the value of cacheability
2262          * flag for use later taking this into consideration.
2263          */
2264         switch (sun4v_chip_type) {
2265         case SUN4V_CHIP_SPARC_M7:
2266         case SUN4V_CHIP_SPARC_SN:
2267                 page_cache4v_flag = _PAGE_CP_4V;
2268                 break;
2269         default:
2270                 page_cache4v_flag = _PAGE_CACHE_4V;
2271                 break;
2272         }
2273
2274         if (tlb_type == hypervisor)
2275                 sun4v_pgprot_init();
2276         else
2277                 sun4u_pgprot_init();
2278
2279         if (tlb_type == cheetah_plus ||
2280             tlb_type == hypervisor) {
2281                 tsb_phys_patch();
2282                 ktsb_phys_patch();
2283         }
2284
2285         if (tlb_type == hypervisor)
2286                 sun4v_patch_tlb_handlers();
2287
2288         /* Find available physical memory...
2289          *
2290          * Read it twice in order to work around a bug in openfirmware.
2291          * The call to grab this table itself can cause openfirmware to
2292          * allocate memory, which in turn can take away some space from
2293          * the list of available memory.  Reading it twice makes sure
2294          * we really do get the final value.
2295          */
2296         read_obp_translations();
2297         read_obp_memory("reg", &pall[0], &pall_ents);
2298         read_obp_memory("available", &pavail[0], &pavail_ents);
2299         read_obp_memory("available", &pavail[0], &pavail_ents);
2300
2301         phys_base = 0xffffffffffffffffUL;
2302         for (i = 0; i < pavail_ents; i++) {
2303                 phys_base = min(phys_base, pavail[i].phys_addr);
2304                 memblock_add(pavail[i].phys_addr, pavail[i].reg_size);
2305         }
2306
2307         memblock_reserve(kern_base, kern_size);
2308
2309         find_ramdisk(phys_base);
2310
2311         if (cmdline_memory_size)
2312                 reduce_memory(cmdline_memory_size);
2313
2314         memblock_allow_resize();
2315         memblock_dump_all();
2316
2317         set_bit(0, mmu_context_bmap);
2318
2319         shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);
2320
2321         real_end = (unsigned long)_end;
2322         num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << ILOG2_4MB);
2323         printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
2324                num_kernel_image_mappings);
2325
2326         /* Set kernel pgd to upper alias so physical page computations
2327          * work.
2328          */
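        /* Added note: shift (computed above) is the byte offset between
         * the KERNBASE image mapping of the kernel and its PAGE_OFFSET
         * linear alias; advancing the pgd_t pointer by
         * shift/sizeof(pgd_t) entries moves it by exactly shift bytes,
         * onto that alias.
         */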
2329         init_mm.pgd += ((shift) / (sizeof(pgd_t)));
2330
2331         memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
2332
2333         inherit_prom_mappings();
2334
2335         /* Ok, we can use our TLB miss and window trap handlers safely.  */
2336         setup_tba();
2337
2338         __flush_tlb_all();
2339
2340         prom_build_devicetree();
2341         of_populate_present_mask();
2342 #ifndef CONFIG_SMP
2343         of_fill_in_cpu_data();
2344 #endif
2345
2346         if (tlb_type == hypervisor) {
2347                 sun4v_mdesc_init();
2348                 mdesc_populate_present_mask(cpu_all_mask);
2349 #ifndef CONFIG_SMP
2350                 mdesc_fill_in_cpu_data(cpu_all_mask);
2351 #endif
2352                 mdesc_get_page_sizes(cpu_all_mask, &cpu_pgsz_mask);
2353
2354                 sun4v_linear_pte_xor_finalize();
2355
2356                 sun4v_ktsb_init();
2357                 sun4v_ktsb_register();
2358         } else {
2359                 unsigned long impl, ver;
2360
2361                 cpu_pgsz_mask = (HV_PGSZ_MASK_8K | HV_PGSZ_MASK_64K |
2362                                  HV_PGSZ_MASK_512K | HV_PGSZ_MASK_4MB);
2363
2364                 __asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
2365                 impl = ((ver >> 32) & 0xffff);
2366                 if (impl == PANTHER_IMPL)
2367                         cpu_pgsz_mask |= (HV_PGSZ_MASK_32MB |
2368                                           HV_PGSZ_MASK_256MB);
2369
2370                 sun4u_linear_pte_xor_finalize();
2371         }
2372
2373         /* Flush the TLBs and the 4M TSB so that the updated linear
2374          * pte XOR settings are realized for all mappings.
2375          */
2376         __flush_tlb_all();
2377 #ifndef CONFIG_DEBUG_PAGEALLOC
2378         memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
2379 #endif
2380         __flush_tlb_all();
2381
2382         /* Setup bootmem... */
2383         last_valid_pfn = end_pfn = bootmem_init(phys_base);
2384
2385         kernel_physical_mapping_init();
2386
2387         {
2388                 unsigned long max_zone_pfns[MAX_NR_ZONES];
2389
2390                 memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
2391
2392                 max_zone_pfns[ZONE_NORMAL] = end_pfn;
2393
2394                 free_area_init_nodes(max_zone_pfns);
2395         }
2396
2397         printk("Booting Linux...\n");
2398 }
2399
2400 int page_in_phys_avail(unsigned long paddr)
2401 {
2402         int i;
2403
2404         paddr &= PAGE_MASK;
2405
2406         for (i = 0; i < pavail_ents; i++) {
2407                 unsigned long start, end;
2408
2409                 start = pavail[i].phys_addr;
2410                 end = start + pavail[i].reg_size;
2411
2412                 if (paddr >= start && paddr < end)
2413                         return 1;
2414         }
2415         if (paddr >= kern_base && paddr < (kern_base + kern_size))
2416                 return 1;
2417 #ifdef CONFIG_BLK_DEV_INITRD
2418         if (paddr >= __pa(initrd_start) &&
2419             paddr < __pa(PAGE_ALIGN(initrd_end)))
2420                 return 1;
2421 #endif
2422
2423         return 0;
2424 }
2425
2426 static void __init register_page_bootmem_info(void)
2427 {
2428 #ifdef CONFIG_NEED_MULTIPLE_NODES
2429         int i;
2430
2431         for_each_online_node(i)
2432                 if (NODE_DATA(i)->node_spanned_pages)
2433                         register_page_bootmem_info_node(NODE_DATA(i));
2434 #endif
2435 }
2436 void __init mem_init(void)
2437 {
2438         high_memory = __va(last_valid_pfn << PAGE_SHIFT);
2439
2440         register_page_bootmem_info();
2441         free_all_bootmem();
2442
2443         /*
2444          * Set up the zero page, mark it reserved, so that page count
2445          * is not manipulated when freeing the page from user ptes.
2446          */
2447         mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
2448         if (mem_map_zero == NULL) {
2449                 prom_printf("paging_init: Cannot alloc zero page.\n");
2450                 prom_halt();
2451         }
2452         mark_page_reserved(mem_map_zero);
2453
2454         mem_init_print_info(NULL);
2455
2456         if (tlb_type == cheetah || tlb_type == cheetah_plus)
2457                 cheetah_ecache_flush_init();
2458 }
2459
2460 void free_initmem(void)
2461 {
2462         unsigned long addr, initend;
2463         int do_free = 1;
2464
2465         /* If the physical memory maps were trimmed by kernel command
2466          * line options, don't even try freeing this initmem stuff up.
2467          * The kernel image could have been in the trimmed out region
2468          * and if so the freeing below will free invalid page structs.
2469          */
2470         if (cmdline_memory_size)
2471                 do_free = 0;
2472
2473         /*
2474          * The init section is aligned to 8k in vmlinux.lds. Page-align it for page sizes larger than 8k.
2475          */
2476         addr = PAGE_ALIGN((unsigned long)(__init_begin));
2477         initend = (unsigned long)(__init_end) & PAGE_MASK;
2478         for (; addr < initend; addr += PAGE_SIZE) {
2479                 unsigned long page;
2480
2481                 page = (addr +
2482                         ((unsigned long) __va(kern_base)) -
2483                         ((unsigned long) KERNBASE));
2484                 memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
2485
2486                 if (do_free)
2487                         free_reserved_page(virt_to_page(page));
2488         }
2489 }
2490
2491 #ifdef CONFIG_BLK_DEV_INITRD
2492 void free_initrd_mem(unsigned long start, unsigned long end)
2493 {
2494         free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
2495                            "initrd");
2496 }
2497 #endif
2498
2499 pgprot_t PAGE_KERNEL __read_mostly;
2500 EXPORT_SYMBOL(PAGE_KERNEL);
2501
2502 pgprot_t PAGE_KERNEL_LOCKED __read_mostly;
2503 pgprot_t PAGE_COPY __read_mostly;
2504
2505 pgprot_t PAGE_SHARED __read_mostly;
2506 EXPORT_SYMBOL(PAGE_SHARED);
2507
2508 unsigned long pg_iobits __read_mostly;
2509
2510 unsigned long _PAGE_IE __read_mostly;
2511 EXPORT_SYMBOL(_PAGE_IE);
2512
2513 unsigned long _PAGE_E __read_mostly;
2514 EXPORT_SYMBOL(_PAGE_E);
2515
2516 unsigned long _PAGE_CACHE __read_mostly;
2517 EXPORT_SYMBOL(_PAGE_CACHE);
2518
2519 #ifdef CONFIG_SPARSEMEM_VMEMMAP
2520 int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
2521                                int node)
2522 {
2523         unsigned long pte_base;
2524
2525         pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
2526                     _PAGE_CP_4U | _PAGE_CV_4U |
2527                     _PAGE_P_4U | _PAGE_W_4U);
2528         if (tlb_type == hypervisor)
2529                 pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
2530                             page_cache4v_flag | _PAGE_P_4V | _PAGE_W_4V);
2531
2532         pte_base |= _PAGE_PMD_HUGE;
2533
2534         vstart = vstart & PMD_MASK;
2535         vend = ALIGN(vend, PMD_SIZE);
2536         for (; vstart < vend; vstart += PMD_SIZE) {
2537                 pgd_t *pgd = pgd_offset_k(vstart);
2538                 unsigned long pte;
2539                 pud_t *pud;
2540                 pmd_t *pmd;
2541
2542                 if (pgd_none(*pgd)) {
2543                         pud_t *new = vmemmap_alloc_block(PAGE_SIZE, node);
2544
2545                         if (!new)
2546                                 return -ENOMEM;
2547                         pgd_populate(&init_mm, pgd, new);
2548                 }
2549
2550                 pud = pud_offset(pgd, vstart);
2551                 if (pud_none(*pud)) {
2552                         pmd_t *new = vmemmap_alloc_block(PAGE_SIZE, node);
2553
2554                         if (!new)
2555                                 return -ENOMEM;
2556                         pud_populate(&init_mm, pud, new);
2557                 }
2558
2559                 pmd = pmd_offset(pud, vstart);
2560
2561                 pte = pmd_val(*pmd);
2562                 if (!(pte & _PAGE_VALID)) {
2563                         void *block = vmemmap_alloc_block(PMD_SIZE, node);
2564
2565                         if (!block)
2566                                 return -ENOMEM;
2567
2568                         pmd_val(*pmd) = pte_base | __pa(block);
2569                 }
2570         }
2571
2572         return 0;
2573 }
2574
2575 void vmemmap_free(unsigned long start, unsigned long end)
2576 {
2577 }
2578 #endif /* CONFIG_SPARSEMEM_VMEMMAP */
2579
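/* Summary comment (added): fill in protection_map[] from the page
 * protection bits passed in by sun4u_pgprot_init()/sun4v_pgprot_init().
 * The table appears to follow the usual protection_map layout (indexed
 * by the VM_READ/VM_WRITE/VM_EXEC/VM_SHARED combination, 0x0-0x7
 * private and 0x8-0xf shared), with page_exec_bit stripped from the
 * readable/writable combinations that do not include VM_EXEC.
 */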
2580 static void prot_init_common(unsigned long page_none,
2581                              unsigned long page_shared,
2582                              unsigned long page_copy,
2583                              unsigned long page_readonly,
2584                              unsigned long page_exec_bit)
2585 {
2586         PAGE_COPY = __pgprot(page_copy);
2587         PAGE_SHARED = __pgprot(page_shared);
2588
2589         protection_map[0x0] = __pgprot(page_none);
2590         protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit);
2591         protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit);
2592         protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit);
2593         protection_map[0x4] = __pgprot(page_readonly);
2594         protection_map[0x5] = __pgprot(page_readonly);
2595         protection_map[0x6] = __pgprot(page_copy);
2596         protection_map[0x7] = __pgprot(page_copy);
2597         protection_map[0x8] = __pgprot(page_none);
2598         protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit);
2599         protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit);
2600         protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit);
2601         protection_map[0xc] = __pgprot(page_readonly);
2602         protection_map[0xd] = __pgprot(page_readonly);
2603         protection_map[0xe] = __pgprot(page_shared);
2604         protection_map[0xf] = __pgprot(page_shared);
2605 }
2606
2607 static void __init sun4u_pgprot_init(void)
2608 {
2609         unsigned long page_none, page_shared, page_copy, page_readonly;
2610         unsigned long page_exec_bit;
2611         int i;
2612
2613         PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
2614                                 _PAGE_CACHE_4U | _PAGE_P_4U |
2615                                 __ACCESS_BITS_4U | __DIRTY_BITS_4U |
2616                                 _PAGE_EXEC_4U);
2617         PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
2618                                        _PAGE_CACHE_4U | _PAGE_P_4U |
2619                                        __ACCESS_BITS_4U | __DIRTY_BITS_4U |
2620                                        _PAGE_EXEC_4U | _PAGE_L_4U);
2621
2622         _PAGE_IE = _PAGE_IE_4U;
2623         _PAGE_E = _PAGE_E_4U;
2624         _PAGE_CACHE = _PAGE_CACHE_4U;
2625
2626         pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U |
2627                      __ACCESS_BITS_4U | _PAGE_E_4U);
2628
2629 #ifdef CONFIG_DEBUG_PAGEALLOC
2630         kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
2631 #else
2632         kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
2633                 PAGE_OFFSET;
2634 #endif
2635         kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U |
2636                                    _PAGE_P_4U | _PAGE_W_4U);
2637
2638         for (i = 1; i < 4; i++)
2639                 kern_linear_pte_xor[i] = kern_linear_pte_xor[0];
2640
2641         _PAGE_ALL_SZ_BITS =  (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U |
2642                               _PAGE_SZ64K_4U | _PAGE_SZ8K_4U |
2643                               _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U);
2644
2645
2646         page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U;
2647         page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2648                        __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U);
2649         page_copy   = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2650                        __ACCESS_BITS_4U | _PAGE_EXEC_4U);
2651         page_readonly   = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2652                            __ACCESS_BITS_4U | _PAGE_EXEC_4U);
2653
2654         page_exec_bit = _PAGE_EXEC_4U;
2655
2656         prot_init_common(page_none, page_shared, page_copy, page_readonly,
2657                          page_exec_bit);
2658 }
2659
2660 static void __init sun4v_pgprot_init(void)
2661 {
2662         unsigned long page_none, page_shared, page_copy, page_readonly;
2663         unsigned long page_exec_bit;
2664         int i;
2665
2666         PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID |
2667                                 page_cache4v_flag | _PAGE_P_4V |
2668                                 __ACCESS_BITS_4V | __DIRTY_BITS_4V |
2669                                 _PAGE_EXEC_4V);
2670         PAGE_KERNEL_LOCKED = PAGE_KERNEL;
2671
2672         _PAGE_IE = _PAGE_IE_4V;
2673         _PAGE_E = _PAGE_E_4V;
2674         _PAGE_CACHE = page_cache4v_flag;
2675
2676 #ifdef CONFIG_DEBUG_PAGEALLOC
2677         kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
2678 #else
2679         kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
2680                 PAGE_OFFSET;
2681 #endif
2682         kern_linear_pte_xor[0] |= (page_cache4v_flag | _PAGE_P_4V |
2683                                    _PAGE_W_4V);
2684
2685         for (i = 1; i < 4; i++)
2686                 kern_linear_pte_xor[i] = kern_linear_pte_xor[0];
2687
2688         pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V |
2689                      __ACCESS_BITS_4V | _PAGE_E_4V);
2690
2691         _PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V |
2692                              _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V |
2693                              _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V |
2694                              _PAGE_SZ64K_4V | _PAGE_SZ8K_4V);
2695
2696         page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | page_cache4v_flag;
2697         page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
2698                        __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V);
2699         page_copy   = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
2700                        __ACCESS_BITS_4V | _PAGE_EXEC_4V);
2701         page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
2702                          __ACCESS_BITS_4V | _PAGE_EXEC_4V);
2703
2704         page_exec_bit = _PAGE_EXEC_4V;
2705
2706         prot_init_common(page_none, page_shared, page_copy, page_readonly,
2707                          page_exec_bit);
2708 }
2709
2710 unsigned long pte_sz_bits(unsigned long sz)
2711 {
2712         if (tlb_type == hypervisor) {
2713                 switch (sz) {
2714                 case 8 * 1024:
2715                 default:
2716                         return _PAGE_SZ8K_4V;
2717                 case 64 * 1024:
2718                         return _PAGE_SZ64K_4V;
2719                 case 512 * 1024:
2720                         return _PAGE_SZ512K_4V;
2721                 case 4 * 1024 * 1024:
2722                         return _PAGE_SZ4MB_4V;
2723                 }
2724         } else {
2725                 switch (sz) {
2726                 case 8 * 1024:
2727                 default:
2728                         return _PAGE_SZ8K_4U;
2729                 case 64 * 1024:
2730                         return _PAGE_SZ64K_4U;
2731                 case 512 * 1024:
2732                         return _PAGE_SZ512K_4U;
2733                 case 4 * 1024 * 1024:
2734                         return _PAGE_SZ4MB_4U;
2735                 }
2736         }
2737 }
2738
2739 pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size)
2740 {
2741         pte_t pte;
2742
2743         pte_val(pte)  = page | pgprot_val(pgprot_noncached(prot));
2744         pte_val(pte) |= (((unsigned long)space) << 32);
2745         pte_val(pte) |= pte_sz_bits(page_size);
2746
2747         return pte;
2748 }
2749
2750 static unsigned long kern_large_tte(unsigned long paddr)
2751 {
2752         unsigned long val;
2753
2754         val = (_PAGE_VALID | _PAGE_SZ4MB_4U |
2755                _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U |
2756                _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U);
2757         if (tlb_type == hypervisor)
2758                 val = (_PAGE_VALID | _PAGE_SZ4MB_4V |
2759                        page_cache4v_flag | _PAGE_P_4V |
2760                        _PAGE_EXEC_4V | _PAGE_W_4V);
2761
2762         return val | paddr;
2763 }
2764
2765 /* If not locked, zap it. */
2766 void __flush_tlb_all(void)
2767 {
2768         unsigned long pstate;
2769         int i;
2770
2771         __asm__ __volatile__("flushw\n\t"
2772                              "rdpr      %%pstate, %0\n\t"
2773                              "wrpr      %0, %1, %%pstate"
2774                              : "=r" (pstate)
2775                              : "i" (PSTATE_IE));
2776         if (tlb_type == hypervisor) {
2777                 sun4v_mmu_demap_all();
2778         } else if (tlb_type == spitfire) {
2779                 for (i = 0; i < 64; i++) {
2780                         /* Spitfire Errata #32 workaround */
2781                         /* NOTE: Always runs on spitfire, so no
2782                          *       cheetah+ page size encodings.
2783                          */
2784                         __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
2785                                              "flush     %%g6"
2786                                              : /* No outputs */
2787                                              : "r" (0),
2788                                              "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
2789
2790                         if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) {
2791                                 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
2792                                                      "membar #Sync"
2793                                                      : /* no outputs */
2794                                                      : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
2795                                 spitfire_put_dtlb_data(i, 0x0UL);
2796                         }
2797
2798                         /* Spitfire Errata #32 workaround */
2799                         /* NOTE: Always runs on spitfire, so no
2800                          *       cheetah+ page size encodings.
2801                          */
2802                         __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
2803                                              "flush     %%g6"
2804                                              : /* No outputs */
2805                                              : "r" (0),
2806                                              "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
2807
2808                         if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) {
2809                                 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
2810                                                      "membar #Sync"
2811                                                      : /* no outputs */
2812                                                      : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
2813                                 spitfire_put_itlb_data(i, 0x0UL);
2814                         }
2815                 }
2816         } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
2817                 cheetah_flush_dtlb_all();
2818                 cheetah_flush_itlb_all();
2819         }
2820         __asm__ __volatile__("wrpr      %0, 0, %%pstate"
2821                              : : "r" (pstate));
2822 }
2823
2824 pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
2825                             unsigned long address)
2826 {
2827         struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
2828         pte_t *pte = NULL;
2829
2830         if (page)
2831                 pte = (pte_t *) page_address(page);
2832
2833         return pte;
2834 }
2835
2836 pgtable_t pte_alloc_one(struct mm_struct *mm,
2837                         unsigned long address)
2838 {
2839         struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
2840         if (!page)
2841                 return NULL;
2842         if (!pgtable_page_ctor(page)) {
2843                 free_hot_cold_page(page, 0);
2844                 return NULL;
2845         }
2846         return (pte_t *) page_address(page);
2847 }
2848
2849 void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
2850 {
2851         free_page((unsigned long)pte);
2852 }
2853
2854 static void __pte_free(pgtable_t pte)
2855 {
2856         struct page *page = virt_to_page(pte);
2857
2858         pgtable_page_dtor(page);
2859         __free_page(page);
2860 }
2861
2862 void pte_free(struct mm_struct *mm, pgtable_t pte)
2863 {
2864         __pte_free(pte);
2865 }
2866
2867 void pgtable_free(void *table, bool is_page)
2868 {
2869         if (is_page)
2870                 __pte_free(table);
2871         else
2872                 kmem_cache_free(pgtable_cache, table);
2873 }
2874
2875 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2876 void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
2877                           pmd_t *pmd)
2878 {
2879         unsigned long pte, flags;
2880         struct mm_struct *mm;
2881         pmd_t entry = *pmd;
2882
2883         if (!pmd_large(entry) || !pmd_young(entry))
2884                 return;
2885
2886         pte = pmd_val(entry);
2887
2888         /* Don't insert a non-valid PMD into the TSB, we'll deadlock.  */
2889         if (!(pte & _PAGE_VALID))
2890                 return;
2891
2892         /* We are fabricating 8MB pages using 4MB real hw pages.  */
2893         pte |= (addr & (1UL << REAL_HPAGE_SHIFT));
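        /* (Added note: the OR above presumably selects which real 4MB
         * half of the 8MB region the faulting address falls in, so the
         * TSB entry inserted below points at the matching 4MB page.)
         */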
2894
2895         mm = vma->vm_mm;
2896
2897         spin_lock_irqsave(&mm->context.lock, flags);
2898
2899         if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL)
2900                 __update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
2901                                         addr, pte);
2902
2903         spin_unlock_irqrestore(&mm->context.lock, flags);
2904 }
2905 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
2906
2907 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
2908 static void context_reload(void *__data)
2909 {
2910         struct mm_struct *mm = __data;
2911
2912         if (mm == current->mm)
2913                 load_secondary_context(mm);
2914 }
2915
2916 void hugetlb_setup(struct pt_regs *regs)
2917 {
2918         struct mm_struct *mm = current->mm;
2919         struct tsb_config *tp;
2920
2921         if (faulthandler_disabled() || !mm) {
2922                 const struct exception_table_entry *entry;
2923
2924                 entry = search_exception_tables(regs->tpc);
2925                 if (entry) {
2926                         regs->tpc = entry->fixup;
2927                         regs->tnpc = regs->tpc + 4;
2928                         return;
2929                 }
2930                 pr_alert("Unexpected HugeTLB setup in atomic context.\n");
2931                 die_if_kernel("HugeTSB in atomic", regs);
2932         }
2933
2934         tp = &mm->context.tsb_block[MM_TSB_HUGE];
2935         if (likely(tp->tsb == NULL))
2936                 tsb_grow(mm, MM_TSB_HUGE, 0);
2937
2938         tsb_context_switch(mm);
2939         smp_tsb_sync(mm);
2940
2941         /* On UltraSPARC-III+ and later, configure the second half of
2942          * the Data-TLB for huge pages.
2943          */
2944         if (tlb_type == cheetah_plus) {
2945                 bool need_context_reload = false;
2946                 unsigned long ctx;
2947
2948                 spin_lock_irq(&ctx_alloc_lock);
2949                 ctx = mm->context.sparc64_ctx_val;
2950                 ctx &= ~CTX_PGSZ_MASK;
2951                 ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
2952                 ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT;
2953
2954                 if (ctx != mm->context.sparc64_ctx_val) {
2955                         /* When changing the page size fields, we
2956                          * must perform a context flush so that no
2957                          * stale entries match.  This flush must
2958                          * occur with the original context register
2959                          * settings.
2960                          */
2961                         do_flush_tlb_mm(mm);
2962
2963                         /* Reload the context register of all processors
2964                          * also executing in this address space.
2965                          */
2966                         mm->context.sparc64_ctx_val = ctx;
2967                         need_context_reload = true;
2968                 }
2969                 spin_unlock_irq(&ctx_alloc_lock);
2970
2971                 if (need_context_reload)
2972                         on_each_cpu(context_reload, mm, 0);
2973         }
2974 }
2975 #endif
2976
2977 static struct resource code_resource = {
2978         .name   = "Kernel code",
2979         .flags  = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
2980 };
2981
2982 static struct resource data_resource = {
2983         .name   = "Kernel data",
2984         .flags  = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
2985 };
2986
2987 static struct resource bss_resource = {
2988         .name   = "Kernel bss",
2989         .flags  = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
2990 };
2991
2992 static inline resource_size_t compute_kern_paddr(void *addr)
2993 {
2994         return (resource_size_t) (addr - KERNBASE + kern_base);
2995 }
2996
2997 static void __init kernel_lds_init(void)
2998 {
2999         code_resource.start = compute_kern_paddr(_text);
3000         code_resource.end   = compute_kern_paddr(_etext - 1);
3001         data_resource.start = compute_kern_paddr(_etext);
3002         data_resource.end   = compute_kern_paddr(_edata - 1);
3003         bss_resource.start  = compute_kern_paddr(__bss_start);
3004         bss_resource.end    = compute_kern_paddr(_end - 1);
3005 }
3006
3007 static int __init report_memory(void)
3008 {
3009         int i;
3010         struct resource *res;
3011
3012         kernel_lds_init();
3013
3014         for (i = 0; i < pavail_ents; i++) {
3015                 res = kzalloc(sizeof(struct resource), GFP_KERNEL);
3016
3017                 if (!res) {
3018                         pr_warn("Failed to allocate resource.\n");
3019                         break;
3020                 }
3021
3022                 res->name = "System RAM";
3023                 res->start = pavail[i].phys_addr;
3024                 res->end = pavail[i].phys_addr + pavail[i].reg_size - 1;
3025                 res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
3026
3027                 if (insert_resource(&iomem_resource, res) < 0) {
3028                         pr_warn("Resource insertion failed.\n");
3029                         break;
3030                 }
3031
3032                 insert_resource(res, &code_resource);
3033                 insert_resource(res, &data_resource);
3034                 insert_resource(res, &bss_resource);
3035         }
3036
3037         return 0;
3038 }
3039 arch_initcall(report_memory);
3040
3041 #ifdef CONFIG_SMP
3042 #define do_flush_tlb_kernel_range       smp_flush_tlb_kernel_range
3043 #else
3044 #define do_flush_tlb_kernel_range       __flush_tlb_kernel_range
3045 #endif
3046
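/* Summary comment (added; derived from the code below): flush the
 * kernel TSB and TLB for [start, end).  A range that straddles the
 * firmware window [LOW_OBP_ADDRESS, HI_OBP_ADDRESS) is flushed as the
 * pieces below and above that window, presumably so OBP's own
 * translations are left untouched.
 */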
3047 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
3048 {
3049         if (start < HI_OBP_ADDRESS && end > LOW_OBP_ADDRESS) {
3050                 if (start < LOW_OBP_ADDRESS) {
3051                         flush_tsb_kernel_range(start, LOW_OBP_ADDRESS);
3052                         do_flush_tlb_kernel_range(start, LOW_OBP_ADDRESS);
3053                 }
3054                 if (end > HI_OBP_ADDRESS) {
3055                         flush_tsb_kernel_range(HI_OBP_ADDRESS, end);
3056                         do_flush_tlb_kernel_range(HI_OBP_ADDRESS, end);
3057                 }
3058         } else {
3059                 flush_tsb_kernel_range(start, end);
3060                 do_flush_tlb_kernel_range(start, end);
3061         }
3062 }