]> git.karo-electronics.de Git - karo-tx-linux.git/blob - arch/arm/kernel/setup.c
Merge remote-tracking branch 'signal/for-next'
[karo-tx-linux.git] / arch / arm / kernel / setup.c
1 /*
2  *  linux/arch/arm/kernel/setup.c
3  *
4  *  Copyright (C) 1995-2001 Russell King
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 #include <linux/export.h>
11 #include <linux/kernel.h>
12 #include <linux/stddef.h>
13 #include <linux/ioport.h>
14 #include <linux/delay.h>
15 #include <linux/utsname.h>
16 #include <linux/initrd.h>
17 #include <linux/console.h>
18 #include <linux/bootmem.h>
19 #include <linux/seq_file.h>
20 #include <linux/screen_info.h>
21 #include <linux/init.h>
22 #include <linux/kexec.h>
23 #include <linux/of_fdt.h>
24 #include <linux/cpu.h>
25 #include <linux/interrupt.h>
26 #include <linux/smp.h>
27 #include <linux/proc_fs.h>
28 #include <linux/memblock.h>
29 #include <linux/bug.h>
30 #include <linux/compiler.h>
31 #include <linux/sort.h>
32
33 #include <asm/unified.h>
34 #include <asm/cp15.h>
35 #include <asm/cpu.h>
36 #include <asm/cputype.h>
37 #include <asm/elf.h>
38 #include <asm/procinfo.h>
39 #include <asm/sections.h>
40 #include <asm/setup.h>
41 #include <asm/smp_plat.h>
42 #include <asm/mach-types.h>
43 #include <asm/cacheflush.h>
44 #include <asm/cachetype.h>
45 #include <asm/tlbflush.h>
46
47 #include <asm/prom.h>
48 #include <asm/mach/arch.h>
49 #include <asm/mach/irq.h>
50 #include <asm/mach/time.h>
51 #include <asm/system_info.h>
52 #include <asm/system_misc.h>
53 #include <asm/traps.h>
54 #include <asm/unwind.h>
55 #include <asm/memblock.h>
56 #include <asm/virt.h>
57
58 #include "atags.h"
59 #include "tcm.h"
60
61
#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
/* FP emulator type chosen on the command line ("fpe=..."). */
char fpe_type[8];

/*
 * Record the "fpe=" boot argument for the FP emulator to pick up later.
 * Note: always copies exactly 8 bytes, so longer arguments are silently
 * truncated.
 */
static int __init fpe_setup(char *line)
{
        memcpy(fpe_type, line, 8);
        return 1;
}

__setup("fpe=", fpe_setup);
#endif
73
74 extern void paging_init(struct machine_desc *desc);
75 extern void sanity_check_meminfo(void);
76 extern void reboot_setup(char *str);
77 extern void setup_dma_zone(struct machine_desc *desc);
78
/* Main ID register value of the boot CPU; also read by assembly code. */
unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
/* Machine type number handed over by the boot loader (ATAG boot). */
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
/* Cache topology flags (CACHEID_*), filled in by cacheid_init(). */
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

/* Physical address of the ATAGS/FDT blob — presumably stored by the
 * early assembly boot path; TODO confirm against head.S. */
unsigned int __atags_pointer __initdata;

/* Board revision and serial number, reported in /proc/cpuinfo. */
unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

/* Capability bits (HWCAP_*) exposed to user space via AT_HWCAP. */
unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);
99
100
/*
 * Per-CPU-type method tables.  On MULTI_* kernels (more than one CPU,
 * TLB, user-page or cache implementation compiled in) these are copied
 * from the matching proc_info_list entry in setup_processor().
 */
#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
/* Outer (e.g. L2) cache operations, installed by the outer-cache driver. */
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif
117
/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

/*
 * Per-mode exception stacks: three words each for the IRQ, abort and
 * undefined-instruction modes (installed by cpu_init()).
 */
struct stack {
        u32 irq[3];
        u32 abt[3];
        u32 und[3];
} ____cacheline_aligned;

#ifndef CONFIG_CPU_V7M
/* One set of exception stacks per CPU; not used on v7-M. */
static struct stack stacks[NR_CPUS];
#endif
134
/* Platform string exposed to user space via AT_PLATFORM. */
char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

/* Human-readable CPU and machine names for /proc/cpuinfo. */
static const char *cpu_name;
static const char *machine_name;
/* Boot-time copy of the command line handed to init (see setup_arch()). */
static char __initdata cmd_line[COMMAND_LINE_SIZE];
struct machine_desc *machine_desc __initdata;

/*
 * Run-time endianness probe: the low byte of .l reads back as 'l' on a
 * little-endian build and 'b' on a big-endian one.
 */
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

/* Per-CPU data (loops_per_jiffy etc.) reported via /proc/cpuinfo. */
DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
147
/*
 * Standard memory resources.  start/end are filled in during boot
 * (request_standard_resources()); see the aliases below.
 */
static struct resource mem_res[] = {
        {
                .name = "Video RAM",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        },
        {
                .name = "Kernel code",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        },
        {
                .name = "Kernel data",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        }
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

/*
 * Legacy PC-style parallel-port I/O ranges that some machines need to
 * keep reserved (claimed only when the machine_desc asks for it).
 */
static struct resource io_res[] = {
        {
                .name = "reserved",
                .start = 0x3bc,
                .end = 0x3be,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        },
        {
                .name = "reserved",
                .start = 0x378,
                .end = 0x37f,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        },
        {
                .name = "reserved",
                .start = 0x278,
                .end = 0x27f,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        }
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]
200
/*
 * Human-readable architecture names, indexed by the CPU_ARCH_* value
 * returned by cpu_architecture(); shown in /proc/cpuinfo and boot logs.
 */
static const char *proc_arch[] = {
        "undefined/unknown",
        "3",
        "4",
        "4T",
        "5",
        "5T",
        "5TE",
        "5TEJ",
        "6TEJ",
        "7",
        "7M",
        "?(12)",
        "?(13)",
        "?(14)",
        "?(15)",
        "?(16)",
        "?(17)",
};
220
#ifdef CONFIG_CPU_V7M
/* A v7-M (Cortex-M class) kernel only runs on v7-M parts. */
static int __get_cpu_architecture(void)
{
        return CPU_ARCH_ARMv7M;
}
#else
/*
 * Decode the architecture version (CPU_ARCH_*) from the main ID
 * register, covering the pre-ARM7, ARM7, old-style and revised
 * (architecture field == 0xF) CPUID layouts.
 */
static int __get_cpu_architecture(void)
{
        int cpu_arch;

        if ((read_cpuid_id() & 0x0008f000) == 0) {
                cpu_arch = CPU_ARCH_UNKNOWN;
        } else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
                /* ARM7 layout: bit 23 separates ARMv4T from ARMv3. */
                cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
        } else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
                /* Old-style CPUID: architecture number in bits [18:16]. */
                cpu_arch = (read_cpuid_id() >> 16) & 7;
                if (cpu_arch)
                        cpu_arch += CPU_ARCH_ARMv3;
        } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
                unsigned int mmfr0;

                /* Revised CPUID format. Read the Memory Model Feature
                 * Register 0 and check for VMSAv7 or PMSAv7 */
                asm("mrc        p15, 0, %0, c0, c1, 4"
                    : "=r" (mmfr0));
                if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
                    (mmfr0 & 0x000000f0) >= 0x00000030)
                        cpu_arch = CPU_ARCH_ARMv7;
                else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
                         (mmfr0 & 0x000000f0) == 0x00000020)
                        cpu_arch = CPU_ARCH_ARMv6;
                else
                        cpu_arch = CPU_ARCH_UNKNOWN;
        } else
                cpu_arch = CPU_ARCH_UNKNOWN;

        return cpu_arch;
}
#endif
260
261 int __pure cpu_architecture(void)
262 {
263         BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);
264
265         return __cpu_architecture;
266 }
267
/*
 * Return non-zero if the instruction cache may alias, i.e. the same
 * physical line can appear at more than one virtual index.  'arch'
 * selects which cache-type register format to decode.
 */
static int cpu_has_aliasing_icache(unsigned int arch)
{
        int aliasing_icache;
        unsigned int id_reg, num_sets, line_size;

        /* PIPT caches never alias. */
        if (icache_is_pipt())
                return 0;

        /* arch specifies the register format */
        switch (arch) {
        case CPU_ARCH_ARMv7:
                /* Select the L1 I-cache in CSSELR, then read its geometry. */
                asm("mcr        p15, 2, %0, c0, c0, 0 @ set CSSELR"
                    : /* No output operands */
                    : "r" (1));
                isb();
                asm("mrc        p15, 1, %0, c0, c0, 0 @ read CCSIDR"
                    : "=r" (id_reg));
                line_size = 4 << ((id_reg & 0x7) + 2);
                num_sets = ((id_reg >> 13) & 0x7fff) + 1;
                /* Aliasing becomes possible once one way spans > a page. */
                aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
                break;
        case CPU_ARCH_ARMv6:
                /* v6 cache type register: bit 11 indicates I-cache aliasing. */
                aliasing_icache = read_cpuid_cachetype() & (1 << 11);
                break;
        default:
                /* I-cache aliases will be handled by D-cache aliasing code */
                aliasing_icache = 0;
        }

        return aliasing_icache;
}
300
/*
 * Work out the cache topology (VIVT/VIPT/PIPT, aliasing or not) from
 * the cache type register and record it in the global 'cacheid'.
 */
static void __init cacheid_init(void)
{
        unsigned int cachetype = read_cpuid_cachetype();
        unsigned int arch = cpu_architecture();

        if (arch >= CPU_ARCH_ARMv6) {
                if ((cachetype & (7 << 29)) == 4 << 29) {
                        /* ARMv7 register format */
                        arch = CPU_ARCH_ARMv7;
                        cacheid = CACHEID_VIPT_NONALIASING;
                        /* L1Ip field selects the I-cache policy. */
                        switch (cachetype & (3 << 14)) {
                        case (1 << 14):
                                cacheid |= CACHEID_ASID_TAGGED;
                                break;
                        case (3 << 14):
                                cacheid |= CACHEID_PIPT;
                                break;
                        }
                } else {
                        /* v6 format: bit 23 marks an aliasing VIPT D-cache. */
                        arch = CPU_ARCH_ARMv6;
                        if (cachetype & (1 << 23))
                                cacheid = CACHEID_VIPT_ALIASING;
                        else
                                cacheid = CACHEID_VIPT_NONALIASING;
                }
                if (cpu_has_aliasing_icache(arch))
                        cacheid |= CACHEID_VIPT_I_ALIASING;
        } else {
                /* Pre-v6 CPUs are treated as fully VIVT. */
                cacheid = CACHEID_VIVT;
        }

        printk("CPU: %s data cache, %s instruction cache\n",
                cache_is_vivt() ? "VIVT" :
                cache_is_vipt_aliasing() ? "VIPT aliasing" :
                cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
                cache_is_vivt() ? "VIVT" :
                icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
                icache_is_vipt_aliasing() ? "VIPT aliasing" :
                icache_is_pipt() ? "PIPT" :
                cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}
342
343 /*
344  * These functions re-use the assembly code in head.S, which
345  * already provide the required functionality.
346  */
347 extern struct proc_info_list *lookup_processor_type(unsigned int);
348
/*
 * printf-style output usable before the console is up: always routed
 * through printk, and additionally to the low-level debug UART when
 * CONFIG_DEBUG_LL is enabled.  Output is truncated to 256 bytes.
 */
void __init early_print(const char *str, ...)
{
        extern void printascii(const char *);
        char buf[256];
        va_list ap;

        va_start(ap, str);
        vsnprintf(buf, sizeof(buf), str, ap);
        va_end(ap);

#ifdef CONFIG_DEBUG_LL
        printascii(buf);
#endif
        printk("%s", buf);
}
364
365 static void __init feat_v6_fixup(void)
366 {
367         int id = read_cpuid_id();
368
369         if ((id & 0xff0f0000) != 0x41070000)
370                 return;
371
372         /*
373          * HWCAP_TLS is available only on 1136 r1p0 and later,
374          * see also kuser_get_tls_init.
375          */
376         if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
377                 elf_hwcap &= ~HWCAP_TLS;
378 }
379
/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks: it switches through the IRQ,
 * abort and undefined-instruction modes in turn, pointing each mode's
 * sp at the corresponding slot of this CPU's 'struct stack', and then
 * returns to SVC mode.  Not needed on v7-M, which has no banked modes.
 */
void cpu_init(void)
{
#ifndef CONFIG_CPU_V7M
        unsigned int cpu = smp_processor_id();
        struct stack *stk = &stacks[cpu];

        if (cpu >= NR_CPUS) {
                printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
                BUG();
        }

        cpu_proc_init();

        /*
         * Define the placement constraint for the inline asm directive below.
         * In Thumb-2, msr with an immediate value is not allowed.
         */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC     "r"
#else
#define PLC     "I"
#endif

        /*
         * setup stacks for re-entrant exception handlers
         */
        __asm__ (
        "msr    cpsr_c, %1\n\t"
        "add    r14, %0, %2\n\t"
        "mov    sp, r14\n\t"
        "msr    cpsr_c, %3\n\t"
        "add    r14, %0, %4\n\t"
        "mov    sp, r14\n\t"
        "msr    cpsr_c, %5\n\t"
        "add    r14, %0, %6\n\t"
        "mov    sp, r14\n\t"
        "msr    cpsr_c, %7"
            :
            : "r" (stk),
              PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
              "I" (offsetof(struct stack, irq[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
              "I" (offsetof(struct stack, abt[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
              "I" (offsetof(struct stack, und[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
            : "r14");
#endif
}
434
/* Logical-to-physical CPU id map; indexed by logical CPU number. */
int __cpu_logical_map[NR_CPUS];

/*
 * Make the boot CPU logical CPU 0: read our physical id (low byte of
 * MPIDR on SMP, 0 otherwise), put it in slot 0, and swap the id that
 * previously occupied our slot so the map stays a permutation.
 */
void __init smp_setup_processor_id(void)
{
        int i;
        u32 cpu = is_smp() ? read_cpuid_mpidr() & 0xff : 0;

        cpu_logical_map(0) = cpu;
        for (i = 1; i < NR_CPUS; ++i)
                cpu_logical_map(i) = i == cpu ? 0 : i;

        printk(KERN_INFO "Booting Linux on physical CPU %d\n", cpu);
}
448
/*
 * Identify the boot CPU: match the ID register against the compiled-in
 * proc_info table, install the per-CPU-type method tables, derive the
 * hwcaps and uname/ELF platform strings, then initialise the cache
 * description and the per-mode exception stacks.  Halts if the CPU is
 * not supported by this kernel.
 */
static void __init setup_processor(void)
{
        struct proc_info_list *list;

        /*
         * locate processor in the list of supported processor
         * types.  The linker builds this table for us from the
         * entries in arch/arm/mm/proc-*.S
         */
        list = lookup_processor_type(read_cpuid_id());
        if (!list) {
                printk("CPU configuration botched (ID %08x), unable "
                       "to continue.\n", read_cpuid_id());
                while (1);
        }

        cpu_name = list->cpu_name;
        __cpu_architecture = __get_cpu_architecture();

#ifdef MULTI_CPU
        processor = *list->proc;
#endif
#ifdef MULTI_TLB
        cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
        cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
        cpu_cache = *list->cache;
#endif

        printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
               cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
               proc_arch[cpu_architecture()], cr_alignment);

        /* ENDIANNESS suffixes the machine/platform strings ('l' or 'b'). */
        snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
                 list->arch_name, ENDIANNESS);
        snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
                 list->elf_name, ENDIANNESS);
        elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
        elf_hwcap &= ~HWCAP_THUMB;
#endif

        feat_v6_fixup();

        cacheid_init();
        cpu_init();
}
499
/*
 * Print every machine type this kernel was built for, then halt.
 * Called when the boot loader hands over a machine ID we don't support.
 */
void __init dump_machine_table(void)
{
        struct machine_desc *p;

        early_print("Available machine support:\n\nID (hex)\tNAME\n");
        for_each_machine_desc(p)
                early_print("%08x\t%s\n", p->nr, p->name);

        early_print("\nPlease check your kernel config and/or bootloader.\n");

        while (true)
                /* can't use cpu_relax() here as it may require MMU setup */;
}
513
/*
 * Record one physical memory bank in meminfo.  The bank start is
 * rounded up and the size rounded down to page boundaries; without
 * LPAE the bank is also truncated to fit 32-bit physical addressing.
 * Returns 0 on success, -EINVAL if the bank table is full or the bank
 * ends up empty after rounding.
 */
int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
{
        struct membank *bank = &meminfo.bank[meminfo.nr_banks];

        if (meminfo.nr_banks >= NR_BANKS) {
                printk(KERN_CRIT "NR_BANKS too low, "
                        "ignoring memory at 0x%08llx\n", (long long)start);
                return -EINVAL;
        }

        /*
         * Ensure that start/size are aligned to a page boundary.
         * Size is appropriately rounded down, start is rounded up.
         */
        size -= start & ~PAGE_MASK;
        bank->start = PAGE_ALIGN(start);

#ifndef CONFIG_LPAE
        if (bank->start + size < bank->start) {
                printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
                        "32-bit physical address space\n", (long long)start);
                /*
                 * To ensure bank->start + bank->size is representable in
                 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
                 * This means we lose a page after masking.
                 */
                size = ULONG_MAX - bank->start;
        }
#endif

        bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

        /*
         * Check whether this memory region has non-zero size or
         * invalid node number.
         */
        if (bank->size == 0)
                return -EINVAL;

        meminfo.nr_banks++;
        return 0;
}
556
/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]".  If no start is given the
 * bank begins at PHYS_OFFSET.
 */
static int __init early_mem(char *p)
{
        static int usermem __initdata = 0;
        phys_addr_t size;
        phys_addr_t start;
        char *endp;

        /*
         * If the user specifies memory size, we
         * blow away any automatically generated
         * size.  Done only once, so several mem=
         * options accumulate banks.
         */
        if (usermem == 0) {
                usermem = 1;
                meminfo.nr_banks = 0;
        }

        start = PHYS_OFFSET;
        size  = memparse(p, &endp);
        if (*endp == '@')
                start = memparse(endp + 1, NULL);

        arm_add_memory(start, size);

        return 0;
}
early_param("mem", early_mem);
588
/*
 * Register a "System RAM" resource for every memblock region and claim
 * the kernel text/data ranges, the machine's video RAM (if any) and
 * any legacy printer-port I/O ranges the machine_desc asks to reserve.
 */
static void __init request_standard_resources(struct machine_desc *mdesc)
{
        struct memblock_region *region;
        struct resource *res;

        kernel_code.start   = virt_to_phys(_text);
        kernel_code.end     = virt_to_phys(_etext - 1);
        kernel_data.start   = virt_to_phys(_sdata);
        kernel_data.end     = virt_to_phys(_end - 1);

        for_each_memblock(memory, region) {
                res = alloc_bootmem_low(sizeof(*res));
                res->name  = "System RAM";
                res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
                res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
                res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

                request_resource(&iomem_resource, res);

                /* Nest kernel text/data inside the RAM region holding them. */
                if (kernel_code.start >= res->start &&
                    kernel_code.end <= res->end)
                        request_resource(res, &kernel_code);
                if (kernel_data.start >= res->start &&
                    kernel_data.end <= res->end)
                        request_resource(res, &kernel_data);
        }

        if (mdesc->video_start) {
                video_ram.start = mdesc->video_start;
                video_ram.end   = mdesc->video_end;
                request_resource(&iomem_resource, &video_ram);
        }

        /*
         * Some machines don't have the possibility of ever
         * possessing lp0, lp1 or lp2
         */
        if (mdesc->reserve_lp0)
                request_resource(&ioport_resource, &lp0);
        if (mdesc->reserve_lp1)
                request_resource(&ioport_resource, &lp1);
        if (mdesc->reserve_lp2)
                request_resource(&ioport_resource, &lp2);
}
633
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
/* Initial console screen parameters handed to the VT layer. */
struct screen_info screen_info = {
 .orig_video_lines      = 30,
 .orig_video_cols       = 80,
 .orig_video_mode       = 0,
 .orig_video_ega_bx     = 0,
 .orig_video_isVGA      = 1,
 .orig_video_points     = 8
};
#endif
644
645 static int __init customize_machine(void)
646 {
647         /* customizes platform devices, or adds new ones */
648         if (machine_desc->init_machine)
649                 machine_desc->init_machine();
650         return 0;
651 }
652 arch_initcall(customize_machine);
653
654 static int __init init_machine_late(void)
655 {
656         if (machine_desc->init_late)
657                 machine_desc->init_late();
658         return 0;
659 }
660 late_initcall(init_machine_late);
661
#ifdef CONFIG_KEXEC
/* Amount of lowmem in bytes, derived from the pfn range. */
static inline unsigned long long get_total_mem(void)
{
        unsigned long total;

        total = max_low_pfn - min_low_pfn;
        return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for the crash kernel
 *
 * This function reserves memory area given in "crashkernel=" kernel command
 * line parameter. The memory reserved is used by a dump capture kernel when
 * primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
        unsigned long long crash_size, crash_base;
        unsigned long long total_mem;
        int ret;

        total_mem = get_total_mem();
        ret = parse_crashkernel(boot_command_line, total_mem,
                                &crash_size, &crash_base);
        /* Nothing to do when no usable crashkernel= option was given. */
        if (ret)
                return;

        ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
        if (ret < 0) {
                printk(KERN_WARNING "crashkernel reservation failed - "
                       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
                return;
        }

        printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
               "for crashkernel (System RAM: %ldMB)\n",
               (unsigned long)(crash_size >> 20),
               (unsigned long)(crash_base >> 20),
               (unsigned long)(total_mem >> 20));

        /* Publish the reservation so /proc/iomem shows "Crash kernel". */
        crashk_res.start = crash_base;
        crashk_res.end = crash_base + crash_size - 1;
        insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */
710
711 static int __init meminfo_cmp(const void *_a, const void *_b)
712 {
713         const struct membank *a = _a, *b = _b;
714         long cmp = bank_pfn_start(a) - bank_pfn_start(b);
715         return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
716 }
717
/*
 * Report which mode the CPU(s) entered the kernel in (HYP vs SVC) and
 * warn on inconsistent boot modes.  No-op unless the kernel has
 * virtualization-extension support compiled in.
 */
void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
        if (is_hyp_mode_available()) {
                pr_info("CPU: All CPU(s) started in HYP mode.\n");
                pr_info("CPU: Virtualization extensions available.\n");
        } else if (is_hyp_mode_mismatched()) {
                pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
                        __boot_cpu_mode & MODE_MASK);
                pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
        } else
                pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}
732
/*
 * Top-level ARM boot-time setup: identify the CPU and machine, parse
 * early command-line parameters, set up the memory layout and paging,
 * and wire the machine_desc hooks into the rest of the kernel.  The
 * ordering of the calls below is significant.
 */
void __init setup_arch(char **cmdline_p)
{
        struct machine_desc *mdesc;

        setup_processor();
        /* Prefer a device tree; fall back to ATAG-based machine lookup. */
        mdesc = setup_machine_fdt(__atags_pointer);
        if (!mdesc)
                mdesc = setup_machine_tags(__atags_pointer, machine_arch_type);
        machine_desc = mdesc;
        machine_name = mdesc->name;

        setup_dma_zone(mdesc);

        if (mdesc->restart_mode)
                reboot_setup(&mdesc->restart_mode);

        init_mm.start_code = (unsigned long) _text;
        init_mm.end_code   = (unsigned long) _etext;
        init_mm.end_data   = (unsigned long) _edata;
        init_mm.brk        = (unsigned long) _end;

        /* populate cmd_line too for later use, preserving boot_command_line */
        strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
        *cmdline_p = cmd_line;

        parse_early_param();

        /* Banks must be sorted before the memblock/paging setup below. */
        sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
        sanity_check_meminfo();
        arm_memblock_init(&meminfo, mdesc);

        paging_init(mdesc);
        request_standard_resources(mdesc);

        if (mdesc->restart)
                arm_pm_restart = mdesc->restart;

        unflatten_device_tree();

#ifdef CONFIG_SMP
        if (is_smp()) {
                smp_set_ops(mdesc->smp);
                smp_init_cpus();
        }
#endif

        /* On UP, check the boot mode here (no secondary-CPU bringup). */
        if (!is_smp())
                hyp_mode_check();

        reserve_crashkernel();

        tcm_init();

#ifdef CONFIG_MULTI_IRQ_HANDLER
        handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
        conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
#endif
#endif

        if (mdesc->init_early)
                mdesc->init_early();
}
801
802
803 static int __init topology_init(void)
804 {
805         int cpu;
806
807         for_each_possible_cpu(cpu) {
808                 struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
809                 cpuinfo->cpu.hotpluggable = 1;
810                 register_cpu(&cpuinfo->cpu, cpu);
811         }
812
813         return 0;
814 }
815 subsys_initcall(topology_init);
816
#ifdef CONFIG_HAVE_PROC_CPU
/* Create the /proc/cpu directory; fails only on allocation error. */
static int __init proc_cpu_init(void)
{
        return proc_mkdir("cpu", NULL) ? 0 : -ENOMEM;
}
fs_initcall(proc_cpu_init);
#endif
829
/*
 * Feature names for the /proc/cpuinfo "Features" line, indexed by the
 * corresponding HWCAP_* bit position; NULL-terminated.
 */
static const char *hwcap_str[] = {
        "swp",
        "half",
        "thumb",
        "26bit",
        "fastmult",
        "fpa",
        "vfp",
        "edsp",
        "java",
        "iwmmxt",
        "crunch",
        "thumbee",
        "neon",
        "vfpv3",
        "vfpv3d16",
        "tls",
        "vfpv4",
        "idiva",
        "idivt",
        NULL
};
852
/*
 * /proc/cpuinfo show routine: emits the processor description, per-CPU
 * BogoMIPS lines, the feature flags and the decoded CPUID fields,
 * followed by the board identification.
 */
static int c_show(struct seq_file *m, void *v)
{
        int i;

        seq_printf(m, "Processor\t: %s rev %d (%s)\n",
                   cpu_name, read_cpuid_id() & 15, elf_platform);

#if defined(CONFIG_SMP)
        for_each_online_cpu(i) {
                /*
                 * glibc reads /proc/cpuinfo to determine the number of
                 * online processors, looking for lines beginning with
                 * "processor".  Give glibc what it expects.
                 */
                seq_printf(m, "processor\t: %d\n", i);
                seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
                           per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
                           (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
        }
#else /* CONFIG_SMP */
        seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
                   loops_per_jiffy / (500000/HZ),
                   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

        /* dump out the processor features */
        seq_puts(m, "Features\t: ");

        for (i = 0; hwcap_str[i]; i++)
                if (elf_hwcap & (1 << i))
                        seq_printf(m, "%s ", hwcap_str[i]);

        seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
        seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

        /* Variant/part field layout depends on the CPUID format. */
        if ((read_cpuid_id() & 0x0008f000) == 0x00000000) {
                /* pre-ARM7 */
                seq_printf(m, "CPU part\t: %07x\n", read_cpuid_id() >> 4);
        } else {
                if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
                        /* ARM7 */
                        seq_printf(m, "CPU variant\t: 0x%02x\n",
                                   (read_cpuid_id() >> 16) & 127);
                } else {
                        /* post-ARM7 */
                        seq_printf(m, "CPU variant\t: 0x%x\n",
                                   (read_cpuid_id() >> 20) & 15);
                }
                seq_printf(m, "CPU part\t: 0x%03x\n",
                           (read_cpuid_id() >> 4) & 0xfff);
        }
        seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);

        seq_puts(m, "\n");

        seq_printf(m, "Hardware\t: %s\n", machine_name);
        seq_printf(m, "Revision\t: %04x\n", system_rev);
        seq_printf(m, "Serial\t\t: %08x%08x\n",
                   system_serial_high, system_serial_low);

        return 0;
}
915
916 static void *c_start(struct seq_file *m, loff_t *pos)
917 {
918         return *pos < 1 ? (void *)1 : NULL;
919 }
920
921 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
922 {
923         ++*pos;
924         return NULL;
925 }
926
/* seq_file stop: nothing to release. */
static void c_stop(struct seq_file *m, void *v)
{
}
930
/* seq_file operations backing /proc/cpuinfo. */
const struct seq_operations cpuinfo_op = {
        .start  = c_start,
        .next   = c_next,
        .stop   = c_stop,
        .show   = c_show
};