/*
 * arch/arm/kernel/setup.c -- from the karo-tx-linux tree
 * (merge of git://git.pengutronix.de/git/ukl/linux 'for-next' into devel-stable)
 */
1 /*
2  *  linux/arch/arm/kernel/setup.c
3  *
4  *  Copyright (C) 1995-2001 Russell King
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 #include <linux/export.h>
11 #include <linux/kernel.h>
12 #include <linux/stddef.h>
13 #include <linux/ioport.h>
14 #include <linux/delay.h>
15 #include <linux/utsname.h>
16 #include <linux/initrd.h>
17 #include <linux/console.h>
18 #include <linux/bootmem.h>
19 #include <linux/seq_file.h>
20 #include <linux/screen_info.h>
21 #include <linux/of_platform.h>
22 #include <linux/init.h>
23 #include <linux/kexec.h>
24 #include <linux/of_fdt.h>
25 #include <linux/cpu.h>
26 #include <linux/interrupt.h>
27 #include <linux/smp.h>
28 #include <linux/proc_fs.h>
29 #include <linux/memblock.h>
30 #include <linux/bug.h>
31 #include <linux/compiler.h>
32 #include <linux/sort.h>
33
34 #include <asm/unified.h>
35 #include <asm/cp15.h>
36 #include <asm/cpu.h>
37 #include <asm/cputype.h>
38 #include <asm/elf.h>
39 #include <asm/procinfo.h>
40 #include <asm/sections.h>
41 #include <asm/setup.h>
42 #include <asm/smp_plat.h>
43 #include <asm/mach-types.h>
44 #include <asm/cacheflush.h>
45 #include <asm/cachetype.h>
46 #include <asm/tlbflush.h>
47
48 #include <asm/prom.h>
49 #include <asm/mach/arch.h>
50 #include <asm/mach/irq.h>
51 #include <asm/mach/time.h>
52 #include <asm/system_info.h>
53 #include <asm/system_misc.h>
54 #include <asm/traps.h>
55 #include <asm/unwind.h>
56 #include <asm/memblock.h>
57 #include <asm/virt.h>
58
59 #include "atags.h"
60
61
#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
/* FP emulator type selected by the "fpe=" command line option. */
char fpe_type[8];

/*
 * Parse the "fpe=" kernel parameter.
 *
 * NOTE(review): memcpy() always copies 8 bytes, so it may read past the
 * end of a shorter argument string, and fpe_type is not guaranteed to be
 * NUL-terminated -- consumers must treat it as a fixed 8-byte field.
 */
static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif
73
74 extern void paging_init(struct machine_desc *desc);
75 extern void sanity_check_meminfo(void);
76 extern void reboot_setup(char *str);
77 extern void setup_dma_zone(struct machine_desc *desc);
78
/* Main ID register value; presumably filled in by early boot code. */
unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
/* Machine/board number handed over by the bootloader. */
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
/* CACHEID_* flags describing the cache topology; set by cacheid_init(). */
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

/* Boot-time pointer to the ATAGs/FDT blob, consumed in setup_arch(). */
unsigned int __atags_pointer __initdata;

/* Board revision and serial number, reported via /proc/cpuinfo. */
unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

/* Hardware capability bits advertised to user space (AT_HWCAP). */
unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);


/*
 * Per-implementation function tables, copied from the matching
 * proc_info_list entry in setup_processor() when the kernel is built
 * to support multiple CPU/TLB/user/cache implementations.
 */
#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif
117
/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

/* Small per-mode exception stacks: 3 words each for IRQ/abort/undef. */
struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

#ifndef CONFIG_CPU_V7M
static struct stack stacks[NR_CPUS];
#endif

/* Platform string exposed via the ELF aux vector; set in setup_processor(). */
char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
/* Writable copy of the boot command line, handed back from setup_arch(). */
static char __initdata cmd_line[COMMAND_LINE_SIZE];
struct machine_desc *machine_desc __initdata;

/*
 * Build-time endianness probe: casting the long to char yields its
 * low-order byte, i.e. 'l' on little-endian and 'b' on big-endian.
 */
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

/* Per-CPU data (cpuid, loops_per_jiffy, ...) shown in /proc/cpuinfo. */
DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
147
/*
 * Standard memory resources.  Start/end of the kernel code and data
 * entries are filled in by request_standard_resources().
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

/* Convenience aliases into mem_res[]. */
#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

/* Legacy PC parallel-port I/O ranges, reserved only when the machine asks. */
static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

/* Convenience aliases into io_res[]. */
#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]
200
/*
 * Human-readable architecture names, indexed by the CPU_ARCH_*
 * value returned by cpu_architecture().
 */
static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"7M",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};
220
#ifdef CONFIG_CPU_V7M
/* v7-M (Cortex-M) kernels are built for exactly one architecture. */
static int __get_cpu_architecture(void)
{
	return CPU_ARCH_ARMv7M;
}
#else
/*
 * Decode the CPU architecture from the main ID register.  Handles
 * pre-ARM7 parts, ARM7-style IDs, the old-style CPUID encoding and
 * the revised (0xF) CPUID scheme, where MMFR0 is consulted to tell
 * VMSAv7/PMSAv7 (v7) apart from v6.
 */
static int __get_cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		/* Pre-ARM7: no architecture information available. */
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		/* ARM7-style ID: bit 23 distinguishes v4T from v3. */
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		/* Old-style CPUID: architecture in bits [18:16], offset from v3. */
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		unsigned int mmfr0;

		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		asm("mrc	p15, 0, %0, c0, c1, 4"
		    : "=r" (mmfr0));
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
#endif
260
261 int __pure cpu_architecture(void)
262 {
263         BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);
264
265         return __cpu_architecture;
266 }
267
/*
 * cpu_has_aliasing_icache() - detect whether the I-cache may suffer
 * virtual aliasing for the given cache ID register format.
 * Returns non-zero if aliasing is possible; PIPT caches never alias.
 */
static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		/* Select the L1 I-cache via CSSELR, then read its CCSIDR. */
		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
		    : /* No output operands */
		    : "r" (1));
		isb();
		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
		    : "=r" (id_reg));
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		/* Aliasing becomes possible once one way spans more than a page. */
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		/* Bit 11 of the cache type register flags the aliasing case. */
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}
300
/*
 * cacheid_init() - determine the cache addressing scheme and record it
 * in the global 'cacheid' flags word, then report it.
 *
 * v7-M has no caches to classify; v6 and later are decoded from the
 * cache type register (which itself distinguishes the ARMv6 and ARMv7
 * register formats); anything older is assumed VIVT.
 */
static void __init cacheid_init(void)
{
	unsigned int arch = cpu_architecture();

	if (arch == CPU_ARCH_ARMv7M) {
		cacheid = 0;
	} else if (arch >= CPU_ARCH_ARMv6) {
		unsigned int cachetype = read_cpuid_cachetype();
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			/* Bits [15:14] encode the I-cache policy. */
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			/* ARMv6 format: bit 23 flags an aliasing D-cache. */
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	printk("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}
344
345 /*
346  * These functions re-use the assembly code in head.S, which
347  * already provide the required functionality.
348  */
349 extern struct proc_info_list *lookup_processor_type(unsigned int);
350
351 void __init early_print(const char *str, ...)
352 {
353         extern void printascii(const char *);
354         char buf[256];
355         va_list ap;
356
357         va_start(ap, str);
358         vsnprintf(buf, sizeof(buf), str, ap);
359         va_end(ap);
360
361 #ifdef CONFIG_DEBUG_LL
362         printascii(buf);
363 #endif
364         printk("%s", buf);
365 }
366
/*
 * cpuid_init_hwcaps() - set the hardware-divide hwcap bits from
 * ID_ISAR0 on ARMv7+ cores.
 *
 * Bits [27:24] of ID_ISAR0 describe divide support: 1 means Thumb-only
 * SDIV/UDIV, 2 means both ARM and Thumb encodings are available.
 */
static void __init cpuid_init_hwcaps(void)
{
	unsigned int divide_instrs;

	if (cpu_architecture() < CPU_ARCH_ARMv7)
		return;

	divide_instrs = (read_cpuid_ext(CPUID_EXT_ISAR0) & 0x0f000000) >> 24;

	switch (divide_instrs) {
	case 2:
		elf_hwcap |= HWCAP_IDIVA;
		/* fall through: ARM-mode divide implies Thumb-mode divide */
	case 1:
		elf_hwcap |= HWCAP_IDIVT;
	}
}
383
384 static void __init feat_v6_fixup(void)
385 {
386         int id = read_cpuid_id();
387
388         if ((id & 0xff0f0000) != 0x41070000)
389                 return;
390
391         /*
392          * HWCAP_TLS is available only on 1136 r1p0 and later,
393          * see also kuser_get_tls_init.
394          */
395         if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
396                 elf_hwcap &= ~HWCAP_TLS;
397 }
398
/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void notrace cpu_init(void)
{
#ifndef CONFIG_CPU_V7M
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	/* Sanity check: 'stacks' has exactly NR_CPUS entries. */
	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * This only works on resume and secondary cores. For booting on the
	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
	 */
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 *
	 * Switch into IRQ, ABT and UND mode in turn (interrupts masked),
	 * point each mode's banked sp at its slot in this CPU's 'stacks'
	 * entry, then drop back into SVC mode.
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
#endif
}
459
460 int __cpu_logical_map[NR_CPUS];
461
462 void __init smp_setup_processor_id(void)
463 {
464         int i;
465         u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
466         u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
467
468         cpu_logical_map(0) = cpu;
469         for (i = 1; i < nr_cpu_ids; ++i)
470                 cpu_logical_map(i) = i == cpu ? 0 : i;
471
472         printk(KERN_INFO "Booting Linux on physical CPU 0x%x\n", mpidr);
473 }
474
/*
 * setup_processor() - identify the CPU and wire up everything that
 * depends on it: the per-implementation function tables, the cached
 * architecture value, hwcaps, the utsname/ELF platform strings, the
 * cache ID and the boot CPU's exception stacks.  Halts if the CPU is
 * not supported by this kernel build.
 */
static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

	/* Copy the implementation-specific function tables into place. */
#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	/* ENDIANNESS appends 'l' or 'b' to the machine/platform names. */
	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;

	cpuid_init_hwcaps();

#ifndef CONFIG_ARM_THUMB
	/* No Thumb support built in: hide the Thumb-related hwcaps. */
	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif

	feat_v6_fixup();

	cacheid_init();
	cpu_init();
}
528
529 void __init dump_machine_table(void)
530 {
531         struct machine_desc *p;
532
533         early_print("Available machine support:\n\nID (hex)\tNAME\n");
534         for_each_machine_desc(p)
535                 early_print("%08x\t%s\n", p->nr, p->name);
536
537         early_print("\nPlease check your kernel config and/or bootloader.\n");
538
539         while (true)
540                 /* can't use cpu_relax() here as it may require MMU setup */;
541 }
542
/*
 * arm_add_memory() - register a physical memory bank with meminfo.
 *
 * @start: physical base address of the bank (rounded up to a page).
 * @size:  size in bytes (trimmed for the rounding, then rounded down).
 *
 * Returns 0 on success, -EINVAL when the bank table is full or the
 * bank would be empty after page alignment.
 */
int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
{
	struct membank *bank = &meminfo.bank[meminfo.nr_banks];

	if (meminfo.nr_banks >= NR_BANKS) {
		printk(KERN_CRIT "NR_BANKS too low, "
			"ignoring memory at 0x%08llx\n", (long long)start);
		return -EINVAL;
	}

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is appropriately rounded down, start is rounded up.
	 */
	size -= start & ~PAGE_MASK;
	bank->start = PAGE_ALIGN(start);

#ifndef CONFIG_ARM_LPAE
	/* Without LPAE, clamp banks that would wrap past 4GB. */
	if (bank->start + size < bank->start) {
		printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
			"32-bit physical address space\n", (long long)start);
		/*
		 * To ensure bank->start + bank->size is representable in
		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
		 * This means we lose a page after masking.
		 */
		size = ULONG_MAX - bank->start;
	}
#endif

	bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

	/*
	 * Check whether this memory region has non-zero size or
	 * invalid node number.
	 */
	if (bank->size == 0)
		return -EINVAL;

	meminfo.nr_banks++;
	return 0;
}
585
586 /*
587  * Pick out the memory size.  We look for mem=size@start,
588  * where start and size are "size[KkMm]"
589  */
590 static int __init early_mem(char *p)
591 {
592         static int usermem __initdata = 0;
593         phys_addr_t size;
594         phys_addr_t start;
595         char *endp;
596
597         /*
598          * If the user specifies memory size, we
599          * blow away any automatically generated
600          * size.
601          */
602         if (usermem == 0) {
603                 usermem = 1;
604                 meminfo.nr_banks = 0;
605         }
606
607         start = PHYS_OFFSET;
608         size  = memparse(p, &endp);
609         if (*endp == '@')
610                 start = memparse(endp + 1, NULL);
611
612         arm_add_memory(start, size);
613
614         return 0;
615 }
616 early_param("mem", early_mem);
617
/*
 * request_standard_resources() - publish the resource tree: one
 * "System RAM" entry per memblock region, with the kernel code/data
 * spans nested inside, plus optional video RAM and legacy parallel
 * port ranges as requested by the machine descriptor.
 */
static void __init request_standard_resources(struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		/* Nest the kernel image spans under the RAM region holding them. */
		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}
662
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
/* Default 80x30 text-mode parameters for the VGA/dummy console. */
struct screen_info screen_info = {
 .orig_video_lines	= 30,
 .orig_video_cols	= 80,
 .orig_video_mode	= 0,
 .orig_video_ega_bx	= 0,
 .orig_video_isVGA	= 1,
 .orig_video_points	= 8
};
#endif
673
/*
 * customize_machine() - arch_initcall that runs the board's
 * init_machine hook (or, on DT machines without one, populates the
 * platform devices from the device tree).
 */
static int __init customize_machine(void)
{
	/*
	 * customizes platform devices, or adds new ones
	 * On DT based machines, we fall back to populating the
	 * machine from the device tree, if no callback is provided,
	 * otherwise we would always need an init_machine callback.
	 */
	if (machine_desc->init_machine)
		machine_desc->init_machine();
#ifdef CONFIG_OF
	else
		of_platform_populate(NULL, of_default_bus_match_table,
					NULL, NULL);
#endif
	return 0;
}
arch_initcall(customize_machine);
692
693 static int __init init_machine_late(void)
694 {
695         if (machine_desc->init_late)
696                 machine_desc->init_late();
697         return 0;
698 }
699 late_initcall(init_machine_late);
700
701 #ifdef CONFIG_KEXEC
702 static inline unsigned long long get_total_mem(void)
703 {
704         unsigned long total;
705
706         total = max_low_pfn - min_low_pfn;
707         return total << PAGE_SHIFT;
708 }
709
710 /**
711  * reserve_crashkernel() - reserves memory are for crash kernel
712  *
713  * This function reserves memory area given in "crashkernel=" kernel command
714  * line parameter. The memory reserved is used by a dump capture kernel when
715  * primary kernel is crashing.
716  */
717 static void __init reserve_crashkernel(void)
718 {
719         unsigned long long crash_size, crash_base;
720         unsigned long long total_mem;
721         int ret;
722
723         total_mem = get_total_mem();
724         ret = parse_crashkernel(boot_command_line, total_mem,
725                                 &crash_size, &crash_base);
726         if (ret)
727                 return;
728
729         ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
730         if (ret < 0) {
731                 printk(KERN_WARNING "crashkernel reservation failed - "
732                        "memory is in use (0x%lx)\n", (unsigned long)crash_base);
733                 return;
734         }
735
736         printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
737                "for crashkernel (System RAM: %ldMB)\n",
738                (unsigned long)(crash_size >> 20),
739                (unsigned long)(crash_base >> 20),
740                (unsigned long)(total_mem >> 20));
741
742         crashk_res.start = crash_base;
743         crashk_res.end = crash_base + crash_size - 1;
744         insert_resource(&iomem_resource, &crashk_res);
745 }
746 #else
747 static inline void reserve_crashkernel(void) {}
748 #endif /* CONFIG_KEXEC */
749
750 static int __init meminfo_cmp(const void *_a, const void *_b)
751 {
752         const struct membank *a = _a, *b = _b;
753         long cmp = bank_pfn_start(a) - bank_pfn_start(b);
754         return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
755 }
756
757 void __init hyp_mode_check(void)
758 {
759 #ifdef CONFIG_ARM_VIRT_EXT
760         if (is_hyp_mode_available()) {
761                 pr_info("CPU: All CPU(s) started in HYP mode.\n");
762                 pr_info("CPU: Virtualization extensions available.\n");
763         } else if (is_hyp_mode_mismatched()) {
764                 pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
765                         __boot_cpu_mode & MODE_MASK);
766                 pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
767         } else
768                 pr_info("CPU: All CPU(s) started in SVC mode.\n");
769 #endif
770 }
771
/*
 * setup_arch() - top-level ARM architecture setup, called from
 * start_kernel().  Identifies the CPU and machine, parses early
 * parameters, brings up the memory layout and paging, and wires up
 * machine-specific hooks.  The ordering of the calls below matters.
 */
void __init setup_arch(char **cmdline_p)
{
	struct machine_desc *mdesc;

	setup_processor();
	/* Prefer a flattened device tree; fall back to ATAG machine lookup. */
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;

	setup_dma_zone(mdesc);

	if (mdesc->restart_mode)
		reboot_setup(&mdesc->restart_mode);

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

	/* Banks must be sorted before sanity checking and memblock setup. */
	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
	sanity_check_meminfo();
	arm_memblock_init(&meminfo, mdesc);

	paging_init(mdesc);
	request_standard_resources(mdesc);

	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
#ifdef CONFIG_SMP
	if (is_smp()) {
		smp_set_ops(mdesc->smp);
		smp_init_cpus();
	}
#endif

	/* On SMP this is deferred until all CPUs are up; see smp.c. */
	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}
839
840
841 static int __init topology_init(void)
842 {
843         int cpu;
844
845         for_each_possible_cpu(cpu) {
846                 struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
847                 cpuinfo->cpu.hotpluggable = 1;
848                 register_cpu(&cpuinfo->cpu, cpu);
849         }
850
851         return 0;
852 }
853 subsys_initcall(topology_init);
854
#ifdef CONFIG_HAVE_PROC_CPU
/*
 * proc_cpu_init() - create the /proc/cpu directory under which
 * per-CPU procfs entries are registered.
 */
static int __init proc_cpu_init(void)
{
	if (proc_mkdir("cpu", NULL) == NULL)
		return -ENOMEM;

	return 0;
}
fs_initcall(proc_cpu_init);
#endif
867
/*
 * Feature names printed in /proc/cpuinfo; entry N is emitted when bit
 * N of elf_hwcap is set, so the order here must match the HWCAP_* bit
 * assignments (presumably <asm/hwcap.h> -- verify when editing).
 */
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	NULL
};
890
/*
 * c_show() - emit the body of /proc/cpuinfo: one stanza per online
 * CPU followed by the hardware/revision/serial trailer.  The exact
 * field layout is ABI; user space (e.g. glibc) parses it.
 */
static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	u32 cpuid;

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		seq_printf(m, "model name\t: %s rev %d (%s)\n",
			   cpu_name, cpuid & 15, elf_platform);

#if defined(CONFIG_SMP)
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   loops_per_jiffy / (500000/HZ),
			   (loops_per_jiffy / (5000/HZ)) % 100);
#endif
		/* dump out the processor features */
		seq_puts(m, "Features\t: ");

		for (j = 0; hwcap_str[j]; j++)
			if (elf_hwcap & (1 << j))
				seq_printf(m, "%s ", hwcap_str[j]);

		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
		seq_printf(m, "CPU architecture: %s\n",
			   proc_arch[cpu_architecture()]);

		/* Decode variant/part per the ID scheme, as in __get_cpu_architecture(). */
		if ((cpuid & 0x0008f000) == 0x00000000) {
			/* pre-ARM7 */
			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
		} else {
			if ((cpuid & 0x0008f000) == 0x00007000) {
				/* ARM7 */
				seq_printf(m, "CPU variant\t: 0x%02x\n",
					   (cpuid >> 16) & 127);
			} else {
				/* post-ARM7 */
				seq_printf(m, "CPU variant\t: 0x%x\n",
					   (cpuid >> 20) & 15);
			}
			seq_printf(m, "CPU part\t: 0x%03x\n",
				   (cpuid >> 4) & 0xfff);
		}
		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
	}

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}
953
954 static void *c_start(struct seq_file *m, loff_t *pos)
955 {
956         return *pos < 1 ? (void *)1 : NULL;
957 }
958
959 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
960 {
961         ++*pos;
962         return NULL;
963 }
964
/* seq_file stop: nothing was acquired in c_start(), so nothing to release. */
static void c_stop(struct seq_file *m, void *v)
{
}
968
/* seq_file operations backing /proc/cpuinfo. */
const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};