/*
 * Source: karo-tx-linux.git — arch/arm/kernel/setup.c
 * (tree state around "ARM: ARMv7-M: implement read_cpuid_ext")
 */
1 /*
2  *  linux/arch/arm/kernel/setup.c
3  *
4  *  Copyright (C) 1995-2001 Russell King
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 #include <linux/export.h>
11 #include <linux/kernel.h>
12 #include <linux/stddef.h>
13 #include <linux/ioport.h>
14 #include <linux/delay.h>
15 #include <linux/utsname.h>
16 #include <linux/initrd.h>
17 #include <linux/console.h>
18 #include <linux/bootmem.h>
19 #include <linux/seq_file.h>
20 #include <linux/screen_info.h>
21 #include <linux/init.h>
22 #include <linux/kexec.h>
23 #include <linux/of_fdt.h>
24 #include <linux/cpu.h>
25 #include <linux/interrupt.h>
26 #include <linux/smp.h>
27 #include <linux/proc_fs.h>
28 #include <linux/memblock.h>
29 #include <linux/bug.h>
30 #include <linux/compiler.h>
31 #include <linux/sort.h>
32
33 #include <asm/unified.h>
34 #include <asm/cp15.h>
35 #include <asm/cpu.h>
36 #include <asm/cputype.h>
37 #include <asm/elf.h>
38 #include <asm/procinfo.h>
39 #include <asm/sections.h>
40 #include <asm/setup.h>
41 #include <asm/smp_plat.h>
42 #include <asm/mach-types.h>
43 #include <asm/cacheflush.h>
44 #include <asm/cachetype.h>
45 #include <asm/tlbflush.h>
46
47 #include <asm/prom.h>
48 #include <asm/mach/arch.h>
49 #include <asm/mach/irq.h>
50 #include <asm/mach/time.h>
51 #include <asm/system_info.h>
52 #include <asm/system_misc.h>
53 #include <asm/traps.h>
54 #include <asm/unwind.h>
55 #include <asm/memblock.h>
56 #include <asm/virt.h>
57
58 #include "atags.h"
59 #include "tcm.h"
60
61
#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
/* FP emulator name requested via the "fpe=" boot argument */
char fpe_type[8];

/*
 * Record the "fpe=" boot argument for the FP emulator to consume later.
 *
 * Use strlcpy() rather than a fixed memcpy(.., 8): the old fixed-size
 * copy dragged in trailing bytes beyond a short argument and, for an
 * argument of 8+ characters, left fpe_type with no NUL terminator at
 * all, so later string handling could read past the array.
 */
static int __init fpe_setup(char *line)
{
	strlcpy(fpe_type, line, sizeof(fpe_type));
	return 1;
}

__setup("fpe=", fpe_setup);
#endif
73
/* Implemented elsewhere in arch/arm; declared here for use by setup_arch() */
extern void paging_init(struct machine_desc *desc);
extern void sanity_check_meminfo(void);
extern void reboot_setup(char *str);
extern void setup_dma_zone(struct machine_desc *desc);

/* MIDR of the boot CPU — presumably stashed by early boot code; see head.S */
unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
/* Machine number handed over by the bootloader (ATAG boot protocol) */
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
/* CACHEID_* bitmask describing the cache topology; set by cacheid_init() */
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

/* Physical address of the ATAGS/FDT blob passed by the bootloader */
unsigned int __atags_pointer __initdata;

/* Board revision/serial reported in /proc/cpuinfo (see c_show()) */
unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

/* HWCAP_* bits advertised to userspace; initialised in setup_processor() */
unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);


/*
 * When the kernel is built to support several CPU/TLB/user/cache
 * implementations, setup_processor() copies the tables for the CPU
 * actually found at boot into these structures.
 */
#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;
124
/*
 * Small per-CPU stacks for the re-entrant exception modes (IRQ,
 * abort, undefined instruction).  cpu_init() points each mode's
 * banked SP at the corresponding slot; three words per mode —
 * NOTE(review): apparently only minimal scratch space is needed
 * before the handlers switch away, confirm against entry code.
 */
struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

#ifndef CONFIG_CPU_V7M
/* v7-M builds skip these: cpu_init() sets up no mode stacks there */
static struct stack stacks[NR_CPUS];
#endif
134
/* ELF platform string (CPU name + endian suffix), built in setup_processor() */
char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;		/* human-readable CPU name from proc_info */
static const char *machine_name;	/* board name from the machine descriptor */
static char __initdata cmd_line[COMMAND_LINE_SIZE];	/* writable copy of boot_command_line */
struct machine_desc *machine_desc __initdata;

/*
 * Run-time endianness probe: casting the long to char keeps its
 * low-order byte, which is 'l' on little-endian and 'b' on big-endian.
 */
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

/* Per-CPU bookkeeping (cpuid, loops_per_jiffy) shown in /proc/cpuinfo */
DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
147
/*
 * Standard memory resources.  The start/end values are placeholders:
 * request_standard_resources() fills in the kernel code/data ranges
 * from the linker symbols and video RAM from the machine descriptor.
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

/* Convenience aliases into mem_res[] */
#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]
175
/*
 * Legacy PC parallel-port I/O ranges.  Only claimed when the machine
 * descriptor sets reserve_lp0/lp1/lp2 (see request_standard_resources()).
 */
static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

/* Convenience aliases into io_res[] */
#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]
200
/*
 * Architecture version names, indexed by the CPU_ARCH_* value
 * (printed in the boot banner and /proc/cpuinfo).
 */
static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"7M",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};
220
#ifdef CONFIG_CPU_V7M
/* v7-M cores don't carry the classic MIDR architecture encoding. */
static int __get_cpu_architecture(void)
{
	return CPU_ARCH_ARMv7M;
}
#else
/*
 * Decode the architecture version (CPU_ARCH_*) from the main ID
 * register.  The MIDR layout changed over the generations, hence the
 * cascade of discriminator masks below.
 */
static int __get_cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		/* Oldest parts: no architecture information encoded. */
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		/* ARM7-style ID: bit 23 distinguishes v4T from v3. */
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		/* Post-ARM7: architecture code in bits [18:16]. */
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		unsigned int mmfr0;

		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		asm("mrc	p15, 0, %0, c0, c1, 4"
		    : "=r" (mmfr0));
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
#endif
260
261 int __pure cpu_architecture(void)
262 {
263         BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);
264
265         return __cpu_architecture;
266 }
267
/*
 * Decide whether the instruction cache can alias across pages.
 * Returns non-zero when the I-cache indexing may produce aliases.
 */
static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		/* Select the L1 I-cache via CSSELR, then read its CCSIDR. */
		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
		    : /* No output operands */
		    : "r" (1));
		isb();
		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
		    : "=r" (id_reg));
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		/*
		 * line_size * num_sets exceeding a page means virtual
		 * index bits above the page offset are used — aliasing.
		 */
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		/* On v6 the cache-type register flags this in bit 11. */
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}
300
/*
 * Work out the data/instruction cache type of the boot CPU, record it
 * in the global 'cacheid' bitmask (CACHEID_*) and print the result.
 */
static void __init cacheid_init(void)
{
	unsigned int arch = cpu_architecture();

	if (arch == CPU_ARCH_ARMv7M) {
		/* v7-M: no cache-type decoding here; leave cacheid empty. */
		cacheid = 0;
	} else if (arch >= CPU_ARCH_ARMv6) {
		unsigned int cachetype = read_cpuid_cachetype();
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			/* Bits [15:14] give the L1 I-cache policy. */
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			/* v6 format: bit 23 marks an aliasing VIPT D-cache. */
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		/* The I-cache may alias independently of the D-cache. */
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		/* Pre-v6 caches are virtually indexed, virtually tagged. */
		cacheid = CACHEID_VIVT;
	}

	printk("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}
344
345 /*
346  * These functions re-use the assembly code in head.S, which
347  * already provide the required functionality.
348  */
349 extern struct proc_info_list *lookup_processor_type(unsigned int);
350
351 void __init early_print(const char *str, ...)
352 {
353         extern void printascii(const char *);
354         char buf[256];
355         va_list ap;
356
357         va_start(ap, str);
358         vsnprintf(buf, sizeof(buf), str, ap);
359         va_end(ap);
360
361 #ifdef CONFIG_DEBUG_LL
362         printascii(buf);
363 #endif
364         printk("%s", buf);
365 }
366
367 static void __init feat_v6_fixup(void)
368 {
369         int id = read_cpuid_id();
370
371         if ((id & 0xff0f0000) != 0x41070000)
372                 return;
373
374         /*
375          * HWCAP_TLS is available only on 1136 r1p0 and later,
376          * see also kuser_get_tls_init.
377          */
378         if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
379                 elf_hwcap &= ~HWCAP_TLS;
380 }
381
/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks: it switches through the IRQ,
 * abort and undefined-instruction modes in turn, pointing each mode's
 * banked SP at this CPU's slot in stacks[], then drops back to SVC
 * mode with interrupts still masked.  Not needed on v7-M, which has
 * no banked exception modes.
 */
void cpu_init(void)
{
#ifndef CONFIG_CPU_V7M
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * This only works on resume and secondary cores. For booting on the
	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
	 */
	set_my_cpu_offset(per_cpu_offset(cpu));

	/* Processor-implementation-specific init (see setup_processor()). */
	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
#endif
}
442
443 int __cpu_logical_map[NR_CPUS];
444
445 void __init smp_setup_processor_id(void)
446 {
447         int i;
448         u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
449         u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
450
451         cpu_logical_map(0) = cpu;
452         for (i = 1; i < nr_cpu_ids; ++i)
453                 cpu_logical_map(i) = i == cpu ? 0 : i;
454
455         printk(KERN_INFO "Booting Linux on physical CPU 0x%x\n", mpidr);
456 }
457
/*
 * Identify the boot CPU, install its implementation tables (for
 * multi-implementation kernels), publish its name/hwcaps and run the
 * cache and per-CPU stack initialisation.  Hangs if the CPU is not
 * supported by this kernel build.
 */
static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

	/* Copy this implementation's function tables into the globals. */
#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	/* Publish utsname machine and ELF platform, with endian suffix. */
	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
	/* Don't advertise Thumb when the kernel can't support it. */
	elf_hwcap &= ~HWCAP_THUMB;
#endif

	feat_v6_fixup();

	cacheid_init();
	cpu_init();
}
508
509 void __init dump_machine_table(void)
510 {
511         struct machine_desc *p;
512
513         early_print("Available machine support:\n\nID (hex)\tNAME\n");
514         for_each_machine_desc(p)
515                 early_print("%08x\t%s\n", p->nr, p->name);
516
517         early_print("\nPlease check your kernel config and/or bootloader.\n");
518
519         while (true)
520                 /* can't use cpu_relax() here as it may require MMU setup */;
521 }
522
523 int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
524 {
525         struct membank *bank = &meminfo.bank[meminfo.nr_banks];
526
527         if (meminfo.nr_banks >= NR_BANKS) {
528                 printk(KERN_CRIT "NR_BANKS too low, "
529                         "ignoring memory at 0x%08llx\n", (long long)start);
530                 return -EINVAL;
531         }
532
533         /*
534          * Ensure that start/size are aligned to a page boundary.
535          * Size is appropriately rounded down, start is rounded up.
536          */
537         size -= start & ~PAGE_MASK;
538         bank->start = PAGE_ALIGN(start);
539
540 #ifndef CONFIG_LPAE
541         if (bank->start + size < bank->start) {
542                 printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
543                         "32-bit physical address space\n", (long long)start);
544                 /*
545                  * To ensure bank->start + bank->size is representable in
546                  * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
547                  * This means we lose a page after masking.
548                  */
549                 size = ULONG_MAX - bank->start;
550         }
551 #endif
552
553         bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1);
554
555         /*
556          * Check whether this memory region has non-zero size or
557          * invalid node number.
558          */
559         if (bank->size == 0)
560                 return -EINVAL;
561
562         meminfo.nr_banks++;
563         return 0;
564 }
565
566 /*
567  * Pick out the memory size.  We look for mem=size@start,
568  * where start and size are "size[KkMm]"
569  */
570 static int __init early_mem(char *p)
571 {
572         static int usermem __initdata = 0;
573         phys_addr_t size;
574         phys_addr_t start;
575         char *endp;
576
577         /*
578          * If the user specifies memory size, we
579          * blow away any automatically generated
580          * size.
581          */
582         if (usermem == 0) {
583                 usermem = 1;
584                 meminfo.nr_banks = 0;
585         }
586
587         start = PHYS_OFFSET;
588         size  = memparse(p, &endp);
589         if (*endp == '@')
590                 start = memparse(endp + 1, NULL);
591
592         arm_add_memory(start, size);
593
594         return 0;
595 }
596 early_param("mem", early_mem);
597
/*
 * Register the standard resource tree: one "System RAM" entry per
 * memblock region (with the kernel code/data nested inside the region
 * containing them), optional video RAM from the machine descriptor,
 * and the legacy parallel-port I/O ranges the board asks to reserve.
 */
static void __init request_standard_resources(struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	/* Physical extents of the kernel image, from linker symbols. */
	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		/* Nest code/data inside the RAM region that holds them. */
		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}
642
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
/* Default 80x30 VGA text-mode parameters for the console layer */
struct screen_info screen_info = {
 .orig_video_lines      = 30,
 .orig_video_cols       = 80,
 .orig_video_mode       = 0,
 .orig_video_ega_bx     = 0,
 .orig_video_isVGA      = 1,
 .orig_video_points     = 8
};
#endif
653
/* Invoke the board's device-registration hook at arch_initcall time. */
static int __init customize_machine(void)
{
	/* customizes platform devices, or adds new ones */
	if (machine_desc->init_machine)
		machine_desc->init_machine();
	return 0;
}
arch_initcall(customize_machine);
662
/* Invoke the board's optional late-init hook at late_initcall time. */
static int __init init_machine_late(void)
{
	if (machine_desc->init_late)
		machine_desc->init_late();
	return 0;
}
late_initcall(init_machine_late);
670
#ifdef CONFIG_KEXEC
/*
 * Size in bytes of the [min_low_pfn, max_low_pfn) low-memory range.
 * NOTE(review): the shift happens on an unsigned long before widening,
 * which could wrap for very large pfn ranges on 32-bit — confirm this
 * is acceptable for the configurations that enable KEXEC.
 */
static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves memory area given in "crashkernel=" kernel command
 * line parameter. The memory reserved is used by a dump capture kernel when
 * primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	/* Non-zero means no (or unparsable) crashkernel= argument. */
	if (ret)
		return;

	/* Claim the region exclusively so nothing else allocates in it. */
	ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
	if (ret < 0) {
		printk(KERN_WARNING "crashkernel reservation failed - "
		       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
		return;
	}

	printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
	       "for crashkernel (System RAM: %ldMB)\n",
	       (unsigned long)(crash_size >> 20),
	       (unsigned long)(crash_base >> 20),
	       (unsigned long)(total_mem >> 20));

	/* Publish the region in /proc/iomem for kexec tools to find. */
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */
719
720 static int __init meminfo_cmp(const void *_a, const void *_b)
721 {
722         const struct membank *a = _a, *b = _b;
723         long cmp = bank_pfn_start(a) - bank_pfn_start(b);
724         return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
725 }
726
/*
 * Report which mode the CPUs entered the kernel in (HYP vs SVC) and
 * warn when the boot mode was inconsistent.  Compiles to nothing
 * without CONFIG_ARM_VIRT_EXT.
 */
void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		/* Some CPUs entered in a different mode than the boot CPU. */
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}
741
/*
 * ARM's boot-time setup entry point: identify the CPU and machine,
 * parse the command line, establish the memory layout and paging,
 * register standard resources, and hand device-tree/SMP/IRQ state
 * over to the rest of the kernel.  Ordering here is significant.
 */
void __init setup_arch(char **cmdline_p)
{
	struct machine_desc *mdesc;

	setup_processor();
	/* Prefer a flattened-device-tree machine; fall back to ATAGs. */
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;

	setup_dma_zone(mdesc);

	if (mdesc->restart_mode)
		reboot_setup(&mdesc->restart_mode);

	/* Describe the kernel image to the initial mm. */
	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

	/* Memory: sort the banks, sanity-check them, then seed memblock. */
	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
	sanity_check_meminfo();
	arm_memblock_init(&meminfo, mdesc);

	paging_init(mdesc);
	request_standard_resources(mdesc);

	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
#ifdef CONFIG_SMP
	if (is_smp()) {
		smp_set_ops(mdesc->smp);
		smp_init_cpus();
	}
#endif

	/*
	 * Only checked here on UP — on SMP the secondaries haven't
	 * started yet; presumably re-checked once they are up (TODO
	 * confirm against the SMP boot path).
	 */
	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

	tcm_init();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	/* Finally, the board's early-init hook. */
	if (mdesc->init_early)
		mdesc->init_early();
}
811
812
813 static int __init topology_init(void)
814 {
815         int cpu;
816
817         for_each_possible_cpu(cpu) {
818                 struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
819                 cpuinfo->cpu.hotpluggable = 1;
820                 register_cpu(&cpuinfo->cpu, cpu);
821         }
822
823         return 0;
824 }
825 subsys_initcall(topology_init);
826
#ifdef CONFIG_HAVE_PROC_CPU
/* Create the /proc/cpu directory at fs_initcall time. */
static int __init proc_cpu_init(void)
{
	return proc_mkdir("cpu", NULL) ? 0 : -ENOMEM;
}
fs_initcall(proc_cpu_init);
#endif
839
/*
 * Feature names for /proc/cpuinfo.  Index j corresponds to bit j of
 * elf_hwcap (see c_show()), so the order must match the HWCAP_* bit
 * definitions exactly.  NULL-terminated.
 */
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	NULL
};
862
/*
 * Render /proc/cpuinfo: one stanza per online CPU (id, name, BogoMIPS,
 * features, MIDR fields) followed by the machine's hardware, revision
 * and serial lines.
 */
static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	u32 cpuid;

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		seq_printf(m, "model name\t: %s rev %d (%s)\n",
			   cpu_name, cpuid & 15, elf_platform);

#if defined(CONFIG_SMP)
		/* SMP keeps per-CPU calibration; UP uses the global value. */
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   loops_per_jiffy / (500000/HZ),
			   (loops_per_jiffy / (5000/HZ)) % 100);
#endif
		/* dump out the processor features */
		seq_puts(m, "Features\t: ");

		for (j = 0; hwcap_str[j]; j++)
			if (elf_hwcap & (1 << j))
				seq_printf(m, "%s ", hwcap_str[j]);

		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
		seq_printf(m, "CPU architecture: %s\n",
			   proc_arch[cpu_architecture()]);

		/* MIDR field layout varies by generation; see
		 * __get_cpu_architecture() for the same discriminators. */
		if ((cpuid & 0x0008f000) == 0x00000000) {
			/* pre-ARM7 */
			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
		} else {
			if ((cpuid & 0x0008f000) == 0x00007000) {
				/* ARM7 */
				seq_printf(m, "CPU variant\t: 0x%02x\n",
					   (cpuid >> 16) & 127);
			} else {
				/* post-ARM7 */
				seq_printf(m, "CPU variant\t: 0x%x\n",
					   (cpuid >> 20) & 15);
			}
			seq_printf(m, "CPU part\t: 0x%03x\n",
				   (cpuid >> 4) & 0xfff);
		}
		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
	}

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}
925
926 static void *c_start(struct seq_file *m, loff_t *pos)
927 {
928         return *pos < 1 ? (void *)1 : NULL;
929 }
930
931 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
932 {
933         ++*pos;
934         return NULL;
935 }
936
/* seq_file stop: nothing to release. */
static void c_stop(struct seq_file *m, void *v)
{
}
940
/* seq_file operations backing /proc/cpuinfo (see c_show()) */
const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};