/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>

#include <asm/unified.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>

#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/traps.h>
#include <asm/unwind.h>

#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
#include "compat.h"
#endif
#include "atags.h"
#include "tcm.h"

#ifndef MEM_SIZE
#define MEM_SIZE        (16*1024*1024)
#endif

#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
        memcpy(fpe_type, line, 8);
        return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void paging_init(struct machine_desc *desc);
extern void reboot_setup(char *str);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);


#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

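/*
 * Small per-CPU, per-mode stacks: three words each for IRQ, abort and
 * undefined-instruction mode.  cpu_init() below points each mode's banked
 * stack pointer at its slot, giving the exception entry code somewhere to
 * stash a few registers before switching back onto the SVC stack.
 */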
struct stack {
        u32 irq[3];
        u32 abt[3];
        u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
struct machine_desc *machine_desc __initdata;

static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)
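
/*
 * The union above overlays the characters 'l', '?', '?', 'b' with an
 * unsigned long.  Casting the long to char keeps its least significant
 * byte, which overlaps c[0] ('l') on a little-endian kernel and c[3] ('b')
 * on a big-endian one, so ENDIANNESS is the suffix appended to the utsname
 * machine string and elf_platform below (e.g. "armv7l" vs "armv7b").
 */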

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
        {
                .name = "Video RAM",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        },
        {
                .name = "Kernel text",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        },
        {
                .name = "Kernel data",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        }
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
        {
                .name = "reserved",
                .start = 0x3bc,
                .end = 0x3be,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        },
        {
                .name = "reserved",
                .start = 0x378,
                .end = 0x37f,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        },
        {
                .name = "reserved",
                .start = 0x278,
                .end = 0x27f,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        }
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
        "undefined/unknown",
        "3",
        "4",
        "4T",
        "5",
        "5T",
        "5TE",
        "5TEJ",
        "6TEJ",
        "7",
        "?(11)",
        "?(12)",
        "?(13)",
        "?(14)",
        "?(15)",
        "?(16)",
        "?(17)",
};

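/*
 * Work out which architecture revision this CPU implements from the main
 * ID register (read_cpuid_id()).  Older parts encode it directly in the
 * architecture field; a field value of 0xf means the revised CPUID scheme,
 * where ID_MMFR0 must be consulted to tell a VMSA/PMSA v6 part from a v7
 * one.
 */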
int cpu_architecture(void)
{
        int cpu_arch;

        if ((read_cpuid_id() & 0x0008f000) == 0) {
                cpu_arch = CPU_ARCH_UNKNOWN;
        } else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
                cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
        } else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
                cpu_arch = (read_cpuid_id() >> 16) & 7;
                if (cpu_arch)
                        cpu_arch += CPU_ARCH_ARMv3;
        } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
                unsigned int mmfr0;

                /* Revised CPUID format. Read the Memory Model Feature
                 * Register 0 and check for VMSAv7 or PMSAv7 */
                asm("mrc        p15, 0, %0, c0, c1, 4"
                    : "=r" (mmfr0));
                if ((mmfr0 & 0x0000000f) == 0x00000003 ||
                    (mmfr0 & 0x000000f0) == 0x00000030)
                        cpu_arch = CPU_ARCH_ARMv7;
                else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
                         (mmfr0 & 0x000000f0) == 0x00000020)
                        cpu_arch = CPU_ARCH_ARMv6;
                else
                        cpu_arch = CPU_ARCH_UNKNOWN;
        } else
                cpu_arch = CPU_ARCH_UNKNOWN;

        return cpu_arch;
}

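/*
 * Decide whether the instruction cache can alias.  On ARMv7 this selects
 * the L1 I-cache via CSSELR, reads its geometry back from CCSIDR, and
 * reports aliasing when one cache way (line size * number of sets) is
 * larger than a page; on ARMv6 the cache type register reports it
 * directly.
 */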
static int cpu_has_aliasing_icache(unsigned int arch)
{
        int aliasing_icache;
        unsigned int id_reg, num_sets, line_size;

        /* arch specifies the register format */
        switch (arch) {
        case CPU_ARCH_ARMv7:
                asm("mcr        p15, 2, %0, c0, c0, 0 @ set CSSELR"
                    : /* No output operands */
                    : "r" (1));
                isb();
                asm("mrc        p15, 1, %0, c0, c0, 0 @ read CCSIDR"
                    : "=r" (id_reg));
                line_size = 4 << ((id_reg & 0x7) + 2);
                num_sets = ((id_reg >> 13) & 0x7fff) + 1;
                aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
                break;
        case CPU_ARCH_ARMv6:
                aliasing_icache = read_cpuid_cachetype() & (1 << 11);
                break;
        default:
                /* I-cache aliases will be handled by D-cache aliasing code */
                aliasing_icache = 0;
        }

        return aliasing_icache;
}

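/*
 * Classify the cache model from the cache type register: bits [31:29]
 * equal to 0x4 indicate the ARMv7 register format (VIPT non-aliasing data
 * cache, with the L1 instruction cache policy field telling us whether the
 * I-cache is ASID-tagged VIVT); otherwise bit 23 distinguishes an aliasing
 * from a non-aliasing VIPT cache, and anything pre-v6 is treated as VIVT.
 */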
static void __init cacheid_init(void)
{
        unsigned int cachetype = read_cpuid_cachetype();
        unsigned int arch = cpu_architecture();

        if (arch >= CPU_ARCH_ARMv6) {
                if ((cachetype & (7 << 29)) == 4 << 29) {
                        /* ARMv7 register format */
                        cacheid = CACHEID_VIPT_NONALIASING;
                        if ((cachetype & (3 << 14)) == 1 << 14)
                                cacheid |= CACHEID_ASID_TAGGED;
                        else if (cpu_has_aliasing_icache(CPU_ARCH_ARMv7))
                                cacheid |= CACHEID_VIPT_I_ALIASING;
                } else if (cachetype & (1 << 23)) {
                        cacheid = CACHEID_VIPT_ALIASING;
                } else {
                        cacheid = CACHEID_VIPT_NONALIASING;
                        if (cpu_has_aliasing_icache(CPU_ARCH_ARMv6))
                                cacheid |= CACHEID_VIPT_I_ALIASING;
                }
        } else {
                cacheid = CACHEID_VIVT;
        }

        printk("CPU: %s data cache, %s instruction cache\n",
                cache_is_vivt() ? "VIVT" :
                cache_is_vipt_aliasing() ? "VIPT aliasing" :
                cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown",
                cache_is_vivt() ? "VIVT" :
                icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
                icache_is_vipt_aliasing() ? "VIPT aliasing" :
                cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);
extern struct machine_desc *lookup_machine_type(unsigned int);

static void __init feat_v6_fixup(void)
{
        int id = read_cpuid_id();

        if ((id & 0xff0f0000) != 0x41070000)
                return;

        /*
         * HWCAP_TLS is available only on 1136 r1p0 and later,
         * see also kuser_get_tls_init.
         */
        if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
                elf_hwcap &= ~HWCAP_TLS;
}

static void __init setup_processor(void)
{
        struct proc_info_list *list;

        /*
         * locate processor in the list of supported processor
         * types.  The linker builds this table for us from the
         * entries in arch/arm/mm/proc-*.S
         */
        list = lookup_processor_type(read_cpuid_id());
        if (!list) {
                printk("CPU configuration botched (ID %08x), unable "
                       "to continue.\n", read_cpuid_id());
                while (1);
        }

        cpu_name = list->cpu_name;

#ifdef MULTI_CPU
        processor = *list->proc;
#endif
#ifdef MULTI_TLB
        cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
        cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
        cpu_cache = *list->cache;
#endif

        printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
               cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
               proc_arch[cpu_architecture()], cr_alignment);

        sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
        sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
        elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
        elf_hwcap &= ~HWCAP_THUMB;
#endif

        feat_v6_fixup();

        cacheid_init();
        cpu_proc_init();
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void cpu_init(void)
{
        unsigned int cpu = smp_processor_id();
        struct stack *stk = &stacks[cpu];

        if (cpu >= NR_CPUS) {
                printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
                BUG();
        }

        /*
         * Define the placement constraint for the inline asm directive below.
         * In Thumb-2, msr with an immediate value is not allowed.
         */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC     "r"
#else
#define PLC     "I"
#endif

        /*
         * setup stacks for re-entrant exception handlers
         */
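        /*
         * The msr/add/mov sequence below switches the CPU through IRQ,
         * abort and undefined mode in turn (with IRQs and FIQs masked),
         * points each mode's banked stack pointer at the corresponding
         * slot in this CPU's struct stack, and finally drops back into
         * SVC mode.  r14 is used as scratch, hence the clobber.
         */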
        __asm__ (
        "msr    cpsr_c, %1\n\t"
        "add    r14, %0, %2\n\t"
        "mov    sp, r14\n\t"
        "msr    cpsr_c, %3\n\t"
        "add    r14, %0, %4\n\t"
        "mov    sp, r14\n\t"
        "msr    cpsr_c, %5\n\t"
        "add    r14, %0, %6\n\t"
        "mov    sp, r14\n\t"
        "msr    cpsr_c, %7"
            :
            : "r" (stk),
              PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
              "I" (offsetof(struct stack, irq[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
              "I" (offsetof(struct stack, abt[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
              "I" (offsetof(struct stack, und[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
            : "r14");
}

static struct machine_desc * __init setup_machine(unsigned int nr)
{
        struct machine_desc *list;

        /*
         * locate machine in the list of supported machines.
         */
        list = lookup_machine_type(nr);
        if (!list) {
                printk("Machine configuration botched (nr %d), unable "
                       "to continue.\n", nr);
                while (1);
        }

        printk("Machine: %s\n", list->name);

        return list;
}

static int __init arm_add_memory(unsigned long start, unsigned long size)
{
        struct membank *bank = &meminfo.bank[meminfo.nr_banks];

        if (meminfo.nr_banks >= NR_BANKS) {
                printk(KERN_CRIT "NR_BANKS too low, "
                        "ignoring memory at %#lx\n", start);
                return -EINVAL;
        }

        /*
         * Ensure that start/size are aligned to a page boundary.
         * Size is appropriately rounded down, start is rounded up.
         */
        size -= start & ~PAGE_MASK;
        bank->start = PAGE_ALIGN(start);
        bank->size  = size & PAGE_MASK;

        /*
         * Check whether this memory region still has a non-zero size
         * after the alignment above; reject it if not.
         */
        if (bank->size == 0)
                return -EINVAL;

        meminfo.nr_banks++;
        return 0;
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
static int __init early_mem(char *p)
{
        static int usermem __initdata = 0;
        unsigned long size, start;
        char *endp;

        /*
         * If the user specifies memory size, we
         * blow away any automatically generated
         * size.
         */
        if (usermem == 0) {
                usermem = 1;
                meminfo.nr_banks = 0;
        }

        start = PHYS_OFFSET;
        size  = memparse(p, &endp);
        if (*endp == '@')
                start = memparse(endp + 1, NULL);

        arm_add_memory(start, size);

        return 0;
}
early_param("mem", early_mem);

static void __init
setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
{
#ifdef CONFIG_BLK_DEV_RAM
        extern int rd_size, rd_image_start, rd_prompt, rd_doload;

        rd_image_start = image_start;
        rd_prompt = prompt;
        rd_doload = doload;

        if (rd_sz)
                rd_size = rd_sz;
#endif
}

static void __init
request_standard_resources(struct meminfo *mi, struct machine_desc *mdesc)
{
        struct resource *res;
        int i;

        kernel_code.start   = virt_to_phys(_text);
        kernel_code.end     = virt_to_phys(_etext - 1);
        kernel_data.start   = virt_to_phys(_sdata);
        kernel_data.end     = virt_to_phys(_end - 1);

        for (i = 0; i < mi->nr_banks; i++) {
                if (mi->bank[i].size == 0)
                        continue;

                res = alloc_bootmem_low(sizeof(*res));
                res->name  = "System RAM";
                res->start = mi->bank[i].start;
                res->end   = mi->bank[i].start + mi->bank[i].size - 1;
                res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

                request_resource(&iomem_resource, res);

                if (kernel_code.start >= res->start &&
                    kernel_code.end <= res->end)
                        request_resource(res, &kernel_code);
                if (kernel_data.start >= res->start &&
                    kernel_data.end <= res->end)
                        request_resource(res, &kernel_data);
        }

        if (mdesc->video_start) {
                video_ram.start = mdesc->video_start;
                video_ram.end   = mdesc->video_end;
                request_resource(&iomem_resource, &video_ram);
        }

        /*
         * Some machines will never have lp0, lp1 or lp2 at all; only
         * reserve these regions if the machine description asks for them.
         */
        if (mdesc->reserve_lp0)
                request_resource(&ioport_resource, &lp0);
        if (mdesc->reserve_lp1)
                request_resource(&ioport_resource, &lp1);
        if (mdesc->reserve_lp2)
                request_resource(&ioport_resource, &lp2);
}

/*
 *  Tag parsing.
 *
 * This is the new way of passing data to the kernel at boot time.  Rather
 * than passing a fixed inflexible structure to the kernel, we pass a list
 * of variable-sized tags to the kernel.  The first tag must be an ATAG_CORE
 * tag for the list to be recognised (to distinguish the tagged list from
 * a param_struct).  The list is terminated with a zero-length tag (this tag
 * is not parsed in any way).
 */
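/*
 * A minimal, hypothetical list as a boot loader might construct it, purely
 * for illustration (each header is { u32 size-in-words, u32 tag }):
 *
 *      ATAG_CORE    { 5, ATAG_CORE }   flags, pagesize, rootdev
 *      ATAG_MEM     { 4, ATAG_MEM }    size and start of one memory bank
 *      ATAG_CMDLINE { n, ATAG_CMDLINE }kernel command line string
 *      ATAG_NONE    { 0, ATAG_NONE }   zero-length terminator
 *
 * parse_tags() below walks such a list with tag_next() and dispatches each
 * entry to the matching __tagtable() handler.
 */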
static int __init parse_tag_core(const struct tag *tag)
{
        if (tag->hdr.size > 2) {
                if ((tag->u.core.flags & 1) == 0)
                        root_mountflags &= ~MS_RDONLY;
                ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
        }
        return 0;
}

__tagtable(ATAG_CORE, parse_tag_core);

static int __init parse_tag_mem32(const struct tag *tag)
{
        return arm_add_memory(tag->u.mem.start, tag->u.mem.size);
}

__tagtable(ATAG_MEM, parse_tag_mem32);

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
 .orig_video_lines      = 30,
 .orig_video_cols       = 80,
 .orig_video_mode       = 0,
 .orig_video_ega_bx     = 0,
 .orig_video_isVGA      = 1,
 .orig_video_points     = 8
};

static int __init parse_tag_videotext(const struct tag *tag)
{
        screen_info.orig_x            = tag->u.videotext.x;
        screen_info.orig_y            = tag->u.videotext.y;
        screen_info.orig_video_page   = tag->u.videotext.video_page;
        screen_info.orig_video_mode   = tag->u.videotext.video_mode;
        screen_info.orig_video_cols   = tag->u.videotext.video_cols;
        screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
        screen_info.orig_video_lines  = tag->u.videotext.video_lines;
        screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
        screen_info.orig_video_points = tag->u.videotext.video_points;
        return 0;
}

__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
#endif

static int __init parse_tag_ramdisk(const struct tag *tag)
{
        setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
                      (tag->u.ramdisk.flags & 2) == 0,
                      tag->u.ramdisk.start, tag->u.ramdisk.size);
        return 0;
}

__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);

static int __init parse_tag_serialnr(const struct tag *tag)
{
        system_serial_low = tag->u.serialnr.low;
        system_serial_high = tag->u.serialnr.high;
        return 0;
}

__tagtable(ATAG_SERIAL, parse_tag_serialnr);

static int __init parse_tag_revision(const struct tag *tag)
{
        system_rev = tag->u.revision.rev;
        return 0;
}

__tagtable(ATAG_REVISION, parse_tag_revision);

#ifndef CONFIG_CMDLINE_FORCE
static int __init parse_tag_cmdline(const struct tag *tag)
{
        strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
        return 0;
}

__tagtable(ATAG_CMDLINE, parse_tag_cmdline);
#endif /* CONFIG_CMDLINE_FORCE */

/*
 * Scan the tag table for this tag, and call its parse function.
 * The tag table is built by the linker from all the __tagtable
 * declarations.
 */
static int __init parse_tag(const struct tag *tag)
{
        extern struct tagtable __tagtable_begin, __tagtable_end;
        struct tagtable *t;

        for (t = &__tagtable_begin; t < &__tagtable_end; t++)
                if (tag->hdr.tag == t->tag) {
                        t->parse(tag);
                        break;
                }

        return t < &__tagtable_end;
}

/*
 * Parse all tags in the list, checking both the global and architecture
 * specific tag tables.
 */
static void __init parse_tags(const struct tag *t)
{
        for (; t->hdr.size; t = tag_next(t))
                if (!parse_tag(t))
                        printk(KERN_WARNING
                                "Ignoring unrecognised tag 0x%08x\n",
                                t->hdr.tag);
}

/*
 * This holds our defaults.
 */
static struct init_tags {
        struct tag_header hdr1;
        struct tag_core   core;
        struct tag_header hdr2;
        struct tag_mem32  mem;
        struct tag_header hdr3;
} init_tags __initdata = {
        { tag_size(tag_core), ATAG_CORE },
        { 1, PAGE_SIZE, 0xff },
        { tag_size(tag_mem32), ATAG_MEM },
        { MEM_SIZE, PHYS_OFFSET },
        { 0, ATAG_NONE }
};
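
/*
 * When the boot loader does not hand over a usable tag list, setup_arch()
 * below falls back to init_tags: a default ATAG_CORE followed by a single
 * ATAG_MEM bank of MEM_SIZE bytes at PHYS_OFFSET, terminated by ATAG_NONE.
 */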

static int __init customize_machine(void)
{
        /* customizes platform devices, or adds new ones */
        if (machine_desc->init_machine)
                machine_desc->init_machine();
        return 0;
}
arch_initcall(customize_machine);

#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
        unsigned long total;

        total = max_low_pfn - min_low_pfn;
        return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves the memory area given by the "crashkernel=" kernel
 * command line parameter. The reserved memory is used by a dump capture
 * kernel when the primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
        unsigned long long crash_size, crash_base;
        unsigned long long total_mem;
        int ret;

        total_mem = get_total_mem();
        ret = parse_crashkernel(boot_command_line, total_mem,
                                &crash_size, &crash_base);
        if (ret)
                return;

        ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
        if (ret < 0) {
                printk(KERN_WARNING "crashkernel reservation failed - "
                       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
                return;
        }

        printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
               "for crashkernel (System RAM: %ldMB)\n",
               (unsigned long)(crash_size >> 20),
               (unsigned long)(crash_base >> 20),
               (unsigned long)(total_mem >> 20));

        crashk_res.start = crash_base;
        crashk_res.end = crash_base + crash_size - 1;
        insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */

/*
 * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by
 * is_kdump_kernel() to determine if we are booting after a panic. Hence
 * ifdef it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE.
 */

#ifdef CONFIG_CRASH_DUMP
/*
 * elfcorehdr= specifies the location of elf core header stored by the crashed
 * kernel. This option will be passed by kexec loader to the capture kernel.
 */
static int __init setup_elfcorehdr(char *arg)
{
        char *end;

        if (!arg)
                return -EINVAL;

        elfcorehdr_addr = memparse(arg, &end);
        return end > arg ? 0 : -EINVAL;
}
early_param("elfcorehdr", setup_elfcorehdr);
#endif /* CONFIG_CRASH_DUMP */

static void __init squash_mem_tags(struct tag *tag)
{
        for (; tag->hdr.size; tag = tag_next(tag))
                if (tag->hdr.tag == ATAG_MEM)
                        tag->hdr.tag = ATAG_NONE;
}

void __init setup_arch(char **cmdline_p)
{
        struct tag *tags = (struct tag *)&init_tags;
        struct machine_desc *mdesc;
        char *from = default_command_line;

        unwind_init();

        setup_processor();
        mdesc = setup_machine(machine_arch_type);
        machine_desc = mdesc;
        machine_name = mdesc->name;

        if (mdesc->soft_reboot)
                reboot_setup("s");

        if (__atags_pointer)
                tags = phys_to_virt(__atags_pointer);
        else if (mdesc->boot_params)
                tags = phys_to_virt(mdesc->boot_params);

#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
        /*
         * If we have the old style parameters, convert them to
         * a tag list.
         */
        if (tags->hdr.tag != ATAG_CORE)
                convert_to_tag_list(tags);
#endif
        if (tags->hdr.tag != ATAG_CORE)
                tags = (struct tag *)&init_tags;

        if (mdesc->fixup)
                mdesc->fixup(mdesc, tags, &from, &meminfo);

        if (tags->hdr.tag == ATAG_CORE) {
                if (meminfo.nr_banks != 0)
                        squash_mem_tags(tags);
                save_atags(tags);
                parse_tags(tags);
        }

        init_mm.start_code = (unsigned long) _text;
        init_mm.end_code   = (unsigned long) _etext;
        init_mm.end_data   = (unsigned long) _edata;
        init_mm.brk        = (unsigned long) _end;

        /* parse_early_param needs a boot_command_line */
        strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);

        /* populate cmd_line too for later use, preserving boot_command_line */
        strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
        *cmdline_p = cmd_line;

        parse_early_param();

        arm_memblock_init(&meminfo, mdesc);

        paging_init(mdesc);
        request_standard_resources(&meminfo, mdesc);

#ifdef CONFIG_SMP
        if (is_smp())
                smp_init_cpus();
#endif
        reserve_crashkernel();

        cpu_init();
        tcm_init();

#ifdef CONFIG_MULTI_IRQ_HANDLER
        handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
        conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
#endif
#endif
        early_trap_init();

        if (mdesc->init_early)
                mdesc->init_early();
}


static int __init topology_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
                cpuinfo->cpu.hotpluggable = 1;
                register_cpu(&cpuinfo->cpu, cpu);
        }

        return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
        struct proc_dir_entry *res;

        res = proc_mkdir("cpu", NULL);
        if (!res)
                return -ENOMEM;
        return 0;
}
fs_initcall(proc_cpu_init);
#endif

static const char *hwcap_str[] = {
        "swp",
        "half",
        "thumb",
        "26bit",
        "fastmult",
        "fpa",
        "vfp",
        "edsp",
        "java",
        "iwmmxt",
        "crunch",
        "thumbee",
        "neon",
        "vfpv3",
        "vfpv3d16",
        NULL
};

static int c_show(struct seq_file *m, void *v)
{
        int i;

        seq_printf(m, "Processor\t: %s rev %d (%s)\n",
                   cpu_name, read_cpuid_id() & 15, elf_platform);

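        /*
         * BogoMIPS = loops_per_jiffy * HZ / 500000, so dividing by
         * (500000 / HZ) gives the integer part and dividing by
         * (5000 / HZ) modulo 100 gives the two fractional digits.
         */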
#if defined(CONFIG_SMP)
        for_each_online_cpu(i) {
                /*
                 * glibc reads /proc/cpuinfo to determine the number of
                 * online processors, looking for lines beginning with
                 * "processor".  Give glibc what it expects.
                 */
                seq_printf(m, "processor\t: %d\n", i);
                seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
                           per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
                           (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
        }
#else /* CONFIG_SMP */
        seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
                   loops_per_jiffy / (500000/HZ),
                   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

        /* dump out the processor features */
        seq_puts(m, "Features\t: ");

        for (i = 0; hwcap_str[i]; i++)
                if (elf_hwcap & (1 << i))
                        seq_printf(m, "%s ", hwcap_str[i]);

        seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
        seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

        if ((read_cpuid_id() & 0x0008f000) == 0x00000000) {
                /* pre-ARM7 */
                seq_printf(m, "CPU part\t: %07x\n", read_cpuid_id() >> 4);
        } else {
                if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
                        /* ARM7 */
                        seq_printf(m, "CPU variant\t: 0x%02x\n",
                                   (read_cpuid_id() >> 16) & 127);
                } else {
                        /* post-ARM7 */
                        seq_printf(m, "CPU variant\t: 0x%x\n",
                                   (read_cpuid_id() >> 20) & 15);
                }
                seq_printf(m, "CPU part\t: 0x%03x\n",
                           (read_cpuid_id() >> 4) & 0xfff);
        }
        seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);

        seq_puts(m, "\n");

        seq_printf(m, "Hardware\t: %s\n", machine_name);
        seq_printf(m, "Revision\t: %04x\n", system_rev);
        seq_printf(m, "Serial\t\t: %08x%08x\n",
                   system_serial_high, system_serial_low);

        return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
        return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
        ++*pos;
        return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
        .start  = c_start,
        .next   = c_next,
        .stop   = c_stop,
        .show   = c_show
};