/*
 * arch/arm/kernel/setup.c
 * (extracted from mv-sheeva.git, commit 42c2f0cedf1b35111ee44eb7b3fe8a81c6b44427)
 */
1 /*
2  *  linux/arch/arm/kernel/setup.c
3  *
4  *  Copyright (C) 1995-2001 Russell King
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/stddef.h>
13 #include <linux/ioport.h>
14 #include <linux/delay.h>
15 #include <linux/utsname.h>
16 #include <linux/initrd.h>
17 #include <linux/console.h>
18 #include <linux/bootmem.h>
19 #include <linux/seq_file.h>
20 #include <linux/screen_info.h>
21 #include <linux/init.h>
22 #include <linux/kexec.h>
23 #include <linux/crash_dump.h>
24 #include <linux/root_dev.h>
25 #include <linux/cpu.h>
26 #include <linux/interrupt.h>
27 #include <linux/smp.h>
28 #include <linux/fs.h>
29 #include <linux/proc_fs.h>
30 #include <linux/memblock.h>
31
32 #include <asm/unified.h>
33 #include <asm/cpu.h>
34 #include <asm/cputype.h>
35 #include <asm/elf.h>
36 #include <asm/procinfo.h>
37 #include <asm/sections.h>
38 #include <asm/setup.h>
39 #include <asm/smp_plat.h>
40 #include <asm/mach-types.h>
41 #include <asm/cacheflush.h>
42 #include <asm/cachetype.h>
43 #include <asm/tlbflush.h>
44
45 #include <asm/mach/arch.h>
46 #include <asm/mach/irq.h>
47 #include <asm/mach/time.h>
48 #include <asm/traps.h>
49 #include <asm/unwind.h>
50
51 #if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
52 #include "compat.h"
53 #endif
54 #include "atags.h"
55 #include "tcm.h"
56
/* Fallback memory size (16MB) used by init_tags when no ATAG_MEM is given. */
#ifndef MEM_SIZE
#define MEM_SIZE	(16*1024*1024)
#endif
60
#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
/* Floating point emulator type selected via the "fpe=" boot option. */
char fpe_type[8];

/*
 * "fpe=" command line handler: record the requested FP emulator name.
 * NOTE(review): copies a fixed 8 bytes regardless of the option's actual
 * length -- presumably the early-param buffer guarantees at least 8
 * readable bytes; confirm before relying on shorter strings.
 */
static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif
72
extern void paging_init(struct machine_desc *desc);
extern void reboot_setup(char *str);

/* CPU ID value; set during early boot (head.S -- TODO confirm). */
unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
/* Machine number handed over by the boot loader (presumably via r1). */
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
/* CACHEID_* flags describing cache geometry; set by cacheid_init(). */
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

/* Physical address of the boot-loader-supplied ATAG list, if any. */
unsigned int __atags_pointer __initdata;

/* Board revision from ATAG_REVISION; shown in /proc/cpuinfo. */
unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

/* Serial number halves from ATAG_SERIAL; shown in /proc/cpuinfo. */
unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

/* ELF hardware capability bits, copied from proc_info in setup_processor(). */
unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);
97
/*
 * When the kernel supports multiple CPU/TLB/user/cache implementations,
 * the function-pointer sets below are filled in by setup_processor()
 * from the matching proc_info entry.
 */
#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

/* Tiny per-CPU stacks for IRQ/abort/undef modes; wired up by cpu_init(). */
struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

/* ELF platform string (e.g. "v7l"), built in setup_processor(). */
char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
/* Copy of boot_command_line handed to the rest of the kernel. */
static char __initdata cmd_line[COMMAND_LINE_SIZE];
struct machine_desc *machine_desc __initdata;

static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
/* Runtime endianness probe: low byte of .l is 'l' on LE, 'b' on BE. */
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
136
/*
 * Standard memory resources.
 *
 * Kernel text/data start/end are filled in by
 * request_standard_resources(); Video RAM comes from the machine
 * descriptor when present.
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel text",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]
164
/*
 * Legacy PC-style parallel-port I/O regions; only claimed when the
 * machine descriptor sets the corresponding reserve_lp* flag.
 */
static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]
189
/* Architecture names indexed by cpu_architecture() (CPU_ARCH_*). */
static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};
209
210 int cpu_architecture(void)
211 {
212         int cpu_arch;
213
214         if ((read_cpuid_id() & 0x0008f000) == 0) {
215                 cpu_arch = CPU_ARCH_UNKNOWN;
216         } else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
217                 cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
218         } else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
219                 cpu_arch = (read_cpuid_id() >> 16) & 7;
220                 if (cpu_arch)
221                         cpu_arch += CPU_ARCH_ARMv3;
222         } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
223                 unsigned int mmfr0;
224
225                 /* Revised CPUID format. Read the Memory Model Feature
226                  * Register 0 and check for VMSAv7 or PMSAv7 */
227                 asm("mrc        p15, 0, %0, c0, c1, 4"
228                     : "=r" (mmfr0));
229                 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
230                     (mmfr0 & 0x000000f0) >= 0x00000030)
231                         cpu_arch = CPU_ARCH_ARMv7;
232                 else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
233                          (mmfr0 & 0x000000f0) == 0x00000020)
234                         cpu_arch = CPU_ARCH_ARMv6;
235                 else
236                         cpu_arch = CPU_ARCH_UNKNOWN;
237         } else
238                 cpu_arch = CPU_ARCH_UNKNOWN;
239
240         return cpu_arch;
241 }
242
/*
 * Decide whether the instruction cache can alias, i.e. whether one
 * I-cache way spans more than a page.  @arch selects how the cache
 * type information is encoded for this CPU.
 */
static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		/* Select the L1 instruction cache (CSSELR = 1), then read
		 * its geometry from CCSIDR. */
		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
		    : /* No output operands */
		    : "r" (1));
		isb();
		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
		    : "=r" (id_reg));
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		/* aliasing possible when one way exceeds a page */
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		/* bit 11 of the cache type register flags an aliasing
		 * I-cache on ARMv6 */
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}
271
/*
 * Work out the cache indexing/tagging policy from the cache type
 * register and record it in the global "cacheid", then report it.
 */
static void __init cacheid_init(void)
{
	unsigned int cachetype = read_cpuid_cachetype();
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			cacheid = CACHEID_VIPT_NONALIASING;
			/* L1Ip field: 01 = ASID-tagged VIVT I-cache */
			if ((cachetype & (3 << 14)) == 1 << 14)
				cacheid |= CACHEID_ASID_TAGGED;
			else if (cpu_has_aliasing_icache(CPU_ARCH_ARMv7))
				cacheid |= CACHEID_VIPT_I_ALIASING;
		} else if (cachetype & (1 << 23)) {
			/* ARMv6 format, aliasing bit set */
			cacheid = CACHEID_VIPT_ALIASING;
		} else {
			cacheid = CACHEID_VIPT_NONALIASING;
			if (cpu_has_aliasing_icache(CPU_ARCH_ARMv6))
				cacheid |= CACHEID_VIPT_I_ALIASING;
		}
	} else {
		/* pre-v6 CPUs have virtually indexed, virtually tagged caches */
		cacheid = CACHEID_VIVT;
	}

	printk("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}
305
306 /*
307  * These functions re-use the assembly code in head.S, which
308  * already provide the required functionality.
309  */
310 extern struct proc_info_list *lookup_processor_type(unsigned int);
311
/*
 * printf-style output usable before the console is up: always goes to
 * the kernel log buffer, and additionally straight to the low-level
 * debug UART when CONFIG_DEBUG_LL is enabled.  Output is truncated to
 * 256 bytes.
 */
static void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}
327
328 static void __init feat_v6_fixup(void)
329 {
330         int id = read_cpuid_id();
331
332         if ((id & 0xff0f0000) != 0x41070000)
333                 return;
334
335         /*
336          * HWCAP_TLS is available only on 1136 r1p0 and later,
337          * see also kuser_get_tls_init.
338          */
339         if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
340                 elf_hwcap &= ~HWCAP_TLS;
341 }
342
/*
 * setup_processor - identify the boot CPU and install its support code.
 *
 * Looks the CPU ID up in the linker-built proc_info table, copies the
 * per-CPU-type function pointer sets, fills in names/hwcaps, then
 * initialises caches and the processor.  Halts if the CPU is unknown.
 */
static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", read_cpuid_id());
		while (1);	/* no support for this CPU was built in: halt */
	}

	cpu_name = list->cpu_name;

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	/* ENDIANNESS appends 'l' or 'b' to the arch/ELF platform names */
	sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
	sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~HWCAP_THUMB;
#endif

	feat_v6_fixup();

	cacheid_init();
	cpu_proc_init();
}
390
/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks: it briefly switches into IRQ,
 * abort and undef mode in turn, pointing each mode's sp at the
 * matching field of this CPU's struct stack, then returns to SVC mode.
 */
void cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 * (interrupts stay masked throughout: each CPSR value written
	 * below includes PSR_F_BIT | PSR_I_BIT)
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}
441
442 static void __init dump_machine_table(void)
443 {
444         struct machine_desc *p;
445
446         early_print("Available machine support:\n\nID (hex)\tNAME\n");
447         for_each_machine_desc(p)
448                 early_print("%08x\t%s\n", p->nr, p->name);
449
450         early_print("\nPlease check your kernel config and/or bootloader.\n");
451
452         while (true)
453                 /* can't use cpu_relax() here as it may require MMU setup */;
454 }
455
456 int __init arm_add_memory(phys_addr_t start, unsigned long size)
457 {
458         struct membank *bank = &meminfo.bank[meminfo.nr_banks];
459
460         if (meminfo.nr_banks >= NR_BANKS) {
461                 printk(KERN_CRIT "NR_BANKS too low, "
462                         "ignoring memory at 0x%08llx\n", (long long)start);
463                 return -EINVAL;
464         }
465
466         /*
467          * Ensure that start/size are aligned to a page boundary.
468          * Size is appropriately rounded down, start is rounded up.
469          */
470         size -= start & ~PAGE_MASK;
471         bank->start = PAGE_ALIGN(start);
472         bank->size  = size & PAGE_MASK;
473
474         /*
475          * Check whether this memory region has non-zero size or
476          * invalid node number.
477          */
478         if (bank->size == 0)
479                 return -EINVAL;
480
481         meminfo.nr_banks++;
482         return 0;
483 }
484
/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]".
 *
 * May be given multiple times; the first occurrence discards any
 * banks discovered automatically (e.g. from ATAG_MEM).
 */
static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	unsigned long size;
	phys_addr_t start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		meminfo.nr_banks = 0;
	}

	/* default to the start of physical RAM when no "@start" is given */
	start = PHYS_OFFSET;
	size  = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);
516
517 static void __init
518 setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
519 {
520 #ifdef CONFIG_BLK_DEV_RAM
521         extern int rd_size, rd_image_start, rd_prompt, rd_doload;
522
523         rd_image_start = image_start;
524         rd_prompt = prompt;
525         rd_doload = doload;
526
527         if (rd_sz)
528                 rd_size = rd_sz;
529 #endif
530 }
531
/*
 * Claim the standard resources: one "System RAM" iomem resource per
 * memblock region (with kernel text/data nested inside where they
 * fit), optional video RAM, and the legacy parallel-port I/O ranges
 * the machine descriptor asks for.
 */
static void __init request_standard_resources(struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		/* nest the kernel text/data inside the RAM bank holding them */
		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}
576
577 /*
578  *  Tag parsing.
579  *
580  * This is the new way of passing data to the kernel at boot time.  Rather
581  * than passing a fixed inflexible structure to the kernel, we pass a list
582  * of variable-sized tags to the kernel.  The first tag must be a ATAG_CORE
583  * tag for the list to be recognised (to distinguish the tagged list from
584  * a param_struct).  The list is terminated with a zero-length tag (this tag
585  * is not parsed in any way).
586  */
587 static int __init parse_tag_core(const struct tag *tag)
588 {
589         if (tag->hdr.size > 2) {
590                 if ((tag->u.core.flags & 1) == 0)
591                         root_mountflags &= ~MS_RDONLY;
592                 ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
593         }
594         return 0;
595 }
596
597 __tagtable(ATAG_CORE, parse_tag_core);
598
/* ATAG_MEM: register one memory bank with arm_add_memory(). */
static int __init parse_tag_mem32(const struct tag *tag)
{
	return arm_add_memory(tag->u.mem.start, tag->u.mem.size);
}

__tagtable(ATAG_MEM, parse_tag_mem32);
605
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
/* Default text-mode console geometry; overridden by ATAG_VIDEOTEXT. */
struct screen_info screen_info = {
 .orig_video_lines	= 30,
 .orig_video_cols	= 80,
 .orig_video_mode	= 0,
 .orig_video_ega_bx	= 0,
 .orig_video_isVGA	= 1,
 .orig_video_points	= 8
};

/* ATAG_VIDEOTEXT: copy the boot loader's text console state. */
static int __init parse_tag_videotext(const struct tag *tag)
{
	screen_info.orig_x            = tag->u.videotext.x;
	screen_info.orig_y            = tag->u.videotext.y;
	screen_info.orig_video_page   = tag->u.videotext.video_page;
	screen_info.orig_video_mode   = tag->u.videotext.video_mode;
	screen_info.orig_video_cols   = tag->u.videotext.video_cols;
	screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
	screen_info.orig_video_lines  = tag->u.videotext.video_lines;
	screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
	screen_info.orig_video_points = tag->u.videotext.video_points;
	return 0;
}

__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
#endif
632
633 static int __init parse_tag_ramdisk(const struct tag *tag)
634 {
635         setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
636                       (tag->u.ramdisk.flags & 2) == 0,
637                       tag->u.ramdisk.start, tag->u.ramdisk.size);
638         return 0;
639 }
640
641 __tagtable(ATAG_RAMDISK, parse_tag_ramdisk);
642
643 static int __init parse_tag_serialnr(const struct tag *tag)
644 {
645         system_serial_low = tag->u.serialnr.low;
646         system_serial_high = tag->u.serialnr.high;
647         return 0;
648 }
649
650 __tagtable(ATAG_SERIAL, parse_tag_serialnr);
651
/* ATAG_REVISION: board revision, shown as "Revision" in /proc/cpuinfo. */
static int __init parse_tag_revision(const struct tag *tag)
{
	system_rev = tag->u.revision.rev;
	return 0;
}

__tagtable(ATAG_REVISION, parse_tag_revision);
659
/* ATAG_CMDLINE: boot loader's kernel command line. */
static int __init parse_tag_cmdline(const struct tag *tag)
{
#ifndef CONFIG_CMDLINE_FORCE
	/* replace the built-in default command line */
	strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
#else
	/* CONFIG_CMDLINE_FORCE: the compiled-in command line always wins */
	pr_warning("Ignoring tag cmdline (using the default kernel command line)\n");
#endif /* CONFIG_CMDLINE_FORCE */
	return 0;
}

__tagtable(ATAG_CMDLINE, parse_tag_cmdline);
671
672 /*
673  * Scan the tag table for this tag, and call its parse function.
674  * The tag table is built by the linker from all the __tagtable
675  * declarations.
676  */
677 static int __init parse_tag(const struct tag *tag)
678 {
679         extern struct tagtable __tagtable_begin, __tagtable_end;
680         struct tagtable *t;
681
682         for (t = &__tagtable_begin; t < &__tagtable_end; t++)
683                 if (tag->hdr.tag == t->tag) {
684                         t->parse(tag);
685                         break;
686                 }
687
688         return t < &__tagtable_end;
689 }
690
691 /*
692  * Parse all tags in the list, checking both the global and architecture
693  * specific tag tables.
694  */
695 static void __init parse_tags(const struct tag *t)
696 {
697         for (; t->hdr.size; t = tag_next(t))
698                 if (!parse_tag(t))
699                         printk(KERN_WARNING
700                                 "Ignoring unrecognised tag 0x%08x\n",
701                                 t->hdr.tag);
702 }
703
/*
 * This holds our defaults: a minimal tag list used when the boot
 * loader provides no usable one — a core tag (read-only root) plus a
 * single MEM_SIZE memory bank whose start address is filled in by
 * setup_machine_tags().
 */
static struct init_tags {
	struct tag_header hdr1;
	struct tag_core   core;
	struct tag_header hdr2;
	struct tag_mem32  mem;
	struct tag_header hdr3;
} init_tags __initdata = {
	{ tag_size(tag_core), ATAG_CORE },
	{ 1, PAGE_SIZE, 0xff },		/* flags=1: keep root read-only */
	{ tag_size(tag_mem32), ATAG_MEM },
	{ MEM_SIZE },			/* size only; start set at runtime */
	{ 0, ATAG_NONE }		/* zero-length list terminator */
};
720
721 static int __init customize_machine(void)
722 {
723         /* customizes platform devices, or adds new ones */
724         if (machine_desc->init_machine)
725                 machine_desc->init_machine();
726         return 0;
727 }
728 arch_initcall(customize_machine);
729
#ifdef CONFIG_KEXEC
/* Size in bytes of the pfn range [min_low_pfn, max_low_pfn). */
static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves memory area given in "crashkernel=" kernel command
 * line parameter. The memory reserved is used by a dump capture kernel when
 * primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)	/* no (or unparsable) "crashkernel=" option */
		return;

	ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
	if (ret < 0) {
		printk(KERN_WARNING "crashkernel reservation failed - "
		       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
		return;
	}

	printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
	       "for crashkernel (System RAM: %ldMB)\n",
	       (unsigned long)(crash_size >> 20),
	       (unsigned long)(crash_base >> 20),
	       (unsigned long)(total_mem >> 20));

	/* publish the reservation so it shows up in /proc/iomem */
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */
778
779 static void __init squash_mem_tags(struct tag *tag)
780 {
781         for (; tag->hdr.size; tag = tag_next(tag))
782                 if (tag->hdr.tag == ATAG_MEM)
783                         tag->hdr.tag = ATAG_NONE;
784 }
785
/*
 * setup_machine_tags - find the machine descriptor and process the
 * ATAG list for machine number @nr.
 *
 * Tag list is taken from, in order of preference: the boot loader
 * (__atags_pointer), the machine's default boot_params address, or the
 * built-in init_tags.  On an unknown machine ID this dumps the machine
 * table and does not return.  Returns the matched machine descriptor.
 */
static struct machine_desc * __init setup_machine_tags(unsigned int nr)
{
	struct tag *tags = (struct tag *)&init_tags;
	struct machine_desc *mdesc = NULL, *p;
	char *from = default_command_line;

	init_tags.mem.start = PHYS_OFFSET;

	/*
	 * locate machine in the list of supported machines.
	 */
	for_each_machine_desc(p)
		if (nr == p->nr) {
			printk("Machine: %s\n", p->name);
			mdesc = p;
			break;
		}

	if (!mdesc) {
		early_print("\nError: unrecognized/unsupported machine ID"
			" (r1 = 0x%08x).\n\n", nr);
		dump_machine_table(); /* does not return */
	}

	if (__atags_pointer)
		tags = phys_to_virt(__atags_pointer);
	else if (mdesc->boot_params) {
#ifdef CONFIG_MMU
		/*
		 * We still are executing with a minimal MMU mapping created
		 * with the presumption that the machine default for this
		 * is located in the first MB of RAM.  Anything else will
		 * fault and silently hang the kernel at this point.
		 */
		if (mdesc->boot_params < PHYS_OFFSET ||
		    mdesc->boot_params >= PHYS_OFFSET + SZ_1M) {
			printk(KERN_WARNING
			       "Default boot params at physical 0x%08lx out of reach\n",
			       mdesc->boot_params);
		} else
#endif
		{
			tags = phys_to_virt(mdesc->boot_params);
		}
	}

#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
	/*
	 * If we have the old style parameters, convert them to
	 * a tag list.
	 */
	if (tags->hdr.tag != ATAG_CORE)
		convert_to_tag_list(tags);
#endif
	/* still not a valid tag list?  fall back to the built-in defaults */
	if (tags->hdr.tag != ATAG_CORE)
		tags = (struct tag *)&init_tags;

	/* give the machine a chance to patch tags/cmdline/meminfo */
	if (mdesc->fixup)
		mdesc->fixup(mdesc, tags, &from, &meminfo);

	if (tags->hdr.tag == ATAG_CORE) {
		/* fixup already populated meminfo: ignore ATAG_MEM entries */
		if (meminfo.nr_banks != 0)
			squash_mem_tags(tags);
		save_atags(tags);
		parse_tags(tags);
	}

	/* parse_early_param needs a boot_command_line */
	strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);

	return mdesc;
}
858
859
/*
 * setup_arch - ARM architecture-specific boot-time setup.
 *
 * Identifies the CPU and machine, processes tags and the command
 * line, initialises memory (memblock and paging), claims standard
 * resources, sets up exception stacks and runs the machine's early
 * init hook.  Ordering of the calls below is significant.
 */
void __init setup_arch(char **cmdline_p)
{
	struct machine_desc *mdesc;

	unwind_init();

	setup_processor();
	mdesc = setup_machine_tags(machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;

	if (mdesc->soft_reboot)
		reboot_setup("s");

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

	arm_memblock_init(&meminfo, mdesc);

	paging_init(mdesc);
	request_standard_resources(mdesc);

#ifdef CONFIG_SMP
	if (is_smp())
		smp_init_cpus();
#endif
	reserve_crashkernel();

	/* exception-mode stacks for the boot CPU */
	cpu_init();
	tcm_init();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
	early_trap_init();

	if (mdesc->init_early)
		mdesc->init_early();
}
915
916
917 static int __init topology_init(void)
918 {
919         int cpu;
920
921         for_each_possible_cpu(cpu) {
922                 struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
923                 cpuinfo->cpu.hotpluggable = 1;
924                 register_cpu(&cpuinfo->cpu, cpu);
925         }
926
927         return 0;
928 }
929 subsys_initcall(topology_init);
930
#ifdef CONFIG_HAVE_PROC_CPU
/* Create the /proc/cpu directory. */
static int __init proc_cpu_init(void)
{
	if (!proc_mkdir("cpu", NULL))
		return -ENOMEM;

	return 0;
}
fs_initcall(proc_cpu_init);
#endif
943
/*
 * Names for the /proc/cpuinfo "Features" line; entry i corresponds to
 * elf_hwcap bit (1 << i).  NULL-terminated.
 */
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	NULL
};
962
/*
 * seq_file show callback for /proc/cpuinfo: processor name, per-CPU
 * BogoMIPS, hwcap features, decoded CPU ID fields, and board
 * hardware/revision/serial information.
 */
static int c_show(struct seq_file *m, void *v)
{
	int i;

	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
		   cpu_name, read_cpuid_id() & 15, elf_platform);

#if defined(CONFIG_SMP)
	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
	}
#else /* CONFIG_SMP */
	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000/HZ),
		   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

	/* dump out the processor features */
	seq_puts(m, "Features\t: ");

	for (i = 0; hwcap_str[i]; i++)
		if (elf_hwcap & (1 << i))
			seq_printf(m, "%s ", hwcap_str[i]);

	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
	seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

	/* variant/part field layout differs by CPU ID format generation */
	if ((read_cpuid_id() & 0x0008f000) == 0x00000000) {
		/* pre-ARM7 */
		seq_printf(m, "CPU part\t: %07x\n", read_cpuid_id() >> 4);
	} else {
		if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
			/* ARM7 */
			seq_printf(m, "CPU variant\t: 0x%02x\n",
				   (read_cpuid_id() >> 16) & 127);
		} else {
			/* post-ARM7 */
			seq_printf(m, "CPU variant\t: 0x%x\n",
				   (read_cpuid_id() >> 20) & 15);
		}
		seq_printf(m, "CPU part\t: 0x%03x\n",
			   (read_cpuid_id() >> 4) & 0xfff);
	}
	seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);

	seq_puts(m, "\n");

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}
1025
1026 static void *c_start(struct seq_file *m, loff_t *pos)
1027 {
1028         return *pos < 1 ? (void *)1 : NULL;
1029 }
1030
1031 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1032 {
1033         ++*pos;
1034         return NULL;
1035 }
1036
/* Nothing to release for the single-record /proc/cpuinfo sequence. */
static void c_stop(struct seq_file *m, void *v)
{
}
1040
/* seq_file operations backing /proc/cpuinfo. */
const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};