arch/arm/kernel/setup.c (mv-sheeva.git)
ARM: 7029/1: Make cpu_architecture into a global variable
1 /*
2  *  linux/arch/arm/kernel/setup.c
3  *
4  *  Copyright (C) 1995-2001 Russell King
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/stddef.h>
13 #include <linux/ioport.h>
14 #include <linux/delay.h>
15 #include <linux/utsname.h>
16 #include <linux/initrd.h>
17 #include <linux/console.h>
18 #include <linux/bootmem.h>
19 #include <linux/seq_file.h>
20 #include <linux/screen_info.h>
21 #include <linux/init.h>
22 #include <linux/kexec.h>
23 #include <linux/of_fdt.h>
24 #include <linux/crash_dump.h>
25 #include <linux/root_dev.h>
26 #include <linux/cpu.h>
27 #include <linux/interrupt.h>
28 #include <linux/smp.h>
29 #include <linux/fs.h>
30 #include <linux/proc_fs.h>
31 #include <linux/memblock.h>
32 #include <linux/bug.h>
33 #include <linux/compiler.h>
34
35 #include <asm/unified.h>
36 #include <asm/cpu.h>
37 #include <asm/cputype.h>
38 #include <asm/elf.h>
39 #include <asm/procinfo.h>
40 #include <asm/sections.h>
41 #include <asm/setup.h>
42 #include <asm/smp_plat.h>
43 #include <asm/mach-types.h>
44 #include <asm/cacheflush.h>
45 #include <asm/cachetype.h>
46 #include <asm/tlbflush.h>
47 #include <asm/system.h>
48
49 #include <asm/prom.h>
50 #include <asm/mach/arch.h>
51 #include <asm/mach/irq.h>
52 #include <asm/mach/time.h>
53 #include <asm/traps.h>
54 #include <asm/unwind.h>
55
56 #if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
57 #include "compat.h"
58 #endif
59 #include "atags.h"
60 #include "tcm.h"
61
62 #ifndef MEM_SIZE
63 #define MEM_SIZE        (16*1024*1024)
64 #endif
65
66 #if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
67 char fpe_type[8];
68
69 static int __init fpe_setup(char *line)
70 {
71         memcpy(fpe_type, line, 8);
72         return 1;
73 }
74
75 __setup("fpe=", fpe_setup);
76 #endif
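/*
 * Usage sketch (illustrative only, not part of the original file):
 * booting with something like "fpe=nwfpe" makes fpe_setup() copy the
 * first 8 bytes of the argument into fpe_type[], which the floating
 * point emulator can consult during its own initialisation.  Returning
 * 1 tells the __setup() machinery the option has been consumed.
 */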
77
78 extern void paging_init(struct machine_desc *desc);
79 extern void sanity_check_meminfo(void);
80 extern void reboot_setup(char *str);
81
82 unsigned int processor_id;
83 EXPORT_SYMBOL(processor_id);
84 unsigned int __machine_arch_type __read_mostly;
85 EXPORT_SYMBOL(__machine_arch_type);
86 unsigned int cacheid __read_mostly;
87 EXPORT_SYMBOL(cacheid);
88
89 unsigned int __atags_pointer __initdata;
90
91 unsigned int system_rev;
92 EXPORT_SYMBOL(system_rev);
93
94 unsigned int system_serial_low;
95 EXPORT_SYMBOL(system_serial_low);
96
97 unsigned int system_serial_high;
98 EXPORT_SYMBOL(system_serial_high);
99
100 unsigned int elf_hwcap __read_mostly;
101 EXPORT_SYMBOL(elf_hwcap);
102
103
104 #ifdef MULTI_CPU
105 struct processor processor __read_mostly;
106 #endif
107 #ifdef MULTI_TLB
108 struct cpu_tlb_fns cpu_tlb __read_mostly;
109 #endif
110 #ifdef MULTI_USER
111 struct cpu_user_fns cpu_user __read_mostly;
112 #endif
113 #ifdef MULTI_CACHE
114 struct cpu_cache_fns cpu_cache __read_mostly;
115 #endif
116 #ifdef CONFIG_OUTER_CACHE
117 struct outer_cache_fns outer_cache __read_mostly;
118 EXPORT_SYMBOL(outer_cache);
119 #endif
120
121 /*
122  * Cached cpu_architecture() result for use by assembler code.
123  * C code should use the cpu_architecture() function instead of accessing this
124  * variable directly.
125  */
126 int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;
127
128 struct stack {
129         u32 irq[3];
130         u32 abt[3];
131         u32 und[3];
132 } ____cacheline_aligned;
133
134 static struct stack stacks[NR_CPUS];
135
136 char elf_platform[ELF_PLATFORM_SIZE];
137 EXPORT_SYMBOL(elf_platform);
138
139 static const char *cpu_name;
140 static const char *machine_name;
141 static char __initdata cmd_line[COMMAND_LINE_SIZE];
142 struct machine_desc *machine_desc __initdata;
143
144 static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
145 static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
146 #define ENDIANNESS ((char)endian_test.l)
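/*
 * Explanatory note on the ENDIANNESS trick (assuming a 4-byte unsigned
 * long): the union overlays "l ? ? b" on top of endian_test.l.  Casting
 * .l to char keeps only the least significant byte, which lives at c[0]
 * ('l') on a little-endian CPU and at c[3] ('b') on a big-endian one.
 * setup_processor() appends this character to the utsname machine
 * string and to elf_platform, giving uname strings like the familiar
 * "armv7l".
 */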
147
148 DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
149
150 /*
151  * Standard memory resources
152  */
153 static struct resource mem_res[] = {
154         {
155                 .name = "Video RAM",
156                 .start = 0,
157                 .end = 0,
158                 .flags = IORESOURCE_MEM
159         },
160         {
161                 .name = "Kernel text",
162                 .start = 0,
163                 .end = 0,
164                 .flags = IORESOURCE_MEM
165         },
166         {
167                 .name = "Kernel data",
168                 .start = 0,
169                 .end = 0,
170                 .flags = IORESOURCE_MEM
171         }
172 };
173
174 #define video_ram   mem_res[0]
175 #define kernel_code mem_res[1]
176 #define kernel_data mem_res[2]
177
178 static struct resource io_res[] = {
179         {
180                 .name = "reserved",
181                 .start = 0x3bc,
182                 .end = 0x3be,
183                 .flags = IORESOURCE_IO | IORESOURCE_BUSY
184         },
185         {
186                 .name = "reserved",
187                 .start = 0x378,
188                 .end = 0x37f,
189                 .flags = IORESOURCE_IO | IORESOURCE_BUSY
190         },
191         {
192                 .name = "reserved",
193                 .start = 0x278,
194                 .end = 0x27f,
195                 .flags = IORESOURCE_IO | IORESOURCE_BUSY
196         }
197 };
198
199 #define lp0 io_res[0]
200 #define lp1 io_res[1]
201 #define lp2 io_res[2]
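/*
 * Background note: 0x3bc-0x3be, 0x378-0x37f and 0x278-0x27f are the
 * legacy PC parallel-port (LPT) I/O ranges.  Machine descriptions that
 * actually have these ports set reserve_lp0/1/2, and
 * request_standard_resources() below then marks the ranges busy in the
 * I/O port resource tree.
 */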
202
203 static const char *proc_arch[] = {
204         "undefined/unknown",
205         "3",
206         "4",
207         "4T",
208         "5",
209         "5T",
210         "5TE",
211         "5TEJ",
212         "6TEJ",
213         "7",
214         "?(11)",
215         "?(12)",
216         "?(13)",
217         "?(14)",
218         "?(15)",
219         "?(16)",
220         "?(17)",
221 };
222
223 static int __get_cpu_architecture(void)
224 {
225         int cpu_arch;
226
227         if ((read_cpuid_id() & 0x0008f000) == 0) {
228                 cpu_arch = CPU_ARCH_UNKNOWN;
229         } else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
230                 cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
231         } else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
232                 cpu_arch = (read_cpuid_id() >> 16) & 7;
233                 if (cpu_arch)
234                         cpu_arch += CPU_ARCH_ARMv3;
235         } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
236                 unsigned int mmfr0;
237
238                 /* Revised CPUID format. Read the Memory Model Feature
239                  * Register 0 and check for VMSAv7 or PMSAv7 */
240                 asm("mrc        p15, 0, %0, c0, c1, 4"
241                     : "=r" (mmfr0));
242                 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
243                     (mmfr0 & 0x000000f0) >= 0x00000030)
244                         cpu_arch = CPU_ARCH_ARMv7;
245                 else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
246                          (mmfr0 & 0x000000f0) == 0x00000020)
247                         cpu_arch = CPU_ARCH_ARMv6;
248                 else
249                         cpu_arch = CPU_ARCH_UNKNOWN;
250         } else
251                 cpu_arch = CPU_ARCH_UNKNOWN;
252
253         return cpu_arch;
254 }
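/*
 * Worked example (hypothetical MIDR value, for illustration only): a
 * Cortex-A9 style ID of 0x413fc090 has bits[19:16] == 0xf, so the
 * "revised CPUID format" branch above runs.  It reads ID_MMFR0
 * (CP15 c0, c1, 4); a VMSA field (bits[3:0]) of 3 or more means VMSAv7,
 * so the function returns CPU_ARCH_ARMv7.  Older cores encode the
 * architecture directly in bits[19:16] (counted up from ARMv3), and the
 * ARM7 family is special-cased on bit 23 to tell ARMv4T from ARMv3.
 */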
255
256 int __pure cpu_architecture(void)
257 {
258         BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);
259
260         return __cpu_architecture;
261 }
262
263 static int cpu_has_aliasing_icache(unsigned int arch)
264 {
265         int aliasing_icache;
266         unsigned int id_reg, num_sets, line_size;
267
268         /* arch specifies the register format */
269         switch (arch) {
270         case CPU_ARCH_ARMv7:
271                 asm("mcr        p15, 2, %0, c0, c0, 0 @ set CSSELR"
272                     : /* No output operands */
273                     : "r" (1));
274                 isb();
275                 asm("mrc        p15, 1, %0, c0, c0, 0 @ read CCSIDR"
276                     : "=r" (id_reg));
277                 line_size = 4 << ((id_reg & 0x7) + 2);
278                 num_sets = ((id_reg >> 13) & 0x7fff) + 1;
279                 aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
280                 break;
281         case CPU_ARCH_ARMv6:
282                 aliasing_icache = read_cpuid_cachetype() & (1 << 11);
283                 break;
284         default:
285                 /* I-cache aliases will be handled by D-cache aliasing code */
286                 aliasing_icache = 0;
287         }
288
289         return aliasing_icache;
290 }
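/*
 * Worked example for the ARMv7 branch above (hypothetical cache
 * geometry): writing 1 to CSSELR selects the level 1 instruction cache,
 * and CCSIDR then describes it.  With a LineSize field of 1 (32-byte
 * lines, since 4 << (1 + 2) == 32) and a NumSets field of 255 (256
 * sets), one cache way spans 32 * 256 = 8192 bytes.  That is larger
 * than a 4K page, so the I-cache is reported as aliasing.
 */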
291
292 static void __init cacheid_init(void)
293 {
294         unsigned int cachetype = read_cpuid_cachetype();
295         unsigned int arch = cpu_architecture();
296
297         if (arch >= CPU_ARCH_ARMv6) {
298                 if ((cachetype & (7 << 29)) == 4 << 29) {
299                         /* ARMv7 register format */
300                         arch = CPU_ARCH_ARMv7;
301                         cacheid = CACHEID_VIPT_NONALIASING;
302                         if ((cachetype & (3 << 14)) == 1 << 14)
303                                 cacheid |= CACHEID_ASID_TAGGED;
304                 } else {
305                         arch = CPU_ARCH_ARMv6;
306                         if (cachetype & (1 << 23))
307                                 cacheid = CACHEID_VIPT_ALIASING;
308                         else
309                                 cacheid = CACHEID_VIPT_NONALIASING;
310                 }
311                 if (cpu_has_aliasing_icache(arch))
312                         cacheid |= CACHEID_VIPT_I_ALIASING;
313         } else {
314                 cacheid = CACHEID_VIVT;
315         }
316
317         printk("CPU: %s data cache, %s instruction cache\n",
318                 cache_is_vivt() ? "VIVT" :
319                 cache_is_vipt_aliasing() ? "VIPT aliasing" :
320                 cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown",
321                 cache_is_vivt() ? "VIVT" :
322                 icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
323                 icache_is_vipt_aliasing() ? "VIPT aliasing" :
324                 cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
325 }
326
327 /*
328  * These functions re-use the assembly code in head.S, which
329  * already provides the required functionality.
330  */
331 extern struct proc_info_list *lookup_processor_type(unsigned int);
332
333 void __init early_print(const char *str, ...)
334 {
335         extern void printascii(const char *);
336         char buf[256];
337         va_list ap;
338
339         va_start(ap, str);
340         vsnprintf(buf, sizeof(buf), str, ap);
341         va_end(ap);
342
343 #ifdef CONFIG_DEBUG_LL
344         printascii(buf);
345 #endif
346         printk("%s", buf);
347 }
348
349 static void __init feat_v6_fixup(void)
350 {
351         int id = read_cpuid_id();
352
353         if ((id & 0xff0f0000) != 0x41070000)
354                 return;
355
356         /*
357          * HWCAP_TLS is available only on 1136 r1p0 and later,
358          * see also kuser_get_tls_init.
359          */
360         if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
361                 elf_hwcap &= ~HWCAP_TLS;
362 }
363
364 /*
365  * cpu_init - initialise one CPU.
366  *
367  * cpu_init sets up the per-CPU stacks.
368  */
369 void cpu_init(void)
370 {
371         unsigned int cpu = smp_processor_id();
372         struct stack *stk = &stacks[cpu];
373
374         if (cpu >= NR_CPUS) {
375                 printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
376                 BUG();
377         }
378
379         cpu_proc_init();
380
381         /*
382          * Define the placement constraint for the inline asm directive below.
383          * In Thumb-2, msr with an immediate value is not allowed.
384          */
385 #ifdef CONFIG_THUMB2_KERNEL
386 #define PLC     "r"
387 #else
388 #define PLC     "I"
389 #endif
390
391         /*
392          * setup stacks for re-entrant exception handlers
393          */
394         __asm__ (
395         "msr    cpsr_c, %1\n\t"
396         "add    r14, %0, %2\n\t"
397         "mov    sp, r14\n\t"
398         "msr    cpsr_c, %3\n\t"
399         "add    r14, %0, %4\n\t"
400         "mov    sp, r14\n\t"
401         "msr    cpsr_c, %5\n\t"
402         "add    r14, %0, %6\n\t"
403         "mov    sp, r14\n\t"
404         "msr    cpsr_c, %7"
405             :
406             : "r" (stk),
407               PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
408               "I" (offsetof(struct stack, irq[0])),
409               PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
410               "I" (offsetof(struct stack, abt[0])),
411               PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
412               "I" (offsetof(struct stack, und[0])),
413               PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
414             : "r14");
415 }
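/*
 * Explanatory note on the inline asm above: with interrupts masked it
 * switches CPSR into IRQ, ABT and UND mode in turn, points each mode's
 * banked stack pointer at the matching 3-word slot in this CPU's
 * struct stack, and finally drops back to SVC mode.  The exception
 * entry code can then use those tiny per-mode stacks to stash a few
 * registers before moving over to the normal SVC stack.
 */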
416
417 static void __init setup_processor(void)
418 {
419         struct proc_info_list *list;
420
421         /*
422          * locate processor in the list of supported processor
423          * types.  The linker builds this table for us from the
424          * entries in arch/arm/mm/proc-*.S
425          */
426         list = lookup_processor_type(read_cpuid_id());
427         if (!list) {
428                 printk("CPU configuration botched (ID %08x), unable "
429                        "to continue.\n", read_cpuid_id());
430                 while (1);
431         }
432
433         cpu_name = list->cpu_name;
434         __cpu_architecture = __get_cpu_architecture();
435
436 #ifdef MULTI_CPU
437         processor = *list->proc;
438 #endif
439 #ifdef MULTI_TLB
440         cpu_tlb = *list->tlb;
441 #endif
442 #ifdef MULTI_USER
443         cpu_user = *list->user;
444 #endif
445 #ifdef MULTI_CACHE
446         cpu_cache = *list->cache;
447 #endif
448
449         printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
450                cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
451                proc_arch[cpu_architecture()], cr_alignment);
452
453         sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
454         sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
455         elf_hwcap = list->elf_hwcap;
456 #ifndef CONFIG_ARM_THUMB
457         elf_hwcap &= ~HWCAP_THUMB;
458 #endif
459
460         feat_v6_fixup();
461
462         cacheid_init();
463         cpu_init();
464 }
465
466 void __init dump_machine_table(void)
467 {
468         struct machine_desc *p;
469
470         early_print("Available machine support:\n\nID (hex)\tNAME\n");
471         for_each_machine_desc(p)
472                 early_print("%08x\t%s\n", p->nr, p->name);
473
474         early_print("\nPlease check your kernel config and/or bootloader.\n");
475
476         while (true)
477                 /* can't use cpu_relax() here as it may require MMU setup */;
478 }
479
480 int __init arm_add_memory(phys_addr_t start, unsigned long size)
481 {
482         struct membank *bank = &meminfo.bank[meminfo.nr_banks];
483
484         if (meminfo.nr_banks >= NR_BANKS) {
485                 printk(KERN_CRIT "NR_BANKS too low, "
486                         "ignoring memory at 0x%08llx\n", (long long)start);
487                 return -EINVAL;
488         }
489
490         /*
491          * Ensure that start/size are aligned to a page boundary.
492          * Size is appropriately rounded down, start is rounded up.
493          */
494         size -= start & ~PAGE_MASK;
495         bank->start = PAGE_ALIGN(start);
496         bank->size  = size & PAGE_MASK;
497
498         /*
499          * Check whether this memory region still has a
500          * non-zero size after it has been page-aligned.
501          */
502         if (bank->size == 0)
503                 return -EINVAL;
504
505         meminfo.nr_banks++;
506         return 0;
507 }
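/*
 * Worked example (made-up addresses): arm_add_memory(0x20000800,
 * 0x100000) first trims the 0x800 misalignment off the size
 * (0x100000 - 0x800 = 0xff800), rounds the start up to 0x20001000 and
 * the size down to 0xff000.  The registered bank therefore covers
 * 0x20001000..0x200fffff, never extending past the region the caller
 * described.
 */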
508
509 /*
510  * Pick out the memory size.  We look for mem=size@start,
511  * where start and size are "size[KkMm]"
512  */
513 static int __init early_mem(char *p)
514 {
515         static int usermem __initdata = 0;
516         unsigned long size;
517         phys_addr_t start;
518         char *endp;
519
520         /*
521          * If the user specifies memory size, we
522          * blow away any automatically generated
523          * size.
524          */
525         if (usermem == 0) {
526                 usermem = 1;
527                 meminfo.nr_banks = 0;
528         }
529
530         start = PHYS_OFFSET;
531         size  = memparse(p, &endp);
532         if (*endp == '@')
533                 start = memparse(endp + 1, NULL);
534
535         arm_add_memory(start, size);
536
537         return 0;
538 }
539 early_param("mem", early_mem);
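/*
 * Usage sketch (illustrative values): "mem=64M@0x20000000" on the
 * command line yields size = 64MB and start = 0x20000000, while a plain
 * "mem=64M" keeps start at PHYS_OFFSET.  The first mem= option seen
 * also resets meminfo.nr_banks to 0, so user-supplied banks replace
 * whatever was discovered earlier; each further mem= option adds
 * another bank via arm_add_memory().
 */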
540
541 static void __init
542 setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
543 {
544 #ifdef CONFIG_BLK_DEV_RAM
545         extern int rd_size, rd_image_start, rd_prompt, rd_doload;
546
547         rd_image_start = image_start;
548         rd_prompt = prompt;
549         rd_doload = doload;
550
551         if (rd_sz)
552                 rd_size = rd_sz;
553 #endif
554 }
555
556 static void __init request_standard_resources(struct machine_desc *mdesc)
557 {
558         struct memblock_region *region;
559         struct resource *res;
560
561         kernel_code.start   = virt_to_phys(_text);
562         kernel_code.end     = virt_to_phys(_etext - 1);
563         kernel_data.start   = virt_to_phys(_sdata);
564         kernel_data.end     = virt_to_phys(_end - 1);
565
566         for_each_memblock(memory, region) {
567                 res = alloc_bootmem_low(sizeof(*res));
568                 res->name  = "System RAM";
569                 res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
570                 res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
571                 res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
572
573                 request_resource(&iomem_resource, res);
574
575                 if (kernel_code.start >= res->start &&
576                     kernel_code.end <= res->end)
577                         request_resource(res, &kernel_code);
578                 if (kernel_data.start >= res->start &&
579                     kernel_data.end <= res->end)
580                         request_resource(res, &kernel_data);
581         }
582
583         if (mdesc->video_start) {
584                 video_ram.start = mdesc->video_start;
585                 video_ram.end   = mdesc->video_end;
586                 request_resource(&iomem_resource, &video_ram);
587         }
588
589         /*
590          * Some machines never have lp0, lp1 or lp2, so only
591          * reserve the ports the machine description asks for.
592          */
593         if (mdesc->reserve_lp0)
594                 request_resource(&ioport_resource, &lp0);
595         if (mdesc->reserve_lp1)
596                 request_resource(&ioport_resource, &lp1);
597         if (mdesc->reserve_lp2)
598                 request_resource(&ioport_resource, &lp2);
599 }
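/*
 * Explanatory note: every memory-type memblock region is published as a
 * "System RAM" resource under iomem_resource, and the kernel text/data
 * resources are then nested inside whichever RAM resource contains
 * them.  This nesting is what produces the indented entries seen in
 * /proc/iomem.
 */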
600
601 /*
602  *  Tag parsing.
603  *
604  * This is the new way of passing data to the kernel at boot time.  Rather
605  * than passing a fixed inflexible structure to the kernel, we pass a list
606  * of variable-sized tags to the kernel.  The first tag must be an ATAG_CORE
607  * tag for the list to be recognised (to distinguish the tagged list from
608  * a param_struct).  The list is terminated with a zero-length tag (this tag
609  * is not parsed in any way).
610  */
611 static int __init parse_tag_core(const struct tag *tag)
612 {
613         if (tag->hdr.size > 2) {
614                 if ((tag->u.core.flags & 1) == 0)
615                         root_mountflags &= ~MS_RDONLY;
616                 ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
617         }
618         return 0;
619 }
620
621 __tagtable(ATAG_CORE, parse_tag_core);
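/*
 * Illustrative layout of a minimal tag list as a bootloader might pass
 * it (sizes are in 32-bit words and include the 2-word header):
 *
 *   { size = 5, tag = ATAG_CORE }  flags, pagesize, rootdev
 *   { size = 4, tag = ATAG_MEM  }  size, start
 *   { size = 0, tag = ATAG_NONE }  terminator, never parsed
 *
 * parse_tags() walks such a list with tag_next() and dispatches each
 * entry to the matching __tagtable() handler, such as parse_tag_core()
 * above.
 */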
622
623 static int __init parse_tag_mem32(const struct tag *tag)
624 {
625         return arm_add_memory(tag->u.mem.start, tag->u.mem.size);
626 }
627
628 __tagtable(ATAG_MEM, parse_tag_mem32);
629
630 #if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
631 struct screen_info screen_info = {
632  .orig_video_lines      = 30,
633  .orig_video_cols       = 80,
634  .orig_video_mode       = 0,
635  .orig_video_ega_bx     = 0,
636  .orig_video_isVGA      = 1,
637  .orig_video_points     = 8
638 };
639
640 static int __init parse_tag_videotext(const struct tag *tag)
641 {
642         screen_info.orig_x            = tag->u.videotext.x;
643         screen_info.orig_y            = tag->u.videotext.y;
644         screen_info.orig_video_page   = tag->u.videotext.video_page;
645         screen_info.orig_video_mode   = tag->u.videotext.video_mode;
646         screen_info.orig_video_cols   = tag->u.videotext.video_cols;
647         screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
648         screen_info.orig_video_lines  = tag->u.videotext.video_lines;
649         screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
650         screen_info.orig_video_points = tag->u.videotext.video_points;
651         return 0;
652 }
653
654 __tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
655 #endif
656
657 static int __init parse_tag_ramdisk(const struct tag *tag)
658 {
659         setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
660                       (tag->u.ramdisk.flags & 2) == 0,
661                       tag->u.ramdisk.start, tag->u.ramdisk.size);
662         return 0;
663 }
664
665 __tagtable(ATAG_RAMDISK, parse_tag_ramdisk);
666
667 static int __init parse_tag_serialnr(const struct tag *tag)
668 {
669         system_serial_low = tag->u.serialnr.low;
670         system_serial_high = tag->u.serialnr.high;
671         return 0;
672 }
673
674 __tagtable(ATAG_SERIAL, parse_tag_serialnr);
675
676 static int __init parse_tag_revision(const struct tag *tag)
677 {
678         system_rev = tag->u.revision.rev;
679         return 0;
680 }
681
682 __tagtable(ATAG_REVISION, parse_tag_revision);
683
684 static int __init parse_tag_cmdline(const struct tag *tag)
685 {
686 #if defined(CONFIG_CMDLINE_EXTEND)
687         strlcat(default_command_line, " ", COMMAND_LINE_SIZE);
688         strlcat(default_command_line, tag->u.cmdline.cmdline,
689                 COMMAND_LINE_SIZE);
690 #elif defined(CONFIG_CMDLINE_FORCE)
691         pr_warning("Ignoring tag cmdline (using the default kernel command line)\n");
692 #else
693         strlcpy(default_command_line, tag->u.cmdline.cmdline,
694                 COMMAND_LINE_SIZE);
695 #endif
696         return 0;
697 }
698
699 __tagtable(ATAG_CMDLINE, parse_tag_cmdline);
700
701 /*
702  * Scan the tag table for this tag, and call its parse function.
703  * The tag table is built by the linker from all the __tagtable
704  * declarations.
705  */
706 static int __init parse_tag(const struct tag *tag)
707 {
708         extern struct tagtable __tagtable_begin, __tagtable_end;
709         struct tagtable *t;
710
711         for (t = &__tagtable_begin; t < &__tagtable_end; t++)
712                 if (tag->hdr.tag == t->tag) {
713                         t->parse(tag);
714                         break;
715                 }
716
717         return t < &__tagtable_end;
718 }
719
720 /*
721  * Parse all tags in the list, checking both the global and architecture
722  * specific tag tables.
723  */
724 static void __init parse_tags(const struct tag *t)
725 {
726         for (; t->hdr.size; t = tag_next(t))
727                 if (!parse_tag(t))
728                         printk(KERN_WARNING
729                                 "Ignoring unrecognised tag 0x%08x\n",
730                                 t->hdr.tag);
731 }
732
733 /*
734  * This holds our defaults.
735  */
736 static struct init_tags {
737         struct tag_header hdr1;
738         struct tag_core   core;
739         struct tag_header hdr2;
740         struct tag_mem32  mem;
741         struct tag_header hdr3;
742 } init_tags __initdata = {
743         { tag_size(tag_core), ATAG_CORE },
744         { 1, PAGE_SIZE, 0xff },
745         { tag_size(tag_mem32), ATAG_MEM },
746         { MEM_SIZE },
747         { 0, ATAG_NONE }
748 };
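/*
 * Note: these defaults only matter when no usable tag list is found.
 * They supply a core tag with default flags, page size and rootdev, and
 * a single MEM_SIZE (16MB) memory bank whose start address is filled in
 * with PHYS_OFFSET by setup_machine_tags() before the list is parsed.
 */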
749
750 static int __init customize_machine(void)
751 {
752         /* customizes platform devices, or adds new ones */
753         if (machine_desc->init_machine)
754                 machine_desc->init_machine();
755         return 0;
756 }
757 arch_initcall(customize_machine);
758
759 #ifdef CONFIG_KEXEC
760 static inline unsigned long long get_total_mem(void)
761 {
762         unsigned long total;
763
764         total = max_low_pfn - min_low_pfn;
765         return total << PAGE_SHIFT;
766 }
767
768 /**
769  * reserve_crashkernel() - reserves memory area for the crash kernel
770  *
771  * This function reserves the memory area given by the "crashkernel=" kernel
772  * command line parameter. The memory reserved is used by a dump capture
773  * kernel when the primary kernel is crashing.
774  */
775 static void __init reserve_crashkernel(void)
776 {
777         unsigned long long crash_size, crash_base;
778         unsigned long long total_mem;
779         int ret;
780
781         total_mem = get_total_mem();
782         ret = parse_crashkernel(boot_command_line, total_mem,
783                                 &crash_size, &crash_base);
784         if (ret)
785                 return;
786
787         ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
788         if (ret < 0) {
789                 printk(KERN_WARNING "crashkernel reservation failed - "
790                        "memory is in use (0x%lx)\n", (unsigned long)crash_base);
791                 return;
792         }
793
794         printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
795                "for crashkernel (System RAM: %ldMB)\n",
796                (unsigned long)(crash_size >> 20),
797                (unsigned long)(crash_base >> 20),
798                (unsigned long)(total_mem >> 20));
799
800         crashk_res.start = crash_base;
801         crashk_res.end = crash_base + crash_size - 1;
802         insert_resource(&iomem_resource, &crashk_res);
803 }
804 #else
805 static inline void reserve_crashkernel(void) {}
806 #endif /* CONFIG_KEXEC */
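/*
 * Usage sketch (illustrative values): booting with something like
 * "crashkernel=64M@0x30000000" asks parse_crashkernel() to return
 * crash_size = 64MB and crash_base = 0x30000000; the range is then
 * reserved with reserve_bootmem() and published as the crashk_res
 * resource so a kdump kernel can later be loaded there with kexec.
 */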
807
808 static void __init squash_mem_tags(struct tag *tag)
809 {
810         for (; tag->hdr.size; tag = tag_next(tag))
811                 if (tag->hdr.tag == ATAG_MEM)
812                         tag->hdr.tag = ATAG_NONE;
813 }
814
815 static struct machine_desc * __init setup_machine_tags(unsigned int nr)
816 {
817         struct tag *tags = (struct tag *)&init_tags;
818         struct machine_desc *mdesc = NULL, *p;
819         char *from = default_command_line;
820
821         init_tags.mem.start = PHYS_OFFSET;
822
823         /*
824          * locate machine in the list of supported machines.
825          */
826         for_each_machine_desc(p)
827                 if (nr == p->nr) {
828                         printk("Machine: %s\n", p->name);
829                         mdesc = p;
830                         break;
831                 }
832
833         if (!mdesc) {
834                 early_print("\nError: unrecognized/unsupported machine ID"
835                         " (r1 = 0x%08x).\n\n", nr);
836                 dump_machine_table(); /* does not return */
837         }
838
839         if (__atags_pointer)
840                 tags = phys_to_virt(__atags_pointer);
841         else if (mdesc->boot_params) {
842 #ifdef CONFIG_MMU
843                 /*
844                  * We are still executing with a minimal MMU mapping,
845                  * created on the presumption that the machine's default
846                  * boot params live in the first MB of RAM.  Anything else
847                  * will fault and silently hang the kernel at this point.
848                  */
849                 if (mdesc->boot_params < PHYS_OFFSET ||
850                     mdesc->boot_params >= PHYS_OFFSET + SZ_1M) {
851                         printk(KERN_WARNING
852                                "Default boot params at physical 0x%08lx out of reach\n",
853                                mdesc->boot_params);
854                 } else
855 #endif
856                 {
857                         tags = phys_to_virt(mdesc->boot_params);
858                 }
859         }
860
861 #if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
862         /*
863          * If we have the old style parameters, convert them to
864          * a tag list.
865          */
866         if (tags->hdr.tag != ATAG_CORE)
867                 convert_to_tag_list(tags);
868 #endif
869
870         if (tags->hdr.tag != ATAG_CORE) {
871 #if defined(CONFIG_OF)
872                 /*
873                  * If CONFIG_OF is set, then assume this is a reasonably
874                  * modern system that should pass boot parameters
875                  */
876                 early_print("Warning: Neither atags nor dtb found\n");
877 #endif
878                 tags = (struct tag *)&init_tags;
879         }
880
881         if (mdesc->fixup)
882                 mdesc->fixup(mdesc, tags, &from, &meminfo);
883
884         if (tags->hdr.tag == ATAG_CORE) {
885                 if (meminfo.nr_banks != 0)
886                         squash_mem_tags(tags);
887                 save_atags(tags);
888                 parse_tags(tags);
889         }
890
891         /* parse_early_param needs a boot_command_line */
892         strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);
893
894         return mdesc;
895 }
896
897
898 void __init setup_arch(char **cmdline_p)
899 {
900         struct machine_desc *mdesc;
901
902         unwind_init();
903
904         setup_processor();
905         mdesc = setup_machine_fdt(__atags_pointer);
906         if (!mdesc)
907                 mdesc = setup_machine_tags(machine_arch_type);
908         machine_desc = mdesc;
909         machine_name = mdesc->name;
910
911         if (mdesc->soft_reboot)
912                 reboot_setup("s");
913
914         init_mm.start_code = (unsigned long) _text;
915         init_mm.end_code   = (unsigned long) _etext;
916         init_mm.end_data   = (unsigned long) _edata;
917         init_mm.brk        = (unsigned long) _end;
918
919         /* populate cmd_line too for later use, preserving boot_command_line */
920         strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
921         *cmdline_p = cmd_line;
922
923         parse_early_param();
924
925         sanity_check_meminfo();
926         arm_memblock_init(&meminfo, mdesc);
927
928         paging_init(mdesc);
929         request_standard_resources(mdesc);
930
931         unflatten_device_tree();
932
933 #ifdef CONFIG_SMP
934         if (is_smp())
935                 smp_init_cpus();
936 #endif
937         reserve_crashkernel();
938
939         tcm_init();
940
941 #ifdef CONFIG_ZONE_DMA
942         if (mdesc->dma_zone_size) {
943                 extern unsigned long arm_dma_zone_size;
944                 arm_dma_zone_size = mdesc->dma_zone_size;
945         }
946 #endif
947 #ifdef CONFIG_MULTI_IRQ_HANDLER
948         handle_arch_irq = mdesc->handle_irq;
949 #endif
950
951 #ifdef CONFIG_VT
952 #if defined(CONFIG_VGA_CONSOLE)
953         conswitchp = &vga_con;
954 #elif defined(CONFIG_DUMMY_CONSOLE)
955         conswitchp = &dummy_con;
956 #endif
957 #endif
958         early_trap_init();
959
960         if (mdesc->init_early)
961                 mdesc->init_early();
962 }
963
964
965 static int __init topology_init(void)
966 {
967         int cpu;
968
969         for_each_possible_cpu(cpu) {
970                 struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
971                 cpuinfo->cpu.hotpluggable = 1;
972                 register_cpu(&cpuinfo->cpu, cpu);
973         }
974
975         return 0;
976 }
977 subsys_initcall(topology_init);
978
979 #ifdef CONFIG_HAVE_PROC_CPU
980 static int __init proc_cpu_init(void)
981 {
982         struct proc_dir_entry *res;
983
984         res = proc_mkdir("cpu", NULL);
985         if (!res)
986                 return -ENOMEM;
987         return 0;
988 }
989 fs_initcall(proc_cpu_init);
990 #endif
991
992 static const char *hwcap_str[] = {
993         "swp",
994         "half",
995         "thumb",
996         "26bit",
997         "fastmult",
998         "fpa",
999         "vfp",
1000         "edsp",
1001         "java",
1002         "iwmmxt",
1003         "crunch",
1004         "thumbee",
1005         "neon",
1006         "vfpv3",
1007         "vfpv3d16",
1008         "tls",
1009         "vfpv4",
1010         "idiva",
1011         "idivt",
1012         NULL
1013 };
1014
1015 static int c_show(struct seq_file *m, void *v)
1016 {
1017         int i;
1018
1019         seq_printf(m, "Processor\t: %s rev %d (%s)\n",
1020                    cpu_name, read_cpuid_id() & 15, elf_platform);
1021
1022 #if defined(CONFIG_SMP)
1023         for_each_online_cpu(i) {
1024                 /*
1025                  * glibc reads /proc/cpuinfo to determine the number of
1026                  * online processors, looking for lines beginning with
1027                  * "processor".  Give glibc what it expects.
1028                  */
1029                 seq_printf(m, "processor\t: %d\n", i);
1030                 seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
1031                            per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
1032                            (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
1033         }
1034 #else /* CONFIG_SMP */
1035         seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
1036                    loops_per_jiffy / (500000/HZ),
1037                    (loops_per_jiffy / (5000/HZ)) % 100);
1038 #endif
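        /*
         * Worked example for the BogoMIPS arithmetic above (hypothetical
         * loops_per_jiffy of 4980736 with HZ == 100): the integer part is
         * 4980736 / (500000/100) = 4980736 / 5000 = 996, and the fractional
         * part is (4980736 / (5000/100)) % 100 = 99614 % 100 = 14, so the
         * line reads "BogoMIPS  : 996.14".
         */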
1039
1040         /* dump out the processor features */
1041         seq_puts(m, "Features\t: ");
1042
1043         for (i = 0; hwcap_str[i]; i++)
1044                 if (elf_hwcap & (1 << i))
1045                         seq_printf(m, "%s ", hwcap_str[i]);
1046
1047         seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
1048         seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);
1049
1050         if ((read_cpuid_id() & 0x0008f000) == 0x00000000) {
1051                 /* pre-ARM7 */
1052                 seq_printf(m, "CPU part\t: %07x\n", read_cpuid_id() >> 4);
1053         } else {
1054                 if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
1055                         /* ARM7 */
1056                         seq_printf(m, "CPU variant\t: 0x%02x\n",
1057                                    (read_cpuid_id() >> 16) & 127);
1058                 } else {
1059                         /* post-ARM7 */
1060                         seq_printf(m, "CPU variant\t: 0x%x\n",
1061                                    (read_cpuid_id() >> 20) & 15);
1062                 }
1063                 seq_printf(m, "CPU part\t: 0x%03x\n",
1064                            (read_cpuid_id() >> 4) & 0xfff);
1065         }
1066         seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);
1067
1068         seq_puts(m, "\n");
1069
1070         seq_printf(m, "Hardware\t: %s\n", machine_name);
1071         seq_printf(m, "Revision\t: %04x\n", system_rev);
1072         seq_printf(m, "Serial\t\t: %08x%08x\n",
1073                    system_serial_high, system_serial_low);
1074
1075         return 0;
1076 }
1077
1078 static void *c_start(struct seq_file *m, loff_t *pos)
1079 {
1080         return *pos < 1 ? (void *)1 : NULL;
1081 }
1082
1083 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1084 {
1085         ++*pos;
1086         return NULL;
1087 }
1088
1089 static void c_stop(struct seq_file *m, void *v)
1090 {
1091 }
1092
1093 const struct seq_operations cpuinfo_op = {
1094         .start  = c_start,
1095         .next   = c_next,
1096         .stop   = c_stop,
1097         .show   = c_show
1098 };
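/*
 * Note on the seq_file hooks above: c_start() only returns a non-NULL
 * token for the very first position and c_next() always returns NULL,
 * so the whole /proc/cpuinfo report is produced by a single c_show()
 * call rather than one record per CPU.
 */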