/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 1995 Waldorf Electronics
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03 Ralf Baechle
 * Copyright (C) 1996 Stoned Elipot
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2002, 2007 Maciej W. Rozycki
 */
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/export.h>
#include <linux/screen_info.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/initrd.h>
#include <linux/root_dev.h>
#include <linux/highmem.h>
#include <linux/console.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kexec.h>
#include <linux/sizes.h>
#include <linux/device.h>
#include <linux/dma-contiguous.h>

#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/bugs.h>
#include <asm/cache.h>
#include <asm/cdmm.h>
#include <asm/cpu.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp-ops.h>
#include <asm/prom.h>
struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;

EXPORT_SYMBOL(cpu_data);

#ifdef CONFIG_VT
struct screen_info screen_info;
#endif
/*
 * Despite its name this variable is used even if we don't have PCI.
 */
unsigned int PCI_DMA_BUS_IS_PHYS;

EXPORT_SYMBOL(PCI_DMA_BUS_IS_PHYS);
/*
 * These are initialized so they are in the .data section
 */
unsigned long mips_machtype __read_mostly = MACH_UNKNOWN;

EXPORT_SYMBOL(mips_machtype);
struct boot_mem_map boot_mem_map;

static char __initdata command_line[COMMAND_LINE_SIZE];
char __initdata arcs_cmdline[COMMAND_LINE_SIZE];

#ifdef CONFIG_CMDLINE_BOOL
static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
#endif
/*
 * mips_io_port_base is the beginning of the address space to which x86-style
 * I/O ports are mapped.
 */
const unsigned long mips_io_port_base = -1;
EXPORT_SYMBOL(mips_io_port_base);
static struct resource code_resource = { .name = "Kernel code", };
static struct resource data_resource = { .name = "Kernel data", };

static void *detect_magic __initdata = detect_memory_region;
void __init add_memory_region(phys_addr_t start, phys_addr_t size, long type)
{
	int x = boot_mem_map.nr_map;
	int i;

	/* Sanity check */
	if (start + size < start) {
		pr_warn("Trying to add an invalid memory region, skipped\n");
		return;
	}

	/*
	 * Try to merge with existing entry, if any.
	 */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		struct boot_mem_map_entry *entry = boot_mem_map.map + i;
		unsigned long top;

		if (entry->type != type)
			continue;
		if (start + size < entry->addr)
			continue;			/* no overlap */
		if (entry->addr + entry->size < start)
			continue;			/* no overlap */

		top = max(entry->addr + entry->size, start + size);
		entry->addr = min(entry->addr, start);
		entry->size = top - entry->addr;

		return;
	}

	if (boot_mem_map.nr_map == BOOT_MEM_MAP_MAX) {
		pr_err("Ooops! Too many entries in the memory map!\n");
		return;
	}

	boot_mem_map.map[x].addr = start;
	boot_mem_map.map[x].size = size;
	boot_mem_map.map[x].type = type;
	boot_mem_map.nr_map++;
}
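
/*
 * Probe how much RAM is fitted starting at @start. detect_magic lives inside
 * the kernel image; on boards whose SDRAM aliases (wraps around), the same
 * bytes reappear at the alias period. The marker is compared against the copy
 * @size bytes above it for every power-of-two size between @sz_min and
 * @sz_max: the first size at which the copy reads back identically is assumed
 * to be the real amount of RAM, and @sz_max is assumed if no alias is found.
 */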
void __init detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_addr_t sz_max)
{
	void *dm = &detect_magic;
	phys_addr_t size;

	for (size = sz_min; size < sz_max; size <<= 1) {
		if (!memcmp(dm, dm + size, sizeof(detect_magic)))
			break;
	}

	pr_debug("Memory: %lluMB of RAM detected at 0x%llx (min: %lluMB, max: %lluMB)\n",
		 ((unsigned long long) size) / SZ_1M,
		 (unsigned long long) start,
		 ((unsigned long long) sz_min) / SZ_1M,
		 ((unsigned long long) sz_max) / SZ_1M);

	add_memory_region(start, size, BOOT_MEM_RAM);
}
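
/*
 * Typical use of detect_memory_region(): a board file calls it from its
 * plat_mem_setup(), for instance detect_memory_region(0, SZ_8M, SZ_256M) to
 * size aliased SDRAM starting at physical address 0 (the bounds here are
 * purely illustrative).
 */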
static void __init print_memory_map(void)
{
	int i;
	const int field = 2 * sizeof(unsigned long);

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		printk(KERN_INFO " memory: %0*Lx @ %0*Lx ",
		       field, (unsigned long long) boot_mem_map.map[i].size,
		       field, (unsigned long long) boot_mem_map.map[i].addr);

		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
			printk(KERN_CONT "(usable)\n");
			break;
		case BOOT_MEM_INIT_RAM:
			printk(KERN_CONT "(usable after init)\n");
			break;
		case BOOT_MEM_ROM_DATA:
			printk(KERN_CONT "(ROM data)\n");
			break;
		case BOOT_MEM_RESERVED:
			printk(KERN_CONT "(reserved)\n");
			break;
		default:
			printk(KERN_CONT "type %lu\n", boot_mem_map.map[i].type);
			break;
		}
	}
}
#ifdef CONFIG_BLK_DEV_INITRD

static int __init rd_start_early(char *p)
{
	unsigned long start = memparse(p, &p);

#ifdef CONFIG_64BIT
	/* Guess if the sign extension was forgotten by bootloader */
	if (start < XKPHYS && start > CKSEG0)
		start |= CKSEG0;
#endif
	initrd_start = start;
	initrd_end += start;
	return 0;
}
early_param("rd_start", rd_start_early);

static int __init rd_size_early(char *p)
{
	initrd_end += memparse(p, &p);
	return 0;
}
early_param("rd_size", rd_size_early);
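
/*
 * Note: "rd_start=" records the initrd base while "rd_size=" only adds the
 * length to initrd_end, so the two parameters are normally passed together;
 * with only one of them the sanity checks in init_initrd() below will reject
 * the range.
 */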
/* it returns the next free pfn after initrd */
static unsigned long __init init_initrd(void)
{
	unsigned long end;

	/*
	 * Board specific code or command line parser should have
	 * already set up initrd_start and initrd_end. In these cases
	 * perform sanity checks and use them if all looks good.
	 */
	if (!initrd_start || initrd_end <= initrd_start)
		goto disable;

	if (initrd_start & ~PAGE_MASK) {
		pr_err("initrd start must be page aligned\n");
		goto disable;
	}
	if (initrd_start < PAGE_OFFSET) {
		pr_err("initrd start < PAGE_OFFSET\n");
		goto disable;
	}

	/*
	 * Sanitize initrd addresses. For example firmware
	 * can't guess if they need to pass them through
	 * 64-bit values if the kernel has been built in pure
	 * 32-bit. We also need to switch from KSEG0 to XKPHYS
	 * addresses now, so the code can safely use __pa().
	 */
	end = __pa(initrd_end);
	initrd_end = (unsigned long)__va(end);
	initrd_start = (unsigned long)__va(__pa(initrd_start));

	ROOT_DEV = Root_RAM0;
	return PFN_UP(end);
disable:
	initrd_start = 0;
	initrd_end = 0;
	return 0;
}
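
/*
 * Called at the end of bootmem_init(), once max_low_pfn is known: re-checks
 * that the initrd really lies within usable low memory and, if so, reserves
 * its pages in the bootmem allocator so they are not handed out as free
 * memory.
 */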
static void __init finalize_initrd(void)
{
	unsigned long size = initrd_end - initrd_start;

	if (size == 0) {
		printk(KERN_INFO "Initrd not found or empty");
		goto disable;
	}
	if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
		printk(KERN_ERR "Initrd extends beyond end of memory");
		goto disable;
	}

	reserve_bootmem(__pa(initrd_start), size, BOOTMEM_DEFAULT);
	initrd_below_start_ok = 1;

	pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n",
		initrd_start, size);
	return;
disable:
	printk(KERN_CONT " - disabling initrd\n");
	initrd_start = 0;
	initrd_end = 0;
}
#else /* !CONFIG_BLK_DEV_INITRD */

static unsigned long __init init_initrd(void)
{
	return 0;
}

#define finalize_initrd()	do {} while (0)

#endif
/*
 * Initialize the bootmem allocator. It also sets up initrd-related data
 * if needed.
 */
#if defined(CONFIG_SGI_IP27) || (defined(CONFIG_CPU_LOONGSON3) && defined(CONFIG_NUMA))

static void __init bootmem_init(void)
{
	init_initrd();
	finalize_initrd();
}

#else /* !CONFIG_SGI_IP27 */
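
/*
 * The common bootmem_init() below works in three steps: scan boot_mem_map for
 * the lowest/highest usable page frame numbers, initialize the bootmem bitmap
 * just above the kernel image (and the initrd, if any), then free every fully
 * usable low-memory range into the allocator while keeping the kernel, the
 * bitmap and the initrd reserved.
 */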
static void __init bootmem_init(void)
{
	unsigned long reserved_end;
	unsigned long mapstart = ~0UL;
	unsigned long bootmap_size;
	int i;

	/*
	 * Sanity check any INITRD first. We don't take it into account
	 * for bootmem setup initially, rely on the end-of-kernel-code
	 * as our memory range starting point. Once bootmem is inited we
	 * will reserve the area used for the initrd.
	 */
	init_initrd();
	reserved_end = (unsigned long) PFN_UP(__pa_symbol(&_end));

	/*
	 * max_low_pfn is not a number of pages. The number of pages
	 * of the system is given by 'max_low_pfn - min_low_pfn'.
	 */
	min_low_pfn = ~0UL;
	max_low_pfn = 0;
	/*
	 * Find the highest page frame number we have available.
	 */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start, end;

		if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
			continue;

		start = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr
				+ boot_mem_map.map[i].size);

		if (end > max_low_pfn)
			max_low_pfn = end;
		if (start < min_low_pfn)
			min_low_pfn = start;
		if (end <= reserved_end)
			continue;
#ifdef CONFIG_BLK_DEV_INITRD
		/* mapstart should be after initrd_end */
		if (initrd_end && end <= (unsigned long)PFN_UP(__pa(initrd_end)))
			continue;
#endif
		if (start >= mapstart)
			continue;
		mapstart = max(reserved_end, start);
	}
	if (min_low_pfn >= max_low_pfn)
		panic("Incorrect memory mapping !!!");
	if (min_low_pfn > ARCH_PFN_OFFSET) {
		pr_info("Wasting %lu bytes for tracking %lu unused pages\n",
			(min_low_pfn - ARCH_PFN_OFFSET) * sizeof(struct page),
			min_low_pfn - ARCH_PFN_OFFSET);
	} else if (min_low_pfn < ARCH_PFN_OFFSET) {
		pr_info("%lu free pages won't be used\n",
			ARCH_PFN_OFFSET - min_low_pfn);
	}
	min_low_pfn = ARCH_PFN_OFFSET;
	/*
	 * Determine low and high memory ranges
	 */
	max_pfn = max_low_pfn;
	if (max_low_pfn > PFN_DOWN(HIGHMEM_START)) {
#ifdef CONFIG_HIGHMEM
		highstart_pfn = PFN_DOWN(HIGHMEM_START);
		highend_pfn = max_low_pfn;
#endif
		max_low_pfn = PFN_DOWN(HIGHMEM_START);
	}
	/*
	 * Initialize the boot-time allocator with low memory only.
	 */
	bootmap_size = init_bootmem_node(NODE_DATA(0), mapstart,
					 min_low_pfn, max_low_pfn);
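
	/*
	 * Also make the usable ranges known to memblock (all on node 0) so
	 * that memblock-based users, such as the CMA reservation done later
	 * from arch_mem_init(), see the same physical layout; highmem
	 * portions are clamped off when CONFIG_HIGHMEM is not enabled.
	 */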
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start, end;

		start = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr
				+ boot_mem_map.map[i].size);

		if (start <= min_low_pfn)
			start = min_low_pfn;
		if (start >= end)
			continue;

#ifndef CONFIG_HIGHMEM
		if (end > max_low_pfn)
			end = max_low_pfn;

		/*
		 * ... finally, is the area going away?
		 */
		if (end <= start)
			continue;
#endif

		memblock_add_node(PFN_PHYS(start), PFN_PHYS(end - start), 0);
	}
	/*
	 * Register fully available low RAM pages with the bootmem allocator.
	 */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start, end, size;

		start = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr
				+ boot_mem_map.map[i].size);

		/*
		 * Reserve usable memory.
		 */
		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
			break;
		case BOOT_MEM_INIT_RAM:
			memory_present(0, start, end);
			continue;
		default:
			/* Not usable memory */
			continue;
		}

		/*
		 * Round the start address of usable memory up and the end
		 * of the usable range down.
		 */
		if (start >= max_low_pfn)
			continue;
		if (start < reserved_end)
			start = reserved_end;
		if (end > max_low_pfn)
			end = max_low_pfn;

		/*
		 * ... finally, is the area going away?
		 */
		if (end <= start)
			continue;
		size = end - start;

		/* Register lowmem ranges */
		free_bootmem(PFN_PHYS(start), size << PAGE_SHIFT);
		memory_present(0, start, end);
	}
	/*
	 * Reserve the bootmap memory.
	 */
	reserve_bootmem(PFN_PHYS(mapstart), bootmap_size, BOOTMEM_DEFAULT);

	/*
	 * Reserve initrd memory if needed.
	 */
	finalize_initrd();
}

#endif	/* CONFIG_SGI_IP27 */
/*
 * arch_mem_init - initialize memory management subsystem
 *
 *  o plat_mem_setup() detects the memory configuration and will record detected
 *    memory areas using add_memory_region.
 *
 * At this stage the memory configuration of the system is known to the
 * kernel but the generic memory management system is still entirely
 * uninitialized.
 *
 *  o bootmem_init()
 *  o sparse_init()
 *  o paging_init()
 *  o dma_contiguous_reserve()
 *
 * At this stage the bootmem allocator is ready to use.
 *
 * NOTE: historically plat_mem_setup did the entire platform initialization.
 *	 This was rather impractical because it meant plat_mem_setup had to
 *	 get away without any kind of memory allocator. To keep old code from
 *	 breaking, plat_setup was just renamed to plat_mem_setup and a second
 *	 platform initialization hook for anything else was introduced.
 */
static int usermem __initdata;
static int __init early_parse_mem(char *p)
{
	phys_addr_t start, size;

	/*
	 * If a user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		boot_mem_map.nr_map = 0;
		usermem = 1;
	}
	start = 0;
	size = memparse(p, &p);
	if (*p == '@')
		start = memparse(p + 1, &p);

	add_memory_region(start, size, BOOT_MEM_RAM);
	return 0;
}
early_param("mem", early_parse_mem);
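
/*
 * Example: "mem=64M@0x10000000" on the kernel command line replaces the
 * firmware-provided map with a single 64 MB region at that physical address;
 * the first "mem=" argument seen clears boot_mem_map, additional ones append
 * further regions.
 */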
#ifdef CONFIG_PROC_VMCORE
unsigned long setup_elfcorehdr, setup_elfcorehdr_size;
static int __init early_parse_elfcorehdr(char *p)
{
	int i;

	setup_elfcorehdr = memparse(p, &p);

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start = boot_mem_map.map[i].addr;
		unsigned long end = (boot_mem_map.map[i].addr +
				     boot_mem_map.map[i].size);
		if (setup_elfcorehdr >= start && setup_elfcorehdr < end) {
			/*
			 * Reserve from the elf core header to the end of
			 * the memory segment, that should all be kdump
			 * reserved memory.
			 */
			setup_elfcorehdr_size = end - setup_elfcorehdr;
			break;
		}
	}
	/*
	 * If we don't find it in the memory map, then we shouldn't
	 * have to worry about it, as the new kernel won't use it.
	 */
	return 0;
}
early_param("elfcorehdr", early_parse_elfcorehdr);
#endif
static void __init arch_mem_addpart(phys_addr_t mem, phys_addr_t end, int type)
{
	phys_addr_t size;
	int i;

	size = end - mem;
	if (!size)
		return;

	/* Make sure it is in the boot_mem_map */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		if (mem >= boot_mem_map.map[i].addr &&
		    mem < (boot_mem_map.map[i].addr +
			   boot_mem_map.map[i].size))
			return;
	}
	add_memory_region(mem, size, type);
}
#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
	unsigned long long total;

	total = max_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}
static void __init mips_parse_crashkernel(void)
{
	unsigned long long total_mem;
	unsigned long long crash_size, crash_base;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret != 0 || crash_size <= 0)
		return;

	crashk_res.start = crash_base;
	crashk_res.end	 = crash_base + crash_size - 1;
}
static void __init request_crashkernel(struct resource *res)
{
	int ret;

	ret = request_resource(res, &crashk_res);
	if (!ret)
		pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n",
			(unsigned long)((crashk_res.end -
					 crashk_res.start + 1) >> 20),
			(unsigned long)(crashk_res.start >> 20));
}
#else /* !defined(CONFIG_KEXEC) */
static void __init mips_parse_crashkernel(void)
{
}

static void __init request_crashkernel(struct resource *res)
{
}
#endif /* !defined(CONFIG_KEXEC) */
static void __init arch_mem_init(char **cmdline_p)
{
	struct memblock_region *reg;
	extern void plat_mem_setup(void);

	/* call board setup routine */
	plat_mem_setup();

	/*
	 * Make sure all kernel memory is in the maps. The "UP" and
	 * "DOWN" are opposite for initdata since if it crosses over
	 * into another memory section you don't want that to be
	 * freed when the initdata is freed.
	 */
	arch_mem_addpart(PFN_DOWN(__pa_symbol(&_text)) << PAGE_SHIFT,
			 PFN_UP(__pa_symbol(&_edata)) << PAGE_SHIFT,
			 BOOT_MEM_RAM);
	arch_mem_addpart(PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT,
			 PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT,
			 BOOT_MEM_INIT_RAM);

	pr_info("Determined physical RAM map:\n");
	print_memory_map();
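
	/*
	 * Assemble the final boot command line: with CONFIG_CMDLINE_BOOL the
	 * built-in CONFIG_CMDLINE is appended to the firmware-provided
	 * arcs_cmdline (or, with CONFIG_CMDLINE_OVERRIDE, replaces it
	 * entirely); otherwise the firmware command line is used as-is.
	 */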
#ifdef CONFIG_CMDLINE_BOOL
#ifdef CONFIG_CMDLINE_OVERRIDE
	strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
#else
	if (builtin_cmdline[0]) {
		strlcat(arcs_cmdline, " ", COMMAND_LINE_SIZE);
		strlcat(arcs_cmdline, builtin_cmdline, COMMAND_LINE_SIZE);
	}
	strlcpy(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
#endif
#else
	strlcpy(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
#endif
	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);

	*cmdline_p = command_line;
	parse_early_param();

	if (usermem) {
		pr_info("User-defined physical RAM map:\n");
		print_memory_map();
	}

	bootmem_init();
#ifdef CONFIG_PROC_VMCORE
	if (setup_elfcorehdr && setup_elfcorehdr_size) {
		printk(KERN_INFO "kdump reserved memory at %lx-%lx\n",
		       setup_elfcorehdr, setup_elfcorehdr_size);
		reserve_bootmem(setup_elfcorehdr, setup_elfcorehdr_size,
				BOOTMEM_DEFAULT);
	}
#endif
	mips_parse_crashkernel();
#ifdef CONFIG_KEXEC
	if (crashk_res.start != crashk_res.end)
		reserve_bootmem(crashk_res.start,
				crashk_res.end - crashk_res.start + 1,
				BOOTMEM_DEFAULT);
#endif
	device_tree_init();
	sparse_init();
	plat_swiotlb_setup();
	paging_init();

	dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
	/* Tell bootmem about cma reserved memblock section */
	for_each_memblock(reserved, reg)
		if (reg->size != 0)
			reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
}
static void __init resource_init(void)
{
	int i;

	if (UNCAC_BASE != IO_BASE)
		return;

	code_resource.start = __pa_symbol(&_text);
	code_resource.end = __pa_symbol(&_etext) - 1;
	data_resource.start = __pa_symbol(&_etext);
	data_resource.end = __pa_symbol(&_edata) - 1;

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		struct resource *res;
		unsigned long start, end;

		start = boot_mem_map.map[i].addr;
		end = boot_mem_map.map[i].addr + boot_mem_map.map[i].size - 1;
		if (start >= HIGHMEM_START)
			continue;
		if (end >= HIGHMEM_START)
			end = HIGHMEM_START - 1;

		res = alloc_bootmem(sizeof(struct resource));
		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
		case BOOT_MEM_INIT_RAM:
		case BOOT_MEM_ROM_DATA:
			res->name = "System RAM";
			break;
		case BOOT_MEM_RESERVED:
		default:
			res->name = "reserved";
			break;
		}

		res->start = start;
		res->end = end;

		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
		request_resource(&iomem_resource, res);

		/*
		 * We don't know which RAM region contains kernel data,
		 * so we try it repeatedly and let the resource manager
		 * test it.
		 */
		request_resource(res, &code_resource);
		request_resource(res, &data_resource);
		request_crashkernel(res);
	}
}
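
/*
 * Clamp the set of possible CPUs to nr_cpu_ids (e.g. as limited by the
 * "nr_cpus=" parameter) so per-CPU resources are only allocated for CPUs
 * that can actually be brought up.
 */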
#ifdef CONFIG_SMP
static void __init prefill_possible_map(void)
{
	int i, possible = num_possible_cpus();

	if (possible > nr_cpu_ids)
		possible = nr_cpu_ids;

	for (i = 0; i < possible; i++)
		set_cpu_possible(i, true);
	for (; i < NR_CPUS; i++)
		set_cpu_possible(i, false);

	nr_cpu_ids = possible;
}
#else
static inline void prefill_possible_map(void) {}
#endif
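
/*
 * setup_arch() is the architecture entry point called early from
 * start_kernel(): it probes the CPU, runs firmware/board setup, assembles the
 * memory map and command line, and brings up the bootmem allocator before the
 * rest of the kernel starts allocating memory.
 */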
void __init setup_arch(char **cmdline_p)
{
	cpu_probe();
	prom_init();

	setup_early_fdc_console();
#ifdef CONFIG_EARLY_PRINTK
	setup_early_printk();
#endif
	cpu_report();
	check_bugs_early();

#if defined(CONFIG_VT)
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	arch_mem_init(cmdline_p);

	resource_init();
	plat_smp_setup();
	prefill_possible_map();

	cpu_cache_init();
}
unsigned long kernelsp[NR_CPUS];
unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;
#ifdef CONFIG_DEBUG_FS
struct dentry *mips_debugfs_dir;
static int __init debugfs_mips(void)
{
	struct dentry *d;

	d = debugfs_create_dir("mips", NULL);
	if (!d)
		return -ENOMEM;
	mips_debugfs_dir = d;
	return 0;
}
arch_initcall(debugfs_mips);
#endif