From: Paul Mundt
Date: Thu, 13 May 2010 08:48:05 +0000 (+0900)
Subject: Merge branch 'sh/lmb'
X-Git-Url: https://git.karo-electronics.de/?a=commitdiff_plain;h=ef4ed97d6bd91aa41907181e80a7feaf2721719a;p=linux-beck.git

Merge branch 'sh/lmb'

Conflicts:
	arch/sh/kernel/setup.c
---

ef4ed97d6bd91aa41907181e80a7feaf2721719a
diff --cc arch/sh/kernel/machine_kexec.c
index 0e90c7f9564f,7f68fc0e89e8..5a559e666eb3
--- a/arch/sh/kernel/machine_kexec.c
+++ b/arch/sh/kernel/machine_kexec.c
@@@ -147,7 -147,61 +147,64 @@@ void arch_crash_save_vmcoreinfo(void
  	VMCOREINFO_SYMBOL(node_data);
  	VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
  #endif
 +#ifdef CONFIG_X2TLB
 +	VMCOREINFO_CONFIG(X2TLB);
 +#endif
  }
+ 
+ void __init reserve_crashkernel(void)
+ {
+ 	unsigned long long crash_size, crash_base;
+ 	int ret;
+ 
+ 	/* this is necessary because of lmb_phys_mem_size() */
+ 	lmb_analyze();
+ 
+ 	ret = parse_crashkernel(boot_command_line, lmb_phys_mem_size(),
+ 			&crash_size, &crash_base);
+ 	if (ret == 0 && crash_size > 0) {
+ 		crashk_res.start = crash_base;
+ 		crashk_res.end = crash_base + crash_size - 1;
+ 	}
+ 
+ 	if (crashk_res.end == crashk_res.start)
+ 		goto disable;
+ 
+ 	crash_size = PAGE_ALIGN(crashk_res.end - crashk_res.start + 1);
+ 	if (!crashk_res.start) {
+ 		unsigned long max = lmb_end_of_DRAM() - memory_limit;
+ 		crashk_res.start = __lmb_alloc_base(crash_size, PAGE_SIZE, max);
+ 		if (!crashk_res.start) {
+ 			pr_err("crashkernel allocation failed\n");
+ 			goto disable;
+ 		}
+ 	} else {
+ 		ret = lmb_reserve(crashk_res.start, crash_size);
+ 		if (unlikely(ret < 0)) {
+ 			pr_err("crashkernel reservation failed - "
+ 			       "memory is in use\n");
+ 			goto disable;
+ 		}
+ 	}
+ 
+ 	crashk_res.end = crashk_res.start + crash_size - 1;
+ 
+ 	/*
+ 	 * Crash kernel trumps memory limit
+ 	 */
+ 	if ((lmb_end_of_DRAM() - memory_limit) <= crashk_res.end) {
+ 		memory_limit = 0;
+ 		pr_info("Disabled memory limit for crashkernel\n");
+ 	}
+ 
+ 	pr_info("Reserving %ldMB of memory at 0x%08lx "
+ 		"for crashkernel (System RAM: %ldMB)\n",
+ 		(unsigned long)(crash_size >> 20),
+ 		(unsigned long)(crashk_res.start),
+ 		(unsigned long)(lmb_phys_mem_size() >> 20));
+ 
+ 	return;
+ 
+ disable:
+ 	crashk_res.start = crashk_res.end = 0;
+ }
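The reserve_crashkernel() added above relies on parse_crashkernel() to pull a size and an optional base address out of the crashkernel= boot parameter before any LMB reservation is attempted. Below is a minimal userspace sketch of that parsing for the simple crashkernel=size[@offset] form; parse_size() and parse_crashkernel_simple() are names invented for this sketch, and they only mimic the behaviour of the kernel's memparse() and parse_crashkernel() helpers rather than reproduce them.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Parse a size with an optional K/M/G suffix, in the spirit of memparse(). */
static unsigned long long parse_size(const char *s, char **end)
{
	unsigned long long val = strtoull(s, end, 0);

	switch (**end) {
	case 'G': case 'g': val <<= 10; /* fall through */
	case 'M': case 'm': val <<= 10; /* fall through */
	case 'K': case 'k': val <<= 10; (*end)++; break;
	}
	return val;
}

/*
 * Extract crash_size and crash_base from a command line containing
 * "crashkernel=size[@offset]".  Returns 0 on success, -1 otherwise.
 */
static int parse_crashkernel_simple(const char *cmdline,
				    unsigned long long *crash_size,
				    unsigned long long *crash_base)
{
	const char *p = strstr(cmdline, "crashkernel=");
	char *end;

	if (!p)
		return -1;
	p += strlen("crashkernel=");

	*crash_size = parse_size(p, &end);
	*crash_base = 0;	/* 0 => let the allocator pick the base */
	if (*end == '@')
		*crash_base = parse_size(end + 1, &end);
	return 0;
}

int main(void)
{
	unsigned long long size, base;

	if (parse_crashkernel_simple("root=/dev/sda1 crashkernel=64M@16M",
				     &size, &base) == 0)
		printf("size=%lluMB base=0x%llx\n", size >> 20, base);
	return 0;
}

With a command line of "root=/dev/sda1 crashkernel=64M@16M" this prints size=64MB base=0x1000000; a zero base is the cue for reserve_crashkernel() above to let __lmb_alloc_base() choose the region itself.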
diff --cc arch/sh/kernel/setup.c
index 4f1585f41f2b,57bd93838f15..272734681d29
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@@ -287,100 -218,37 +218,18 @@@ void __init __add_active_range(unsigne
  	request_resource(res, &data_resource);
  	request_resource(res, &bss_resource);
  
- 	add_active_range(nid, start_pfn, end_pfn);
- }
- 
- void __init setup_bootmem_allocator(unsigned long free_pfn)
- {
- 	unsigned long bootmap_size;
- 	unsigned long bootmap_pages, bootmem_paddr;
- 	u64 total_pages = (lmb_end_of_DRAM() - __MEMORY_START) >> PAGE_SHIFT;
- 	int i;
- 
- 	bootmap_pages = bootmem_bootmap_pages(total_pages);
- 
- 	bootmem_paddr = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
  	/*
- 	 * Find a proper area for the bootmem bitmap. After this
- 	 * bootstrap step all allocations (until the page allocator
- 	 * is intact) must be done via bootmem_alloc().
+ 	 * Also make sure that there is a PMB mapping that covers this
+ 	 * range before we attempt to activate it, to avoid reset by MMU.
+ 	 * We can hit this path with NUMA or memory hot-add.
  	 */
- 	bootmap_size = init_bootmem_node(NODE_DATA(0),
- 					 bootmem_paddr >> PAGE_SHIFT,
- 					 min_low_pfn, max_low_pfn);
- 
- 	/* Add active regions with valid PFNs. */
- 	for (i = 0; i < lmb.memory.cnt; i++) {
- 		unsigned long start_pfn, end_pfn;
- 		start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
- 		end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
- 		__add_active_range(0, start_pfn, end_pfn);
- 	}
- 
- 	/*
- 	 * Add all physical memory to the bootmem map and mark each
- 	 * area as present.
- 	 */
- 	register_bootmem_low_pages();
- 
- 	/* Reserve the sections we're already using. */
- 	for (i = 0; i < lmb.reserved.cnt; i++)
- 		reserve_bootmem(lmb.reserved.region[i].base,
- 				lmb_size_bytes(&lmb.reserved, i),
- 				BOOTMEM_DEFAULT);
- 
- 	node_set_online(0);
+ 	pmb_bolt_mapping((unsigned long)__va(start), start, end - start,
+ 			 PAGE_KERNEL);
  
- 	sparse_memory_present_with_active_regions(0);
- 
- 	check_for_initrd();
- 
- 	reserve_crashkernel();
- }
- 
- #ifndef CONFIG_NEED_MULTIPLE_NODES
- static void __init setup_memory(void)
- {
- 	unsigned long start_pfn;
- 	u64 base = min_low_pfn << PAGE_SHIFT;
- 	u64 size = (max_low_pfn << PAGE_SHIFT) - base;
- 
- 	/*
- 	 * Partially used pages are not usable - thus
- 	 * we are rounding upwards:
- 	 */
- 	start_pfn = PFN_UP(__pa(_end));
- 
- 	lmb_add(base, size);
- 
- 	/*
- 	 * Reserve the kernel text and
- 	 * Reserve the bootmem bitmap. We do this in two steps (first step
- 	 * was init_bootmem()), because this catches the (definitely buggy)
- 	 * case of us accidentally initializing the bootmem allocator with
- 	 * an invalid RAM area.
- 	 */
- 	lmb_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
- 		    (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) -
- 		    (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET));
- 
- 	/*
- 	 * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
- 	 */
- 	if (CONFIG_ZERO_PAGE_OFFSET != 0)
- 		lmb_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET);
- 
- 	lmb_analyze();
- 	lmb_dump_all();
- 
- 	setup_bootmem_allocator(start_pfn);
+ 	add_active_range(nid, start_pfn, end_pfn);
  }
- #else
- extern void __init setup_memory(void);
- #endif
  
- void __init __attribute__ ((weak)) plat_early_device_setup(void)
 -/*
 - * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by
 - * is_kdump_kernel() to determine if we are booting after a panic. Hence
 - * ifdef it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE.
 - */
 -#ifdef CONFIG_CRASH_DUMP
 -/* elfcorehdr= specifies the location of elf core header
 - * stored by the crashed kernel.
 - */
 -static int __init parse_elfcorehdr(char *arg)
 -{
 -	if (!arg)
 -		return -EINVAL;
 -	elfcorehdr_addr = memparse(arg, &arg);
 -	return 0;
 -}
 -early_param("elfcorehdr", parse_elfcorehdr);
 -#endif
 -
+ void __init __weak plat_early_device_setup(void)
  {
  }
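The tail of the setup.c hunk swaps the open-coded __attribute__ ((weak)) stub for the kernel's __weak shorthand. The fragment below sketches the underlying weak-symbol idiom with the plain GCC attribute; the generic.c/board.c split and the register_board_devices() hook are assumptions made for illustration, not the kernel's actual layout.

/* generic.c: the empty weak default, analogous to the one in setup.c. */
#include <stdio.h>

void __attribute__((weak)) plat_early_device_setup(void)
{
	/* weak default: nothing to do */
}

int main(void)
{
	plat_early_device_setup();
	printf("early platform setup complete\n");
	return 0;
}

/*
 * board.c: a board that needs early registration overrides the stub by
 * defining a strong symbol with the same name; at link time the strong
 * definition replaces the weak one above:
 *
 *	void plat_early_device_setup(void)
 *	{
 *		register_board_devices();	(hypothetical board hook)
 *	}
 */

Linking board.c alongside generic.c makes the strong definition win, which is how a board provides its own plat_early_device_setup() while every other platform silently gets the empty default.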