*/
unsigned long memory_start;
EXPORT_SYMBOL(memory_start);
-unsigned long memory_end; /* due to mm/nommu.c */
unsigned long memory_size;
EXPORT_SYMBOL(memory_size);
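+/* amount of RAM covered by the kernel's direct mapping (lowmem) */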
+unsigned long lowmem_size;
/*
* paging_init() sets up the page tables - in fact we've already done this.
/* Clear all zones */
memset(zones_size, 0, sizeof(zones_size));
- zones_size[ZONE_DMA] = max_mapnr;
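+ /* size the single populated zone up to max_pfn rather than max_mapnr */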
+ zones_size[ZONE_DMA] = max_pfn;
free_area_init(zones_size);
}
/* Find main memory where the kernel is */
for_each_memblock(memory, reg) {
memory_start = (u32)reg->base;
- memory_end = (u32) reg->base + reg->size;
+ lowmem_size = reg->size;
if ((memory_start <= (u32)_text) &&
- ((u32)_text <= memory_end)) {
- memory_size = memory_end - memory_start;
+ ((u32)_text <= (memory_start + lowmem_size - 1))) {
+ memory_size = lowmem_size;
PAGE_OFFSET = memory_start;
- printk(KERN_INFO "%s: Main mem: 0x%x-0x%x, "
+ printk(KERN_INFO "%s: Main mem: 0x%x, "
"size 0x%08x\n", __func__, (u32) memory_start,
- (u32) memory_end, (u32) memory_size);
+ (u32) memory_size);
break;
}
}
- if (!memory_start || !memory_end) {
- panic("%s: Missing memory setting 0x%08x-0x%08x\n",
- __func__, (u32) memory_start, (u32) memory_end);
+ if (!memory_start || !memory_size) {
+ panic("%s: Missing memory setting 0x%08x, size=0x%08x\n",
+ __func__, (u32) memory_start, (u32) memory_size);
}
/* Reserve the region where the kernel is */
kernel_align_start = PAGE_DOWN((u32)_text);
/* ALIGN can be removed because _end in vmlinux.lds.S is aligned */
kernel_align_size = PAGE_UP((u32)klimit) - kernel_align_start;
- memblock_reserve(kernel_align_start, kernel_align_size);
- printk(KERN_INFO "%s: kernel addr=0x%08x-0x%08x size=0x%08x\n",
+ printk(KERN_INFO "%s: kernel addr:0x%08x-0x%08x size=0x%08x\n",
__func__, kernel_align_start, kernel_align_start
+ kernel_align_size, kernel_align_size);
-
+ memblock_reserve(kernel_align_start, kernel_align_size);
#endif
/*
* Kernel:
min_low_pfn = memory_start >> PAGE_SHIFT; /* minimum for allocation */
/* RAM is assumed contiguous */
num_physpages = max_mapnr = memory_size >> PAGE_SHIFT;
- max_pfn = max_low_pfn = memory_end >> PAGE_SHIFT;
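+ /* lowmem ends at max_low_pfn; max_pfn covers all of RAM */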
+ max_low_pfn = ((u64)memory_start + (u64)lowmem_size) >> PAGE_SHIFT;
+ max_pfn = ((u64)memory_start + (u64)memory_size) >> PAGE_SHIFT;
printk(KERN_INFO "%s: max_mapnr: %#lx\n", __func__, max_mapnr);
printk(KERN_INFO "%s: min_low_pfn: %#lx\n", __func__, min_low_pfn);
printk(KERN_INFO "%s: max_low_pfn: %#lx\n", __func__, max_low_pfn);
+ printk(KERN_INFO "%s: max_pfn: %#lx\n", __func__, max_pfn);
/*
* Find an area to use for the bootmem bitmap.
memblock_reserve(PFN_UP(TOPHYS((u32)klimit)) << PAGE_SHIFT, map_size);
- /* free bootmem is whole main memory */
- free_bootmem(memory_start, memory_size);
+ /* the free bootmem region covers the whole of lowmem */
+ free_bootmem(memory_start, lowmem_size);
/* reserve allocated blocks */
for_each_memblock(reserved, reg) {
- pr_debug("reserved - 0x%08x-0x%08x\n",
- (u32) reg->base, (u32) reg->size);
- reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
+ unsigned long top = reg->base + reg->size - 1;
+
+ pr_debug("reserved - 0x%08x-0x%08x, %lx, %lx\n",
+ (u32) reg->base, (u32) reg->size, top,
+ memory_start + lowmem_size - 1);
+
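+ /* bootmem only manages lowmem, so clip reservations that extend above it */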
+ if (top <= (memory_start + lowmem_size - 1)) {
+ reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
+ } else if (reg->base < (memory_start + lowmem_size - 1)) {
+ unsigned long trunc_size = memory_start + lowmem_size -
+ reg->base;
+ reserve_bootmem(reg->base, trunc_size, BOOTMEM_DEFAULT);
+ }
}
+
#ifdef CONFIG_MMU
init_bootmem_done = 1;
#endif
void __init mem_init(void)
{
- high_memory = (void *)__va(memory_end);
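+ /* high_memory marks the top of the directly mapped (lowmem) region */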
+ high_memory = (void *)__va(memory_start + lowmem_size - 1);
+
/* this will put all memory onto the freelists */
totalram_pages += free_all_bootmem();
maxmem = memparse(p, &p);
if (maxmem && memory_size > maxmem) {
memory_size = maxmem;
- memory_end = memory_start + memory_size;
memblock.memory.regions[0].size = memory_size;
}
}
}
/* Find main memory where the kernel is */
memory_start = (u32) memblock.memory.regions[0].base;
- memory_end = (u32) memblock.memory.regions[0].base +
- (u32) memblock.memory.regions[0].size;
- memory_size = memory_end - memory_start;
+ lowmem_size = memory_size = (u32) memblock.memory.regions[0].size;
+
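+ /* RAM above CONFIG_LOWMEM_SIZE cannot be mapped directly, so drop it */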
+ if (lowmem_size > CONFIG_LOWMEM_SIZE) {
+ lowmem_size = CONFIG_LOWMEM_SIZE;
+ memory_size = lowmem_size;
+ }
mm_cmdline_setup(); /* FIXME parse args from command line - not used */
ioremap_base = 0xfe000000UL; /* for now, could be 0xfffff000 */
#endif /* CONFIG_HIGHMEM_START_BOOL */
ioremap_bot = ioremap_base;
-
/* Initialize the context management stuff */
mmu_context_init();
+
+ /* Shortly after that, the entire linear mapping will be available */
+ /* This will also cause the unflattened device tree to be allocated
+ * within the 768MB lowmem limit */
+ memblock_set_current_limit(memory_start + lowmem_size - 1);
}
/* This is only called until mem_init is done. */