/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>

#include <asm/mach-types.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

static unsigned long phys_initrd_start __initdata = 0;
static unsigned long phys_initrd_size __initdata = 0;

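/*
 * Parse the "initrd=start[KMG],size[KMG]" command line parameter;
 * the start address is recorded as a physical address.
 */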
static int __init early_initrd(char *p)
{
	unsigned long start, size;
	char *endp;

	start = memparse(p, &endp);
	if (*endp == ',') {
		size = memparse(endp + 1, NULL);

		phys_initrd_start = start;
		phys_initrd_size = size;
	}
	return 0;
}
early_param("initrd", early_initrd);

static int __init parse_tag_initrd(const struct tag *tag)
{
	printk(KERN_WARNING "ATAG_INITRD is deprecated; "
		"please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

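/*
 * ATAG_INITRD2 passes the initrd's physical address directly, so no
 * virtual-to-physical conversion is needed here.
 */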
static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);

/*
 * This keeps memory configuration data used by a couple memory
 * initialization functions, as well as show_mem() for the skipping
 * of holes in the memory map.  It is populated by arm_add_memory().
 */
struct meminfo meminfo;

void show_mem(void)
{
	int free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0, slab = 0, i;
	struct meminfo * mi = &meminfo;

	printk("Mem-info:\n");
	show_free_areas();

	for_each_bank (i, mi) {
		struct membank *bank = &mi->bank[i];
		unsigned int pfn1, pfn2;
		struct page *page, *end;

		pfn1 = bank_pfn_start(bank);
		pfn2 = bank_pfn_end(bank);

		page = pfn_to_page(pfn1);
		end  = pfn_to_page(pfn2 - 1) + 1;

		do {
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (PageSlab(page))
				slab++;
			else if (!page_count(page))
				free++;
			else
				shared += page_count(page) - 1;
			page++;
		} while (page < end);
	}

	printk("%d pages of RAM\n", total);
	printk("%d free pages\n", free);
	printk("%d reserved pages\n", reserved);
	printk("%d slab pages\n", slab);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
}

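/*
 * Scan the memory banks for the lowest PFN, the highest lowmem PFN
 * (max_low) and the highest PFN overall (max_high).
 */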
static void __init find_limits(struct meminfo *mi,
	unsigned long *min, unsigned long *max_low, unsigned long *max_high)
{
	int i;

	*min = -1UL;
	*max_low = *max_high = 0;

	for_each_bank (i, mi) {
		struct membank *bank = &mi->bank[i];
		unsigned long start, end;

		start = bank_pfn_start(bank);
		end = bank_pfn_end(bank);

		if (*min > start)
			*min = start;
		if (*max_high < end)
			*max_high = end;
		if (bank->highmem)
			continue;
		if (*max_low < end)
			*max_low = end;
	}
}

static void __init arm_bootmem_init(struct meminfo *mi,
	unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned int boot_pages;
	phys_addr_t bitmap;
	pg_data_t *pgdat;
	int i;

	/*
	 * Allocate the bootmem bitmap page.  This must be in a region
	 * of memory which has already been mapped.
	 */
	boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
	bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES,
				__pfn_to_phys(end_pfn));

	/*
	 * Initialise the bootmem allocator, handing the
	 * memory banks over to bootmem.
	 */
	node_set_online(0);
	pgdat = NODE_DATA(0);
	init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);

	for_each_bank(i, mi) {
		struct membank *bank = &mi->bank[i];
		if (!bank->highmem)
			free_bootmem(bank_phys_start(bank), bank_phys_size(bank));
	}

	/*
	 * Reserve the memblock reserved regions in bootmem.
	 */
	for (i = 0; i < memblock.reserved.cnt; i++) {
		phys_addr_t start = memblock_start_pfn(&memblock.reserved, i);
		if (start >= start_pfn &&
		    memblock_end_pfn(&memblock.reserved, i) <= end_pfn)
			reserve_bootmem_node(pgdat, __pfn_to_phys(start),
				memblock_size_bytes(&memblock.reserved, i),
				BOOTMEM_DEFAULT);
	}
}

static void __init arm_bootmem_free(struct meminfo *mi, unsigned long min,
	unsigned long max_low, unsigned long max_high)
{
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	int i;

	/*
	 * initialise the zones.
	 */
	memset(zone_size, 0, sizeof(zone_size));

	/*
	 * The memory size has already been determined.  If we need
	 * to do anything fancy with the allocation of this memory
	 * to the zones, now is the time to do it.
	 */
	zone_size[0] = max_low - min;
#ifdef CONFIG_HIGHMEM
	zone_size[ZONE_HIGHMEM] = max_high - max_low;
#endif

	/*
	 * Calculate the size of the holes.
	 *  holes = node_size - sum(bank_sizes)
	 */
	memcpy(zhole_size, zone_size, sizeof(zhole_size));
	for_each_bank(i, mi) {
		int idx = 0;
#ifdef CONFIG_HIGHMEM
		if (mi->bank[i].highmem)
			idx = ZONE_HIGHMEM;
#endif
		zhole_size[idx] -= bank_pfn_size(&mi->bank[i]);
	}

	/*
	 * Adjust the sizes according to any special requirements for
	 * this machine type.
	 */
	arch_adjust_zones(zone_size, zhole_size);

	free_area_init_node(0, zone_size, min, zhole_size);
}

#ifndef CONFIG_SPARSEMEM
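/*
 * Without sparsemem, validate a PFN by binary-searching the memblock
 * memory regions for one that contains it.
 */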
int pfn_valid(unsigned long pfn)
{
	struct memblock_region *mem = &memblock.memory;
	unsigned int left = 0, right = mem->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (pfn < memblock_start_pfn(mem, mid))
			right = mid;
		else if (pfn >= memblock_end_pfn(mem, mid))
			left = mid + 1;
		else
			return 1;
	} while (left < right);
	return 0;
}
EXPORT_SYMBOL(pfn_valid);

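/*
 * Without sparsemem there are no memory sections to register, so
 * arm_memory_present() is a no-op.
 */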
static void arm_memory_present(void)
{
}
#else
static void arm_memory_present(void)
{
	int i;
	for (i = 0; i < memblock.memory.cnt; i++)
		memory_present(0, memblock_start_pfn(&memblock.memory, i),
			       memblock_end_pfn(&memblock.memory, i));
}
#endif

void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
{
	int i;

	memblock_init();
	for (i = 0; i < mi->nr_banks; i++)
		memblock_add(mi->bank[i].start, mi->bank[i].size);

	/* Register the kernel text, kernel data and initrd with memblock. */
#ifdef CONFIG_XIP_KERNEL
	memblock_reserve(__pa(_data), _end - _data);
#else
	memblock_reserve(__pa(_stext), _end - _stext);
#endif
#ifdef CONFIG_BLK_DEV_INITRD
	if (phys_initrd_size) {
		memblock_reserve(phys_initrd_start, phys_initrd_size);

		/* Now convert initrd to virtual addresses */
		initrd_start = __phys_to_virt(phys_initrd_start);
		initrd_end = initrd_start + phys_initrd_size;
	}
#endif

	arm_mm_memblock_reserve();

	/* reserve any platform specific memblock areas */
	if (mdesc->reserve)
		mdesc->reserve();

	memblock_analyze();
	memblock_dump_all();
}

void __init bootmem_init(void)
{
	struct meminfo *mi = &meminfo;
	unsigned long min, max_low, max_high;

	max_low = max_high = 0;

	find_limits(mi, &min, &max_low, &max_high);

	arm_bootmem_init(mi, min, max_low);

	/*
	 * Sparsemem tries to allocate bootmem in memory_present(),
	 * so must be done after the fixed reservations
	 */
	arm_memory_present();

	/*
	 * sparse_init() needs the bootmem allocator up and running.
	 */
	sparse_init();

	/*
	 * Now free the memory - free_area_init_node needs
	 * the sparse mem_map arrays initialized by sparse_init()
	 * for memmap_init_zone(), otherwise all PFNs are invalid.
	 */
	arm_bootmem_free(mi, min, max_low, max_high);

	high_memory = __va((max_low << PAGE_SHIFT) - 1) + 1;

	/*
	 * This doesn't seem to be used by the Linux memory manager any
	 * more, but is used by ll_rw_block.  If we can get rid of it, we
	 * also get rid of some of the stuff above as well.
	 *
	 * Note: max_low_pfn and max_pfn reflect the number of _pages_ in
	 * the system, not the maximum PFN.
	 */
	max_low_pfn = max_low - PHYS_PFN_OFFSET;
	max_pfn = max_high - PHYS_PFN_OFFSET;
}

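/*
 * Hand a range of pages back to the page allocator: clear the reserved
 * bit, reset the reference count and free each page, returning the
 * number of pages released.
 */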
static inline int free_area(unsigned long pfn, unsigned long end, char *s)
{
	unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10);

	for (; pfn < end; pfn++) {
		struct page *page = pfn_to_page(pfn);
		ClearPageReserved(page);
		init_page_count(page);
		__free_page(page);
		pages++;
	}

	if (size && s)
		printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);

	return pages;
}

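/*
 * Free the section of the memmap array that covers a hole between two
 * memory banks, returning it to the bootmem allocator.
 */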
static inline void
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	unsigned long pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn);

	/*
	 * Convert to physical addresses, and
	 * round start upwards and end downwards.
	 */
	pg = PAGE_ALIGN(__pa(start_pg));
	pgend = __pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these,
	 * free the section of the memmap array.
	 */
	if (pg < pgend)
		free_bootmem(pg, pgend - pg);
}

/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
static void __init free_unused_memmap(struct meminfo *mi)
{
	unsigned long bank_start, prev_bank_end = 0;
	unsigned int i;

	/*
	 * This relies on each bank being in address order.
	 * The banks are sorted previously in bootmem_init().
	 */
	for_each_bank(i, mi) {
		struct membank *bank = &mi->bank[i];

		bank_start = bank_pfn_start(bank);

		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
		if (prev_bank_end && prev_bank_end < bank_start)
			free_memmap(prev_bank_end, bank_start);

		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES);
	}
}

/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
	unsigned long reserved_pages, free_pages;
	int i;
#ifdef CONFIG_HAVE_TCM
	/* These pointers are filled in on TCM detection */
	extern u32 dtcm_end;
	extern u32 itcm_end;
#endif

	max_mapnr   = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;

	/* this will put all unused low memory onto the freelists */
	free_unused_memmap(&meminfo);

	totalram_pages += free_all_bootmem();

#ifdef CONFIG_SA1111
	/* now that our DMA memory is actually so designated, we can free it */
	totalram_pages += free_area(PHYS_PFN_OFFSET,
				    __phys_to_pfn(__pa(swapper_pg_dir)), NULL);
#endif

#ifdef CONFIG_HIGHMEM
	/* set highmem page free */
	for_each_bank (i, &meminfo) {
		unsigned long start = bank_pfn_start(&meminfo.bank[i]);
		unsigned long end = bank_pfn_end(&meminfo.bank[i]);
		if (start >= max_low_pfn + PHYS_PFN_OFFSET)
			totalhigh_pages += free_area(start, end, NULL);
	}
	totalram_pages += totalhigh_pages;
#endif

	reserved_pages = free_pages = 0;

	for_each_bank(i, &meminfo) {
		struct membank *bank = &meminfo.bank[i];
		unsigned int pfn1, pfn2;
		struct page *page, *end;

		pfn1 = bank_pfn_start(bank);
		pfn2 = bank_pfn_end(bank);

		page = pfn_to_page(pfn1);
		end  = pfn_to_page(pfn2 - 1) + 1;

		do {
			if (PageReserved(page))
				reserved_pages++;
			else if (!page_count(page))
				free_pages++;
			page++;
		} while (page < end);
	}

	/*
	 * Since our memory may not be contiguous, calculate the
	 * real number of pages we have in this system
	 */
	printk(KERN_INFO "Memory:");
	num_physpages = 0;
	for (i = 0; i < meminfo.nr_banks; i++) {
		num_physpages += bank_pfn_size(&meminfo.bank[i]);
		printk(" %ldMB", bank_phys_size(&meminfo.bank[i]) >> 20);
	}
	printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));

	printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n",
		nr_free_pages() << (PAGE_SHIFT-10),
		free_pages << (PAGE_SHIFT-10),
		reserved_pages << (PAGE_SHIFT-10),
		totalhigh_pages << (PAGE_SHIFT-10));

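/*
 * Each macro below expands to three printk arguments: the base
 * address, the top address, and the region size in kB (MLK) or
 * MB (MLM).
 */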
#define MLK(b, t) b, t, ((t) - (b)) >> 10
#define MLM(b, t) b, t, ((t) - (b)) >> 20
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)

	printk(KERN_NOTICE "Virtual kernel memory layout:\n"
			"    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HAVE_TCM
			"    DTCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
			"    ITCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
			"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_MMU
			"    DMA     : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
			"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
			"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#ifdef CONFIG_HIGHMEM
			"    pkmap   : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
			"    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
			"      .init : 0x%p" " - 0x%p" "   (%4d kB)\n"
			"      .text : 0x%p" " - 0x%p" "   (%4d kB)\n"
			"      .data : 0x%p" " - 0x%p" "   (%4d kB)\n",

			MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
				(PAGE_SIZE)),
#ifdef CONFIG_HAVE_TCM
			MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
			MLK(ITCM_OFFSET, (unsigned long) itcm_end),
#endif
			MLK(FIXADDR_START, FIXADDR_TOP),
#ifdef CONFIG_MMU
			MLM(CONSISTENT_BASE, CONSISTENT_END),
#endif
			MLM(VMALLOC_START, VMALLOC_END),
			MLM(PAGE_OFFSET, (unsigned long)high_memory),
#ifdef CONFIG_HIGHMEM
			MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
				(PAGE_SIZE)),
#endif
			MLM(MODULES_VADDR, MODULES_END),

			MLK_ROUNDUP(__init_begin, __init_end),
			MLK_ROUNDUP(_text, _etext),
			MLK_ROUNDUP(_data, _edata));

#undef MLK
#undef MLM
#undef MLK_ROUNDUP

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#ifdef CONFIG_MMU
	BUILD_BUG_ON(VMALLOC_END > CONSISTENT_BASE);
	BUG_ON(VMALLOC_END > CONSISTENT_BASE);

	BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
	BUG_ON(TASK_SIZE > MODULES_VADDR);
#endif

#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
#endif

	if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small we won't get
		 * anywhere without overcommit, so turn
		 * it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}

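/*
 * Return the kernel's .init sections (and the TCM link area, when
 * present) to the page allocator.  Integrator machines keep their
 * .init memory.
 */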
void free_initmem(void)
{
#ifdef CONFIG_HAVE_TCM
	extern char __tcm_start, __tcm_end;

	totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)),
				    __phys_to_pfn(__pa(&__tcm_end)),
				    "TCM link");
#endif

	if (!machine_is_integrator() && !machine_is_cintegrator())
		totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)),
					    __phys_to_pfn(__pa(__init_end)),
					    "init");
}

#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd;

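/*
 * Once the initramfs has been unpacked, its pages can be handed back
 * to the page allocator, unless "keepinitrd" was given on the command
 * line.
 */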
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (!keep_initrd)
		totalram_pages += free_area(__phys_to_pfn(__pa(start)),
					    __phys_to_pfn(__pa(end)),
					    "initrd");
}

static int __init keepinitrd_setup(char *__unused)
{
	keep_initrd = 1;
	return 1;
}

__setup("keepinitrd", keepinitrd_setup);
#endif