/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/sort.h>
#include <linux/highmem.h>
#include <asm/mach-types.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"
static unsigned long phys_initrd_start __initdata = 0;
static unsigned long phys_initrd_size __initdata = 0;
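
/*
 * Parse the "initrd=<start>,<size>" command line option, e.g.
 * initrd=0x60800000,8M (the address here is illustrative).
 * memparse() accepts the usual K/M/G suffixes for both values.
 */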
static int __init early_initrd(char *p)
{
        unsigned long start, size;
        char *endp;

        start = memparse(p, &endp);
        if (*endp == ',') {
                size = memparse(endp + 1, NULL);

                phys_initrd_start = start;
                phys_initrd_size = size;
        }
        return 0;
}
early_param("initrd", early_initrd);
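
/*
 * ATAG_INITRD passes the initrd start as a *virtual* address, hence
 * the __virt_to_phys() conversion below and the deprecation warning
 * in favour of ATAG_INITRD2.
 */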
static int __init parse_tag_initrd(const struct tag *tag)
{
        printk(KERN_WARNING "ATAG_INITRD is deprecated; "
                "please update your bootloader.\n");
        phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
        phys_initrd_size = tag->u.initrd.size;
        return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);
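
/*
 * ATAG_INITRD2 supplies the physical address of the initrd directly,
 * so no virtual-to-physical conversion is needed here.
 */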
static int __init parse_tag_initrd2(const struct tag *tag)
{
        phys_initrd_start = tag->u.initrd.start;
        phys_initrd_size = tag->u.initrd.size;
        return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);
/*
 * This keeps memory configuration data used by a couple memory
 * initialization functions, as well as show_mem() for the skipping
 * of holes in the memory map.  It is populated by arm_add_memory().
 */
struct meminfo meminfo;
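
/*
 * Dump a summary of system memory: walk each bank of every online
 * node so that holes between banks are skipped, and classify each
 * page as free, reserved, slab, swap-cached or shared.
 */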
void show_mem(void)
{
        int free = 0, total = 0, reserved = 0;
        int shared = 0, cached = 0, slab = 0, node, i;
        struct meminfo * mi = &meminfo;

        printk("Mem-info:\n");
        show_free_areas();
        for_each_online_node(node) {
                pg_data_t *n = NODE_DATA(node);
                struct page *map = pgdat_page_nr(n, 0) - n->node_start_pfn;

                for_each_nodebank (i,mi,node) {
                        struct membank *bank = &mi->bank[i];
                        unsigned int pfn1, pfn2;
                        struct page *page, *end;

                        pfn1 = bank_pfn_start(bank);
                        pfn2 = bank_pfn_end(bank);

                        page = map + pfn1;
                        end  = map + pfn2;

                        do {
                                total++;
                                if (PageReserved(page))
                                        reserved++;
                                else if (PageSwapCache(page))
                                        cached++;
                                else if (PageSlab(page))
                                        slab++;
                                else if (!page_count(page))
                                        free++;
                                else
                                        shared += page_count(page) - 1;
                                page++;
                        } while (page < end);
                }
        }

        printk("%d pages of RAM\n", total);
        printk("%d free pages\n", free);
        printk("%d reserved pages\n", reserved);
        printk("%d slab pages\n", slab);
        printk("%d pages shared\n", shared);
        printk("%d pages swap cached\n", cached);
}
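
/*
 * Find the lowest and highest page frame numbers spanned by this
 * node's banks, tracking the lowmem and highmem limits separately.
 */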
static void __init find_node_limits(int node, struct meminfo *mi,
        unsigned long *min, unsigned long *max_low, unsigned long *max_high)
{
        int i;

        *min = -1UL;
        *max_low = *max_high = 0;

        for_each_nodebank(i, mi, node) {
                struct membank *bank = &mi->bank[i];
                unsigned long start, end;

                start = bank_pfn_start(bank);
                end = bank_pfn_end(bank);

                if (*min > start)
                        *min = start;
                if (*max_high < end)
                        *max_high = end;
                if (bank->highmem)
                        continue;
                if (*max_low < end)
                        *max_low = end;
        }
}
/*
 * FIXME: We really want to avoid allocating the bootmap bitmap
 * over the top of the initrd.  Hopefully, this is located towards
 * the start of a bank, so if we allocate the bootmap bitmap at
 * the end, we won't clash.
 */
static unsigned int __init
find_bootmap_pfn(int node, struct meminfo *mi, unsigned int bootmap_pages)
{
        unsigned int start_pfn, i, bootmap_pfn;

        start_pfn   = PAGE_ALIGN(__pa(_end)) >> PAGE_SHIFT;
        bootmap_pfn = 0;

        for_each_nodebank(i, mi, node) {
                struct membank *bank = &mi->bank[i];
                unsigned int start, end;

                start = bank_pfn_start(bank);
                end   = bank_pfn_end(bank);

                if (end < start_pfn)
                        continue;

                if (start < start_pfn)
                        start = start_pfn;

                if (end <= start)
                        continue;

                if (end - start >= bootmap_pages) {
                        bootmap_pfn = start;
                        break;
                }
        }

        if (bootmap_pfn == 0)
                BUG();

        return bootmap_pfn;
}
static int __init check_initrd(struct meminfo *mi)
{
        int initrd_node = -2;
#ifdef CONFIG_BLK_DEV_INITRD
        unsigned long end = phys_initrd_start + phys_initrd_size;

        /*
         * Make sure that the initrd is within a valid area of
         * memory.
         */
        if (phys_initrd_size) {
                unsigned int i;

                initrd_node = -1;

                for (i = 0; i < mi->nr_banks; i++) {
                        struct membank *bank = &mi->bank[i];
                        if (bank_phys_start(bank) <= phys_initrd_start &&
                            end <= bank_phys_end(bank))
                                initrd_node = bank->node;
                }
        }

        if (initrd_node == -1) {
                printk(KERN_ERR "INITRD: 0x%08lx+0x%08lx extends beyond "
                       "physical memory - disabling initrd\n",
                       phys_initrd_start, phys_initrd_size);
                phys_initrd_start = phys_initrd_size = 0;
        }
#endif

        return initrd_node;
}
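
/*
 * Create the kernel's linear mapping for one memory bank.  Without an
 * MMU there is nothing to map, so this compiles away to nothing.
 */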
static inline void map_memory_bank(struct membank *bank)
{
#ifdef CONFIG_MMU
        struct map_desc map;

        map.pfn = bank_pfn_start(bank);
        map.virtual = __phys_to_virt(bank_phys_start(bank));
        map.length = bank_phys_size(bank);
        map.type = MT_MEMORY;

        create_mapping(&map);
#endif
}
static void __init bootmem_init_node(int node, struct meminfo *mi,
        unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long boot_pfn;
        unsigned int boot_pages;
        pg_data_t *pgdat;
        int i;

        /*
         * Map the memory banks for this node.
         */
        for_each_nodebank(i, mi, node) {
                struct membank *bank = &mi->bank[i];

                if (!bank->highmem)
                        map_memory_bank(bank);
        }

        /*
         * Allocate the bootmem bitmap page.
         */
        boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
        boot_pfn = find_bootmap_pfn(node, mi, boot_pages);

        /*
         * Initialise the bootmem allocator for this node, handing the
         * memory banks over to bootmem.
         */
        node_set_online(node);
        pgdat = NODE_DATA(node);
        init_bootmem_node(pgdat, boot_pfn, start_pfn, end_pfn);

        for_each_nodebank(i, mi, node) {
                struct membank *bank = &mi->bank[i];
                if (!bank->highmem)
                        free_bootmem_node(pgdat, bank_phys_start(bank), bank_phys_size(bank));
        }

        /*
         * Reserve the bootmem bitmap for this node.
         */
        reserve_bootmem_node(pgdat, boot_pfn << PAGE_SHIFT,
                             boot_pages << PAGE_SHIFT, BOOTMEM_DEFAULT);
}
static void __init bootmem_reserve_initrd(int node)
{
#ifdef CONFIG_BLK_DEV_INITRD
        pg_data_t *pgdat = NODE_DATA(node);
        int res;

        res = reserve_bootmem_node(pgdat, phys_initrd_start,
                             phys_initrd_size, BOOTMEM_EXCLUSIVE);

        if (res == 0) {
                initrd_start = __phys_to_virt(phys_initrd_start);
                initrd_end = initrd_start + phys_initrd_size;
        } else {
                printk(KERN_ERR
                        "INITRD: 0x%08lx+0x%08lx overlaps in-use "
                        "memory region - disabling initrd\n",
                        phys_initrd_start, phys_initrd_size);
        }
#endif
}
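
/*
 * Initialise the struct zone information for this node: compute the
 * size of each zone and the holes within it, then hand the layout to
 * free_area_init_node().
 */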
static void __init bootmem_free_node(int node, struct meminfo *mi)
{
        unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
        unsigned long min, max_low, max_high;
        int i;

        find_node_limits(node, mi, &min, &max_low, &max_high);

        /*
         * initialise the zones within this node.
         */
        memset(zone_size, 0, sizeof(zone_size));

        /*
         * The size of this node has already been determined.  If we need
         * to do anything fancy with the allocation of this memory to the
         * zones, now is the time to do it.
         */
        zone_size[0] = max_low - min;
#ifdef CONFIG_HIGHMEM
        zone_size[ZONE_HIGHMEM] = max_high - max_low;
#endif

        /*
         * For each bank in this node, calculate the size of the holes.
         *  holes = node_size - sum(bank_sizes_in_node)
         */
        memcpy(zhole_size, zone_size, sizeof(zhole_size));
        for_each_nodebank(i, mi, node) {
                int idx = 0;
#ifdef CONFIG_HIGHMEM
                if (mi->bank[i].highmem)
                        idx = ZONE_HIGHMEM;
#endif
                zhole_size[idx] -= bank_pfn_size(&mi->bank[i]);
        }

        /*
         * Adjust the sizes according to any special requirements for
         * this machine type.
         */
        arch_adjust_zones(node, zone_size, zhole_size);

        free_area_init_node(node, zone_size, min, zhole_size);
}
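
/*
 * Without sparsemem, pfn_valid() is implemented as a binary search
 * over the bank array, which bootmem_init() keeps sorted by start pfn.
 */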
#ifndef CONFIG_SPARSEMEM
int pfn_valid(unsigned long pfn)
{
        struct meminfo *mi = &meminfo;
        unsigned int left = 0, right = mi->nr_banks;

        do {
                unsigned int mid = (right + left) / 2;
                struct membank *bank = &mi->bank[mid];

                if (pfn < bank_pfn_start(bank))
                        right = mid;
                else if (pfn >= bank_pfn_end(bank))
                        left = mid + 1;
                else
                        return 1;
        } while (left < right);
        return 0;
}
EXPORT_SYMBOL(pfn_valid);

static void arm_memory_present(struct meminfo *mi, int node)
{
}
#else
static void arm_memory_present(struct meminfo *mi, int node)
{
        int i;
        for_each_nodebank(i, mi, node) {
                struct membank *bank = &mi->bank[i];
                memory_present(node, bank_pfn_start(bank), bank_pfn_end(bank));
        }
}
#endif
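
/*
 * Comparator for sort(): order memory banks by ascending start pfn.
 */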
static int __init meminfo_cmp(const void *_a, const void *_b)
{
        const struct membank *a = _a, *b = _b;
        long cmp = bank_pfn_start(a) - bank_pfn_start(b);
        return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
}
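
/*
 * Set up the bootmem allocator from the meminfo gathered during early
 * boot: sort the banks, map each node's memory, make the fixed
 * reservations and the initrd reservation, then initialise the zones.
 * Called from paging_init().
 */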
void __init bootmem_init(void)
{
        struct meminfo *mi = &meminfo;
        unsigned long min, max_low, max_high;
        int node, initrd_node;

        sort(&mi->bank, mi->nr_banks, sizeof(mi->bank[0]), meminfo_cmp, NULL);

        /*
         * Locate which node contains the ramdisk image, if any.
         */
        initrd_node = check_initrd(mi);

        max_low = max_high = 0;

        /*
         * Run through each node initialising the bootmem allocator.
         */
        for_each_node(node) {
                unsigned long node_low, node_high;

                find_node_limits(node, mi, &min, &node_low, &node_high);

                if (node_low > max_low)
                        max_low = node_low;
                if (node_high > max_high)
                        max_high = node_high;

                /*
                 * If there is no memory in this node, ignore it.
                 * (We can't have nodes which have no lowmem)
                 */
                if (node_low == 0)
                        continue;

                bootmem_init_node(node, mi, min, node_low);

                /*
                 * Reserve any special node zero regions.
                 */
                if (node == 0)
                        reserve_node_zero(NODE_DATA(node));

                /*
                 * If the initrd is in this node, reserve its memory.
                 */
                if (node == initrd_node)
                        bootmem_reserve_initrd(node);

                /*
                 * Sparsemem tries to allocate bootmem in memory_present(),
                 * so must be done after the fixed reservations
                 */
                arm_memory_present(mi, node);
        }

        /*
         * sparse_init() needs the bootmem allocator up and running.
         */
        sparse_init();

        /*
         * Now free memory in each node - free_area_init_node needs
         * the sparse mem_map arrays initialized by sparse_init()
         * for memmap_init_zone(), otherwise all PFNs are invalid.
         */
        for_each_node(node)
                bootmem_free_node(node, mi);

        high_memory = __va((max_low << PAGE_SHIFT) - 1) + 1;

        /*
         * This doesn't seem to be used by the Linux memory manager any
         * more, but is used by ll_rw_block.  If we can get rid of it, we
         * also get rid of some of the stuff above as well.
         *
         * Note: max_low_pfn and max_pfn reflect the number of _pages_ in
         * the system, not the maximum PFN.
         */
        max_low_pfn = max_low - PHYS_PFN_OFFSET;
        max_pfn = max_high - PHYS_PFN_OFFSET;
}
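
/*
 * Release a pfn range to the page allocator and return the number of
 * pages freed; "s" names the region for the log message, or may be
 * NULL to free silently.
 */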
static inline int free_area(unsigned long pfn, unsigned long end, char *s)
{
        unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10);

        for (; pfn < end; pfn++) {
                struct page *page = pfn_to_page(pfn);
                ClearPageReserved(page);
                init_page_count(page);
                __free_page(page);
                pages++;
        }

        if (size && s)
                printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);

        return pages;
}
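
/*
 * Free the mem_map entries covering a hole between two banks; only
 * whole pages of the memmap array can be handed back to bootmem,
 * hence the rounding below.
 */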
static inline void
free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn)
{
        struct page *start_pg, *end_pg;
        unsigned long pg, pgend;

        /*
         * Convert start_pfn/end_pfn to a struct page pointer.
         */
        start_pg = pfn_to_page(start_pfn - 1) + 1;
        end_pg = pfn_to_page(end_pfn);

        /*
         * Convert to physical addresses, and
         * round start upwards and end downwards.
         */
        pg = PAGE_ALIGN(__pa(start_pg));
        pgend = __pa(end_pg) & PAGE_MASK;

        /*
         * If there are free pages between these,
         * free the section of the memmap array.
         */
        if (pg < pgend)
                free_bootmem_node(NODE_DATA(node), pg, pgend - pg);
}
/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
static void __init free_unused_memmap_node(int node, struct meminfo *mi)
{
        unsigned long bank_start, prev_bank_end = 0;
        unsigned int i;

        /*
         * [FIXME] This relies on each bank being in address order.  This
         * may not be the case, especially if the user has provided the
         * information on the command line.
         */
        for_each_nodebank(i, mi, node) {
                struct membank *bank = &mi->bank[i];

                bank_start = bank_pfn_start(bank);
                if (bank_start < prev_bank_end) {
                        printk(KERN_ERR "MEM: unordered memory banks.  "
                                "Not freeing memmap.\n");
                        break;
                }

                /*
                 * If we had a previous bank, and there is a space
                 * between the current bank and the previous, free it.
                 */
                if (prev_bank_end && prev_bank_end != bank_start)
                        free_memmap(node, prev_bank_end, bank_start);

                prev_bank_end = bank_pfn_end(bank);
        }
}
/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
        unsigned long reserved_pages, free_pages;
        int i, node;

#ifndef CONFIG_DISCONTIGMEM
        max_mapnr   = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;
#endif

        /* this will put all unused low memory onto the freelists */
        for_each_online_node(node) {
                pg_data_t *pgdat = NODE_DATA(node);

                free_unused_memmap_node(node, &meminfo);

                if (pgdat->node_spanned_pages != 0)
                        totalram_pages += free_all_bootmem_node(pgdat);
        }

#ifdef CONFIG_SA1111
        /* now that our DMA memory is actually so designated, we can free it */
        totalram_pages += free_area(PHYS_PFN_OFFSET,
                                    __phys_to_pfn(__pa(swapper_pg_dir)), NULL);
#endif

#ifdef CONFIG_HIGHMEM
        /* set highmem page free */
        for_each_online_node(node) {
                for_each_nodebank (i, &meminfo, node) {
                        unsigned long start = bank_pfn_start(&meminfo.bank[i]);
                        unsigned long end = bank_pfn_end(&meminfo.bank[i]);
                        if (start >= max_low_pfn + PHYS_PFN_OFFSET)
                                totalhigh_pages += free_area(start, end, NULL);
                }
        }
        totalram_pages += totalhigh_pages;
#endif

        reserved_pages = free_pages = 0;

        for_each_online_node(node) {
                pg_data_t *n = NODE_DATA(node);
                struct page *map = pgdat_page_nr(n, 0) - n->node_start_pfn;

                for_each_nodebank(i, &meminfo, node) {
                        struct membank *bank = &meminfo.bank[i];
                        unsigned int pfn1, pfn2;
                        struct page *page, *end;

                        pfn1 = bank_pfn_start(bank);
                        pfn2 = bank_pfn_end(bank);

                        page = map + pfn1;
                        end  = map + pfn2;

                        do {
                                if (PageReserved(page))
                                        reserved_pages++;
                                else if (!page_count(page))
                                        free_pages++;
                                page++;
                        } while (page < end);
                }
        }
        /*
         * Since our memory may not be contiguous, calculate the
         * real number of pages we have in this system
         */
        printk(KERN_INFO "Memory:");
        num_physpages = 0;
        for (i = 0; i < meminfo.nr_banks; i++) {
                num_physpages += bank_pfn_size(&meminfo.bank[i]);
                printk(" %ldMB", bank_phys_size(&meminfo.bank[i]) >> 20);
        }
        printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));

        printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n",
                nr_free_pages() << (PAGE_SHIFT-10),
                free_pages << (PAGE_SHIFT-10),
                reserved_pages << (PAGE_SHIFT-10),
                totalhigh_pages << (PAGE_SHIFT-10));
#define MLK(b, t) b, t, ((t) - (b)) >> 10
#define MLM(b, t) b, t, ((t) - (b)) >> 20
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)

        printk(KERN_NOTICE "Virtual kernel memory layout:\n"
                        "    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                        "    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_MMU
                        "    DMA     : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
                        "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                        "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#ifdef CONFIG_HIGHMEM
                        "    pkmap   : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
                        "    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                        "      .init : 0x%p" " - 0x%p" "   (%4d kB)\n"
                        "      .text : 0x%p" " - 0x%p" "   (%4d kB)\n"
                        "      .data : 0x%p" " - 0x%p" "   (%4d kB)\n",

                        MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
                                (PAGE_SIZE)),
                        MLK(FIXADDR_START, FIXADDR_TOP),
#ifdef CONFIG_MMU
                        MLM(CONSISTENT_BASE, CONSISTENT_END),
#endif
                        MLM(VMALLOC_START, VMALLOC_END),
                        MLM(PAGE_OFFSET, (unsigned long)high_memory),
#ifdef CONFIG_HIGHMEM
                        MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
                                (PAGE_SIZE)),
#endif
                        MLM(MODULES_VADDR, MODULES_END),

                        MLK_ROUNDUP(__init_begin, __init_end),
                        MLK_ROUNDUP(_text, _etext),
                        MLK_ROUNDUP(_data, _edata));

#undef MLK
#undef MLM
#undef MLK_ROUNDUP
        /*
         * Check boundaries twice: Some fundamental inconsistencies can
         * be detected at build time already.
         */
#ifdef CONFIG_MMU
        BUILD_BUG_ON(VMALLOC_END > CONSISTENT_BASE);
        BUG_ON(VMALLOC_END > CONSISTENT_BASE);

        BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
        BUG_ON(TASK_SIZE > MODULES_VADDR);
#endif

#ifdef CONFIG_HIGHMEM
        BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
        BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
#endif

        if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
                extern int sysctl_overcommit_memory;
                /*
                 * On a machine this small we won't get
                 * anywhere without overcommit, so turn
                 * it on by default.
                 */
                sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
        }
}
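
/*
 * Give the memory occupied by the .init sections (and, with TCM, the
 * TCM link area) back to the page allocator once booting is complete.
 */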
void free_initmem(void)
{
#ifdef CONFIG_HAVE_TCM
        extern char *__tcm_start, *__tcm_end;

        totalram_pages += free_area(__phys_to_pfn(__pa(__tcm_start)),
                                    __phys_to_pfn(__pa(__tcm_end)),
                                    "TCM link");
#endif

        if (!machine_is_integrator() && !machine_is_cintegrator())
                totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)),
                                            __phys_to_pfn(__pa(__init_end)),
                                            "init");
}
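
/*
 * The initrd memory is normally freed once it has been unpacked;
 * booting with "keepinitrd" on the command line keeps it around.
 */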
#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd;

void free_initrd_mem(unsigned long start, unsigned long end)
{
        if (!keep_initrd)
                totalram_pages += free_area(__phys_to_pfn(__pa(start)),
                                            __phys_to_pfn(__pa(end)),
                                            "initrd");
}

static int __init keepinitrd_setup(char *__unused)
{
        keep_initrd = 1;
        return 1;
}

__setup("keepinitrd", keepinitrd_setup);
#endif