/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>

#include <asm/mach-types.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

static unsigned long phys_initrd_start __initdata = 0;
static unsigned long phys_initrd_size __initdata = 0;

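/*
 * Parse the "initrd=<start>,<size>" command line argument, e.g.
 * "initrd=0x60d00000,8M" (the address here is only illustrative);
 * both values accept the usual memparse K, M or G suffixes.
 */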
static int __init early_initrd(char *p)
{
        unsigned long start, size;
        char *endp;

        start = memparse(p, &endp);
        if (*endp == ',') {
                size = memparse(endp + 1, NULL);

                phys_initrd_start = start;
                phys_initrd_size = size;
        }
        return 0;
}
early_param("initrd", early_initrd);

static int __init parse_tag_initrd(const struct tag *tag)
{
        printk(KERN_WARNING "ATAG_INITRD is deprecated; "
                "please update your bootloader.\n");
        phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
        phys_initrd_size = tag->u.initrd.size;
        return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
        phys_initrd_start = tag->u.initrd.start;
        phys_initrd_size = tag->u.initrd.size;
        return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);

#ifdef CONFIG_OF_FLATTREE
void __init early_init_dt_setup_initrd_arch(unsigned long start, unsigned long end)
{
        phys_initrd_start = start;
        phys_initrd_size = end - start;
}
#endif /* CONFIG_OF_FLATTREE */

/*
 * This holds the memory configuration data used by a couple of memory
 * initialization functions, and by show_mem() to skip holes in the
 * memory map.  It is populated by arm_add_memory().
 */
struct meminfo meminfo;

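/*
 * Walk each memory bank page by page and classify the pages, so the
 * summary printed below accounts for holes between banks.
 */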
void show_mem(unsigned int filter)
{
        int free = 0, total = 0, reserved = 0;
        int shared = 0, cached = 0, slab = 0, i;
        struct meminfo *mi = &meminfo;

        printk("Mem-info:\n");
        show_free_areas(filter);

        for_each_bank (i, mi) {
                struct membank *bank = &mi->bank[i];
                unsigned int pfn1, pfn2;
                struct page *page, *end;

                pfn1 = bank_pfn_start(bank);
                pfn2 = bank_pfn_end(bank);

                page = pfn_to_page(pfn1);
                end  = pfn_to_page(pfn2 - 1) + 1;

                do {
                        total++;
                        if (PageReserved(page))
                                reserved++;
                        else if (PageSwapCache(page))
                                cached++;
                        else if (PageSlab(page))
                                slab++;
                        else if (!page_count(page))
                                free++;
                        else
                                shared += page_count(page) - 1;
                        page++;
                } while (page < end);
        }

        printk("%d pages of RAM\n", total);
        printk("%d free pages\n", free);
        printk("%d reserved pages\n", reserved);
        printk("%d slab pages\n", slab);
        printk("%d pages shared\n", shared);
        printk("%d pages swap cached\n", cached);
}

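/*
 * Determine the PFN limits: the lowest PFN, the end of lowmem (the
 * first highmem bank, if any) and the end of all memory.  If no bank
 * is marked highmem, the loop below runs off the end of the array and
 * max_low ends up equal to max_high.
 */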
static void __init find_limits(unsigned long *min, unsigned long *max_low,
                               unsigned long *max_high)
{
        struct meminfo *mi = &meminfo;
        int i;

        /* This assumes the meminfo array is properly sorted */
        *min = bank_pfn_start(&mi->bank[0]);
        for_each_bank (i, mi)
                if (mi->bank[i].highmem)
                        break;
        *max_low = bank_pfn_end(&mi->bank[i - 1]);
        *max_high = bank_pfn_end(&mi->bank[mi->nr_banks - 1]);
}

static void __init arm_bootmem_init(unsigned long start_pfn,
        unsigned long end_pfn)
{
        struct memblock_region *reg;
        unsigned int boot_pages;
        phys_addr_t bitmap;
        pg_data_t *pgdat;

        /*
         * Allocate the bootmem bitmap page.  This must be in a region
         * of memory which has already been mapped.
         */
        boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
        bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES,
                                __pfn_to_phys(end_pfn));

        /*
         * Initialise the bootmem allocator, handing the
         * memory banks over to bootmem.
         */
        node_set_online(0);
        pgdat = NODE_DATA(0);
        init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);

        /* Free the lowmem regions from memblock into bootmem. */
        for_each_memblock(memory, reg) {
                unsigned long start = memblock_region_memory_base_pfn(reg);
                unsigned long end = memblock_region_memory_end_pfn(reg);

                if (end >= end_pfn)
                        end = end_pfn;
                if (start >= end)
                        break;

                free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT);
        }

        /* Reserve the lowmem memblock reserved regions in bootmem. */
        for_each_memblock(reserved, reg) {
                unsigned long start = memblock_region_reserved_base_pfn(reg);
                unsigned long end = memblock_region_reserved_end_pfn(reg);

                if (end >= end_pfn)
                        end = end_pfn;
                if (start >= end)
                        break;

                reserve_bootmem(__pfn_to_phys(start),
                                (end - start) << PAGE_SHIFT, BOOTMEM_DEFAULT);
        }
}

#ifdef CONFIG_ZONE_DMA

unsigned long arm_dma_zone_size __read_mostly;
EXPORT_SYMBOL(arm_dma_zone_size);

/*
 * The DMA mask corresponding to the maximum bus address allocatable
 * using GFP_DMA.  The default here places no restriction on DMA
 * allocations.  This must be the smallest DMA mask in the system,
 * so a successful GFP_DMA allocation will always satisfy this.
 */
u32 arm_dma_limit;

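/*
 * Split the lowmem zone counts: the first dma_size pages become
 * ZONE_DMA and the remainder ZONE_NORMAL.  Any hole pages are
 * attributed entirely to ZONE_NORMAL.
 */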
static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
        unsigned long dma_size)
{
        if (size[0] <= dma_size)
                return;

        size[ZONE_NORMAL] = size[0] - dma_size;
        size[ZONE_DMA] = dma_size;
        hole[ZONE_NORMAL] = hole[0];
        hole[ZONE_DMA] = 0;
}
#endif

static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
        unsigned long max_high)
{
        unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
        struct memblock_region *reg;

        /*
         * initialise the zones.
         */
        memset(zone_size, 0, sizeof(zone_size));

        /*
         * The memory size has already been determined.  If we need
         * to do anything fancy with the allocation of this memory
         * to the zones, now is the time to do it.
         */
        zone_size[0] = max_low - min;
#ifdef CONFIG_HIGHMEM
        zone_size[ZONE_HIGHMEM] = max_high - max_low;
#endif

        /*
         * Calculate the size of the holes.
         *  holes = node_size - sum(bank_sizes)
         */
        memcpy(zhole_size, zone_size, sizeof(zhole_size));
        for_each_memblock(memory, reg) {
                unsigned long start = memblock_region_memory_base_pfn(reg);
                unsigned long end = memblock_region_memory_end_pfn(reg);

                if (start < max_low) {
                        unsigned long low_end = min(end, max_low);
                        zhole_size[0] -= low_end - start;
                }
#ifdef CONFIG_HIGHMEM
                if (end > max_low) {
                        unsigned long high_start = max(start, max_low);
                        zhole_size[ZONE_HIGHMEM] -= end - high_start;
                }
#endif
        }

#ifdef CONFIG_ZONE_DMA
        /*
         * Adjust the sizes according to any special requirements for
         * this machine type.
         */
        if (arm_dma_zone_size) {
                arm_adjust_dma_zone(zone_size, zhole_size,
                        arm_dma_zone_size >> PAGE_SHIFT);
                arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
        } else
                arm_dma_limit = 0xffffffff;
#endif

        free_area_init_node(0, zone_size, min, zhole_size);
}

#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
        return memblock_is_memory(__pfn_to_phys(pfn));
}
EXPORT_SYMBOL(pfn_valid);
#endif

#ifndef CONFIG_SPARSEMEM
static void arm_memory_present(void)
{
}
#else
static void arm_memory_present(void)
{
        struct memblock_region *reg;

        for_each_memblock(memory, reg)
                memory_present(0, memblock_region_memory_base_pfn(reg),
                               memblock_region_memory_end_pfn(reg));
}
#endif

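/*
 * Register all memory banks with memblock and reserve everything the
 * kernel already occupies: the kernel image, the initrd, the initial
 * page tables, the device tree and any platform-specific regions.
 */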
void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
{
        int i;

        memblock_init();
        for (i = 0; i < mi->nr_banks; i++)
                memblock_add(mi->bank[i].start, mi->bank[i].size);

        /* Register the kernel text, kernel data and initrd with memblock. */
#ifdef CONFIG_XIP_KERNEL
        memblock_reserve(__pa(_sdata), _end - _sdata);
#else
        memblock_reserve(__pa(_stext), _end - _stext);
#endif
#ifdef CONFIG_BLK_DEV_INITRD
        if (phys_initrd_size &&
            !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) {
                pr_err("INITRD: 0x%08lx+0x%08lx is not a memory region - disabling initrd\n",
                       phys_initrd_start, phys_initrd_size);
                phys_initrd_start = phys_initrd_size = 0;
        }
        if (phys_initrd_size &&
            memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) {
                pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region - disabling initrd\n",
                       phys_initrd_start, phys_initrd_size);
                phys_initrd_start = phys_initrd_size = 0;
        }
        if (phys_initrd_size) {
                memblock_reserve(phys_initrd_start, phys_initrd_size);

                /* Now convert initrd to virtual addresses */
                initrd_start = __phys_to_virt(phys_initrd_start);
                initrd_end = initrd_start + phys_initrd_size;
        }
#endif

        arm_mm_memblock_reserve();
        arm_dt_memblock_reserve();

        /* reserve any platform specific memblock areas */
        if (mdesc->reserve)
                mdesc->reserve();

        memblock_analyze();
        memblock_dump_all();
}

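/*
 * Ordering here matters: sparsemem allocates its section maps from
 * bootmem, so arm_bootmem_init() must run first, and sparse_init()
 * must run before arm_bootmem_free() so that the mem_map arrays
 * exist when the zones are initialised.
 */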
void __init bootmem_init(void)
{
        unsigned long min, max_low, max_high;

        max_low = max_high = 0;

        find_limits(&min, &max_low, &max_high);

        arm_bootmem_init(min, max_low);

        /*
         * Sparsemem tries to allocate bootmem in memory_present(),
         * so it must be called after the fixed reservations.
         */
        arm_memory_present();

        /*
         * sparse_init() needs the bootmem allocator up and running.
         */
        sparse_init();

        /*
         * Now free the memory - free_area_init_node needs
         * the sparse mem_map arrays initialized by sparse_init()
         * for memmap_init_zone(), otherwise all PFNs are invalid.
         */
        arm_bootmem_free(min, max_low, max_high);

        high_memory = __va(((phys_addr_t)max_low << PAGE_SHIFT) - 1) + 1;

        /*
         * This doesn't seem to be used by the Linux memory manager any
         * more, but is used by ll_rw_block.  If we can get rid of it, we
         * also get rid of some of the stuff above as well.
         *
         * Note: max_low_pfn and max_pfn reflect the number of _pages_ in
         * the system, not the maximum PFN.
         */
        max_low_pfn = max_low - PHYS_PFN_OFFSET;
        max_pfn = max_high - PHYS_PFN_OFFSET;
}

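/*
 * Hand the pages in [pfn, end) back to the page allocator and return
 * the number of pages freed; "s" names the region in the log message.
 */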
static inline int free_area(unsigned long pfn, unsigned long end, char *s)
{
        unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10);

        for (; pfn < end; pfn++) {
                struct page *page = pfn_to_page(pfn);
                ClearPageReserved(page);
                init_page_count(page);
                __free_page(page);
                pages++;
        }

        if (size && s)
                printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);

        return pages;
}

/*
 * Poison init memory with an undefined instruction (ARM) or a branch to an
 * undefined instruction (Thumb).
 */
static inline void poison_init_mem(void *s, size_t count)
{
        u32 *p = (u32 *)s;
        for (; count != 0; count -= 4)
                *p++ = 0xe7fddef0;
}

static inline void
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
        struct page *start_pg, *end_pg;
        unsigned long pg, pgend;

        /*
         * Convert start_pfn/end_pfn to a struct page pointer.
         */
        start_pg = pfn_to_page(start_pfn - 1) + 1;
        end_pg = pfn_to_page(end_pfn - 1) + 1;

        /*
         * Convert to physical addresses, and
         * round start upwards and end downwards.
         */
        pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
        pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;

        /*
         * If there are free pages between these,
         * free the section of the memmap array.
         */
        if (pg < pgend)
                free_bootmem(pg, pgend - pg);
}

/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
static void __init free_unused_memmap(struct meminfo *mi)
{
        unsigned long bank_start, prev_bank_end = 0;
        unsigned int i;

        /*
         * This relies on each bank being in address order.
         * The banks are sorted previously in bootmem_init().
         */
        for_each_bank(i, mi) {
                struct membank *bank = &mi->bank[i];

                bank_start = bank_pfn_start(bank);

#ifdef CONFIG_SPARSEMEM
                /*
                 * Take care not to free memmap entries that don't exist
                 * due to SPARSEMEM sections which aren't present.
                 */
                bank_start = min(bank_start,
                                 ALIGN(prev_bank_end, PAGES_PER_SECTION));
#else
                /*
                 * Align down here since the VM subsystem insists that the
                 * memmap entries are valid from the bank start aligned to
                 * MAX_ORDER_NR_PAGES.
                 */
                bank_start = round_down(bank_start, MAX_ORDER_NR_PAGES);
#endif
                /*
                 * If we had a previous bank, and there is a space
                 * between the current bank and the previous, free it.
                 */
                if (prev_bank_end && prev_bank_end < bank_start)
                        free_memmap(prev_bank_end, bank_start);

                /*
                 * Align up here since the VM subsystem insists that the
                 * memmap entries are valid from the bank end aligned to
                 * MAX_ORDER_NR_PAGES.
                 */
                prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES);
        }

#ifdef CONFIG_SPARSEMEM
        if (!IS_ALIGNED(prev_bank_end, PAGES_PER_SECTION))
                free_memmap(prev_bank_end,
                            ALIGN(prev_bank_end, PAGES_PER_SECTION));
#endif
}

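/*
 * Free every highmem page that is not covered by a memblock
 * reservation, accumulating the count in totalhigh_pages.
 */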
static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
        unsigned long max_low = max_low_pfn + PHYS_PFN_OFFSET;
        struct memblock_region *mem, *res;

        /* set highmem page free */
        for_each_memblock(memory, mem) {
                unsigned long start = memblock_region_memory_base_pfn(mem);
                unsigned long end = memblock_region_memory_end_pfn(mem);

                /* Ignore complete lowmem entries */
                if (end <= max_low)
                        continue;

                /* Truncate partial highmem entries */
                if (start < max_low)
                        start = max_low;

                /* Find and exclude any reserved regions */
                for_each_memblock(reserved, res) {
                        unsigned long res_start, res_end;

                        res_start = memblock_region_reserved_base_pfn(res);
                        res_end = memblock_region_reserved_end_pfn(res);

                        if (res_end < start)
                                continue;
                        if (res_start < start)
                                res_start = start;
                        if (res_start > end)
                                res_start = end;
                        if (res_end > end)
                                res_end = end;
                        if (res_start != start)
                                totalhigh_pages += free_area(start, res_start,
                                                             NULL);
                        start = res_end;
                        if (start == end)
                                break;
                }

                /* And now free anything which remains */
                if (start < end)
                        totalhigh_pages += free_area(start, end, NULL);
        }
        totalram_pages += totalhigh_pages;
#endif
}

/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
        unsigned long reserved_pages, free_pages;
        struct memblock_region *reg;
        int i;
#ifdef CONFIG_HAVE_TCM
        /* These pointers are filled in on TCM detection */
        extern u32 dtcm_end;
        extern u32 itcm_end;
#endif

        max_mapnr   = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;

        /* this will put all unused low memory onto the freelists */
        free_unused_memmap(&meminfo);

        totalram_pages += free_all_bootmem();

#ifdef CONFIG_SA1111
        /* now that our DMA memory is actually so designated, we can free it */
        totalram_pages += free_area(PHYS_PFN_OFFSET,
                                    __phys_to_pfn(__pa(swapper_pg_dir)), NULL);
#endif

        free_highpages();

        reserved_pages = free_pages = 0;

        for_each_bank(i, &meminfo) {
                struct membank *bank = &meminfo.bank[i];
                unsigned int pfn1, pfn2;
                struct page *page, *end;

                pfn1 = bank_pfn_start(bank);
                pfn2 = bank_pfn_end(bank);

                page = pfn_to_page(pfn1);
                end  = pfn_to_page(pfn2 - 1) + 1;

                do {
                        if (PageReserved(page))
                                reserved_pages++;
                        else if (!page_count(page))
                                free_pages++;
                        page++;
                } while (page < end);
        }

        /*
         * Since our memory may not be contiguous, calculate the
         * real number of pages we have in this system
         */
        printk(KERN_INFO "Memory:");
        num_physpages = 0;
        for_each_memblock(memory, reg) {
                unsigned long pages = memblock_region_memory_end_pfn(reg) -
                        memblock_region_memory_base_pfn(reg);
                num_physpages += pages;
                printk(" %ldMB", pages >> (20 - PAGE_SHIFT));
        }
        printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));

        printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n",
                nr_free_pages() << (PAGE_SHIFT-10),
                free_pages << (PAGE_SHIFT-10),
                reserved_pages << (PAGE_SHIFT-10),
                totalhigh_pages << (PAGE_SHIFT-10));

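/* Helpers for the layout dump below: base, top and size in kB/MB. */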
#define MLK(b, t) b, t, ((t) - (b)) >> 10
#define MLM(b, t) b, t, ((t) - (b)) >> 20
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)

        printk(KERN_NOTICE "Virtual kernel memory layout:\n"
                        "    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HAVE_TCM
                        "    DTCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                        "    ITCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
                        "    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                        "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                        "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#ifdef CONFIG_HIGHMEM
                        "    pkmap   : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
                        "    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                        "      .text : 0x%p" " - 0x%p" "   (%4d kB)\n"
                        "      .init : 0x%p" " - 0x%p" "   (%4d kB)\n"
                        "      .data : 0x%p" " - 0x%p" "   (%4d kB)\n"
                        "       .bss : 0x%p" " - 0x%p" "   (%4d kB)\n",

                        MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
                                (PAGE_SIZE)),
#ifdef CONFIG_HAVE_TCM
                        MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
                        MLK(ITCM_OFFSET, (unsigned long) itcm_end),
#endif
                        MLK(FIXADDR_START, FIXADDR_TOP),
                        MLM(VMALLOC_START, VMALLOC_END),
                        MLM(PAGE_OFFSET, (unsigned long)high_memory),
#ifdef CONFIG_HIGHMEM
                        MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
                                (PAGE_SIZE)),
#endif
                        MLM(MODULES_VADDR, MODULES_END),

                        MLK_ROUNDUP(_text, _etext),
                        MLK_ROUNDUP(__init_begin, __init_end),
                        MLK_ROUNDUP(_sdata, _edata),
                        MLK_ROUNDUP(__bss_start, __bss_stop));

#undef MLK
#undef MLM
#undef MLK_ROUNDUP

        /*
         * Check boundaries twice: Some fundamental inconsistencies can
         * be detected at build time already.
         */
#ifdef CONFIG_MMU
        BUILD_BUG_ON(TASK_SIZE                          > MODULES_VADDR);
        BUG_ON(TASK_SIZE                                > MODULES_VADDR);
#endif

#ifdef CONFIG_HIGHMEM
        BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
        BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE      > PAGE_OFFSET);
#endif

        if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
                extern int sysctl_overcommit_memory;
                /*
                 * On a machine this small we won't get
                 * anywhere without overcommit, so turn
                 * it on by default.
                 */
                sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
        }
}

void free_initmem(void)
{
#ifdef CONFIG_HAVE_TCM
        extern char __tcm_start, __tcm_end;

        poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
        totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)),
                                    __phys_to_pfn(__pa(&__tcm_end)),
                                    "TCM link");
#endif

        poison_init_mem(__init_begin, __init_end - __init_begin);
        if (!machine_is_integrator() && !machine_is_cintegrator())
                totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)),
                                            __phys_to_pfn(__pa(__init_end)),
                                            "init");
}

#ifdef CONFIG_BLK_DEV_INITRD

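/*
 * "keepinitrd" on the command line prevents the initrd memory from
 * being freed and reused after boot.
 */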
static int keep_initrd;

void free_initrd_mem(unsigned long start, unsigned long end)
{
        if (!keep_initrd) {
                poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
                totalram_pages += free_area(__phys_to_pfn(__pa(start)),
                                            __phys_to_pfn(__pa(end)),
                                            "initrd");
        }
}

static int __init keepinitrd_setup(char *__unused)
{
        keep_initrd = 1;
        return 1;
}

__setup("keepinitrd", keepinitrd_setup);
#endif