sparc64: Fix cmdline_memory_size handling bugs.
author    David S. Miller <davem@davemloft.net>
          Wed, 3 Sep 2008 08:21:23 +0000 (01:21 -0700)
committer Greg Kroah-Hartman <gregkh@suse.de>
          Mon, 8 Sep 2008 11:44:29 +0000 (04:44 -0700)
[ Upstream commit f2b6079464fc73cf12f08248180a618f05033a70 ]

First, lmb_enforce_memory_limit() interprets its argument
(mostly, heh) as a size limit, not an address limit.  So pass
the raw cmdline_memory_size value into it.  And we don't
need to check it against zero, lmb_enforce_memory_limit() does
that for us.
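
For reference, a simplified sketch of the size-limit semantics (the real
implementation lives in lib/lmb.c and differs in detail): the argument is
a total number of bytes to keep, and zero means "no limit", which is why
neither the phys_base offset nor the explicit zero check in the old caller
was needed.

/* Simplified sketch of lmb_enforce_memory_limit(); not the exact
 * upstream code.  Registered regions are trimmed so that at most
 * memory_limit bytes of RAM remain.
 */
void __init lmb_enforce_memory_limit(u64 memory_limit)
{
	u64 remaining = memory_limit;
	unsigned long i;

	if (!memory_limit)	/* zero means "no limit": nothing to trim */
		return;

	for (i = 0; i < lmb.memory.cnt; i++) {
		if (remaining > lmb.memory.region[i].size) {
			remaining -= lmb.memory.region[i].size;
			continue;
		}

		/* This region crosses the limit: shrink it, drop the rest. */
		lmb.memory.region[i].size = remaining;
		lmb.memory.cnt = i + 1;
		break;
	}
}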

Next, free_initmem() needs special handling when the kernel
command line trims the available memory.  The problem case is
if the trimmed out memory is where the kernel image itself
resides.

When that memory is trimmed out, we don't add those physical
ram areas to the sparsemem active ranges, amongst other things,
which means that this free_initmem() code will free up invalid
page structs, resulting in either crashes or hangs.

Just quick-fix this by not freeing initmem at all if "mem="
was given on the boot command line.
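
For context, cmdline_memory_size is filled in from the "mem=" boot option
via the usual early_param() mechanism, roughly as in the sketch below (the
exact sparc64 handler may differ slightly):

/* Sketch of the "mem=" parser that sets cmdline_memory_size; memparse()
 * accepts size suffixes, so e.g. "mem=512M" or "mem=2G" work as expected.
 */
static unsigned long cmdline_memory_size __initdata;

static int __init early_mem(char *p)
{
	if (!p)
		return 1;

	cmdline_memory_size = memparse(p, &p);
	return 0;
}
early_param("mem", early_mem);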

Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
arch/sparc64/mm/init.c

index c2094e19143561309e43d2ccf012dd890ada8cbe..387d3f666a56eb6c7f6cad87d905e4324ca57432 100644
@@ -1772,8 +1772,7 @@ void __init paging_init(void)
 
        find_ramdisk(phys_base);
 
-       if (cmdline_memory_size)
-               lmb_enforce_memory_limit(phys_base + cmdline_memory_size);
+       lmb_enforce_memory_limit(cmdline_memory_size);
 
        lmb_analyze();
        lmb_dump_all();
@@ -2010,6 +2009,15 @@ void __init mem_init(void)
 void free_initmem(void)
 {
        unsigned long addr, initend;
+       int do_free = 1;
+
+       /* If the physical memory maps were trimmed by kernel command
+        * line options, don't even try freeing this initmem stuff up.
+        * The kernel image could have been in the trimmed out region
+        * and if so the freeing below will free invalid page structs.
+        */
+       if (cmdline_memory_size)
+               do_free = 0;
 
        /*
         * The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes.
@@ -2024,13 +2032,16 @@ void free_initmem(void)
                        ((unsigned long) __va(kern_base)) -
                        ((unsigned long) KERNBASE));
                memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
-               p = virt_to_page(page);
 
-               ClearPageReserved(p);
-               init_page_count(p);
-               __free_page(p);
-               num_physpages++;
-               totalram_pages++;
+               if (do_free) {
+                       p = virt_to_page(page);
+
+                       ClearPageReserved(p);
+                       init_page_count(p);
+                       __free_page(p);
+                       num_physpages++;
+                       totalram_pages++;
+               }
        }
 }