git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
mm: accurately calculate zone->managed_pages for highmem zones
author Jiang Liu <liuj97@gmail.com>
Wed, 19 Jun 2013 00:06:18 +0000 (10:06 +1000)
committer Stephen Rothwell <sfr@canb.auug.org.au>
Wed, 19 Jun 2013 07:13:12 +0000 (17:13 +1000)
Commit "mm: introduce new field 'managed_pages' to struct zone" assumes
that all highmem pages will be freed into the buddy system by function
mem_init().  But that's not always true, some architectures may reserve
some highmem pages during boot.  For example PPC may allocate highmem
pages for giant HugeTLB pages, and several architectures have code to
check the PageReserved flag to exclude highmem pages allocated during boot
when freeing highmem pages into the buddy system.

So treat highmem pages in the same way as normal pages, that is to:
1) reset zone->managed_pages to zero in mem_init().
2) recalculate managed_pages when freeing pages into the buddy system.

Signed-off-by: Jiang Liu <jiang.liu@huawei.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Joonsoo Kim <js1304@gmail.com>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Kamezawa Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: <sworddragon2@aol.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: Jianguo Wu <wujianguo@huawei.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Michel Lespinasse <walken@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Tang Chen <tangchen@cn.fujitsu.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Wen Congyang <wency@cn.fujitsu.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
Cc: Russell King <rmk@arm.linux.org.uk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
arch/metag/mm/init.c
arch/x86/mm/highmem_32.c
include/linux/bootmem.h
mm/bootmem.c
mm/nobootmem.c
mm/page_alloc.c

index 5e2238dd72e0193eb2f96f513c07226d670f1ca4..d7595f58fad50316d8849023712039a6a7312506 100644 (file)
@@ -380,6 +380,12 @@ void __init mem_init(void)
 
 #ifdef CONFIG_HIGHMEM
        unsigned long tmp;
+
+       /*
+        * Explicitly reset zone->managed_pages because highmem pages are
+        * freed before calling free_all_bootmem_node();
+        */
+       reset_all_zones_managed_pages();
        for (tmp = highstart_pfn; tmp < highend_pfn; tmp++)
                free_highmem_page(pfn_to_page(tmp));
        num_physpages += totalhigh_pages;
index 252b8f5489ba6e1b84152545d211ba31121ffb7f..4500142bc4aa46429cb2be41a7ee3407426f6155 100644 (file)
@@ -1,6 +1,7 @@
 #include <linux/highmem.h>
 #include <linux/module.h>
 #include <linux/swap.h> /* for totalram_pages */
+#include <linux/bootmem.h>
 
 void *kmap(struct page *page)
 {
@@ -121,6 +122,11 @@ void __init set_highmem_pages_init(void)
        struct zone *zone;
        int nid;
 
+       /*
+        * Explicitly reset zone->managed_pages because set_highmem_pages_init()
+        * is invoked before free_all_bootmem()
+        */
+       reset_all_zones_managed_pages();
        for_each_zone(zone) {
                unsigned long zone_start_pfn, zone_end_pfn;
 
index 5f0b0e1f7c08abab8c8701751fe0948667a12149..0e48c3221d82ae371907f00334e6074dcbfb4461 100644 (file)
@@ -46,6 +46,7 @@ extern unsigned long init_bootmem(unsigned long addr, unsigned long memend);
 
 extern unsigned long free_all_bootmem_node(pg_data_t *pgdat);
 extern unsigned long free_all_bootmem(void);
+extern void reset_all_zones_managed_pages(void);
 
 extern void free_bootmem_node(pg_data_t *pgdat,
                              unsigned long addr,
index 2b0bcb019ec222b8d56be811866e421a6287f13b..eb792323187bc8b936ca0147ecf158f0fe81ab27 100644 (file)
@@ -241,20 +241,26 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
        return count;
 }
 
-static void reset_node_lowmem_managed_pages(pg_data_t *pgdat)
+static int reset_managed_pages_done __initdata;
+
+static inline void __init reset_node_managed_pages(pg_data_t *pgdat)
 {
        struct zone *z;
 
-       /*
-        * In free_area_init_core(), highmem zone's managed_pages is set to
-        * present_pages, and bootmem allocator doesn't allocate from highmem
-        * zones. So there's no need to recalculate managed_pages because all
-        * highmem pages will be managed by the buddy system. Here highmem
-        * zone also includes highmem movable zone.
-        */
+       if (reset_managed_pages_done)
+               return;
+
        for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
-               if (!is_highmem(z))
-                       z->managed_pages = 0;
+               z->managed_pages = 0;
+}
+
+void __init reset_all_zones_managed_pages(void)
+{
+       struct pglist_data *pgdat;
+
+       for_each_online_pgdat(pgdat)
+               reset_node_managed_pages(pgdat);
+       reset_managed_pages_done = 1;
 }
 
 /**
@@ -266,7 +272,7 @@ static void reset_node_lowmem_managed_pages(pg_data_t *pgdat)
 unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
 {
        register_page_bootmem_info_node(pgdat);
-       reset_node_lowmem_managed_pages(pgdat);
+       reset_node_managed_pages(pgdat);
        return free_all_bootmem_core(pgdat->bdata);
 }
 
@@ -279,10 +285,8 @@ unsigned long __init free_all_bootmem(void)
 {
        unsigned long total_pages = 0;
        bootmem_data_t *bdata;
-       struct pglist_data *pgdat;
 
-       for_each_online_pgdat(pgdat)
-               reset_node_lowmem_managed_pages(pgdat);
+       reset_all_zones_managed_pages();
 
        list_for_each_entry(bdata, &bdata_list, list)
                total_pages += free_all_bootmem_core(bdata);
index bdd3fa2fc73b8395fa78979874eb46c2e3ab20c5..0ae8d91365af26de7d5d2cfc541f40a32c868fdb 100644 (file)
@@ -137,20 +137,25 @@ static unsigned long __init free_low_memory_core_early(void)
        return count;
 }
 
-static void reset_node_lowmem_managed_pages(pg_data_t *pgdat)
+static int reset_managed_pages_done __initdata;
+
+static inline void __init reset_node_managed_pages(pg_data_t *pgdat)
 {
        struct zone *z;
 
-       /*
-        * In free_area_init_core(), highmem zone's managed_pages is set to
-        * present_pages, and bootmem allocator doesn't allocate from highmem
-        * zones. So there's no need to recalculate managed_pages because all
-        * highmem pages will be managed by the buddy system. Here highmem
-        * zone also includes highmem movable zone.
-        */
+       if (reset_managed_pages_done)
+               return;
        for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
-               if (!is_highmem(z))
-                       z->managed_pages = 0;
+               z->managed_pages = 0;
+}
+
+void __init reset_all_zones_managed_pages(void)
+{
+       struct pglist_data *pgdat;
+
+       for_each_online_pgdat(pgdat)
+               reset_node_managed_pages(pgdat);
+       reset_managed_pages_done = 1;
 }
 
 /**
@@ -160,10 +165,7 @@ static void reset_node_lowmem_managed_pages(pg_data_t *pgdat)
  */
 unsigned long __init free_all_bootmem(void)
 {
-       struct pglist_data *pgdat;
-
-       for_each_online_pgdat(pgdat)
-               reset_node_lowmem_managed_pages(pgdat);
+       reset_all_zones_managed_pages();
 
        /*
         * We need to use MAX_NUMNODES instead of NODE_DATA(0)->node_id
index f22542f6dc1280d7911476c15b418e9630b5a2fa..22438eba00b63fa64e12f759f0739c9e94313684 100644 (file)
@@ -5232,6 +5232,7 @@ void free_highmem_page(struct page *page)
 {
        __free_reserved_page(page);
        totalram_pages++;
+       page_zone(page)->managed_pages++;
        totalhigh_pages++;
 }
 #endif