mm: use a dedicated lock to protect totalram_pages and zone->managed_pages
Author:     Jiang Liu <liuj97@gmail.com>
AuthorDate: Wed, 3 Jul 2013 22:03:14 +0000 (15:03 -0700)
Commit:     Linus Torvalds <torvalds@linux-foundation.org>
CommitDate: Wed, 3 Jul 2013 23:07:33 +0000 (16:07 -0700)
Currently lock_memory_hotplug()/unlock_memory_hotplug() are used to
protect totalram_pages and zone->managed_pages.  Besides the memory
hotplug driver, totalram_pages and zone->managed_pages may also be
modified at runtime by other drivers, such as the Xen balloon and
virtio_balloon drivers.  For those cases the memory hotplug lock is too
heavyweight, so introduce a dedicated lock to protect totalram_pages and
zone->managed_pages.

We now have simplified locking rules for totalram_pages and
zone->managed_pages:

1) no locking for read accesses, because the counters are unsigned long
   and word-sized reads cannot be torn (readers tolerate slight drift).
2) no locking for write accesses at boot time in single-threaded context.
3) serialize write accesses at runtime by acquiring the dedicated
   managed_page_count_lock (see the sketch after this list).
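
As a rough illustration of rule 3, a balloon-style driver that removes
pages from the system or gives them back at runtime would go through
adjust_managed_page_count() instead of touching totalram_pages or
zone->managed_pages directly, and without taking the memory hotplug lock.
The driver and its function names below are hypothetical; the only
interface taken from this patch is adjust_managed_page_count(), and per
the note further down it is not exported to modules for now, so code like
this would have to be built in:

#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/mm.h>

/* Hypothetical balloon-style driver: "inflating" steals a page from the
 * system, "deflating" gives it back.  Either way the counters are updated
 * only via adjust_managed_page_count(), which serializes on
 * managed_page_count_lock internally (locking rule 3 above).
 */
static int example_balloon_inflate(struct list_head *balloon_pages)
{
	struct page *page = alloc_page(GFP_HIGHUSER_MOVABLE);

	if (!page)
		return -ENOMEM;

	list_add(&page->lru, balloon_pages);
	/* One page fewer is usable by the rest of the system. */
	adjust_managed_page_count(page, -1);
	return 0;
}

static void example_balloon_deflate(struct page *page)
{
	list_del(&page->lru);
	/* The page becomes usable again; bump the counters back up. */
	adjust_managed_page_count(page, 1);
	__free_page(page);
}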

Also adjust zone->managed_pages when freeing reserved pages into the
buddy system, so that totalram_pages and zone->managed_pages stay
consistent (see the sketch below).
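
A minimal sketch of what that means for the helper that frees a reserved
page, consistent with the __free_reserved_page() helper visible in the
mm.h hunk below; the exact shape of the in-tree helper may differ:

#include <linux/mm.h>

/* Sketch of the "free a reserved page into the buddy system" path.
 * __free_reserved_page() clears PG_reserved and hands the page to the
 * buddy allocator; adjust_managed_page_count() then bumps both
 * zone->managed_pages and totalram_pages under managed_page_count_lock
 * so the two stay consistent.
 */
static inline void example_free_reserved_page(struct page *page)
{
	__free_reserved_page(page);
	adjust_managed_page_count(page, 1);
}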

[akpm@linux-foundation.org: don't export adjust_managed_page_count to modules (for now)]
Signed-off-by: Jiang Liu <jiang.liu@huawei.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Michel Lespinasse <walken@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: <sworddragon2@aol.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: Jianguo Wu <wujianguo@huawei.com>
Cc: Joonsoo Kim <js1304@gmail.com>
Cc: Kamezawa Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Tang Chen <tangchen@cn.fujitsu.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Wen Congyang <wency@cn.fujitsu.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Russell King <rmk@arm.linux.org.uk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/mm.h
include/linux/mmzone.h
mm/page_alloc.c

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 083cc0ba23848db7d5b35670dd3dffccd96e01fb..4310f80ce956ba5a1f245214df33d34d17449267 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1313,6 +1313,7 @@ extern void free_initmem(void);
  */
 extern unsigned long free_reserved_area(void *start, void *end,
                                        int poison, char *s);
+
 #ifdef CONFIG_HIGHMEM
 /*
  * Free a highmem page into the buddy system, adjusting totalhigh_pages
@@ -1321,10 +1322,7 @@ extern unsigned long free_reserved_area(void *start, void *end,
 extern void free_highmem_page(struct page *page);
 #endif
 
-static inline void adjust_managed_page_count(struct page *page, long count)
-{
-       totalram_pages += count;
-}
+extern void adjust_managed_page_count(struct page *page, long count);
 
 /* Free the reserved page into the buddy system, so it gets managed. */
 static inline void __free_reserved_page(struct page *page)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index e511f9429f1e3813a15864256c5813c27524a107..09d381b71fd8a10a68e88ed2b4ab3599b1277b1e 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -474,10 +474,16 @@ struct zone {
         * frequently read in proximity to zone->lock.  It's good to
         * give them a chance of being in the same cacheline.
         *
-        * Write access to present_pages and managed_pages at runtime should
-        * be protected by lock_memory_hotplug()/unlock_memory_hotplug().
-        * Any reader who can't tolerant drift of present_pages and
-        * managed_pages should hold memory hotplug lock to get a stable value.
+        * Write access to present_pages at runtime should be protected by
+        * lock_memory_hotplug()/unlock_memory_hotplug().  Any reader who can't
+        * tolerate drift of present_pages should hold memory hotplug lock to
+        * get a stable value.
+        *
+        * Read access to managed_pages should be safe because it's unsigned
+        * long. Write accesses to zone->managed_pages and totalram_pages are
+        * protected by managed_page_count_lock at runtime. Ideally only
+        * adjust_managed_page_count() should be used instead of directly
+        * touching zone->managed_pages and totalram_pages.
         */
        unsigned long           spanned_pages;
        unsigned long           present_pages;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 22438eba00b63fa64e12f759f0739c9e94313684..93f292a60cb03d0f799aed15ce94b761e4d3715f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -103,6 +103,9 @@ nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
 };
 EXPORT_SYMBOL(node_states);
 
+/* Protect totalram_pages and zone->managed_pages */
+static DEFINE_SPINLOCK(managed_page_count_lock);
+
 unsigned long totalram_pages __read_mostly;
 unsigned long totalreserve_pages __read_mostly;
 /*
@@ -5206,6 +5209,14 @@ early_param("movablecore", cmdline_parse_movablecore);
 
 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
 
+void adjust_managed_page_count(struct page *page, long count)
+{
+       spin_lock(&managed_page_count_lock);
+       page_zone(page)->managed_pages += count;
+       totalram_pages += count;
+       spin_unlock(&managed_page_count_lock);
+}
+
 unsigned long free_reserved_area(void *start, void *end, int poison, char *s)
 {
        void *pos;