]> git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
memory-hotplug: fix pages missed by race rather than failing
authorMinchan Kim <minchan@kernel.org>
Thu, 13 Sep 2012 00:59:00 +0000 (10:59 +1000)
committerStephen Rothwell <sfr@canb.auug.org.au>
Thu, 20 Sep 2012 07:05:09 +0000 (17:05 +1000)
If a race between allocation and isolation happens during memory-hotplug offline,
some pages could be left on the MIGRATE_MOVABLE free_list even though the
pageblock's migratetype is MIGRATE_ISOLATE.

The race can be detected by get_freepage_migratetype in
__test_page_isolated_in_pageblock.  Currently, when it is detected, EBUSY gets
bubbled all the way up and the hotplug operation fails.

A better idea is, instead of returning and failing memory-hotremove, to move
the free page to the correct list at the time the race is detected.  This
improves the success ratio of the memory-hotremove operation, even though the
race is really rare.

Suggested by Mel Gorman.

Signed-off-by: Minchan Kim <minchan@kernel.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: Xishi Qiu <qiuxishi@huawei.com>
Cc: Wen Congyang <wency@cn.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/page-isolation.h
mm/page_alloc.c
mm/page_isolation.c

index 105077aa7685c61dbb66cabc8473302283a0e3a5..fca8c0a5c1883d6079d39d5e11a7ca0b9fc4bc39 100644 (file)
@@ -6,6 +6,10 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count);
 void set_pageblock_migratetype(struct page *page, int migratetype);
 int move_freepages_block(struct zone *zone, struct page *page,
                                int migratetype);
+int move_freepages(struct zone *zone,
+                         struct page *start_page, struct page *end_page,
+                         int migratetype);
+
 /*
  * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE.
  * If specified range includes migrate types other than MOVABLE or CMA,
index d97ad0e72f261c1ce548a9eeb699f1198c8cf504..dffa91eb0425faf143c8f7b9cbdebb413f0e5801 100644 (file)
@@ -918,7 +918,7 @@ static int fallbacks[MIGRATE_TYPES][4] = {
  * Note that start_page and end_pages are not aligned on a pageblock
  * boundary. If alignment is required, use move_freepages_block()
  */
-static int move_freepages(struct zone *zone,
+int move_freepages(struct zone *zone,
                          struct page *start_page, struct page *end_page,
                          int migratetype)
 {
index 7ba740551d8ced2750af27906bff97bf7b69d1ad..a42fa8d49357927b75f32d690de9325a18541a4d 100644 (file)
@@ -194,8 +194,19 @@ __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn)
                }
                page = pfn_to_page(pfn);
                if (PageBuddy(page)) {
-                       if (get_freepage_migratetype(page) != MIGRATE_ISOLATE)
-                               break;
+                       /*
+                        * If race between isolatation and allocation happens,
+                        * some free pages could be in MIGRATE_MOVABLE list
+                        * although pageblock's migratation type of the page
+                        * is MIGRATE_ISOLATE. Catch it and move the page into
+                        * MIGRATE_ISOLATE list.
+                        */
+                       if (get_freepage_migratetype(page) != MIGRATE_ISOLATE) {
+                               struct page *end_page = page +
+                                               (1 << page_order(page)) - 1;
+                               move_freepages(page_zone(page), page, end_page,
+                                               MIGRATE_ISOLATE);
+                       }
                        pfn += 1 << page_order(page);
                }
                else if (page_count(page) == 0 &&