From 29cfd54de3204fe1b8b6a4ea744a21b43f3e00fa Mon Sep 17 00:00:00 2001 From: Minchan Kim Date: Fri, 7 Sep 2012 10:23:56 +1000 Subject: [PATCH] mm: support MIGRATE_DISCARD Introduce MIGRATE_DISCARD mode in migration. It drops *clean cache pages* instead of migrating them, so that migration latency can be reduced by avoiding (memcpy + page remapping). It's useful for CMA because migration latency matters more than evicting the working set of background processes. In addition, it needs fewer free pages as migration targets, so it can avoid reclaiming memory to get free pages, which is another factor that increases latency. Signed-off-by: Minchan Kim Cc: Marek Szyprowski Cc: Michal Nazarewicz Cc: Rik van Riel Cc: Mel Gorman Signed-off-by: Andrew Morton --- include/linux/migrate_mode.h | 7 ++++++ mm/migrate.c | 41 +++++++++++++++++++++++++++++++++--- mm/page_alloc.c | 2 +- 3 files changed, 46 insertions(+), 4 deletions(-) diff --git a/include/linux/migrate_mode.h b/include/linux/migrate_mode.h index 8848cadefb36..4eb1646d072f 100644 --- a/include/linux/migrate_mode.h +++ b/include/linux/migrate_mode.h @@ -14,6 +14,13 @@ */ #define MIGRATE_SYNC ((__force migrate_mode_t)0x4) +/* + * MIGRATE_DISCARD will discard clean cache pages instead of migrating them. + * MIGRATE_ASYNC, MIGRATE_SYNC_LIGHT, MIGRATE_SYNC shouldn't be used + * together with OR flag in current implementation. 
+ */ +#define MIGRATE_DISCARD ((__force migrate_mode_t)0x8) + typedef unsigned __bitwise__ migrate_mode_t; #endif /* MIGRATE_MODE_H_INCLUDED */ diff --git a/mm/migrate.c b/mm/migrate.c index 28d464be0dd9..2de7709455f1 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -678,6 +678,19 @@ static int move_to_new_page(struct page *newpage, struct page *page, return rc; } +static int discard_page(struct page *page) +{ + int ret = -EAGAIN; + + struct address_space *mapping = page_mapping(page); + if (page_has_private(page)) + if (!try_to_release_page(page, GFP_KERNEL)) + return ret; + if (remove_mapping(mapping, page)) + ret = 0; + return ret; +} + static int __unmap_and_move(struct page *page, struct page *newpage, int force, bool offlining, migrate_mode_t mode) { @@ -685,6 +698,9 @@ static int __unmap_and_move(struct page *page, struct page *newpage, int remap_swapcache = 1; struct mem_cgroup *mem; struct anon_vma *anon_vma = NULL; + enum ttu_flags ttu_flags; + bool discard_mode = false; + bool file = false; if (!trylock_page(page)) { if (!force || (mode & MIGRATE_ASYNC)) @@ -799,12 +815,31 @@ static int __unmap_and_move(struct page *page, struct page *newpage, goto skip_unmap; } + file = page_is_file_cache(page); + ttu_flags = TTU_IGNORE_ACCESS; +retry: + if (!(mode & MIGRATE_DISCARD) || !file || PageDirty(page)) + ttu_flags |= (TTU_MIGRATION | TTU_IGNORE_MLOCK); + else + discard_mode = true; + /* Establish migration ptes or remove ptes */ - try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS); + rc = try_to_unmap(page, ttu_flags); skip_unmap: - if (!page_mapped(page)) - rc = move_to_new_page(newpage, page, remap_swapcache, mode); + if (rc == SWAP_SUCCESS) { + if (!discard_mode) { + rc = move_to_new_page(newpage, page, + remap_swapcache, mode); + } else { + rc = discard_page(page); + goto uncharge; + } + } else if (rc == SWAP_MLOCK && discard_mode) { + mode &= ~MIGRATE_DISCARD; + discard_mode = false; + goto retry; + } if (rc && remap_swapcache) 
remove_migration_ptes(page, page); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index a4ff74e67ec2..8eb236bc8261 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -5701,7 +5701,7 @@ static int __alloc_contig_migrate_range(unsigned long start, unsigned long end) ret = migrate_pages(&cc.migratepages, __alloc_contig_migrate_alloc, - 0, false, MIGRATE_SYNC); + 0, false, MIGRATE_SYNC|MIGRATE_DISCARD); } putback_lru_pages(&cc.migratepages); -- 2.39.5