From: Minchan Kim
Date: Wed, 28 Sep 2011 00:50:18 +0000 (+1000)
Subject: mm: compaction: compact unevictable pages
X-Git-Tag: next-20110929~2^2~150
X-Git-Url: https://git.karo-electronics.de/?a=commitdiff_plain;h=ca345a3e85b85ecde54965c30ffc2ebaf7fa8442;p=karo-tx-linux.git

mm: compaction: compact unevictable pages

Presently compaction does not handle mlocked pages because it uses
__isolate_lru_page(), which skips unevictable pages.  That behaviour made
sense when the function was used only by lumpy reclaim, where isolating
unevictable pages is pointless.  The situation has changed: compaction can
handle unevictable pages, and doing so helps form big contiguous blocks in
memory that is fragmented by many pages pinned with mlock.

I tested this patch with the following scenario.

1. A : allocate 80% anon pages in system
2. B : allocate 20% mlocked pages in system
   /* Maybe, mlocked pages are located in low pfn address */
3. kill A  /* high pfn addresses are freed */
4. echo 1 > /proc/sys/vm/compact_memory

old:
compact_blocks_moved 251
compact_pages_moved 44

new:
compact_blocks_moved 258
compact_pages_moved 412

Signed-off-by: Minchan Kim
Cc: Mel Gorman
Cc: Johannes Weiner
Reviewed-by: Rik van Riel
Signed-off-by: Andrew Morton <>
---

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 188cb2ffe8db..82b505e003ae 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -169,10 +169,12 @@ static inline int is_unevictable_lru(enum lru_list l)
 #define ISOLATE_INACTIVE	((__force isolate_mode_t)0x1)
 /* Isolate active pages */
 #define ISOLATE_ACTIVE		((__force isolate_mode_t)0x2)
+/* Isolate unevictable pages */
+#define ISOLATE_UNEVICTABLE	((__force isolate_mode_t)0x4)
 /* Isolate clean file */
-#define ISOLATE_CLEAN		((__force isolate_mode_t)0x4)
+#define ISOLATE_CLEAN		((__force isolate_mode_t)0x8)
 /* Isolate unmapped file */
-#define ISOLATE_UNMAPPED	((__force isolate_mode_t)0x8)
+#define ISOLATE_UNMAPPED	((__force isolate_mode_t)0x10)
 
 /* LRU Isolation modes. */
 typedef unsigned __bitwise__ isolate_mode_t;
diff --git a/mm/compaction.c b/mm/compaction.c
index a0e420207ebf..0e572d1b9889 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -261,7 +261,8 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
 	unsigned long last_pageblock_nr = 0, pageblock_nr;
 	unsigned long nr_scanned = 0, nr_isolated = 0;
 	struct list_head *migratelist = &cc->migratepages;
-	isolate_mode_t mode = ISOLATE_ACTIVE|ISOLATE_INACTIVE;
+	isolate_mode_t mode = ISOLATE_ACTIVE | ISOLATE_INACTIVE |
+				ISOLATE_UNEVICTABLE;
 
 	/* Do not scan outside zone boundaries */
 	low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b970f5c489fa..ec83496ff754 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1038,12 +1038,7 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file)
 	if (!all_lru_mode && !!page_is_file_cache(page) != file)
 		return ret;
 
-	/*
-	 * When this function is being called for lumpy reclaim, we
-	 * initially look into all LRU pages, active, inactive and
-	 * unevictable; only give shrink_page_list evictable pages.
-	 */
-	if (PageUnevictable(page))
+	if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE))
 		return ret;
 
 	ret = -EBUSY;
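
For reference, below is a minimal user-space sketch of the test scenario from
the changelog; it is not part of the patch.  It collapses the two processes A
and B into one program, stands in "kill A" with munmap(), and triggers
compaction through the documented /proc/sys/vm/compact_memory sysctl.  The
sysconf()-based sizing and the 80%/20% fractions taken from the changelog are
assumptions for illustration; running it needs root (for the sysctl and a
large enough RLIMIT_MEMLOCK) and a kernel built with CONFIG_COMPACTION=y, and
the fractions may need to be lowered to stay clear of the OOM killer.

/* Illustrative sketch only -- not part of the patch. */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

/* Touch every page so the anonymous memory is actually instantiated. */
static void *alloc_and_touch(size_t len)
{
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		exit(1);
	}
	memset(p, 0x5a, len);
	return p;
}

int main(void)
{
	size_t ram = (size_t)sysconf(_SC_PHYS_PAGES) *
		     (size_t)sysconf(_SC_PAGE_SIZE);
	size_t anon_len = ram / 5 * 4;	/* "A": ~80% anonymous pages */
	size_t pinned_len = ram / 5;	/* "B": ~20% mlocked pages   */
	int fd;

	/* 1. A: allocate anonymous pages. */
	void *anon = alloc_and_touch(anon_len);

	/* 2. B: allocate and pin pages with mlock(). */
	void *pinned = alloc_and_touch(pinned_len);
	if (mlock(pinned, pinned_len)) {
		perror("mlock");
		return 1;
	}

	/* 3. "kill A": release the unpinned allocation. */
	munmap(anon, anon_len);

	/* 4. echo 1 > /proc/sys/vm/compact_memory */
	fd = open("/proc/sys/vm/compact_memory", O_WRONLY);
	if (fd < 0) {
		perror("open /proc/sys/vm/compact_memory");
		return 1;
	}
	if (write(fd, "1\n", 2) != 2)
		perror("write");
	close(fd);

	/* Compare compact_blocks_moved/compact_pages_moved in /proc/vmstat
	 * before and after this run, as in the changelog's old/new numbers. */
	munlock(pinned, pinned_len);
	munmap(pinned, pinned_len);
	return 0;
}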