mm: change isolate mode from #define to bitwise type
author Minchan Kim <minchan.kim@gmail.com>
Wed, 5 Oct 2011 00:42:45 +0000 (11:42 +1100)
committer Stephen Rothwell <sfr@canb.auug.org.au>
Tue, 11 Oct 2011 08:43:39 +0000 (19:43 +1100)
Replace the ISOLATE_XXX macros with a bitwise isolate_mode_t type.  Plain
macros are not recommended here: they are not type-safe and they make
debugging harder, since the symbols cannot be passed through to the debugger.

Quote from Johannes
" Hmm, it would probably be cleaner to fully convert the isolation mode
into independent flags.  INACTIVE, ACTIVE, BOTH is currently a
tri-state among flags, which is a bit ugly."

This patch moves the isolate mode definitions from swap.h to mmzone.h so that
they can be used by memcontrol.h.
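
For illustration (not part of the patch): with the bitwise type, callers build
the mode from independent single-bit flags instead of picking the old
ISOLATE_BOTH tri-state value.  A minimal sketch, where "lumpy" stands in for
the sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM test in shrink_inactive_list():

	isolate_mode_t mode = ISOLATE_INACTIVE;	/* always take inactive pages */

	if (lumpy)				/* lumpy reclaim wants active pages too */
		mode |= ISOLATE_ACTIVE;		/* previously expressed as ISOLATE_BOTH */

	__isolate_lru_page(page, mode, file);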

Signed-off-by: Minchan Kim <minchan.kim@gmail.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Documentation/trace/postprocess/trace-vmscan-postprocess.pl
include/linux/memcontrol.h
include/linux/mmzone.h
include/linux/swap.h
include/trace/events/vmscan.h
mm/compaction.c
mm/memcontrol.c
mm/vmscan.c

diff --git a/Documentation/trace/postprocess/trace-vmscan-postprocess.pl b/Documentation/trace/postprocess/trace-vmscan-postprocess.pl
index 12cecc83cd91c658c71524bba59762b83f810829..4a37c4759cd231f72a8b5f253deec9fdec334886 100644
@@ -379,10 +379,10 @@ EVENT_PROCESS:
 
                        # To closer match vmstat scanning statistics, only count isolate_both
                        # and isolate_inactive as scanning. isolate_active is rotation
-                       # isolate_inactive == 0
-                       # isolate_active   == 1
-                       # isolate_both     == 2
-                       if ($isolate_mode != 1) {
+                       # isolate_inactive == 1
+                       # isolate_active   == 2
+                       # isolate_both     == 3
+                       if ($isolate_mode != 2) {
                                $perprocesspid{$process_pid}->{HIGH_NR_SCANNED} += $nr_scanned;
                        }
                        $perprocesspid{$process_pid}->{HIGH_NR_CONTIG_DIRTY} += $nr_contig_dirty;
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 343bd7661f2ac648360daf5ca96a1c87ee24567d..ac797fa03ef83503668c0cf56c16e18004bc50c4 100644
@@ -35,7 +35,8 @@ enum mem_cgroup_page_stat_item {
 extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
                                        struct list_head *dst,
                                        unsigned long *scanned, int order,
-                                       int mode, struct zone *z,
+                                       isolate_mode_t mode,
+                                       struct zone *z,
                                        struct mem_cgroup *mem_cont,
                                        int active, int file);
 
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index be1ac8d7789be37b21dd5fdf72832aea621926bd..4bf3b1eed05aa8dfee8dd07fe7cf359164223aa7 100644
@@ -164,6 +164,14 @@ static inline int is_unevictable_lru(enum lru_list l)
 #define LRU_ALL_EVICTABLE (LRU_ALL_FILE | LRU_ALL_ANON)
 #define LRU_ALL             ((1 << NR_LRU_LISTS) - 1)
 
+/* Isolate inactive pages */
+#define ISOLATE_INACTIVE       ((__force isolate_mode_t)0x1)
+/* Isolate active pages */
+#define ISOLATE_ACTIVE         ((__force isolate_mode_t)0x2)
+
+/* LRU Isolation modes. */
+typedef unsigned __bitwise__ isolate_mode_t;
+
 enum zone_watermarks {
        WMARK_MIN,
        WMARK_LOW,
diff --git a/include/linux/swap.h b/include/linux/swap.h
index ccc1160914cfa892cf0c587012ffd25ac4993188..c02de0f3a0cbd284731f80badc0f54ec8a86c413 100644
@@ -247,15 +247,10 @@ static inline void lru_cache_add_file(struct page *page)
        __lru_cache_add(page, LRU_INACTIVE_FILE);
 }
 
-/* LRU Isolation modes. */
-#define ISOLATE_INACTIVE 0     /* Isolate inactive pages. */
-#define ISOLATE_ACTIVE 1       /* Isolate active pages. */
-#define ISOLATE_BOTH 2         /* Isolate both active and inactive pages. */
-
 /* linux/mm/vmscan.c */
 extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
                                        gfp_t gfp_mask, nodemask_t *mask);
-extern int __isolate_lru_page(struct page *page, int mode, int file);
+extern int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file);
 extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
                                                  gfp_t gfp_mask, bool noswap);
 extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index 36851f7f13daf558107d0df16052f5d8939bd1d1..edc4b3d25a2d4e38917c83554eccd23a32fc8815 100644
@@ -266,7 +266,7 @@ DECLARE_EVENT_CLASS(mm_vmscan_lru_isolate_template,
                unsigned long nr_lumpy_taken,
                unsigned long nr_lumpy_dirty,
                unsigned long nr_lumpy_failed,
-               int isolate_mode),
+               isolate_mode_t isolate_mode),
 
        TP_ARGS(order, nr_requested, nr_scanned, nr_taken, nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed, isolate_mode),
 
@@ -278,7 +278,7 @@ DECLARE_EVENT_CLASS(mm_vmscan_lru_isolate_template,
                __field(unsigned long, nr_lumpy_taken)
                __field(unsigned long, nr_lumpy_dirty)
                __field(unsigned long, nr_lumpy_failed)
-               __field(int, isolate_mode)
+               __field(isolate_mode_t, isolate_mode)
        ),
 
        TP_fast_assign(
@@ -312,7 +312,7 @@ DEFINE_EVENT(mm_vmscan_lru_isolate_template, mm_vmscan_lru_isolate,
                unsigned long nr_lumpy_taken,
                unsigned long nr_lumpy_dirty,
                unsigned long nr_lumpy_failed,
-               int isolate_mode),
+               isolate_mode_t isolate_mode),
 
        TP_ARGS(order, nr_requested, nr_scanned, nr_taken, nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed, isolate_mode)
 
@@ -327,7 +327,7 @@ DEFINE_EVENT(mm_vmscan_lru_isolate_template, mm_vmscan_memcg_isolate,
                unsigned long nr_lumpy_taken,
                unsigned long nr_lumpy_dirty,
                unsigned long nr_lumpy_failed,
-               int isolate_mode),
+               isolate_mode_t isolate_mode),
 
        TP_ARGS(order, nr_requested, nr_scanned, nr_taken, nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed, isolate_mode)
 
diff --git a/mm/compaction.c b/mm/compaction.c
index b2977a5d659a51f5eb5f928605329d8643168136..47f717fa42334552f6d57add946961549b734801 100644
@@ -349,7 +349,8 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
                }
 
                /* Try isolate the page */
-               if (__isolate_lru_page(page, ISOLATE_BOTH, 0) != 0)
+               if (__isolate_lru_page(page,
+                               ISOLATE_ACTIVE|ISOLATE_INACTIVE, 0) != 0)
                        continue;
 
                VM_BUG_ON(PageTransCompound(page));
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index cf9f614568b0377662be4cc528341a9f83e699ba..1364b5e8baa80d86bc7a4d6cf4b11c34d53f8813 100644
@@ -1186,7 +1186,8 @@ mem_cgroup_get_reclaim_stat_from_page(struct page *page)
 unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
                                        struct list_head *dst,
                                        unsigned long *scanned, int order,
-                                       int mode, struct zone *z,
+                                       isolate_mode_t mode,
+                                       struct zone *z,
                                        struct mem_cgroup *mem_cont,
                                        int active, int file)
 {
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 16f704ac5a5cb6c43f543900d868e553dec26bcd..d56848fd96a472676c5437a0f5df587f719861d8 100644
@@ -1012,23 +1012,27 @@ keep_lumpy:
  *
  * returns 0 on success, -ve errno on failure.
  */
-int __isolate_lru_page(struct page *page, int mode, int file)
+int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file)
 {
+       bool all_lru_mode;
        int ret = -EINVAL;
 
        /* Only take pages on the LRU. */
        if (!PageLRU(page))
                return ret;
 
+       all_lru_mode = (mode & (ISOLATE_ACTIVE|ISOLATE_INACTIVE)) ==
+               (ISOLATE_ACTIVE|ISOLATE_INACTIVE);
+
        /*
         * When checking the active state, we need to be sure we are
         * dealing with comparible boolean values.  Take the logical not
         * of each.
         */
-       if (mode != ISOLATE_BOTH && (!PageActive(page) != !mode))
+       if (!all_lru_mode && !PageActive(page) != !(mode & ISOLATE_ACTIVE))
                return ret;
 
-       if (mode != ISOLATE_BOTH && page_is_file_cache(page) != file)
+       if (!all_lru_mode && !!page_is_file_cache(page) != file)
                return ret;
 
        /*
@@ -1076,7 +1080,8 @@ int __isolate_lru_page(struct page *page, int mode, int file)
  */
 static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
                struct list_head *src, struct list_head *dst,
-               unsigned long *scanned, int order, int mode, int file)
+               unsigned long *scanned, int order, isolate_mode_t mode,
+               int file)
 {
        unsigned long nr_taken = 0;
        unsigned long nr_lumpy_taken = 0;
@@ -1201,8 +1206,8 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 static unsigned long isolate_pages_global(unsigned long nr,
                                        struct list_head *dst,
                                        unsigned long *scanned, int order,
-                                       int mode, struct zone *z,
-                                       int active, int file)
+                                       isolate_mode_t mode,
+                                       struct zone *z, int active, int file)
 {
        int lru = LRU_BASE;
        if (active)
@@ -1448,6 +1453,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
        unsigned long nr_taken;
        unsigned long nr_anon;
        unsigned long nr_file;
+       isolate_mode_t reclaim_mode = ISOLATE_INACTIVE;
 
        while (unlikely(too_many_isolated(zone, file, sc))) {
                congestion_wait(BLK_RW_ASYNC, HZ/10);
@@ -1458,15 +1464,15 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
        }
 
        set_reclaim_mode(priority, sc, false);
+       if (sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM)
+               reclaim_mode |= ISOLATE_ACTIVE;
+
        lru_add_drain();
        spin_lock_irq(&zone->lru_lock);
 
        if (scanning_global_lru(sc)) {
-               nr_taken = isolate_pages_global(nr_to_scan,
-                       &page_list, &nr_scanned, sc->order,
-                       sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM ?
-                                       ISOLATE_BOTH : ISOLATE_INACTIVE,
-                       zone, 0, file);
+               nr_taken = isolate_pages_global(nr_to_scan, &page_list,
+                       &nr_scanned, sc->order, reclaim_mode, zone, 0, file);
                zone->pages_scanned += nr_scanned;
                if (current_is_kswapd())
                        __count_zone_vm_events(PGSCAN_KSWAPD, zone,
@@ -1475,12 +1481,9 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
                        __count_zone_vm_events(PGSCAN_DIRECT, zone,
                                               nr_scanned);
        } else {
-               nr_taken = mem_cgroup_isolate_pages(nr_to_scan,
-                       &page_list, &nr_scanned, sc->order,
-                       sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM ?
-                                       ISOLATE_BOTH : ISOLATE_INACTIVE,
-                       zone, sc->mem_cgroup,
-                       0, file);
+               nr_taken = mem_cgroup_isolate_pages(nr_to_scan, &page_list,
+                       &nr_scanned, sc->order, reclaim_mode, zone,
+                       sc->mem_cgroup, 0, file);
                /*
                 * mem_cgroup_isolate_pages() keeps track of
                 * scanned pages on its own.