mm: vmscan: remove reclaim_mode_t
author     Mel Gorman <mgorman@suse.de>
           Tue, 29 May 2012 22:06:20 +0000 (15:06 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Tue, 29 May 2012 23:22:19 +0000 (16:22 -0700)
There is little motivation for reclaim_mode_t once RECLAIM_MODE_[A]SYNC
and lumpy reclaim have been removed.  This patch gets rid of
reclaim_mode_t as well and improves the documentation about what
reclaim/compaction is and when it is triggered.
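
In short: reclaim/compaction engages only for high-order allocations, either
immediately for costly orders or once the scanning priority has dropped far
enough to indicate sustained memory pressure.  A minimal standalone sketch of
that predicate, with the usual kernel constants inlined so it compiles on its
own (PAGE_ALLOC_COSTLY_ORDER is 3 and DEF_PRIORITY is 12 in kernels of this
era; wants_reclaim_compaction() is an illustrative name, not the patched
in_reclaim_compaction()):

	#include <stdbool.h>

	#define PAGE_ALLOC_COSTLY_ORDER	3	/* kernel default */
	#define DEF_PRIORITY		12	/* kernel default */

	static bool wants_reclaim_compaction(bool compaction_built,
					     int order, int priority)
	{
		/* order-0 requests never use reclaim/compaction */
		if (!compaction_built || !order)
			return false;
		/* costly allocations use it immediately... */
		if (order > PAGE_ALLOC_COSTLY_ORDER)
			return true;
		/* ...cheaper high-order ones only under real pressure */
		return priority < DEF_PRIORITY - 2;
	}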

Signed-off-by: Mel Gorman <mgorman@suse.de>
Acked-by: Rik van Riel <riel@redhat.com>
Acked-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Konstantin Khlebnikov <khlebnikov@openvz.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Ying Han <yinghan@google.com>
Cc: Andy Whitcroft <apw@shadowen.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/trace/events/vmscan.h
mm/vmscan.c

diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index 82f693395ac51e7bdf5660ab0d5207e845c2dda2..bab3b87e4064e9fd804182474127c2a84e49ef29 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
                {RECLAIM_WB_ASYNC,      "RECLAIM_WB_ASYNC"}     \
                ) : "RECLAIM_WB_NONE"
 
-#define trace_reclaim_flags(page, sync) ( \
+#define trace_reclaim_flags(page) ( \
        (page_is_file_cache(page) ? RECLAIM_WB_FILE : RECLAIM_WB_ANON) | \
        (RECLAIM_WB_ASYNC) \
        )
 
-#define trace_shrink_flags(file, sync) \
+#define trace_shrink_flags(file) \
        ( \
                (file ? RECLAIM_WB_FILE : RECLAIM_WB_ANON) | \
                (RECLAIM_WB_ASYNC) \
diff --git a/mm/vmscan.c b/mm/vmscan.c
index e27f27d4cc1949e587ee5898fe7c479d5dfc1177..68e5819d0f1bc5345ba5488acecd7387b1dda27d 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
 #define CREATE_TRACE_POINTS
 #include <trace/events/vmscan.h>
 
-/*
- * reclaim_mode determines how the inactive list is shrunk
- * RECLAIM_MODE_SINGLE: Reclaim only order-0 pages
- * RECLAIM_MODE_COMPACTION: For high-order allocations, reclaim a number of
- *                     order-0 pages and then compact the zone
- */
-typedef unsigned __bitwise__ reclaim_mode_t;
-#define RECLAIM_MODE_SINGLE            ((__force reclaim_mode_t)0x01u)
-#define RECLAIM_MODE_COMPACTION                ((__force reclaim_mode_t)0x10u)
-
 struct scan_control {
        /* Incremented by the number of inactive pages that were scanned */
        unsigned long nr_scanned;
@@ -88,12 +78,6 @@ struct scan_control {
 
        int order;
 
-       /*
-        * Intend to reclaim enough continuous memory rather than reclaim
-        * enough amount of memory. i.e, mode for high order allocation.
-        */
-       reclaim_mode_t reclaim_mode;
-
        /*
         * The memory cgroup that hit its limit and as a result is the
         * primary target of this reclaim invocation.
@@ -356,25 +340,6 @@ out:
        return ret;
 }
 
-static void set_reclaim_mode(int priority, struct scan_control *sc)
-{
-       /*
-        * Restrict reclaim/compaction to costly allocations or when
-        * under memory pressure
-        */
-       if (COMPACTION_BUILD && sc->order &&
-                       (sc->order > PAGE_ALLOC_COSTLY_ORDER ||
-                        priority < DEF_PRIORITY - 2))
-               sc->reclaim_mode = RECLAIM_MODE_COMPACTION;
-       else
-               sc->reclaim_mode = RECLAIM_MODE_SINGLE;
-}
-
-static void reset_reclaim_mode(struct scan_control *sc)
-{
-       sc->reclaim_mode = RECLAIM_MODE_SINGLE;
-}
-
 static inline int is_page_cache_freeable(struct page *page)
 {
        /*
@@ -497,8 +462,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
                        /* synchronous write or broken a_ops? */
                        ClearPageReclaim(page);
                }
-               trace_mm_vmscan_writepage(page,
-                       trace_reclaim_flags(page, sc->reclaim_mode));
+               trace_mm_vmscan_writepage(page, trace_reclaim_flags(page));
                inc_zone_page_state(page, NR_VMSCAN_WRITE);
                return PAGE_SUCCESS;
        }
@@ -953,7 +917,6 @@ cull_mlocked:
                        try_to_free_swap(page);
                unlock_page(page);
                putback_lru_page(page);
-               reset_reclaim_mode(sc);
                continue;
 
 activate_locked:
@@ -1348,8 +1311,6 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
                        return SWAP_CLUSTER_MAX;
        }
 
-       set_reclaim_mode(priority, sc);
-
        lru_add_drain();
 
        if (!sc->may_unmap)
@@ -1433,7 +1394,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
                zone_idx(zone),
                nr_scanned, nr_reclaimed,
                priority,
-               trace_shrink_flags(file, sc->reclaim_mode));
+               trace_shrink_flags(file));
        return nr_reclaimed;
 }
 
@@ -1512,8 +1473,6 @@ static void shrink_active_list(unsigned long nr_to_scan,
 
        lru_add_drain();
 
-       reset_reclaim_mode(sc);
-
        if (!sc->may_unmap)
                isolate_mode |= ISOLATE_UNMAPPED;
        if (!sc->may_writepage)
@@ -1826,23 +1785,35 @@ out:
        }
 }
 
+/* Use reclaim/compaction for costly allocs or under memory pressure */
+static bool in_reclaim_compaction(int priority, struct scan_control *sc)
+{
+       if (COMPACTION_BUILD && sc->order &&
+                       (sc->order > PAGE_ALLOC_COSTLY_ORDER ||
+                        priority < DEF_PRIORITY - 2))
+               return true;
+
+       return false;
+}
+
 /*
- * Reclaim/compaction depends on a number of pages being freed. To avoid
- * disruption to the system, a small number of order-0 pages continue to be
- * rotated and reclaimed in the normal fashion. However, by the time we get
- * back to the allocator and call try_to_compact_zone(), we ensure that
- * there are enough free pages for it to be likely successful
+ * Reclaim/compaction is used for high-order allocation requests. It reclaims
+ * order-0 pages before compacting the zone. should_continue_reclaim() returns
+ * true if more pages should be reclaimed such that when the page allocator
+ * calls try_to_compact_zone() that it will have enough free pages to succeed.
+ * It will give up earlier than that if there is difficulty reclaiming pages.
  */
 static inline bool should_continue_reclaim(struct mem_cgroup_zone *mz,
                                        unsigned long nr_reclaimed,
                                        unsigned long nr_scanned,
+                                       int priority,
                                        struct scan_control *sc)
 {
        unsigned long pages_for_compaction;
        unsigned long inactive_lru_pages;
 
        /* If not in reclaim/compaction mode, stop */
-       if (!(sc->reclaim_mode & RECLAIM_MODE_COMPACTION))
+       if (!in_reclaim_compaction(priority, sc))
                return false;
 
        /* Consider stopping depending on scan and reclaim activity */
@@ -1944,7 +1915,8 @@ restart:
 
        /* reclaim/compaction might need reclaim to continue */
        if (should_continue_reclaim(mz, nr_reclaimed,
-                                       sc->nr_scanned - nr_scanned, sc))
+                                       sc->nr_scanned - nr_scanned,
+                                       priority, sc))
                goto restart;
 
        throttle_vm_writeout(sc->gfp_mask);
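
Taken together, the restructured code reads as a simple loop: reclaim batches
of order-0 pages while reclaim/compaction applies and compaction would still
lack free pages, then return to the page allocator, which calls
try_to_compact_zone().  A schematic sketch of that flow, building on the
previous sketch's includes and predicate (reclaim_one_pass() and
compaction_has_enough_free() are invented stand-ins for the LRU shrinking and
the free-page checks; the real loop is the restart label in the final hunk
above):

	/* invented stand-ins, declared so the sketch is self-contained */
	unsigned long reclaim_one_pass(int priority);
	bool compaction_has_enough_free(int order, unsigned long nr_reclaimed);

	static unsigned long reclaim_for_compaction(int order, int priority)
	{
		unsigned long nr_reclaimed = 0;

		do {
			/* reclaim a batch of order-0 pages as usual */
			nr_reclaimed += reclaim_one_pass(priority);
		} while (wants_reclaim_compaction(true, order, priority) &&
			 !compaction_has_enough_free(order, nr_reclaimed));

		/*
		 * Back in the allocator, compaction now has enough free
		 * pages to be likely to succeed.
		 */
		return nr_reclaimed;
	}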