mm: vmscan: immediately reclaim end-of-LRU dirty pages when writeback completes
author    Mel Gorman <mgorman@suse.de>
          Mon, 24 Oct 2011 14:54:08 +0000 (01:54 +1100)
committer Stephen Rothwell <sfr@canb.auug.org.au>
          Tue, 25 Oct 2011 09:07:39 +0000 (20:07 +1100)
When direct reclaim encounters a dirty page, the page is recycled around the
LRU for another full cycle.  This patch marks the page PageReclaim, similarly
to deactivate_page(), so that it is reclaimed almost immediately after
writeback cleans it.  This avoids reclaiming clean pages that are younger
than a dirty page encountered at the end of the LRU, which may have been
something like a use-once page.
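
For context, the "reclaimed almost immediately" behaviour depends on the
writeback completion path noticing PG_reclaim and rotating the now-clean page
to the tail of the inactive LRU.  A condensed sketch of that path, paraphrased
from mm/filemap.c of kernels of this era (it is not part of this patch):

    /*
     * Condensed sketch of mm/filemap.c:end_page_writeback(), circa v3.1.
     * When writeback finishes on a page that reclaim flagged with
     * SetPageReclaim(), the page is moved to the inactive LRU tail so
     * shrink_page_list() sees it again on the next pass.
     */
    void end_page_writeback(struct page *page)
    {
            if (TestClearPageReclaim(page))
                    rotate_reclaimable_page(page);  /* move to LRU tail */

            if (!test_clear_page_writeback(page))
                    BUG();

            smp_mb__after_clear_bit();
            wake_up_page(page, PG_writeback);
    }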

Signed-off-by: Mel Gorman <mgorman@suse.de>
Acked-by: Johannes Weiner <jweiner@redhat.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Alex Elder <aelder@sgi.com>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: Chris Mason <chris.mason@oracle.com>
Cc: Dave Hansen <dave@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/mmzone.h
mm/vmscan.c
mm/vmstat.c

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 2c41b2c1943b43ff6df8f62dc3cd3edf5c42a220..188cb2ffe8db2685a5b1b50e584026a1ffcfd9f6 100644
@@ -100,7 +100,7 @@ enum zone_stat_item {
        NR_UNSTABLE_NFS,        /* NFS unstable pages */
        NR_BOUNCE,
        NR_VMSCAN_WRITE,
-       NR_VMSCAN_WRITE_SKIP,
+       NR_VMSCAN_IMMEDIATE,    /* Prioritise for reclaim when writeback ends */
        NR_WRITEBACK_TEMP,      /* Writeback using temporary buffers */
        NR_ISOLATED_ANON,       /* Temporary isolated pages from anon lru */
        NR_ISOLATED_FILE,       /* Temporary isolated pages from file lru */
diff --git a/mm/vmscan.c b/mm/vmscan.c
index df53dc8eba89bb72b611453718f877b9e3964189..20b685267edda0ae20aeb182018c9c2894045eb3 100644
@@ -866,7 +866,15 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                         */
                        if (page_is_file_cache(page) &&
                                        (!current_is_kswapd() || priority >= DEF_PRIORITY - 2)) {
-                               inc_zone_page_state(page, NR_VMSCAN_WRITE_SKIP);
+                               /*
+                                * Immediately reclaim when written back.
+                                * Similar in principle to deactivate_page()
+                                * except we already have the page isolated
+                                * and know it's dirty
+                                */
+                               inc_zone_page_state(page, NR_VMSCAN_IMMEDIATE);
+                               SetPageReclaim(page);
+
                                goto keep_locked;
                        }
 
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 210bd8ff3a6e03745370253c89605e4b9db1fc51..56e529a40517d115754554191a2ba0ab6535f93c 100644
@@ -702,7 +702,7 @@ const char * const vmstat_text[] = {
        "nr_unstable",
        "nr_bounce",
        "nr_vmscan_write",
-       "nr_vmscan_write_skip",
+       "nr_vmscan_immediate_reclaim",
        "nr_writeback_temp",
        "nr_isolated_anon",
        "nr_isolated_file",