git.karo-electronics.de Git - karo-tx-linux.git/commitdiff

author          Mel Gorman <mgorman@suse.de>
                Wed, 24 Aug 2011 23:47:01 +0000 (09:47 +1000)
committer       Stephen Rothwell <sfr@canb.auug.org.au>
                Wed, 31 Aug 2011 04:27:30 +0000 (14:27 +1000)

It is preferable that no dirty pages are dispatched for cleaning from the
page reclaim path.  At normal priorities, this patch prevents kswapd
writing pages.

However, page reclaim does have a requirement that pages be freed in a
particular zone.  If it is failing to make sufficient progress (reclaiming
< SWAP_CLUSTER_MAX at any priority), the priority is raised to
scan more pages.  A priority of DEF_PRIORITY - 3 is considered to be the
point where kswapd is getting into trouble reclaiming pages.  If this
priority is reached, kswapd will dispatch pages for writing.
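
Note that the reclaim priority counts down from DEF_PRIORITY toward 0,
so a numerically lower value means heavier pressure, not lighter.  A
minimal sketch of the resulting policy in plain userspace C, assuming
DEF_PRIORITY is 12 as in kernels of this era (the helper name is
hypothetical, not part of the patch):

#define DEF_PRIORITY 12

/*
 * Illustration only: may kswapd write back a dirty file page at this
 * reclaim priority?  Priority starts at DEF_PRIORITY and is decremented
 * whenever a pass reclaims fewer than SWAP_CLUSTER_MAX pages, so
 * smaller numbers mean more pressure.
 */
static int kswapd_may_writeback(int priority)
{
        /*
         * Priorities 12, 11 and 10 (>= DEF_PRIORITY - 2) are "normal";
         * writeback is allowed only once the priority has been raised
         * to DEF_PRIORITY - 3, i.e. 9 or below.
         */
        return priority < DEF_PRIORITY - 2;
}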

Signed-off-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Johannes Weiner <jweiner@redhat.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Alex Elder <aelder@sgi.com>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: Chris Mason <chris.mason@oracle.com>
Cc: Dave Hansen <dave@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/vmscan.c

index 9a22bf66eb6460d266803605867300057accb02c..fd58e9756dffb20a4420c014de60c8f0ee7ae9d0 100644
@@ -751,7 +751,8 @@ static noinline_for_stack void free_page_list(struct list_head *free_pages)
  */
 static unsigned long shrink_page_list(struct list_head *page_list,
                                      struct zone *zone,
-                                     struct scan_control *sc)
+                                     struct scan_control *sc,
+                                     int priority)
 {
        LIST_HEAD(ret_pages);
        LIST_HEAD(free_pages);
@@ -857,9 +858,11 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 
                        /*
                         * Only kswapd can writeback filesystem pages to
-                        * avoid risk of stack overflow
+                        * avoid risk of stack overflow but do not writeback
+                        * unless under significant pressure.
                         */
-                       if (page_is_file_cache(page) && !current_is_kswapd()) {
+                       if (page_is_file_cache(page) &&
+                                       (!current_is_kswapd() || priority >= DEF_PRIORITY - 2)) {
                                inc_zone_page_state(page, NR_VMSCAN_WRITE_SKIP);
                                goto keep_locked;
                        }
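
Concretely, the patched test keeps writeback off for direct reclaimers
in every case, and for kswapd until the priority has dropped below
DEF_PRIORITY - 2.  A standalone sketch that tabulates the decision,
again assuming DEF_PRIORITY is 12 (plain userspace C, not part of the
patch):

#include <stdio.h>

#define DEF_PRIORITY 12

/* Mirrors the patched condition for dirty file-cache pages. */
static int skips_writeback(int is_kswapd, int priority)
{
        return !is_kswapd || priority >= DEF_PRIORITY - 2;
}

int main(void)
{
        int priority;

        /* Walk the priorities in the order reclaim raises them. */
        for (priority = DEF_PRIORITY; priority >= 0; priority--)
                printf("priority %2d: kswapd %s, direct reclaim %s\n",
                       priority,
                       skips_writeback(1, priority) ? "skips" : "writes",
                       skips_writeback(0, priority) ? "skips" : "writes");
        return 0;
}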
@@ -1516,12 +1519,12 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
 
        spin_unlock_irq(&zone->lru_lock);
 
-       nr_reclaimed = shrink_page_list(&page_list, zone, sc);
+       nr_reclaimed = shrink_page_list(&page_list, zone, sc, priority);
 
        /* Check if we should syncronously wait for writeback */
        if (should_reclaim_stall(nr_taken, nr_reclaimed, priority, sc)) {
                set_reclaim_mode(priority, sc, true);
-               nr_reclaimed += shrink_page_list(&page_list, zone, sc);
+               nr_reclaimed += shrink_page_list(&page_list, zone, sc, priority);
        }
 
        if (!scanning_global_lru(sc))