mm: memcg: only evict file pages when we have plenty
author    Johannes Weiner <hannes@cmpxchg.org>
          Wed, 20 Feb 2013 02:13:59 +0000 (13:13 +1100)
committer Stephen Rothwell <sfr@canb.auug.org.au>
          Wed, 20 Feb 2013 05:52:20 +0000 (16:52 +1100)
e986850 ("mm, vmscan: only evict file pages when we have plenty") makes a
point of not going for anonymous memory while there is still enough
inactive cache around.

The check was added only for global reclaim, but it is just as useful to
reduce swapping in memory cgroup reclaim:

200M-memcg-defconfig-j2 (kernel defconfig build with make -j2 in a 200M memory cgroup)

                                 vanilla                   patched
Real time              454.06 (  +0.00%)         453.71 (  -0.08%)
User time              668.57 (  +0.00%)         668.73 (  +0.02%)
System time            128.92 (  +0.00%)         129.53 (  +0.46%)
Swap in               1246.80 (  +0.00%)         814.40 ( -34.65%)
Swap out              1198.90 (  +0.00%)         827.00 ( -30.99%)
Pages allocated   16431288.10 (  +0.00%)    16434035.30 (  +0.02%)
Major faults           681.50 (  +0.00%)         593.70 ( -12.86%)
THP faults             237.20 (  +0.00%)         242.40 (  +2.18%)
THP collapse           241.20 (  +0.00%)         248.50 (  +3.01%)
THP splits             157.30 (  +0.00%)         161.40 (  +2.59%)
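
To make the heuristic concrete, here is a minimal userspace sketch of the
scan-target split this check produces (the LRU sizes, the
inactive_file_is_low flag, and the fraction[]/denominator bookkeeping are
illustrative stand-ins for what get_scan_count() does on real per-lruvec
statistics, not the kernel implementation):

/* Sketch: bias the anon/file scan split while inactive cache is plentiful. */
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical snapshot of one lruvec's file LRUs, in pages. */
	unsigned long inactive_file = 40000;
	unsigned long active_file   = 10000;

	/* The file LRUs count as "low" once the active list outgrows the
	 * inactive one; otherwise there is still enough cache to reclaim. */
	bool inactive_file_is_low = active_file > inactive_file;

	unsigned long fraction[2];	/* [0] = anon, [1] = file */
	unsigned long denominator;

	if (!inactive_file_is_low) {
		/* Plenty of inactive cache: scan file pages only, no swap. */
		fraction[0] = 0;
		fraction[1] = 1;
		denominator = 1;
	} else {
		/* Otherwise fall back to the swappiness-weighted split. */
		fraction[0] = 1;
		fraction[1] = 1;
		denominator = 1;
	}

	printf("anon: %lu/%lu  file: %lu/%lu\n",
	       fraction[0], denominator, fraction[1], denominator);
	return 0;
}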

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Michal Hocko <mhocko@suse.cz>
Acked-by: Rik van Riel <riel@redhat.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Cc: Satoru Moriya <satoru.moriya@hds.com>
Cc: Simon Jeons <simon.jeons@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/vmscan.c

index 196709f5ee5862753f5f3731bdeab58c2c45c323..ff842d9a7714b1434d27345a0acbde6d4b9a88e1 100644
@@ -1700,18 +1700,20 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
                        fraction[1] = 0;
                        denominator = 1;
                        goto out;
-               } else if (!inactive_file_is_low_global(zone)) {
-                       /*
-                        * There is enough inactive page cache, do not
-                        * reclaim anything from the working set right now.
-                        */
-                       fraction[0] = 0;
-                       fraction[1] = 1;
-                       denominator = 1;
-                       goto out;
                }
        }
 
+       /*
+        * There is enough inactive page cache, do not reclaim
+        * anything from the anonymous working set right now.
+        */
+       if (!inactive_file_is_low(lruvec)) {
+               fraction[0] = 0;
+               fraction[1] = 1;
+               denominator = 1;
+               goto out;
+       }
+
        /*
         * With swappiness at 100, anonymous and file have the same priority.
         * This scanning priority is essentially the inverse of IO cost.
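
For contrast, a standalone sketch of what moving the check out of the
global-reclaim branch changes for memcg (limit) reclaim; the booleans stand
in for global_reclaim(sc) and the inactive_file_is_low() result, and nothing
here is the actual kernel code:

#include <stdbool.h>
#include <stdio.h>

/* Before: the file-only shortcut was taken during global reclaim only. */
static bool skip_anon_before(bool global_reclaim, bool inactive_file_low)
{
	return global_reclaim && !inactive_file_low;
}

/* After: the check runs on the lruvec, for global and memcg reclaim alike. */
static bool skip_anon_after(bool global_reclaim, bool inactive_file_low)
{
	(void)global_reclaim;
	return !inactive_file_low;
}

int main(void)
{
	/* Memcg reclaim with plenty of inactive cache around. */
	bool global = false, file_low = false;

	printf("skip anon? before: %d, after: %d\n",
	       skip_anon_before(global, file_low),
	       skip_anon_after(global, file_low));
	return 0;
}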