git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
shrinker: add node awareness
author: Dave Chinner <dchinner@redhat.com>
date: Thu, 27 Jun 2013 23:52:16 +0000 (09:52 +1000)
committer: Stephen Rothwell <sfr@canb.auug.org.au>
date: Fri, 28 Jun 2013 06:37:59 +0000 (16:37 +1000)
Pass the node of the current zone being reclaimed to shrink_slab(),
allowing the shrinker control nodemask to be set appropriately for node
aware shrinkers.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Glauber Costa <glommer@openvz.org>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Cc: Arve Hjønnevåg <arve@android.com>
Cc: Carlos Maiolino <cmaiolino@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Rientjes <rientjes@google.com>
Cc: Gleb Natapov <gleb@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: J. Bruce Fields <bfields@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Kent Overstreet <koverstreet@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
drivers/staging/android/ashmem.c
fs/drop_caches.c
include/linux/shrinker.h
mm/memory-failure.c
mm/vmscan.c

index 21a3f7250531c6a4e7891843f08f1919a31f890e..65f36d7287149a799c88207fcc1566134b6ea8d7 100644 (file)
@@ -692,6 +692,9 @@ static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                                .gfp_mask = GFP_KERNEL,
                                .nr_to_scan = 0,
                        };
+
+                       nodes_setall(sc.nodes_to_scan);
+
                        ret = ashmem_shrink(&ashmem_shrinker, &sc);
                        sc.nr_to_scan = ret;
                        ashmem_shrink(&ashmem_shrinker, &sc);
index c00e055b62820945bef291fa68b145a4d7145667..9fd702f5bfb2886a715e787b6470615ea07021e0 100644 (file)
@@ -44,6 +44,7 @@ static void drop_slab(void)
                .gfp_mask = GFP_KERNEL,
        };
 
+       nodes_setall(shrink.nodes_to_scan);
        do {
                nr_objects = shrink_slab(&shrink, 1000, 1000);
        } while (nr_objects > 10);
index 884e76222e1baf66d3a7649a3f07dcf7f54f52fe..76f520c4c394866e22839a3c12261e15280ee382 100644 (file)
@@ -16,6 +16,9 @@ struct shrink_control {
 
        /* How many slab objects shrinker() should scan and try to reclaim */
        unsigned long nr_to_scan;
+
+       /* shrink from these nodes */
+       nodemask_t nodes_to_scan;
 };
 
 #define SHRINK_STOP (~0UL)
index 2c13aa7a0164101a360e4f776090f6b183aab5ab..09ae11169e39aa0bd32237df0668fede1d1348c3 100644 (file)
@@ -248,10 +248,12 @@ void shake_page(struct page *p, int access)
         */
        if (access) {
                int nr;
+               int nid = page_to_nid(p);
                do {
                        struct shrink_control shrink = {
                                .gfp_mask = GFP_KERNEL,
                        };
+                       node_set(nid, shrink.nodes_to_scan);
 
                        nr = shrink_slab(&shrink, 1000, 1000);
                        if (page_count(p) == 1)
index dac8491d7915b3a1c80cbb42c016c0ecebee3282..54a533ad2799eee33ae92ee68c8d18adb20b573c 100644 (file)
@@ -2386,12 +2386,16 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
                 */
                if (global_reclaim(sc)) {
                        unsigned long lru_pages = 0;
+
+                       nodes_clear(shrink->nodes_to_scan);
                        for_each_zone_zonelist(zone, z, zonelist,
                                        gfp_zone(sc->gfp_mask)) {
                                if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
                                        continue;
 
                                lru_pages += zone_reclaimable_pages(zone);
+                               node_set(zone_to_nid(zone),
+                                        shrink->nodes_to_scan);
                        }
 
                        shrink_slab(shrink, sc->nr_scanned, lru_pages);
@@ -2848,6 +2852,8 @@ static bool kswapd_shrink_zone(struct zone *zone,
                return true;
 
        shrink_zone(zone, sc);
+       nodes_clear(shrink.nodes_to_scan);
+       node_set(zone_to_nid(zone), shrink.nodes_to_scan);
 
        reclaim_state->reclaimed_slab = 0;
        nr_slab = shrink_slab(&shrink, sc->nr_scanned, lru_pages);
@@ -3556,10 +3562,9 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
                 * number of slab pages and shake the slab until it is reduced
                 * by the same nr_pages that we used for reclaiming unmapped
                 * pages.
-                *
-                * Note that shrink_slab will free memory on all zones and may
-                * take a long time.
                 */
+               nodes_clear(shrink.nodes_to_scan);
+               node_set(zone_to_nid(zone), shrink.nodes_to_scan);
                for (;;) {
                        unsigned long lru_pages = zone_reclaimable_pages(zone);