vmscan: use atomic-long for shrinker batching
author     Konstantin Khlebnikov <khlebnikov@openvz.org>
           Wed, 16 Nov 2011 23:40:58 +0000 (10:40 +1100)
committer  Stephen Rothwell <sfr@canb.auug.org.au>
           Wed, 23 Nov 2011 03:04:32 +0000 (14:04 +1100)
Use atomic-long operations instead of looping around cmpxchg().

[akpm@linux-foundation.org: massage atomic.h inclusions]
Signed-off-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Cc: Dave Chinner <david@fromorbit.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/fs.h
include/linux/mm.h
include/linux/shrinker.h
mm/vmscan.c
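
The change itself is mechanical: each open-coded cmpxchg() retry loop on
shrinker->nr becomes a single atomic-long operation on the renamed
nr_in_batch field. Below is a minimal userspace sketch of the two
grab-and-zero patterns, assuming C11 <stdatomic.h> as a stand-in for the
kernel's atomic_long_t API; the names and the main() harness are
illustrative only, not part of the patch.

/*
 * Userspace model of the grab-and-zero step in shrink_slab(), assuming
 * C11 atomics as a stand-in for atomic_long_t.  old_grab() mirrors the
 * cmpxchg() retry loop this patch removes; new_grab() mirrors the single
 * atomic_long_xchg() call that replaces it.  Build with: cc -std=c11
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_long nr_in_batch;                 /* objs pending delete */

/* Old style: reread and retry until the swap to 0 sees an unchanged value. */
static long old_grab(void)
{
        long nr;

        do {
                nr = atomic_load(&nr_in_batch);
        } while (!atomic_compare_exchange_weak(&nr_in_batch, &nr, 0));
        return nr;
}

/* New style: one unconditional exchange, no retry loop. */
static long new_grab(void)
{
        return atomic_exchange(&nr_in_batch, 0);
}

int main(void)
{
        long got;

        atomic_store(&nr_in_batch, 128);
        got = old_grab();
        printf("old_grab: %ld, left in batch: %ld\n", got, atomic_load(&nr_in_batch));

        atomic_store(&nr_in_batch, 128);
        got = new_grab();
        printf("new_grab: %ld, left in batch: %ld\n", got, atomic_load(&nr_in_batch));
        return 0;
}

Both variants hand back the whole batch and leave the counter at zero; the
new form simply drops the retry loop.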

diff --git a/include/linux/fs.h b/include/linux/fs.h
index e3130220ce3e3ddf7837c64b5fdadb0342f9f690..e546aff64e078477cc7631ea4ae0abf2a1789321 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -393,8 +393,8 @@ struct inodes_stat_t {
 #include <linux/semaphore.h>
 #include <linux/fiemap.h>
 #include <linux/rculist_bl.h>
-#include <linux/shrinker.h>
 #include <linux/atomic.h>
+#include <linux/shrinker.h>
 
 #include <asm/byteorder.h>
 
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 3dc3a8c2c4858a1d3400aa2d5fd029d36a1177c6..4baadd18f4ad3402f47fbd2ac919bafba519bed4 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -10,6 +10,7 @@
 #include <linux/mmzone.h>
 #include <linux/rbtree.h>
 #include <linux/prio_tree.h>
+#include <linux/atomic.h>
 #include <linux/debug_locks.h>
 #include <linux/mm_types.h>
 #include <linux/range.h>
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index a83833a1f7a26f589a7584252ff413333286f5ea..07ceb97d53facc505bae4b489d3cae96bd7d4021 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -35,7 +35,7 @@ struct shrinker {
 
        /* These are for internal use */
        struct list_head list;
-       long nr;        /* objs pending delete */
+       atomic_long_t nr_in_batch; /* objs pending delete */
 };
 #define DEFAULT_SEEKS 2 /* A good number if you don't know better. */
 extern void register_shrinker(struct shrinker *);
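
Nothing changes for shrinker users here: a shrinker still only fills in
its callback and seeks, while nr_in_batch stays internal, zeroed by
register_shrinker() and updated atomically by shrink_slab(). A hedged
kernel-style sketch against the shrinker API of this era follows;
my_cache_count() and my_cache_scan() are hypothetical stand-ins for a
driver's cache.

#include <linux/module.h>
#include <linux/shrinker.h>

/* Hypothetical cache helpers, stubbed out so the sketch stands alone. */
static int my_cache_count(void)             { return 0; }
static void my_cache_scan(unsigned long nr) { }

/* ~3.2-era callback: scan when asked, always report the object count. */
static int my_shrink(struct shrinker *s, struct shrink_control *sc)
{
        if (sc->nr_to_scan)
                my_cache_scan(sc->nr_to_scan);
        return my_cache_count();
}

static struct shrinker my_shrinker = {
        .shrink = my_shrink,
        .seeks  = DEFAULT_SEEKS,
};

static int __init my_shrinker_init(void)
{
        register_shrinker(&my_shrinker);        /* zeroes nr_in_batch */
        return 0;
}
module_init(my_shrinker_init);

The matching unregister_shrinker() call on the teardown path is omitted
for brevity.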
diff --git a/mm/vmscan.c b/mm/vmscan.c
index f5255442ae2bddfc39c8cecc53a93f64b816d8f8..f54a05b7a61d9eb658562b191996beb9d4cea397 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -183,7 +183,7 @@ static unsigned long zone_nr_lru_pages(struct zone *zone,
  */
 void register_shrinker(struct shrinker *shrinker)
 {
-       shrinker->nr = 0;
+       atomic_long_set(&shrinker->nr_in_batch, 0);
        down_write(&shrinker_rwsem);
        list_add_tail(&shrinker->list, &shrinker_list);
        up_write(&shrinker_rwsem);
@@ -264,9 +264,7 @@ unsigned long shrink_slab(struct shrink_control *shrink,
                 * and zero it so that other concurrent shrinker invocations
                 * don't also do this scanning work.
                 */
-               do {
-                       nr = shrinker->nr;
-               } while (cmpxchg(&shrinker->nr, nr, 0) != nr);
+               nr = atomic_long_xchg(&shrinker->nr_in_batch, 0);
 
                total_scan = nr;
                delta = (4 * nr_pages_scanned) / shrinker->seeks;
@@ -328,12 +326,11 @@ unsigned long shrink_slab(struct shrink_control *shrink,
                 * manner that handles concurrent updates. If we exhausted the
                 * scan, there is no need to do an update.
                 */
-               do {
-                       nr = shrinker->nr;
-                       new_nr = total_scan + nr;
-                       if (total_scan <= 0)
-                               break;
-               } while (cmpxchg(&shrinker->nr, nr, new_nr) != nr);
+               if (total_scan > 0)
+                       new_nr = atomic_long_add_return(total_scan,
+                                       &shrinker->nr_in_batch);
+               else
+                       new_nr = atomic_long_read(&shrinker->nr_in_batch);
 
                trace_mm_shrink_slab_end(shrinker, shrink_ret, nr, new_nr);
        }
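
The write-back side collapses the same way: leftover work is returned to
the counter with one atomic add, and when the scan was exhausted only a
read is needed for the tracepoint, which is exactly what the old loop's
total_scan <= 0 break achieved. A userspace sketch of that logic, again
assuming C11 atomics stand in for atomic_long_t (put_back() is an
illustrative name, not kernel code):

#include <stdatomic.h>

/*
 * Mirror of the update above: add back the unscanned remainder, or just
 * read the counter when there is nothing to return.  C11 atomic_fetch_add()
 * returns the old value, so "+ total_scan" yields the new value that
 * atomic_long_add_return() reports for the tracepoint.
 */
static long put_back(atomic_long *nr_in_batch, long total_scan)
{
        if (total_scan > 0)
                return atomic_fetch_add(nr_in_batch, total_scan) + total_scan;
        return atomic_load(nr_in_batch);
}

As the in-tree comment notes, the counter may be updated concurrently; the
returned value only feeds the trace point, so a slightly stale read in the
exhausted-scan case is acceptable.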