slub: Add cmpxchg_double_slab()
author		Christoph Lameter <cl@linux.com>
		Wed, 1 Jun 2011 17:25:49 +0000 (12:25 -0500)
committer	Pekka Enberg <penberg@kernel.org>
		Sat, 2 Jul 2011 10:26:53 +0000 (13:26 +0300)
Add a function that operates on the second doubleword in the page struct
and manipulates the object counters, the freelist and the frozen attribute.

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
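
The "second doubleword" referred to above is the slab-specific part of struct page that the companion struct page rework overlays with the freelist pointer and a packed counters word. A minimal sketch of that layout, assuming the field widths used by that series (the exact layout is not part of this diff):

	/* Sketch of the slab fields in struct page that cmpxchg_double_slab()
	 * updates as one unit; layout assumed from the companion struct page
	 * patches, not defined in this diff. */
	void *freelist;			/* pointer to the first free object */
	union {
		unsigned long counters;	/* whole word, swapped together with freelist */
		struct {
			unsigned inuse:16;	/* objects currently allocated */
			unsigned objects:15;	/* total objects on the slab */
			unsigned frozen:1;	/* slab is reserved by a cpu */
		};
	};

Because freelist and counters are adjacent and double-word aligned, cmpxchg_double() can replace both atomically, which is what lets the freelist, the object counters and the frozen bit change together without holding the slab lock.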
include/linux/slub_def.h
mm/slub.c

index c8668d161dd89251de828b30f2ccda69eda6f979..b4271529414774010284cb84e12a90252c548e4b 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -33,6 +33,7 @@ enum stat_item {
        DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
        ORDER_FALLBACK,         /* Number of times fallback was necessary */
        CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
+       CMPXCHG_DOUBLE_FAIL,    /* Number of times that cmpxchg double did not match */
        NR_SLUB_STAT_ITEMS };
 
 struct kmem_cache_cpu {
index 5a2d3d8e05588a8cab4690126ea8f43d52923e77..be6715dd0ee8e1e34d16d9d75ab4f7e7f9e71fa0 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -131,6 +131,9 @@ static inline int kmem_cache_debug(struct kmem_cache *s)
 /* Enable to test recovery from slab corruption on boot */
 #undef SLUB_RESILIENCY_TEST
 
+/* Enable to log cmpxchg failures */
+#undef SLUB_DEBUG_CMPXCHG
+
 /*
 * Minimum number of partial slabs. These will be left on the partial
  * lists even if they are empty. kmem_cache_shrink may reclaim them.
@@ -170,6 +173,7 @@ static inline int kmem_cache_debug(struct kmem_cache *s)
 
 /* Internal SLUB flags */
 #define __OBJECT_POISON                0x80000000UL /* Poison object */
+#define __CMPXCHG_DOUBLE       0x40000000UL /* Use cmpxchg_double */
 
 static int kmem_size = sizeof(struct kmem_cache);
 
@@ -338,6 +342,37 @@ static inline int oo_objects(struct kmem_cache_order_objects x)
        return x.x & OO_MASK;
 }
 
+static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
+               void *freelist_old, unsigned long counters_old,
+               void *freelist_new, unsigned long counters_new,
+               const char *n)
+{
+#ifdef CONFIG_CMPXCHG_DOUBLE
+       if (s->flags & __CMPXCHG_DOUBLE) {
+               if (cmpxchg_double(&page->freelist,
+                       freelist_old, counters_old,
+                       freelist_new, counters_new))
+                       return 1;
+       } else
+#endif
+       {
+               if (page->freelist == freelist_old && page->counters == counters_old) {
+                       page->freelist = freelist_new;
+                       page->counters = counters_new;
+                       return 1;
+               }
+       }
+
+       cpu_relax();
+       stat(s, CMPXCHG_DOUBLE_FAIL);
+
+#ifdef SLUB_DEBUG_CMPXCHG
+       printk(KERN_INFO "%s %s: cmpxchg double redo ", n, s->name);
+#endif
+
+       return 0;
+}
+
 #ifdef CONFIG_SLUB_DEBUG
 /*
 * Determine a map of objects in use on a page.
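
A hedged sketch of how a caller is expected to use cmpxchg_double_slab(): snapshot the current freelist/counters pair, compute the new values, and retry until both are installed in one step. Only cmpxchg_double_slab() itself comes from this patch; the surrounding names and the particular freelist transition below are hypothetical (get_freepointer() is an existing slub helper):

	void *old_freelist, *new_freelist;
	unsigned long old_counters, new_counters;

	do {
		old_freelist = page->freelist;
		old_counters = page->counters;

		/* example transition: pop the first object off the page freelist */
		new_freelist = get_freepointer(s, old_freelist);
		new_counters = old_counters;	/* inuse/objects/frozen adjusted here */
	} while (!cmpxchg_double_slab(s, page,
			old_freelist, old_counters,
			new_freelist, new_counters,
			"example_alloc"));

With CONFIG_CMPXCHG_DOUBLE and a non-debug cache the loop takes the lockless cmpxchg_double() path; otherwise it falls through to the open-coded compare-and-update branch, and a failed attempt bumps the new CMPXCHG_DOUBLE_FAIL statistic before the caller retries.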
@@ -2596,6 +2631,12 @@ static int kmem_cache_open(struct kmem_cache *s,
                }
        }
 
+#ifdef CONFIG_CMPXCHG_DOUBLE
+       if (system_has_cmpxchg_double() && (s->flags & SLAB_DEBUG_FLAGS) == 0)
+               /* Enable fast mode */
+               s->flags |= __CMPXCHG_DOUBLE;
+#endif
+
        /*
         * The larger the object size is, the more pages we want on the partial
         * list to avoid pounding the page allocator excessively.
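
The kmem_cache_open() change above is what arms the fast path: it requires both hardware support and the absence of any debug flag, since the debug checks rely on the slower, fully validated code paths. Restated as a small hedged sketch (the helper name is hypothetical; the symbols are the ones used by the patch):

	#ifdef CONFIG_CMPXCHG_DOUBLE
	/* Hypothetical helper restating the condition from kmem_cache_open(). */
	static inline bool slab_may_use_cmpxchg_double(struct kmem_cache *s)
	{
		return system_has_cmpxchg_double() && !(s->flags & SLAB_DEBUG_FLAGS);
	}
	#endif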
@@ -4248,8 +4289,10 @@ static ssize_t sanity_checks_store(struct kmem_cache *s,
                                const char *buf, size_t length)
 {
        s->flags &= ~SLAB_DEBUG_FREE;
-       if (buf[0] == '1')
+       if (buf[0] == '1') {
+               s->flags &= ~__CMPXCHG_DOUBLE;
                s->flags |= SLAB_DEBUG_FREE;
+       }
        return length;
 }
 SLAB_ATTR(sanity_checks);
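
The same three-line pattern repeats in the trace, red_zone, poison and store_user handlers that follow: enabling any debug option also clears __CMPXCHG_DOUBLE, dropping the cache back to the checked slow path. As a hedged sketch, the repeated pattern amounts to a helper like the hypothetical one below (not part of this patch):

	/* Hypothetical helper capturing the pattern used by the debug-flag
	 * store handlers: turning a debug flag on also disables the
	 * lockless cmpxchg_double fast path for this cache. */
	static void slab_store_debug_flag(struct kmem_cache *s, unsigned long flag,
					  const char *buf)
	{
		s->flags &= ~flag;
		if (buf[0] == '1') {
			s->flags &= ~__CMPXCHG_DOUBLE;
			s->flags |= flag;
		}
	}

These attributes live under /sys/kernel/slab/<cache>/, so writing '1' to, say, the trace file now both enables tracing and turns off cmpxchg_double fast mode for that cache.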
@@ -4263,8 +4306,10 @@ static ssize_t trace_store(struct kmem_cache *s, const char *buf,
                                                        size_t length)
 {
        s->flags &= ~SLAB_TRACE;
-       if (buf[0] == '1')
+       if (buf[0] == '1') {
+               s->flags &= ~__CMPXCHG_DOUBLE;
                s->flags |= SLAB_TRACE;
+       }
        return length;
 }
 SLAB_ATTR(trace);
@@ -4281,8 +4326,10 @@ static ssize_t red_zone_store(struct kmem_cache *s,
                return -EBUSY;
 
        s->flags &= ~SLAB_RED_ZONE;
-       if (buf[0] == '1')
+       if (buf[0] == '1') {
+               s->flags &= ~__CMPXCHG_DOUBLE;
                s->flags |= SLAB_RED_ZONE;
+       }
        calculate_sizes(s, -1);
        return length;
 }
@@ -4300,8 +4347,10 @@ static ssize_t poison_store(struct kmem_cache *s,
                return -EBUSY;
 
        s->flags &= ~SLAB_POISON;
-       if (buf[0] == '1')
+       if (buf[0] == '1') {
+               s->flags &= ~__CMPXCHG_DOUBLE;
                s->flags |= SLAB_POISON;
+       }
        calculate_sizes(s, -1);
        return length;
 }
@@ -4319,8 +4368,10 @@ static ssize_t store_user_store(struct kmem_cache *s,
                return -EBUSY;
 
        s->flags &= ~SLAB_STORE_USER;
-       if (buf[0] == '1')
+       if (buf[0] == '1') {
+               s->flags &= ~__CMPXCHG_DOUBLE;
                s->flags |= SLAB_STORE_USER;
+       }
        calculate_sizes(s, -1);
        return length;
 }
@@ -4493,6 +4544,8 @@ STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
 STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
 STAT_ATTR(ORDER_FALLBACK, order_fallback);
+STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
+STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
 #endif
 
 static struct attribute *slab_attrs[] = {
@@ -4550,6 +4603,8 @@ static struct attribute *slab_attrs[] = {
        &deactivate_to_tail_attr.attr,
        &deactivate_remote_frees_attr.attr,
        &order_fallback_attr.attr,
+       &cmpxchg_double_fail_attr.attr,
+       &cmpxchg_double_cpu_fail_attr.attr,
 #endif
 #ifdef CONFIG_FAILSLAB
        &failslab_attr.attr,