mm,slub,x86: decouple size of struct page from CONFIG_CMPXCHG_LOCAL
author     Heiko Carstens <heiko.carstens@de.ibm.com>
           Thu, 8 Dec 2011 04:41:54 +0000 (15:41 +1100)
committer  Stephen Rothwell <sfr@canb.auug.org.au>
           Fri, 9 Dec 2011 04:52:40 +0000 (15:52 +1100)
While implementing cmpxchg_double() on s390 I realized that we don't set
CONFIG_CMPXCHG_LOCAL despite the fact that we have support for it.
However, setting that option would increase the size of struct page by
eight bytes on 64-bit, which we certainly do not want.  Also, it doesn't
make sense that the mere presence of a CPU feature should increase the
size of struct page.
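
As an illustration of where those eight bytes come from, here is a
minimal userspace sketch (not kernel code; the 7-word payload merely
stands in for the unaligned 56-byte struct page on 64-bit):

#include <stdio.h>

/* 7 machine words: 56 bytes on 64-bit, natural alignment 8. */
struct plain {
        unsigned long w[7];
};

/* Same payload, forced to 2 * sizeof(long) = 16-byte alignment.
 * sizeof() rounds up to a multiple of the alignment, so the struct
 * grows to 64 bytes, i.e. eight bytes of pure padding. */
struct padded {
        unsigned long w[7];
} __attribute__((__aligned__(2 * sizeof(unsigned long))));

int main(void)
{
        printf("plain:  %zu\n", sizeof(struct plain));  /* 56 */
        printf("padded: %zu\n", sizeof(struct padded)); /* 64 */
        return 0;
}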

Besides that, it looks like the dependency on CMPXCHG_LOCAL is wrong and
that it should depend on CMPXCHG_DOUBLE instead.

This patch:

An architecture's support for CMPXCHG_LOCAL shouldn't automatically
result in a larger struct page when the SLUB allocator is used.  Instead,
introduce a new config option, HAVE_ALIGNED_STRUCT_PAGE, which an
architecture can select if it requires a double word aligned struct page.
Also update the x86 Kconfig so that it works as before.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
arch/Kconfig
arch/x86/Kconfig
include/linux/mm_types.h
mm/slub.c

diff --git a/arch/Kconfig b/arch/Kconfig
index e24f8e47bb6f57ae48d000d6335d2c9e3b88477c..0fb17e0c4cd0896c2c9758c301872849c27f792b 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -188,4 +188,12 @@ config HAVE_RCU_TABLE_FREE
 config ARCH_HAVE_NMI_SAFE_CMPXCHG
        bool
 
+config HAVE_ALIGNED_STRUCT_PAGE
+       bool
+       help
+         This makes sure that struct pages are double word aligned and that
+         e.g. the SLUB allocator can perform double word atomic operations
+         on a struct page for better performance. However selecting this
+         might increase the size of a struct page by a word.
+
 source "kernel/gcov/Kconfig"
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index baff69e4345a0e31ff2a1f199dfce017b0e5ebe9..05375dcd7c92381c6aae4212a98ec232304c8141 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -60,6 +60,7 @@ config X86
        select PERF_EVENTS
        select HAVE_PERF_EVENTS_NMI
        select ANON_INODES
+       select HAVE_ALIGNED_STRUCT_PAGE if SLUB && !M386
        select HAVE_ARCH_KMEMCHECK
        select HAVE_USER_RETURN_NOTIFIER
        select HAVE_ARCH_JUMP_LABEL
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index be242ebdc92d28b40f7a4bc263ce51727f285f89..81a56dfd24f159acbfbf70a29fe49e3fdbc0fd7f 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -152,12 +152,11 @@ struct page {
 #endif
 }
 /*
- * If another subsystem starts using the double word pairing for atomic
- * operations on struct page then it must change the #if to ensure
- * proper alignment of the page struct.
+ * The struct page can be forced to be double word aligned so that atomic ops
+ * on double words work. The SLUB allocator can make use of such a feature.
  */
-#if defined(CONFIG_SLUB) && defined(CONFIG_CMPXCHG_LOCAL)
-       __attribute__((__aligned__(2*sizeof(unsigned long))))
+#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
+       __aligned(2 * sizeof(unsigned long))
 #endif
 ;
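
For context, __aligned(x) is the kernel's shorthand for
__attribute__((aligned(x))).  The forced alignment matters because
x86-64's cmpxchg16b, which backs cmpxchg_double(), faults on a memory
operand that is not 16-byte aligned.  Below is a hedged userspace
sketch of such a double word compare-and-swap; struct pair and
pair_cmpxchg() are hypothetical stand-ins for the freelist/counters
pair in struct page (build with gcc -mcx16 on x86-64):

#include <stdbool.h>
#include <stdint.h>

typedef unsigned __int128 u128;

/* Two adjacent words updated as one atomic unit; the forced 16-byte
 * alignment is what CONFIG_HAVE_ALIGNED_STRUCT_PAGE provides for
 * struct page. */
struct pair {
        void *freelist;
        unsigned long counters;
} __attribute__((__aligned__(2 * sizeof(unsigned long))));

/* Compare-and-swap both words at once.  Little-endian layout is
 * assumed and the strict-aliasing cast is tolerated for brevity. */
static bool pair_cmpxchg(struct pair *p,
                         void *old_f, unsigned long old_c,
                         void *new_f, unsigned long new_c)
{
        u128 old = (u128)(uintptr_t)old_f | ((u128)old_c << 64);
        u128 new = (u128)(uintptr_t)new_f | ((u128)new_c << 64);

        return __atomic_compare_exchange_n((u128 *)(void *)p, &old, new,
                                           false, __ATOMIC_SEQ_CST,
                                           __ATOMIC_SEQ_CST);
}

int main(void)
{
        struct pair p = { 0, 0 };

        /* Succeeds only if both words still hold their old values. */
        return pair_cmpxchg(&p, 0, 0, &p, 1) ? 0 : 1;
}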
 
diff --git a/mm/slub.c b/mm/slub.c
index 04e89e50a6dd29683d89069903261690f8427e50..b5c6db2dfe7f6c4af7090cc34bdfa6f1610c5505 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -366,7 +366,7 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page
                const char *n)
 {
        VM_BUG_ON(!irqs_disabled());
-#ifdef CONFIG_CMPXCHG_DOUBLE
+#if defined(CONFIG_CMPXCHG_DOUBLE) && defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
        if (s->flags & __CMPXCHG_DOUBLE) {
                if (cmpxchg_double(&page->freelist,
                        freelist_old, counters_old,
@@ -400,7 +400,7 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
                void *freelist_new, unsigned long counters_new,
                const char *n)
 {
-#ifdef CONFIG_CMPXCHG_DOUBLE
+#if defined(CONFIG_CMPXCHG_DOUBLE) && defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
        if (s->flags & __CMPXCHG_DOUBLE) {
                if (cmpxchg_double(&page->freelist,
                        freelist_old, counters_old,
@@ -3002,7 +3002,7 @@ static int kmem_cache_open(struct kmem_cache *s,
                }
        }
 
-#ifdef CONFIG_CMPXCHG_DOUBLE
+#if defined(CONFIG_CMPXCHG_DOUBLE) && defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
        if (system_has_cmpxchg_double() && (s->flags & SLAB_DEBUG_FLAGS) == 0)
                /* Enable fast mode */
                s->flags |= __CMPXCHG_DOUBLE;
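
The three slub.c hunks share one pattern: probe the CPU once at cache
creation, then branch on a per-cache flag in the hot paths.  A hedged
userspace sketch of that pattern, with hypothetical names (cache_open,
update_slab, FAST_DOUBLE) standing in for the kernel's
kmem_cache_open(), cmpxchg_double_slab() and __CMPXCHG_DOUBLE:

#include <stdbool.h>

#define FAST_DOUBLE 0x1u        /* stand-in for __CMPXCHG_DOUBLE */

struct cache {
        unsigned int flags;
};

/* Runtime CPU probe, stubbed to true here; the kernel asks
 * system_has_cmpxchg_double() instead. */
static bool cpu_has_cmpxchg_double(void)
{
        return true;
}

/* Decided once at cache creation; debug caches keep the locked path. */
static void cache_open(struct cache *s, bool debugging)
{
        if (cpu_has_cmpxchg_double() && !debugging)
                s->flags |= FAST_DOUBLE;        /* enable fast mode */
}

/* Hot path: one flag test picks the lockless double word update or
 * the lock-based fallback. */
static bool update_slab(struct cache *s, bool (*lockless)(void),
                        bool (*locked)(void))
{
        return (s->flags & FAST_DOUBLE) ? lockless() : locked();
}

static bool fast_path(void) { return true; }
static bool slow_path(void) { return true; }

int main(void)
{
        struct cache s = { 0 };

        cache_open(&s, false);
        return update_slab(&s, fast_path, slow_path) ? 0 : 1;
}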