From 35be03cafb8f5ddcc1236e90144b6ec76296b789 Mon Sep 17 00:00:00 2001
From: Pekka Enberg <penberg@kernel.org>
Date: Wed, 10 Jul 2013 09:56:49 +0300
Subject: [PATCH] Revert "mm/sl[aou]b: Move kmalloc definitions to slab.h"

This reverts commit 590a63973e364be0dab35ddbdc18b3ad5f386aed.

Stephen Rothwell writes:

After merging the slab tree, today's linux-next build (x86_64
allmodconfig) failed like this:

In file included from include/linux/slab.h:17:0,
                 from include/linux/crypto.h:24,
                 from arch/x86/kernel/asm-offsets.c:8:
include/linux/kmemleak.h: In function 'kmemleak_alloc_recursive':
include/linux/kmemleak.h:44:16: error: 'SLAB_NOLEAKTRACE' undeclared (first use in this function)
   if (!(flags & SLAB_NOLEAKTRACE))
                 ^
include/linux/kmemleak.h: In function 'kmemleak_free_recursive':
include/linux/kmemleak.h:50:16: error: 'SLAB_NOLEAKTRACE' undeclared (first use in this function)
   if (!(flags & SLAB_NOLEAKTRACE))
                 ^

Probably caused by commit 590a63973e36 ("mm/sl[aou]b: Move kmalloc
definitions to slab.h").

I have used the slab tree from next-20130709 for today.

And, yes, I am a little annoyed by this.

Reported-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
---
 include/linux/slab.h     | 74 +++-------------------------------------
 include/linux/slab_def.h | 40 ++++++++++++++++++++++
 include/linux/slob_def.h |  9 +++++
 include/linux/slub_def.h | 56 ++++++++++++++++++++++++++++++
 mm/slab.c                |  2 +-
 5 files changed, 111 insertions(+), 70 deletions(-)
 create mode 100644 include/linux/slob_def.h

diff --git a/include/linux/slab.h b/include/linux/slab.h
index 067c8d305e12..f193292ae86b 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -4,8 +4,6 @@
  * (C) SGI 2006, Christoph Lameter
  * 	Cleaned up and restructured to ease the addition of alternative
  * 	implementations of SLAB allocators.
- * (C) Linux Foundation 2008-2013
- *      Unified interface for all slab allocators
  */
 
 #ifndef _LINUX_SLAB_H
@@ -14,7 +12,6 @@
 #include <linux/gfp.h>
 #include <linux/types.h>
 #include <linux/workqueue.h>
-#include <linux/kmemleak.h>
 
 
 /*
@@ -315,12 +312,12 @@ kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
 #ifdef CONFIG_TRACING
 extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
 					 gfp_t gfpflags,
-					 int node, size_t size);
+					 int node);
 #else
 static __always_inline void *
 kmem_cache_alloc_node_trace(struct kmem_cache *s,
 			    gfp_t gfpflags,
-			    int node, size_t size)
+			    int node)
 {
 	return kmem_cache_alloc_node(s, gfpflags, node);
 }
@@ -334,70 +331,9 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
 #include <linux/slub_def.h>
 #endif
 
-static __always_inline void *
-kmalloc_order(size_t size, gfp_t flags, unsigned int order)
-{
-	void *ret;
-
-	flags |= (__GFP_COMP | __GFP_KMEMCG);
-	ret = (void *) __get_free_pages(flags, order);
-	kmemleak_alloc(ret, size, 1, flags);
-	return ret;
-}
-
-#ifdef CONFIG_TRACING
-extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
-#else
-static __always_inline void *
-kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
-{
-	return kmalloc_order(size, flags, order);
-}
-#endif
-
-static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
-{
-	unsigned int order = get_order(size);
-	return kmalloc_order_trace(size, flags, order);
-}
-
-#ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
-#else
-static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
-						    gfp_t flags, size_t size)
-{
-	return kmem_cache_alloc(s, flags);
-}
-#endif
-
-/**
- * kmalloc - allocate memory
- * @size: how many bytes of memory are required.
- * @flags: the type of memory to allocate (see kcalloc).
- *
- * kmalloc is the normal method of allocating memory
- * for objects smaller than page size in the kernel.
- */
-static __always_inline void *kmalloc(size_t size, gfp_t flags)
-{
-	if (__builtin_constant_p(size)) {
-		if (size > KMALLOC_MAX_CACHE_SIZE)
-			return kmalloc_large(size, flags);
-#ifndef CONFIG_SLOB
-		if (!(flags & GFP_DMA)) {
-			int index = kmalloc_index(size);
-
-			if (!index)
-				return ZERO_SIZE_PTR;
-
-			return kmem_cache_alloc_trace(kmalloc_caches[index],
-						      flags, size);
-		}
+#ifdef CONFIG_SLOB
+#include <linux/slob_def.h>
 #endif
-	}
-	return __kmalloc(size, flags);
-}
 
 /*
  * Determine size used for the nth kmalloc cache.
@@ -430,7 +366,7 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 			return ZERO_SIZE_PTR;
 
 		return kmem_cache_alloc_node_trace(kmalloc_caches[i],
-						   flags, node, size);
+						   flags, node);
 	}
 #endif
 	return __kmalloc_node(size, flags, node);
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 078eaaf54b46..8ce56a59c088 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -102,4 +102,44 @@ struct kmem_cache {
 	 */
 };
 
+#ifdef CONFIG_TRACING
+extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
+#else
+static __always_inline void *
+kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
+{
+	return kmem_cache_alloc(cachep, flags);
+}
+#endif
+
+static __always_inline void *kmalloc(size_t size, gfp_t flags)
+{
+	struct kmem_cache *cachep;
+	void *ret;
+
+	if (__builtin_constant_p(size)) {
+		int i;
+
+		if (!size)
+			return ZERO_SIZE_PTR;
+
+		if (WARN_ON_ONCE(size > KMALLOC_MAX_SIZE))
+			return NULL;
+
+		i = kmalloc_index(size);
+
+#ifdef CONFIG_ZONE_DMA
+		if (flags & GFP_DMA)
+			cachep = kmalloc_dma_caches[i];
+		else
+#endif
+			cachep = kmalloc_caches[i];
+
+		ret = kmem_cache_alloc_trace(cachep, flags, size);
+
+		return ret;
+	}
+	return __kmalloc(size, flags);
+}
+
 #endif	/* _LINUX_SLAB_DEF_H */
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
new file mode 100644
index 000000000000..b5eca694f4a3
--- /dev/null
+++ b/include/linux/slob_def.h
@@ -0,0 +1,9 @@
+#ifndef __LINUX_SLOB_DEF_H
+#define __LINUX_SLOB_DEF_H
+
+static __always_inline void *kmalloc(size_t size, gfp_t flags)
+{
+	return __kmalloc_node(size, flags, NUMA_NO_NODE);
+}
+
+#endif /* __LINUX_SLOB_DEF_H */
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 75b4be3f6b82..b66279f2785b 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -12,6 +12,8 @@
 #include <linux/workqueue.h>
 #include <linux/kobject.h>
 
+#include <linux/kmemleak.h>
+
 enum stat_item {
 	ALLOC_FASTPATH,		/* Allocation from cpu slab */
 	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
@@ -102,6 +104,17 @@ struct kmem_cache {
 	struct kmem_cache_node *node[MAX_NUMNODES];
 };
 
+static __always_inline void *
+kmalloc_order(size_t size, gfp_t flags, unsigned int order)
+{
+	void *ret;
+
+	flags |= (__GFP_COMP | __GFP_KMEMCG);
+	ret = (void *) __get_free_pages(flags, order);
+	kmemleak_alloc(ret, size, 1, flags);
+	return ret;
+}
+
 /**
  * Calling this on allocated memory will check that the memory
  * is expected to be in use, and print warnings if not.
@@ -115,4 +128,47 @@ static inline bool verify_mem_not_deleted(const void *x)
 }
 #endif
 
+#ifdef CONFIG_TRACING
+extern void *
+kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size);
+extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
+#else
+static __always_inline void *
+kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
+{
+	return kmem_cache_alloc(s, gfpflags);
+}
+
+static __always_inline void *
+kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
+{
+	return kmalloc_order(size, flags, order);
+}
+#endif
+
+static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
+{
+	unsigned int order = get_order(size);
+	return kmalloc_order_trace(size, flags, order);
+}
+
+static __always_inline void *kmalloc(size_t size, gfp_t flags)
+{
+	if (__builtin_constant_p(size)) {
+		if (size > KMALLOC_MAX_CACHE_SIZE)
+			return kmalloc_large(size, flags);
+
+		if (!(flags & GFP_DMA)) {
+			int index = kmalloc_index(size);
+
+			if (!index)
+				return ZERO_SIZE_PTR;
+
+			return kmem_cache_alloc_trace(kmalloc_caches[index],
+					flags, size);
+		}
+	}
+	return __kmalloc(size, flags);
+}
+
 #endif /* _LINUX_SLUB_DEF_H */
diff --git a/mm/slab.c b/mm/slab.c
index 57ab42297d96..d6c20e4a4106 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3688,7 +3688,7 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
 	cachep = kmalloc_slab(size, flags);
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
-	return kmem_cache_alloc_node_trace(cachep, flags, node, size);
+	return kmem_cache_alloc_node_trace(cachep, flags, node);
 }
 
 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
-- 
2.39.5
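
The failure being reverted here is a plain include-ordering cycle, not anything
slab-specific: slab.h pulled in kmemleak.h at its top so that kmalloc_order()
could call kmemleak_alloc(), but the inline helpers in kmemleak.h test
SLAB_NOLEAKTRACE, which slab.h only defines further down. Reduced to a minimal
standalone sketch (the helper name is illustrative, and the flag value is taken
from slab.h of this era), the preprocessed output the compiler sees is:

	/*
	 * What slab.h effectively expanded to once it included kmemleak.h
	 * at the top: the inline helper precedes the flag it tests.
	 */

	/* ...inlined from kmemleak.h... */
	static inline int kmemleak_wanted(unsigned long flags)
	{
		/* error: 'SLAB_NOLEAKTRACE' undeclared (first use in this function) */
		return !(flags & SLAB_NOLEAKTRACE);
	}

	/* ...back in slab.h, too late for the helper above... */
	#define SLAB_NOLEAKTRACE	0x00800000UL

Compiling that as a single translation unit reproduces the exact diagnostic
Stephen quotes above. The revert breaks the cycle by moving kmalloc_order(),
the only kmemleak_alloc() caller, back into slub_def.h: that header can safely
include kmemleak.h because slab.h only includes it after the SLAB_* flags have
been defined.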