From: Christoph Lameter
Date: Mon, 8 Jul 2013 18:56:21 +0000 (+0000)
Subject: mm/sl[aou]b: Move kmalloc definitions to slab.h
X-Git-Tag: next-20130717~35^2~3^2
X-Git-Url: https://git.karo-electronics.de/?a=commitdiff_plain;h=590a63973e36;p=karo-tx-linux.git

mm/sl[aou]b: Move kmalloc definitions to slab.h

The kmalloc() definitions of all three slab allocators are now mostly
the same, so unify them in slab.h. This leaves slob_def.h empty, so
remove it.

Signed-off-by: Christoph Lameter
Signed-off-by: Pekka Enberg
---

diff --git a/include/linux/slab.h b/include/linux/slab.h
index f193292ae86b..067c8d305e12 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -4,6 +4,8 @@
  * (C) SGI 2006, Christoph Lameter
  *	Cleaned up and restructured to ease the addition of alternative
  *	implementations of SLAB allocators.
+ * (C) Linux Foundation 2008-2013
+ *      Unified interface for all slab allocators
  */
 
 #ifndef _LINUX_SLAB_H
@@ -12,6 +14,7 @@
 #include <linux/gfp.h>
 #include <linux/types.h>
 #include <linux/workqueue.h>
+#include <linux/kmemleak.h>
 
 
 /*
@@ -312,12 +315,12 @@ kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
 #ifdef CONFIG_TRACING
 extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
					   gfp_t gfpflags,
-					   int node);
+					   int node, size_t size);
 #else
 static __always_inline void *
 kmem_cache_alloc_node_trace(struct kmem_cache *s,
	      gfp_t gfpflags,
-	      int node)
+	      int node, size_t size)
 {
	return kmem_cache_alloc_node(s, gfpflags, node);
 }
@@ -331,9 +334,70 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
 #include <linux/slub_def.h>
 #endif
 
-#ifdef CONFIG_SLOB
-#include <linux/slob_def.h>
+static __always_inline void *
+kmalloc_order(size_t size, gfp_t flags, unsigned int order)
+{
+	void *ret;
+
+	flags |= (__GFP_COMP | __GFP_KMEMCG);
+	ret = (void *) __get_free_pages(flags, order);
+	kmemleak_alloc(ret, size, 1, flags);
+	return ret;
+}
+
+#ifdef CONFIG_TRACING
+extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
+#else
+static __always_inline void *
+kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
+{
+	return kmalloc_order(size, flags, order);
+}
+#endif
+
+static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
+{
+	unsigned int order = get_order(size);
+	return kmalloc_order_trace(size, flags, order);
+}
+
+#ifdef CONFIG_TRACING
+extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
+#else
+static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
+		gfp_t flags, size_t size)
+{
+	return kmem_cache_alloc(s, flags);
+}
+#endif
+
+/**
+ * kmalloc - allocate memory
+ * @size: how many bytes of memory are required.
+ * @flags: the type of memory to allocate (see kcalloc).
+ *
+ * kmalloc is the normal method of allocating memory
+ * for objects smaller than page size in the kernel.
+ */
+static __always_inline void *kmalloc(size_t size, gfp_t flags)
+{
+	if (__builtin_constant_p(size)) {
+		if (size > KMALLOC_MAX_CACHE_SIZE)
+			return kmalloc_large(size, flags);
+#ifndef CONFIG_SLOB
+		if (!(flags & GFP_DMA)) {
+			int index = kmalloc_index(size);
+
+			if (!index)
+				return ZERO_SIZE_PTR;
+
+			return kmem_cache_alloc_trace(kmalloc_caches[index],
+					flags, size);
+		}
 #endif
+	}
+	return __kmalloc(size, flags);
+}
 
 /*
  * Determine size used for the nth kmalloc cache.
@@ -366,7 +430,7 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
			return ZERO_SIZE_PTR;
 
		return kmem_cache_alloc_node_trace(kmalloc_caches[i],
-						flags, node);
+						flags, node, size);
	}
 #endif
	return __kmalloc_node(size, flags, node);
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 8ce56a59c088..078eaaf54b46 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -102,44 +102,4 @@ struct kmem_cache {
	 */
 };
 
-#ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
-#else
-static __always_inline void *
-kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
-{
-	return kmem_cache_alloc(cachep, flags);
-}
-#endif
-
-static __always_inline void *kmalloc(size_t size, gfp_t flags)
-{
-	struct kmem_cache *cachep;
-	void *ret;
-
-	if (__builtin_constant_p(size)) {
-		int i;
-
-		if (!size)
-			return ZERO_SIZE_PTR;
-
-		if (WARN_ON_ONCE(size > KMALLOC_MAX_SIZE))
-			return NULL;
-
-		i = kmalloc_index(size);
-
-#ifdef CONFIG_ZONE_DMA
-		if (flags & GFP_DMA)
-			cachep = kmalloc_dma_caches[i];
-		else
-#endif
-			cachep = kmalloc_caches[i];
-
-		ret = kmem_cache_alloc_trace(cachep, flags, size);
-
-		return ret;
-	}
-	return __kmalloc(size, flags);
-}
-
 #endif /* _LINUX_SLAB_DEF_H */
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
deleted file mode 100644
index b5eca694f4a3..000000000000
--- a/include/linux/slob_def.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef __LINUX_SLOB_DEF_H
-#define __LINUX_SLOB_DEF_H
-
-static __always_inline void *kmalloc(size_t size, gfp_t flags)
-{
-	return __kmalloc_node(size, flags, NUMA_NO_NODE);
-}
-
-#endif /* __LINUX_SLOB_DEF_H */
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index b66279f2785b..75b4be3f6b82 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -12,8 +12,6 @@
 #include <linux/workqueue.h>
 #include <linux/kobject.h>
 
-#include <linux/kmemleak.h>
-
 enum stat_item {
	ALLOC_FASTPATH,		/* Allocation from cpu slab */
	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
@@ -104,17 +102,6 @@ struct kmem_cache {
	struct kmem_cache_node *node[MAX_NUMNODES];
 };
 
-static __always_inline void *
-kmalloc_order(size_t size, gfp_t flags, unsigned int order)
-{
-	void *ret;
-
-	flags |= (__GFP_COMP | __GFP_KMEMCG);
-	ret = (void *) __get_free_pages(flags, order);
-	kmemleak_alloc(ret, size, 1, flags);
-	return ret;
-}
-
 /**
  * Calling this on allocated memory will check that the memory
  * is expected to be in use, and print warnings if not.
@@ -128,47 +115,4 @@ static inline bool verify_mem_not_deleted(const void *x)
 }
 #endif
 
-#ifdef CONFIG_TRACING
-extern void *
-kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size);
-extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
-#else
-static __always_inline void *
-kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
-{
-	return kmem_cache_alloc(s, gfpflags);
-}
-
-static __always_inline void *
-kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
-{
-	return kmalloc_order(size, flags, order);
-}
-#endif
-
-static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
-{
-	unsigned int order = get_order(size);
-	return kmalloc_order_trace(size, flags, order);
-}
-
-static __always_inline void *kmalloc(size_t size, gfp_t flags)
-{
-	if (__builtin_constant_p(size)) {
-		if (size > KMALLOC_MAX_CACHE_SIZE)
-			return kmalloc_large(size, flags);
-
-		if (!(flags & GFP_DMA)) {
-			int index = kmalloc_index(size);
-
-			if (!index)
-				return ZERO_SIZE_PTR;
-
-			return kmem_cache_alloc_trace(kmalloc_caches[index],
-					flags, size);
-		}
-	}
-	return __kmalloc(size, flags);
-}
-
 #endif /* _LINUX_SLUB_DEF_H */
diff --git a/mm/slab.c b/mm/slab.c
index d6c20e4a4106..57ab42297d96 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3688,7 +3688,7 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
	cachep = kmalloc_slab(size, flags);
	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
		return cachep;
-	return kmem_cache_alloc_node_trace(cachep, flags, node);
+	return kmem_cache_alloc_node_trace(cachep, flags, node, size);
 }
 
 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
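
A note on the dispatch pattern the unified kmalloc() relies on: because the
function is __always_inline, __builtin_constant_p(size) lets the compiler
resolve a constant-size call site to a size-class cache at compile time,
while variable sizes fall through to the out-of-line __kmalloc(). The
standalone userspace sketch below (not part of the patch) mimics that
pattern; example_alloc(), size_index() and generic_alloc() are hypothetical
stand-ins for kmalloc(), kmalloc_index() and __kmalloc(), and the file must
be built with optimization (e.g. gcc -O2) for the constant folding to occur.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for __kmalloc(): the out-of-line, variable-size slow path. */
static void *generic_alloc(size_t size)
{
	printf("slow path: %zu bytes\n", size);
	return malloc(size);
}

/* Stand-in for kmalloc_index(): map a size to a power-of-two class.
 * Returns 0 for size 0, mirroring the ZERO_SIZE_PTR convention. */
static inline int size_index(size_t size)
{
	int i = 3;			/* smallest class: 2^3 = 8 bytes */

	if (!size)
		return 0;
	while (((size_t)1 << i) < size)
		i++;
	return i;
}

/* Stand-in for kmalloc(): constant sizes take the inline fast path. */
static inline __attribute__((always_inline)) void *example_alloc(size_t size,
								  int dma)
{
	if (__builtin_constant_p(size)) {	/* folded at compile time */
		int index = size_index(size);

		if (!index)
			return NULL;	/* the kernel returns ZERO_SIZE_PTR */
		if (!dma) {		/* like the !(flags & GFP_DMA) test */
			printf("fast path: class %d\n", index);
			return malloc((size_t)1 << index);
		}
	}
	return generic_alloc(size);
}

int main(void)
{
	size_t n = (size_t)(rand() % 100) + 1;
	void *a = example_alloc(64, 0);	/* constant size: fast path */
	void *b = example_alloc(n, 0);	/* variable size: slow path */

	free(a);
	free(b);
	return 0;
}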