This reverts commit 590a63973e364be0dab35ddbdc18b3ad5f386aed.
Stephen Rothwell writes:
After merging the slab tree, today's linux-next build (x86_64
allmodconfig) failed like this:
In file included from include/linux/slab.h:17:0,
                 from include/linux/crypto.h:24,
                 from arch/x86/kernel/asm-offsets.c:8:
include/linux/kmemleak.h: In function 'kmemleak_alloc_recursive':
include/linux/kmemleak.h:44:16: error: 'SLAB_NOLEAKTRACE' undeclared (first use in this function)
  if (!(flags & SLAB_NOLEAKTRACE))
                ^
include/linux/kmemleak.h: In function 'kmemleak_free_recursive':
include/linux/kmemleak.h:50:16: error: 'SLAB_NOLEAKTRACE' undeclared (first use in this function)
  if (!(flags & SLAB_NOLEAKTRACE))
                ^
Probably caused by commit 590a63973e36 ("mm/sl[aou]b: Move kmalloc
definitions to slab.h").

I have used the slab tree from next-20130709 for today.
And, yes, I am a little annoyed by this.
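For the record, the failure is a plain declaration-ordering problem:
kmemleak.h's inline helpers test SLAB_NOLEAKTRACE, which slab.h only
defines further down, so having slab.h pull in kmemleak.h near its top
leaves the flag undeclared at the point of use. A standalone sketch of
the same mechanism (illustrative only, not the kernel headers; the
helper signature is simplified, and the flag value mirrors slab.h of
this period):

/* ordering.c: compiles as-is with "cc -c ordering.c". Swapping the
 * two blocks below, which is effectively what including kmemleak.h
 * from the top of slab.h did, reproduces
 * "error: 'SLAB_NOLEAKTRACE' undeclared".
 */
#define SLAB_NOLEAKTRACE	0x00800000UL	/* defined in slab.h */

/* simplified stand-in for the inline helper in kmemleak.h */
static inline void kmemleak_alloc_recursive(unsigned long flags)
{
	if (!(flags & SLAB_NOLEAKTRACE))
		;	/* the real helper records the allocation here */
}

int main(void)
{
	kmemleak_alloc_recursive(0);
	return 0;
}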
Reported-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
* (C) SGI 2006, Christoph Lameter
* Cleaned up and restructured to ease the addition of alternative
* implementations of SLAB allocators.
- * (C) Linux Foundation 2008-2013
- * Unified interface for all slab allocators
*/
#ifndef _LINUX_SLAB_H
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/workqueue.h>
-#include <linux/kmemleak.h>
/*
#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
gfp_t gfpflags,
- int node, size_t size);
+ int node);
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
gfp_t gfpflags,
- int node, size_t size)
+ int node)
{
return kmem_cache_alloc_node(s, gfpflags, node);
}
#include <linux/slub_def.h>
#endif
-static __always_inline void *
-kmalloc_order(size_t size, gfp_t flags, unsigned int order)
-{
- void *ret;
-
- flags |= (__GFP_COMP | __GFP_KMEMCG);
- ret = (void *) __get_free_pages(flags, order);
- kmemleak_alloc(ret, size, 1, flags);
- return ret;
-}
-
-#ifdef CONFIG_TRACING
-extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
-#else
-static __always_inline void *
-kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
-{
- return kmalloc_order(size, flags, order);
-}
-#endif
-
-static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
-{
- unsigned int order = get_order(size);
- return kmalloc_order_trace(size, flags, order);
-}
-
-#ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
-#else
-static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
- gfp_t flags, size_t size)
-{
- return kmem_cache_alloc(s, flags);
-}
-#endif
-
-/**
- * kmalloc - allocate memory
- * @size: how many bytes of memory are required.
- * @flags: the type of memory to allocate (see kcalloc).
- *
- * kmalloc is the normal method of allocating memory
- * for objects smaller than page size in the kernel.
- */
-static __always_inline void *kmalloc(size_t size, gfp_t flags)
-{
- if (__builtin_constant_p(size)) {
- if (size > KMALLOC_MAX_CACHE_SIZE)
- return kmalloc_large(size, flags);
-#ifndef CONFIG_SLOB
- if (!(flags & GFP_DMA)) {
- int index = kmalloc_index(size);
-
- if (!index)
- return ZERO_SIZE_PTR;
-
- return kmem_cache_alloc_trace(kmalloc_caches[index],
- flags, size);
- }
+#ifdef CONFIG_SLOB
+#include <linux/slob_def.h>
#endif
- }
- return __kmalloc(size, flags);
-}
/*
* Determine size used for the nth kmalloc cache.
return ZERO_SIZE_PTR;
return kmem_cache_alloc_node_trace(kmalloc_caches[i],
- flags, node, size);
+ flags, node);
}
#endif
return __kmalloc_node(size, flags, node);
*/
};
+#ifdef CONFIG_TRACING
+extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
+#else
+static __always_inline void *
+kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
+{
+ return kmem_cache_alloc(cachep, flags);
+}
+#endif
+
+static __always_inline void *kmalloc(size_t size, gfp_t flags)
+{
+ struct kmem_cache *cachep;
+ void *ret;
+
+ if (__builtin_constant_p(size)) {
+ int i;
+
+ if (!size)
+ return ZERO_SIZE_PTR;
+
+ if (WARN_ON_ONCE(size > KMALLOC_MAX_SIZE))
+ return NULL;
+
+ i = kmalloc_index(size);
+
+#ifdef CONFIG_ZONE_DMA
+ if (flags & GFP_DMA)
+ cachep = kmalloc_dma_caches[i];
+ else
+#endif
+ cachep = kmalloc_caches[i];
+
+ ret = kmem_cache_alloc_trace(cachep, flags, size);
+
+ return ret;
+ }
+ return __kmalloc(size, flags);
+}
+
#endif /* _LINUX_SLAB_DEF_H */
--- /dev/null
+++ b/include/linux/slob_def.h
+#ifndef __LINUX_SLOB_DEF_H
+#define __LINUX_SLOB_DEF_H
+
+static __always_inline void *kmalloc(size_t size, gfp_t flags)
+{
+ return __kmalloc_node(size, flags, NUMA_NO_NODE);
+}
+
+#endif /* __LINUX_SLOB_DEF_H */
#include <linux/workqueue.h>
#include <linux/kobject.h>
+#include <linux/kmemleak.h>
+
enum stat_item {
ALLOC_FASTPATH, /* Allocation from cpu slab */
ALLOC_SLOWPATH, /* Allocation by getting a new cpu slab */
struct kmem_cache_node *node[MAX_NUMNODES];
};
+static __always_inline void *
+kmalloc_order(size_t size, gfp_t flags, unsigned int order)
+{
+ void *ret;
+
+ flags |= (__GFP_COMP | __GFP_KMEMCG);
+ ret = (void *) __get_free_pages(flags, order);
+ kmemleak_alloc(ret, size, 1, flags);
+ return ret;
+}
+
/**
* Calling this on allocated memory will check that the memory
* is expected to be in use, and print warnings if not.
}
#endif
+#ifdef CONFIG_TRACING
+extern void *
+kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size);
+extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
+#else
+static __always_inline void *
+kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
+{
+ return kmem_cache_alloc(s, gfpflags);
+}
+
+static __always_inline void *
+kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
+{
+ return kmalloc_order(size, flags, order);
+}
+#endif
+
+static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
+{
+ unsigned int order = get_order(size);
+ return kmalloc_order_trace(size, flags, order);
+}
+
+static __always_inline void *kmalloc(size_t size, gfp_t flags)
+{
+ if (__builtin_constant_p(size)) {
+ if (size > KMALLOC_MAX_CACHE_SIZE)
+ return kmalloc_large(size, flags);
+
+ if (!(flags & GFP_DMA)) {
+ int index = kmalloc_index(size);
+
+ if (!index)
+ return ZERO_SIZE_PTR;
+
+ return kmem_cache_alloc_trace(kmalloc_caches[index],
+ flags, size);
+ }
+ }
+ return __kmalloc(size, flags);
+}
+
#endif /* _LINUX_SLUB_DEF_H */
cachep = kmalloc_slab(size, flags);
if (unlikely(ZERO_OR_NULL_PTR(cachep)))
return cachep;
- return kmem_cache_alloc_node_trace(cachep, flags, node, size);
+ return kmem_cache_alloc_node_trace(cachep, flags, node);
}
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)