/*
 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
 *
 * (C) SGI 2006, Christoph Lameter
 *	Cleaned up and restructured to ease the addition of alternative
 *	implementations of SLAB allocators.
 */

#ifndef _LINUX_SLAB_H
#define _LINUX_SLAB_H

#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/workqueue.h>

/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
 */
#define SLAB_DEBUG_FREE		0x00000100UL	/* DEBUG: Perform (expensive) checks on free */
#define SLAB_RED_ZONE		0x00000400UL	/* DEBUG: Red zone objs in a cache */
#define SLAB_POISON		0x00000800UL	/* DEBUG: Poison objects */
#define SLAB_HWCACHE_ALIGN	0x00002000UL	/* Align objs on cache lines */
#define SLAB_CACHE_DMA		0x00004000UL	/* Use GFP_DMA memory */
#define SLAB_STORE_USER		0x00010000UL	/* DEBUG: Store the last owner for bug hunting */
#define SLAB_PANIC		0x00040000UL	/* Panic if kmem_cache_create() fails */
/*
 * SLAB_DESTROY_BY_RCU - **WARNING** READ THIS!
 *
 * This delays freeing the SLAB page by a grace period; it does _NOT_
 * delay object freeing. This means that if you do kmem_cache_free()
 * that memory location is free to be reused at any time. Thus it may
 * be possible to see another object there in the same RCU grace period.
 *
 * This feature only ensures the memory location backing the object
 * stays valid; the trick to using this is relying on an independent
 * object validation pass. Something like:
 *
 * rcu_read_lock();
 * again:
 *  obj = lockless_lookup(key);
 *  if (obj) {
 *    if (!try_get_ref(obj)) // might fail for free objects
 *      goto again;
 *
 *    if (obj->key != key) { // not the object we expected
 *      put_ref(obj);
 *      goto again;
 *    }
 *  }
 * rcu_read_unlock();
 *
 * See also the comment on struct slab_rcu in mm/slab.c.
 */
#define SLAB_DESTROY_BY_RCU	0x00080000UL	/* Defer freeing slabs to RCU */
#define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
#define SLAB_TRACE		0x00200000UL	/* Trace allocations and frees */

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS	0x00400000UL
#else
# define SLAB_DEBUG_OBJECTS	0x00000000UL
#endif

#define SLAB_NOLEAKTRACE	0x00800000UL	/* Avoid kmemleak tracing */

/* Don't track use of uninitialized memory */
#ifdef CONFIG_KMEMCHECK
# define SLAB_NOTRACK		0x01000000UL
#else
# define SLAB_NOTRACK		0x00000000UL
#endif

#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB		0x02000000UL	/* Fault injection mark */
#else
# define SLAB_FAILSLAB		0x00000000UL
#endif

/* The following flags affect the page allocator grouping pages by mobility */
#define SLAB_RECLAIM_ACCOUNT	0x00020000UL		/* Objects are reclaimable */
#define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)
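
/*
 * A minimal illustration of the contract described above:
 *
 * void *p = kmalloc(0, GFP_KERNEL);	// p == ZERO_SIZE_PTR, not NULL
 *
 * if (ZERO_OR_NULL_PTR(p))		// true for both NULL and ZERO_SIZE_PTR
 *	;				// nothing was actually allocated
 * kfree(p);				// safe no-op for either value
 */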

/*
 * Common fields provided in kmem_cache by all slab allocators
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
#ifdef CONFIG_SLOB
struct kmem_cache {
	unsigned int object_size;/* The original size of the object */
	unsigned int size;	/* The aligned/padded/added on size */
	unsigned int align;	/* Alignment as calculated */
	unsigned long flags;	/* Active flags on the slab */
	const char *name;	/* Slab name for sysfs */
	int refcount;		/* Use counter */
	void (*ctor)(void *);	/* Called on object slot creation */
	struct list_head list;	/* List of all slab caches on the system */
};
#endif

struct mem_cgroup;
/*
 * struct kmem_cache related prototypes
 */
void __init kmem_cache_init(void);
int slab_is_available(void);

struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
			unsigned long,
			void (*)(void *));
struct kmem_cache *
kmem_cache_create_memcg(struct mem_cgroup *, const char *, size_t, size_t,
			unsigned long, void (*)(void *));
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);
void kmem_cache_free(struct kmem_cache *, void *);
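
/*
 * Typical cache lifecycle, sketched for a hypothetical struct my_obj
 * (error handling elided; kmem_cache_create() returns NULL on failure
 * unless SLAB_PANIC is set):
 *
 * static struct kmem_cache *my_cachep;
 *
 * my_cachep = kmem_cache_create("my_obj", sizeof(struct my_obj),
 *				 0, SLAB_HWCACHE_ALIGN, NULL);
 * obj = kmem_cache_alloc(my_cachep, GFP_KERNEL);
 * ...
 * kmem_cache_free(my_cachep, obj);
 * kmem_cache_destroy(my_cachep);
 */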

/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment. If you
 * e.g. add ____cacheline_aligned_in_smp to the struct declaration
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
		sizeof(struct __struct), __alignof__(struct __struct),\
		(__flags), NULL)
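
/*
 * For example, assuming a struct my_obj defined elsewhere, the line
 * below is shorthand for the explicit kmem_cache_create() call shown
 * earlier, with the cache named "my_obj" and objects aligned to the
 * struct's own alignment:
 *
 * my_cachep = KMEM_CACHE(my_obj, SLAB_PANIC);
 */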

/*
 * The largest kmalloc size supported by the slab allocators is
 * 32 megabytes (2^25), or the maximum allocatable page order if that is
 * less than 32 MB.
 *
 * WARNING: It's not easy to increase this value since the allocators have
 * to do various tricks to work around compiler limitations in order to
 * ensure proper constant folding.
 */
#define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
				(MAX_ORDER + PAGE_SHIFT - 1) : 25)

#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_HIGH - PAGE_SHIFT)
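
/*
 * Worked example, assuming the common x86 values PAGE_SHIFT == 12 and
 * MAX_ORDER == 11: MAX_ORDER + PAGE_SHIFT - 1 == 22, which is <= 25, so
 * KMALLOC_SHIFT_HIGH == 22, KMALLOC_MAX_SIZE == 4 MB and
 * KMALLOC_MAX_ORDER == 10 (a 4 MB allocation spans 2^10 4 KB pages).
 */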

/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
 */
#ifdef ARCH_DMA_MINALIGN
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
 * Intended for arches that get misalignment faults even for 64 bit integer
 * aligned buffers.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif
/*
 * This is the main placeholder for memcg-related information in kmem caches.
 * struct kmem_cache will hold a pointer to it, so the memory cost while
 * disabled is 1 pointer. The runtime cost while enabled gets bigger than it
 * would otherwise be if that were bundled in kmem_cache: we'll need an
 * extra pointer chase. But the trade off clearly lies in favor of not
 * penalizing non-users.
 *
 * Both the root cache and the child caches will have it. For the root cache,
 * this will hold a dynamically allocated array large enough to hold
 * information about the currently limited memcgs in the system.
 *
 * Child caches will hold extra metadata needed for their operation. Fields are:
 *
 * @memcg: pointer to the memcg this cache belongs to
 * @list: list_head for the list of all caches in this memcg
 * @root_cache: pointer to the global, root cache, this cache was derived from
 * @dead: set to true after the memcg dies; the cache may still be around.
 * @nr_pages: number of pages that belong to this cache.
 * @destroy: worker to be called whenever we are ready, or believe we may be
 *           ready, to destroy this cache.
 */
struct memcg_cache_params {
	bool is_root_cache;
	union {
		struct kmem_cache *memcg_caches[0];
		struct {
			struct mem_cgroup *memcg;
			struct list_head list;
			struct kmem_cache *root_cache;
			bool dead;
			atomic_t nr_pages;
			struct work_struct destroy;
		};
	};
};

int memcg_update_all_caches(int num_memcgs);

/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check __krealloc(const void *, size_t, gfp_t);
void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
void kzfree(const void *);
size_t ksize(const void *);
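
/*
 * krealloc() usage sketch (buf and new_len are hypothetical here); as
 * with userspace realloc(), do not overwrite the original pointer with
 * the return value directly, or the buffer is leaked on failure:
 *
 * void *tmp = krealloc(buf, new_len, GFP_KERNEL);
 *
 * if (!tmp)
 *	return -ENOMEM;	// buf is still valid and still owned by us
 * buf = tmp;
 */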

/*
 * Allocator specific definitions. These are mainly used to establish optimized
 * ways to convert kmalloc() calls to kmem_cache_alloc() invocations by
 * selecting the appropriate general cache at compile time.
 *
 * Allocators must define at least:
 *
 *	kmem_cache_alloc()
 *	__kmalloc()
 *	kmalloc()
 *
 * Those wishing to support NUMA must also define:
 *
 *	kmem_cache_alloc_node()
 *	kmalloc_node()
 *
 * See each allocator definition file for additional comments and
 * implementation notes.
 */
#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#elif defined(CONFIG_SLOB)
#include <linux/slob_def.h>
#else
#include <linux/slab_def.h>
#endif

/**
 * kmalloc_array - allocate memory for an array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate.
 *
 * The @flags argument may be one of:
 *
 * %GFP_USER - Allocate memory on behalf of user.  May sleep.
 *
 * %GFP_KERNEL - Allocate normal kernel ram.  May sleep.
 *
 * %GFP_ATOMIC - Allocation will not sleep.  May use emergency pools.
 *   For example, use this inside interrupt handlers.
 *
 * %GFP_HIGHUSER - Allocate pages from high memory.
 *
 * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
 *
 * %GFP_NOFS - Do not make any fs calls while trying to get memory.
 *
 * %GFP_NOWAIT - Allocation will not sleep.
 *
 * %GFP_THISNODE - Allocate node-local memory only.
 *
 * %GFP_DMA - Allocation suitable for DMA.
 *   Should only be used for kmalloc() caches. Otherwise, use a
 *   slab created with SLAB_CACHE_DMA.
 *
 * Also it is possible to set different flags by OR'ing
 * in one or more of the following additional @flags:
 *
 * %__GFP_COLD - Request cache-cold pages instead of
 *   trying to return cache-warm pages.
 *
 * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
 *
 * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
 *   (think twice before using).
 *
 * %__GFP_NORETRY - If memory is not immediately available,
 *   then give up at once.
 *
 * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
 *
 * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
 *
 * There are other flags available as well, but these are not intended
 * for general use, and so are not documented here. For a full list of
 * potential flags, always refer to linux/gfp.h.
 */
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;
	return __kmalloc(n * size, flags);
}

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	return kmalloc_array(n, size, flags | __GFP_ZERO);
}
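
/*
 * Usage sketch for both helpers (struct my_elem and n are hypothetical):
 * in many callers n is attacker-influenced, which is why the
 * multiplication is overflow-checked above rather than open-coded as
 * kmalloc(n * size, ...):
 *
 * struct my_elem *tab = kcalloc(n, sizeof(*tab), GFP_KERNEL);
 *
 * if (!tab)
 *	return -ENOMEM;	// on success tab is a zeroed n-element array
 */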

#if !defined(CONFIG_NUMA) && !defined(CONFIG_SLOB)
/**
 * kmalloc_node - allocate memory from a specific node
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kcalloc).
 * @node: node to allocate from.
 *
 * kmalloc() for non-local nodes, used to allocate from a specific node
 * if available. Equivalent to kmalloc() in the non-NUMA single-node
 * case.
 */
static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc(size, flags);
}

static inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __kmalloc(size, flags);
}

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);

static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
					gfp_t flags, int node)
{
	return kmem_cache_alloc(cachep, flags);
}
#endif /* !CONFIG_NUMA && !CONFIG_SLOB */
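
/*
 * Callers that care about locality pass an explicit node id; a sketch
 * (data and size are hypothetical):
 *
 * data = kmalloc_node(size, GFP_KERNEL, numa_node_id());
 *
 * On non-NUMA builds this falls back to plain kmalloc() as above, so
 * callers need not make the distinction themselves.
 */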

/*
 * kmalloc_track_caller is a special version of kmalloc that records the
 * calling function of the routine calling it for slab leak tracking instead
 * of just the calling function (confusing, eh?).
 * It's useful when the call to kmalloc comes from a widely-used standard
 * allocator where we care about the real place the memory allocation
 * request comes from.
 */
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
	(defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
	__kmalloc_track_caller(size, flags, _RET_IP_)
#else
#define kmalloc_track_caller(size, flags) \
	__kmalloc(size, flags)
#endif /* DEBUG_SLAB */
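
/*
 * Sketch of the intended use, for a hypothetical wrapper my_strdup():
 * because the macro passes _RET_IP_, debug and leak reports attribute
 * the allocation to my_strdup()'s caller instead of to my_strdup():
 *
 * char *my_strdup(const char *s)
 * {
 *	size_t len = strlen(s) + 1;
 *	char *p = kmalloc_track_caller(len, GFP_KERNEL);
 *
 *	if (p)
 *		memcpy(p, s, len);
 *	return p;
 * }
 */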

#ifdef CONFIG_NUMA
/*
 * kmalloc_node_track_caller is a special version of kmalloc_node that
 * records the calling function of the routine calling it for slab leak
 * tracking instead of just the calling function (confusing, eh?).
 * It's useful when the call to kmalloc_node comes from a widely-used
 * standard allocator where we care about the real place the memory
 * allocation request comes from.
 */
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
	(defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node_track_caller(size, flags, node, \
			_RET_IP_)
#else
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node(size, flags, node)
#endif

#else /* CONFIG_NUMA */

#define kmalloc_node_track_caller(size, flags, node) \
	kmalloc_track_caller(size, flags)

#endif /* CONFIG_NUMA */

/*
 * Shortcuts
 */
static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
	return kmem_cache_alloc(k, flags | __GFP_ZERO);
}

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kzalloc(size_t size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}

/**
 * kzalloc_node - allocate zeroed memory from a particular memory node.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: memory node from which to allocate
 */
static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc_node(size, flags | __GFP_ZERO, node);
}
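
/*
 * The canonical allocate-and-check pattern with these helpers, for a
 * hypothetical struct my_ctx (the __GFP_ZERO variants spare callers a
 * separate memset()):
 *
 * struct my_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 *
 * if (!ctx)
 *	return -ENOMEM;
 */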

/*
 * Determine the size of a slab object
 */
static inline unsigned int kmem_cache_size(struct kmem_cache *s)
{
	return s->object_size;
}

void __init kmem_cache_init_late(void);

#endif	/* _LINUX_SLAB_H */