#ifndef _LINUX_SLAB_DEF_H
#define _LINUX_SLAB_DEF_H
/*
 * Definitions unique to the original Linux SLAB allocator.
 *
 * What we provide here is a way to optimize the frequent kmalloc
 * calls in the kernel by selecting the appropriate general cache
 * if kmalloc was called with a size that can be established at
 * compile time.
 */
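
/*
 * Illustrative example of that optimization: with a compile-time constant
 * size, the kmalloc() inline defined below resolves the cache lookup at
 * compile time, while a variable size falls back to the out-of-line
 * __kmalloc():
 *
 *	void *a = kmalloc(128, GFP_KERNEL);	// constant: general cache
 *						// chosen at compile time
 *	void *b = kmalloc(len, GFP_KERNEL);	// variable: calls __kmalloc()
 */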
#include <linux/init.h>
#include <asm/page.h>		/* kmalloc_sizes.h needs PAGE_SIZE */
#include <asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */
#include <linux/compiler.h>

#include <trace/events/kmem.h>
#ifndef ARCH_KMALLOC_MINALIGN
/*
 * Enforce a minimum alignment for the kmalloc caches.
 * Usually, the kmalloc caches are cache_line_size() aligned, except when
 * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned.
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * ARCH_KMALLOC_MINALIGN allows that.
 * Note that increasing this value may disable some debug features.
 */
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif
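
/*
 * Example override (illustrative; the actual value is arch-specific): an
 * architecture that DMAs into kmalloc buffers can define, in its own
 * headers before this point, something like:
 *
 *	#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
 *
 * making every kmalloc allocation cache-line aligned.
 */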
#ifndef ARCH_SLAB_MINALIGN
/*
 * Enforce a minimum alignment for all caches.
 * Intended for archs that get misalignment faults even for BYTES_PER_WORD
 * aligned buffers. Includes ARCH_KMALLOC_MINALIGN.
 * If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables
 * some debug features.
 */
#define ARCH_SLAB_MINALIGN 0
#endif
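
/*
 * Example override (illustrative, with a hypothetical value): an arch that
 * faults on sub-doubleword accesses to doubleword-sized fields could set,
 * in its own headers:
 *
 *	#define ARCH_SLAB_MINALIGN 8
 *
 * which raises the alignment floor for every cache, not just kmalloc's.
 */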
/*
 * struct kmem_cache
 *
 * manages a cache.
 */

struct kmem_cache {
/* 1) per-cpu data, touched during every alloc/free */
	struct array_cache *array[NR_CPUS];
/* 2) Cache tunables. Protected by cache_chain_mutex */
	unsigned int batchcount;
	unsigned int limit;
	unsigned int shared;

	unsigned int buffer_size;
	u32 reciprocal_buffer_size;
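
	/*
	 * Why the reciprocal field exists (illustrative sketch; the real
	 * user is obj_to_index() in mm/slab.c): caching
	 * reciprocal_value(buffer_size) lets hot paths replace a division
	 * with a multiplication:
	 *
	 *	// index = offset / cache->buffer_size, without dividing:
	 *	index = reciprocal_divide(offset, cache->reciprocal_buffer_size);
	 */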
/* 3) touched by every alloc & free from the backend */

	unsigned int flags;		/* constant flags */
	unsigned int num;		/* # of objs per slab */
/* 4) cache_grow/shrink */
	/* order of pgs per slab (2^n) */
	unsigned int gfporder;

	/* force GFP flags, e.g. GFP_DMA */
	gfp_t gfpflags;

	size_t colour;			/* cache colouring range */
	unsigned int colour_off;	/* colour offset */
	struct kmem_cache *slabp_cache;
	unsigned int slab_size;
	unsigned int dflags;		/* dynamic flags */

	/* constructor func */
	void (*ctor)(void *obj);
/* 5) cache creation/removal */
	const char *name;
	struct list_head next;
/* 6) statistics */
#ifdef CONFIG_DEBUG_SLAB
	unsigned long num_active;
	unsigned long num_allocations;
	unsigned long high_mark;
	unsigned long grown;
	unsigned long reaped;
	unsigned long errors;
	unsigned long max_freeable;
	unsigned long node_allocs;
	unsigned long node_frees;
	unsigned long node_overflow;
	atomic_t allochit;
	atomic_t allocmiss;
	atomic_t freehit;
	atomic_t freemiss;

	/*
	 * If debugging is enabled, then the allocator can add additional
	 * fields and/or padding to every object. buffer_size contains the total
	 * object size including these internal fields, the following two
	 * variables contain the offset to the user object and its size.
	 */
	int obj_offset;
	int obj_size;
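
	/*
	 * Rough per-object layout sketch with debugging enabled
	 * (illustrative; see mm/slab.c for the authoritative layout, where
	 * the pad regions hold red zones and related debug state):
	 *
	 *	|<-------------- buffer_size -------------->|
	 *	| debug pad | user object      | debug pad  |
	 *	            ^<--- obj_size --->^
	 *	obj_offset -^
	 */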
#endif /* CONFIG_DEBUG_SLAB */

	/*
	 * We put nodelists[] at the end of kmem_cache, because we want to size
	 * this array to nr_node_ids slots instead of MAX_NUMNODES
	 * (see kmem_cache_init())
	 * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
	 * is statically defined, so we reserve the max number of nodes.
	 */
	struct kmem_list3 *nodelists[MAX_NUMNODES];
	/*
	 * Do not add fields after nodelists[]
	 */
};
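
/*
 * Sizing sketch for the comment above (illustrative; see kmem_cache_init()
 * in mm/slab.c): dynamically created caches only allocate nr_node_ids
 * pointer slots, roughly
 *
 *	size = offsetof(struct kmem_cache, nodelists) +
 *	       nr_node_ids * sizeof(struct kmem_list3 *);
 *
 * which is only safe because nodelists[] is the final member.
 */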
/* Size description struct for general caches. */
struct cache_sizes {
	size_t			cs_size;
	struct kmem_cache	*cs_cachep;
#ifdef CONFIG_ZONE_DMA
	struct kmem_cache	*cs_dmacachep;
#endif
};
extern struct cache_sizes malloc_sizes[];
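
/*
 * Illustrative note: malloc_sizes[] is laid out in the order in which
 * <linux/kmalloc_sizes.h> expands its CACHE(x) entries, one slot per
 * general cache size in ascending order, conceptually:
 *
 *	malloc_sizes[] = { { 32, ... }, { 64, ... }, { 128, ... }, ... };
 *
 * The kmalloc() inline below relies on that ordering to map a constant
 * size to an array index at compile time.
 */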
void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);
#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
extern size_t slab_buffer_size(struct kmem_cache *cachep);
#else
static __always_inline void *
kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
{
	return kmem_cache_alloc(cachep, flags);
}
static inline size_t slab_buffer_size(struct kmem_cache *cachep)
{
	return 0;
}
#endif
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	struct kmem_cache *cachep;
	void *ret;

	if (__builtin_constant_p(size)) {
		int i = 0;

		if (!size)
			return ZERO_SIZE_PTR;

		/*
		 * Find the index of the smallest general cache that fits:
		 * <linux/kmalloc_sizes.h> expands CACHE(x) once per cache
		 * size in ascending order.
		 */
#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
		return NULL;
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = malloc_sizes[i].cs_dmacachep;
		else
#endif
			cachep = malloc_sizes[i].cs_cachep;

		ret = kmem_cache_alloc_notrace(cachep, flags);

		trace_kmalloc(_THIS_IP_, ret,
			      size, slab_buffer_size(cachep), flags);

		return ret;
	}
	return __kmalloc(size, flags);
}
#ifdef CONFIG_NUMA
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
					   gfp_t flags,
					   int nodeid);
#else
static __always_inline void *
kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
			      gfp_t flags,
			      int nodeid)
{
	return kmem_cache_alloc_node(cachep, flags, nodeid);
}
#endif
static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	struct kmem_cache *cachep;
	void *ret;

	if (__builtin_constant_p(size)) {
		int i = 0;

		if (!size)
			return ZERO_SIZE_PTR;

		/* Same compile-time size walk as in kmalloc() above. */
#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
		return NULL;
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = malloc_sizes[i].cs_dmacachep;
		else
#endif
			cachep = malloc_sizes[i].cs_cachep;

		ret = kmem_cache_alloc_node_notrace(cachep, flags, node);

		trace_kmalloc_node(_THIS_IP_, ret,
				   size, slab_buffer_size(cachep),
				   flags, node);

		return ret;
	}
	return __kmalloc_node(size, flags, node);
}
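
/*
 * Usage sketch (illustrative; "struct foo" is a stand-in type): allocate
 * memory near a given NUMA node, with the same compile-time cache
 * selection as kmalloc():
 *
 *	struct foo *f = kmalloc_node(sizeof(*f), GFP_KERNEL, node);
 */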

#endif	/* CONFIG_NUMA */

#endif	/* _LINUX_SLAB_DEF_H */