#ifndef _LINUX_SLAB_DEF_H
#define _LINUX_SLAB_DEF_H

#include <linux/reciprocal_div.h>

/*
 * Definitions unique to the original Linux SLAB allocator.
 */

struct kmem_cache {
	struct array_cache __percpu *cpu_cache;

/* 1) Cache tunables. Protected by slab_mutex */
	unsigned int batchcount;
	unsigned int limit;
	unsigned int shared;

	unsigned int size;
	struct reciprocal_value reciprocal_buffer_size;
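	/*
	 * Illustrative sketch (not part of the original header): the
	 * reciprocal value turns the per-object division "offset /
	 * buffer_size" into a multiply-and-shift.  mm/slab.c derives an
	 * object's index from its address roughly like this:
	 *
	 *	u32 offset = obj - page->s_mem;
	 *	unsigned int idx = reciprocal_divide(offset,
	 *				cache->reciprocal_buffer_size);
	 *
	 * avoiding a hardware divide on hot paths.
	 */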
/* 2) touched by every alloc & free from the backend */

	unsigned int flags;		/* constant flags */
	unsigned int num;		/* # of objs per slab */

/* 3) cache_grow/shrink */
	/* order of pgs per slab (2^n) */
	unsigned int gfporder;

	/* force GFP flags, e.g. GFP_DMA */
	gfp_t allocflags;

	size_t colour;			/* cache colouring range */
	unsigned int colour_off;	/* colour offset */
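	/*
	 * Example (illustrative numbers): with colour_off equal to the
	 * cache line size, say 64, and colour = 4, successive slabs start
	 * their first object at offsets 0, 64, 128 and 192 before the
	 * cycle wraps, so equal-index objects in different slabs do not
	 * all land on the same cache lines.
	 */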
	struct kmem_cache *freelist_cache;
	unsigned int freelist_size;

	/* constructor func */
	void (*ctor)(void *obj);

/* 4) cache creation/removal */
	const char *name;
	struct list_head list;
	int refcount;
	int object_size;
	int align;

/* 5) statistics */
#ifdef CONFIG_DEBUG_SLAB
	unsigned long num_active;
	unsigned long num_allocations;
	unsigned long high_mark;
	unsigned long grown;
	unsigned long reaped;
	unsigned long errors;
	unsigned long max_freeable;
	unsigned long node_allocs;
	unsigned long node_frees;
	unsigned long node_overflow;
	atomic_t allochit;
	atomic_t allocmiss;
	atomic_t freehit;
	atomic_t freemiss;
#ifdef CONFIG_DEBUG_SLAB_LEAK
	atomic_t store_user_clean;
#endif

	/*
	 * If debugging is enabled, then the allocator can add additional
	 * fields and/or padding to every object.  'size' contains the
	 * total object size including these internal fields, while
	 * obj_offset holds the offset from the start of that storage to
	 * the user-visible object.
	 */
	int obj_offset;
#endif /* CONFIG_DEBUG_SLAB */

#ifdef CONFIG_MEMCG
	struct memcg_cache_params memcg_params;
#endif
#ifdef CONFIG_KASAN
	struct kasan_cache kasan_info;
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
	/* pre-computed random permutation of object indices (see mm/slab.c) */
	unsigned int *random_seq;
#endif

	struct kmem_cache_node *node[MAX_NUMNODES];
};
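
/*
 * Minimal usage sketch (illustrative only; 'struct foo' and 'foo_cache'
 * are hypothetical).  A kmem_cache is normally created and used through
 * the generic slab API rather than by touching this structure directly:
 *
 *	struct kmem_cache *foo_cache =
 *		kmem_cache_create("foo", sizeof(struct foo), 0,
 *				  SLAB_HWCACHE_ALIGN, NULL);
 *	struct foo *p = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, p);
 *	kmem_cache_destroy(foo_cache);
 */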

/*
 * Round an address within a slab page down to the start of the object
 * that contains it, clamping to the slab's last object.
 */
static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
				void *x)
{
	void *object = x - (x - page->s_mem) % cache->size;
	void *last_object = page->s_mem + (cache->num - 1) * cache->size;

	if (unlikely(object > last_object))
		return last_object;
	else
		return object;
}
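
/*
 * Worked example for nearest_obj() (illustrative values): with
 * page->s_mem = 0x1000, cache->size = 64 and cache->num = 32, a pointer
 * x = 0x10d0 gives (x - s_mem) % size = 16, so the function returns
 * 0x10c0, the base of the object containing x; pointers past the last
 * object are clamped to last_object.
 */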

#endif	/* _LINUX_SLAB_DEF_H */