#include "internal.h"
+#ifdef CONFIG_KASAN
+#include "kasan/kasan.h"	/* for KASAN_SHADOW_SCALE_SIZE */
+#endif
+
/*
 * Lock order:
 *   1. slab_mutex (Global Mutex)
#endif
}
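+/*
+ * Map the start of a slab slot to the object pointer callers see: with
+ * debugging and SLAB_RED_ZONE enabled, each slot begins with red_left_pad
+ * bytes of left redzone, so the object starts past it.
+ */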
+static inline void *fixup_red_left(struct kmem_cache *s, void *p)
+{
+	if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE)
+		p += s->red_left_pad;
+
+	return p;
+}
+
static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_CPU_PARTIAL
 * Core slab cache functions
 *******************************************************************/
-/* Verify that a pointer has an address that is valid within a slab page */
-static inline int check_valid_pointer(struct kmem_cache *s,
-				struct page *page, const void *object)
-{
-	void *base;
-
-	if (!object)
-		return 1;
-
-	base = page_address(page);
-	if (object < base || object >= base + page->objects * s->size ||
-		(object - base) % s->size) {
-		return 0;
-	}
-
-	return 1;
-}
-
static inline void *get_freepointer(struct kmem_cache *s, void *object)
{
	return *(void **)(object + s->offset);
{
	void *p;
-#ifdef CONFIG_DEBUG_PAGEALLOC
+	if (!debug_pagealloc_enabled())
+		return get_freepointer(s, object);
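+
+	/*
+	 * With debug_pagealloc enabled the object's page may be unmapped,
+	 * so read the free pointer with a fault-safe probe.
+	 */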
	probe_kernel_read(&p, (void **)(object + s->offset), sizeof(p));
-#else
-	p = get_freepointer(s, object);
-#endif
	return p;
}
	set_bit(slab_index(p, s, addr), map);
}
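+/*
+ * With SLAB_RED_ZONE, s->size includes the left redzone; these helpers
+ * convert between the slot layout and the object pointer/size the rest
+ * of the debug code works with.
+ */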
+static inline int size_from_object(struct kmem_cache *s)
+{
+	if (s->flags & SLAB_RED_ZONE)
+		return s->size - s->red_left_pad;
+
+	return s->size;
+}
+
+static inline void *restore_red_left(struct kmem_cache *s, void *p)
+{
+	if (s->flags & SLAB_RED_ZONE)
+		p -= s->red_left_pad;
+
+	return p;
+}
+
/*
 * Debug settings:
 */
/*
 * Object debugging
 */
+
+/* Verify that a pointer has an address that is valid within a slab page */
+static inline int check_valid_pointer(struct kmem_cache *s,
+				struct page *page, void *object)
+{
+	void *base;
+
+	if (!object)
+		return 1;
+
+	base = page_address(page);
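+	/*
+	 * Callers pass the object pointer; step back over the left redzone
+	 * so the range and stride checks below see slot-start addresses.
+	 */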
+	object = restore_red_left(s, object);
+	if (object < base || object >= base + page->objects * s->size ||
+		(object - base) % s->size) {
+		return 0;
+	}
+
+	return 1;
+}
+
static void print_section(char *text, u8 *addr, unsigned int length)
{
	metadata_access_enable();
pr_err("INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
p, p - addr, get_freepointer(s, p));
- if (p > addr + 16)
+ if (s->flags & SLAB_RED_ZONE)
+ print_section("Redzone ", p - s->red_left_pad, s->red_left_pad);
+ else if (p > addr + 16)
print_section("Bytes b4 ", p - 16, 16);
print_section("Object ", p, min_t(unsigned long, s->object_size,
	if (s->flags & SLAB_STORE_USER)
		off += 2 * sizeof(struct track);
-	if (off != s->size)
+	if (off != size_from_object(s))
		/* Beginning of the filler is the free pointer */
-		print_section("Padding ", p + off, s->size - off);
+		print_section("Padding ", p + off, size_from_object(s) - off);
	dump_stack();
}
{
	u8 *p = object;
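+
+	/* Fill the left redzone that precedes the object with the marker */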
+	if (s->flags & SLAB_RED_ZONE)
+		memset(p - s->red_left_pad, val, s->red_left_pad);
+
	if (s->flags & __OBJECT_POISON) {
		memset(p, POISON_FREE, s->object_size - 1);
		p[s->object_size - 1] = POISON_END;
		/* We also have user information there */
		off += 2 * sizeof(struct track);
-	if (s->size == off)
+	if (size_from_object(s) == off)
		return 1;
	return check_bytes_and_report(s, page, p, "Object padding",
-			p + off, POISON_INUSE, s->size - off);
+			p + off, POISON_INUSE, size_from_object(s) - off);
}
/* Check the pad bytes at the end of a slab page */
	u8 *endobject = object + s->object_size;
	if (s->flags & SLAB_RED_ZONE) {
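+		/* Left redzone first: it catches writes landing before the object */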
+		if (!check_bytes_and_report(s, page, object, "Redzone",
+			object - s->red_left_pad, val, s->red_left_pad))
+			return 0;
+
		if (!check_bytes_and_report(s, page, object, "Redzone",
			endobject, val, s->inuse - s->object_size))
			return 0;
	}
/* Object debug checks for alloc/free paths */
-static void setup_object_debug(struct kmem_cache *s, struct page *page,
+static void *setup_object_debug(struct kmem_cache *s, struct page *page,
				void *object)
{
	if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
-		return;
+		return object;
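+
+	/* With debugging on, the object proper starts past the left redzone */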
+	object = fixup_red_left(s, object);
	init_object(s, object, SLUB_RED_INACTIVE);
	init_tracking(s, object);
+
+	return object;
}
static noinline int alloc_debug_processing(struct kmem_cache *s,
	return flags;
}
#else /* !CONFIG_SLUB_DEBUG */
-static inline void setup_object_debug(struct kmem_cache *s,
-			struct page *page, void *object) {}
+static inline void *setup_object_debug(struct kmem_cache *s,
+			struct page *page, void *object) { return object; }
static inline int alloc_debug_processing(struct kmem_cache *s,
	struct page *page, void *object, unsigned long addr) { return 0; }
#endif
}
-static void setup_object(struct kmem_cache *s, struct page *page,
+static void *setup_object(struct kmem_cache *s, struct page *page,
				void *object)
{
-	setup_object_debug(s, page, object);
+	object = setup_object_debug(s, page, object);
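+	/* The ctor must run at the (possibly shifted) object address */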
	if (unlikely(s->ctor)) {
		kasan_unpoison_object_data(s, object);
		s->ctor(object);
		kasan_poison_object_data(s, object);
	}
+
+	return object;
}
/*
	kasan_poison_slab(page);
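+	/*
+	 * Chain the freelist through fixed-up object pointers so that both
+	 * the links and page->freelist point past each slot's left redzone.
+	 */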
	for_each_object_idx(p, idx, s, start, page->objects) {
-		setup_object(s, page, p);
-		if (likely(idx < page->objects))
-			set_freepointer(s, p, p + s->size);
-		else
-			set_freepointer(s, p, NULL);
+		void *object = setup_object(s, page, p);
+
+		if (likely(idx < page->objects)) {
+			set_freepointer(s, object,
+				fixup_red_left(s, p + s->size));
+		} else {
+			set_freepointer(s, object, NULL);
+		}
	}
-	page->freelist = start;
+	page->freelist = fixup_red_left(s, start);
	page->inuse = page->objects;
	page->frozen = 1;
	slab_pad_check(s, page);
	for_each_object(p, s, page_address(page),
-						page->objects)
-		check_object(s, page, p, SLUB_RED_INACTIVE);
+						page->objects) {
+		void *object = fixup_red_left(s, p);
+
+		check_object(s, page, object, SLUB_RED_INACTIVE);
+	}
}
	kmemcheck_free_shadow(page, compound_order(page));
	page_mapcount_reset(page);
	if (current->reclaim_state)
		current->reclaim_state->reclaimed_slab += pages;
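+	/* Uncharge kmemcg before handing the pages back to the page allocator */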
-	__free_kmem_pages(page, order);
+	memcg_uncharge_slab(page, order, s);
+	__free_pages(page, order);
}
#define need_reserve_slab_rcu \
	size_t first_skipped_index = 0;
	int lookahead = 3;
	void *object;
+	struct page *page;
	/* Always re-init detached_freelist */
	df->page = NULL;
	do {
		object = p[--size];
+		/* Do we need !ZERO_OR_NULL_PTR(object) here? (for kfree) */
	} while (!object && size);
	if (!object)
		return 0;
-	/* Support for memcg, compiler can optimize this out */
-	df->s = cache_from_obj(s, object);
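+	/*
+	 * A NULL cache means the objects were not freed through a specific
+	 * cache (e.g. a bulk kfree): derive the cache from each object, and
+	 * hand large, non-slab compound pages straight back to the page
+	 * allocator.
+	 */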
+	page = virt_to_head_page(object);
+	if (!s) {
+		/* Handle kmalloc'ed objects */
+		if (unlikely(!PageSlab(page))) {
+			BUG_ON(!PageCompound(page));
+			kfree_hook(object);
+			__free_kmem_pages(page, compound_order(page));
+			p[size] = NULL; /* mark object processed */
+			return size;
+		}
+		/* Derive kmem_cache from object */
+		df->s = page->slab_cache;
+	} else {
+		df->s = cache_from_obj(s, object); /* Support for memcg */
+	}
	/* Start new detached freelist */
+	df->page = page;
	set_freepointer(df->s, object, NULL);
-	df->page = virt_to_head_page(object);
	df->tail = object;
	df->freelist = object;
	p[size] = NULL; /* mark object processed */
	 * of the object.
	 */
	size += sizeof(void *);
+
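+	/*
+	 * Also reserve a left redzone in front of the object so that writes
+	 * landing just before it are caught as well; one word is enough, but
+	 * keep the object's required alignment.
+	 */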
+	if (flags & SLAB_RED_ZONE) {
+		s->red_left_pad = sizeof(void *);
+#ifdef CONFIG_KASAN
+		s->red_left_pad = min_t(int, s->red_left_pad,
+				KASAN_SHADOW_SCALE_SIZE);
+#endif
+		s->red_left_pad = ALIGN(s->red_left_pad, s->align);
+		size += s->red_left_pad;
+	}
#endif
/*
	get_map(s, page, map);
	for_each_object(p, s, addr, page->objects) {
+		void *object = fixup_red_left(s, p);
+
		if (!test_bit(slab_index(p, s, addr), map)) {
-			pr_err("INFO: Object 0x%p @offset=%tu\n", p, p - addr);
-			print_tracking(s, p);
+			pr_err("INFO: Object 0x%p @offset=%tu\n",
+				object, object - addr);
+			print_tracking(s, object);
		}
	}
	slab_unlock(page);
	get_map(s, page, map);
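+	/*
+	 * Objects on the freelist (set in the map) must carry inactive
+	 * redzones; all other objects are in use and must be active.
+	 */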
	for_each_object(p, s, addr, page->objects) {
+		void *object = fixup_red_left(s, p);
+
		if (test_bit(slab_index(p, s, addr), map))
-			if (!check_object(s, page, p, SLUB_RED_INACTIVE))
+			if (!check_object(s, page, object, SLUB_RED_INACTIVE))
				return 0;
	}
-	for_each_object(p, s, addr, page->objects)
+	for_each_object(p, s, addr, page->objects) {
+		void *object = fixup_red_left(s, p);
+
		if (!test_bit(slab_index(p, s, addr), map))
-			if (!check_object(s, page, p, SLUB_RED_ACTIVE))
+			if (!check_object(s, page, object, SLUB_RED_ACTIVE))
				return 0;
+	}
+
	return 1;
}
	bitmap_zero(map, page->objects);
	get_map(s, page, map);
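+	/* Collect alloc/free tracking from every object still in use */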
-	for_each_object(p, s, addr, page->objects)
+	for_each_object(p, s, addr, page->objects) {
+		void *object = fixup_red_left(s, p);
+
		if (!test_bit(slab_index(p, s, addr), map))
-			add_location(t, s, get_track(s, p, alloc));
+			add_location(t, s, get_track(s, object, alloc));
+	}
}
static int list_locations(struct kmem_cache *s, char *buf,