From 911b71da4817a5831badd016e73bb43d4c539a06 Mon Sep 17 00:00:00 2001
From: Minchan Kim
Date: Wed, 8 Apr 2015 09:44:46 +1000
Subject: [PATCH] zsmalloc: factor out obj_[malloc|free]

A later patch implementing migration needs parts of zs_malloc() and
zs_free(), so factor them out into new helpers, obj_malloc() and
obj_free().

Signed-off-by: Minchan Kim
Cc: Juneho Choi
Cc: Gunho Lee
Cc: Luigi Semenzato
Cc: Dan Streetman
Cc: Seth Jennings
Cc: Nitin Gupta
Cc: Jerome Marchand
Cc: Sergey Senozhatsky
Cc: Joonsoo Kim
Cc: Mel Gorman
Signed-off-by: Andrew Morton
---
 mm/zsmalloc.c | 98 +++++++++++++++++++++++++++++++--------------------
 1 file changed, 60 insertions(+), 38 deletions(-)

diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 6f3cfbf5e237..55b171016f4f 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -525,11 +525,10 @@ static void remove_zspage(struct page *page, struct size_class *class,
  * page from the freelist of the old fullness group to that of the new
  * fullness group.
  */
-static enum fullness_group fix_fullness_group(struct zs_pool *pool,
+static enum fullness_group fix_fullness_group(struct size_class *class,
 						struct page *page)
 {
 	int class_idx;
-	struct size_class *class;
 	enum fullness_group currfg, newfg;
 
 	BUG_ON(!is_first_page(page));
@@ -539,7 +538,6 @@ static enum fullness_group fix_fullness_group(struct zs_pool *pool,
 	if (newfg == currfg)
 		goto out;
 
-	class = pool->size_class[class_idx];
 	remove_zspage(page, class, currfg);
 	insert_zspage(page, class, newfg);
 	set_zspage_mapping(page, class_idx, newfg);
@@ -1281,6 +1279,33 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
 }
 EXPORT_SYMBOL_GPL(zs_unmap_object);
 
+static unsigned long obj_malloc(struct page *first_page,
+		struct size_class *class, unsigned long handle)
+{
+	unsigned long obj;
+	struct link_free *link;
+
+	struct page *m_page;
+	unsigned long m_objidx, m_offset;
+	void *vaddr;
+
+	obj = (unsigned long)first_page->freelist;
+	obj_to_location(obj, &m_page, &m_objidx);
+	m_offset = obj_idx_to_offset(m_page, m_objidx, class->size);
+
+	vaddr = kmap_atomic(m_page);
+	link = (struct link_free *)vaddr + m_offset / sizeof(*link);
+	first_page->freelist = link->next;
+	/* record handle in the header of allocated chunk */
+	link->handle = handle;
+	kunmap_atomic(vaddr);
+	first_page->inuse++;
+	zs_stat_inc(class, OBJ_USED, 1);
+
+	return obj;
+}
+
+
 /**
  * zs_malloc - Allocate block of given size from pool.
 * @pool: pool to allocate from
@@ -1293,12 +1318,8 @@ EXPORT_SYMBOL_GPL(zs_unmap_object);
 unsigned long zs_malloc(struct zs_pool *pool, size_t size)
 {
 	unsigned long handle, obj;
-	struct link_free *link;
 	struct size_class *class;
-	void *vaddr;
-
-	struct page *first_page, *m_page;
-	unsigned long m_objidx, m_offset;
+	struct page *first_page;
 
 	if (unlikely(!size || (size + ZS_HANDLE_SIZE) > ZS_MAX_ALLOC_SIZE))
 		return 0;
@@ -1331,22 +1352,9 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size)
 			class->size, class->pages_per_zspage));
 	}
 
-	obj = (unsigned long)first_page->freelist;
-	obj_to_location(obj, &m_page, &m_objidx);
-	m_offset = obj_idx_to_offset(m_page, m_objidx, class->size);
-
-	vaddr = kmap_atomic(m_page);
-	link = (struct link_free *)vaddr + m_offset / sizeof(*link);
-	first_page->freelist = link->next;
-
-	/* record handle in the header of allocated chunk */
-	link->handle = handle;
-	kunmap_atomic(vaddr);
-
-	first_page->inuse++;
-	zs_stat_inc(class, OBJ_USED, 1);
+	obj = obj_malloc(first_page, class, handle);
 
 	/* Now move the zspage to another fullness group, if required */
-	fix_fullness_group(pool, first_page);
+	fix_fullness_group(class, first_page);
 	record_obj(handle, obj);
 	spin_unlock(&class->lock);
@@ -1354,46 +1362,60 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size)
 }
 EXPORT_SYMBOL_GPL(zs_malloc);
 
-void zs_free(struct zs_pool *pool, unsigned long handle)
+static void obj_free(struct zs_pool *pool, struct size_class *class,
+			unsigned long obj)
 {
 	struct link_free *link;
 	struct page *first_page, *f_page;
-	unsigned long obj, f_objidx, f_offset;
+	unsigned long f_objidx, f_offset;
 	void *vaddr;
-
 	int class_idx;
-	struct size_class *class;
 	enum fullness_group fullness;
 
-	if (unlikely(!handle))
-		return;
+	BUG_ON(!obj);
 
-	obj = handle_to_obj(handle);
-	free_handle(pool, handle);
 	obj_to_location(obj, &f_page, &f_objidx);
 	first_page = get_first_page(f_page);
 
 	get_zspage_mapping(first_page, &class_idx, &fullness);
-	class = pool->size_class[class_idx];
 	f_offset = obj_idx_to_offset(f_page, f_objidx, class->size);
 
-	spin_lock(&class->lock);
+	vaddr = kmap_atomic(f_page);
 
 	/* Insert this object in containing zspage's freelist */
-	vaddr = kmap_atomic(f_page);
 	link = (struct link_free *)(vaddr + f_offset);
 	link->next = first_page->freelist;
 	kunmap_atomic(vaddr);
 	first_page->freelist = (void *)obj;
-
 	first_page->inuse--;
-	fullness = fix_fullness_group(pool, first_page);
-
 	zs_stat_dec(class, OBJ_USED, 1);
+}
+
+void zs_free(struct zs_pool *pool, unsigned long handle)
+{
+	struct page *first_page, *f_page;
+	unsigned long obj, f_objidx;
+	int class_idx;
+	struct size_class *class;
+	enum fullness_group fullness;
+
+	if (unlikely(!handle))
+		return;
+
+	obj = handle_to_obj(handle);
+	free_handle(pool, handle);
+	obj_to_location(obj, &f_page, &f_objidx);
+	first_page = get_first_page(f_page);
+
+	get_zspage_mapping(first_page, &class_idx, &fullness);
+	class = pool->size_class[class_idx];
+
+	spin_lock(&class->lock);
+	obj_free(pool, class, obj);
+	fullness = fix_fullness_group(class, first_page);
 	if (fullness == ZS_EMPTY)
 		zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
 			class->size, class->pages_per_zspage));
-
 	spin_unlock(&class->lock);
 
 	if (fullness == ZS_EMPTY) {
-- 
2.39.5
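
A note on the scheme these helpers manipulate: zsmalloc threads free
objects through the zspage itself. Each free slot holds a struct
link_free whose next field points at the following free slot, with the
list head hanging off first_page->freelist; an allocated slot instead
carries its handle in that same header word. obj_malloc() pops the list
and records the handle, obj_free() pushes the slot back. Below is a
minimal userspace sketch of the idea, not kernel code: the mini_* names
and the flat malloc'd buffer standing in for a zspage are invented for
illustration, and kmap_atomic(), the class lock, and the fullness/stat
accounting are deliberately left out.

#include <stdio.h>
#include <stdlib.h>

/* A free slot doubles as a freelist node; an allocated slot keeps its
 * handle in the first word, mirroring struct link_free in mm/zsmalloc.c. */
struct link_free {
	union {
		struct link_free *next;	/* while the slot is free */
		unsigned long handle;	/* while the slot is allocated */
	};
};

struct mini_class {
	size_t size;			/* slot size, >= sizeof(struct link_free) */
	struct link_free *freelist;	/* like first_page->freelist */
	unsigned int inuse;		/* like first_page->inuse */
};

/* Thread every slot of a flat buffer onto the freelist. */
static void mini_init(struct mini_class *c, void *buf, size_t nslots,
			size_t size)
{
	size_t i;

	c->size = size;
	c->inuse = 0;
	c->freelist = buf;
	for (i = 0; i < nslots; i++) {
		struct link_free *slot =
			(struct link_free *)((char *)buf + i * size);

		slot->next = (i + 1 < nslots) ?
			(struct link_free *)((char *)buf + (i + 1) * size) :
			NULL;
	}
}

/* Pop the freelist head and record the handle in the slot header,
 * the core of obj_malloc(). */
static void *mini_obj_malloc(struct mini_class *c, unsigned long handle)
{
	struct link_free *slot = c->freelist;

	if (!slot)
		return NULL;
	c->freelist = slot->next;
	slot->handle = handle;	/* record handle in the chunk header */
	c->inuse++;
	return slot;
}

/* Push the slot back onto the freelist head, the core of obj_free(). */
static void mini_obj_free(struct mini_class *c, void *obj)
{
	struct link_free *slot = obj;

	slot->next = c->freelist;
	c->freelist = slot;
	c->inuse--;
}

int main(void)
{
	struct mini_class c;
	void *buf = malloc(4 * 32);	/* 4 slots of 32 bytes: a toy "zspage" */
	void *a, *b;

	if (!buf)
		return 1;
	mini_init(&c, buf, 4, 32);
	a = mini_obj_malloc(&c, 0x1001);
	b = mini_obj_malloc(&c, 0x1002);
	printf("inuse=%u a=%p b=%p\n", c.inuse, a, b);
	mini_obj_free(&c, a);
	mini_obj_free(&c, b);
	printf("inuse=%u freelist head=%p\n", c.inuse, (void *)c.freelist);
	free(buf);
	return 0;
}

The kernel versions differ mainly in addressing: an object is encoded as
a (page, index) pair that obj_to_location() decodes, and the page must
be mapped with kmap_atomic() before the link_free header can be touched.
That is exactly the part zs_malloc() and zs_free() no longer open-code
after this patch.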