*
* page->private (union with page->first_page): refers to the
* component page after the first page
+ * If the page is first_page of a huge object, it stores the handle.
+ * See size_class->huge.
* page->freelist: points to the first free object in zspage.
* Free objects are linked together using in-place
* metadata.
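To make the two layouts concrete, here is a sketch of where the handle ends up in each case (field and macro names as used in this patch; the handle occupies the first ZS_HANDLE_SIZE bytes of a normal chunk):

/*
 * Normal class chunk:                Huge class (one page, one object):
 *
 *  +--------+----------------+        +------------------------+
 *  | handle | payload ...    |        | payload ...            |
 *  +--------+----------------+        +------------------------+
 *    first ZS_HANDLE_SIZE             handle is kept out of line,
 *    bytes of the chunk               in first_page->private
 */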
#define ZS_MIN_ALLOC_SIZE \
MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
/* each chunk includes extra space to keep handle */
-#define ZS_MAX_ALLOC_SIZE (PAGE_SIZE + ZS_HANDLE_SIZE)
+#define ZS_MAX_ALLOC_SIZE PAGE_SIZE
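The payoff is easiest to see with numbers. A minimal user-space sketch, assuming 4K pages, an 8-byte handle, and zspages capped at 4 pages (the values on a typical 64-bit build):

#include <stdio.h>

int main(void)
{
	int old_chunk = 4096 + 8;	/* object + in-band handle */
	int new_chunk = 4096;		/* handle in first_page->private */

	/* Old: the best packing for 4104-byte chunks is 3 per 4-page
	 * (16384-byte) zspage, i.e. ~1.33 pages per PAGE_SIZE object. */
	printf("old: %d objs per 4 pages\n", 4 * 4096 / old_chunk);
	/* New: the huge class is exactly 1 page, 1 object, zero waste. */
	printf("new: %d obj per page\n", new_chunk / 4096);
	return 0;
}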
/*
* On systems with 4K page size, this gives 255 size classes! There is a
/* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
int pages_per_zspage;
+ /* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */
+ bool huge;
#ifdef CONFIG_ZSMALLOC_STAT
struct zs_size_stat stats;
#endif
char *vm_addr; /* address of kmap_atomic()'ed pages */
enum zs_mapmode vm_mm; /* mapping mode */
+ bool huge;
};
static int create_handle_cache(struct zs_pool *pool)
idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE,
ZS_SIZE_CLASS_DELTA);
- return idx;
+ return min(zs_size_classes - 1, idx);
}
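Without the clamp, the size passed down by zs_malloc(), which still adds ZS_HANDLE_SIZE before the lookup, could index one past the last class. A user-space sketch of the math, assuming 4K pages, ZS_MIN_ALLOC_SIZE = 32, ZS_SIZE_CLASS_DELTA = 16, and the 255 classes mentioned above:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int zs_size_classes = 255;		/* valid indices: 0..254 */
	int size = 4096 + 8;			/* PAGE_SIZE + ZS_HANDLE_SIZE */
	int idx = DIV_ROUND_UP(size - 32, 16);	/* = 255, one past the end */

	if (idx > zs_size_classes - 1)		/* the new min() clamp */
		idx = zs_size_classes - 1;
	printf("idx = %d\n", idx);		/* 254, the PAGE_SIZE class */
	return 0;
}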
/*
return *(unsigned long *)handle;
}
-static unsigned long obj_to_head(void *obj)
+static unsigned long obj_to_head(struct size_class *class, struct page *page,
+ void *obj)
{
- return *(unsigned long *)obj;
+ if (class->huge) {
+ VM_BUG_ON(!is_first_page(page));
+ return page_private(page);
+ } else
+ return *(unsigned long *)obj;
}
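One subtlety worth spelling out: the word stored in page->private is already the tagged head, so it must be returned as a value, not dereferenced. The handle is itself a pointer whose target holds the encoded object location, so one extra dereference yields the wrong thing. A user-space sketch, assuming OBJ_ALLOCATED_TAG is bit 0 and handles are word-aligned pointers as in zsmalloc:

#include <stdio.h>

#define OBJ_ALLOCATED_TAG	1UL

int main(void)
{
	unsigned long location = 0xabcd00;	/* stand-in <page, obj_idx> */
	unsigned long handle = (unsigned long)&location;
	unsigned long head = handle | OBJ_ALLOCATED_TAG; /* what gets stored */

	if (head & OBJ_ALLOCATED_TAG) {
		unsigned long h = head & ~OBJ_ALLOCATED_TAG;

		/* head strips to the handle; one more dereference gives
		 * the object location, not another handle. */
		printf("head %#lx -> handle %#lx -> location %#lx\n",
		       head, h, *(unsigned long *)h);
	}
	return 0;
}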
static unsigned long obj_idx_to_offset(struct page *page,
if (area->vm_mm == ZS_MM_RO)
goto out;
- buf = area->vm_buf + ZS_HANDLE_SIZE;
- size -= ZS_HANDLE_SIZE;
- off += ZS_HANDLE_SIZE;
+ buf = area->vm_buf;
+ if (!area->huge) {
+ buf = buf + ZS_HANDLE_SIZE;
+ size -= ZS_HANDLE_SIZE;
+ off += ZS_HANDLE_SIZE;
+ }
sizes[0] = PAGE_SIZE - off;
sizes[1] = size - sizes[0];
ret = __zs_map_object(area, pages, off, class->size);
out:
- return ret + ZS_HANDLE_SIZE;
+ if (!class->huge)
+ ret += ZS_HANDLE_SIZE;
+
+ return ret;
}
EXPORT_SYMBOL_GPL(zs_map_object);
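Seen from the caller's side, this fixup keeps the mapped pointer aimed at the payload for both kinds of class. A hypothetical zram-style caller sketch (write_object() and its arguments are illustrative, not part of the patch):

static void write_object(struct zs_pool *pool, unsigned long handle,
			 const void *src, size_t size)
{
	/* dst starts past the in-band handle for normal classes and at
	 * byte 0 of the single page for huge classes. */
	void *dst = zs_map_object(pool, handle, ZS_MM_WO);

	memcpy(dst, src, size);		/* size as passed to zs_malloc() */
	zs_unmap_object(pool, handle);
}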
vaddr = kmap_atomic(m_page);
link = (struct link_free *)vaddr + m_offset / sizeof(*link);
first_page->freelist = link->next;
- /* record handle in the header of allocated chunk */
- link->handle = handle;
+ if (!class->huge)
+ /* record handle in the header of allocated chunk */
+ link->handle = handle;
+ else
+ /* record handle in first_page->private */
+ set_page_private(first_page, handle);
kunmap_atomic(vaddr);
first_page->inuse++;
zs_stat_inc(class, OBJ_USED, 1);
struct size_class *class;
struct page *first_page;
- if (unlikely(!size || (size + ZS_HANDLE_SIZE) > ZS_MAX_ALLOC_SIZE))
+ if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
return 0;
handle = alloc_handle(pool);
/* extra space in chunk to keep the handle */
size += ZS_HANDLE_SIZE;
class = pool->size_class[get_size_class_index(size)];
+ /* For huge classes, we store the handle in first_page->private */
+ if (class->huge) {
+ size -= ZS_HANDLE_SIZE;
+ class = pool->size_class[get_size_class_index(size)];
+ }
spin_lock(&class->lock);
first_page = find_get_zspage(class);
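The effect of the re-lookup is that a huge object can drop into a tighter class once the in-band handle no longer counts toward its size. A user-space sketch of the class-size math, assuming 4K pages, an 8-byte handle, the 16-byte class delta, and the 255-class layout above (both resulting classes are one-page, one-object huge classes):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static int class_size(int size)
{
	int idx = size <= 32 ? 0 : DIV_ROUND_UP(size - 32, 16);

	if (idx > 254)			/* min(zs_size_classes - 1, idx) */
		idx = 254;
	return 32 + idx * 16;
}

int main(void)
{
	int size = 3850;	/* user request, lands in a huge class */

	size += 8;		/* + ZS_HANDLE_SIZE */
	printf("with handle:    class %d\n", class_size(size));  /* 3872 */
	size -= 8;		/* huge: handle moves to ->private */
	printf("without handle: class %d\n", class_size(size));  /* 3856 */
	return 0;
}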
/* Insert this object in containing zspage's freelist */
link = (struct link_free *)(vaddr + f_offset);
link->next = first_page->freelist;
+ if (class->huge)
+ set_page_private(first_page, 0);
kunmap_atomic(vaddr);
first_page->freelist = (void *)obj;
first_page->inuse--;
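Zeroing first_page->private here is not cosmetic: for huge classes obj_to_head() reads that field, so a stale tagged handle would make a freed object look allocated to scans like the one in the next hunk. A sketch of the failure the store prevents:

/*
 * Without set_page_private(first_page, 0), after obj_free():
 *
 *	head = obj_to_head(class, page, addr + offset);
 *
 * would still return the stale tagged handle, head & OBJ_ALLOCATED_TAG
 * would remain true, and the scanner would try to pin and migrate an
 * object that no longer exists.  With the store, head is 0 and the
 * tag test fails as it should.
 */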
offset += class->size * index;
while (offset < PAGE_SIZE) {
- head = obj_to_head(addr + offset);
+ head = obj_to_head(class, page, addr + offset);
if (head & OBJ_ALLOCATED_TAG) {
handle = head & ~OBJ_ALLOCATED_TAG;
if (trypin_tag(handle))
class->size = size;
class->index = i;
class->pages_per_zspage = pages_per_zspage;
+ if (pages_per_zspage == 1 &&
+ get_maxobj_per_zspage(size, pages_per_zspage) == 1)
+ class->huge = true;
spin_lock_init(&class->lock);
pool->size_class[i] = class;
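Which classes end up huge follows from the same waste heuristic zsmalloc already uses to size zspages. A user-space sketch, assuming 4K pages and ZS_MAX_PAGES_PER_ZSPAGE = 4, with a helper mirroring get_pages_per_zspage():

#include <stdio.h>

#define PAGE_SIZE		4096
#define ZS_MAX_PAGES_PER_ZSPAGE	4

static int get_maxobj_per_zspage(int size, int pages_per_zspage)
{
	return pages_per_zspage * PAGE_SIZE / size;
}

/* Pick the zspage order with the least waste, as zsmalloc does. */
static int get_pages_per_zspage(int size)
{
	int i, max_usedpc = 0, max_usedpc_order = 1;

	for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) {
		int zspage_size = i * PAGE_SIZE;
		int usedpc = (zspage_size - zspage_size % size) * 100
				/ zspage_size;

		if (usedpc > max_usedpc) {
			max_usedpc = usedpc;
			max_usedpc_order = i;
		}
	}
	return max_usedpc_order;
}

int main(void)
{
	int sizes[] = { 2720, 3856, 4096 }, i;

	for (i = 0; i < 3; i++) {
		int ppz = get_pages_per_zspage(sizes[i]);
		int huge = ppz == 1 &&
			   get_maxobj_per_zspage(sizes[i], ppz) == 1;

		printf("class %4d: pages_per_zspage=%d huge=%d\n",
		       sizes[i], ppz, huge);
	}
	return 0;	/* 2720: huge=0; 3856 and 4096: huge=1 */
}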