*/
/*
- * This allocator is designed for use with zram. Thus, the allocator is
- * supposed to work well under low memory conditions. In particular, it
- * never attempts higher order page allocation which is very likely to
- * fail under memory pressure. On the other hand, if we just use single
- * (0-order) pages, it would suffer from very high fragmentation --
- * any object of size PAGE_SIZE/2 or larger would occupy an entire page.
- * This was one of the major issues with its predecessor (xvmalloc).
- *
- * To overcome these issues, zsmalloc allocates a bunch of 0-order pages
- * and links them together using various 'struct page' fields. These linked
- * pages act as a single higher-order page i.e. an object can span 0-order
- * page boundaries. The code refers to these linked pages as a single entity
- * called zspage.
- *
- * For simplicity, zsmalloc can only allocate objects of size up to PAGE_SIZE
- * since this satisfies the requirements of all its current users (in the
- * worst case, page is incompressible and is thus stored "as-is" i.e. in
- * uncompressed form). For allocation requests larger than this size, failure
- * is returned (see zs_malloc).
- *
- * Additionally, zs_malloc() does not return a dereferenceable pointer.
- * Instead, it returns an opaque handle (unsigned long) which encodes actual
- * location of the allocated object. The reason for this indirection is that
- * zsmalloc does not keep zspages permanently mapped since that would cause
- * issues on 32-bit systems where the VA region for kernel space mappings
- * is very small. So, before using the allocating memory, the object has to
- * be mapped using zs_map_object() to get a usable pointer and subsequently
- * unmapped using zs_unmap_object().
- *
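Caller-side sketch of the map/unmap contract the removed comment describes (hypothetical helper name; exact signatures, e.g. a gfp_t argument to zs_malloc(), vary across kernel versions):

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/zsmalloc.h>

/*
 * Usage sketch, not part of this patch: store one buffer in a pool.
 * zs_malloc() hands back an opaque handle; the object is only
 * dereferenceable between zs_map_object() and zs_unmap_object().
 */
static int store_buf(struct zs_pool *pool, const void *buf, size_t len,
		     unsigned long *out_handle)
{
	unsigned long handle;
	void *dst;

	handle = zs_malloc(pool, len);	/* opaque handle, not a pointer */
	if (!handle)
		return -ENOMEM;

	dst = zs_map_object(pool, handle, ZS_MM_WO);	/* short-lived mapping */
	memcpy(dst, buf, len);
	zs_unmap_object(pool, handle);	/* only the handle survives */

	*out_handle = handle;
	return 0;
}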
* Following is how we use various fields and flags of underlying
* struct page(s) to form a zspage.
*
 * We have to decide on how many pages to link together
 * to form a zspage for each size class. This is important
* to reduce wastage due to unusable space left at end of
* each zspage which is given as:
- * wastage = Zp - Zp % size_class
+ * wastage = Zp % class_size
+ * usage = Zp - wastage
* where Zp = zspage size = k * PAGE_SIZE where k = 1, 2, ...
*
 * For example, for size class of 3/8 * PAGE_SIZE, we should
 * link together 3 PAGE_SIZE sized pages to form a zspage, since
 * then 8 objects exactly fill those 3 pages and the wastage,
 * 3 * PAGE_SIZE % (3/8 * PAGE_SIZE), is zero.
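This selection is roughly what the in-tree get_pages_per_zspage() helper computes; a sketch (PAGE_SIZE and ZS_MAX_PAGES_PER_ZSPAGE are the kernel macros):

/*
 * Sketch: pick the k (number of 0-order pages per zspage) that
 * maximizes the used percentage for this class_size, with k bounded
 * by ZS_MAX_PAGES_PER_ZSPAGE.
 */
static int pages_per_zspage(int class_size)
{
	int i, max_usedpc = 0;
	int max_usedpc_order = 1;	/* order giving maximum used size */

	for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) {
		int zspage_size = i * PAGE_SIZE;
		int waste = zspage_size % class_size;
		int usedpc = (zspage_size - waste) * 100 / zspage_size;

		if (usedpc > max_usedpc) {
			max_usedpc = usedpc;
			max_usedpc_order = i;
		}
	}

	return max_usedpc_order;
}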
/* extra space in chunk to keep the handle */
size += ZS_HANDLE_SIZE;
class = pool->size_class[get_size_class_index(size)];
- /* In huge class size, we store the handle into first_page->private */
- if (class->huge) {
- size -= ZS_HANDLE_SIZE;
- class = pool->size_class[get_size_class_index(size)];
- }
spin_lock(&class->lock);
first_page = find_get_zspage(class);
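The ZS_HANDLE_SIZE bump reflects the chunk layout: the handle sits at the head of the allocated chunk, in front of the caller's payload. A purely illustrative view (the struct name is hypothetical, not from the patch):

/* Illustrative only: the chunk carved from the size class is
 * [handle | payload], which is why the request is grown by
 * ZS_HANDLE_SIZE before the class is chosen. */
struct zs_chunk {			/* hypothetical name */
	unsigned long handle;		/* ZS_HANDLE_SIZE bytes */
	unsigned char payload[];	/* caller's 'size' bytes follow */
};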
if (written == class->size)
break;
- if (s_off + size >= PAGE_SIZE) {
+ s_off += size;
+ s_size -= size;
+ d_off += size;
+ d_size -= size;
+
+ if (s_off >= PAGE_SIZE) {
kunmap_atomic(d_addr);
kunmap_atomic(s_addr);
s_page = get_next_page(s_page);
BUG_ON(!s_page);
s_addr = kmap_atomic(s_page);
d_addr = kmap_atomic(d_page);
s_size = class->size - written;
s_off = 0;
- } else {
- s_off += size;
- s_size -= size;
}
- if (d_off + size >= PAGE_SIZE) {
+ if (d_off >= PAGE_SIZE) {
kunmap_atomic(d_addr);
d_page = get_next_page(d_page);
BUG_ON(!d_page);
d_addr = kmap_atomic(d_page);
d_size = class->size - written;
d_off = 0;
- } else {
- d_off += size;
- d_size -= size;
}
}
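The invariant behind this hunk (advance both offsets by the chunk just copied, then re-map whichever side crossed a page boundary) is easier to see in isolation. A minimal userspace sketch of the same pattern (names and the page arrays are hypothetical stand-ins):

#include <string.h>

#define PG_SIZE 4096	/* stand-in for PAGE_SIZE */

/* Hypothetical sketch: copy 'len' bytes between two sequences of
 * discontiguous pages, mirroring the advance-then-wrap logic above. */
static void copy_across_pages(char **s_pages, size_t s_off,
			      char **d_pages, size_t d_off, size_t len)
{
	size_t written = 0;

	while (written < len) {
		/* largest chunk that stays inside both current pages */
		size_t size = len - written;

		if (size > PG_SIZE - s_off)
			size = PG_SIZE - s_off;
		if (size > PG_SIZE - d_off)
			size = PG_SIZE - d_off;

		memcpy(*d_pages + d_off, *s_pages + s_off, size);
		written += size;

		/* advance first, then wrap whichever side hit a boundary */
		s_off += size;
		d_off += size;
		if (s_off >= PG_SIZE) {
			s_pages++;	/* stand-in for get_next_page() + kmap */
			s_off = 0;
		}
		if (d_off >= PG_SIZE) {
			d_pages++;
			d_off = 0;
		}
	}
}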
static void putback_zspage(struct zs_pool *pool, struct size_class *class,
struct page *first_page)
{
- int class_idx;
enum fullness_group fullness;
BUG_ON(!is_first_page(first_page));
- get_zspage_mapping(first_page, &class_idx, &fullness);
+ fullness = get_fullness_group(first_page);
insert_zspage(first_page, class, fullness);
- fullness = fix_fullness_group(class, first_page);
+ set_zspage_mapping(first_page, class->index, fullness);
+
if (fullness == ZS_EMPTY) {
zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
class->size, class->pages_per_zspage));
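get_fullness_group(), which putback_zspage() now calls directly, classifies a zspage purely from its occupancy. A sketch of that classification (the real helper reads first_page->inuse and the per-zspage capacity; the ALMOST_* threshold shown is illustrative and version-dependent):

static enum fullness_group classify(int inuse, int max_objects)
{
	if (inuse == 0)
		return ZS_EMPTY;
	if (inuse == max_objects)
		return ZS_FULL;
	if (inuse <= max_objects * 3 / 4)	/* illustrative threshold */
		return ZS_ALMOST_EMPTY;
	return ZS_ALMOST_FULL;
}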
struct page *dst_page = NULL;
unsigned long nr_total_migrated = 0;
- cond_resched();
-
spin_lock(&class->lock);
while ((src_page = isolate_source_page(class))) {
nr_migrated += __zs_compact(pool, class);
}
- synchronize_rcu();
-
return nr_migrated;
}
EXPORT_SYMBOL_GPL(zs_compact);