/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/sysfs.h>
#include <linux/cpuhotplug.h>

#include "zram_drv.h"

static DEFINE_IDR(zram_index_idr);
/* idr index must be protected */
static DEFINE_MUTEX(zram_index_mutex);

static int zram_major;
static const char *default_compressor = "lzo";
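
/*
 * The "lzo" default above is copied into every new device and stays in
 * effect until user space writes another name to the per-device
 * "comp_algorithm" attribute, which has to happen before the device is
 * initialized via "disksize".
 */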

/* Module params (documentation at end) */
static unsigned int num_devices = 1;

static inline void deprecated_attr_warn(const char *name)
{
        pr_warn_once("%d (%s) Attribute %s (and others) will be removed. %s\n",
                        task_pid_nr(current),
                        current->comm,
                        name,
                        "See zram documentation.");
}

#define ZRAM_ATTR_RO(name) \
static ssize_t name##_show(struct device *d, \
                                struct device_attribute *attr, char *b) \
{ \
        struct zram *zram = dev_to_zram(d); \
 \
        deprecated_attr_warn(__stringify(name)); \
        return scnprintf(b, PAGE_SIZE, "%llu\n", \
                (u64)atomic64_read(&zram->stats.name)); \
} \
static DEVICE_ATTR_RO(name);

static inline bool init_done(struct zram *zram)
{
        return zram->disksize;
}

static inline struct zram *dev_to_zram(struct device *dev)
{
        return (struct zram *)dev_to_disk(dev)->private_data;
}

/* flag operations require table entry bit_spin_lock() being held */
static int zram_test_flag(struct zram_meta *meta, u32 index,
                        enum zram_pageflags flag)
{
        return meta->table[index].value & BIT(flag);
}

static void zram_set_flag(struct zram_meta *meta, u32 index,
                        enum zram_pageflags flag)
{
        meta->table[index].value |= BIT(flag);
}

static void zram_clear_flag(struct zram_meta *meta, u32 index,
                        enum zram_pageflags flag)
{
        meta->table[index].value &= ~BIT(flag);
}

static size_t zram_get_obj_size(struct zram_meta *meta, u32 index)
{
        return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
}

static void zram_set_obj_size(struct zram_meta *meta,
                                u32 index, size_t size)
{
        unsigned long flags = meta->table[index].value >> ZRAM_FLAG_SHIFT;

        meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
}
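
/*
 * The accessors above pack two fields into each table entry's "value"
 * word: the low ZRAM_FLAG_SHIFT bits hold the compressed object size,
 * and the bits above that hold the zram_pageflags (including the
 * ZRAM_ACCESS bit used with bit_spin_lock()).
 */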

static inline bool is_partial_io(struct bio_vec *bvec)
{
        return bvec->bv_len != PAGE_SIZE;
}

static void zram_revalidate_disk(struct zram *zram)
{
        revalidate_disk(zram->disk);
        /* revalidate_disk() resets BDI_CAP_STABLE_WRITES, so set it again */
        zram->disk->queue->backing_dev_info->capabilities |=
                BDI_CAP_STABLE_WRITES;
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline bool valid_io_request(struct zram *zram,
                sector_t start, unsigned int size)
{
        u64 end, bound;

        /* unaligned request */
        if (unlikely(start & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
                return false;
        if (unlikely(size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
                return false;

        end = start + (size >> SECTOR_SHIFT);
        bound = zram->disksize >> SECTOR_SHIFT;
        /* out of range */
        if (unlikely(start >= bound || end > bound || start > end))
                return false;

        /* I/O request is valid */
        return true;
}

static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
        if (*offset + bvec->bv_len >= PAGE_SIZE)
                (*index)++;
        *offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}
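
/*
 * Lock-free update of the max_used_pages watermark: retry the cmpxchg
 * until either the swap succeeds or another writer has already raised
 * the maximum past the value we are trying to record.
 */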
static inline void update_used_max(struct zram *zram,
                                        const unsigned long pages)
{
        unsigned long old_max, cur_max;

        old_max = atomic_long_read(&zram->stats.max_used_pages);

        do {
                cur_max = old_max;
                if (pages > cur_max)
                        old_max = atomic_long_cmpxchg(
                                &zram->stats.max_used_pages, cur_max, pages);
        } while (old_max != cur_max);
}

static bool page_zero_filled(void *ptr)
{
        unsigned int pos;
        unsigned long *page;

        page = (unsigned long *)ptr;

        for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
                if (page[pos])
                        return false;
        }

        return true;
}

static void handle_zero_page(struct bio_vec *bvec)
{
        struct page *page = bvec->bv_page;
        void *user_mem;

        user_mem = kmap_atomic(page);
        if (is_partial_io(bvec))
                memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
        else
                clear_page(user_mem);
        kunmap_atomic(user_mem);

        flush_dcache_page(page);
}

static ssize_t initstate_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        u32 val;
        struct zram *zram = dev_to_zram(dev);

        down_read(&zram->init_lock);
        val = init_done(zram);
        up_read(&zram->init_lock);

        return scnprintf(buf, PAGE_SIZE, "%u\n", val);
}

static ssize_t disksize_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);

        return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
}

static ssize_t orig_data_size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);

        deprecated_attr_warn("orig_data_size");
        return scnprintf(buf, PAGE_SIZE, "%llu\n",
                (u64)(atomic64_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
}

static ssize_t mem_used_total_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        u64 val = 0;
        struct zram *zram = dev_to_zram(dev);

        deprecated_attr_warn("mem_used_total");
        down_read(&zram->init_lock);
        if (init_done(zram)) {
                struct zram_meta *meta = zram->meta;
                val = zs_get_total_pages(meta->mem_pool);
        }
        up_read(&zram->init_lock);

        return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

static ssize_t mem_limit_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        u64 val;
        struct zram *zram = dev_to_zram(dev);

        deprecated_attr_warn("mem_limit");
        down_read(&zram->init_lock);
        val = zram->limit_pages;
        up_read(&zram->init_lock);

        return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

static ssize_t mem_limit_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        u64 limit;
        char *tmp;
        struct zram *zram = dev_to_zram(dev);

        limit = memparse(buf, &tmp);
        if (buf == tmp) /* no chars parsed, invalid input */
                return -EINVAL;

        down_write(&zram->init_lock);
        zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
        up_write(&zram->init_lock);

        return len;
}
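
/*
 * memparse() accepts the usual size suffixes, so the limit can be set
 * from user space with e.g. "echo 256M > /sys/block/zram0/mem_limit";
 * writing 0 disables the limit again.
 */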

static ssize_t mem_used_max_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        u64 val = 0;
        struct zram *zram = dev_to_zram(dev);

        deprecated_attr_warn("mem_used_max");
        down_read(&zram->init_lock);
        if (init_done(zram))
                val = atomic_long_read(&zram->stats.max_used_pages);
        up_read(&zram->init_lock);

        return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

static ssize_t mem_used_max_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        int err;
        unsigned long val;
        struct zram *zram = dev_to_zram(dev);

        err = kstrtoul(buf, 10, &val);
        if (err || val != 0)
                return -EINVAL;

        down_read(&zram->init_lock);
        if (init_done(zram)) {
                struct zram_meta *meta = zram->meta;
                atomic_long_set(&zram->stats.max_used_pages,
                                zs_get_total_pages(meta->mem_pool));
        }
        up_read(&zram->init_lock);

        return len;
}

/*
 * We switched to per-cpu streams and this attr is not needed anymore.
 * However, we will keep it around for some time, because:
 * a) we may revert per-cpu streams in the future
 * b) it's visible to user space and we need to follow our 2 years
 *    retirement rule; but we already have a number of 'soon to be
 *    altered' attrs, so max_comp_streams needs to wait for the next
 *    layoff cycle.
 */
static ssize_t max_comp_streams_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        return scnprintf(buf, PAGE_SIZE, "%d\n", num_online_cpus());
}

static ssize_t max_comp_streams_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        return len;
}

static ssize_t comp_algorithm_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        size_t sz;
        struct zram *zram = dev_to_zram(dev);

        down_read(&zram->init_lock);
        sz = zcomp_available_show(zram->compressor, buf);
        up_read(&zram->init_lock);

        return sz;
}

static ssize_t comp_algorithm_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct zram *zram = dev_to_zram(dev);
        char compressor[CRYPTO_MAX_ALG_NAME];
        size_t sz;

        strlcpy(compressor, buf, sizeof(compressor));
        /* ignore trailing newline */
        sz = strlen(compressor);
        if (sz > 0 && compressor[sz - 1] == '\n')
                compressor[sz - 1] = 0x00;

        if (!zcomp_available_algorithm(compressor))
                return -EINVAL;

        down_write(&zram->init_lock);
        if (init_done(zram)) {
                up_write(&zram->init_lock);
                pr_info("Can't change algorithm for initialized device\n");
                return -EBUSY;
        }

        strlcpy(zram->compressor, compressor, sizeof(compressor));
        up_write(&zram->init_lock);
        return len;
}
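
/*
 * The algorithm must be chosen before the device is initialized, e.g.
 *	echo lz4 > /sys/block/zram0/comp_algorithm
 * (assuming the lz4 backend is available); once "disksize" has been
 * written, the store above fails with -EBUSY.
 */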

static ssize_t compact_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct zram *zram = dev_to_zram(dev);
        struct zram_meta *meta;

        down_read(&zram->init_lock);
        if (!init_done(zram)) {
                up_read(&zram->init_lock);
                return -EINVAL;
        }

        meta = zram->meta;
        zs_compact(meta->mem_pool);
        up_read(&zram->init_lock);

        return len;
}

static ssize_t io_stat_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);
        ssize_t ret;

        down_read(&zram->init_lock);
        ret = scnprintf(buf, PAGE_SIZE,
                        "%8llu %8llu %8llu %8llu\n",
                        (u64)atomic64_read(&zram->stats.failed_reads),
                        (u64)atomic64_read(&zram->stats.failed_writes),
                        (u64)atomic64_read(&zram->stats.invalid_io),
                        (u64)atomic64_read(&zram->stats.notify_free));
        up_read(&zram->init_lock);

        return ret;
}

static ssize_t mm_stat_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);
        struct zs_pool_stats pool_stats;
        u64 orig_size, mem_used = 0;
        long max_used;
        ssize_t ret;

        memset(&pool_stats, 0x00, sizeof(struct zs_pool_stats));

        down_read(&zram->init_lock);
        if (init_done(zram)) {
                mem_used = zs_get_total_pages(zram->meta->mem_pool);
                zs_pool_stats(zram->meta->mem_pool, &pool_stats);
        }

        orig_size = atomic64_read(&zram->stats.pages_stored);
        max_used = atomic_long_read(&zram->stats.max_used_pages);

        ret = scnprintf(buf, PAGE_SIZE,
                        "%8llu %8llu %8llu %8lu %8ld %8llu %8lu\n",
                        orig_size << PAGE_SHIFT,
                        (u64)atomic64_read(&zram->stats.compr_data_size),
                        mem_used << PAGE_SHIFT,
                        zram->limit_pages << PAGE_SHIFT,
                        max_used << PAGE_SHIFT,
                        (u64)atomic64_read(&zram->stats.zero_pages),
                        pool_stats.pages_compacted);
        up_read(&zram->init_lock);

        return ret;
}

static ssize_t debug_stat_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        int version = 1;
        struct zram *zram = dev_to_zram(dev);
        ssize_t ret;

        down_read(&zram->init_lock);
        ret = scnprintf(buf, PAGE_SIZE,
                        "version: %d\n%8llu\n",
                        version,
                        (u64)atomic64_read(&zram->stats.writestall));
        up_read(&zram->init_lock);

        return ret;
}

static DEVICE_ATTR_RO(io_stat);
static DEVICE_ATTR_RO(mm_stat);
static DEVICE_ATTR_RO(debug_stat);
ZRAM_ATTR_RO(num_reads);
ZRAM_ATTR_RO(num_writes);
ZRAM_ATTR_RO(failed_reads);
ZRAM_ATTR_RO(failed_writes);
ZRAM_ATTR_RO(invalid_io);
ZRAM_ATTR_RO(notify_free);
ZRAM_ATTR_RO(zero_pages);
ZRAM_ATTR_RO(compr_data_size);

static inline bool zram_meta_get(struct zram *zram)
{
        if (atomic_inc_not_zero(&zram->refcount))
                return true;
        return false;
}

static inline void zram_meta_put(struct zram *zram)
{
        atomic_dec(&zram->refcount);
}

static void zram_meta_free(struct zram_meta *meta, u64 disksize)
{
        size_t num_pages = disksize >> PAGE_SHIFT;
        size_t index;

        /* Free all pages that are still in this zram device */
        for (index = 0; index < num_pages; index++) {
                unsigned long handle = meta->table[index].handle;

                if (!handle)
                        continue;

                zs_free(meta->mem_pool, handle);
        }

        zs_destroy_pool(meta->mem_pool);
        vfree(meta->table);
        kfree(meta);
}

static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize)
{
        size_t num_pages;
        struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);

        if (!meta)
                return NULL;

        num_pages = disksize >> PAGE_SHIFT;
        meta->table = vzalloc(num_pages * sizeof(*meta->table));
        if (!meta->table) {
                pr_err("Error allocating zram address table\n");
                goto out_error;
        }

        meta->mem_pool = zs_create_pool(pool_name);
        if (!meta->mem_pool) {
                pr_err("Error creating memory pool\n");
                goto out_error;
        }

        return meta;

out_error:
        vfree(meta->table);
        kfree(meta);
        return NULL;
}

/*
 * To protect concurrent access to the same index entry,
 * caller should hold this table index entry's bit_spinlock to
 * indicate this index entry is being accessed.
 */
static void zram_free_page(struct zram *zram, size_t index)
{
        struct zram_meta *meta = zram->meta;
        unsigned long handle = meta->table[index].handle;

        if (unlikely(!handle)) {
                /*
                 * No memory is allocated for zero filled pages.
                 * Simply clear zero page flag.
                 */
                if (zram_test_flag(meta, index, ZRAM_ZERO)) {
                        zram_clear_flag(meta, index, ZRAM_ZERO);
                        atomic64_dec(&zram->stats.zero_pages);
                }
                return;
        }

        zs_free(meta->mem_pool, handle);

        atomic64_sub(zram_get_obj_size(meta, index),
                        &zram->stats.compr_data_size);
        atomic64_dec(&zram->stats.pages_stored);

        meta->table[index].handle = 0;
        zram_set_obj_size(meta, index, 0);
}
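
/*
 * Decompress the object at @index into the PAGE_SIZE buffer @mem.
 * Unallocated and ZRAM_ZERO slots come back as a cleared page, and
 * incompressible objects (stored raw at PAGE_SIZE) are copied out
 * without invoking the compression backend.
 */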
static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
        int ret = 0;
        unsigned char *cmem;
        struct zram_meta *meta = zram->meta;
        unsigned long handle;
        unsigned int size;

        bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
        handle = meta->table[index].handle;
        size = zram_get_obj_size(meta, index);

        if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
                bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
                clear_page(mem);
                return 0;
        }

        cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
        if (size == PAGE_SIZE) {
                copy_page(mem, cmem);
        } else {
                struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp);

                ret = zcomp_decompress(zstrm, cmem, size, mem);
                zcomp_stream_put(zram->comp);
        }
        zs_unmap_object(meta->mem_pool, handle);
        bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

        /* Should NEVER happen. Return bio error if it does. */
        if (unlikely(ret)) {
                pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
                return ret;
        }

        return 0;
}

static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
                          u32 index, int offset)
{
        int ret;
        struct page *page;
        unsigned char *user_mem, *uncmem = NULL;
        struct zram_meta *meta = zram->meta;
        page = bvec->bv_page;

        bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
        if (unlikely(!meta->table[index].handle) ||
                        zram_test_flag(meta, index, ZRAM_ZERO)) {
                bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
                handle_zero_page(bvec);
                return 0;
        }
        bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

        if (is_partial_io(bvec))
                /* Use a temporary buffer to decompress the page */
                uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);

        user_mem = kmap_atomic(page);
        if (!is_partial_io(bvec))
                uncmem = user_mem;

        if (!uncmem) {
                pr_err("Unable to allocate temp memory\n");
                ret = -ENOMEM;
                goto out_cleanup;
        }

        ret = zram_decompress_page(zram, uncmem, index);
        /* Should NEVER happen. Return bio error if it does. */
        if (unlikely(ret))
                goto out_cleanup;

        if (is_partial_io(bvec))
                memcpy(user_mem + bvec->bv_offset, uncmem + offset,
                                bvec->bv_len);

        flush_dcache_page(page);
        ret = 0;
out_cleanup:
        kunmap_atomic(user_mem);
        if (is_partial_io(bvec))
                kfree(uncmem);
        return ret;
}
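
/*
 * Write path: zero-filled pages are recorded with only a table flag;
 * everything else is compressed into a per-cpu stream buffer, a
 * zsmalloc handle is allocated (fast path first, then a slow path that
 * may sleep for direct reclaim and re-does the compression), and the
 * table entry is replaced under the ZRAM_ACCESS bit lock.
 */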
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
                           int offset)
{
        int ret = 0;
        unsigned int clen;
        unsigned long handle = 0;
        struct page *page;
        unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
        struct zram_meta *meta = zram->meta;
        struct zcomp_strm *zstrm = NULL;
        unsigned long alloced_pages;

        page = bvec->bv_page;
        if (is_partial_io(bvec)) {
                /*
                 * This is a partial IO. We need to read the full page
                 * before writing the changes.
                 */
                uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
                if (!uncmem) {
                        ret = -ENOMEM;
                        goto out;
                }
                ret = zram_decompress_page(zram, uncmem, index);
                if (ret)
                        goto out;
        }

compress_again:
        user_mem = kmap_atomic(page);
        if (is_partial_io(bvec)) {
                memcpy(uncmem + offset, user_mem + bvec->bv_offset,
                                bvec->bv_len);
                kunmap_atomic(user_mem);
                user_mem = NULL;
        } else {
                uncmem = user_mem;
        }

        if (page_zero_filled(uncmem)) {
                if (user_mem)
                        kunmap_atomic(user_mem);
                /* Free memory associated with this sector now. */
                bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
                zram_free_page(zram, index);
                zram_set_flag(meta, index, ZRAM_ZERO);
                bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

                atomic64_inc(&zram->stats.zero_pages);
                ret = 0;
                goto out;
        }

        zstrm = zcomp_stream_get(zram->comp);
        ret = zcomp_compress(zstrm, uncmem, &clen);
        if (!is_partial_io(bvec)) {
                kunmap_atomic(user_mem);
                user_mem = NULL;
                uncmem = NULL;
        }

        if (unlikely(ret)) {
                pr_err("Compression failed! err=%d\n", ret);
                goto out;
        }

        src = zstrm->buffer;
        if (unlikely(clen > max_zpage_size)) {
                clen = PAGE_SIZE;
                if (is_partial_io(bvec))
                        src = uncmem;
        }

        /*
         * handle allocation has 2 paths:
         * a) fast path is executed with preemption disabled (for
         *  per-cpu streams) and has __GFP_DIRECT_RECLAIM bit clear,
         *  since we can't sleep;
         * b) slow path enables preemption and attempts to allocate
         *  the page with __GFP_DIRECT_RECLAIM bit set. we have to
         *  put per-cpu compression stream and, thus, to re-do
         *  the compression once handle is allocated.
         *
         * if we have a 'non-null' handle here then we are coming
         * from the slow path and handle has already been allocated.
         */
        if (!handle)
                handle = zs_malloc(meta->mem_pool, clen,
                                __GFP_KSWAPD_RECLAIM |
                                __GFP_NOWARN |
                                __GFP_HIGHMEM |
                                __GFP_MOVABLE);
        if (!handle) {
                zcomp_stream_put(zram->comp);
                zstrm = NULL;

                atomic64_inc(&zram->stats.writestall);

                handle = zs_malloc(meta->mem_pool, clen,
                                GFP_NOIO | __GFP_HIGHMEM |
                                __GFP_MOVABLE);
                if (handle)
                        goto compress_again;

                pr_err("Error allocating memory for compressed page: %u, size=%u\n",
                        index, clen);
                ret = -ENOMEM;
                goto out;
        }

        alloced_pages = zs_get_total_pages(meta->mem_pool);
        update_used_max(zram, alloced_pages);

        if (zram->limit_pages && alloced_pages > zram->limit_pages) {
                zs_free(meta->mem_pool, handle);
                ret = -ENOMEM;
                goto out;
        }

        cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);

        if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
                src = kmap_atomic(page);
                copy_page(cmem, src);
                kunmap_atomic(src);
        } else {
                memcpy(cmem, src, clen);
        }

        zcomp_stream_put(zram->comp);
        zstrm = NULL;
        zs_unmap_object(meta->mem_pool, handle);

        /*
         * Free memory associated with this sector
         * before overwriting unused sectors.
         */
        bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
        zram_free_page(zram, index);

        meta->table[index].handle = handle;
        zram_set_obj_size(meta, index, clen);
        bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

        /* Update stats */
        atomic64_add(clen, &zram->stats.compr_data_size);
        atomic64_inc(&zram->stats.pages_stored);
out:
        if (zstrm)
                zcomp_stream_put(zram->comp);
        if (is_partial_io(bvec))
                kfree(uncmem);
        return ret;
}

/*
 * zram_bio_discard - handler on discard request
 * @index: physical block index in PAGE_SIZE units
 * @offset: byte offset within physical block
 */
static void zram_bio_discard(struct zram *zram, u32 index,
                             int offset, struct bio *bio)
{
        size_t n = bio->bi_iter.bi_size;
        struct zram_meta *meta = zram->meta;

        /*
         * zram manages data in physical block size units. Because logical block
         * size isn't identical with physical block size on some arch, we
         * could get a discard request pointing to a specific offset within a
         * certain physical block. Although we can handle this request by
         * reading that physical block and decompressing and partially zeroing
         * and re-compressing and then re-storing it, this isn't reasonable
         * because our intent with a discard request is to save memory. So
         * skipping this logical block is appropriate here.
         */
        if (offset) {
                if (n <= (PAGE_SIZE - offset))
                        return;

                n -= (PAGE_SIZE - offset);
                index++;
        }

        while (n >= PAGE_SIZE) {
                bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
                zram_free_page(zram, index);
                bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
                atomic64_inc(&zram->stats.notify_free);
                index++;
                n -= PAGE_SIZE;
        }
}

static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
                        int offset, bool is_write)
{
        unsigned long start_time = jiffies;
        int rw_acct = is_write ? REQ_OP_WRITE : REQ_OP_READ;
        int ret;

        generic_start_io_acct(rw_acct, bvec->bv_len >> SECTOR_SHIFT,
                        &zram->disk->part0);

        if (!is_write) {
                atomic64_inc(&zram->stats.num_reads);
                ret = zram_bvec_read(zram, bvec, index, offset);
        } else {
                atomic64_inc(&zram->stats.num_writes);
                ret = zram_bvec_write(zram, bvec, index, offset);
        }

        generic_end_io_acct(rw_acct, &zram->disk->part0, start_time);

        if (unlikely(ret)) {
                if (!is_write)
                        atomic64_inc(&zram->stats.failed_reads);
                else
                        atomic64_inc(&zram->stats.failed_writes);
        }

        return ret;
}

static void __zram_make_request(struct zram *zram, struct bio *bio)
{
        int offset;
        u32 index;
        struct bio_vec bvec;
        struct bvec_iter iter;

        index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
        offset = (bio->bi_iter.bi_sector &
                  (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

        if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
                zram_bio_discard(zram, index, offset, bio);
                bio_endio(bio);
                return;
        }

        bio_for_each_segment(bvec, bio, iter) {
                int max_transfer_size = PAGE_SIZE - offset;

                if (bvec.bv_len > max_transfer_size) {
                        /*
                         * zram_bvec_rw() can only make operation on a single
                         * zram page. Split the bio vector.
                         */
                        struct bio_vec bv;

                        bv.bv_page = bvec.bv_page;
                        bv.bv_len = max_transfer_size;
                        bv.bv_offset = bvec.bv_offset;

                        if (zram_bvec_rw(zram, &bv, index, offset,
                                        op_is_write(bio_op(bio))) < 0)
                                goto out;

                        bv.bv_len = bvec.bv_len - max_transfer_size;
                        bv.bv_offset += max_transfer_size;
                        if (zram_bvec_rw(zram, &bv, index + 1, 0,
                                        op_is_write(bio_op(bio))) < 0)
                                goto out;
                } else
                        if (zram_bvec_rw(zram, &bvec, index, offset,
                                        op_is_write(bio_op(bio))) < 0)
                                goto out;

                update_position(&index, &offset, &bvec);
        }

        bio_endio(bio);
        return;

out:
        bio_io_error(bio);
}

/*
 * Handler function for all zram I/O requests.
 */
static blk_qc_t zram_make_request(struct request_queue *queue, struct bio *bio)
{
        struct zram *zram = queue->queuedata;

        if (unlikely(!zram_meta_get(zram)))
                goto error;

        blk_queue_split(queue, &bio, queue->bio_split);

        if (!valid_io_request(zram, bio->bi_iter.bi_sector,
                                        bio->bi_iter.bi_size)) {
                atomic64_inc(&zram->stats.invalid_io);
                goto put_zram;
        }

        __zram_make_request(zram, bio);
        zram_meta_put(zram);
        return BLK_QC_T_NONE;

put_zram:
        zram_meta_put(zram);
error:
        bio_io_error(bio);
        return BLK_QC_T_NONE;
}

static void zram_slot_free_notify(struct block_device *bdev,
                                unsigned long index)
{
        struct zram *zram;
        struct zram_meta *meta;

        zram = bdev->bd_disk->private_data;
        meta = zram->meta;

        bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
        zram_free_page(zram, index);
        bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
        atomic64_inc(&zram->stats.notify_free);
}
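
/*
 * ->rw_page entry point, reached via bdev_read_page()/bdev_write_page()
 * from callers such as swap_readpage() and __swap_writepage(); it does
 * page-sized I/O without allocating a bio.
 */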
static int zram_rw_page(struct block_device *bdev, sector_t sector,
                        struct page *page, bool is_write)
{
        int offset, err = -EIO;
        u32 index;
        struct zram *zram;
        struct bio_vec bv;

        zram = bdev->bd_disk->private_data;
        if (unlikely(!zram_meta_get(zram)))
                goto out;

        if (!valid_io_request(zram, sector, PAGE_SIZE)) {
                atomic64_inc(&zram->stats.invalid_io);
                err = -EINVAL;
                goto put_zram;
        }

        index = sector >> SECTORS_PER_PAGE_SHIFT;
        offset = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

        bv.bv_page = page;
        bv.bv_len = PAGE_SIZE;
        bv.bv_offset = 0;

        err = zram_bvec_rw(zram, &bv, index, offset, is_write);
put_zram:
        zram_meta_put(zram);
out:
        /*
         * If I/O fails, just return the error (i.e. non-zero) without
         * calling page_endio. The upper layers of rw_page (e.g.
         * swap_readpage, __swap_writepage) then resubmit the I/O as a
         * bio request, and bio->bi_end_io handles the error (e.g.
         * SetPageError, set_page_dirty and extra works).
         */
        if (err == 0)
                page_endio(page, is_write, 0);
        return err;
}

static void zram_reset_device(struct zram *zram)
{
        struct zram_meta *meta;
        struct zcomp *comp;
        u64 disksize;

        down_write(&zram->init_lock);

        zram->limit_pages = 0;

        if (!init_done(zram)) {
                up_write(&zram->init_lock);
                return;
        }

        meta = zram->meta;
        comp = zram->comp;
        disksize = zram->disksize;
        /*
         * The refcount will eventually drop to 0 and the r/w handlers,
         * unable to take further I/O, will bail out on the
         * zram_meta_get() check.
         */
        zram_meta_put(zram);
        /*
         * We want to free zram_meta in process context to avoid
         * deadlock between reclaim path and any other locks.
         */
        wait_event(zram->io_done, atomic_read(&zram->refcount) == 0);

        /* Reset stats */
        memset(&zram->stats, 0, sizeof(zram->stats));
        zram->disksize = 0;

        set_capacity(zram->disk, 0);
        part_stat_set_all(&zram->disk->part0, 0);

        up_write(&zram->init_lock);
        /* I/O on all CPUs is done, so it is safe to free */
        zram_meta_free(meta, disksize);
        zcomp_destroy(comp);
}

static ssize_t disksize_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        u64 disksize;
        struct zcomp *comp;
        struct zram_meta *meta;
        struct zram *zram = dev_to_zram(dev);
        int err;

        disksize = memparse(buf, NULL);
        if (!disksize)
                return -EINVAL;

        disksize = PAGE_ALIGN(disksize);
        meta = zram_meta_alloc(zram->disk->disk_name, disksize);
        if (!meta)
                return -ENOMEM;

        comp = zcomp_create(zram->compressor);
        if (IS_ERR(comp)) {
                pr_err("Cannot initialise %s compressing backend\n",
                                zram->compressor);
                err = PTR_ERR(comp);
                goto out_free_meta;
        }

        down_write(&zram->init_lock);
        if (init_done(zram)) {
                pr_info("Cannot change disksize for initialized device\n");
                err = -EBUSY;
                goto out_destroy_comp;
        }

        init_waitqueue_head(&zram->io_done);
        atomic_set(&zram->refcount, 1);
        zram->meta = meta;
        zram->comp = comp;
        zram->disksize = disksize;
        set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
        zram_revalidate_disk(zram);
        up_write(&zram->init_lock);

        return len;

out_destroy_comp:
        up_write(&zram->init_lock);
        zcomp_destroy(comp);
out_free_meta:
        zram_meta_free(meta, disksize);
        return err;
}
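
/*
 * Typical setup from user space:
 *	echo 1G > /sys/block/zram0/disksize
 * memparse() handles the size suffix, and the value is rounded up to a
 * PAGE_SIZE multiple before the pool and table are allocated.
 */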

static ssize_t reset_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        int ret;
        unsigned short do_reset;
        struct zram *zram;
        struct block_device *bdev;

        ret = kstrtou16(buf, 10, &do_reset);
        if (ret)
                return ret;

        if (!do_reset)
                return -EINVAL;

        zram = dev_to_zram(dev);
        bdev = bdget_disk(zram->disk, 0);
        if (!bdev)
                return -ENOMEM;

        mutex_lock(&bdev->bd_mutex);
        /* Do not reset an active device or claimed device */
        if (bdev->bd_openers || zram->claim) {
                mutex_unlock(&bdev->bd_mutex);
                bdput(bdev);
                return -EBUSY;
        }

        /* From now on, no one can open /dev/zram[0-9] */
        zram->claim = true;
        mutex_unlock(&bdev->bd_mutex);

        /* Make sure all the pending I/O are finished */
        fsync_bdev(bdev);
        zram_reset_device(zram);
        zram_revalidate_disk(zram);
        bdput(bdev);

        mutex_lock(&bdev->bd_mutex);
        zram->claim = false;
        mutex_unlock(&bdev->bd_mutex);

        return len;
}

static int zram_open(struct block_device *bdev, fmode_t mode)
{
        int ret = 0;
        struct zram *zram;

        WARN_ON(!mutex_is_locked(&bdev->bd_mutex));

        zram = bdev->bd_disk->private_data;
        /* zram was claimed for reset, so the open request fails */
        if (zram->claim)
                ret = -EBUSY;

        return ret;
}

static const struct block_device_operations zram_devops = {
        .open = zram_open,
        .swap_slot_free_notify = zram_slot_free_notify,
        .rw_page = zram_rw_page,
        .owner = THIS_MODULE
};

static DEVICE_ATTR_WO(compact);
static DEVICE_ATTR_RW(disksize);
static DEVICE_ATTR_RO(initstate);
static DEVICE_ATTR_WO(reset);
static DEVICE_ATTR_RO(orig_data_size);
static DEVICE_ATTR_RO(mem_used_total);
static DEVICE_ATTR_RW(mem_limit);
static DEVICE_ATTR_RW(mem_used_max);
static DEVICE_ATTR_RW(max_comp_streams);
static DEVICE_ATTR_RW(comp_algorithm);

static struct attribute *zram_disk_attrs[] = {
        &dev_attr_disksize.attr,
        &dev_attr_initstate.attr,
        &dev_attr_reset.attr,
        &dev_attr_num_reads.attr,
        &dev_attr_num_writes.attr,
        &dev_attr_failed_reads.attr,
        &dev_attr_failed_writes.attr,
        &dev_attr_compact.attr,
        &dev_attr_invalid_io.attr,
        &dev_attr_notify_free.attr,
        &dev_attr_zero_pages.attr,
        &dev_attr_orig_data_size.attr,
        &dev_attr_compr_data_size.attr,
        &dev_attr_mem_used_total.attr,
        &dev_attr_mem_limit.attr,
        &dev_attr_mem_used_max.attr,
        &dev_attr_max_comp_streams.attr,
        &dev_attr_comp_algorithm.attr,
        &dev_attr_io_stat.attr,
        &dev_attr_mm_stat.attr,
        &dev_attr_debug_stat.attr,
        NULL,
};

static struct attribute_group zram_disk_attr_group = {
        .attrs = zram_disk_attrs,
};

/*
 * Allocate and initialize new zram device. The function returns
 * '>= 0' device_id upon success, and negative value otherwise.
 */
static int zram_add(void)
{
        struct zram *zram;
        struct request_queue *queue;
        int ret, device_id;

        zram = kzalloc(sizeof(struct zram), GFP_KERNEL);
        if (!zram)
                return -ENOMEM;

        ret = idr_alloc(&zram_index_idr, zram, 0, 0, GFP_KERNEL);
        if (ret < 0)
                goto out_free_dev;
        device_id = ret;

        init_rwsem(&zram->init_lock);

        queue = blk_alloc_queue(GFP_KERNEL);
        if (!queue) {
                pr_err("Error allocating disk queue for device %d\n",
                        device_id);
                ret = -ENOMEM;
                goto out_free_idr;
        }

        blk_queue_make_request(queue, zram_make_request);

        /* gendisk structure */
        zram->disk = alloc_disk(1);
        if (!zram->disk) {
                pr_err("Error allocating disk structure for device %d\n",
                        device_id);
                ret = -ENOMEM;
                goto out_free_queue;
        }

        zram->disk->major = zram_major;
        zram->disk->first_minor = device_id;
        zram->disk->fops = &zram_devops;
        zram->disk->queue = queue;
        zram->disk->queue->queuedata = zram;
        zram->disk->private_data = zram;
        snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

        /* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
        set_capacity(zram->disk, 0);
        /* zram devices sort of resemble non-rotational disks */
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
        queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
        /*
         * To ensure that we always get PAGE_SIZE aligned
         * and n*PAGE_SIZED sized I/O requests.
         */
        blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
        blk_queue_logical_block_size(zram->disk->queue,
                                        ZRAM_LOGICAL_BLOCK_SIZE);
        blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
        blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
        zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
        blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX);
        /*
         * zram_bio_discard() will clear all logical blocks if logical block
         * size is identical with physical block size(PAGE_SIZE). But if it is
         * different, we will skip discarding some parts of logical blocks in
         * the part of the request range which isn't aligned to physical block
         * size. So we can't ensure that all discarded logical blocks are
         * zeroed.
         */
        if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
                zram->disk->queue->limits.discard_zeroes_data = 1;
        else
                zram->disk->queue->limits.discard_zeroes_data = 0;
        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue);

        add_disk(zram->disk);

        ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
                                &zram_disk_attr_group);
        if (ret < 0) {
                pr_err("Error creating sysfs group for device %d\n",
                        device_id);
                goto out_free_disk;
        }
        strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
        zram->meta = NULL;

        pr_info("Added device: %s\n", zram->disk->disk_name);
        return device_id;

out_free_disk:
        del_gendisk(zram->disk);
        put_disk(zram->disk);
out_free_queue:
        blk_cleanup_queue(queue);
out_free_idr:
        idr_remove(&zram_index_idr, device_id);
out_free_dev:
        kfree(zram);
        return ret;
}

static int zram_remove(struct zram *zram)
{
        struct block_device *bdev;

        bdev = bdget_disk(zram->disk, 0);
        if (!bdev)
                return -ENOMEM;

        mutex_lock(&bdev->bd_mutex);
        if (bdev->bd_openers || zram->claim) {
                mutex_unlock(&bdev->bd_mutex);
                bdput(bdev);
                return -EBUSY;
        }

        zram->claim = true;
        mutex_unlock(&bdev->bd_mutex);

        /*
         * Remove sysfs first, so no one will perform a disksize
         * store while we destroy the devices. This also helps during
         * hot_remove -- zram_reset_device() is the last holder of
         * ->init_lock, no later/concurrent disksize_store() or any
         * other sysfs handlers are possible.
         */
        sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
                        &zram_disk_attr_group);

        /* Make sure all the pending I/O are finished */
        fsync_bdev(bdev);
        zram_reset_device(zram);
        bdput(bdev);

        pr_info("Removed device: %s\n", zram->disk->disk_name);

        blk_cleanup_queue(zram->disk->queue);
        del_gendisk(zram->disk);
        put_disk(zram->disk);

        kfree(zram);
        return 0;
}

/* zram-control sysfs attributes */
static ssize_t hot_add_show(struct class *class,
                        struct class_attribute *attr,
                        char *buf)
{
        int ret;

        mutex_lock(&zram_index_mutex);
        ret = zram_add();
        mutex_unlock(&zram_index_mutex);

        if (ret < 0)
                return ret;
        return scnprintf(buf, PAGE_SIZE, "%d\n", ret);
}

static ssize_t hot_remove_store(struct class *class,
                        struct class_attribute *attr,
                        const char *buf,
                        size_t count)
{
        struct zram *zram;
        int ret, dev_id;

        /* dev_id is gendisk->first_minor, which is `int' */
        ret = kstrtoint(buf, 10, &dev_id);
        if (ret)
                return ret;
        if (dev_id < 0)
                return -EINVAL;

        mutex_lock(&zram_index_mutex);

        zram = idr_find(&zram_index_idr, dev_id);
        if (zram) {
                ret = zram_remove(zram);
                if (!ret)
                        idr_remove(&zram_index_idr, dev_id);
        } else {
                ret = -ENODEV;
        }

        mutex_unlock(&zram_index_mutex);
        return ret ? ret : count;
}

/*
 * NOTE: hot_add attribute is not the usual read-only sysfs attribute, in
 * the sense that reading from this file does alter the state of your
 * system -- it creates a new un-initialized zram device and returns this
 * device's device_id (or an error code if it fails to create a new device).
 */
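/*
 * Example usage:
 *	cat /sys/class/zram-control/hot_add	-> prints the new device id
 *	echo <id> > /sys/class/zram-control/hot_remove
 */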
static struct class_attribute zram_control_class_attrs[] = {
        __ATTR(hot_add, 0400, hot_add_show, NULL),
        __ATTR_WO(hot_remove),
        __ATTR_NULL,
};

static struct class zram_control_class = {
        .name           = "zram-control",
        .owner          = THIS_MODULE,
        .class_attrs    = zram_control_class_attrs,
};

static int zram_remove_cb(int id, void *ptr, void *data)
{
        zram_remove(ptr);
        return 0;
}

static void destroy_devices(void)
{
        class_unregister(&zram_control_class);
        idr_for_each(&zram_index_idr, &zram_remove_cb, NULL);
        idr_destroy(&zram_index_idr);
        unregister_blkdev(zram_major, "zram");
        cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
}

static int __init zram_init(void)
{
        int ret;

        ret = cpuhp_setup_state_multi(CPUHP_ZCOMP_PREPARE, "block/zram:prepare",
                                      zcomp_cpu_up_prepare, zcomp_cpu_dead);
        if (ret < 0)
                return ret;

        ret = class_register(&zram_control_class);
        if (ret) {
                pr_err("Unable to register zram-control class\n");
                cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
                return ret;
        }

        zram_major = register_blkdev(0, "zram");
        if (zram_major <= 0) {
                pr_err("Unable to get major number\n");
                class_unregister(&zram_control_class);
                cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
                return -EBUSY;
        }

        while (num_devices != 0) {
                mutex_lock(&zram_index_mutex);
                ret = zram_add();
                mutex_unlock(&zram_index_mutex);
                if (ret < 0)
                        goto out_error;
                num_devices--;
        }

        return 0;

out_error:
        destroy_devices();
        return ret;
}

static void __exit zram_exit(void)
{
        destroy_devices();
}

module_init(zram_init);
module_exit(zram_exit);

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of pre-created zram devices");
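/*
 * e.g. "modprobe zram num_devices=4" pre-creates zram0..zram3; further
 * devices can be added at runtime via /sys/class/zram-control/hot_add.
 */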

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");