/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 */
#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#ifdef CONFIG_ZRAM_DEBUG
#define DEBUG
#endif
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/err.h>

#include "zram_drv.h"
static int zram_major;
static struct zram *zram_devices;
static const char *default_compressor = "lzo";
/* Module params (documentation at end) */
static unsigned int num_devices = 1;
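/*
 * Typical userspace setup via the sysfs attributes defined below
 * (a sketch, not part of this driver; paths assume the default
 * single device):
 *
 *	echo lzo > /sys/block/zram0/comp_algorithm
 *	echo 512M > /sys/block/zram0/disksize
 *	mkswap /dev/zram0 && swapon /dev/zram0
 */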
static inline void deprecated_attr_warn(const char *name)
{
	pr_warn_once("%d (%s) Attribute %s (and others) will be removed. %s\n",
			task_pid_nr(current),
			current->comm,
			name,
			"See zram documentation.");
}
#define ZRAM_ATTR_RO(name)						\
static ssize_t name##_show(struct device *d,				\
			struct device_attribute *attr, char *b)		\
{									\
	struct zram *zram = dev_to_zram(d);				\
									\
	deprecated_attr_warn(__stringify(name));			\
	return scnprintf(b, PAGE_SIZE, "%llu\n",			\
		(u64)atomic64_read(&zram->stats.name));			\
}									\
static DEVICE_ATTR_RO(name);
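/*
 * For example, ZRAM_ATTR_RO(num_reads) expands to num_reads_show()
 * plus dev_attr_num_reads, which is listed in zram_disk_attrs below.
 */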
static inline bool init_done(struct zram *zram)
{
	return zram->disksize;
}

static inline struct zram *dev_to_zram(struct device *dev)
{
	return (struct zram *)dev_to_disk(dev)->private_data;
}
static ssize_t disksize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
}
static ssize_t initstate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = init_done(zram);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%u\n", val);
}
static ssize_t orig_data_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	deprecated_attr_warn("orig_data_size");
	return scnprintf(buf, PAGE_SIZE, "%llu\n",
		(u64)(atomic64_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
}
static ssize_t mem_used_total_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val = 0;
	struct zram *zram = dev_to_zram(dev);

	deprecated_attr_warn("mem_used_total");
	down_read(&zram->init_lock);
	if (init_done(zram)) {
		struct zram_meta *meta = zram->meta;
		val = zs_get_total_pages(meta->mem_pool);
	}
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}
static ssize_t max_comp_streams_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = zram->max_comp_streams;
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}
static ssize_t mem_limit_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val;
	struct zram *zram = dev_to_zram(dev);

	deprecated_attr_warn("mem_limit");
	down_read(&zram->init_lock);
	val = zram->limit_pages;
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}
static ssize_t mem_limit_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 limit;
	char *tmp;
	struct zram *zram = dev_to_zram(dev);

	limit = memparse(buf, &tmp);
	if (buf == tmp) /* no chars parsed, invalid input */
		return -EINVAL;

	down_write(&zram->init_lock);
	zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
	up_write(&zram->init_lock);

	return len;
}
static ssize_t mem_used_max_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val = 0;
	struct zram *zram = dev_to_zram(dev);

	deprecated_attr_warn("mem_used_max");
	down_read(&zram->init_lock);
	if (init_done(zram))
		val = atomic_long_read(&zram->stats.max_used_pages);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}
static ssize_t mem_used_max_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int err;
	unsigned long val;
	struct zram *zram = dev_to_zram(dev);

	err = kstrtoul(buf, 10, &val);
	if (err || val != 0)
		return -EINVAL;

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		struct zram_meta *meta = zram->meta;
		atomic_long_set(&zram->stats.max_used_pages,
				zs_get_total_pages(meta->mem_pool));
	}
	up_read(&zram->init_lock);

	return len;
}
static ssize_t max_comp_streams_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int num;
	struct zram *zram = dev_to_zram(dev);
	int ret;

	ret = kstrtoint(buf, 0, &num);
	if (ret < 0)
		return ret;
	if (num < 1)
		return -EINVAL;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		if (!zcomp_set_max_streams(zram->comp, num)) {
			pr_info("Cannot change max compression streams\n");
			ret = -EINVAL;
			goto out;
		}
	}

	zram->max_comp_streams = num;
	ret = len;
out:
	up_write(&zram->init_lock);
	return ret;
}
static ssize_t comp_algorithm_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	size_t sz;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	sz = zcomp_available_show(zram->compressor, buf);
	up_read(&zram->init_lock);

	return sz;
}
static ssize_t comp_algorithm_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		up_write(&zram->init_lock);
		pr_info("Can't change algorithm for initialized device\n");
		return -EBUSY;
	}
	strlcpy(zram->compressor, buf, sizeof(zram->compressor));
	up_write(&zram->init_lock);
	return len;
}
/*
 * Flag operations require the table entry's bit_spinlock (ZRAM_ACCESS)
 * to be held.
 */
static int zram_test_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	return meta->table[index].value & BIT(flag);
}

static void zram_set_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].value |= BIT(flag);
}

static void zram_clear_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].value &= ~BIT(flag);
}
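/*
 * Layout of each meta table entry's 'value' field: the low
 * ZRAM_FLAG_SHIFT bits hold the compressed object size, and the bits
 * above hold the zram_pageflags (including ZRAM_ACCESS, which doubles
 * as the entry's bit_spinlock):
 *
 *	value = [flags | object size]
 */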
static size_t zram_get_obj_size(struct zram_meta *meta, u32 index)
{
	return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
}

static void zram_set_obj_size(struct zram_meta *meta,
			u32 index, size_t size)
{
	unsigned long flags = meta->table[index].value >> ZRAM_FLAG_SHIFT;

	meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
}
static inline int is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}
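/*
 * Note on units below: 'start' is a sector number (512-byte units)
 * while 'size' is in bytes; 'end' and 'bound' are computed in sectors.
 */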
/*
 * Check if the request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram,
		sector_t start, unsigned int size)
{
	u64 end, bound;

	/* unaligned request */
	if (unlikely(start & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
		return 0;
	if (unlikely(size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
		return 0;

	end = start + (size >> SECTOR_SHIFT);
	bound = zram->disksize >> SECTOR_SHIFT;
	/* out of range */
	if (unlikely(start >= bound || end > bound || start > end))
		return 0;

	/* I/O request is valid */
	return 1;
}
static void zram_meta_free(struct zram_meta *meta, u64 disksize)
{
	size_t num_pages = disksize >> PAGE_SHIFT;
	size_t index;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < num_pages; index++) {
		unsigned long handle = meta->table[index].handle;

		if (!handle)
			continue;

		zs_free(meta->mem_pool, handle);
	}

	zs_destroy_pool(meta->mem_pool);
	vfree(meta->table);
	kfree(meta);
}
static struct zram_meta *zram_meta_alloc(int device_id, u64 disksize)
{
	size_t num_pages;
	char pool_name[8];
	struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);

	if (!meta)
		return NULL;

	num_pages = disksize >> PAGE_SHIFT;
	meta->table = vzalloc(num_pages * sizeof(*meta->table));
	if (!meta->table) {
		pr_err("Error allocating zram address table\n");
		goto out_error;
	}

	snprintf(pool_name, sizeof(pool_name), "zram%d", device_id);
	meta->mem_pool = zs_create_pool(pool_name, GFP_NOIO | __GFP_HIGHMEM);
	if (!meta->mem_pool) {
		pr_err("Error creating memory pool\n");
		goto out_error;
	}

	return meta;

out_error:
	vfree(meta->table);
	kfree(meta);
	return NULL;
}
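/*
 * zram->refcount pins the device state while I/O is in flight: it is
 * set to 1 in disksize_store(), elevated via zram_meta_get() around
 * each request, and zram_reset_device() drops the initial reference
 * and waits on io_done for it to reach zero before freeing meta.
 */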
static inline bool zram_meta_get(struct zram *zram)
{
	if (atomic_inc_not_zero(&zram->refcount))
		return true;
	return false;
}

static inline void zram_meta_put(struct zram *zram)
{
	atomic_dec(&zram->refcount);
}
static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	if (*offset + bvec->bv_len >= PAGE_SIZE)
		(*index)++;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}
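/*
 * E.g. with 4K pages, a 512-byte bvec at offset 3584 completes the
 * page: the index advances and the offset wraps to 0.
 */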
static int page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return 0;
	}

	return 1;
}
static void handle_zero_page(struct bio_vec *bvec)
{
	struct page *page = bvec->bv_page;
	void *user_mem;

	user_mem = kmap_atomic(page);
	if (is_partial_io(bvec))
		memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
	else
		clear_page(user_mem);
	kunmap_atomic(user_mem);

	flush_dcache_page(page);
}
/*
 * To protect concurrent access to the same index entry, the caller
 * should hold this table index entry's bit_spinlock to indicate that
 * the entry is in use.
 */
static void zram_free_page(struct zram *zram, size_t index)
{
	struct zram_meta *meta = zram->meta;
	unsigned long handle = meta->table[index].handle;

	if (unlikely(!handle)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear the zero page flag.
		 */
		if (zram_test_flag(meta, index, ZRAM_ZERO)) {
			zram_clear_flag(meta, index, ZRAM_ZERO);
			atomic64_dec(&zram->stats.zero_pages);
		}
		return;
	}

	zs_free(meta->mem_pool, handle);

	atomic64_sub(zram_get_obj_size(meta, index),
			&zram->stats.compr_data_size);
	atomic64_dec(&zram->stats.pages_stored);

	meta->table[index].handle = 0;
	zram_set_obj_size(meta, index, 0);
}
static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
	int ret = 0;
	unsigned char *cmem;
	struct zram_meta *meta = zram->meta;
	unsigned long handle;
	size_t size;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	handle = meta->table[index].handle;
	size = zram_get_obj_size(meta, index);

	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		clear_page(mem);
		return 0;
	}

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
	if (size == PAGE_SIZE)
		copy_page(mem, cmem);
	else
		ret = zcomp_decompress(zram->comp, cmem, size, mem);
	zs_unmap_object(meta->mem_pool, handle);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		return ret;
	}

	return 0;
}
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
			u32 index, int offset)
{
	int ret;
	struct page *page;
	unsigned char *user_mem, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	page = bvec->bv_page;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	if (unlikely(!meta->table[index].handle) ||
			zram_test_flag(meta, index, ZRAM_ZERO)) {
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		handle_zero_page(bvec);
		return 0;
	}
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	if (is_partial_io(bvec))
		/* Use a temporary buffer to decompress the page */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);

	user_mem = kmap_atomic(page);
	if (!is_partial_io(bvec))
		uncmem = user_mem;

	if (!uncmem) {
		pr_info("Unable to allocate temp memory\n");
		ret = -ENOMEM;
		goto out_cleanup;
	}

	ret = zram_decompress_page(zram, uncmem, index);
	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret))
		goto out_cleanup;

	if (is_partial_io(bvec))
		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
			bvec->bv_len);

	flush_dcache_page(page);
	ret = 0;
out_cleanup:
	kunmap_atomic(user_mem);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}
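/*
 * Racelessly track the peak number of pages allocated from the pool:
 * retry the cmpxchg until either our value is no longer the maximum
 * or it has been successfully installed.
 */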
static inline void update_used_max(struct zram *zram,
			const unsigned long pages)
{
	unsigned long old_max, cur_max;

	old_max = atomic_long_read(&zram->stats.max_used_pages);

	do {
		cur_max = old_max;
		if (pages > cur_max)
			old_max = atomic_long_cmpxchg(
				&zram->stats.max_used_pages, cur_max, pages);
	} while (old_max != cur_max);
}
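/*
 * Write-path overview: a zero-filled page is recorded with just the
 * ZRAM_ZERO flag and no allocation; a page that compresses poorly
 * (clen > max_zpage_size) is stored uncompressed at PAGE_SIZE.
 */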
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset)
{
	int ret = 0;
	size_t clen;
	unsigned long handle;
	struct page *page;
	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	struct zcomp_strm *zstrm;
	bool locked = false;
	unsigned long alloced_pages;

	page = bvec->bv_page;
	if (is_partial_io(bvec)) {
		/*
		 * This is a partial IO. We need to read the full page
		 * before writing the changes.
		 */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
		if (!uncmem) {
			ret = -ENOMEM;
			goto out;
		}
		ret = zram_decompress_page(zram, uncmem, index);
		if (ret)
			goto out;
	}

	zstrm = zcomp_strm_find(zram->comp);
	locked = true;
	user_mem = kmap_atomic(page);

	if (is_partial_io(bvec)) {
		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
			bvec->bv_len);
		kunmap_atomic(user_mem);
		user_mem = NULL;
	} else {
		uncmem = user_mem;
	}

	if (page_zero_filled(uncmem)) {
		if (user_mem)
			kunmap_atomic(user_mem);
		/* Free memory associated with this sector now. */
		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
		zram_free_page(zram, index);
		zram_set_flag(meta, index, ZRAM_ZERO);
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		atomic64_inc(&zram->stats.zero_pages);
		ret = 0;
		goto out;
	}

	ret = zcomp_compress(zram->comp, zstrm, uncmem, &clen);
	if (!is_partial_io(bvec)) {
		kunmap_atomic(user_mem);
		user_mem = NULL;
		uncmem = NULL;
	}
	if (unlikely(ret)) {
		pr_err("Compression failed! err=%d\n", ret);
		goto out;
	}
	src = zstrm->buffer;
	if (unlikely(clen > max_zpage_size)) {
		clen = PAGE_SIZE;
		if (is_partial_io(bvec))
			src = uncmem;
	}

	handle = zs_malloc(meta->mem_pool, clen);
	if (!handle) {
		pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
			index, clen);
		ret = -ENOMEM;
		goto out;
	}

	alloced_pages = zs_get_total_pages(meta->mem_pool);
	if (zram->limit_pages && alloced_pages > zram->limit_pages) {
		zs_free(meta->mem_pool, handle);
		ret = -ENOMEM;
		goto out;
	}

	update_used_max(zram, alloced_pages);
	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);

	if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
		src = kmap_atomic(page);
		copy_page(cmem, src);
		kunmap_atomic(src);
	} else {
		memcpy(cmem, src, clen);
	}

	zcomp_strm_release(zram->comp, zstrm);
	locked = false;
	zs_unmap_object(meta->mem_pool, handle);

	/*
	 * Free memory associated with this sector
	 * before overwriting unused sectors.
	 */
	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	zram_free_page(zram, index);
	meta->table[index].handle = handle;
	zram_set_obj_size(meta, index, clen);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	/* Update stats */
	atomic64_add(clen, &zram->stats.compr_data_size);
	atomic64_inc(&zram->stats.pages_stored);
out:
	if (locked)
		zcomp_strm_release(zram->comp, zstrm);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, int rw)
{
	unsigned long start_time = jiffies;
	int ret;

	generic_start_io_acct(rw, bvec->bv_len >> SECTOR_SHIFT,
			&zram->disk->part0);

	if (rw == READ) {
		atomic64_inc(&zram->stats.num_reads);
		ret = zram_bvec_read(zram, bvec, index, offset);
	} else {
		atomic64_inc(&zram->stats.num_writes);
		ret = zram_bvec_write(zram, bvec, index, offset);
	}

	generic_end_io_acct(rw, &zram->disk->part0, start_time);

	if (unlikely(ret)) {
		if (rw == READ)
			atomic64_inc(&zram->stats.failed_reads);
		else
			atomic64_inc(&zram->stats.failed_writes);
	}

	return ret;
}
/*
 * zram_bio_discard - handler on discard request
 * @index: physical block index in PAGE_SIZE units
 * @offset: byte offset within physical block
 */
static void zram_bio_discard(struct zram *zram, u32 index,
			int offset, struct bio *bio)
{
	size_t n = bio->bi_iter.bi_size;
	struct zram_meta *meta = zram->meta;

	/*
	 * zram manages data in physical block size units. Because the
	 * logical block size isn't identical to the physical block size
	 * on some arch, we could get a discard request pointing to a
	 * specific offset within a certain physical block. Although we
	 * could handle this request by reading that physical block,
	 * decompressing, partially zeroing, re-compressing and then
	 * re-storing it, this isn't reasonable because our intent with
	 * a discard request is to save memory. So skipping this logical
	 * block is appropriate here.
	 */
	if (offset) {
		if (n <= (PAGE_SIZE - offset))
			return;

		n -= (PAGE_SIZE - offset);
		index++;
	}

	while (n >= PAGE_SIZE) {
		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
		zram_free_page(zram, index);
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		atomic64_inc(&zram->stats.notify_free);
		index++;
		n -= PAGE_SIZE;
	}
}
static void zram_reset_device(struct zram *zram)
{
	struct zram_meta *meta;
	struct zcomp *comp;
	u64 disksize;

	down_write(&zram->init_lock);

	zram->limit_pages = 0;

	if (!init_done(zram)) {
		up_write(&zram->init_lock);
		return;
	}

	meta = zram->meta;
	comp = zram->comp;
	disksize = zram->disksize;
	/*
	 * Refcount will go down to 0 eventually and the r/w handler
	 * cannot handle further I/O, so it will bail out via the
	 * zram_meta_get() check.
	 */
	zram_meta_put(zram);
	/*
	 * We want to free zram_meta in process context to avoid
	 * deadlock between reclaim path and any other locks.
	 */
	wait_event(zram->io_done, atomic_read(&zram->refcount) == 0);

	/* Reset stats */
	memset(&zram->stats, 0, sizeof(zram->stats));
	zram->disksize = 0;
	zram->max_comp_streams = 1;
	set_capacity(zram->disk, 0);

	up_write(&zram->init_lock);
	/* I/O on all CPUs is done, so it is safe to free now */
	zram_meta_free(meta, disksize);
	zcomp_destroy(comp);
}
static ssize_t disksize_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 disksize;
	struct zcomp *comp;
	struct zram_meta *meta;
	struct zram *zram = dev_to_zram(dev);
	int err;

	disksize = memparse(buf, NULL);
	if (!disksize)
		return -EINVAL;

	disksize = PAGE_ALIGN(disksize);
	meta = zram_meta_alloc(zram->disk->first_minor, disksize);
	if (!meta)
		return -ENOMEM;

	comp = zcomp_create(zram->compressor, zram->max_comp_streams);
	if (IS_ERR(comp)) {
		pr_info("Cannot initialise %s compressing backend\n",
				zram->compressor);
		err = PTR_ERR(comp);
		goto out_free_meta;
	}

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		pr_info("Cannot change disksize for initialized device\n");
		err = -EBUSY;
		goto out_destroy_comp;
	}

	init_waitqueue_head(&zram->io_done);
	atomic_set(&zram->refcount, 1);
	zram->meta = meta;
	zram->comp = comp;
	zram->disksize = disksize;
	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
	up_write(&zram->init_lock);

	/*
	 * Revalidate disk out of the init_lock to avoid lockdep splat.
	 * It's okay because disk's capacity is protected by init_lock
	 * so that revalidate_disk always sees up-to-date capacity.
	 */
	revalidate_disk(zram->disk);

	return len;

out_destroy_comp:
	up_write(&zram->init_lock);
	zcomp_destroy(comp);
out_free_meta:
	zram_meta_free(meta, disksize);
	return err;
}
static ssize_t reset_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int ret;
	unsigned short do_reset;
	struct zram *zram;
	struct block_device *bdev;

	zram = dev_to_zram(dev);
	bdev = bdget_disk(zram->disk, 0);
	if (!bdev)
		return -ENOMEM;

	mutex_lock(&bdev->bd_mutex);
	/* Do not reset an active device! */
	if (bdev->bd_openers) {
		ret = -EBUSY;
		goto out;
	}

	ret = kstrtou16(buf, 10, &do_reset);
	if (ret)
		goto out;

	if (!do_reset) {
		ret = -EINVAL;
		goto out;
	}

	/* Make sure all pending I/O is finished */
	fsync_bdev(bdev);
	zram_reset_device(zram);

	mutex_unlock(&bdev->bd_mutex);
	revalidate_disk(zram->disk);
	bdput(bdev);
	return len;

out:
	mutex_unlock(&bdev->bd_mutex);
	bdput(bdev);
	return ret;
}
static void __zram_make_request(struct zram *zram, struct bio *bio)
{
	int offset, rw;
	u32 index;
	struct bio_vec bvec;
	struct bvec_iter iter;

	index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_iter.bi_sector &
			(SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
		zram_bio_discard(zram, index, offset, bio);
		bio_endio(bio, 0);
		return;
	}

	rw = bio_data_dir(bio);
	bio_for_each_segment(bvec, bio, iter) {
		int max_transfer_size = PAGE_SIZE - offset;

		if (bvec.bv_len > max_transfer_size) {
			/*
			 * zram_bvec_rw() can only make operation on a single
			 * zram page. Split the bio vector.
			 */
			struct bio_vec bv;

			bv.bv_page = bvec.bv_page;
			bv.bv_len = max_transfer_size;
			bv.bv_offset = bvec.bv_offset;

			if (zram_bvec_rw(zram, &bv, index, offset, rw) < 0)
				goto out;

			bv.bv_len = bvec.bv_len - max_transfer_size;
			bv.bv_offset += max_transfer_size;
			if (zram_bvec_rw(zram, &bv, index + 1, 0, rw) < 0)
				goto out;
		} else
			if (zram_bvec_rw(zram, &bvec, index, offset, rw) < 0)
				goto out;

		update_position(&index, &offset, &bvec);
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return;

out:
	bio_io_error(bio);
}
/*
 * Handler function for all zram I/O requests.
 */
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;

	if (unlikely(!zram_meta_get(zram)))
		goto error;

	if (!valid_io_request(zram, bio->bi_iter.bi_sector,
					bio->bi_iter.bi_size)) {
		atomic64_inc(&zram->stats.invalid_io);
		goto put_zram;
	}

	__zram_make_request(zram, bio);
	zram_meta_put(zram);
	return;
put_zram:
	zram_meta_put(zram);
error:
	bio_io_error(bio);
}
static void zram_slot_free_notify(struct block_device *bdev,
				unsigned long index)
{
	struct zram *zram;
	struct zram_meta *meta;

	zram = bdev->bd_disk->private_data;
	meta = zram->meta;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	zram_free_page(zram, index);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
	atomic64_inc(&zram->stats.notify_free);
}
static int zram_rw_page(struct block_device *bdev, sector_t sector,
			struct page *page, int rw)
{
	int offset, err = -EIO;
	u32 index;
	struct zram *zram;
	struct bio_vec bv;

	zram = bdev->bd_disk->private_data;
	if (unlikely(!zram_meta_get(zram)))
		goto out;

	if (!valid_io_request(zram, sector, PAGE_SIZE)) {
		atomic64_inc(&zram->stats.invalid_io);
		err = -EINVAL;
		goto put_zram;
	}

	index = sector >> SECTORS_PER_PAGE_SHIFT;
	/* parenthesized: '<<' binds tighter than '&', so the unbracketed
	 * form would mask the sector with the wrong value */
	offset = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	bv.bv_page = page;
	bv.bv_len = PAGE_SIZE;
	bv.bv_offset = 0;

	err = zram_bvec_rw(zram, &bv, index, offset, rw);
put_zram:
	zram_meta_put(zram);
out:
	/*
	 * If I/O fails, just return the error (i.e. non-zero) without
	 * calling page_endio. That makes the callers of rw_page (e.g.
	 * swap_readpage, __swap_writepage) resubmit the I/O as a bio
	 * request, and bio->bi_end_io then handles the error (e.g.
	 * SetPageError, set_page_dirty and extra work).
	 */
	if (err == 0)
		page_endio(page, rw, 0);
	return err;
}
static const struct block_device_operations zram_devops = {
	.swap_slot_free_notify = zram_slot_free_notify,
	.rw_page = zram_rw_page,
	.owner = THIS_MODULE
};
static DEVICE_ATTR_RW(disksize);
static DEVICE_ATTR_RO(initstate);
static DEVICE_ATTR_WO(reset);
static DEVICE_ATTR_RO(orig_data_size);
static DEVICE_ATTR_RO(mem_used_total);
static DEVICE_ATTR_RW(mem_limit);
static DEVICE_ATTR_RW(mem_used_max);
static DEVICE_ATTR_RW(max_comp_streams);
static DEVICE_ATTR_RW(comp_algorithm);
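/*
 * io_stat columns: failed_reads failed_writes invalid_io notify_free
 */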
static ssize_t io_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	ssize_t ret;

	down_read(&zram->init_lock);
	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu %8llu %8llu\n",
			(u64)atomic64_read(&zram->stats.failed_reads),
			(u64)atomic64_read(&zram->stats.failed_writes),
			(u64)atomic64_read(&zram->stats.invalid_io),
			(u64)atomic64_read(&zram->stats.notify_free));
	up_read(&zram->init_lock);

	return ret;
}
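/*
 * mm_stat columns: orig_data_size compr_data_size mem_used_total
 * mem_limit mem_used_max zero_pages num_migrated
 */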
static ssize_t mm_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	u64 orig_size, mem_used = 0;
	long max_used;
	ssize_t ret;

	down_read(&zram->init_lock);
	if (init_done(zram))
		mem_used = zs_get_total_pages(zram->meta->mem_pool);

	orig_size = atomic64_read(&zram->stats.pages_stored);
	max_used = atomic_long_read(&zram->stats.max_used_pages);

	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu %8llu %8lu %8ld %8llu %8llu\n",
			orig_size << PAGE_SHIFT,
			(u64)atomic64_read(&zram->stats.compr_data_size),
			mem_used << PAGE_SHIFT,
			zram->limit_pages << PAGE_SHIFT,
			max_used << PAGE_SHIFT,
			(u64)atomic64_read(&zram->stats.zero_pages),
			(u64)atomic64_read(&zram->stats.num_migrated));
	up_read(&zram->init_lock);

	return ret;
}
static DEVICE_ATTR_RO(io_stat);
static DEVICE_ATTR_RO(mm_stat);
ZRAM_ATTR_RO(num_reads);
ZRAM_ATTR_RO(num_writes);
ZRAM_ATTR_RO(failed_reads);
ZRAM_ATTR_RO(failed_writes);
ZRAM_ATTR_RO(invalid_io);
ZRAM_ATTR_RO(notify_free);
ZRAM_ATTR_RO(zero_pages);
ZRAM_ATTR_RO(compr_data_size);
static struct attribute *zram_disk_attrs[] = {
	&dev_attr_disksize.attr,
	&dev_attr_initstate.attr,
	&dev_attr_reset.attr,
	&dev_attr_num_reads.attr,
	&dev_attr_num_writes.attr,
	&dev_attr_failed_reads.attr,
	&dev_attr_failed_writes.attr,
	&dev_attr_invalid_io.attr,
	&dev_attr_notify_free.attr,
	&dev_attr_zero_pages.attr,
	&dev_attr_orig_data_size.attr,
	&dev_attr_compr_data_size.attr,
	&dev_attr_mem_used_total.attr,
	&dev_attr_mem_limit.attr,
	&dev_attr_mem_used_max.attr,
	&dev_attr_max_comp_streams.attr,
	&dev_attr_comp_algorithm.attr,
	&dev_attr_io_stat.attr,
	&dev_attr_mm_stat.attr,
	NULL,
};
static struct attribute_group zram_disk_attr_group = {
	.attrs = zram_disk_attrs,
};
static int create_device(struct zram *zram, int device_id)
{
	struct request_queue *queue;
	int ret = -ENOMEM;

	init_rwsem(&zram->init_lock);

	queue = blk_alloc_queue(GFP_KERNEL);
	if (!queue) {
		pr_err("Error allocating disk queue for device %d\n",
			device_id);
		goto out;
	}

	blk_queue_make_request(queue, zram_make_request);

	/* gendisk structure */
	zram->disk = alloc_disk(1);
	if (!zram->disk) {
		pr_warn("Error allocating disk structure for device %d\n",
			device_id);
		goto out_free_queue;
	}

	zram->disk->major = zram_major;
	zram->disk->first_minor = device_id;
	zram->disk->fops = &zram_devops;
	zram->disk->queue = queue;
	zram->disk->queue->queuedata = zram;
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
	set_capacity(zram->disk, 0);
	/* zram devices sort of resemble non-rotational disks */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
	/*
	 * To ensure that we always get PAGE_SIZE aligned
	 * and n*PAGE_SIZE sized I/O requests.
	 */
	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_logical_block_size(zram->disk->queue,
					ZRAM_LOGICAL_BLOCK_SIZE);
	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
	zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
	zram->disk->queue->limits.max_discard_sectors = UINT_MAX;
	/*
	 * zram_bio_discard() will clear all logical blocks if the logical
	 * block size is identical to the physical block size (PAGE_SIZE).
	 * But if it is different, we will skip discarding some parts of
	 * logical blocks in the part of the request range which isn't
	 * aligned to physical block size. So we can't ensure that all
	 * discarded logical blocks are zeroed.
	 */
	if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
		zram->disk->queue->limits.discard_zeroes_data = 1;
	else
		zram->disk->queue->limits.discard_zeroes_data = 0;
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue);

	add_disk(zram->disk);

	ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
				&zram_disk_attr_group);
	if (ret < 0) {
		pr_warn("Error creating sysfs group");
		goto out_free_disk;
	}
	strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
	zram->meta = NULL;
	zram->max_comp_streams = 1;
	return 0;

out_free_disk:
	del_gendisk(zram->disk);
	put_disk(zram->disk);
out_free_queue:
	blk_cleanup_queue(queue);
out:
	return ret;
}
static void destroy_devices(unsigned int nr)
{
	struct zram *zram;
	unsigned int i;

	for (i = 0; i < nr; i++) {
		zram = &zram_devices[i];
		/*
		 * Remove sysfs first, so no one will perform a disksize
		 * store while we destroy the devices.
		 */
		sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
				&zram_disk_attr_group);

		zram_reset_device(zram);

		blk_cleanup_queue(zram->disk->queue);
		del_gendisk(zram->disk);
		put_disk(zram->disk);
	}

	kfree(zram_devices);
	unregister_blkdev(zram_major, "zram");
	pr_info("Destroyed %u device(s)\n", nr);
}
static int __init zram_init(void)
{
	int ret, dev_id;

	if (num_devices > max_num_devices) {
		pr_warn("Invalid value for num_devices: %u\n",
				num_devices);
		return -EINVAL;
	}

	zram_major = register_blkdev(0, "zram");
	if (zram_major <= 0) {
		pr_warn("Unable to get major number\n");
		return -EBUSY;
	}

	/* Allocate the device array and initialize each one */
	zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
	if (!zram_devices) {
		unregister_blkdev(zram_major, "zram");
		return -ENOMEM;
	}

	for (dev_id = 0; dev_id < num_devices; dev_id++) {
		ret = create_device(&zram_devices[dev_id], dev_id);
		if (ret)
			goto out_error;
	}

	pr_info("Created %u device(s)\n", num_devices);
	return 0;

out_error:
	destroy_devices(dev_id);
	return ret;
}
static void __exit zram_exit(void)
{
	destroy_devices(num_devices);
}
module_init(zram_init);
module_exit(zram_exit);
module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");
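/*
 * Example (an illustration, not from this file): loading the module
 * with "modprobe zram num_devices=4" creates /dev/zram0../dev/zram3,
 * per the "zram%d" naming in create_device().
 */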
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");