[linux-beck.git] drivers/block/zram/zram_drv.c (blob at "zram: cosmetic zram_bvec_write() cleanup")
/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/sysfs.h>

#include "zram_drv.h"

static DEFINE_IDR(zram_index_idr);
/* idr index must be protected */
static DEFINE_MUTEX(zram_index_mutex);

static int zram_major;
static const char *default_compressor = "lzo";

/* Module params (documentation at end) */
static unsigned int num_devices = 1;

static inline void deprecated_attr_warn(const char *name)
{
        pr_warn_once("%d (%s) Attribute %s (and others) will be removed. %s\n",
                        task_pid_nr(current),
                        current->comm,
                        name,
                        "See zram documentation.");
}

#define ZRAM_ATTR_RO(name)                                              \
static ssize_t name##_show(struct device *d,                            \
                                struct device_attribute *attr, char *b) \
{                                                                       \
        struct zram *zram = dev_to_zram(d);                             \
                                                                        \
        deprecated_attr_warn(__stringify(name));                        \
        return scnprintf(b, PAGE_SIZE, "%llu\n",                        \
                (u64)atomic64_read(&zram->stats.name));                 \
}                                                                       \
static DEVICE_ATTR_RO(name);

static inline bool init_done(struct zram *zram)
{
        return zram->disksize;
}

static inline struct zram *dev_to_zram(struct device *dev)
{
        return (struct zram *)dev_to_disk(dev)->private_data;
}

/* flag operations require the table entry's bit_spin_lock() to be held */
static int zram_test_flag(struct zram_meta *meta, u32 index,
                        enum zram_pageflags flag)
{
        return meta->table[index].value & BIT(flag);
}

static void zram_set_flag(struct zram_meta *meta, u32 index,
                        enum zram_pageflags flag)
{
        meta->table[index].value |= BIT(flag);
}

static void zram_clear_flag(struct zram_meta *meta, u32 index,
                        enum zram_pageflags flag)
{
        meta->table[index].value &= ~BIT(flag);
}

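/*
 * Each meta->table[index].value packs two fields into one unsigned long:
 * the compressed object size in the low ZRAM_FLAG_SHIFT bits, and the
 * zram_pageflags (e.g. ZRAM_ACCESS, ZRAM_ZERO) in the bits above them,
 * as the two accessors below encode.
 */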
static size_t zram_get_obj_size(struct zram_meta *meta, u32 index)
{
        return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
}

static void zram_set_obj_size(struct zram_meta *meta,
                                        u32 index, size_t size)
{
        unsigned long flags = meta->table[index].value >> ZRAM_FLAG_SHIFT;

        meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
}

static inline int is_partial_io(struct bio_vec *bvec)
{
        return bvec->bv_len != PAGE_SIZE;
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram,
                sector_t start, unsigned int size)
{
        u64 end, bound;

        /* unaligned request */
        if (unlikely(start & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
                return 0;
        if (unlikely(size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
                return 0;

        end = start + (size >> SECTOR_SHIFT);
        bound = zram->disksize >> SECTOR_SHIFT;
        /* out of range */
        if (unlikely(start >= bound || end > bound || start > end))
                return 0;

        /* I/O request is valid */
        return 1;
}

static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
        if (*offset + bvec->bv_len >= PAGE_SIZE)
                (*index)++;
        *offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

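/*
 * Lock-free max update: the cmpxchg loop below only installs @pages when
 * it is still greater than the currently recorded maximum, so concurrent
 * updaters can never lower max_used_pages.
 */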
static inline void update_used_max(struct zram *zram,
                                        const unsigned long pages)
{
        unsigned long old_max, cur_max;

        old_max = atomic_long_read(&zram->stats.max_used_pages);

        do {
                cur_max = old_max;
                if (pages > cur_max)
                        old_max = atomic_long_cmpxchg(
                                &zram->stats.max_used_pages, cur_max, pages);
        } while (old_max != cur_max);
}

static int page_zero_filled(void *ptr)
{
        unsigned int pos;
        unsigned long *page;

        page = (unsigned long *)ptr;

        for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
                if (page[pos])
                        return 0;
        }

        return 1;
}

static void handle_zero_page(struct bio_vec *bvec)
{
        struct page *page = bvec->bv_page;
        void *user_mem;

        user_mem = kmap_atomic(page);
        if (is_partial_io(bvec))
                memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
        else
                clear_page(user_mem);
        kunmap_atomic(user_mem);

        flush_dcache_page(page);
}

static ssize_t initstate_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        u32 val;
        struct zram *zram = dev_to_zram(dev);

        down_read(&zram->init_lock);
        val = init_done(zram);
        up_read(&zram->init_lock);

        return scnprintf(buf, PAGE_SIZE, "%u\n", val);
}

static ssize_t disksize_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);

        return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
}

static ssize_t orig_data_size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);

        deprecated_attr_warn("orig_data_size");
        return scnprintf(buf, PAGE_SIZE, "%llu\n",
                (u64)(atomic64_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
}

static ssize_t mem_used_total_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        u64 val = 0;
        struct zram *zram = dev_to_zram(dev);

        deprecated_attr_warn("mem_used_total");
        down_read(&zram->init_lock);
        if (init_done(zram)) {
                struct zram_meta *meta = zram->meta;
                val = zs_get_total_pages(meta->mem_pool);
        }
        up_read(&zram->init_lock);

        return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

static ssize_t mem_limit_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        u64 val;
        struct zram *zram = dev_to_zram(dev);

        deprecated_attr_warn("mem_limit");
        down_read(&zram->init_lock);
        val = zram->limit_pages;
        up_read(&zram->init_lock);

        return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

static ssize_t mem_limit_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        u64 limit;
        char *tmp;
        struct zram *zram = dev_to_zram(dev);

        limit = memparse(buf, &tmp);
        if (buf == tmp) /* no chars parsed, invalid input */
                return -EINVAL;

        down_write(&zram->init_lock);
        zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
        up_write(&zram->init_lock);

        return len;
}

static ssize_t mem_used_max_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        u64 val = 0;
        struct zram *zram = dev_to_zram(dev);

        deprecated_attr_warn("mem_used_max");
        down_read(&zram->init_lock);
        if (init_done(zram))
                val = atomic_long_read(&zram->stats.max_used_pages);
        up_read(&zram->init_lock);

        return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

static ssize_t mem_used_max_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        int err;
        unsigned long val;
        struct zram *zram = dev_to_zram(dev);

        err = kstrtoul(buf, 10, &val);
        if (err || val != 0)
                return -EINVAL;

        down_read(&zram->init_lock);
        if (init_done(zram)) {
                struct zram_meta *meta = zram->meta;
                atomic_long_set(&zram->stats.max_used_pages,
                                zs_get_total_pages(meta->mem_pool));
        }
        up_read(&zram->init_lock);

        return len;
}

static ssize_t max_comp_streams_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        int val;
        struct zram *zram = dev_to_zram(dev);

        down_read(&zram->init_lock);
        val = zram->max_comp_streams;
        up_read(&zram->init_lock);

        return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}

static ssize_t max_comp_streams_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        int num;
        struct zram *zram = dev_to_zram(dev);
        int ret;

        ret = kstrtoint(buf, 0, &num);
        if (ret < 0)
                return ret;
        if (num < 1)
                return -EINVAL;

        down_write(&zram->init_lock);
        if (init_done(zram)) {
                if (!zcomp_set_max_streams(zram->comp, num)) {
                        pr_info("Cannot change max compression streams\n");
                        ret = -EINVAL;
                        goto out;
                }
        }

        zram->max_comp_streams = num;
        ret = len;
out:
        up_write(&zram->init_lock);
        return ret;
}

static ssize_t comp_algorithm_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        size_t sz;
        struct zram *zram = dev_to_zram(dev);

        down_read(&zram->init_lock);
        sz = zcomp_available_show(zram->compressor, buf);
        up_read(&zram->init_lock);

        return sz;
}

static ssize_t comp_algorithm_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct zram *zram = dev_to_zram(dev);

        down_write(&zram->init_lock);
        if (init_done(zram)) {
                up_write(&zram->init_lock);
                pr_info("Can't change algorithm for initialized device\n");
                return -EBUSY;
        }
        strlcpy(zram->compressor, buf, sizeof(zram->compressor));
        up_write(&zram->init_lock);
        return len;
}

static ssize_t compact_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        unsigned long nr_migrated;
        struct zram *zram = dev_to_zram(dev);
        struct zram_meta *meta;

        down_read(&zram->init_lock);
        if (!init_done(zram)) {
                up_read(&zram->init_lock);
                return -EINVAL;
        }

        meta = zram->meta;
        nr_migrated = zs_compact(meta->mem_pool);
        atomic64_add(nr_migrated, &zram->stats.num_migrated);
        up_read(&zram->init_lock);

        return len;
}

static ssize_t io_stat_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);
        ssize_t ret;

        down_read(&zram->init_lock);
        ret = scnprintf(buf, PAGE_SIZE,
                        "%8llu %8llu %8llu %8llu\n",
                        (u64)atomic64_read(&zram->stats.failed_reads),
                        (u64)atomic64_read(&zram->stats.failed_writes),
                        (u64)atomic64_read(&zram->stats.invalid_io),
                        (u64)atomic64_read(&zram->stats.notify_free));
        up_read(&zram->init_lock);

        return ret;
}

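/*
 * mm_stat columns, in order: orig_data_size, compr_data_size,
 * mem_used_total, mem_limit, mem_used_max (all five in bytes),
 * then the zero_pages and num_migrated counters.
 */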
static ssize_t mm_stat_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);
        u64 orig_size, mem_used = 0;
        long max_used;
        ssize_t ret;

        down_read(&zram->init_lock);
        if (init_done(zram))
                mem_used = zs_get_total_pages(zram->meta->mem_pool);

        orig_size = atomic64_read(&zram->stats.pages_stored);
        max_used = atomic_long_read(&zram->stats.max_used_pages);

        ret = scnprintf(buf, PAGE_SIZE,
                        "%8llu %8llu %8llu %8lu %8ld %8llu %8llu\n",
                        orig_size << PAGE_SHIFT,
                        (u64)atomic64_read(&zram->stats.compr_data_size),
                        mem_used << PAGE_SHIFT,
                        zram->limit_pages << PAGE_SHIFT,
                        max_used << PAGE_SHIFT,
                        (u64)atomic64_read(&zram->stats.zero_pages),
                        (u64)atomic64_read(&zram->stats.num_migrated));
        up_read(&zram->init_lock);

        return ret;
}

static DEVICE_ATTR_RO(io_stat);
static DEVICE_ATTR_RO(mm_stat);
ZRAM_ATTR_RO(num_reads);
ZRAM_ATTR_RO(num_writes);
ZRAM_ATTR_RO(failed_reads);
ZRAM_ATTR_RO(failed_writes);
ZRAM_ATTR_RO(invalid_io);
ZRAM_ATTR_RO(notify_free);
ZRAM_ATTR_RO(zero_pages);
ZRAM_ATTR_RO(compr_data_size);

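/*
 * zram->refcount is set to 1 in disksize_store(); each in-flight I/O
 * takes an extra reference.  zram_reset_device() drops the initial
 * reference and waits for the count to hit zero, after which the
 * atomic_inc_not_zero() below fails and new I/O bails out.
 */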
static inline bool zram_meta_get(struct zram *zram)
{
        if (atomic_inc_not_zero(&zram->refcount))
                return true;
        return false;
}

static inline void zram_meta_put(struct zram *zram)
{
        atomic_dec(&zram->refcount);
}

static void zram_meta_free(struct zram_meta *meta, u64 disksize)
{
        size_t num_pages = disksize >> PAGE_SHIFT;
        size_t index;

        /* Free all pages that are still in this zram device */
        for (index = 0; index < num_pages; index++) {
                unsigned long handle = meta->table[index].handle;

                if (!handle)
                        continue;

                zs_free(meta->mem_pool, handle);
        }

        zs_destroy_pool(meta->mem_pool);
        vfree(meta->table);
        kfree(meta);
}

static struct zram_meta *zram_meta_alloc(int device_id, u64 disksize)
{
        size_t num_pages;
        char pool_name[8];
        struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);

        if (!meta)
                return NULL;

        num_pages = disksize >> PAGE_SHIFT;
        meta->table = vzalloc(num_pages * sizeof(*meta->table));
        if (!meta->table) {
                pr_err("Error allocating zram address table\n");
                goto out_error;
        }

        snprintf(pool_name, sizeof(pool_name), "zram%d", device_id);
        meta->mem_pool = zs_create_pool(pool_name, GFP_NOIO | __GFP_HIGHMEM);
        if (!meta->mem_pool) {
                pr_err("Error creating memory pool\n");
                goto out_error;
        }

        return meta;

out_error:
        vfree(meta->table);
        kfree(meta);
        return NULL;
}

/*
 * To protect concurrent access to the same index entry, the caller
 * should hold the table entry's bit_spinlock to indicate that the
 * entry is being accessed.
 */
static void zram_free_page(struct zram *zram, size_t index)
{
        struct zram_meta *meta = zram->meta;
        unsigned long handle = meta->table[index].handle;

        if (unlikely(!handle)) {
                /*
                 * No memory is allocated for zero filled pages.
                 * Simply clear zero page flag.
                 */
                if (zram_test_flag(meta, index, ZRAM_ZERO)) {
                        zram_clear_flag(meta, index, ZRAM_ZERO);
                        atomic64_dec(&zram->stats.zero_pages);
                }
                return;
        }

        zs_free(meta->mem_pool, handle);

        atomic64_sub(zram_get_obj_size(meta, index),
                        &zram->stats.compr_data_size);
        atomic64_dec(&zram->stats.pages_stored);

        meta->table[index].handle = 0;
        zram_set_obj_size(meta, index, 0);
}

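/*
 * Note: objects whose recorded size equals PAGE_SIZE were stored
 * uncompressed (see the clen > max_zpage_size case in the write path),
 * so the read path below copies them back with copy_page() instead of
 * running the decompressor.
 */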
static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
        int ret = 0;
        unsigned char *cmem;
        struct zram_meta *meta = zram->meta;
        unsigned long handle;
        size_t size;

        bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
        handle = meta->table[index].handle;
        size = zram_get_obj_size(meta, index);

        if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
                bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
                clear_page(mem);
                return 0;
        }

        cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
        if (size == PAGE_SIZE)
                copy_page(mem, cmem);
        else
                ret = zcomp_decompress(zram->comp, cmem, size, mem);
        zs_unmap_object(meta->mem_pool, handle);
        bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

        /* Should NEVER happen. Return bio error if it does. */
        if (unlikely(ret)) {
                pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
                return ret;
        }

        return 0;
}

static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
                          u32 index, int offset)
{
        int ret;
        struct page *page;
        unsigned char *user_mem, *uncmem = NULL;
        struct zram_meta *meta = zram->meta;

        page = bvec->bv_page;

        bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
        if (unlikely(!meta->table[index].handle) ||
                        zram_test_flag(meta, index, ZRAM_ZERO)) {
                bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
                handle_zero_page(bvec);
                return 0;
        }
        bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

        if (is_partial_io(bvec))
                /* Use a temporary buffer to decompress the page */
                uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);

        user_mem = kmap_atomic(page);
        if (!is_partial_io(bvec))
                uncmem = user_mem;

        if (!uncmem) {
                pr_info("Unable to allocate temp memory\n");
                ret = -ENOMEM;
                goto out_cleanup;
        }

        ret = zram_decompress_page(zram, uncmem, index);
        /* Should NEVER happen. Return bio error if it does. */
        if (unlikely(ret))
                goto out_cleanup;

        if (is_partial_io(bvec))
                memcpy(user_mem + bvec->bv_offset, uncmem + offset,
                                bvec->bv_len);

        flush_dcache_page(page);
        ret = 0;
out_cleanup:
        kunmap_atomic(user_mem);
        if (is_partial_io(bvec))
                kfree(uncmem);
        return ret;
}

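/*
 * Write path overview: a partial bio_vec triggers read-modify-write
 * (the old page is decompressed into a bounce buffer and patched);
 * zero-filled pages are recorded via the ZRAM_ZERO flag instead of
 * being stored; and data that compresses poorly (clen > max_zpage_size)
 * is stored uncompressed as a full PAGE_SIZE object.
 */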
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
                           int offset)
{
        int ret = 0;
        size_t clen;
        unsigned long handle;
        struct page *page;
        unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
        struct zram_meta *meta = zram->meta;
        struct zcomp_strm *zstrm = NULL;
        unsigned long alloced_pages;

        page = bvec->bv_page;
        if (is_partial_io(bvec)) {
                /*
                 * This is a partial IO. We need to read the full page
                 * before writing the changes.
                 */
                uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
                if (!uncmem) {
                        ret = -ENOMEM;
                        goto out;
                }
                ret = zram_decompress_page(zram, uncmem, index);
                if (ret)
                        goto out;
        }

        zstrm = zcomp_strm_find(zram->comp);
        user_mem = kmap_atomic(page);

        if (is_partial_io(bvec)) {
                memcpy(uncmem + offset, user_mem + bvec->bv_offset,
                       bvec->bv_len);
                kunmap_atomic(user_mem);
                user_mem = NULL;
        } else {
                uncmem = user_mem;
        }

        if (page_zero_filled(uncmem)) {
                if (user_mem)
                        kunmap_atomic(user_mem);
                /* Free memory associated with this sector now. */
                bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
                zram_free_page(zram, index);
                zram_set_flag(meta, index, ZRAM_ZERO);
                bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

                atomic64_inc(&zram->stats.zero_pages);
                ret = 0;
                goto out;
        }

        ret = zcomp_compress(zram->comp, zstrm, uncmem, &clen);
        if (!is_partial_io(bvec)) {
                kunmap_atomic(user_mem);
                user_mem = NULL;
                uncmem = NULL;
        }

        if (unlikely(ret)) {
                pr_err("Compression failed! err=%d\n", ret);
                goto out;
        }

        src = zstrm->buffer;
        if (unlikely(clen > max_zpage_size)) {
                clen = PAGE_SIZE;
                if (is_partial_io(bvec))
                        src = uncmem;
        }

        handle = zs_malloc(meta->mem_pool, clen);
        if (!handle) {
                pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
                        index, clen);
                ret = -ENOMEM;
                goto out;
        }

        alloced_pages = zs_get_total_pages(meta->mem_pool);
        if (zram->limit_pages && alloced_pages > zram->limit_pages) {
                zs_free(meta->mem_pool, handle);
                ret = -ENOMEM;
                goto out;
        }

        update_used_max(zram, alloced_pages);

        cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);

        if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
                src = kmap_atomic(page);
                copy_page(cmem, src);
                kunmap_atomic(src);
        } else {
                memcpy(cmem, src, clen);
        }

        zcomp_strm_release(zram->comp, zstrm);
        zstrm = NULL;
        zs_unmap_object(meta->mem_pool, handle);

        /*
         * Free memory associated with this sector
         * before overwriting unused sectors.
         */
        bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
        zram_free_page(zram, index);

        meta->table[index].handle = handle;
        zram_set_obj_size(meta, index, clen);
        bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

        /* Update stats */
        atomic64_add(clen, &zram->stats.compr_data_size);
        atomic64_inc(&zram->stats.pages_stored);
out:
        if (zstrm)
                zcomp_strm_release(zram->comp, zstrm);
        if (is_partial_io(bvec))
                kfree(uncmem);
        return ret;
}

/*
 * zram_bio_discard - handler on discard request
 * @index: physical block index in PAGE_SIZE units
 * @offset: byte offset within physical block
 */
static void zram_bio_discard(struct zram *zram, u32 index,
                             int offset, struct bio *bio)
{
        size_t n = bio->bi_iter.bi_size;
        struct zram_meta *meta = zram->meta;

        /*
         * zram manages data in physical block size units. Because logical block
         * size isn't identical with physical block size on some arch, we
         * could get a discard request pointing to a specific offset within a
         * certain physical block.  Although we can handle this request by
         * reading that physical block and decompressing and partially zeroing
         * and re-compressing and then re-storing it, this isn't reasonable
         * because our intent with a discard request is to save memory.  So
         * skipping this logical block is appropriate here.
         */
        if (offset) {
                if (n <= (PAGE_SIZE - offset))
                        return;

                n -= (PAGE_SIZE - offset);
                index++;
        }

        while (n >= PAGE_SIZE) {
                bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
                zram_free_page(zram, index);
                bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
                atomic64_inc(&zram->stats.notify_free);
                index++;
                n -= PAGE_SIZE;
        }
}

static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
                        int offset, int rw)
{
        unsigned long start_time = jiffies;
        int ret;

        generic_start_io_acct(rw, bvec->bv_len >> SECTOR_SHIFT,
                        &zram->disk->part0);

        if (rw == READ) {
                atomic64_inc(&zram->stats.num_reads);
                ret = zram_bvec_read(zram, bvec, index, offset);
        } else {
                atomic64_inc(&zram->stats.num_writes);
                ret = zram_bvec_write(zram, bvec, index, offset);
        }

        generic_end_io_acct(rw, &zram->disk->part0, start_time);

        if (unlikely(ret)) {
                if (rw == READ)
                        atomic64_inc(&zram->stats.failed_reads);
                else
                        atomic64_inc(&zram->stats.failed_writes);
        }

        return ret;
}

static void __zram_make_request(struct zram *zram, struct bio *bio)
{
        int offset, rw;
        u32 index;
        struct bio_vec bvec;
        struct bvec_iter iter;

        index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
        offset = (bio->bi_iter.bi_sector &
                  (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

        if (unlikely(bio->bi_rw & REQ_DISCARD)) {
                zram_bio_discard(zram, index, offset, bio);
                bio_endio(bio, 0);
                return;
        }

        rw = bio_data_dir(bio);
        bio_for_each_segment(bvec, bio, iter) {
                int max_transfer_size = PAGE_SIZE - offset;

                if (bvec.bv_len > max_transfer_size) {
                        /*
                         * zram_bvec_rw() can only operate on a single
                         * zram page. Split the bio vector.
                         */
                        struct bio_vec bv;

                        bv.bv_page = bvec.bv_page;
                        bv.bv_len = max_transfer_size;
                        bv.bv_offset = bvec.bv_offset;

                        if (zram_bvec_rw(zram, &bv, index, offset, rw) < 0)
                                goto out;

                        bv.bv_len = bvec.bv_len - max_transfer_size;
                        bv.bv_offset += max_transfer_size;
                        if (zram_bvec_rw(zram, &bv, index + 1, 0, rw) < 0)
                                goto out;
                } else {
                        if (zram_bvec_rw(zram, &bvec, index, offset, rw) < 0)
                                goto out;
                }

                update_position(&index, &offset, &bvec);
        }

        set_bit(BIO_UPTODATE, &bio->bi_flags);
        bio_endio(bio, 0);
        return;

out:
        bio_io_error(bio);
}

/*
 * Handler function for all zram I/O requests.
 */
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
        struct zram *zram = queue->queuedata;

        if (unlikely(!zram_meta_get(zram)))
                goto error;

        if (!valid_io_request(zram, bio->bi_iter.bi_sector,
                                        bio->bi_iter.bi_size)) {
                atomic64_inc(&zram->stats.invalid_io);
                goto put_zram;
        }

        __zram_make_request(zram, bio);
        zram_meta_put(zram);
        return;
put_zram:
        zram_meta_put(zram);
error:
        bio_io_error(bio);
}

static void zram_slot_free_notify(struct block_device *bdev,
                                unsigned long index)
{
        struct zram *zram;
        struct zram_meta *meta;

        zram = bdev->bd_disk->private_data;
        meta = zram->meta;

        bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
        zram_free_page(zram, index);
        bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
        atomic64_inc(&zram->stats.notify_free);
}

static int zram_rw_page(struct block_device *bdev, sector_t sector,
                       struct page *page, int rw)
{
        int offset, err = -EIO;
        u32 index;
        struct zram *zram;
        struct bio_vec bv;

        zram = bdev->bd_disk->private_data;
        if (unlikely(!zram_meta_get(zram)))
                goto out;

        if (!valid_io_request(zram, sector, PAGE_SIZE)) {
                atomic64_inc(&zram->stats.invalid_io);
                err = -EINVAL;
                goto put_zram;
        }

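        /*
         * The parentheses around the mask matter: in C the shift binds
         * tighter than '&', so without them the shift would be applied
         * to the mask itself and the computed offset would be bogus.
         */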
        index = sector >> SECTORS_PER_PAGE_SHIFT;
        offset = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

        bv.bv_page = page;
        bv.bv_len = PAGE_SIZE;
        bv.bv_offset = 0;

        err = zram_bvec_rw(zram, &bv, index, offset, rw);
put_zram:
        zram_meta_put(zram);
out:
        /*
         * If I/O fails, just return the error (i.e., non-zero) without
         * calling page_endio.  The upper callers of rw_page (e.g.,
         * swap_readpage, __swap_writepage) will then resubmit the I/O
         * as a bio request, and bio->bi_end_io handles the error
         * (e.g., SetPageError, set_page_dirty and extra work).
         */
        if (err == 0)
                page_endio(page, rw, 0);
        return err;
}

static void zram_reset_device(struct zram *zram)
{
        struct zram_meta *meta;
        struct zcomp *comp;
        u64 disksize;

        down_write(&zram->init_lock);

        zram->limit_pages = 0;

        if (!init_done(zram)) {
                up_write(&zram->init_lock);
                return;
        }

        meta = zram->meta;
        comp = zram->comp;
        disksize = zram->disksize;
        /*
         * The refcount will eventually drop to 0, after which the r/w
         * handler cannot accept further I/O and bails out at the
         * zram_meta_get() check.
         */
        zram_meta_put(zram);
        /*
         * We want to free zram_meta in process context to avoid
         * deadlock between reclaim path and any other locks.
         */
        wait_event(zram->io_done, atomic_read(&zram->refcount) == 0);

        /* Reset stats */
        memset(&zram->stats, 0, sizeof(zram->stats));
        zram->disksize = 0;
        zram->max_comp_streams = 1;

        set_capacity(zram->disk, 0);
        part_stat_set_all(&zram->disk->part0, 0);

        up_write(&zram->init_lock);
        /* All in-flight I/O on all CPUs is done, so it is safe to free */
        zram_meta_free(meta, disksize);
        zcomp_destroy(comp);
}

static ssize_t disksize_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        u64 disksize;
        struct zcomp *comp;
        struct zram_meta *meta;
        struct zram *zram = dev_to_zram(dev);
        int err;

        disksize = memparse(buf, NULL);
        if (!disksize)
                return -EINVAL;

        disksize = PAGE_ALIGN(disksize);
        meta = zram_meta_alloc(zram->disk->first_minor, disksize);
        if (!meta)
                return -ENOMEM;

        comp = zcomp_create(zram->compressor, zram->max_comp_streams);
        if (IS_ERR(comp)) {
                pr_info("Cannot initialise %s compressing backend\n",
                                zram->compressor);
                err = PTR_ERR(comp);
                goto out_free_meta;
        }

        down_write(&zram->init_lock);
        if (init_done(zram)) {
                pr_info("Cannot change disksize for initialized device\n");
                err = -EBUSY;
                goto out_destroy_comp;
        }

        init_waitqueue_head(&zram->io_done);
        atomic_set(&zram->refcount, 1);
        zram->meta = meta;
        zram->comp = comp;
        zram->disksize = disksize;
        set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
        up_write(&zram->init_lock);

        /*
         * Revalidate disk out of the init_lock to avoid lockdep splat.
         * It's okay because disk's capacity is protected by init_lock
         * so that revalidate_disk always sees up-to-date capacity.
         */
        revalidate_disk(zram->disk);

        return len;

out_destroy_comp:
        up_write(&zram->init_lock);
        zcomp_destroy(comp);
out_free_meta:
        zram_meta_free(meta, disksize);
        return err;
}

static ssize_t reset_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        int ret;
        unsigned short do_reset;
        struct zram *zram;
        struct block_device *bdev;

        ret = kstrtou16(buf, 10, &do_reset);
        if (ret)
                return ret;

        if (!do_reset)
                return -EINVAL;

        zram = dev_to_zram(dev);
        bdev = bdget_disk(zram->disk, 0);
        if (!bdev)
                return -ENOMEM;

        mutex_lock(&bdev->bd_mutex);
        /* Do not reset an active or claimed device */
        if (bdev->bd_openers || zram->claim) {
                mutex_unlock(&bdev->bd_mutex);
                bdput(bdev);
                return -EBUSY;
        }

        /* From now on, no one can open /dev/zram[0-9] */
        zram->claim = true;
        mutex_unlock(&bdev->bd_mutex);

        /* Make sure all pending I/O is finished */
        fsync_bdev(bdev);
        zram_reset_device(zram);
        revalidate_disk(zram->disk);
        bdput(bdev);

        mutex_lock(&bdev->bd_mutex);
        zram->claim = false;
        mutex_unlock(&bdev->bd_mutex);

        return len;
}

static int zram_open(struct block_device *bdev, fmode_t mode)
{
        int ret = 0;
        struct zram *zram;

        WARN_ON(!mutex_is_locked(&bdev->bd_mutex));

        zram = bdev->bd_disk->private_data;
        /* zram was claimed for reset, so the open request fails */
        if (zram->claim)
                ret = -EBUSY;

        return ret;
}

static const struct block_device_operations zram_devops = {
        .open = zram_open,
        .swap_slot_free_notify = zram_slot_free_notify,
        .rw_page = zram_rw_page,
        .owner = THIS_MODULE
};

static DEVICE_ATTR_WO(compact);
static DEVICE_ATTR_RW(disksize);
static DEVICE_ATTR_RO(initstate);
static DEVICE_ATTR_WO(reset);
static DEVICE_ATTR_RO(orig_data_size);
static DEVICE_ATTR_RO(mem_used_total);
static DEVICE_ATTR_RW(mem_limit);
static DEVICE_ATTR_RW(mem_used_max);
static DEVICE_ATTR_RW(max_comp_streams);
static DEVICE_ATTR_RW(comp_algorithm);

static struct attribute *zram_disk_attrs[] = {
        &dev_attr_disksize.attr,
        &dev_attr_initstate.attr,
        &dev_attr_reset.attr,
        &dev_attr_num_reads.attr,
        &dev_attr_num_writes.attr,
        &dev_attr_failed_reads.attr,
        &dev_attr_failed_writes.attr,
        &dev_attr_compact.attr,
        &dev_attr_invalid_io.attr,
        &dev_attr_notify_free.attr,
        &dev_attr_zero_pages.attr,
        &dev_attr_orig_data_size.attr,
        &dev_attr_compr_data_size.attr,
        &dev_attr_mem_used_total.attr,
        &dev_attr_mem_limit.attr,
        &dev_attr_mem_used_max.attr,
        &dev_attr_max_comp_streams.attr,
        &dev_attr_comp_algorithm.attr,
        &dev_attr_io_stat.attr,
        &dev_attr_mm_stat.attr,
        NULL,
};

static struct attribute_group zram_disk_attr_group = {
        .attrs = zram_disk_attrs,
};

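/*
 * Illustrative configuration sequence from user space (device paths
 * assume the first zram device):
 *
 *   echo lzo > /sys/block/zram0/comp_algorithm
 *   echo 4 > /sys/block/zram0/max_comp_streams
 *   echo 512M > /sys/block/zram0/disksize
 *   mkswap /dev/zram0 && swapon /dev/zram0
 *
 * comp_algorithm must be chosen before disksize: its store handler
 * returns -EBUSY once the device has been initialized.
 */
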
/*
 * Allocate and initialize a new zram device. The function returns
 * a device_id >= 0 on success, and a negative value otherwise.
 */
static int zram_add(void)
{
        struct zram *zram;
        struct request_queue *queue;
        int ret, device_id;

        zram = kzalloc(sizeof(struct zram), GFP_KERNEL);
        if (!zram)
                return -ENOMEM;

        ret = idr_alloc(&zram_index_idr, zram, 0, 0, GFP_KERNEL);
        if (ret < 0)
                goto out_free_dev;
        device_id = ret;

        init_rwsem(&zram->init_lock);

        queue = blk_alloc_queue(GFP_KERNEL);
        if (!queue) {
                pr_err("Error allocating disk queue for device %d\n",
                        device_id);
                ret = -ENOMEM;
                goto out_free_idr;
        }

        blk_queue_make_request(queue, zram_make_request);

        /* gendisk structure */
        zram->disk = alloc_disk(1);
        if (!zram->disk) {
                pr_warn("Error allocating disk structure for device %d\n",
                        device_id);
                ret = -ENOMEM;
                goto out_free_queue;
        }

        zram->disk->major = zram_major;
        zram->disk->first_minor = device_id;
        zram->disk->fops = &zram_devops;
        zram->disk->queue = queue;
        zram->disk->queue->queuedata = zram;
        zram->disk->private_data = zram;
        snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

        /* Actual capacity is set using sysfs (/sys/block/zram<id>/disksize) */
        set_capacity(zram->disk, 0);
        /* zram devices sort of resemble non-rotational disks */
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
        queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
        /*
         * To ensure that we always get PAGE_SIZE-aligned
         * and n*PAGE_SIZE-sized I/O requests.
         */
        blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
        blk_queue_logical_block_size(zram->disk->queue,
                                        ZRAM_LOGICAL_BLOCK_SIZE);
        blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
        blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
        zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
        zram->disk->queue->limits.max_discard_sectors = UINT_MAX;
        /*
         * zram_bio_discard() will clear all logical blocks if logical block
         * size is identical with physical block size (PAGE_SIZE). But if it is
         * different, we will skip discarding some parts of logical blocks in
         * the part of the request range which isn't aligned to physical block
         * size.  So we can't ensure that all discarded logical blocks are
         * zeroed.
         */
        if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
                zram->disk->queue->limits.discard_zeroes_data = 1;
        else
                zram->disk->queue->limits.discard_zeroes_data = 0;
        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue);

        add_disk(zram->disk);

        ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
                                &zram_disk_attr_group);
        if (ret < 0) {
                pr_warn("Error creating sysfs group\n");
                goto out_free_disk;
        }
        strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
        zram->meta = NULL;
        zram->max_comp_streams = 1;

        pr_info("Added device: %s\n", zram->disk->disk_name);
        return device_id;

out_free_disk:
        del_gendisk(zram->disk);
        put_disk(zram->disk);
out_free_queue:
        blk_cleanup_queue(queue);
out_free_idr:
        idr_remove(&zram_index_idr, device_id);
out_free_dev:
        kfree(zram);
        return ret;
}

static int zram_remove(struct zram *zram)
{
        struct block_device *bdev;

        bdev = bdget_disk(zram->disk, 0);
        if (!bdev)
                return -ENOMEM;

        mutex_lock(&bdev->bd_mutex);
        if (bdev->bd_openers || zram->claim) {
                mutex_unlock(&bdev->bd_mutex);
                bdput(bdev);
                return -EBUSY;
        }

        zram->claim = true;
        mutex_unlock(&bdev->bd_mutex);

        /*
         * Remove sysfs first, so no one will perform a disksize
         * store while we destroy the devices. This also helps during
         * hot_remove -- zram_reset_device() is the last holder of
         * ->init_lock, so no later/concurrent disksize_store() or any
         * other sysfs handlers are possible.
         */
        sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
                        &zram_disk_attr_group);

        /* Make sure all pending I/O is finished */
        fsync_bdev(bdev);
        zram_reset_device(zram);
        bdput(bdev);

        pr_info("Removed device: %s\n", zram->disk->disk_name);

        idr_remove(&zram_index_idr, zram->disk->first_minor);
        blk_cleanup_queue(zram->disk->queue);
        del_gendisk(zram->disk);
        put_disk(zram->disk);
        kfree(zram);
        return 0;
}

/* zram-control sysfs attributes */
static ssize_t hot_add_show(struct class *class,
                        struct class_attribute *attr,
                        char *buf)
{
        int ret;

        mutex_lock(&zram_index_mutex);
        ret = zram_add();
        mutex_unlock(&zram_index_mutex);

        if (ret < 0)
                return ret;
        return scnprintf(buf, PAGE_SIZE, "%d\n", ret);
}

static ssize_t hot_remove_store(struct class *class,
                        struct class_attribute *attr,
                        const char *buf,
                        size_t count)
{
        struct zram *zram;
        int ret, dev_id;

        /* dev_id is gendisk->first_minor, which is `int' */
        ret = kstrtoint(buf, 10, &dev_id);
        if (ret)
                return ret;
        if (dev_id < 0)
                return -EINVAL;

        mutex_lock(&zram_index_mutex);

        zram = idr_find(&zram_index_idr, dev_id);
        if (zram)
                ret = zram_remove(zram);
        else
                ret = -ENODEV;

        mutex_unlock(&zram_index_mutex);
        return ret ? ret : count;
}

static struct class_attribute zram_control_class_attrs[] = {
        __ATTR_RO(hot_add),
        __ATTR_WO(hot_remove),
        __ATTR_NULL,
};

static struct class zram_control_class = {
        .name           = "zram-control",
        .owner          = THIS_MODULE,
        .class_attrs    = zram_control_class_attrs,
};

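/*
 * Illustrative use of the zram-control class from user space:
 *
 *   cat /sys/class/zram-control/hot_add        # allocates a device and
 *                                              # prints its id
 *   echo 4 > /sys/class/zram-control/hot_remove
 */
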
static int zram_remove_cb(int id, void *ptr, void *data)
{
        zram_remove(ptr);
        return 0;
}

static void destroy_devices(void)
{
        class_unregister(&zram_control_class);
        idr_for_each(&zram_index_idr, &zram_remove_cb, NULL);
        idr_destroy(&zram_index_idr);
        unregister_blkdev(zram_major, "zram");
}

static int __init zram_init(void)
{
        int ret;

        ret = class_register(&zram_control_class);
        if (ret) {
                pr_warn("Unable to register zram-control class\n");
                return ret;
        }

        zram_major = register_blkdev(0, "zram");
        if (zram_major <= 0) {
                pr_warn("Unable to get major number\n");
                class_unregister(&zram_control_class);
                return -EBUSY;
        }

        while (num_devices != 0) {
                mutex_lock(&zram_index_mutex);
                ret = zram_add();
                mutex_unlock(&zram_index_mutex);
                if (ret < 0)
                        goto out_error;
                num_devices--;
        }

        return 0;

out_error:
        destroy_devices();
        return ret;
}

static void __exit zram_exit(void)
{
        destroy_devices();
}

module_init(zram_init);
module_exit(zram_exit);

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of pre-created zram devices");

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");