/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 * Project home: http://compcache.googlecode.com
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#ifdef CONFIG_ZRAM_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include "zram_drv.h"

/* Globals */
static int zram_major;
struct zram *zram_devices;

/* Module params (documentation at end) */
static unsigned int num_devices;

static void zram_stat_inc(u32 *v)
{
        *v = *v + 1;
}

static void zram_stat_dec(u32 *v)
{
        *v = *v - 1;
}

static void zram_stat64_add(struct zram *zram, u64 *v, u64 inc)
{
        spin_lock(&zram->stat64_lock);
        *v = *v + inc;
        spin_unlock(&zram->stat64_lock);
}

static void zram_stat64_sub(struct zram *zram, u64 *v, u64 dec)
{
        spin_lock(&zram->stat64_lock);
        *v = *v - dec;
        spin_unlock(&zram->stat64_lock);
}

static void zram_stat64_inc(struct zram *zram, u64 *v)
{
        zram_stat64_add(zram, v, 1);
}

static int zram_test_flag(struct zram *zram, u32 index,
                        enum zram_pageflags flag)
{
        return zram->table[index].flags & BIT(flag);
}

static void zram_set_flag(struct zram *zram, u32 index,
                        enum zram_pageflags flag)
{
        zram->table[index].flags |= BIT(flag);
}

static void zram_clear_flag(struct zram *zram, u32 index,
                        enum zram_pageflags flag)
{
        zram->table[index].flags &= ~BIT(flag);
}

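/* Return 1 if the page at @ptr contains only zero bytes, 0 otherwise. */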
static int page_zero_filled(void *ptr)
{
        unsigned int pos;
        unsigned long *page;

        page = (unsigned long *)ptr;

        for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
                if (page[pos])
                        return 0;
        }

        return 1;
}

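/*
 * Apply a default disk size (default_disksize_perc_ram percent of RAM) when
 * none was supplied, warn if the requested size exceeds twice the amount of
 * RAM, and round the result down to a page boundary.
 */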
static void zram_set_disksize(struct zram *zram, size_t totalram_bytes)
{
        if (!zram->disksize) {
                pr_info(
                "disk size not provided. You can use disksize_kb module "
                "param to specify size.\nUsing default: (%u%% of RAM).\n",
                default_disksize_perc_ram
                );
                zram->disksize = default_disksize_perc_ram *
                                        (totalram_bytes / 100);
        }

        if (zram->disksize > 2 * (totalram_bytes)) {
                pr_info(
                "There is little point creating a zram of greater than "
                "twice the size of memory since we expect a 2:1 compression "
                "ratio. Note that zram uses about 0.1%% of the size of "
                "the disk when not in use so a huge zram is "
                "wasteful.\n"
                "\tMemory Size: %zu kB\n"
                "\tSize you selected: %llu kB\n"
                "Continuing anyway ...\n",
                totalram_bytes >> 10, zram->disksize >> 10
                );
        }

        zram->disksize &= PAGE_MASK;
}

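/*
 * Release the compressed object backing @index (if any) and update stats.
 * Zero-filled pages hold no allocation; only the ZRAM_ZERO flag is cleared.
 */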
static void zram_free_page(struct zram *zram, size_t index)
{
        unsigned long handle = zram->table[index].handle;
        u16 size = zram->table[index].size;

        if (unlikely(!handle)) {
                /*
                 * No memory is allocated for zero filled pages.
                 * Simply clear zero page flag.
                 */
                if (zram_test_flag(zram, index, ZRAM_ZERO)) {
                        zram_clear_flag(zram, index, ZRAM_ZERO);
                        zram_stat_dec(&zram->stats.pages_zero);
                }
                return;
        }

        if (unlikely(size > max_zpage_size))
                zram_stat_dec(&zram->stats.bad_compress);

        zs_free(zram->mem_pool, handle);

        if (size <= PAGE_SIZE / 2)
                zram_stat_dec(&zram->stats.good_compress);

        zram_stat64_sub(zram, &zram->stats.compr_size,
                        zram->table[index].size);
        zram_stat_dec(&zram->stats.pages_stored);

        zram->table[index].handle = 0;
        zram->table[index].size = 0;
}

static void handle_zero_page(struct bio_vec *bvec)
{
        struct page *page = bvec->bv_page;
        void *user_mem;

        user_mem = kmap_atomic(page);
        memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
        kunmap_atomic(user_mem);

        flush_dcache_page(page);
}

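/* A bvec shorter than a full page touches only part of a zram page. */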
static inline int is_partial_io(struct bio_vec *bvec)
{
        return bvec->bv_len != PAGE_SIZE;
}

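/*
 * Decompress the page at @index into the bio page. Partial reads are
 * decompressed into a temporary buffer first and the requested range is
 * then copied into the bio page.
 */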
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
                          u32 index, int offset, struct bio *bio)
{
        int ret;
        size_t clen;
        struct page *page;
        unsigned char *user_mem, *cmem, *uncmem = NULL;

        page = bvec->bv_page;

        if (zram_test_flag(zram, index, ZRAM_ZERO)) {
                handle_zero_page(bvec);
                return 0;
        }

        /* Requested page is not present in compressed area */
        if (unlikely(!zram->table[index].handle)) {
                pr_debug("Read before write: sector=%lu, size=%u",
                         (ulong)(bio->bi_sector), bio->bi_size);
                handle_zero_page(bvec);
                return 0;
        }

        if (is_partial_io(bvec)) {
                /* Use a temporary buffer to decompress the page */
                uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
                if (!uncmem) {
                        pr_info("Error allocating temp memory!\n");
                        return -ENOMEM;
                }
        }

        user_mem = kmap_atomic(page);
        if (!is_partial_io(bvec))
                uncmem = user_mem;
        clen = PAGE_SIZE;

        cmem = zs_map_object(zram->mem_pool, zram->table[index].handle);

        ret = lzo1x_decompress_safe(cmem, zram->table[index].size,
                                    uncmem, &clen);

        if (is_partial_io(bvec)) {
                memcpy(user_mem + bvec->bv_offset, uncmem + offset,
                       bvec->bv_len);
                kfree(uncmem);
        }

        zs_unmap_object(zram->mem_pool, zram->table[index].handle);
        kunmap_atomic(user_mem);

        /* Should NEVER happen. Return bio error if it does. */
        if (unlikely(ret != LZO_E_OK)) {
                pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
                zram_stat64_inc(zram, &zram->stats.failed_reads);
                return ret;
        }

        flush_dcache_page(page);

        return 0;
}

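/*
 * Decompress the current contents of @index into @mem so that a partial
 * write can be merged into the full page (read-modify-write).
 */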
static int zram_read_before_write(struct zram *zram, char *mem, u32 index)
{
        int ret;
        size_t clen = PAGE_SIZE;
        unsigned char *cmem;
        unsigned long handle = zram->table[index].handle;

        if (zram_test_flag(zram, index, ZRAM_ZERO) || !handle) {
                memset(mem, 0, PAGE_SIZE);
                return 0;
        }

        cmem = zs_map_object(zram->mem_pool, handle);
        ret = lzo1x_decompress_safe(cmem, zram->table[index].size,
                                    mem, &clen);
        zs_unmap_object(zram->mem_pool, handle);

        /* Should NEVER happen. Return bio error if it does. */
        if (unlikely(ret != LZO_E_OK)) {
                pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
                zram_stat64_inc(zram, &zram->stats.failed_reads);
                return ret;
        }

        return 0;
}

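/*
 * Compress the page (merging a partial write into the existing data first)
 * and store the result in the zsmalloc pool. Zero-filled pages are only
 * flagged and consume no pool memory.
 */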
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
                           int offset)
{
        int ret;
        size_t clen;
        unsigned long handle;
        struct page *page;
        unsigned char *user_mem, *cmem, *src, *uncmem = NULL;

        page = bvec->bv_page;
        src = zram->compress_buffer;

        if (is_partial_io(bvec)) {
                /*
                 * This is a partial IO. We need to read the full page
                 * before writing the changes.
                 */
                uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
                if (!uncmem) {
                        pr_info("Error allocating temp memory!\n");
                        ret = -ENOMEM;
                        goto out;
                }
                ret = zram_read_before_write(zram, uncmem, index);
                if (ret) {
                        kfree(uncmem);
                        goto out;
                }
        }

        /*
         * System overwrites unused sectors. Free memory associated
         * with this sector now.
         */
        if (zram->table[index].handle ||
            zram_test_flag(zram, index, ZRAM_ZERO))
                zram_free_page(zram, index);

        user_mem = kmap_atomic(page);

        if (is_partial_io(bvec))
                memcpy(uncmem + offset, user_mem + bvec->bv_offset,
                       bvec->bv_len);
        else
                uncmem = user_mem;

        if (page_zero_filled(uncmem)) {
                kunmap_atomic(user_mem);
                if (is_partial_io(bvec))
                        kfree(uncmem);
                zram_stat_inc(&zram->stats.pages_zero);
                zram_set_flag(zram, index, ZRAM_ZERO);
                ret = 0;
                goto out;
        }

        ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
                               zram->compress_workmem);

        kunmap_atomic(user_mem);
        if (is_partial_io(bvec))
                kfree(uncmem);

        if (unlikely(ret != LZO_E_OK)) {
                pr_err("Compression failed! err=%d\n", ret);
                goto out;
        }

        if (unlikely(clen > max_zpage_size))
                zram_stat_inc(&zram->stats.bad_compress);

        handle = zs_malloc(zram->mem_pool, clen);
        if (!handle) {
                pr_info("Error allocating memory for compressed "
                        "page: %u, size=%zu\n", index, clen);
                ret = -ENOMEM;
                goto out;
        }
        cmem = zs_map_object(zram->mem_pool, handle);

        memcpy(cmem, src, clen);

        zs_unmap_object(zram->mem_pool, handle);

        zram->table[index].handle = handle;
        zram->table[index].size = clen;

        /* Update stats */
        zram_stat64_add(zram, &zram->stats.compr_size, clen);
        zram_stat_inc(&zram->stats.pages_stored);
        if (clen <= PAGE_SIZE / 2)
                zram_stat_inc(&zram->stats.good_compress);

        return 0;

out:
        if (ret)
                zram_stat64_inc(zram, &zram->stats.failed_writes);
        return ret;
}

static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
                        int offset, struct bio *bio, int rw)
{
        int ret;

        if (rw == READ) {
                down_read(&zram->lock);
                ret = zram_bvec_read(zram, bvec, index, offset, bio);
                up_read(&zram->lock);
        } else {
                down_write(&zram->lock);
                ret = zram_bvec_write(zram, bvec, index, offset);
                up_write(&zram->lock);
        }

        return ret;
}

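/* Advance the page index and intra-page offset past the bvec just processed. */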
static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
        if (*offset + bvec->bv_len >= PAGE_SIZE)
                (*index)++;
        *offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

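/*
 * Walk the bio segment by segment; a bvec that crosses a zram page boundary
 * is split so that zram_bvec_rw() only ever operates on a single zram page.
 */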
static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
{
        int i, offset;
        u32 index;
        struct bio_vec *bvec;

        switch (rw) {
        case READ:
                zram_stat64_inc(zram, &zram->stats.num_reads);
                break;
        case WRITE:
                zram_stat64_inc(zram, &zram->stats.num_writes);
                break;
        }

        index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
        offset = (bio->bi_sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

        bio_for_each_segment(bvec, bio, i) {
                int max_transfer_size = PAGE_SIZE - offset;

                if (bvec->bv_len > max_transfer_size) {
                        /*
                         * zram_bvec_rw() can only operate on a single
                         * zram page. Split the bio vector.
                         */
                        struct bio_vec bv;

                        bv.bv_page = bvec->bv_page;
                        bv.bv_len = max_transfer_size;
                        bv.bv_offset = bvec->bv_offset;

                        if (zram_bvec_rw(zram, &bv, index, offset, bio, rw) < 0)
                                goto out;

                        bv.bv_len = bvec->bv_len - max_transfer_size;
                        bv.bv_offset += max_transfer_size;
                        if (zram_bvec_rw(zram, &bv, index + 1, 0, bio, rw) < 0)
                                goto out;
                } else
                        if (zram_bvec_rw(zram, bvec, index, offset, bio, rw)
                            < 0)
                                goto out;

                update_position(&index, &offset, bvec);
        }

        set_bit(BIO_UPTODATE, &bio->bi_flags);
        bio_endio(bio, 0);
        return;

out:
        bio_io_error(bio);
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
        if (unlikely(
                (bio->bi_sector >= (zram->disksize >> SECTOR_SHIFT)) ||
                (bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)) ||
                (bio->bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))) {

                return 0;
        }

        /* I/O request is valid */
        return 1;
}

/*
 * Handler function for all zram I/O requests.
 */
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
        struct zram *zram = queue->queuedata;

        if (unlikely(!zram->init_done) && zram_init_device(zram))
                goto error;

        down_read(&zram->init_lock);
        if (unlikely(!zram->init_done))
                goto error_unlock;

        if (!valid_io_request(zram, bio)) {
                zram_stat64_inc(zram, &zram->stats.invalid_io);
                goto error_unlock;
        }

        __zram_make_request(zram, bio, bio_data_dir(bio));
        up_read(&zram->init_lock);

        return;

error_unlock:
        up_read(&zram->init_lock);
error:
        bio_io_error(bio);
}

void __zram_reset_device(struct zram *zram)
{
        size_t index;

        zram->init_done = 0;

        /* Free various per-device buffers */
        kfree(zram->compress_workmem);
        free_pages((unsigned long)zram->compress_buffer, 1);

        zram->compress_workmem = NULL;
        zram->compress_buffer = NULL;

        /* Free all pages that are still in this zram device */
        for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
                unsigned long handle = zram->table[index].handle;
                if (!handle)
                        continue;

                zs_free(zram->mem_pool, handle);
        }

        vfree(zram->table);
        zram->table = NULL;

        zs_destroy_pool(zram->mem_pool);
        zram->mem_pool = NULL;

        /* Reset stats */
        memset(&zram->stats, 0, sizeof(zram->stats));

        zram->disksize = 0;
}

void zram_reset_device(struct zram *zram)
{
        down_write(&zram->init_lock);
        __zram_reset_device(zram);
        up_write(&zram->init_lock);
}

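/*
 * Allocate the per-device compression buffers, the page table and the
 * zsmalloc pool, and set the disk capacity. Called lazily on first I/O.
 */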
int zram_init_device(struct zram *zram)
{
        int ret;
        size_t num_pages;

        down_write(&zram->init_lock);

        if (zram->init_done) {
                up_write(&zram->init_lock);
                return 0;
        }

        zram_set_disksize(zram, totalram_pages << PAGE_SHIFT);

        zram->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
        if (!zram->compress_workmem) {
                pr_err("Error allocating compressor working memory!\n");
                ret = -ENOMEM;
                goto fail_no_table;
        }

        zram->compress_buffer =
                (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
        if (!zram->compress_buffer) {
                pr_err("Error allocating compressor buffer space\n");
                ret = -ENOMEM;
                goto fail_no_table;
        }

        num_pages = zram->disksize >> PAGE_SHIFT;
        zram->table = vzalloc(num_pages * sizeof(*zram->table));
        if (!zram->table) {
                pr_err("Error allocating zram address table\n");
                ret = -ENOMEM;
                goto fail_no_table;
        }

        set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);

        /* zram devices sort of resemble non-rotational disks */
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);

        zram->mem_pool = zs_create_pool("zram", GFP_NOIO | __GFP_HIGHMEM);
        if (!zram->mem_pool) {
                pr_err("Error creating memory pool\n");
                ret = -ENOMEM;
                goto fail;
        }

        zram->init_done = 1;
        up_write(&zram->init_lock);

        pr_debug("Initialization done!\n");
        return 0;

fail_no_table:
        /* To prevent accessing table entries during cleanup */
        zram->disksize = 0;
fail:
        __zram_reset_device(zram);
        up_write(&zram->init_lock);
        pr_err("Initialization failed: err=%d\n", ret);
        return ret;
}

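/*
 * Called by the swap layer when a swap slot on this device is freed, so the
 * backing compressed page can be released immediately.
 */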
static void zram_slot_free_notify(struct block_device *bdev,
                                unsigned long index)
{
        struct zram *zram;

        zram = bdev->bd_disk->private_data;
        zram_free_page(zram, index);
        zram_stat64_inc(zram, &zram->stats.notify_free);
}

static const struct block_device_operations zram_devops = {
        .swap_slot_free_notify = zram_slot_free_notify,
        .owner = THIS_MODULE
};

static int create_device(struct zram *zram, int device_id)
{
        int ret = 0;

        init_rwsem(&zram->lock);
        init_rwsem(&zram->init_lock);
        spin_lock_init(&zram->stat64_lock);

        zram->queue = blk_alloc_queue(GFP_KERNEL);
        if (!zram->queue) {
                pr_err("Error allocating disk queue for device %d\n",
                        device_id);
                ret = -ENOMEM;
                goto out;
        }

        blk_queue_make_request(zram->queue, zram_make_request);
        zram->queue->queuedata = zram;

        /* gendisk structure */
        zram->disk = alloc_disk(1);
        if (!zram->disk) {
                blk_cleanup_queue(zram->queue);
                pr_warning("Error allocating disk structure for device %d\n",
                        device_id);
                ret = -ENOMEM;
                goto out;
        }

        zram->disk->major = zram_major;
        zram->disk->first_minor = device_id;
        zram->disk->fops = &zram_devops;
        zram->disk->queue = zram->queue;
        zram->disk->private_data = zram;
        snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

        /* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
        set_capacity(zram->disk, 0);

        /*
         * To ensure that we always get PAGE_SIZE aligned
         * and n*PAGE_SIZED sized I/O requests.
         */
        blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
        blk_queue_logical_block_size(zram->disk->queue,
                                        ZRAM_LOGICAL_BLOCK_SIZE);
        blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
        blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);

        add_disk(zram->disk);

        ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
                                &zram_disk_attr_group);
        if (ret < 0) {
                pr_warning("Error creating sysfs group\n");
                goto out;
        }

        zram->init_done = 0;

out:
        return ret;
}

static void destroy_device(struct zram *zram)
{
        sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
                        &zram_disk_attr_group);

        if (zram->disk) {
                del_gendisk(zram->disk);
                put_disk(zram->disk);
        }

        if (zram->queue)
                blk_cleanup_queue(zram->queue);
}

unsigned int zram_get_num_devices(void)
{
        return num_devices;
}

static int __init zram_init(void)
{
        int ret, dev_id;

        if (num_devices > max_num_devices) {
                pr_warning("Invalid value for num_devices: %u\n",
                                num_devices);
                ret = -EINVAL;
                goto out;
        }

        zram_major = register_blkdev(0, "zram");
        if (zram_major <= 0) {
                pr_warning("Unable to get major number\n");
                ret = -EBUSY;
                goto out;
        }

        if (!num_devices) {
                pr_info("num_devices not specified. Using default: 1\n");
                num_devices = 1;
        }

        /* Allocate the device array and initialize each one */
        pr_info("Creating %u devices ...\n", num_devices);
        zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
        if (!zram_devices) {
                ret = -ENOMEM;
                goto unregister;
        }

        for (dev_id = 0; dev_id < num_devices; dev_id++) {
                ret = create_device(&zram_devices[dev_id], dev_id);
                if (ret)
                        goto free_devices;
        }

        return 0;

free_devices:
        while (dev_id)
                destroy_device(&zram_devices[--dev_id]);
        kfree(zram_devices);
unregister:
        unregister_blkdev(zram_major, "zram");
out:
        return ret;
}

static void __exit zram_exit(void)
{
        int i;
        struct zram *zram;

        for (i = 0; i < num_devices; i++) {
                zram = &zram_devices[i];

                destroy_device(zram);
                if (zram->init_done)
                        zram_reset_device(zram);
        }

        unregister_blkdev(zram_major, "zram");

        kfree(zram_devices);
        pr_debug("Cleanup done!\n");
}

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");
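/* Example: "modprobe zram num_devices=4" creates /dev/zram0 through /dev/zram3. */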

module_init(zram_init);
module_exit(zram_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");