git.karo-electronics.de Git - karo-tx-linux.git/blobdiff - drivers/staging/zram/zram_drv.c
Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux
[karo-tx-linux.git] / drivers / staging / zram / zram_drv.c
index 685d612a627b6163d85a1fdb1b3ae8d8c3f36988..653b074035f7b165d04d1587d428097ecfac780a 100644 (file)
@@ -135,7 +135,8 @@ static void zram_set_disksize(struct zram *zram, size_t totalram_bytes)
 
 static void zram_free_page(struct zram *zram, size_t index)
 {
-       void *handle = zram->table[index].handle;
+       unsigned long handle = zram->table[index].handle;
+       u16 size = zram->table[index].size;
 
        if (unlikely(!handle)) {
                /*
@@ -149,24 +150,19 @@ static void zram_free_page(struct zram *zram, size_t index)
                return;
        }
 
-       if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
-               __free_page(handle);
-               zram_clear_flag(zram, index, ZRAM_UNCOMPRESSED);
-               zram_stat_dec(&zram->stats.pages_expand);
-               goto out;
-       }
+       if (unlikely(size > max_zpage_size))
+               zram_stat_dec(&zram->stats.bad_compress);
 
        zs_free(zram->mem_pool, handle);
 
-       if (zram->table[index].size <= PAGE_SIZE / 2)
+       if (size <= PAGE_SIZE / 2)
                zram_stat_dec(&zram->stats.good_compress);
 
-out:
        zram_stat64_sub(zram, &zram->stats.compr_size,
                        zram->table[index].size);
        zram_stat_dec(&zram->stats.pages_stored);
 
-       zram->table[index].handle = NULL;
+       zram->table[index].handle = 0;
        zram->table[index].size = 0;
 }
 
@@ -182,22 +178,6 @@ static void handle_zero_page(struct bio_vec *bvec)
        flush_dcache_page(page);
 }
 
-static void handle_uncompressed_page(struct zram *zram, struct bio_vec *bvec,
-                                    u32 index, int offset)
-{
-       struct page *page = bvec->bv_page;
-       unsigned char *user_mem, *cmem;
-
-       user_mem = kmap_atomic(page);
-       cmem = kmap_atomic(zram->table[index].handle);
-
-       memcpy(user_mem + bvec->bv_offset, cmem + offset, bvec->bv_len);
-       kunmap_atomic(cmem);
-       kunmap_atomic(user_mem);
-
-       flush_dcache_page(page);
-}
-
 static inline int is_partial_io(struct bio_vec *bvec)
 {
        return bvec->bv_len != PAGE_SIZE;
@@ -209,7 +189,6 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
        int ret;
        size_t clen;
        struct page *page;
-       struct zobj_header *zheader;
        unsigned char *user_mem, *cmem, *uncmem = NULL;
 
        page = bvec->bv_page;
@@ -227,12 +206,6 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
                return 0;
        }
 
-       /* Page is stored uncompressed since it's incompressible */
-       if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
-               handle_uncompressed_page(zram, bvec, index, offset);
-               return 0;
-       }
-
        if (is_partial_io(bvec)) {
                /* Use  a temporary buffer to decompress the page */
                uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
@@ -247,10 +220,10 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
                uncmem = user_mem;
        clen = PAGE_SIZE;
 
-       cmem = zs_map_object(zram->mem_pool, zram->table[index].handle);
+       cmem = zs_map_object(zram->mem_pool, zram->table[index].handle,
+                               ZS_MM_RO);
 
-       ret = lzo1x_decompress_safe(cmem + sizeof(*zheader),
-                                   zram->table[index].size,
+       ret = lzo1x_decompress_safe(cmem, zram->table[index].size,
                                    uncmem, &clen);
 
        if (is_partial_io(bvec)) {
@@ -278,28 +251,18 @@ static int zram_read_before_write(struct zram *zram, char *mem, u32 index)
 {
        int ret;
        size_t clen = PAGE_SIZE;
-       struct zobj_header *zheader;
        unsigned char *cmem;
+       unsigned long handle = zram->table[index].handle;
 
-       if (zram_test_flag(zram, index, ZRAM_ZERO) ||
-           !zram->table[index].handle) {
+       if (zram_test_flag(zram, index, ZRAM_ZERO) || !handle) {
                memset(mem, 0, PAGE_SIZE);
                return 0;
        }
 
-       cmem = zs_map_object(zram->mem_pool, zram->table[index].handle);
-
-       /* Page is stored uncompressed since it's incompressible */
-       if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
-               memcpy(mem, cmem, PAGE_SIZE);
-               kunmap_atomic(cmem);
-               return 0;
-       }
-
-       ret = lzo1x_decompress_safe(cmem + sizeof(*zheader),
-                                   zram->table[index].size,
+       cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
+       ret = lzo1x_decompress_safe(cmem, zram->table[index].size,
                                    mem, &clen);
-       zs_unmap_object(zram->mem_pool, zram->table[index].handle);
+       zs_unmap_object(zram->mem_pool, handle);
 
        /* Should NEVER happen. Return bio error if it does. */
        if (unlikely(ret != LZO_E_OK)) {
@@ -315,11 +278,9 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
                           int offset)
 {
        int ret;
-       u32 store_offset;
        size_t clen;
-       void *handle;
-       struct zobj_header *zheader;
-       struct page *page, *page_store;
+       unsigned long handle;
+       struct page *page;
        unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
 
        page = bvec->bv_page;
@@ -381,57 +342,21 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
                goto out;
        }
 
-       /*
-        * Page is incompressible. Store it as-is (uncompressed)
-        * since we do not want to return too many disk write
-        * errors which has side effect of hanging the system.
-        */
-       if (unlikely(clen > max_zpage_size)) {
-               clen = PAGE_SIZE;
-               page_store = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
-               if (unlikely(!page_store)) {
-                       pr_info("Error allocating memory for "
-                               "incompressible page: %u\n", index);
-                       ret = -ENOMEM;
-                       goto out;
-               }
-
-               store_offset = 0;
-               zram_set_flag(zram, index, ZRAM_UNCOMPRESSED);
-               zram_stat_inc(&zram->stats.pages_expand);
-               handle = page_store;
-               src = kmap_atomic(page);
-               cmem = kmap_atomic(page_store);
-               goto memstore;
-       }
+       if (unlikely(clen > max_zpage_size))
+               zram_stat_inc(&zram->stats.bad_compress);
 
-       handle = zs_malloc(zram->mem_pool, clen + sizeof(*zheader));
+       handle = zs_malloc(zram->mem_pool, clen);
        if (!handle) {
                pr_info("Error allocating memory for compressed "
                        "page: %u, size=%zu\n", index, clen);
                ret = -ENOMEM;
                goto out;
        }
-       cmem = zs_map_object(zram->mem_pool, handle);
-
-memstore:
-#if 0
-       /* Back-reference needed for memory defragmentation */
-       if (!zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)) {
-               zheader = (struct zobj_header *)cmem;
-               zheader->table_idx = index;
-               cmem += sizeof(*zheader);
-       }
-#endif
+       cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
 
        memcpy(cmem, src, clen);
 
-       if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
-               kunmap_atomic(cmem);
-               kunmap_atomic(src);
-       } else {
-               zs_unmap_object(zram->mem_pool, handle);
-       }
+       zs_unmap_object(zram->mem_pool, handle);
 
        zram->table[index].handle = handle;
        zram->table[index].size = clen;
@@ -592,14 +517,11 @@ void __zram_reset_device(struct zram *zram)
 
        /* Free all pages that are still in this zram device */
        for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
-               void *handle = zram->table[index].handle;
+               unsigned long handle = zram->table[index].handle;
                if (!handle)
                        continue;
 
-               if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
-                       __free_page(handle);
-               else
-                       zs_free(zram->mem_pool, handle);
+               zs_free(zram->mem_pool, handle);
        }
 
        vfree(zram->table);
@@ -724,7 +646,7 @@ static int create_device(struct zram *zram, int device_id)
        zram->disk = alloc_disk(1);
        if (!zram->disk) {
                blk_cleanup_queue(zram->queue);
-               pr_warning("Error allocating disk structure for device %d\n",
+               pr_warn("Error allocating disk structure for device %d\n",
                        device_id);
                ret = -ENOMEM;
                goto out;
@@ -755,7 +677,7 @@ static int create_device(struct zram *zram, int device_id)
        ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
                                &zram_disk_attr_group);
        if (ret < 0) {
-               pr_warning("Error creating sysfs group");
+               pr_warn("Error creating sysfs group");
                goto out;
        }
 
@@ -789,7 +711,7 @@ static int __init zram_init(void)
        int ret, dev_id;
 
        if (num_devices > max_num_devices) {
-               pr_warning("Invalid value for num_devices: %u\n",
+               pr_warn("Invalid value for num_devices: %u\n",
                                num_devices);
                ret = -EINVAL;
                goto out;
@@ -797,7 +719,7 @@ static int __init zram_init(void)
 
        zram_major = register_blkdev(0, "zram");
        if (zram_major <= 0) {
-               pr_warning("Unable to get major number\n");
+               pr_warn("Unable to get major number\n");
                ret = -EBUSY;
                goto out;
        }