goto out;
}
+	/*
+	 * zram_slot_free_notify() may have failed to queue a free
+	 * request, so double check and free a stale slot before
+	 * overwriting it.
+	 */
+ if (unlikely(meta->table[index].handle ||
+ zram_test_flag(meta, index, ZRAM_ZERO)))
+ zram_free_page(zram, index);
+
ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
meta->compress_workmem);
return ret;
}
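+/*
+ * Free every slot queued by zram_slot_free_notify(). Called with
+ * zram->lock held; slot_free_lock protects the request list itself.
+ */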
+static void handle_pending_slot_free(struct zram *zram)
+{
+ struct zram_slot_free *free_rq;
+
+ spin_lock(&zram->slot_free_lock);
+ while (zram->slot_free_rq) {
+ free_rq = zram->slot_free_rq;
+ zram->slot_free_rq = free_rq->next;
+ zram_free_page(zram, free_rq->index);
+ kfree(free_rq);
+ }
+ spin_unlock(&zram->slot_free_lock);
+}
+
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
int offset, struct bio *bio, int rw)
{
if (rw == READ) {
down_read(&zram->lock);
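+		/* Drain deferred frees so a discarded slot is not read back. */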
+ handle_pending_slot_free(zram);
ret = zram_bvec_read(zram, bvec, index, offset, bio);
up_read(&zram->lock);
} else {
down_write(&zram->lock);
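+		/* Drain deferred frees so the table is current before writing. */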
+ handle_pending_slot_free(zram);
ret = zram_bvec_write(zram, bvec, index, offset);
up_write(&zram->lock);
}
size_t index;
struct zram_meta *meta;
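+	/* Finish any queued free work before tearing the device down. */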
+ flush_work(&zram->free_work);
+
down_write(&zram->init_lock);
if (!zram->init_done) {
up_write(&zram->init_lock);
		return;
}
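+/*
+ * Work handler: take the writer lock and drain the pending free list.
+ */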
+static void zram_slot_free(struct work_struct *work)
+{
+ struct zram *zram;
+
+ zram = container_of(work, struct zram, free_work);
+ down_write(&zram->lock);
+ handle_pending_slot_free(zram);
+ up_write(&zram->lock);
+}
+
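+/* Link a new free request onto the head of the pending list. */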
+static void add_slot_free(struct zram *zram, struct zram_slot_free *free_rq)
+{
+ spin_lock(&zram->slot_free_lock);
+ free_rq->next = zram->slot_free_rq;
+ zram->slot_free_rq = free_rq;
+ spin_unlock(&zram->slot_free_lock);
+}
+
static void zram_slot_free_notify(struct block_device *bdev,
unsigned long index)
{
struct zram *zram;
+ struct zram_slot_free *free_rq;
zram = bdev->bd_disk->private_data;
- down_write(&zram->lock);
- zram_free_page(zram, index);
- up_write(&zram->lock);
atomic64_inc(&zram->stats.notify_free);
+
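+	/*
+	 * This callback can run in atomic context, so defer the actual
+	 * free to the work handler instead of taking zram->lock here.
+	 * If the allocation fails the request is dropped; the slot is
+	 * reclaimed later by the double check in zram_bvec_write().
+	 */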
+ free_rq = kmalloc(sizeof(struct zram_slot_free), GFP_ATOMIC);
+ if (!free_rq)
+ return;
+
+ free_rq->index = index;
+ add_slot_free(zram, free_rq);
+ schedule_work(&zram->free_work);
}
static const struct block_device_operations zram_devops = {
init_rwsem(&zram->lock);
init_rwsem(&zram->init_lock);
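+	/* Set up the deferred slot-free machinery. */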
+ INIT_WORK(&zram->free_work, zram_slot_free);
+ spin_lock_init(&zram->slot_free_lock);
+ zram->slot_free_rq = NULL;
+
zram->queue = blk_alloc_queue(GFP_KERNEL);
if (!zram->queue) {
pr_err("Error allocating disk queue for device %d\n",
struct zs_pool *mem_pool;
};
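+/* A deferred free request queued by zram_slot_free_notify(). */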
+struct zram_slot_free {
+ unsigned long index;
+ struct zram_slot_free *next;
+};
+
struct zram {
struct zram_meta *meta;
struct rw_semaphore lock; /* protect compression buffers, table,
* 32bit stat counters against concurrent
* notifications, reads and writes */
+
+	struct work_struct free_work;	/* handles pending free requests */
+	struct zram_slot_free *slot_free_rq; /* head of the free request list */
+
struct request_queue *queue;
struct gendisk *disk;
int init_done;
* we can store in a disk.
*/
u64 disksize; /* bytes */
+	spinlock_t slot_free_lock;	/* protects slot_free_rq */
struct zram_stats stats;
};