dm crypt: fix async inc_pending
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 13956437bc81888d38e89ba2abbaa8d62772fc52..262ed181669505678f7fd708eabebabedd4ee40f 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -517,6 +517,27 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
        }
 }
 
+static struct dm_crypt_io *crypt_io_alloc(struct dm_target *ti,
+                                         struct bio *bio, sector_t sector)
+{
+       struct crypt_config *cc = ti->private;
+       struct dm_crypt_io *io;
+
+       io = mempool_alloc(cc->io_pool, GFP_NOIO);
+       io->target = ti;
+       io->base_bio = bio;
+       io->sector = sector;
+       io->error = 0;
+       atomic_set(&io->pending, 0);
+
+       return io;
+}
+
+static void crypt_inc_pending(struct dm_crypt_io *io)
+{
+       atomic_inc(&io->pending);
+}
+
 /*
  * One of the bios was finished. Check for completion of
  * the whole request and correctly clean up the buffer.
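
The first hunk factors the io allocation out of crypt_map() into a new crypt_io_alloc() helper and wraps the bare atomic_inc() in crypt_inc_pending(), pairing it with the existing crypt_dec_pending(). The underlying pattern is a per-io reference count: each sub-request takes a reference before it starts and drops it when it finishes, and the io is completed and freed only when the count reaches zero. Below is a minimal userspace sketch of that pattern, using C11 atomics in place of the kernel's atomic_t and mempool APIs; all names in it (fake_io, io_alloc, io_inc_pending, io_dec_pending) are hypothetical, not part of dm-crypt.

/*
 * Hypothetical userspace model of the per-io pending counter.
 * Build with: cc -std=c11 sketch.c
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_io {
        atomic_int pending;     /* outstanding references to this io */
        int error;
};

static struct fake_io *io_alloc(void)
{
        struct fake_io *io = malloc(sizeof(*io));

        io->error = 0;
        atomic_init(&io->pending, 0);   /* each path takes its own first ref */
        return io;
}

static void io_inc_pending(struct fake_io *io)
{
        atomic_fetch_add(&io->pending, 1);
}

static void io_dec_pending(struct fake_io *io)
{
        /* Dropping the last reference completes and frees the io. */
        if (atomic_fetch_sub(&io->pending, 1) == 1) {
                printf("io done, error=%d\n", io->error);
                free(io);
        }
}

int main(void)
{
        struct fake_io *io = io_alloc();

        io_inc_pending(io);     /* ref held by the issuing path */
        io_inc_pending(io);     /* ref for a queued sub-request */
        io_dec_pending(io);     /* sub-request finished */
        io_dec_pending(io);     /* issuer finished; io freed here */
        return 0;
}

In dm-crypt, crypt_dec_pending() (not shown in this diff) is the counterpart that completes the base bio and returns the io to its mempool once pending drops to zero.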
@@ -591,7 +612,7 @@ static void kcryptd_io_read(struct dm_crypt_io *io)
        struct bio *base_bio = io->base_bio;
        struct bio *clone;
 
-       atomic_inc(&io->pending);
+       crypt_inc_pending(io);
 
        /*
         * The block layer might modify the bvec array, so always
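
In the read path the change is mechanical, atomic_inc() becomes crypt_inc_pending(), but routing every increment through one helper gives a single point to instrument or extend the counting later.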
@@ -653,6 +674,7 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
                crypt_free_buffer_pages(cc, clone);
                bio_put(clone);
                io->error = -EIO;
+               crypt_dec_pending(io);
                return;
        }
 
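The added crypt_dec_pending() balances the per-fragment reference that the reworked write path (see the hunks below) now takes before calling crypt_convert(): if the clone cannot be submitted, that reference must still be dropped here, otherwise the pending count would never reach zero and the io would never complete.
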
@@ -664,19 +686,23 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
 
        if (async)
                kcryptd_queue_io(io);
-       else {
-               atomic_inc(&io->pending);
+       else
                generic_make_request(clone);
-       }
 }
 
-static void kcryptd_crypt_write_convert_loop(struct dm_crypt_io *io)
+static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 {
        struct crypt_config *cc = io->target->private;
        struct bio *clone;
        unsigned remaining = io->base_bio->bi_size;
        int r;
 
+       /*
+        * Prevent io from disappearing until this function completes.
+        */
+       crypt_inc_pending(io);
+       crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, io->sector);
+
        /*
         * The allocated buffers can be smaller than the whole bio,
         * so repeat the whole process until all the data can be handled.
@@ -685,7 +711,7 @@ static void kcryptd_crypt_write_convert_loop(struct dm_crypt_io *io)
                clone = crypt_alloc_buffer(io, remaining);
                if (unlikely(!clone)) {
                        io->error = -ENOMEM;
-                       return;
+                       break;
                }
 
                io->ctx.bio_out = clone;
@@ -693,15 +719,15 @@ static void kcryptd_crypt_write_convert_loop(struct dm_crypt_io *io)
 
                remaining -= clone->bi_size;
 
+               crypt_inc_pending(io);
                r = crypt_convert(cc, &io->ctx);
 
                if (atomic_dec_and_test(&io->ctx.pending)) {
                        /* processed, no running async crypto  */
                        kcryptd_crypt_write_io_submit(io, r, 0);
                        if (unlikely(r < 0))
-                               return;
-               } else
-                       atomic_inc(&io->pending);
+                               break;
+               }
 
                /* out of memory -> run queues */
                if (unlikely(remaining)) {
@@ -711,19 +737,6 @@ static void kcryptd_crypt_write_convert_loop(struct dm_crypt_io *io)
                        congestion_wait(WRITE, HZ/100);
                }
        }
-}
-
-static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
-{
-       struct crypt_config *cc = io->target->private;
-
-       /*
-        * Prevent io from disappearing until this function completes.
-        */
-       atomic_inc(&io->pending);
-
-       crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, io->sector);
-       kcryptd_crypt_write_convert_loop(io);
 
        crypt_dec_pending(io);
 }
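
These hunks carry the actual fix. kcryptd_crypt_write_convert_loop() is folded back into kcryptd_crypt_write_convert(), and the per-fragment reference is taken before crypt_convert() runs instead of after it returns. With asynchronous crypto the completion callback may fire, and drop a reference, while crypt_convert() is still on the stack; incrementing only afterwards races with that final decrement and can leave the io freed while this function still uses it. Errors now break out of the loop so the base reference taken at the top is always dropped on the single exit path. A self-contained sketch of the corrected ordering, again with hypothetical names and C11 atomics standing in for the kernel primitives:

/*
 * Hypothetical model of the reordered write loop: every reference is
 * taken before the work that may complete asynchronously, never after.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_io {
        atomic_int pending;
        int error;
        unsigned remaining;     /* bytes still to be processed */
};

static void io_dec_pending(struct fake_io *io)
{
        if (atomic_fetch_sub(&io->pending, 1) == 1) {
                printf("io complete, error=%d\n", io->error);
                free(io);
        }
}

/* Stand-in for crypt_convert(): pretend to encrypt one fragment. */
static int convert_fragment(struct fake_io *io, unsigned len)
{
        io->remaining -= len;
        return 0;               /* success */
}

static void write_convert(struct fake_io *io)
{
        /* Base reference: io cannot disappear while this function runs. */
        atomic_fetch_add(&io->pending, 1);

        while (io->remaining) {
                unsigned frag = io->remaining > 4096 ? 4096 : io->remaining;
                int r;

                /* Fragment reference, taken BEFORE the conversion that
                 * might complete asynchronously. */
                atomic_fetch_add(&io->pending, 1);
                r = convert_fragment(io, frag);

                if (r < 0)
                        io->error = r;
                io_dec_pending(io);     /* fragment done (sync path) */
                if (r < 0)
                        break;
        }

        io_dec_pending(io);             /* drop the base reference */
}

int main(void)
{
        struct fake_io *io = malloc(sizeof(*io));

        atomic_init(&io->pending, 0);
        io->error = 0;
        io->remaining = 10000;
        write_convert(io);              /* frees io when the count hits 0 */
        return 0;
}

In the real driver the fragment reference is released by whichever side finishes the fragment: kcryptd_crypt_write_io_submit() on the synchronous error path above, or, presumably, the I/O and async-crypto completion paths not shown in this diff.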
@@ -741,7 +754,7 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
        struct crypt_config *cc = io->target->private;
        int r = 0;
 
-       atomic_inc(&io->pending);
+       crypt_inc_pending(io);
 
        crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
                           io->sector);
@@ -1108,15 +1121,9 @@ static void crypt_dtr(struct dm_target *ti)
 static int crypt_map(struct dm_target *ti, struct bio *bio,
                     union map_info *map_context)
 {
-       struct crypt_config *cc = ti->private;
        struct dm_crypt_io *io;
 
-       io = mempool_alloc(cc->io_pool, GFP_NOIO);
-       io->target = ti;
-       io->base_bio = bio;
-       io->sector = bio->bi_sector - ti->begin;
-       io->error = 0;
-       atomic_set(&io->pending, 0);
+       io = crypt_io_alloc(ti, bio, bio->bi_sector - ti->begin);
 
        if (bio_data_dir(io->base_bio) == READ)
                kcryptd_queue_io(io);
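
crypt_map() correspondingly shrinks to a single crypt_io_alloc() call, so every io starts from the same known state: pending at zero, with each processing path taking its own first reference explicitly. Centralising the initialisation keeps the read and write paths, and any future caller, from drifting apart in how they set up the io.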