dm crypt: fix large block integrity support
author     Mikulas Patocka <mpatocka@redhat.com>
           Tue, 18 Apr 2017 20:51:54 +0000 (16:51 -0400)
committer  Mike Snitzer <snitzer@redhat.com>
           Mon, 24 Apr 2017 16:04:34 +0000 (12:04 -0400)
Previously, dm-crypt could use encryption blocks composed of multiple 512b
sectors, but it still generated integrity metadata for each 512b sector
(padding the tags with zeroes).  Fix dm-crypt so that integrity metadata is
sent per block, not per sector.

The user must use the same block size in the dm-crypt and dm-integrity
targets.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
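
For illustration only, a minimal userspace sketch (not part of the commit) of
the per-block tag accounting introduced here; the names on_disk_tag_size,
sector_shift and bio_sectors mirror fields used in drivers/md/dm-crypt.c, but
the values below are invented:

#include <stdio.h>

int main(void)
{
	/* Hypothetical values; in the kernel these live in struct crypt_config. */
	unsigned on_disk_tag_size = 28;     /* bytes of tag material per encryption block */
	unsigned sector_shift = 3;          /* 4096b blocks = 8 x 512b sectors */
	unsigned sector_step = 1u << sector_shift;
	unsigned bio_sectors = 256;         /* a 128KiB bio, counted in 512b sectors */

	/* crypt_map(): allocate one tag per encryption block, not per 512b sector. */
	unsigned tag_len = on_disk_tag_size * (bio_sectors >> sector_shift);
	printf("integrity metadata per bio: %u bytes\n", tag_len);

	/*
	 * crypt_convert(): tag_offset now advances by one per processed block,
	 * while the sector counter still advances by sector_step.
	 */
	unsigned tag_offset = 0;
	for (unsigned sector = 0; sector < bio_sectors; sector += sector_step)
		tag_offset++;
	printf("tag slots consumed: %u\n", tag_offset);

	return 0;
}

With a 4096b block size the sketch reports 32 tag slots for a 256-sector bio,
i.e. one per block, where the pre-patch accounting would have consumed 256.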
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index ccbc7f36bb2ee24f5c708d4b4009c1447ecb9d80..8bff6f7a4c6cb32291d0e541c6a639239f12c837 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -938,10 +938,15 @@ static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
                return -EINVAL;
        }
 
-       if (bi->tag_size != cc->on_disk_tag_size) {
+       if (bi->tag_size != cc->on_disk_tag_size ||
+           bi->tuple_size != cc->on_disk_tag_size) {
                ti->error = "Integrity profile tag size mismatch.";
                return -EINVAL;
        }
+       if (1 << bi->interval_exp != cc->sector_size) {
+               ti->error = "Integrity profile sector size mismatch.";
+               return -EINVAL;
+       }
 
        if (crypt_integrity_aead(cc)) {
                cc->integrity_tag_size = cc->on_disk_tag_size - cc->integrity_iv_size;
@@ -1322,7 +1327,7 @@ static int crypt_convert(struct crypt_config *cc,
                case -EINPROGRESS:
                        ctx->r.req = NULL;
                        ctx->cc_sector += sector_step;
-                       tag_offset += sector_step;
+                       tag_offset++;
                        continue;
                /*
                 * The request was already processed (synchronously).
@@ -1330,7 +1335,7 @@ static int crypt_convert(struct crypt_config *cc,
                case 0:
                        atomic_dec(&ctx->cc_pending);
                        ctx->cc_sector += sector_step;
-                       tag_offset += sector_step;
+                       tag_offset++;
                        cond_resched();
                        continue;
                /*
@@ -2735,6 +2740,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                        ti->error = "Cannot allocate integrity tags mempool";
                        goto bad;
                }
+
+               cc->tag_pool_max_sectors <<= cc->sector_shift;
        }
 
        ret = -ENOMEM;
@@ -2816,16 +2823,15 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
        crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
 
        if (cc->on_disk_tag_size) {
-               unsigned tag_len = cc->on_disk_tag_size * bio_sectors(bio);
+               unsigned tag_len = cc->on_disk_tag_size * (bio_sectors(bio) >> cc->sector_shift);
 
                if (unlikely(tag_len > KMALLOC_MAX_SIZE) ||
-                   unlikely(!(io->integrity_metadata = kzalloc(tag_len,
+                   unlikely(!(io->integrity_metadata = kmalloc(tag_len,
                                GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
                        if (bio_sectors(bio) > cc->tag_pool_max_sectors)
                                dm_accept_partial_bio(bio, cc->tag_pool_max_sectors);
                        io->integrity_metadata = mempool_alloc(cc->tag_pool, GFP_NOIO);
                        io->integrity_metadata_from_pool = true;
-                       memset(io->integrity_metadata, 0, cc->tag_pool_max_sectors * (1 << SECTOR_SHIFT));
                }
        }