{
unsigned long flags;
- if (ctx->bounce_page) {
+ if (ctx->flags & EXT4_WRITE_PATH_FL && ctx->w.bounce_page) {
if (ctx->flags & EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL)
- __free_page(ctx->bounce_page);
+ __free_page(ctx->w.bounce_page);
else
- mempool_free(ctx->bounce_page, ext4_bounce_page_pool);
- ctx->bounce_page = NULL;
+ mempool_free(ctx->w.bounce_page, ext4_bounce_page_pool);
}
- ctx->control_page = NULL;
+ ctx->w.bounce_page = NULL;
+ ctx->w.control_page = NULL;
if (ctx->flags & EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL) {
if (ctx->tfm)
crypto_free_tfm(ctx->tfm);
} else {
ctx->flags &= ~EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL;
}
+ ctx->flags &= ~EXT4_WRITE_PATH_FL;
/* Allocate a new Crypto API context if we don't already have
* one or if it isn't the right mode. */
}
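
The guard added above is the crux of the release path: once bounce_page moves into a union, it is only meaningful when the context was last used for a write, so EXT4_WRITE_PATH_FL must be checked before the field is touched, and the flag is cleared so the context can be recycled for reads. A minimal userspace sketch of that release logic, with malloc() and a one-slot reserve as hypothetical stand-ins for alloc_page() and ext4_bounce_page_pool:

#include <stdlib.h>

#define BUF_REQUIRES_FREE 0x02	/* analogue of EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL */
#define BUF_WRITE_PATH	  0x04	/* analogue of EXT4_WRITE_PATH_FL */

static char reserve[4096];	/* one-slot stand-in for the bounce-page mempool */

static void release_buf(void *buf, int *flags)
{
	if ((*flags & BUF_WRITE_PATH) && buf) {
		if (*flags & BUF_REQUIRES_FREE)
			free(buf);	/* came from the general allocator */
		/* else: buf points into the reserve; the kernel would
		 * mempool_free() it, our toy reserve needs no bookkeeping */
	}
	*flags &= ~BUF_WRITE_PATH;	/* context may be reused for reads */
}

int main(void)
{
	int flags = BUF_WRITE_PATH | BUF_REQUIRES_FREE;

	release_buf(malloc(4096), &flags);

	flags = BUF_WRITE_PATH;
	release_buf(reserve, &flags);	/* reserve-backed: nothing to free() */
	return 0;
}
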
BUG_ON(ci->ci_size != ext4_encryption_key_size(ci->ci_data_mode));
- /* There shouldn't be a bounce page attached to the crypto
- * context at this point. */
- BUG_ON(ctx->bounce_page);
-
out:
if (res) {
if (!IS_ERR_OR_NULL(ctx))
ext4_release_crypto_ctx(ctx);
struct ext4_crypto_ctx *pos, *n;
list_for_each_entry_safe(pos, n, &ext4_free_crypto_ctxs, free_list) {
- if (pos->bounce_page) {
- if (pos->flags &
- EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL) {
- __free_page(pos->bounce_page);
- } else {
- mempool_free(pos->bounce_page,
- ext4_bounce_page_pool);
- }
- }
if (pos->tfm)
crypto_free_tfm(pos->tfm);
kmem_cache_free(ext4_crypto_ctx_cachep, pos);
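
With free-list contexts now guaranteed to carry no bounce page (the union even overlays free_list onto the write-path fields), the teardown loop shrinks to freeing the tfm and the slab object. It still must use list_for_each_entry_safe(), which reads the next node before the body runs so that freeing the current node is safe. The same idiom in a self-contained userspace sketch over a plain singly linked list:

#include <stdlib.h>

struct node { struct node *next; };

int main(void)
{
	struct node *head = NULL;

	/* build a short list */
	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		if (!n)
			break;
		n->next = head;
		head = n;
	}

	/* "safe" walk: read ->next before freeing the current node,
	 * which is exactly why the kernel macro takes a second cursor */
	for (struct node *pos = head, *next; pos; pos = next) {
		next = pos->next;
		free(pos);
	}
	return 0;
}
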
} else {
ctx->flags |= EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
}
- ctx->bounce_page = ciphertext_page;
- ctx->control_page = plaintext_page;
+ ctx->flags |= EXT4_WRITE_PATH_FL;
+ ctx->w.bounce_page = ciphertext_page;
+ ctx->w.control_page = plaintext_page;
err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, plaintext_page->index,
plaintext_page, ciphertext_page);
if (err) {
} else {
ctx->flags |= EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
}
- ctx->bounce_page = ciphertext_page;
+ ctx->w.bounce_page = ciphertext_page;
while (len--) {
err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, lblk,
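
Both write-path call sites above pair with the same allocation pattern: try alloc_page() first and fall back to the bounce-page mempool under memory pressure, recording in EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL which allocator succeeded so the release path (sketched earlier) can undo it. A hedged userspace sketch of that try-then-fall-back step, again with malloc() and a static reserve as hypothetical stand-ins:

#include <stdio.h>
#include <stdlib.h>

#define BUF_REQUIRES_FREE 0x02	/* records which allocator owns the buffer */

static char reserve[4096];	/* stand-in for ext4_bounce_page_pool */

static void *alloc_bounce(int *flags)
{
	void *buf = malloc(4096);	/* like alloc_page(GFP_NOFS) */

	if (buf) {
		*flags |= BUF_REQUIRES_FREE;
		return buf;
	}
	*flags &= ~BUF_REQUIRES_FREE;	/* fell back to the reserve */
	return reserve;
}

int main(void)
{
	int flags = 0;
	void *page = alloc_bounce(&flags);

	printf("bounce page from %s\n",
	       (flags & BUF_REQUIRES_FREE) ? "heap" : "reserve");
	if (flags & BUF_REQUIRES_FREE)
		free(page);
	return 0;
}
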
#define EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL 0x00000001
#define EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL 0x00000002
+#define EXT4_WRITE_PATH_FL 0x00000004
struct ext4_crypto_ctx {
struct crypto_tfm *tfm; /* Crypto API context */
- struct page *bounce_page; /* Ciphertext page on write path */
- struct page *control_page; /* Original page on write path */
- struct bio *bio; /* The bio for this context */
- struct work_struct work; /* Work queue for read complete path */
- struct list_head free_list; /* Free list */
- int flags; /* Flags */
- int mode; /* Encryption mode for tfm */
+ union {
+ struct {
+ struct page *bounce_page; /* Ciphertext page */
+ struct page *control_page; /* Original page */
+ } w;
+ struct {
+ struct bio *bio; /* The bio for this context */
+ struct work_struct work; /* Work queue for read complete path */
+ } r;
+ struct list_head free_list; /* Free list */
+ };
+ char flags; /* Flags */
+ char mode; /* Encryption mode for tfm */
};
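
The union is the heart of the patch: at any instant a crypto context is on exactly one of the write path, the read path, or the free list, so the three groups of fields can safely share storage, and flags and mode fit in a byte apiece. A compilable userspace sketch of the effect on the footprint (field types below are stand-ins, and the exact numbers vary by architecture):

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

struct ctx_before {	/* one slot for every field */
	void *tfm;
	void *bounce_page;
	void *control_page;
	void *bio;
	char work[32];	/* stand-in for struct work_struct */
	struct list_head free_list;
	int flags;
	int mode;
};

struct ctx_after {	/* mutually exclusive state shares storage */
	void *tfm;
	union {
		struct { void *bounce_page, *control_page; } w;
		struct { void *bio; char work[32]; } r;
		struct list_head free_list;
	};
	char flags;
	char mode;
};

int main(void)
{
	printf("before: %zu bytes\n", sizeof(struct ctx_before));
	printf("after: %zu bytes\n", sizeof(struct ctx_after));
	return 0;
}
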
struct ext4_completion_result {
{
#ifdef CONFIG_EXT4_FS_ENCRYPTION
struct ext4_crypto_ctx *ctx =
- container_of(work, struct ext4_crypto_ctx, work);
- struct bio *bio = ctx->bio;
+ container_of(work, struct ext4_crypto_ctx, r.work);
+ struct bio *bio = ctx->r.bio;
struct bio_vec *bv;
int i;
if (err) {
ext4_release_crypto_ctx(ctx);
} else {
- INIT_WORK(&ctx->work, completion_pages);
- ctx->bio = bio;
- queue_work(ext4_read_workqueue, &ctx->work);
+ INIT_WORK(&ctx->r.work, completion_pages);
+ ctx->r.bio = bio;
+ queue_work(ext4_read_workqueue, &ctx->r.work);
return;
}
}
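
The read-path hunks rely on the standard embedded-work pattern: the bio completion queues ctx->r.work, and the worker recovers the owning context with container_of(), so no extra pointer has to travel with the work item. A self-contained userspace sketch of that recover-the-container idiom (container_of is hand-rolled here; the kernel's macro adds type checking on top of the same offsetof arithmetic):

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_item {
	void (*fn)(struct work_item *);
};

struct read_ctx {
	int id;			/* payload the handler needs */
	struct work_item work;	/* embedded, like ctx->r.work */
};

static void completion(struct work_item *w)
{
	/* recover the enclosing context from the member pointer alone */
	struct read_ctx *ctx = container_of(w, struct read_ctx, work);

	printf("completed ctx %d\n", ctx->id);
}

int main(void)
{
	struct read_ctx ctx = { .id = 7, .work = { .fn = completion } };

	/* a real workqueue thread would make this call asynchronously */
	ctx.work.fn(&ctx.work);
	return 0;
}
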