diff --git a/fs/f2fs/crypto.c b/fs/f2fs/crypto.c
index c3f02b61aa5afb6d46103fed5cbf7133316ca39c..4a62ef14e93275a881f967ceabd66c6bd306345c 100644
--- a/fs/f2fs/crypto.c
+++ b/fs/f2fs/crypto.c
@@ -63,9 +63,12 @@ static mempool_t *f2fs_bounce_page_pool;
 static LIST_HEAD(f2fs_free_crypto_ctxs);
 static DEFINE_SPINLOCK(f2fs_crypto_ctx_lock);
 
-struct workqueue_struct *f2fs_read_workqueue;
+static struct workqueue_struct *f2fs_read_workqueue;
 static DEFINE_MUTEX(crypto_init);
 
+static struct kmem_cache *f2fs_crypto_ctx_cachep;
+struct kmem_cache *f2fs_crypt_info_cachep;
+
 /**
  * f2fs_release_crypto_ctx() - Releases an encryption context
  * @ctx: The encryption context to release.
@@ -79,18 +82,13 @@ void f2fs_release_crypto_ctx(struct f2fs_crypto_ctx *ctx)
 {
        unsigned long flags;
 
-       if (ctx->bounce_page) {
-               if (ctx->flags & F2FS_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL)
-                       __free_page(ctx->bounce_page);
-               else
-                       mempool_free(ctx->bounce_page, f2fs_bounce_page_pool);
-               ctx->bounce_page = NULL;
+       if (ctx->flags & F2FS_WRITE_PATH_FL && ctx->w.bounce_page) {
+               mempool_free(ctx->w.bounce_page, f2fs_bounce_page_pool);
+               ctx->w.bounce_page = NULL;
        }
-       ctx->control_page = NULL;
+       ctx->w.control_page = NULL;
        if (ctx->flags & F2FS_CTX_REQUIRES_FREE_ENCRYPT_FL) {
-               if (ctx->tfm)
-                       crypto_free_tfm(ctx->tfm);
-               kfree(ctx);
+               kmem_cache_free(f2fs_crypto_ctx_cachep, ctx);
        } else {
                spin_lock_irqsave(&f2fs_crypto_ctx_lock, flags);
                list_add(&ctx->free_list, &f2fs_free_crypto_ctxs);
@@ -98,23 +96,6 @@ void f2fs_release_crypto_ctx(struct f2fs_crypto_ctx *ctx)
        }
 }
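
The release path above is half of a recycling scheme: contexts that came off the preallocated free list are parked back on it under the spinlock, while contexts allocated on demand (flagged F2FS_CTX_REQUIRES_FREE_ENCRYPT_FL) go back to the slab cache. A minimal kernel-style sketch of that recycle-or-free pattern, using hypothetical names (my_ctx, my_cachep, MY_CTX_REQUIRES_FREE) rather than the f2fs ones:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#define MY_CTX_REQUIRES_FREE	0x1	/* allocated on demand, not preallocated */

struct my_ctx {
	int flags;
	struct list_head free_list;
};

static LIST_HEAD(free_ctxs);
static DEFINE_SPINLOCK(ctx_lock);
static struct kmem_cache *my_cachep;	/* created with KMEM_CACHE() at init */

static void my_release_ctx(struct my_ctx *ctx)
{
	unsigned long flags;

	if (ctx->flags & MY_CTX_REQUIRES_FREE) {
		/* on-demand object: return it to the slab */
		kmem_cache_free(my_cachep, ctx);
	} else {
		/* preallocated object: park it for the next caller */
		spin_lock_irqsave(&ctx_lock, flags);
		list_add(&ctx->free_list, &free_ctxs);
		spin_unlock_irqrestore(&ctx_lock, flags);
	}
}

The irqsave variant matches the original code and keeps the free list safe to touch from any context.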
 
-/**
- * f2fs_alloc_and_init_crypto_ctx() - Allocates and inits an encryption context
- * @mask: The allocation mask.
- *
- * Return: An allocated and initialized encryption context on success. An error
- * value or NULL otherwise.
- */
-static struct f2fs_crypto_ctx *f2fs_alloc_and_init_crypto_ctx(gfp_t mask)
-{
-       struct f2fs_crypto_ctx *ctx = kzalloc(sizeof(struct f2fs_crypto_ctx),
-                                               mask);
-
-       if (!ctx)
-               return ERR_PTR(-ENOMEM);
-       return ctx;
-}
-
 /**
  * f2fs_get_crypto_ctx() - Gets an encryption context
  * @inode:       The inode for which we are doing the crypto
@@ -127,12 +108,11 @@ static struct f2fs_crypto_ctx *f2fs_alloc_and_init_crypto_ctx(gfp_t mask)
 struct f2fs_crypto_ctx *f2fs_get_crypto_ctx(struct inode *inode)
 {
        struct f2fs_crypto_ctx *ctx = NULL;
-       int res = 0;
        unsigned long flags;
        struct f2fs_crypt_info *ci = F2FS_I(inode)->i_crypt_info;
 
        if (ci == NULL)
-               return ERR_PTR(-EACCES);
+               return ERR_PTR(-ENOKEY);
 
        /*
         * We first try getting the ctx from a free list because in
@@ -151,63 +131,14 @@ struct f2fs_crypto_ctx *f2fs_get_crypto_ctx(struct inode *inode)
                list_del(&ctx->free_list);
        spin_unlock_irqrestore(&f2fs_crypto_ctx_lock, flags);
        if (!ctx) {
-               ctx = f2fs_alloc_and_init_crypto_ctx(GFP_NOFS);
-               if (IS_ERR(ctx)) {
-                       res = PTR_ERR(ctx);
-                       goto out;
-               }
+               ctx = kmem_cache_zalloc(f2fs_crypto_ctx_cachep, GFP_NOFS);
+               if (!ctx)
+                       return ERR_PTR(-ENOMEM);
                ctx->flags |= F2FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
        } else {
                ctx->flags &= ~F2FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
        }
-
-       /*
-        * Allocate a new Crypto API context if we don't already have
-        * one or if it isn't the right mode.
-        */
-       BUG_ON(ci->ci_mode == F2FS_ENCRYPTION_MODE_INVALID);
-       if (ctx->tfm && (ctx->mode != ci->ci_mode)) {
-               crypto_free_tfm(ctx->tfm);
-               ctx->tfm = NULL;
-               ctx->mode = F2FS_ENCRYPTION_MODE_INVALID;
-       }
-       if (!ctx->tfm) {
-               switch (ci->ci_mode) {
-               case F2FS_ENCRYPTION_MODE_AES_256_XTS:
-                       ctx->tfm = crypto_ablkcipher_tfm(
-                               crypto_alloc_ablkcipher("xts(aes)", 0, 0));
-                       break;
-               case F2FS_ENCRYPTION_MODE_AES_256_GCM:
-                       /*
-                        * TODO(mhalcrow): AEAD w/ gcm(aes);
-                        * crypto_aead_setauthsize()
-                        */
-                       ctx->tfm = ERR_PTR(-ENOTSUPP);
-                       break;
-               default:
-                       BUG();
-               }
-               if (IS_ERR_OR_NULL(ctx->tfm)) {
-                       res = PTR_ERR(ctx->tfm);
-                       ctx->tfm = NULL;
-                       goto out;
-               }
-               ctx->mode = ci->ci_mode;
-       }
-       BUG_ON(ci->ci_size != f2fs_encryption_key_size(ci->ci_mode));
-
-       /*
-        * There shouldn't be a bounce page attached to the crypto
-        * context at this point.
-        */
-       BUG_ON(ctx->bounce_page);
-
-out:
-       if (res) {
-               if (!IS_ERR_OR_NULL(ctx))
-                       f2fs_release_crypto_ctx(ctx);
-               ctx = ERR_PTR(res);
-       }
+       ctx->flags &= ~F2FS_WRITE_PATH_FL;
        return ctx;
 }
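
f2fs_get_crypto_ctx() is the other half: pop a preallocated context off the free list if one is available, otherwise fall back to the slab cache and mark the object so the release path frees it instead of recycling it. Continuing the hypothetical sketch above:

#include <linux/err.h>

static struct my_ctx *my_get_ctx(void)
{
	struct my_ctx *ctx = NULL;
	unsigned long flags;

	spin_lock_irqsave(&ctx_lock, flags);
	if (!list_empty(&free_ctxs)) {
		ctx = list_first_entry(&free_ctxs, struct my_ctx, free_list);
		list_del(&ctx->free_list);
	}
	spin_unlock_irqrestore(&ctx_lock, flags);

	if (!ctx) {
		/* free list empty: allocate, and remember to free (not recycle) */
		ctx = kmem_cache_zalloc(my_cachep, GFP_NOFS);
		if (!ctx)
			return ERR_PTR(-ENOMEM);
		ctx->flags |= MY_CTX_REQUIRES_FREE;
	} else {
		ctx->flags &= ~MY_CTX_REQUIRES_FREE;
	}
	return ctx;
}

Note the error-code change in the hunk above: a missing key is now reported as -ENOKEY, which lets callers distinguish "no key loaded" from a genuine permission failure (-EACCES).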
 
@@ -218,8 +149,8 @@ out:
 static void completion_pages(struct work_struct *work)
 {
        struct f2fs_crypto_ctx *ctx =
-               container_of(work, struct f2fs_crypto_ctx, work);
-       struct bio *bio = ctx->bio;
+               container_of(work, struct f2fs_crypto_ctx, r.work);
+       struct bio *bio = ctx->r.bio;
        struct bio_vec *bv;
        int i;
 
@@ -240,88 +171,105 @@ static void completion_pages(struct work_struct *work)
 
 void f2fs_end_io_crypto_work(struct f2fs_crypto_ctx *ctx, struct bio *bio)
 {
-       INIT_WORK(&ctx->work, completion_pages);
-       ctx->bio = bio;
-       queue_work(f2fs_read_workqueue, &ctx->work);
+       INIT_WORK(&ctx->r.work, completion_pages);
+       ctx->r.bio = bio;
+       queue_work(f2fs_read_workqueue, &ctx->r.work);
 }
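
Decryption itself never runs in the bio completion path; f2fs_end_io_crypto_work() only binds the context to a work item and hands it to the dedicated workqueue, and completion_pages() then walks the bio's pages in process context. A stripped-down sketch of that hand-off, with a hypothetical payload standing in for the bio:

#include <linux/workqueue.h>

struct read_ctx {
	struct work_struct work;
	void *payload;			/* stands in for ctx->r.bio */
};

static struct workqueue_struct *read_wq;	/* e.g. alloc_workqueue(..., WQ_HIGHPRI, 0) */

static void process_pages(struct work_struct *work)
{
	struct read_ctx *ctx = container_of(work, struct read_ctx, work);

	/* runs in process context: safe to sleep, take mutexes, decrypt pages */
	(void)ctx->payload;
}

static void end_io_defer(struct read_ctx *ctx, void *payload)
{
	INIT_WORK(&ctx->work, process_pages);	/* (re)bind handler each time */
	ctx->payload = payload;
	queue_work(read_wq, &ctx->work);
}

WQ_HIGHPRI on the real queue keeps read latency down, since every encrypted read completion funnels through it.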
 
-/**
- * f2fs_exit_crypto() - Shutdown the f2fs encryption system
- */
-void f2fs_exit_crypto(void)
+static void f2fs_crypto_destroy(void)
 {
        struct f2fs_crypto_ctx *pos, *n;
 
-       list_for_each_entry_safe(pos, n, &f2fs_free_crypto_ctxs, free_list) {
-               if (pos->bounce_page) {
-                       if (pos->flags &
-                               F2FS_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL)
-                               __free_page(pos->bounce_page);
-                       else
-                               mempool_free(pos->bounce_page,
-                                               f2fs_bounce_page_pool);
-               }
-               if (pos->tfm)
-                       crypto_free_tfm(pos->tfm);
-               kfree(pos);
-       }
+       list_for_each_entry_safe(pos, n, &f2fs_free_crypto_ctxs, free_list)
+               kmem_cache_free(f2fs_crypto_ctx_cachep, pos);
        INIT_LIST_HEAD(&f2fs_free_crypto_ctxs);
        if (f2fs_bounce_page_pool)
                mempool_destroy(f2fs_bounce_page_pool);
        f2fs_bounce_page_pool = NULL;
-       if (f2fs_read_workqueue)
-               destroy_workqueue(f2fs_read_workqueue);
-       f2fs_read_workqueue = NULL;
 }
 
 /**
- * f2fs_init_crypto() - Set up for f2fs encryption.
+ * f2fs_crypto_initialize() - Set up for f2fs encryption.
  *
  * We only call this when we start accessing encrypted files, since it
  * results in memory getting allocated that wouldn't otherwise be used.
  *
  * Return: Zero on success, non-zero otherwise.
  */
-int f2fs_init_crypto(void)
+int f2fs_crypto_initialize(void)
 {
-       int i, res;
+       int i, res = -ENOMEM;
+
+       if (f2fs_bounce_page_pool)
+               return 0;
 
        mutex_lock(&crypto_init);
-       if (f2fs_read_workqueue)
+       if (f2fs_bounce_page_pool)
                goto already_initialized;
 
-       f2fs_read_workqueue = alloc_workqueue("f2fs_crypto", WQ_HIGHPRI, 0);
-       if (!f2fs_read_workqueue) {
-               res = -ENOMEM;
-               goto fail;
-       }
-
        for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
                struct f2fs_crypto_ctx *ctx;
 
-               ctx = f2fs_alloc_and_init_crypto_ctx(GFP_KERNEL);
-               if (IS_ERR(ctx)) {
-                       res = PTR_ERR(ctx);
+               ctx = kmem_cache_zalloc(f2fs_crypto_ctx_cachep, GFP_KERNEL);
+               if (!ctx)
                        goto fail;
-               }
                list_add(&ctx->free_list, &f2fs_free_crypto_ctxs);
        }
 
+       /* must be allocated at the last step to avoid race condition above */
        f2fs_bounce_page_pool =
                mempool_create_page_pool(num_prealloc_crypto_pages, 0);
-       if (!f2fs_bounce_page_pool) {
-               res = -ENOMEM;
+       if (!f2fs_bounce_page_pool)
                goto fail;
-       }
+
 already_initialized:
        mutex_unlock(&crypto_init);
        return 0;
 fail:
-       f2fs_exit_crypto();
+       f2fs_crypto_destroy();
        mutex_unlock(&crypto_init);
        return res;
 }
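
The initialization dance above is a double-checked pattern that uses f2fs_bounce_page_pool as its "fully initialized" sentinel: the unlocked fast path tests it, the mutex serializes actual setup, and (per the in-line comment) the pool is created last so a racing fast-path reader can only see it non-NULL once everything else exists. The shape of the pattern, sketched with hypothetical build_everything_else()/create_pool() helpers:

#include <linux/mutex.h>
#include <linux/errno.h>

int build_everything_else(void);	/* hypothetical */
void *create_pool(void);		/* hypothetical */

static void *sentinel;			/* plays the role of f2fs_bounce_page_pool */
static DEFINE_MUTEX(init_lock);

static int lazy_init(void)
{
	int res;

	if (sentinel)			/* unlocked fast path */
		return 0;

	mutex_lock(&init_lock);
	if (sentinel) {			/* lost the race: someone else finished */
		mutex_unlock(&init_lock);
		return 0;
	}

	res = build_everything_else();	/* ctxs, caches, ... */
	if (res)
		goto fail;

	sentinel = create_pool();	/* publish last */
	if (!sentinel) {
		res = -ENOMEM;
		goto fail;
	}
	mutex_unlock(&init_lock);
	return 0;
fail:
	mutex_unlock(&init_lock);
	return res;
}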
 
+/**
+ * f2fs_exit_crypto() - Shutdown the f2fs encryption system
+ */
+void f2fs_exit_crypto(void)
+{
+       f2fs_crypto_destroy();
+
+       if (f2fs_read_workqueue)
+               destroy_workqueue(f2fs_read_workqueue);
+       if (f2fs_crypto_ctx_cachep)
+               kmem_cache_destroy(f2fs_crypto_ctx_cachep);
+       if (f2fs_crypt_info_cachep)
+               kmem_cache_destroy(f2fs_crypt_info_cachep);
+}
+
+int __init f2fs_init_crypto(void)
+{
+       int res = -ENOMEM;
+
+       f2fs_read_workqueue = alloc_workqueue("f2fs_crypto", WQ_HIGHPRI, 0);
+       if (!f2fs_read_workqueue)
+               goto fail;
+
+       f2fs_crypto_ctx_cachep = KMEM_CACHE(f2fs_crypto_ctx,
+                                               SLAB_RECLAIM_ACCOUNT);
+       if (!f2fs_crypto_ctx_cachep)
+               goto fail;
+
+       f2fs_crypt_info_cachep = KMEM_CACHE(f2fs_crypt_info,
+                                               SLAB_RECLAIM_ACCOUNT);
+       if (!f2fs_crypt_info_cachep)
+               goto fail;
+
+       return 0;
+fail:
+       f2fs_exit_crypto();
+       return res;
+}
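
The new split puts one-time allocations in f2fs_init_crypto() at module load: the workqueue plus two slab caches, created with the KMEM_CACHE() helper, which derives the cache name, object size, and alignment from the struct definition itself; SLAB_RECLAIM_ACCOUNT tells the VM to account these objects as reclaimable. Minimal usage sketch with a hypothetical struct:

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct my_record {
	int a;
	char buf[32];
};

static struct kmem_cache *my_record_cachep;

static int __init my_module_init(void)
{
	/* shorthand for kmem_cache_create("my_record", sizeof, align, flags, NULL) */
	my_record_cachep = KMEM_CACHE(my_record, SLAB_RECLAIM_ACCOUNT);
	if (!my_record_cachep)
		return -ENOMEM;
	return 0;
}

On any failure, f2fs_init_crypto() simply calls the full f2fs_exit_crypto(), whose NULL checks make it safe against partially completed initialization.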
+
 void f2fs_restore_and_release_control_page(struct page **page)
 {
        struct f2fs_crypto_ctx *ctx;
@@ -336,7 +284,7 @@ void f2fs_restore_and_release_control_page(struct page **page)
        ctx = (struct f2fs_crypto_ctx *)page_private(bounce_page);
 
        /* restore control page */
-       *page = ctx->control_page;
+       *page = ctx->w.control_page;
 
        f2fs_restore_control_page(bounce_page);
 }
@@ -383,32 +331,11 @@ static int f2fs_page_crypto(struct f2fs_crypto_ctx *ctx,
        struct ablkcipher_request *req = NULL;
        DECLARE_F2FS_COMPLETION_RESULT(ecr);
        struct scatterlist dst, src;
-       struct f2fs_inode_info *fi = F2FS_I(inode);
-       struct crypto_ablkcipher *atfm = __crypto_ablkcipher_cast(ctx->tfm);
+       struct f2fs_crypt_info *ci = F2FS_I(inode)->i_crypt_info;
+       struct crypto_ablkcipher *tfm = ci->ci_ctfm;
        int res = 0;
 
-       BUG_ON(!ctx->tfm);
-       BUG_ON(ctx->mode != fi->i_crypt_info->ci_mode);
-
-       if (ctx->mode != F2FS_ENCRYPTION_MODE_AES_256_XTS) {
-               printk_ratelimited(KERN_ERR
-                               "%s: unsupported crypto algorithm: %d\n",
-                               __func__, ctx->mode);
-               return -ENOTSUPP;
-       }
-
-       crypto_ablkcipher_clear_flags(atfm, ~0);
-       crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_REQ_WEAK_KEY);
-
-       res = crypto_ablkcipher_setkey(atfm, fi->i_crypt_info->ci_raw,
-                                       fi->i_crypt_info->ci_size);
-       if (res) {
-               printk_ratelimited(KERN_ERR
-                               "%s: crypto_ablkcipher_setkey() failed\n",
-                               __func__);
-               return res;
-       }
-       req = ablkcipher_request_alloc(atfm, GFP_NOFS);
+       req = ablkcipher_request_alloc(tfm, GFP_NOFS);
        if (!req) {
                printk_ratelimited(KERN_ERR
                                "%s: crypto_request_alloc() failed\n",
@@ -449,6 +376,15 @@ static int f2fs_page_crypto(struct f2fs_crypto_ctx *ctx,
        return 0;
 }
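
With keys moved into the per-inode f2fs_crypt_info, f2fs_page_crypto() shrinks to: allocate a request on the inode's long-lived tfm (ci->ci_ctfm), point source and destination scatterlists at one page each, and run the cipher. A trimmed sketch of a single-page operation in the old ablkcipher API this file targets (the asynchronous-completion wait is elided):

#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/mm.h>

static int crypt_one_page(struct crypto_ablkcipher *tfm,
			  struct page *src_page, struct page *dst_page,
			  u8 *iv)
{
	struct ablkcipher_request *req;
	struct scatterlist src, dst;
	int res;

	req = ablkcipher_request_alloc(tfm, GFP_NOFS);
	if (!req)
		return -ENOMEM;

	sg_init_table(&dst, 1);
	sg_set_page(&dst, dst_page, PAGE_SIZE, 0);
	sg_init_table(&src, 1);
	sg_set_page(&src, src_page, PAGE_SIZE, 0);
	ablkcipher_request_set_crypt(req, &src, &dst, PAGE_SIZE, iv);

	res = crypto_ablkcipher_encrypt(req);
	/* the real code waits on a completion if this returns -EINPROGRESS/-EBUSY */
	ablkcipher_request_free(req);
	return res;
}

Binding the tfm (and its one-time setkey) to the inode means the per-page path no longer pays for key setup, which is also why the old mode-checking BUG_ONs could go.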
 
+static struct page *alloc_bounce_page(struct f2fs_crypto_ctx *ctx)
+{
+       ctx->w.bounce_page = mempool_alloc(f2fs_bounce_page_pool, GFP_NOWAIT);
+       if (ctx->w.bounce_page == NULL)
+               return ERR_PTR(-ENOMEM);
+       ctx->flags |= F2FS_WRITE_PATH_FL;
+       return ctx->w.bounce_page;
+}
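
alloc_bounce_page() asks the mempool with GFP_NOWAIT: under memory pressure it fails immediately with -ENOMEM rather than blocking the write path, and the pool's preallocated pages are the forward-progress guarantee (contrast this with the removed alloc_page()-then-mempool fallback in f2fs_encrypt() below). The mempool lifecycle, sketched; the element count of 32 is illustrative, f2fs uses num_prealloc_crypto_pages:

#include <linux/mempool.h>
#include <linux/err.h>

static mempool_t *bounce_pool;

static int pool_setup(void)
{
	/* preallocate 32 order-0 pages; same creator the f2fs code uses */
	bounce_pool = mempool_create_page_pool(32, 0);
	return bounce_pool ? 0 : -ENOMEM;
}

static struct page *grab_bounce_page(void)
{
	/* GFP_NOWAIT: never sleep; rely on the preallocated elements */
	struct page *page = mempool_alloc(bounce_pool, GFP_NOWAIT);

	return page ? page : ERR_PTR(-ENOMEM);
}

static void put_bounce_page(struct page *page)
{
	mempool_free(page, bounce_pool);	/* refills the pool first if it is low */
}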
+
 /**
  * f2fs_encrypt() - Encrypts a page
  * @inode:          The inode for which the encryption should take place
@@ -478,33 +414,26 @@ struct page *f2fs_encrypt(struct inode *inode,
                return (struct page *)ctx;
 
        /* The encryption operation will require a bounce page. */
-       ciphertext_page = alloc_page(GFP_NOFS);
-       if (!ciphertext_page) {
-               /*
-                * This is a potential bottleneck, but at least we'll have
-                * forward progress.
-                */
-               ciphertext_page = mempool_alloc(f2fs_bounce_page_pool,
-                                                       GFP_NOFS);
-               if (WARN_ON_ONCE(!ciphertext_page))
-                       ciphertext_page = mempool_alloc(f2fs_bounce_page_pool,
-                                               GFP_NOFS | __GFP_WAIT);
-               ctx->flags &= ~F2FS_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
-       } else {
-               ctx->flags |= F2FS_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
-       }
-       ctx->bounce_page = ciphertext_page;
-       ctx->control_page = plaintext_page;
+       ciphertext_page = alloc_bounce_page(ctx);
+       if (IS_ERR(ciphertext_page))
+               goto err_out;
+
+       ctx->w.control_page = plaintext_page;
        err = f2fs_page_crypto(ctx, inode, F2FS_ENCRYPT, plaintext_page->index,
                                        plaintext_page, ciphertext_page);
        if (err) {
-               f2fs_release_crypto_ctx(ctx);
-               return ERR_PTR(err);
+               ciphertext_page = ERR_PTR(err);
+               goto err_out;
        }
+
        SetPagePrivate(ciphertext_page);
        set_page_private(ciphertext_page, (unsigned long)ctx);
        lock_page(ciphertext_page);
        return ciphertext_page;
+
+err_out:
+       f2fs_release_crypto_ctx(ctx);
+       return ciphertext_page;
 }
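
The ciphertext (bounce) page carries its crypto context in the page private field: f2fs_encrypt() sets it before handing the locked page to writeback, and f2fs_restore_and_release_control_page() earlier in this diff reads it back to recover the original control page once I/O completes. The stash/recover pair, sketched with the hypothetical my_ctx from above plus an assumed w.control_page member:

#include <linux/mm.h>
#include <linux/pagemap.h>

static void stash_ctx(struct page *bounce_page, struct my_ctx *ctx)
{
	SetPagePrivate(bounce_page);		/* mark: the private field is live */
	set_page_private(bounce_page, (unsigned long)ctx);
}

static struct my_ctx *recover_ctx(struct page *bounce_page)
{
	return (struct my_ctx *)page_private(bounce_page);
}

This is why the write path only ever needs the bounce page after submission: everything else hangs off the context it points to.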
 
 /**
@@ -535,8 +464,8 @@ int f2fs_decrypt_one(struct inode *inode, struct page *page)
        struct f2fs_crypto_ctx *ctx = f2fs_get_crypto_ctx(inode);
        int ret;
 
-       if (!ctx)
-               return -ENOMEM;
+       if (IS_ERR(ctx))
+               return PTR_ERR(ctx);
        ret = f2fs_decrypt(ctx, page);
        f2fs_release_crypto_ctx(ctx);
        return ret;
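
This last hunk fixes a real bug: f2fs_get_crypto_ctx() has always returned an ERR_PTR-encoded errno, never NULL, so the old !ctx test could not catch any failure and f2fs_decrypt() would then dereference an invalid pointer. The canonical consumer pattern, continuing the hypothetical helpers above:

#include <linux/err.h>

static int use_one_ctx(void)
{
	struct my_ctx *ctx = my_get_ctx();

	if (IS_ERR(ctx))		/* never compare against NULL here */
		return PTR_ERR(ctx);	/* e.g. -ENOKEY or -ENOMEM */

	/* ... do the per-page work ... */

	my_release_ctx(ctx);
	return 0;
}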