/*
 * linux/fs/f2fs/crypto.c
 *
 * Copied from linux/fs/ext4/crypto.c
 *
 * Copyright (C) 2015, Google, Inc.
 * Copyright (C) 2015, Motorola Mobility
 *
 * This contains encryption functions for f2fs
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *	Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *	Ildar Muslukhov, 2014
 * Remove ext4_encrypted_zeroout(),
 *	add f2fs_restore_and_release_control_page()
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */

#include <crypto/hash.h>
#include <crypto/sha.h>
#include <keys/user-type.h>
#include <keys/encrypted-type.h>
#include <linux/crypto.h>
#include <linux/ecryptfs.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/key.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <linux/spinlock_types.h>
#include <linux/f2fs_fs.h>
#include <linux/ratelimit.h>
#include <linux/bio.h>

#include "f2fs.h"
#include "xattr.h"

/* Encryption added and removed here! (L: */

static unsigned int num_prealloc_crypto_pages = 32;
static unsigned int num_prealloc_crypto_ctxs = 128;

module_param(num_prealloc_crypto_pages, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_pages,
		"Number of crypto pages to preallocate");
module_param(num_prealloc_crypto_ctxs, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
		"Number of crypto contexts to preallocate");

static mempool_t *f2fs_bounce_page_pool;

static LIST_HEAD(f2fs_free_crypto_ctxs);
static DEFINE_SPINLOCK(f2fs_crypto_ctx_lock);

struct workqueue_struct *f2fs_read_workqueue;
static DEFINE_MUTEX(crypto_init);

static struct kmem_cache *f2fs_crypto_ctx_cachep;
struct kmem_cache *f2fs_crypt_info_cachep;

/**
 * f2fs_release_crypto_ctx() - Releases an encryption context
 * @ctx: The encryption context to release.
 *
 * If the encryption context was allocated from the pre-allocated pool, returns
 * it to that pool. Else, frees it.
 *
 * If there's a bounce page in the context, this frees that.
 */
void f2fs_release_crypto_ctx(struct f2fs_crypto_ctx *ctx)
{
	unsigned long flags;

	if (ctx->bounce_page) {
		if (ctx->flags & F2FS_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL)
			__free_page(ctx->bounce_page);
		else
			mempool_free(ctx->bounce_page, f2fs_bounce_page_pool);
		ctx->bounce_page = NULL;
	}
	ctx->control_page = NULL;
	if (ctx->flags & F2FS_CTX_REQUIRES_FREE_ENCRYPT_FL) {
		if (ctx->tfm)
			crypto_free_tfm(ctx->tfm);
		kmem_cache_free(f2fs_crypto_ctx_cachep, ctx);
	} else {
		spin_lock_irqsave(&f2fs_crypto_ctx_lock, flags);
		list_add(&ctx->free_list, &f2fs_free_crypto_ctxs);
		spin_unlock_irqrestore(&f2fs_crypto_ctx_lock, flags);
	}
}

/**
 * f2fs_get_crypto_ctx() - Gets an encryption context
 * @inode: The inode for which we are doing the crypto
 *
 * Allocates and initializes an encryption context.
 *
 * Return: An allocated and initialized encryption context on success; error
 * value or NULL otherwise.
 */
struct f2fs_crypto_ctx *f2fs_get_crypto_ctx(struct inode *inode)
{
	struct f2fs_crypto_ctx *ctx = NULL;
	int res = 0;
	unsigned long flags;
	struct f2fs_crypt_info *ci = F2FS_I(inode)->i_crypt_info;

	if (ci == NULL)
		return ERR_PTR(-EACCES);

	/*
	 * We first try getting the ctx from a free list because in
	 * the common case the ctx will have an allocated and
	 * initialized crypto tfm, so it's probably a worthwhile
	 * optimization. For the bounce page, we first try getting it
	 * from the kernel allocator because that's just about as fast
	 * as getting it from a list and because a cache of free pages
	 * should generally be a "last resort" option for a filesystem
	 * to be able to do its job.
	 */
	spin_lock_irqsave(&f2fs_crypto_ctx_lock, flags);
	ctx = list_first_entry_or_null(&f2fs_free_crypto_ctxs,
					struct f2fs_crypto_ctx, free_list);
	if (ctx)
		list_del(&ctx->free_list);
	spin_unlock_irqrestore(&f2fs_crypto_ctx_lock, flags);
	if (!ctx) {
		ctx = kmem_cache_zalloc(f2fs_crypto_ctx_cachep, GFP_NOFS);
		if (!ctx) {
			res = -ENOMEM;
			goto out;
		}
		ctx->flags |= F2FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
	} else {
		ctx->flags &= ~F2FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
	}

	/*
	 * Allocate a new Crypto API context if we don't already have
	 * one or if it isn't the right mode.
	 */
	if (ctx->tfm && (ctx->mode != ci->ci_data_mode)) {
		crypto_free_tfm(ctx->tfm);
		ctx->tfm = NULL;
		ctx->mode = F2FS_ENCRYPTION_MODE_INVALID;
	}
	if (!ctx->tfm) {
		switch (ci->ci_data_mode) {
		case F2FS_ENCRYPTION_MODE_AES_256_XTS:
			ctx->tfm = crypto_ablkcipher_tfm(
				crypto_alloc_ablkcipher("xts(aes)", 0, 0));
			break;
		case F2FS_ENCRYPTION_MODE_AES_256_GCM:
			/*
			 * TODO(mhalcrow): AEAD w/ gcm(aes);
			 * crypto_aead_setauthsize()
			 */
			ctx->tfm = ERR_PTR(-ENOTSUPP);
			break;
		default:
			BUG();
		}
		if (IS_ERR_OR_NULL(ctx->tfm)) {
			res = PTR_ERR(ctx->tfm);
			ctx->tfm = NULL;
			goto out;
		}
		ctx->mode = ci->ci_data_mode;
	}
	BUG_ON(ci->ci_size != f2fs_encryption_key_size(ci->ci_data_mode));

	/*
	 * There shouldn't be a bounce page attached to the crypto
	 * context at this point.
	 */
	BUG_ON(ctx->bounce_page);

out:
	if (res) {
		if (!IS_ERR_OR_NULL(ctx))
			f2fs_release_crypto_ctx(ctx);
		ctx = ERR_PTR(res);
	}
	return ctx;
}

/*
 * Call f2fs_decrypt on every single page, reusing the encryption
 * context.
 */
static void completion_pages(struct work_struct *work)
{
	struct f2fs_crypto_ctx *ctx =
		container_of(work, struct f2fs_crypto_ctx, work);
	struct bio *bio = ctx->bio;
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, bio, i) {
		struct page *page = bv->bv_page;
		int ret = f2fs_decrypt(ctx, page);

		if (ret) {
			WARN_ON_ONCE(1);
			SetPageError(page);
		} else
			SetPageUptodate(page);
		unlock_page(page);
	}
	f2fs_release_crypto_ctx(ctx);
	bio_put(bio);
}

void f2fs_end_io_crypto_work(struct f2fs_crypto_ctx *ctx, struct bio *bio)
{
	INIT_WORK(&ctx->work, completion_pages);
	ctx->bio = bio;
	queue_work(f2fs_read_workqueue, &ctx->work);
}

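/*
 * Hedged usage sketch (an addition, not part of the original file): a
 * read-path bio completion handler is expected to hand encrypted bios
 * off to this helper instead of unlocking pages itself, roughly:
 *
 *	static void f2fs_read_end_io(struct bio *bio, int err)
 *	{
 *		struct f2fs_crypto_ctx *ctx = bio->bi_private;
 *
 *		if (!err) {
 *			f2fs_end_io_crypto_work(ctx, bio);
 *			return;
 *		}
 *		f2fs_release_crypto_ctx(ctx);
 *		...
 *	}
 *
 * The handler name and the bi_private convention are assumptions of
 * this sketch, not guarantees made by this file.
 */
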
/**
 * f2fs_exit_crypto() - Shutdown the f2fs encryption system
 */
void f2fs_exit_crypto(void)
{
	struct f2fs_crypto_ctx *pos, *n;

	list_for_each_entry_safe(pos, n, &f2fs_free_crypto_ctxs, free_list) {
		if (pos->bounce_page) {
			if (pos->flags &
				F2FS_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL)
				__free_page(pos->bounce_page);
			else
				mempool_free(pos->bounce_page,
						f2fs_bounce_page_pool);
		}
		if (pos->tfm)
			crypto_free_tfm(pos->tfm);
		kmem_cache_free(f2fs_crypto_ctx_cachep, pos);
	}
	INIT_LIST_HEAD(&f2fs_free_crypto_ctxs);
	if (f2fs_bounce_page_pool)
		mempool_destroy(f2fs_bounce_page_pool);
	f2fs_bounce_page_pool = NULL;
	if (f2fs_read_workqueue)
		destroy_workqueue(f2fs_read_workqueue);
	f2fs_read_workqueue = NULL;
	if (f2fs_crypto_ctx_cachep)
		kmem_cache_destroy(f2fs_crypto_ctx_cachep);
	f2fs_crypto_ctx_cachep = NULL;
	if (f2fs_crypt_info_cachep)
		kmem_cache_destroy(f2fs_crypt_info_cachep);
	f2fs_crypt_info_cachep = NULL;
}

/**
 * f2fs_init_crypto() - Set up for f2fs encryption.
 *
 * We only call this when we start accessing encrypted files, since it
 * results in memory getting allocated that wouldn't otherwise be used.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int f2fs_init_crypto(void)
{
	int i, res = -ENOMEM;

	mutex_lock(&crypto_init);
	if (f2fs_read_workqueue)
		goto already_initialized;

	f2fs_read_workqueue = alloc_workqueue("f2fs_crypto", WQ_HIGHPRI, 0);
	if (!f2fs_read_workqueue)
		goto fail;

	f2fs_crypto_ctx_cachep = KMEM_CACHE(f2fs_crypto_ctx,
					SLAB_RECLAIM_ACCOUNT);
	if (!f2fs_crypto_ctx_cachep)
		goto fail;

	f2fs_crypt_info_cachep = KMEM_CACHE(f2fs_crypt_info,
					SLAB_RECLAIM_ACCOUNT);
	if (!f2fs_crypt_info_cachep)
		goto fail;

	for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
		struct f2fs_crypto_ctx *ctx;

		ctx = kmem_cache_zalloc(f2fs_crypto_ctx_cachep, GFP_KERNEL);
		if (!ctx) {
			res = -ENOMEM;
			goto fail;
		}
		list_add(&ctx->free_list, &f2fs_free_crypto_ctxs);
	}

	f2fs_bounce_page_pool =
		mempool_create_page_pool(num_prealloc_crypto_pages, 0);
	if (!f2fs_bounce_page_pool) {
		res = -ENOMEM;
		goto fail;
	}
already_initialized:
	mutex_unlock(&crypto_init);
	return 0;
fail:
	f2fs_exit_crypto();
	mutex_unlock(&crypto_init);
	return res;
}

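/*
 * Usage note (an addition, not part of the original file): because
 * setup is lazy, callers are expected to run f2fs_init_crypto()
 * before the first encrypted-file access:
 *
 *	err = f2fs_init_crypto();
 *	if (err)
 *		return err;
 *
 * Repeat calls are cheap: the crypto_init mutex plus the
 * f2fs_read_workqueue check above make the function idempotent.
 */
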
void f2fs_restore_and_release_control_page(struct page **page)
{
	struct f2fs_crypto_ctx *ctx;
	struct page *bounce_page;

	/* The bounce data pages are unmapped. */
	if ((*page)->mapping)
		return;

	/* The bounce data page is unmapped. */
	bounce_page = *page;
	ctx = (struct f2fs_crypto_ctx *)page_private(bounce_page);

	/* restore control page */
	*page = ctx->control_page;

	f2fs_restore_control_page(bounce_page);
}

void f2fs_restore_control_page(struct page *data_page)
{
	struct f2fs_crypto_ctx *ctx =
		(struct f2fs_crypto_ctx *)page_private(data_page);

	set_page_private(data_page, (unsigned long)NULL);
	ClearPagePrivate(data_page);
	unlock_page(data_page);
	f2fs_release_crypto_ctx(ctx);
}

/**
 * f2fs_crypt_complete() - The completion callback for page encryption
 * @req: The asynchronous encryption request context
 * @res: The result of the encryption operation
 */
static void f2fs_crypt_complete(struct crypto_async_request *req, int res)
{
	struct f2fs_completion_result *ecr = req->data;

	if (res == -EINPROGRESS)
		return;
	ecr->res = res;
	complete(&ecr->completion);
}

static int f2fs_page_crypto(struct f2fs_crypto_ctx *ctx,
				struct inode *inode,
				f2fs_direction_t rw,
				pgoff_t index,
				struct page *src_page,
				struct page *dest_page)
{
	u8 xts_tweak[F2FS_XTS_TWEAK_SIZE];
	struct ablkcipher_request *req = NULL;
	DECLARE_F2FS_COMPLETION_RESULT(ecr);
	struct scatterlist dst, src;
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct crypto_ablkcipher *atfm = __crypto_ablkcipher_cast(ctx->tfm);
	int res = 0;

	BUG_ON(!ctx->tfm);
	BUG_ON(ctx->mode != fi->i_crypt_info->ci_data_mode);

	if (ctx->mode != F2FS_ENCRYPTION_MODE_AES_256_XTS) {
		printk_ratelimited(KERN_ERR
				"%s: unsupported crypto algorithm: %d\n",
				__func__, ctx->mode);
		return -ENOTSUPP;
	}

	crypto_ablkcipher_clear_flags(atfm, ~0);
	crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_REQ_WEAK_KEY);

	res = crypto_ablkcipher_setkey(atfm, fi->i_crypt_info->ci_raw,
					fi->i_crypt_info->ci_size);
	if (res) {
		printk_ratelimited(KERN_ERR
				"%s: crypto_ablkcipher_setkey() failed\n",
				__func__);
		return res;
	}
	req = ablkcipher_request_alloc(atfm, GFP_NOFS);
	if (!req) {
		printk_ratelimited(KERN_ERR
				"%s: crypto_request_alloc() failed\n",
				__func__);
		return -ENOMEM;
	}
	ablkcipher_request_set_callback(
		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		f2fs_crypt_complete, &ecr);

	BUILD_BUG_ON(F2FS_XTS_TWEAK_SIZE < sizeof(index));
	memcpy(xts_tweak, &index, sizeof(index));
	memset(&xts_tweak[sizeof(index)], 0,
			F2FS_XTS_TWEAK_SIZE - sizeof(index));

	sg_init_table(&dst, 1);
	sg_set_page(&dst, dest_page, PAGE_CACHE_SIZE, 0);
	sg_init_table(&src, 1);
	sg_set_page(&src, src_page, PAGE_CACHE_SIZE, 0);
	ablkcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE,
					xts_tweak);
	if (rw == F2FS_DECRYPT)
		res = crypto_ablkcipher_decrypt(req);
	else
		res = crypto_ablkcipher_encrypt(req);
	if (res == -EINPROGRESS || res == -EBUSY) {
		BUG_ON(req->base.data != &ecr);
		wait_for_completion(&ecr.completion);
		res = ecr.res;
	}
	ablkcipher_request_free(req);
	if (res) {
		printk_ratelimited(KERN_ERR
			"%s: crypto_ablkcipher_encrypt() returned %d\n",
			__func__, res);
		return res;
	}
	return 0;
}

/**
 * f2fs_encrypt() - Encrypts a page
 * @inode:          The inode for which the encryption should take place
 * @plaintext_page: The page to encrypt. Must be locked.
 *
 * Allocates a ciphertext page and encrypts plaintext_page into it using the ctx
 * encryption context.
 *
 * Called on the page write path. The caller must call
 * f2fs_restore_control_page() on the returned ciphertext page to
 * release the bounce buffer and the encryption context.
 *
 * Return: An allocated page with the encrypted content on success. Else, an
 * error value or NULL.
 */
struct page *f2fs_encrypt(struct inode *inode,
				struct page *plaintext_page)
{
	struct f2fs_crypto_ctx *ctx;
	struct page *ciphertext_page = NULL;
	int err;

	BUG_ON(!PageLocked(plaintext_page));

	ctx = f2fs_get_crypto_ctx(inode);
	if (IS_ERR(ctx))
		return (struct page *)ctx;

	/* The encryption operation will require a bounce page. */
	ciphertext_page = alloc_page(GFP_NOFS);
	if (!ciphertext_page) {
		/*
		 * This is a potential bottleneck, but at least we'll have
		 * forward progress.
		 */
		ciphertext_page = mempool_alloc(f2fs_bounce_page_pool,
						GFP_NOFS);
		if (WARN_ON_ONCE(!ciphertext_page))
			ciphertext_page = mempool_alloc(f2fs_bounce_page_pool,
						GFP_NOFS | __GFP_WAIT);
		ctx->flags &= ~F2FS_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
	} else {
		ctx->flags |= F2FS_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
	}
	ctx->bounce_page = ciphertext_page;
	ctx->control_page = plaintext_page;
	err = f2fs_page_crypto(ctx, inode, F2FS_ENCRYPT, plaintext_page->index,
					plaintext_page, ciphertext_page);
	if (err) {
		f2fs_release_crypto_ctx(ctx);
		return ERR_PTR(err);
	}
	SetPagePrivate(ciphertext_page);
	set_page_private(ciphertext_page, (unsigned long)ctx);
	lock_page(ciphertext_page);
	return ciphertext_page;
}

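/*
 * Hedged usage sketch (an addition, not part of the original file): a
 * write-path caller would use the returned bounce page roughly as
 *
 *	struct page *cpage = f2fs_encrypt(inode, page);
 *
 *	if (IS_ERR(cpage))
 *		return PTR_ERR(cpage);
 *	...submit cpage for write I/O...
 *	f2fs_restore_control_page(cpage);
 *
 * where f2fs_restore_control_page() unlocks the bounce page and
 * releases the encryption context once the ciphertext has been
 * written out.
 */
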
/**
 * f2fs_decrypt() - Decrypts a page in-place
 * @ctx:  The encryption context.
 * @page: The page to decrypt. Must be locked.
 *
 * Decrypts page in-place using the ctx encryption context.
 *
 * Called from the read completion callback.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int f2fs_decrypt(struct f2fs_crypto_ctx *ctx, struct page *page)
{
	BUG_ON(!PageLocked(page));

	return f2fs_page_crypto(ctx, page->mapping->host,
				F2FS_DECRYPT, page->index, page, page);
}

/*
 * Convenience function which takes care of allocating and
 * deallocating the encryption context
 */
int f2fs_decrypt_one(struct inode *inode, struct page *page)
{
	struct f2fs_crypto_ctx *ctx = f2fs_get_crypto_ctx(inode);
	int ret;

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);
	ret = f2fs_decrypt(ctx, page);
	f2fs_release_crypto_ctx(ctx);
	return ret;
}

bool f2fs_valid_contents_enc_mode(uint32_t mode)
{
	return (mode == F2FS_ENCRYPTION_MODE_AES_256_XTS);
}

/**
 * f2fs_validate_encryption_key_size() - Validate the encryption key size
 * @mode: The key mode.
 * @size: The key size to validate.
 *
 * Return: The validated key size for @mode. Zero if invalid.
 */
uint32_t f2fs_validate_encryption_key_size(uint32_t mode, uint32_t size)
{
	if (size == f2fs_encryption_key_size(mode))
		return size;
	return 0;
}