 	unsigned long flags;
 	if (ctx->flags & F2FS_WRITE_PATH_FL && ctx->w.bounce_page) {
-		if (ctx->flags & F2FS_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL)
-			__free_page(ctx->w.bounce_page);
-		else
-			mempool_free(ctx->w.bounce_page, f2fs_bounce_page_pool);
+		mempool_free(ctx->w.bounce_page, f2fs_bounce_page_pool);
 		ctx->w.bounce_page = NULL;
 	}
 	ctx->w.control_page = NULL;
@@ ... @@
 		return (struct page *)ctx;
 	/* The encryption operation will require a bounce page. */
-	ciphertext_page = alloc_page(GFP_NOFS);
+	ciphertext_page = mempool_alloc(f2fs_bounce_page_pool, GFP_NOWAIT);
 	if (!ciphertext_page) {
-		/*
-		 * This is a potential bottleneck, but at least we'll have
-		 * forward progress.
-		 */
-		ciphertext_page = mempool_alloc(f2fs_bounce_page_pool,
-						GFP_NOFS);
-		if (WARN_ON_ONCE(!ciphertext_page))
-			ciphertext_page = mempool_alloc(f2fs_bounce_page_pool,
-							GFP_NOFS | __GFP_WAIT);
-		ctx->flags &= ~F2FS_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
-	} else {
-		ctx->flags |= F2FS_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
+		err = -ENOMEM;
+		goto err_out;
 	}
+
 	ctx->flags |= F2FS_WRITE_PATH_FL;
 	ctx->w.bounce_page = ciphertext_page;
 	ctx->w.control_page = plaintext_page;
 	err = f2fs_page_crypto(ctx, inode, F2FS_ENCRYPT, plaintext_page->index,
 					plaintext_page, ciphertext_page);
-	if (err) {
-		f2fs_release_crypto_ctx(ctx);
-		return ERR_PTR(err);
-	}
+	if (err)
+		goto err_out;
+
 	SetPagePrivate(ciphertext_page);
 	set_page_private(ciphertext_page, (unsigned long)ctx);
 	lock_page(ciphertext_page);
 	return ciphertext_page;
+
+err_out:
+	f2fs_release_crypto_ctx(ctx);
+	return ERR_PTR(err);
 }
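For reference, the reason a plain mempool_free() is now always correct in the release path is that every bounce page comes from f2fs_bounce_page_pool; the alloc_page() fallback, and the F2FS_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL bookkeeping that tracked which free routine to use, are gone. The pool itself is assumed to be a page mempool set up once at initialization, roughly as in the sketch below; the helper names and the preallocation count are illustrative and not taken from the patch.

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mempool.h>

/* Illustrative sketch only, not part of the patch above. */
static mempool_t *f2fs_bounce_page_pool;
static int num_prealloc_crypto_pages = 32;	/* assumed preallocation count */

static int __init f2fs_crypto_pool_init(void)
{
	/* Pool of order-0 pages; bounce pages for encryption are drawn from here. */
	f2fs_bounce_page_pool =
		mempool_create_page_pool(num_prealloc_crypto_pages, 0);
	return f2fs_bounce_page_pool ? 0 : -ENOMEM;
}

static void __exit f2fs_crypto_pool_exit(void)
{
	mempool_destroy(f2fs_bounce_page_pool);
	f2fs_bounce_page_pool = NULL;
}

Note that mempool_alloc() with GFP_NOWAIT can still return NULL once the preallocated pages are exhausted and an opportunistic allocation fails; that is the case the new -ENOMEM/err_out branch above covers.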
 /**