]> git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
Merge remote-tracking branch 'crypto/master'
authorStephen Rothwell <sfr@canb.auug.org.au>
Thu, 11 Feb 2016 01:09:36 +0000 (12:09 +1100)
committerStephen Rothwell <sfr@canb.auug.org.au>
Thu, 11 Feb 2016 01:09:36 +0000 (12:09 +1100)
126 files changed:
Documentation/DocBook/crypto-API.tmpl
Documentation/crypto/api-intro.txt
Documentation/devicetree/bindings/rng/brcm,bcm6368.txt [new file with mode: 0644]
Documentation/devicetree/bindings/sparc_sun_oracle_rng.txt [new file with mode: 0644]
Documentation/devicetree/bindings/vendor-prefixes.txt
arch/x86/crypto/sha-mb/sha1_mb.c
arch/x86/crypto/sha-mb/sha1_mb_mgr_submit_avx2.S
crypto/Kconfig
crypto/Makefile
crypto/ahash.c
crypto/algapi.c
crypto/crc32_generic.c [moved from crypto/crc32.c with 98% similarity]
crypto/crypto_engine.c [new file with mode: 0644]
crypto/drbg.c
crypto/internal.h
crypto/keywrap.c
crypto/mcryptd.c
crypto/pcompress.c [deleted file]
crypto/shash.c
crypto/skcipher.c
crypto/tcrypt.c
crypto/testmgr.c
crypto/testmgr.h
crypto/zlib.c [deleted file]
drivers/block/cryptoloop.c
drivers/block/drbd/drbd_int.h
drivers/block/drbd/drbd_main.c
drivers/block/drbd/drbd_nl.c
drivers/block/drbd/drbd_receiver.c
drivers/block/drbd/drbd_worker.c
drivers/char/hw_random/Kconfig
drivers/char/hw_random/bcm63xx-rng.c
drivers/char/hw_random/n2-drv.c
drivers/crypto/Kconfig
drivers/crypto/atmel-aes.c
drivers/crypto/atmel-sha-regs.h
drivers/crypto/atmel-sha.c
drivers/crypto/caam/ctrl.c
drivers/crypto/caam/regs.h
drivers/crypto/ccp/ccp-crypto-aes-cmac.c
drivers/crypto/ccp/ccp-crypto-sha.c
drivers/crypto/ccp/ccp-crypto.h
drivers/crypto/ixp4xx_crypto.c
drivers/crypto/omap-aes.c
drivers/crypto/qat/qat_common/adf_accel_devices.h
drivers/crypto/qat/qat_common/adf_aer.c
drivers/crypto/qat/qat_common/adf_cfg_user.h
drivers/crypto/qat/qat_common/qat_algs.c
drivers/crypto/s5p-sss.c
drivers/crypto/sahara.c
drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
drivers/md/dm-crypt.c
drivers/net/ppp/ppp_mppe.c
drivers/net/wireless/intersil/orinoco/mic.c
drivers/net/wireless/intersil/orinoco/mic.h
drivers/net/wireless/intersil/orinoco/orinoco.h
drivers/nfc/s3fwrn5/firmware.c
drivers/scsi/iscsi_tcp.c
drivers/scsi/iscsi_tcp.h
drivers/scsi/libiscsi_tcp.c
drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.c
drivers/staging/rtl8192e/rtllib_crypt_tkip.c
drivers/staging/rtl8192e/rtllib_crypt_wep.c
drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_auth.c
drivers/target/iscsi/iscsi_target_login.c
drivers/usb/wusbcore/crypto.c
fs/cifs/cifsencrypt.c
fs/cifs/smbencrypt.c
fs/ecryptfs/crypto.c
fs/ecryptfs/ecryptfs_kernel.h
fs/ecryptfs/inode.c
fs/ecryptfs/keystore.c
fs/ecryptfs/main.c
fs/ecryptfs/mmap.c
fs/ecryptfs/super.c
fs/ext4/crypto.c
fs/ext4/crypto_fname.c
fs/ext4/crypto_key.c
fs/ext4/ext4_crypto.h
fs/f2fs/crypto.c
fs/f2fs/crypto_fname.c
fs/f2fs/crypto_key.c
fs/f2fs/f2fs_crypto.h
fs/nfsd/nfs4recover.c
include/crypto/algapi.h
include/crypto/compress.h [deleted file]
include/crypto/drbg.h
include/crypto/hash.h
include/crypto/internal/aead.h
include/crypto/internal/compress.h [deleted file]
include/crypto/internal/hash.h
include/crypto/skcipher.h
include/linux/crypto.h
include/linux/sunrpc/gss_krb5.h
include/net/sctp/auth.h
include/net/sctp/structs.h
include/net/tcp.h
include/scsi/libiscsi_tcp.h
include/target/iscsi/iscsi_target_core.h
net/bluetooth/smp.c
net/ceph/crypto.c
net/ipv4/tcp.c
net/ipv4/tcp_fastopen.c
net/ipv4/tcp_ipv4.c
net/ipv6/tcp_ipv6.c
net/mac802154/llsec.c
net/mac802154/llsec.h
net/rxrpc/ar-internal.h
net/rxrpc/ar-key.c
net/rxrpc/rxkad.c
net/sctp/auth.c
net/sctp/endpointola.c
net/sctp/sm_make_chunk.c
net/sctp/socket.c
net/sunrpc/auth_gss/gss_krb5_crypto.c
net/sunrpc/auth_gss/gss_krb5_keys.c
net/sunrpc/auth_gss/gss_krb5_mech.c
net/sunrpc/auth_gss/gss_krb5_seqnum.c
net/sunrpc/auth_gss/gss_krb5_wrap.c
net/wireless/lib80211_crypt_tkip.c
net/wireless/lib80211_crypt_wep.c
net/xfrm/xfrm_algo.c
security/keys/encrypted-keys/encrypted.c

index 07df23ea06e4936d6de435ba4c862ffdb4b299d1..866ff082272b42b460e51308111410e7bb7db821 100644 (file)
@@ -1761,19 +1761,6 @@ read(opfd, out, outlen);
 !Finclude/linux/crypto.h crypto_cipher_setkey
 !Finclude/linux/crypto.h crypto_cipher_encrypt_one
 !Finclude/linux/crypto.h crypto_cipher_decrypt_one
-   </sect1>
-   <sect1><title>Synchronous Message Digest API</title>
-!Pinclude/linux/crypto.h Synchronous Message Digest API
-!Finclude/linux/crypto.h crypto_alloc_hash
-!Finclude/linux/crypto.h crypto_free_hash
-!Finclude/linux/crypto.h crypto_has_hash
-!Finclude/linux/crypto.h crypto_hash_blocksize
-!Finclude/linux/crypto.h crypto_hash_digestsize
-!Finclude/linux/crypto.h crypto_hash_init
-!Finclude/linux/crypto.h crypto_hash_update
-!Finclude/linux/crypto.h crypto_hash_final
-!Finclude/linux/crypto.h crypto_hash_digest
-!Finclude/linux/crypto.h crypto_hash_setkey
    </sect1>
    <sect1><title>Message Digest Algorithm Definitions</title>
 !Pinclude/crypto/hash.h Message Digest Algorithm Definitions
index 8b49302712a890365ff3edaa2571971608ccd381..beda682e8d7750492a182380737aee06441cf432 100644 (file)
@@ -49,28 +49,33 @@ under development.
 
 Here's an example of how to use the API:
 
-       #include <linux/crypto.h>
+       #include <crypto/ahash.h>
        #include <linux/err.h>
        #include <linux/scatterlist.h>
        
        struct scatterlist sg[2];
        char result[128];
-       struct crypto_hash *tfm;
-       struct hash_desc desc;
+       struct crypto_ahash *tfm;
+       struct ahash_request *req;
        
-       tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
+       tfm = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm))
                fail();
                
        /* ... set up the scatterlists ... */
 
-       desc.tfm = tfm;
-       desc.flags = 0;
-       
-       if (crypto_hash_digest(&desc, sg, 2, result))
+       req = ahash_request_alloc(tfm, GFP_ATOMIC);
+       if (!req)
                fail();
+
+       ahash_request_set_callback(req, 0, NULL, NULL);
+       ahash_request_set_crypt(req, sg, result, 2);
        
-       crypto_free_hash(tfm);
+       if (crypto_ahash_digest(req))
+               fail();
+
+       ahash_request_free(req);
+       crypto_free_ahash(tfm);
 
     
 Many real examples are available in the regression test module (tcrypt.c).
diff --git a/Documentation/devicetree/bindings/rng/brcm,bcm6368.txt b/Documentation/devicetree/bindings/rng/brcm,bcm6368.txt
new file mode 100644 (file)
index 0000000..4b5ac60
--- /dev/null
@@ -0,0 +1,17 @@
+BCM6368 Random number generator
+
+Required properties:
+
+- compatible : should be "brcm,bcm6368-rng"
+- reg : Specifies base physical address and size of the registers
+- clocks : phandle to clock-controller plus clock-specifier pair
+- clock-names : "ipsec" as a clock name
+
+Example:
+       random: rng@10004180 {
+               compatible = "brcm,bcm6368-rng";
+               reg = <0x10004180 0x14>;
+
+               clocks = <&periph_clk 18>;
+               clock-names = "ipsec";
+       };
diff --git a/Documentation/devicetree/bindings/sparc_sun_oracle_rng.txt b/Documentation/devicetree/bindings/sparc_sun_oracle_rng.txt
new file mode 100644 (file)
index 0000000..b0b2111
--- /dev/null
@@ -0,0 +1,30 @@
+HWRNG support for the n2_rng driver
+
+Required properties:
+- reg          : base address to sample from
+- compatible   : should contain one of the following
+       RNG versions:
+       - 'SUNW,n2-rng' for Niagara 2 Platform (SUN UltraSPARC T2 CPU)
+       - 'SUNW,vf-rng' for Victoria Falls Platform (SUN UltraSPARC T2 Plus CPU)
+       - 'SUNW,kt-rng' for Rainbow/Yosemite Falls Platform (SUN SPARC T3/T4), (UltraSPARC KT/Niagara 3 - development names)
+       more recent systems (after Oracle acquisition of SUN)
+       - 'ORCL,m4-rng' for SPARC T5/M5
+       - 'ORCL,m7-rng' for SPARC T7/M7
+
+Examples:
+/* linux LDOM on SPARC T5-2 */
+Node 0xf029a4f4
+       .node:  f029a4f4
+       rng-#units:  00000002
+       compatible: 'ORCL,m4-rng'
+       reg:  0000000e
+       name: 'random-number-generator'
+
+/* solaris on SPARC M7-8 */
+Node 0xf028c08c
+       rng-#units:  00000003
+       compatible: 'ORCL,m7-rng'
+       reg:  0000000e
+       name:  'random-number-generator'
+
+PS: see as well prtconfs.git by DaveM
index 72e2c5a2b3278facb20378383cbb63baa6485e0f..e00029d66d66d7ab8e0b87f2764ee081d41cecea 100644 (file)
@@ -170,6 +170,7 @@ opencores   OpenCores.org
 option Option NV
 ortustech      Ortus Technology Co., Ltd.
 ovti   OmniVision Technologies
+ORCL   Oracle Corporation
 panasonic      Panasonic Corporation
 parade Parade Technologies Inc.
 pericom        Pericom Technology Inc.
@@ -227,6 +228,7 @@ startek     Startek
 ste    ST-Ericsson
 stericsson     ST-Ericsson
 synology       Synology, Inc.
+SUNW   Sun Microsystems, Inc
 tbs    TBS Technologies
 tcl    Toby Churchill Ltd.
 technologic    Technologic Systems
index a841e9765bd614b17befbcf647319e2020412d88..a8a0224fa0f8a4682f76281034a3172001f50200 100644 (file)
@@ -762,6 +762,38 @@ static int sha1_mb_async_digest(struct ahash_request *req)
        return crypto_ahash_digest(mcryptd_req);
 }
 
+static int sha1_mb_async_export(struct ahash_request *req, void *out)
+{
+       struct ahash_request *mcryptd_req = ahash_request_ctx(req);
+       struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+       struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
+       struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
+
+       memcpy(mcryptd_req, req, sizeof(*req));
+       ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
+       return crypto_ahash_export(mcryptd_req, out);
+}
+
+static int sha1_mb_async_import(struct ahash_request *req, const void *in)
+{
+       struct ahash_request *mcryptd_req = ahash_request_ctx(req);
+       struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+       struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
+       struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
+       struct crypto_shash *child = mcryptd_ahash_child(mcryptd_tfm);
+       struct mcryptd_hash_request_ctx *rctx;
+       struct shash_desc *desc;
+
+       memcpy(mcryptd_req, req, sizeof(*req));
+       ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
+       rctx = ahash_request_ctx(mcryptd_req);
+       desc = &rctx->desc;
+       desc->tfm = child;
+       desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+       return crypto_ahash_import(mcryptd_req, in);
+}
+
 static int sha1_mb_async_init_tfm(struct crypto_tfm *tfm)
 {
        struct mcryptd_ahash *mcryptd_tfm;
@@ -796,8 +828,11 @@ static struct ahash_alg sha1_mb_async_alg = {
        .final          = sha1_mb_async_final,
        .finup          = sha1_mb_async_finup,
        .digest         = sha1_mb_async_digest,
+       .export         = sha1_mb_async_export,
+       .import         = sha1_mb_async_import,
        .halg = {
                .digestsize     = SHA1_DIGEST_SIZE,
+               .statesize      = sizeof(struct sha1_hash_ctx),
                .base = {
                        .cra_name               = "sha1",
                        .cra_driver_name        = "sha1_mb",
index 2ab9560b53c84db34f32334a036111ab3e601ef7..c420d89b175f046491bf0e44d1cf2b22c1541fe2 100644 (file)
@@ -197,7 +197,7 @@ len_is_0:
        vpinsrd  $1, _args_digest+1*32(state , idx, 4), %xmm0, %xmm0
        vpinsrd  $2, _args_digest+2*32(state , idx, 4), %xmm0, %xmm0
        vpinsrd  $3, _args_digest+3*32(state , idx, 4), %xmm0, %xmm0
-       movl    4*32(state, idx, 4), DWORD_tmp
+       movl     _args_digest+4*32(state, idx, 4), DWORD_tmp
 
        vmovdqu  %xmm0, _result_digest(job_rax)
        movl    DWORD_tmp, _result_digest+1*16(job_rax)
index 3be07ad1d80dc835789c92ba020b58809010473a..93a1fdc1feee68c9a8b15bef682886015884ce98 100644 (file)
@@ -84,15 +84,6 @@ config CRYPTO_RNG_DEFAULT
        tristate
        select CRYPTO_DRBG_MENU
 
-config CRYPTO_PCOMP
-       tristate
-       select CRYPTO_PCOMP2
-       select CRYPTO_ALGAPI
-
-config CRYPTO_PCOMP2
-       tristate
-       select CRYPTO_ALGAPI2
-
 config CRYPTO_AKCIPHER2
        tristate
        select CRYPTO_ALGAPI2
@@ -122,7 +113,6 @@ config CRYPTO_MANAGER2
        select CRYPTO_AEAD2
        select CRYPTO_HASH2
        select CRYPTO_BLKCIPHER2
-       select CRYPTO_PCOMP2
        select CRYPTO_AKCIPHER2
 
 config CRYPTO_USER
@@ -227,6 +217,9 @@ config CRYPTO_GLUE_HELPER_X86
        depends on X86
        select CRYPTO_ALGAPI
 
+config CRYPTO_ENGINE
+       tristate
+
 comment "Authenticated Encryption with Associated Data"
 
 config CRYPTO_CCM
@@ -1506,15 +1499,6 @@ config CRYPTO_DEFLATE
 
          You will most probably want this if using IPSec.
 
-config CRYPTO_ZLIB
-       tristate "Zlib compression algorithm"
-       select CRYPTO_PCOMP
-       select ZLIB_INFLATE
-       select ZLIB_DEFLATE
-       select NLATTR
-       help
-         This is the zlib algorithm.
-
 config CRYPTO_LZO
        tristate "LZO compression algorithm"
        select CRYPTO_ALGAPI
@@ -1595,6 +1579,7 @@ endif     # if CRYPTO_DRBG_MENU
 
 config CRYPTO_JITTERENTROPY
        tristate "Jitterentropy Non-Deterministic Random Number Generator"
+       select CRYPTO_RNG
        help
          The Jitterentropy RNG is a noise that is intended
          to provide seed to another RNG. The RNG does not
index 2acdbbd304758986a0b95adbbb628e609c2a88d0..4f4ef7eaae3f27df7768ea8fbe11ad02596ed702 100644 (file)
@@ -7,6 +7,7 @@ crypto-y := api.o cipher.o compress.o memneq.o
 
 obj-$(CONFIG_CRYPTO_WORKQUEUE) += crypto_wq.o
 
+obj-$(CONFIG_CRYPTO_ENGINE) += crypto_engine.o
 obj-$(CONFIG_CRYPTO_FIPS) += fips.o
 
 crypto_algapi-$(CONFIG_PROC_FS) += proc.o
@@ -28,7 +29,6 @@ crypto_hash-y += ahash.o
 crypto_hash-y += shash.o
 obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o
 
-obj-$(CONFIG_CRYPTO_PCOMP2) += pcompress.o
 obj-$(CONFIG_CRYPTO_AKCIPHER2) += akcipher.o
 
 $(obj)/rsapubkey-asn1.o: $(obj)/rsapubkey-asn1.c $(obj)/rsapubkey-asn1.h
@@ -99,10 +99,9 @@ obj-$(CONFIG_CRYPTO_SALSA20) += salsa20_generic.o
 obj-$(CONFIG_CRYPTO_CHACHA20) += chacha20_generic.o
 obj-$(CONFIG_CRYPTO_POLY1305) += poly1305_generic.o
 obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o
-obj-$(CONFIG_CRYPTO_ZLIB) += zlib.o
 obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o
 obj-$(CONFIG_CRYPTO_CRC32C) += crc32c_generic.o
-obj-$(CONFIG_CRYPTO_CRC32) += crc32.o
+obj-$(CONFIG_CRYPTO_CRC32) += crc32_generic.o
 obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif_common.o crct10dif_generic.o
 obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o
 obj-$(CONFIG_CRYPTO_LZO) += lzo.o
index d19b52324cf520ee777743ee895efb2f537f5e62..5fc1f172963dc6914f0f6def8435943acd67dfe7 100644 (file)
@@ -166,24 +166,6 @@ int crypto_ahash_walk_first(struct ahash_request *req,
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_walk_first);
 
-int crypto_hash_walk_first_compat(struct hash_desc *hdesc,
-                                 struct crypto_hash_walk *walk,
-                                 struct scatterlist *sg, unsigned int len)
-{
-       walk->total = len;
-
-       if (!walk->total) {
-               walk->entrylen = 0;
-               return 0;
-       }
-
-       walk->alignmask = crypto_hash_alignmask(hdesc->tfm);
-       walk->sg = sg;
-       walk->flags = hdesc->flags & CRYPTO_TFM_REQ_MASK;
-
-       return hash_walk_new_entry(walk);
-}
-
 static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
                                unsigned int keylen)
 {
@@ -542,6 +524,12 @@ struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
 }
 EXPORT_SYMBOL_GPL(crypto_alloc_ahash);
 
+int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
+{
+       return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask);
+}
+EXPORT_SYMBOL_GPL(crypto_has_ahash);
+
 static int ahash_prepare_alg(struct ahash_alg *alg)
 {
        struct crypto_alg *base = &alg->halg.base;
index 7be76aa315796dfad085ad74be5da35204bda9e6..731255a6104f7e3482c584e4f7b737b4d0b94eef 100644 (file)
@@ -987,6 +987,21 @@ unsigned int crypto_alg_extsize(struct crypto_alg *alg)
 }
 EXPORT_SYMBOL_GPL(crypto_alg_extsize);
 
+int crypto_type_has_alg(const char *name, const struct crypto_type *frontend,
+                       u32 type, u32 mask)
+{
+       int ret = 0;
+       struct crypto_alg *alg = crypto_find_alg(name, frontend, type, mask);
+
+       if (!IS_ERR(alg)) {
+               crypto_mod_put(alg);
+               ret = 1;
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(crypto_type_has_alg);
+
 static int __init crypto_algapi_init(void)
 {
        crypto_init_proc();
similarity index 98%
rename from crypto/crc32.c
rename to crypto/crc32_generic.c
index 187ded28cb0bd76825475dfd3b4684d8043de752..aa2a25fc7482a28d0ef3fcb22cdd4edabeed8d98 100644 (file)
@@ -131,7 +131,7 @@ static struct shash_alg alg = {
        .digestsize     = CHKSUM_DIGEST_SIZE,
        .base           = {
                .cra_name               = "crc32",
-               .cra_driver_name        = "crc32-table",
+               .cra_driver_name        = "crc32-generic",
                .cra_priority           = 100,
                .cra_blocksize          = CHKSUM_BLOCK_SIZE,
                .cra_ctxsize            = sizeof(u32),
@@ -157,3 +157,4 @@ MODULE_AUTHOR("Alexander Boyko <alexander_boyko@xyratex.com>");
 MODULE_DESCRIPTION("CRC32 calculations wrapper for lib/crc32");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_CRYPTO("crc32");
+MODULE_ALIAS_CRYPTO("crc32-generic");
diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c
new file mode 100644 (file)
index 0000000..a55c82d
--- /dev/null
@@ -0,0 +1,355 @@
+/*
+ * Handle async block request by crypto hardware engine.
+ *
+ * Copyright (C) 2016 Linaro, Inc.
+ *
+ * Author: Baolin Wang <baolin.wang@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/delay.h>
+#include "internal.h"
+
+#define CRYPTO_ENGINE_MAX_QLEN 10
+
+void crypto_finalize_request(struct crypto_engine *engine,
+                            struct ablkcipher_request *req, int err);
+
+/**
+ * crypto_pump_requests - dequeue one request from engine queue to process
+ * @engine: the hardware engine
+ * @in_kthread: true if we are in the context of the request pump thread
+ *
+ * This function checks if there is any request in the engine queue that
+ * needs processing and if so call out to the driver to initialize hardware
+ * and handle each request.
+ */
+static void crypto_pump_requests(struct crypto_engine *engine,
+                                bool in_kthread)
+{
+       struct crypto_async_request *async_req, *backlog;
+       struct ablkcipher_request *req;
+       unsigned long flags;
+       bool was_busy = false;
+       int ret;
+
+       spin_lock_irqsave(&engine->queue_lock, flags);
+
+       /* Make sure we are not already running a request */
+       if (engine->cur_req)
+               goto out;
+
+       /* If another context is idling then defer */
+       if (engine->idling) {
+               queue_kthread_work(&engine->kworker, &engine->pump_requests);
+               goto out;
+       }
+
+       /* Check if the engine queue is idle */
+       if (!crypto_queue_len(&engine->queue) || !engine->running) {
+               if (!engine->busy)
+                       goto out;
+
+               /* Only do teardown in the thread */
+               if (!in_kthread) {
+                       queue_kthread_work(&engine->kworker,
+                                          &engine->pump_requests);
+                       goto out;
+               }
+
+               engine->busy = false;
+               engine->idling = true;
+               spin_unlock_irqrestore(&engine->queue_lock, flags);
+
+               if (engine->unprepare_crypt_hardware &&
+                   engine->unprepare_crypt_hardware(engine))
+                       pr_err("failed to unprepare crypt hardware\n");
+
+               spin_lock_irqsave(&engine->queue_lock, flags);
+               engine->idling = false;
+               goto out;
+       }
+
+       /* Get the fist request from the engine queue to handle */
+       backlog = crypto_get_backlog(&engine->queue);
+       async_req = crypto_dequeue_request(&engine->queue);
+       if (!async_req)
+               goto out;
+
+       req = ablkcipher_request_cast(async_req);
+
+       engine->cur_req = req;
+       if (backlog)
+               backlog->complete(backlog, -EINPROGRESS);
+
+       if (engine->busy)
+               was_busy = true;
+       else
+               engine->busy = true;
+
+       spin_unlock_irqrestore(&engine->queue_lock, flags);
+
+       /* Until here we get the request need to be encrypted successfully */
+       if (!was_busy && engine->prepare_crypt_hardware) {
+               ret = engine->prepare_crypt_hardware(engine);
+               if (ret) {
+                       pr_err("failed to prepare crypt hardware\n");
+                       goto req_err;
+               }
+       }
+
+       if (engine->prepare_request) {
+               ret = engine->prepare_request(engine, engine->cur_req);
+               if (ret) {
+                       pr_err("failed to prepare request: %d\n", ret);
+                       goto req_err;
+               }
+               engine->cur_req_prepared = true;
+       }
+
+       ret = engine->crypt_one_request(engine, engine->cur_req);
+       if (ret) {
+               pr_err("failed to crypt one request from queue\n");
+               goto req_err;
+       }
+       return;
+
+req_err:
+       crypto_finalize_request(engine, engine->cur_req, ret);
+       return;
+
+out:
+       spin_unlock_irqrestore(&engine->queue_lock, flags);
+}
+
+static void crypto_pump_work(struct kthread_work *work)
+{
+       struct crypto_engine *engine =
+               container_of(work, struct crypto_engine, pump_requests);
+
+       crypto_pump_requests(engine, true);
+}
+
+/**
+ * crypto_transfer_request - transfer the new request into the engine queue
+ * @engine: the hardware engine
+ * @req: the request need to be listed into the engine queue
+ */
+int crypto_transfer_request(struct crypto_engine *engine,
+                           struct ablkcipher_request *req, bool need_pump)
+{
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&engine->queue_lock, flags);
+
+       if (!engine->running) {
+               spin_unlock_irqrestore(&engine->queue_lock, flags);
+               return -ESHUTDOWN;
+       }
+
+       ret = ablkcipher_enqueue_request(&engine->queue, req);
+
+       if (!engine->busy && need_pump)
+               queue_kthread_work(&engine->kworker, &engine->pump_requests);
+
+       spin_unlock_irqrestore(&engine->queue_lock, flags);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(crypto_transfer_request);
+
+/**
+ * crypto_transfer_request_to_engine - transfer one request to list into the
+ * engine queue
+ * @engine: the hardware engine
+ * @req: the request need to be listed into the engine queue
+ */
+int crypto_transfer_request_to_engine(struct crypto_engine *engine,
+                                     struct ablkcipher_request *req)
+{
+       return crypto_transfer_request(engine, req, true);
+}
+EXPORT_SYMBOL_GPL(crypto_transfer_request_to_engine);
+
+/**
+ * crypto_finalize_request - finalize one request if the request is done
+ * @engine: the hardware engine
+ * @req: the request need to be finalized
+ * @err: error number
+ */
+void crypto_finalize_request(struct crypto_engine *engine,
+                            struct ablkcipher_request *req, int err)
+{
+       unsigned long flags;
+       bool finalize_cur_req = false;
+       int ret;
+
+       spin_lock_irqsave(&engine->queue_lock, flags);
+       if (engine->cur_req == req)
+               finalize_cur_req = true;
+       spin_unlock_irqrestore(&engine->queue_lock, flags);
+
+       if (finalize_cur_req) {
+               if (engine->cur_req_prepared && engine->unprepare_request) {
+                       ret = engine->unprepare_request(engine, req);
+                       if (ret)
+                               pr_err("failed to unprepare request\n");
+               }
+
+               spin_lock_irqsave(&engine->queue_lock, flags);
+               engine->cur_req = NULL;
+               engine->cur_req_prepared = false;
+               spin_unlock_irqrestore(&engine->queue_lock, flags);
+       }
+
+       req->base.complete(&req->base, err);
+
+       queue_kthread_work(&engine->kworker, &engine->pump_requests);
+}
+EXPORT_SYMBOL_GPL(crypto_finalize_request);
+
+/**
+ * crypto_engine_start - start the hardware engine
+ * @engine: the hardware engine need to be started
+ *
+ * Return 0 on success, else on fail.
+ */
+int crypto_engine_start(struct crypto_engine *engine)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&engine->queue_lock, flags);
+
+       if (engine->running || engine->busy) {
+               spin_unlock_irqrestore(&engine->queue_lock, flags);
+               return -EBUSY;
+       }
+
+       engine->running = true;
+       spin_unlock_irqrestore(&engine->queue_lock, flags);
+
+       queue_kthread_work(&engine->kworker, &engine->pump_requests);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(crypto_engine_start);
+
+/**
+ * crypto_engine_stop - stop the hardware engine
+ * @engine: the hardware engine need to be stopped
+ *
+ * Return 0 on success, else on fail.
+ */
+int crypto_engine_stop(struct crypto_engine *engine)
+{
+       unsigned long flags;
+       unsigned limit = 500;
+       int ret = 0;
+
+       spin_lock_irqsave(&engine->queue_lock, flags);
+
+       /*
+        * If the engine queue is not empty or the engine is on busy state,
+        * we need to wait for a while to pump the requests of engine queue.
+        */
+       while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
+               spin_unlock_irqrestore(&engine->queue_lock, flags);
+               msleep(20);
+               spin_lock_irqsave(&engine->queue_lock, flags);
+       }
+
+       if (crypto_queue_len(&engine->queue) || engine->busy)
+               ret = -EBUSY;
+       else
+               engine->running = false;
+
+       spin_unlock_irqrestore(&engine->queue_lock, flags);
+
+       if (ret)
+               pr_warn("could not stop engine\n");
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(crypto_engine_stop);
+
+/**
+ * crypto_engine_alloc_init - allocate crypto hardware engine structure and
+ * initialize it.
+ * @dev: the device attached with one hardware engine
+ * @rt: whether this queue is set to run as a realtime task
+ *
+ * This must be called from context that can sleep.
+ * Return: the crypto engine structure on success, else NULL.
+ */
+struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
+{
+       struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
+       struct crypto_engine *engine;
+
+       if (!dev)
+               return NULL;
+
+       engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
+       if (!engine)
+               return NULL;
+
+       engine->rt = rt;
+       engine->running = false;
+       engine->busy = false;
+       engine->idling = false;
+       engine->cur_req_prepared = false;
+       engine->priv_data = dev;
+       snprintf(engine->name, sizeof(engine->name),
+                "%s-engine", dev_name(dev));
+
+       crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
+       spin_lock_init(&engine->queue_lock);
+
+       init_kthread_worker(&engine->kworker);
+       engine->kworker_task = kthread_run(kthread_worker_fn,
+                                          &engine->kworker, "%s",
+                                          engine->name);
+       if (IS_ERR(engine->kworker_task)) {
+               dev_err(dev, "failed to create crypto request pump task\n");
+               return NULL;
+       }
+       init_kthread_work(&engine->pump_requests, crypto_pump_work);
+
+       if (engine->rt) {
+               dev_info(dev, "will run requests pump with realtime priority\n");
+               sched_setscheduler(engine->kworker_task, SCHED_FIFO, &param);
+       }
+
+       return engine;
+}
+EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);
+
+/**
+ * crypto_engine_exit - free the resources of hardware engine when exit
+ * @engine: the hardware engine need to be freed
+ *
+ * Return 0 for success.
+ */
+int crypto_engine_exit(struct crypto_engine *engine)
+{
+       int ret;
+
+       ret = crypto_engine_stop(engine);
+       if (ret)
+               return ret;
+
+       flush_kthread_worker(&engine->kworker);
+       kthread_stop(engine->kworker_task);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(crypto_engine_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Crypto hardware engine framework");
index ab6ef1d0856896e140580b93eb1ee5051b4e76ef..1b86310db7b1e9aca14e5c4151a34ccdf6c375cd 100644 (file)
@@ -219,48 +219,6 @@ static inline unsigned short drbg_sec_strength(drbg_flag_t flags)
        }
 }
 
-/*
- * FIPS 140-2 continuous self test
- * The test is performed on the result of one round of the output
- * function. Thus, the function implicitly knows the size of the
- * buffer.
- *
- * @drbg DRBG handle
- * @buf output buffer of random data to be checked
- *
- * return:
- *     true on success
- *     false on error
- */
-static bool drbg_fips_continuous_test(struct drbg_state *drbg,
-                                     const unsigned char *buf)
-{
-#ifdef CONFIG_CRYPTO_FIPS
-       int ret = 0;
-       /* skip test if we test the overall system */
-       if (list_empty(&drbg->test_data.list))
-               return true;
-       /* only perform test in FIPS mode */
-       if (0 == fips_enabled)
-               return true;
-       if (!drbg->fips_primed) {
-               /* Priming of FIPS test */
-               memcpy(drbg->prev, buf, drbg_blocklen(drbg));
-               drbg->fips_primed = true;
-               /* return false due to priming, i.e. another round is needed */
-               return false;
-       }
-       ret = memcmp(drbg->prev, buf, drbg_blocklen(drbg));
-       if (!ret)
-               panic("DRBG continuous self test failed\n");
-       memcpy(drbg->prev, buf, drbg_blocklen(drbg));
-       /* the test shall pass when the two compared values are not equal */
-       return ret != 0;
-#else
-       return true;
-#endif /* CONFIG_CRYPTO_FIPS */
-}
-
 /*
  * Convert an integer into a byte representation of this integer.
  * The byte representation is big-endian
@@ -603,11 +561,6 @@ static int drbg_ctr_generate(struct drbg_state *drbg,
                }
                outlen = (drbg_blocklen(drbg) < (buflen - len)) ?
                          drbg_blocklen(drbg) : (buflen - len);
-               if (!drbg_fips_continuous_test(drbg, drbg->scratchpad)) {
-                       /* 10.2.1.5.2 step 6 */
-                       crypto_inc(drbg->V, drbg_blocklen(drbg));
-                       continue;
-               }
                /* 10.2.1.5.2 step 4.3 */
                memcpy(buf + len, drbg->scratchpad, outlen);
                len += outlen;
@@ -733,8 +686,6 @@ static int drbg_hmac_generate(struct drbg_state *drbg,
                        return ret;
                outlen = (drbg_blocklen(drbg) < (buflen - len)) ?
                          drbg_blocklen(drbg) : (buflen - len);
-               if (!drbg_fips_continuous_test(drbg, drbg->V))
-                       continue;
 
                /* 10.1.2.5 step 4.2 */
                memcpy(buf + len, drbg->V, outlen);
@@ -963,10 +914,6 @@ static int drbg_hash_hashgen(struct drbg_state *drbg,
                }
                outlen = (drbg_blocklen(drbg) < (buflen - len)) ?
                          drbg_blocklen(drbg) : (buflen - len);
-               if (!drbg_fips_continuous_test(drbg, dst)) {
-                       crypto_inc(src, drbg_statelen(drbg));
-                       continue;
-               }
                /* 10.1.1.4 step hashgen 4.2 */
                memcpy(buf + len, dst, outlen);
                len += outlen;
@@ -1201,11 +1148,6 @@ static inline void drbg_dealloc_state(struct drbg_state *drbg)
        drbg->reseed_ctr = 0;
        drbg->d_ops = NULL;
        drbg->core = NULL;
-#ifdef CONFIG_CRYPTO_FIPS
-       kzfree(drbg->prev);
-       drbg->prev = NULL;
-       drbg->fips_primed = false;
-#endif
 }
 
 /*
@@ -1244,12 +1186,6 @@ static inline int drbg_alloc_state(struct drbg_state *drbg)
        drbg->C = kmalloc(drbg_statelen(drbg), GFP_KERNEL);
        if (!drbg->C)
                goto err;
-#ifdef CONFIG_CRYPTO_FIPS
-       drbg->prev = kmalloc(drbg_blocklen(drbg), GFP_KERNEL);
-       if (!drbg->prev)
-               goto err;
-       drbg->fips_primed = false;
-#endif
        /* scratchpad is only generated for CTR and Hash */
        if (drbg->core->flags & DRBG_HMAC)
                sb_size = 0;
index 00e42a3ed81431638b78a8df1616de978da1eaa6..7eefcdb00227740e39a22d61515d016b4f36caa0 100644 (file)
@@ -104,6 +104,9 @@ int crypto_probing_notify(unsigned long val, void *v);
 
 unsigned int crypto_alg_extsize(struct crypto_alg *alg);
 
+int crypto_type_has_alg(const char *name, const struct crypto_type *frontend,
+                       u32 type, u32 mask);
+
 static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg)
 {
        atomic_inc(&alg->cra_refcnt);
index b1d106ce55f3d9c98bba9a54f301d4d94f12d982..72014f963ba7a65cbb3dd856a63f25ab647c3209 100644 (file)
@@ -212,7 +212,7 @@ static int crypto_kw_decrypt(struct blkcipher_desc *desc,
                          SEMIBSIZE))
                ret = -EBADMSG;
 
-       memzero_explicit(&block, sizeof(struct crypto_kw_block));
+       memzero_explicit(block, sizeof(struct crypto_kw_block));
 
        return ret;
 }
@@ -297,7 +297,7 @@ static int crypto_kw_encrypt(struct blkcipher_desc *desc,
        /* establish the IV for the caller to pick up */
        memcpy(desc->info, block->A, SEMIBSIZE);
 
-       memzero_explicit(&block, sizeof(struct crypto_kw_block));
+       memzero_explicit(block, sizeof(struct crypto_kw_block));
 
        return 0;
 }
index f78d4fc4e38a3fb842463229b3fa013d7c0a903c..c4eb9da49d4f55e856ea4199fd0442af77d9fd92 100644 (file)
@@ -522,6 +522,7 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
        inst->alg.halg.base.cra_flags = type;
 
        inst->alg.halg.digestsize = salg->digestsize;
+       inst->alg.halg.statesize = salg->statesize;
        inst->alg.halg.base.cra_ctxsize = sizeof(struct mcryptd_hash_ctx);
 
        inst->alg.halg.base.cra_init = mcryptd_hash_init_tfm;
diff --git a/crypto/pcompress.c b/crypto/pcompress.c
deleted file mode 100644 (file)
index 7a13b40..0000000
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Cryptographic API.
- *
- * Partial (de)compression operations.
- *
- * Copyright 2008 Sony Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.
- * If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/crypto.h>
-#include <linux/errno.h>
-#include <linux/module.h>
-#include <linux/seq_file.h>
-#include <linux/string.h>
-#include <linux/cryptouser.h>
-#include <net/netlink.h>
-
-#include <crypto/compress.h>
-#include <crypto/internal/compress.h>
-
-#include "internal.h"
-
-
-static int crypto_pcomp_init(struct crypto_tfm *tfm, u32 type, u32 mask)
-{
-       return 0;
-}
-
-static int crypto_pcomp_init_tfm(struct crypto_tfm *tfm)
-{
-       return 0;
-}
-
-#ifdef CONFIG_NET
-static int crypto_pcomp_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
-       struct crypto_report_comp rpcomp;
-
-       strncpy(rpcomp.type, "pcomp", sizeof(rpcomp.type));
-       if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
-                   sizeof(struct crypto_report_comp), &rpcomp))
-               goto nla_put_failure;
-       return 0;
-
-nla_put_failure:
-       return -EMSGSIZE;
-}
-#else
-static int crypto_pcomp_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
-       return -ENOSYS;
-}
-#endif
-
-static void crypto_pcomp_show(struct seq_file *m, struct crypto_alg *alg)
-       __attribute__ ((unused));
-static void crypto_pcomp_show(struct seq_file *m, struct crypto_alg *alg)
-{
-       seq_printf(m, "type         : pcomp\n");
-}
-
-static const struct crypto_type crypto_pcomp_type = {
-       .extsize        = crypto_alg_extsize,
-       .init           = crypto_pcomp_init,
-       .init_tfm       = crypto_pcomp_init_tfm,
-#ifdef CONFIG_PROC_FS
-       .show           = crypto_pcomp_show,
-#endif
-       .report         = crypto_pcomp_report,
-       .maskclear      = ~CRYPTO_ALG_TYPE_MASK,
-       .maskset        = CRYPTO_ALG_TYPE_MASK,
-       .type           = CRYPTO_ALG_TYPE_PCOMPRESS,
-       .tfmsize        = offsetof(struct crypto_pcomp, base),
-};
-
-struct crypto_pcomp *crypto_alloc_pcomp(const char *alg_name, u32 type,
-                                       u32 mask)
-{
-       return crypto_alloc_tfm(alg_name, &crypto_pcomp_type, type, mask);
-}
-EXPORT_SYMBOL_GPL(crypto_alloc_pcomp);
-
-int crypto_register_pcomp(struct pcomp_alg *alg)
-{
-       struct crypto_alg *base = &alg->base;
-
-       base->cra_type = &crypto_pcomp_type;
-       base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
-       base->cra_flags |= CRYPTO_ALG_TYPE_PCOMPRESS;
-
-       return crypto_register_alg(base);
-}
-EXPORT_SYMBOL_GPL(crypto_register_pcomp);
-
-int crypto_unregister_pcomp(struct pcomp_alg *alg)
-{
-       return crypto_unregister_alg(&alg->base);
-}
-EXPORT_SYMBOL_GPL(crypto_unregister_pcomp);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Partial (de)compression type");
-MODULE_AUTHOR("Sony Corporation");
index 359754591653c7b265a5e6b89d70e9a9d16d33e3..a051541a4a1718c996ba7a7b678b5b9e5e857488 100644 (file)
@@ -368,151 +368,6 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
        return 0;
 }
 
-static int shash_compat_setkey(struct crypto_hash *tfm, const u8 *key,
-                              unsigned int keylen)
-{
-       struct shash_desc **descp = crypto_hash_ctx(tfm);
-       struct shash_desc *desc = *descp;
-
-       return crypto_shash_setkey(desc->tfm, key, keylen);
-}
-
-static int shash_compat_init(struct hash_desc *hdesc)
-{
-       struct shash_desc **descp = crypto_hash_ctx(hdesc->tfm);
-       struct shash_desc *desc = *descp;
-
-       desc->flags = hdesc->flags;
-
-       return crypto_shash_init(desc);
-}
-
-static int shash_compat_update(struct hash_desc *hdesc, struct scatterlist *sg,
-                              unsigned int len)
-{
-       struct shash_desc **descp = crypto_hash_ctx(hdesc->tfm);
-       struct shash_desc *desc = *descp;
-       struct crypto_hash_walk walk;
-       int nbytes;
-
-       for (nbytes = crypto_hash_walk_first_compat(hdesc, &walk, sg, len);
-            nbytes > 0; nbytes = crypto_hash_walk_done(&walk, nbytes))
-               nbytes = crypto_shash_update(desc, walk.data, nbytes);
-
-       return nbytes;
-}
-
-static int shash_compat_final(struct hash_desc *hdesc, u8 *out)
-{
-       struct shash_desc **descp = crypto_hash_ctx(hdesc->tfm);
-
-       return crypto_shash_final(*descp, out);
-}
-
-static int shash_compat_digest(struct hash_desc *hdesc, struct scatterlist *sg,
-                              unsigned int nbytes, u8 *out)
-{
-       unsigned int offset = sg->offset;
-       int err;
-
-       if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) {
-               struct shash_desc **descp = crypto_hash_ctx(hdesc->tfm);
-               struct shash_desc *desc = *descp;
-               void *data;
-
-               desc->flags = hdesc->flags;
-
-               data = kmap_atomic(sg_page(sg));
-               err = crypto_shash_digest(desc, data + offset, nbytes, out);
-               kunmap_atomic(data);
-               crypto_yield(desc->flags);
-               goto out;
-       }
-
-       err = shash_compat_init(hdesc);
-       if (err)
-               goto out;
-
-       err = shash_compat_update(hdesc, sg, nbytes);
-       if (err)
-               goto out;
-
-       err = shash_compat_final(hdesc, out);
-
-out:
-       return err;
-}
-
-static void crypto_exit_shash_ops_compat(struct crypto_tfm *tfm)
-{
-       struct shash_desc **descp = crypto_tfm_ctx(tfm);
-       struct shash_desc *desc = *descp;
-
-       crypto_free_shash(desc->tfm);
-       kzfree(desc);
-}
-
-static int crypto_init_shash_ops_compat(struct crypto_tfm *tfm)
-{
-       struct hash_tfm *crt = &tfm->crt_hash;
-       struct crypto_alg *calg = tfm->__crt_alg;
-       struct shash_alg *alg = __crypto_shash_alg(calg);
-       struct shash_desc **descp = crypto_tfm_ctx(tfm);
-       struct crypto_shash *shash;
-       struct shash_desc *desc;
-
-       if (!crypto_mod_get(calg))
-               return -EAGAIN;
-
-       shash = crypto_create_tfm(calg, &crypto_shash_type);
-       if (IS_ERR(shash)) {
-               crypto_mod_put(calg);
-               return PTR_ERR(shash);
-       }
-
-       desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(shash),
-                      GFP_KERNEL);
-       if (!desc) {
-               crypto_free_shash(shash);
-               return -ENOMEM;
-       }
-
-       *descp = desc;
-       desc->tfm = shash;
-       tfm->exit = crypto_exit_shash_ops_compat;
-
-       crt->init = shash_compat_init;
-       crt->update = shash_compat_update;
-       crt->final  = shash_compat_final;
-       crt->digest = shash_compat_digest;
-       crt->setkey = shash_compat_setkey;
-
-       crt->digestsize = alg->digestsize;
-
-       return 0;
-}
-
-static int crypto_init_shash_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
-{
-       switch (mask & CRYPTO_ALG_TYPE_MASK) {
-       case CRYPTO_ALG_TYPE_HASH_MASK:
-               return crypto_init_shash_ops_compat(tfm);
-       }
-
-       return -EINVAL;
-}
-
-static unsigned int crypto_shash_ctxsize(struct crypto_alg *alg, u32 type,
-                                        u32 mask)
-{
-       switch (mask & CRYPTO_ALG_TYPE_MASK) {
-       case CRYPTO_ALG_TYPE_HASH_MASK:
-               return sizeof(struct shash_desc *);
-       }
-
-       return 0;
-}
-
 static int crypto_shash_init_tfm(struct crypto_tfm *tfm)
 {
        struct crypto_shash *hash = __crypto_shash_cast(tfm);
@@ -559,9 +414,7 @@ static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg)
 }
 
 static const struct crypto_type crypto_shash_type = {
-       .ctxsize = crypto_shash_ctxsize,
        .extsize = crypto_alg_extsize,
-       .init = crypto_init_shash_ops,
        .init_tfm = crypto_shash_init_tfm,
 #ifdef CONFIG_PROC_FS
        .show = crypto_shash_show,
index d199c0b1751c91cbcc5a978aadb97cdf609e33ab..69230e9d4ac99499f621ff925f3f4265fa539f41 100644 (file)
@@ -118,7 +118,7 @@ static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
        skcipher->decrypt = skcipher_decrypt_blkcipher;
 
        skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher);
-       skcipher->has_setkey = calg->cra_blkcipher.max_keysize;
+       skcipher->keysize = calg->cra_blkcipher.max_keysize;
 
        return 0;
 }
@@ -211,7 +211,7 @@ static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
        skcipher->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
        skcipher->reqsize = crypto_ablkcipher_reqsize(ablkcipher) +
                            sizeof(struct ablkcipher_request);
-       skcipher->has_setkey = calg->cra_ablkcipher.max_keysize;
+       skcipher->keysize = calg->cra_ablkcipher.max_keysize;
 
        return 0;
 }
index 270bc4b82bd9aaf2625bbeddf426f0cfbfea0f7e..579dce07146389f9a38d036fe48b00f347d7f070 100644 (file)
@@ -554,164 +554,6 @@ out:
        crypto_free_blkcipher(tfm);
 }
 
-static int test_hash_jiffies_digest(struct hash_desc *desc,
-                                   struct scatterlist *sg, int blen,
-                                   char *out, int secs)
-{
-       unsigned long start, end;
-       int bcount;
-       int ret;
-
-       for (start = jiffies, end = start + secs * HZ, bcount = 0;
-            time_before(jiffies, end); bcount++) {
-               ret = crypto_hash_digest(desc, sg, blen, out);
-               if (ret)
-                       return ret;
-       }
-
-       printk("%6u opers/sec, %9lu bytes/sec\n",
-              bcount / secs, ((long)bcount * blen) / secs);
-
-       return 0;
-}
-
-static int test_hash_jiffies(struct hash_desc *desc, struct scatterlist *sg,
-                            int blen, int plen, char *out, int secs)
-{
-       unsigned long start, end;
-       int bcount, pcount;
-       int ret;
-
-       if (plen == blen)
-               return test_hash_jiffies_digest(desc, sg, blen, out, secs);
-
-       for (start = jiffies, end = start + secs * HZ, bcount = 0;
-            time_before(jiffies, end); bcount++) {
-               ret = crypto_hash_init(desc);
-               if (ret)
-                       return ret;
-               for (pcount = 0; pcount < blen; pcount += plen) {
-                       ret = crypto_hash_update(desc, sg, plen);
-                       if (ret)
-                               return ret;
-               }
-               /* we assume there is enough space in 'out' for the result */
-               ret = crypto_hash_final(desc, out);
-               if (ret)
-                       return ret;
-       }
-
-       printk("%6u opers/sec, %9lu bytes/sec\n",
-              bcount / secs, ((long)bcount * blen) / secs);
-
-       return 0;
-}
-
-static int test_hash_cycles_digest(struct hash_desc *desc,
-                                  struct scatterlist *sg, int blen, char *out)
-{
-       unsigned long cycles = 0;
-       int i;
-       int ret;
-
-       local_irq_disable();
-
-       /* Warm-up run. */
-       for (i = 0; i < 4; i++) {
-               ret = crypto_hash_digest(desc, sg, blen, out);
-               if (ret)
-                       goto out;
-       }
-
-       /* The real thing. */
-       for (i = 0; i < 8; i++) {
-               cycles_t start, end;
-
-               start = get_cycles();
-
-               ret = crypto_hash_digest(desc, sg, blen, out);
-               if (ret)
-                       goto out;
-
-               end = get_cycles();
-
-               cycles += end - start;
-       }
-
-out:
-       local_irq_enable();
-
-       if (ret)
-               return ret;
-
-       printk("%6lu cycles/operation, %4lu cycles/byte\n",
-              cycles / 8, cycles / (8 * blen));
-
-       return 0;
-}
-
-static int test_hash_cycles(struct hash_desc *desc, struct scatterlist *sg,
-                           int blen, int plen, char *out)
-{
-       unsigned long cycles = 0;
-       int i, pcount;
-       int ret;
-
-       if (plen == blen)
-               return test_hash_cycles_digest(desc, sg, blen, out);
-
-       local_irq_disable();
-
-       /* Warm-up run. */
-       for (i = 0; i < 4; i++) {
-               ret = crypto_hash_init(desc);
-               if (ret)
-                       goto out;
-               for (pcount = 0; pcount < blen; pcount += plen) {
-                       ret = crypto_hash_update(desc, sg, plen);
-                       if (ret)
-                               goto out;
-               }
-               ret = crypto_hash_final(desc, out);
-               if (ret)
-                       goto out;
-       }
-
-       /* The real thing. */
-       for (i = 0; i < 8; i++) {
-               cycles_t start, end;
-
-               start = get_cycles();
-
-               ret = crypto_hash_init(desc);
-               if (ret)
-                       goto out;
-               for (pcount = 0; pcount < blen; pcount += plen) {
-                       ret = crypto_hash_update(desc, sg, plen);
-                       if (ret)
-                               goto out;
-               }
-               ret = crypto_hash_final(desc, out);
-               if (ret)
-                       goto out;
-
-               end = get_cycles();
-
-               cycles += end - start;
-       }
-
-out:
-       local_irq_enable();
-
-       if (ret)
-               return ret;
-
-       printk("%6lu cycles/operation, %4lu cycles/byte\n",
-              cycles / 8, cycles / (8 * blen));
-
-       return 0;
-}
-
 static void test_hash_sg_init(struct scatterlist *sg)
 {
        int i;
@@ -723,69 +565,6 @@ static void test_hash_sg_init(struct scatterlist *sg)
        }
 }
 
-static void test_hash_speed(const char *algo, unsigned int secs,
-                           struct hash_speed *speed)
-{
-       struct scatterlist sg[TVMEMSIZE];
-       struct crypto_hash *tfm;
-       struct hash_desc desc;
-       static char output[1024];
-       int i;
-       int ret;
-
-       tfm = crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC);
-
-       if (IS_ERR(tfm)) {
-               printk(KERN_ERR "failed to load transform for %s: %ld\n", algo,
-                      PTR_ERR(tfm));
-               return;
-       }
-
-       printk(KERN_INFO "\ntesting speed of %s (%s)\n", algo,
-                       get_driver_name(crypto_hash, tfm));
-
-       desc.tfm = tfm;
-       desc.flags = 0;
-
-       if (crypto_hash_digestsize(tfm) > sizeof(output)) {
-               printk(KERN_ERR "digestsize(%u) > outputbuffer(%zu)\n",
-                      crypto_hash_digestsize(tfm), sizeof(output));
-               goto out;
-       }
-
-       test_hash_sg_init(sg);
-       for (i = 0; speed[i].blen != 0; i++) {
-               if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) {
-                       printk(KERN_ERR
-                              "template (%u) too big for tvmem (%lu)\n",
-                              speed[i].blen, TVMEMSIZE * PAGE_SIZE);
-                       goto out;
-               }
-
-               if (speed[i].klen)
-                       crypto_hash_setkey(tfm, tvmem[0], speed[i].klen);
-
-               printk(KERN_INFO "test%3u "
-                      "(%5u byte blocks,%5u bytes per update,%4u updates): ",
-                      i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen);
-
-               if (secs)
-                       ret = test_hash_jiffies(&desc, sg, speed[i].blen,
-                                               speed[i].plen, output, secs);
-               else
-                       ret = test_hash_cycles(&desc, sg, speed[i].blen,
-                                              speed[i].plen, output);
-
-               if (ret) {
-                       printk(KERN_ERR "hashing failed ret=%d\n", ret);
-                       break;
-               }
-       }
-
-out:
-       crypto_free_hash(tfm);
-}
-
 static inline int do_one_ahash_op(struct ahash_request *req, int ret)
 {
        if (ret == -EINPROGRESS || ret == -EBUSY) {
@@ -945,8 +724,8 @@ out:
        return 0;
 }
 
-static void test_ahash_speed(const char *algo, unsigned int secs,
-                            struct hash_speed *speed)
+static void test_ahash_speed_common(const char *algo, unsigned int secs,
+                                   struct hash_speed *speed, unsigned mask)
 {
        struct scatterlist sg[TVMEMSIZE];
        struct tcrypt_result tresult;
@@ -955,7 +734,7 @@ static void test_ahash_speed(const char *algo, unsigned int secs,
        char *output;
        int i, ret;
 
-       tfm = crypto_alloc_ahash(algo, 0, 0);
+       tfm = crypto_alloc_ahash(algo, 0, mask);
        if (IS_ERR(tfm)) {
                pr_err("failed to load transform for %s: %ld\n",
                       algo, PTR_ERR(tfm));
@@ -1021,6 +800,18 @@ out:
        crypto_free_ahash(tfm);
 }
 
+static void test_ahash_speed(const char *algo, unsigned int secs,
+                            struct hash_speed *speed)
+{
+       return test_ahash_speed_common(algo, secs, speed, 0);
+}
+
+static void test_hash_speed(const char *algo, unsigned int secs,
+                           struct hash_speed *speed)
+{
+       return test_ahash_speed_common(algo, secs, speed, CRYPTO_ALG_ASYNC);
+}
+
 static inline int do_one_acipher_op(struct ablkcipher_request *req, int ret)
 {
        if (ret == -EINPROGRESS || ret == -EBUSY) {
index ae8c57fd8bc7f855e4fb73145a1ec72a34540add..93f3527962eca23e1418c2c8f7691187228813f6 100644 (file)
@@ -96,13 +96,6 @@ struct comp_test_suite {
        } comp, decomp;
 };
 
-struct pcomp_test_suite {
-       struct {
-               struct pcomp_testvec *vecs;
-               unsigned int count;
-       } comp, decomp;
-};
-
 struct hash_test_suite {
        struct hash_testvec *vecs;
        unsigned int count;
@@ -133,7 +126,6 @@ struct alg_test_desc {
                struct aead_test_suite aead;
                struct cipher_test_suite cipher;
                struct comp_test_suite comp;
-               struct pcomp_test_suite pcomp;
                struct hash_test_suite hash;
                struct cprng_test_suite cprng;
                struct drbg_test_suite drbg;
@@ -198,6 +190,61 @@ static int wait_async_op(struct tcrypt_result *tr, int ret)
        return ret;
 }
 
+static int ahash_partial_update(struct ahash_request **preq,
+       struct crypto_ahash *tfm, struct hash_testvec *template,
+       void *hash_buff, int k, int temp, struct scatterlist *sg,
+       const char *algo, char *result, struct tcrypt_result *tresult)
+{
+       char *state;
+       struct ahash_request *req;
+       int statesize, ret = -EINVAL;
+
+       req = *preq;
+       statesize = crypto_ahash_statesize(
+                       crypto_ahash_reqtfm(req));
+       state = kmalloc(statesize, GFP_KERNEL);
+       if (!state) {
+               pr_err("alt: hash: Failed to alloc state for %s\n", algo);
+               goto out_nostate;
+       }
+       ret = crypto_ahash_export(req, state);
+       if (ret) {
+               pr_err("alt: hash: Failed to export() for %s\n", algo);
+               goto out;
+       }
+       ahash_request_free(req);
+       req = ahash_request_alloc(tfm, GFP_KERNEL);
+       if (!req) {
+               pr_err("alg: hash: Failed to alloc request for %s\n", algo);
+               goto out_noreq;
+       }
+       ahash_request_set_callback(req,
+               CRYPTO_TFM_REQ_MAY_BACKLOG,
+               tcrypt_complete, tresult);
+
+       memcpy(hash_buff, template->plaintext + temp,
+               template->tap[k]);
+       sg_init_one(&sg[0], hash_buff, template->tap[k]);
+       ahash_request_set_crypt(req, sg, result, template->tap[k]);
+       ret = crypto_ahash_import(req, state);
+       if (ret) {
+               pr_err("alg: hash: Failed to import() for %s\n", algo);
+               goto out;
+       }
+       ret = wait_async_op(tresult, crypto_ahash_update(req));
+       if (ret)
+               goto out;
+       *preq = req;
+       ret = 0;
+       goto out_noreq;
+out:
+       ahash_request_free(req);
+out_noreq:
+       kfree(state);
+out_nostate:
+       return ret;
+}
+
 static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
                       unsigned int tcount, bool use_digest,
                       const int align_offset)
@@ -385,6 +432,84 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
                }
        }
 
+       /* partial update exercise */
+       j = 0;
+       for (i = 0; i < tcount; i++) {
+               /* alignment tests are only done with continuous buffers */
+               if (align_offset != 0)
+                       break;
+
+               if (template[i].np < 2)
+                       continue;
+
+               j++;
+               memset(result, 0, MAX_DIGEST_SIZE);
+
+               ret = -EINVAL;
+               hash_buff = xbuf[0];
+               memcpy(hash_buff, template[i].plaintext,
+                       template[i].tap[0]);
+               sg_init_one(&sg[0], hash_buff, template[i].tap[0]);
+
+               if (template[i].ksize) {
+                       crypto_ahash_clear_flags(tfm, ~0);
+                       if (template[i].ksize > MAX_KEYLEN) {
+                               pr_err("alg: hash: setkey failed on test %d for %s: key size %d > %d\n",
+                                       j, algo, template[i].ksize, MAX_KEYLEN);
+                               ret = -EINVAL;
+                               goto out;
+                       }
+                       memcpy(key, template[i].key, template[i].ksize);
+                       ret = crypto_ahash_setkey(tfm, key, template[i].ksize);
+                       if (ret) {
+                               pr_err("alg: hash: setkey failed on test %d for %s: ret=%d\n",
+                                       j, algo, -ret);
+                               goto out;
+                       }
+               }
+
+               ahash_request_set_crypt(req, sg, result, template[i].tap[0]);
+               ret = wait_async_op(&tresult, crypto_ahash_init(req));
+               if (ret) {
+                       pr_err("alt: hash: init failed on test %d for %s: ret=%d\n",
+                               j, algo, -ret);
+                       goto out;
+               }
+               ret = wait_async_op(&tresult, crypto_ahash_update(req));
+               if (ret) {
+                       pr_err("alt: hash: update failed on test %d for %s: ret=%d\n",
+                               j, algo, -ret);
+                       goto out;
+               }
+
+               temp = template[i].tap[0];
+               for (k = 1; k < template[i].np; k++) {
+                       ret = ahash_partial_update(&req, tfm, &template[i],
+                               hash_buff, k, temp, &sg[0], algo, result,
+                               &tresult);
+                       if (ret) {
+                               pr_err("hash: partial update failed on test %d for %s: ret=%d\n",
+                                       j, algo, -ret);
+                               goto out_noreq;
+                       }
+                       temp += template[i].tap[k];
+               }
+               ret = wait_async_op(&tresult, crypto_ahash_final(req));
+               if (ret) {
+                       pr_err("alt: hash: final failed on test %d for %s: ret=%d\n",
+                               j, algo, -ret);
+                       goto out;
+               }
+               if (memcmp(result, template[i].digest,
+                          crypto_ahash_digestsize(tfm))) {
+                       pr_err("alg: hash: Partial Test %d failed for %s\n",
+                              j, algo);
+                       hexdump(result, crypto_ahash_digestsize(tfm));
+                       ret = -EINVAL;
+                       goto out;
+               }
+       }
+
        ret = 0;
 
 out:
@@ -488,6 +613,8 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
        aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                  tcrypt_complete, &result);
 
+       iv_len = crypto_aead_ivsize(tfm);
+
        for (i = 0, j = 0; i < tcount; i++) {
                if (template[i].np)
                        continue;
@@ -508,7 +635,6 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
 
                memcpy(input, template[i].input, template[i].ilen);
                memcpy(assoc, template[i].assoc, template[i].alen);
-               iv_len = crypto_aead_ivsize(tfm);
                if (template[i].iv)
                        memcpy(iv, template[i].iv, iv_len);
                else
@@ -617,7 +743,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
                j++;
 
                if (template[i].iv)
-                       memcpy(iv, template[i].iv, MAX_IVLEN);
+                       memcpy(iv, template[i].iv, iv_len);
                else
                        memset(iv, 0, MAX_IVLEN);
 
@@ -1293,183 +1419,6 @@ out:
        return ret;
 }
 
-static int test_pcomp(struct crypto_pcomp *tfm,
-                     struct pcomp_testvec *ctemplate,
-                     struct pcomp_testvec *dtemplate, int ctcount,
-                     int dtcount)
-{
-       const char *algo = crypto_tfm_alg_driver_name(crypto_pcomp_tfm(tfm));
-       unsigned int i;
-       char result[COMP_BUF_SIZE];
-       int res;
-
-       for (i = 0; i < ctcount; i++) {
-               struct comp_request req;
-               unsigned int produced = 0;
-
-               res = crypto_compress_setup(tfm, ctemplate[i].params,
-                                           ctemplate[i].paramsize);
-               if (res) {
-                       pr_err("alg: pcomp: compression setup failed on test "
-                              "%d for %s: error=%d\n", i + 1, algo, res);
-                       return res;
-               }
-
-               res = crypto_compress_init(tfm);
-               if (res) {
-                       pr_err("alg: pcomp: compression init failed on test "
-                              "%d for %s: error=%d\n", i + 1, algo, res);
-                       return res;
-               }
-
-               memset(result, 0, sizeof(result));
-
-               req.next_in = ctemplate[i].input;
-               req.avail_in = ctemplate[i].inlen / 2;
-               req.next_out = result;
-               req.avail_out = ctemplate[i].outlen / 2;
-
-               res = crypto_compress_update(tfm, &req);
-               if (res < 0 && (res != -EAGAIN || req.avail_in)) {
-                       pr_err("alg: pcomp: compression update failed on test "
-                              "%d for %s: error=%d\n", i + 1, algo, res);
-                       return res;
-               }
-               if (res > 0)
-                       produced += res;
-
-               /* Add remaining input data */
-               req.avail_in += (ctemplate[i].inlen + 1) / 2;
-
-               res = crypto_compress_update(tfm, &req);
-               if (res < 0 && (res != -EAGAIN || req.avail_in)) {
-                       pr_err("alg: pcomp: compression update failed on test "
-                              "%d for %s: error=%d\n", i + 1, algo, res);
-                       return res;
-               }
-               if (res > 0)
-                       produced += res;
-
-               /* Provide remaining output space */
-               req.avail_out += COMP_BUF_SIZE - ctemplate[i].outlen / 2;
-
-               res = crypto_compress_final(tfm, &req);
-               if (res < 0) {
-                       pr_err("alg: pcomp: compression final failed on test "
-                              "%d for %s: error=%d\n", i + 1, algo, res);
-                       return res;
-               }
-               produced += res;
-
-               if (COMP_BUF_SIZE - req.avail_out != ctemplate[i].outlen) {
-                       pr_err("alg: comp: Compression test %d failed for %s: "
-                              "output len = %d (expected %d)\n", i + 1, algo,
-                              COMP_BUF_SIZE - req.avail_out,
-                              ctemplate[i].outlen);
-                       return -EINVAL;
-               }
-
-               if (produced != ctemplate[i].outlen) {
-                       pr_err("alg: comp: Compression test %d failed for %s: "
-                              "returned len = %u (expected %d)\n", i + 1,
-                              algo, produced, ctemplate[i].outlen);
-                       return -EINVAL;
-               }
-
-               if (memcmp(result, ctemplate[i].output, ctemplate[i].outlen)) {
-                       pr_err("alg: pcomp: Compression test %d failed for "
-                              "%s\n", i + 1, algo);
-                       hexdump(result, ctemplate[i].outlen);
-                       return -EINVAL;
-               }
-       }
-
-       for (i = 0; i < dtcount; i++) {
-               struct comp_request req;
-               unsigned int produced = 0;
-
-               res = crypto_decompress_setup(tfm, dtemplate[i].params,
-                                             dtemplate[i].paramsize);
-               if (res) {
-                       pr_err("alg: pcomp: decompression setup failed on "
-                              "test %d for %s: error=%d\n", i + 1, algo, res);
-                       return res;
-               }
-
-               res = crypto_decompress_init(tfm);
-               if (res) {
-                       pr_err("alg: pcomp: decompression init failed on test "
-                              "%d for %s: error=%d\n", i + 1, algo, res);
-                       return res;
-               }
-
-               memset(result, 0, sizeof(result));
-
-               req.next_in = dtemplate[i].input;
-               req.avail_in = dtemplate[i].inlen / 2;
-               req.next_out = result;
-               req.avail_out = dtemplate[i].outlen / 2;
-
-               res = crypto_decompress_update(tfm, &req);
-               if (res < 0 && (res != -EAGAIN || req.avail_in)) {
-                       pr_err("alg: pcomp: decompression update failed on "
-                              "test %d for %s: error=%d\n", i + 1, algo, res);
-                       return res;
-               }
-               if (res > 0)
-                       produced += res;
-
-               /* Add remaining input data */
-               req.avail_in += (dtemplate[i].inlen + 1) / 2;
-
-               res = crypto_decompress_update(tfm, &req);
-               if (res < 0 && (res != -EAGAIN || req.avail_in)) {
-                       pr_err("alg: pcomp: decompression update failed on "
-                              "test %d for %s: error=%d\n", i + 1, algo, res);
-                       return res;
-               }
-               if (res > 0)
-                       produced += res;
-
-               /* Provide remaining output space */
-               req.avail_out += COMP_BUF_SIZE - dtemplate[i].outlen / 2;
-
-               res = crypto_decompress_final(tfm, &req);
-               if (res < 0 && (res != -EAGAIN || req.avail_in)) {
-                       pr_err("alg: pcomp: decompression final failed on "
-                              "test %d for %s: error=%d\n", i + 1, algo, res);
-                       return res;
-               }
-               if (res > 0)
-                       produced += res;
-
-               if (COMP_BUF_SIZE - req.avail_out != dtemplate[i].outlen) {
-                       pr_err("alg: comp: Decompression test %d failed for "
-                              "%s: output len = %d (expected %d)\n", i + 1,
-                              algo, COMP_BUF_SIZE - req.avail_out,
-                              dtemplate[i].outlen);
-                       return -EINVAL;
-               }
-
-               if (produced != dtemplate[i].outlen) {
-                       pr_err("alg: comp: Decompression test %d failed for "
-                              "%s: returned len = %u (expected %d)\n", i + 1,
-                              algo, produced, dtemplate[i].outlen);
-                       return -EINVAL;
-               }
-
-               if (memcmp(result, dtemplate[i].output, dtemplate[i].outlen)) {
-                       pr_err("alg: pcomp: Decompression test %d failed for "
-                              "%s\n", i + 1, algo);
-                       hexdump(result, dtemplate[i].outlen);
-                       return -EINVAL;
-               }
-       }
-
-       return 0;
-}
-
-
 static int test_cprng(struct crypto_rng *tfm, struct cprng_testvec *template,
                      unsigned int tcount)
 {
@@ -1640,28 +1589,6 @@ static int alg_test_comp(const struct alg_test_desc *desc, const char *driver,
        return err;
 }
 
-static int alg_test_pcomp(const struct alg_test_desc *desc, const char *driver,
-                         u32 type, u32 mask)
-{
-       struct crypto_pcomp *tfm;
-       int err;
-
-       tfm = crypto_alloc_pcomp(driver, type, mask);
-       if (IS_ERR(tfm)) {
-               pr_err("alg: pcomp: Failed to load transform for %s: %ld\n",
-                      driver, PTR_ERR(tfm));
-               return PTR_ERR(tfm);
-       }
-
-       err = test_pcomp(tfm, desc->suite.pcomp.comp.vecs,
-                        desc->suite.pcomp.decomp.vecs,
-                        desc->suite.pcomp.comp.count,
-                        desc->suite.pcomp.decomp.count);
-
-       crypto_free_pcomp(tfm);
-       return err;
-}
-
 static int alg_test_hash(const struct alg_test_desc *desc, const char *driver,
                         u32 type, u32 mask)
 {
@@ -2081,7 +2008,6 @@ static const struct alg_test_desc alg_test_descs[] = {
        }, {
                .alg = "ansi_cprng",
                .test = alg_test_cprng,
-               .fips_allowed = 1,
                .suite = {
                        .cprng = {
                                .vecs = ansi_cprng_aes_tv_template,
@@ -2132,6 +2058,7 @@ static const struct alg_test_desc alg_test_descs[] = {
        }, {
                .alg = "authenc(hmac(sha1),cbc(des3_ede))",
                .test = alg_test_aead,
+               .fips_allowed = 1,
                .suite = {
                        .aead = {
                                .enc = {
@@ -2177,6 +2104,7 @@ static const struct alg_test_desc alg_test_descs[] = {
        }, {
                .alg = "authenc(hmac(sha224),cbc(des3_ede))",
                .test = alg_test_aead,
+               .fips_allowed = 1,
                .suite = {
                        .aead = {
                                .enc = {
@@ -2190,6 +2118,7 @@ static const struct alg_test_desc alg_test_descs[] = {
        }, {
                .alg = "authenc(hmac(sha256),cbc(aes))",
                .test = alg_test_aead,
+               .fips_allowed = 1,
                .suite = {
                        .aead = {
                                .enc = {
@@ -2216,6 +2145,7 @@ static const struct alg_test_desc alg_test_descs[] = {
        }, {
                .alg = "authenc(hmac(sha256),cbc(des3_ede))",
                .test = alg_test_aead,
+               .fips_allowed = 1,
                .suite = {
                        .aead = {
                                .enc = {
@@ -2242,6 +2172,7 @@ static const struct alg_test_desc alg_test_descs[] = {
        }, {
                .alg = "authenc(hmac(sha384),cbc(des3_ede))",
                .test = alg_test_aead,
+               .fips_allowed = 1,
                .suite = {
                        .aead = {
                                .enc = {
@@ -2254,6 +2185,7 @@ static const struct alg_test_desc alg_test_descs[] = {
                }
        }, {
                .alg = "authenc(hmac(sha512),cbc(aes))",
+               .fips_allowed = 1,
                .test = alg_test_aead,
                .suite = {
                        .aead = {
@@ -2281,6 +2213,7 @@ static const struct alg_test_desc alg_test_descs[] = {
        }, {
                .alg = "authenc(hmac(sha512),cbc(des3_ede))",
                .test = alg_test_aead,
+               .fips_allowed = 1,
                .suite = {
                        .aead = {
                                .enc = {
@@ -3840,22 +3773,6 @@ static const struct alg_test_desc alg_test_descs[] = {
                                }
                        }
                }
-       }, {
-               .alg = "zlib",
-               .test = alg_test_pcomp,
-               .fips_allowed = 1,
-               .suite = {
-                       .pcomp = {
-                               .comp = {
-                                       .vecs = zlib_comp_tv_template,
-                                       .count = ZLIB_COMP_TEST_VECTORS
-                               },
-                               .decomp = {
-                                       .vecs = zlib_decomp_tv_template,
-                                       .count = ZLIB_DECOMP_TEST_VECTORS
-                               }
-                       }
-               }
        }
 };
 
index da0a8fd765f4ee2574db8ebcc671119e723b2a7a..487ec880e889c50659d9e033fb4b2261d593349d 100644 (file)
@@ -25,9 +25,6 @@
 #define _CRYPTO_TESTMGR_H
 
 #include <linux/netlink.h>
-#include <linux/zlib.h>
-
-#include <crypto/compress.h>
 
 #define MAX_DIGEST_SIZE                64
 #define MAX_TAP                        8
@@ -32268,14 +32265,6 @@ struct comp_testvec {
        char output[COMP_BUF_SIZE];
 };
 
-struct pcomp_testvec {
-       const void *params;
-       unsigned int paramsize;
-       int inlen, outlen;
-       char input[COMP_BUF_SIZE];
-       char output[COMP_BUF_SIZE];
-};
-
 /*
  * Deflate test vectors (null-terminated strings).
  * Params: winbits=-11, Z_DEFAULT_COMPRESSION, MAX_MEM_LEVEL.
@@ -32356,139 +32345,6 @@ static struct comp_testvec deflate_decomp_tv_template[] = {
        },
 };
 
-#define ZLIB_COMP_TEST_VECTORS 2
-#define ZLIB_DECOMP_TEST_VECTORS 2
-
-static const struct {
-       struct nlattr nla;
-       int val;
-} deflate_comp_params[] = {
-       {
-               .nla = {
-                       .nla_len        = NLA_HDRLEN + sizeof(int),
-                       .nla_type       = ZLIB_COMP_LEVEL,
-               },
-               .val                    = Z_DEFAULT_COMPRESSION,
-       }, {
-               .nla = {
-                       .nla_len        = NLA_HDRLEN + sizeof(int),
-                       .nla_type       = ZLIB_COMP_METHOD,
-               },
-               .val                    = Z_DEFLATED,
-       }, {
-               .nla = {
-                       .nla_len        = NLA_HDRLEN + sizeof(int),
-                       .nla_type       = ZLIB_COMP_WINDOWBITS,
-               },
-               .val                    = -11,
-       }, {
-               .nla = {
-                       .nla_len        = NLA_HDRLEN + sizeof(int),
-                       .nla_type       = ZLIB_COMP_MEMLEVEL,
-               },
-               .val                    = MAX_MEM_LEVEL,
-       }, {
-               .nla = {
-                       .nla_len        = NLA_HDRLEN + sizeof(int),
-                       .nla_type       = ZLIB_COMP_STRATEGY,
-               },
-               .val                    = Z_DEFAULT_STRATEGY,
-       }
-};
-
-static const struct {
-       struct nlattr nla;
-       int val;
-} deflate_decomp_params[] = {
-       {
-               .nla = {
-                       .nla_len        = NLA_HDRLEN + sizeof(int),
-                       .nla_type       = ZLIB_DECOMP_WINDOWBITS,
-               },
-               .val                    = -11,
-       }
-};
-
-static struct pcomp_testvec zlib_comp_tv_template[] = {
-       {
-               .params = &deflate_comp_params,
-               .paramsize = sizeof(deflate_comp_params),
-               .inlen  = 70,
-               .outlen = 38,
-               .input  = "Join us now and share the software "
-                       "Join us now and share the software ",
-               .output = "\xf3\xca\xcf\xcc\x53\x28\x2d\x56"
-                         "\xc8\xcb\x2f\x57\x48\xcc\x4b\x51"
-                         "\x28\xce\x48\x2c\x4a\x55\x28\xc9"
-                         "\x48\x55\x28\xce\x4f\x2b\x29\x07"
-                         "\x71\xbc\x08\x2b\x01\x00",
-       }, {
-               .params = &deflate_comp_params,
-               .paramsize = sizeof(deflate_comp_params),
-               .inlen  = 191,
-               .outlen = 122,
-               .input  = "This document describes a compression method based on the DEFLATE"
-                       "compression algorithm.  This document defines the application of "
-                       "the DEFLATE algorithm to the IP Payload Compression Protocol.",
-               .output = "\x5d\x8d\x31\x0e\xc2\x30\x10\x04"
-                         "\xbf\xb2\x2f\xc8\x1f\x10\x04\x09"
-                         "\x89\xc2\x85\x3f\x70\xb1\x2f\xf8"
-                         "\x24\xdb\x67\xd9\x47\xc1\xef\x49"
-                         "\x68\x12\x51\xae\x76\x67\xd6\x27"
-                         "\x19\x88\x1a\xde\x85\xab\x21\xf2"
-                         "\x08\x5d\x16\x1e\x20\x04\x2d\xad"
-                         "\xf3\x18\xa2\x15\x85\x2d\x69\xc4"
-                         "\x42\x83\x23\xb6\x6c\x89\x71\x9b"
-                         "\xef\xcf\x8b\x9f\xcf\x33\xca\x2f"
-                         "\xed\x62\xa9\x4c\x80\xff\x13\xaf"
-                         "\x52\x37\xed\x0e\x52\x6b\x59\x02"
-                         "\xd9\x4e\xe8\x7a\x76\x1d\x02\x98"
-                         "\xfe\x8a\x87\x83\xa3\x4f\x56\x8a"
-                         "\xb8\x9e\x8e\x5c\x57\xd3\xa0\x79"
-                         "\xfa\x02",
-       },
-};
-
-static struct pcomp_testvec zlib_decomp_tv_template[] = {
-       {
-               .params = &deflate_decomp_params,
-               .paramsize = sizeof(deflate_decomp_params),
-               .inlen  = 122,
-               .outlen = 191,
-               .input  = "\x5d\x8d\x31\x0e\xc2\x30\x10\x04"
-                         "\xbf\xb2\x2f\xc8\x1f\x10\x04\x09"
-                         "\x89\xc2\x85\x3f\x70\xb1\x2f\xf8"
-                         "\x24\xdb\x67\xd9\x47\xc1\xef\x49"
-                         "\x68\x12\x51\xae\x76\x67\xd6\x27"
-                         "\x19\x88\x1a\xde\x85\xab\x21\xf2"
-                         "\x08\x5d\x16\x1e\x20\x04\x2d\xad"
-                         "\xf3\x18\xa2\x15\x85\x2d\x69\xc4"
-                         "\x42\x83\x23\xb6\x6c\x89\x71\x9b"
-                         "\xef\xcf\x8b\x9f\xcf\x33\xca\x2f"
-                         "\xed\x62\xa9\x4c\x80\xff\x13\xaf"
-                         "\x52\x37\xed\x0e\x52\x6b\x59\x02"
-                         "\xd9\x4e\xe8\x7a\x76\x1d\x02\x98"
-                         "\xfe\x8a\x87\x83\xa3\x4f\x56\x8a"
-                         "\xb8\x9e\x8e\x5c\x57\xd3\xa0\x79"
-                         "\xfa\x02",
-               .output = "This document describes a compression method based on the DEFLATE"
-                       "compression algorithm.  This document defines the application of "
-                       "the DEFLATE algorithm to the IP Payload Compression Protocol.",
-       }, {
-               .params = &deflate_decomp_params,
-               .paramsize = sizeof(deflate_decomp_params),
-               .inlen  = 38,
-               .outlen = 70,
-               .input  = "\xf3\xca\xcf\xcc\x53\x28\x2d\x56"
-                         "\xc8\xcb\x2f\x57\x48\xcc\x4b\x51"
-                         "\x28\xce\x48\x2c\x4a\x55\x28\xc9"
-                         "\x48\x55\x28\xce\x4f\x2b\x29\x07"
-                         "\x71\xbc\x08\x2b\x01\x00",
-               .output = "Join us now and share the software "
-                       "Join us now and share the software ",
-       },
-};
-
 /*
  * LZO test vectors (null-terminated strings).
  */
diff --git a/crypto/zlib.c b/crypto/zlib.c
deleted file mode 100644 (file)
index d51a30a..0000000
+++ /dev/null
@@ -1,381 +0,0 @@
-/*
- * Cryptographic API.
- *
- * Zlib algorithm
- *
- * Copyright 2008 Sony Corporation
- *
- * Based on deflate.c, which is
- * Copyright (c) 2003 James Morris <jmorris@intercode.com.au>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * FIXME: deflate transforms will require up to a total of about 436k of kernel
- * memory on i386 (390k for compression, the rest for decompression), as the
- * current zlib kernel code uses a worst case pre-allocation system by default.
- * This needs to be fixed so that the amount of memory required is properly
- * related to the winbits and memlevel parameters.
- */
-
-#define pr_fmt(fmt)    "%s: " fmt, __func__
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/zlib.h>
-#include <linux/vmalloc.h>
-#include <linux/interrupt.h>
-#include <linux/mm.h>
-#include <linux/net.h>
-
-#include <crypto/internal/compress.h>
-
-#include <net/netlink.h>
-
-
-struct zlib_ctx {
-       struct z_stream_s comp_stream;
-       struct z_stream_s decomp_stream;
-       int decomp_windowBits;
-};
-
-
-static void zlib_comp_exit(struct zlib_ctx *ctx)
-{
-       struct z_stream_s *stream = &ctx->comp_stream;
-
-       if (stream->workspace) {
-               zlib_deflateEnd(stream);
-               vfree(stream->workspace);
-               stream->workspace = NULL;
-       }
-}
-
-static void zlib_decomp_exit(struct zlib_ctx *ctx)
-{
-       struct z_stream_s *stream = &ctx->decomp_stream;
-
-       if (stream->workspace) {
-               zlib_inflateEnd(stream);
-               vfree(stream->workspace);
-               stream->workspace = NULL;
-       }
-}
-
-static int zlib_init(struct crypto_tfm *tfm)
-{
-       return 0;
-}
-
-static void zlib_exit(struct crypto_tfm *tfm)
-{
-       struct zlib_ctx *ctx = crypto_tfm_ctx(tfm);
-
-       zlib_comp_exit(ctx);
-       zlib_decomp_exit(ctx);
-}
-
-
-static int zlib_compress_setup(struct crypto_pcomp *tfm, const void *params,
-                              unsigned int len)
-{
-       struct zlib_ctx *ctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
-       struct z_stream_s *stream = &ctx->comp_stream;
-       struct nlattr *tb[ZLIB_COMP_MAX + 1];
-       int window_bits, mem_level;
-       size_t workspacesize;
-       int ret;
-
-       ret = nla_parse(tb, ZLIB_COMP_MAX, params, len, NULL);
-       if (ret)
-               return ret;
-
-       zlib_comp_exit(ctx);
-
-       window_bits = tb[ZLIB_COMP_WINDOWBITS]
-                                       ? nla_get_u32(tb[ZLIB_COMP_WINDOWBITS])
-                                       : MAX_WBITS;
-       mem_level = tb[ZLIB_COMP_MEMLEVEL]
-                                       ? nla_get_u32(tb[ZLIB_COMP_MEMLEVEL])
-                                       : DEF_MEM_LEVEL;
-
-       workspacesize = zlib_deflate_workspacesize(window_bits, mem_level);
-       stream->workspace = vzalloc(workspacesize);
-       if (!stream->workspace)
-               return -ENOMEM;
-
-       ret = zlib_deflateInit2(stream,
-                               tb[ZLIB_COMP_LEVEL]
-                                       ? nla_get_u32(tb[ZLIB_COMP_LEVEL])
-                                       : Z_DEFAULT_COMPRESSION,
-                               tb[ZLIB_COMP_METHOD]
-                                       ? nla_get_u32(tb[ZLIB_COMP_METHOD])
-                                       : Z_DEFLATED,
-                               window_bits,
-                               mem_level,
-                               tb[ZLIB_COMP_STRATEGY]
-                                       ? nla_get_u32(tb[ZLIB_COMP_STRATEGY])
-                                       : Z_DEFAULT_STRATEGY);
-       if (ret != Z_OK) {
-               vfree(stream->workspace);
-               stream->workspace = NULL;
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static int zlib_compress_init(struct crypto_pcomp *tfm)
-{
-       int ret;
-       struct zlib_ctx *dctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
-       struct z_stream_s *stream = &dctx->comp_stream;
-
-       ret = zlib_deflateReset(stream);
-       if (ret != Z_OK)
-               return -EINVAL;
-
-       return 0;
-}
-
-static int zlib_compress_update(struct crypto_pcomp *tfm,
-                               struct comp_request *req)
-{
-       int ret;
-       struct zlib_ctx *dctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
-       struct z_stream_s *stream = &dctx->comp_stream;
-
-       pr_debug("avail_in %u, avail_out %u\n", req->avail_in, req->avail_out);
-       stream->next_in = req->next_in;
-       stream->avail_in = req->avail_in;
-       stream->next_out = req->next_out;
-       stream->avail_out = req->avail_out;
-
-       ret = zlib_deflate(stream, Z_NO_FLUSH);
-       switch (ret) {
-       case Z_OK:
-               break;
-
-       case Z_BUF_ERROR:
-               pr_debug("zlib_deflate could not make progress\n");
-               return -EAGAIN;
-
-       default:
-               pr_debug("zlib_deflate failed %d\n", ret);
-               return -EINVAL;
-       }
-
-       ret = req->avail_out - stream->avail_out;
-       pr_debug("avail_in %lu, avail_out %lu (consumed %lu, produced %u)\n",
-                stream->avail_in, stream->avail_out,
-                req->avail_in - stream->avail_in, ret);
-       req->next_in = stream->next_in;
-       req->avail_in = stream->avail_in;
-       req->next_out = stream->next_out;
-       req->avail_out = stream->avail_out;
-       return ret;
-}
-
-static int zlib_compress_final(struct crypto_pcomp *tfm,
-                              struct comp_request *req)
-{
-       int ret;
-       struct zlib_ctx *dctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
-       struct z_stream_s *stream = &dctx->comp_stream;
-
-       pr_debug("avail_in %u, avail_out %u\n", req->avail_in, req->avail_out);
-       stream->next_in = req->next_in;
-       stream->avail_in = req->avail_in;
-       stream->next_out = req->next_out;
-       stream->avail_out = req->avail_out;
-
-       ret = zlib_deflate(stream, Z_FINISH);
-       if (ret != Z_STREAM_END) {
-               pr_debug("zlib_deflate failed %d\n", ret);
-               return -EINVAL;
-       }
-
-       ret = req->avail_out - stream->avail_out;
-       pr_debug("avail_in %lu, avail_out %lu (consumed %lu, produced %u)\n",
-                stream->avail_in, stream->avail_out,
-                req->avail_in - stream->avail_in, ret);
-       req->next_in = stream->next_in;
-       req->avail_in = stream->avail_in;
-       req->next_out = stream->next_out;
-       req->avail_out = stream->avail_out;
-       return ret;
-}
-
-
-static int zlib_decompress_setup(struct crypto_pcomp *tfm, const void *params,
-                                unsigned int len)
-{
-       struct zlib_ctx *ctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
-       struct z_stream_s *stream = &ctx->decomp_stream;
-       struct nlattr *tb[ZLIB_DECOMP_MAX + 1];
-       int ret = 0;
-
-       ret = nla_parse(tb, ZLIB_DECOMP_MAX, params, len, NULL);
-       if (ret)
-               return ret;
-
-       zlib_decomp_exit(ctx);
-
-       ctx->decomp_windowBits = tb[ZLIB_DECOMP_WINDOWBITS]
-                                ? nla_get_u32(tb[ZLIB_DECOMP_WINDOWBITS])
-                                : DEF_WBITS;
-
-       stream->workspace = vzalloc(zlib_inflate_workspacesize());
-       if (!stream->workspace)
-               return -ENOMEM;
-
-       ret = zlib_inflateInit2(stream, ctx->decomp_windowBits);
-       if (ret != Z_OK) {
-               vfree(stream->workspace);
-               stream->workspace = NULL;
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static int zlib_decompress_init(struct crypto_pcomp *tfm)
-{
-       int ret;
-       struct zlib_ctx *dctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
-       struct z_stream_s *stream = &dctx->decomp_stream;
-
-       ret = zlib_inflateReset(stream);
-       if (ret != Z_OK)
-               return -EINVAL;
-
-       return 0;
-}
-
-static int zlib_decompress_update(struct crypto_pcomp *tfm,
-                                 struct comp_request *req)
-{
-       int ret;
-       struct zlib_ctx *dctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
-       struct z_stream_s *stream = &dctx->decomp_stream;
-
-       pr_debug("avail_in %u, avail_out %u\n", req->avail_in, req->avail_out);
-       stream->next_in = req->next_in;
-       stream->avail_in = req->avail_in;
-       stream->next_out = req->next_out;
-       stream->avail_out = req->avail_out;
-
-       ret = zlib_inflate(stream, Z_SYNC_FLUSH);
-       switch (ret) {
-       case Z_OK:
-       case Z_STREAM_END:
-               break;
-
-       case Z_BUF_ERROR:
-               pr_debug("zlib_inflate could not make progress\n");
-               return -EAGAIN;
-
-       default:
-               pr_debug("zlib_inflate failed %d\n", ret);
-               return -EINVAL;
-       }
-
-       ret = req->avail_out - stream->avail_out;
-       pr_debug("avail_in %lu, avail_out %lu (consumed %lu, produced %u)\n",
-                stream->avail_in, stream->avail_out,
-                req->avail_in - stream->avail_in, ret);
-       req->next_in = stream->next_in;
-       req->avail_in = stream->avail_in;
-       req->next_out = stream->next_out;
-       req->avail_out = stream->avail_out;
-       return ret;
-}
-
-static int zlib_decompress_final(struct crypto_pcomp *tfm,
-                                struct comp_request *req)
-{
-       int ret;
-       struct zlib_ctx *dctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
-       struct z_stream_s *stream = &dctx->decomp_stream;
-
-       pr_debug("avail_in %u, avail_out %u\n", req->avail_in, req->avail_out);
-       stream->next_in = req->next_in;
-       stream->avail_in = req->avail_in;
-       stream->next_out = req->next_out;
-       stream->avail_out = req->avail_out;
-
-       if (dctx->decomp_windowBits < 0) {
-               ret = zlib_inflate(stream, Z_SYNC_FLUSH);
-               /*
-                * Work around a bug in zlib, which sometimes wants to taste an
-                * extra byte when being used in the (undocumented) raw deflate
-                * mode. (From USAGI).
-                */
-               if (ret == Z_OK && !stream->avail_in && stream->avail_out) {
-                       const void *saved_next_in = stream->next_in;
-                       u8 zerostuff = 0;
-
-                       stream->next_in = &zerostuff;
-                       stream->avail_in = 1;
-                       ret = zlib_inflate(stream, Z_FINISH);
-                       stream->next_in = saved_next_in;
-                       stream->avail_in = 0;
-               }
-       } else
-               ret = zlib_inflate(stream, Z_FINISH);
-       if (ret != Z_STREAM_END) {
-               pr_debug("zlib_inflate failed %d\n", ret);
-               return -EINVAL;
-       }
-
-       ret = req->avail_out - stream->avail_out;
-       pr_debug("avail_in %lu, avail_out %lu (consumed %lu, produced %u)\n",
-                stream->avail_in, stream->avail_out,
-                req->avail_in - stream->avail_in, ret);
-       req->next_in = stream->next_in;
-       req->avail_in = stream->avail_in;
-       req->next_out = stream->next_out;
-       req->avail_out = stream->avail_out;
-       return ret;
-}
-
-
-static struct pcomp_alg zlib_alg = {
-       .compress_setup         = zlib_compress_setup,
-       .compress_init          = zlib_compress_init,
-       .compress_update        = zlib_compress_update,
-       .compress_final         = zlib_compress_final,
-       .decompress_setup       = zlib_decompress_setup,
-       .decompress_init        = zlib_decompress_init,
-       .decompress_update      = zlib_decompress_update,
-       .decompress_final       = zlib_decompress_final,
-
-       .base                   = {
-               .cra_name       = "zlib",
-               .cra_flags      = CRYPTO_ALG_TYPE_PCOMPRESS,
-               .cra_ctxsize    = sizeof(struct zlib_ctx),
-               .cra_module     = THIS_MODULE,
-               .cra_init       = zlib_init,
-               .cra_exit       = zlib_exit,
-       }
-};
-
-static int __init zlib_mod_init(void)
-{
-       return crypto_register_pcomp(&zlib_alg);
-}
-
-static void __exit zlib_mod_fini(void)
-{
-       crypto_unregister_pcomp(&zlib_alg);
-}
-
-module_init(zlib_mod_init);
-module_exit(zlib_mod_fini);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Zlib Compression Algorithm");
-MODULE_AUTHOR("Sony Corporation");
-MODULE_ALIAS_CRYPTO("zlib");
index 99e773cb70d0b58d4a54115be3593986966def0f..3d31761c0ed05c6054f6a5de7251fb6d811b6caa 100644 (file)
@@ -21,9 +21,9 @@
 
 #include <linux/module.h>
 
+#include <crypto/skcipher.h>
 #include <linux/init.h>
 #include <linux/string.h>
-#include <linux/crypto.h>
 #include <linux/blkdev.h>
 #include <linux/scatterlist.h>
 #include <asm/uaccess.h>
@@ -46,7 +46,7 @@ cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
        char *cipher;
        char *mode;
        char *cmsp = cms;                       /* c-m string pointer */
-       struct crypto_blkcipher *tfm;
+       struct crypto_skcipher *tfm;
 
        /* encryption breaks for non sector aligned offsets */
 
@@ -82,12 +82,12 @@ cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
        *cmsp++ = ')';
        *cmsp = 0;
 
-       tfm = crypto_alloc_blkcipher(cms, 0, CRYPTO_ALG_ASYNC);
+       tfm = crypto_alloc_skcipher(cms, 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);
 
-       err = crypto_blkcipher_setkey(tfm, info->lo_encrypt_key,
-                                     info->lo_encrypt_key_size);
+       err = crypto_skcipher_setkey(tfm, info->lo_encrypt_key,
+                                    info->lo_encrypt_key_size);
        
        if (err != 0)
                goto out_free_tfm;
@@ -96,17 +96,14 @@ cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
        return 0;
 
  out_free_tfm:
-       crypto_free_blkcipher(tfm);
+       crypto_free_skcipher(tfm);
 
  out:
        return err;
 }
 
 
-typedef int (*encdec_cbc_t)(struct blkcipher_desc *desc,
-                       struct scatterlist *sg_out,
-                       struct scatterlist *sg_in,
-                       unsigned int nsg);
+typedef int (*encdec_cbc_t)(struct skcipher_request *req);
 
 static int
 cryptoloop_transfer(struct loop_device *lo, int cmd,
@@ -114,11 +111,8 @@ cryptoloop_transfer(struct loop_device *lo, int cmd,
                    struct page *loop_page, unsigned loop_off,
                    int size, sector_t IV)
 {
-       struct crypto_blkcipher *tfm = lo->key_data;
-       struct blkcipher_desc desc = {
-               .tfm = tfm,
-               .flags = CRYPTO_TFM_REQ_MAY_SLEEP,
-       };
+       struct crypto_skcipher *tfm = lo->key_data;
+       SKCIPHER_REQUEST_ON_STACK(req, tfm);
        struct scatterlist sg_out;
        struct scatterlist sg_in;
 
@@ -127,6 +121,10 @@ cryptoloop_transfer(struct loop_device *lo, int cmd,
        unsigned in_offs, out_offs;
        int err;
 
+       skcipher_request_set_tfm(req, tfm);
+       skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
+                                     NULL, NULL);
+
        sg_init_table(&sg_out, 1);
        sg_init_table(&sg_in, 1);
 
@@ -135,13 +133,13 @@ cryptoloop_transfer(struct loop_device *lo, int cmd,
                in_offs = raw_off;
                out_page = loop_page;
                out_offs = loop_off;
-               encdecfunc = crypto_blkcipher_crt(tfm)->decrypt;
+               encdecfunc = crypto_skcipher_decrypt;
        } else {
                in_page = loop_page;
                in_offs = loop_off;
                out_page = raw_page;
                out_offs = raw_off;
-               encdecfunc = crypto_blkcipher_crt(tfm)->encrypt;
+               encdecfunc = crypto_skcipher_encrypt;
        }
 
        while (size > 0) {
@@ -152,10 +150,10 @@ cryptoloop_transfer(struct loop_device *lo, int cmd,
                sg_set_page(&sg_in, in_page, sz, in_offs);
                sg_set_page(&sg_out, out_page, sz, out_offs);
 
-               desc.info = iv;
-               err = encdecfunc(&desc, &sg_out, &sg_in, sz);
+               skcipher_request_set_crypt(req, &sg_in, &sg_out, sz, iv);
+               err = encdecfunc(req);
                if (err)
-                       return err;
+                       goto out;
 
                IV++;
                size -= sz;
@@ -163,7 +161,11 @@ cryptoloop_transfer(struct loop_device *lo, int cmd,
                out_offs += sz;
        }
 
-       return 0;
+       err = 0;
+
+out:
+       skcipher_request_zero(req);
+       return err;
 }
 
 static int
@@ -175,9 +177,9 @@ cryptoloop_ioctl(struct loop_device *lo, int cmd, unsigned long arg)
 static int
 cryptoloop_release(struct loop_device *lo)
 {
-       struct crypto_blkcipher *tfm = lo->key_data;
+       struct crypto_skcipher *tfm = lo->key_data;
        if (tfm != NULL) {
-               crypto_free_blkcipher(tfm);
+               crypto_free_skcipher(tfm);
                lo->key_data = NULL;
                return 0;
        }
index 34bc84efc29e99085d9ccdeb6a4fa49f5b079446..c227fd4cad75fe62eba5bcfd8b0ef22742a080e3 100644 (file)
 #ifndef _DRBD_INT_H
 #define _DRBD_INT_H
 
+#include <crypto/hash.h>
 #include <linux/compiler.h>
 #include <linux/types.h>
 #include <linux/list.h>
 #include <linux/sched.h>
 #include <linux/bitops.h>
 #include <linux/slab.h>
-#include <linux/crypto.h>
 #include <linux/ratelimit.h>
 #include <linux/tcp.h>
 #include <linux/mutex.h>
@@ -724,11 +724,11 @@ struct drbd_connection {
 
        struct list_head transfer_log;  /* all requests not yet fully processed */
 
-       struct crypto_hash *cram_hmac_tfm;
-       struct crypto_hash *integrity_tfm;  /* checksums we compute, updates protected by connection->data->mutex */
-       struct crypto_hash *peer_integrity_tfm;  /* checksums we verify, only accessed from receiver thread  */
-       struct crypto_hash *csums_tfm;
-       struct crypto_hash *verify_tfm;
+       struct crypto_shash *cram_hmac_tfm;
+       struct crypto_ahash *integrity_tfm;  /* checksums we compute, updates protected by connection->data->mutex */
+       struct crypto_ahash *peer_integrity_tfm;  /* checksums we verify, only accessed from receiver thread  */
+       struct crypto_ahash *csums_tfm;
+       struct crypto_ahash *verify_tfm;
        void *int_dig_in;
        void *int_dig_vv;
 
@@ -1524,8 +1524,8 @@ static inline void ov_out_of_sync_print(struct drbd_device *device)
 }
 
 
-extern void drbd_csum_bio(struct crypto_hash *, struct bio *, void *);
-extern void drbd_csum_ee(struct crypto_hash *, struct drbd_peer_request *, void *);
+extern void drbd_csum_bio(struct crypto_ahash *, struct bio *, void *);
+extern void drbd_csum_ee(struct crypto_ahash *, struct drbd_peer_request *, void *);
 /* worker callbacks */
 extern int w_e_end_data_req(struct drbd_work *, int);
 extern int w_e_end_rsdata_req(struct drbd_work *, int);
index 5b43dfb798191d0c90cc3b5115956575e56410f9..fa209773d494a4efd6857aa4ef4437ee11636250 100644 (file)
@@ -1340,7 +1340,7 @@ void drbd_send_ack_dp(struct drbd_peer_device *peer_device, enum drbd_packet cmd
                      struct p_data *dp, int data_size)
 {
        if (peer_device->connection->peer_integrity_tfm)
-               data_size -= crypto_hash_digestsize(peer_device->connection->peer_integrity_tfm);
+               data_size -= crypto_ahash_digestsize(peer_device->connection->peer_integrity_tfm);
        _drbd_send_ack(peer_device, cmd, dp->sector, cpu_to_be32(data_size),
                       dp->block_id);
 }
@@ -1629,7 +1629,7 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *
        sock = &peer_device->connection->data;
        p = drbd_prepare_command(peer_device, sock);
        digest_size = peer_device->connection->integrity_tfm ?
-                     crypto_hash_digestsize(peer_device->connection->integrity_tfm) : 0;
+                     crypto_ahash_digestsize(peer_device->connection->integrity_tfm) : 0;
 
        if (!p)
                return -EIO;
@@ -1718,7 +1718,7 @@ int drbd_send_block(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
        p = drbd_prepare_command(peer_device, sock);
 
        digest_size = peer_device->connection->integrity_tfm ?
-                     crypto_hash_digestsize(peer_device->connection->integrity_tfm) : 0;
+                     crypto_ahash_digestsize(peer_device->connection->integrity_tfm) : 0;
 
        if (!p)
                return -EIO;
@@ -2498,11 +2498,11 @@ void conn_free_crypto(struct drbd_connection *connection)
 {
        drbd_free_sock(connection);
 
-       crypto_free_hash(connection->csums_tfm);
-       crypto_free_hash(connection->verify_tfm);
-       crypto_free_hash(connection->cram_hmac_tfm);
-       crypto_free_hash(connection->integrity_tfm);
-       crypto_free_hash(connection->peer_integrity_tfm);
+       crypto_free_ahash(connection->csums_tfm);
+       crypto_free_ahash(connection->verify_tfm);
+       crypto_free_shash(connection->cram_hmac_tfm);
+       crypto_free_ahash(connection->integrity_tfm);
+       crypto_free_ahash(connection->peer_integrity_tfm);
        kfree(connection->int_dig_in);
        kfree(connection->int_dig_vv);
 
index c055c5e12f248dc77b60c89d4b723f0de3f934b3..226eb0c9f0fb33a7ee795dbf3661ec5bac58da77 100644 (file)
@@ -2160,19 +2160,34 @@ check_net_options(struct drbd_connection *connection, struct net_conf *new_net_c
 }
 
 struct crypto {
-       struct crypto_hash *verify_tfm;
-       struct crypto_hash *csums_tfm;
-       struct crypto_hash *cram_hmac_tfm;
-       struct crypto_hash *integrity_tfm;
+       struct crypto_ahash *verify_tfm;
+       struct crypto_ahash *csums_tfm;
+       struct crypto_shash *cram_hmac_tfm;
+       struct crypto_ahash *integrity_tfm;
 };
 
 static int
-alloc_hash(struct crypto_hash **tfm, char *tfm_name, int err_alg)
+alloc_shash(struct crypto_shash **tfm, char *tfm_name, int err_alg)
 {
        if (!tfm_name[0])
                return NO_ERROR;
 
-       *tfm = crypto_alloc_hash(tfm_name, 0, CRYPTO_ALG_ASYNC);
+       *tfm = crypto_alloc_shash(tfm_name, 0, 0);
+       if (IS_ERR(*tfm)) {
+               *tfm = NULL;
+               return err_alg;
+       }
+
+       return NO_ERROR;
+}
+
+static int
+alloc_ahash(struct crypto_ahash **tfm, char *tfm_name, int err_alg)
+{
+       if (!tfm_name[0])
+               return NO_ERROR;
+
+       *tfm = crypto_alloc_ahash(tfm_name, 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(*tfm)) {
                *tfm = NULL;
                return err_alg;
@@ -2187,24 +2202,24 @@ alloc_crypto(struct crypto *crypto, struct net_conf *new_net_conf)
        char hmac_name[CRYPTO_MAX_ALG_NAME];
        enum drbd_ret_code rv;
 
-       rv = alloc_hash(&crypto->csums_tfm, new_net_conf->csums_alg,
-                      ERR_CSUMS_ALG);
+       rv = alloc_ahash(&crypto->csums_tfm, new_net_conf->csums_alg,
+                        ERR_CSUMS_ALG);
        if (rv != NO_ERROR)
                return rv;
-       rv = alloc_hash(&crypto->verify_tfm, new_net_conf->verify_alg,
-                      ERR_VERIFY_ALG);
+       rv = alloc_ahash(&crypto->verify_tfm, new_net_conf->verify_alg,
+                        ERR_VERIFY_ALG);
        if (rv != NO_ERROR)
                return rv;
-       rv = alloc_hash(&crypto->integrity_tfm, new_net_conf->integrity_alg,
-                      ERR_INTEGRITY_ALG);
+       rv = alloc_ahash(&crypto->integrity_tfm, new_net_conf->integrity_alg,
+                        ERR_INTEGRITY_ALG);
        if (rv != NO_ERROR)
                return rv;
        if (new_net_conf->cram_hmac_alg[0] != 0) {
                snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
                         new_net_conf->cram_hmac_alg);
 
-               rv = alloc_hash(&crypto->cram_hmac_tfm, hmac_name,
-                              ERR_AUTH_ALG);
+               rv = alloc_shash(&crypto->cram_hmac_tfm, hmac_name,
+                                ERR_AUTH_ALG);
        }
 
        return rv;
@@ -2212,10 +2227,10 @@ alloc_crypto(struct crypto *crypto, struct net_conf *new_net_conf)
 
 static void free_crypto(struct crypto *crypto)
 {
-       crypto_free_hash(crypto->cram_hmac_tfm);
-       crypto_free_hash(crypto->integrity_tfm);
-       crypto_free_hash(crypto->csums_tfm);
-       crypto_free_hash(crypto->verify_tfm);
+       crypto_free_shash(crypto->cram_hmac_tfm);
+       crypto_free_ahash(crypto->integrity_tfm);
+       crypto_free_ahash(crypto->csums_tfm);
+       crypto_free_ahash(crypto->verify_tfm);
 }
 
 int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
@@ -2292,23 +2307,23 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
        rcu_assign_pointer(connection->net_conf, new_net_conf);
 
        if (!rsr) {
-               crypto_free_hash(connection->csums_tfm);
+               crypto_free_ahash(connection->csums_tfm);
                connection->csums_tfm = crypto.csums_tfm;
                crypto.csums_tfm = NULL;
        }
        if (!ovr) {
-               crypto_free_hash(connection->verify_tfm);
+               crypto_free_ahash(connection->verify_tfm);
                connection->verify_tfm = crypto.verify_tfm;
                crypto.verify_tfm = NULL;
        }
 
-       crypto_free_hash(connection->integrity_tfm);
+       crypto_free_ahash(connection->integrity_tfm);
        connection->integrity_tfm = crypto.integrity_tfm;
        if (connection->cstate >= C_WF_REPORT_PARAMS && connection->agreed_pro_version >= 100)
                /* Do this without trying to take connection->data.mutex again.  */
                __drbd_send_protocol(connection, P_PROTOCOL_UPDATE);
 
-       crypto_free_hash(connection->cram_hmac_tfm);
+       crypto_free_shash(connection->cram_hmac_tfm);
        connection->cram_hmac_tfm = crypto.cram_hmac_tfm;
 
        mutex_unlock(&connection->resource->conf_update);
index 1957fe8601dcbb60ee2b8d3aa9367104fd503daf..050aaa1c03504e7bb1f90be3628997385b8fe4cc 100644 (file)
@@ -1627,7 +1627,7 @@ read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
 
        digest_size = 0;
        if (!trim && peer_device->connection->peer_integrity_tfm) {
-               digest_size = crypto_hash_digestsize(peer_device->connection->peer_integrity_tfm);
+               digest_size = crypto_ahash_digestsize(peer_device->connection->peer_integrity_tfm);
                /*
                 * FIXME: Receive the incoming digest into the receive buffer
                 *        here, together with its struct p_data?
@@ -1741,7 +1741,7 @@ static int recv_dless_read(struct drbd_peer_device *peer_device, struct drbd_req
 
        digest_size = 0;
        if (peer_device->connection->peer_integrity_tfm) {
-               digest_size = crypto_hash_digestsize(peer_device->connection->peer_integrity_tfm);
+               digest_size = crypto_ahash_digestsize(peer_device->connection->peer_integrity_tfm);
                err = drbd_recv_all_warn(peer_device->connection, dig_in, digest_size);
                if (err)
                        return err;
@@ -3321,7 +3321,7 @@ static int receive_protocol(struct drbd_connection *connection, struct packet_in
        int p_proto, p_discard_my_data, p_two_primaries, cf;
        struct net_conf *nc, *old_net_conf, *new_net_conf = NULL;
        char integrity_alg[SHARED_SECRET_MAX] = "";
-       struct crypto_hash *peer_integrity_tfm = NULL;
+       struct crypto_ahash *peer_integrity_tfm = NULL;
        void *int_dig_in = NULL, *int_dig_vv = NULL;
 
        p_proto         = be32_to_cpu(p->protocol);
@@ -3402,14 +3402,14 @@ static int receive_protocol(struct drbd_connection *connection, struct packet_in
                 * change.
                 */
 
-               peer_integrity_tfm = crypto_alloc_hash(integrity_alg, 0, CRYPTO_ALG_ASYNC);
+               peer_integrity_tfm = crypto_alloc_ahash(integrity_alg, 0, CRYPTO_ALG_ASYNC);
                if (!peer_integrity_tfm) {
                        drbd_err(connection, "peer data-integrity-alg %s not supported\n",
                                 integrity_alg);
                        goto disconnect;
                }
 
-               hash_size = crypto_hash_digestsize(peer_integrity_tfm);
+               hash_size = crypto_ahash_digestsize(peer_integrity_tfm);
                int_dig_in = kmalloc(hash_size, GFP_KERNEL);
                int_dig_vv = kmalloc(hash_size, GFP_KERNEL);
                if (!(int_dig_in && int_dig_vv)) {
@@ -3439,7 +3439,7 @@ static int receive_protocol(struct drbd_connection *connection, struct packet_in
        mutex_unlock(&connection->resource->conf_update);
        mutex_unlock(&connection->data.mutex);
 
-       crypto_free_hash(connection->peer_integrity_tfm);
+       crypto_free_ahash(connection->peer_integrity_tfm);
        kfree(connection->int_dig_in);
        kfree(connection->int_dig_vv);
        connection->peer_integrity_tfm = peer_integrity_tfm;
@@ -3457,7 +3457,7 @@ static int receive_protocol(struct drbd_connection *connection, struct packet_in
 disconnect_rcu_unlock:
        rcu_read_unlock();
 disconnect:
-       crypto_free_hash(peer_integrity_tfm);
+       crypto_free_ahash(peer_integrity_tfm);
        kfree(int_dig_in);
        kfree(int_dig_vv);
        conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
@@ -3469,15 +3469,15 @@ disconnect:
  * return: NULL (alg name was "")
  *         ERR_PTR(error) if something goes wrong
  *         or the crypto hash ptr, if it worked out ok. */
-static struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_device *device,
+static struct crypto_ahash *drbd_crypto_alloc_digest_safe(const struct drbd_device *device,
                const char *alg, const char *name)
 {
-       struct crypto_hash *tfm;
+       struct crypto_ahash *tfm;
 
        if (!alg[0])
                return NULL;
 
-       tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
+       tfm = crypto_alloc_ahash(alg, 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm)) {
                drbd_err(device, "Can not allocate \"%s\" as %s (reason: %ld)\n",
                        alg, name, PTR_ERR(tfm));
@@ -3530,8 +3530,8 @@ static int receive_SyncParam(struct drbd_connection *connection, struct packet_i
        struct drbd_device *device;
        struct p_rs_param_95 *p;
        unsigned int header_size, data_size, exp_max_sz;
-       struct crypto_hash *verify_tfm = NULL;
-       struct crypto_hash *csums_tfm = NULL;
+       struct crypto_ahash *verify_tfm = NULL;
+       struct crypto_ahash *csums_tfm = NULL;
        struct net_conf *old_net_conf, *new_net_conf = NULL;
        struct disk_conf *old_disk_conf = NULL, *new_disk_conf = NULL;
        const int apv = connection->agreed_pro_version;
@@ -3678,14 +3678,14 @@ static int receive_SyncParam(struct drbd_connection *connection, struct packet_i
                        if (verify_tfm) {
                                strcpy(new_net_conf->verify_alg, p->verify_alg);
                                new_net_conf->verify_alg_len = strlen(p->verify_alg) + 1;
-                               crypto_free_hash(peer_device->connection->verify_tfm);
+                               crypto_free_ahash(peer_device->connection->verify_tfm);
                                peer_device->connection->verify_tfm = verify_tfm;
                                drbd_info(device, "using verify-alg: \"%s\"\n", p->verify_alg);
                        }
                        if (csums_tfm) {
                                strcpy(new_net_conf->csums_alg, p->csums_alg);
                                new_net_conf->csums_alg_len = strlen(p->csums_alg) + 1;
-                               crypto_free_hash(peer_device->connection->csums_tfm);
+                               crypto_free_ahash(peer_device->connection->csums_tfm);
                                peer_device->connection->csums_tfm = csums_tfm;
                                drbd_info(device, "using csums-alg: \"%s\"\n", p->csums_alg);
                        }
@@ -3729,9 +3729,9 @@ disconnect:
        mutex_unlock(&connection->resource->conf_update);
        /* just for completeness: actually not needed,
         * as this is not reached if csums_tfm was ok. */
-       crypto_free_hash(csums_tfm);
+       crypto_free_ahash(csums_tfm);
        /* but free the verify_tfm again, if csums_tfm did not work out */
-       crypto_free_hash(verify_tfm);
+       crypto_free_ahash(verify_tfm);
        conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
        return -EIO;
 }
@@ -4925,14 +4925,13 @@ static int drbd_do_auth(struct drbd_connection *connection)
 {
        struct drbd_socket *sock;
        char my_challenge[CHALLENGE_LEN];  /* 64 Bytes... */
-       struct scatterlist sg;
        char *response = NULL;
        char *right_response = NULL;
        char *peers_ch = NULL;
        unsigned int key_len;
        char secret[SHARED_SECRET_MAX]; /* 64 byte */
        unsigned int resp_size;
-       struct hash_desc desc;
+       SHASH_DESC_ON_STACK(desc, connection->cram_hmac_tfm);
        struct packet_info pi;
        struct net_conf *nc;
        int err, rv;
@@ -4945,12 +4944,12 @@ static int drbd_do_auth(struct drbd_connection *connection)
        memcpy(secret, nc->shared_secret, key_len);
        rcu_read_unlock();
 
-       desc.tfm = connection->cram_hmac_tfm;
-       desc.flags = 0;
+       desc->tfm = connection->cram_hmac_tfm;
+       desc->flags = 0;
 
-       rv = crypto_hash_setkey(connection->cram_hmac_tfm, (u8 *)secret, key_len);
+       rv = crypto_shash_setkey(connection->cram_hmac_tfm, (u8 *)secret, key_len);
        if (rv) {
-               drbd_err(connection, "crypto_hash_setkey() failed with %d\n", rv);
+               drbd_err(connection, "crypto_shash_setkey() failed with %d\n", rv);
                rv = -1;
                goto fail;
        }
@@ -5011,7 +5010,7 @@ static int drbd_do_auth(struct drbd_connection *connection)
                goto fail;
        }
 
-       resp_size = crypto_hash_digestsize(connection->cram_hmac_tfm);
+       resp_size = crypto_shash_digestsize(connection->cram_hmac_tfm);
        response = kmalloc(resp_size, GFP_NOIO);
        if (response == NULL) {
                drbd_err(connection, "kmalloc of response failed\n");
@@ -5019,10 +5018,7 @@ static int drbd_do_auth(struct drbd_connection *connection)
                goto fail;
        }
 
-       sg_init_table(&sg, 1);
-       sg_set_buf(&sg, peers_ch, pi.size);
-
-       rv = crypto_hash_digest(&desc, &sg, sg.length, response);
+       rv = crypto_shash_digest(desc, peers_ch, pi.size, response);
        if (rv) {
                drbd_err(connection, "crypto_hash_digest() failed with %d\n", rv);
                rv = -1;
@@ -5070,9 +5066,8 @@ static int drbd_do_auth(struct drbd_connection *connection)
                goto fail;
        }
 
-       sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);
-
-       rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
+       rv = crypto_shash_digest(desc, my_challenge, CHALLENGE_LEN,
+                                right_response);
        if (rv) {
                drbd_err(connection, "crypto_hash_digest() failed with %d\n", rv);
                rv = -1;
@@ -5091,6 +5086,7 @@ static int drbd_do_auth(struct drbd_connection *connection)
        kfree(peers_ch);
        kfree(response);
        kfree(right_response);
+       shash_desc_zero(desc);
 
        return rv;
 }
index eff716c27b1fdf53caa7440ac5bad755c271ad43..4d87499f0d54829bd22594f97202f77fdd533f2e 100644 (file)
@@ -274,51 +274,56 @@ void drbd_request_endio(struct bio *bio)
                complete_master_bio(device, &m);
 }
 
-void drbd_csum_ee(struct crypto_hash *tfm, struct drbd_peer_request *peer_req, void *digest)
+void drbd_csum_ee(struct crypto_ahash *tfm, struct drbd_peer_request *peer_req, void *digest)
 {
-       struct hash_desc desc;
+       AHASH_REQUEST_ON_STACK(req, tfm);
        struct scatterlist sg;
        struct page *page = peer_req->pages;
        struct page *tmp;
        unsigned len;
 
-       desc.tfm = tfm;
-       desc.flags = 0;
+       ahash_request_set_tfm(req, tfm);
+       ahash_request_set_callback(req, 0, NULL, NULL);
 
        sg_init_table(&sg, 1);
-       crypto_hash_init(&desc);
+       crypto_ahash_init(req);
 
        while ((tmp = page_chain_next(page))) {
                /* all but the last page will be fully used */
                sg_set_page(&sg, page, PAGE_SIZE, 0);
-               crypto_hash_update(&desc, &sg, sg.length);
+               ahash_request_set_crypt(req, &sg, NULL, sg.length);
+               crypto_ahash_update(req);
                page = tmp;
        }
        /* and now the last, possibly only partially used page */
        len = peer_req->i.size & (PAGE_SIZE - 1);
        sg_set_page(&sg, page, len ?: PAGE_SIZE, 0);
-       crypto_hash_update(&desc, &sg, sg.length);
-       crypto_hash_final(&desc, digest);
+       ahash_request_set_crypt(req, &sg, digest, sg.length);
+       crypto_ahash_finup(req);
+       ahash_request_zero(req);
 }
 
-void drbd_csum_bio(struct crypto_hash *tfm, struct bio *bio, void *digest)
+void drbd_csum_bio(struct crypto_ahash *tfm, struct bio *bio, void *digest)
 {
-       struct hash_desc desc;
+       AHASH_REQUEST_ON_STACK(req, tfm);
        struct scatterlist sg;
        struct bio_vec bvec;
        struct bvec_iter iter;
 
-       desc.tfm = tfm;
-       desc.flags = 0;
+       ahash_request_set_tfm(req, tfm);
+       ahash_request_set_callback(req, 0, NULL, NULL);
 
        sg_init_table(&sg, 1);
-       crypto_hash_init(&desc);
+       crypto_ahash_init(req);
 
        bio_for_each_segment(bvec, bio, iter) {
                sg_set_page(&sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
-               crypto_hash_update(&desc, &sg, sg.length);
+               ahash_request_set_crypt(req, &sg, NULL, sg.length);
+               crypto_ahash_update(req);
        }
-       crypto_hash_final(&desc, digest);
+       ahash_request_set_crypt(req, NULL, digest, 0);
+       crypto_ahash_final(req);
+       ahash_request_zero(req);
 }
 
 /* MAYBE merge common code with w_e_end_ov_req */
@@ -337,7 +342,7 @@ static int w_e_send_csum(struct drbd_work *w, int cancel)
        if (unlikely((peer_req->flags & EE_WAS_ERROR) != 0))
                goto out;
 
-       digest_size = crypto_hash_digestsize(peer_device->connection->csums_tfm);
+       digest_size = crypto_ahash_digestsize(peer_device->connection->csums_tfm);
        digest = kmalloc(digest_size, GFP_NOIO);
        if (digest) {
                sector_t sector = peer_req->i.sector;
@@ -1113,7 +1118,7 @@ int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
                 * a real fix would be much more involved,
                 * introducing more locking mechanisms */
                if (peer_device->connection->csums_tfm) {
-                       digest_size = crypto_hash_digestsize(peer_device->connection->csums_tfm);
+                       digest_size = crypto_ahash_digestsize(peer_device->connection->csums_tfm);
                        D_ASSERT(device, digest_size == di->digest_size);
                        digest = kmalloc(digest_size, GFP_NOIO);
                }
@@ -1163,7 +1168,7 @@ int w_e_end_ov_req(struct drbd_work *w, int cancel)
        if (unlikely(cancel))
                goto out;
 
-       digest_size = crypto_hash_digestsize(peer_device->connection->verify_tfm);
+       digest_size = crypto_ahash_digestsize(peer_device->connection->verify_tfm);
        digest = kmalloc(digest_size, GFP_NOIO);
        if (!digest) {
                err = 1;        /* terminate the connection in case the allocation failed */
@@ -1235,7 +1240,7 @@ int w_e_end_ov_reply(struct drbd_work *w, int cancel)
        di = peer_req->digest;
 
        if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
-               digest_size = crypto_hash_digestsize(peer_device->connection->verify_tfm);
+               digest_size = crypto_ahash_digestsize(peer_device->connection->verify_tfm);
                digest = kmalloc(digest_size, GFP_NOIO);
                if (digest) {
                        drbd_csum_ee(peer_device->connection->verify_tfm, peer_req, digest);
index ff00331bff49bdb1890333b73790ded540dc8697..f7d89387bd6260ae994fadf3923e5209f185da37 100644 (file)
@@ -77,7 +77,7 @@ config HW_RANDOM_ATMEL
 
 config HW_RANDOM_BCM63XX
        tristate "Broadcom BCM63xx Random Number Generator support"
-       depends on BCM63XX
+       depends on BCM63XX || BMIPS_GENERIC
        default HW_RANDOM
        ---help---
          This driver provides kernel-side support for the Random Number
index 4b31f1387f37fa9cbe8f09afc682df62aedab384..38553f0500c90955b2f188fd54ff5548eb16786b 100644 (file)
@@ -79,10 +79,8 @@ static int bcm63xx_rng_data_read(struct hwrng *rng, u32 *data)
 static int bcm63xx_rng_probe(struct platform_device *pdev)
 {
        struct resource *r;
-       struct clk *clk;
        int ret;
        struct bcm63xx_rng_priv *priv;
-       struct hwrng *rng;
 
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!r) {
@@ -132,10 +130,17 @@ static int bcm63xx_rng_probe(struct platform_device *pdev)
        return 0;
 }
 
+static const struct of_device_id bcm63xx_rng_of_match[] = {
+       { .compatible = "brcm,bcm6368-rng", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, bcm63xx_rng_of_match);
+
 static struct platform_driver bcm63xx_rng_driver = {
        .probe          = bcm63xx_rng_probe,
        .driver         = {
                .name   = "bcm63xx-rng",
+               .of_match_table = bcm63xx_rng_of_match,
        },
 };
 
index 843d6f6aee7a30ec60f7f1f93de42b0d3dc6dc8a..3b06c1d6cfb280a18bd13716bfdbf2c38fa085ed 100644 (file)
@@ -743,6 +743,16 @@ static const struct of_device_id n2rng_match[] = {
                .compatible     = "SUNW,kt-rng",
                .data           = (void *) 1,
        },
+       {
+               .name           = "random-number-generator",
+               .compatible     = "ORCL,m4-rng",
+               .data           = (void *) 1,
+       },
+       {
+               .name           = "random-number-generator",
+               .compatible     = "ORCL,m7-rng",
+               .data           = (void *) 1,
+       },
        {},
 };
 MODULE_DEVICE_TABLE(of, n2rng_match);
index 07d494276aad04195c9e7247089649b302d3082c..fed3ffbec4c16e49cf9df2b56616aca5ef58cefc 100644 (file)
@@ -296,6 +296,7 @@ config CRYPTO_DEV_OMAP_AES
        depends on ARCH_OMAP2 || ARCH_OMAP3 || ARCH_OMAP2PLUS
        select CRYPTO_AES
        select CRYPTO_BLKCIPHER
+       select CRYPTO_ENGINE
        help
          OMAP processors have AES module accelerator. Select this if you
          want to use the OMAP module for AES algorithms.
@@ -487,7 +488,7 @@ config CRYPTO_DEV_IMGTEC_HASH
 
 config CRYPTO_DEV_SUN4I_SS
        tristate "Support for Allwinner Security System cryptographic accelerator"
-       depends on ARCH_SUNXI
+       depends on ARCH_SUNXI && !64BIT
        select CRYPTO_MD5
        select CRYPTO_SHA1
        select CRYPTO_AES
index 3eb3f1279fb7e93306f903c24ce7719ee0672b70..0751035b2cb047e3f26bea709bf1d1a9d910a3a6 100644 (file)
@@ -369,12 +369,6 @@ static inline size_t atmel_aes_padlen(size_t len, size_t block_size)
        return len ? block_size - len : 0;
 }
 
-static inline struct aead_request *
-aead_request_cast(struct crypto_async_request *req)
-{
-       return container_of(req, struct aead_request, base);
-}
-
 static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_base_ctx *ctx)
 {
        struct atmel_aes_dev *aes_dd = NULL;
index 83b2d74256666ecff11ead015c5f07fbb8dc74f5..e08897109cabe1ce2a44c68a7c3046026cc015c7 100644 (file)
@@ -8,6 +8,8 @@
 #define SHA_CR_START                   (1 << 0)
 #define SHA_CR_FIRST                   (1 << 4)
 #define SHA_CR_SWRST                   (1 << 8)
+#define SHA_CR_WUIHV                   (1 << 12)
+#define SHA_CR_WUIEHV                  (1 << 13)
 
 #define SHA_MR                         0x04
 #define SHA_MR_MODE_MASK               (0x3 << 0)
@@ -15,6 +17,8 @@
 #define SHA_MR_MODE_AUTO               0x1
 #define SHA_MR_MODE_PDC                        0x2
 #define SHA_MR_PROCDLY                 (1 << 4)
+#define SHA_MR_UIHV                    (1 << 5)
+#define SHA_MR_UIEHV                   (1 << 6)
 #define SHA_MR_ALGO_SHA1               (0 << 8)
 #define SHA_MR_ALGO_SHA256             (1 << 8)
 #define SHA_MR_ALGO_SHA384             (2 << 8)
index 8bf9914d4d150b439e087688b51c884fdd1d022c..f8407dc7dd38a306991851152c932fe06ac6ddc9 100644 (file)
@@ -53,6 +53,7 @@
 
 #define SHA_FLAGS_FINUP                BIT(16)
 #define SHA_FLAGS_SG           BIT(17)
+#define SHA_FLAGS_ALGO_MASK    GENMASK(22, 18)
 #define SHA_FLAGS_SHA1         BIT(18)
 #define SHA_FLAGS_SHA224       BIT(19)
 #define SHA_FLAGS_SHA256       BIT(20)
 #define SHA_FLAGS_SHA512       BIT(22)
 #define SHA_FLAGS_ERROR                BIT(23)
 #define SHA_FLAGS_PAD          BIT(24)
+#define SHA_FLAGS_RESTORE      BIT(25)
 
 #define SHA_OP_UPDATE  1
 #define SHA_OP_FINAL   2
 
-#define SHA_BUFFER_LEN         PAGE_SIZE
+#define SHA_BUFFER_LEN         (PAGE_SIZE / 16)
 
 #define ATMEL_SHA_DMA_THRESHOLD                56
 
@@ -73,10 +75,22 @@ struct atmel_sha_caps {
        bool    has_dualbuff;
        bool    has_sha224;
        bool    has_sha_384_512;
+       bool    has_uihv;
 };
 
 struct atmel_sha_dev;
 
+/*
+ * .statesize = sizeof(struct atmel_sha_state) must be <= PAGE_SIZE / 8 as
+ * tested by the ahash_prepare_alg() function.
+ */
+struct atmel_sha_state {
+       u8      digest[SHA512_DIGEST_SIZE];
+       u8      buffer[SHA_BUFFER_LEN];
+       u64     digcnt[2];
+       size_t  bufcnt;
+};
+
 struct atmel_sha_reqctx {
        struct atmel_sha_dev    *dd;
        unsigned long   flags;
@@ -122,6 +136,7 @@ struct atmel_sha_dev {
        spinlock_t              lock;
        int                     err;
        struct tasklet_struct   done_task;
+       struct tasklet_struct   queue_task;
 
        unsigned long           flags;
        struct crypto_queue     queue;
@@ -317,7 +332,8 @@ static int atmel_sha_init(struct ahash_request *req)
 static void atmel_sha_write_ctrl(struct atmel_sha_dev *dd, int dma)
 {
        struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
-       u32 valcr = 0, valmr = SHA_MR_MODE_AUTO;
+       u32 valmr = SHA_MR_MODE_AUTO;
+       unsigned int i, hashsize = 0;
 
        if (likely(dma)) {
                if (!dd->caps.has_dma)
@@ -329,22 +345,62 @@ static void atmel_sha_write_ctrl(struct atmel_sha_dev *dd, int dma)
                atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
        }
 
-       if (ctx->flags & SHA_FLAGS_SHA1)
+       switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
+       case SHA_FLAGS_SHA1:
                valmr |= SHA_MR_ALGO_SHA1;
-       else if (ctx->flags & SHA_FLAGS_SHA224)
+               hashsize = SHA1_DIGEST_SIZE;
+               break;
+
+       case SHA_FLAGS_SHA224:
                valmr |= SHA_MR_ALGO_SHA224;
-       else if (ctx->flags & SHA_FLAGS_SHA256)
+               hashsize = SHA256_DIGEST_SIZE;
+               break;
+
+       case SHA_FLAGS_SHA256:
                valmr |= SHA_MR_ALGO_SHA256;
-       else if (ctx->flags & SHA_FLAGS_SHA384)
+               hashsize = SHA256_DIGEST_SIZE;
+               break;
+
+       case SHA_FLAGS_SHA384:
                valmr |= SHA_MR_ALGO_SHA384;
-       else if (ctx->flags & SHA_FLAGS_SHA512)
+               hashsize = SHA512_DIGEST_SIZE;
+               break;
+
+       case SHA_FLAGS_SHA512:
                valmr |= SHA_MR_ALGO_SHA512;
+               hashsize = SHA512_DIGEST_SIZE;
+               break;
+
+       default:
+               break;
+       }
 
        /* Setting CR_FIRST only for the first iteration */
-       if (!(ctx->digcnt[0] || ctx->digcnt[1]))
-               valcr = SHA_CR_FIRST;
+       if (!(ctx->digcnt[0] || ctx->digcnt[1])) {
+               atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
+       } else if (dd->caps.has_uihv && (ctx->flags & SHA_FLAGS_RESTORE)) {
+               const u32 *hash = (const u32 *)ctx->digest;
+
+               /*
+                * Restore the hardware context: update the User Initialize
+                * Hash Value (UIHV) with the value saved when the latest
+                * 'update' operation completed on this very same crypto
+                * request.
+                */
+               ctx->flags &= ~SHA_FLAGS_RESTORE;
+               atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV);
+               for (i = 0; i < hashsize / sizeof(u32); ++i)
+                       atmel_sha_write(dd, SHA_REG_DIN(i), hash[i]);
+               atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
+               valmr |= SHA_MR_UIHV;
+       }
+       /*
+        * WARNING: If the UIHV feature is not available, the hardware CANNOT
+        * process concurrent requests: the internal registers used to store
+        * the hash/digest are still set to the partial digest output values
+        * computed during the latest round.
+        */
 
-       atmel_sha_write(dd, SHA_CR, valcr);
        atmel_sha_write(dd, SHA_MR, valmr);
 }
 
@@ -713,23 +769,31 @@ static void atmel_sha_copy_hash(struct ahash_request *req)
 {
        struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
        u32 *hash = (u32 *)ctx->digest;
-       int i;
+       unsigned int i, hashsize;
 
-       if (ctx->flags & SHA_FLAGS_SHA1)
-               for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++)
-                       hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
-       else if (ctx->flags & SHA_FLAGS_SHA224)
-               for (i = 0; i < SHA224_DIGEST_SIZE / sizeof(u32); i++)
-                       hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
-       else if (ctx->flags & SHA_FLAGS_SHA256)
-               for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(u32); i++)
-                       hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
-       else if (ctx->flags & SHA_FLAGS_SHA384)
-               for (i = 0; i < SHA384_DIGEST_SIZE / sizeof(u32); i++)
-                       hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
-       else
-               for (i = 0; i < SHA512_DIGEST_SIZE / sizeof(u32); i++)
-                       hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
+       switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
+       case SHA_FLAGS_SHA1:
+               hashsize = SHA1_DIGEST_SIZE;
+               break;
+
+       case SHA_FLAGS_SHA224:
+       case SHA_FLAGS_SHA256:
+               hashsize = SHA256_DIGEST_SIZE;
+               break;
+
+       case SHA_FLAGS_SHA384:
+       case SHA_FLAGS_SHA512:
+               hashsize = SHA512_DIGEST_SIZE;
+               break;
+
+       default:
+               /* Should not happen... */
+               return;
+       }
+
+       for (i = 0; i < hashsize / sizeof(u32); ++i)
+               hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
+       ctx->flags |= SHA_FLAGS_RESTORE;
 }
 
 static void atmel_sha_copy_ready_hash(struct ahash_request *req)
@@ -788,7 +852,7 @@ static void atmel_sha_finish_req(struct ahash_request *req, int err)
                req->base.complete(&req->base, err);
 
        /* handle new request */
-       tasklet_schedule(&dd->done_task);
+       tasklet_schedule(&dd->queue_task);
 }
 
 static int atmel_sha_hw_init(struct atmel_sha_dev *dd)
@@ -939,6 +1003,7 @@ static int atmel_sha_final(struct ahash_request *req)
                if (err)
                        goto err1;
 
+               dd->req = req;
                dd->flags |= SHA_FLAGS_BUSY;
                err = atmel_sha_final_req(dd);
        } else {
@@ -979,6 +1044,39 @@ static int atmel_sha_digest(struct ahash_request *req)
        return atmel_sha_init(req) ?: atmel_sha_finup(req);
 }
 
+
+static int atmel_sha_export(struct ahash_request *req, void *out)
+{
+       const struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+       struct atmel_sha_state state;
+
+       memcpy(state.digest, ctx->digest, SHA512_DIGEST_SIZE);
+       memcpy(state.buffer, ctx->buffer, ctx->bufcnt);
+       state.bufcnt = ctx->bufcnt;
+       state.digcnt[0] = ctx->digcnt[0];
+       state.digcnt[1] = ctx->digcnt[1];
+
+       /* out might be unaligned. */
+       memcpy(out, &state, sizeof(state));
+       return 0;
+}
+
+static int atmel_sha_import(struct ahash_request *req, const void *in)
+{
+       struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+       struct atmel_sha_state state;
+
+       /* in might be unaligned. */
+       memcpy(&state, in, sizeof(state));
+
+       memcpy(ctx->digest, state.digest, SHA512_DIGEST_SIZE);
+       memcpy(ctx->buffer, state.buffer, state.bufcnt);
+       ctx->bufcnt = state.bufcnt;
+       ctx->digcnt[0] = state.digcnt[0];
+       ctx->digcnt[1] = state.digcnt[1];
+       return 0;
+}
+
 static int atmel_sha_cra_init(struct crypto_tfm *tfm)
 {
        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
@@ -995,8 +1093,11 @@ static struct ahash_alg sha_1_256_algs[] = {
        .final          = atmel_sha_final,
        .finup          = atmel_sha_finup,
        .digest         = atmel_sha_digest,
+       .export         = atmel_sha_export,
+       .import         = atmel_sha_import,
        .halg = {
                .digestsize     = SHA1_DIGEST_SIZE,
+               .statesize      = sizeof(struct atmel_sha_state),
                .base   = {
                        .cra_name               = "sha1",
                        .cra_driver_name        = "atmel-sha1",
@@ -1016,8 +1117,11 @@ static struct ahash_alg sha_1_256_algs[] = {
        .final          = atmel_sha_final,
        .finup          = atmel_sha_finup,
        .digest         = atmel_sha_digest,
+       .export         = atmel_sha_export,
+       .import         = atmel_sha_import,
        .halg = {
                .digestsize     = SHA256_DIGEST_SIZE,
+               .statesize      = sizeof(struct atmel_sha_state),
                .base   = {
                        .cra_name               = "sha256",
                        .cra_driver_name        = "atmel-sha256",
@@ -1039,8 +1143,11 @@ static struct ahash_alg sha_224_alg = {
        .final          = atmel_sha_final,
        .finup          = atmel_sha_finup,
        .digest         = atmel_sha_digest,
+       .export         = atmel_sha_export,
+       .import         = atmel_sha_import,
        .halg = {
                .digestsize     = SHA224_DIGEST_SIZE,
+               .statesize      = sizeof(struct atmel_sha_state),
                .base   = {
                        .cra_name               = "sha224",
                        .cra_driver_name        = "atmel-sha224",
@@ -1062,8 +1169,11 @@ static struct ahash_alg sha_384_512_algs[] = {
        .final          = atmel_sha_final,
        .finup          = atmel_sha_finup,
        .digest         = atmel_sha_digest,
+       .export         = atmel_sha_export,
+       .import         = atmel_sha_import,
        .halg = {
                .digestsize     = SHA384_DIGEST_SIZE,
+               .statesize      = sizeof(struct atmel_sha_state),
                .base   = {
                        .cra_name               = "sha384",
                        .cra_driver_name        = "atmel-sha384",
@@ -1083,8 +1193,11 @@ static struct ahash_alg sha_384_512_algs[] = {
        .final          = atmel_sha_final,
        .finup          = atmel_sha_finup,
        .digest         = atmel_sha_digest,
+       .export         = atmel_sha_export,
+       .import         = atmel_sha_import,
        .halg = {
                .digestsize     = SHA512_DIGEST_SIZE,
+               .statesize      = sizeof(struct atmel_sha_state),
                .base   = {
                        .cra_name               = "sha512",
                        .cra_driver_name        = "atmel-sha512",
@@ -1100,16 +1213,18 @@ static struct ahash_alg sha_384_512_algs[] = {
 },
 };
 
+static void atmel_sha_queue_task(unsigned long data)
+{
+       struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data;
+
+       atmel_sha_handle_queue(dd, NULL);
+}
+
 static void atmel_sha_done_task(unsigned long data)
 {
        struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data;
        int err = 0;
 
-       if (!(SHA_FLAGS_BUSY & dd->flags)) {
-               atmel_sha_handle_queue(dd, NULL);
-               return;
-       }
-
        if (SHA_FLAGS_CPU & dd->flags) {
                if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
                        dd->flags &= ~SHA_FLAGS_OUTPUT_READY;
@@ -1272,14 +1387,23 @@ static void atmel_sha_get_cap(struct atmel_sha_dev *dd)
        dd->caps.has_dualbuff = 0;
        dd->caps.has_sha224 = 0;
        dd->caps.has_sha_384_512 = 0;
+       dd->caps.has_uihv = 0;
 
        /* keep only major version number */
        switch (dd->hw_version & 0xff0) {
+       case 0x510:
+               dd->caps.has_dma = 1;
+               dd->caps.has_dualbuff = 1;
+               dd->caps.has_sha224 = 1;
+               dd->caps.has_sha_384_512 = 1;
+               dd->caps.has_uihv = 1;
+               break;
        case 0x420:
                dd->caps.has_dma = 1;
                dd->caps.has_dualbuff = 1;
                dd->caps.has_sha224 = 1;
                dd->caps.has_sha_384_512 = 1;
+               dd->caps.has_uihv = 1;
                break;
        case 0x410:
                dd->caps.has_dma = 1;
@@ -1366,6 +1490,8 @@ static int atmel_sha_probe(struct platform_device *pdev)
 
        tasklet_init(&sha_dd->done_task, atmel_sha_done_task,
                                        (unsigned long)sha_dd);
+       tasklet_init(&sha_dd->queue_task, atmel_sha_queue_task,
+                                       (unsigned long)sha_dd);
 
        crypto_init_queue(&sha_dd->queue, ATMEL_SHA_QUEUE_LENGTH);
 
@@ -1464,6 +1590,7 @@ err_sha_dma:
 iclk_unprepare:
        clk_unprepare(sha_dd->iclk);
 res_err:
+       tasklet_kill(&sha_dd->queue_task);
        tasklet_kill(&sha_dd->done_task);
 sha_dd_err:
        dev_err(dev, "initialization failed.\n");
@@ -1484,6 +1611,7 @@ static int atmel_sha_remove(struct platform_device *pdev)
 
        atmel_sha_unregister_algs(sha_dd);
 
+       tasklet_kill(&sha_dd->queue_task);
        tasklet_kill(&sha_dd->done_task);
 
        if (sha_dd->caps.has_dma)
index 69d4a1326feefa6ff4c404d7e093a658728f6f4a..44d30b45f3cc35a44ee692bc6b48706e819b760c 100644 (file)
@@ -534,7 +534,7 @@ static int caam_probe(struct platform_device *pdev)
         * long pointers in master configuration register
         */
        clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK, MCFGR_AWCACHE_CACH |
-                     MCFGR_AWCACHE_BUFF | MCFGR_WDENABLE |
+                     MCFGR_AWCACHE_BUFF | MCFGR_WDENABLE | MCFGR_LARGE_BURST |
                      (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
 
        /*
index a8a79975682f2c5096e1beccd1c71714530a7176..0ba9c40597dcbbf75163bedb5f4e7ef0f1b46972 100644 (file)
@@ -455,7 +455,8 @@ struct caam_ctrl {
 #define MCFGR_AXIPIPE_MASK     (0xf << MCFGR_AXIPIPE_SHIFT)
 
 #define MCFGR_AXIPRI           0x00000008 /* Assert AXI priority sideband */
-#define MCFGR_BURST_64         0x00000001 /* Max burst size */
+#define MCFGR_LARGE_BURST      0x00000004 /* 128/256-byte burst size */
+#define MCFGR_BURST_64         0x00000001 /* 64-byte burst size */
 
 /* JRSTART register offsets */
 #define JRSTART_JR0_START       0x00000001 /* Start Job ring 0 */
index d89f20c04266b31ad85bf82ccf5a2169c52ba727..d095452b8828e6c50b30c8ac3254975f9f8cef6c 100644 (file)
@@ -220,6 +220,38 @@ static int ccp_aes_cmac_digest(struct ahash_request *req)
        return ccp_aes_cmac_finup(req);
 }
 
+static int ccp_aes_cmac_export(struct ahash_request *req, void *out)
+{
+       struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
+       struct ccp_aes_cmac_exp_ctx state;
+
+       state.null_msg = rctx->null_msg;
+       memcpy(state.iv, rctx->iv, sizeof(state.iv));
+       state.buf_count = rctx->buf_count;
+       memcpy(state.buf, rctx->buf, sizeof(state.buf));
+
+       /* 'out' may not be aligned so memcpy from local variable */
+       memcpy(out, &state, sizeof(state));
+
+       return 0;
+}
+
+static int ccp_aes_cmac_import(struct ahash_request *req, const void *in)
+{
+       struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
+       struct ccp_aes_cmac_exp_ctx state;
+
+       /* 'in' may not be aligned so memcpy to local variable */
+       memcpy(&state, in, sizeof(state));
+
+       rctx->null_msg = state.null_msg;
+       memcpy(rctx->iv, state.iv, sizeof(rctx->iv));
+       rctx->buf_count = state.buf_count;
+       memcpy(rctx->buf, state.buf, sizeof(rctx->buf));
+
+       return 0;
+}
+
 static int ccp_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
                               unsigned int key_len)
 {
@@ -352,10 +384,13 @@ int ccp_register_aes_cmac_algs(struct list_head *head)
        alg->final = ccp_aes_cmac_final;
        alg->finup = ccp_aes_cmac_finup;
        alg->digest = ccp_aes_cmac_digest;
+       alg->export = ccp_aes_cmac_export;
+       alg->import = ccp_aes_cmac_import;
        alg->setkey = ccp_aes_cmac_setkey;
 
        halg = &alg->halg;
        halg->digestsize = AES_BLOCK_SIZE;
+       halg->statesize = sizeof(struct ccp_aes_cmac_exp_ctx);
 
        base = &halg->base;
        snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "cmac(aes)");
index d14b3f28e010897990223fffc8f932fe368a033d..7002c6b283e5763b48ce71bcb84d56963875ae40 100644 (file)
@@ -207,6 +207,42 @@ static int ccp_sha_digest(struct ahash_request *req)
        return ccp_sha_finup(req);
 }
 
+static int ccp_sha_export(struct ahash_request *req, void *out)
+{
+       struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
+       struct ccp_sha_exp_ctx state;
+
+       state.type = rctx->type;
+       state.msg_bits = rctx->msg_bits;
+       state.first = rctx->first;
+       memcpy(state.ctx, rctx->ctx, sizeof(state.ctx));
+       state.buf_count = rctx->buf_count;
+       memcpy(state.buf, rctx->buf, sizeof(state.buf));
+
+       /* 'out' may not be aligned so memcpy from local variable */
+       memcpy(out, &state, sizeof(state));
+
+       return 0;
+}
+
+static int ccp_sha_import(struct ahash_request *req, const void *in)
+{
+       struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
+       struct ccp_sha_exp_ctx state;
+
+       /* 'in' may not be aligned so memcpy to local variable */
+       memcpy(&state, in, sizeof(state));
+
+       rctx->type = state.type;
+       rctx->msg_bits = state.msg_bits;
+       rctx->first = state.first;
+       memcpy(rctx->ctx, state.ctx, sizeof(rctx->ctx));
+       rctx->buf_count = state.buf_count;
+       memcpy(rctx->buf, state.buf, sizeof(rctx->buf));
+
+       return 0;
+}
+
 static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
                          unsigned int key_len)
 {
@@ -403,9 +439,12 @@ static int ccp_register_sha_alg(struct list_head *head,
        alg->final = ccp_sha_final;
        alg->finup = ccp_sha_finup;
        alg->digest = ccp_sha_digest;
+       alg->export = ccp_sha_export;
+       alg->import = ccp_sha_import;
 
        halg = &alg->halg;
        halg->digestsize = def->digest_size;
+       halg->statesize = sizeof(struct ccp_sha_exp_ctx);
 
        base = &halg->base;
        snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
index 76a96f0f44c6d7c659c57cfed3e93928683d9687..a326ec20bfa877c1a2dce78c5f15f3a5ec0731f4 100644 (file)
@@ -129,6 +129,15 @@ struct ccp_aes_cmac_req_ctx {
        struct ccp_cmd cmd;
 };
 
+struct ccp_aes_cmac_exp_ctx {
+       unsigned int null_msg;
+
+       u8 iv[AES_BLOCK_SIZE];
+
+       unsigned int buf_count;
+       u8 buf[AES_BLOCK_SIZE];
+};
+
 /***** SHA related defines *****/
 #define MAX_SHA_CONTEXT_SIZE   SHA256_DIGEST_SIZE
 #define MAX_SHA_BLOCK_SIZE     SHA256_BLOCK_SIZE
@@ -171,6 +180,19 @@ struct ccp_sha_req_ctx {
        struct ccp_cmd cmd;
 };
 
+struct ccp_sha_exp_ctx {
+       enum ccp_sha_type type;
+
+       u64 msg_bits;
+
+       unsigned int first;
+
+       u8 ctx[MAX_SHA_CONTEXT_SIZE];
+
+       unsigned int buf_count;
+       u8 buf[MAX_SHA_BLOCK_SIZE];
+};
+
 /***** Common Context Structure *****/
 struct ccp_ctx {
        int (*complete)(struct crypto_async_request *req, int ret);
index e52496a172d05e70893f48ff3cfca309e93db343..2296934455fcd8d5ce75356365f4bc60cac320b1 100644 (file)
@@ -1031,6 +1031,18 @@ static int aead_perform(struct aead_request *req, int encrypt,
        BUG_ON(ivsize && !req->iv);
        memcpy(crypt->iv, req->iv, ivsize);
 
+       buf = chainup_buffers(dev, req->src, crypt->auth_len,
+                             &src_hook, flags, src_direction);
+       req_ctx->src = src_hook.next;
+       crypt->src_buf = src_hook.phys_next;
+       if (!buf)
+               goto free_buf_src;
+
+       lastlen = buf->buf_len;
+       if (lastlen >= authsize)
+               crypt->icv_rev_aes = buf->phys_addr +
+                                    buf->buf_len - authsize;
+
        req_ctx->dst = NULL;
 
        if (req->src != req->dst) {
@@ -1055,20 +1067,6 @@ static int aead_perform(struct aead_request *req, int encrypt,
                }
        }
 
-       buf = chainup_buffers(dev, req->src, crypt->auth_len,
-                             &src_hook, flags, src_direction);
-       req_ctx->src = src_hook.next;
-       crypt->src_buf = src_hook.phys_next;
-       if (!buf)
-               goto free_buf_src;
-
-       if (!encrypt || !req_ctx->dst) {
-               lastlen = buf->buf_len;
-               if (lastlen >= authsize)
-                       crypt->icv_rev_aes = buf->phys_addr +
-                                            buf->buf_len - authsize;
-       }
-
        if (unlikely(lastlen < authsize)) {
                /* The 12 hmac bytes are scattered,
                 * we need to copy them into a safe buffer */
index dd355bd19474fa990d8a2cb0856d4d56ab076345..d420ec751c7c9e090710f568f9b4370591c09110 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/interrupt.h>
 #include <crypto/scatterwalk.h>
 #include <crypto/aes.h>
+#include <crypto/algapi.h>
 
 #define DST_MAXBURST                   4
 #define DMA_MIN                                (DST_MAXBURST * sizeof(u32))
@@ -152,13 +153,10 @@ struct omap_aes_dev {
        unsigned long           flags;
        int                     err;
 
-       spinlock_t              lock;
-       struct crypto_queue     queue;
-
        struct tasklet_struct   done_task;
-       struct tasklet_struct   queue_task;
 
        struct ablkcipher_request       *req;
+       struct crypto_engine            *engine;
 
        /*
         * total is used by PIO mode for book keeping so introduce
@@ -532,9 +530,7 @@ static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
 
        pr_debug("err: %d\n", err);
 
-       dd->flags &= ~FLAGS_BUSY;
-
-       req->base.complete(&req->base, err);
+       crypto_finalize_request(dd->engine, req, err);
 }
 
 static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
@@ -604,34 +600,25 @@ static int omap_aes_copy_sgs(struct omap_aes_dev *dd)
 }
 
 static int omap_aes_handle_queue(struct omap_aes_dev *dd,
-                              struct ablkcipher_request *req)
+                                struct ablkcipher_request *req)
 {
-       struct crypto_async_request *async_req, *backlog;
-       struct omap_aes_ctx *ctx;
-       struct omap_aes_reqctx *rctx;
-       unsigned long flags;
-       int err, ret = 0, len;
-
-       spin_lock_irqsave(&dd->lock, flags);
        if (req)
-               ret = ablkcipher_enqueue_request(&dd->queue, req);
-       if (dd->flags & FLAGS_BUSY) {
-               spin_unlock_irqrestore(&dd->lock, flags);
-               return ret;
-       }
-       backlog = crypto_get_backlog(&dd->queue);
-       async_req = crypto_dequeue_request(&dd->queue);
-       if (async_req)
-               dd->flags |= FLAGS_BUSY;
-       spin_unlock_irqrestore(&dd->lock, flags);
+               return crypto_transfer_request_to_engine(dd->engine, req);
 
-       if (!async_req)
-               return ret;
+       return 0;
+}
 
-       if (backlog)
-               backlog->complete(backlog, -EINPROGRESS);
+static int omap_aes_prepare_req(struct crypto_engine *engine,
+                               struct ablkcipher_request *req)
+{
+       struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
+                       crypto_ablkcipher_reqtfm(req));
+       struct omap_aes_dev *dd = omap_aes_find_dev(ctx);
+       struct omap_aes_reqctx *rctx;
+       int len;
 
-       req = ablkcipher_request_cast(async_req);
+       if (!dd)
+               return -ENODEV;
 
        /* assign new request to device */
        dd->req = req;
@@ -662,16 +649,20 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
        dd->ctx = ctx;
        ctx->dd = dd;
 
-       err = omap_aes_write_ctrl(dd);
-       if (!err)
-               err = omap_aes_crypt_dma_start(dd);
-       if (err) {
-               /* aes_task will not finish it, so do it here */
-               omap_aes_finish_req(dd, err);
-               tasklet_schedule(&dd->queue_task);
-       }
+       return omap_aes_write_ctrl(dd);
+}
 
-       return ret; /* return ret, which is enqueue return value */
+static int omap_aes_crypt_req(struct crypto_engine *engine,
+                             struct ablkcipher_request *req)
+{
+       struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
+                       crypto_ablkcipher_reqtfm(req));
+       struct omap_aes_dev *dd = omap_aes_find_dev(ctx);
+
+       if (!dd)
+               return -ENODEV;
+
+       return omap_aes_crypt_dma_start(dd);
 }
 
 static void omap_aes_done_task(unsigned long data)
@@ -704,18 +695,10 @@ static void omap_aes_done_task(unsigned long data)
        }
 
        omap_aes_finish_req(dd, 0);
-       omap_aes_handle_queue(dd, NULL);
 
        pr_debug("exit\n");
 }
 
-static void omap_aes_queue_task(unsigned long data)
-{
-       struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
-
-       omap_aes_handle_queue(dd, NULL);
-}
-
 static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
 {
        struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
@@ -1175,9 +1158,6 @@ static int omap_aes_probe(struct platform_device *pdev)
        dd->dev = dev;
        platform_set_drvdata(pdev, dd);
 
-       spin_lock_init(&dd->lock);
-       crypto_init_queue(&dd->queue, OMAP_AES_QUEUE_LENGTH);
-
        err = (dev->of_node) ? omap_aes_get_res_of(dd, dev, &res) :
                               omap_aes_get_res_pdev(dd, pdev, &res);
        if (err)
@@ -1209,7 +1189,6 @@ static int omap_aes_probe(struct platform_device *pdev)
                 (reg & dd->pdata->minor_mask) >> dd->pdata->minor_shift);
 
        tasklet_init(&dd->done_task, omap_aes_done_task, (unsigned long)dd);
-       tasklet_init(&dd->queue_task, omap_aes_queue_task, (unsigned long)dd);
 
        err = omap_aes_dma_init(dd);
        if (err && AES_REG_IRQ_STATUS(dd) && AES_REG_IRQ_ENABLE(dd)) {
@@ -1250,7 +1229,20 @@ static int omap_aes_probe(struct platform_device *pdev)
                }
        }
 
+       /* Initialize crypto engine */
+       dd->engine = crypto_engine_alloc_init(dev, 1);
+       if (!dd->engine)
+               goto err_algs;
+
+       dd->engine->prepare_request = omap_aes_prepare_req;
+       dd->engine->crypt_one_request = omap_aes_crypt_req;
+       err = crypto_engine_start(dd->engine);
+       if (err)
+               goto err_engine;
+
        return 0;
+err_engine:
+       crypto_engine_exit(dd->engine);
 err_algs:
        for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
                for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
@@ -1260,7 +1252,6 @@ err_algs:
                omap_aes_dma_cleanup(dd);
 err_irq:
        tasklet_kill(&dd->done_task);
-       tasklet_kill(&dd->queue_task);
        pm_runtime_disable(dev);
 err_res:
        dd = NULL;
@@ -1286,8 +1277,8 @@ static int omap_aes_remove(struct platform_device *pdev)
                        crypto_unregister_alg(
                                        &dd->pdata->algs_info[i].algs_list[j]);
 
+       crypto_engine_exit(dd->engine);
        tasklet_kill(&dd->done_task);
-       tasklet_kill(&dd->queue_task);
        omap_aes_dma_cleanup(dd);
        pm_runtime_disable(dd->dev);
        dd = NULL;
index f96d427e502c05ea7316f66830adfeebb49fb17d..5a07208ce778209ef7397c7f628cc7fbbede4791 100644 (file)
@@ -55,8 +55,8 @@
 
 #define ADF_DH895XCC_DEVICE_NAME "dh895xcc"
 #define ADF_DH895XCCVF_DEVICE_NAME "dh895xccvf"
-#define ADF_C62X_DEVICE_NAME "c62x"
-#define ADF_C62XVF_DEVICE_NAME "c62xvf"
+#define ADF_C62X_DEVICE_NAME "c6xx"
+#define ADF_C62XVF_DEVICE_NAME "c6xxvf"
 #define ADF_C3XXX_DEVICE_NAME "c3xxx"
 #define ADF_C3XXXVF_DEVICE_NAME "c3xxxvf"
 #define ADF_DH895XCC_PCI_DEVICE_ID 0x435
index e78a1d7d88fc76ffd77d074dc9e904e523503f3d..b40d9c8dad964a122dba95371466d736a3f6a9f1 100644 (file)
@@ -121,7 +121,6 @@ static void adf_device_reset_worker(struct work_struct *work)
        adf_dev_restarting_notify(accel_dev);
        adf_dev_stop(accel_dev);
        adf_dev_shutdown(accel_dev);
-       adf_dev_restore(accel_dev);
        if (adf_dev_init(accel_dev) || adf_dev_start(accel_dev)) {
                /* The device hanged and we can't restart it so stop here */
                dev_err(&GET_DEV(accel_dev), "Restart device failed\n");
index ef5988afd4c60f59287e8a446af62e8ac6b783b9..b5484bfa699609d6b572a4e820c854a52802dbc9 100644 (file)
@@ -58,7 +58,7 @@ struct adf_user_cfg_key_val {
                uint64_t padding3;
        };
        enum adf_cfg_val_type type;
-};
+} __packed;
 
 struct adf_user_cfg_section {
        char name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES];
@@ -70,7 +70,7 @@ struct adf_user_cfg_section {
                struct adf_user_cfg_section *next;
                uint64_t padding3;
        };
-};
+} __packed;
 
 struct adf_user_cfg_ctl_data {
        union {
@@ -78,5 +78,5 @@ struct adf_user_cfg_ctl_data {
                uint64_t padding;
        };
        uint8_t device_id;
-};
+} __packed;
 #endif
index 59e4c3af15edb10fe46f4e6afb0ad2d18bc92760..1e8852a8a0574593b3bb498db335e144bbeb29d7 100644 (file)
@@ -1064,8 +1064,7 @@ static int qat_alg_aead_init(struct crypto_aead *tfm,
        if (IS_ERR(ctx->hash_tfm))
                return PTR_ERR(ctx->hash_tfm);
        ctx->qat_hash_alg = hash;
-       crypto_aead_set_reqsize(tfm, sizeof(struct aead_request) +
-                                    sizeof(struct qat_crypto_request));
+       crypto_aead_set_reqsize(tfm, sizeof(struct qat_crypto_request));
        return 0;
 }
 
@@ -1114,8 +1113,7 @@ static int qat_alg_ablkcipher_init(struct crypto_tfm *tfm)
        struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
 
        spin_lock_init(&ctx->lock);
-       tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
-                                       sizeof(struct qat_crypto_request);
+       tfm->crt_ablkcipher.reqsize = sizeof(struct qat_crypto_request);
        ctx->tfm = tfm;
        return 0;
 }
index f214a875582731cd2850b162b5ba96e161c6b4b6..5f161a9777e3f32f6e37aa67f607efc9d234b445 100644 (file)
@@ -224,6 +224,7 @@ static inline struct samsung_aes_variant *find_s5p_sss_version
 {
        if (IS_ENABLED(CONFIG_OF) && (pdev->dev.of_node)) {
                const struct of_device_id *match;
+
                match = of_match_node(s5p_sss_dt_match,
                                        pdev->dev.of_node);
                return (struct samsung_aes_variant *)match->data;
@@ -382,7 +383,7 @@ static void s5p_set_aes(struct s5p_aes_dev *dev,
        void __iomem *keystart;
 
        if (iv)
-               memcpy(dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), iv, 0x10);
+               memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), iv, 0x10);
 
        if (keylen == AES_KEYSIZE_256)
                keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(0);
@@ -391,13 +392,12 @@ static void s5p_set_aes(struct s5p_aes_dev *dev,
        else
                keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(4);
 
-       memcpy(keystart, key, keylen);
+       memcpy_toio(keystart, key, keylen);
 }
 
 static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
 {
        struct ablkcipher_request  *req = dev->req;
-
        uint32_t                    aes_control;
        int                         err;
        unsigned long               flags;
@@ -518,7 +518,7 @@ static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
        struct s5p_aes_dev         *dev    = ctx->dev;
 
        if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
-               pr_err("request size is not exact amount of AES blocks\n");
+               dev_err(dev->dev, "request size is not exact amount of AES blocks\n");
                return -EINVAL;
        }
 
@@ -566,7 +566,7 @@ static int s5p_aes_cbc_decrypt(struct ablkcipher_request *req)
 
 static int s5p_aes_cra_init(struct crypto_tfm *tfm)
 {
-       struct s5p_aes_ctx  *ctx = crypto_tfm_ctx(tfm);
+       struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);
 
        ctx->dev = s5p_dev;
        tfm->crt_ablkcipher.reqsize = sizeof(struct s5p_aes_reqctx);
@@ -701,7 +701,7 @@ static int s5p_aes_probe(struct platform_device *pdev)
                        goto err_algs;
        }
 
-       pr_info("s5p-sss driver registered\n");
+       dev_info(dev, "s5p-sss driver registered\n");
 
        return 0;
 
index 6c4f91c5e6b352e13f630cb71f4f236cf5e75263..c3f3d89e4831cdce56d8c8fccd96f4315ae00db9 100644 (file)
@@ -182,7 +182,6 @@ struct sahara_sha_reqctx {
        u8                      buf[SAHARA_MAX_SHA_BLOCK_SIZE];
        u8                      rembuf[SAHARA_MAX_SHA_BLOCK_SIZE];
        u8                      context[SHA256_DIGEST_SIZE + 4];
-       struct mutex            mutex;
        unsigned int            mode;
        unsigned int            digest_size;
        unsigned int            context_size;
@@ -1096,7 +1095,6 @@ static int sahara_sha_enqueue(struct ahash_request *req, int last)
        if (!req->nbytes && !last)
                return 0;
 
-       mutex_lock(&rctx->mutex);
        rctx->last = last;
 
        if (!rctx->active) {
@@ -1109,7 +1107,6 @@ static int sahara_sha_enqueue(struct ahash_request *req, int last)
        mutex_unlock(&dev->queue_mutex);
 
        wake_up_process(dev->kthread);
-       mutex_unlock(&rctx->mutex);
 
        return ret;
 }
@@ -1137,8 +1134,6 @@ static int sahara_sha_init(struct ahash_request *req)
        rctx->context_size = rctx->digest_size + 4;
        rctx->active = 0;
 
-       mutex_init(&rctx->mutex);
-
        return 0;
 }
 
@@ -1167,26 +1162,18 @@ static int sahara_sha_digest(struct ahash_request *req)
 
 static int sahara_sha_export(struct ahash_request *req, void *out)
 {
-       struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
-       struct sahara_ctx *ctx = crypto_ahash_ctx(ahash);
        struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
 
-       memcpy(out, ctx, sizeof(struct sahara_ctx));
-       memcpy(out + sizeof(struct sahara_sha_reqctx), rctx,
-              sizeof(struct sahara_sha_reqctx));
+       memcpy(out, rctx, sizeof(struct sahara_sha_reqctx));
 
        return 0;
 }
 
 static int sahara_sha_import(struct ahash_request *req, const void *in)
 {
-       struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
-       struct sahara_ctx *ctx = crypto_ahash_ctx(ahash);
        struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
 
-       memcpy(ctx, in, sizeof(struct sahara_ctx));
-       memcpy(rctx, in + sizeof(struct sahara_sha_reqctx),
-              sizeof(struct sahara_sha_reqctx));
+       memcpy(rctx, in, sizeof(struct sahara_sha_reqctx));
 
        return 0;
 }
@@ -1272,6 +1259,7 @@ static struct ahash_alg sha_v3_algs[] = {
        .export         = sahara_sha_export,
        .import         = sahara_sha_import,
        .halg.digestsize        = SHA1_DIGEST_SIZE,
+       .halg.statesize         = sizeof(struct sahara_sha_reqctx),
        .halg.base      = {
                .cra_name               = "sha1",
                .cra_driver_name        = "sahara-sha1",
@@ -1299,6 +1287,7 @@ static struct ahash_alg sha_v4_algs[] = {
        .export         = sahara_sha_export,
        .import         = sahara_sha_import,
        .halg.digestsize        = SHA256_DIGEST_SIZE,
+       .halg.statesize         = sizeof(struct sahara_sha_reqctx),
        .halg.base      = {
                .cra_name               = "sha256",
                .cra_driver_name        = "sahara-sha256",
index a19ee127edcafd3c70ad9e6ee8a86b1823f020f7..7be3fbcd8d78a6b5c0968a4b0846694313989d6b 100644 (file)
@@ -251,11 +251,10 @@ static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq)
                spaces = readl(ss->base + SS_FCSR);
                rx_cnt = SS_RXFIFO_SPACES(spaces);
                tx_cnt = SS_TXFIFO_SPACES(spaces);
-               dev_dbg(ss->dev, "%x %u/%u %u/%u cnt=%u %u/%u %u/%u cnt=%u %u %u\n",
+               dev_dbg(ss->dev, "%x %u/%u %u/%u cnt=%u %u/%u %u/%u cnt=%u %u\n",
                        mode,
                        oi, mi.length, ileft, areq->nbytes, rx_cnt,
-                       oo, mo.length, oleft, areq->nbytes, tx_cnt,
-                       todo, ob);
+                       oo, mo.length, oleft, areq->nbytes, tx_cnt, ob);
 
                if (tx_cnt == 0)
                        continue;
index 3147c8d09ea84a0a76d0fd7ead35931a89e29aed..06a4e3cfc66aec0f9aaa20a0c0f264911f2c7f21 100644 (file)
@@ -28,6 +28,7 @@
 #include <crypto/hash.h>
 #include <crypto/md5.h>
 #include <crypto/algapi.h>
+#include <crypto/skcipher.h>
 
 #include <linux/device-mapper.h>
 
@@ -44,7 +45,7 @@ struct convert_context {
        struct bvec_iter iter_out;
        sector_t cc_sector;
        atomic_t cc_pending;
-       struct ablkcipher_request *req;
+       struct skcipher_request *req;
 };
 
 /*
@@ -86,7 +87,7 @@ struct crypt_iv_operations {
 };
 
 struct iv_essiv_private {
-       struct crypto_hash *hash_tfm;
+       struct crypto_ahash *hash_tfm;
        u8 *salt;
 };
 
@@ -153,13 +154,13 @@ struct crypt_config {
 
        /* ESSIV: struct crypto_cipher *essiv_tfm */
        void *iv_private;
-       struct crypto_ablkcipher **tfms;
+       struct crypto_skcipher **tfms;
        unsigned tfms_count;
 
        /*
         * Layout of each crypto request:
         *
-        *   struct ablkcipher_request
+        *   struct skcipher_request
         *      context
         *      padding
         *   struct dm_crypt_request
@@ -189,7 +190,7 @@ static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);
 /*
  * Use this to access cipher attributes that are the same for each CPU.
  */
-static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)
+static struct crypto_skcipher *any_tfm(struct crypt_config *cc)
 {
        return cc->tfms[0];
 }
@@ -263,23 +264,25 @@ static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
 static int crypt_iv_essiv_init(struct crypt_config *cc)
 {
        struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
-       struct hash_desc desc;
+       AHASH_REQUEST_ON_STACK(req, essiv->hash_tfm);
        struct scatterlist sg;
        struct crypto_cipher *essiv_tfm;
        int err;
 
        sg_init_one(&sg, cc->key, cc->key_size);
-       desc.tfm = essiv->hash_tfm;
-       desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+       ahash_request_set_tfm(req, essiv->hash_tfm);
+       ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
+       ahash_request_set_crypt(req, &sg, essiv->salt, cc->key_size);
 
-       err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt);
+       err = crypto_ahash_digest(req);
+       ahash_request_zero(req);
        if (err)
                return err;
 
        essiv_tfm = cc->iv_private;
 
        err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
-                           crypto_hash_digestsize(essiv->hash_tfm));
+                           crypto_ahash_digestsize(essiv->hash_tfm));
        if (err)
                return err;
 
@@ -290,7 +293,7 @@ static int crypt_iv_essiv_init(struct crypt_config *cc)
 static int crypt_iv_essiv_wipe(struct crypt_config *cc)
 {
        struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
-       unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);
+       unsigned salt_size = crypto_ahash_digestsize(essiv->hash_tfm);
        struct crypto_cipher *essiv_tfm;
        int r, err = 0;
 
@@ -320,7 +323,7 @@ static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc,
        }
 
        if (crypto_cipher_blocksize(essiv_tfm) !=
-           crypto_ablkcipher_ivsize(any_tfm(cc))) {
+           crypto_skcipher_ivsize(any_tfm(cc))) {
                ti->error = "Block size of ESSIV cipher does "
                            "not match IV size of block cipher";
                crypto_free_cipher(essiv_tfm);
@@ -342,7 +345,7 @@ static void crypt_iv_essiv_dtr(struct crypt_config *cc)
        struct crypto_cipher *essiv_tfm;
        struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
 
-       crypto_free_hash(essiv->hash_tfm);
+       crypto_free_ahash(essiv->hash_tfm);
        essiv->hash_tfm = NULL;
 
        kzfree(essiv->salt);
@@ -360,7 +363,7 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
                              const char *opts)
 {
        struct crypto_cipher *essiv_tfm = NULL;
-       struct crypto_hash *hash_tfm = NULL;
+       struct crypto_ahash *hash_tfm = NULL;
        u8 *salt = NULL;
        int err;
 
@@ -370,14 +373,14 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
        }
 
        /* Allocate hash algorithm */
-       hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
+       hash_tfm = crypto_alloc_ahash(opts, 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(hash_tfm)) {
                ti->error = "Error initializing ESSIV hash";
                err = PTR_ERR(hash_tfm);
                goto bad;
        }
 
-       salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL);
+       salt = kzalloc(crypto_ahash_digestsize(hash_tfm), GFP_KERNEL);
        if (!salt) {
                ti->error = "Error kmallocing salt storage in ESSIV";
                err = -ENOMEM;
@@ -388,7 +391,7 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
        cc->iv_gen_private.essiv.hash_tfm = hash_tfm;
 
        essiv_tfm = setup_essiv_cpu(cc, ti, salt,
-                               crypto_hash_digestsize(hash_tfm));
+                               crypto_ahash_digestsize(hash_tfm));
        if (IS_ERR(essiv_tfm)) {
                crypt_iv_essiv_dtr(cc);
                return PTR_ERR(essiv_tfm);
@@ -399,7 +402,7 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
 
 bad:
        if (hash_tfm && !IS_ERR(hash_tfm))
-               crypto_free_hash(hash_tfm);
+               crypto_free_ahash(hash_tfm);
        kfree(salt);
        return err;
 }
@@ -419,7 +422,7 @@ static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
 static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
                              const char *opts)
 {
-       unsigned bs = crypto_ablkcipher_blocksize(any_tfm(cc));
+       unsigned bs = crypto_skcipher_blocksize(any_tfm(cc));
        int log = ilog2(bs);
 
        /* we need to calculate how far we must shift the sector count
@@ -816,27 +819,27 @@ static void crypt_convert_init(struct crypt_config *cc,
 }
 
 static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
-                                            struct ablkcipher_request *req)
+                                            struct skcipher_request *req)
 {
        return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
 }
 
-static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc,
+static struct skcipher_request *req_of_dmreq(struct crypt_config *cc,
                                               struct dm_crypt_request *dmreq)
 {
-       return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start);
+       return (struct skcipher_request *)((char *)dmreq - cc->dmreq_start);
 }
 
 static u8 *iv_of_dmreq(struct crypt_config *cc,
                       struct dm_crypt_request *dmreq)
 {
        return (u8 *)ALIGN((unsigned long)(dmreq + 1),
-               crypto_ablkcipher_alignmask(any_tfm(cc)) + 1);
+               crypto_skcipher_alignmask(any_tfm(cc)) + 1);
 }
 
 static int crypt_convert_block(struct crypt_config *cc,
                               struct convert_context *ctx,
-                              struct ablkcipher_request *req)
+                              struct skcipher_request *req)
 {
        struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
        struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
@@ -866,13 +869,13 @@ static int crypt_convert_block(struct crypt_config *cc,
                        return r;
        }
 
-       ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
-                                    1 << SECTOR_SHIFT, iv);
+       skcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
+                                  1 << SECTOR_SHIFT, iv);
 
        if (bio_data_dir(ctx->bio_in) == WRITE)
-               r = crypto_ablkcipher_encrypt(req);
+               r = crypto_skcipher_encrypt(req);
        else
-               r = crypto_ablkcipher_decrypt(req);
+               r = crypto_skcipher_decrypt(req);
 
        if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
                r = cc->iv_gen_ops->post(cc, iv, dmreq);
@@ -891,23 +894,23 @@ static void crypt_alloc_req(struct crypt_config *cc,
        if (!ctx->req)
                ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO);
 
-       ablkcipher_request_set_tfm(ctx->req, cc->tfms[key_index]);
+       skcipher_request_set_tfm(ctx->req, cc->tfms[key_index]);
 
        /*
         * Use REQ_MAY_BACKLOG so a cipher driver internally backlogs
         * requests if driver request queue is full.
         */
-       ablkcipher_request_set_callback(ctx->req,
+       skcipher_request_set_callback(ctx->req,
            CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
            kcryptd_async_done, dmreq_of_req(cc, ctx->req));
 }
 
 static void crypt_free_req(struct crypt_config *cc,
-                          struct ablkcipher_request *req, struct bio *base_bio)
+                          struct skcipher_request *req, struct bio *base_bio)
 {
        struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);
 
-       if ((struct ablkcipher_request *)(io + 1) != req)
+       if ((struct skcipher_request *)(io + 1) != req)
                mempool_free(req, cc->req_pool);
 }
 
@@ -1437,7 +1440,7 @@ static void crypt_free_tfms(struct crypt_config *cc)
 
        for (i = 0; i < cc->tfms_count; i++)
                if (cc->tfms[i] && !IS_ERR(cc->tfms[i])) {
-                       crypto_free_ablkcipher(cc->tfms[i]);
+                       crypto_free_skcipher(cc->tfms[i]);
                        cc->tfms[i] = NULL;
                }
 
@@ -1450,13 +1453,13 @@ static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
        unsigned i;
        int err;
 
-       cc->tfms = kmalloc(cc->tfms_count * sizeof(struct crypto_ablkcipher *),
+       cc->tfms = kmalloc(cc->tfms_count * sizeof(struct crypto_skcipher *),
                           GFP_KERNEL);
        if (!cc->tfms)
                return -ENOMEM;
 
        for (i = 0; i < cc->tfms_count; i++) {
-               cc->tfms[i] = crypto_alloc_ablkcipher(ciphermode, 0, 0);
+               cc->tfms[i] = crypto_alloc_skcipher(ciphermode, 0, 0);
                if (IS_ERR(cc->tfms[i])) {
                        err = PTR_ERR(cc->tfms[i]);
                        crypt_free_tfms(cc);
@@ -1476,9 +1479,9 @@ static int crypt_setkey_allcpus(struct crypt_config *cc)
        subkey_size = (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count);
 
        for (i = 0; i < cc->tfms_count; i++) {
-               r = crypto_ablkcipher_setkey(cc->tfms[i],
-                                            cc->key + (i * subkey_size),
-                                            subkey_size);
+               r = crypto_skcipher_setkey(cc->tfms[i],
+                                          cc->key + (i * subkey_size),
+                                          subkey_size);
                if (r)
                        err = r;
        }
@@ -1645,7 +1648,7 @@ static int crypt_ctr_cipher(struct dm_target *ti,
        }
 
        /* Initialize IV */
-       cc->iv_size = crypto_ablkcipher_ivsize(any_tfm(cc));
+       cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));
        if (cc->iv_size)
                /* at least a 64 bit sector number should fit in our buffer */
                cc->iv_size = max(cc->iv_size,
@@ -1763,21 +1766,21 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        if (ret < 0)
                goto bad;
 
-       cc->dmreq_start = sizeof(struct ablkcipher_request);
-       cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
+       cc->dmreq_start = sizeof(struct skcipher_request);
+       cc->dmreq_start += crypto_skcipher_reqsize(any_tfm(cc));
        cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));
 
-       if (crypto_ablkcipher_alignmask(any_tfm(cc)) < CRYPTO_MINALIGN) {
+       if (crypto_skcipher_alignmask(any_tfm(cc)) < CRYPTO_MINALIGN) {
                /* Allocate the padding exactly */
                iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request))
-                               & crypto_ablkcipher_alignmask(any_tfm(cc));
+                               & crypto_skcipher_alignmask(any_tfm(cc));
        } else {
                /*
                 * If the cipher requires greater alignment than kmalloc
                 * alignment, we don't know the exact position of the
                 * initialization vector. We must assume worst case.
                 */
-               iv_size_padding = crypto_ablkcipher_alignmask(any_tfm(cc));
+               iv_size_padding = crypto_skcipher_alignmask(any_tfm(cc));
        }
 
        ret = -ENOMEM;
@@ -1922,7 +1925,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
 
        io = dm_per_bio_data(bio, cc->per_bio_data_size);
        crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
-       io->ctx.req = (struct ablkcipher_request *)(io + 1);
+       io->ctx.req = (struct skcipher_request *)(io + 1);
 
        if (bio_data_dir(io->base_bio) == READ) {
                if (kcryptd_io_read(io, GFP_NOWAIT))
index 05005c660d4d954527d278130824f232383c2d49..f60f7660b451754b37f7f62f53cf4d3bcbed2724 100644 (file)
@@ -42,6 +42,8 @@
  *                    deprecated in 2.6
  */
 
+#include <crypto/hash.h>
+#include <crypto/skcipher.h>
 #include <linux/err.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
@@ -49,7 +51,6 @@
 #include <linux/types.h>
 #include <linux/slab.h>
 #include <linux/string.h>
-#include <linux/crypto.h>
 #include <linux/mm.h>
 #include <linux/ppp_defs.h>
 #include <linux/ppp-comp.h>
@@ -94,8 +95,8 @@ static inline void sha_pad_init(struct sha_pad *shapad)
  * State for an MPPE (de)compressor.
  */
 struct ppp_mppe_state {
-       struct crypto_blkcipher *arc4;
-       struct crypto_hash *sha1;
+       struct crypto_skcipher *arc4;
+       struct crypto_ahash *sha1;
        unsigned char *sha1_digest;
        unsigned char master_key[MPPE_MAX_KEY_LEN];
        unsigned char session_key[MPPE_MAX_KEY_LEN];
@@ -135,7 +136,7 @@ struct ppp_mppe_state {
  */
 static void get_new_key_from_sha(struct ppp_mppe_state * state)
 {
-       struct hash_desc desc;
+       AHASH_REQUEST_ON_STACK(req, state->sha1);
        struct scatterlist sg[4];
        unsigned int nbytes;
 
@@ -148,10 +149,12 @@ static void get_new_key_from_sha(struct ppp_mppe_state * state)
        nbytes += setup_sg(&sg[3], sha_pad->sha_pad2,
                           sizeof(sha_pad->sha_pad2));
 
-       desc.tfm = state->sha1;
-       desc.flags = 0;
+       ahash_request_set_tfm(req, state->sha1);
+       ahash_request_set_callback(req, 0, NULL, NULL);
+       ahash_request_set_crypt(req, sg, state->sha1_digest, nbytes);
 
-       crypto_hash_digest(&desc, sg, nbytes, state->sha1_digest);
+       crypto_ahash_digest(req);
+       ahash_request_zero(req);
 }
 
 /*
@@ -161,20 +164,23 @@ static void get_new_key_from_sha(struct ppp_mppe_state * state)
 static void mppe_rekey(struct ppp_mppe_state * state, int initial_key)
 {
        struct scatterlist sg_in[1], sg_out[1];
-       struct blkcipher_desc desc = { .tfm = state->arc4 };
+       SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
+
+       skcipher_request_set_tfm(req, state->arc4);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
 
        get_new_key_from_sha(state);
        if (!initial_key) {
-               crypto_blkcipher_setkey(state->arc4, state->sha1_digest,
-                                       state->keylen);
+               crypto_skcipher_setkey(state->arc4, state->sha1_digest,
+                                      state->keylen);
                sg_init_table(sg_in, 1);
                sg_init_table(sg_out, 1);
                setup_sg(sg_in, state->sha1_digest, state->keylen);
                setup_sg(sg_out, state->session_key, state->keylen);
-               if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
-                                            state->keylen) != 0) {
+               skcipher_request_set_crypt(req, sg_in, sg_out, state->keylen,
+                                          NULL);
+               if (crypto_skcipher_encrypt(req))
                    printk(KERN_WARNING "mppe_rekey: cipher_encrypt failed\n");
-               }
        } else {
                memcpy(state->session_key, state->sha1_digest, state->keylen);
        }
@@ -184,7 +190,8 @@ static void mppe_rekey(struct ppp_mppe_state * state, int initial_key)
                state->session_key[1] = 0x26;
                state->session_key[2] = 0x9e;
        }
-       crypto_blkcipher_setkey(state->arc4, state->session_key, state->keylen);
+       crypto_skcipher_setkey(state->arc4, state->session_key, state->keylen);
+       skcipher_request_zero(req);
 }
 
 /*
@@ -204,19 +211,19 @@ static void *mppe_alloc(unsigned char *options, int optlen)
                goto out;
 
 
-       state->arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+       state->arc4 = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(state->arc4)) {
                state->arc4 = NULL;
                goto out_free;
        }
 
-       state->sha1 = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC);
+       state->sha1 = crypto_alloc_ahash("sha1", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(state->sha1)) {
                state->sha1 = NULL;
                goto out_free;
        }
 
-       digestsize = crypto_hash_digestsize(state->sha1);
+       digestsize = crypto_ahash_digestsize(state->sha1);
        if (digestsize < MPPE_MAX_KEY_LEN)
                goto out_free;
 
@@ -237,15 +244,12 @@ static void *mppe_alloc(unsigned char *options, int optlen)
 
        return (void *)state;
 
-       out_free:
-           if (state->sha1_digest)
-               kfree(state->sha1_digest);
-           if (state->sha1)
-               crypto_free_hash(state->sha1);
-           if (state->arc4)
-               crypto_free_blkcipher(state->arc4);
-           kfree(state);
-       out:
+out_free:
+       kfree(state->sha1_digest);
+       crypto_free_ahash(state->sha1);
+       crypto_free_skcipher(state->arc4);
+       kfree(state);
+out:
        return NULL;
 }
 
@@ -256,13 +260,10 @@ static void mppe_free(void *arg)
 {
        struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
        if (state) {
-           if (state->sha1_digest)
                kfree(state->sha1_digest);
-           if (state->sha1)
-               crypto_free_hash(state->sha1);
-           if (state->arc4)
-               crypto_free_blkcipher(state->arc4);
-           kfree(state);
+               crypto_free_ahash(state->sha1);
+               crypto_free_skcipher(state->arc4);
+               kfree(state);
        }
 }
 
@@ -368,8 +369,9 @@ mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf,
              int isize, int osize)
 {
        struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
-       struct blkcipher_desc desc = { .tfm = state->arc4 };
+       SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
        int proto;
+       int err;
        struct scatterlist sg_in[1], sg_out[1];
 
        /*
@@ -426,7 +428,13 @@ mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf,
        sg_init_table(sg_out, 1);
        setup_sg(sg_in, ibuf, isize);
        setup_sg(sg_out, obuf, osize);
-       if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in, isize) != 0) {
+
+       skcipher_request_set_tfm(req, state->arc4);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, sg_in, sg_out, isize, NULL);
+       err = crypto_skcipher_encrypt(req);
+       skcipher_request_zero(req);
+       if (err) {
                printk(KERN_DEBUG "crypto_cypher_encrypt failed\n");
                return -1;
        }
@@ -475,7 +483,7 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
                int osize)
 {
        struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
-       struct blkcipher_desc desc = { .tfm = state->arc4 };
+       SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
        unsigned ccount;
        int flushed = MPPE_BITS(ibuf) & MPPE_BIT_FLUSHED;
        struct scatterlist sg_in[1], sg_out[1];
@@ -609,9 +617,14 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
        sg_init_table(sg_out, 1);
        setup_sg(sg_in, ibuf, 1);
        setup_sg(sg_out, obuf, 1);
-       if (crypto_blkcipher_decrypt(&desc, sg_out, sg_in, 1) != 0) {
+
+       skcipher_request_set_tfm(req, state->arc4);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, sg_in, sg_out, 1, NULL);
+       if (crypto_skcipher_decrypt(req)) {
                printk(KERN_DEBUG "crypto_cypher_decrypt failed\n");
-               return DECOMP_ERROR;
+               osize = DECOMP_ERROR;
+               goto out_zap_req;
        }
 
        /*
@@ -629,9 +642,11 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
        /* And finally, decrypt the rest of the packet. */
        setup_sg(sg_in, ibuf + 1, isize - 1);
        setup_sg(sg_out, obuf + 1, osize - 1);
-       if (crypto_blkcipher_decrypt(&desc, sg_out, sg_in, isize - 1)) {
+       skcipher_request_set_crypt(req, sg_in, sg_out, isize - 1, NULL);
+       if (crypto_skcipher_decrypt(req)) {
                printk(KERN_DEBUG "crypto_cypher_decrypt failed\n");
-               return DECOMP_ERROR;
+               osize = DECOMP_ERROR;
+               goto out_zap_req;
        }
 
        state->stats.unc_bytes += osize;
@@ -642,6 +657,8 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
        /* good packet credit */
        state->sanity_errors >>= 1;
 
+out_zap_req:
+       skcipher_request_zero(req);
        return osize;
 
 sanity_error:
@@ -714,8 +731,8 @@ static struct compressor ppp_mppe = {
 static int __init ppp_mppe_init(void)
 {
        int answer;
-       if (!(crypto_has_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC) &&
-             crypto_has_hash("sha1", 0, CRYPTO_ALG_ASYNC)))
+       if (!(crypto_has_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC) &&
+             crypto_has_ahash("sha1", 0, CRYPTO_ALG_ASYNC)))
                return -ENODEV;
 
        sha_pad = kmalloc(sizeof(struct sha_pad), GFP_KERNEL);
index fce4a843e65694d90ff9d35ca34bb1ea808d0a8f..bc7397d709d3ac5ff85b9a1057e43a93500587fc 100644 (file)
@@ -6,7 +6,7 @@
 #include <linux/string.h>
 #include <linux/if_ether.h>
 #include <linux/scatterlist.h>
-#include <linux/crypto.h>
+#include <crypto/hash.h>
 
 #include "orinoco.h"
 #include "mic.h"
@@ -16,7 +16,8 @@
 /********************************************************************/
 int orinoco_mic_init(struct orinoco_private *priv)
 {
-       priv->tx_tfm_mic = crypto_alloc_hash("michael_mic", 0, 0);
+       priv->tx_tfm_mic = crypto_alloc_ahash("michael_mic", 0,
+                                             CRYPTO_ALG_ASYNC);
        if (IS_ERR(priv->tx_tfm_mic)) {
                printk(KERN_DEBUG "orinoco_mic_init: could not allocate "
                       "crypto API michael_mic\n");
@@ -24,7 +25,8 @@ int orinoco_mic_init(struct orinoco_private *priv)
                return -ENOMEM;
        }
 
-       priv->rx_tfm_mic = crypto_alloc_hash("michael_mic", 0, 0);
+       priv->rx_tfm_mic = crypto_alloc_ahash("michael_mic", 0,
+                                             CRYPTO_ALG_ASYNC);
        if (IS_ERR(priv->rx_tfm_mic)) {
                printk(KERN_DEBUG "orinoco_mic_init: could not allocate "
                       "crypto API michael_mic\n");
@@ -38,18 +40,19 @@ int orinoco_mic_init(struct orinoco_private *priv)
 void orinoco_mic_free(struct orinoco_private *priv)
 {
        if (priv->tx_tfm_mic)
-               crypto_free_hash(priv->tx_tfm_mic);
+               crypto_free_ahash(priv->tx_tfm_mic);
        if (priv->rx_tfm_mic)
-               crypto_free_hash(priv->rx_tfm_mic);
+               crypto_free_ahash(priv->rx_tfm_mic);
 }
 
-int orinoco_mic(struct crypto_hash *tfm_michael, u8 *key,
+int orinoco_mic(struct crypto_ahash *tfm_michael, u8 *key,
                u8 *da, u8 *sa, u8 priority,
                u8 *data, size_t data_len, u8 *mic)
 {
-       struct hash_desc desc;
+       AHASH_REQUEST_ON_STACK(req, tfm_michael);
        struct scatterlist sg[2];
        u8 hdr[ETH_HLEN + 2]; /* size of header + padding */
+       int err;
 
        if (tfm_michael == NULL) {
                printk(KERN_WARNING "orinoco_mic: tfm_michael == NULL\n");
@@ -69,11 +72,13 @@ int orinoco_mic(struct crypto_hash *tfm_michael, u8 *key,
        sg_set_buf(&sg[0], hdr, sizeof(hdr));
        sg_set_buf(&sg[1], data, data_len);
 
-       if (crypto_hash_setkey(tfm_michael, key, MIC_KEYLEN))
+       if (crypto_ahash_setkey(tfm_michael, key, MIC_KEYLEN))
                return -1;
 
-       desc.tfm = tfm_michael;
-       desc.flags = 0;
-       return crypto_hash_digest(&desc, sg, data_len + sizeof(hdr),
-                                 mic);
+       ahash_request_set_tfm(req, tfm_michael);
+       ahash_request_set_callback(req, 0, NULL, NULL);
+       ahash_request_set_crypt(req, sg, mic, data_len + sizeof(hdr));
+       err = crypto_ahash_digest(req);
+       ahash_request_zero(req);
+       return err;
 }
index 04d05bc566d622edb063a15d3724afa418412653..ce731d05cc98cd2d0b6415f66868d3ad517b4753 100644 (file)
 
 /* Forward declarations */
 struct orinoco_private;
-struct crypto_hash;
+struct crypto_ahash;
 
 int orinoco_mic_init(struct orinoco_private *priv);
 void orinoco_mic_free(struct orinoco_private *priv);
-int orinoco_mic(struct crypto_hash *tfm_michael, u8 *key,
+int orinoco_mic(struct crypto_ahash *tfm_michael, u8 *key,
                u8 *da, u8 *sa, u8 priority,
                u8 *data, size_t data_len, u8 *mic);
 
index eebd2be21ee9067e7d90f45f32b227bb07601998..2f0c84b1c440cd1160bdebc3cabcb13be110a73e 100644 (file)
@@ -152,8 +152,8 @@ struct orinoco_private {
        u8 *wpa_ie;
        int wpa_ie_len;
 
-       struct crypto_hash *rx_tfm_mic;
-       struct crypto_hash *tx_tfm_mic;
+       struct crypto_ahash *rx_tfm_mic;
+       struct crypto_ahash *tx_tfm_mic;
 
        unsigned int wpa_enabled:1;
        unsigned int tkip_cm_active:1;
index 64a90252c57f24c615223246854c13edeb066142..5f97da1947e396e9252809106ea61fd26d8e1db7 100644 (file)
@@ -19,7 +19,7 @@
 
 #include <linux/completion.h>
 #include <linux/firmware.h>
-#include <linux/crypto.h>
+#include <crypto/hash.h>
 #include <crypto/sha.h>
 
 #include "s3fwrn5.h"
@@ -429,8 +429,7 @@ int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info)
 {
        struct s3fwrn5_fw_image *fw = &fw_info->fw;
        u8 hash_data[SHA1_DIGEST_SIZE];
-       struct scatterlist sg;
-       struct hash_desc desc;
+       struct crypto_shash *tfm;
        u32 image_size, off;
        int ret;
 
@@ -438,12 +437,31 @@ int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info)
 
        /* Compute SHA of firmware data */
 
-       sg_init_one(&sg, fw->image, image_size);
-       desc.tfm = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC);
-       crypto_hash_init(&desc);
-       crypto_hash_update(&desc, &sg, image_size);
-       crypto_hash_final(&desc, hash_data);
-       crypto_free_hash(desc.tfm);
+       tfm = crypto_alloc_shash("sha1", 0, 0);
+       if (IS_ERR(tfm)) {
+               ret = PTR_ERR(tfm);
+               dev_err(&fw_info->ndev->nfc_dev->dev,
+                       "Cannot allocate shash (code=%d)\n", ret);
+               goto out;
+       }
+
+       {
+               SHASH_DESC_ON_STACK(desc, tfm);
+
+               desc->tfm = tfm;
+               desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+               ret = crypto_shash_digest(desc, fw->image, image_size,
+                                         hash_data);
+               shash_desc_zero(desc);
+       }
+
+       crypto_free_shash(tfm);
+       if (ret) {
+               dev_err(&fw_info->ndev->nfc_dev->dev,
+                       "Cannot compute hash (code=%d)\n", ret);
+               goto out;
+       }
 
        /* Firmware update process */
 
index 0b8af186e70783e9c8318d9158b177ba25890ff3..2e4c82f8329c8ac0db7ea039f87b33f25d7344ff 100644 (file)
  *     Zhenyu Wang
  */
 
+#include <crypto/hash.h>
 #include <linux/types.h>
 #include <linux/inet.h>
 #include <linux/slab.h>
 #include <linux/file.h>
 #include <linux/blkdev.h>
-#include <linux/crypto.h>
 #include <linux/delay.h>
 #include <linux/kfifo.h>
 #include <linux/scatterlist.h>
@@ -428,7 +428,7 @@ static void iscsi_sw_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr,
         * sufficient room.
         */
        if (conn->hdrdgst_en) {
-               iscsi_tcp_dgst_header(&tcp_sw_conn->tx_hash, hdr, hdrlen,
+               iscsi_tcp_dgst_header(tcp_sw_conn->tx_hash, hdr, hdrlen,
                                      hdr + hdrlen);
                hdrlen += ISCSI_DIGEST_SIZE;
        }
@@ -454,7 +454,7 @@ iscsi_sw_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
 {
        struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
        struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
-       struct hash_desc *tx_hash = NULL;
+       struct ahash_request *tx_hash = NULL;
        unsigned int hdr_spec_len;
 
        ISCSI_SW_TCP_DBG(conn, "offset=%d, datalen=%d %s\n", offset, len,
@@ -467,7 +467,7 @@ iscsi_sw_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
        WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));
 
        if (conn->datadgst_en)
-               tx_hash = &tcp_sw_conn->tx_hash;
+               tx_hash = tcp_sw_conn->tx_hash;
 
        return iscsi_segment_seek_sg(&tcp_sw_conn->out.data_segment,
                                     sg, count, offset, len,
@@ -480,7 +480,7 @@ iscsi_sw_tcp_send_linear_data_prep(struct iscsi_conn *conn, void *data,
 {
        struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
        struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
-       struct hash_desc *tx_hash = NULL;
+       struct ahash_request *tx_hash = NULL;
        unsigned int hdr_spec_len;
 
        ISCSI_SW_TCP_DBG(conn, "datalen=%zd %s\n", len, conn->datadgst_en ?
@@ -492,7 +492,7 @@ iscsi_sw_tcp_send_linear_data_prep(struct iscsi_conn *conn, void *data,
        WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));
 
        if (conn->datadgst_en)
-               tx_hash = &tcp_sw_conn->tx_hash;
+               tx_hash = tcp_sw_conn->tx_hash;
 
        iscsi_segment_init_linear(&tcp_sw_conn->out.data_segment,
                                data, len, NULL, tx_hash);
@@ -543,6 +543,7 @@ iscsi_sw_tcp_conn_create(struct iscsi_cls_session *cls_session,
        struct iscsi_cls_conn *cls_conn;
        struct iscsi_tcp_conn *tcp_conn;
        struct iscsi_sw_tcp_conn *tcp_sw_conn;
+       struct crypto_ahash *tfm;
 
        cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*tcp_sw_conn),
                                        conn_idx);
@@ -552,23 +553,28 @@ iscsi_sw_tcp_conn_create(struct iscsi_cls_session *cls_session,
        tcp_conn = conn->dd_data;
        tcp_sw_conn = tcp_conn->dd_data;
 
-       tcp_sw_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
-                                                    CRYPTO_ALG_ASYNC);
-       tcp_sw_conn->tx_hash.flags = 0;
-       if (IS_ERR(tcp_sw_conn->tx_hash.tfm))
+       tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
+       if (IS_ERR(tfm))
                goto free_conn;
 
-       tcp_sw_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
-                                                    CRYPTO_ALG_ASYNC);
-       tcp_sw_conn->rx_hash.flags = 0;
-       if (IS_ERR(tcp_sw_conn->rx_hash.tfm))
-               goto free_tx_tfm;
-       tcp_conn->rx_hash = &tcp_sw_conn->rx_hash;
+       tcp_sw_conn->tx_hash = ahash_request_alloc(tfm, GFP_KERNEL);
+       if (!tcp_sw_conn->tx_hash)
+               goto free_tfm;
+       ahash_request_set_callback(tcp_sw_conn->tx_hash, 0, NULL, NULL);
+
+       tcp_sw_conn->rx_hash = ahash_request_alloc(tfm, GFP_KERNEL);
+       if (!tcp_sw_conn->rx_hash)
+               goto free_tx_hash;
+       ahash_request_set_callback(tcp_sw_conn->rx_hash, 0, NULL, NULL);
+
+       tcp_conn->rx_hash = tcp_sw_conn->rx_hash;
 
        return cls_conn;
 
-free_tx_tfm:
-       crypto_free_hash(tcp_sw_conn->tx_hash.tfm);
+free_tx_hash:
+       ahash_request_free(tcp_sw_conn->tx_hash);
+free_tfm:
+       crypto_free_ahash(tfm);
 free_conn:
        iscsi_conn_printk(KERN_ERR, conn,
                          "Could not create connection due to crc32c "
@@ -607,10 +613,14 @@ static void iscsi_sw_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
 
        iscsi_sw_tcp_release_conn(conn);
 
-       if (tcp_sw_conn->tx_hash.tfm)
-               crypto_free_hash(tcp_sw_conn->tx_hash.tfm);
-       if (tcp_sw_conn->rx_hash.tfm)
-               crypto_free_hash(tcp_sw_conn->rx_hash.tfm);
+       ahash_request_free(tcp_sw_conn->rx_hash);
+       if (tcp_sw_conn->tx_hash) {
+               struct crypto_ahash *tfm;
+
+               tfm = crypto_ahash_reqtfm(tcp_sw_conn->tx_hash);
+               ahash_request_free(tcp_sw_conn->tx_hash);
+               crypto_free_ahash(tfm);
+       }
 
        iscsi_tcp_conn_teardown(cls_conn);
 }
index f42ecb238af549327d539d822824d87001975fee..06d42d00a32319aa65c5b0c7112b662fe2fbf319 100644 (file)
@@ -45,8 +45,8 @@ struct iscsi_sw_tcp_conn {
        void                    (*old_write_space)(struct sock *);
 
        /* data and header digests */
-       struct hash_desc        tx_hash;        /* CRC32C (Tx) */
-       struct hash_desc        rx_hash;        /* CRC32C (Rx) */
+       struct ahash_request    *tx_hash;       /* CRC32C (Tx) */
+       struct ahash_request    *rx_hash;       /* CRC32C (Rx) */
 
        /* MIB custom statistics */
        uint32_t                sendpage_failures_cnt;
index 60cb6dc3c6f09a405fdc9dc157cc8d0716295134..63a1d69ff5154df7992098ce7e5e7fd4117c8a33 100644 (file)
  *     Zhenyu Wang
  */
 
+#include <crypto/hash.h>
 #include <linux/types.h>
 #include <linux/list.h>
 #include <linux/inet.h>
 #include <linux/slab.h>
 #include <linux/file.h>
 #include <linux/blkdev.h>
-#include <linux/crypto.h>
 #include <linux/delay.h>
 #include <linux/kfifo.h>
 #include <linux/scatterlist.h>
@@ -214,7 +214,8 @@ int iscsi_tcp_segment_done(struct iscsi_tcp_conn *tcp_conn,
                } else
                        sg_init_one(&sg, segment->data + segment->copied,
                                    copied);
-               crypto_hash_update(segment->hash, &sg, copied);
+               ahash_request_set_crypt(segment->hash, &sg, NULL, copied);
+               crypto_ahash_update(segment->hash);
        }
 
        segment->copied += copied;
@@ -260,7 +261,9 @@ int iscsi_tcp_segment_done(struct iscsi_tcp_conn *tcp_conn,
         * is completely handled in hdr done function.
         */
        if (segment->hash) {
-               crypto_hash_final(segment->hash, segment->digest);
+               ahash_request_set_crypt(segment->hash, NULL,
+                                       segment->digest, 0);
+               crypto_ahash_final(segment->hash);
                iscsi_tcp_segment_splice_digest(segment,
                                 recv ? segment->recv_digest : segment->digest);
                return 0;
@@ -310,13 +313,14 @@ iscsi_tcp_segment_recv(struct iscsi_tcp_conn *tcp_conn,
 }
 
 inline void
-iscsi_tcp_dgst_header(struct hash_desc *hash, const void *hdr, size_t hdrlen,
-                     unsigned char digest[ISCSI_DIGEST_SIZE])
+iscsi_tcp_dgst_header(struct ahash_request *hash, const void *hdr,
+                     size_t hdrlen, unsigned char digest[ISCSI_DIGEST_SIZE])
 {
        struct scatterlist sg;
 
        sg_init_one(&sg, hdr, hdrlen);
-       crypto_hash_digest(hash, &sg, hdrlen, digest);
+       ahash_request_set_crypt(hash, &sg, digest, hdrlen);
+       crypto_ahash_digest(hash);
 }
 EXPORT_SYMBOL_GPL(iscsi_tcp_dgst_header);
 
@@ -341,7 +345,7 @@ iscsi_tcp_dgst_verify(struct iscsi_tcp_conn *tcp_conn,
  */
 static inline void
 __iscsi_segment_init(struct iscsi_segment *segment, size_t size,
-                    iscsi_segment_done_fn_t *done, struct hash_desc *hash)
+                    iscsi_segment_done_fn_t *done, struct ahash_request *hash)
 {
        memset(segment, 0, sizeof(*segment));
        segment->total_size = size;
@@ -349,14 +353,14 @@ __iscsi_segment_init(struct iscsi_segment *segment, size_t size,
 
        if (hash) {
                segment->hash = hash;
-               crypto_hash_init(hash);
+               crypto_ahash_init(hash);
        }
 }
 
 inline void
 iscsi_segment_init_linear(struct iscsi_segment *segment, void *data,
                          size_t size, iscsi_segment_done_fn_t *done,
-                         struct hash_desc *hash)
+                         struct ahash_request *hash)
 {
        __iscsi_segment_init(segment, size, done, hash);
        segment->data = data;
@@ -368,7 +372,8 @@ inline int
 iscsi_segment_seek_sg(struct iscsi_segment *segment,
                      struct scatterlist *sg_list, unsigned int sg_count,
                      unsigned int offset, size_t size,
-                     iscsi_segment_done_fn_t *done, struct hash_desc *hash)
+                     iscsi_segment_done_fn_t *done,
+                     struct ahash_request *hash)
 {
        struct scatterlist *sg;
        unsigned int i;
@@ -431,7 +436,7 @@ static void
 iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn)
 {
        struct iscsi_conn *conn = tcp_conn->iscsi_conn;
-       struct hash_desc *rx_hash = NULL;
+       struct ahash_request *rx_hash = NULL;
 
        if (conn->datadgst_en &&
            !(conn->session->tt->caps & CAP_DIGEST_OFFLOAD))
@@ -686,7 +691,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
 
                if (tcp_conn->in.datalen) {
                        struct iscsi_tcp_task *tcp_task = task->dd_data;
-                       struct hash_desc *rx_hash = NULL;
+                       struct ahash_request *rx_hash = NULL;
                        struct scsi_data_buffer *sdb = scsi_in(task->sc);
 
                        /*
index 079d50ebfa3acd7c07c6d4d507827eec436d4293..94c01aad844be3a48dfaff19d08c09641e72eb13 100644 (file)
@@ -27,7 +27,7 @@
  * Copyright (c) 2012, Intel Corporation.
  */
 
-#include <linux/crypto.h>
+#include <crypto/hash.h>
 #include <linux/scatterlist.h>
 #include "../../../include/linux/libcfs/libcfs.h"
 #include "linux-crypto.h"
@@ -38,9 +38,11 @@ static int cfs_crypto_hash_speeds[CFS_HASH_ALG_MAX];
 
 static int cfs_crypto_hash_alloc(unsigned char alg_id,
                                 const struct cfs_crypto_hash_type **type,
-                                struct hash_desc *desc, unsigned char *key,
+                                struct ahash_request **req,
+                                unsigned char *key,
                                 unsigned int key_len)
 {
+       struct crypto_ahash *tfm;
        int     err = 0;
 
        *type = cfs_crypto_hash_type(alg_id);
@@ -50,18 +52,23 @@ static int cfs_crypto_hash_alloc(unsigned char alg_id,
                      alg_id, CFS_HASH_ALG_MAX);
                return -EINVAL;
        }
-       desc->tfm = crypto_alloc_hash((*type)->cht_name, 0, 0);
+       tfm = crypto_alloc_ahash((*type)->cht_name, 0, CRYPTO_ALG_ASYNC);
 
-       if (desc->tfm == NULL)
-               return -EINVAL;
-
-       if (IS_ERR(desc->tfm)) {
+       if (IS_ERR(tfm)) {
                CDEBUG(D_INFO, "Failed to alloc crypto hash %s\n",
                       (*type)->cht_name);
-               return PTR_ERR(desc->tfm);
+               return PTR_ERR(tfm);
        }
 
-       desc->flags = 0;
+       *req = ahash_request_alloc(tfm, GFP_KERNEL);
+       if (!*req) {
+               CDEBUG(D_INFO, "Failed to alloc ahash_request for %s\n",
+                      (*type)->cht_name);
+               crypto_free_ahash(tfm);
+               return -ENOMEM;
+       }
+
+       ahash_request_set_callback(*req, 0, NULL, NULL);
 
        /** Shash have different logic for initialization then digest
         * shash: crypto_hash_setkey, crypto_hash_init
@@ -70,23 +77,27 @@ static int cfs_crypto_hash_alloc(unsigned char alg_id,
         * cfs_crypto_hash_alloc.
         */
        if (key != NULL)
-               err = crypto_hash_setkey(desc->tfm, key, key_len);
+               err = crypto_ahash_setkey(tfm, key, key_len);
        else if ((*type)->cht_key != 0)
-               err = crypto_hash_setkey(desc->tfm,
+               err = crypto_ahash_setkey(tfm,
                                         (unsigned char *)&((*type)->cht_key),
                                         (*type)->cht_size);
 
        if (err != 0) {
-               crypto_free_hash(desc->tfm);
+               crypto_free_ahash(tfm);
                return err;
        }
 
        CDEBUG(D_INFO, "Using crypto hash: %s (%s) speed %d MB/s\n",
-              (crypto_hash_tfm(desc->tfm))->__crt_alg->cra_name,
-              (crypto_hash_tfm(desc->tfm))->__crt_alg->cra_driver_name,
+              crypto_ahash_alg_name(tfm), crypto_ahash_driver_name(tfm),
               cfs_crypto_hash_speeds[alg_id]);
 
-       return crypto_hash_init(desc);
+       err = crypto_ahash_init(*req);
+       if (err) {
+               ahash_request_free(*req);
+               crypto_free_ahash(tfm);
+       }
+       return err;
 }
 
 int cfs_crypto_hash_digest(unsigned char alg_id,
@@ -95,27 +106,29 @@ int cfs_crypto_hash_digest(unsigned char alg_id,
                           unsigned char *hash, unsigned int *hash_len)
 {
        struct scatterlist      sl;
-       struct hash_desc        hdesc;
+       struct ahash_request *req;
        int                     err;
        const struct cfs_crypto_hash_type       *type;
 
        if (buf == NULL || buf_len == 0 || hash_len == NULL)
                return -EINVAL;
 
-       err = cfs_crypto_hash_alloc(alg_id, &type, &hdesc, key, key_len);
+       err = cfs_crypto_hash_alloc(alg_id, &type, &req, key, key_len);
        if (err != 0)
                return err;
 
        if (hash == NULL || *hash_len < type->cht_size) {
                *hash_len = type->cht_size;
-               crypto_free_hash(hdesc.tfm);
+               crypto_free_ahash(crypto_ahash_reqtfm(req));
+               ahash_request_free(req);
                return -ENOSPC;
        }
        sg_init_one(&sl, buf, buf_len);
 
-       hdesc.flags = 0;
-       err = crypto_hash_digest(&hdesc, &sl, sl.length, hash);
-       crypto_free_hash(hdesc.tfm);
+       ahash_request_set_crypt(req, &sl, hash, sl.length);
+       err = crypto_ahash_digest(req);
+       crypto_free_ahash(crypto_ahash_reqtfm(req));
+       ahash_request_free(req);
 
        return err;
 }
@@ -125,22 +138,15 @@ struct cfs_crypto_hash_desc *
        cfs_crypto_hash_init(unsigned char alg_id,
                             unsigned char *key, unsigned int key_len)
 {
-
-       struct  hash_desc       *hdesc;
+       struct ahash_request *req;
        int                  err;
        const struct cfs_crypto_hash_type       *type;
 
-       hdesc = kmalloc(sizeof(*hdesc), 0);
-       if (hdesc == NULL)
-               return ERR_PTR(-ENOMEM);
+       err = cfs_crypto_hash_alloc(alg_id, &type, &req, key, key_len);
 
-       err = cfs_crypto_hash_alloc(alg_id, &type, hdesc, key, key_len);
-
-       if (err) {
-               kfree(hdesc);
+       if (err)
                return ERR_PTR(err);
-       }
-       return (struct cfs_crypto_hash_desc *)hdesc;
+       return (struct cfs_crypto_hash_desc *)req;
 }
 EXPORT_SYMBOL(cfs_crypto_hash_init);
 
@@ -148,23 +154,27 @@ int cfs_crypto_hash_update_page(struct cfs_crypto_hash_desc *hdesc,
                                struct page *page, unsigned int offset,
                                unsigned int len)
 {
+       struct ahash_request *req = (void *)hdesc;
        struct scatterlist sl;
 
        sg_init_table(&sl, 1);
        sg_set_page(&sl, page, len, offset & ~CFS_PAGE_MASK);
 
-       return crypto_hash_update((struct hash_desc *)hdesc, &sl, sl.length);
+       ahash_request_set_crypt(req, &sl, NULL, sl.length);
+       return crypto_ahash_update(req);
 }
 EXPORT_SYMBOL(cfs_crypto_hash_update_page);
 
 int cfs_crypto_hash_update(struct cfs_crypto_hash_desc *hdesc,
                           const void *buf, unsigned int buf_len)
 {
+       struct ahash_request *req = (void *)hdesc;
        struct scatterlist sl;
 
        sg_init_one(&sl, buf, buf_len);
 
-       return crypto_hash_update((struct hash_desc *)hdesc, &sl, sl.length);
+       ahash_request_set_crypt(req, &sl, NULL, sl.length);
+       return crypto_ahash_update(req);
 }
 EXPORT_SYMBOL(cfs_crypto_hash_update);
 
@@ -173,25 +183,27 @@ int cfs_crypto_hash_final(struct cfs_crypto_hash_desc *hdesc,
                          unsigned char *hash, unsigned int *hash_len)
 {
        int     err;
-       int     size = crypto_hash_digestsize(((struct hash_desc *)hdesc)->tfm);
+       struct ahash_request *req = (void *)hdesc;
+       int size = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
 
        if (hash_len == NULL) {
-               crypto_free_hash(((struct hash_desc *)hdesc)->tfm);
-               kfree(hdesc);
+               crypto_free_ahash(crypto_ahash_reqtfm(req));
+               ahash_request_free(req);
                return 0;
        }
        if (hash == NULL || *hash_len < size) {
                *hash_len = size;
                return -ENOSPC;
        }
-       err = crypto_hash_final((struct hash_desc *) hdesc, hash);
+       ahash_request_set_crypt(req, NULL, hash, 0);
+       err = crypto_ahash_final(req);
 
        if (err < 0) {
                /* May be caller can fix error */
                return err;
        }
-       crypto_free_hash(((struct hash_desc *)hdesc)->tfm);
-       kfree(hdesc);
+       crypto_free_ahash(crypto_ahash_reqtfm(req));
+       ahash_request_free(req);
        return err;
 }
 EXPORT_SYMBOL(cfs_crypto_hash_final);
index 2096d78913bd909b864f5659d7e904a182d14f68..8eac7cdd5f3ebd65f10051568367d695ec0860a4 100644 (file)
@@ -9,6 +9,8 @@
  * more details.
  */
 
+#include <crypto/hash.h>
+#include <crypto/skcipher.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -18,7 +20,6 @@
 #include <linux/if_ether.h>
 #include <linux/if_arp.h>
 #include <linux/string.h>
-#include <linux/crypto.h>
 #include <linux/scatterlist.h>
 #include <linux/crc32.h>
 #include <linux/etherdevice.h>
@@ -48,10 +49,10 @@ struct rtllib_tkip_data {
        u32 dot11RSNAStatsTKIPLocalMICFailures;
 
        int key_idx;
-       struct crypto_blkcipher *rx_tfm_arc4;
-       struct crypto_hash *rx_tfm_michael;
-       struct crypto_blkcipher *tx_tfm_arc4;
-       struct crypto_hash *tx_tfm_michael;
+       struct crypto_skcipher *rx_tfm_arc4;
+       struct crypto_ahash *rx_tfm_michael;
+       struct crypto_skcipher *tx_tfm_arc4;
+       struct crypto_ahash *tx_tfm_michael;
        /* scratch buffers for virt_to_page() (crypto API) */
        u8 rx_hdr[16];
        u8 tx_hdr[16];
@@ -65,32 +66,32 @@ static void *rtllib_tkip_init(int key_idx)
        if (priv == NULL)
                goto fail;
        priv->key_idx = key_idx;
-       priv->tx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0,
-                       CRYPTO_ALG_ASYNC);
+       priv->tx_tfm_arc4 = crypto_alloc_skcipher("ecb(arc4)", 0,
+                                                 CRYPTO_ALG_ASYNC);
        if (IS_ERR(priv->tx_tfm_arc4)) {
                pr_debug("Could not allocate crypto API arc4\n");
                priv->tx_tfm_arc4 = NULL;
                goto fail;
        }
 
-       priv->tx_tfm_michael = crypto_alloc_hash("michael_mic", 0,
-                       CRYPTO_ALG_ASYNC);
+       priv->tx_tfm_michael = crypto_alloc_ahash("michael_mic", 0,
+                                                 CRYPTO_ALG_ASYNC);
        if (IS_ERR(priv->tx_tfm_michael)) {
                pr_debug("Could not allocate crypto API michael_mic\n");
                priv->tx_tfm_michael = NULL;
                goto fail;
        }
 
-       priv->rx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0,
-                       CRYPTO_ALG_ASYNC);
+       priv->rx_tfm_arc4 = crypto_alloc_skcipher("ecb(arc4)", 0,
+                                                 CRYPTO_ALG_ASYNC);
        if (IS_ERR(priv->rx_tfm_arc4)) {
                pr_debug("Could not allocate crypto API arc4\n");
                priv->rx_tfm_arc4 = NULL;
                goto fail;
        }
 
-       priv->rx_tfm_michael = crypto_alloc_hash("michael_mic", 0,
-                       CRYPTO_ALG_ASYNC);
+       priv->rx_tfm_michael = crypto_alloc_ahash("michael_mic", 0,
+                                                 CRYPTO_ALG_ASYNC);
        if (IS_ERR(priv->rx_tfm_michael)) {
                pr_debug("Could not allocate crypto API michael_mic\n");
                priv->rx_tfm_michael = NULL;
@@ -100,14 +101,10 @@ static void *rtllib_tkip_init(int key_idx)
 
 fail:
        if (priv) {
-               if (priv->tx_tfm_michael)
-                       crypto_free_hash(priv->tx_tfm_michael);
-               if (priv->tx_tfm_arc4)
-                       crypto_free_blkcipher(priv->tx_tfm_arc4);
-               if (priv->rx_tfm_michael)
-                       crypto_free_hash(priv->rx_tfm_michael);
-               if (priv->rx_tfm_arc4)
-                       crypto_free_blkcipher(priv->rx_tfm_arc4);
+               crypto_free_ahash(priv->tx_tfm_michael);
+               crypto_free_skcipher(priv->tx_tfm_arc4);
+               crypto_free_ahash(priv->rx_tfm_michael);
+               crypto_free_skcipher(priv->rx_tfm_arc4);
                kfree(priv);
        }
 
@@ -120,14 +117,10 @@ static void rtllib_tkip_deinit(void *priv)
        struct rtllib_tkip_data *_priv = priv;
 
        if (_priv) {
-               if (_priv->tx_tfm_michael)
-                       crypto_free_hash(_priv->tx_tfm_michael);
-               if (_priv->tx_tfm_arc4)
-                       crypto_free_blkcipher(_priv->tx_tfm_arc4);
-               if (_priv->rx_tfm_michael)
-                       crypto_free_hash(_priv->rx_tfm_michael);
-               if (_priv->rx_tfm_arc4)
-                       crypto_free_blkcipher(_priv->rx_tfm_arc4);
+               crypto_free_ahash(_priv->tx_tfm_michael);
+               crypto_free_skcipher(_priv->tx_tfm_arc4);
+               crypto_free_ahash(_priv->rx_tfm_michael);
+               crypto_free_skcipher(_priv->rx_tfm_arc4);
        }
        kfree(priv);
 }
@@ -301,7 +294,6 @@ static int rtllib_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
        struct rtllib_hdr_4addr *hdr;
        struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb +
                                    MAX_DEV_ADDR_SIZE);
-       struct blkcipher_desc desc = {.tfm = tkey->tx_tfm_arc4};
        int ret = 0;
        u8 rc4key[16],  *icv;
        u32 crc;
@@ -347,6 +339,8 @@ static int rtllib_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
        *pos++ = (tkey->tx_iv32 >> 24) & 0xff;
 
        if (!tcb_desc->bHwSec) {
+               SKCIPHER_REQUEST_ON_STACK(req, tkey->tx_tfm_arc4);
+
                icv = skb_put(skb, 4);
                crc = ~crc32_le(~0, pos, len);
                icv[0] = crc;
@@ -357,8 +351,12 @@ static int rtllib_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
                sg_init_one(&sg, pos, len+4);
 
 
-               crypto_blkcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
-               ret = crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4);
+               crypto_skcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
+               skcipher_request_set_tfm(req, tkey->tx_tfm_arc4);
+               skcipher_request_set_callback(req, 0, NULL, NULL);
+               skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL);
+               ret = crypto_skcipher_encrypt(req);
+               skcipher_request_zero(req);
        }
 
        tkey->tx_iv16++;
@@ -384,12 +382,12 @@ static int rtllib_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
        struct rtllib_hdr_4addr *hdr;
        struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb +
                                    MAX_DEV_ADDR_SIZE);
-       struct blkcipher_desc desc = {.tfm = tkey->rx_tfm_arc4};
        u8 rc4key[16];
        u8 icv[4];
        u32 crc;
        struct scatterlist sg;
        int plen;
+       int err;
 
        if (skb->len < hdr_len + 8 + 4)
                return -1;
@@ -425,6 +423,8 @@ static int rtllib_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
        pos += 8;
 
        if (!tcb_desc->bHwSec || (skb->cb[0] == 1)) {
+               SKCIPHER_REQUEST_ON_STACK(req, tkey->rx_tfm_arc4);
+
                if ((iv32 < tkey->rx_iv32 ||
                    (iv32 == tkey->rx_iv32 && iv16 <= tkey->rx_iv16)) &&
                    tkey->initialized) {
@@ -450,8 +450,13 @@ static int rtllib_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
 
                sg_init_one(&sg, pos, plen+4);
 
-               crypto_blkcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
-               if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4)) {
+               crypto_skcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
+               skcipher_request_set_tfm(req, tkey->rx_tfm_arc4);
+               skcipher_request_set_callback(req, 0, NULL, NULL);
+               skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL);
+               err = crypto_skcipher_decrypt(req);
+               skcipher_request_zero(req);
+               if (err) {
                        if (net_ratelimit()) {
                                netdev_dbg(skb->dev,
                                           "Failed to decrypt received packet from %pM\n",
@@ -500,11 +505,12 @@ static int rtllib_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
 }
 
 
-static int michael_mic(struct crypto_hash *tfm_michael, u8 *key, u8 *hdr,
+static int michael_mic(struct crypto_ahash *tfm_michael, u8 *key, u8 *hdr,
                       u8 *data, size_t data_len, u8 *mic)
 {
-       struct hash_desc desc;
+       AHASH_REQUEST_ON_STACK(req, tfm_michael);
        struct scatterlist sg[2];
+       int err;
 
        if (tfm_michael == NULL) {
                pr_warn("michael_mic: tfm_michael == NULL\n");
@@ -514,12 +520,15 @@ static int michael_mic(struct crypto_hash *tfm_michael, u8 *key, u8 *hdr,
        sg_set_buf(&sg[0], hdr, 16);
        sg_set_buf(&sg[1], data, data_len);
 
-       if (crypto_hash_setkey(tfm_michael, key, 8))
+       if (crypto_ahash_setkey(tfm_michael, key, 8))
                return -1;
 
-       desc.tfm = tfm_michael;
-       desc.flags = 0;
-       return crypto_hash_digest(&desc, sg, data_len + 16, mic);
+       ahash_request_set_tfm(req, tfm_michael);
+       ahash_request_set_callback(req, 0, NULL, NULL);
+       ahash_request_set_crypt(req, sg, mic, data_len + 16);
+       err = crypto_ahash_digest(req);
+       ahash_request_zero(req);
+       return err;
 }
 
 static void michael_mic_hdr(struct sk_buff *skb, u8 *hdr)
@@ -655,10 +664,10 @@ static int rtllib_tkip_set_key(void *key, int len, u8 *seq, void *priv)
 {
        struct rtllib_tkip_data *tkey = priv;
        int keyidx;
-       struct crypto_hash *tfm = tkey->tx_tfm_michael;
-       struct crypto_blkcipher *tfm2 = tkey->tx_tfm_arc4;
-       struct crypto_hash *tfm3 = tkey->rx_tfm_michael;
-       struct crypto_blkcipher *tfm4 = tkey->rx_tfm_arc4;
+       struct crypto_ahash *tfm = tkey->tx_tfm_michael;
+       struct crypto_skcipher *tfm2 = tkey->tx_tfm_arc4;
+       struct crypto_ahash *tfm3 = tkey->rx_tfm_michael;
+       struct crypto_skcipher *tfm4 = tkey->rx_tfm_arc4;
 
        keyidx = tkey->key_idx;
        memset(tkey, 0, sizeof(*tkey));
index 21d7eee4c9a92303b480a50c8b715b146bc618b4..b3343a5d0fd6c80b11c03cd5feecccad54f9012c 100644 (file)
@@ -9,6 +9,7 @@
  * more details.
  */
 
+#include <crypto/skcipher.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -17,8 +18,6 @@
 #include <linux/string.h>
 #include "rtllib.h"
 
-#include <linux/crypto.h>
-
 #include <linux/scatterlist.h>
 #include <linux/crc32.h>
 
@@ -28,8 +27,8 @@ struct prism2_wep_data {
        u8 key[WEP_KEY_LEN + 1];
        u8 key_len;
        u8 key_idx;
-       struct crypto_blkcipher *tx_tfm;
-       struct crypto_blkcipher *rx_tfm;
+       struct crypto_skcipher *tx_tfm;
+       struct crypto_skcipher *rx_tfm;
 };
 
 
@@ -42,13 +41,13 @@ static void *prism2_wep_init(int keyidx)
                goto fail;
        priv->key_idx = keyidx;
 
-       priv->tx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+       priv->tx_tfm = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(priv->tx_tfm)) {
                pr_debug("rtllib_crypt_wep: could not allocate crypto API arc4\n");
                priv->tx_tfm = NULL;
                goto fail;
        }
-       priv->rx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+       priv->rx_tfm = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(priv->rx_tfm)) {
                pr_debug("rtllib_crypt_wep: could not allocate crypto API arc4\n");
                priv->rx_tfm = NULL;
@@ -62,10 +61,8 @@ static void *prism2_wep_init(int keyidx)
 
 fail:
        if (priv) {
-               if (priv->tx_tfm)
-                       crypto_free_blkcipher(priv->tx_tfm);
-               if (priv->rx_tfm)
-                       crypto_free_blkcipher(priv->rx_tfm);
+               crypto_free_skcipher(priv->tx_tfm);
+               crypto_free_skcipher(priv->rx_tfm);
                kfree(priv);
        }
        return NULL;
@@ -77,10 +74,8 @@ static void prism2_wep_deinit(void *priv)
        struct prism2_wep_data *_priv = priv;
 
        if (_priv) {
-               if (_priv->tx_tfm)
-                       crypto_free_blkcipher(_priv->tx_tfm);
-               if (_priv->rx_tfm)
-                       crypto_free_blkcipher(_priv->rx_tfm);
+               crypto_free_skcipher(_priv->tx_tfm);
+               crypto_free_skcipher(_priv->rx_tfm);
        }
        kfree(priv);
 }
@@ -99,10 +94,10 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
        u8 *pos;
        struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb +
                                    MAX_DEV_ADDR_SIZE);
-       struct blkcipher_desc desc = {.tfm = wep->tx_tfm};
        u32 crc;
        u8 *icv;
        struct scatterlist sg;
+       int err;
 
        if (skb_headroom(skb) < 4 || skb_tailroom(skb) < 4 ||
            skb->len < hdr_len){
@@ -140,6 +135,7 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
        memcpy(key + 3, wep->key, wep->key_len);
 
        if (!tcb_desc->bHwSec) {
+               SKCIPHER_REQUEST_ON_STACK(req, wep->tx_tfm);
 
                /* Append little-endian CRC32 and encrypt it to produce ICV */
                crc = ~crc32_le(~0, pos, len);
@@ -150,8 +146,13 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
                icv[3] = crc >> 24;
 
                sg_init_one(&sg, pos, len+4);
-               crypto_blkcipher_setkey(wep->tx_tfm, key, klen);
-               return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4);
+               crypto_skcipher_setkey(wep->tx_tfm, key, klen);
+               skcipher_request_set_tfm(req, wep->tx_tfm);
+               skcipher_request_set_callback(req, 0, NULL, NULL);
+               skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL);
+               err = crypto_skcipher_encrypt(req);
+               skcipher_request_zero(req);
+               return err;
        }
 
        return 0;
@@ -173,10 +174,10 @@ static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
        u8 keyidx, *pos;
        struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb +
                                    MAX_DEV_ADDR_SIZE);
-       struct blkcipher_desc desc = {.tfm = wep->rx_tfm};
        u32 crc;
        u8 icv[4];
        struct scatterlist sg;
+       int err;
 
        if (skb->len < hdr_len + 8)
                return -1;
@@ -198,9 +199,16 @@ static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
        plen = skb->len - hdr_len - 8;
 
        if (!tcb_desc->bHwSec) {
+               SKCIPHER_REQUEST_ON_STACK(req, wep->rx_tfm);
+
                sg_init_one(&sg, pos, plen+4);
-               crypto_blkcipher_setkey(wep->rx_tfm, key, klen);
-               if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4))
+               crypto_skcipher_setkey(wep->rx_tfm, key, klen);
+               skcipher_request_set_tfm(req, wep->rx_tfm);
+               skcipher_request_set_callback(req, 0, NULL, NULL);
+               skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL);
+               err = crypto_skcipher_decrypt(req);
+               skcipher_request_zero(req);
+               if (err)
                        return -7;
                crc = ~crc32_le(~0, pos, plen);
                icv[0] = crc;
index 908bc2eb4d29e9842b482eb5ad9d67ae6f119acb..6fa96d57d31629aebfd49ec81cd6f13e0f46cf81 100644 (file)
@@ -21,7 +21,8 @@
 
 #include "ieee80211.h"
 
-#include <linux/crypto.h>
+#include <crypto/hash.h>
+#include <crypto/skcipher.h>
 #include <linux/scatterlist.h>
 #include <linux/crc32.h>
 
@@ -52,10 +53,10 @@ struct ieee80211_tkip_data {
 
        int key_idx;
 
-       struct crypto_blkcipher *rx_tfm_arc4;
-       struct crypto_hash *rx_tfm_michael;
-       struct crypto_blkcipher *tx_tfm_arc4;
-       struct crypto_hash *tx_tfm_michael;
+       struct crypto_skcipher *rx_tfm_arc4;
+       struct crypto_ahash *rx_tfm_michael;
+       struct crypto_skcipher *tx_tfm_arc4;
+       struct crypto_ahash *tx_tfm_michael;
 
        /* scratch buffers for virt_to_page() (crypto API) */
        u8 rx_hdr[16], tx_hdr[16];
@@ -70,7 +71,7 @@ static void *ieee80211_tkip_init(int key_idx)
                goto fail;
        priv->key_idx = key_idx;
 
-       priv->tx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0,
+       priv->tx_tfm_arc4 = crypto_alloc_skcipher("ecb(arc4)", 0,
                        CRYPTO_ALG_ASYNC);
        if (IS_ERR(priv->tx_tfm_arc4)) {
                printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
@@ -79,7 +80,7 @@ static void *ieee80211_tkip_init(int key_idx)
                goto fail;
        }
 
-       priv->tx_tfm_michael = crypto_alloc_hash("michael_mic", 0,
+       priv->tx_tfm_michael = crypto_alloc_ahash("michael_mic", 0,
                        CRYPTO_ALG_ASYNC);
        if (IS_ERR(priv->tx_tfm_michael)) {
                printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
@@ -88,7 +89,7 @@ static void *ieee80211_tkip_init(int key_idx)
                goto fail;
        }
 
-       priv->rx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0,
+       priv->rx_tfm_arc4 = crypto_alloc_skcipher("ecb(arc4)", 0,
                        CRYPTO_ALG_ASYNC);
        if (IS_ERR(priv->rx_tfm_arc4)) {
                printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
@@ -97,7 +98,7 @@ static void *ieee80211_tkip_init(int key_idx)
                goto fail;
        }
 
-       priv->rx_tfm_michael = crypto_alloc_hash("michael_mic", 0,
+       priv->rx_tfm_michael = crypto_alloc_ahash("michael_mic", 0,
                        CRYPTO_ALG_ASYNC);
        if (IS_ERR(priv->rx_tfm_michael)) {
                printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
@@ -110,14 +111,10 @@ static void *ieee80211_tkip_init(int key_idx)
 
 fail:
        if (priv) {
-               if (priv->tx_tfm_michael)
-                       crypto_free_hash(priv->tx_tfm_michael);
-               if (priv->tx_tfm_arc4)
-                       crypto_free_blkcipher(priv->tx_tfm_arc4);
-               if (priv->rx_tfm_michael)
-                       crypto_free_hash(priv->rx_tfm_michael);
-               if (priv->rx_tfm_arc4)
-                       crypto_free_blkcipher(priv->rx_tfm_arc4);
+               crypto_free_ahash(priv->tx_tfm_michael);
+               crypto_free_skcipher(priv->tx_tfm_arc4);
+               crypto_free_ahash(priv->rx_tfm_michael);
+               crypto_free_skcipher(priv->rx_tfm_arc4);
                kfree(priv);
        }
 
@@ -130,14 +127,10 @@ static void ieee80211_tkip_deinit(void *priv)
        struct ieee80211_tkip_data *_priv = priv;
 
        if (_priv) {
-               if (_priv->tx_tfm_michael)
-                       crypto_free_hash(_priv->tx_tfm_michael);
-               if (_priv->tx_tfm_arc4)
-                       crypto_free_blkcipher(_priv->tx_tfm_arc4);
-               if (_priv->rx_tfm_michael)
-                       crypto_free_hash(_priv->rx_tfm_michael);
-               if (_priv->rx_tfm_arc4)
-                       crypto_free_blkcipher(_priv->rx_tfm_arc4);
+               crypto_free_ahash(_priv->tx_tfm_michael);
+               crypto_free_skcipher(_priv->tx_tfm_arc4);
+               crypto_free_ahash(_priv->rx_tfm_michael);
+               crypto_free_skcipher(_priv->rx_tfm_arc4);
        }
        kfree(priv);
 }
@@ -312,7 +305,6 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
        u8 *pos;
        struct rtl_80211_hdr_4addr *hdr;
        cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
-       struct blkcipher_desc desc = {.tfm = tkey->tx_tfm_arc4};
        int ret = 0;
        u8 rc4key[16],  *icv;
        u32 crc;
@@ -357,15 +349,21 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
        *pos++ = (tkey->tx_iv32 >> 24) & 0xff;
 
        if (!tcb_desc->bHwSec) {
+               SKCIPHER_REQUEST_ON_STACK(req, tkey->tx_tfm_arc4);
+
                icv = skb_put(skb, 4);
                crc = ~crc32_le(~0, pos, len);
                icv[0] = crc;
                icv[1] = crc >> 8;
                icv[2] = crc >> 16;
                icv[3] = crc >> 24;
-               crypto_blkcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
+               crypto_skcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
                sg_init_one(&sg, pos, len+4);
-               ret = crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4);
+               skcipher_request_set_tfm(req, tkey->tx_tfm_arc4);
+               skcipher_request_set_callback(req, 0, NULL, NULL);
+               skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL);
+               ret = crypto_skcipher_encrypt(req);
+               skcipher_request_zero(req);
        }
 
        tkey->tx_iv16++;
@@ -390,12 +388,12 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
        u16 iv16;
        struct rtl_80211_hdr_4addr *hdr;
        cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
-       struct blkcipher_desc desc = {.tfm = tkey->rx_tfm_arc4};
        u8 rc4key[16];
        u8 icv[4];
        u32 crc;
        struct scatterlist sg;
        int plen;
+       int err;
 
        if (skb->len < hdr_len + 8 + 4)
                return -1;
@@ -429,6 +427,8 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
        pos += 8;
 
        if (!tcb_desc->bHwSec) {
+               SKCIPHER_REQUEST_ON_STACK(req, tkey->rx_tfm_arc4);
+
                if (iv32 < tkey->rx_iv32 ||
                (iv32 == tkey->rx_iv32 && iv16 <= tkey->rx_iv16)) {
                        if (net_ratelimit()) {
@@ -449,10 +449,16 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
 
                plen = skb->len - hdr_len - 12;
 
-               crypto_blkcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
+               crypto_skcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
                sg_init_one(&sg, pos, plen+4);
 
-               if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4)) {
+               skcipher_request_set_tfm(req, tkey->rx_tfm_arc4);
+               skcipher_request_set_callback(req, 0, NULL, NULL);
+               skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL);
+
+               err = crypto_skcipher_decrypt(req);
+               skcipher_request_zero(req);
+               if (err) {
                        if (net_ratelimit()) {
                                printk(KERN_DEBUG ": TKIP: failed to decrypt "
                                                "received packet from %pM\n",
@@ -501,11 +507,12 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
        return keyidx;
 }
 
-static int michael_mic(struct crypto_hash *tfm_michael, u8 *key, u8 *hdr,
+static int michael_mic(struct crypto_ahash *tfm_michael, u8 *key, u8 *hdr,
                       u8 *data, size_t data_len, u8 *mic)
 {
-       struct hash_desc desc;
+       AHASH_REQUEST_ON_STACK(req, tfm_michael);
        struct scatterlist sg[2];
+       int err;
 
        if (tfm_michael == NULL) {
                printk(KERN_WARNING "michael_mic: tfm_michael == NULL\n");
@@ -516,12 +523,15 @@ static int michael_mic(struct crypto_hash *tfm_michael, u8 *key, u8 *hdr,
        sg_set_buf(&sg[0], hdr, 16);
        sg_set_buf(&sg[1], data, data_len);
 
-       if (crypto_hash_setkey(tfm_michael, key, 8))
+       if (crypto_ahash_setkey(tfm_michael, key, 8))
                return -1;
 
-       desc.tfm = tfm_michael;
-       desc.flags = 0;
-       return crypto_hash_digest(&desc, sg, data_len + 16, mic);
+       ahash_request_set_tfm(req, tfm_michael);
+       ahash_request_set_callback(req, 0, NULL, NULL);
+       ahash_request_set_crypt(req, sg, mic, data_len + 16);
+       err = crypto_ahash_digest(req);
+       ahash_request_zero(req);
+       return err;
 }
 
 static void michael_mic_hdr(struct sk_buff *skb, u8 *hdr)
@@ -660,10 +670,10 @@ static int ieee80211_tkip_set_key(void *key, int len, u8 *seq, void *priv)
 {
        struct ieee80211_tkip_data *tkey = priv;
        int keyidx;
-       struct crypto_hash *tfm = tkey->tx_tfm_michael;
-       struct crypto_blkcipher *tfm2 = tkey->tx_tfm_arc4;
-       struct crypto_hash *tfm3 = tkey->rx_tfm_michael;
-       struct crypto_blkcipher *tfm4 = tkey->rx_tfm_arc4;
+       struct crypto_ahash *tfm = tkey->tx_tfm_michael;
+       struct crypto_skcipher *tfm2 = tkey->tx_tfm_arc4;
+       struct crypto_ahash *tfm3 = tkey->rx_tfm_michael;
+       struct crypto_skcipher *tfm4 = tkey->rx_tfm_arc4;
 
        keyidx = tkey->key_idx;
        memset(tkey, 0, sizeof(*tkey));
index 681611dc93d39c9c070af9c63e401365fda48653..ababb6de125bf12c6ae16b63a0ce82be943e6e92 100644 (file)
@@ -18,7 +18,7 @@
 
 #include "ieee80211.h"
 
-#include <linux/crypto.h>
+#include <crypto/skcipher.h>
 #include <linux/scatterlist.h>
 #include <linux/crc32.h>
 
@@ -32,8 +32,8 @@ struct prism2_wep_data {
        u8 key[WEP_KEY_LEN + 1];
        u8 key_len;
        u8 key_idx;
-       struct crypto_blkcipher *tx_tfm;
-       struct crypto_blkcipher *rx_tfm;
+       struct crypto_skcipher *tx_tfm;
+       struct crypto_skcipher *rx_tfm;
 };
 
 
@@ -46,10 +46,10 @@ static void *prism2_wep_init(int keyidx)
                return NULL;
        priv->key_idx = keyidx;
 
-       priv->tx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+       priv->tx_tfm = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(priv->tx_tfm))
                goto free_priv;
-       priv->rx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+       priv->rx_tfm = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(priv->rx_tfm))
                goto free_tx;
 
@@ -58,7 +58,7 @@ static void *prism2_wep_init(int keyidx)
 
        return priv;
 free_tx:
-       crypto_free_blkcipher(priv->tx_tfm);
+       crypto_free_skcipher(priv->tx_tfm);
 free_priv:
        kfree(priv);
        return NULL;
@@ -70,10 +70,8 @@ static void prism2_wep_deinit(void *priv)
        struct prism2_wep_data *_priv = priv;
 
        if (_priv) {
-               if (_priv->tx_tfm)
-                       crypto_free_blkcipher(_priv->tx_tfm);
-               if (_priv->rx_tfm)
-                       crypto_free_blkcipher(_priv->rx_tfm);
+               crypto_free_skcipher(_priv->tx_tfm);
+               crypto_free_skcipher(_priv->rx_tfm);
        }
        kfree(priv);
 }
@@ -91,10 +89,10 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
        u8 key[WEP_KEY_LEN + 3];
        u8 *pos;
        cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
-       struct blkcipher_desc desc = {.tfm = wep->tx_tfm};
        u32 crc;
        u8 *icv;
        struct scatterlist sg;
+       int err;
 
        if (skb_headroom(skb) < 4 || skb_tailroom(skb) < 4 ||
            skb->len < hdr_len)
@@ -129,6 +127,8 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
        memcpy(key + 3, wep->key, wep->key_len);
 
        if (!tcb_desc->bHwSec) {
+               SKCIPHER_REQUEST_ON_STACK(req, wep->tx_tfm);
+
                /* Append little-endian CRC32 and encrypt it to produce ICV */
                crc = ~crc32_le(~0, pos, len);
                icv = skb_put(skb, 4);
@@ -137,10 +137,16 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
                icv[2] = crc >> 16;
                icv[3] = crc >> 24;
 
-               crypto_blkcipher_setkey(wep->tx_tfm, key, klen);
+               crypto_skcipher_setkey(wep->tx_tfm, key, klen);
                sg_init_one(&sg, pos, len+4);
 
-               return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4);
+               skcipher_request_set_tfm(req, wep->tx_tfm);
+               skcipher_request_set_callback(req, 0, NULL, NULL);
+               skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL);
+
+               err = crypto_skcipher_encrypt(req);
+               skcipher_request_zero(req);
+               return err;
        }
 
        return 0;
@@ -161,10 +167,10 @@ static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
        u8 key[WEP_KEY_LEN + 3];
        u8 keyidx, *pos;
        cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
-       struct blkcipher_desc desc = {.tfm = wep->rx_tfm};
        u32 crc;
        u8 icv[4];
        struct scatterlist sg;
+       int err;
 
        if (skb->len < hdr_len + 8)
                return -1;
@@ -186,10 +192,18 @@ static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
        plen = skb->len - hdr_len - 8;
 
        if (!tcb_desc->bHwSec) {
-               crypto_blkcipher_setkey(wep->rx_tfm, key, klen);
+               SKCIPHER_REQUEST_ON_STACK(req, wep->rx_tfm);
+
+               crypto_skcipher_setkey(wep->rx_tfm, key, klen);
                sg_init_one(&sg, pos, plen+4);
 
-               if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4))
+               skcipher_request_set_tfm(req, wep->rx_tfm);
+               skcipher_request_set_callback(req, 0, NULL, NULL);
+               skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL);
+
+               err = crypto_skcipher_decrypt(req);
+               skcipher_request_zero(req);
+               if (err)
                        return -7;
 
                crc = ~crc32_le(~0, pos, plen);
index 576a7a43470ce0b0c0932e563bdb2fa639f7b110..961202f4e9aa4a2004f85d89f7a8a638f3898814 100644 (file)
@@ -16,9 +16,9 @@
  * GNU General Public License for more details.
  ******************************************************************************/
 
+#include <crypto/hash.h>
 #include <linux/string.h>
 #include <linux/kthread.h>
-#include <linux/crypto.h>
 #include <linux/completion.h>
 #include <linux/module.h>
 #include <linux/vmalloc.h>
@@ -1190,7 +1190,7 @@ iscsit_handle_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 }
 
 static u32 iscsit_do_crypto_hash_sg(
-       struct hash_desc *hash,
+       struct ahash_request *hash,
        struct iscsi_cmd *cmd,
        u32 data_offset,
        u32 data_length,
@@ -1201,7 +1201,7 @@ static u32 iscsit_do_crypto_hash_sg(
        struct scatterlist *sg;
        unsigned int page_off;
 
-       crypto_hash_init(hash);
+       crypto_ahash_init(hash);
 
        sg = cmd->first_data_sg;
        page_off = cmd->first_data_sg_off;
@@ -1209,7 +1209,8 @@ static u32 iscsit_do_crypto_hash_sg(
        while (data_length) {
                u32 cur_len = min_t(u32, data_length, (sg->length - page_off));
 
-               crypto_hash_update(hash, sg, cur_len);
+               ahash_request_set_crypt(hash, sg, NULL, cur_len);
+               crypto_ahash_update(hash);
 
                data_length -= cur_len;
                page_off = 0;
@@ -1221,33 +1222,34 @@ static u32 iscsit_do_crypto_hash_sg(
                struct scatterlist pad_sg;
 
                sg_init_one(&pad_sg, pad_bytes, padding);
-               crypto_hash_update(hash, &pad_sg, padding);
+               ahash_request_set_crypt(hash, &pad_sg, (u8 *)&data_crc,
+                                       padding);
+               crypto_ahash_finup(hash);
+       } else {
+               ahash_request_set_crypt(hash, NULL, (u8 *)&data_crc, 0);
+               crypto_ahash_final(hash);
        }
-       crypto_hash_final(hash, (u8 *) &data_crc);
 
        return data_crc;
 }
 
 static void iscsit_do_crypto_hash_buf(
-       struct hash_desc *hash,
+       struct ahash_request *hash,
        const void *buf,
        u32 payload_length,
        u32 padding,
        u8 *pad_bytes,
        u8 *data_crc)
 {
-       struct scatterlist sg;
+       struct scatterlist sg[2];
 
-       crypto_hash_init(hash);
+       sg_init_table(sg, ARRAY_SIZE(sg));
+       sg_set_buf(sg, buf, payload_length);
+       sg_set_buf(sg + 1, pad_bytes, padding);
 
-       sg_init_one(&sg, buf, payload_length);
-       crypto_hash_update(hash, &sg, payload_length);
+       ahash_request_set_crypt(hash, sg, data_crc, payload_length + padding);
 
-       if (padding) {
-               sg_init_one(&sg, pad_bytes, padding);
-               crypto_hash_update(hash, &sg, padding);
-       }
-       crypto_hash_final(hash, data_crc);
+       crypto_ahash_digest(hash);
 }
 
 int
@@ -1422,7 +1424,7 @@ iscsit_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
        if (conn->conn_ops->DataDigest) {
                u32 data_crc;
 
-               data_crc = iscsit_do_crypto_hash_sg(&conn->conn_rx_hash, cmd,
+               data_crc = iscsit_do_crypto_hash_sg(conn->conn_rx_hash, cmd,
                                                    be32_to_cpu(hdr->offset),
                                                    payload_length, padding,
                                                    cmd->pad_bytes);
@@ -1682,7 +1684,7 @@ static int iscsit_handle_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
                }
 
                if (conn->conn_ops->DataDigest) {
-                       iscsit_do_crypto_hash_buf(&conn->conn_rx_hash,
+                       iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
                                        ping_data, payload_length,
                                        padding, cmd->pad_bytes,
                                        (u8 *)&data_crc);
@@ -2101,7 +2103,7 @@ iscsit_handle_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
                        goto reject;
 
                if (conn->conn_ops->DataDigest) {
-                       iscsit_do_crypto_hash_buf(&conn->conn_rx_hash,
+                       iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
                                        text_in, payload_length,
                                        padding, (u8 *)&pad_bytes,
                                        (u8 *)&data_crc);
@@ -2440,7 +2442,7 @@ static int iscsit_handle_immediate_data(
        if (conn->conn_ops->DataDigest) {
                u32 data_crc;
 
-               data_crc = iscsit_do_crypto_hash_sg(&conn->conn_rx_hash, cmd,
+               data_crc = iscsit_do_crypto_hash_sg(conn->conn_rx_hash, cmd,
                                                    cmd->write_data_done, length, padding,
                                                    cmd->pad_bytes);
 
@@ -2553,7 +2555,7 @@ static int iscsit_send_conn_drop_async_message(
        if (conn->conn_ops->HeaderDigest) {
                u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
 
-               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+               iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
                                ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
 
                cmd->tx_size += ISCSI_CRC_LEN;
@@ -2683,7 +2685,7 @@ static int iscsit_send_datain(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
        if (conn->conn_ops->HeaderDigest) {
                u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
 
-               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->pdu,
+               iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->pdu,
                                ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
 
                iov[0].iov_len += ISCSI_CRC_LEN;
@@ -2711,7 +2713,7 @@ static int iscsit_send_datain(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
                                cmd->padding);
        }
        if (conn->conn_ops->DataDigest) {
-               cmd->data_crc = iscsit_do_crypto_hash_sg(&conn->conn_tx_hash, cmd,
+               cmd->data_crc = iscsit_do_crypto_hash_sg(conn->conn_tx_hash, cmd,
                         datain.offset, datain.length, cmd->padding, cmd->pad_bytes);
 
                iov[iov_count].iov_base = &cmd->data_crc;
@@ -2857,7 +2859,7 @@ iscsit_send_logout(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
        if (conn->conn_ops->HeaderDigest) {
                u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
 
-               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, &cmd->pdu[0],
+               iscsit_do_crypto_hash_buf(conn->conn_tx_hash, &cmd->pdu[0],
                                ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
 
                iov[0].iov_len += ISCSI_CRC_LEN;
@@ -2915,7 +2917,7 @@ static int iscsit_send_unsolicited_nopin(
        if (conn->conn_ops->HeaderDigest) {
                u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
 
-               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+               iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
                                ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
 
                tx_size += ISCSI_CRC_LEN;
@@ -2963,7 +2965,7 @@ iscsit_send_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
        if (conn->conn_ops->HeaderDigest) {
                u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
 
-               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+               iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
                                ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
 
                iov[0].iov_len += ISCSI_CRC_LEN;
@@ -2993,7 +2995,7 @@ iscsit_send_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
                                " padding bytes.\n", padding);
                }
                if (conn->conn_ops->DataDigest) {
-                       iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+                       iscsit_do_crypto_hash_buf(conn->conn_tx_hash,
                                cmd->buf_ptr, cmd->buf_ptr_size,
                                padding, (u8 *)&cmd->pad_bytes,
                                (u8 *)&cmd->data_crc);
@@ -3049,7 +3051,7 @@ static int iscsit_send_r2t(
        if (conn->conn_ops->HeaderDigest) {
                u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
 
-               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+               iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
                                ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
 
                cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN;
@@ -3239,7 +3241,7 @@ static int iscsit_send_response(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
                }
 
                if (conn->conn_ops->DataDigest) {
-                       iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+                       iscsit_do_crypto_hash_buf(conn->conn_tx_hash,
                                cmd->sense_buffer,
                                (cmd->se_cmd.scsi_sense_length + padding),
                                0, NULL, (u8 *)&cmd->data_crc);
@@ -3262,7 +3264,7 @@ static int iscsit_send_response(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
        if (conn->conn_ops->HeaderDigest) {
                u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
 
-               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->pdu,
+               iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->pdu,
                                ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
 
                iov[0].iov_len += ISCSI_CRC_LEN;
@@ -3332,7 +3334,7 @@ iscsit_send_task_mgt_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
        if (conn->conn_ops->HeaderDigest) {
                u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
 
-               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+               iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
                                ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
 
                cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN;
@@ -3601,7 +3603,7 @@ static int iscsit_send_text_rsp(
        if (conn->conn_ops->HeaderDigest) {
                u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
 
-               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+               iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
                                ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
 
                iov[0].iov_len += ISCSI_CRC_LEN;
@@ -3611,7 +3613,7 @@ static int iscsit_send_text_rsp(
        }
 
        if (conn->conn_ops->DataDigest) {
-               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+               iscsit_do_crypto_hash_buf(conn->conn_tx_hash,
                                cmd->buf_ptr, text_length,
                                0, NULL, (u8 *)&cmd->data_crc);
 
@@ -3668,7 +3670,7 @@ static int iscsit_send_reject(
        if (conn->conn_ops->HeaderDigest) {
                u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
 
-               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+               iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
                                ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
 
                iov[0].iov_len += ISCSI_CRC_LEN;
@@ -3678,7 +3680,7 @@ static int iscsit_send_reject(
        }
 
        if (conn->conn_ops->DataDigest) {
-               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->buf_ptr,
+               iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->buf_ptr,
                                ISCSI_HDR_LEN, 0, NULL, (u8 *)&cmd->data_crc);
 
                iov[iov_count].iov_base = &cmd->data_crc;
@@ -4145,7 +4147,7 @@ int iscsi_target_rx_thread(void *arg)
                                goto transport_err;
                        }
 
-                       iscsit_do_crypto_hash_buf(&conn->conn_rx_hash,
+                       iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
                                        buffer, ISCSI_HDR_LEN,
                                        0, NULL, (u8 *)&checksum);
 
@@ -4359,10 +4361,14 @@ int iscsit_close_connection(
         */
        iscsit_check_conn_usage_count(conn);
 
-       if (conn->conn_rx_hash.tfm)
-               crypto_free_hash(conn->conn_rx_hash.tfm);
-       if (conn->conn_tx_hash.tfm)
-               crypto_free_hash(conn->conn_tx_hash.tfm);
+       ahash_request_free(conn->conn_tx_hash);
+       if (conn->conn_rx_hash) {
+               struct crypto_ahash *tfm;
+
+               tfm = crypto_ahash_reqtfm(conn->conn_rx_hash);
+               ahash_request_free(conn->conn_rx_hash);
+               crypto_free_ahash(tfm);
+       }
 
        free_cpumask_var(conn->conn_cpumask);
 
index 47e249dccb5fe7d9652bea77bddc00b35dd98429..667406fcf4d3daf03acf84b24be9d6538c95f20e 100644 (file)
@@ -16,9 +16,9 @@
  * GNU General Public License for more details.
  ******************************************************************************/
 
+#include <crypto/hash.h>
 #include <linux/kernel.h>
 #include <linux/string.h>
-#include <linux/crypto.h>
 #include <linux/err.h>
 #include <linux/scatterlist.h>
 
@@ -185,9 +185,8 @@ static int chap_server_compute_md5(
        unsigned char chap_n[MAX_CHAP_N_SIZE], chap_r[MAX_RESPONSE_LENGTH];
        size_t compare_len;
        struct iscsi_chap *chap = conn->auth_protocol;
-       struct crypto_hash *tfm;
-       struct hash_desc desc;
-       struct scatterlist sg;
+       struct crypto_shash *tfm = NULL;
+       struct shash_desc *desc = NULL;
        int auth_ret = -1, ret, challenge_len;
 
        memset(identifier, 0, 10);
@@ -245,52 +244,47 @@ static int chap_server_compute_md5(
        pr_debug("[server] Got CHAP_R=%s\n", chap_r);
        chap_string_to_hex(client_digest, chap_r, strlen(chap_r));
 
-       tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
+       tfm = crypto_alloc_shash("md5", 0, 0);
        if (IS_ERR(tfm)) {
-               pr_err("Unable to allocate struct crypto_hash\n");
+               tfm = NULL;
+               pr_err("Unable to allocate struct crypto_shash\n");
                goto out;
        }
-       desc.tfm = tfm;
-       desc.flags = 0;
 
-       ret = crypto_hash_init(&desc);
-       if (ret < 0) {
-               pr_err("crypto_hash_init() failed\n");
-               crypto_free_hash(tfm);
+       desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
+       if (!desc) {
+               pr_err("Unable to allocate struct shash_desc\n");
                goto out;
        }
 
-       sg_init_one(&sg, &chap->id, 1);
-       ret = crypto_hash_update(&desc, &sg, 1);
+       desc->tfm = tfm;
+       desc->flags = 0;
+
+       ret = crypto_shash_init(desc);
        if (ret < 0) {
-               pr_err("crypto_hash_update() failed for id\n");
-               crypto_free_hash(tfm);
+               pr_err("crypto_shash_init() failed\n");
                goto out;
        }
 
-       sg_init_one(&sg, &auth->password, strlen(auth->password));
-       ret = crypto_hash_update(&desc, &sg, strlen(auth->password));
+       ret = crypto_shash_update(desc, &chap->id, 1);
        if (ret < 0) {
-               pr_err("crypto_hash_update() failed for password\n");
-               crypto_free_hash(tfm);
+               pr_err("crypto_shash_update() failed for id\n");
                goto out;
        }
 
-       sg_init_one(&sg, chap->challenge, CHAP_CHALLENGE_LENGTH);
-       ret = crypto_hash_update(&desc, &sg, CHAP_CHALLENGE_LENGTH);
+       ret = crypto_shash_update(desc, (char *)&auth->password,
+                                 strlen(auth->password));
        if (ret < 0) {
-               pr_err("crypto_hash_update() failed for challenge\n");
-               crypto_free_hash(tfm);
+               pr_err("crypto_shash_update() failed for password\n");
                goto out;
        }
 
-       ret = crypto_hash_final(&desc, server_digest);
+       ret = crypto_shash_finup(desc, chap->challenge,
+                                CHAP_CHALLENGE_LENGTH, server_digest);
        if (ret < 0) {
-               pr_err("crypto_hash_final() failed for server digest\n");
-               crypto_free_hash(tfm);
+               pr_err("crypto_shash_finup() failed for challenge\n");
                goto out;
        }
-       crypto_free_hash(tfm);
 
        chap_binaryhex_to_asciihex(response, server_digest, MD5_SIGNATURE_SIZE);
        pr_debug("[server] MD5 Server Digest: %s\n", response);
@@ -306,9 +300,8 @@ static int chap_server_compute_md5(
         * authentication is not enabled.
         */
        if (!auth->authenticate_target) {
-               kfree(challenge);
-               kfree(challenge_binhex);
-               return 0;
+               auth_ret = 0;
+               goto out;
        }
        /*
         * Get CHAP_I.
@@ -372,58 +365,37 @@ static int chap_server_compute_md5(
        /*
         * Generate CHAP_N and CHAP_R for mutual authentication.
         */
-       tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
-       if (IS_ERR(tfm)) {
-               pr_err("Unable to allocate struct crypto_hash\n");
-               goto out;
-       }
-       desc.tfm = tfm;
-       desc.flags = 0;
-
-       ret = crypto_hash_init(&desc);
+       ret = crypto_shash_init(desc);
        if (ret < 0) {
-               pr_err("crypto_hash_init() failed\n");
-               crypto_free_hash(tfm);
+               pr_err("crypto_shash_init() failed\n");
                goto out;
        }
 
        /* To handle both endiannesses */
        id_as_uchar = id;
-       sg_init_one(&sg, &id_as_uchar, 1);
-       ret = crypto_hash_update(&desc, &sg, 1);
+       ret = crypto_shash_update(desc, &id_as_uchar, 1);
        if (ret < 0) {
-               pr_err("crypto_hash_update() failed for id\n");
-               crypto_free_hash(tfm);
+               pr_err("crypto_shash_update() failed for id\n");
                goto out;
        }
 
-       sg_init_one(&sg, auth->password_mutual,
-                               strlen(auth->password_mutual));
-       ret = crypto_hash_update(&desc, &sg, strlen(auth->password_mutual));
+       ret = crypto_shash_update(desc, auth->password_mutual,
+                                 strlen(auth->password_mutual));
        if (ret < 0) {
-               pr_err("crypto_hash_update() failed for"
+               pr_err("crypto_shash_update() failed for"
                                " password_mutual\n");
-               crypto_free_hash(tfm);
                goto out;
        }
        /*
         * Convert received challenge to binary hex.
         */
-       sg_init_one(&sg, challenge_binhex, challenge_len);
-       ret = crypto_hash_update(&desc, &sg, challenge_len);
+       ret = crypto_shash_finup(desc, challenge_binhex, challenge_len,
+                                digest);
        if (ret < 0) {
-               pr_err("crypto_hash_update() failed for ma challenge\n");
-               crypto_free_hash(tfm);
+               pr_err("crypto_shash_finup() failed for ma challenge\n");
                goto out;
        }
 
-       ret = crypto_hash_final(&desc, digest);
-       if (ret < 0) {
-               pr_err("crypto_hash_final() failed for ma digest\n");
-               crypto_free_hash(tfm);
-               goto out;
-       }
-       crypto_free_hash(tfm);
        /*
         * Generate CHAP_N and CHAP_R.
         */
@@ -440,6 +412,8 @@ static int chap_server_compute_md5(
        pr_debug("[server] Sending CHAP_R=0x%s\n", response);
        auth_ret = 0;
 out:
+       kzfree(desc);
+       crypto_free_shash(tfm);
        kfree(challenge);
        kfree(challenge_binhex);
        return auth_ret;
index 96e78c823d13fa2f78feb6ff024fb468518be75b..8436d56c5f0c377e07caa7e0b1766051c11cf929 100644 (file)
@@ -16,9 +16,9 @@
  * GNU General Public License for more details.
  ******************************************************************************/
 
+#include <crypto/hash.h>
 #include <linux/string.h>
 #include <linux/kthread.h>
-#include <linux/crypto.h>
 #include <linux/idr.h>
 #include <scsi/iscsi_proto.h>
 #include <target/target_core_base.h>
@@ -115,27 +115,36 @@ out_login:
  */
 int iscsi_login_setup_crypto(struct iscsi_conn *conn)
 {
+       struct crypto_ahash *tfm;
+
        /*
         * Setup slicing by CRC32C algorithm for RX and TX libcrypto contexts
         * which will default to crc32c_intel.ko for cpu_has_xmm4_2, or fallback
         * to software 1x8 byte slicing from crc32c.ko
         */
-       conn->conn_rx_hash.flags = 0;
-       conn->conn_rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
-                                               CRYPTO_ALG_ASYNC);
-       if (IS_ERR(conn->conn_rx_hash.tfm)) {
-               pr_err("crypto_alloc_hash() failed for conn_rx_tfm\n");
+       tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
+       if (IS_ERR(tfm)) {
+               pr_err("crypto_alloc_ahash() failed\n");
                return -ENOMEM;
        }
 
-       conn->conn_tx_hash.flags = 0;
-       conn->conn_tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
-                                               CRYPTO_ALG_ASYNC);
-       if (IS_ERR(conn->conn_tx_hash.tfm)) {
-               pr_err("crypto_alloc_hash() failed for conn_tx_tfm\n");
-               crypto_free_hash(conn->conn_rx_hash.tfm);
+       conn->conn_rx_hash = ahash_request_alloc(tfm, GFP_KERNEL);
+       if (!conn->conn_rx_hash) {
+               pr_err("ahash_request_alloc() failed for conn_rx_hash\n");
+               crypto_free_ahash(tfm);
+               return -ENOMEM;
+       }
+       ahash_request_set_callback(conn->conn_rx_hash, 0, NULL, NULL);
+
+       conn->conn_tx_hash = ahash_request_alloc(tfm, GFP_KERNEL);
+       if (!conn->conn_tx_hash) {
+               pr_err("ahash_request_alloc() failed for conn_tx_hash\n");
+               ahash_request_free(conn->conn_rx_hash);
+               conn->conn_rx_hash = NULL;
+               crypto_free_ahash(tfm);
                return -ENOMEM;
        }
+       ahash_request_set_callback(conn->conn_tx_hash, 0, NULL, NULL);
 
        return 0;
 }
@@ -1174,10 +1183,14 @@ old_sess_out:
                iscsit_dec_session_usage_count(conn->sess);
        }
 
-       if (!IS_ERR(conn->conn_rx_hash.tfm))
-               crypto_free_hash(conn->conn_rx_hash.tfm);
-       if (!IS_ERR(conn->conn_tx_hash.tfm))
-               crypto_free_hash(conn->conn_tx_hash.tfm);
+       ahash_request_free(conn->conn_tx_hash);
+       if (conn->conn_rx_hash) {
+               struct crypto_ahash *tfm;
+
+               tfm = crypto_ahash_reqtfm(conn->conn_rx_hash);
+               ahash_request_free(conn->conn_rx_hash);
+               crypto_free_ahash(tfm);
+       }
 
        free_cpumask_var(conn->conn_cpumask);
 
index 50ce80d604f3af1ab6992bc9e588fc97aa051858..8ed8e34c3492837debffc0552af9e603cc3952e3 100644 (file)
@@ -45,6 +45,7 @@
  *             funneled through AES are...16 bytes in size!
  */
 
+#include <crypto/skcipher.h>
 #include <linux/crypto.h>
 #include <linux/module.h>
 #include <linux/err.h>
@@ -195,21 +196,22 @@ static void bytewise_xor(void *_bo, const void *_bi1, const void *_bi2,
  * NOTE: blen is not aligned to a block size, we'll pad zeros, that's
  *       what sg[4] is for. Maybe there is a smarter way to do this.
  */
-static int wusb_ccm_mac(struct crypto_blkcipher *tfm_cbc,
+static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
                        struct crypto_cipher *tfm_aes, void *mic,
                        const struct aes_ccm_nonce *n,
                        const struct aes_ccm_label *a, const void *b,
                        size_t blen)
 {
        int result = 0;
-       struct blkcipher_desc desc;
+       SKCIPHER_REQUEST_ON_STACK(req, tfm_cbc);
        struct aes_ccm_b0 b0;
        struct aes_ccm_b1 b1;
        struct aes_ccm_a ax;
        struct scatterlist sg[4], sg_dst;
-       void *iv, *dst_buf;
-       size_t ivsize, dst_size;
+       void *dst_buf;
+       size_t dst_size;
        const u8 bzero[16] = { 0 };
+       u8 iv[crypto_skcipher_ivsize(tfm_cbc)];
        size_t zero_padding;
 
        /*
@@ -232,9 +234,7 @@ static int wusb_ccm_mac(struct crypto_blkcipher *tfm_cbc,
                goto error_dst_buf;
        }
 
-       iv = crypto_blkcipher_crt(tfm_cbc)->iv;
-       ivsize = crypto_blkcipher_ivsize(tfm_cbc);
-       memset(iv, 0, ivsize);
+       memset(iv, 0, sizeof(iv));
 
        /* Setup B0 */
        b0.flags = 0x59;        /* Format B0 */
@@ -259,9 +259,11 @@ static int wusb_ccm_mac(struct crypto_blkcipher *tfm_cbc,
        sg_set_buf(&sg[3], bzero, zero_padding);
        sg_init_one(&sg_dst, dst_buf, dst_size);
 
-       desc.tfm = tfm_cbc;
-       desc.flags = 0;
-       result = crypto_blkcipher_encrypt(&desc, &sg_dst, sg, dst_size);
+       skcipher_request_set_tfm(req, tfm_cbc);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, sg, &sg_dst, dst_size, iv);
+       result = crypto_skcipher_encrypt(req);
+       skcipher_request_zero(req);
        if (result < 0) {
                printk(KERN_ERR "E: can't compute CBC-MAC tag (MIC): %d\n",
                       result);
@@ -301,18 +303,18 @@ ssize_t wusb_prf(void *out, size_t out_size,
 {
        ssize_t result, bytes = 0, bitr;
        struct aes_ccm_nonce n = *_n;
-       struct crypto_blkcipher *tfm_cbc;
+       struct crypto_skcipher *tfm_cbc;
        struct crypto_cipher *tfm_aes;
        u64 sfn = 0;
        __le64 sfn_le;
 
-       tfm_cbc = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
+       tfm_cbc = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm_cbc)) {
                result = PTR_ERR(tfm_cbc);
                printk(KERN_ERR "E: can't load CBC(AES): %d\n", (int)result);
                goto error_alloc_cbc;
        }
-       result = crypto_blkcipher_setkey(tfm_cbc, key, 16);
+       result = crypto_skcipher_setkey(tfm_cbc, key, 16);
        if (result < 0) {
                printk(KERN_ERR "E: can't set CBC key: %d\n", (int)result);
                goto error_setkey_cbc;
@@ -345,7 +347,7 @@ error_setkey_aes:
        crypto_free_cipher(tfm_aes);
 error_alloc_aes:
 error_setkey_cbc:
-       crypto_free_blkcipher(tfm_cbc);
+       crypto_free_skcipher(tfm_cbc);
 error_alloc_cbc:
        return result;
 }
index afa09fce81515e4caf7500b04c16dfb96a71cfd1..d41165433260673d66fef4cc074695665330124b 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/ctype.h>
 #include <linux/random.h>
 #include <linux/highmem.h>
+#include <crypto/skcipher.h>
 
 static int
 cifs_crypto_shash_md5_allocate(struct TCP_Server_Info *server)
@@ -789,38 +790,46 @@ int
 calc_seckey(struct cifs_ses *ses)
 {
        int rc;
-       struct crypto_blkcipher *tfm_arc4;
+       struct crypto_skcipher *tfm_arc4;
        struct scatterlist sgin, sgout;
-       struct blkcipher_desc desc;
+       struct skcipher_request *req;
        unsigned char sec_key[CIFS_SESS_KEY_SIZE]; /* a nonce */
 
        get_random_bytes(sec_key, CIFS_SESS_KEY_SIZE);
 
-       tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+       tfm_arc4 = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm_arc4)) {
                rc = PTR_ERR(tfm_arc4);
                cifs_dbg(VFS, "could not allocate crypto API arc4\n");
                return rc;
        }
 
-       desc.tfm = tfm_arc4;
-
-       rc = crypto_blkcipher_setkey(tfm_arc4, ses->auth_key.response,
+       rc = crypto_skcipher_setkey(tfm_arc4, ses->auth_key.response,
                                        CIFS_SESS_KEY_SIZE);
        if (rc) {
                cifs_dbg(VFS, "%s: Could not set response as a key\n",
                         __func__);
-               return rc;
+               goto out_free_cipher;
+       }
+
+       req = skcipher_request_alloc(tfm_arc4, GFP_KERNEL);
+       if (!req) {
+               rc = -ENOMEM;
+               cifs_dbg(VFS, "could not allocate crypto API arc4 request\n");
+               goto out_free_cipher;
        }
 
        sg_init_one(&sgin, sec_key, CIFS_SESS_KEY_SIZE);
        sg_init_one(&sgout, ses->ntlmssp->ciphertext, CIFS_CPHTXT_SIZE);
 
-       rc = crypto_blkcipher_encrypt(&desc, &sgout, &sgin, CIFS_CPHTXT_SIZE);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, &sgin, &sgout, CIFS_CPHTXT_SIZE, NULL);
+
+       rc = crypto_skcipher_encrypt(req);
+       skcipher_request_free(req);
        if (rc) {
                cifs_dbg(VFS, "could not encrypt session key rc: %d\n", rc);
-               crypto_free_blkcipher(tfm_arc4);
-               return rc;
+               goto out_free_cipher;
        }
 
        /* make secondary_key/nonce as session key */
@@ -828,7 +837,8 @@ calc_seckey(struct cifs_ses *ses)
        /* and make len as that of session key only */
        ses->auth_key.len = CIFS_SESS_KEY_SIZE;
 
-       crypto_free_blkcipher(tfm_arc4);
+out_free_cipher:
+       crypto_free_skcipher(tfm_arc4);
 
        return rc;
 }
index a4232ec4f2ba45386b4f25db484f7f30135b01c2..699b7868108f658a3262943c7763b13d59767a89 100644 (file)
@@ -23,6 +23,7 @@
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
 
+#include <crypto/skcipher.h>
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/fs.h>
@@ -70,31 +71,42 @@ smbhash(unsigned char *out, const unsigned char *in, unsigned char *key)
 {
        int rc;
        unsigned char key2[8];
-       struct crypto_blkcipher *tfm_des;
+       struct crypto_skcipher *tfm_des;
        struct scatterlist sgin, sgout;
-       struct blkcipher_desc desc;
+       struct skcipher_request *req;
 
        str_to_key(key, key2);
 
-       tfm_des = crypto_alloc_blkcipher("ecb(des)", 0, CRYPTO_ALG_ASYNC);
+       tfm_des = crypto_alloc_skcipher("ecb(des)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm_des)) {
                rc = PTR_ERR(tfm_des);
                cifs_dbg(VFS, "could not allocate des crypto API\n");
                goto smbhash_err;
        }
 
-       desc.tfm = tfm_des;
+       req = skcipher_request_alloc(tfm_des, GFP_KERNEL);
+       if (!req) {
+               rc = -ENOMEM;
+               cifs_dbg(VFS, "could not allocate des crypto API\n");
+               goto smbhash_free_skcipher;
+       }
 
-       crypto_blkcipher_setkey(tfm_des, key2, 8);
+       crypto_skcipher_setkey(tfm_des, key2, 8);
 
        sg_init_one(&sgin, in, 8);
        sg_init_one(&sgout, out, 8);
 
-       rc = crypto_blkcipher_encrypt(&desc, &sgout, &sgin, 8);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, &sgin, &sgout, 8, NULL);
+
+       rc = crypto_skcipher_encrypt(req);
        if (rc)
                cifs_dbg(VFS, "could not encrypt crypt key rc: %d\n", rc);
 
-       crypto_free_blkcipher(tfm_des);
+       skcipher_request_free(req);
+
+smbhash_free_skcipher:
+       crypto_free_skcipher(tfm_des);
 smbhash_err:
        return rc;
 }
index 80d6901493cf5e0867cd2572885cc16f66ff59c5..11255cbcb2db34e8e7f1e52fad1cf79437c14b05 100644 (file)
@@ -23,6 +23,8 @@
  * 02111-1307, USA.
  */
 
+#include <crypto/hash.h>
+#include <crypto/skcipher.h>
 #include <linux/fs.h>
 #include <linux/mount.h>
 #include <linux/pagemap.h>
@@ -30,7 +32,6 @@
 #include <linux/compiler.h>
 #include <linux/key.h>
 #include <linux/namei.h>
-#include <linux/crypto.h>
 #include <linux/file.h>
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
@@ -74,6 +75,19 @@ void ecryptfs_from_hex(char *dst, char *src, int dst_size)
        }
 }
 
+static int ecryptfs_hash_digest(struct crypto_shash *tfm,
+                               char *src, int len, char *dst)
+{
+       SHASH_DESC_ON_STACK(desc, tfm);
+       int err;
+
+       desc->tfm = tfm;
+       desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+       err = crypto_shash_digest(desc, src, len, dst);
+       shash_desc_zero(desc);
+       return err;
+}
+
 /**
  * ecryptfs_calculate_md5 - calculates the md5 of @src
  * @dst: Pointer to 16 bytes of allocated memory
@@ -88,45 +102,26 @@ static int ecryptfs_calculate_md5(char *dst,
                                  struct ecryptfs_crypt_stat *crypt_stat,
                                  char *src, int len)
 {
-       struct scatterlist sg;
-       struct hash_desc desc = {
-               .tfm = crypt_stat->hash_tfm,
-               .flags = CRYPTO_TFM_REQ_MAY_SLEEP
-       };
+       struct crypto_shash *tfm;
        int rc = 0;
 
        mutex_lock(&crypt_stat->cs_hash_tfm_mutex);
-       sg_init_one(&sg, (u8 *)src, len);
-       if (!desc.tfm) {
-               desc.tfm = crypto_alloc_hash(ECRYPTFS_DEFAULT_HASH, 0,
-                                            CRYPTO_ALG_ASYNC);
-               if (IS_ERR(desc.tfm)) {
-                       rc = PTR_ERR(desc.tfm);
+       tfm = crypt_stat->hash_tfm;
+       if (!tfm) {
+               tfm = crypto_alloc_shash(ECRYPTFS_DEFAULT_HASH, 0, 0);
+               if (IS_ERR(tfm)) {
+                       rc = PTR_ERR(tfm);
                        ecryptfs_printk(KERN_ERR, "Error attempting to "
                                        "allocate crypto context; rc = [%d]\n",
                                        rc);
                        goto out;
                }
-               crypt_stat->hash_tfm = desc.tfm;
-       }
-       rc = crypto_hash_init(&desc);
-       if (rc) {
-               printk(KERN_ERR
-                      "%s: Error initializing crypto hash; rc = [%d]\n",
-                      __func__, rc);
-               goto out;
+               crypt_stat->hash_tfm = tfm;
        }
-       rc = crypto_hash_update(&desc, &sg, len);
+       rc = ecryptfs_hash_digest(tfm, src, len, dst);
        if (rc) {
                printk(KERN_ERR
-                      "%s: Error updating crypto hash; rc = [%d]\n",
-                      __func__, rc);
-               goto out;
-       }
-       rc = crypto_hash_final(&desc, dst);
-       if (rc) {
-               printk(KERN_ERR
-                      "%s: Error finalizing crypto hash; rc = [%d]\n",
+                      "%s: Error computing crypto hash; rc = [%d]\n",
                       __func__, rc);
                goto out;
        }
@@ -234,10 +229,8 @@ void ecryptfs_destroy_crypt_stat(struct ecryptfs_crypt_stat *crypt_stat)
 {
        struct ecryptfs_key_sig *key_sig, *key_sig_tmp;
 
-       if (crypt_stat->tfm)
-               crypto_free_ablkcipher(crypt_stat->tfm);
-       if (crypt_stat->hash_tfm)
-               crypto_free_hash(crypt_stat->hash_tfm);
+       crypto_free_skcipher(crypt_stat->tfm);
+       crypto_free_shash(crypt_stat->hash_tfm);
        list_for_each_entry_safe(key_sig, key_sig_tmp,
                                 &crypt_stat->keysig_list, crypt_stat_list) {
                list_del(&key_sig->crypt_stat_list);
@@ -342,7 +335,7 @@ static int crypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat,
                             struct scatterlist *src_sg, int size,
                             unsigned char *iv, int op)
 {
-       struct ablkcipher_request *req = NULL;
+       struct skcipher_request *req = NULL;
        struct extent_crypt_result ecr;
        int rc = 0;
 
@@ -358,20 +351,20 @@ static int crypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat,
        init_completion(&ecr.completion);
 
        mutex_lock(&crypt_stat->cs_tfm_mutex);
-       req = ablkcipher_request_alloc(crypt_stat->tfm, GFP_NOFS);
+       req = skcipher_request_alloc(crypt_stat->tfm, GFP_NOFS);
        if (!req) {
                mutex_unlock(&crypt_stat->cs_tfm_mutex);
                rc = -ENOMEM;
                goto out;
        }
 
-       ablkcipher_request_set_callback(req,
+       skcipher_request_set_callback(req,
                        CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
                        extent_crypt_complete, &ecr);
        /* Consider doing this once, when the file is opened */
        if (!(crypt_stat->flags & ECRYPTFS_KEY_SET)) {
-               rc = crypto_ablkcipher_setkey(crypt_stat->tfm, crypt_stat->key,
-                                             crypt_stat->key_size);
+               rc = crypto_skcipher_setkey(crypt_stat->tfm, crypt_stat->key,
+                                           crypt_stat->key_size);
                if (rc) {
                        ecryptfs_printk(KERN_ERR,
                                        "Error setting key; rc = [%d]\n",
@@ -383,9 +376,9 @@ static int crypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat,
                crypt_stat->flags |= ECRYPTFS_KEY_SET;
        }
        mutex_unlock(&crypt_stat->cs_tfm_mutex);
-       ablkcipher_request_set_crypt(req, src_sg, dst_sg, size, iv);
-       rc = op == ENCRYPT ? crypto_ablkcipher_encrypt(req) :
-                            crypto_ablkcipher_decrypt(req);
+       skcipher_request_set_crypt(req, src_sg, dst_sg, size, iv);
+       rc = op == ENCRYPT ? crypto_skcipher_encrypt(req) :
+                            crypto_skcipher_decrypt(req);
        if (rc == -EINPROGRESS || rc == -EBUSY) {
                struct extent_crypt_result *ecr = req->base.data;
 
@@ -394,7 +387,7 @@ static int crypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat,
                reinit_completion(&ecr->completion);
        }
 out:
-       ablkcipher_request_free(req);
+       skcipher_request_free(req);
        return rc;
 }
 
@@ -622,7 +615,7 @@ int ecryptfs_init_crypt_ctx(struct ecryptfs_crypt_stat *crypt_stat)
                                                    crypt_stat->cipher, "cbc");
        if (rc)
                goto out_unlock;
-       crypt_stat->tfm = crypto_alloc_ablkcipher(full_alg_name, 0, 0);
+       crypt_stat->tfm = crypto_alloc_skcipher(full_alg_name, 0, 0);
        if (IS_ERR(crypt_stat->tfm)) {
                rc = PTR_ERR(crypt_stat->tfm);
                crypt_stat->tfm = NULL;
@@ -631,7 +624,7 @@ int ecryptfs_init_crypt_ctx(struct ecryptfs_crypt_stat *crypt_stat)
                                full_alg_name);
                goto out_free;
        }
-       crypto_ablkcipher_set_flags(crypt_stat->tfm, CRYPTO_TFM_REQ_WEAK_KEY);
+       crypto_skcipher_set_flags(crypt_stat->tfm, CRYPTO_TFM_REQ_WEAK_KEY);
        rc = 0;
 out_free:
        kfree(full_alg_name);
@@ -1591,7 +1584,7 @@ out:
  * event, regardless of whether this function succeeds for fails.
  */
 static int
-ecryptfs_process_key_cipher(struct crypto_blkcipher **key_tfm,
+ecryptfs_process_key_cipher(struct crypto_skcipher **key_tfm,
                            char *cipher_name, size_t *key_size)
 {
        char dummy_key[ECRYPTFS_MAX_KEY_BYTES];
@@ -1609,21 +1602,18 @@ ecryptfs_process_key_cipher(struct crypto_blkcipher **key_tfm,
                                                    "ecb");
        if (rc)
                goto out;
-       *key_tfm = crypto_alloc_blkcipher(full_alg_name, 0, CRYPTO_ALG_ASYNC);
+       *key_tfm = crypto_alloc_skcipher(full_alg_name, 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(*key_tfm)) {
                rc = PTR_ERR(*key_tfm);
                printk(KERN_ERR "Unable to allocate crypto cipher with name "
                       "[%s]; rc = [%d]\n", full_alg_name, rc);
                goto out;
        }
-       crypto_blkcipher_set_flags(*key_tfm, CRYPTO_TFM_REQ_WEAK_KEY);
-       if (*key_size == 0) {
-               struct blkcipher_alg *alg = crypto_blkcipher_alg(*key_tfm);
-
-               *key_size = alg->max_keysize;
-       }
+       crypto_skcipher_set_flags(*key_tfm, CRYPTO_TFM_REQ_WEAK_KEY);
+       if (*key_size == 0)
+               *key_size = crypto_skcipher_default_keysize(*key_tfm);
        get_random_bytes(dummy_key, *key_size);
-       rc = crypto_blkcipher_setkey(*key_tfm, dummy_key, *key_size);
+       rc = crypto_skcipher_setkey(*key_tfm, dummy_key, *key_size);
        if (rc) {
                printk(KERN_ERR "Error attempting to set key of size [%zd] for "
                       "cipher [%s]; rc = [%d]\n", *key_size, full_alg_name,
@@ -1660,8 +1650,7 @@ int ecryptfs_destroy_crypto(void)
        list_for_each_entry_safe(key_tfm, key_tfm_tmp, &key_tfm_list,
                                 key_tfm_list) {
                list_del(&key_tfm->key_tfm_list);
-               if (key_tfm->key_tfm)
-                       crypto_free_blkcipher(key_tfm->key_tfm);
+               crypto_free_skcipher(key_tfm->key_tfm);
                kmem_cache_free(ecryptfs_key_tfm_cache, key_tfm);
        }
        mutex_unlock(&key_tfm_list_mutex);
@@ -1747,7 +1736,7 @@ int ecryptfs_tfm_exists(char *cipher_name, struct ecryptfs_key_tfm **key_tfm)
  * Searches for cached item first, and creates new if not found.
  * Returns 0 on success, non-zero if adding new cipher failed
  */
-int ecryptfs_get_tfm_and_mutex_for_cipher_name(struct crypto_blkcipher **tfm,
+int ecryptfs_get_tfm_and_mutex_for_cipher_name(struct crypto_skcipher **tfm,
                                               struct mutex **tfm_mutex,
                                               char *cipher_name)
 {
@@ -2120,7 +2109,7 @@ out:
 int ecryptfs_set_f_namelen(long *namelen, long lower_namelen,
                           struct ecryptfs_mount_crypt_stat *mount_crypt_stat)
 {
-       struct blkcipher_desc desc;
+       struct crypto_skcipher *tfm;
        struct mutex *tfm_mutex;
        size_t cipher_blocksize;
        int rc;
@@ -2130,7 +2119,7 @@ int ecryptfs_set_f_namelen(long *namelen, long lower_namelen,
                return 0;
        }
 
-       rc = ecryptfs_get_tfm_and_mutex_for_cipher_name(&desc.tfm, &tfm_mutex,
+       rc = ecryptfs_get_tfm_and_mutex_for_cipher_name(&tfm, &tfm_mutex,
                        mount_crypt_stat->global_default_fn_cipher_name);
        if (unlikely(rc)) {
                (*namelen) = 0;
@@ -2138,7 +2127,7 @@ int ecryptfs_set_f_namelen(long *namelen, long lower_namelen,
        }
 
        mutex_lock(tfm_mutex);
-       cipher_blocksize = crypto_blkcipher_blocksize(desc.tfm);
+       cipher_blocksize = crypto_skcipher_blocksize(tfm);
        mutex_unlock(tfm_mutex);
 
        /* Return an exact amount for the common cases */
index 7b39260c7bbaa18fda583426a101a95124639dc5..b7f81287c6880ebdd2ba6a17918fa25c72f365c9 100644 (file)
@@ -28,6 +28,7 @@
 #ifndef ECRYPTFS_KERNEL_H
 #define ECRYPTFS_KERNEL_H
 
+#include <crypto/skcipher.h>
 #include <keys/user-type.h>
 #include <keys/encrypted-type.h>
 #include <linux/fs.h>
@@ -38,7 +39,6 @@
 #include <linux/nsproxy.h>
 #include <linux/backing-dev.h>
 #include <linux/ecryptfs.h>
-#include <linux/crypto.h>
 
 #define ECRYPTFS_DEFAULT_IV_BYTES 16
 #define ECRYPTFS_DEFAULT_EXTENT_SIZE 4096
@@ -233,9 +233,9 @@ struct ecryptfs_crypt_stat {
        size_t extent_shift;
        unsigned int extent_mask;
        struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
-       struct crypto_ablkcipher *tfm;
-       struct crypto_hash *hash_tfm; /* Crypto context for generating
-                                      * the initialization vectors */
+       struct crypto_skcipher *tfm;
+       struct crypto_shash *hash_tfm; /* Crypto context for generating
+                                       * the initialization vectors */
        unsigned char cipher[ECRYPTFS_MAX_CIPHER_NAME_SIZE + 1];
        unsigned char key[ECRYPTFS_MAX_KEY_BYTES];
        unsigned char root_iv[ECRYPTFS_MAX_IV_BYTES];
@@ -309,7 +309,7 @@ struct ecryptfs_global_auth_tok {
  * keeps a list of crypto API contexts around to use when needed.
  */
 struct ecryptfs_key_tfm {
-       struct crypto_blkcipher *key_tfm;
+       struct crypto_skcipher *key_tfm;
        size_t key_size;
        struct mutex key_tfm_mutex;
        struct list_head key_tfm_list;
@@ -659,7 +659,7 @@ ecryptfs_add_new_key_tfm(struct ecryptfs_key_tfm **key_tfm, char *cipher_name,
 int ecryptfs_init_crypto(void);
 int ecryptfs_destroy_crypto(void);
 int ecryptfs_tfm_exists(char *cipher_name, struct ecryptfs_key_tfm **key_tfm);
-int ecryptfs_get_tfm_and_mutex_for_cipher_name(struct crypto_blkcipher **tfm,
+int ecryptfs_get_tfm_and_mutex_for_cipher_name(struct crypto_skcipher **tfm,
                                               struct mutex **tfm_mutex,
                                               char *cipher_name);
 int ecryptfs_keyring_auth_tok_for_sig(struct key **auth_tok_key,
index 4e685ac1024dc313e56ed8c8245fa5add9bb7c33..0a8f1b469a633dace58587b9044053f82b6fdc92 100644 (file)
@@ -29,7 +29,6 @@
 #include <linux/dcache.h>
 #include <linux/namei.h>
 #include <linux/mount.h>
-#include <linux/crypto.h>
 #include <linux/fs_stack.h>
 #include <linux/slab.h>
 #include <linux/xattr.h>
index 6bd67e2011f083e4e184e4d2d336cb15176d40ee..c5c84dfb5b3e3e0cf488a38846d4897b19fd53d0 100644 (file)
  * 02111-1307, USA.
  */
 
+#include <crypto/hash.h>
+#include <crypto/skcipher.h>
 #include <linux/string.h>
 #include <linux/pagemap.h>
 #include <linux/key.h>
 #include <linux/random.h>
-#include <linux/crypto.h>
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
 #include "ecryptfs_kernel.h"
@@ -601,12 +602,13 @@ struct ecryptfs_write_tag_70_packet_silly_stack {
        struct ecryptfs_auth_tok *auth_tok;
        struct scatterlist src_sg[2];
        struct scatterlist dst_sg[2];
-       struct blkcipher_desc desc;
+       struct crypto_skcipher *skcipher_tfm;
+       struct skcipher_request *skcipher_req;
        char iv[ECRYPTFS_MAX_IV_BYTES];
        char hash[ECRYPTFS_TAG_70_DIGEST_SIZE];
        char tmp_hash[ECRYPTFS_TAG_70_DIGEST_SIZE];
-       struct hash_desc hash_desc;
-       struct scatterlist hash_sg;
+       struct crypto_shash *hash_tfm;
+       struct shash_desc *hash_desc;
 };
 
 /**
@@ -629,14 +631,13 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
        struct key *auth_tok_key = NULL;
        int rc = 0;
 
-       s = kmalloc(sizeof(*s), GFP_KERNEL);
+       s = kzalloc(sizeof(*s), GFP_KERNEL);
        if (!s) {
                printk(KERN_ERR "%s: Out of memory whilst trying to kmalloc "
                       "[%zd] bytes of kernel memory\n", __func__, sizeof(*s));
                rc = -ENOMEM;
                goto out;
        }
-       s->desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
        (*packet_size) = 0;
        rc = ecryptfs_find_auth_tok_for_sig(
                &auth_tok_key,
@@ -649,7 +650,7 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
                goto out;
        }
        rc = ecryptfs_get_tfm_and_mutex_for_cipher_name(
-               &s->desc.tfm,
+               &s->skcipher_tfm,
                &s->tfm_mutex, mount_crypt_stat->global_default_fn_cipher_name);
        if (unlikely(rc)) {
                printk(KERN_ERR "Internal error whilst attempting to get "
@@ -658,7 +659,7 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
                goto out;
        }
        mutex_lock(s->tfm_mutex);
-       s->block_size = crypto_blkcipher_blocksize(s->desc.tfm);
+       s->block_size = crypto_skcipher_blocksize(s->skcipher_tfm);
        /* Plus one for the \0 separator between the random prefix
         * and the plaintext filename */
        s->num_rand_bytes = (ECRYPTFS_FILENAME_MIN_RANDOM_PREPEND_BYTES + 1);
@@ -691,6 +692,19 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
                rc = -EINVAL;
                goto out_unlock;
        }
+
+       s->skcipher_req = skcipher_request_alloc(s->skcipher_tfm, GFP_KERNEL);
+       if (!s->skcipher_req) {
+               printk(KERN_ERR "%s: Out of kernel memory whilst attempting to "
+                      "skcipher_request_alloc for %s\n", __func__,
+                      crypto_skcipher_driver_name(s->skcipher_tfm));
+               rc = -ENOMEM;
+               goto out_unlock;
+       }
+
+       skcipher_request_set_callback(s->skcipher_req,
+                                     CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
+
        s->block_aligned_filename = kzalloc(s->block_aligned_filename_size,
                                            GFP_KERNEL);
        if (!s->block_aligned_filename) {
@@ -700,7 +714,6 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
                rc = -ENOMEM;
                goto out_unlock;
        }
-       s->i = 0;
        dest[s->i++] = ECRYPTFS_TAG_70_PACKET_TYPE;
        rc = ecryptfs_write_packet_length(&dest[s->i],
                                          (ECRYPTFS_SIG_SIZE
@@ -738,40 +751,36 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
                       "password tokens\n", __func__);
                goto out_free_unlock;
        }
-       sg_init_one(
-               &s->hash_sg,
-               (u8 *)s->auth_tok->token.password.session_key_encryption_key,
-               s->auth_tok->token.password.session_key_encryption_key_bytes);
-       s->hash_desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
-       s->hash_desc.tfm = crypto_alloc_hash(ECRYPTFS_TAG_70_DIGEST, 0,
-                                            CRYPTO_ALG_ASYNC);
-       if (IS_ERR(s->hash_desc.tfm)) {
-                       rc = PTR_ERR(s->hash_desc.tfm);
+       s->hash_tfm = crypto_alloc_shash(ECRYPTFS_TAG_70_DIGEST, 0, 0);
+       if (IS_ERR(s->hash_tfm)) {
+                       rc = PTR_ERR(s->hash_tfm);
                        printk(KERN_ERR "%s: Error attempting to "
                               "allocate hash crypto context; rc = [%d]\n",
                               __func__, rc);
                        goto out_free_unlock;
        }
-       rc = crypto_hash_init(&s->hash_desc);
-       if (rc) {
-               printk(KERN_ERR
-                      "%s: Error initializing crypto hash; rc = [%d]\n",
-                      __func__, rc);
-               goto out_release_free_unlock;
-       }
-       rc = crypto_hash_update(
-               &s->hash_desc, &s->hash_sg,
-               s->auth_tok->token.password.session_key_encryption_key_bytes);
-       if (rc) {
-               printk(KERN_ERR
-                      "%s: Error updating crypto hash; rc = [%d]\n",
-                      __func__, rc);
+
+       s->hash_desc = kmalloc(sizeof(*s->hash_desc) +
+                              crypto_shash_descsize(s->hash_tfm), GFP_KERNEL);
+       if (!s->hash_desc) {
+               printk(KERN_ERR "%s: Out of kernel memory whilst attempting to "
+                      "kmalloc [%zd] bytes\n", __func__,
+                      sizeof(*s->hash_desc) +
+                      crypto_shash_descsize(s->hash_tfm));
+               rc = -ENOMEM;
                goto out_release_free_unlock;
        }
-       rc = crypto_hash_final(&s->hash_desc, s->hash);
+
+       s->hash_desc->tfm = s->hash_tfm;
+       s->hash_desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+       rc = crypto_shash_digest(s->hash_desc,
+                                (u8 *)s->auth_tok->token.password.session_key_encryption_key,
+                                s->auth_tok->token.password.session_key_encryption_key_bytes,
+                                s->hash);
        if (rc) {
                printk(KERN_ERR
-                      "%s: Error finalizing crypto hash; rc = [%d]\n",
+                      "%s: Error computing crypto hash; rc = [%d]\n",
                       __func__, rc);
                goto out_release_free_unlock;
        }
@@ -780,27 +789,12 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
                        s->hash[(s->j % ECRYPTFS_TAG_70_DIGEST_SIZE)];
                if ((s->j % ECRYPTFS_TAG_70_DIGEST_SIZE)
                    == (ECRYPTFS_TAG_70_DIGEST_SIZE - 1)) {
-                       sg_init_one(&s->hash_sg, (u8 *)s->hash,
-                                   ECRYPTFS_TAG_70_DIGEST_SIZE);
-                       rc = crypto_hash_init(&s->hash_desc);
-                       if (rc) {
-                               printk(KERN_ERR
-                                      "%s: Error initializing crypto hash; "
-                                      "rc = [%d]\n", __func__, rc);
-                               goto out_release_free_unlock;
-                       }
-                       rc = crypto_hash_update(&s->hash_desc, &s->hash_sg,
-                                               ECRYPTFS_TAG_70_DIGEST_SIZE);
+                       rc = crypto_shash_digest(s->hash_desc, (u8 *)s->hash,
+                                               ECRYPTFS_TAG_70_DIGEST_SIZE,
+                                               s->tmp_hash);
                        if (rc) {
                                printk(KERN_ERR
-                                      "%s: Error updating crypto hash; "
-                                      "rc = [%d]\n", __func__, rc);
-                               goto out_release_free_unlock;
-                       }
-                       rc = crypto_hash_final(&s->hash_desc, s->tmp_hash);
-                       if (rc) {
-                               printk(KERN_ERR
-                                      "%s: Error finalizing crypto hash; "
+                                      "%s: Error computing crypto hash; "
                                       "rc = [%d]\n", __func__, rc);
                                goto out_release_free_unlock;
                        }
@@ -834,10 +828,8 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
         * of the IV here, so we just use 0's for the IV. Note the
         * constraint that ECRYPTFS_FILENAME_MIN_RANDOM_PREPEND_BYTES
         * >= ECRYPTFS_MAX_IV_BYTES. */
-       memset(s->iv, 0, ECRYPTFS_MAX_IV_BYTES);
-       s->desc.info = s->iv;
-       rc = crypto_blkcipher_setkey(
-               s->desc.tfm,
+       rc = crypto_skcipher_setkey(
+               s->skcipher_tfm,
                s->auth_tok->token.password.session_key_encryption_key,
                mount_crypt_stat->global_default_fn_cipher_key_bytes);
        if (rc < 0) {
@@ -850,8 +842,9 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
                       mount_crypt_stat->global_default_fn_cipher_key_bytes);
                goto out_release_free_unlock;
        }
-       rc = crypto_blkcipher_encrypt_iv(&s->desc, s->dst_sg, s->src_sg,
-                                        s->block_aligned_filename_size);
+       skcipher_request_set_crypt(s->skcipher_req, s->src_sg, s->dst_sg,
+                                  s->block_aligned_filename_size, s->iv);
+       rc = crypto_skcipher_encrypt(s->skcipher_req);
        if (rc) {
                printk(KERN_ERR "%s: Error attempting to encrypt filename; "
                       "rc = [%d]\n", __func__, rc);
@@ -861,7 +854,7 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
        (*packet_size) = s->i;
        (*remaining_bytes) -= (*packet_size);
 out_release_free_unlock:
-       crypto_free_hash(s->hash_desc.tfm);
+       crypto_free_shash(s->hash_tfm);
 out_free_unlock:
        kzfree(s->block_aligned_filename);
 out_unlock:
@@ -871,6 +864,8 @@ out:
                up_write(&(auth_tok_key->sem));
                key_put(auth_tok_key);
        }
+       skcipher_request_free(s->skcipher_req);
+       kzfree(s->hash_desc);
        kfree(s);
        return rc;
 }
@@ -888,7 +883,8 @@ struct ecryptfs_parse_tag_70_packet_silly_stack {
        struct ecryptfs_auth_tok *auth_tok;
        struct scatterlist src_sg[2];
        struct scatterlist dst_sg[2];
-       struct blkcipher_desc desc;
+       struct crypto_skcipher *skcipher_tfm;
+       struct skcipher_request *skcipher_req;
        char fnek_sig_hex[ECRYPTFS_SIG_SIZE_HEX + 1];
        char iv[ECRYPTFS_MAX_IV_BYTES];
        char cipher_string[ECRYPTFS_MAX_CIPHER_NAME_SIZE + 1];
@@ -922,14 +918,13 @@ ecryptfs_parse_tag_70_packet(char **filename, size_t *filename_size,
        (*packet_size) = 0;
        (*filename_size) = 0;
        (*filename) = NULL;
-       s = kmalloc(sizeof(*s), GFP_KERNEL);
+       s = kzalloc(sizeof(*s), GFP_KERNEL);
        if (!s) {
                printk(KERN_ERR "%s: Out of memory whilst trying to kmalloc "
                       "[%zd] bytes of kernel memory\n", __func__, sizeof(*s));
                rc = -ENOMEM;
                goto out;
        }
-       s->desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
        if (max_packet_size < ECRYPTFS_TAG_70_MIN_METADATA_SIZE) {
                printk(KERN_WARNING "%s: max_packet_size is [%zd]; it must be "
                       "at least [%d]\n", __func__, max_packet_size,
@@ -992,7 +987,7 @@ ecryptfs_parse_tag_70_packet(char **filename, size_t *filename_size,
                       rc);
                goto out;
        }
-       rc = ecryptfs_get_tfm_and_mutex_for_cipher_name(&s->desc.tfm,
+       rc = ecryptfs_get_tfm_and_mutex_for_cipher_name(&s->skcipher_tfm,
                                                        &s->tfm_mutex,
                                                        s->cipher_string);
        if (unlikely(rc)) {
@@ -1030,12 +1025,23 @@ ecryptfs_parse_tag_70_packet(char **filename, size_t *filename_size,
                       __func__, rc, s->block_aligned_filename_size);
                goto out_free_unlock;
        }
+
+       s->skcipher_req = skcipher_request_alloc(s->skcipher_tfm, GFP_KERNEL);
+       if (!s->skcipher_req) {
+               printk(KERN_ERR "%s: Out of kernel memory whilst attempting to "
+                      "skcipher_request_alloc for %s\n", __func__,
+                      crypto_skcipher_driver_name(s->skcipher_tfm));
+               rc = -ENOMEM;
+               goto out_free_unlock;
+       }
+
+       skcipher_request_set_callback(s->skcipher_req,
+                                     CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
+
        /* The characters in the first block effectively do the job of
         * the IV here, so we just use 0's for the IV. Note the
         * constraint that ECRYPTFS_FILENAME_MIN_RANDOM_PREPEND_BYTES
         * >= ECRYPTFS_MAX_IV_BYTES. */
-       memset(s->iv, 0, ECRYPTFS_MAX_IV_BYTES);
-       s->desc.info = s->iv;
        /* TODO: Support other key modules than passphrase for
         * filename encryption */
        if (s->auth_tok->token_type != ECRYPTFS_PASSWORD) {
@@ -1044,8 +1050,8 @@ ecryptfs_parse_tag_70_packet(char **filename, size_t *filename_size,
                       "password tokens\n", __func__);
                goto out_free_unlock;
        }
-       rc = crypto_blkcipher_setkey(
-               s->desc.tfm,
+       rc = crypto_skcipher_setkey(
+               s->skcipher_tfm,
                s->auth_tok->token.password.session_key_encryption_key,
                mount_crypt_stat->global_default_fn_cipher_key_bytes);
        if (rc < 0) {
@@ -1058,14 +1064,14 @@ ecryptfs_parse_tag_70_packet(char **filename, size_t *filename_size,
                       mount_crypt_stat->global_default_fn_cipher_key_bytes);
                goto out_free_unlock;
        }
-       rc = crypto_blkcipher_decrypt_iv(&s->desc, s->dst_sg, s->src_sg,
-                                        s->block_aligned_filename_size);
+       skcipher_request_set_crypt(s->skcipher_req, s->src_sg, s->dst_sg,
+                                  s->block_aligned_filename_size, s->iv);
+       rc = crypto_skcipher_decrypt(s->skcipher_req);
        if (rc) {
                printk(KERN_ERR "%s: Error attempting to decrypt filename; "
                       "rc = [%d]\n", __func__, rc);
                goto out_free_unlock;
        }
-       s->i = 0;
        while (s->decrypted_filename[s->i] != '\0'
               && s->i < s->block_aligned_filename_size)
                s->i++;
@@ -1108,6 +1114,7 @@ out:
                up_write(&(auth_tok_key->sem));
                key_put(auth_tok_key);
        }
+       skcipher_request_free(s->skcipher_req);
        kfree(s);
        return rc;
 }
@@ -1667,9 +1674,8 @@ decrypt_passphrase_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
        struct scatterlist dst_sg[2];
        struct scatterlist src_sg[2];
        struct mutex *tfm_mutex;
-       struct blkcipher_desc desc = {
-               .flags = CRYPTO_TFM_REQ_MAY_SLEEP
-       };
+       struct crypto_skcipher *tfm;
+       struct skcipher_request *req = NULL;
        int rc = 0;
 
        if (unlikely(ecryptfs_verbosity > 0)) {
@@ -1680,7 +1686,7 @@ decrypt_passphrase_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
                        auth_tok->token.password.session_key_encryption_key,
                        auth_tok->token.password.session_key_encryption_key_bytes);
        }
-       rc = ecryptfs_get_tfm_and_mutex_for_cipher_name(&desc.tfm, &tfm_mutex,
+       rc = ecryptfs_get_tfm_and_mutex_for_cipher_name(&tfm, &tfm_mutex,
                                                        crypt_stat->cipher);
        if (unlikely(rc)) {
                printk(KERN_ERR "Internal error whilst attempting to get "
@@ -1711,8 +1717,20 @@ decrypt_passphrase_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
                goto out;
        }
        mutex_lock(tfm_mutex);
-       rc = crypto_blkcipher_setkey(
-               desc.tfm, auth_tok->token.password.session_key_encryption_key,
+       req = skcipher_request_alloc(tfm, GFP_KERNEL);
+       if (!req) {
+               mutex_unlock(tfm_mutex);
+               printk(KERN_ERR "%s: Out of kernel memory whilst attempting to "
+                      "skcipher_request_alloc for %s\n", __func__,
+                      crypto_skcipher_driver_name(tfm));
+               rc = -ENOMEM;
+               goto out;
+       }
+
+       skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
+                                     NULL, NULL);
+       rc = crypto_skcipher_setkey(
+               tfm, auth_tok->token.password.session_key_encryption_key,
                crypt_stat->key_size);
        if (unlikely(rc < 0)) {
                mutex_unlock(tfm_mutex);
@@ -1720,8 +1738,10 @@ decrypt_passphrase_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
                rc = -EINVAL;
                goto out;
        }
-       rc = crypto_blkcipher_decrypt(&desc, dst_sg, src_sg,
-                                     auth_tok->session_key.encrypted_key_size);
+       skcipher_request_set_crypt(req, src_sg, dst_sg,
+                                  auth_tok->session_key.encrypted_key_size,
+                                  NULL);
+       rc = crypto_skcipher_decrypt(req);
        mutex_unlock(tfm_mutex);
        if (unlikely(rc)) {
                printk(KERN_ERR "Error decrypting; rc = [%d]\n", rc);
@@ -1738,6 +1758,7 @@ decrypt_passphrase_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
                                  crypt_stat->key_size);
        }
 out:
+       skcipher_request_free(req);
        return rc;
 }
 
@@ -2191,16 +2212,14 @@ write_tag_3_packet(char *dest, size_t *remaining_bytes,
        size_t max_packet_size;
        struct ecryptfs_mount_crypt_stat *mount_crypt_stat =
                crypt_stat->mount_crypt_stat;
-       struct blkcipher_desc desc = {
-               .tfm = NULL,
-               .flags = CRYPTO_TFM_REQ_MAY_SLEEP
-       };
+       struct crypto_skcipher *tfm;
+       struct skcipher_request *req;
        int rc = 0;
 
        (*packet_size) = 0;
        ecryptfs_from_hex(key_rec->sig, auth_tok->token.password.signature,
                          ECRYPTFS_SIG_SIZE);
-       rc = ecryptfs_get_tfm_and_mutex_for_cipher_name(&desc.tfm, &tfm_mutex,
+       rc = ecryptfs_get_tfm_and_mutex_for_cipher_name(&tfm, &tfm_mutex,
                                                        crypt_stat->cipher);
        if (unlikely(rc)) {
                printk(KERN_ERR "Internal error whilst attempting to get "
@@ -2209,12 +2228,11 @@ write_tag_3_packet(char *dest, size_t *remaining_bytes,
                goto out;
        }
        if (mount_crypt_stat->global_default_cipher_key_size == 0) {
-               struct blkcipher_alg *alg = crypto_blkcipher_alg(desc.tfm);
-
                printk(KERN_WARNING "No key size specified at mount; "
-                      "defaulting to [%d]\n", alg->max_keysize);
+                      "defaulting to [%d]\n",
+                      crypto_skcipher_default_keysize(tfm));
                mount_crypt_stat->global_default_cipher_key_size =
-                       alg->max_keysize;
+                       crypto_skcipher_default_keysize(tfm);
        }
        if (crypt_stat->key_size == 0)
                crypt_stat->key_size =
@@ -2284,20 +2302,36 @@ write_tag_3_packet(char *dest, size_t *remaining_bytes,
                goto out;
        }
        mutex_lock(tfm_mutex);
-       rc = crypto_blkcipher_setkey(desc.tfm, session_key_encryption_key,
-                                    crypt_stat->key_size);
+       rc = crypto_skcipher_setkey(tfm, session_key_encryption_key,
+                                   crypt_stat->key_size);
        if (rc < 0) {
                mutex_unlock(tfm_mutex);
                ecryptfs_printk(KERN_ERR, "Error setting key for crypto "
                                "context; rc = [%d]\n", rc);
                goto out;
        }
+
+       req = skcipher_request_alloc(tfm, GFP_KERNEL);
+       if (!req) {
+               mutex_unlock(tfm_mutex);
+               ecryptfs_printk(KERN_ERR, "Out of kernel memory whilst "
+                               "attempting to skcipher_request_alloc for "
+                               "%s\n", crypto_skcipher_driver_name(tfm));
+               rc = -ENOMEM;
+               goto out;
+       }
+
+       skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
+                                     NULL, NULL);
+
        rc = 0;
        ecryptfs_printk(KERN_DEBUG, "Encrypting [%zd] bytes of the key\n",
                        crypt_stat->key_size);
-       rc = crypto_blkcipher_encrypt(&desc, dst_sg, src_sg,
-                                     (*key_rec).enc_key_size);
+       skcipher_request_set_crypt(req, src_sg, dst_sg,
+                                  (*key_rec).enc_key_size, NULL);
+       rc = crypto_skcipher_encrypt(req);
        mutex_unlock(tfm_mutex);
+       skcipher_request_free(req);
        if (rc) {
                printk(KERN_ERR "Error encrypting; rc = [%d]\n", rc);
                goto out;
index 83aa5aef93ee19678d6760e74360ee8cbf0ae5d1..5fe2cdb4898806e6e00ec95e88a5d19821a293ae 100644 (file)
@@ -29,7 +29,6 @@
 #include <linux/module.h>
 #include <linux/namei.h>
 #include <linux/skbuff.h>
-#include <linux/crypto.h>
 #include <linux/mount.h>
 #include <linux/pagemap.h>
 #include <linux/key.h>
index c6ced4cbf0cff7d3b3f754ddbf0727d44bca8809..1f5865263b3eff32fed493480ce9ff7b72759af2 100644 (file)
@@ -30,7 +30,6 @@
 #include <linux/page-flags.h>
 #include <linux/mount.h>
 #include <linux/file.h>
-#include <linux/crypto.h>
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
 #include <asm/unaligned.h>
index afa1b81c3418bbfa1e18a07e9358c46807e9af30..77a486d3a51b600265a0fc1a1a0134ae74ff5ab9 100644 (file)
@@ -29,7 +29,6 @@
 #include <linux/slab.h>
 #include <linux/seq_file.h>
 #include <linux/file.h>
-#include <linux/crypto.h>
 #include <linux/statfs.h>
 #include <linux/magic.h>
 #include "ecryptfs_kernel.h"
index ec508af40cf52d732e10e1ddd4113d38f2182659..04efc304c9c147b08089dfa66948a4d2f809584b 100644 (file)
  * Special Publication 800-38E and IEEE P1619/D16.
  */
 
-#include <crypto/hash.h>
-#include <crypto/sha.h>
+#include <crypto/skcipher.h>
 #include <keys/user-type.h>
 #include <keys/encrypted-type.h>
-#include <linux/crypto.h>
 #include <linux/ecryptfs.h>
 #include <linux/gfp.h>
 #include <linux/kernel.h>
@@ -261,21 +259,21 @@ static int ext4_page_crypto(struct inode *inode,
 
 {
        u8 xts_tweak[EXT4_XTS_TWEAK_SIZE];
-       struct ablkcipher_request *req = NULL;
+       struct skcipher_request *req = NULL;
        DECLARE_EXT4_COMPLETION_RESULT(ecr);
        struct scatterlist dst, src;
        struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;
-       struct crypto_ablkcipher *tfm = ci->ci_ctfm;
+       struct crypto_skcipher *tfm = ci->ci_ctfm;
        int res = 0;
 
-       req = ablkcipher_request_alloc(tfm, GFP_NOFS);
+       req = skcipher_request_alloc(tfm, GFP_NOFS);
        if (!req) {
                printk_ratelimited(KERN_ERR
                                   "%s: crypto_request_alloc() failed\n",
                                   __func__);
                return -ENOMEM;
        }
-       ablkcipher_request_set_callback(
+       skcipher_request_set_callback(
                req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
                ext4_crypt_complete, &ecr);
 
@@ -288,21 +286,21 @@ static int ext4_page_crypto(struct inode *inode,
        sg_set_page(&dst, dest_page, PAGE_CACHE_SIZE, 0);
        sg_init_table(&src, 1);
        sg_set_page(&src, src_page, PAGE_CACHE_SIZE, 0);
-       ablkcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE,
-                                    xts_tweak);
+       skcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE,
+                                  xts_tweak);
        if (rw == EXT4_DECRYPT)
-               res = crypto_ablkcipher_decrypt(req);
+               res = crypto_skcipher_decrypt(req);
        else
-               res = crypto_ablkcipher_encrypt(req);
+               res = crypto_skcipher_encrypt(req);
        if (res == -EINPROGRESS || res == -EBUSY) {
                wait_for_completion(&ecr.completion);
                res = ecr.res;
        }
-       ablkcipher_request_free(req);
+       skcipher_request_free(req);
        if (res) {
                printk_ratelimited(
                        KERN_ERR
-                       "%s: crypto_ablkcipher_encrypt() returned %d\n",
+                       "%s: crypto_skcipher_encrypt() returned %d\n",
                        __func__, res);
                return res;
        }
index 2fbef8a14760f4095300c9fdc0edcdc2909f4a65..1a2f360405dbdd1b4bdd84de735589f34010068a 100644 (file)
  *
  */
 
-#include <crypto/hash.h>
-#include <crypto/sha.h>
+#include <crypto/skcipher.h>
 #include <keys/encrypted-type.h>
 #include <keys/user-type.h>
-#include <linux/crypto.h>
 #include <linux/gfp.h>
 #include <linux/kernel.h>
 #include <linux/key.h>
@@ -65,10 +63,10 @@ static int ext4_fname_encrypt(struct inode *inode,
                              struct ext4_str *oname)
 {
        u32 ciphertext_len;
-       struct ablkcipher_request *req = NULL;
+       struct skcipher_request *req = NULL;
        DECLARE_EXT4_COMPLETION_RESULT(ecr);
        struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;
-       struct crypto_ablkcipher *tfm = ci->ci_ctfm;
+       struct crypto_skcipher *tfm = ci->ci_ctfm;
        int res = 0;
        char iv[EXT4_CRYPTO_BLOCK_SIZE];
        struct scatterlist src_sg, dst_sg;
@@ -95,14 +93,14 @@ static int ext4_fname_encrypt(struct inode *inode,
        }
 
        /* Allocate request */
-       req = ablkcipher_request_alloc(tfm, GFP_NOFS);
+       req = skcipher_request_alloc(tfm, GFP_NOFS);
        if (!req) {
                printk_ratelimited(
                    KERN_ERR "%s: crypto_request_alloc() failed\n", __func__);
                kfree(alloc_buf);
                return -ENOMEM;
        }
-       ablkcipher_request_set_callback(req,
+       skcipher_request_set_callback(req,
                CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
                ext4_dir_crypt_complete, &ecr);
 
@@ -117,14 +115,14 @@ static int ext4_fname_encrypt(struct inode *inode,
        /* Create encryption request */
        sg_init_one(&src_sg, workbuf, ciphertext_len);
        sg_init_one(&dst_sg, oname->name, ciphertext_len);
-       ablkcipher_request_set_crypt(req, &src_sg, &dst_sg, ciphertext_len, iv);
-       res = crypto_ablkcipher_encrypt(req);
+       skcipher_request_set_crypt(req, &src_sg, &dst_sg, ciphertext_len, iv);
+       res = crypto_skcipher_encrypt(req);
        if (res == -EINPROGRESS || res == -EBUSY) {
                wait_for_completion(&ecr.completion);
                res = ecr.res;
        }
        kfree(alloc_buf);
-       ablkcipher_request_free(req);
+       skcipher_request_free(req);
        if (res < 0) {
                printk_ratelimited(
                    KERN_ERR "%s: Error (error code %d)\n", __func__, res);
@@ -145,11 +143,11 @@ static int ext4_fname_decrypt(struct inode *inode,
                              struct ext4_str *oname)
 {
        struct ext4_str tmp_in[2], tmp_out[1];
-       struct ablkcipher_request *req = NULL;
+       struct skcipher_request *req = NULL;
        DECLARE_EXT4_COMPLETION_RESULT(ecr);
        struct scatterlist src_sg, dst_sg;
        struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;
-       struct crypto_ablkcipher *tfm = ci->ci_ctfm;
+       struct crypto_skcipher *tfm = ci->ci_ctfm;
        int res = 0;
        char iv[EXT4_CRYPTO_BLOCK_SIZE];
        unsigned lim = max_name_len(inode);
@@ -162,13 +160,13 @@ static int ext4_fname_decrypt(struct inode *inode,
        tmp_out[0].name = oname->name;
 
        /* Allocate request */
-       req = ablkcipher_request_alloc(tfm, GFP_NOFS);
+       req = skcipher_request_alloc(tfm, GFP_NOFS);
        if (!req) {
                printk_ratelimited(
                    KERN_ERR "%s: crypto_request_alloc() failed\n",  __func__);
                return -ENOMEM;
        }
-       ablkcipher_request_set_callback(req,
+       skcipher_request_set_callback(req,
                CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
                ext4_dir_crypt_complete, &ecr);
 
@@ -178,13 +176,13 @@ static int ext4_fname_decrypt(struct inode *inode,
        /* Create encryption request */
        sg_init_one(&src_sg, iname->name, iname->len);
        sg_init_one(&dst_sg, oname->name, oname->len);
-       ablkcipher_request_set_crypt(req, &src_sg, &dst_sg, iname->len, iv);
-       res = crypto_ablkcipher_decrypt(req);
+       skcipher_request_set_crypt(req, &src_sg, &dst_sg, iname->len, iv);
+       res = crypto_skcipher_decrypt(req);
        if (res == -EINPROGRESS || res == -EBUSY) {
                wait_for_completion(&ecr.completion);
                res = ecr.res;
        }
-       ablkcipher_request_free(req);
+       skcipher_request_free(req);
        if (res < 0) {
                printk_ratelimited(
                    KERN_ERR "%s: Error in ext4_fname_encrypt (error code %d)\n",
index 9a16d1e75a493f9e4663bb4ea05b9da9980ba65d..0129d688d1f7187701df12b64d23f522d0c3f31b 100644 (file)
@@ -8,6 +8,7 @@
  * Written by Michael Halcrow, Ildar Muslukhov, and Uday Savagaonkar, 2015.
  */
 
+#include <crypto/skcipher.h>
 #include <keys/encrypted-type.h>
 #include <keys/user-type.h>
 #include <linux/random.h>
@@ -41,45 +42,42 @@ static int ext4_derive_key_aes(char deriving_key[EXT4_AES_128_ECB_KEY_SIZE],
                               char derived_key[EXT4_AES_256_XTS_KEY_SIZE])
 {
        int res = 0;
-       struct ablkcipher_request *req = NULL;
+       struct skcipher_request *req = NULL;
        DECLARE_EXT4_COMPLETION_RESULT(ecr);
        struct scatterlist src_sg, dst_sg;
-       struct crypto_ablkcipher *tfm = crypto_alloc_ablkcipher("ecb(aes)", 0,
-                                                               0);
+       struct crypto_skcipher *tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
 
        if (IS_ERR(tfm)) {
                res = PTR_ERR(tfm);
                tfm = NULL;
                goto out;
        }
-       crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);
-       req = ablkcipher_request_alloc(tfm, GFP_NOFS);
+       crypto_skcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);
+       req = skcipher_request_alloc(tfm, GFP_NOFS);
        if (!req) {
                res = -ENOMEM;
                goto out;
        }
-       ablkcipher_request_set_callback(req,
+       skcipher_request_set_callback(req,
                        CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
                        derive_crypt_complete, &ecr);
-       res = crypto_ablkcipher_setkey(tfm, deriving_key,
-                                      EXT4_AES_128_ECB_KEY_SIZE);
+       res = crypto_skcipher_setkey(tfm, deriving_key,
+                                    EXT4_AES_128_ECB_KEY_SIZE);
        if (res < 0)
                goto out;
        sg_init_one(&src_sg, source_key, EXT4_AES_256_XTS_KEY_SIZE);
        sg_init_one(&dst_sg, derived_key, EXT4_AES_256_XTS_KEY_SIZE);
-       ablkcipher_request_set_crypt(req, &src_sg, &dst_sg,
-                                    EXT4_AES_256_XTS_KEY_SIZE, NULL);
-       res = crypto_ablkcipher_encrypt(req);
+       skcipher_request_set_crypt(req, &src_sg, &dst_sg,
+                                  EXT4_AES_256_XTS_KEY_SIZE, NULL);
+       res = crypto_skcipher_encrypt(req);
        if (res == -EINPROGRESS || res == -EBUSY) {
                wait_for_completion(&ecr.completion);
                res = ecr.res;
        }
 
 out:
-       if (req)
-               ablkcipher_request_free(req);
-       if (tfm)
-               crypto_free_ablkcipher(tfm);
+       skcipher_request_free(req);
+       crypto_free_skcipher(tfm);
        return res;
 }
 
@@ -90,7 +88,7 @@ void ext4_free_crypt_info(struct ext4_crypt_info *ci)
 
        if (ci->ci_keyring_key)
                key_put(ci->ci_keyring_key);
-       crypto_free_ablkcipher(ci->ci_ctfm);
+       crypto_free_skcipher(ci->ci_ctfm);
        kmem_cache_free(ext4_crypt_info_cachep, ci);
 }
 
@@ -122,7 +120,7 @@ int _ext4_get_encryption_info(struct inode *inode)
        struct ext4_encryption_context ctx;
        const struct user_key_payload *ukp;
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
-       struct crypto_ablkcipher *ctfm;
+       struct crypto_skcipher *ctfm;
        const char *cipher_str;
        char raw_key[EXT4_MAX_KEY_SIZE];
        char mode;
@@ -237,7 +235,7 @@ retry:
        if (res)
                goto out;
 got_key:
-       ctfm = crypto_alloc_ablkcipher(cipher_str, 0, 0);
+       ctfm = crypto_alloc_skcipher(cipher_str, 0, 0);
        if (!ctfm || IS_ERR(ctfm)) {
                res = ctfm ? PTR_ERR(ctfm) : -ENOMEM;
                printk(KERN_DEBUG
@@ -246,11 +244,11 @@ got_key:
                goto out;
        }
        crypt_info->ci_ctfm = ctfm;
-       crypto_ablkcipher_clear_flags(ctfm, ~0);
-       crypto_tfm_set_flags(crypto_ablkcipher_tfm(ctfm),
+       crypto_skcipher_clear_flags(ctfm, ~0);
+       crypto_tfm_set_flags(crypto_skcipher_tfm(ctfm),
                             CRYPTO_TFM_REQ_WEAK_KEY);
-       res = crypto_ablkcipher_setkey(ctfm, raw_key,
-                                      ext4_encryption_key_size(mode));
+       res = crypto_skcipher_setkey(ctfm, raw_key,
+                                    ext4_encryption_key_size(mode));
        if (res)
                goto out;
        memzero_explicit(raw_key, sizeof(raw_key));
index ac7d4e81379630d2f720ea8d389b8322a784f4cf..1f73c29717e19f656db7b9745c8e61c67baa0ff1 100644 (file)
@@ -77,7 +77,7 @@ struct ext4_crypt_info {
        char            ci_data_mode;
        char            ci_filename_mode;
        char            ci_flags;
-       struct crypto_ablkcipher *ci_ctfm;
+       struct crypto_skcipher *ci_ctfm;
        struct key      *ci_keyring_key;
        char            ci_master_key[EXT4_KEY_DESCRIPTOR_SIZE];
 };
index 4a62ef14e93275a881f967ceabd66c6bd306345c..95c5cf039711c6c994ef8b4a04d0ef37f777591d 100644 (file)
  * The usage of AES-XTS should conform to recommendations in NIST
  * Special Publication 800-38E and IEEE P1619/D16.
  */
-#include <crypto/hash.h>
-#include <crypto/sha.h>
+#include <crypto/skcipher.h>
 #include <keys/user-type.h>
 #include <keys/encrypted-type.h>
-#include <linux/crypto.h>
 #include <linux/ecryptfs.h>
 #include <linux/gfp.h>
 #include <linux/kernel.h>
@@ -328,21 +326,21 @@ static int f2fs_page_crypto(struct f2fs_crypto_ctx *ctx,
                                struct page *dest_page)
 {
        u8 xts_tweak[F2FS_XTS_TWEAK_SIZE];
-       struct ablkcipher_request *req = NULL;
+       struct skcipher_request *req = NULL;
        DECLARE_F2FS_COMPLETION_RESULT(ecr);
        struct scatterlist dst, src;
        struct f2fs_crypt_info *ci = F2FS_I(inode)->i_crypt_info;
-       struct crypto_ablkcipher *tfm = ci->ci_ctfm;
+       struct crypto_skcipher *tfm = ci->ci_ctfm;
        int res = 0;
 
-       req = ablkcipher_request_alloc(tfm, GFP_NOFS);
+       req = skcipher_request_alloc(tfm, GFP_NOFS);
        if (!req) {
                printk_ratelimited(KERN_ERR
                                "%s: crypto_request_alloc() failed\n",
                                __func__);
                return -ENOMEM;
        }
-       ablkcipher_request_set_callback(
+       skcipher_request_set_callback(
                req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
                f2fs_crypt_complete, &ecr);
 
@@ -355,21 +353,21 @@ static int f2fs_page_crypto(struct f2fs_crypto_ctx *ctx,
        sg_set_page(&dst, dest_page, PAGE_CACHE_SIZE, 0);
        sg_init_table(&src, 1);
        sg_set_page(&src, src_page, PAGE_CACHE_SIZE, 0);
-       ablkcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE,
-                                       xts_tweak);
+       skcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE,
+                                  xts_tweak);
        if (rw == F2FS_DECRYPT)
-               res = crypto_ablkcipher_decrypt(req);
+               res = crypto_skcipher_decrypt(req);
        else
-               res = crypto_ablkcipher_encrypt(req);
+               res = crypto_skcipher_encrypt(req);
        if (res == -EINPROGRESS || res == -EBUSY) {
                BUG_ON(req->base.data != &ecr);
                wait_for_completion(&ecr.completion);
                res = ecr.res;
        }
-       ablkcipher_request_free(req);
+       skcipher_request_free(req);
        if (res) {
                printk_ratelimited(KERN_ERR
-                       "%s: crypto_ablkcipher_encrypt() returned %d\n",
+                       "%s: crypto_skcipher_encrypt() returned %d\n",
                        __func__, res);
                return res;
        }
index ab377d496a39ad47924a3f79b48e1aad7538f2d2..16aec6653291a852fbceb82502c24d288b70db1f 100644 (file)
  *
  * This has not yet undergone a rigorous security audit.
  */
-#include <crypto/hash.h>
-#include <crypto/sha.h>
+#include <crypto/skcipher.h>
 #include <keys/encrypted-type.h>
 #include <keys/user-type.h>
-#include <linux/crypto.h>
 #include <linux/gfp.h>
 #include <linux/kernel.h>
 #include <linux/key.h>
@@ -70,10 +68,10 @@ static int f2fs_fname_encrypt(struct inode *inode,
                        const struct qstr *iname, struct f2fs_str *oname)
 {
        u32 ciphertext_len;
-       struct ablkcipher_request *req = NULL;
+       struct skcipher_request *req = NULL;
        DECLARE_F2FS_COMPLETION_RESULT(ecr);
        struct f2fs_crypt_info *ci = F2FS_I(inode)->i_crypt_info;
-       struct crypto_ablkcipher *tfm = ci->ci_ctfm;
+       struct crypto_skcipher *tfm = ci->ci_ctfm;
        int res = 0;
        char iv[F2FS_CRYPTO_BLOCK_SIZE];
        struct scatterlist src_sg, dst_sg;
@@ -99,14 +97,14 @@ static int f2fs_fname_encrypt(struct inode *inode,
        }
 
        /* Allocate request */
-       req = ablkcipher_request_alloc(tfm, GFP_NOFS);
+       req = skcipher_request_alloc(tfm, GFP_NOFS);
        if (!req) {
                printk_ratelimited(KERN_ERR
                        "%s: crypto_request_alloc() failed\n", __func__);
                kfree(alloc_buf);
                return -ENOMEM;
        }
-       ablkcipher_request_set_callback(req,
+       skcipher_request_set_callback(req,
                        CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
                        f2fs_dir_crypt_complete, &ecr);
 
@@ -121,15 +119,15 @@ static int f2fs_fname_encrypt(struct inode *inode,
        /* Create encryption request */
        sg_init_one(&src_sg, workbuf, ciphertext_len);
        sg_init_one(&dst_sg, oname->name, ciphertext_len);
-       ablkcipher_request_set_crypt(req, &src_sg, &dst_sg, ciphertext_len, iv);
-       res = crypto_ablkcipher_encrypt(req);
+       skcipher_request_set_crypt(req, &src_sg, &dst_sg, ciphertext_len, iv);
+       res = crypto_skcipher_encrypt(req);
        if (res == -EINPROGRESS || res == -EBUSY) {
                BUG_ON(req->base.data != &ecr);
                wait_for_completion(&ecr.completion);
                res = ecr.res;
        }
        kfree(alloc_buf);
-       ablkcipher_request_free(req);
+       skcipher_request_free(req);
        if (res < 0) {
                printk_ratelimited(KERN_ERR
                                "%s: Error (error code %d)\n", __func__, res);
@@ -148,11 +146,11 @@ static int f2fs_fname_encrypt(struct inode *inode,
 static int f2fs_fname_decrypt(struct inode *inode,
                        const struct f2fs_str *iname, struct f2fs_str *oname)
 {
-       struct ablkcipher_request *req = NULL;
+       struct skcipher_request *req = NULL;
        DECLARE_F2FS_COMPLETION_RESULT(ecr);
        struct scatterlist src_sg, dst_sg;
        struct f2fs_crypt_info *ci = F2FS_I(inode)->i_crypt_info;
-       struct crypto_ablkcipher *tfm = ci->ci_ctfm;
+       struct crypto_skcipher *tfm = ci->ci_ctfm;
        int res = 0;
        char iv[F2FS_CRYPTO_BLOCK_SIZE];
        unsigned lim = max_name_len(inode);
@@ -161,13 +159,13 @@ static int f2fs_fname_decrypt(struct inode *inode,
                return -EIO;
 
        /* Allocate request */
-       req = ablkcipher_request_alloc(tfm, GFP_NOFS);
+       req = skcipher_request_alloc(tfm, GFP_NOFS);
        if (!req) {
                printk_ratelimited(KERN_ERR
                        "%s: crypto_request_alloc() failed\n",  __func__);
                return -ENOMEM;
        }
-       ablkcipher_request_set_callback(req,
+       skcipher_request_set_callback(req,
                CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
                f2fs_dir_crypt_complete, &ecr);
 
@@ -177,14 +175,14 @@ static int f2fs_fname_decrypt(struct inode *inode,
        /* Create decryption request */
        sg_init_one(&src_sg, iname->name, iname->len);
        sg_init_one(&dst_sg, oname->name, oname->len);
-       ablkcipher_request_set_crypt(req, &src_sg, &dst_sg, iname->len, iv);
-       res = crypto_ablkcipher_decrypt(req);
+       skcipher_request_set_crypt(req, &src_sg, &dst_sg, iname->len, iv);
+       res = crypto_skcipher_decrypt(req);
        if (res == -EINPROGRESS || res == -EBUSY) {
                BUG_ON(req->base.data != &ecr);
                wait_for_completion(&ecr.completion);
                res = ecr.res;
        }
-       ablkcipher_request_free(req);
+       skcipher_request_free(req);
        if (res < 0) {
                printk_ratelimited(KERN_ERR
                        "%s: Error in f2fs_fname_decrypt (error code %d)\n",
index 5de2d866a25c282fecb2bdf4592bc53fafa0ca85..2aeb6273bd8f21fc217a6dfc64d1ef52ade44baa 100644 (file)
@@ -14,7 +14,7 @@
 #include <linux/random.h>
 #include <linux/scatterlist.h>
 #include <uapi/linux/keyctl.h>
-#include <crypto/hash.h>
+#include <crypto/skcipher.h>
 #include <linux/f2fs_fs.h>
 
 #include "f2fs.h"
@@ -44,46 +44,43 @@ static int f2fs_derive_key_aes(char deriving_key[F2FS_AES_128_ECB_KEY_SIZE],
                                char derived_key[F2FS_AES_256_XTS_KEY_SIZE])
 {
        int res = 0;
-       struct ablkcipher_request *req = NULL;
+       struct skcipher_request *req = NULL;
        DECLARE_F2FS_COMPLETION_RESULT(ecr);
        struct scatterlist src_sg, dst_sg;
-       struct crypto_ablkcipher *tfm = crypto_alloc_ablkcipher("ecb(aes)", 0,
-                                                               0);
+       struct crypto_skcipher *tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
 
        if (IS_ERR(tfm)) {
                res = PTR_ERR(tfm);
                tfm = NULL;
                goto out;
        }
-       crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);
-       req = ablkcipher_request_alloc(tfm, GFP_NOFS);
+       crypto_skcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);
+       req = skcipher_request_alloc(tfm, GFP_NOFS);
        if (!req) {
                res = -ENOMEM;
                goto out;
        }
-       ablkcipher_request_set_callback(req,
+       skcipher_request_set_callback(req,
                        CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
                        derive_crypt_complete, &ecr);
-       res = crypto_ablkcipher_setkey(tfm, deriving_key,
+       res = crypto_skcipher_setkey(tfm, deriving_key,
                                F2FS_AES_128_ECB_KEY_SIZE);
        if (res < 0)
                goto out;
 
        sg_init_one(&src_sg, source_key, F2FS_AES_256_XTS_KEY_SIZE);
        sg_init_one(&dst_sg, derived_key, F2FS_AES_256_XTS_KEY_SIZE);
-       ablkcipher_request_set_crypt(req, &src_sg, &dst_sg,
+       skcipher_request_set_crypt(req, &src_sg, &dst_sg,
                                        F2FS_AES_256_XTS_KEY_SIZE, NULL);
-       res = crypto_ablkcipher_encrypt(req);
+       res = crypto_skcipher_encrypt(req);
        if (res == -EINPROGRESS || res == -EBUSY) {
                BUG_ON(req->base.data != &ecr);
                wait_for_completion(&ecr.completion);
                res = ecr.res;
        }
 out:
-       if (req)
-               ablkcipher_request_free(req);
-       if (tfm)
-               crypto_free_ablkcipher(tfm);
+       skcipher_request_free(req);
+       crypto_free_skcipher(tfm);
        return res;
 }
 
@@ -93,7 +90,7 @@ static void f2fs_free_crypt_info(struct f2fs_crypt_info *ci)
                return;
 
        key_put(ci->ci_keyring_key);
-       crypto_free_ablkcipher(ci->ci_ctfm);
+       crypto_free_skcipher(ci->ci_ctfm);
        kmem_cache_free(f2fs_crypt_info_cachep, ci);
 }
 
@@ -123,7 +120,7 @@ int _f2fs_get_encryption_info(struct inode *inode)
        struct f2fs_encryption_key *master_key;
        struct f2fs_encryption_context ctx;
        const struct user_key_payload *ukp;
-       struct crypto_ablkcipher *ctfm;
+       struct crypto_skcipher *ctfm;
        const char *cipher_str;
        char raw_key[F2FS_MAX_KEY_SIZE];
        char mode;
@@ -213,7 +210,7 @@ retry:
        if (res)
                goto out;
 
-       ctfm = crypto_alloc_ablkcipher(cipher_str, 0, 0);
+       ctfm = crypto_alloc_skcipher(cipher_str, 0, 0);
        if (!ctfm || IS_ERR(ctfm)) {
                res = ctfm ? PTR_ERR(ctfm) : -ENOMEM;
                printk(KERN_DEBUG
@@ -222,11 +219,10 @@ retry:
                goto out;
        }
        crypt_info->ci_ctfm = ctfm;
-       crypto_ablkcipher_clear_flags(ctfm, ~0);
-       crypto_tfm_set_flags(crypto_ablkcipher_tfm(ctfm),
-                            CRYPTO_TFM_REQ_WEAK_KEY);
-       res = crypto_ablkcipher_setkey(ctfm, raw_key,
-                                       f2fs_encryption_key_size(mode));
+       crypto_skcipher_clear_flags(ctfm, ~0);
+       crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_REQ_WEAK_KEY);
+       res = crypto_skcipher_setkey(ctfm, raw_key,
+                                    f2fs_encryption_key_size(mode));
        if (res)
                goto out;
 
index c2c1c2b63b25529cfd9cda4bcfb82de77f99f156..ea3d1d7c97f3c3bd3f510b19a8f7b4828c964e32 100644 (file)
@@ -78,7 +78,7 @@ struct f2fs_crypt_info {
        char            ci_data_mode;
        char            ci_filename_mode;
        char            ci_flags;
-       struct crypto_ablkcipher *ci_ctfm;
+       struct crypto_skcipher *ci_ctfm;
        struct key      *ci_keyring_key;
        char            ci_master_key[F2FS_KEY_DESCRIPTOR_SIZE];
 };
index dc8ebecf561866a2d6fcee729813528852be6313..195fe2668207a2bdaf689c8e93296503facec806 100644 (file)
 *
 */
 
+#include <crypto/hash.h>
 #include <linux/file.h>
 #include <linux/slab.h>
 #include <linux/namei.h>
-#include <linux/crypto.h>
 #include <linux/sched.h>
 #include <linux/fs.h>
 #include <linux/module.h>
@@ -104,29 +104,35 @@ static int
 nfs4_make_rec_clidname(char *dname, const struct xdr_netobj *clname)
 {
        struct xdr_netobj cksum;
-       struct hash_desc desc;
-       struct scatterlist sg;
+       struct crypto_shash *tfm;
        int status;
 
        dprintk("NFSD: nfs4_make_rec_clidname for %.*s\n",
                        clname->len, clname->data);
-       desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
-       desc.tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
-       if (IS_ERR(desc.tfm)) {
-               status = PTR_ERR(desc.tfm);
+       tfm = crypto_alloc_shash("md5", 0, 0);
+       if (IS_ERR(tfm)) {
+               status = PTR_ERR(tfm);
                goto out_no_tfm;
        }
 
-       cksum.len = crypto_hash_digestsize(desc.tfm);
+       cksum.len = crypto_shash_digestsize(tfm);
        cksum.data = kmalloc(cksum.len, GFP_KERNEL);
        if (cksum.data == NULL) {
                status = -ENOMEM;
                goto out;
        }
 
-       sg_init_one(&sg, clname->data, clname->len);
+       {
+               SHASH_DESC_ON_STACK(desc, tfm);
+
+               desc->tfm = tfm;
+               desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+               status = crypto_shash_digest(desc, clname->data, clname->len,
+                                            cksum.data);
+               shash_desc_zero(desc);
+       }
 
-       status = crypto_hash_digest(&desc, &sg, sg.length, cksum.data);
        if (status)
                goto out;
 
@@ -135,7 +141,7 @@ nfs4_make_rec_clidname(char *dname, const struct xdr_netobj *clname)
        status = 0;
 out:
        kfree(cksum.data);
-       crypto_free_hash(desc.tfm);
+       crypto_free_shash(tfm);
 out_no_tfm:
        return status;
 }
index c9fe145f7dd3bad3af8cd7902accefbc8c7b5366..eeafd21afb446ddf9e6607d86e5faea394ced07f 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/crypto.h>
 #include <linux/list.h>
 #include <linux/kernel.h>
+#include <linux/kthread.h>
 #include <linux/skbuff.h>
 
 struct crypto_aead;
@@ -128,6 +129,75 @@ struct ablkcipher_walk {
        unsigned int            blocksize;
 };
 
+#define ENGINE_NAME_LEN        30
+/*
+ * struct crypto_engine - crypto hardware engine
+ * @name: the engine name
+ * @idling: the engine is entering idle state
+ * @busy: request pump is busy
+ * @running: the engine is on working
+ * @cur_req_prepared: current request is prepared
+ * @list: link with the global crypto engine list
+ * @queue_lock: spinlock to syncronise access to request queue
+ * @queue: the crypto queue of the engine
+ * @rt: whether this queue is set to run as a realtime task
+ * @prepare_crypt_hardware: a request will soon arrive from the queue
+ * so the subsystem requests the driver to prepare the hardware
+ * by issuing this call
+ * @unprepare_crypt_hardware: there are currently no more requests on the
+ * queue so the subsystem notifies the driver that it may relax the
+ * hardware by issuing this call
+ * @prepare_request: do some prepare if need before handle the current request
+ * @unprepare_request: undo any work done by prepare_message()
+ * @crypt_one_request: do encryption for current request
+ * @kworker: thread struct for request pump
+ * @kworker_task: pointer to task for request pump kworker thread
+ * @pump_requests: work struct for scheduling work to the request pump
+ * @priv_data: the engine private data
+ * @cur_req: the current request which is on processing
+ */
+struct crypto_engine {
+       char                    name[ENGINE_NAME_LEN];
+       bool                    idling;
+       bool                    busy;
+       bool                    running;
+       bool                    cur_req_prepared;
+
+       struct list_head        list;
+       spinlock_t              queue_lock;
+       struct crypto_queue     queue;
+
+       bool                    rt;
+
+       int (*prepare_crypt_hardware)(struct crypto_engine *engine);
+       int (*unprepare_crypt_hardware)(struct crypto_engine *engine);
+
+       int (*prepare_request)(struct crypto_engine *engine,
+                              struct ablkcipher_request *req);
+       int (*unprepare_request)(struct crypto_engine *engine,
+                                struct ablkcipher_request *req);
+       int (*crypt_one_request)(struct crypto_engine *engine,
+                                struct ablkcipher_request *req);
+
+       struct kthread_worker           kworker;
+       struct task_struct              *kworker_task;
+       struct kthread_work             pump_requests;
+
+       void                            *priv_data;
+       struct ablkcipher_request       *cur_req;
+};
+
+int crypto_transfer_request(struct crypto_engine *engine,
+                           struct ablkcipher_request *req, bool need_pump);
+int crypto_transfer_request_to_engine(struct crypto_engine *engine,
+                                     struct ablkcipher_request *req);
+void crypto_finalize_request(struct crypto_engine *engine,
+                            struct ablkcipher_request *req, int err);
+int crypto_engine_start(struct crypto_engine *engine);
+int crypto_engine_stop(struct crypto_engine *engine);
+struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt);
+int crypto_engine_exit(struct crypto_engine *engine);
+
 extern const struct crypto_type crypto_ablkcipher_type;
 extern const struct crypto_type crypto_blkcipher_type;
 
@@ -184,6 +254,10 @@ int crypto_enqueue_request(struct crypto_queue *queue,
                           struct crypto_async_request *request);
 struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue);
 int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm);
+static inline unsigned int crypto_queue_len(struct crypto_queue *queue)
+{
+       return queue->qlen;
+}
 
 /* These functions require the input/output to be aligned as u32. */
 void crypto_inc(u8 *a, unsigned int size);
@@ -275,24 +349,6 @@ static inline struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm)
        return &crypto_cipher_tfm(tfm)->__crt_alg->cra_cipher;
 }
 
-static inline struct crypto_hash *crypto_spawn_hash(struct crypto_spawn *spawn)
-{
-       u32 type = CRYPTO_ALG_TYPE_HASH;
-       u32 mask = CRYPTO_ALG_TYPE_HASH_MASK;
-
-       return __crypto_hash_cast(crypto_spawn_tfm(spawn, type, mask));
-}
-
-static inline void *crypto_hash_ctx(struct crypto_hash *tfm)
-{
-       return crypto_tfm_ctx(&tfm->base);
-}
-
-static inline void *crypto_hash_ctx_aligned(struct crypto_hash *tfm)
-{
-       return crypto_tfm_ctx_aligned(&tfm->base);
-}
-
 static inline void blkcipher_walk_init(struct blkcipher_walk *walk,
                                       struct scatterlist *dst,
                                       struct scatterlist *src,
diff --git a/include/crypto/compress.h b/include/crypto/compress.h
deleted file mode 100644 (file)
index 5b67af8..0000000
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- * Compress: Compression algorithms under the cryptographic API.
- *
- * Copyright 2008 Sony Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.
- * If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef _CRYPTO_COMPRESS_H
-#define _CRYPTO_COMPRESS_H
-
-#include <linux/crypto.h>
-
-
-struct comp_request {
-       const void *next_in;            /* next input byte */
-       void *next_out;                 /* next output byte */
-       unsigned int avail_in;          /* bytes available at next_in */
-       unsigned int avail_out;         /* bytes available at next_out */
-};
-
-enum zlib_comp_params {
-       ZLIB_COMP_LEVEL = 1,            /* e.g. Z_DEFAULT_COMPRESSION */
-       ZLIB_COMP_METHOD,               /* e.g. Z_DEFLATED */
-       ZLIB_COMP_WINDOWBITS,           /* e.g. MAX_WBITS */
-       ZLIB_COMP_MEMLEVEL,             /* e.g. DEF_MEM_LEVEL */
-       ZLIB_COMP_STRATEGY,             /* e.g. Z_DEFAULT_STRATEGY */
-       __ZLIB_COMP_MAX,
-};
-
-#define ZLIB_COMP_MAX  (__ZLIB_COMP_MAX - 1)
-
-
-enum zlib_decomp_params {
-       ZLIB_DECOMP_WINDOWBITS = 1,     /* e.g. DEF_WBITS */
-       __ZLIB_DECOMP_MAX,
-};
-
-#define ZLIB_DECOMP_MAX        (__ZLIB_DECOMP_MAX - 1)
-
-
-struct crypto_pcomp {
-       struct crypto_tfm base;
-};
-
-struct pcomp_alg {
-       int (*compress_setup)(struct crypto_pcomp *tfm, const void *params,
-                             unsigned int len);
-       int (*compress_init)(struct crypto_pcomp *tfm);
-       int (*compress_update)(struct crypto_pcomp *tfm,
-                              struct comp_request *req);
-       int (*compress_final)(struct crypto_pcomp *tfm,
-                             struct comp_request *req);
-       int (*decompress_setup)(struct crypto_pcomp *tfm, const void *params,
-                               unsigned int len);
-       int (*decompress_init)(struct crypto_pcomp *tfm);
-       int (*decompress_update)(struct crypto_pcomp *tfm,
-                                struct comp_request *req);
-       int (*decompress_final)(struct crypto_pcomp *tfm,
-                               struct comp_request *req);
-
-       struct crypto_alg base;
-};
-
-extern struct crypto_pcomp *crypto_alloc_pcomp(const char *alg_name, u32 type,
-                                              u32 mask);
-
-static inline struct crypto_tfm *crypto_pcomp_tfm(struct crypto_pcomp *tfm)
-{
-       return &tfm->base;
-}
-
-static inline void crypto_free_pcomp(struct crypto_pcomp *tfm)
-{
-       crypto_destroy_tfm(tfm, crypto_pcomp_tfm(tfm));
-}
-
-static inline struct pcomp_alg *__crypto_pcomp_alg(struct crypto_alg *alg)
-{
-       return container_of(alg, struct pcomp_alg, base);
-}
-
-static inline struct pcomp_alg *crypto_pcomp_alg(struct crypto_pcomp *tfm)
-{
-       return __crypto_pcomp_alg(crypto_pcomp_tfm(tfm)->__crt_alg);
-}
-
-static inline int crypto_compress_setup(struct crypto_pcomp *tfm,
-                                       const void *params, unsigned int len)
-{
-       return crypto_pcomp_alg(tfm)->compress_setup(tfm, params, len);
-}
-
-static inline int crypto_compress_init(struct crypto_pcomp *tfm)
-{
-       return crypto_pcomp_alg(tfm)->compress_init(tfm);
-}
-
-static inline int crypto_compress_update(struct crypto_pcomp *tfm,
-                                        struct comp_request *req)
-{
-       return crypto_pcomp_alg(tfm)->compress_update(tfm, req);
-}
-
-static inline int crypto_compress_final(struct crypto_pcomp *tfm,
-                                       struct comp_request *req)
-{
-       return crypto_pcomp_alg(tfm)->compress_final(tfm, req);
-}
-
-static inline int crypto_decompress_setup(struct crypto_pcomp *tfm,
-                                         const void *params, unsigned int len)
-{
-       return crypto_pcomp_alg(tfm)->decompress_setup(tfm, params, len);
-}
-
-static inline int crypto_decompress_init(struct crypto_pcomp *tfm)
-{
-       return crypto_pcomp_alg(tfm)->decompress_init(tfm);
-}
-
-static inline int crypto_decompress_update(struct crypto_pcomp *tfm,
-                                          struct comp_request *req)
-{
-       return crypto_pcomp_alg(tfm)->decompress_update(tfm, req);
-}
-
-static inline int crypto_decompress_final(struct crypto_pcomp *tfm,
-                                         struct comp_request *req)
-{
-       return crypto_pcomp_alg(tfm)->decompress_final(tfm, req);
-}
-
-#endif /* _CRYPTO_COMPRESS_H */
index 9756c70899d8195eb54f7349efb911e8604d6222..d961b2b16f5593e695c353aa055595145fba8413 100644 (file)
@@ -117,10 +117,6 @@ struct drbg_state {
        void *priv_data;        /* Cipher handle */
        bool seeded;            /* DRBG fully seeded? */
        bool pr;                /* Prediction resistance enabled? */
-#ifdef CONFIG_CRYPTO_FIPS
-       bool fips_primed;       /* Continuous test primed? */
-       unsigned char *prev;    /* FIPS 140-2 continuous test value */
-#endif
        struct work_struct seed_work;   /* asynchronous seeding support */
        struct crypto_rng *jent;
        const struct drbg_state_ops *d_ops;
index 6361892ea737137899c5d9489634446cb3bdedac..1969f1416658babc28dd6d2a75363b93da36ac63 100644 (file)
@@ -14,6 +14,7 @@
 #define _CRYPTO_HASH_H
 
 #include <linux/crypto.h>
+#include <linux/string.h>
 
 struct crypto_ahash;
 
@@ -259,6 +260,28 @@ static inline void crypto_free_ahash(struct crypto_ahash *tfm)
        crypto_destroy_tfm(tfm, crypto_ahash_tfm(tfm));
 }
 
+/**
+ * crypto_has_ahash() - Search for the availability of an ahash.
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ *           ahash
+ * @type: specifies the type of the ahash
+ * @mask: specifies the mask for the ahash
+ *
+ * Return: true when the ahash is known to the kernel crypto API; false
+ *        otherwise
+ */
+int crypto_has_ahash(const char *alg_name, u32 type, u32 mask);
+
+static inline const char *crypto_ahash_alg_name(struct crypto_ahash *tfm)
+{
+       return crypto_tfm_alg_name(crypto_ahash_tfm(tfm));
+}
+
+static inline const char *crypto_ahash_driver_name(struct crypto_ahash *tfm)
+{
+       return crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
+}
+
 static inline unsigned int crypto_ahash_alignmask(
        struct crypto_ahash *tfm)
 {
@@ -550,6 +573,12 @@ static inline void ahash_request_free(struct ahash_request *req)
        kzfree(req);
 }
 
+static inline void ahash_request_zero(struct ahash_request *req)
+{
+       memzero_explicit(req, sizeof(*req) +
+                             crypto_ahash_reqsize(crypto_ahash_reqtfm(req)));
+}
+
 static inline struct ahash_request *ahash_request_cast(
        struct crypto_async_request *req)
 {
@@ -657,6 +686,16 @@ static inline void crypto_free_shash(struct crypto_shash *tfm)
        crypto_destroy_tfm(tfm, crypto_shash_tfm(tfm));
 }
 
+static inline const char *crypto_shash_alg_name(struct crypto_shash *tfm)
+{
+       return crypto_tfm_alg_name(crypto_shash_tfm(tfm));
+}
+
+static inline const char *crypto_shash_driver_name(struct crypto_shash *tfm)
+{
+       return crypto_tfm_alg_driver_name(crypto_shash_tfm(tfm));
+}
+
 static inline unsigned int crypto_shash_alignmask(
        struct crypto_shash *tfm)
 {
@@ -872,4 +911,10 @@ int crypto_shash_final(struct shash_desc *desc, u8 *out);
 int crypto_shash_finup(struct shash_desc *desc, const u8 *data,
                       unsigned int len, u8 *out);
 
+static inline void shash_desc_zero(struct shash_desc *desc)
+{
+       memzero_explicit(desc,
+                        sizeof(*desc) + crypto_shash_descsize(desc->tfm));
+}
+
 #endif /* _CRYPTO_HASH_H */
index 5554cdd8d6c17344f049f138e57027defc050dc1..da3864991d4c50b33a4145c57444d8a70a0db14a 100644 (file)
@@ -80,6 +80,12 @@ static inline u32 aead_request_flags(struct aead_request *req)
        return req->base.flags;
 }
 
+static inline struct aead_request *aead_request_cast(
+       struct crypto_async_request *req)
+{
+       return container_of(req, struct aead_request, base);
+}
+
 static inline void crypto_set_aead_spawn(
        struct crypto_aead_spawn *spawn, struct crypto_instance *inst)
 {
diff --git a/include/crypto/internal/compress.h b/include/crypto/internal/compress.h
deleted file mode 100644 (file)
index 178a888..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Compress: Compression algorithms under the cryptographic API.
- *
- * Copyright 2008 Sony Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.
- * If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef _CRYPTO_INTERNAL_COMPRESS_H
-#define _CRYPTO_INTERNAL_COMPRESS_H
-
-#include <crypto/compress.h>
-
-extern int crypto_register_pcomp(struct pcomp_alg *alg);
-extern int crypto_unregister_pcomp(struct pcomp_alg *alg);
-
-#endif /* _CRYPTO_INTERNAL_COMPRESS_H */
index 3b4af1d7c7e91ce7482814955ed449b4e6954253..49dae16f8929deec7e34adb3f66ef6b04767863d 100644 (file)
@@ -57,9 +57,6 @@ int crypto_hash_walk_first(struct ahash_request *req,
                           struct crypto_hash_walk *walk);
 int crypto_ahash_walk_first(struct ahash_request *req,
                           struct crypto_hash_walk *walk);
-int crypto_hash_walk_first_compat(struct hash_desc *hdesc,
-                                 struct crypto_hash_walk *walk,
-                                 struct scatterlist *sg, unsigned int len);
 
 static inline int crypto_ahash_walk_done(struct crypto_hash_walk *walk,
                                         int err)
index fd8742a40ff3e93476ec3b8cdb1370200275b6d3..905490c1da89b30ab1cf05b7a4aeaf1962e4e6ce 100644 (file)
@@ -60,8 +60,7 @@ struct crypto_skcipher {
 
        unsigned int ivsize;
        unsigned int reqsize;
-
-       bool has_setkey;
+       unsigned int keysize;
 
        struct crypto_tfm base;
 };
@@ -232,6 +231,12 @@ static inline int crypto_has_skcipher(const char *alg_name, u32 type,
                              crypto_skcipher_mask(mask));
 }
 
+static inline const char *crypto_skcipher_driver_name(
+       struct crypto_skcipher *tfm)
+{
+       return crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm));
+}
+
 /**
  * crypto_skcipher_ivsize() - obtain IV size
  * @tfm: cipher handle
@@ -309,7 +314,13 @@ static inline int crypto_skcipher_setkey(struct crypto_skcipher *tfm,
 
 static inline bool crypto_skcipher_has_setkey(struct crypto_skcipher *tfm)
 {
-       return tfm->has_setkey;
+       return tfm->keysize;
+}
+
+static inline unsigned int crypto_skcipher_default_keysize(
+       struct crypto_skcipher *tfm)
+{
+       return tfm->keysize;
 }
 
 /**
@@ -440,6 +451,13 @@ static inline void skcipher_request_free(struct skcipher_request *req)
        kzfree(req);
 }
 
+static inline void skcipher_request_zero(struct skcipher_request *req)
+{
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+
+       memzero_explicit(req, sizeof(*req) + crypto_skcipher_reqsize(tfm));
+}
+
 /**
  * skcipher_request_set_callback() - set asynchronous callback function
  * @req: request handle
index e71cb70a1ac2f6eb1faf9023671f469574786c60..99c94899ad0fef73d8c7854c247e5cd109ebe241 100644 (file)
@@ -54,7 +54,6 @@
 #define CRYPTO_ALG_TYPE_AHASH          0x0000000a
 #define CRYPTO_ALG_TYPE_RNG            0x0000000c
 #define CRYPTO_ALG_TYPE_AKCIPHER       0x0000000d
-#define CRYPTO_ALG_TYPE_PCOMPRESS      0x0000000f
 
 #define CRYPTO_ALG_TYPE_HASH_MASK      0x0000000e
 #define CRYPTO_ALG_TYPE_AHASH_MASK     0x0000000c
@@ -137,7 +136,6 @@ struct scatterlist;
 struct crypto_ablkcipher;
 struct crypto_async_request;
 struct crypto_blkcipher;
-struct crypto_hash;
 struct crypto_tfm;
 struct crypto_type;
 struct skcipher_givcrypt_request;
@@ -187,11 +185,6 @@ struct cipher_desc {
        void *info;
 };
 
-struct hash_desc {
-       struct crypto_hash *tfm;
-       u32 flags;
-};
-
 /**
  * DOC: Block Cipher Algorithm Definitions
  *
@@ -519,18 +512,6 @@ struct cipher_tfm {
        void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
 };
 
-struct hash_tfm {
-       int (*init)(struct hash_desc *desc);
-       int (*update)(struct hash_desc *desc,
-                     struct scatterlist *sg, unsigned int nsg);
-       int (*final)(struct hash_desc *desc, u8 *out);
-       int (*digest)(struct hash_desc *desc, struct scatterlist *sg,
-                     unsigned int nsg, u8 *out);
-       int (*setkey)(struct crypto_hash *tfm, const u8 *key,
-                     unsigned int keylen);
-       unsigned int digestsize;
-};
-
 struct compress_tfm {
        int (*cot_compress)(struct crypto_tfm *tfm,
                            const u8 *src, unsigned int slen,
@@ -543,7 +524,6 @@ struct compress_tfm {
 #define crt_ablkcipher crt_u.ablkcipher
 #define crt_blkcipher  crt_u.blkcipher
 #define crt_cipher     crt_u.cipher
-#define crt_hash       crt_u.hash
 #define crt_compress   crt_u.compress
 
 struct crypto_tfm {
@@ -554,7 +534,6 @@ struct crypto_tfm {
                struct ablkcipher_tfm ablkcipher;
                struct blkcipher_tfm blkcipher;
                struct cipher_tfm cipher;
-               struct hash_tfm hash;
                struct compress_tfm compress;
        } crt_u;
 
@@ -581,10 +560,6 @@ struct crypto_comp {
        struct crypto_tfm base;
 };
 
-struct crypto_hash {
-       struct crypto_tfm base;
-};
-
 enum {
        CRYPTOA_UNSPEC,
        CRYPTOA_ALG,
@@ -1577,233 +1552,6 @@ static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
                                                dst, src);
 }
 
-/**
- * DOC: Synchronous Message Digest API
- *
- * The synchronous message digest API is used with the ciphers of type
- * CRYPTO_ALG_TYPE_HASH (listed as type "hash" in /proc/crypto)
- */
-
-static inline struct crypto_hash *__crypto_hash_cast(struct crypto_tfm *tfm)
-{
-       return (struct crypto_hash *)tfm;
-}
-
-static inline struct crypto_hash *crypto_hash_cast(struct crypto_tfm *tfm)
-{
-       BUG_ON((crypto_tfm_alg_type(tfm) ^ CRYPTO_ALG_TYPE_HASH) &
-              CRYPTO_ALG_TYPE_HASH_MASK);
-       return __crypto_hash_cast(tfm);
-}
-
-/**
- * crypto_alloc_hash() - allocate synchronous message digest handle
- * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
- *           message digest cipher
- * @type: specifies the type of the cipher
- * @mask: specifies the mask for the cipher
- *
- * Allocate a cipher handle for a message digest. The returned struct
- * crypto_hash is the cipher handle that is required for any subsequent
- * API invocation for that message digest.
- *
- * Return: allocated cipher handle in case of success; IS_ERR() is true in case
- * of an error, PTR_ERR() returns the error code.
- */
-static inline struct crypto_hash *crypto_alloc_hash(const char *alg_name,
-                                                   u32 type, u32 mask)
-{
-       type &= ~CRYPTO_ALG_TYPE_MASK;
-       mask &= ~CRYPTO_ALG_TYPE_MASK;
-       type |= CRYPTO_ALG_TYPE_HASH;
-       mask |= CRYPTO_ALG_TYPE_HASH_MASK;
-
-       return __crypto_hash_cast(crypto_alloc_base(alg_name, type, mask));
-}
-
-static inline struct crypto_tfm *crypto_hash_tfm(struct crypto_hash *tfm)
-{
-       return &tfm->base;
-}
-
-/**
- * crypto_free_hash() - zeroize and free message digest handle
- * @tfm: cipher handle to be freed
- */
-static inline void crypto_free_hash(struct crypto_hash *tfm)
-{
-       crypto_free_tfm(crypto_hash_tfm(tfm));
-}
-
-/**
- * crypto_has_hash() - Search for the availability of a message digest
- * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
- *           message digest cipher
- * @type: specifies the type of the cipher
- * @mask: specifies the mask for the cipher
- *
- * Return: true when the message digest cipher is known to the kernel crypto
- *        API; false otherwise
- */
-static inline int crypto_has_hash(const char *alg_name, u32 type, u32 mask)
-{
-       type &= ~CRYPTO_ALG_TYPE_MASK;
-       mask &= ~CRYPTO_ALG_TYPE_MASK;
-       type |= CRYPTO_ALG_TYPE_HASH;
-       mask |= CRYPTO_ALG_TYPE_HASH_MASK;
-
-       return crypto_has_alg(alg_name, type, mask);
-}
-
-static inline struct hash_tfm *crypto_hash_crt(struct crypto_hash *tfm)
-{
-       return &crypto_hash_tfm(tfm)->crt_hash;
-}
-
-/**
- * crypto_hash_blocksize() - obtain block size for message digest
- * @tfm: cipher handle
- *
- * The block size for the message digest cipher referenced with the cipher
- * handle is returned.
- *
- * Return: block size of cipher
- */
-static inline unsigned int crypto_hash_blocksize(struct crypto_hash *tfm)
-{
-       return crypto_tfm_alg_blocksize(crypto_hash_tfm(tfm));
-}
-
-static inline unsigned int crypto_hash_alignmask(struct crypto_hash *tfm)
-{
-       return crypto_tfm_alg_alignmask(crypto_hash_tfm(tfm));
-}
-
-/**
- * crypto_hash_digestsize() - obtain message digest size
- * @tfm: cipher handle
- *
- * The size for the message digest created by the message digest cipher
- * referenced with the cipher handle is returned.
- *
- * Return: message digest size
- */
-static inline unsigned int crypto_hash_digestsize(struct crypto_hash *tfm)
-{
-       return crypto_hash_crt(tfm)->digestsize;
-}
-
-static inline u32 crypto_hash_get_flags(struct crypto_hash *tfm)
-{
-       return crypto_tfm_get_flags(crypto_hash_tfm(tfm));
-}
-
-static inline void crypto_hash_set_flags(struct crypto_hash *tfm, u32 flags)
-{
-       crypto_tfm_set_flags(crypto_hash_tfm(tfm), flags);
-}
-
-static inline void crypto_hash_clear_flags(struct crypto_hash *tfm, u32 flags)
-{
-       crypto_tfm_clear_flags(crypto_hash_tfm(tfm), flags);
-}
-
-/**
- * crypto_hash_init() - (re)initialize message digest handle
- * @desc: cipher request handle that to be filled by caller --
- *       desc.tfm is filled with the hash cipher handle;
- *       desc.flags is filled with either CRYPTO_TFM_REQ_MAY_SLEEP or 0.
- *
- * The call (re-)initializes the message digest referenced by the hash cipher
- * request handle. Any potentially existing state created by previous
- * operations is discarded.
- *
- * Return: 0 if the message digest initialization was successful; < 0 if an
- *        error occurred
- */
-static inline int crypto_hash_init(struct hash_desc *desc)
-{
-       return crypto_hash_crt(desc->tfm)->init(desc);
-}
-
-/**
- * crypto_hash_update() - add data to message digest for processing
- * @desc: cipher request handle
- * @sg: scatter / gather list pointing to the data to be added to the message
- *      digest
- * @nbytes: number of bytes to be processed from @sg
- *
- * Updates the message digest state of the cipher handle pointed to by the
- * hash cipher request handle with the input data pointed to by the
- * scatter/gather list.
- *
- * Return: 0 if the message digest update was successful; < 0 if an error
- *        occurred
- */
-static inline int crypto_hash_update(struct hash_desc *desc,
-                                    struct scatterlist *sg,
-                                    unsigned int nbytes)
-{
-       return crypto_hash_crt(desc->tfm)->update(desc, sg, nbytes);
-}
-
-/**
- * crypto_hash_final() - calculate message digest
- * @desc: cipher request handle
- * @out: message digest output buffer -- The caller must ensure that the out
- *      buffer has a sufficient size (e.g. by using the crypto_hash_digestsize
- *      function).
- *
- * Finalize the message digest operation and create the message digest
- * based on all data added to the cipher handle. The message digest is placed
- * into the output buffer.
- *
- * Return: 0 if the message digest creation was successful; < 0 if an error
- *        occurred
- */
-static inline int crypto_hash_final(struct hash_desc *desc, u8 *out)
-{
-       return crypto_hash_crt(desc->tfm)->final(desc, out);
-}
-
-/**
- * crypto_hash_digest() - calculate message digest for a buffer
- * @desc: see crypto_hash_final()
- * @sg: see crypto_hash_update()
- * @nbytes:  see crypto_hash_update()
- * @out: see crypto_hash_final()
- *
- * This function is a "short-hand" for the function calls of crypto_hash_init,
- * crypto_hash_update and crypto_hash_final. The parameters have the same
- * meaning as discussed for those separate three functions.
- *
- * Return: 0 if the message digest creation was successful; < 0 if an error
- *        occurred
- */
-static inline int crypto_hash_digest(struct hash_desc *desc,
-                                    struct scatterlist *sg,
-                                    unsigned int nbytes, u8 *out)
-{
-       return crypto_hash_crt(desc->tfm)->digest(desc, sg, nbytes, out);
-}
-
-/**
- * crypto_hash_setkey() - set key for message digest
- * @hash: cipher handle
- * @key: buffer holding the key
- * @keylen: length of the key in bytes
- *
- * The caller provided key is set for the message digest cipher. The cipher
- * handle must point to a keyed hash in order for this function to succeed.
- *
- * Return: 0 if the setting of the key was successful; < 0 if an error occurred
- */
-static inline int crypto_hash_setkey(struct crypto_hash *hash,
-                                    const u8 *key, unsigned int keylen)
-{
-       return crypto_hash_crt(hash)->setkey(hash, key, keylen);
-}
-
 static inline struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm)
 {
        return (struct crypto_comp *)tfm;
index df02a41884874f68dfb2aded42a2d38658eaff06..7df625d41e35892b5c785faaaa744aa593f0af33 100644 (file)
@@ -36,7 +36,7 @@
  *
  */
 
-#include <linux/crypto.h>
+#include <crypto/skcipher.h>
 #include <linux/sunrpc/auth_gss.h>
 #include <linux/sunrpc/gss_err.h>
 #include <linux/sunrpc/gss_asn1.h>
@@ -71,10 +71,10 @@ struct gss_krb5_enctype {
        const u32               keyed_cksum;    /* is it a keyed cksum? */
        const u32               keybytes;       /* raw key len, in bytes */
        const u32               keylength;      /* final key len, in bytes */
-       u32 (*encrypt) (struct crypto_blkcipher *tfm,
+       u32 (*encrypt) (struct crypto_skcipher *tfm,
                        void *iv, void *in, void *out,
                        int length);            /* encryption function */
-       u32 (*decrypt) (struct crypto_blkcipher *tfm,
+       u32 (*decrypt) (struct crypto_skcipher *tfm,
                        void *iv, void *in, void *out,
                        int length);            /* decryption function */
        u32 (*mk_key) (const struct gss_krb5_enctype *gk5e,
@@ -98,12 +98,12 @@ struct krb5_ctx {
        u32                     enctype;
        u32                     flags;
        const struct gss_krb5_enctype *gk5e; /* enctype-specific info */
-       struct crypto_blkcipher *enc;
-       struct crypto_blkcipher *seq;
-       struct crypto_blkcipher *acceptor_enc;
-       struct crypto_blkcipher *initiator_enc;
-       struct crypto_blkcipher *acceptor_enc_aux;
-       struct crypto_blkcipher *initiator_enc_aux;
+       struct crypto_skcipher  *enc;
+       struct crypto_skcipher  *seq;
+       struct crypto_skcipher *acceptor_enc;
+       struct crypto_skcipher *initiator_enc;
+       struct crypto_skcipher *acceptor_enc_aux;
+       struct crypto_skcipher *initiator_enc_aux;
        u8                      Ksess[GSS_KRB5_MAX_KEYLEN]; /* session key */
        u8                      cksum[GSS_KRB5_MAX_KEYLEN];
        s32                     endtime;
@@ -262,24 +262,24 @@ gss_unwrap_kerberos(struct gss_ctx *ctx_id, int offset,
 
 
 u32
-krb5_encrypt(struct crypto_blkcipher *key,
+krb5_encrypt(struct crypto_skcipher *key,
             void *iv, void *in, void *out, int length);
 
 u32
-krb5_decrypt(struct crypto_blkcipher *key,
+krb5_decrypt(struct crypto_skcipher *key,
             void *iv, void *in, void *out, int length); 
 
 int
-gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *outbuf,
+gss_encrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *outbuf,
                    int offset, struct page **pages);
 
 int
-gss_decrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *inbuf,
+gss_decrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *inbuf,
                    int offset);
 
 s32
 krb5_make_seq_num(struct krb5_ctx *kctx,
-               struct crypto_blkcipher *key,
+               struct crypto_skcipher *key,
                int direction,
                u32 seqnum, unsigned char *cksum, unsigned char *buf);
 
@@ -320,12 +320,12 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset,
 
 int
 krb5_rc4_setup_seq_key(struct krb5_ctx *kctx,
-                      struct crypto_blkcipher *cipher,
+                      struct crypto_skcipher *cipher,
                       unsigned char *cksum);
 
 int
 krb5_rc4_setup_enc_key(struct krb5_ctx *kctx,
-                      struct crypto_blkcipher *cipher,
+                      struct crypto_skcipher *cipher,
                       s32 seqnum);
 void
 gss_krb5_make_confounder(char *p, u32 conflen);
index f2d58aa37a6fef8438d0ea0ea1ebce870a2a31bb..9b9fb122b31f6b78884a0392081a3afceb49337d 100644 (file)
 #define __sctp_auth_h__
 
 #include <linux/list.h>
-#include <linux/crypto.h>
 
 struct sctp_endpoint;
 struct sctp_association;
 struct sctp_authkey;
 struct sctp_hmacalgo;
+struct crypto_shash;
 
 /*
  * Define a generic struct that will hold all the info
@@ -90,7 +90,7 @@ int sctp_auth_asoc_copy_shkeys(const struct sctp_endpoint *ep,
                                struct sctp_association *asoc,
                                gfp_t gfp);
 int sctp_auth_init_hmacs(struct sctp_endpoint *ep, gfp_t gfp);
-void sctp_auth_destroy_hmacs(struct crypto_hash *auth_hmacs[]);
+void sctp_auth_destroy_hmacs(struct crypto_shash *auth_hmacs[]);
 struct sctp_hmac *sctp_auth_get_hmac(__u16 hmac_id);
 struct sctp_hmac *sctp_auth_asoc_get_hmac(const struct sctp_association *asoc);
 void sctp_auth_asoc_set_default_hmac(struct sctp_association *asoc,
index 205630bb5010b8ac76b84651b302e488fc1c76ff..5a57409da37bba7aaa652ec860044db4061ba4a4 100644 (file)
@@ -82,7 +82,7 @@ struct sctp_bind_addr;
 struct sctp_ulpq;
 struct sctp_ep_common;
 struct sctp_ssnmap;
-struct crypto_hash;
+struct crypto_shash;
 
 
 #include <net/sctp/tsnmap.h>
@@ -166,7 +166,7 @@ struct sctp_sock {
        struct sctp_pf *pf;
 
        /* Access to HMAC transform. */
-       struct crypto_hash *hmac;
+       struct crypto_shash *hmac;
        char *sctp_hmac_alg;
 
        /* What is our base endpointer? */
@@ -1234,7 +1234,7 @@ struct sctp_endpoint {
        /* SCTP AUTH: array of the HMACs that will be allocated
         * we need this per association so that we don't serialize
         */
-       struct crypto_hash **auth_hmacs;
+       struct crypto_shash **auth_hmacs;
 
        /* SCTP-AUTH: hmacs for the endpoint encoded into parameter */
         struct sctp_hmac_algo_param *auth_hmacs_list;
index e90db854680602414d9db5e1e93ee010adb552e4..cb8d30c20ef3efa694540d0bf92fd3e04369b34f 100644 (file)
@@ -27,7 +27,6 @@
 #include <linux/cache.h>
 #include <linux/percpu.h>
 #include <linux/skbuff.h>
-#include <linux/crypto.h>
 #include <linux/cryptohash.h>
 #include <linux/kref.h>
 #include <linux/ktime.h>
@@ -1320,9 +1319,6 @@ static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
        tp->retransmit_skb_hint = NULL;
 }
 
-/* MD5 Signature */
-struct crypto_hash;
-
 union tcp_md5_addr {
        struct in_addr  a4;
 #if IS_ENABLED(CONFIG_IPV6)
@@ -1371,7 +1367,7 @@ union tcp_md5sum_block {
 
 /* - pool: digest algorithm, hash description and scratch buffer */
 struct tcp_md5sig_pool {
-       struct hash_desc        md5_desc;
+       struct ahash_request    *md5_req;
        union tcp_md5sum_block  md5_blk;
 };
 
index 2a7aa75dd00926f6438197c5605553abcda8d4f3..30520d5ee3d14a576eb39ab852b1421d49a4c5b1 100644 (file)
@@ -26,7 +26,7 @@
 struct iscsi_tcp_conn;
 struct iscsi_segment;
 struct sk_buff;
-struct hash_desc;
+struct ahash_request;
 
 typedef int iscsi_segment_done_fn_t(struct iscsi_tcp_conn *,
                                    struct iscsi_segment *);
@@ -38,7 +38,7 @@ struct iscsi_segment {
        unsigned int            total_size;
        unsigned int            total_copied;
 
-       struct hash_desc        *hash;
+       struct ahash_request    *hash;
        unsigned char           padbuf[ISCSI_PAD_LEN];
        unsigned char           recv_digest[ISCSI_DIGEST_SIZE];
        unsigned char           digest[ISCSI_DIGEST_SIZE];
@@ -73,7 +73,7 @@ struct iscsi_tcp_conn {
        /* control data */
        struct iscsi_tcp_recv   in;             /* TCP receive context */
        /* CRC32C (Rx) LLD should set this is they do not offload */
-       struct hash_desc        *rx_hash;
+       struct ahash_request    *rx_hash;
 };
 
 struct iscsi_tcp_task {
@@ -111,15 +111,16 @@ extern void iscsi_tcp_segment_unmap(struct iscsi_segment *segment);
 extern void iscsi_segment_init_linear(struct iscsi_segment *segment,
                                      void *data, size_t size,
                                      iscsi_segment_done_fn_t *done,
-                                     struct hash_desc *hash);
+                                     struct ahash_request *hash);
 extern int
 iscsi_segment_seek_sg(struct iscsi_segment *segment,
                      struct scatterlist *sg_list, unsigned int sg_count,
                      unsigned int offset, size_t size,
-                     iscsi_segment_done_fn_t *done, struct hash_desc *hash);
+                     iscsi_segment_done_fn_t *done,
+                     struct ahash_request *hash);
 
 /* digest helpers */
-extern void iscsi_tcp_dgst_header(struct hash_desc *hash, const void *hdr,
+extern void iscsi_tcp_dgst_header(struct ahash_request *hash, const void *hdr,
                                  size_t hdrlen,
                                  unsigned char digest[ISCSI_DIGEST_SIZE]);
 extern struct iscsi_cls_conn *
index 373d3342002bfefdc9911f7b3f5ac57af1826f9b..c3371fa548cb9b3b30c563e89de65e9fce7fb585 100644 (file)
@@ -570,8 +570,8 @@ struct iscsi_conn {
        spinlock_t              response_queue_lock;
        spinlock_t              state_lock;
        /* libcrypto RX and TX contexts for crc32c */
-       struct hash_desc        conn_rx_hash;
-       struct hash_desc        conn_tx_hash;
+       struct ahash_request    *conn_rx_hash;
+       struct ahash_request    *conn_tx_hash;
        /* Used for scheduling TX and RX connection kthreads */
        cpumask_var_t           conn_cpumask;
        unsigned int            conn_rx_reset_cpumask:1;
index 4b175df35184b08b5695d966f66b129148c6c011..50976a6481f3cd21e33e5ee75d74dacc06470584 100644 (file)
 */
 
 #include <linux/debugfs.h>
-#include <linux/crypto.h>
 #include <linux/scatterlist.h>
 #include <crypto/b128ops.h>
+#include <crypto/hash.h>
+#include <crypto/skcipher.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
@@ -87,8 +88,8 @@ struct smp_dev {
        u8                      min_key_size;
        u8                      max_key_size;
 
-       struct crypto_blkcipher *tfm_aes;
-       struct crypto_hash      *tfm_cmac;
+       struct crypto_skcipher  *tfm_aes;
+       struct crypto_shash     *tfm_cmac;
 };
 
 struct smp_chan {
@@ -126,8 +127,8 @@ struct smp_chan {
        u8                      dhkey[32];
        u8                      mackey[16];
 
-       struct crypto_blkcipher *tfm_aes;
-       struct crypto_hash      *tfm_cmac;
+       struct crypto_skcipher  *tfm_aes;
+       struct crypto_shash     *tfm_cmac;
 };
 
 /* These debug key values are defined in the SMP section of the core
@@ -165,12 +166,11 @@ static inline void swap_buf(const u8 *src, u8 *dst, size_t len)
  * AES-CMAC, f4, f5, f6, g2 and h6.
  */
 
-static int aes_cmac(struct crypto_hash *tfm, const u8 k[16], const u8 *m,
+static int aes_cmac(struct crypto_shash *tfm, const u8 k[16], const u8 *m,
                    size_t len, u8 mac[16])
 {
        uint8_t tmp[16], mac_msb[16], msg_msb[CMAC_MSG_MAX];
-       struct hash_desc desc;
-       struct scatterlist sg;
+       SHASH_DESC_ON_STACK(desc, tfm);
        int err;
 
        if (len > CMAC_MSG_MAX)
@@ -181,10 +181,8 @@ static int aes_cmac(struct crypto_hash *tfm, const u8 k[16], const u8 *m,
                return -EINVAL;
        }
 
-       desc.tfm = tfm;
-       desc.flags = 0;
-
-       crypto_hash_init(&desc);
+       desc->tfm = tfm;
+       desc->flags = 0;
 
        /* Swap key and message from LSB to MSB */
        swap_buf(k, tmp, 16);
@@ -193,23 +191,16 @@ static int aes_cmac(struct crypto_hash *tfm, const u8 k[16], const u8 *m,
        SMP_DBG("msg (len %zu) %*phN", len, (int) len, m);
        SMP_DBG("key %16phN", k);
 
-       err = crypto_hash_setkey(tfm, tmp, 16);
+       err = crypto_shash_setkey(tfm, tmp, 16);
        if (err) {
                BT_ERR("cipher setkey failed: %d", err);
                return err;
        }
 
-       sg_init_one(&sg, msg_msb, len);
-
-       err = crypto_hash_update(&desc, &sg, len);
+       err = crypto_shash_digest(desc, msg_msb, len, mac_msb);
+       shash_desc_zero(desc);
        if (err) {
-               BT_ERR("Hash update error %d", err);
-               return err;
-       }
-
-       err = crypto_hash_final(&desc, mac_msb);
-       if (err) {
-               BT_ERR("Hash final error %d", err);
+               BT_ERR("Hash computation error %d", err);
                return err;
        }
 
@@ -220,8 +211,8 @@ static int aes_cmac(struct crypto_hash *tfm, const u8 k[16], const u8 *m,
        return 0;
 }
 
-static int smp_f4(struct crypto_hash *tfm_cmac, const u8 u[32], const u8 v[32],
-                 const u8 x[16], u8 z, u8 res[16])
+static int smp_f4(struct crypto_shash *tfm_cmac, const u8 u[32],
+                 const u8 v[32], const u8 x[16], u8 z, u8 res[16])
 {
        u8 m[65];
        int err;
@@ -243,7 +234,7 @@ static int smp_f4(struct crypto_hash *tfm_cmac, const u8 u[32], const u8 v[32],
        return err;
 }
 
-static int smp_f5(struct crypto_hash *tfm_cmac, const u8 w[32],
+static int smp_f5(struct crypto_shash *tfm_cmac, const u8 w[32],
                  const u8 n1[16], const u8 n2[16], const u8 a1[7],
                  const u8 a2[7], u8 mackey[16], u8 ltk[16])
 {
@@ -296,7 +287,7 @@ static int smp_f5(struct crypto_hash *tfm_cmac, const u8 w[32],
        return 0;
 }
 
-static int smp_f6(struct crypto_hash *tfm_cmac, const u8 w[16],
+static int smp_f6(struct crypto_shash *tfm_cmac, const u8 w[16],
                  const u8 n1[16], const u8 n2[16], const u8 r[16],
                  const u8 io_cap[3], const u8 a1[7], const u8 a2[7],
                  u8 res[16])
@@ -324,7 +315,7 @@ static int smp_f6(struct crypto_hash *tfm_cmac, const u8 w[16],
        return err;
 }
 
-static int smp_g2(struct crypto_hash *tfm_cmac, const u8 u[32], const u8 v[32],
+static int smp_g2(struct crypto_shash *tfm_cmac, const u8 u[32], const u8 v[32],
                  const u8 x[16], const u8 y[16], u32 *val)
 {
        u8 m[80], tmp[16];
@@ -350,7 +341,7 @@ static int smp_g2(struct crypto_hash *tfm_cmac, const u8 u[32], const u8 v[32],
        return 0;
 }
 
-static int smp_h6(struct crypto_hash *tfm_cmac, const u8 w[16],
+static int smp_h6(struct crypto_shash *tfm_cmac, const u8 w[16],
                  const u8 key_id[4], u8 res[16])
 {
        int err;
@@ -370,9 +361,9 @@ static int smp_h6(struct crypto_hash *tfm_cmac, const u8 w[16],
  * s1 and ah.
  */
 
-static int smp_e(struct crypto_blkcipher *tfm, const u8 *k, u8 *r)
+static int smp_e(struct crypto_skcipher *tfm, const u8 *k, u8 *r)
 {
-       struct blkcipher_desc desc;
+       SKCIPHER_REQUEST_ON_STACK(req, tfm);
        struct scatterlist sg;
        uint8_t tmp[16], data[16];
        int err;
@@ -384,13 +375,10 @@ static int smp_e(struct crypto_blkcipher *tfm, const u8 *k, u8 *r)
                return -EINVAL;
        }
 
-       desc.tfm = tfm;
-       desc.flags = 0;
-
        /* The most significant octet of key corresponds to k[0] */
        swap_buf(k, tmp, 16);
 
-       err = crypto_blkcipher_setkey(tfm, tmp, 16);
+       err = crypto_skcipher_setkey(tfm, tmp, 16);
        if (err) {
                BT_ERR("cipher setkey failed: %d", err);
                return err;
@@ -401,7 +389,12 @@ static int smp_e(struct crypto_blkcipher *tfm, const u8 *k, u8 *r)
 
        sg_init_one(&sg, data, 16);
 
-       err = crypto_blkcipher_encrypt(&desc, &sg, &sg, 16);
+       skcipher_request_set_tfm(req, tfm);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, &sg, &sg, 16, NULL);
+
+       err = crypto_skcipher_encrypt(req);
+       skcipher_request_zero(req);
        if (err)
                BT_ERR("Encrypt data error %d", err);
 
@@ -413,7 +406,7 @@ static int smp_e(struct crypto_blkcipher *tfm, const u8 *k, u8 *r)
        return err;
 }
 
-static int smp_c1(struct crypto_blkcipher *tfm_aes, const u8 k[16],
+static int smp_c1(struct crypto_skcipher *tfm_aes, const u8 k[16],
                  const u8 r[16], const u8 preq[7], const u8 pres[7], u8 _iat,
                  const bdaddr_t *ia, u8 _rat, const bdaddr_t *ra, u8 res[16])
 {
@@ -462,7 +455,7 @@ static int smp_c1(struct crypto_blkcipher *tfm_aes, const u8 k[16],
        return err;
 }
 
-static int smp_s1(struct crypto_blkcipher *tfm_aes, const u8 k[16],
+static int smp_s1(struct crypto_skcipher *tfm_aes, const u8 k[16],
                  const u8 r1[16], const u8 r2[16], u8 _r[16])
 {
        int err;
@@ -478,7 +471,7 @@ static int smp_s1(struct crypto_blkcipher *tfm_aes, const u8 k[16],
        return err;
 }
 
-static int smp_ah(struct crypto_blkcipher *tfm, const u8 irk[16],
+static int smp_ah(struct crypto_skcipher *tfm, const u8 irk[16],
                  const u8 r[3], u8 res[3])
 {
        u8 _res[16];
@@ -766,8 +759,8 @@ static void smp_chan_destroy(struct l2cap_conn *conn)
        kzfree(smp->slave_csrk);
        kzfree(smp->link_key);
 
-       crypto_free_blkcipher(smp->tfm_aes);
-       crypto_free_hash(smp->tfm_cmac);
+       crypto_free_skcipher(smp->tfm_aes);
+       crypto_free_shash(smp->tfm_cmac);
 
        /* Ensure that we don't leave any debug key around if debug key
         * support hasn't been explicitly enabled.
@@ -1366,17 +1359,17 @@ static struct smp_chan *smp_chan_create(struct l2cap_conn *conn)
        if (!smp)
                return NULL;
 
-       smp->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
+       smp->tfm_aes = crypto_alloc_skcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(smp->tfm_aes)) {
                BT_ERR("Unable to create ECB crypto context");
                kzfree(smp);
                return NULL;
        }
 
-       smp->tfm_cmac = crypto_alloc_hash("cmac(aes)", 0, CRYPTO_ALG_ASYNC);
+       smp->tfm_cmac = crypto_alloc_shash("cmac(aes)", 0, 0);
        if (IS_ERR(smp->tfm_cmac)) {
                BT_ERR("Unable to create CMAC crypto context");
-               crypto_free_blkcipher(smp->tfm_aes);
+               crypto_free_skcipher(smp->tfm_aes);
                kzfree(smp);
                return NULL;
        }
@@ -3127,8 +3120,8 @@ static struct l2cap_chan *smp_add_cid(struct hci_dev *hdev, u16 cid)
 {
        struct l2cap_chan *chan;
        struct smp_dev *smp;
-       struct crypto_blkcipher *tfm_aes;
-       struct crypto_hash *tfm_cmac;
+       struct crypto_skcipher *tfm_aes;
+       struct crypto_shash *tfm_cmac;
 
        if (cid == L2CAP_CID_SMP_BREDR) {
                smp = NULL;
@@ -3139,17 +3132,17 @@ static struct l2cap_chan *smp_add_cid(struct hci_dev *hdev, u16 cid)
        if (!smp)
                return ERR_PTR(-ENOMEM);
 
-       tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
+       tfm_aes = crypto_alloc_skcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm_aes)) {
                BT_ERR("Unable to create ECB crypto context");
                kzfree(smp);
                return ERR_CAST(tfm_aes);
        }
 
-       tfm_cmac = crypto_alloc_hash("cmac(aes)", 0, CRYPTO_ALG_ASYNC);
+       tfm_cmac = crypto_alloc_shash("cmac(aes)", 0, 0);
        if (IS_ERR(tfm_cmac)) {
                BT_ERR("Unable to create CMAC crypto context");
-               crypto_free_blkcipher(tfm_aes);
+               crypto_free_skcipher(tfm_aes);
                kzfree(smp);
                return ERR_CAST(tfm_cmac);
        }
@@ -3163,8 +3156,8 @@ create_chan:
        chan = l2cap_chan_create();
        if (!chan) {
                if (smp) {
-                       crypto_free_blkcipher(smp->tfm_aes);
-                       crypto_free_hash(smp->tfm_cmac);
+                       crypto_free_skcipher(smp->tfm_aes);
+                       crypto_free_shash(smp->tfm_cmac);
                        kzfree(smp);
                }
                return ERR_PTR(-ENOMEM);
@@ -3210,10 +3203,8 @@ static void smp_del_chan(struct l2cap_chan *chan)
        smp = chan->data;
        if (smp) {
                chan->data = NULL;
-               if (smp->tfm_aes)
-                       crypto_free_blkcipher(smp->tfm_aes);
-               if (smp->tfm_cmac)
-                       crypto_free_hash(smp->tfm_cmac);
+               crypto_free_skcipher(smp->tfm_aes);
+               crypto_free_shash(smp->tfm_cmac);
                kzfree(smp);
        }
 
@@ -3449,7 +3440,7 @@ void smp_unregister(struct hci_dev *hdev)
 
 #if IS_ENABLED(CONFIG_BT_SELFTEST_SMP)
 
-static int __init test_ah(struct crypto_blkcipher *tfm_aes)
+static int __init test_ah(struct crypto_skcipher *tfm_aes)
 {
        const u8 irk[16] = {
                        0x9b, 0x7d, 0x39, 0x0a, 0xa6, 0x10, 0x10, 0x34,
@@ -3469,7 +3460,7 @@ static int __init test_ah(struct crypto_blkcipher *tfm_aes)
        return 0;
 }
 
-static int __init test_c1(struct crypto_blkcipher *tfm_aes)
+static int __init test_c1(struct crypto_skcipher *tfm_aes)
 {
        const u8 k[16] = {
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -3499,7 +3490,7 @@ static int __init test_c1(struct crypto_blkcipher *tfm_aes)
        return 0;
 }
 
-static int __init test_s1(struct crypto_blkcipher *tfm_aes)
+static int __init test_s1(struct crypto_skcipher *tfm_aes)
 {
        const u8 k[16] = {
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -3524,7 +3515,7 @@ static int __init test_s1(struct crypto_blkcipher *tfm_aes)
        return 0;
 }
 
-static int __init test_f4(struct crypto_hash *tfm_cmac)
+static int __init test_f4(struct crypto_shash *tfm_cmac)
 {
        const u8 u[32] = {
                        0xe6, 0x9d, 0x35, 0x0e, 0x48, 0x01, 0x03, 0xcc,
@@ -3556,7 +3547,7 @@ static int __init test_f4(struct crypto_hash *tfm_cmac)
        return 0;
 }
 
-static int __init test_f5(struct crypto_hash *tfm_cmac)
+static int __init test_f5(struct crypto_shash *tfm_cmac)
 {
        const u8 w[32] = {
                        0x98, 0xa6, 0xbf, 0x73, 0xf3, 0x34, 0x8d, 0x86,
@@ -3593,7 +3584,7 @@ static int __init test_f5(struct crypto_hash *tfm_cmac)
        return 0;
 }
 
-static int __init test_f6(struct crypto_hash *tfm_cmac)
+static int __init test_f6(struct crypto_shash *tfm_cmac)
 {
        const u8 w[16] = {
                        0x20, 0x6e, 0x63, 0xce, 0x20, 0x6a, 0x3f, 0xfd,
@@ -3626,7 +3617,7 @@ static int __init test_f6(struct crypto_hash *tfm_cmac)
        return 0;
 }
 
-static int __init test_g2(struct crypto_hash *tfm_cmac)
+static int __init test_g2(struct crypto_shash *tfm_cmac)
 {
        const u8 u[32] = {
                        0xe6, 0x9d, 0x35, 0x0e, 0x48, 0x01, 0x03, 0xcc,
@@ -3658,7 +3649,7 @@ static int __init test_g2(struct crypto_hash *tfm_cmac)
        return 0;
 }
 
-static int __init test_h6(struct crypto_hash *tfm_cmac)
+static int __init test_h6(struct crypto_shash *tfm_cmac)
 {
        const u8 w[16] = {
                        0x9b, 0x7d, 0x39, 0x0a, 0xa6, 0x10, 0x10, 0x34,
@@ -3695,8 +3686,8 @@ static const struct file_operations test_smp_fops = {
        .llseek         = default_llseek,
 };
 
-static int __init run_selftests(struct crypto_blkcipher *tfm_aes,
-                               struct crypto_hash *tfm_cmac)
+static int __init run_selftests(struct crypto_skcipher *tfm_aes,
+                               struct crypto_shash *tfm_cmac)
 {
        ktime_t calltime, delta, rettime;
        unsigned long long duration;
@@ -3773,27 +3764,27 @@ done:
 
 int __init bt_selftest_smp(void)
 {
-       struct crypto_blkcipher *tfm_aes;
-       struct crypto_hash *tfm_cmac;
+       struct crypto_skcipher *tfm_aes;
+       struct crypto_shash *tfm_cmac;
        int err;
 
-       tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
+       tfm_aes = crypto_alloc_skcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm_aes)) {
                BT_ERR("Unable to create ECB crypto context");
                return PTR_ERR(tfm_aes);
        }
 
-       tfm_cmac = crypto_alloc_hash("cmac(aes)", 0, CRYPTO_ALG_ASYNC);
+       tfm_cmac = crypto_alloc_shash("cmac(aes)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm_cmac)) {
                BT_ERR("Unable to create CMAC crypto context");
-               crypto_free_blkcipher(tfm_aes);
+               crypto_free_skcipher(tfm_aes);
                return PTR_ERR(tfm_cmac);
        }
 
        err = run_selftests(tfm_aes, tfm_cmac);
 
-       crypto_free_hash(tfm_cmac);
-       crypto_free_blkcipher(tfm_aes);
+       crypto_free_shash(tfm_cmac);
+       crypto_free_skcipher(tfm_aes);
 
        return err;
 }
index 42e8649c6e79b4950531f0027f2e67820377c066..db2847ac5f122988ce1625153fc219e74a3ddb8d 100644 (file)
@@ -4,7 +4,8 @@
 #include <linux/err.h>
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
-#include <crypto/hash.h>
+#include <crypto/aes.h>
+#include <crypto/skcipher.h>
 #include <linux/key-type.h>
 
 #include <keys/ceph-type.h>
@@ -79,9 +80,9 @@ int ceph_crypto_key_unarmor(struct ceph_crypto_key *key, const char *inkey)
        return 0;
 }
 
-static struct crypto_blkcipher *ceph_crypto_alloc_cipher(void)
+static struct crypto_skcipher *ceph_crypto_alloc_cipher(void)
 {
-       return crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
+       return crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
 }
 
 static const u8 *aes_iv = (u8 *)CEPH_AES_IV;
@@ -162,11 +163,10 @@ static int ceph_aes_encrypt(const void *key, int key_len,
 {
        struct scatterlist sg_in[2], prealloc_sg;
        struct sg_table sg_out;
-       struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
-       struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
+       struct crypto_skcipher *tfm = ceph_crypto_alloc_cipher();
+       SKCIPHER_REQUEST_ON_STACK(req, tfm);
        int ret;
-       void *iv;
-       int ivsize;
+       char iv[AES_BLOCK_SIZE];
        size_t zero_padding = (0x10 - (src_len & 0x0f));
        char pad[16];
 
@@ -184,10 +184,13 @@ static int ceph_aes_encrypt(const void *key, int key_len,
        if (ret)
                goto out_tfm;
 
-       crypto_blkcipher_setkey((void *)tfm, key, key_len);
-       iv = crypto_blkcipher_crt(tfm)->iv;
-       ivsize = crypto_blkcipher_ivsize(tfm);
-       memcpy(iv, aes_iv, ivsize);
+       crypto_skcipher_setkey((void *)tfm, key, key_len);
+       memcpy(iv, aes_iv, AES_BLOCK_SIZE);
+
+       skcipher_request_set_tfm(req, tfm);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, sg_in, sg_out.sgl,
+                                  src_len + zero_padding, iv);
 
        /*
        print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1,
@@ -197,8 +200,8 @@ static int ceph_aes_encrypt(const void *key, int key_len,
        print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1,
                        pad, zero_padding, 1);
        */
-       ret = crypto_blkcipher_encrypt(&desc, sg_out.sgl, sg_in,
-                                    src_len + zero_padding);
+       ret = crypto_skcipher_encrypt(req);
+       skcipher_request_zero(req);
        if (ret < 0) {
                pr_err("ceph_aes_crypt failed %d\n", ret);
                goto out_sg;
@@ -211,7 +214,7 @@ static int ceph_aes_encrypt(const void *key, int key_len,
 out_sg:
        teardown_sgtable(&sg_out);
 out_tfm:
-       crypto_free_blkcipher(tfm);
+       crypto_free_skcipher(tfm);
        return ret;
 }
 
@@ -222,11 +225,10 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
 {
        struct scatterlist sg_in[3], prealloc_sg;
        struct sg_table sg_out;
-       struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
-       struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
+       struct crypto_skcipher *tfm = ceph_crypto_alloc_cipher();
+       SKCIPHER_REQUEST_ON_STACK(req, tfm);
        int ret;
-       void *iv;
-       int ivsize;
+       char iv[AES_BLOCK_SIZE];
        size_t zero_padding = (0x10 - ((src1_len + src2_len) & 0x0f));
        char pad[16];
 
@@ -245,10 +247,13 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
        if (ret)
                goto out_tfm;
 
-       crypto_blkcipher_setkey((void *)tfm, key, key_len);
-       iv = crypto_blkcipher_crt(tfm)->iv;
-       ivsize = crypto_blkcipher_ivsize(tfm);
-       memcpy(iv, aes_iv, ivsize);
+       crypto_skcipher_setkey((void *)tfm, key, key_len);
+       memcpy(iv, aes_iv, AES_BLOCK_SIZE);
+
+       skcipher_request_set_tfm(req, tfm);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, sg_in, sg_out.sgl,
+                                  src1_len + src2_len + zero_padding, iv);
 
        /*
        print_hex_dump(KERN_ERR, "enc  key: ", DUMP_PREFIX_NONE, 16, 1,
@@ -260,8 +265,8 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
        print_hex_dump(KERN_ERR, "enc  pad: ", DUMP_PREFIX_NONE, 16, 1,
                        pad, zero_padding, 1);
        */
-       ret = crypto_blkcipher_encrypt(&desc, sg_out.sgl, sg_in,
-                                    src1_len + src2_len + zero_padding);
+       ret = crypto_skcipher_encrypt(req);
+       skcipher_request_zero(req);
        if (ret < 0) {
                pr_err("ceph_aes_crypt2 failed %d\n", ret);
                goto out_sg;
@@ -274,7 +279,7 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
 out_sg:
        teardown_sgtable(&sg_out);
 out_tfm:
-       crypto_free_blkcipher(tfm);
+       crypto_free_skcipher(tfm);
        return ret;
 }
 
@@ -284,11 +289,10 @@ static int ceph_aes_decrypt(const void *key, int key_len,
 {
        struct sg_table sg_in;
        struct scatterlist sg_out[2], prealloc_sg;
-       struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
-       struct blkcipher_desc desc = { .tfm = tfm };
+       struct crypto_skcipher *tfm = ceph_crypto_alloc_cipher();
+       SKCIPHER_REQUEST_ON_STACK(req, tfm);
        char pad[16];
-       void *iv;
-       int ivsize;
+       char iv[AES_BLOCK_SIZE];
        int ret;
        int last_byte;
 
@@ -302,10 +306,13 @@ static int ceph_aes_decrypt(const void *key, int key_len,
        if (ret)
                goto out_tfm;
 
-       crypto_blkcipher_setkey((void *)tfm, key, key_len);
-       iv = crypto_blkcipher_crt(tfm)->iv;
-       ivsize = crypto_blkcipher_ivsize(tfm);
-       memcpy(iv, aes_iv, ivsize);
+       crypto_skcipher_setkey((void *)tfm, key, key_len);
+       memcpy(iv, aes_iv, AES_BLOCK_SIZE);
+
+       skcipher_request_set_tfm(req, tfm);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, sg_in.sgl, sg_out,
+                                  src_len, iv);
 
        /*
        print_hex_dump(KERN_ERR, "dec key: ", DUMP_PREFIX_NONE, 16, 1,
@@ -313,7 +320,8 @@ static int ceph_aes_decrypt(const void *key, int key_len,
        print_hex_dump(KERN_ERR, "dec  in: ", DUMP_PREFIX_NONE, 16, 1,
                       src, src_len, 1);
        */
-       ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in.sgl, src_len);
+       ret = crypto_skcipher_decrypt(req);
+       skcipher_request_zero(req);
        if (ret < 0) {
                pr_err("ceph_aes_decrypt failed %d\n", ret);
                goto out_sg;
@@ -338,7 +346,7 @@ static int ceph_aes_decrypt(const void *key, int key_len,
 out_sg:
        teardown_sgtable(&sg_in);
 out_tfm:
-       crypto_free_blkcipher(tfm);
+       crypto_free_skcipher(tfm);
        return ret;
 }
 
@@ -349,11 +357,10 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
 {
        struct sg_table sg_in;
        struct scatterlist sg_out[3], prealloc_sg;
-       struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
-       struct blkcipher_desc desc = { .tfm = tfm };
+       struct crypto_skcipher *tfm = ceph_crypto_alloc_cipher();
+       SKCIPHER_REQUEST_ON_STACK(req, tfm);
        char pad[16];
-       void *iv;
-       int ivsize;
+       char iv[AES_BLOCK_SIZE];
        int ret;
        int last_byte;
 
@@ -368,10 +375,13 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
        if (ret)
                goto out_tfm;
 
-       crypto_blkcipher_setkey((void *)tfm, key, key_len);
-       iv = crypto_blkcipher_crt(tfm)->iv;
-       ivsize = crypto_blkcipher_ivsize(tfm);
-       memcpy(iv, aes_iv, ivsize);
+       crypto_skcipher_setkey((void *)tfm, key, key_len);
+       memcpy(iv, aes_iv, AES_BLOCK_SIZE);
+
+       skcipher_request_set_tfm(req, tfm);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, sg_in.sgl, sg_out,
+                                  src_len, iv);
 
        /*
        print_hex_dump(KERN_ERR, "dec  key: ", DUMP_PREFIX_NONE, 16, 1,
@@ -379,7 +389,8 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
        print_hex_dump(KERN_ERR, "dec   in: ", DUMP_PREFIX_NONE, 16, 1,
                       src, src_len, 1);
        */
-       ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in.sgl, src_len);
+       ret = crypto_skcipher_decrypt(req);
+       skcipher_request_zero(req);
        if (ret < 0) {
                pr_err("ceph_aes_decrypt failed %d\n", ret);
                goto out_sg;
@@ -415,7 +426,7 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
 out_sg:
        teardown_sgtable(&sg_in);
 out_tfm:
-       crypto_free_blkcipher(tfm);
+       crypto_free_skcipher(tfm);
        return ret;
 }
 
index 56024d17ca516d338166ee7ad85647ecdc973c52..7d233201763bb2c8404ea952a9abb0b39998e788 100644 (file)
 
 #define pr_fmt(fmt) "TCP: " fmt
 
+#include <crypto/hash.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/swap.h>
 #include <linux/cache.h>
 #include <linux/err.h>
-#include <linux/crypto.h>
 #include <linux/time.h>
 #include <linux/slab.h>
 
@@ -2947,17 +2947,26 @@ static bool tcp_md5sig_pool_populated = false;
 
 static void __tcp_alloc_md5sig_pool(void)
 {
+       struct crypto_ahash *hash;
        int cpu;
 
+       hash = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC);
+       if (IS_ERR_OR_NULL(hash))
+               return;
+
        for_each_possible_cpu(cpu) {
-               if (!per_cpu(tcp_md5sig_pool, cpu).md5_desc.tfm) {
-                       struct crypto_hash *hash;
+               struct ahash_request *req;
 
-                       hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
-                       if (IS_ERR_OR_NULL(hash))
-                               return;
-                       per_cpu(tcp_md5sig_pool, cpu).md5_desc.tfm = hash;
-               }
+               if (per_cpu(tcp_md5sig_pool, cpu).md5_req)
+                       continue;
+
+               req = ahash_request_alloc(hash, GFP_KERNEL);
+               if (!req)
+                       return;
+
+               ahash_request_set_callback(req, 0, NULL, NULL);
+
+               per_cpu(tcp_md5sig_pool, cpu).md5_req = req;
        }
        /* before setting tcp_md5sig_pool_populated, we must commit all writes
         * to memory. See smp_rmb() in tcp_get_md5sig_pool()
@@ -3007,7 +3016,6 @@ int tcp_md5_hash_header(struct tcp_md5sig_pool *hp,
 {
        struct scatterlist sg;
        struct tcphdr hdr;
-       int err;
 
        /* We are not allowed to change tcphdr, make a local copy */
        memcpy(&hdr, th, sizeof(hdr));
@@ -3015,8 +3023,8 @@ int tcp_md5_hash_header(struct tcp_md5sig_pool *hp,
 
        /* options aren't included in the hash */
        sg_init_one(&sg, &hdr, sizeof(hdr));
-       err = crypto_hash_update(&hp->md5_desc, &sg, sizeof(hdr));
-       return err;
+       ahash_request_set_crypt(hp->md5_req, &sg, NULL, sizeof(hdr));
+       return crypto_ahash_update(hp->md5_req);
 }
 EXPORT_SYMBOL(tcp_md5_hash_header);
 
@@ -3025,7 +3033,7 @@ int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
 {
        struct scatterlist sg;
        const struct tcphdr *tp = tcp_hdr(skb);
-       struct hash_desc *desc = &hp->md5_desc;
+       struct ahash_request *req = hp->md5_req;
        unsigned int i;
        const unsigned int head_data_len = skb_headlen(skb) > header_len ?
                                           skb_headlen(skb) - header_len : 0;
@@ -3035,7 +3043,8 @@ int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
        sg_init_table(&sg, 1);
 
        sg_set_buf(&sg, ((u8 *) tp) + header_len, head_data_len);
-       if (crypto_hash_update(desc, &sg, head_data_len))
+       ahash_request_set_crypt(req, &sg, NULL, head_data_len);
+       if (crypto_ahash_update(req))
                return 1;
 
        for (i = 0; i < shi->nr_frags; ++i) {
@@ -3045,7 +3054,8 @@ int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
 
                sg_set_page(&sg, page, skb_frag_size(f),
                            offset_in_page(offset));
-               if (crypto_hash_update(desc, &sg, skb_frag_size(f)))
+               ahash_request_set_crypt(req, &sg, NULL, skb_frag_size(f));
+               if (crypto_ahash_update(req))
                        return 1;
        }
 
@@ -3062,7 +3072,8 @@ int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *ke
        struct scatterlist sg;
 
        sg_init_one(&sg, key->key, key->keylen);
-       return crypto_hash_update(&hp->md5_desc, &sg, key->keylen);
+       ahash_request_set_crypt(hp->md5_req, &sg, NULL, key->keylen);
+       return crypto_ahash_update(hp->md5_req);
 }
 EXPORT_SYMBOL(tcp_md5_hash_key);
 
index fdb286ddba04e548eb47c006ac9c334fa21d07ef..279bac6357f4de9c2fa0377929badd4adc03aae8 100644 (file)
@@ -1,3 +1,4 @@
+#include <linux/crypto.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
index 2a562834a01a41c98ec235e43184ca50d0b5d442..e37222e46e0ee6098cfaa86827d848ca2f0a6fb5 100644 (file)
@@ -81,7 +81,7 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 
-#include <linux/crypto.h>
+#include <crypto/hash.h>
 #include <linux/scatterlist.h>
 
 int sysctl_tcp_tw_reuse __read_mostly;
@@ -1038,21 +1038,22 @@ static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
        bp->len = cpu_to_be16(nbytes);
 
        sg_init_one(&sg, bp, sizeof(*bp));
-       return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
+       ahash_request_set_crypt(hp->md5_req, &sg, NULL, sizeof(*bp));
+       return crypto_ahash_update(hp->md5_req);
 }
 
 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
                               __be32 daddr, __be32 saddr, const struct tcphdr *th)
 {
        struct tcp_md5sig_pool *hp;
-       struct hash_desc *desc;
+       struct ahash_request *req;
 
        hp = tcp_get_md5sig_pool();
        if (!hp)
                goto clear_hash_noput;
-       desc = &hp->md5_desc;
+       req = hp->md5_req;
 
-       if (crypto_hash_init(desc))
+       if (crypto_ahash_init(req))
                goto clear_hash;
        if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
                goto clear_hash;
@@ -1060,7 +1061,8 @@ static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
                goto clear_hash;
        if (tcp_md5_hash_key(hp, key))
                goto clear_hash;
-       if (crypto_hash_final(desc, md5_hash))
+       ahash_request_set_crypt(req, NULL, md5_hash, 0);
+       if (crypto_ahash_final(req))
                goto clear_hash;
 
        tcp_put_md5sig_pool();
@@ -1078,7 +1080,7 @@ int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
                        const struct sk_buff *skb)
 {
        struct tcp_md5sig_pool *hp;
-       struct hash_desc *desc;
+       struct ahash_request *req;
        const struct tcphdr *th = tcp_hdr(skb);
        __be32 saddr, daddr;
 
@@ -1094,9 +1096,9 @@ int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
        hp = tcp_get_md5sig_pool();
        if (!hp)
                goto clear_hash_noput;
-       desc = &hp->md5_desc;
+       req = hp->md5_req;
 
-       if (crypto_hash_init(desc))
+       if (crypto_ahash_init(req))
                goto clear_hash;
 
        if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
@@ -1107,7 +1109,8 @@ int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
                goto clear_hash;
        if (tcp_md5_hash_key(hp, key))
                goto clear_hash;
-       if (crypto_hash_final(desc, md5_hash))
+       ahash_request_set_crypt(req, NULL, md5_hash, 0);
+       if (crypto_ahash_final(req))
                goto clear_hash;
 
        tcp_put_md5sig_pool();
index 1a5a70fb85512bb607cc8faefc6799e5340a4064..ad422babc1f50fdfe12c22f7cb1bcb97ced3f2a7 100644 (file)
@@ -66,7 +66,7 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 
-#include <linux/crypto.h>
+#include <crypto/hash.h>
 #include <linux/scatterlist.h>
 
 static void    tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
@@ -541,7 +541,8 @@ static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
        bp->len = cpu_to_be32(nbytes);
 
        sg_init_one(&sg, bp, sizeof(*bp));
-       return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
+       ahash_request_set_crypt(hp->md5_req, &sg, NULL, sizeof(*bp));
+       return crypto_ahash_update(hp->md5_req);
 }
 
 static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
@@ -549,14 +550,14 @@ static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
                               const struct tcphdr *th)
 {
        struct tcp_md5sig_pool *hp;
-       struct hash_desc *desc;
+       struct ahash_request *req;
 
        hp = tcp_get_md5sig_pool();
        if (!hp)
                goto clear_hash_noput;
-       desc = &hp->md5_desc;
+       req = hp->md5_req;
 
-       if (crypto_hash_init(desc))
+       if (crypto_ahash_init(req))
                goto clear_hash;
        if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
                goto clear_hash;
@@ -564,7 +565,8 @@ static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
                goto clear_hash;
        if (tcp_md5_hash_key(hp, key))
                goto clear_hash;
-       if (crypto_hash_final(desc, md5_hash))
+       ahash_request_set_crypt(req, NULL, md5_hash, 0);
+       if (crypto_ahash_final(req))
                goto clear_hash;
 
        tcp_put_md5sig_pool();
@@ -584,7 +586,7 @@ static int tcp_v6_md5_hash_skb(char *md5_hash,
 {
        const struct in6_addr *saddr, *daddr;
        struct tcp_md5sig_pool *hp;
-       struct hash_desc *desc;
+       struct ahash_request *req;
        const struct tcphdr *th = tcp_hdr(skb);
 
        if (sk) { /* valid for establish/request sockets */
@@ -599,9 +601,9 @@ static int tcp_v6_md5_hash_skb(char *md5_hash,
        hp = tcp_get_md5sig_pool();
        if (!hp)
                goto clear_hash_noput;
-       desc = &hp->md5_desc;
+       req = hp->md5_req;
 
-       if (crypto_hash_init(desc))
+       if (crypto_ahash_init(req))
                goto clear_hash;
 
        if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
@@ -612,7 +614,8 @@ static int tcp_v6_md5_hash_skb(char *md5_hash,
                goto clear_hash;
        if (tcp_md5_hash_key(hp, key))
                goto clear_hash;
-       if (crypto_hash_final(desc, md5_hash))
+       ahash_request_set_crypt(req, NULL, md5_hash, 0);
+       if (crypto_ahash_final(req))
                goto clear_hash;
 
        tcp_put_md5sig_pool();
index a13d02b7cee47401357f1fd7ff5e0dc74becc9ad..6a3e1c2181d3a960febf400594d9c60d3c44e700 100644 (file)
@@ -17,9 +17,9 @@
 #include <linux/err.h>
 #include <linux/bug.h>
 #include <linux/completion.h>
-#include <linux/crypto.h>
 #include <linux/ieee802154.h>
 #include <crypto/aead.h>
+#include <crypto/skcipher.h>
 
 #include "ieee802154_i.h"
 #include "llsec.h"
@@ -144,18 +144,18 @@ llsec_key_alloc(const struct ieee802154_llsec_key *template)
                        goto err_tfm;
        }
 
-       key->tfm0 = crypto_alloc_blkcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
+       key->tfm0 = crypto_alloc_skcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(key->tfm0))
                goto err_tfm;
 
-       if (crypto_blkcipher_setkey(key->tfm0, template->key,
-                                   IEEE802154_LLSEC_KEY_SIZE))
+       if (crypto_skcipher_setkey(key->tfm0, template->key,
+                                  IEEE802154_LLSEC_KEY_SIZE))
                goto err_tfm0;
 
        return key;
 
 err_tfm0:
-       crypto_free_blkcipher(key->tfm0);
+       crypto_free_skcipher(key->tfm0);
 err_tfm:
        for (i = 0; i < ARRAY_SIZE(key->tfm); i++)
                if (key->tfm[i])
@@ -175,7 +175,7 @@ static void llsec_key_release(struct kref *ref)
        for (i = 0; i < ARRAY_SIZE(key->tfm); i++)
                crypto_free_aead(key->tfm[i]);
 
-       crypto_free_blkcipher(key->tfm0);
+       crypto_free_skcipher(key->tfm0);
        kzfree(key);
 }
 
@@ -620,15 +620,17 @@ llsec_do_encrypt_unauth(struct sk_buff *skb, const struct mac802154_llsec *sec,
 {
        u8 iv[16];
        struct scatterlist src;
-       struct blkcipher_desc req = {
-               .tfm = key->tfm0,
-               .info = iv,
-               .flags = 0,
-       };
+       SKCIPHER_REQUEST_ON_STACK(req, key->tfm0);
+       int err;
 
        llsec_geniv(iv, sec->params.hwaddr, &hdr->sec);
        sg_init_one(&src, skb->data, skb->len);
-       return crypto_blkcipher_encrypt_iv(&req, &src, &src, skb->len);
+       skcipher_request_set_tfm(req, key->tfm0);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, &src, &src, skb->len, iv);
+       err = crypto_skcipher_encrypt(req);
+       skcipher_request_zero(req);
+       return err;
 }
 
 static struct crypto_aead*
@@ -830,11 +832,8 @@ llsec_do_decrypt_unauth(struct sk_buff *skb, const struct mac802154_llsec *sec,
        unsigned char *data;
        int datalen;
        struct scatterlist src;
-       struct blkcipher_desc req = {
-               .tfm = key->tfm0,
-               .info = iv,
-               .flags = 0,
-       };
+       SKCIPHER_REQUEST_ON_STACK(req, key->tfm0);
+       int err;
 
        llsec_geniv(iv, dev_addr, &hdr->sec);
        data = skb_mac_header(skb) + skb->mac_len;
@@ -842,7 +841,13 @@ llsec_do_decrypt_unauth(struct sk_buff *skb, const struct mac802154_llsec *sec,
 
        sg_init_one(&src, data, datalen);
 
-       return crypto_blkcipher_decrypt_iv(&req, &src, &src, datalen);
+       skcipher_request_set_tfm(req, key->tfm0);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, &src, &src, datalen, iv);
+
+       err = crypto_skcipher_decrypt(req);
+       skcipher_request_zero(req);
+       return err;
 }
 
 static int
index 950578e1d7bebab7636aae278af3964140ec94e1..6f3b658e3279cc536859cbe4f36927eafaa5c19c 100644 (file)
@@ -19,7 +19,6 @@
 
 #include <linux/slab.h>
 #include <linux/hashtable.h>
-#include <linux/crypto.h>
 #include <linux/kref.h>
 #include <linux/spinlock.h>
 #include <net/af_ieee802154.h>
@@ -30,7 +29,7 @@ struct mac802154_llsec_key {
 
        /* one tfm for each authsize (4/8/16) */
        struct crypto_aead *tfm[3];
-       struct crypto_blkcipher *tfm0;
+       struct crypto_skcipher *tfm0;
 
        struct kref ref;
 };
index 2934a73a5981ad154888904b67f8082fedac4a3c..71598f5b11b71db20495a095eb167c5e909f8b0e 100644 (file)
@@ -252,7 +252,7 @@ struct rxrpc_connection {
        struct rxrpc_security   *security;      /* applied security module */
        struct key              *key;           /* security for this connection (client) */
        struct key              *server_key;    /* security for this service */
-       struct crypto_blkcipher *cipher;        /* encryption handle */
+       struct crypto_skcipher  *cipher;        /* encryption handle */
        struct rxrpc_crypt      csum_iv;        /* packet checksum base */
        unsigned long           events;
 #define RXRPC_CONN_CHALLENGE   0               /* send challenge packet */
index 3f6571651d32ebf9890ec0e6d28228e62655b71e..3fb492eedeb98e3e7e97496e792f83ad61fa6436 100644 (file)
  *     "afs@CAMBRIDGE.REDHAT.COM>
  */
 
+#include <crypto/skcipher.h>
 #include <linux/module.h>
 #include <linux/net.h>
 #include <linux/skbuff.h>
 #include <linux/key-type.h>
-#include <linux/crypto.h>
 #include <linux/ctype.h>
 #include <linux/slab.h>
 #include <net/sock.h>
@@ -824,7 +824,7 @@ static void rxrpc_free_preparse(struct key_preparsed_payload *prep)
  */
 static int rxrpc_preparse_s(struct key_preparsed_payload *prep)
 {
-       struct crypto_blkcipher *ci;
+       struct crypto_skcipher *ci;
 
        _enter("%zu", prep->datalen);
 
@@ -833,13 +833,13 @@ static int rxrpc_preparse_s(struct key_preparsed_payload *prep)
 
        memcpy(&prep->payload.data[2], prep->data, 8);
 
-       ci = crypto_alloc_blkcipher("pcbc(des)", 0, CRYPTO_ALG_ASYNC);
+       ci = crypto_alloc_skcipher("pcbc(des)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(ci)) {
                _leave(" = %ld", PTR_ERR(ci));
                return PTR_ERR(ci);
        }
 
-       if (crypto_blkcipher_setkey(ci, prep->data, 8) < 0)
+       if (crypto_skcipher_setkey(ci, prep->data, 8) < 0)
                BUG();
 
        prep->payload.data[0] = ci;
@@ -853,7 +853,7 @@ static int rxrpc_preparse_s(struct key_preparsed_payload *prep)
 static void rxrpc_free_preparse_s(struct key_preparsed_payload *prep)
 {
        if (prep->payload.data[0])
-               crypto_free_blkcipher(prep->payload.data[0]);
+               crypto_free_skcipher(prep->payload.data[0]);
 }
 
 /*
@@ -870,7 +870,7 @@ static void rxrpc_destroy(struct key *key)
 static void rxrpc_destroy_s(struct key *key)
 {
        if (key->payload.data[0]) {
-               crypto_free_blkcipher(key->payload.data[0]);
+               crypto_free_skcipher(key->payload.data[0]);
                key->payload.data[0] = NULL;
        }
 }
index d7a9ab5a9d9ce8c95d60b5230c476e41740db607..0d96b48a64925840cfc2249e928ebdde51ac00a8 100644 (file)
@@ -9,11 +9,11 @@
  * 2 of the License, or (at your option) any later version.
  */
 
+#include <crypto/skcipher.h>
 #include <linux/module.h>
 #include <linux/net.h>
 #include <linux/skbuff.h>
 #include <linux/udp.h>
-#include <linux/crypto.h>
 #include <linux/scatterlist.h>
 #include <linux/ctype.h>
 #include <linux/slab.h>
@@ -53,7 +53,7 @@ MODULE_LICENSE("GPL");
  * alloc routine, but since we have it to hand, we use it to decrypt RESPONSE
  * packets
  */
-static struct crypto_blkcipher *rxkad_ci;
+static struct crypto_skcipher *rxkad_ci;
 static DEFINE_MUTEX(rxkad_ci_mutex);
 
 /*
@@ -61,7 +61,7 @@ static DEFINE_MUTEX(rxkad_ci_mutex);
  */
 static int rxkad_init_connection_security(struct rxrpc_connection *conn)
 {
-       struct crypto_blkcipher *ci;
+       struct crypto_skcipher *ci;
        struct rxrpc_key_token *token;
        int ret;
 
@@ -70,15 +70,15 @@ static int rxkad_init_connection_security(struct rxrpc_connection *conn)
        token = conn->key->payload.data[0];
        conn->security_ix = token->security_index;
 
-       ci = crypto_alloc_blkcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC);
+       ci = crypto_alloc_skcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(ci)) {
                _debug("no cipher");
                ret = PTR_ERR(ci);
                goto error;
        }
 
-       if (crypto_blkcipher_setkey(ci, token->kad->session_key,
-                                   sizeof(token->kad->session_key)) < 0)
+       if (crypto_skcipher_setkey(ci, token->kad->session_key,
+                                  sizeof(token->kad->session_key)) < 0)
                BUG();
 
        switch (conn->security_level) {
@@ -113,7 +113,7 @@ error:
 static void rxkad_prime_packet_security(struct rxrpc_connection *conn)
 {
        struct rxrpc_key_token *token;
-       struct blkcipher_desc desc;
+       SKCIPHER_REQUEST_ON_STACK(req, conn->cipher);
        struct scatterlist sg[2];
        struct rxrpc_crypt iv;
        struct {
@@ -128,10 +128,6 @@ static void rxkad_prime_packet_security(struct rxrpc_connection *conn)
        token = conn->key->payload.data[0];
        memcpy(&iv, token->kad->session_key, sizeof(iv));
 
-       desc.tfm = conn->cipher;
-       desc.info = iv.x;
-       desc.flags = 0;
-
        tmpbuf.x[0] = conn->epoch;
        tmpbuf.x[1] = conn->cid;
        tmpbuf.x[2] = 0;
@@ -139,7 +135,13 @@ static void rxkad_prime_packet_security(struct rxrpc_connection *conn)
 
        sg_init_one(&sg[0], &tmpbuf, sizeof(tmpbuf));
        sg_init_one(&sg[1], &tmpbuf, sizeof(tmpbuf));
-       crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf));
+
+       skcipher_request_set_tfm(req, conn->cipher);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, &sg[1], &sg[0], sizeof(tmpbuf), iv.x);
+
+       crypto_skcipher_encrypt(req);
+       skcipher_request_zero(req);
 
        memcpy(&conn->csum_iv, &tmpbuf.x[2], sizeof(conn->csum_iv));
        ASSERTCMP(conn->csum_iv.n[0], ==, tmpbuf.x[2]);
@@ -156,7 +158,7 @@ static int rxkad_secure_packet_auth(const struct rxrpc_call *call,
                                    void *sechdr)
 {
        struct rxrpc_skb_priv *sp;
-       struct blkcipher_desc desc;
+       SKCIPHER_REQUEST_ON_STACK(req, call->conn->cipher);
        struct rxrpc_crypt iv;
        struct scatterlist sg[2];
        struct {
@@ -177,13 +179,16 @@ static int rxkad_secure_packet_auth(const struct rxrpc_call *call,
 
        /* start the encryption afresh */
        memset(&iv, 0, sizeof(iv));
-       desc.tfm = call->conn->cipher;
-       desc.info = iv.x;
-       desc.flags = 0;
 
        sg_init_one(&sg[0], &tmpbuf, sizeof(tmpbuf));
        sg_init_one(&sg[1], &tmpbuf, sizeof(tmpbuf));
-       crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf));
+
+       skcipher_request_set_tfm(req, call->conn->cipher);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, &sg[1], &sg[0], sizeof(tmpbuf), iv.x);
+
+       crypto_skcipher_encrypt(req);
+       skcipher_request_zero(req);
 
        memcpy(sechdr, &tmpbuf, sizeof(tmpbuf));
 
@@ -203,13 +208,14 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
        struct rxkad_level2_hdr rxkhdr
                __attribute__((aligned(8))); /* must be all on one page */
        struct rxrpc_skb_priv *sp;
-       struct blkcipher_desc desc;
+       SKCIPHER_REQUEST_ON_STACK(req, call->conn->cipher);
        struct rxrpc_crypt iv;
        struct scatterlist sg[16];
        struct sk_buff *trailer;
        unsigned int len;
        u16 check;
        int nsg;
+       int err;
 
        sp = rxrpc_skb(skb);
 
@@ -223,28 +229,38 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
        /* encrypt from the session key */
        token = call->conn->key->payload.data[0];
        memcpy(&iv, token->kad->session_key, sizeof(iv));
-       desc.tfm = call->conn->cipher;
-       desc.info = iv.x;
-       desc.flags = 0;
 
        sg_init_one(&sg[0], sechdr, sizeof(rxkhdr));
        sg_init_one(&sg[1], &rxkhdr, sizeof(rxkhdr));
-       crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(rxkhdr));
+
+       skcipher_request_set_tfm(req, call->conn->cipher);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, &sg[1], &sg[0], sizeof(rxkhdr), iv.x);
+
+       crypto_skcipher_encrypt(req);
 
        /* we want to encrypt the skbuff in-place */
        nsg = skb_cow_data(skb, 0, &trailer);
+       err = -ENOMEM;
        if (nsg < 0 || nsg > 16)
-               return -ENOMEM;
+               goto out;
 
        len = data_size + call->conn->size_align - 1;
        len &= ~(call->conn->size_align - 1);
 
        sg_init_table(sg, nsg);
        skb_to_sgvec(skb, sg, 0, len);
-       crypto_blkcipher_encrypt_iv(&desc, sg, sg, len);
+
+       skcipher_request_set_crypt(req, sg, sg, len, iv.x);
+
+       crypto_skcipher_encrypt(req);
 
        _leave(" = 0");
-       return 0;
+       err = 0;
+
+out:
+       skcipher_request_zero(req);
+       return err;
 }
 
 /*
@@ -256,7 +272,7 @@ static int rxkad_secure_packet(const struct rxrpc_call *call,
                                void *sechdr)
 {
        struct rxrpc_skb_priv *sp;
-       struct blkcipher_desc desc;
+       SKCIPHER_REQUEST_ON_STACK(req, call->conn->cipher);
        struct rxrpc_crypt iv;
        struct scatterlist sg[2];
        struct {
@@ -281,9 +297,6 @@ static int rxkad_secure_packet(const struct rxrpc_call *call,
 
        /* continue encrypting from where we left off */
        memcpy(&iv, call->conn->csum_iv.x, sizeof(iv));
-       desc.tfm = call->conn->cipher;
-       desc.info = iv.x;
-       desc.flags = 0;
 
        /* calculate the security checksum */
        x = htonl(call->channel << (32 - RXRPC_CIDSHIFT));
@@ -293,7 +306,13 @@ static int rxkad_secure_packet(const struct rxrpc_call *call,
 
        sg_init_one(&sg[0], &tmpbuf, sizeof(tmpbuf));
        sg_init_one(&sg[1], &tmpbuf, sizeof(tmpbuf));
-       crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf));
+
+       skcipher_request_set_tfm(req, call->conn->cipher);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, &sg[1], &sg[0], sizeof(tmpbuf), iv.x);
+
+       crypto_skcipher_encrypt(req);
+       skcipher_request_zero(req);
 
        y = ntohl(tmpbuf.x[1]);
        y = (y >> 16) & 0xffff;
@@ -330,7 +349,7 @@ static int rxkad_verify_packet_auth(const struct rxrpc_call *call,
 {
        struct rxkad_level1_hdr sechdr;
        struct rxrpc_skb_priv *sp;
-       struct blkcipher_desc desc;
+       SKCIPHER_REQUEST_ON_STACK(req, call->conn->cipher);
        struct rxrpc_crypt iv;
        struct scatterlist sg[16];
        struct sk_buff *trailer;
@@ -352,11 +371,13 @@ static int rxkad_verify_packet_auth(const struct rxrpc_call *call,
 
        /* start the decryption afresh */
        memset(&iv, 0, sizeof(iv));
-       desc.tfm = call->conn->cipher;
-       desc.info = iv.x;
-       desc.flags = 0;
 
-       crypto_blkcipher_decrypt_iv(&desc, sg, sg, 8);
+       skcipher_request_set_tfm(req, call->conn->cipher);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, sg, sg, 8, iv.x);
+
+       crypto_skcipher_decrypt(req);
+       skcipher_request_zero(req);
 
        /* remove the decrypted packet length */
        if (skb_copy_bits(skb, 0, &sechdr, sizeof(sechdr)) < 0)
@@ -405,7 +426,7 @@ static int rxkad_verify_packet_encrypt(const struct rxrpc_call *call,
        const struct rxrpc_key_token *token;
        struct rxkad_level2_hdr sechdr;
        struct rxrpc_skb_priv *sp;
-       struct blkcipher_desc desc;
+       SKCIPHER_REQUEST_ON_STACK(req, call->conn->cipher);
        struct rxrpc_crypt iv;
        struct scatterlist _sg[4], *sg;
        struct sk_buff *trailer;
@@ -435,11 +456,13 @@ static int rxkad_verify_packet_encrypt(const struct rxrpc_call *call,
        /* decrypt from the session key */
        token = call->conn->key->payload.data[0];
        memcpy(&iv, token->kad->session_key, sizeof(iv));
-       desc.tfm = call->conn->cipher;
-       desc.info = iv.x;
-       desc.flags = 0;
 
-       crypto_blkcipher_decrypt_iv(&desc, sg, sg, skb->len);
+       skcipher_request_set_tfm(req, call->conn->cipher);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, sg, sg, skb->len, iv.x);
+
+       crypto_skcipher_decrypt(req);
+       skcipher_request_zero(req);
        if (sg != _sg)
                kfree(sg);
 
@@ -487,7 +510,7 @@ static int rxkad_verify_packet(const struct rxrpc_call *call,
                               struct sk_buff *skb,
                               u32 *_abort_code)
 {
-       struct blkcipher_desc desc;
+       SKCIPHER_REQUEST_ON_STACK(req, call->conn->cipher);
        struct rxrpc_skb_priv *sp;
        struct rxrpc_crypt iv;
        struct scatterlist sg[2];
@@ -516,9 +539,6 @@ static int rxkad_verify_packet(const struct rxrpc_call *call,
 
        /* continue encrypting from where we left off */
        memcpy(&iv, call->conn->csum_iv.x, sizeof(iv));
-       desc.tfm = call->conn->cipher;
-       desc.info = iv.x;
-       desc.flags = 0;
 
        /* validate the security checksum */
        x = htonl(call->channel << (32 - RXRPC_CIDSHIFT));
@@ -528,7 +548,13 @@ static int rxkad_verify_packet(const struct rxrpc_call *call,
 
        sg_init_one(&sg[0], &tmpbuf, sizeof(tmpbuf));
        sg_init_one(&sg[1], &tmpbuf, sizeof(tmpbuf));
-       crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf));
+
+       skcipher_request_set_tfm(req, call->conn->cipher);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, &sg[1], &sg[0], sizeof(tmpbuf), iv.x);
+
+       crypto_skcipher_encrypt(req);
+       skcipher_request_zero(req);
 
        y = ntohl(tmpbuf.x[1]);
        y = (y >> 16) & 0xffff;
@@ -718,18 +744,21 @@ static void rxkad_encrypt_response(struct rxrpc_connection *conn,
                                   struct rxkad_response *resp,
                                   const struct rxkad_key *s2)
 {
-       struct blkcipher_desc desc;
+       SKCIPHER_REQUEST_ON_STACK(req, conn->cipher);
        struct rxrpc_crypt iv;
        struct scatterlist sg[2];
 
        /* continue encrypting from where we left off */
        memcpy(&iv, s2->session_key, sizeof(iv));
-       desc.tfm = conn->cipher;
-       desc.info = iv.x;
-       desc.flags = 0;
 
        rxkad_sg_set_buf2(sg, &resp->encrypted, sizeof(resp->encrypted));
-       crypto_blkcipher_encrypt_iv(&desc, sg, sg, sizeof(resp->encrypted));
+
+       skcipher_request_set_tfm(req, conn->cipher);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, sg, sg, sizeof(resp->encrypted), iv.x);
+
+       crypto_skcipher_encrypt(req);
+       skcipher_request_zero(req);
 }
 
 /*
@@ -822,7 +851,7 @@ static int rxkad_decrypt_ticket(struct rxrpc_connection *conn,
                                time_t *_expiry,
                                u32 *_abort_code)
 {
-       struct blkcipher_desc desc;
+       struct skcipher_request *req;
        struct rxrpc_crypt iv, key;
        struct scatterlist sg[1];
        struct in_addr addr;
@@ -853,12 +882,21 @@ static int rxkad_decrypt_ticket(struct rxrpc_connection *conn,
 
        memcpy(&iv, &conn->server_key->payload.data[2], sizeof(iv));
 
-       desc.tfm = conn->server_key->payload.data[0];
-       desc.info = iv.x;
-       desc.flags = 0;
+       req = skcipher_request_alloc(conn->server_key->payload.data[0],
+                                    GFP_NOFS);
+       if (!req) {
+               *_abort_code = RXKADNOAUTH;
+               ret = -ENOMEM;
+               goto error;
+       }
 
        sg_init_one(&sg[0], ticket, ticket_len);
-       crypto_blkcipher_decrypt_iv(&desc, sg, sg, ticket_len);
+
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, sg, sg, ticket_len, iv.x);
+
+       crypto_skcipher_decrypt(req);
+       skcipher_request_free(req);
 
        p = ticket;
        end = p + ticket_len;
@@ -966,7 +1004,7 @@ static void rxkad_decrypt_response(struct rxrpc_connection *conn,
                                   struct rxkad_response *resp,
                                   const struct rxrpc_crypt *session_key)
 {
-       struct blkcipher_desc desc;
+       SKCIPHER_REQUEST_ON_STACK(req, rxkad_ci);
        struct scatterlist sg[2];
        struct rxrpc_crypt iv;
 
@@ -976,17 +1014,21 @@ static void rxkad_decrypt_response(struct rxrpc_connection *conn,
        ASSERT(rxkad_ci != NULL);
 
        mutex_lock(&rxkad_ci_mutex);
-       if (crypto_blkcipher_setkey(rxkad_ci, session_key->x,
-                                   sizeof(*session_key)) < 0)
+       if (crypto_skcipher_setkey(rxkad_ci, session_key->x,
+                                  sizeof(*session_key)) < 0)
                BUG();
 
        memcpy(&iv, session_key, sizeof(iv));
-       desc.tfm = rxkad_ci;
-       desc.info = iv.x;
-       desc.flags = 0;
 
        rxkad_sg_set_buf2(sg, &resp->encrypted, sizeof(resp->encrypted));
-       crypto_blkcipher_decrypt_iv(&desc, sg, sg, sizeof(resp->encrypted));
+
+       skcipher_request_set_tfm(req, rxkad_ci);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, sg, sg, sizeof(resp->encrypted), iv.x);
+
+       crypto_skcipher_decrypt(req);
+       skcipher_request_zero(req);
+
        mutex_unlock(&rxkad_ci_mutex);
 
        _leave("");
@@ -1115,7 +1157,7 @@ static void rxkad_clear(struct rxrpc_connection *conn)
        _enter("");
 
        if (conn->cipher)
-               crypto_free_blkcipher(conn->cipher);
+               crypto_free_skcipher(conn->cipher);
 }
 
 /*
@@ -1141,7 +1183,7 @@ static __init int rxkad_init(void)
 
        /* pin the cipher we need so that the crypto layer doesn't invoke
         * keventd to go get it */
-       rxkad_ci = crypto_alloc_blkcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC);
+       rxkad_ci = crypto_alloc_skcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(rxkad_ci))
                return PTR_ERR(rxkad_ci);
 
@@ -1155,7 +1197,7 @@ static __exit void rxkad_exit(void)
        _enter("");
 
        rxrpc_unregister_security(&rxkad);
-       crypto_free_blkcipher(rxkad_ci);
+       crypto_free_skcipher(rxkad_ci);
 }
 
 module_exit(rxkad_exit);
index 1543e39f47c33f6662abd80b5846a845d3760fdc..912eb1685a5d99110c66d0dc4c7b35eac7f37c00 100644 (file)
@@ -27,9 +27,9 @@
  *   Vlad Yasevich     <vladislav.yasevich@hp.com>
  */
 
+#include <crypto/hash.h>
 #include <linux/slab.h>
 #include <linux/types.h>
-#include <linux/crypto.h>
 #include <linux/scatterlist.h>
 #include <net/sctp/sctp.h>
 #include <net/sctp/auth.h>
@@ -448,7 +448,7 @@ struct sctp_shared_key *sctp_auth_get_shkey(
  */
 int sctp_auth_init_hmacs(struct sctp_endpoint *ep, gfp_t gfp)
 {
-       struct crypto_hash *tfm = NULL;
+       struct crypto_shash *tfm = NULL;
        __u16   id;
 
        /* If AUTH extension is disabled, we are done */
@@ -462,9 +462,8 @@ int sctp_auth_init_hmacs(struct sctp_endpoint *ep, gfp_t gfp)
                return 0;
 
        /* Allocated the array of pointers to transorms */
-       ep->auth_hmacs = kzalloc(
-                           sizeof(struct crypto_hash *) * SCTP_AUTH_NUM_HMACS,
-                           gfp);
+       ep->auth_hmacs = kzalloc(sizeof(struct crypto_shash *) *
+                                SCTP_AUTH_NUM_HMACS, gfp);
        if (!ep->auth_hmacs)
                return -ENOMEM;
 
@@ -483,8 +482,7 @@ int sctp_auth_init_hmacs(struct sctp_endpoint *ep, gfp_t gfp)
                        continue;
 
                /* Allocate the ID */
-               tfm = crypto_alloc_hash(sctp_hmac_list[id].hmac_name, 0,
-                                       CRYPTO_ALG_ASYNC);
+               tfm = crypto_alloc_shash(sctp_hmac_list[id].hmac_name, 0, 0);
                if (IS_ERR(tfm))
                        goto out_err;
 
@@ -500,7 +498,7 @@ out_err:
 }
 
 /* Destroy the hmac tfm array */
-void sctp_auth_destroy_hmacs(struct crypto_hash *auth_hmacs[])
+void sctp_auth_destroy_hmacs(struct crypto_shash *auth_hmacs[])
 {
        int i;
 
@@ -508,8 +506,7 @@ void sctp_auth_destroy_hmacs(struct crypto_hash *auth_hmacs[])
                return;
 
        for (i = 0; i < SCTP_AUTH_NUM_HMACS; i++) {
-               if (auth_hmacs[i])
-                       crypto_free_hash(auth_hmacs[i]);
+               crypto_free_shash(auth_hmacs[i]);
        }
        kfree(auth_hmacs);
 }
@@ -709,8 +706,7 @@ void sctp_auth_calculate_hmac(const struct sctp_association *asoc,
                              struct sctp_auth_chunk *auth,
                              gfp_t gfp)
 {
-       struct scatterlist sg;
-       struct hash_desc desc;
+       struct crypto_shash *tfm;
        struct sctp_auth_bytes *asoc_key;
        __u16 key_id, hmac_id;
        __u8 *digest;
@@ -742,16 +738,22 @@ void sctp_auth_calculate_hmac(const struct sctp_association *asoc,
 
        /* set up scatter list */
        end = skb_tail_pointer(skb);
-       sg_init_one(&sg, auth, end - (unsigned char *)auth);
 
-       desc.tfm = asoc->ep->auth_hmacs[hmac_id];
-       desc.flags = 0;
+       tfm = asoc->ep->auth_hmacs[hmac_id];
 
        digest = auth->auth_hdr.hmac;
-       if (crypto_hash_setkey(desc.tfm, &asoc_key->data[0], asoc_key->len))
+       if (crypto_shash_setkey(tfm, &asoc_key->data[0], asoc_key->len))
                goto free;
 
-       crypto_hash_digest(&desc, &sg, sg.length, digest);
+       {
+               SHASH_DESC_ON_STACK(desc, tfm);
+
+               desc->tfm = tfm;
+               desc->flags = 0;
+               crypto_shash_digest(desc, (u8 *)auth,
+                                   end - (unsigned char *)auth, digest);
+               shash_desc_zero(desc);
+       }
 
 free:
        if (free_key)
index 2522a61752916b58b4a8f94d61e4d68068232d1b..9d494e35e7f9f84d70e3b747c4c335a40a38044c 100644 (file)
@@ -42,7 +42,6 @@
 #include <linux/slab.h>
 #include <linux/in.h>
 #include <linux/random.h>      /* get_random_bytes() */
-#include <linux/crypto.h>
 #include <net/sock.h>
 #include <net/ipv6.h>
 #include <net/sctp/sctp.h>
index 5d6a03fad3789a12290f5f14c5a7efa69c98f41a..1296e555fe29113e65fd74a3112c7463f224dc25 100644 (file)
@@ -45,6 +45,7 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <crypto/hash.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/ip.h>
@@ -52,7 +53,6 @@
 #include <linux/net.h>
 #include <linux/inet.h>
 #include <linux/scatterlist.h>
-#include <linux/crypto.h>
 #include <linux/slab.h>
 #include <net/sock.h>
 
@@ -1606,7 +1606,6 @@ static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep,
 {
        sctp_cookie_param_t *retval;
        struct sctp_signed_cookie *cookie;
-       struct scatterlist sg;
        int headersize, bodysize;
 
        /* Header size is static data prior to the actual cookie, including
@@ -1663,16 +1662,19 @@ static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep,
               ntohs(init_chunk->chunk_hdr->length), raw_addrs, addrs_len);
 
        if (sctp_sk(ep->base.sk)->hmac) {
-               struct hash_desc desc;
+               SHASH_DESC_ON_STACK(desc, sctp_sk(ep->base.sk)->hmac);
+               int err;
 
                /* Sign the message.  */
-               sg_init_one(&sg, &cookie->c, bodysize);
-               desc.tfm = sctp_sk(ep->base.sk)->hmac;
-               desc.flags = 0;
-
-               if (crypto_hash_setkey(desc.tfm, ep->secret_key,
-                                      sizeof(ep->secret_key)) ||
-                   crypto_hash_digest(&desc, &sg, bodysize, cookie->signature))
+               desc->tfm = sctp_sk(ep->base.sk)->hmac;
+               desc->flags = 0;
+
+               err = crypto_shash_setkey(desc->tfm, ep->secret_key,
+                                         sizeof(ep->secret_key)) ?:
+                     crypto_shash_digest(desc, (u8 *)&cookie->c, bodysize,
+                                         cookie->signature);
+               shash_desc_zero(desc);
+               if (err)
                        goto free_cookie;
        }
 
@@ -1697,12 +1699,10 @@ struct sctp_association *sctp_unpack_cookie(
        struct sctp_cookie *bear_cookie;
        int headersize, bodysize, fixed_size;
        __u8 *digest = ep->digest;
-       struct scatterlist sg;
        unsigned int len;
        sctp_scope_t scope;
        struct sk_buff *skb = chunk->skb;
        ktime_t kt;
-       struct hash_desc desc;
 
        /* Header size is static data prior to the actual cookie, including
         * any padding.
@@ -1733,16 +1733,23 @@ struct sctp_association *sctp_unpack_cookie(
                goto no_hmac;
 
        /* Check the signature.  */
-       sg_init_one(&sg, bear_cookie, bodysize);
-       desc.tfm = sctp_sk(ep->base.sk)->hmac;
-       desc.flags = 0;
-
-       memset(digest, 0x00, SCTP_SIGNATURE_SIZE);
-       if (crypto_hash_setkey(desc.tfm, ep->secret_key,
-                              sizeof(ep->secret_key)) ||
-           crypto_hash_digest(&desc, &sg, bodysize, digest)) {
-               *error = -SCTP_IERROR_NOMEM;
-               goto fail;
+       {
+               SHASH_DESC_ON_STACK(desc, sctp_sk(ep->base.sk)->hmac);
+               int err;
+
+               desc->tfm = sctp_sk(ep->base.sk)->hmac;
+               desc->flags = 0;
+
+               err = crypto_shash_setkey(desc->tfm, ep->secret_key,
+                                         sizeof(ep->secret_key)) ?:
+                     crypto_shash_digest(desc, (u8 *)bear_cookie, bodysize,
+                                         digest);
+               shash_desc_zero(desc);
+
+               if (err) {
+                       *error = -SCTP_IERROR_NOMEM;
+                       goto fail;
+               }
        }
 
        if (memcmp(digest, cookie->signature, SCTP_SIGNATURE_SIZE)) {
index e878da0949dbfc0012d2e7985bf6e28386678d0f..de8eabf03eed9b904afd78e9af6f2ab0b172cd2b 100644 (file)
@@ -52,6 +52,7 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <crypto/hash.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/wait.h>
@@ -61,7 +62,6 @@
 #include <linux/fcntl.h>
 #include <linux/poll.h>
 #include <linux/init.h>
-#include <linux/crypto.h>
 #include <linux/slab.h>
 #include <linux/file.h>
 #include <linux/compat.h>
@@ -4160,7 +4160,7 @@ static void sctp_destruct_sock(struct sock *sk)
        struct sctp_sock *sp = sctp_sk(sk);
 
        /* Free up the HMAC transform. */
-       crypto_free_hash(sp->hmac);
+       crypto_free_shash(sp->hmac);
 
        inet_sock_destruct(sk);
 }
@@ -6304,13 +6304,13 @@ static int sctp_listen_start(struct sock *sk, int backlog)
 {
        struct sctp_sock *sp = sctp_sk(sk);
        struct sctp_endpoint *ep = sp->ep;
-       struct crypto_hash *tfm = NULL;
+       struct crypto_shash *tfm = NULL;
        char alg[32];
 
        /* Allocate HMAC for generating cookie. */
        if (!sp->hmac && sp->sctp_hmac_alg) {
                sprintf(alg, "hmac(%s)", sp->sctp_hmac_alg);
-               tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
+               tfm = crypto_alloc_shash(alg, 0, 0);
                if (IS_ERR(tfm)) {
                        net_info_ratelimited("failed to load transform for %s: %ld\n",
                                             sp->sctp_hmac_alg, PTR_ERR(tfm));
index fee3c15a4b5293bb33a24beb76ea7a904f6a7aab..d94a8e1e9f05b37cdb3bc82a3b419a0b0b8c1bbf 100644 (file)
  * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
  */
 
+#include <crypto/hash.h>
+#include <crypto/skcipher.h>
 #include <linux/err.h>
 #include <linux/types.h>
 #include <linux/mm.h>
 #include <linux/scatterlist.h>
-#include <linux/crypto.h>
 #include <linux/highmem.h>
 #include <linux/pagemap.h>
 #include <linux/random.h>
@@ -51,7 +52,7 @@
 
 u32
 krb5_encrypt(
-       struct crypto_blkcipher *tfm,
+       struct crypto_skcipher *tfm,
        void * iv,
        void * in,
        void * out,
@@ -60,24 +61,28 @@ krb5_encrypt(
        u32 ret = -EINVAL;
        struct scatterlist sg[1];
        u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
-       struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };
+       SKCIPHER_REQUEST_ON_STACK(req, tfm);
 
-       if (length % crypto_blkcipher_blocksize(tfm) != 0)
+       if (length % crypto_skcipher_blocksize(tfm) != 0)
                goto out;
 
-       if (crypto_blkcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
+       if (crypto_skcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
                dprintk("RPC:       gss_k5encrypt: tfm iv size too large %d\n",
-                       crypto_blkcipher_ivsize(tfm));
+                       crypto_skcipher_ivsize(tfm));
                goto out;
        }
 
        if (iv)
-               memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));
+               memcpy(local_iv, iv, crypto_skcipher_ivsize(tfm));
 
        memcpy(out, in, length);
        sg_init_one(sg, out, length);
 
-       ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, length);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, sg, sg, length, local_iv);
+
+       ret = crypto_skcipher_encrypt(req);
+       skcipher_request_zero(req);
 out:
        dprintk("RPC:       krb5_encrypt returns %d\n", ret);
        return ret;
@@ -85,7 +90,7 @@ out:
 
 u32
 krb5_decrypt(
-     struct crypto_blkcipher *tfm,
+     struct crypto_skcipher *tfm,
      void * iv,
      void * in,
      void * out,
@@ -94,23 +99,27 @@ krb5_decrypt(
        u32 ret = -EINVAL;
        struct scatterlist sg[1];
        u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
-       struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };
+       SKCIPHER_REQUEST_ON_STACK(req, tfm);
 
-       if (length % crypto_blkcipher_blocksize(tfm) != 0)
+       if (length % crypto_skcipher_blocksize(tfm) != 0)
                goto out;
 
-       if (crypto_blkcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
+       if (crypto_skcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
                dprintk("RPC:       gss_k5decrypt: tfm iv size too large %d\n",
-                       crypto_blkcipher_ivsize(tfm));
+                       crypto_skcipher_ivsize(tfm));
                goto out;
        }
        if (iv)
-               memcpy(local_iv,iv, crypto_blkcipher_ivsize(tfm));
+               memcpy(local_iv,iv, crypto_skcipher_ivsize(tfm));
 
        memcpy(out, in, length);
        sg_init_one(sg, out, length);
 
-       ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, length);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, sg, sg, length, local_iv);
+
+       ret = crypto_skcipher_decrypt(req);
+       skcipher_request_zero(req);
 out:
        dprintk("RPC:       gss_k5decrypt returns %d\n",ret);
        return ret;
@@ -119,9 +128,11 @@ out:
 static int
 checksummer(struct scatterlist *sg, void *data)
 {
-       struct hash_desc *desc = data;
+       struct ahash_request *req = data;
+
+       ahash_request_set_crypt(req, sg, NULL, sg->length);
 
-       return crypto_hash_update(desc, sg, sg->length);
+       return crypto_ahash_update(req);
 }
 
 static int
@@ -152,13 +163,13 @@ make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
                       struct xdr_buf *body, int body_offset, u8 *cksumkey,
                       unsigned int usage, struct xdr_netobj *cksumout)
 {
-       struct hash_desc                desc;
        struct scatterlist              sg[1];
        int err;
        u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
        u8 rc4salt[4];
-       struct crypto_hash *md5;
-       struct crypto_hash *hmac_md5;
+       struct crypto_ahash *md5;
+       struct crypto_ahash *hmac_md5;
+       struct ahash_request *req;
 
        if (cksumkey == NULL)
                return GSS_S_FAILURE;
@@ -174,61 +185,79 @@ make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
                return GSS_S_FAILURE;
        }
 
-       md5 = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
+       md5 = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(md5))
                return GSS_S_FAILURE;
 
-       hmac_md5 = crypto_alloc_hash(kctx->gk5e->cksum_name, 0,
-                                    CRYPTO_ALG_ASYNC);
+       hmac_md5 = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0,
+                                     CRYPTO_ALG_ASYNC);
        if (IS_ERR(hmac_md5)) {
-               crypto_free_hash(md5);
+               crypto_free_ahash(md5);
+               return GSS_S_FAILURE;
+       }
+
+       req = ahash_request_alloc(md5, GFP_KERNEL);
+       if (!req) {
+               crypto_free_ahash(hmac_md5);
+               crypto_free_ahash(md5);
                return GSS_S_FAILURE;
        }
 
-       desc.tfm = md5;
-       desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+       ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
 
-       err = crypto_hash_init(&desc);
+       err = crypto_ahash_init(req);
        if (err)
                goto out;
        sg_init_one(sg, rc4salt, 4);
-       err = crypto_hash_update(&desc, sg, 4);
+       ahash_request_set_crypt(req, sg, NULL, 4);
+       err = crypto_ahash_update(req);
        if (err)
                goto out;
 
        sg_init_one(sg, header, hdrlen);
-       err = crypto_hash_update(&desc, sg, hdrlen);
+       ahash_request_set_crypt(req, sg, NULL, hdrlen);
+       err = crypto_ahash_update(req);
        if (err)
                goto out;
        err = xdr_process_buf(body, body_offset, body->len - body_offset,
-                             checksummer, &desc);
+                             checksummer, req);
        if (err)
                goto out;
-       err = crypto_hash_final(&desc, checksumdata);
+       ahash_request_set_crypt(req, NULL, checksumdata, 0);
+       err = crypto_ahash_final(req);
        if (err)
                goto out;
 
-       desc.tfm = hmac_md5;
-       desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+       ahash_request_free(req);
+       req = ahash_request_alloc(hmac_md5, GFP_KERNEL);
+       if (!req) {
+               crypto_free_ahash(hmac_md5);
+               crypto_free_ahash(md5);
+               return GSS_S_FAILURE;
+       }
+
+       ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
 
-       err = crypto_hash_init(&desc);
+       err = crypto_ahash_init(req);
        if (err)
                goto out;
-       err = crypto_hash_setkey(hmac_md5, cksumkey, kctx->gk5e->keylength);
+       err = crypto_ahash_setkey(hmac_md5, cksumkey, kctx->gk5e->keylength);
        if (err)
                goto out;
 
-       sg_init_one(sg, checksumdata, crypto_hash_digestsize(md5));
-       err = crypto_hash_digest(&desc, sg, crypto_hash_digestsize(md5),
-                                checksumdata);
+       sg_init_one(sg, checksumdata, crypto_ahash_digestsize(md5));
+       ahash_request_set_crypt(req, sg, checksumdata,
+                               crypto_ahash_digestsize(md5));
+       err = crypto_ahash_digest(req);
        if (err)
                goto out;
 
        memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
        cksumout->len = kctx->gk5e->cksumlength;
 out:
-       crypto_free_hash(md5);
-       crypto_free_hash(hmac_md5);
+       ahash_request_free(req);
+       crypto_free_ahash(md5);
+       crypto_free_ahash(hmac_md5);
        return err ? GSS_S_FAILURE : 0;
 }
 
@@ -242,7 +271,8 @@ make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
              struct xdr_buf *body, int body_offset, u8 *cksumkey,
              unsigned int usage, struct xdr_netobj *cksumout)
 {
-       struct hash_desc                desc;
+       struct crypto_ahash *tfm;
+       struct ahash_request *req;
        struct scatterlist              sg[1];
        int err;
        u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
@@ -259,32 +289,41 @@ make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
                return GSS_S_FAILURE;
        }
 
-       desc.tfm = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
-       if (IS_ERR(desc.tfm))
+       tfm = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
+       if (IS_ERR(tfm))
                return GSS_S_FAILURE;
-       desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 
-       checksumlen = crypto_hash_digestsize(desc.tfm);
+       req = ahash_request_alloc(tfm, GFP_KERNEL);
+       if (!req) {
+               crypto_free_ahash(tfm);
+               return GSS_S_FAILURE;
+       }
+
+       ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
+
+       checksumlen = crypto_ahash_digestsize(tfm);
 
        if (cksumkey != NULL) {
-               err = crypto_hash_setkey(desc.tfm, cksumkey,
-                                        kctx->gk5e->keylength);
+               err = crypto_ahash_setkey(tfm, cksumkey,
+                                         kctx->gk5e->keylength);
                if (err)
                        goto out;
        }
 
-       err = crypto_hash_init(&desc);
+       err = crypto_ahash_init(req);
        if (err)
                goto out;
        sg_init_one(sg, header, hdrlen);
-       err = crypto_hash_update(&desc, sg, hdrlen);
+       ahash_request_set_crypt(req, sg, NULL, hdrlen);
+       err = crypto_ahash_update(req);
        if (err)
                goto out;
        err = xdr_process_buf(body, body_offset, body->len - body_offset,
-                             checksummer, &desc);
+                             checksummer, req);
        if (err)
                goto out;
-       err = crypto_hash_final(&desc, checksumdata);
+       ahash_request_set_crypt(req, NULL, checksumdata, 0);
+       err = crypto_ahash_final(req);
        if (err)
                goto out;
 
@@ -307,7 +346,8 @@ make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
        }
        cksumout->len = kctx->gk5e->cksumlength;
 out:
-       crypto_free_hash(desc.tfm);
+       ahash_request_free(req);
+       crypto_free_ahash(tfm);
        return err ? GSS_S_FAILURE : 0;
 }
 
@@ -323,7 +363,8 @@ make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen,
                 struct xdr_buf *body, int body_offset, u8 *cksumkey,
                 unsigned int usage, struct xdr_netobj *cksumout)
 {
-       struct hash_desc desc;
+       struct crypto_ahash *tfm;
+       struct ahash_request *req;
        struct scatterlist sg[1];
        int err;
        u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
@@ -340,31 +381,39 @@ make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen,
                return GSS_S_FAILURE;
        }
 
-       desc.tfm = crypto_alloc_hash(kctx->gk5e->cksum_name, 0,
-                                                       CRYPTO_ALG_ASYNC);
-       if (IS_ERR(desc.tfm))
+       tfm = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
+       if (IS_ERR(tfm))
                return GSS_S_FAILURE;
-       checksumlen = crypto_hash_digestsize(desc.tfm);
-       desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+       checksumlen = crypto_ahash_digestsize(tfm);
+
+       req = ahash_request_alloc(tfm, GFP_KERNEL);
+       if (!req) {
+               crypto_free_ahash(tfm);
+               return GSS_S_FAILURE;
+       }
 
-       err = crypto_hash_setkey(desc.tfm, cksumkey, kctx->gk5e->keylength);
+       ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
+
+       err = crypto_ahash_setkey(tfm, cksumkey, kctx->gk5e->keylength);
        if (err)
                goto out;
 
-       err = crypto_hash_init(&desc);
+       err = crypto_ahash_init(req);
        if (err)
                goto out;
        err = xdr_process_buf(body, body_offset, body->len - body_offset,
-                             checksummer, &desc);
+                             checksummer, req);
        if (err)
                goto out;
        if (header != NULL) {
                sg_init_one(sg, header, hdrlen);
-               err = crypto_hash_update(&desc, sg, hdrlen);
+               ahash_request_set_crypt(req, sg, NULL, hdrlen);
+               err = crypto_ahash_update(req);
                if (err)
                        goto out;
        }
-       err = crypto_hash_final(&desc, checksumdata);
+       ahash_request_set_crypt(req, NULL, checksumdata, 0);
+       err = crypto_ahash_final(req);
        if (err)
                goto out;
 
@@ -381,13 +430,14 @@ make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen,
                break;
        }
 out:
-       crypto_free_hash(desc.tfm);
+       ahash_request_free(req);
+       crypto_free_ahash(tfm);
        return err ? GSS_S_FAILURE : 0;
 }
 
 struct encryptor_desc {
        u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
-       struct blkcipher_desc desc;
+       struct skcipher_request *req;
        int pos;
        struct xdr_buf *outbuf;
        struct page **pages;
@@ -402,6 +452,7 @@ encryptor(struct scatterlist *sg, void *data)
 {
        struct encryptor_desc *desc = data;
        struct xdr_buf *outbuf = desc->outbuf;
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(desc->req);
        struct page *in_page;
        int thislen = desc->fraglen + sg->length;
        int fraglen, ret;
@@ -427,7 +478,7 @@ encryptor(struct scatterlist *sg, void *data)
        desc->fraglen += sg->length;
        desc->pos += sg->length;
 
-       fraglen = thislen & (crypto_blkcipher_blocksize(desc->desc.tfm) - 1);
+       fraglen = thislen & (crypto_skcipher_blocksize(tfm) - 1);
        thislen -= fraglen;
 
        if (thislen == 0)
@@ -436,8 +487,10 @@ encryptor(struct scatterlist *sg, void *data)
        sg_mark_end(&desc->infrags[desc->fragno - 1]);
        sg_mark_end(&desc->outfrags[desc->fragno - 1]);
 
-       ret = crypto_blkcipher_encrypt_iv(&desc->desc, desc->outfrags,
-                                         desc->infrags, thislen);
+       skcipher_request_set_crypt(desc->req, desc->infrags, desc->outfrags,
+                                  thislen, desc->iv);
+
+       ret = crypto_skcipher_encrypt(desc->req);
        if (ret)
                return ret;
 
@@ -459,18 +512,20 @@ encryptor(struct scatterlist *sg, void *data)
 }
 
 int
-gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
+gss_encrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *buf,
                    int offset, struct page **pages)
 {
        int ret;
        struct encryptor_desc desc;
+       SKCIPHER_REQUEST_ON_STACK(req, tfm);
+
+       BUG_ON((buf->len - offset) % crypto_skcipher_blocksize(tfm) != 0);
 
-       BUG_ON((buf->len - offset) % crypto_blkcipher_blocksize(tfm) != 0);
+       skcipher_request_set_tfm(req, tfm);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
 
        memset(desc.iv, 0, sizeof(desc.iv));
-       desc.desc.tfm = tfm;
-       desc.desc.info = desc.iv;
-       desc.desc.flags = 0;
+       desc.req = req;
        desc.pos = offset;
        desc.outbuf = buf;
        desc.pages = pages;
@@ -481,12 +536,13 @@ gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
        sg_init_table(desc.outfrags, 4);
 
        ret = xdr_process_buf(buf, offset, buf->len - offset, encryptor, &desc);
+       skcipher_request_zero(req);
        return ret;
 }
 
 struct decryptor_desc {
        u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
-       struct blkcipher_desc desc;
+       struct skcipher_request *req;
        struct scatterlist frags[4];
        int fragno;
        int fraglen;
@@ -497,6 +553,7 @@ decryptor(struct scatterlist *sg, void *data)
 {
        struct decryptor_desc *desc = data;
        int thislen = desc->fraglen + sg->length;
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(desc->req);
        int fraglen, ret;
 
        /* Worst case is 4 fragments: head, end of page 1, start
@@ -507,7 +564,7 @@ decryptor(struct scatterlist *sg, void *data)
        desc->fragno++;
        desc->fraglen += sg->length;
 
-       fraglen = thislen & (crypto_blkcipher_blocksize(desc->desc.tfm) - 1);
+       fraglen = thislen & (crypto_skcipher_blocksize(tfm) - 1);
        thislen -= fraglen;
 
        if (thislen == 0)
@@ -515,8 +572,10 @@ decryptor(struct scatterlist *sg, void *data)
 
        sg_mark_end(&desc->frags[desc->fragno - 1]);
 
-       ret = crypto_blkcipher_decrypt_iv(&desc->desc, desc->frags,
-                                         desc->frags, thislen);
+       skcipher_request_set_crypt(desc->req, desc->frags, desc->frags,
+                                  thislen, desc->iv);
+
+       ret = crypto_skcipher_decrypt(desc->req);
        if (ret)
                return ret;
 
@@ -535,24 +594,29 @@ decryptor(struct scatterlist *sg, void *data)
 }
 
 int
-gss_decrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
+gss_decrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *buf,
                    int offset)
 {
+       int ret;
        struct decryptor_desc desc;
+       SKCIPHER_REQUEST_ON_STACK(req, tfm);
 
        /* XXXJBF: */
-       BUG_ON((buf->len - offset) % crypto_blkcipher_blocksize(tfm) != 0);
+       BUG_ON((buf->len - offset) % crypto_skcipher_blocksize(tfm) != 0);
+
+       skcipher_request_set_tfm(req, tfm);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
 
        memset(desc.iv, 0, sizeof(desc.iv));
-       desc.desc.tfm = tfm;
-       desc.desc.info = desc.iv;
-       desc.desc.flags = 0;
+       desc.req = req;
        desc.fragno = 0;
        desc.fraglen = 0;
 
        sg_init_table(desc.frags, 4);
 
-       return xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc);
+       ret = xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc);
+       skcipher_request_zero(req);
+       return ret;
 }
 
 /*
@@ -594,12 +658,12 @@ xdr_extend_head(struct xdr_buf *buf, unsigned int base, unsigned int shiftlen)
 }
 
 static u32
-gss_krb5_cts_crypt(struct crypto_blkcipher *cipher, struct xdr_buf *buf,
+gss_krb5_cts_crypt(struct crypto_skcipher *cipher, struct xdr_buf *buf,
                   u32 offset, u8 *iv, struct page **pages, int encrypt)
 {
        u32 ret;
        struct scatterlist sg[1];
-       struct blkcipher_desc desc = { .tfm = cipher, .info = iv };
+       SKCIPHER_REQUEST_ON_STACK(req, cipher);
        u8 data[GSS_KRB5_MAX_BLOCKSIZE * 2];
        struct page **save_pages;
        u32 len = buf->len - offset;
@@ -625,10 +689,16 @@ gss_krb5_cts_crypt(struct crypto_blkcipher *cipher, struct xdr_buf *buf,
 
        sg_init_one(sg, data, len);
 
+       skcipher_request_set_tfm(req, cipher);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, sg, sg, len, iv);
+
        if (encrypt)
-               ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, len);
+               ret = crypto_skcipher_encrypt(req);
        else
-               ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, len);
+               ret = crypto_skcipher_decrypt(req);
+
+       skcipher_request_zero(req);
 
        if (ret)
                goto out;
@@ -647,7 +717,7 @@ gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
        struct xdr_netobj hmac;
        u8 *cksumkey;
        u8 *ecptr;
-       struct crypto_blkcipher *cipher, *aux_cipher;
+       struct crypto_skcipher *cipher, *aux_cipher;
        int blocksize;
        struct page **save_pages;
        int nblocks, nbytes;
@@ -666,7 +736,7 @@ gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
                cksumkey = kctx->acceptor_integ;
                usage = KG_USAGE_ACCEPTOR_SEAL;
        }
-       blocksize = crypto_blkcipher_blocksize(cipher);
+       blocksize = crypto_skcipher_blocksize(cipher);
 
        /* hide the gss token header and insert the confounder */
        offset += GSS_KRB5_TOK_HDR_LEN;
@@ -719,20 +789,24 @@ gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
        memset(desc.iv, 0, sizeof(desc.iv));
 
        if (cbcbytes) {
+               SKCIPHER_REQUEST_ON_STACK(req, aux_cipher);
+
                desc.pos = offset + GSS_KRB5_TOK_HDR_LEN;
                desc.fragno = 0;
                desc.fraglen = 0;
                desc.pages = pages;
                desc.outbuf = buf;
-               desc.desc.info = desc.iv;
-               desc.desc.flags = 0;
-               desc.desc.tfm = aux_cipher;
+               desc.req = req;
+
+               skcipher_request_set_tfm(req, aux_cipher);
+               skcipher_request_set_callback(req, 0, NULL, NULL);
 
                sg_init_table(desc.infrags, 4);
                sg_init_table(desc.outfrags, 4);
 
                err = xdr_process_buf(buf, offset + GSS_KRB5_TOK_HDR_LEN,
                                      cbcbytes, encryptor, &desc);
+               skcipher_request_zero(req);
                if (err)
                        goto out_err;
        }
@@ -763,7 +837,7 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
        struct xdr_buf subbuf;
        u32 ret = 0;
        u8 *cksum_key;
-       struct crypto_blkcipher *cipher, *aux_cipher;
+       struct crypto_skcipher *cipher, *aux_cipher;
        struct xdr_netobj our_hmac_obj;
        u8 our_hmac[GSS_KRB5_MAX_CKSUM_LEN];
        u8 pkt_hmac[GSS_KRB5_MAX_CKSUM_LEN];
@@ -782,7 +856,7 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
                cksum_key = kctx->initiator_integ;
                usage = KG_USAGE_INITIATOR_SEAL;
        }
-       blocksize = crypto_blkcipher_blocksize(cipher);
+       blocksize = crypto_skcipher_blocksize(cipher);
 
 
        /* create a segment skipping the header and leaving out the checksum */
@@ -799,15 +873,19 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
        memset(desc.iv, 0, sizeof(desc.iv));
 
        if (cbcbytes) {
+               SKCIPHER_REQUEST_ON_STACK(req, aux_cipher);
+
                desc.fragno = 0;
                desc.fraglen = 0;
-               desc.desc.info = desc.iv;
-               desc.desc.flags = 0;
-               desc.desc.tfm = aux_cipher;
+               desc.req = req;
+
+               skcipher_request_set_tfm(req, aux_cipher);
+               skcipher_request_set_callback(req, 0, NULL, NULL);
 
                sg_init_table(desc.frags, 4);
 
                ret = xdr_process_buf(&subbuf, 0, cbcbytes, decryptor, &desc);
+               skcipher_request_zero(req);
                if (ret)
                        goto out_err;
        }
@@ -850,61 +928,62 @@ out_err:
  * Set the key of the given cipher.
  */
 int
-krb5_rc4_setup_seq_key(struct krb5_ctx *kctx, struct crypto_blkcipher *cipher,
+krb5_rc4_setup_seq_key(struct krb5_ctx *kctx, struct crypto_skcipher *cipher,
                       unsigned char *cksum)
 {
-       struct crypto_hash *hmac;
-       struct hash_desc desc;
-       struct scatterlist sg[1];
+       struct crypto_shash *hmac;
+       struct shash_desc *desc;
        u8 Kseq[GSS_KRB5_MAX_KEYLEN];
        u32 zeroconstant = 0;
        int err;
 
        dprintk("%s: entered\n", __func__);
 
-       hmac = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
+       hmac = crypto_alloc_shash(kctx->gk5e->cksum_name, 0, 0);
        if (IS_ERR(hmac)) {
                dprintk("%s: error %ld, allocating hash '%s'\n",
                        __func__, PTR_ERR(hmac), kctx->gk5e->cksum_name);
                return PTR_ERR(hmac);
        }
 
-       desc.tfm = hmac;
-       desc.flags = 0;
+       desc = kmalloc(sizeof(*desc), GFP_KERNEL);
+       if (!desc) {
+               dprintk("%s: failed to allocate shash descriptor for '%s'\n",
+                       __func__, kctx->gk5e->cksum_name);
+               crypto_free_shash(hmac);
+               return -ENOMEM;
+       }
 
-       err = crypto_hash_init(&desc);
-       if (err)
-               goto out_err;
+       desc->tfm = hmac;
+       desc->flags = 0;
 
        /* Compute intermediate Kseq from session key */
-       err = crypto_hash_setkey(hmac, kctx->Ksess, kctx->gk5e->keylength);
+       err = crypto_shash_setkey(hmac, kctx->Ksess, kctx->gk5e->keylength);
        if (err)
                goto out_err;
 
-       sg_init_one(sg, &zeroconstant, 4);
-       err = crypto_hash_digest(&desc, sg, 4, Kseq);
+       err = crypto_shash_digest(desc, (u8 *)&zeroconstant, 4, Kseq);
        if (err)
                goto out_err;
 
        /* Compute final Kseq from the checksum and intermediate Kseq */
-       err = crypto_hash_setkey(hmac, Kseq, kctx->gk5e->keylength);
+       err = crypto_shash_setkey(hmac, Kseq, kctx->gk5e->keylength);
        if (err)
                goto out_err;
 
-       sg_set_buf(sg, cksum, 8);
-
-       err = crypto_hash_digest(&desc, sg, 8, Kseq);
+       err = crypto_shash_digest(desc, cksum, 8, Kseq);
        if (err)
                goto out_err;
 
-       err = crypto_blkcipher_setkey(cipher, Kseq, kctx->gk5e->keylength);
+       err = crypto_skcipher_setkey(cipher, Kseq, kctx->gk5e->keylength);
        if (err)
                goto out_err;
 
        err = 0;
 
 out_err:
-       crypto_free_hash(hmac);
+       kzfree(desc);
+       crypto_free_shash(hmac);
        dprintk("%s: returning %d\n", __func__, err);
        return err;
 }
@@ -914,12 +993,11 @@ out_err:
  * Set the key of cipher kctx->enc.
  */
 int
-krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, struct crypto_blkcipher *cipher,
+krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, struct crypto_skcipher *cipher,
                       s32 seqnum)
 {
-       struct crypto_hash *hmac;
-       struct hash_desc desc;
-       struct scatterlist sg[1];
+       struct crypto_shash *hmac;
+       struct shash_desc *desc;
        u8 Kcrypt[GSS_KRB5_MAX_KEYLEN];
        u8 zeroconstant[4] = {0};
        u8 seqnumarray[4];
@@ -927,35 +1005,38 @@ krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, struct crypto_blkcipher *cipher,
 
        dprintk("%s: entered, seqnum %u\n", __func__, seqnum);
 
-       hmac = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
+       hmac = crypto_alloc_shash(kctx->gk5e->cksum_name, 0, 0);
        if (IS_ERR(hmac)) {
                dprintk("%s: error %ld, allocating hash '%s'\n",
                        __func__, PTR_ERR(hmac), kctx->gk5e->cksum_name);
                return PTR_ERR(hmac);
        }
 
-       desc.tfm = hmac;
-       desc.flags = 0;
+       desc = kmalloc(sizeof(*desc), GFP_KERNEL);
+       if (!desc) {
+               dprintk("%s: failed to allocate shash descriptor for '%s'\n",
+                       __func__, kctx->gk5e->cksum_name);
+               crypto_free_shash(hmac);
+               return -ENOMEM;
+       }
 
-       err = crypto_hash_init(&desc);
-       if (err)
-               goto out_err;
+       desc->tfm = hmac;
+       desc->flags = 0;
 
        /* Compute intermediate Kcrypt from session key */
        for (i = 0; i < kctx->gk5e->keylength; i++)
                Kcrypt[i] = kctx->Ksess[i] ^ 0xf0;
 
-       err = crypto_hash_setkey(hmac, Kcrypt, kctx->gk5e->keylength);
+       err = crypto_shash_setkey(hmac, Kcrypt, kctx->gk5e->keylength);
        if (err)
                goto out_err;
 
-       sg_init_one(sg, zeroconstant, 4);
-       err = crypto_hash_digest(&desc, sg, 4, Kcrypt);
+       err = crypto_shash_digest(desc, zeroconstant, 4, Kcrypt);
        if (err)
                goto out_err;
 
        /* Compute final Kcrypt from the seqnum and intermediate Kcrypt */
-       err = crypto_hash_setkey(hmac, Kcrypt, kctx->gk5e->keylength);
+       err = crypto_shash_setkey(hmac, Kcrypt, kctx->gk5e->keylength);
        if (err)
                goto out_err;
 
@@ -964,20 +1045,19 @@ krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, struct crypto_blkcipher *cipher,
        seqnumarray[2] = (unsigned char) ((seqnum >> 8) & 0xff);
        seqnumarray[3] = (unsigned char) ((seqnum >> 0) & 0xff);
 
-       sg_set_buf(sg, seqnumarray, 4);
-
-       err = crypto_hash_digest(&desc, sg, 4, Kcrypt);
+       err = crypto_shash_digest(desc, seqnumarray, 4, Kcrypt);
        if (err)
                goto out_err;
 
-       err = crypto_blkcipher_setkey(cipher, Kcrypt, kctx->gk5e->keylength);
+       err = crypto_skcipher_setkey(cipher, Kcrypt, kctx->gk5e->keylength);
        if (err)
                goto out_err;
 
        err = 0;
 
 out_err:
-       crypto_free_hash(hmac);
+       kzfree(desc);
+       crypto_free_shash(hmac);
        dprintk("%s: returning %d\n", __func__, err);
        return err;
 }
index 234fa8d0fd9bf80cc28c3d88ae94037122ca422c..87013314602634711ad9dbd223823d896a373210 100644 (file)
@@ -54,9 +54,9 @@
  * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
  */
 
+#include <crypto/skcipher.h>
 #include <linux/err.h>
 #include <linux/types.h>
-#include <linux/crypto.h>
 #include <linux/sunrpc/gss_krb5.h>
 #include <linux/sunrpc/xdr.h>
 #include <linux/lcm.h>
@@ -147,7 +147,7 @@ u32 krb5_derive_key(const struct gss_krb5_enctype *gk5e,
        size_t blocksize, keybytes, keylength, n;
        unsigned char *inblockdata, *outblockdata, *rawkey;
        struct xdr_netobj inblock, outblock;
-       struct crypto_blkcipher *cipher;
+       struct crypto_skcipher *cipher;
        u32 ret = EINVAL;
 
        blocksize = gk5e->blocksize;
@@ -157,11 +157,11 @@ u32 krb5_derive_key(const struct gss_krb5_enctype *gk5e,
        if ((inkey->len != keylength) || (outkey->len != keylength))
                goto err_return;
 
-       cipher = crypto_alloc_blkcipher(gk5e->encrypt_name, 0,
-                                       CRYPTO_ALG_ASYNC);
+       cipher = crypto_alloc_skcipher(gk5e->encrypt_name, 0,
+                                      CRYPTO_ALG_ASYNC);
        if (IS_ERR(cipher))
                goto err_return;
-       if (crypto_blkcipher_setkey(cipher, inkey->data, inkey->len))
+       if (crypto_skcipher_setkey(cipher, inkey->data, inkey->len))
                goto err_return;
 
        /* allocate and set up buffers */
@@ -238,7 +238,7 @@ err_free_in:
        memset(inblockdata, 0, blocksize);
        kfree(inblockdata);
 err_free_cipher:
-       crypto_free_blkcipher(cipher);
+       crypto_free_skcipher(cipher);
 err_return:
        return ret;
 }
index 28db442a0034ad601d1a4cc2f2f82dc16a32a3da..71341ccb989043acd4c6d449a6d89d9db8b98513 100644 (file)
@@ -34,6 +34,8 @@
  *
  */
 
+#include <crypto/hash.h>
+#include <crypto/skcipher.h>
 #include <linux/err.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -42,7 +44,6 @@
 #include <linux/sunrpc/auth.h>
 #include <linux/sunrpc/gss_krb5.h>
 #include <linux/sunrpc/xdr.h>
-#include <linux/crypto.h>
 #include <linux/sunrpc/gss_krb5_enctypes.h>
 
 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
@@ -217,7 +218,7 @@ simple_get_netobj(const void *p, const void *end, struct xdr_netobj *res)
 
 static inline const void *
 get_key(const void *p, const void *end,
-       struct krb5_ctx *ctx, struct crypto_blkcipher **res)
+       struct krb5_ctx *ctx, struct crypto_skcipher **res)
 {
        struct xdr_netobj       key;
        int                     alg;
@@ -245,7 +246,7 @@ get_key(const void *p, const void *end,
        if (IS_ERR(p))
                goto out_err;
 
-       *res = crypto_alloc_blkcipher(ctx->gk5e->encrypt_name, 0,
+       *res = crypto_alloc_skcipher(ctx->gk5e->encrypt_name, 0,
                                                        CRYPTO_ALG_ASYNC);
        if (IS_ERR(*res)) {
                printk(KERN_WARNING "gss_kerberos_mech: unable to initialize "
@@ -253,7 +254,7 @@ get_key(const void *p, const void *end,
                *res = NULL;
                goto out_err_free_key;
        }
-       if (crypto_blkcipher_setkey(*res, key.data, key.len)) {
+       if (crypto_skcipher_setkey(*res, key.data, key.len)) {
                printk(KERN_WARNING "gss_kerberos_mech: error setting key for "
                        "crypto algorithm %s\n", ctx->gk5e->encrypt_name);
                goto out_err_free_tfm;
@@ -263,7 +264,7 @@ get_key(const void *p, const void *end,
        return p;
 
 out_err_free_tfm:
-       crypto_free_blkcipher(*res);
+       crypto_free_skcipher(*res);
 out_err_free_key:
        kfree(key.data);
        p = ERR_PTR(-EINVAL);
@@ -335,30 +336,30 @@ gss_import_v1_context(const void *p, const void *end, struct krb5_ctx *ctx)
        return 0;
 
 out_err_free_key2:
-       crypto_free_blkcipher(ctx->seq);
+       crypto_free_skcipher(ctx->seq);
 out_err_free_key1:
-       crypto_free_blkcipher(ctx->enc);
+       crypto_free_skcipher(ctx->enc);
 out_err_free_mech:
        kfree(ctx->mech_used.data);
 out_err:
        return PTR_ERR(p);
 }
 
-static struct crypto_blkcipher *
+static struct crypto_skcipher *
 context_v2_alloc_cipher(struct krb5_ctx *ctx, const char *cname, u8 *key)
 {
-       struct crypto_blkcipher *cp;
+       struct crypto_skcipher *cp;
 
-       cp = crypto_alloc_blkcipher(cname, 0, CRYPTO_ALG_ASYNC);
+       cp = crypto_alloc_skcipher(cname, 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(cp)) {
                dprintk("gss_kerberos_mech: unable to initialize "
                        "crypto algorithm %s\n", cname);
                return NULL;
        }
-       if (crypto_blkcipher_setkey(cp, key, ctx->gk5e->keylength)) {
+       if (crypto_skcipher_setkey(cp, key, ctx->gk5e->keylength)) {
                dprintk("gss_kerberos_mech: error setting key for "
                        "crypto algorithm %s\n", cname);
-               crypto_free_blkcipher(cp);
+               crypto_free_skcipher(cp);
                return NULL;
        }
        return cp;
@@ -412,9 +413,9 @@ context_derive_keys_des3(struct krb5_ctx *ctx, gfp_t gfp_mask)
        return 0;
 
 out_free_enc:
-       crypto_free_blkcipher(ctx->enc);
+       crypto_free_skcipher(ctx->enc);
 out_free_seq:
-       crypto_free_blkcipher(ctx->seq);
+       crypto_free_skcipher(ctx->seq);
 out_err:
        return -EINVAL;
 }
@@ -427,18 +428,17 @@ out_err:
 static int
 context_derive_keys_rc4(struct krb5_ctx *ctx)
 {
-       struct crypto_hash *hmac;
+       struct crypto_shash *hmac;
        char sigkeyconstant[] = "signaturekey";
        int slen = strlen(sigkeyconstant) + 1;  /* include null terminator */
-       struct hash_desc desc;
-       struct scatterlist sg[1];
+       struct shash_desc *desc;
        int err;
 
        dprintk("RPC:       %s: entered\n", __func__);
        /*
         * derive cksum (aka Ksign) key
         */
-       hmac = crypto_alloc_hash(ctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
+       hmac = crypto_alloc_shash(ctx->gk5e->cksum_name, 0, 0);
        if (IS_ERR(hmac)) {
                dprintk("%s: error %ld allocating hash '%s'\n",
                        __func__, PTR_ERR(hmac), ctx->gk5e->cksum_name);
@@ -446,37 +446,40 @@ context_derive_keys_rc4(struct krb5_ctx *ctx)
                goto out_err;
        }
 
-       err = crypto_hash_setkey(hmac, ctx->Ksess, ctx->gk5e->keylength);
+       err = crypto_shash_setkey(hmac, ctx->Ksess, ctx->gk5e->keylength);
        if (err)
                goto out_err_free_hmac;
 
-       sg_init_table(sg, 1);
-       sg_set_buf(sg, sigkeyconstant, slen);
 
-       desc.tfm = hmac;
-       desc.flags = 0;
-
-       err = crypto_hash_init(&desc);
-       if (err)
+       desc = kmalloc(sizeof(*desc), GFP_KERNEL);
+       if (!desc) {
+               dprintk("%s: failed to allocate hash descriptor for '%s'\n",
+                       __func__, ctx->gk5e->cksum_name);
+               err = -ENOMEM;
                goto out_err_free_hmac;
+       }
+
+       desc->tfm = hmac;
+       desc->flags = 0;
 
-       err = crypto_hash_digest(&desc, sg, slen, ctx->cksum);
+       err = crypto_shash_digest(desc, sigkeyconstant, slen, ctx->cksum);
+       kzfree(desc);
        if (err)
                goto out_err_free_hmac;
        /*
-        * allocate hash, and blkciphers for data and seqnum encryption
+        * allocate hash, and skciphers for data and seqnum encryption
         */
-       ctx->enc = crypto_alloc_blkcipher(ctx->gk5e->encrypt_name, 0,
-                                         CRYPTO_ALG_ASYNC);
+       ctx->enc = crypto_alloc_skcipher(ctx->gk5e->encrypt_name, 0,
+                                        CRYPTO_ALG_ASYNC);
        if (IS_ERR(ctx->enc)) {
                err = PTR_ERR(ctx->enc);
                goto out_err_free_hmac;
        }
 
-       ctx->seq = crypto_alloc_blkcipher(ctx->gk5e->encrypt_name, 0,
-                                         CRYPTO_ALG_ASYNC);
+       ctx->seq = crypto_alloc_skcipher(ctx->gk5e->encrypt_name, 0,
+                                        CRYPTO_ALG_ASYNC);
        if (IS_ERR(ctx->seq)) {
-               crypto_free_blkcipher(ctx->enc);
+               crypto_free_skcipher(ctx->enc);
                err = PTR_ERR(ctx->seq);
                goto out_err_free_hmac;
        }
@@ -486,7 +489,7 @@ context_derive_keys_rc4(struct krb5_ctx *ctx)
        err = 0;
 
 out_err_free_hmac:
-       crypto_free_hash(hmac);
+       crypto_free_shash(hmac);
 out_err:
        dprintk("RPC:       %s: returning %d\n", __func__, err);
        return err;
@@ -588,7 +591,7 @@ context_derive_keys_new(struct krb5_ctx *ctx, gfp_t gfp_mask)
                        context_v2_alloc_cipher(ctx, "cbc(aes)",
                                                ctx->acceptor_seal);
                if (ctx->acceptor_enc_aux == NULL) {
-                       crypto_free_blkcipher(ctx->initiator_enc_aux);
+                       crypto_free_skcipher(ctx->initiator_enc_aux);
                        goto out_free_acceptor_enc;
                }
        }
@@ -596,9 +599,9 @@ context_derive_keys_new(struct krb5_ctx *ctx, gfp_t gfp_mask)
        return 0;
 
 out_free_acceptor_enc:
-       crypto_free_blkcipher(ctx->acceptor_enc);
+       crypto_free_skcipher(ctx->acceptor_enc);
 out_free_initiator_enc:
-       crypto_free_blkcipher(ctx->initiator_enc);
+       crypto_free_skcipher(ctx->initiator_enc);
 out_err:
        return -EINVAL;
 }
@@ -710,12 +713,12 @@ static void
 gss_delete_sec_context_kerberos(void *internal_ctx) {
        struct krb5_ctx *kctx = internal_ctx;
 
-       crypto_free_blkcipher(kctx->seq);
-       crypto_free_blkcipher(kctx->enc);
-       crypto_free_blkcipher(kctx->acceptor_enc);
-       crypto_free_blkcipher(kctx->initiator_enc);
-       crypto_free_blkcipher(kctx->acceptor_enc_aux);
-       crypto_free_blkcipher(kctx->initiator_enc_aux);
+       crypto_free_skcipher(kctx->seq);
+       crypto_free_skcipher(kctx->enc);
+       crypto_free_skcipher(kctx->acceptor_enc);
+       crypto_free_skcipher(kctx->initiator_enc);
+       crypto_free_skcipher(kctx->acceptor_enc_aux);
+       crypto_free_skcipher(kctx->initiator_enc_aux);
        kfree(kctx->mech_used.data);
        kfree(kctx);
 }
index 20d55c793eb657203e40b90779ceb361e2391c32..c8b9082f4a9d67eb4832930c8d62388a18d94b3d 100644 (file)
@@ -31,9 +31,9 @@
  * PERFORMANCE OF THIS SOFTWARE.
  */
 
+#include <crypto/skcipher.h>
 #include <linux/types.h>
 #include <linux/sunrpc/gss_krb5.h>
-#include <linux/crypto.h>
 
 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
 # define RPCDBG_FACILITY        RPCDBG_AUTH
@@ -43,13 +43,13 @@ static s32
 krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum,
                      unsigned char *cksum, unsigned char *buf)
 {
-       struct crypto_blkcipher *cipher;
+       struct crypto_skcipher *cipher;
        unsigned char plain[8];
        s32 code;
 
        dprintk("RPC:       %s:\n", __func__);
-       cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0,
-                                       CRYPTO_ALG_ASYNC);
+       cipher = crypto_alloc_skcipher(kctx->gk5e->encrypt_name, 0,
+                                      CRYPTO_ALG_ASYNC);
        if (IS_ERR(cipher))
                return PTR_ERR(cipher);
 
@@ -68,12 +68,12 @@ krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum,
 
        code = krb5_encrypt(cipher, cksum, plain, buf, 8);
 out:
-       crypto_free_blkcipher(cipher);
+       crypto_free_skcipher(cipher);
        return code;
 }
 s32
 krb5_make_seq_num(struct krb5_ctx *kctx,
-               struct crypto_blkcipher *key,
+               struct crypto_skcipher *key,
                int direction,
                u32 seqnum,
                unsigned char *cksum, unsigned char *buf)
@@ -101,13 +101,13 @@ static s32
 krb5_get_rc4_seq_num(struct krb5_ctx *kctx, unsigned char *cksum,
                     unsigned char *buf, int *direction, s32 *seqnum)
 {
-       struct crypto_blkcipher *cipher;
+       struct crypto_skcipher *cipher;
        unsigned char plain[8];
        s32 code;
 
        dprintk("RPC:       %s:\n", __func__);
-       cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0,
-                                       CRYPTO_ALG_ASYNC);
+       cipher = crypto_alloc_skcipher(kctx->gk5e->encrypt_name, 0,
+                                      CRYPTO_ALG_ASYNC);
        if (IS_ERR(cipher))
                return PTR_ERR(cipher);
 
@@ -130,7 +130,7 @@ krb5_get_rc4_seq_num(struct krb5_ctx *kctx, unsigned char *cksum,
        *seqnum = ((plain[0] << 24) | (plain[1] << 16) |
                                        (plain[2] << 8) | (plain[3]));
 out:
-       crypto_free_blkcipher(cipher);
+       crypto_free_skcipher(cipher);
        return code;
 }
 
@@ -142,7 +142,7 @@ krb5_get_seq_num(struct krb5_ctx *kctx,
 {
        s32 code;
        unsigned char plain[8];
-       struct crypto_blkcipher *key = kctx->seq;
+       struct crypto_skcipher *key = kctx->seq;
 
        dprintk("RPC:       krb5_get_seq_num:\n");
 
index ca7e92a32f84920036c124732d71e029a9f0ce4c..765088e4ad84d073b3587917942b9059717875b3 100644 (file)
  * SUCH DAMAGES.
  */
 
+#include <crypto/skcipher.h>
 #include <linux/types.h>
 #include <linux/jiffies.h>
 #include <linux/sunrpc/gss_krb5.h>
 #include <linux/random.h>
 #include <linux/pagemap.h>
-#include <linux/crypto.h>
 
 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
 # define RPCDBG_FACILITY       RPCDBG_AUTH
@@ -174,7 +174,7 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
 
        now = get_seconds();
 
-       blocksize = crypto_blkcipher_blocksize(kctx->enc);
+       blocksize = crypto_skcipher_blocksize(kctx->enc);
        gss_krb5_add_padding(buf, offset, blocksize);
        BUG_ON((buf->len - offset) % blocksize);
        plainlen = conflen + buf->len - offset;
@@ -239,10 +239,10 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
                return GSS_S_FAILURE;
 
        if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) {
-               struct crypto_blkcipher *cipher;
+               struct crypto_skcipher *cipher;
                int err;
-               cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0,
-                                               CRYPTO_ALG_ASYNC);
+               cipher = crypto_alloc_skcipher(kctx->gk5e->encrypt_name, 0,
+                                              CRYPTO_ALG_ASYNC);
                if (IS_ERR(cipher))
                        return GSS_S_FAILURE;
 
@@ -250,7 +250,7 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
 
                err = gss_encrypt_xdr_buf(cipher, buf,
                                          offset + headlen - conflen, pages);
-               crypto_free_blkcipher(cipher);
+               crypto_free_skcipher(cipher);
                if (err)
                        return GSS_S_FAILURE;
        } else {
@@ -327,18 +327,18 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
                return GSS_S_BAD_SIG;
 
        if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) {
-               struct crypto_blkcipher *cipher;
+               struct crypto_skcipher *cipher;
                int err;
 
-               cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0,
-                                               CRYPTO_ALG_ASYNC);
+               cipher = crypto_alloc_skcipher(kctx->gk5e->encrypt_name, 0,
+                                              CRYPTO_ALG_ASYNC);
                if (IS_ERR(cipher))
                        return GSS_S_FAILURE;
 
                krb5_rc4_setup_enc_key(kctx, cipher, seqnum);
 
                err = gss_decrypt_xdr_buf(cipher, buf, crypt_offset);
-               crypto_free_blkcipher(cipher);
+               crypto_free_skcipher(cipher);
                if (err)
                        return GSS_S_DEFECTIVE_TOKEN;
        } else {
@@ -371,7 +371,7 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
        /* Copy the data back to the right position.  XXX: Would probably be
         * better to copy and encrypt at the same time. */
 
-       blocksize = crypto_blkcipher_blocksize(kctx->enc);
+       blocksize = crypto_skcipher_blocksize(kctx->enc);
        data_start = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) +
                                        conflen;
        orig_start = buf->head[0].iov_base + offset;
@@ -473,7 +473,7 @@ gss_wrap_kerberos_v2(struct krb5_ctx *kctx, u32 offset,
        *ptr++ = 0xff;
        be16ptr = (__be16 *)ptr;
 
-       blocksize = crypto_blkcipher_blocksize(kctx->acceptor_enc);
+       blocksize = crypto_skcipher_blocksize(kctx->acceptor_enc);
        *be16ptr++ = 0;
        /* "inner" token header always uses 0 for RRC */
        *be16ptr++ = 0;
index 3cd8195392416c1ee7f89884c3e0045ede182d41..71447cf863067ca7be13b7c0b5011a44a3645b49 100644 (file)
@@ -29,7 +29,8 @@
 #include <linux/ieee80211.h>
 #include <net/iw_handler.h>
 
-#include <linux/crypto.h>
+#include <crypto/hash.h>
+#include <crypto/skcipher.h>
 #include <linux/crc32.h>
 
 #include <net/lib80211.h>
@@ -63,10 +64,10 @@ struct lib80211_tkip_data {
 
        int key_idx;
 
-       struct crypto_blkcipher *rx_tfm_arc4;
-       struct crypto_hash *rx_tfm_michael;
-       struct crypto_blkcipher *tx_tfm_arc4;
-       struct crypto_hash *tx_tfm_michael;
+       struct crypto_skcipher *rx_tfm_arc4;
+       struct crypto_ahash *rx_tfm_michael;
+       struct crypto_skcipher *tx_tfm_arc4;
+       struct crypto_ahash *tx_tfm_michael;
 
        /* scratch buffers for virt_to_page() (crypto API) */
        u8 rx_hdr[16], tx_hdr[16];
@@ -98,29 +99,29 @@ static void *lib80211_tkip_init(int key_idx)
 
        priv->key_idx = key_idx;
 
-       priv->tx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0,
-                                               CRYPTO_ALG_ASYNC);
+       priv->tx_tfm_arc4 = crypto_alloc_skcipher("ecb(arc4)", 0,
+                                                 CRYPTO_ALG_ASYNC);
        if (IS_ERR(priv->tx_tfm_arc4)) {
                priv->tx_tfm_arc4 = NULL;
                goto fail;
        }
 
-       priv->tx_tfm_michael = crypto_alloc_hash("michael_mic", 0,
-                                                CRYPTO_ALG_ASYNC);
+       priv->tx_tfm_michael = crypto_alloc_ahash("michael_mic", 0,
+                                                 CRYPTO_ALG_ASYNC);
        if (IS_ERR(priv->tx_tfm_michael)) {
                priv->tx_tfm_michael = NULL;
                goto fail;
        }
 
-       priv->rx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0,
-                                               CRYPTO_ALG_ASYNC);
+       priv->rx_tfm_arc4 = crypto_alloc_skcipher("ecb(arc4)", 0,
+                                                 CRYPTO_ALG_ASYNC);
        if (IS_ERR(priv->rx_tfm_arc4)) {
                priv->rx_tfm_arc4 = NULL;
                goto fail;
        }
 
-       priv->rx_tfm_michael = crypto_alloc_hash("michael_mic", 0,
-                                                CRYPTO_ALG_ASYNC);
+       priv->rx_tfm_michael = crypto_alloc_ahash("michael_mic", 0,
+                                                 CRYPTO_ALG_ASYNC);
        if (IS_ERR(priv->rx_tfm_michael)) {
                priv->rx_tfm_michael = NULL;
                goto fail;
@@ -130,14 +131,10 @@ static void *lib80211_tkip_init(int key_idx)
 
       fail:
        if (priv) {
-               if (priv->tx_tfm_michael)
-                       crypto_free_hash(priv->tx_tfm_michael);
-               if (priv->tx_tfm_arc4)
-                       crypto_free_blkcipher(priv->tx_tfm_arc4);
-               if (priv->rx_tfm_michael)
-                       crypto_free_hash(priv->rx_tfm_michael);
-               if (priv->rx_tfm_arc4)
-                       crypto_free_blkcipher(priv->rx_tfm_arc4);
+               crypto_free_ahash(priv->tx_tfm_michael);
+               crypto_free_skcipher(priv->tx_tfm_arc4);
+               crypto_free_ahash(priv->rx_tfm_michael);
+               crypto_free_skcipher(priv->rx_tfm_arc4);
                kfree(priv);
        }
 
@@ -148,14 +145,10 @@ static void lib80211_tkip_deinit(void *priv)
 {
        struct lib80211_tkip_data *_priv = priv;
        if (_priv) {
-               if (_priv->tx_tfm_michael)
-                       crypto_free_hash(_priv->tx_tfm_michael);
-               if (_priv->tx_tfm_arc4)
-                       crypto_free_blkcipher(_priv->tx_tfm_arc4);
-               if (_priv->rx_tfm_michael)
-                       crypto_free_hash(_priv->rx_tfm_michael);
-               if (_priv->rx_tfm_arc4)
-                       crypto_free_blkcipher(_priv->rx_tfm_arc4);
+               crypto_free_ahash(_priv->tx_tfm_michael);
+               crypto_free_skcipher(_priv->tx_tfm_arc4);
+               crypto_free_ahash(_priv->rx_tfm_michael);
+               crypto_free_skcipher(_priv->rx_tfm_arc4);
        }
        kfree(priv);
 }
@@ -353,11 +346,12 @@ static int lib80211_tkip_hdr(struct sk_buff *skb, int hdr_len,
 static int lib80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
 {
        struct lib80211_tkip_data *tkey = priv;
-       struct blkcipher_desc desc = { .tfm = tkey->tx_tfm_arc4 };
+       SKCIPHER_REQUEST_ON_STACK(req, tkey->tx_tfm_arc4);
        int len;
        u8 rc4key[16], *pos, *icv;
        u32 crc;
        struct scatterlist sg;
+       int err;
 
        if (tkey->flags & IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) {
                struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -382,9 +376,14 @@ static int lib80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
        icv[2] = crc >> 16;
        icv[3] = crc >> 24;
 
-       crypto_blkcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
+       crypto_skcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
        sg_init_one(&sg, pos, len + 4);
-       return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4);
+       skcipher_request_set_tfm(req, tkey->tx_tfm_arc4);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL);
+       err = crypto_skcipher_encrypt(req);
+       skcipher_request_zero(req);
+       return err;
 }
 
 /*
@@ -403,7 +402,7 @@ static inline int tkip_replay_check(u32 iv32_n, u16 iv16_n,
 static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
 {
        struct lib80211_tkip_data *tkey = priv;
-       struct blkcipher_desc desc = { .tfm = tkey->rx_tfm_arc4 };
+       SKCIPHER_REQUEST_ON_STACK(req, tkey->rx_tfm_arc4);
        u8 rc4key[16];
        u8 keyidx, *pos;
        u32 iv32;
@@ -413,6 +412,7 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
        u32 crc;
        struct scatterlist sg;
        int plen;
+       int err;
 
        hdr = (struct ieee80211_hdr *)skb->data;
 
@@ -465,9 +465,14 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
 
        plen = skb->len - hdr_len - 12;
 
-       crypto_blkcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
+       crypto_skcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
        sg_init_one(&sg, pos, plen + 4);
-       if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4)) {
+       skcipher_request_set_tfm(req, tkey->rx_tfm_arc4);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL);
+       err = crypto_skcipher_decrypt(req);
+       skcipher_request_zero(req);
+       if (err) {
                net_dbg_ratelimited("TKIP: failed to decrypt received packet from %pM\n",
                                    hdr->addr2);
                return -7;
@@ -505,11 +510,12 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
        return keyidx;
 }
 
-static int michael_mic(struct crypto_hash *tfm_michael, u8 * key, u8 * hdr,
+static int michael_mic(struct crypto_ahash *tfm_michael, u8 * key, u8 * hdr,
                       u8 * data, size_t data_len, u8 * mic)
 {
-       struct hash_desc desc;
+       AHASH_REQUEST_ON_STACK(req, tfm_michael);
        struct scatterlist sg[2];
+       int err;
 
        if (tfm_michael == NULL) {
                pr_warn("%s(): tfm_michael == NULL\n", __func__);
@@ -519,12 +525,15 @@ static int michael_mic(struct crypto_hash *tfm_michael, u8 * key, u8 * hdr,
        sg_set_buf(&sg[0], hdr, 16);
        sg_set_buf(&sg[1], data, data_len);
 
-       if (crypto_hash_setkey(tfm_michael, key, 8))
+       if (crypto_ahash_setkey(tfm_michael, key, 8))
                return -1;
 
-       desc.tfm = tfm_michael;
-       desc.flags = 0;
-       return crypto_hash_digest(&desc, sg, data_len + 16, mic);
+       ahash_request_set_tfm(req, tfm_michael);
+       ahash_request_set_callback(req, 0, NULL, NULL);
+       ahash_request_set_crypt(req, sg, mic, data_len + 16);
+       err = crypto_ahash_digest(req);
+       ahash_request_zero(req);
+       return err;
 }
 
 static void michael_mic_hdr(struct sk_buff *skb, u8 * hdr)
@@ -645,10 +654,10 @@ static int lib80211_tkip_set_key(void *key, int len, u8 * seq, void *priv)
 {
        struct lib80211_tkip_data *tkey = priv;
        int keyidx;
-       struct crypto_hash *tfm = tkey->tx_tfm_michael;
-       struct crypto_blkcipher *tfm2 = tkey->tx_tfm_arc4;
-       struct crypto_hash *tfm3 = tkey->rx_tfm_michael;
-       struct crypto_blkcipher *tfm4 = tkey->rx_tfm_arc4;
+       struct crypto_ahash *tfm = tkey->tx_tfm_michael;
+       struct crypto_skcipher *tfm2 = tkey->tx_tfm_arc4;
+       struct crypto_ahash *tfm3 = tkey->rx_tfm_michael;
+       struct crypto_skcipher *tfm4 = tkey->rx_tfm_arc4;
 
        keyidx = tkey->key_idx;
        memset(tkey, 0, sizeof(*tkey));
index 1c292e4ea7b60682d5b1d3adf4bd62548de9ea72..d05f58b0fd04f5f15a0d1acccaae44d2de7411df 100644 (file)
@@ -22,7 +22,7 @@
 
 #include <net/lib80211.h>
 
-#include <linux/crypto.h>
+#include <crypto/skcipher.h>
 #include <linux/crc32.h>
 
 MODULE_AUTHOR("Jouni Malinen");
@@ -35,8 +35,8 @@ struct lib80211_wep_data {
        u8 key[WEP_KEY_LEN + 1];
        u8 key_len;
        u8 key_idx;
-       struct crypto_blkcipher *tx_tfm;
-       struct crypto_blkcipher *rx_tfm;
+       struct crypto_skcipher *tx_tfm;
+       struct crypto_skcipher *rx_tfm;
 };
 
 static void *lib80211_wep_init(int keyidx)
@@ -48,13 +48,13 @@ static void *lib80211_wep_init(int keyidx)
                goto fail;
        priv->key_idx = keyidx;
 
-       priv->tx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+       priv->tx_tfm = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(priv->tx_tfm)) {
                priv->tx_tfm = NULL;
                goto fail;
        }
 
-       priv->rx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+       priv->rx_tfm = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(priv->rx_tfm)) {
                priv->rx_tfm = NULL;
                goto fail;
@@ -66,10 +66,8 @@ static void *lib80211_wep_init(int keyidx)
 
       fail:
        if (priv) {
-               if (priv->tx_tfm)
-                       crypto_free_blkcipher(priv->tx_tfm);
-               if (priv->rx_tfm)
-                       crypto_free_blkcipher(priv->rx_tfm);
+               crypto_free_skcipher(priv->tx_tfm);
+               crypto_free_skcipher(priv->rx_tfm);
                kfree(priv);
        }
        return NULL;
@@ -79,10 +77,8 @@ static void lib80211_wep_deinit(void *priv)
 {
        struct lib80211_wep_data *_priv = priv;
        if (_priv) {
-               if (_priv->tx_tfm)
-                       crypto_free_blkcipher(_priv->tx_tfm);
-               if (_priv->rx_tfm)
-                       crypto_free_blkcipher(_priv->rx_tfm);
+               crypto_free_skcipher(_priv->tx_tfm);
+               crypto_free_skcipher(_priv->rx_tfm);
        }
        kfree(priv);
 }
@@ -133,11 +129,12 @@ static int lib80211_wep_build_iv(struct sk_buff *skb, int hdr_len,
 static int lib80211_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
 {
        struct lib80211_wep_data *wep = priv;
-       struct blkcipher_desc desc = { .tfm = wep->tx_tfm };
+       SKCIPHER_REQUEST_ON_STACK(req, wep->tx_tfm);
        u32 crc, klen, len;
        u8 *pos, *icv;
        struct scatterlist sg;
        u8 key[WEP_KEY_LEN + 3];
+       int err;
 
        /* other checks are in lib80211_wep_build_iv */
        if (skb_tailroom(skb) < 4)
@@ -165,9 +162,14 @@ static int lib80211_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
        icv[2] = crc >> 16;
        icv[3] = crc >> 24;
 
-       crypto_blkcipher_setkey(wep->tx_tfm, key, klen);
+       crypto_skcipher_setkey(wep->tx_tfm, key, klen);
        sg_init_one(&sg, pos, len + 4);
-       return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4);
+       skcipher_request_set_tfm(req, wep->tx_tfm);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL);
+       err = crypto_skcipher_encrypt(req);
+       skcipher_request_zero(req);
+       return err;
 }
 
 /* Perform WEP decryption on given buffer. Buffer includes whole WEP part of
@@ -180,11 +182,12 @@ static int lib80211_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
 static int lib80211_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
 {
        struct lib80211_wep_data *wep = priv;
-       struct blkcipher_desc desc = { .tfm = wep->rx_tfm };
+       SKCIPHER_REQUEST_ON_STACK(req, wep->rx_tfm);
        u32 crc, klen, plen;
        u8 key[WEP_KEY_LEN + 3];
        u8 keyidx, *pos, icv[4];
        struct scatterlist sg;
+       int err;
 
        if (skb->len < hdr_len + 8)
                return -1;
@@ -205,9 +208,14 @@ static int lib80211_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
        /* Apply RC4 to data and compute CRC32 over decrypted data */
        plen = skb->len - hdr_len - 8;
 
-       crypto_blkcipher_setkey(wep->rx_tfm, key, klen);
+       crypto_skcipher_setkey(wep->rx_tfm, key, klen);
        sg_init_one(&sg, pos, plen + 4);
-       if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4))
+       skcipher_request_set_tfm(req, wep->rx_tfm);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL);
+       err = crypto_skcipher_decrypt(req);
+       skcipher_request_zero(req);
+       if (err)
                return -7;
 
        crc = ~crc32_le(~0, pos, plen);
index f07224d8b88f6a2479de02ce944181edcd2576ab..250e567ba3d636c8f9103a7949b7d1b5208b77ee 100644 (file)
@@ -9,6 +9,8 @@
  * any later version.
  */
 
+#include <crypto/hash.h>
+#include <crypto/skcipher.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/pfkeyv2.h>
@@ -782,14 +784,13 @@ void xfrm_probe_algs(void)
        BUG_ON(in_softirq());
 
        for (i = 0; i < aalg_entries(); i++) {
-               status = crypto_has_hash(aalg_list[i].name, 0,
-                                        CRYPTO_ALG_ASYNC);
+               status = crypto_has_ahash(aalg_list[i].name, 0, 0);
                if (aalg_list[i].available != status)
                        aalg_list[i].available = status;
        }
 
        for (i = 0; i < ealg_entries(); i++) {
-               status = crypto_has_ablkcipher(ealg_list[i].name, 0, 0);
+               status = crypto_has_skcipher(ealg_list[i].name, 0, 0);
                if (ealg_list[i].available != status)
                        ealg_list[i].available = status;
        }
index 696ccfa08d103cd29ae56ac38c117bbd7725da06..5adbfc32242f81b0f6396fd7d0656c5aa4eca6ff 100644 (file)
 #include <linux/random.h>
 #include <linux/rcupdate.h>
 #include <linux/scatterlist.h>
-#include <linux/crypto.h>
 #include <linux/ctype.h>
 #include <crypto/hash.h>
 #include <crypto/sha.h>
-#include <crypto/aes.h>
+#include <crypto/skcipher.h>
 
 #include "encrypted.h"
 #include "ecryptfs_format.h"
@@ -85,17 +84,17 @@ static const match_table_t key_tokens = {
 
 static int aes_get_sizes(void)
 {
-       struct crypto_blkcipher *tfm;
+       struct crypto_skcipher *tfm;
 
-       tfm = crypto_alloc_blkcipher(blkcipher_alg, 0, CRYPTO_ALG_ASYNC);
+       tfm = crypto_alloc_skcipher(blkcipher_alg, 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm)) {
                pr_err("encrypted_key: failed to alloc_cipher (%ld)\n",
                       PTR_ERR(tfm));
                return PTR_ERR(tfm);
        }
-       ivsize = crypto_blkcipher_ivsize(tfm);
-       blksize = crypto_blkcipher_blocksize(tfm);
-       crypto_free_blkcipher(tfm);
+       ivsize = crypto_skcipher_ivsize(tfm);
+       blksize = crypto_skcipher_blocksize(tfm);
+       crypto_free_skcipher(tfm);
        return 0;
 }
 
@@ -401,28 +400,37 @@ static int get_derived_key(u8 *derived_key, enum derived_key_type key_type,
        return ret;
 }
 
-static int init_blkcipher_desc(struct blkcipher_desc *desc, const u8 *key,
-                              unsigned int key_len, const u8 *iv,
-                              unsigned int ivsize)
+static struct skcipher_request *init_skcipher_req(const u8 *key,
+                                                 unsigned int key_len)
 {
+       struct skcipher_request *req;
+       struct crypto_skcipher *tfm;
        int ret;
 
-       desc->tfm = crypto_alloc_blkcipher(blkcipher_alg, 0, CRYPTO_ALG_ASYNC);
-       if (IS_ERR(desc->tfm)) {
+       tfm = crypto_alloc_skcipher(blkcipher_alg, 0, CRYPTO_ALG_ASYNC);
+       if (IS_ERR(tfm)) {
                pr_err("encrypted_key: failed to load %s transform (%ld)\n",
-                      blkcipher_alg, PTR_ERR(desc->tfm));
-               return PTR_ERR(desc->tfm);
+                      blkcipher_alg, PTR_ERR(tfm));
+               return ERR_CAST(tfm);
        }
-       desc->flags = 0;
 
-       ret = crypto_blkcipher_setkey(desc->tfm, key, key_len);
+       ret = crypto_skcipher_setkey(tfm, key, key_len);
        if (ret < 0) {
                pr_err("encrypted_key: failed to setkey (%d)\n", ret);
-               crypto_free_blkcipher(desc->tfm);
-               return ret;
+               crypto_free_skcipher(tfm);
+               return ERR_PTR(ret);
        }
-       crypto_blkcipher_set_iv(desc->tfm, iv, ivsize);
-       return 0;
+
+       req = skcipher_request_alloc(tfm, GFP_KERNEL);
+       if (!req) {
+               pr_err("encrypted_key: failed to allocate request for %s\n",
+                      blkcipher_alg);
+               crypto_free_skcipher(tfm);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       return req;
 }
 
 static struct key *request_master_key(struct encrypted_key_payload *epayload,
@@ -467,7 +475,8 @@ static int derived_key_encrypt(struct encrypted_key_payload *epayload,
 {
        struct scatterlist sg_in[2];
        struct scatterlist sg_out[1];
-       struct blkcipher_desc desc;
+       struct crypto_skcipher *tfm;
+       struct skcipher_request *req;
        unsigned int encrypted_datalen;
        unsigned int padlen;
        char pad[16];
@@ -476,9 +485,9 @@ static int derived_key_encrypt(struct encrypted_key_payload *epayload,
        encrypted_datalen = roundup(epayload->decrypted_datalen, blksize);
        padlen = encrypted_datalen - epayload->decrypted_datalen;
 
-       ret = init_blkcipher_desc(&desc, derived_key, derived_keylen,
-                                 epayload->iv, ivsize);
-       if (ret < 0)
+       req = init_skcipher_req(derived_key, derived_keylen);
+       ret = PTR_ERR(req);
+       if (IS_ERR(req))
                goto out;
        dump_decrypted_data(epayload);
 
@@ -491,8 +500,12 @@ static int derived_key_encrypt(struct encrypted_key_payload *epayload,
        sg_init_table(sg_out, 1);
        sg_set_buf(sg_out, epayload->encrypted_data, encrypted_datalen);
 
-       ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in, encrypted_datalen);
-       crypto_free_blkcipher(desc.tfm);
+       skcipher_request_set_crypt(req, sg_in, sg_out, encrypted_datalen,
+                                  epayload->iv);
+       ret = crypto_skcipher_encrypt(req);
+       tfm = crypto_skcipher_reqtfm(req);
+       skcipher_request_free(req);
+       crypto_free_skcipher(tfm);
        if (ret < 0)
                pr_err("encrypted_key: failed to encrypt (%d)\n", ret);
        else
@@ -565,15 +578,16 @@ static int derived_key_decrypt(struct encrypted_key_payload *epayload,
 {
        struct scatterlist sg_in[1];
        struct scatterlist sg_out[2];
-       struct blkcipher_desc desc;
+       struct crypto_skcipher *tfm;
+       struct skcipher_request *req;
        unsigned int encrypted_datalen;
        char pad[16];
        int ret;
 
        encrypted_datalen = roundup(epayload->decrypted_datalen, blksize);
-       ret = init_blkcipher_desc(&desc, derived_key, derived_keylen,
-                                 epayload->iv, ivsize);
-       if (ret < 0)
+       req = init_skcipher_req(derived_key, derived_keylen);
+       ret = PTR_ERR(req);
+       if (IS_ERR(req))
                goto out;
        dump_encrypted_data(epayload, encrypted_datalen);
 
@@ -585,8 +599,12 @@ static int derived_key_decrypt(struct encrypted_key_payload *epayload,
                   epayload->decrypted_datalen);
        sg_set_buf(&sg_out[1], pad, sizeof pad);
 
-       ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, encrypted_datalen);
-       crypto_free_blkcipher(desc.tfm);
+       skcipher_request_set_crypt(req, sg_in, sg_out, encrypted_datalen,
+                                  epayload->iv);
+       ret = crypto_skcipher_decrypt(req);
+       tfm = crypto_skcipher_reqtfm(req);
+       skcipher_request_free(req);
+       crypto_free_skcipher(tfm);
        if (ret < 0)
                goto out;
        dump_decrypted_data(epayload);