diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
index ca7e92a32f84920036c124732d71e029a9f0ce4c..765088e4ad84d073b3587917942b9059717875b3 100644
--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
  * SUCH DAMAGES.
  */
 
+#include <crypto/skcipher.h>
 #include <linux/types.h>
 #include <linux/jiffies.h>
 #include <linux/sunrpc/gss_krb5.h>
 #include <linux/random.h>
 #include <linux/pagemap.h>
-#include <linux/crypto.h>
 
 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
 # define RPCDBG_FACILITY       RPCDBG_AUTH
@@ -174,7 +174,7 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
 
        now = get_seconds();
 
-       blocksize = crypto_blkcipher_blocksize(kctx->enc);
+       blocksize = crypto_skcipher_blocksize(kctx->enc);
        gss_krb5_add_padding(buf, offset, blocksize);
        BUG_ON((buf->len - offset) % blocksize);
        plainlen = conflen + buf->len - offset;
@@ -239,10 +239,10 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
                return GSS_S_FAILURE;
 
        if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) {
-               struct crypto_blkcipher *cipher;
+               struct crypto_skcipher *cipher;
                int err;
-               cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0,
-                                               CRYPTO_ALG_ASYNC);
+               cipher = crypto_alloc_skcipher(kctx->gk5e->encrypt_name, 0,
+                                              CRYPTO_ALG_ASYNC);
                if (IS_ERR(cipher))
                        return GSS_S_FAILURE;
 
@@ -250,7 +250,7 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
 
                err = gss_encrypt_xdr_buf(cipher, buf,
                                          offset + headlen - conflen, pages);
-               crypto_free_blkcipher(cipher);
+               crypto_free_skcipher(cipher);
                if (err)
                        return GSS_S_FAILURE;
        } else {
@@ -327,18 +327,18 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
                return GSS_S_BAD_SIG;
 
        if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) {
-               struct crypto_blkcipher *cipher;
+               struct crypto_skcipher *cipher;
                int err;
 
-               cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0,
-                                               CRYPTO_ALG_ASYNC);
+               cipher = crypto_alloc_skcipher(kctx->gk5e->encrypt_name, 0,
+                                              CRYPTO_ALG_ASYNC);
                if (IS_ERR(cipher))
                        return GSS_S_FAILURE;
 
                krb5_rc4_setup_enc_key(kctx, cipher, seqnum);
 
                err = gss_decrypt_xdr_buf(cipher, buf, crypt_offset);
-               crypto_free_blkcipher(cipher);
+               crypto_free_skcipher(cipher);
                if (err)
                        return GSS_S_DEFECTIVE_TOKEN;
        } else {
@@ -371,7 +371,7 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
        /* Copy the data back to the right position.  XXX: Would probably be
         * better to copy and encrypt at the same time. */
 
-       blocksize = crypto_blkcipher_blocksize(kctx->enc);
+       blocksize = crypto_skcipher_blocksize(kctx->enc);
        data_start = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) +
                                        conflen;
        orig_start = buf->head[0].iov_base + offset;
@@ -473,7 +473,7 @@ gss_wrap_kerberos_v2(struct krb5_ctx *kctx, u32 offset,
        *ptr++ = 0xff;
        be16ptr = (__be16 *)ptr;
 
-       blocksize = crypto_blkcipher_blocksize(kctx->acceptor_enc);
+       blocksize = crypto_skcipher_blocksize(kctx->acceptor_enc);
        *be16ptr++ = 0;
        /* "inner" token header always uses 0 for RRC */
        *be16ptr++ = 0;
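
For reference, the pattern this patch applies throughout the file is: allocate a synchronous skcipher transform by algorithm name, query it (e.g. for its block size), and free it when done, replacing the old crypto_blkcipher calls one for one. The sketch below is illustrative only and is not part of the patch; the algorithm name "cbc(des3_ede)" and the function name example_skcipher_blocksize are placeholders chosen for the example, not names taken from this file.

/* Illustrative sketch only -- not part of this patch. Demonstrates the
 * crypto_skcipher allocation/query/free pattern used in the hunks above.
 * The algorithm name is a placeholder. */
#include <crypto/skcipher.h>
#include <linux/err.h>

static int example_skcipher_blocksize(unsigned int *blocksize)
{
	struct crypto_skcipher *tfm;

	/* Passing CRYPTO_ALG_ASYNC in the mask, as the patch does, filters
	 * out async-only implementations so the transform can be driven
	 * synchronously. */
	tfm = crypto_alloc_skcipher("cbc(des3_ede)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	*blocksize = crypto_skcipher_blocksize(tfm);

	crypto_free_skcipher(tfm);
	return 0;
}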