crypto: arm64/aes-ce - fix for big endian
author Ard Biesheuvel <ard.biesheuvel@linaro.org>
Tue, 11 Oct 2016 18:15:13 +0000 (19:15 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 12 Jan 2017 10:39:34 +0000 (11:39 +0100)
commit 1803b9a52c4e5a5dbb8a27126f6bc06939359753 upstream.

The core AES cipher implementation that uses ARMv8 Crypto Extensions
instructions erroneously loads the round keys as 64-bit quantities,
which causes the algorithm to fail when built for big endian. In
addition, the key schedule generation routine likewise fails to take
endianness into account when combining the input key with the round
constants. So fix both issues.

Fixes: 12ac3efe74f8 ("arm64/crypto: use crypto instructions to generate AES key schedule")
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
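
To see why the 64-bit loads fail, note that ld1 {v.2d} loads each
doubleword according to CPU endianness, whereas ld1 {v.16b} loads bytes
in memory order on either endianness. The standalone C sketch below is
an analogue of the difference using scalar loads (for illustration
only, not part of the patch):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t key[16];
	uint64_t lane[2];
	int i;

	/* round key bytes 00 01 02 ... 0f in memory order */
	for (i = 0; i < 16; i++)
		key[i] = i;

	/* analogue of ld1 {v.2d}: two native-endian 64-bit loads */
	memcpy(lane, key, sizeof(lane));

	/*
	 * Prints 0706050403020100 on little endian but 0001020304050607
	 * on big endian: the byte positions within each 64-bit lane
	 * differ, so the AES rounds see a byte-swapped round key. A
	 * byte-wise load (ld1 {v.16b}) is endian-invariant.
	 */
	printf("%016llx\n", (unsigned long long)lane[0]);
	return 0;
}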
arch/arm64/crypto/aes-ce-cipher.c

index f7bd9bf0bbb398bbb2a355c1b24d1f9c035bacf4..50d9fe11d0c862bbd9cdd75855bfbc0fbffcfea7 100644
@@ -47,24 +47,24 @@ static void aes_cipher_encrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
        kernel_neon_begin_partial(4);
 
        __asm__("       ld1     {v0.16b}, %[in]                 ;"
-               "       ld1     {v1.2d}, [%[key]], #16          ;"
+               "       ld1     {v1.16b}, [%[key]], #16         ;"
                "       cmp     %w[rounds], #10                 ;"
                "       bmi     0f                              ;"
                "       bne     3f                              ;"
                "       mov     v3.16b, v1.16b                  ;"
                "       b       2f                              ;"
                "0:     mov     v2.16b, v1.16b                  ;"
-               "       ld1     {v3.2d}, [%[key]], #16          ;"
+               "       ld1     {v3.16b}, [%[key]], #16         ;"
                "1:     aese    v0.16b, v2.16b                  ;"
                "       aesmc   v0.16b, v0.16b                  ;"
-               "2:     ld1     {v1.2d}, [%[key]], #16          ;"
+               "2:     ld1     {v1.16b}, [%[key]], #16         ;"
                "       aese    v0.16b, v3.16b                  ;"
                "       aesmc   v0.16b, v0.16b                  ;"
-               "3:     ld1     {v2.2d}, [%[key]], #16          ;"
+               "3:     ld1     {v2.16b}, [%[key]], #16         ;"
                "       subs    %w[rounds], %w[rounds], #3      ;"
                "       aese    v0.16b, v1.16b                  ;"
                "       aesmc   v0.16b, v0.16b                  ;"
-               "       ld1     {v3.2d}, [%[key]], #16          ;"
+               "       ld1     {v3.16b}, [%[key]], #16         ;"
                "       bpl     1b                              ;"
                "       aese    v0.16b, v2.16b                  ;"
                "       eor     v0.16b, v0.16b, v3.16b          ;"
@@ -92,24 +92,24 @@ static void aes_cipher_decrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
        kernel_neon_begin_partial(4);
 
        __asm__("       ld1     {v0.16b}, %[in]                 ;"
-               "       ld1     {v1.2d}, [%[key]], #16          ;"
+               "       ld1     {v1.16b}, [%[key]], #16         ;"
                "       cmp     %w[rounds], #10                 ;"
                "       bmi     0f                              ;"
                "       bne     3f                              ;"
                "       mov     v3.16b, v1.16b                  ;"
                "       b       2f                              ;"
                "0:     mov     v2.16b, v1.16b                  ;"
-               "       ld1     {v3.2d}, [%[key]], #16          ;"
+               "       ld1     {v3.16b}, [%[key]], #16         ;"
                "1:     aesd    v0.16b, v2.16b                  ;"
                "       aesimc  v0.16b, v0.16b                  ;"
-               "2:     ld1     {v1.2d}, [%[key]], #16          ;"
+               "2:     ld1     {v1.16b}, [%[key]], #16         ;"
                "       aesd    v0.16b, v3.16b                  ;"
                "       aesimc  v0.16b, v0.16b                  ;"
-               "3:     ld1     {v2.2d}, [%[key]], #16          ;"
+               "3:     ld1     {v2.16b}, [%[key]], #16         ;"
                "       subs    %w[rounds], %w[rounds], #3      ;"
                "       aesd    v0.16b, v1.16b                  ;"
                "       aesimc  v0.16b, v0.16b                  ;"
-               "       ld1     {v3.2d}, [%[key]], #16          ;"
+               "       ld1     {v3.16b}, [%[key]], #16         ;"
                "       bpl     1b                              ;"
                "       aesd    v0.16b, v2.16b                  ;"
                "       eor     v0.16b, v0.16b, v3.16b          ;"
@@ -173,7 +173,12 @@ int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
                u32 *rki = ctx->key_enc + (i * kwords);
                u32 *rko = rki + kwords;
 
+#ifndef CONFIG_CPU_BIG_ENDIAN
                rko[0] = ror32(aes_sub(rki[kwords - 1]), 8) ^ rcon[i] ^ rki[0];
+#else
+               rko[0] = rol32(aes_sub(rki[kwords - 1]), 8) ^ (rcon[i] << 24) ^
+                        rki[0];
+#endif
                rko[1] = rko[0] ^ rki[1];
                rko[2] = rko[1] ^ rki[2];
                rko[3] = rko[2] ^ rki[3];
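
The #ifdef works because rki[]/rko[] hold CPU-native u32 words while
the AES key schedule is specified on byte sequences: on big endian the
bytes of each word sit in the opposite order, so RotWord flips from
ror32 to rol32, and the round constant, which is XORed into the first
byte of the word, moves from the low byte to the top byte. A standalone
sketch of the equivalence (for illustration only; SubWord is omitted
since it is byte-wise and endian-neutral):

#include <stdint.h>
#include <stdio.h>

/* first key-schedule step on a native u32, little-endian byte order */
static uint32_t step_le(uint32_t w, uint32_t rcon)
{
	return ((w >> 8) | (w << 24)) ^ rcon;		/* ror32(w, 8) ^ rcon */
}

/* the same step when the u32 was loaded on a big-endian CPU */
static uint32_t step_be(uint32_t w, uint32_t rcon)
{
	return ((w << 8) | (w >> 24)) ^ (rcon << 24);	/* rol32(w, 8) ^ rcon << 24 */
}

int main(void)
{
	/* byte sequence 01 02 03 04 as seen by a LE and a BE u32 load */
	uint32_t le = 0x04030201, be = 0x01020304, rcon = 0x1b;

	/*
	 * Both results encode the byte sequence 19 03 04 01
	 * (RotWord gives 02 03 04 01, then 02 ^ 1b = 19):
	 * LE prints 01040319, BE prints 19030401.
	 */
	printf("LE: %08x  BE: %08x\n",
	       (unsigned)step_le(le, rcon), (unsigned)step_be(be, rcon));
	return 0;
}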