/*
 * Source: arch/x86/crypto/aesni-intel_glue.c (karo-tx-linux.git)
 * Snapshot of commit: "crypto: aesni - Fix cryptd reordering problem on gcm"
 */
1 /*
2  * Support for Intel AES-NI instructions. This file contains glue
3  * code, the real AES implementation is in intel-aes_asm.S.
4  *
5  * Copyright (C) 2008, Intel Corp.
6  *    Author: Huang Ying <ying.huang@intel.com>
7  *
8  * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
9  * interface for 64-bit kernels.
10  *    Authors: Adrian Hoban <adrian.hoban@intel.com>
11  *             Gabriele Paoloni <gabriele.paoloni@intel.com>
12  *             Tadeusz Struk (tadeusz.struk@intel.com)
13  *             Aidan O'Mahony (aidan.o.mahony@intel.com)
14  *    Copyright (c) 2010, Intel Corporation.
15  *
16  * This program is free software; you can redistribute it and/or modify
17  * it under the terms of the GNU General Public License as published by
18  * the Free Software Foundation; either version 2 of the License, or
19  * (at your option) any later version.
20  */
21
22 #include <linux/hardirq.h>
23 #include <linux/types.h>
24 #include <linux/crypto.h>
25 #include <linux/module.h>
26 #include <linux/err.h>
27 #include <crypto/algapi.h>
28 #include <crypto/aes.h>
29 #include <crypto/cryptd.h>
30 #include <crypto/ctr.h>
31 #include <crypto/b128ops.h>
32 #include <crypto/lrw.h>
33 #include <crypto/xts.h>
34 #include <asm/cpu_device_id.h>
35 #include <asm/fpu/api.h>
36 #include <asm/crypto/aes.h>
37 #include <crypto/ablk_helper.h>
38 #include <crypto/scatterwalk.h>
39 #include <crypto/internal/aead.h>
40 #include <linux/workqueue.h>
41 #include <linux/spinlock.h>
42 #ifdef CONFIG_X86_64
43 #include <asm/crypto/glue_helper.h>
44 #endif
45
46
47 #define AESNI_ALIGN     16
48 #define AES_BLOCK_MASK  (~(AES_BLOCK_SIZE - 1))
49 #define RFC4106_HASH_SUBKEY_SIZE 16
50
51 /* This data is stored at the end of the crypto_tfm struct.
52  * It's a type of per "session" data storage location.
53  * This needs to be 16 byte aligned.
54  */
/*
 * Per-tfm context for rfc4106(gcm(aes)).  Stored at the end of the
 * crypto_tfm struct (a per-"session" storage location) and needs to be
 * 16-byte aligned; aesni_rfc4106_gcm_ctx_get() re-aligns the raw
 * context pointer at run time.
 */
struct aesni_rfc4106_gcm_ctx {
	/* GHASH hash subkey, derived by encrypting an all-zero block
	 * (see rfc4106_set_hash_subkey()) */
	u8 hash_subkey[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
	/* expanded AES key schedule consumed by the asm GCM routines */
	struct crypto_aes_ctx aes_key_expanded
		__attribute__ ((__aligned__(AESNI_ALIGN)));
	/* presumably the RFC4106 salt portion of the key material —
	 * TODO confirm against the setkey path (not in this chunk) */
	u8 nonce[4];
};
61
/*
 * Result slot plus completion used to wait for the asynchronous CTR
 * request that derives the GCM hash subkey (completed from
 * rfc4106_set_hash_subkey_done()).
 */
struct aesni_gcm_set_hash_subkey_result {
	int err;			/* final status of the async request */
	struct completion completion;	/* signalled when the request finishes */
};
66
/*
 * Bundled state for the one-off ablkcipher request that computes the
 * hash subkey: zeroed IV, completion/result, and a single-entry
 * scatterlist pointing at the subkey buffer.
 */
struct aesni_hash_subkey_req_data {
	u8 iv[16];	/* zeroed before use (see rfc4106_set_hash_subkey()) */
	struct aesni_gcm_set_hash_subkey_result result;
	struct scatterlist sg;
};
72
/*
 * lrw(aes) context: the LRW tweak table plus a raw AES context buffer.
 * The buffer is over-allocated by AESNI_ALIGN - 1 bytes so that
 * aes_ctx() can re-align it to a 16-byte boundary at run time.
 */
struct aesni_lrw_ctx {
	struct lrw_table_ctx lrw_table;
	u8 raw_aes_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
};
77
/*
 * xts(aes) context: one raw AES context for the tweak key and one for
 * the data key, each over-allocated by AESNI_ALIGN - 1 bytes so that
 * aes_ctx() can re-align them to a 16-byte boundary at run time.
 */
struct aesni_xts_ctx {
	u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
	u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
};
82
83 asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
84                              unsigned int key_len);
85 asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
86                           const u8 *in);
87 asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
88                           const u8 *in);
89 asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
90                               const u8 *in, unsigned int len);
91 asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
92                               const u8 *in, unsigned int len);
93 asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
94                               const u8 *in, unsigned int len, u8 *iv);
95 asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
96                               const u8 *in, unsigned int len, u8 *iv);
97
98 int crypto_fpu_init(void);
99 void crypto_fpu_exit(void);
100
101 #define AVX_GEN2_OPTSIZE 640
102 #define AVX_GEN4_OPTSIZE 4096
103
104 #ifdef CONFIG_X86_64
105
106 static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
107                               const u8 *in, unsigned int len, u8 *iv);
108 asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
109                               const u8 *in, unsigned int len, u8 *iv);
110
111 asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
112                                  const u8 *in, bool enc, u8 *iv);
113
114 /* asmlinkage void aesni_gcm_enc()
115  * void *ctx,  AES Key schedule. Starts on a 16 byte boundary.
116  * u8 *out, Ciphertext output. Encrypt in-place is allowed.
117  * const u8 *in, Plaintext input
118  * unsigned long plaintext_len, Length of data in bytes for encryption.
119  * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
120  *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
121  *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
122  * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
123  * const u8 *aad, Additional Authentication Data (AAD)
124  * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this
125  *          is going to be 8 or 12 bytes
126  * u8 *auth_tag, Authenticated Tag output.
127  * unsigned long auth_tag_len), Authenticated Tag Length in bytes.
128  *          Valid values are 16 (most likely), 12 or 8.
129  */
130 asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
131                         const u8 *in, unsigned long plaintext_len, u8 *iv,
132                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
133                         u8 *auth_tag, unsigned long auth_tag_len);
134
135 /* asmlinkage void aesni_gcm_dec()
136  * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
137  * u8 *out, Plaintext output. Decrypt in-place is allowed.
138  * const u8 *in, Ciphertext input
139  * unsigned long ciphertext_len, Length of data in bytes for decryption.
140  * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
141  *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
142  *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
143  * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
144  * const u8 *aad, Additional Authentication Data (AAD)
145  * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
146  * to be 8 or 12 bytes
147  * u8 *auth_tag, Authenticated Tag output.
148  * unsigned long auth_tag_len) Authenticated Tag Length in bytes.
149  * Valid values are 16 (most likely), 12 or 8.
150  */
151 asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
152                         const u8 *in, unsigned long ciphertext_len, u8 *iv,
153                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
154                         u8 *auth_tag, unsigned long auth_tag_len);
155
156
157 #ifdef CONFIG_AS_AVX
158 asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
159                 void *keys, u8 *out, unsigned int num_bytes);
160 asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
161                 void *keys, u8 *out, unsigned int num_bytes);
162 asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv,
163                 void *keys, u8 *out, unsigned int num_bytes);
164 /*
165  * asmlinkage void aesni_gcm_precomp_avx_gen2()
166  * gcm_data *my_ctx_data, context data
167  * u8 *hash_subkey,  the Hash sub key input. Data starts on a 16-byte boundary.
168  */
169 asmlinkage void aesni_gcm_precomp_avx_gen2(void *my_ctx_data, u8 *hash_subkey);
170
171 asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx, u8 *out,
172                         const u8 *in, unsigned long plaintext_len, u8 *iv,
173                         const u8 *aad, unsigned long aad_len,
174                         u8 *auth_tag, unsigned long auth_tag_len);
175
176 asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx, u8 *out,
177                         const u8 *in, unsigned long ciphertext_len, u8 *iv,
178                         const u8 *aad, unsigned long aad_len,
179                         u8 *auth_tag, unsigned long auth_tag_len);
180
181 static void aesni_gcm_enc_avx(void *ctx, u8 *out,
182                         const u8 *in, unsigned long plaintext_len, u8 *iv,
183                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
184                         u8 *auth_tag, unsigned long auth_tag_len)
185 {
186         struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
187         if ((plaintext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)){
188                 aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
189                                 aad_len, auth_tag, auth_tag_len);
190         } else {
191                 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
192                 aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
193                                         aad_len, auth_tag, auth_tag_len);
194         }
195 }
196
197 static void aesni_gcm_dec_avx(void *ctx, u8 *out,
198                         const u8 *in, unsigned long ciphertext_len, u8 *iv,
199                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
200                         u8 *auth_tag, unsigned long auth_tag_len)
201 {
202         struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
203         if ((ciphertext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)) {
204                 aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey, aad,
205                                 aad_len, auth_tag, auth_tag_len);
206         } else {
207                 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
208                 aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
209                                         aad_len, auth_tag, auth_tag_len);
210         }
211 }
212 #endif
213
214 #ifdef CONFIG_AS_AVX2
215 /*
216  * asmlinkage void aesni_gcm_precomp_avx_gen4()
217  * gcm_data *my_ctx_data, context data
218  * u8 *hash_subkey,  the Hash sub key input. Data starts on a 16-byte boundary.
219  */
220 asmlinkage void aesni_gcm_precomp_avx_gen4(void *my_ctx_data, u8 *hash_subkey);
221
222 asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx, u8 *out,
223                         const u8 *in, unsigned long plaintext_len, u8 *iv,
224                         const u8 *aad, unsigned long aad_len,
225                         u8 *auth_tag, unsigned long auth_tag_len);
226
227 asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx, u8 *out,
228                         const u8 *in, unsigned long ciphertext_len, u8 *iv,
229                         const u8 *aad, unsigned long aad_len,
230                         u8 *auth_tag, unsigned long auth_tag_len);
231
232 static void aesni_gcm_enc_avx2(void *ctx, u8 *out,
233                         const u8 *in, unsigned long plaintext_len, u8 *iv,
234                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
235                         u8 *auth_tag, unsigned long auth_tag_len)
236 {
237        struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
238         if ((plaintext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)) {
239                 aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
240                                 aad_len, auth_tag, auth_tag_len);
241         } else if (plaintext_len < AVX_GEN4_OPTSIZE) {
242                 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
243                 aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
244                                         aad_len, auth_tag, auth_tag_len);
245         } else {
246                 aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
247                 aesni_gcm_enc_avx_gen4(ctx, out, in, plaintext_len, iv, aad,
248                                         aad_len, auth_tag, auth_tag_len);
249         }
250 }
251
252 static void aesni_gcm_dec_avx2(void *ctx, u8 *out,
253                         const u8 *in, unsigned long ciphertext_len, u8 *iv,
254                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
255                         u8 *auth_tag, unsigned long auth_tag_len)
256 {
257        struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
258         if ((ciphertext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)) {
259                 aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey,
260                                 aad, aad_len, auth_tag, auth_tag_len);
261         } else if (ciphertext_len < AVX_GEN4_OPTSIZE) {
262                 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
263                 aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
264                                         aad_len, auth_tag, auth_tag_len);
265         } else {
266                 aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
267                 aesni_gcm_dec_avx_gen4(ctx, out, in, ciphertext_len, iv, aad,
268                                         aad_len, auth_tag, auth_tag_len);
269         }
270 }
271 #endif
272
273 static void (*aesni_gcm_enc_tfm)(void *ctx, u8 *out,
274                         const u8 *in, unsigned long plaintext_len, u8 *iv,
275                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
276                         u8 *auth_tag, unsigned long auth_tag_len);
277
278 static void (*aesni_gcm_dec_tfm)(void *ctx, u8 *out,
279                         const u8 *in, unsigned long ciphertext_len, u8 *iv,
280                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
281                         u8 *auth_tag, unsigned long auth_tag_len);
282
283 static inline struct
284 aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
285 {
286         unsigned long align = AESNI_ALIGN;
287
288         if (align <= crypto_tfm_ctx_alignment())
289                 align = 1;
290         return PTR_ALIGN(crypto_aead_ctx(tfm), align);
291 }
292 #endif
293
294 static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
295 {
296         unsigned long addr = (unsigned long)raw_ctx;
297         unsigned long align = AESNI_ALIGN;
298
299         if (align <= crypto_tfm_ctx_alignment())
300                 align = 1;
301         return (struct crypto_aes_ctx *)ALIGN(addr, align);
302 }
303
304 static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
305                               const u8 *in_key, unsigned int key_len)
306 {
307         struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
308         u32 *flags = &tfm->crt_flags;
309         int err;
310
311         if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
312             key_len != AES_KEYSIZE_256) {
313                 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
314                 return -EINVAL;
315         }
316
317         if (!irq_fpu_usable())
318                 err = crypto_aes_expand_key(ctx, in_key, key_len);
319         else {
320                 kernel_fpu_begin();
321                 err = aesni_set_key(ctx, in_key, key_len);
322                 kernel_fpu_end();
323         }
324
325         return err;
326 }
327
/*
 * setkey for the plain "aes" cipher: the AES context lives directly in
 * the tfm context and is re-aligned inside aes_set_key_common().
 */
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                       unsigned int key_len)
{
        return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
}
333
334 static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
335 {
336         struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
337
338         if (!irq_fpu_usable())
339                 crypto_aes_encrypt_x86(ctx, dst, src);
340         else {
341                 kernel_fpu_begin();
342                 aesni_enc(ctx, dst, src);
343                 kernel_fpu_end();
344         }
345 }
346
347 static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
348 {
349         struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
350
351         if (!irq_fpu_usable())
352                 crypto_aes_decrypt_x86(ctx, dst, src);
353         else {
354                 kernel_fpu_begin();
355                 aesni_dec(ctx, dst, src);
356                 kernel_fpu_end();
357         }
358 }
359
/*
 * Single-block encrypt without FPU management.  NOTE(review): unlike
 * aes_encrypt() above there is no kernel_fpu_begin()/end() here, so the
 * caller is presumably expected to hold the FPU already — confirm at
 * the call sites (not visible in this chunk).
 */
static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

        aesni_enc(ctx, dst, src);
}
366
/*
 * Single-block decrypt without FPU management; counterpart of
 * __aes_encrypt() — same caller-holds-FPU expectation applies.
 */
static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

        aesni_dec(ctx, dst, src);
}
373
/*
 * ECB encryption over scatterlists.  The whole walk runs inside a
 * single kernel_fpu_begin()/end() section, which is why the
 * CRYPTO_TFM_REQ_MAY_SLEEP flag is cleared first: blkcipher_walk_done()
 * must not sleep while we own the FPU.
 */
static int ecb_encrypt(struct blkcipher_desc *desc,
                       struct scatterlist *dst, struct scatterlist *src,
                       unsigned int nbytes)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);
        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes)) {
                /* encrypt the largest whole-block prefix of this segment */
                aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK);
                /* leftover sub-block bytes are handed back to the walker */
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }
        kernel_fpu_end();

        return err;
}
397
/*
 * ECB decryption over scatterlists; same structure as ecb_encrypt():
 * one FPU section around the whole walk, sleeping disallowed.
 */
static int ecb_decrypt(struct blkcipher_desc *desc,
                       struct scatterlist *dst, struct scatterlist *src,
                       unsigned int nbytes)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);
        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes)) {
                /* decrypt whole blocks; remainder goes back to the walker */
                aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }
        kernel_fpu_end();

        return err;
}
421
/*
 * CBC encryption over scatterlists.  walk.iv carries the chaining value
 * across segments; the asm routine updates it in place.  One FPU
 * section around the whole walk, so sleeping is disallowed.
 */
static int cbc_encrypt(struct blkcipher_desc *desc,
                       struct scatterlist *dst, struct scatterlist *src,
                       unsigned int nbytes)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);
        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes)) {
                aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK, walk.iv);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }
        kernel_fpu_end();

        return err;
}
445
/*
 * CBC decryption over scatterlists; mirror of cbc_encrypt() with the
 * same FPU-section and no-sleep constraints.
 */
static int cbc_decrypt(struct blkcipher_desc *desc,
                       struct scatterlist *dst, struct scatterlist *src,
                       unsigned int nbytes)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);
        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes)) {
                aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK, walk.iv);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }
        kernel_fpu_end();

        return err;
}
469
470 #ifdef CONFIG_X86_64
471 static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
472                             struct blkcipher_walk *walk)
473 {
474         u8 *ctrblk = walk->iv;
475         u8 keystream[AES_BLOCK_SIZE];
476         u8 *src = walk->src.virt.addr;
477         u8 *dst = walk->dst.virt.addr;
478         unsigned int nbytes = walk->nbytes;
479
480         aesni_enc(ctx, keystream, ctrblk);
481         crypto_xor(keystream, src, nbytes);
482         memcpy(dst, keystream, nbytes);
483         crypto_inc(ctrblk, AES_BLOCK_SIZE);
484 }
485
486 #ifdef CONFIG_AS_AVX
487 static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
488                               const u8 *in, unsigned int len, u8 *iv)
489 {
490         /*
491          * based on key length, override with the by8 version
492          * of ctr mode encryption/decryption for improved performance
493          * aes_set_key_common() ensures that key length is one of
494          * {128,192,256}
495          */
496         if (ctx->key_length == AES_KEYSIZE_128)
497                 aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len);
498         else if (ctx->key_length == AES_KEYSIZE_192)
499                 aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len);
500         else
501                 aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
502 }
503 #endif
504
/*
 * CTR mode (encryption and decryption are the same operation).  Whole
 * blocks go through the aesni_ctr_enc_tfm function pointer (presumably
 * set to the SSE or AVX by8 variant at module init — not visible in
 * this chunk); a trailing partial block is finished by
 * ctr_crypt_final().  Everything runs inside one FPU section.
 */
static int ctr_crypt(struct blkcipher_desc *desc,
                     struct scatterlist *dst, struct scatterlist *src,
                     unsigned int nbytes)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
                aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                                      nbytes & AES_BLOCK_MASK, walk.iv);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }
        if (walk.nbytes) {
                /* fewer than AES_BLOCK_SIZE bytes remain */
                ctr_crypt_final(ctx, &walk);
                err = blkcipher_walk_done(desc, &walk, 0);
        }
        kernel_fpu_end();

        return err;
}
532 #endif
533
/* Async (ablk_helper) wrapper init binding to the internal ECB driver. */
static int ablk_ecb_init(struct crypto_tfm *tfm)
{
        return ablk_init_common(tfm, "__driver-ecb-aes-aesni");
}
538
/* Async (ablk_helper) wrapper init binding to the internal CBC driver. */
static int ablk_cbc_init(struct crypto_tfm *tfm)
{
        return ablk_init_common(tfm, "__driver-cbc-aes-aesni");
}
543
544 #ifdef CONFIG_X86_64
/* Async (ablk_helper) wrapper init binding to the internal CTR driver. */
static int ablk_ctr_init(struct crypto_tfm *tfm)
{
        return ablk_init_common(tfm, "__driver-ctr-aes-aesni");
}
549
550 #endif
551
552 #if IS_ENABLED(CONFIG_CRYPTO_PCBC)
/* Async wrapper init for PCBC, built from the generic pcbc template
 * over the internal AES driver, inside an FPU-managing template. */
static int ablk_pcbc_init(struct crypto_tfm *tfm)
{
        return ablk_init_common(tfm, "fpu(pcbc(__driver-aes-aesni))");
}
557 #endif
558
/* Bulk-encrypt callback for lrw_crypt()/xts_crypt(): ECB-encrypts
 * nbytes in place.  Runs with the FPU already held by the caller. */
static void lrw_xts_encrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
{
        aesni_ecb_enc(ctx, blks, blks, nbytes);
}
563
/* Bulk-decrypt callback for lrw_crypt()/xts_crypt(): ECB-decrypts
 * nbytes in place.  Runs with the FPU already held by the caller. */
static void lrw_xts_decrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
{
        aesni_ecb_dec(ctx, blks, blks, nbytes);
}
568
569 static int lrw_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
570                             unsigned int keylen)
571 {
572         struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
573         int err;
574
575         err = aes_set_key_common(tfm, ctx->raw_aes_ctx, key,
576                                  keylen - AES_BLOCK_SIZE);
577         if (err)
578                 return err;
579
580         return lrw_init_table(&ctx->lrw_table, key + keylen - AES_BLOCK_SIZE);
581 }
582
/* tfm destructor for lrw(aes): releases the LRW tweak table. */
static void lrw_aesni_exit_tfm(struct crypto_tfm *tfm)
{
        struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);

        lrw_free_table(&ctx->lrw_table);
}
589
590 static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
591                        struct scatterlist *src, unsigned int nbytes)
592 {
593         struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
594         be128 buf[8];
595         struct lrw_crypt_req req = {
596                 .tbuf = buf,
597                 .tbuflen = sizeof(buf),
598
599                 .table_ctx = &ctx->lrw_table,
600                 .crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
601                 .crypt_fn = lrw_xts_encrypt_callback,
602         };
603         int ret;
604
605         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
606
607         kernel_fpu_begin();
608         ret = lrw_crypt(desc, dst, src, nbytes, &req);
609         kernel_fpu_end();
610
611         return ret;
612 }
613
614 static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
615                        struct scatterlist *src, unsigned int nbytes)
616 {
617         struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
618         be128 buf[8];
619         struct lrw_crypt_req req = {
620                 .tbuf = buf,
621                 .tbuflen = sizeof(buf),
622
623                 .table_ctx = &ctx->lrw_table,
624                 .crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
625                 .crypt_fn = lrw_xts_decrypt_callback,
626         };
627         int ret;
628
629         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
630
631         kernel_fpu_begin();
632         ret = lrw_crypt(desc, dst, src, nbytes, &req);
633         kernel_fpu_end();
634
635         return ret;
636 }
637
638 static int xts_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
639                             unsigned int keylen)
640 {
641         struct aesni_xts_ctx *ctx = crypto_tfm_ctx(tfm);
642         int err;
643
644         err = xts_check_key(tfm, key, keylen);
645         if (err)
646                 return err;
647
648         /* first half of xts-key is for crypt */
649         err = aes_set_key_common(tfm, ctx->raw_crypt_ctx, key, keylen / 2);
650         if (err)
651                 return err;
652
653         /* second half of xts-key is for tweak */
654         return aes_set_key_common(tfm, ctx->raw_tweak_ctx, key + keylen / 2,
655                                   keylen / 2);
656 }
657
658
/* Tweak-encryption callback for XTS: one AES block through aesni_enc().
 * No FPU management here — the caller is responsible. */
static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in)
{
        aesni_enc(ctx, out, in);
}
663
664 #ifdef CONFIG_X86_64
665
/* glue-helper adapter: XTS-encrypt one 128-bit block via aesni_enc(). */
static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
        glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc));
}
670
/* glue-helper adapter: XTS-decrypt one 128-bit block via aesni_dec(). */
static void aesni_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
        glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_dec));
}
675
/* glue-helper adapter: XTS-encrypt 8 blocks at once via the asm routine. */
static void aesni_xts_enc8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
        aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, true, (u8 *)iv);
}
680
/* glue-helper adapter: XTS-decrypt 8 blocks at once via the asm routine. */
static void aesni_xts_dec8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
        aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, false, (u8 *)iv);
}
685
/*
 * glue-helper dispatch table for XTS encryption: prefer the 8-block asm
 * routine, fall back to single blocks for the remainder.
 * NOTE(review): fpu_blocks_limit semantics are defined by glue_helper
 * (not visible here) — confirm before relying on it.
 */
static const struct common_glue_ctx aesni_enc_xts = {
        .num_funcs = 2,
        .fpu_blocks_limit = 1,

        .funcs = { {
                .num_blocks = 8,
                .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc8) }
        }, {
                .num_blocks = 1,
                .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc) }
        } }
};
698
/*
 * glue-helper dispatch table for XTS decryption; mirror of
 * aesni_enc_xts with the decrypt adapters.
 */
static const struct common_glue_ctx aesni_dec_xts = {
        .num_funcs = 2,
        .fpu_blocks_limit = 1,

        .funcs = { {
                .num_blocks = 8,
                .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec8) }
        }, {
                .num_blocks = 1,
                .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec) }
        } }
};
711
/*
 * XTS encryption (64-bit build): hand off to the common glue walker
 * with the tweak context/function and the data-crypt context.
 */
static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);

        return glue_xts_crypt_128bit(&aesni_enc_xts, desc, dst, src, nbytes,
                                     XTS_TWEAK_CAST(aesni_xts_tweak),
                                     aes_ctx(ctx->raw_tweak_ctx),
                                     aes_ctx(ctx->raw_crypt_ctx));
}
722
/*
 * XTS decryption (64-bit build); mirror of xts_encrypt() using the
 * decrypt dispatch table.
 */
static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);

        return glue_xts_crypt_128bit(&aesni_dec_xts, desc, dst, src, nbytes,
                                     XTS_TWEAK_CAST(aesni_xts_tweak),
                                     aes_ctx(ctx->raw_tweak_ctx),
                                     aes_ctx(ctx->raw_crypt_ctx));
}
733
734 #else
735
/*
 * XTS encryption (32-bit build): use the generic xts_crypt() walker
 * with our ECB bulk callback and a stack tweak buffer.  The bulk work
 * runs with the FPU held, so sleeping is disallowed.
 */
static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        be128 buf[8];
        struct xts_crypt_req req = {
                .tbuf = buf,
                .tbuflen = sizeof(buf),

                .tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
                .tweak_fn = aesni_xts_tweak,
                .crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
                .crypt_fn = lrw_xts_encrypt_callback,
        };
        int ret;

        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        ret = xts_crypt(desc, dst, src, nbytes, &req);
        kernel_fpu_end();

        return ret;
}
760
/*
 * XTS decryption (32-bit build); mirror of the 32-bit xts_encrypt()
 * with the decrypt bulk callback.
 */
static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        be128 buf[8];
        struct xts_crypt_req req = {
                .tbuf = buf,
                .tbuflen = sizeof(buf),

                .tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
                .tweak_fn = aesni_xts_tweak,
                .crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
                .crypt_fn = lrw_xts_decrypt_callback,
        };
        int ret;

        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        ret = xts_crypt(desc, dst, src, nbytes, &req);
        kernel_fpu_end();

        return ret;
}
785
786 #endif
787
788 #ifdef CONFIG_X86_64
789 static int rfc4106_init(struct crypto_aead *aead)
790 {
791         struct cryptd_aead *cryptd_tfm;
792         struct cryptd_aead **ctx = crypto_aead_ctx(aead);
793
794         cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni",
795                                        CRYPTO_ALG_INTERNAL,
796                                        CRYPTO_ALG_INTERNAL);
797         if (IS_ERR(cryptd_tfm))
798                 return PTR_ERR(cryptd_tfm);
799
800         *ctx = cryptd_tfm;
801         crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
802         return 0;
803 }
804
/* Instance teardown: release the cryptd AEAD allocated in rfc4106_init(). */
static void rfc4106_exit(struct crypto_aead *aead)
{
        struct cryptd_aead **ctx = crypto_aead_ctx(aead);
        struct cryptd_aead *cryptd_tfm = *ctx;

        cryptd_free_aead(cryptd_tfm);
}
811
812 static void
813 rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
814 {
815         struct aesni_gcm_set_hash_subkey_result *result = req->data;
816
817         if (err == -EINPROGRESS)
818                 return;
819         result->err = err;
820         complete(&result->completion);
821 }
822
823 static int
824 rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
825 {
826         struct crypto_ablkcipher *ctr_tfm;
827         struct ablkcipher_request *req;
828         int ret = -EINVAL;
829         struct aesni_hash_subkey_req_data *req_data;
830
831         ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
832         if (IS_ERR(ctr_tfm))
833                 return PTR_ERR(ctr_tfm);
834
835         ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
836         if (ret)
837                 goto out_free_ablkcipher;
838
839         ret = -ENOMEM;
840         req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
841         if (!req)
842                 goto out_free_ablkcipher;
843
844         req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
845         if (!req_data)
846                 goto out_free_request;
847
848         memset(req_data->iv, 0, sizeof(req_data->iv));
849
850         /* Clear the data in the hash sub key container to zero.*/
851         /* We want to cipher all zeros to create the hash sub key. */
852         memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);
853
854         init_completion(&req_data->result.completion);
855         sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
856         ablkcipher_request_set_tfm(req, ctr_tfm);
857         ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
858                                         CRYPTO_TFM_REQ_MAY_BACKLOG,
859                                         rfc4106_set_hash_subkey_done,
860                                         &req_data->result);
861
862         ablkcipher_request_set_crypt(req, &req_data->sg,
863                 &req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);
864
865         ret = crypto_ablkcipher_encrypt(req);
866         if (ret == -EINPROGRESS || ret == -EBUSY) {
867                 ret = wait_for_completion_interruptible
868                         (&req_data->result.completion);
869                 if (!ret)
870                         ret = req_data->result.err;
871         }
872         kfree(req_data);
873 out_free_request:
874         ablkcipher_request_free(req);
875 out_free_ablkcipher:
876         crypto_free_ablkcipher(ctr_tfm);
877         return ret;
878 }
879
880 static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
881                                   unsigned int key_len)
882 {
883         struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead);
884
885         if (key_len < 4) {
886                 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
887                 return -EINVAL;
888         }
889         /*Account for 4 byte nonce at the end.*/
890         key_len -= 4;
891
892         memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
893
894         return aes_set_key_common(crypto_aead_tfm(aead),
895                                   &ctx->aes_key_expanded, key, key_len) ?:
896                rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
897 }
898
899 static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
900                            unsigned int key_len)
901 {
902         struct cryptd_aead **ctx = crypto_aead_ctx(parent);
903         struct cryptd_aead *cryptd_tfm = *ctx;
904
905         return crypto_aead_setkey(&cryptd_tfm->base, key, key_len);
906 }
907
908 static int common_rfc4106_set_authsize(struct crypto_aead *aead,
909                                        unsigned int authsize)
910 {
911         switch (authsize) {
912         case 8:
913         case 12:
914         case 16:
915                 break;
916         default:
917                 return -EINVAL;
918         }
919
920         return 0;
921 }
922
923 /* This is the Integrity Check Value (aka the authentication tag length and can
924  * be 8, 12 or 16 bytes long. */
925 static int rfc4106_set_authsize(struct crypto_aead *parent,
926                                 unsigned int authsize)
927 {
928         struct cryptd_aead **ctx = crypto_aead_ctx(parent);
929         struct cryptd_aead *cryptd_tfm = *ctx;
930
931         return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
932 }
933
/*
 * Synchronous RFC4106 GCM encryption using the AES-NI assembly routines.
 * Builds the 16-byte GCM IV from the 4-byte context nonce, the 8-byte
 * per-request IV and a big-endian block counter of 1, then encrypts
 * req->cryptlen bytes and appends the auth tag.
 *
 * Callers must guarantee the FPU is usable (this runs between
 * kernel_fpu_begin()/end()); the async cryptd path arranges that.
 */
static int helper_rfc4106_encrypt(struct aead_request *req)
{
        u8 one_entry_in_sg = 0;
        u8 *src, *dst, *assoc;
        __be32 counter = cpu_to_be32(1);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
        void *aes_ctx = &(ctx->aes_key_expanded);
        unsigned long auth_tag_len = crypto_aead_authsize(tfm);
        u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
        struct scatter_walk src_sg_walk;
        struct scatter_walk dst_sg_walk;
        unsigned int i;

        /* Assuming we are supporting rfc4106 64-bit extended */
        /* sequence numbers We need to have the AAD length equal */
        /* to 16 or 20 bytes */
        if (unlikely(req->assoclen != 16 && req->assoclen != 20))
                return -EINVAL;

        /* IV below built */
        /* Layout: salt (4) | explicit IV (8) | counter=1 (4, big-endian). */
        for (i = 0; i < 4; i++)
                *(iv+i) = ctx->nonce[i];
        for (i = 0; i < 8; i++)
                *(iv+4+i) = req->iv[i];
        *((__be32 *)(iv+12)) = counter;

        /*
         * Fast path: both src and dst are single scatterlist entries that
         * fit in one page, so they can be kmap'ed and processed in place
         * without a bounce buffer.
         */
        if (sg_is_last(req->src) &&
            req->src->offset + req->src->length <= PAGE_SIZE &&
            sg_is_last(req->dst) &&
            req->dst->offset + req->dst->length <= PAGE_SIZE) {
                one_entry_in_sg = 1;
                scatterwalk_start(&src_sg_walk, req->src);
                assoc = scatterwalk_map(&src_sg_walk);
                src = assoc + req->assoclen;
                dst = src;
                if (unlikely(req->src != req->dst)) {
                        scatterwalk_start(&dst_sg_walk, req->dst);
                        dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
                }
        } else {
                /* Allocate memory for src, dst, assoc */
                /* Bounce buffer: AAD + plaintext + room for the tag. */
                assoc = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
                        GFP_ATOMIC);
                if (unlikely(!assoc))
                        return -ENOMEM;
                scatterwalk_map_and_copy(assoc, req->src, 0,
                                         req->assoclen + req->cryptlen, 0);
                src = assoc + req->assoclen;
                dst = src;
        }

        kernel_fpu_begin();
        /*
         * NOTE(review): assoclen - 8 is passed as the AAD length, i.e. the
         * trailing 8 IV bytes appear to be excluded from authentication
         * input here — confirm against the gcm assembly contract.
         */
        aesni_gcm_enc_tfm(aes_ctx, dst, src, req->cryptlen, iv,
                          ctx->hash_subkey, assoc, req->assoclen - 8,
                          dst + req->cryptlen, auth_tag_len);
        kernel_fpu_end();

        /* The authTag (aka the Integrity Check Value) needs to be written
         * back to the packet. */
        if (one_entry_in_sg) {
                if (unlikely(req->src != req->dst)) {
                        /* dst was offset by assoclen at map time; undo it. */
                        scatterwalk_unmap(dst - req->assoclen);
                        scatterwalk_advance(&dst_sg_walk, req->dst->length);
                        scatterwalk_done(&dst_sg_walk, 1, 0);
                }
                scatterwalk_unmap(assoc);
                scatterwalk_advance(&src_sg_walk, req->src->length);
                scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
        } else {
                /* Copy ciphertext + tag from the bounce buffer to dst. */
                scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
                                         req->cryptlen + auth_tag_len, 1);
                kfree(assoc);
        }
        return 0;
}
1010
/*
 * Synchronous RFC4106 GCM decryption counterpart of helper_rfc4106_encrypt.
 * req->cryptlen covers ciphertext plus the trailing auth tag; the tag is
 * recomputed and compared in constant time, returning -EBADMSG on mismatch.
 * Must run with the FPU usable (see the encrypt helper).
 */
static int helper_rfc4106_decrypt(struct aead_request *req)
{
        u8 one_entry_in_sg = 0;
        u8 *src, *dst, *assoc;
        unsigned long tempCipherLen = 0;
        __be32 counter = cpu_to_be32(1);
        int retval = 0;
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
        void *aes_ctx = &(ctx->aes_key_expanded);
        unsigned long auth_tag_len = crypto_aead_authsize(tfm);
        u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
        u8 authTag[16];
        struct scatter_walk src_sg_walk;
        struct scatter_walk dst_sg_walk;
        unsigned int i;

        if (unlikely(req->assoclen != 16 && req->assoclen != 20))
                return -EINVAL;

        /* Assuming we are supporting rfc4106 64-bit extended */
        /* sequence numbers We need to have the AAD length */
        /* equal to 16 or 20 bytes */

        /* Ciphertext length excluding the appended tag. */
        tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
        /* IV below built */
        /* Layout: salt (4) | explicit IV (8) | counter=1 (4, big-endian). */
        for (i = 0; i < 4; i++)
                *(iv+i) = ctx->nonce[i];
        for (i = 0; i < 8; i++)
                *(iv+4+i) = req->iv[i];
        *((__be32 *)(iv+12)) = counter;

        /*
         * Fast path: single-entry, single-page src and dst can be kmap'ed
         * and processed in place without a bounce buffer.
         */
        if (sg_is_last(req->src) &&
            req->src->offset + req->src->length <= PAGE_SIZE &&
            sg_is_last(req->dst) &&
            req->dst->offset + req->dst->length <= PAGE_SIZE) {
                one_entry_in_sg = 1;
                scatterwalk_start(&src_sg_walk, req->src);
                assoc = scatterwalk_map(&src_sg_walk);
                src = assoc + req->assoclen;
                dst = src;
                if (unlikely(req->src != req->dst)) {
                        scatterwalk_start(&dst_sg_walk, req->dst);
                        dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
                }

        } else {
                /* Allocate memory for src, dst, assoc */
                assoc = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
                if (!assoc)
                        return -ENOMEM;
                scatterwalk_map_and_copy(assoc, req->src, 0,
                                         req->assoclen + req->cryptlen, 0);
                src = assoc + req->assoclen;
                dst = src;
        }

        kernel_fpu_begin();
        /* See the NOTE in helper_rfc4106_encrypt about assoclen - 8. */
        aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv,
                          ctx->hash_subkey, assoc, req->assoclen - 8,
                          authTag, auth_tag_len);
        kernel_fpu_end();

        /* Compare generated tag with passed in tag. */
        /* crypto_memneq() is constant-time to avoid a timing side channel. */
        retval = crypto_memneq(src + tempCipherLen, authTag, auth_tag_len) ?
                -EBADMSG : 0;

        if (one_entry_in_sg) {
                if (unlikely(req->src != req->dst)) {
                        /* dst was offset by assoclen at map time; undo it. */
                        scatterwalk_unmap(dst - req->assoclen);
                        scatterwalk_advance(&dst_sg_walk, req->dst->length);
                        scatterwalk_done(&dst_sg_walk, 1, 0);
                }
                scatterwalk_unmap(assoc);
                scatterwalk_advance(&src_sg_walk, req->src->length);
                scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
        } else {
                /* Copy only the plaintext (no tag) back to dst. */
                scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
                                         tempCipherLen, 1);
                kfree(assoc);
        }
        return retval;
}
1094
1095 static int rfc4106_encrypt(struct aead_request *req)
1096 {
1097         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1098         struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
1099         struct cryptd_aead *cryptd_tfm = *ctx;
1100
1101         tfm = &cryptd_tfm->base;
1102         if (irq_fpu_usable() && (!in_atomic() ||
1103                                  !cryptd_aead_queued(cryptd_tfm)))
1104                 tfm = cryptd_aead_child(cryptd_tfm);
1105
1106         aead_request_set_tfm(req, tfm);
1107
1108         return crypto_aead_encrypt(req);
1109 }
1110
1111 static int rfc4106_decrypt(struct aead_request *req)
1112 {
1113         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1114         struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
1115         struct cryptd_aead *cryptd_tfm = *ctx;
1116
1117         tfm = &cryptd_tfm->base;
1118         if (irq_fpu_usable() && (!in_atomic() ||
1119                                  !cryptd_aead_queued(cryptd_tfm)))
1120                 tfm = cryptd_aead_child(cryptd_tfm);
1121
1122         aead_request_set_tfm(req, tfm);
1123
1124         return crypto_aead_decrypt(req);
1125 }
1126 #endif
1127
/*
 * Registration table for the non-AEAD AES-NI algorithms.  Entries whose
 * names start with "__" / priority 0 are CRYPTO_ALG_INTERNAL helpers only
 * reachable through the async (ablk_helper/cryptd) wrappers, which handle
 * contexts where the FPU is not usable.
 */
static struct crypto_alg aesni_algs[] = { {
        /* Bare synchronous single-block AES cipher. */
        .cra_name               = "aes",
        .cra_driver_name        = "aes-aesni",
        .cra_priority           = 300,
        .cra_flags              = CRYPTO_ALG_TYPE_CIPHER,
        .cra_blocksize          = AES_BLOCK_SIZE,
        /* Extra AESNI_ALIGN - 1 bytes allow manual 16-byte alignment. */
        .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
                                  AESNI_ALIGN - 1,
        .cra_alignmask          = 0,
        .cra_module             = THIS_MODULE,
        .cra_u  = {
                .cipher = {
                        .cia_min_keysize        = AES_MIN_KEY_SIZE,
                        .cia_max_keysize        = AES_MAX_KEY_SIZE,
                        .cia_setkey             = aes_set_key,
                        .cia_encrypt            = aes_encrypt,
                        .cia_decrypt            = aes_decrypt
                }
        }
}, {
        /* Internal cipher that assumes the FPU is already usable. */
        .cra_name               = "__aes-aesni",
        .cra_driver_name        = "__driver-aes-aesni",
        .cra_priority           = 0,
        .cra_flags              = CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_INTERNAL,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
                                  AESNI_ALIGN - 1,
        .cra_alignmask          = 0,
        .cra_module             = THIS_MODULE,
        .cra_u  = {
                .cipher = {
                        .cia_min_keysize        = AES_MIN_KEY_SIZE,
                        .cia_max_keysize        = AES_MAX_KEY_SIZE,
                        .cia_setkey             = aes_set_key,
                        .cia_encrypt            = __aes_encrypt,
                        .cia_decrypt            = __aes_decrypt
                }
        }
}, {
        /* Internal synchronous ECB helper. */
        .cra_name               = "__ecb-aes-aesni",
        .cra_driver_name        = "__driver-ecb-aes-aesni",
        .cra_priority           = 0,
        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER |
                                  CRYPTO_ALG_INTERNAL,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
                                  AESNI_ALIGN - 1,
        .cra_alignmask          = 0,
        .cra_type               = &crypto_blkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_u = {
                .blkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .setkey         = aes_set_key,
                        .encrypt        = ecb_encrypt,
                        .decrypt        = ecb_decrypt,
                },
        },
}, {
        /* Internal synchronous CBC helper. */
        .cra_name               = "__cbc-aes-aesni",
        .cra_driver_name        = "__driver-cbc-aes-aesni",
        .cra_priority           = 0,
        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER |
                                  CRYPTO_ALG_INTERNAL,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
                                  AESNI_ALIGN - 1,
        .cra_alignmask          = 0,
        .cra_type               = &crypto_blkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_u = {
                .blkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .setkey         = aes_set_key,
                        .encrypt        = cbc_encrypt,
                        .decrypt        = cbc_decrypt,
                },
        },
}, {
        /* Public async ECB, bridged to the internal helper via ablk/cryptd. */
        .cra_name               = "ecb(aes)",
        .cra_driver_name        = "ecb-aes-aesni",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct async_helper_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = ablk_ecb_init,
        .cra_exit               = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .setkey         = ablk_set_key,
                        .encrypt        = ablk_encrypt,
                        .decrypt        = ablk_decrypt,
                },
        },
}, {
        /* Public async CBC. */
        .cra_name               = "cbc(aes)",
        .cra_driver_name        = "cbc-aes-aesni",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct async_helper_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = ablk_cbc_init,
        .cra_exit               = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = ablk_set_key,
                        .encrypt        = ablk_encrypt,
                        .decrypt        = ablk_decrypt,
                },
        },
#ifdef CONFIG_X86_64
}, {
        /* Internal synchronous CTR helper (64-bit only). */
        .cra_name               = "__ctr-aes-aesni",
        .cra_driver_name        = "__driver-ctr-aes-aesni",
        .cra_priority           = 0,
        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER |
                                  CRYPTO_ALG_INTERNAL,
        .cra_blocksize          = 1,
        .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
                                  AESNI_ALIGN - 1,
        .cra_alignmask          = 0,
        .cra_type               = &crypto_blkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_u = {
                .blkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = aes_set_key,
                        /* CTR is symmetric: same routine both directions. */
                        .encrypt        = ctr_crypt,
                        .decrypt        = ctr_crypt,
                },
        },
}, {
        /* Public async CTR. */
        .cra_name               = "ctr(aes)",
        .cra_driver_name        = "ctr-aes-aesni",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = 1,
        .cra_ctxsize            = sizeof(struct async_helper_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = ablk_ctr_init,
        .cra_exit               = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = ablk_set_key,
                        .encrypt        = ablk_encrypt,
                        /* CTR decrypt == encrypt, hence ablk_encrypt here. */
                        .decrypt        = ablk_encrypt,
                        .geniv          = "chainiv",
                },
        },
#endif
#if IS_ENABLED(CONFIG_CRYPTO_PCBC)
}, {
        /* Public async PCBC (only when the PCBC template is built). */
        .cra_name               = "pcbc(aes)",
        .cra_driver_name        = "pcbc-aes-aesni",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct async_helper_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = ablk_pcbc_init,
        .cra_exit               = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = ablk_set_key,
                        .encrypt        = ablk_encrypt,
                        .decrypt        = ablk_decrypt,
                },
        },
#endif
}, {
        /* Internal synchronous LRW helper; key carries an extra tweak block. */
        .cra_name               = "__lrw-aes-aesni",
        .cra_driver_name        = "__driver-lrw-aes-aesni",
        .cra_priority           = 0,
        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER |
                                  CRYPTO_ALG_INTERNAL,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct aesni_lrw_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_blkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_exit               = lrw_aesni_exit_tfm,
        .cra_u = {
                .blkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = lrw_aesni_setkey,
                        .encrypt        = lrw_encrypt,
                        .decrypt        = lrw_decrypt,
                },
        },
}, {
        /* Internal synchronous XTS helper; key is two AES keys (data+tweak). */
        .cra_name               = "__xts-aes-aesni",
        .cra_driver_name        = "__driver-xts-aes-aesni",
        .cra_priority           = 0,
        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER |
                                  CRYPTO_ALG_INTERNAL,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct aesni_xts_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_blkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_u = {
                .blkcipher = {
                        .min_keysize    = 2 * AES_MIN_KEY_SIZE,
                        .max_keysize    = 2 * AES_MAX_KEY_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = xts_aesni_setkey,
                        .encrypt        = xts_encrypt,
                        .decrypt        = xts_decrypt,
                },
        },
}, {
        /* Public async LRW. */
        .cra_name               = "lrw(aes)",
        .cra_driver_name        = "lrw-aes-aesni",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct async_helper_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = ablk_init,
        .cra_exit               = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = ablk_set_key,
                        .encrypt        = ablk_encrypt,
                        .decrypt        = ablk_decrypt,
                },
        },
}, {
        /* Public async XTS. */
        .cra_name               = "xts(aes)",
        .cra_driver_name        = "xts-aes-aesni",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct async_helper_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = ablk_init,
        .cra_exit               = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize    = 2 * AES_MIN_KEY_SIZE,
                        .max_keysize    = 2 * AES_MAX_KEY_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = ablk_set_key,
                        .encrypt        = ablk_encrypt,
                        .decrypt        = ablk_decrypt,
                },
        },
} };
1410
#ifdef CONFIG_X86_64
/*
 * AEAD registrations: the internal synchronous GCM helper and the public
 * async rfc4106 wrapper that routes through cryptd when the FPU is not
 * directly usable.
 */
static struct aead_alg aesni_aead_algs[] = { {
        /* Internal helper: must only run with the FPU usable. */
        .setkey                 = common_rfc4106_set_key,
        .setauthsize            = common_rfc4106_set_authsize,
        .encrypt                = helper_rfc4106_encrypt,
        .decrypt                = helper_rfc4106_decrypt,
        .ivsize                 = 8,
        .maxauthsize            = 16,
        .base = {
                .cra_name               = "__gcm-aes-aesni",
                .cra_driver_name        = "__driver-gcm-aes-aesni",
                .cra_flags              = CRYPTO_ALG_INTERNAL,
                .cra_blocksize          = 1,
                .cra_ctxsize            = sizeof(struct aesni_rfc4106_gcm_ctx),
                .cra_alignmask          = AESNI_ALIGN - 1,
                .cra_module             = THIS_MODULE,
        },
}, {
        /* Public async rfc4106(gcm(aes)); context is just the cryptd handle. */
        .init                   = rfc4106_init,
        .exit                   = rfc4106_exit,
        .setkey                 = rfc4106_set_key,
        .setauthsize            = rfc4106_set_authsize,
        .encrypt                = rfc4106_encrypt,
        .decrypt                = rfc4106_decrypt,
        .ivsize                 = 8,
        .maxauthsize            = 16,
        .base = {
                .cra_name               = "rfc4106(gcm(aes))",
                .cra_driver_name        = "rfc4106-gcm-aesni",
                .cra_priority           = 400,
                .cra_flags              = CRYPTO_ALG_ASYNC,
                .cra_blocksize          = 1,
                .cra_ctxsize            = sizeof(struct cryptd_aead *),
                .cra_module             = THIS_MODULE,
        },
} };
#else
/* No GCM assembly on 32-bit: keep an empty array so registration is a no-op. */
static struct aead_alg aesni_aead_algs[0];
#endif
1450
1451
/* Bind the module to CPUs advertising the AES-NI feature flag. */
static const struct x86_cpu_id aesni_cpu_id[] = {
        X86_FEATURE_MATCH(X86_FEATURE_AES),
        {}
};
MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
1457
/*
 * Module init: bail out without AES-NI, pick the best available GCM and
 * CTR implementations (AVX2 > AVX > SSE, by compile-time support and
 * runtime CPU features), then register the FPU template and all
 * cipher/AEAD algorithms, unwinding on failure.
 */
static int __init aesni_init(void)
{
        int err;

        if (!x86_match_cpu(aesni_cpu_id))
                return -ENODEV;
#ifdef CONFIG_X86_64
#ifdef CONFIG_AS_AVX2
        if (boot_cpu_has(X86_FEATURE_AVX2)) {
                pr_info("AVX2 version of gcm_enc/dec engaged.\n");
                aesni_gcm_enc_tfm = aesni_gcm_enc_avx2;
                aesni_gcm_dec_tfm = aesni_gcm_dec_avx2;
        } else
#endif
#ifdef CONFIG_AS_AVX
        if (boot_cpu_has(X86_FEATURE_AVX)) {
                pr_info("AVX version of gcm_enc/dec engaged.\n");
                aesni_gcm_enc_tfm = aesni_gcm_enc_avx;
                aesni_gcm_dec_tfm = aesni_gcm_dec_avx;
        } else
#endif
        {
                /* Baseline fallback; every AES-NI CPU supports this. */
                pr_info("SSE version of gcm_enc/dec engaged.\n");
                aesni_gcm_enc_tfm = aesni_gcm_enc;
                aesni_gcm_dec_tfm = aesni_gcm_dec;
        }
        aesni_ctr_enc_tfm = aesni_ctr_enc;
#ifdef CONFIG_AS_AVX
        if (boot_cpu_has(X86_FEATURE_AVX)) {
                /* optimize performance of ctr mode encryption transform */
                aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
                pr_info("AES CTR mode by8 optimization enabled\n");
        }
#endif
#endif

        err = crypto_fpu_init();
        if (err)
                return err;

        err = crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
        if (err)
                goto fpu_exit;

        err = crypto_register_aeads(aesni_aead_algs,
                                    ARRAY_SIZE(aesni_aead_algs));
        if (err)
                goto unregister_algs;

        return err;

unregister_algs:
        crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
fpu_exit:
        crypto_fpu_exit();
        return err;
}
1515
1516 static void __exit aesni_exit(void)
1517 {
1518         crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs));
1519         crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
1520
1521         crypto_fpu_exit();
1522 }
1523
/* late_initcall: register after the generic crypto infrastructure is up. */
late_initcall(aesni_init);
module_exit(aesni_exit);

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("aes");