/*
 * Support for Intel AES-NI instructions. This file contains glue
 * code; the real AES implementation is in aesni-intel_asm.S.
 *
 * Copyright (C) 2008, Intel Corp.
 *    Author: Huang Ying <ying.huang@intel.com>
 *
 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
 * interface for 64-bit kernels.
 *    Authors: Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Tadeusz Struk (tadeusz.struk@intel.com)
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/cryptd.h>
#include <crypto/ctr.h>
#include <crypto/b128ops.h>
#include <crypto/lrw.h>
#include <crypto/xts.h>
#include <asm/cpu_device_id.h>
#include <asm/i387.h>
#include <asm/crypto/aes.h>
#include <asm/crypto/ablk_helper.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>

#if defined(CONFIG_CRYPTO_CTR) || defined(CONFIG_CRYPTO_CTR_MODULE)
#define HAS_CTR
#endif

#if defined(CONFIG_CRYPTO_PCBC) || defined(CONFIG_CRYPTO_PCBC_MODULE)
#define HAS_PCBC
#endif

/* This data is stored at the end of the crypto_tfm struct.
 * It is per-"session" data storage and must be 16-byte aligned.
 */
struct aesni_rfc4106_gcm_ctx {
        u8 hash_subkey[16];
        struct crypto_aes_ctx aes_key_expanded;
        u8 nonce[4];
        struct cryptd_aead *cryptd_tfm;
};

struct aesni_gcm_set_hash_subkey_result {
        int err;
        struct completion completion;
};

struct aesni_hash_subkey_req_data {
        u8 iv[16];
        struct aesni_gcm_set_hash_subkey_result result;
        struct scatterlist sg;
};

#define AESNI_ALIGN     (16)
#define AES_BLOCK_MASK  (~(AES_BLOCK_SIZE-1))
#define RFC4106_HASH_SUBKEY_SIZE 16

struct aesni_lrw_ctx {
        struct lrw_table_ctx lrw_table;
        u8 raw_aes_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
};

struct aesni_xts_ctx {
        u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
        u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
};

asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
                             unsigned int key_len);
asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
                          const u8 *in);
asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
                          const u8 *in);
asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len);
asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len);
asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv);

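/*
 * Provided by arch/x86/crypto/fpu.c: the "fpu" template, which wraps a
 * blkcipher in kernel_fpu_begin()/end(); it is registered from this
 * module's init path below.
 */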
int crypto_fpu_init(void);
void crypto_fpu_exit(void);

#ifdef CONFIG_X86_64
asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv);

/* asmlinkage void aesni_gcm_enc()
 * void *ctx,  AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Ciphertext output. Encrypt in-place is allowed.
 * const u8 *in, Plaintext input
 * unsigned long plaintext_len, Length of data in bytes for encryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this
 *          is going to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len), Authenticated Tag Length in bytes.
 *          Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
                        const u8 *in, unsigned long plaintext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);

/* asmlinkage void aesni_gcm_dec()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Plaintext output. Decrypt in-place is allowed.
 * const u8 *in, Ciphertext input
 * unsigned long ciphertext_len, Length of data in bytes for decryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
 * to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len) Authenticated Tag Length in bytes.
 * Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
                        const u8 *in, unsigned long ciphertext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);

static inline struct
aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
{
        return (struct aesni_rfc4106_gcm_ctx *)
                PTR_ALIGN((u8 *)crypto_tfm_ctx(crypto_aead_tfm(tfm)),
                          AESNI_ALIGN);
}
#endif

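/*
 * The raw context buffers above are over-allocated by AESNI_ALIGN - 1
 * bytes; round the pointer up so the key schedule meets the 16-byte
 * alignment the AES-NI instructions expect, unless the crypto API
 * already guarantees at least that much alignment.
 */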
static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
{
        unsigned long addr = (unsigned long)raw_ctx;
        unsigned long align = AESNI_ALIGN;

        if (align <= crypto_tfm_ctx_alignment())
                align = 1;
        return (struct crypto_aes_ctx *)ALIGN(addr, align);
}

static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
                              const u8 *in_key, unsigned int key_len)
{
        struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
        u32 *flags = &tfm->crt_flags;
        int err;

        if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
            key_len != AES_KEYSIZE_256) {
                *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        }

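        /*
         * AES-NI uses SSE registers, so it may only run when the FPU is
         * usable in the current context; otherwise fall back to the
         * non-SIMD implementation.
         */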
        if (!irq_fpu_usable())
                err = crypto_aes_expand_key(ctx, in_key, key_len);
        else {
                kernel_fpu_begin();
                err = aesni_set_key(ctx, in_key, key_len);
                kernel_fpu_end();
        }

        return err;
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                       unsigned int key_len)
{
        return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
}

static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

        if (!irq_fpu_usable())
                crypto_aes_encrypt_x86(ctx, dst, src);
        else {
                kernel_fpu_begin();
                aesni_enc(ctx, dst, src);
                kernel_fpu_end();
        }
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

        if (!irq_fpu_usable())
                crypto_aes_decrypt_x86(ctx, dst, src);
        else {
                kernel_fpu_begin();
                aesni_dec(ctx, dst, src);
                kernel_fpu_end();
        }
}

static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

        aesni_enc(ctx, dst, src);
}

static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

        aesni_dec(ctx, dst, src);
}

static int ecb_encrypt(struct blkcipher_desc *desc,
                       struct scatterlist *dst, struct scatterlist *src,
                       unsigned int nbytes)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);
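        /* The FPU is held across the walk below, so the walk must not sleep */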
        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes)) {
                aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }
        kernel_fpu_end();

        return err;
}

static int ecb_decrypt(struct blkcipher_desc *desc,
                       struct scatterlist *dst, struct scatterlist *src,
                       unsigned int nbytes)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);
        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes)) {
                aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }
        kernel_fpu_end();

        return err;
}

static int cbc_encrypt(struct blkcipher_desc *desc,
                       struct scatterlist *dst, struct scatterlist *src,
                       unsigned int nbytes)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);
        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes)) {
                aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK, walk.iv);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }
        kernel_fpu_end();

        return err;
}

static int cbc_decrypt(struct blkcipher_desc *desc,
                       struct scatterlist *dst, struct scatterlist *src,
                       unsigned int nbytes)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);
        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes)) {
                aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK, walk.iv);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }
        kernel_fpu_end();

        return err;
}

#ifdef CONFIG_X86_64
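/*
 * CTR is a stream mode, so the tail of a request need not be a full
 * block: encrypt the current counter block and XOR only as many
 * keystream bytes as there are data bytes left.
 */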
static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
                            struct blkcipher_walk *walk)
{
        u8 *ctrblk = walk->iv;
        u8 keystream[AES_BLOCK_SIZE];
        u8 *src = walk->src.virt.addr;
        u8 *dst = walk->dst.virt.addr;
        unsigned int nbytes = walk->nbytes;

        aesni_enc(ctx, keystream, ctrblk);
        crypto_xor(keystream, src, nbytes);
        memcpy(dst, keystream, nbytes);
        crypto_inc(ctrblk, AES_BLOCK_SIZE);
}

static int ctr_crypt(struct blkcipher_desc *desc,
                     struct scatterlist *dst, struct scatterlist *src,
                     unsigned int nbytes)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
                aesni_ctr_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK, walk.iv);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }
        if (walk.nbytes) {
                ctr_crypt_final(ctx, &walk);
                err = blkcipher_walk_done(desc, &walk, 0);
        }
        kernel_fpu_end();

        return err;
}
#endif

static int ablk_ecb_init(struct crypto_tfm *tfm)
{
        return ablk_init_common(tfm, "__driver-ecb-aes-aesni");
}

static int ablk_cbc_init(struct crypto_tfm *tfm)
{
        return ablk_init_common(tfm, "__driver-cbc-aes-aesni");
}

#ifdef CONFIG_X86_64
static int ablk_ctr_init(struct crypto_tfm *tfm)
{
        return ablk_init_common(tfm, "__driver-ctr-aes-aesni");
}

#ifdef HAS_CTR
static int ablk_rfc3686_ctr_init(struct crypto_tfm *tfm)
{
        return ablk_init_common(tfm, "rfc3686(__driver-ctr-aes-aesni)");
}
#endif
#endif

#ifdef HAS_PCBC
static int ablk_pcbc_init(struct crypto_tfm *tfm)
{
        return ablk_init_common(tfm, "fpu(pcbc(__driver-aes-aesni))");
}
#endif

static void lrw_xts_encrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
{
        aesni_ecb_enc(ctx, blks, blks, nbytes);
}

static void lrw_xts_decrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
{
        aesni_ecb_dec(ctx, blks, blks, nbytes);
}

static int lrw_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
                            unsigned int keylen)
{
        struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
        int err;

        err = aes_set_key_common(tfm, ctx->raw_aes_ctx, key,
                                 keylen - AES_BLOCK_SIZE);
        if (err)
                return err;

        return lrw_init_table(&ctx->lrw_table, key + keylen - AES_BLOCK_SIZE);
}

static void lrw_aesni_exit_tfm(struct crypto_tfm *tfm)
{
        struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);

        lrw_free_table(&ctx->lrw_table);
}

static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        be128 buf[8];
        struct lrw_crypt_req req = {
                .tbuf = buf,
                .tbuflen = sizeof(buf),

                .table_ctx = &ctx->lrw_table,
                .crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
                .crypt_fn = lrw_xts_encrypt_callback,
        };
        int ret;

        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        ret = lrw_crypt(desc, dst, src, nbytes, &req);
        kernel_fpu_end();

        return ret;
}

static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        be128 buf[8];
        struct lrw_crypt_req req = {
                .tbuf = buf,
                .tbuflen = sizeof(buf),

                .table_ctx = &ctx->lrw_table,
                .crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
                .crypt_fn = lrw_xts_decrypt_callback,
        };
        int ret;

        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        ret = lrw_crypt(desc, dst, src, nbytes, &req);
        kernel_fpu_end();

        return ret;
}

static int xts_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
                            unsigned int keylen)
{
        struct aesni_xts_ctx *ctx = crypto_tfm_ctx(tfm);
        u32 *flags = &tfm->crt_flags;
        int err;

        /* key consists of keys of equal size concatenated, therefore
         * the length must be even
         */
        if (keylen % 2) {
                *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        }

        /* first half of xts-key is for crypt */
        err = aes_set_key_common(tfm, ctx->raw_crypt_ctx, key, keylen / 2);
        if (err)
                return err;

        /* second half of xts-key is for tweak */
        return aes_set_key_common(tfm, ctx->raw_tweak_ctx, key + keylen / 2,
                                  keylen / 2);
}

static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        be128 buf[8];
        struct xts_crypt_req req = {
                .tbuf = buf,
                .tbuflen = sizeof(buf),

                .tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
                .tweak_fn = XTS_TWEAK_CAST(aesni_enc),
                .crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
                .crypt_fn = lrw_xts_encrypt_callback,
        };
        int ret;

        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        ret = xts_crypt(desc, dst, src, nbytes, &req);
        kernel_fpu_end();

        return ret;
}

static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        be128 buf[8];
        struct xts_crypt_req req = {
                .tbuf = buf,
                .tbuflen = sizeof(buf),

                .tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
                .tweak_fn = XTS_TWEAK_CAST(aesni_enc),
                .crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
                .crypt_fn = lrw_xts_decrypt_callback,
        };
        int ret;

        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        ret = xts_crypt(desc, dst, src, nbytes, &req);
        kernel_fpu_end();

        return ret;
}

#ifdef CONFIG_X86_64
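/*
 * The RFC4106 transform is wrapped by cryptd so that requests arriving
 * in contexts where the FPU is unusable can be deferred to a workqueue,
 * where the AES-NI path can run (see rfc4106_encrypt/decrypt below).
 */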
static int rfc4106_init(struct crypto_tfm *tfm)
{
        struct cryptd_aead *cryptd_tfm;
        struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
                PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
        struct crypto_aead *cryptd_child;
        struct aesni_rfc4106_gcm_ctx *child_ctx;

        cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0);
        if (IS_ERR(cryptd_tfm))
                return PTR_ERR(cryptd_tfm);

        cryptd_child = cryptd_aead_child(cryptd_tfm);
        child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child);
        memcpy(child_ctx, ctx, sizeof(*ctx));
        ctx->cryptd_tfm = cryptd_tfm;
        tfm->crt_aead.reqsize = sizeof(struct aead_request)
                + crypto_aead_reqsize(&cryptd_tfm->base);
        return 0;
}

static void rfc4106_exit(struct crypto_tfm *tfm)
{
        struct aesni_rfc4106_gcm_ctx *ctx =
                (struct aesni_rfc4106_gcm_ctx *)
                PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);

        if (!IS_ERR(ctx->cryptd_tfm))
                cryptd_free_aead(ctx->cryptd_tfm);
}

static void
rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
{
        struct aesni_gcm_set_hash_subkey_result *result = req->data;

        if (err == -EINPROGRESS)
                return;
        result->err = err;
        complete(&result->completion);
}

static int
rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
{
        struct crypto_ablkcipher *ctr_tfm;
        struct ablkcipher_request *req;
        int ret = -EINVAL;
        struct aesni_hash_subkey_req_data *req_data;

        ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
        if (IS_ERR(ctr_tfm))
                return PTR_ERR(ctr_tfm);

        crypto_ablkcipher_clear_flags(ctr_tfm, ~0);

        ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
        if (ret)
                goto out_free_ablkcipher;

        ret = -ENOMEM;
        req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
        if (!req)
                goto out_free_ablkcipher;

        req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
        if (!req_data)
                goto out_free_request;

        memset(req_data->iv, 0, sizeof(req_data->iv));

        /* Zero the hash sub key container: ciphering all zeros with the
         * supplied key produces the hash sub key. */
        memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);
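        /*
         * With a zero IV and all-zero plaintext, CTR mode returns the raw
         * block cipher output, i.e. the GHASH subkey H = E_K(0^128).
         */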

        init_completion(&req_data->result.completion);
        sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
        ablkcipher_request_set_tfm(req, ctr_tfm);
        ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
                                        CRYPTO_TFM_REQ_MAY_BACKLOG,
                                        rfc4106_set_hash_subkey_done,
                                        &req_data->result);

        ablkcipher_request_set_crypt(req, &req_data->sg,
                &req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);

        ret = crypto_ablkcipher_encrypt(req);
        if (ret == -EINPROGRESS || ret == -EBUSY) {
                ret = wait_for_completion_interruptible
                        (&req_data->result.completion);
                if (!ret)
                        ret = req_data->result.err;
        }
        kfree(req_data);
out_free_request:
        ablkcipher_request_free(req);
out_free_ablkcipher:
        crypto_free_ablkcipher(ctr_tfm);
        return ret;
}

static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
                           unsigned int key_len)
{
        int ret = 0;
        struct crypto_tfm *tfm = crypto_aead_tfm(parent);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
        struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
        struct aesni_rfc4106_gcm_ctx *child_ctx =
                                 aesni_rfc4106_gcm_ctx_get(cryptd_child);
        u8 *new_key_align, *new_key_mem = NULL;

        if (key_len < 4) {
                crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }
        /* Account for the 4 byte nonce at the end. */
        key_len -= 4;
        if (key_len != AES_KEYSIZE_128) {
                crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
        /* This must be on a 16 byte boundary! */
        if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN)
                return -EINVAL;

        if ((unsigned long)key % AESNI_ALIGN) {
                /* The key is not aligned: use an auxiliary aligned buffer */
                new_key_mem = kmalloc(key_len+AESNI_ALIGN, GFP_KERNEL);
                if (!new_key_mem)
                        return -ENOMEM;

                new_key_align = PTR_ALIGN(new_key_mem, AESNI_ALIGN);
                memcpy(new_key_align, key, key_len);
                key = new_key_align;
        }

        if (!irq_fpu_usable())
                ret = crypto_aes_expand_key(&(ctx->aes_key_expanded),
                                            key, key_len);
        else {
                kernel_fpu_begin();
                ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len);
                kernel_fpu_end();
        }
        /* This must be on a 16 byte boundary! */
        if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) {
                ret = -EINVAL;
                goto exit;
        }
        ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
        memcpy(child_ctx, ctx, sizeof(*ctx));
exit:
        kfree(new_key_mem);
        return ret;
}

/* This is the Integrity Check Value (aka the authentication tag) length;
 * it can be 8, 12 or 16 bytes long. */
static int rfc4106_set_authsize(struct crypto_aead *parent,
                                unsigned int authsize)
{
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
        struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);

        switch (authsize) {
        case 8:
        case 12:
        case 16:
                break;
        default:
                return -EINVAL;
        }
        crypto_aead_crt(parent)->authsize = authsize;
        crypto_aead_crt(cryptd_child)->authsize = authsize;
        return 0;
}

static int rfc4106_encrypt(struct aead_request *req)
{
        int ret;
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);

        if (!irq_fpu_usable()) {
                struct aead_request *cryptd_req =
                        (struct aead_request *) aead_request_ctx(req);
                memcpy(cryptd_req, req, sizeof(*req));
                aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
                return crypto_aead_encrypt(cryptd_req);
        } else {
                struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
                kernel_fpu_begin();
                ret = cryptd_child->base.crt_aead.encrypt(req);
                kernel_fpu_end();
                return ret;
        }
}

static int rfc4106_decrypt(struct aead_request *req)
{
        int ret;
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);

        if (!irq_fpu_usable()) {
                struct aead_request *cryptd_req =
                        (struct aead_request *) aead_request_ctx(req);
                memcpy(cryptd_req, req, sizeof(*req));
                aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
                return crypto_aead_decrypt(cryptd_req);
        } else {
                struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
                kernel_fpu_begin();
                ret = cryptd_child->base.crt_aead.decrypt(req);
                kernel_fpu_end();
                return ret;
        }
}

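/*
 * Fast path: if src and assoc each occupy a single scatterlist entry,
 * map them directly; otherwise bounce the data through a temporary
 * linear buffer so the asm routine can consume it contiguously.
 */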
static int __driver_rfc4106_encrypt(struct aead_request *req)
{
        u8 one_entry_in_sg = 0;
        u8 *src, *dst, *assoc;
        __be32 counter = cpu_to_be32(1);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
        void *aes_ctx = &(ctx->aes_key_expanded);
        unsigned long auth_tag_len = crypto_aead_authsize(tfm);
        u8 iv_tab[16+AESNI_ALIGN];
        u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_tab, AESNI_ALIGN);
        struct scatter_walk src_sg_walk;
        struct scatter_walk assoc_sg_walk;
        struct scatter_walk dst_sg_walk;
        unsigned int i;

        /*
         * Assuming RFC4106 64-bit extended sequence numbers are supported,
         * the AAD length must be 8 or 12 bytes.
         */
        if (unlikely(req->assoclen != 8 && req->assoclen != 12))
                return -EINVAL;
        /* Build the IV: 4 byte salt || 8 byte ESP IV || 0x00000001 */
        for (i = 0; i < 4; i++)
                *(iv+i) = ctx->nonce[i];
        for (i = 0; i < 8; i++)
                *(iv+4+i) = req->iv[i];
        *((__be32 *)(iv+12)) = counter;

        if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
                one_entry_in_sg = 1;
                scatterwalk_start(&src_sg_walk, req->src);
                scatterwalk_start(&assoc_sg_walk, req->assoc);
                src = scatterwalk_map(&src_sg_walk);
                assoc = scatterwalk_map(&assoc_sg_walk);
                dst = src;
                if (unlikely(req->src != req->dst)) {
                        scatterwalk_start(&dst_sg_walk, req->dst);
                        dst = scatterwalk_map(&dst_sg_walk);
                }

        } else {
                /* Allocate memory for src, dst, assoc */
                src = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
                        GFP_ATOMIC);
                if (unlikely(!src))
                        return -ENOMEM;
                assoc = (src + req->cryptlen + auth_tag_len);
                scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
                scatterwalk_map_and_copy(assoc, req->assoc, 0,
                                        req->assoclen, 0);
                dst = src;
        }

        aesni_gcm_enc(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
                ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
                + ((unsigned long)req->cryptlen), auth_tag_len);

        /* The authTag (aka the Integrity Check Value) needs to be written
         * back to the packet. */
        if (one_entry_in_sg) {
                if (unlikely(req->src != req->dst)) {
                        scatterwalk_unmap(dst);
                        scatterwalk_done(&dst_sg_walk, 0, 0);
                }
                scatterwalk_unmap(src);
                scatterwalk_unmap(assoc);
                scatterwalk_done(&src_sg_walk, 0, 0);
                scatterwalk_done(&assoc_sg_walk, 0, 0);
        } else {
                scatterwalk_map_and_copy(dst, req->dst, 0,
                        req->cryptlen + auth_tag_len, 1);
                kfree(src);
        }
        return 0;
}

static int __driver_rfc4106_decrypt(struct aead_request *req)
{
        u8 one_entry_in_sg = 0;
        u8 *src, *dst, *assoc;
        unsigned long tempCipherLen = 0;
        __be32 counter = cpu_to_be32(1);
        int retval = 0;
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
        void *aes_ctx = &(ctx->aes_key_expanded);
        unsigned long auth_tag_len = crypto_aead_authsize(tfm);
        u8 iv_and_authTag[32+AESNI_ALIGN];
        u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_and_authTag, AESNI_ALIGN);
        u8 *authTag = iv + 16;
        struct scatter_walk src_sg_walk;
        struct scatter_walk assoc_sg_walk;
        struct scatter_walk dst_sg_walk;
        unsigned int i;

        /*
         * Assuming RFC4106 64-bit extended sequence numbers are supported,
         * the AAD length must be 8 or 12 bytes.
         */
        if (unlikely((req->cryptlen < auth_tag_len) ||
                (req->assoclen != 8 && req->assoclen != 12)))
                return -EINVAL;

        tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
        /* Build the IV: 4 byte salt || 8 byte ESP IV || 0x00000001 */
        for (i = 0; i < 4; i++)
                *(iv+i) = ctx->nonce[i];
        for (i = 0; i < 8; i++)
                *(iv+4+i) = req->iv[i];
        *((__be32 *)(iv+12)) = counter;

        if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
                one_entry_in_sg = 1;
                scatterwalk_start(&src_sg_walk, req->src);
                scatterwalk_start(&assoc_sg_walk, req->assoc);
                src = scatterwalk_map(&src_sg_walk);
                assoc = scatterwalk_map(&assoc_sg_walk);
                dst = src;
                if (unlikely(req->src != req->dst)) {
                        scatterwalk_start(&dst_sg_walk, req->dst);
                        dst = scatterwalk_map(&dst_sg_walk);
                }

        } else {
                /* Allocate memory for src, dst, assoc */
                src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
                if (!src)
                        return -ENOMEM;
                assoc = (src + req->cryptlen);
                scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
                scatterwalk_map_and_copy(assoc, req->assoc, 0,
                        req->assoclen, 0);
                dst = src;
        }

        aesni_gcm_dec(aes_ctx, dst, src, tempCipherLen, iv,
                ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
                authTag, auth_tag_len);

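        /*
         * Note: memcmp() is not constant-time; a hardened implementation
         * would use a constant-time comparison for the authentication tag.
         */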
        /* Compare generated tag with passed in tag. */
        retval = memcmp(src + tempCipherLen, authTag, auth_tag_len) ?
                -EBADMSG : 0;

        if (one_entry_in_sg) {
                if (unlikely(req->src != req->dst)) {
                        scatterwalk_unmap(dst);
                        scatterwalk_done(&dst_sg_walk, 0, 0);
                }
                scatterwalk_unmap(src);
                scatterwalk_unmap(assoc);
                scatterwalk_done(&src_sg_walk, 0, 0);
                scatterwalk_done(&assoc_sg_walk, 0, 0);
        } else {
                scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen, 1);
                kfree(src);
        }
        return retval;
}
#endif

static struct crypto_alg aesni_algs[] = { {
        .cra_name               = "aes",
        .cra_driver_name        = "aes-aesni",
        .cra_priority           = 300,
        .cra_flags              = CRYPTO_ALG_TYPE_CIPHER,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
                                  AESNI_ALIGN - 1,
        .cra_alignmask          = 0,
        .cra_module             = THIS_MODULE,
        .cra_u  = {
                .cipher = {
                        .cia_min_keysize        = AES_MIN_KEY_SIZE,
                        .cia_max_keysize        = AES_MAX_KEY_SIZE,
                        .cia_setkey             = aes_set_key,
                        .cia_encrypt            = aes_encrypt,
                        .cia_decrypt            = aes_decrypt
                }
        }
}, {
        .cra_name               = "__aes-aesni",
        .cra_driver_name        = "__driver-aes-aesni",
        .cra_priority           = 0,
        .cra_flags              = CRYPTO_ALG_TYPE_CIPHER,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
                                  AESNI_ALIGN - 1,
        .cra_alignmask          = 0,
        .cra_module             = THIS_MODULE,
        .cra_u  = {
                .cipher = {
                        .cia_min_keysize        = AES_MIN_KEY_SIZE,
                        .cia_max_keysize        = AES_MAX_KEY_SIZE,
                        .cia_setkey             = aes_set_key,
                        .cia_encrypt            = __aes_encrypt,
                        .cia_decrypt            = __aes_decrypt
                }
        }
}, {
        .cra_name               = "__ecb-aes-aesni",
        .cra_driver_name        = "__driver-ecb-aes-aesni",
        .cra_priority           = 0,
        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
                                  AESNI_ALIGN - 1,
        .cra_alignmask          = 0,
        .cra_type               = &crypto_blkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_u = {
                .blkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .setkey         = aes_set_key,
                        .encrypt        = ecb_encrypt,
                        .decrypt        = ecb_decrypt,
                },
        },
}, {
        .cra_name               = "__cbc-aes-aesni",
        .cra_driver_name        = "__driver-cbc-aes-aesni",
        .cra_priority           = 0,
        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
                                  AESNI_ALIGN - 1,
        .cra_alignmask          = 0,
        .cra_type               = &crypto_blkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_u = {
                .blkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .setkey         = aes_set_key,
                        .encrypt        = cbc_encrypt,
                        .decrypt        = cbc_decrypt,
                },
        },
}, {
        .cra_name               = "ecb(aes)",
        .cra_driver_name        = "ecb-aes-aesni",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct async_helper_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = ablk_ecb_init,
        .cra_exit               = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .setkey         = ablk_set_key,
                        .encrypt        = ablk_encrypt,
                        .decrypt        = ablk_decrypt,
                },
        },
}, {
        .cra_name               = "cbc(aes)",
        .cra_driver_name        = "cbc-aes-aesni",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct async_helper_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = ablk_cbc_init,
        .cra_exit               = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = ablk_set_key,
                        .encrypt        = ablk_encrypt,
                        .decrypt        = ablk_decrypt,
                },
        },
#ifdef CONFIG_X86_64
}, {
        .cra_name               = "__ctr-aes-aesni",
        .cra_driver_name        = "__driver-ctr-aes-aesni",
        .cra_priority           = 0,
        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
        .cra_blocksize          = 1,
        .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
                                  AESNI_ALIGN - 1,
        .cra_alignmask          = 0,
        .cra_type               = &crypto_blkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_u = {
                .blkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = aes_set_key,
                        .encrypt        = ctr_crypt,
                        .decrypt        = ctr_crypt,
                },
        },
}, {
        .cra_name               = "ctr(aes)",
        .cra_driver_name        = "ctr-aes-aesni",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = 1,
        .cra_ctxsize            = sizeof(struct async_helper_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = ablk_ctr_init,
        .cra_exit               = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = ablk_set_key,
                        .encrypt        = ablk_encrypt,
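                        /* in CTR mode decryption is the same keystream
                         * XOR as encryption, so ablk_encrypt is reused */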
                        .decrypt        = ablk_encrypt,
                        .geniv          = "chainiv",
                },
        },
}, {
        .cra_name               = "__gcm-aes-aesni",
        .cra_driver_name        = "__driver-gcm-aes-aesni",
        .cra_priority           = 0,
        .cra_flags              = CRYPTO_ALG_TYPE_AEAD,
        .cra_blocksize          = 1,
        .cra_ctxsize            = sizeof(struct aesni_rfc4106_gcm_ctx) +
                                  AESNI_ALIGN,
        .cra_alignmask          = 0,
        .cra_type               = &crypto_aead_type,
        .cra_module             = THIS_MODULE,
        .cra_u = {
                .aead = {
                        .encrypt        = __driver_rfc4106_encrypt,
                        .decrypt        = __driver_rfc4106_decrypt,
                },
        },
}, {
        .cra_name               = "rfc4106(gcm(aes))",
        .cra_driver_name        = "rfc4106-gcm-aesni",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = 1,
        .cra_ctxsize            = sizeof(struct aesni_rfc4106_gcm_ctx) +
                                  AESNI_ALIGN,
        .cra_alignmask          = 0,
        .cra_type               = &crypto_nivaead_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = rfc4106_init,
        .cra_exit               = rfc4106_exit,
        .cra_u = {
                .aead = {
                        .setkey         = rfc4106_set_key,
                        .setauthsize    = rfc4106_set_authsize,
                        .encrypt        = rfc4106_encrypt,
                        .decrypt        = rfc4106_decrypt,
                        .geniv          = "seqiv",
                        .ivsize         = 8,
                        .maxauthsize    = 16,
                },
        },
#ifdef HAS_CTR
}, {
        .cra_name               = "rfc3686(ctr(aes))",
        .cra_driver_name        = "rfc3686-ctr-aes-aesni",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = 1,
        .cra_ctxsize            = sizeof(struct async_helper_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = ablk_rfc3686_ctr_init,
        .cra_exit               = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize = AES_MIN_KEY_SIZE +
                                       CTR_RFC3686_NONCE_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE +
                                       CTR_RFC3686_NONCE_SIZE,
                        .ivsize      = CTR_RFC3686_IV_SIZE,
                        .setkey      = ablk_set_key,
                        .encrypt     = ablk_encrypt,
                        .decrypt     = ablk_decrypt,
                        .geniv       = "seqiv",
                },
        },
#endif
#endif
#ifdef HAS_PCBC
}, {
        .cra_name               = "pcbc(aes)",
        .cra_driver_name        = "pcbc-aes-aesni",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct async_helper_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = ablk_pcbc_init,
        .cra_exit               = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = ablk_set_key,
                        .encrypt        = ablk_encrypt,
                        .decrypt        = ablk_decrypt,
                },
        },
#endif
}, {
        .cra_name               = "__lrw-aes-aesni",
        .cra_driver_name        = "__driver-lrw-aes-aesni",
        .cra_priority           = 0,
        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct aesni_lrw_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_blkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_exit               = lrw_aesni_exit_tfm,
        .cra_u = {
                .blkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = lrw_aesni_setkey,
                        .encrypt        = lrw_encrypt,
                        .decrypt        = lrw_decrypt,
                },
        },
}, {
        .cra_name               = "__xts-aes-aesni",
        .cra_driver_name        = "__driver-xts-aes-aesni",
        .cra_priority           = 0,
        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct aesni_xts_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_blkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_u = {
                .blkcipher = {
                        .min_keysize    = 2 * AES_MIN_KEY_SIZE,
                        .max_keysize    = 2 * AES_MAX_KEY_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = xts_aesni_setkey,
                        .encrypt        = xts_encrypt,
                        .decrypt        = xts_decrypt,
                },
        },
}, {
        .cra_name               = "lrw(aes)",
        .cra_driver_name        = "lrw-aes-aesni",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct async_helper_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = ablk_init,
        .cra_exit               = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = ablk_set_key,
                        .encrypt        = ablk_encrypt,
                        .decrypt        = ablk_decrypt,
                },
        },
}, {
        .cra_name               = "xts(aes)",
        .cra_driver_name        = "xts-aes-aesni",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct async_helper_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = ablk_init,
        .cra_exit               = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize    = 2 * AES_MIN_KEY_SIZE,
                        .max_keysize    = 2 * AES_MAX_KEY_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = ablk_set_key,
                        .encrypt        = ablk_encrypt,
                        .decrypt        = ablk_decrypt,
                },
        },
} };
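
/*
 * Usage sketch (not part of this driver): callers reach these
 * implementations through the generic crypto API, where names are
 * resolved by priority, so e.g. "cbc(aes)" maps to "cbc-aes-aesni" on
 * AES-NI hardware.  Hypothetical, abbreviated example:
 *
 *	struct crypto_ablkcipher *tfm =
 *		crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	err = crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	... issue requests via the ablkcipher_request_*() helpers ...
 *	crypto_free_ablkcipher(tfm);
 */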

static const struct x86_cpu_id aesni_cpu_id[] = {
        X86_FEATURE_MATCH(X86_FEATURE_AES),
        {}
};
MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);

static int __init aesni_init(void)
{
        int err;

        if (!x86_match_cpu(aesni_cpu_id))
                return -ENODEV;

        err = crypto_fpu_init();
        if (err)
                return err;

        return crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
}

static void __exit aesni_exit(void)
{
        crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));

        crypto_fpu_exit();
}

module_init(aesni_init);
module_exit(aesni_exit);

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS("aes");