/*
 * Modified to interface to the Linux kernel
 * Copyright (c) 2009, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

/* --------------------------------------------------------------------------
 * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
 * This implementation is hereby placed in the public domain.
 * The authors offer no warranty. Use at your own risk.
 * Please send bug reports to the authors.
 * Last modified: 17 APR 08, 1700 PDT
 * ----------------------------------------------------------------------- */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <asm/byteorder.h>
#include <crypto/scatterwalk.h>
#include <crypto/vmac.h>
#include <crypto/internal/hash.h>

/*
 * Constants and masks
 */
#define UINT64_C(x) x##ULL
const u64 p64   = UINT64_C(0xfffffffffffffeff);  /* 2^64 - 257 prime  */
const u64 m62   = UINT64_C(0x3fffffffffffffff);  /* 62-bit mask       */
const u64 m63   = UINT64_C(0x7fffffffffffffff);  /* 63-bit mask       */
const u64 m64   = UINT64_C(0xffffffffffffffff);  /* 64-bit mask       */
const u64 mpoly = UINT64_C(0x1fffffff1fffffff);  /* Poly key mask     */

#define pe64_to_cpup le64_to_cpup               /* Prefer little endian */

#ifdef __LITTLE_ENDIAN
#define INDEX_HIGH 1
#define INDEX_LOW 0
#else
#define INDEX_HIGH 0
#define INDEX_LOW 1
#endif

/*
 * The following routines are used in this implementation. They are
 * written via macros to simulate zero-overhead call-by-reference.
 *
 * MUL64: 64x64->128-bit multiplication
 * PMUL64: assumes top bits cleared on inputs
 * ADD128: 128x128->128-bit addition
 */

#define ADD128(rh, rl, ih, il)                                          \
        do {                                                            \
                u64 _il = (il);                                         \
                (rl) += (_il);                                          \
                if ((rl) < (_il))                                       \
                        (rh)++;                                         \
                (rh) += (ih);                                           \
        } while (0)

#define MUL32(i1, i2)   ((u64)(u32)(i1)*(u32)(i2))

#define PMUL64(rh, rl, i1, i2)  /* Assumes m doesn't overflow */        \
        do {                                                            \
                u64 _i1 = (i1), _i2 = (i2);                             \
                u64 m = MUL32(_i1, _i2>>32) + MUL32(_i1>>32, _i2);      \
                rh = MUL32(_i1>>32, _i2>>32);                           \
                rl = MUL32(_i1, _i2);                                   \
                ADD128(rh, rl, (m >> 32), (m << 32));                   \
        } while (0)

#define MUL64(rh, rl, i1, i2)                                           \
        do {                                                            \
                u64 _i1 = (i1), _i2 = (i2);                             \
                u64 m1 = MUL32(_i1, _i2>>32);                           \
                u64 m2 = MUL32(_i1>>32, _i2);                           \
                rh = MUL32(_i1>>32, _i2>>32);                           \
                rl = MUL32(_i1, _i2);                                   \
                ADD128(rh, rl, (m1 >> 32), (m1 << 32));                 \
                ADD128(rh, rl, (m2 >> 32), (m2 << 32));                 \
        } while (0)

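/*
 * Illustrative note (added commentary, not from the original authors):
 * MUL64 assembles the exact 128-bit product from four 32x32->64
 * multiplies via the identity
 *
 *      i1*i2 = (hi1*hi2 << 64) + ((hi1*lo2 + lo1*hi2) << 32) + lo1*lo2
 *
 * so, for example:
 *
 *      u64 hi, lo;
 *      MUL64(hi, lo, a, b);    // hi:lo now holds the full 128-bit a*b
 *
 * PMUL64 saves one ADD128 by summing both cross terms into a single
 * u64 accumulator m.  That is safe only when the top bit of each input
 * is clear: each cross product is then below 2^63, so m cannot wrap.
 * The polynomial keys are masked with mpoly so this precondition holds
 * wherever PMUL64 is used below.
 */
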
/*
 * For highest performance the L1 NH and L2 polynomial hashes should be
 * carefully implemented to take advantage of one's target architecture.
 * Here these two hash functions are defined multiple times; once for
 * 64-bit architectures, once for 32-bit SSE2 architectures, and once
 * for the remaining (32-bit) architectures.
 * For each, nh_16 *must* be defined (works on multiples of 16 bytes).
 * Optionally, nh_vmac_nhbytes can be defined (for multiples of
 * VMAC_NHBYTES), and nh_16_2 and nh_vmac_nhbytes_2 (versions that do two
 * NH computations at once).
 */

#ifdef CONFIG_64BIT

#define nh_16(mp, kp, nw, rh, rl)                                       \
        do {                                                            \
                int i; u64 th, tl;                                      \
                rh = rl = 0;                                            \
                for (i = 0; i < nw; i += 2) {                           \
                        MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i],     \
                                pe64_to_cpup((mp)+i+1)+(kp)[i+1]);      \
                        ADD128(rh, rl, th, tl);                         \
                }                                                       \
        } while (0)

#define nh_16_2(mp, kp, nw, rh, rl, rh1, rl1)                           \
        do {                                                            \
                int i; u64 th, tl;                                      \
                rh1 = rl1 = rh = rl = 0;                                \
                for (i = 0; i < nw; i += 2) {                           \
                        MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i],     \
                                pe64_to_cpup((mp)+i+1)+(kp)[i+1]);      \
                        ADD128(rh, rl, th, tl);                         \
                        MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2],   \
                                pe64_to_cpup((mp)+i+1)+(kp)[i+3]);      \
                        ADD128(rh1, rl1, th, tl);                       \
                }                                                       \
        } while (0)

#if (VMAC_NHBYTES >= 64) /* These versions do 64 bytes of message at a time */
#define nh_vmac_nhbytes(mp, kp, nw, rh, rl)                             \
        do {                                                            \
                int i; u64 th, tl;                                      \
                rh = rl = 0;                                            \
                for (i = 0; i < nw; i += 8) {                           \
                        MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i],     \
                                pe64_to_cpup((mp)+i+1)+(kp)[i+1]);      \
                        ADD128(rh, rl, th, tl);                         \
                        MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2], \
                                pe64_to_cpup((mp)+i+3)+(kp)[i+3]);      \
                        ADD128(rh, rl, th, tl);                         \
                        MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4], \
                                pe64_to_cpup((mp)+i+5)+(kp)[i+5]);      \
                        ADD128(rh, rl, th, tl);                         \
                        MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6], \
                                pe64_to_cpup((mp)+i+7)+(kp)[i+7]);      \
                        ADD128(rh, rl, th, tl);                         \
                }                                                       \
        } while (0)

#define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh1, rl1)                 \
        do {                                                            \
                int i; u64 th, tl;                                      \
                rh1 = rl1 = rh = rl = 0;                                \
                for (i = 0; i < nw; i += 8) {                           \
                        MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i],     \
                                pe64_to_cpup((mp)+i+1)+(kp)[i+1]);      \
                        ADD128(rh, rl, th, tl);                         \
                        MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2],   \
                                pe64_to_cpup((mp)+i+1)+(kp)[i+3]);      \
                        ADD128(rh1, rl1, th, tl);                       \
                        MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2], \
                                pe64_to_cpup((mp)+i+3)+(kp)[i+3]);      \
                        ADD128(rh, rl, th, tl);                         \
                        MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+4], \
                                pe64_to_cpup((mp)+i+3)+(kp)[i+5]);      \
                        ADD128(rh1, rl1, th, tl);                       \
                        MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4], \
                                pe64_to_cpup((mp)+i+5)+(kp)[i+5]);      \
                        ADD128(rh, rl, th, tl);                         \
                        MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+6], \
                                pe64_to_cpup((mp)+i+5)+(kp)[i+7]);      \
                        ADD128(rh1, rl1, th, tl);                       \
                        MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6], \
                                pe64_to_cpup((mp)+i+7)+(kp)[i+7]);      \
                        ADD128(rh, rl, th, tl);                         \
                        MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+8], \
                                pe64_to_cpup((mp)+i+7)+(kp)[i+9]);      \
                        ADD128(rh1, rl1, th, tl);                       \
                }                                                       \
        } while (0)
#endif

#define poly_step(ah, al, kh, kl, mh, ml)                               \
        do {                                                            \
                u64 t1h, t1l, t2h, t2l, t3h, t3l, z = 0;                \
                /* compute ab*cd, put bd into result registers */       \
                PMUL64(t3h, t3l, al, kh);                               \
                PMUL64(t2h, t2l, ah, kl);                               \
                PMUL64(t1h, t1l, ah, 2*kh);                             \
                PMUL64(ah, al, al, kl);                                 \
                /* add 2 * ac to result */                              \
                ADD128(ah, al, t1h, t1l);                               \
                /* add together ad + bc */                              \
                ADD128(t2h, t2l, t3h, t3l);                             \
                /* now (ah,al), (t2l,2*t2h) need summing */             \
                /* first add the high registers, carrying into t2h */   \
                ADD128(t2h, ah, z, t2l);                                \
                /* double t2h and add top bit of ah */                  \
                t2h = 2 * t2h + (ah >> 63);                             \
                ah &= m63;                                              \
                /* now add the low registers */                         \
                ADD128(ah, al, mh, ml);                                 \
                ADD128(ah, al, z, t2h);                                 \
        } while (0)
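
/*
 * Illustrative note (added commentary, not from the original authors):
 * poly_step evaluates one Horner step of the L2 polynomial modulo
 * p127 = 2^127 - 1, where 2^127 == 1 and 2^128 == 2.  High halves
 * therefore fold back with weight 2: ah*kh sits at bit position 128,
 * which is why it is computed as PMUL64(t1h, t1l, ah, 2*kh), and t2h
 * (bits 128 and up of the cross terms) is re-added as 2*t2h plus the
 * wrapped bit 127 of the accumulator.  Masking ah with m63 keeps the
 * running value below 2^127 between steps.
 */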

#else /* ! CONFIG_64BIT */

#ifndef nh_16
#define nh_16(mp, kp, nw, rh, rl)                                       \
        do {                                                            \
                u64 t1, t2, m1, m2, t;                                  \
                int i;                                                  \
                rh = rl = t = 0;                                        \
                for (i = 0; i < nw; i += 2)  {                          \
                        t1 = pe64_to_cpup(mp+i) + kp[i];                \
                        t2 = pe64_to_cpup(mp+i+1) + kp[i+1];            \
                        m2 = MUL32(t1 >> 32, t2);                       \
                        m1 = MUL32(t1, t2 >> 32);                       \
                        ADD128(rh, rl, MUL32(t1 >> 32, t2 >> 32),       \
                                MUL32(t1, t2));                         \
                        rh += (u64)(u32)(m1 >> 32)                      \
                                + (u32)(m2 >> 32);                      \
                        t += (u64)(u32)m1 + (u32)m2;                    \
                }                                                       \
                ADD128(rh, rl, (t >> 32), (t << 32));                   \
        } while (0)
#endif

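/*
 * Illustrative note (added commentary, not from the original authors):
 * the 32-bit fallback below computes the same poly_step, but as a
 * schoolbook multiply over four 32-bit limbs (a0..a3 times k0..k3).
 * Partial products are folded modulo 2^127 - 1 on the fly: terms whose
 * weight reaches 2^128 are doubled (2^128 == 2 mod p127), and the
 * running total is split at bit 127 rather than bit 128, hence the
 * p >>= 31 in the middle of the accumulation.
 */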
static void poly_step_func(u64 *ahi, u64 *alo,
                        const u64 *kh, const u64 *kl,
                        const u64 *mh, const u64 *ml)
{
#define a0 (*(((u32 *)alo)+INDEX_LOW))
#define a1 (*(((u32 *)alo)+INDEX_HIGH))
#define a2 (*(((u32 *)ahi)+INDEX_LOW))
#define a3 (*(((u32 *)ahi)+INDEX_HIGH))
#define k0 (*(((u32 *)kl)+INDEX_LOW))
#define k1 (*(((u32 *)kl)+INDEX_HIGH))
#define k2 (*(((u32 *)kh)+INDEX_LOW))
#define k3 (*(((u32 *)kh)+INDEX_HIGH))

        u64 p, q, t;
        u32 t2;

        p = MUL32(a3, k3);
        p += p;
        p += *(u64 *)mh;
        p += MUL32(a0, k2);
        p += MUL32(a1, k1);
        p += MUL32(a2, k0);
        t = (u32)(p);
        p >>= 32;
        p += MUL32(a0, k3);
        p += MUL32(a1, k2);
        p += MUL32(a2, k1);
        p += MUL32(a3, k0);
        t |= ((u64)((u32)p & 0x7fffffff)) << 32;
        p >>= 31;
        p += (u64)(((u32 *)ml)[INDEX_LOW]);
        p += MUL32(a0, k0);
        q =  MUL32(a1, k3);
        q += MUL32(a2, k2);
        q += MUL32(a3, k1);
        q += q;
        p += q;
        t2 = (u32)(p);
        p >>= 32;
        p += (u64)(((u32 *)ml)[INDEX_HIGH]);
        p += MUL32(a0, k1);
        p += MUL32(a1, k0);
        q =  MUL32(a2, k3);
        q += MUL32(a3, k2);
        q += q;
        p += q;
        *(u64 *)(alo) = (p << 32) | t2;
        p >>= 32;
        *(u64 *)(ahi) = p + t;

#undef a0
#undef a1
#undef a2
#undef a3
#undef k0
#undef k1
#undef k2
#undef k3
}

#define poly_step(ah, al, kh, kl, mh, ml)                               \
        poly_step_func(&(ah), &(al), &(kh), &(kl), &(mh), &(ml))

#endif  /* end of specialized NH and poly definitions */

/* At least nh_16 is defined. Define others as needed here */
#ifndef nh_16_2
#define nh_16_2(mp, kp, nw, rh, rl, rh2, rl2)                           \
        do {                                                            \
                nh_16(mp, kp, nw, rh, rl);                              \
                nh_16(mp, ((kp)+2), nw, rh2, rl2);                      \
        } while (0)
#endif
#ifndef nh_vmac_nhbytes
#define nh_vmac_nhbytes(mp, kp, nw, rh, rl)                             \
        nh_16(mp, kp, nw, rh, rl)
#endif
#ifndef nh_vmac_nhbytes_2
#define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh2, rl2)                 \
        do {                                                            \
                nh_vmac_nhbytes(mp, kp, nw, rh, rl);                    \
                nh_vmac_nhbytes(mp, ((kp)+2), nw, rh2, rl2);            \
        } while (0)
#endif

static void vhash_abort(struct vmac_ctx *ctx)
{
        ctx->polytmp[0] = ctx->polykey[0];
        ctx->polytmp[1] = ctx->polykey[1];
        ctx->first_block_processed = 0;
}

static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len)
{
        u64 rh, rl, t, z = 0;

        /* fully reduce (p1,p2)+(len,0) mod p127 */
        t = p1 >> 63;
        p1 &= m63;
        ADD128(p1, p2, len, t);
        /* At this point, (p1,p2) is at most 2^127+(len<<64) */
        t = (p1 > m63) + ((p1 == m63) && (p2 == m64));
        ADD128(p1, p2, z, t);
        p1 &= m63;

        /* compute (p1,p2)/(2^64-2^32) and (p1,p2)%(2^64-2^32) */
        t = p1 + (p2 >> 32);
        t += (t >> 32);
        t += (u32)t > 0xfffffffeu;
        p1 += (t >> 32);
        p2 += (p1 << 32);

        /* compute (p1+k1)%p64 and (p2+k2)%p64 */
        p1 += k1;
        p1 += (0 - (p1 < k1)) & 257;
        p2 += k2;
        p2 += (0 - (p2 < k2)) & 257;

        /* compute (p1+k1)*(p2+k2)%p64 */
        MUL64(rh, rl, p1, p2);
        t = rh >> 56;
        ADD128(t, rl, z, rh);
        rh <<= 8;
        ADD128(t, rl, z, rh);
        t += t << 8;
        rl += t;
        rl += (0 - (rl < t)) & 257;
        rl += (0 - (rl > p64-1)) & 257;
        return rl;
}
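
/*
 * Illustrative note (added commentary, not from the original authors):
 * l3hash reduces its 128-bit input in stages: fully modulo
 * p127 = 2^127 - 1, then modulo 2^64 - 2^32 to spread it across p1 and
 * p2, and finally each half is offset by a key word modulo
 * p64 = 2^64 - 257.  The closing block reduces the 128-bit product
 * rh:rl modulo p64 using 2^64 == 257 (mod p64), i.e.
 * rh:rl == rh*257 + rl == rh + (rh << 8) + rl, with the overflow of
 * rh << 8 folded down the same way.
 */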

static void vhash_update(const unsigned char *m,
                        unsigned int mbytes, /* Positive multiple of VMAC_NHBYTES */
                        struct vmac_ctx *ctx)
{
        u64 rh, rl, *mptr;
        const u64 *kptr = (u64 *)ctx->nhkey;
        int i;
        u64 ch, cl;
        u64 pkh = ctx->polykey[0];
        u64 pkl = ctx->polykey[1];

        mptr = (u64 *)m;
        i = mbytes / VMAC_NHBYTES;  /* Must be non-zero */

        ch = ctx->polytmp[0];
        cl = ctx->polytmp[1];

        if (!ctx->first_block_processed) {
                ctx->first_block_processed = 1;
                nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
                rh &= m62;
                ADD128(ch, cl, rh, rl);
                mptr += (VMAC_NHBYTES/sizeof(u64));
                i--;
        }

        while (i--) {
                nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
                rh &= m62;
                poly_step(ch, cl, pkh, pkl, rh, rl);
                mptr += (VMAC_NHBYTES/sizeof(u64));
        }

        ctx->polytmp[0] = ch;
        ctx->polytmp[1] = cl;
}
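
/*
 * Illustrative note (added commentary, not from the original authors):
 * vhash_update streams whole VMAC_NHBYTES blocks (128 bytes with the
 * kernel's vmac.h definition) through the two-level hash: NH
 * compresses each block to 128 bits (high half masked to 62 bits by
 * m62), and the result feeds the running L2 polynomial in polytmp.
 * The first block is added to the polynomial key instead of taking a
 * poly_step, which gives the polynomial its leading key term.
 */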

static u64 vhash(unsigned char m[], unsigned int mbytes,
                        u64 *tagl, struct vmac_ctx *ctx)
{
        u64 rh, rl, *mptr;
        const u64 *kptr = (u64 *)ctx->nhkey;
        int i, remaining;
        u64 ch, cl;
        u64 pkh = ctx->polykey[0];
        u64 pkl = ctx->polykey[1];

        mptr = (u64 *)m;
        i = mbytes / VMAC_NHBYTES;
        remaining = mbytes % VMAC_NHBYTES;

        if (ctx->first_block_processed) {
                ch = ctx->polytmp[0];
                cl = ctx->polytmp[1];
        } else if (i) {
                nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, ch, cl);
                ch &= m62;
                ADD128(ch, cl, pkh, pkl);
                mptr += (VMAC_NHBYTES/sizeof(u64));
                i--;
        } else if (remaining) {
                nh_16(mptr, kptr, 2*((remaining+15)/16), ch, cl);
                ch &= m62;
                ADD128(ch, cl, pkh, pkl);
                mptr += (VMAC_NHBYTES/sizeof(u64));
                goto do_l3;
        } else { /* Empty String */
                ch = pkh; cl = pkl;
                goto do_l3;
        }

        while (i--) {
                nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
                rh &= m62;
                poly_step(ch, cl, pkh, pkl, rh, rl);
                mptr += (VMAC_NHBYTES/sizeof(u64));
        }
        if (remaining) {
                nh_16(mptr, kptr, 2*((remaining+15)/16), rh, rl);
                rh &= m62;
                poly_step(ch, cl, pkh, pkl, rh, rl);
        }

do_l3:
        vhash_abort(ctx);
        remaining *= 8;
        return l3hash(ch, cl, ctx->l3key[0], ctx->l3key[1], remaining);
}
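
/*
 * Illustrative note (added commentary, not from the original authors):
 * vhash finalizes a message of arbitrary length: full blocks go
 * through NH + poly_step as in vhash_update, a trailing fragment is
 * hashed by nh_16 rounded up to 16-byte granularity (the tail of the
 * buffer appears to be assumed zero-padded), and the fragment's bit
 * length (remaining * 8) is bound into the tag by l3hash.  The
 * vhash_abort call resets polytmp so the context is ready for the
 * next message.
 */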

static u64 vmac(unsigned char m[], unsigned int mbytes,
                        unsigned char n[16], u64 *tagl,
                        struct vmac_ctx_t *ctx)
{
        u64 *in_n, *out_p;
        u64 p, h;
        int i;

        in_n = ctx->__vmac_ctx.cached_nonce;
        out_p = ctx->__vmac_ctx.cached_aes;

        i = n[15] & 1;
        if ((*(u64 *)(n+8) != in_n[1]) || (*(u64 *)(n) != in_n[0])) {
                in_n[0] = *(u64 *)(n);
                in_n[1] = *(u64 *)(n+8);
                ((unsigned char *)in_n)[15] &= 0xFE;
                crypto_cipher_encrypt_one(ctx->child,
                        (unsigned char *)out_p, (unsigned char *)in_n);

                ((unsigned char *)in_n)[15] |= (unsigned char)(1-i);
        }
        p = be64_to_cpup(out_p + i);
        h = vhash(m, mbytes, (u64 *)0, &ctx->__vmac_ctx);
        return le64_to_cpu(p + h);
}
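
/*
 * Illustrative note (added commentary, not from the original authors):
 * the tag is vhash(m) plus a pad drawn from AES(nonce with its low bit
 * cleared): each encrypted block supplies two 64-bit pads, and bit 0
 * of n[15] selects the half.  The block is cached in cached_nonce and
 * cached_aes so that a following call with the sibling nonce (same
 * block, opposite low bit) reuses it without re-encrypting.
 */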

static int vmac_set_key(unsigned char user_key[], struct vmac_ctx_t *ctx)
{
        u64 in[2] = {0}, out[2];
        unsigned i;
        int err = 0;

        err = crypto_cipher_setkey(ctx->child, user_key, VMAC_KEY_LEN);
        if (err)
                return err;

        /* Fill nh key */
        ((unsigned char *)in)[0] = 0x80;
        for (i = 0; i < sizeof(ctx->__vmac_ctx.nhkey)/8; i += 2) {
                crypto_cipher_encrypt_one(ctx->child,
                        (unsigned char *)out, (unsigned char *)in);
                ctx->__vmac_ctx.nhkey[i] = be64_to_cpup(out);
                ctx->__vmac_ctx.nhkey[i+1] = be64_to_cpup(out+1);
                ((unsigned char *)in)[15] += 1;
        }

        /* Fill poly key */
        ((unsigned char *)in)[0] = 0xC0;
        in[1] = 0;
        for (i = 0; i < sizeof(ctx->__vmac_ctx.polykey)/8; i += 2) {
                crypto_cipher_encrypt_one(ctx->child,
                        (unsigned char *)out, (unsigned char *)in);
                ctx->__vmac_ctx.polytmp[i] =
                        ctx->__vmac_ctx.polykey[i] =
                                be64_to_cpup(out) & mpoly;
                ctx->__vmac_ctx.polytmp[i+1] =
                        ctx->__vmac_ctx.polykey[i+1] =
                                be64_to_cpup(out+1) & mpoly;
                ((unsigned char *)in)[15] += 1;
        }

        /* Fill ip key */
        ((unsigned char *)in)[0] = 0xE0;
        in[1] = 0;
        for (i = 0; i < sizeof(ctx->__vmac_ctx.l3key)/8; i += 2) {
                do {
                        crypto_cipher_encrypt_one(ctx->child,
                                (unsigned char *)out, (unsigned char *)in);
                        ctx->__vmac_ctx.l3key[i] = be64_to_cpup(out);
                        ctx->__vmac_ctx.l3key[i+1] = be64_to_cpup(out+1);
                        ((unsigned char *)in)[15] += 1;
                } while (ctx->__vmac_ctx.l3key[i] >= p64
                        || ctx->__vmac_ctx.l3key[i+1] >= p64);
        }

        /* Invalidate nonce/aes cache and reset other elements */
        ctx->__vmac_ctx.cached_nonce[0] = (u64)-1; /* Ensure illegal nonce */
        ctx->__vmac_ctx.cached_nonce[1] = (u64)0;  /* Ensure illegal nonce */
        ctx->__vmac_ctx.first_block_processed = 0;

        return err;
}
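
/*
 * Illustrative note (added commentary, not from the original authors):
 * all internal keys are derived by encrypting a counter with the
 * user-supplied AES key, using a distinct domain-separation byte in
 * in[0] per layer: 0x80 for the NH key, 0xC0 for the polynomial key
 * (masked with mpoly so PMUL64's no-overflow precondition holds), and
 * 0xE0 for the L3 key, where outputs >= p64 are rejected and
 * regenerated so the key words are uniform modulo 2^64 - 257.
 */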

static int vmac_setkey(struct crypto_shash *parent,
                const u8 *key, unsigned int keylen)
{
        struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);

        if (keylen != VMAC_KEY_LEN) {
                crypto_shash_set_flags(parent, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        return vmac_set_key((u8 *)key, ctx);
}

static int vmac_init(struct shash_desc *pdesc)
{
        return 0;
}

static int vmac_update(struct shash_desc *pdesc, const u8 *p,
                unsigned int len)
{
        struct crypto_shash *parent = pdesc->tfm;
        struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);

        vhash_update(p, len, &ctx->__vmac_ctx);

        return 0;
}

static int vmac_final(struct shash_desc *pdesc, u8 *out)
{
        struct crypto_shash *parent = pdesc->tfm;
        struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
        vmac_t mac;
        u8 nonce[16] = {};

        mac = vmac(NULL, 0, nonce, NULL, ctx);
        memcpy(out, &mac, sizeof(vmac_t));
        memset(&mac, 0, sizeof(vmac_t));
        memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx));
        return 0;
}

static int vmac_init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_cipher *cipher;
        struct crypto_instance *inst = (void *)tfm->__crt_alg;
        struct crypto_spawn *spawn = crypto_instance_ctx(inst);
        struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm);

        cipher = crypto_spawn_cipher(spawn);
        if (IS_ERR(cipher))
                return PTR_ERR(cipher);

        ctx->child = cipher;
        return 0;
}

static void vmac_exit_tfm(struct crypto_tfm *tfm)
{
        struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm);
        crypto_free_cipher(ctx->child);
}

static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
{
        struct shash_instance *inst;
        struct crypto_alg *alg;
        int err;

        err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH);
        if (err)
                return err;

        alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
                        CRYPTO_ALG_TYPE_MASK);
        if (IS_ERR(alg))
                return PTR_ERR(alg);

        inst = shash_alloc_instance("vmac", alg);
        err = PTR_ERR(inst);
        if (IS_ERR(inst))
                goto out_put_alg;

        err = crypto_init_spawn(shash_instance_ctx(inst), alg,
                        shash_crypto_instance(inst),
                        CRYPTO_ALG_TYPE_MASK);
        if (err)
                goto out_free_inst;

        inst->alg.base.cra_priority = alg->cra_priority;
        inst->alg.base.cra_blocksize = alg->cra_blocksize;
        inst->alg.base.cra_alignmask = alg->cra_alignmask;

        inst->alg.digestsize = sizeof(vmac_t);
        inst->alg.base.cra_ctxsize = sizeof(struct vmac_ctx_t);
        inst->alg.base.cra_init = vmac_init_tfm;
        inst->alg.base.cra_exit = vmac_exit_tfm;

        inst->alg.init = vmac_init;
        inst->alg.update = vmac_update;
        inst->alg.final = vmac_final;
        inst->alg.setkey = vmac_setkey;

        err = shash_register_instance(tmpl, inst);
        if (err) {
out_free_inst:
                shash_free_instance(shash_crypto_instance(inst));
        }

out_put_alg:
        crypto_mod_put(alg);
        return err;
}
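
/*
 * Illustrative note (added commentary, not from the original authors):
 * "vmac" is registered as a template wrapping an underlying block
 * cipher, so an instance is created under a name such as "vmac(aes)".
 * vmac_create verifies that the wrapped algorithm is a plain cipher,
 * allocates the shash instance, and wires up the init/update/final/
 * setkey operations defined above.
 */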

static struct crypto_template vmac_tmpl = {
        .name = "vmac",
        .create = vmac_create,
        .free = shash_free_instance,
        .module = THIS_MODULE,
};

static int __init vmac_module_init(void)
{
        return crypto_register_template(&vmac_tmpl);
}

static void __exit vmac_module_exit(void)
{
        crypto_unregister_template(&vmac_tmpl);
}

module_init(vmac_module_init);
module_exit(vmac_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VMAC hash algorithm");