/*
 * Block chaining cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>

#include "internal.h"

enum {
        BLKCIPHER_WALK_PHYS = 1 << 0,
        BLKCIPHER_WALK_SLOW = 1 << 1,
        BLKCIPHER_WALK_COPY = 1 << 2,
        BLKCIPHER_WALK_DIFF = 1 << 3,
};

static int blkcipher_walk_next(struct blkcipher_desc *desc,
                               struct blkcipher_walk *walk);
static int blkcipher_walk_first(struct blkcipher_desc *desc,
                                struct blkcipher_walk *walk);

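/* Map/unmap the current source and destination scatterlist pages into
 * the kernel's address space so the cipher can work on them directly.
 */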
static inline void blkcipher_map_src(struct blkcipher_walk *walk)
{
        walk->src.virt.addr = scatterwalk_map(&walk->in, 0);
}

static inline void blkcipher_map_dst(struct blkcipher_walk *walk)
{
        walk->dst.virt.addr = scatterwalk_map(&walk->out, 1);
}

static inline void blkcipher_unmap_src(struct blkcipher_walk *walk)
{
        scatterwalk_unmap(walk->src.virt.addr, 0);
}

static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
{
        scatterwalk_unmap(walk->dst.virt.addr, 1);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
{
        u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
        return max(start, end_page);
}

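/* Slow-path completion: the block was processed in an aligned bounce
 * buffer, so copy the result back out to the destination scatterlist.
 */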
static inline unsigned int blkcipher_done_slow(struct crypto_blkcipher *tfm,
                                               struct blkcipher_walk *walk,
                                               unsigned int bsize)
{
        u8 *addr;
        unsigned int alignmask = crypto_blkcipher_alignmask(tfm);

        addr = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
        addr = blkcipher_get_spot(addr, bsize);
        scatterwalk_copychunks(addr, &walk->out, bsize, 1);
        return bsize;
}

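/* Fast-path completion: write back the copy buffer if one was used,
 * unmap any mapped pages and advance both scatterlist walks by n bytes.
 */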
static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
                                               unsigned int n)
{
        if (walk->flags & BLKCIPHER_WALK_COPY) {
                blkcipher_map_dst(walk);
                memcpy(walk->dst.virt.addr, walk->page, n);
                blkcipher_unmap_dst(walk);
        } else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
                blkcipher_unmap_src(walk);
                if (walk->flags & BLKCIPHER_WALK_DIFF)
                        blkcipher_unmap_dst(walk);
        }

        scatterwalk_advance(&walk->in, n);
        scatterwalk_advance(&walk->out, n);

        return n;
}

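/* Finish the current step of a walk.  A non-negative err is the number
 * of bytes the cipher left unprocessed; on the final step the (possibly
 * bounced) IV is copied back and any temporary buffers are freed.
 */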
int blkcipher_walk_done(struct blkcipher_desc *desc,
                        struct blkcipher_walk *walk, int err)
{
        struct crypto_blkcipher *tfm = desc->tfm;
        unsigned int nbytes = 0;

        if (likely(err >= 0)) {
                unsigned int n = walk->nbytes - err;

                if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW)))
                        n = blkcipher_done_fast(walk, n);
                else if (WARN_ON(err)) {
                        err = -EINVAL;
                        goto err;
                } else
                        n = blkcipher_done_slow(tfm, walk, n);

                nbytes = walk->total - n;
                err = 0;
        }

        scatterwalk_done(&walk->in, 0, nbytes);
        scatterwalk_done(&walk->out, 1, nbytes);

        walk->total = nbytes;
        walk->nbytes = nbytes;

        if (nbytes) {
                crypto_yield(desc->flags);
                return blkcipher_walk_next(desc, walk);
        }

err:
        if (walk->iv != desc->info)
                memcpy(desc->info, walk->iv, crypto_blkcipher_ivsize(tfm));
        if (walk->buffer != walk->page)
                kfree(walk->buffer);
        if (walk->page)
                free_page((unsigned long)walk->page);

        return err;
}
EXPORT_SYMBOL_GPL(blkcipher_walk_done);

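/* Slow path: less than one block of contiguous data is available, so
 * assemble a full block in an aligned bounce buffer and process it there.
 */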
static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
                                      struct blkcipher_walk *walk,
                                      unsigned int bsize,
                                      unsigned int alignmask)
{
        unsigned int n;
        unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);

        if (walk->buffer)
                goto ok;

        walk->buffer = walk->page;
        if (walk->buffer)
                goto ok;

        n = aligned_bsize * 3 - (alignmask + 1) +
            (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
        walk->buffer = kmalloc(n, GFP_ATOMIC);
        if (!walk->buffer)
                return blkcipher_walk_done(desc, walk, -ENOMEM);

ok:
        walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer,
                                          alignmask + 1);
        walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize);
        walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr +
                                                 aligned_bsize, bsize);

        scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

        walk->nbytes = bsize;
        walk->flags |= BLKCIPHER_WALK_SLOW;

        return 0;
}

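/* Copy path: source or destination is misaligned for this cipher, so
 * bounce the data through an aligned scratch page and work in place.
 */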
static inline int blkcipher_next_copy(struct blkcipher_walk *walk)
{
        u8 *tmp = walk->page;

        blkcipher_map_src(walk);
        memcpy(tmp, walk->src.virt.addr, walk->nbytes);
        blkcipher_unmap_src(walk);

        walk->src.virt.addr = tmp;
        walk->dst.virt.addr = tmp;

        return 0;
}

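/* Fast path: operate directly on the scatterlist pages, mapping the
 * destination separately only when it differs from the source.
 */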
static inline int blkcipher_next_fast(struct blkcipher_desc *desc,
                                      struct blkcipher_walk *walk)
{
        unsigned long diff;

        walk->src.phys.page = scatterwalk_page(&walk->in);
        walk->src.phys.offset = offset_in_page(walk->in.offset);
        walk->dst.phys.page = scatterwalk_page(&walk->out);
        walk->dst.phys.offset = offset_in_page(walk->out.offset);

        if (walk->flags & BLKCIPHER_WALK_PHYS)
                return 0;

        diff = walk->src.phys.offset - walk->dst.phys.offset;
        diff |= walk->src.virt.page - walk->dst.virt.page;

        blkcipher_map_src(walk);
        walk->dst.virt.addr = walk->src.virt.addr;

        if (diff) {
                walk->flags |= BLKCIPHER_WALK_DIFF;
                blkcipher_map_dst(walk);
        }

        return 0;
}

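/* Set up the next chunk of the walk, choosing the fast, copy or slow
 * path based on alignment and how much contiguous data is available.
 */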
static int blkcipher_walk_next(struct blkcipher_desc *desc,
                               struct blkcipher_walk *walk)
{
        struct crypto_blkcipher *tfm = desc->tfm;
        unsigned int alignmask = crypto_blkcipher_alignmask(tfm);
        unsigned int bsize;
        unsigned int n;
        int err;

        n = walk->total;
        if (unlikely(n < crypto_blkcipher_blocksize(tfm))) {
                desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
                return blkcipher_walk_done(desc, walk, -EINVAL);
        }

        walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
                         BLKCIPHER_WALK_DIFF);
        if (!scatterwalk_aligned(&walk->in, alignmask) ||
            !scatterwalk_aligned(&walk->out, alignmask)) {
                walk->flags |= BLKCIPHER_WALK_COPY;
                if (!walk->page) {
                        walk->page = (void *)__get_free_page(GFP_ATOMIC);
                        if (!walk->page)
                                n = 0;
                }
        }

        bsize = min(walk->blocksize, n);
        n = scatterwalk_clamp(&walk->in, n);
        n = scatterwalk_clamp(&walk->out, n);

        if (unlikely(n < bsize)) {
                err = blkcipher_next_slow(desc, walk, bsize, alignmask);
                goto set_phys_lowmem;
        }

        walk->nbytes = n;
        if (walk->flags & BLKCIPHER_WALK_COPY) {
                err = blkcipher_next_copy(walk);
                goto set_phys_lowmem;
        }

        return blkcipher_next_fast(desc, walk);

set_phys_lowmem:
        if (walk->flags & BLKCIPHER_WALK_PHYS) {
                walk->src.phys.page = virt_to_page(walk->src.virt.addr);
                walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
                walk->src.phys.offset &= PAGE_SIZE - 1;
                walk->dst.phys.offset &= PAGE_SIZE - 1;
        }
        return err;
}

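/* The caller's IV is misaligned for this cipher: copy it into an
 * aligned spot inside walk->buffer, which is shared with the slow-path
 * bounce blocks.
 */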
static inline int blkcipher_copy_iv(struct blkcipher_walk *walk,
                                    struct crypto_blkcipher *tfm,
                                    unsigned int alignmask)
{
        unsigned bs = walk->blocksize;
        unsigned int ivsize = crypto_blkcipher_ivsize(tfm);
        unsigned aligned_bs = ALIGN(bs, alignmask + 1);
        unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
                            (alignmask + 1);
        u8 *iv;

        size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
        walk->buffer = kmalloc(size, GFP_ATOMIC);
        if (!walk->buffer)
                return -ENOMEM;

        iv = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
        iv = blkcipher_get_spot(iv, bs) + aligned_bs;
        iv = blkcipher_get_spot(iv, bs) + aligned_bs;
        iv = blkcipher_get_spot(iv, ivsize);

        walk->iv = memcpy(iv, walk->iv, ivsize);
        return 0;
}

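/* Start a walk that yields virtual addresses, stepping by the cipher's
 * block size.
 */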
int blkcipher_walk_virt(struct blkcipher_desc *desc,
                        struct blkcipher_walk *walk)
{
        walk->flags &= ~BLKCIPHER_WALK_PHYS;
        walk->blocksize = crypto_blkcipher_blocksize(desc->tfm);
        return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt);

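/* As blkcipher_walk_virt(), but yield page/offset pairs for callers
 * that need physical addresses.
 */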
int blkcipher_walk_phys(struct blkcipher_desc *desc,
                        struct blkcipher_walk *walk)
{
        walk->flags |= BLKCIPHER_WALK_PHYS;
        walk->blocksize = crypto_blkcipher_blocksize(desc->tfm);
        return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_phys);

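/* Common walk setup: refuse to run in hard IRQ context, make sure the
 * IV is aligned, and start both scatterlist walks.
 */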
static int blkcipher_walk_first(struct blkcipher_desc *desc,
                                struct blkcipher_walk *walk)
{
        struct crypto_blkcipher *tfm = desc->tfm;
        unsigned int alignmask = crypto_blkcipher_alignmask(tfm);

        if (WARN_ON_ONCE(in_irq()))
                return -EDEADLK;

        walk->nbytes = walk->total;
        if (unlikely(!walk->total))
                return 0;

        walk->buffer = NULL;
        walk->iv = desc->info;
        if (unlikely(((unsigned long)walk->iv & alignmask))) {
                int err = blkcipher_copy_iv(walk, tfm, alignmask);
                if (err)
                        return err;
        }

        scatterwalk_start(&walk->in, walk->in.sg);
        scatterwalk_start(&walk->out, walk->out.sg);
        walk->page = NULL;

        return blkcipher_walk_next(desc, walk);
}

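/* Like blkcipher_walk_virt(), but step by a caller-supplied block size
 * instead of the cipher's own (used by modes such as CTR).
 */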
int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
                              struct blkcipher_walk *walk,
                              unsigned int blocksize)
{
        walk->flags &= ~BLKCIPHER_WALK_PHYS;
        walk->blocksize = blocksize;
        return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block);

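/* The key is misaligned for the underlying cipher: set it via an
 * aligned copy, then wipe and free the copy.
 */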
static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
                            unsigned int keylen)
{
        struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
        unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
        int ret;
        u8 *buffer, *alignbuffer;
        unsigned long absize;

        absize = keylen + alignmask;
        buffer = kmalloc(absize, GFP_ATOMIC);
        if (!buffer)
                return -ENOMEM;

        alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
        memcpy(alignbuffer, key, keylen);
        ret = cipher->setkey(tfm, alignbuffer, keylen);
        memset(alignbuffer, 0, keylen);
        kfree(buffer);
        return ret;
}

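/* Check the key length, then hand the key to the algorithm's setkey,
 * bouncing through an aligned buffer if the caller's key is misaligned.
 */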
static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
        struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
        unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);

        if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
                tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        }

        if ((unsigned long)key & alignmask)
                return setkey_unaligned(tfm, key, keylen);

        return cipher->setkey(tfm, key, keylen);
}

static int async_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
                        unsigned int keylen)
{
        return setkey(crypto_ablkcipher_tfm(tfm), key, keylen);
}

static int async_encrypt(struct ablkcipher_request *req)
{
        struct crypto_tfm *tfm = req->base.tfm;
        struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
        struct blkcipher_desc desc = {
                .tfm = __crypto_blkcipher_cast(tfm),
                .info = req->info,
                .flags = req->base.flags,
        };

        return alg->encrypt(&desc, req->dst, req->src, req->nbytes);
}

static int async_decrypt(struct ablkcipher_request *req)
{
        struct crypto_tfm *tfm = req->base.tfm;
        struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
        struct blkcipher_desc desc = {
                .tfm = __crypto_blkcipher_cast(tfm),
                .info = req->info,
                .flags = req->base.flags,
        };

        return alg->decrypt(&desc, req->dst, req->src, req->nbytes);
}

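/* Context size for a blkcipher transform; synchronous users get extra
 * aligned space after the context to hold the IV.
 */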
static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
                                             u32 mask)
{
        struct blkcipher_alg *cipher = &alg->cra_blkcipher;
        unsigned int len = alg->cra_ctxsize;

        if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK &&
            cipher->ivsize) {
                len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
                len += cipher->ivsize;
        }

        return len;
}

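/* Expose a synchronous blkcipher through the asynchronous ablkcipher
 * interface.
 */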
static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
{
        struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
        struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

        crt->setkey = async_setkey;
        crt->encrypt = async_encrypt;
        crt->decrypt = async_decrypt;
        if (!alg->ivsize) {
                crt->givencrypt = skcipher_null_givencrypt;
                crt->givdecrypt = skcipher_null_givdecrypt;
        }
        crt->base = __crypto_ablkcipher_cast(tfm);
        crt->ivsize = alg->ivsize;

        return 0;
}

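/* Synchronous interface: use the algorithm's entry points directly and
 * point crt->iv at the aligned tail of the context area reserved by
 * crypto_blkcipher_ctxsize().
 */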
static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm)
{
        struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
        struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
        unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
        unsigned long addr;

        crt->setkey = setkey;
        crt->encrypt = alg->encrypt;
        crt->decrypt = alg->decrypt;

        addr = (unsigned long)crypto_tfm_ctx(tfm);
        addr = ALIGN(addr, align);
        addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
        crt->iv = (void *)addr;

        return 0;
}

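/* Reject oversized IVs, then initialise either the synchronous or the
 * asynchronous ops depending on the requested type.
 */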
static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
        struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

        if (alg->ivsize > PAGE_SIZE / 8)
                return -EINVAL;

        if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK)
                return crypto_init_blkcipher_ops_sync(tfm);
        else
                return crypto_init_blkcipher_ops_async(tfm);
}

static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
        __attribute__ ((unused));
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
        seq_printf(m, "type         : blkcipher\n");
        seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
        seq_printf(m, "min keysize  : %u\n", alg->cra_blkcipher.min_keysize);
        seq_printf(m, "max keysize  : %u\n", alg->cra_blkcipher.max_keysize);
        seq_printf(m, "ivsize       : %u\n", alg->cra_blkcipher.ivsize);
        seq_printf(m, "geniv        : %s\n", alg->cra_blkcipher.geniv ?:
                                             "<default>");
}

const struct crypto_type crypto_blkcipher_type = {
        .ctxsize = crypto_blkcipher_ctxsize,
        .init = crypto_init_blkcipher_ops,
#ifdef CONFIG_PROC_FS
        .show = crypto_blkcipher_show,
#endif
};
EXPORT_SYMBOL_GPL(crypto_blkcipher_type);

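/* Look up the underlying cipher for an IV-generator template and attach
 * it to the spawn.
 */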
static int crypto_grab_nivcipher(struct crypto_skcipher_spawn *spawn,
                                 const char *name, u32 type, u32 mask)
{
        struct crypto_alg *alg;
        int err;

        type = crypto_skcipher_type(type);
        mask = crypto_skcipher_mask(mask) | CRYPTO_ALG_GENIV;

        alg = crypto_alg_mod_lookup(name, type, mask);
        if (IS_ERR(alg))
                return PTR_ERR(alg);

        err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
        crypto_mod_put(alg);
        return err;
}

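/* Build an IV-generator template instance around a blkcipher or
 * ablkcipher algorithm, carrying its key and IV parameters across.
 */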
struct crypto_instance *skcipher_geniv_alloc(struct crypto_template *tmpl,
                                             struct rtattr **tb, u32 type,
                                             u32 mask)
{
        struct {
                int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
                              unsigned int keylen);
                int (*encrypt)(struct ablkcipher_request *req);
                int (*decrypt)(struct ablkcipher_request *req);

                unsigned int min_keysize;
                unsigned int max_keysize;
                unsigned int ivsize;

                const char *geniv;
        } balg;
        const char *name;
        struct crypto_skcipher_spawn *spawn;
        struct crypto_attr_type *algt;
        struct crypto_instance *inst;
        struct crypto_alg *alg;
        int err;

        algt = crypto_get_attr_type(tb);
        err = PTR_ERR(algt);
        if (IS_ERR(algt))
                return ERR_PTR(err);

        if ((algt->type ^ (CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV)) &
            algt->mask)
                return ERR_PTR(-EINVAL);

        name = crypto_attr_alg_name(tb[1]);
        err = PTR_ERR(name);
        if (IS_ERR(name))
                return ERR_PTR(err);

        inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
        if (!inst)
                return ERR_PTR(-ENOMEM);

        spawn = crypto_instance_ctx(inst);

        /* Ignore async algorithms if necessary. */
        mask |= crypto_requires_sync(algt->type, algt->mask);

        crypto_set_skcipher_spawn(spawn, inst);
        err = crypto_grab_nivcipher(spawn, name, type, mask);
        if (err)
                goto err_free_inst;

        alg = crypto_skcipher_spawn_alg(spawn);

        if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
            CRYPTO_ALG_TYPE_BLKCIPHER) {
                balg.ivsize = alg->cra_blkcipher.ivsize;
                balg.min_keysize = alg->cra_blkcipher.min_keysize;
                balg.max_keysize = alg->cra_blkcipher.max_keysize;

                balg.setkey = async_setkey;
                balg.encrypt = async_encrypt;
                balg.decrypt = async_decrypt;

                balg.geniv = alg->cra_blkcipher.geniv;
        } else {
                balg.ivsize = alg->cra_ablkcipher.ivsize;
                balg.min_keysize = alg->cra_ablkcipher.min_keysize;
                balg.max_keysize = alg->cra_ablkcipher.max_keysize;

                balg.setkey = alg->cra_ablkcipher.setkey;
                balg.encrypt = alg->cra_ablkcipher.encrypt;
                balg.decrypt = alg->cra_ablkcipher.decrypt;

                balg.geniv = alg->cra_ablkcipher.geniv;
        }

        err = -EINVAL;
        if (!balg.ivsize)
                goto err_drop_alg;

        /*
         * This is only true if we're constructing an algorithm with its
         * default IV generator.  For the default generator we elide the
         * template name and double-check the IV generator.
         */
        if (algt->mask & CRYPTO_ALG_GENIV) {
                if (!balg.geniv)
                        balg.geniv = crypto_default_geniv(alg);
                err = -EAGAIN;
                if (strcmp(tmpl->name, balg.geniv))
                        goto err_drop_alg;

                memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
                memcpy(inst->alg.cra_driver_name, alg->cra_driver_name,
                       CRYPTO_MAX_ALG_NAME);
        } else {
                err = -ENAMETOOLONG;
                if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
                             "%s(%s)", tmpl->name, alg->cra_name) >=
                    CRYPTO_MAX_ALG_NAME)
                        goto err_drop_alg;
                if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
                             "%s(%s)", tmpl->name, alg->cra_driver_name) >=
                    CRYPTO_MAX_ALG_NAME)
                        goto err_drop_alg;
        }

        inst->alg.cra_flags = CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV;
        inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
        inst->alg.cra_priority = alg->cra_priority;
        inst->alg.cra_blocksize = alg->cra_blocksize;
        inst->alg.cra_alignmask = alg->cra_alignmask;
        inst->alg.cra_type = &crypto_givcipher_type;

        inst->alg.cra_ablkcipher.ivsize = balg.ivsize;
        inst->alg.cra_ablkcipher.min_keysize = balg.min_keysize;
        inst->alg.cra_ablkcipher.max_keysize = balg.max_keysize;
        inst->alg.cra_ablkcipher.geniv = balg.geniv;

        inst->alg.cra_ablkcipher.setkey = balg.setkey;
        inst->alg.cra_ablkcipher.encrypt = balg.encrypt;
        inst->alg.cra_ablkcipher.decrypt = balg.decrypt;

out:
        return inst;

err_drop_alg:
        crypto_drop_skcipher(spawn);
err_free_inst:
        kfree(inst);
        inst = ERR_PTR(err);
        goto out;
}
EXPORT_SYMBOL_GPL(skcipher_geniv_alloc);

void skcipher_geniv_free(struct crypto_instance *inst)
{
        crypto_drop_skcipher(crypto_instance_ctx(inst));
        kfree(inst);
}
EXPORT_SYMBOL_GPL(skcipher_geniv_free);

int skcipher_geniv_init(struct crypto_tfm *tfm)
{
        struct crypto_instance *inst = (void *)tfm->__crt_alg;
        struct crypto_ablkcipher *cipher;

        cipher = crypto_spawn_skcipher(crypto_instance_ctx(inst));
        if (IS_ERR(cipher))
                return PTR_ERR(cipher);

        tfm->crt_ablkcipher.base = cipher;
        tfm->crt_ablkcipher.reqsize += crypto_ablkcipher_reqsize(cipher);

        return 0;
}
EXPORT_SYMBOL_GPL(skcipher_geniv_init);

void skcipher_geniv_exit(struct crypto_tfm *tfm)
{
        crypto_free_ablkcipher(tfm->crt_ablkcipher.base);
}
EXPORT_SYMBOL_GPL(skcipher_geniv_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic block chaining cipher type");