/*
 * Copyright (C)2002 USAGI/WIDE Project
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Authors
 *
 * Mitsuru KANDA @USAGI       : IPv6 Support
 * Kazunori MIYAZAWA @USAGI   :
 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *
 * This file is derived from net/ipv4/esp.c
 */

#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/ip6_route.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <linux/icmpv6.h>

struct esp_skb_cb {
        struct xfrm_skb_cb xfrm;
        void *tmp;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))

static u32 esp6_get_mtu(struct xfrm_state *x, int mtu);

/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the upper 32 bits of the sequence number
 * are placed at the front, if present.  They are followed by the IV, the
 * request and finally the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqihlen)
{
        unsigned int len;

        len = seqihlen;

        len += crypto_aead_ivsize(aead);

        if (len) {
                len += crypto_aead_alignmask(aead) &
                       ~(crypto_tfm_ctx_alignment() - 1);
                len = ALIGN(len, crypto_tfm_ctx_alignment());
        }

        len += sizeof(struct aead_givcrypt_request) + crypto_aead_reqsize(aead);
        len = ALIGN(len, __alignof__(struct scatterlist));

        len += sizeof(struct scatterlist) * nfrags;

        return kmalloc(len, GFP_ATOMIC);
}
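
/*
 * The esp_tmp_*() helpers below carve the buffer allocated above into:
 *
 *      [seqhi (ESN only)][IV][AEAD request + ctx][scatterlist array]
 *
 * Each helper recomputes the aligned position rather than relying on
 * fixed offsets.
 */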

static inline __be32 *esp_tmp_seqhi(void *tmp)
{
        return PTR_ALIGN((__be32 *)tmp, __alignof__(__be32));
}

static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
{
        return crypto_aead_ivsize(aead) ?
               PTR_ALIGN((u8 *)tmp + seqhilen,
                         crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
}

static inline struct aead_givcrypt_request *esp_tmp_givreq(
        struct crypto_aead *aead, u8 *iv)
{
        struct aead_givcrypt_request *req;

        req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
                                crypto_tfm_ctx_alignment());
        aead_givcrypt_set_tfm(req, aead);
        return req;
}

static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
        struct aead_request *req;

        req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
                                crypto_tfm_ctx_alignment());
        aead_request_set_tfm(req, aead);
        return req;
}

static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
                                             struct aead_request *req)
{
        return (void *)ALIGN((unsigned long)(req + 1) +
                             crypto_aead_reqsize(aead),
                             __alignof__(struct scatterlist));
}

static inline struct scatterlist *esp_givreq_sg(
        struct crypto_aead *aead, struct aead_givcrypt_request *req)
{
        return (void *)ALIGN((unsigned long)(req + 1) +
                             crypto_aead_reqsize(aead),
                             __alignof__(struct scatterlist));
}

static void esp_output_done(struct crypto_async_request *base, int err)
{
        struct sk_buff *skb = base->data;

        kfree(ESP_SKB_CB(skb)->tmp);
        xfrm_output_resume(skb, err);
}
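
/*
 * Transmit-side ESP transform: pads the payload per RFC 4303 (including
 * optional TFC padding), appends the trailer and ICV space, and hands
 * the packet to the AEAD "givencrypt" operation, which also generates
 * the IV.  With extended sequence numbers, the upper 32 sequence bits
 * are fed in as extra associated data.  Completion may be asynchronous,
 * in which case esp_output_done() resumes the xfrm output path.
 */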
static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
{
        int err;
        struct ip_esp_hdr *esph;
        struct crypto_aead *aead;
        struct aead_givcrypt_request *req;
        struct scatterlist *sg;
        struct scatterlist *asg;
        struct sk_buff *trailer;
        void *tmp;
        int blksize;
        int clen;
        int alen;
        int plen;
        int tfclen;
        int nfrags;
        int assoclen;
        int sglists;
        int seqhilen;
        u8 *iv;
        u8 *tail;
        __be32 *seqhi;
        struct esp_data *esp = x->data;

        /* skb is pure payload to encrypt */
        aead = esp->aead;
        alen = crypto_aead_authsize(aead);

        tfclen = 0;
        if (x->tfcpad) {
                struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
                u32 padto;

                padto = min(x->tfcpad, esp6_get_mtu(x, dst->child_mtu_cached));
                if (skb->len < padto)
                        tfclen = padto - skb->len;
        }
        blksize = ALIGN(crypto_aead_blocksize(aead), 4);
        clen = ALIGN(skb->len + 2 + tfclen, blksize);
        if (esp->padlen)
                clen = ALIGN(clen, esp->padlen);
        plen = clen - skb->len - tfclen;

        err = skb_cow_data(skb, tfclen + plen + alen, &trailer);
        if (err < 0)
                goto error;
        nfrags = err;

        assoclen = sizeof(*esph);
        sglists = 1;
        seqhilen = 0;

        if (x->props.flags & XFRM_STATE_ESN) {
                sglists += 2;
                seqhilen += sizeof(__be32);
                assoclen += seqhilen;
        }

        tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
        if (!tmp) {
                err = -ENOMEM;
                goto error;
        }

        seqhi = esp_tmp_seqhi(tmp);
        iv = esp_tmp_iv(aead, tmp, seqhilen);
        req = esp_tmp_givreq(aead, iv);
        asg = esp_givreq_sg(aead, req);
        sg = asg + sglists;

        /* Fill padding... */
        tail = skb_tail_pointer(trailer);
        if (tfclen) {
                memset(tail, 0, tfclen);
                tail += tfclen;
        }
        do {
                int i;
                for (i = 0; i < plen - 2; i++)
                        tail[i] = i + 1;
        } while (0);
        tail[plen - 2] = plen - 2;
        tail[plen - 1] = *skb_mac_header(skb);
        pskb_put(skb, trailer, clen - skb->len + alen);

        skb_push(skb, -skb_network_offset(skb));
        esph = ip_esp_hdr(skb);
        *skb_mac_header(skb) = IPPROTO_ESP;

        esph->spi = x->id.spi;
        esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

        sg_init_table(sg, nfrags);
        skb_to_sgvec(skb, sg,
                     esph->enc_data + crypto_aead_ivsize(aead) - skb->data,
                     clen + alen);

        if ((x->props.flags & XFRM_STATE_ESN)) {
                sg_init_table(asg, 3);
                sg_set_buf(asg, &esph->spi, sizeof(__be32));
                *seqhi = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
                sg_set_buf(asg + 1, seqhi, seqhilen);
                sg_set_buf(asg + 2, &esph->seq_no, sizeof(__be32));
        } else
                sg_init_one(asg, esph, sizeof(*esph));

        aead_givcrypt_set_callback(req, 0, esp_output_done, skb);
        aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
        aead_givcrypt_set_assoc(req, asg, assoclen);
        aead_givcrypt_set_giv(req, esph->enc_data,
                              XFRM_SKB_CB(skb)->seq.output.low);

        ESP_SKB_CB(skb)->tmp = tmp;
        err = crypto_aead_givencrypt(req);
        if (err == -EINPROGRESS)
                goto error;

        if (err == -EBUSY)
                err = NET_XMIT_DROP;

        kfree(tmp);

error:
        return err;
}
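
/*
 * Receive-side post-decryption handling: strips the ESP padding and
 * trailer, validates the pad length against the payload size, and
 * returns the next-header value (or a negative error).  Dummy packets
 * (next header IPPROTO_NONE) are dropped silently per RFC 4303.
 */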
static int esp_input_done2(struct sk_buff *skb, int err)
{
        struct xfrm_state *x = xfrm_input_state(skb);
        struct esp_data *esp = x->data;
        struct crypto_aead *aead = esp->aead;
        int alen = crypto_aead_authsize(aead);
        int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
        int elen = skb->len - hlen;
        int hdr_len = skb_network_header_len(skb);
        int padlen;
        u8 nexthdr[2];

        kfree(ESP_SKB_CB(skb)->tmp);

        if (unlikely(err))
                goto out;

        if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
                BUG();

        err = -EINVAL;
        padlen = nexthdr[0];
        if (padlen + 2 + alen >= elen) {
                LIMIT_NETDEBUG(KERN_WARNING "ipsec esp packet is garbage "
                               "padlen=%d, elen=%d\n", padlen + 2, elen - alen);
                goto out;
        }

        /* ... check padding bits here. Silly. :-) */

        pskb_trim(skb, skb->len - alen - padlen - 2);
        __skb_pull(skb, hlen);
        skb_set_transport_header(skb, -hdr_len);

        err = nexthdr[1];

        /* RFC4303: Drop dummy packets without any error */
        if (err == IPPROTO_NONE)
                err = -EINVAL;

out:
        return err;
}

static void esp_input_done(struct crypto_async_request *base, int err)
{
        struct sk_buff *skb = base->data;

        xfrm_input_resume(skb, esp_input_done2(skb, err));
}
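
/*
 * Receive-side ESP transform: checks that the packet can hold at least
 * the ESP header and IV, maps the encrypted data into scatterlists
 * (plus the ESN high bits as associated data when enabled) and runs the
 * AEAD decryption, finishing synchronously or via esp_input_done().
 */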
static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
{
        struct ip_esp_hdr *esph;
        struct esp_data *esp = x->data;
        struct crypto_aead *aead = esp->aead;
        struct aead_request *req;
        struct sk_buff *trailer;
        int elen = skb->len - sizeof(*esph) - crypto_aead_ivsize(aead);
        int nfrags;
        int assoclen;
        int sglists;
        int seqhilen;
        int ret = 0;
        void *tmp;
        __be32 *seqhi;
        u8 *iv;
        struct scatterlist *sg;
        struct scatterlist *asg;

        if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead))) {
                ret = -EINVAL;
                goto out;
        }

        if (elen <= 0) {
                ret = -EINVAL;
                goto out;
        }

        if ((nfrags = skb_cow_data(skb, 0, &trailer)) < 0) {
                ret = -EINVAL;
                goto out;
        }

        ret = -ENOMEM;

        assoclen = sizeof(*esph);
        sglists = 1;
        seqhilen = 0;

        if (x->props.flags & XFRM_STATE_ESN) {
                sglists += 2;
                seqhilen += sizeof(__be32);
                assoclen += seqhilen;
        }

        tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
        if (!tmp)
                goto out;

        ESP_SKB_CB(skb)->tmp = tmp;
        seqhi = esp_tmp_seqhi(tmp);
        iv = esp_tmp_iv(aead, tmp, seqhilen);
        req = esp_tmp_req(aead, iv);
        asg = esp_req_sg(aead, req);
        sg = asg + sglists;

        skb->ip_summed = CHECKSUM_NONE;

        esph = (struct ip_esp_hdr *)skb->data;

        /* Get ivec. This can be wrong, check against another impls. */
        iv = esph->enc_data;

        sg_init_table(sg, nfrags);
        skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen);

        if ((x->props.flags & XFRM_STATE_ESN)) {
                sg_init_table(asg, 3);
                sg_set_buf(asg, &esph->spi, sizeof(__be32));
                *seqhi = XFRM_SKB_CB(skb)->seq.input.hi;
                sg_set_buf(asg + 1, seqhi, seqhilen);
                sg_set_buf(asg + 2, &esph->seq_no, sizeof(__be32));
        } else
                sg_init_one(asg, esph, sizeof(*esph));

        aead_request_set_callback(req, 0, esp_input_done, skb);
        aead_request_set_crypt(req, sg, sg, elen, iv);
        aead_request_set_assoc(req, asg, assoclen);

        ret = crypto_aead_decrypt(req);
        if (ret == -EINPROGRESS)
                goto out;

        ret = esp_input_done2(skb, ret);

out:
        return ret;
}
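
/*
 * Compute the maximum payload size for a given link MTU: subtract the
 * ESP header, IV and ICV, round down to the pad alignment, and reserve
 * the two trailer bytes (pad length and next header).
 */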
static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
{
        struct esp_data *esp = x->data;
        u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
        u32 align = max_t(u32, blksize, esp->padlen);
        u32 rem;

        mtu -= x->props.header_len + crypto_aead_authsize(esp->aead);
        rem = mtu & (align - 1);
        mtu &= ~(align - 1);

        if (x->props.mode != XFRM_MODE_TUNNEL) {
                u32 padsize = ((blksize - 1) & 7) + 1;
                mtu -= blksize - padsize;
                mtu += min_t(u32, blksize - padsize, rem);
        }

        return mtu - 2;
}
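
/*
 * ICMPv6 error handler: for "destination unreachable" and "packet too
 * big" errors that reference one of our SAs, log the path-MTU discovery
 * event against the matching xfrm state.
 */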
static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                     u8 type, u8 code, int offset, __be32 info)
{
        struct net *net = dev_net(skb->dev);
        const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
        struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset);
        struct xfrm_state *x;

        if (type != ICMPV6_DEST_UNREACH &&
            type != ICMPV6_PKT_TOOBIG)
                return;

        x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
                              esph->spi, IPPROTO_ESP, AF_INET6);
        if (!x)
                return;
        printk(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%pI6\n",
               ntohl(esph->spi), &iph->daddr);
        xfrm_state_put(x);
}

static void esp6_destroy(struct xfrm_state *x)
{
        struct esp_data *esp = x->data;

        if (!esp)
                return;

        crypto_free_aead(esp->aead);
        kfree(esp);
}
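
/*
 * Set up a genuine AEAD algorithm (e.g. rfc4106(gcm(aes))) from the
 * single combined-mode key supplied in x->aead.
 */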
static int esp_init_aead(struct xfrm_state *x)
{
        struct esp_data *esp = x->data;
        struct crypto_aead *aead;
        int err;

        aead = crypto_alloc_aead(x->aead->alg_name, 0, 0);
        err = PTR_ERR(aead);
        if (IS_ERR(aead))
                goto error;

        esp->aead = aead;

        err = crypto_aead_setkey(aead, x->aead->alg_key,
                                 (x->aead->alg_key_len + 7) / 8);
        if (err)
                goto error;

        err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
        if (err)
                goto error;

error:
        return err;
}
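
/*
 * Glue a separate cipher and authenticator into one AEAD transform via
 * the authenc (or authencesn, for extended sequence numbers) template.
 * Both keys are packed into a single blob:
 *
 *      [rtattr CRYPTO_AUTHENC_KEYA_PARAM (enc key length)][auth key][enc key]
 */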
static int esp_init_authenc(struct xfrm_state *x)
{
        struct esp_data *esp = x->data;
        struct crypto_aead *aead;
        struct crypto_authenc_key_param *param;
        struct rtattr *rta;
        char *key;
        char *p;
        char authenc_name[CRYPTO_MAX_ALG_NAME];
        unsigned int keylen;
        int err;

        err = -EINVAL;
        if (x->ealg == NULL)
                goto error;

        err = -ENAMETOOLONG;

        if ((x->props.flags & XFRM_STATE_ESN)) {
                if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
                             "authencesn(%s,%s)",
                             x->aalg ? x->aalg->alg_name : "digest_null",
                             x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
                        goto error;
        } else {
                if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
                             "authenc(%s,%s)",
                             x->aalg ? x->aalg->alg_name : "digest_null",
                             x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
                        goto error;
        }

        aead = crypto_alloc_aead(authenc_name, 0, 0);
        err = PTR_ERR(aead);
        if (IS_ERR(aead))
                goto error;

        esp->aead = aead;

        keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
                 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
        err = -ENOMEM;
        key = kmalloc(keylen, GFP_KERNEL);
        if (!key)
                goto error;

        p = key;
        rta = (void *)p;
        rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
        rta->rta_len = RTA_LENGTH(sizeof(*param));
        param = RTA_DATA(rta);
        p += RTA_SPACE(sizeof(*param));

        if (x->aalg) {
                struct xfrm_algo_desc *aalg_desc;

                memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
                p += (x->aalg->alg_key_len + 7) / 8;

                aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
                BUG_ON(!aalg_desc);

                err = -EINVAL;
                if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
                    crypto_aead_authsize(aead)) {
                        NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n",
                                 x->aalg->alg_name,
                                 crypto_aead_authsize(aead),
                                 aalg_desc->uinfo.auth.icv_fullbits / 8);
                        goto free_key;
                }

                err = crypto_aead_setauthsize(
                        aead, x->aalg->alg_trunc_len / 8);
                if (err)
                        goto free_key;
        }

        param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
        memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

        err = crypto_aead_setkey(aead, key, keylen);

free_key:
        kfree(key);

error:
        return err;
}
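
/*
 * Per-state initialisation: picks the combined-mode or authenc setup
 * path, then derives the header and trailer space the ESP encapsulation
 * will need for the configured mode.
 */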
static int esp6_init_state(struct xfrm_state *x)
{
        struct esp_data *esp;
        struct crypto_aead *aead;
        u32 align;
        int err;

        if (x->encap)
                return -EINVAL;

        esp = kzalloc(sizeof(*esp), GFP_KERNEL);
        if (esp == NULL)
                return -ENOMEM;

        x->data = esp;

        if (x->aead)
                err = esp_init_aead(x);
        else
                err = esp_init_authenc(x);

        if (err)
                goto error;

        aead = esp->aead;

        esp->padlen = 0;

        x->props.header_len = sizeof(struct ip_esp_hdr) +
                              crypto_aead_ivsize(aead);
        switch (x->props.mode) {
        case XFRM_MODE_BEET:
                if (x->sel.family != AF_INET6)
                        x->props.header_len += IPV4_BEET_PHMAXLEN +
                                               (sizeof(struct ipv6hdr) - sizeof(struct iphdr));
                break;
        case XFRM_MODE_TRANSPORT:
                break;
        case XFRM_MODE_TUNNEL:
                x->props.header_len += sizeof(struct ipv6hdr);
                break;
        default:
                goto error;
        }

        align = ALIGN(crypto_aead_blocksize(aead), 4);
        if (esp->padlen)
                align = max_t(u32, align, esp->padlen);
        x->props.trailer_len = align + 1 + crypto_aead_authsize(esp->aead);

error:
        return err;
}

static const struct xfrm_type esp6_type =
{
        .description    = "ESP6",
        .owner          = THIS_MODULE,
        .proto          = IPPROTO_ESP,
        .flags          = XFRM_TYPE_REPLAY_PROT,
        .init_state     = esp6_init_state,
        .destructor     = esp6_destroy,
        .get_mtu        = esp6_get_mtu,
        .input          = esp6_input,
        .output         = esp6_output,
        .hdr_offset     = xfrm6_find_1stfragopt,
};

static const struct inet6_protocol esp6_protocol = {
        .handler        = xfrm6_rcv,
        .err_handler    = esp6_err,
        .flags          = INET6_PROTO_NOPOLICY,
};

static int __init esp6_init(void)
{
        if (xfrm_register_type(&esp6_type, AF_INET6) < 0) {
                printk(KERN_INFO "ipv6 esp init: can't add xfrm type\n");
                return -EAGAIN;
        }
        if (inet6_add_protocol(&esp6_protocol, IPPROTO_ESP) < 0) {
                printk(KERN_INFO "ipv6 esp init: can't add protocol\n");
                xfrm_unregister_type(&esp6_type, AF_INET6);
                return -EAGAIN;
        }

        return 0;
}

static void __exit esp6_fini(void)
{
        if (inet6_del_protocol(&esp6_protocol, IPPROTO_ESP) < 0)
                printk(KERN_INFO "ipv6 esp close: can't remove protocol\n");
        if (xfrm_unregister_type(&esp6_type, AF_INET6) < 0)
                printk(KERN_INFO "ipv6 esp close: can't remove xfrm type\n");
}

module_init(esp6_init);
module_exit(esp6_fini);

MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_ESP);