#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/ah.h>
#include <linux/crypto.h>
#include <linux/pfkeyv2.h>
#include <linux/scatterlist.h>
#include <net/icmp.h>
#include <net/protocol.h>
struct ah_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

#define AH_SKB_CB(__skb) ((struct ah_skb_cb *)&((__skb)->cb[0]))
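/*
 * Scratch buffer layout used by the helpers below (a sketch inferred from
 * the allocation arithmetic in ah_alloc_tmp(), not a documented ABI):
 *
 *   [ caller data of 'size' bytes | aligned ICV (digestsize bytes) |
 *     aligned struct ahash_request + transform request context |
 *     aligned struct scatterlist[nfrags] ]
 *
 * ah_tmp_auth(), ah_tmp_icv(), ah_tmp_req() and ah_req_sg() each return a
 * pointer into the corresponding region of this single kmalloc'd block.
 */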
static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags,
			  unsigned int size)
{
	unsigned int len;

	len = size + crypto_ahash_digestsize(ahash) +
	      (crypto_ahash_alignmask(ahash) &
	       ~(crypto_tfm_ctx_alignment() - 1));

	len = ALIGN(len, crypto_tfm_ctx_alignment());

	len += sizeof(struct ahash_request) + crypto_ahash_reqsize(ahash);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}
static inline u8 *ah_tmp_auth(void *tmp, unsigned int offset)
{
	return tmp + offset;
}

static inline u8 *ah_tmp_icv(struct crypto_ahash *ahash, void *tmp,
			     unsigned int offset)
{
	return PTR_ALIGN((u8 *)tmp + offset, crypto_ahash_alignmask(ahash) + 1);
}
static inline struct ahash_request *ah_tmp_req(struct crypto_ahash *ahash,
					       u8 *icv)
{
	struct ahash_request *req;

	req = (void *)PTR_ALIGN(icv + crypto_ahash_digestsize(ahash),
				crypto_tfm_ctx_alignment());

	ahash_request_set_tfm(req, ahash);

	return req;
}
static inline struct scatterlist *ah_req_sg(struct crypto_ahash *ahash,
					    struct ahash_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_ahash_reqsize(ahash),
			     __alignof__(struct scatterlist));
}
/* Clear mutable options and find final destination to substitute
 * into IP header for icv calculation. Options are already checked
 * for validity, so paranoia is not required. */
static int ip_clear_mutable_options(struct iphdr *iph, __be32 *daddr)
{
	unsigned char *optptr = (unsigned char *)(iph + 1);
	int l = iph->ihl * 4 - sizeof(struct iphdr);
	int optlen;

	while (l > 0) {
		switch (*optptr) {
		case IPOPT_END:
			return 0;
		case IPOPT_NOOP:
			l--;
			optptr++;
			continue;
		}
		optlen = optptr[1];
		if (optlen < 2 || optlen > l)
			return -EINVAL;
		switch (*optptr) {
		case IPOPT_SEC:
		case 0x85:	/* Some "Extended Security" crap. */
		case IPOPT_CIPSO:
		case 0x80|21:	/* RFC1770 */
		case IPOPT_SSRR:
		case IPOPT_LSRR:
			memcpy(daddr, optptr + optlen - 4, 4);
			/* Fall through */
		default:
			memset(optptr, 0, optlen);
		}
		l -= optlen;
		optptr += optlen;
	}
	return 0;
}
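/*
 * Completion callback for an asynchronous digest issued from ah_output().
 * The computed ICV is copied into the AH header, the mutable IPv4 fields
 * (tos, ttl, frag_off and any options) saved in the temporary header are
 * restored, and output processing resumes.
 */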
static void ah_output_done(struct crypto_async_request *base, int err)
{
	u8 *icv;
	struct iphdr *iph;
	struct sk_buff *skb = base->data;
	struct xfrm_state *x = skb_dst(skb)->xfrm;
	struct ah_data *ahp = x->data;
	struct iphdr *top_iph = ip_hdr(skb);
	struct ip_auth_hdr *ah = ip_auth_hdr(skb);
	int ihl = ip_hdrlen(skb);

	iph = AH_SKB_CB(skb)->tmp;
	icv = ah_tmp_icv(ahp->ahash, iph, ihl);
	memcpy(ah->auth_data, icv, ahp->icv_trunc_len);

	top_iph->tos = iph->tos;
	top_iph->ttl = iph->ttl;
	top_iph->frag_off = iph->frag_off;
	if (top_iph->ihl != 5) {
		top_iph->daddr = iph->daddr;
		memcpy(top_iph+1, iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
	}

	kfree(AH_SKB_CB(skb)->tmp);
	xfrm_output_resume(skb, err);
}
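/*
 * Transmit-side AH transform.  The mutable IPv4 fields are saved into a
 * temporary header and zeroed (mutable options cleared), the AH header is
 * filled in with SPI and sequence number, and the ICV is computed over the
 * whole packet with auth_data zeroed.  The digest may complete
 * asynchronously, in which case ah_output_done() finishes the work.
 */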
static int ah_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;
	int nfrags;
	int ihl;
	u8 *icv;
	struct sk_buff *trailer;
	struct crypto_ahash *ahash;
	struct ahash_request *req;
	struct scatterlist *sg;
	struct iphdr *iph, *top_iph;
	struct ip_auth_hdr *ah;
	struct ah_data *ahp;

	ahp = x->data;
	ahash = ahp->ahash;

	if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
		goto out;
	nfrags = err;

	skb_push(skb, -skb_network_offset(skb));
	ah = ip_auth_hdr(skb);
	ihl = ip_hdrlen(skb);

	err = -ENOMEM;
	iph = ah_alloc_tmp(ahash, nfrags, ihl);
	if (!iph)
		goto out;

	icv = ah_tmp_icv(ahash, iph, ihl);
	req = ah_tmp_req(ahash, icv);
	sg = ah_req_sg(ahash, req);

	memset(ah->auth_data, 0, ahp->icv_trunc_len);

	top_iph = ip_hdr(skb);

	iph->tos = top_iph->tos;
	iph->ttl = top_iph->ttl;
	iph->frag_off = top_iph->frag_off;

	if (top_iph->ihl != 5) {
		iph->daddr = top_iph->daddr;
		memcpy(iph+1, top_iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
		err = ip_clear_mutable_options(top_iph, &top_iph->daddr);
		if (err)
			goto out_free;
	}

	ah->nexthdr = *skb_mac_header(skb);
	*skb_mac_header(skb) = IPPROTO_AH;

	top_iph->tos = 0;
	top_iph->tot_len = htons(skb->len);
	top_iph->frag_off = 0;
	top_iph->ttl = 0;
	top_iph->check = 0;

	ah->hdrlen = (XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2;

	ah->reserved = 0;
	ah->spi = x->id.spi;
	ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output);

	sg_init_table(sg, nfrags);
	skb_to_sgvec(skb, sg, 0, skb->len);

	ahash_request_set_crypt(req, sg, icv, skb->len);
	ahash_request_set_callback(req, 0, ah_output_done, skb);

	AH_SKB_CB(skb)->tmp = iph;

	err = crypto_ahash_digest(req);
	if (err) {
		if (err == -EINPROGRESS)
			goto out;

		if (err == -EBUSY)
			err = NET_XMIT_DROP;
		goto out_free;
	}

	memcpy(ah->auth_data, icv, ahp->icv_trunc_len);

	top_iph->tos = iph->tos;
	top_iph->ttl = iph->ttl;
	top_iph->frag_off = iph->frag_off;
	if (top_iph->ihl != 5) {
		top_iph->daddr = iph->daddr;
		memcpy(top_iph+1, iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
	}

out_free:
	kfree(iph);
out:
	return err;
}
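/*
 * Completion callback for an asynchronous digest issued from ah_input().
 * The freshly computed ICV is compared against the value received in the
 * AH header; on a match the AH header is stripped and receive processing
 * resumes, otherwise -EBADMSG is propagated.
 */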
static void ah_input_done(struct crypto_async_request *base, int err)
{
	u8 *auth_data;
	u8 *icv;
	struct iphdr *work_iph;
	struct sk_buff *skb = base->data;
	struct xfrm_state *x = xfrm_input_state(skb);
	struct ah_data *ahp = x->data;
	struct ip_auth_hdr *ah = ip_auth_hdr(skb);
	int ihl = ip_hdrlen(skb);
	int ah_hlen = (ah->hdrlen + 2) << 2;

	work_iph = AH_SKB_CB(skb)->tmp;
	auth_data = ah_tmp_auth(work_iph, ihl);
	icv = ah_tmp_icv(ahp->ahash, auth_data, ahp->icv_trunc_len);

	err = memcmp(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG: 0;
	if (err)
		goto out;

	skb->network_header += ah_hlen;
	memcpy(skb_network_header(skb), work_iph, ihl);
	__skb_pull(skb, ah_hlen + ihl);
	skb_set_transport_header(skb, -ihl);

	err = ah->nexthdr;
out:
	kfree(AH_SKB_CB(skb)->tmp);
	xfrm_input_resume(skb, err);
}
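/*
 * Receive-side AH transform.  The IPv4 header and the received ICV are
 * copied into a temporary buffer, mutable fields and options are zeroed,
 * and the ICV is recomputed over the packet and compared against the
 * received value.  On success the AH header is removed so that upper
 * layers see an ordinary IPv4 packet.
 */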
static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
{
	int ah_hlen;
	int ihl;
	int nexthdr;
	int nfrags;
	u8 *auth_data;
	u8 *icv;
	struct sk_buff *trailer;
	struct crypto_ahash *ahash;
	struct ahash_request *req;
	struct scatterlist *sg;
	struct iphdr *iph, *work_iph;
	struct ip_auth_hdr *ah;
	struct ah_data *ahp;
	int err = -EINVAL;

	if (!pskb_may_pull(skb, sizeof(*ah)))
		goto out;

	ah = (struct ip_auth_hdr *)skb->data;
	ahp = x->data;
	ahash = ahp->ahash;

	nexthdr = ah->nexthdr;
	ah_hlen = (ah->hdrlen + 2) << 2;

	if (ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_full_len) &&
	    ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len))
		goto out;

	if (!pskb_may_pull(skb, ah_hlen))
		goto out;

	/* We are going to _remove_ AH header to keep sockets happy,
	 * so... Later this can change. */
	if (skb_cloned(skb) &&
	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
		goto out;

	skb->ip_summed = CHECKSUM_NONE;

	if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
		goto out;
	nfrags = err;

	ah = (struct ip_auth_hdr *)skb->data;
	iph = ip_hdr(skb);
	ihl = ip_hdrlen(skb);

	err = -ENOMEM;
	work_iph = ah_alloc_tmp(ahash, nfrags, ihl + ahp->icv_trunc_len);
	if (!work_iph)
		goto out;

	auth_data = ah_tmp_auth(work_iph, ihl);
	icv = ah_tmp_icv(ahash, auth_data, ahp->icv_trunc_len);
	req = ah_tmp_req(ahash, icv);
	sg = ah_req_sg(ahash, req);

	memcpy(work_iph, iph, ihl);
	memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len);
	memset(ah->auth_data, 0, ahp->icv_trunc_len);

	iph->ttl = 0;
	iph->tos = 0;
	iph->frag_off = 0;
	iph->check = 0;
	if (ihl > sizeof(*iph)) {
		__be32 dummy;
		err = ip_clear_mutable_options(iph, &dummy);
		if (err)
			goto out_free;
	}

	skb_push(skb, ihl);

	sg_init_table(sg, nfrags);
	skb_to_sgvec(skb, sg, 0, skb->len);

	ahash_request_set_crypt(req, sg, icv, skb->len);
	ahash_request_set_callback(req, 0, ah_input_done, skb);

	AH_SKB_CB(skb)->tmp = work_iph;

	err = crypto_ahash_digest(req);
	if (err) {
		if (err == -EINPROGRESS)
			goto out;

		goto out_free;
	}

	err = memcmp(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG: 0;
	if (err)
		goto out_free;

	skb->network_header += ah_hlen;
	memcpy(skb_network_header(skb), work_iph, ihl);
	__skb_pull(skb, ah_hlen + ihl);
	skb_set_transport_header(skb, -ihl);

	err = nexthdr;

out_free:
	kfree(work_iph);
out:
	return err;
}
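/*
 * ICMP error handler: when a "fragmentation needed" error arrives for an
 * AH-protected flow, look up the SA by destination address and SPI and log
 * the path MTU discovery event for it.
 */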
static void ah4_err(struct sk_buff *skb, u32 info)
{
	struct net *net = dev_net(skb->dev);
	struct iphdr *iph = (struct iphdr *)skb->data;
	struct ip_auth_hdr *ah = (struct ip_auth_hdr *)(skb->data+(iph->ihl<<2));
	struct xfrm_state *x;

	if (icmp_hdr(skb)->type != ICMP_DEST_UNREACH ||
	    icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
		return;

	x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, ah->spi, IPPROTO_AH, AF_INET);
	if (!x)
		return;
	printk(KERN_DEBUG "pmtu discovery on SA AH/%08x/%08x\n",
	       ntohl(ah->spi), ntohl(iph->daddr));
	xfrm_state_put(x);
}
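/*
 * Set up the per-SA AH state: allocate the hash transform named by the
 * configured authentication algorithm, set its key, validate the ICV sizes
 * against the xfrm algorithm description, and compute the AH header length
 * added to every packet.
 */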
static int ah_init_state(struct xfrm_state *x)
{
	struct ah_data *ahp = NULL;
	struct xfrm_algo_desc *aalg_desc;
	struct crypto_ahash *ahash;

	if (!x->aalg)
		goto error;

	if (x->encap)
		goto error;

	ahp = kzalloc(sizeof(*ahp), GFP_KERNEL);
	if (!ahp)
		return -ENOMEM;

	ahash = crypto_alloc_ahash(x->aalg->alg_name, 0, 0);
	if (IS_ERR(ahash))
		goto error;

	ahp->ahash = ahash;
	if (crypto_ahash_setkey(ahash, x->aalg->alg_key,
				(x->aalg->alg_key_len + 7) / 8))
		goto error;

	/*
	 * Lookup the algorithm description maintained by xfrm_algo,
	 * verify crypto transform properties, and store information
	 * we need for AH processing. This lookup cannot fail here
	 * after a successful crypto_alloc_ahash().
	 */
	aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
	BUG_ON(!aalg_desc);

	if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
	    crypto_ahash_digestsize(ahash)) {
		printk(KERN_INFO "AH: %s digestsize %u != %hu\n",
		       x->aalg->alg_name, crypto_ahash_digestsize(ahash),
		       aalg_desc->uinfo.auth.icv_fullbits/8);
		goto error;
	}

	ahp->icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
	ahp->icv_trunc_len = x->aalg->alg_trunc_len/8;

	BUG_ON(ahp->icv_trunc_len > MAX_AH_AUTH_LEN);

	x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) +
					  ahp->icv_trunc_len);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		x->props.header_len += sizeof(struct iphdr);
	x->data = ahp;

	return 0;

error:
	if (ahp) {
		crypto_free_ahash(ahp->ahash);
		kfree(ahp);
	}
	return -EINVAL;
}
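/* Release the per-SA AH state and its hash transform when the SA is torn down. */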
static void ah_destroy(struct xfrm_state *x)
{
	struct ah_data *ahp = x->data;

	if (!ahp)
		return;

	crypto_free_ahash(ahp->ahash);
	kfree(ahp);
}
static const struct xfrm_type ah_type =
{
	.description	= "AH4",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_AH,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= ah_init_state,
	.destructor	= ah_destroy,
	.input		= ah_input,
	.output		= ah_output
};

static const struct net_protocol ah4_protocol = {
	.handler	= xfrm4_rcv,
	.err_handler	= ah4_err,
	.no_policy	= 1,
	.netns_ok	= 1,
};
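/*
 * Module init/exit: register (and on exit unregister) the AH xfrm type for
 * IPv4 and the IPPROTO_AH protocol handler that feeds packets into xfrm4_rcv.
 */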
static int __init ah4_init(void)
{
	if (xfrm_register_type(&ah_type, AF_INET) < 0) {
		printk(KERN_INFO "ip ah init: can't add xfrm type\n");
		return -EAGAIN;
	}
	if (inet_add_protocol(&ah4_protocol, IPPROTO_AH) < 0) {
		printk(KERN_INFO "ip ah init: can't add protocol\n");
		xfrm_unregister_type(&ah_type, AF_INET);
		return -EAGAIN;
	}
	return 0;
}

static void __exit ah4_fini(void)
{
	if (inet_del_protocol(&ah4_protocol, IPPROTO_AH) < 0)
		printk(KERN_INFO "ip ah close: can't remove protocol\n");
	if (xfrm_unregister_type(&ah_type, AF_INET) < 0)
		printk(KERN_INFO "ip ah close: can't remove xfrm type\n");
}
module_init(ah4_init);
module_exit(ah4_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_AH);