6 * Kazunori MIYAZAWA @USAGI
7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
9 * YOSHIFUJI Hideaki @USAGI
10 * Split up af-specific functions
11 * Derek Atkins <derek@ihtfp.com>
12 * Add UDP Encapsulation
16 #include <linux/workqueue.h>
18 #include <linux/pfkeyv2.h>
19 #include <linux/ipsec.h>
20 #include <linux/module.h>
21 #include <linux/cache.h>
22 #include <asm/uaccess.h>
24 #include "xfrm_hash.h"
27 EXPORT_SYMBOL(xfrm_nl);
29 u32 sysctl_xfrm_aevent_etime __read_mostly = XFRM_AE_ETIME;
30 EXPORT_SYMBOL(sysctl_xfrm_aevent_etime);
32 u32 sysctl_xfrm_aevent_rseqth __read_mostly = XFRM_AE_SEQT_SIZE;
33 EXPORT_SYMBOL(sysctl_xfrm_aevent_rseqth);
35 u32 sysctl_xfrm_acq_expires __read_mostly = 30;
37 /* Each xfrm_state may be linked to three tables:
39 1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
40 2. Hash table by (daddr,family,reqid) to find what SAs exist for given
41 destination/tunnel endpoint. (output)
3. Hash table by (daddr,saddr,family) to look SAs up by address pair.
44 static DEFINE_SPINLOCK(xfrm_state_lock);
46 /* Hash table to find the appropriate SA for a given target (endpoint
47 * of a tunnel, or destination in transport mode) allowed by the selector.
49 * Its main use is finding an SA after policy has selected tunnel or transport mode.
50 * It can also be used by the AH/ESP ICMP error handlers to find the offending SA.
52 static struct hlist_head *xfrm_state_bydst __read_mostly;
53 static struct hlist_head *xfrm_state_bysrc __read_mostly;
54 static struct hlist_head *xfrm_state_byspi __read_mostly;
55 static unsigned int xfrm_state_hmask __read_mostly;
56 static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
57 static unsigned int xfrm_state_num;
58 static unsigned int xfrm_state_genid;
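/* xfrm_state_hmask is the current number of buckets minus one,
 * xfrm_state_num counts installed states (used to decide when to grow
 * the tables), and xfrm_state_genid invalidates cached bundles built on
 * older states.
 */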
60 static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
61 static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
63 static inline unsigned int xfrm_dst_hash(xfrm_address_t *daddr,
64 xfrm_address_t *saddr,
66 unsigned short family)
68 return __xfrm_dst_hash(daddr, saddr, reqid, family, xfrm_state_hmask);
71 static inline unsigned int xfrm_src_hash(xfrm_address_t *daddr,
72 xfrm_address_t *saddr,
73 unsigned short family)
75 return __xfrm_src_hash(daddr, saddr, family, xfrm_state_hmask);
78 static inline unsigned int
79 xfrm_spi_hash(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
81 return __xfrm_spi_hash(daddr, spi, proto, family, xfrm_state_hmask);
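/* Rehash every state hanging off @list into the new bydst/bysrc/byspi
 * tables using the new hash mask.  Only called from xfrm_hash_resize()
 * with xfrm_state_lock held.
 */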
84 static void xfrm_hash_transfer(struct hlist_head *list,
85 struct hlist_head *ndsttable,
86 struct hlist_head *nsrctable,
87 struct hlist_head *nspitable,
88 unsigned int nhashmask)
90 struct hlist_node *entry, *tmp;
93 hlist_for_each_entry_safe(x, entry, tmp, list, bydst) {
96 h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
97 x->props.reqid, x->props.family,
99 hlist_add_head(&x->bydst, ndsttable+h);
101 h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
104 hlist_add_head(&x->bysrc, nsrctable+h);
107 h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
108 x->id.proto, x->props.family,
110 hlist_add_head(&x->byspi, nspitable+h);
115 static unsigned long xfrm_hash_new_size(void)
117 return ((xfrm_state_hmask + 1) << 1) *
118 sizeof(struct hlist_head);
121 static DEFINE_MUTEX(hash_resize_mutex);
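/* Double all three state hash tables (see xfrm_hash_new_size()).  Runs
 * from the xfrm_hash_work workqueue item; the new tables are allocated
 * outside of any lock and the switch-over happens under xfrm_state_lock.
 */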
123 static void xfrm_hash_resize(struct work_struct *__unused)
125 struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
126 unsigned long nsize, osize;
127 unsigned int nhashmask, ohashmask;
130 mutex_lock(&hash_resize_mutex);
132 nsize = xfrm_hash_new_size();
133 ndst = xfrm_hash_alloc(nsize);
136 nsrc = xfrm_hash_alloc(nsize);
138 xfrm_hash_free(ndst, nsize);
141 nspi = xfrm_hash_alloc(nsize);
143 xfrm_hash_free(ndst, nsize);
144 xfrm_hash_free(nsrc, nsize);
148 spin_lock_bh(&xfrm_state_lock);
150 nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
151 for (i = xfrm_state_hmask; i >= 0; i--)
152 xfrm_hash_transfer(xfrm_state_bydst+i, ndst, nsrc, nspi,
155 odst = xfrm_state_bydst;
156 osrc = xfrm_state_bysrc;
157 ospi = xfrm_state_byspi;
158 ohashmask = xfrm_state_hmask;
160 xfrm_state_bydst = ndst;
161 xfrm_state_bysrc = nsrc;
162 xfrm_state_byspi = nspi;
163 xfrm_state_hmask = nhashmask;
165 spin_unlock_bh(&xfrm_state_lock);
167 osize = (ohashmask + 1) * sizeof(struct hlist_head);
168 xfrm_hash_free(odst, osize);
169 xfrm_hash_free(osrc, osize);
170 xfrm_hash_free(ospi, osize);
173 mutex_unlock(&hash_resize_mutex);
176 static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);
178 DECLARE_WAIT_QUEUE_HEAD(km_waitq);
179 EXPORT_SYMBOL(km_waitq);
181 static DEFINE_RWLOCK(xfrm_state_afinfo_lock);
182 static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];
184 static struct work_struct xfrm_state_gc_work;
185 static HLIST_HEAD(xfrm_state_gc_list);
186 static DEFINE_SPINLOCK(xfrm_state_gc_lock);
188 int __xfrm_state_delete(struct xfrm_state *x);
190 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
191 void km_state_expired(struct xfrm_state *x, int hard, u32 pid);
193 static struct xfrm_state_afinfo *xfrm_state_lock_afinfo(unsigned int family)
195 struct xfrm_state_afinfo *afinfo;
196 if (unlikely(family >= NPROTO))
198 write_lock_bh(&xfrm_state_afinfo_lock);
199 afinfo = xfrm_state_afinfo[family];
200 if (unlikely(!afinfo))
201 write_unlock_bh(&xfrm_state_afinfo_lock);
205 static void xfrm_state_unlock_afinfo(struct xfrm_state_afinfo *afinfo)
207 write_unlock_bh(&xfrm_state_afinfo_lock);
210 int xfrm_register_type(struct xfrm_type *type, unsigned short family)
212 struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family);
213 struct xfrm_type **typemap;
216 if (unlikely(afinfo == NULL))
217 return -EAFNOSUPPORT;
218 typemap = afinfo->type_map;
220 if (likely(typemap[type->proto] == NULL))
221 typemap[type->proto] = type;
224 xfrm_state_unlock_afinfo(afinfo);
227 EXPORT_SYMBOL(xfrm_register_type);
229 int xfrm_unregister_type(struct xfrm_type *type, unsigned short family)
231 struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family);
232 struct xfrm_type **typemap;
235 if (unlikely(afinfo == NULL))
236 return -EAFNOSUPPORT;
237 typemap = afinfo->type_map;
239 if (unlikely(typemap[type->proto] != type))
242 typemap[type->proto] = NULL;
243 xfrm_state_unlock_afinfo(afinfo);
246 EXPORT_SYMBOL(xfrm_unregister_type);
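/* Look up the xfrm_type registered for @proto in @family and take a
 * module reference on it.  If nothing is registered yet, try to load
 * the "xfrm-type-<family>-<proto>" module alias once and retry.
 */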
248 static struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
250 struct xfrm_state_afinfo *afinfo;
251 struct xfrm_type **typemap;
252 struct xfrm_type *type;
253 int modload_attempted = 0;
256 afinfo = xfrm_state_get_afinfo(family);
257 if (unlikely(afinfo == NULL))
259 typemap = afinfo->type_map;
261 type = typemap[proto];
262 if (unlikely(type && !try_module_get(type->owner)))
264 if (!type && !modload_attempted) {
265 xfrm_state_put_afinfo(afinfo);
266 request_module("xfrm-type-%d-%d", family, proto);
267 modload_attempted = 1;
271 xfrm_state_put_afinfo(afinfo);
275 static void xfrm_put_type(struct xfrm_type *type)
277 module_put(type->owner);
280 int xfrm_register_mode(struct xfrm_mode *mode, int family)
282 struct xfrm_state_afinfo *afinfo;
283 struct xfrm_mode **modemap;
286 if (unlikely(mode->encap >= XFRM_MODE_MAX))
289 afinfo = xfrm_state_lock_afinfo(family);
290 if (unlikely(afinfo == NULL))
291 return -EAFNOSUPPORT;
294 modemap = afinfo->mode_map;
295 if (modemap[mode->encap])
299 if (!try_module_get(afinfo->owner))
302 mode->afinfo = afinfo;
303 modemap[mode->encap] = mode;
307 xfrm_state_unlock_afinfo(afinfo);
310 EXPORT_SYMBOL(xfrm_register_mode);
312 int xfrm_unregister_mode(struct xfrm_mode *mode, int family)
314 struct xfrm_state_afinfo *afinfo;
315 struct xfrm_mode **modemap;
318 if (unlikely(mode->encap >= XFRM_MODE_MAX))
321 afinfo = xfrm_state_lock_afinfo(family);
322 if (unlikely(afinfo == NULL))
323 return -EAFNOSUPPORT;
326 modemap = afinfo->mode_map;
327 if (likely(modemap[mode->encap] == mode)) {
328 modemap[mode->encap] = NULL;
329 module_put(mode->afinfo->owner);
333 xfrm_state_unlock_afinfo(afinfo);
336 EXPORT_SYMBOL(xfrm_unregister_mode);
338 static struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family)
340 struct xfrm_state_afinfo *afinfo;
341 struct xfrm_mode *mode;
342 int modload_attempted = 0;
344 if (unlikely(encap >= XFRM_MODE_MAX))
348 afinfo = xfrm_state_get_afinfo(family);
349 if (unlikely(afinfo == NULL))
352 mode = afinfo->mode_map[encap];
353 if (unlikely(mode && !try_module_get(mode->owner)))
355 if (!mode && !modload_attempted) {
356 xfrm_state_put_afinfo(afinfo);
357 request_module("xfrm-mode-%d-%d", family, encap);
358 modload_attempted = 1;
362 xfrm_state_put_afinfo(afinfo);
366 static void xfrm_put_mode(struct xfrm_mode *mode)
368 module_put(mode->owner);
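/* Final destruction of a state.  Runs only from the GC work item in
 * process context, so it may sleep (del_timer_sync(), the type
 * destructor, module_put()).
 */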
371 static void xfrm_state_gc_destroy(struct xfrm_state *x)
373 del_timer_sync(&x->timer);
374 del_timer_sync(&x->rtimer);
381 xfrm_put_mode(x->mode);
383 x->type->destructor(x);
384 xfrm_put_type(x->type);
386 security_xfrm_state_free(x);
390 static void xfrm_state_gc_task(struct work_struct *data)
392 struct xfrm_state *x;
393 struct hlist_node *entry, *tmp;
394 struct hlist_head gc_list;
396 spin_lock_bh(&xfrm_state_gc_lock);
397 gc_list.first = xfrm_state_gc_list.first;
398 INIT_HLIST_HEAD(&xfrm_state_gc_list);
399 spin_unlock_bh(&xfrm_state_gc_lock);
401 hlist_for_each_entry_safe(x, entry, tmp, &gc_list, bydst)
402 xfrm_state_gc_destroy(x);
407 static inline unsigned long make_jiffies(long secs)
409 if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
410 return MAX_SCHEDULE_TIMEOUT-1;
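/* Per-state lifetime timer.  Computes the nearest soft/hard add/use
 * expiry: a soft expiry only notifies the key manager via
 * km_state_expired(x, 0, 0), a hard expiry deletes the state and sends
 * km_state_expired(x, 1, 0).  The timer is re-armed for the next event.
 */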
415 static void xfrm_timer_handler(unsigned long data)
417 struct xfrm_state *x = (struct xfrm_state *)data;
418 unsigned long now = get_seconds();
419 long next = LONG_MAX;
424 if (x->km.state == XFRM_STATE_DEAD)
426 if (x->km.state == XFRM_STATE_EXPIRED)
428 if (x->lft.hard_add_expires_seconds) {
429 long tmo = x->lft.hard_add_expires_seconds +
430 x->curlft.add_time - now;
436 if (x->lft.hard_use_expires_seconds) {
437 long tmo = x->lft.hard_use_expires_seconds +
438 (x->curlft.use_time ? : now) - now;
446 if (x->lft.soft_add_expires_seconds) {
447 long tmo = x->lft.soft_add_expires_seconds +
448 x->curlft.add_time - now;
454 if (x->lft.soft_use_expires_seconds) {
455 long tmo = x->lft.soft_use_expires_seconds +
456 (x->curlft.use_time ? : now) - now;
465 km_state_expired(x, 0, 0);
467 if (next != LONG_MAX)
468 mod_timer(&x->timer, jiffies + make_jiffies(next));
473 if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) {
474 x->km.state = XFRM_STATE_EXPIRED;
480 err = __xfrm_state_delete(x);
481 if (!err && x->id.spi)
482 km_state_expired(x, 1, 0);
484 xfrm_audit_state_delete(x, err ? 0 : 1,
485 audit_get_loginuid(current->audit_context), 0);
488 spin_unlock(&x->lock);
491 static void xfrm_replay_timer_handler(unsigned long data);
493 struct xfrm_state *xfrm_state_alloc(void)
495 struct xfrm_state *x;
497 x = kzalloc(sizeof(struct xfrm_state), GFP_ATOMIC);
500 atomic_set(&x->refcnt, 1);
501 atomic_set(&x->tunnel_users, 0);
502 INIT_HLIST_NODE(&x->bydst);
503 INIT_HLIST_NODE(&x->bysrc);
504 INIT_HLIST_NODE(&x->byspi);
505 init_timer(&x->timer);
506 x->timer.function = xfrm_timer_handler;
507 x->timer.data = (unsigned long)x;
508 init_timer(&x->rtimer);
509 x->rtimer.function = xfrm_replay_timer_handler;
510 x->rtimer.data = (unsigned long)x;
511 x->curlft.add_time = get_seconds();
512 x->lft.soft_byte_limit = XFRM_INF;
513 x->lft.soft_packet_limit = XFRM_INF;
514 x->lft.hard_byte_limit = XFRM_INF;
515 x->lft.hard_packet_limit = XFRM_INF;
516 x->replay_maxage = 0;
517 x->replay_maxdiff = 0;
518 spin_lock_init(&x->lock);
522 EXPORT_SYMBOL(xfrm_state_alloc);
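/* A rough usage sketch (hypothetical caller, not taken from this file):
 * a key manager allocates a state, fills in the identifiers, lifetimes
 * and algorithms, then resolves type/mode and installs it via
 * xfrm_init_state() and xfrm_state_add() (both later in this file):
 *
 *	struct xfrm_state *x = xfrm_state_alloc();
 *	if (!x)
 *		return -ENOMEM;
 *	x->id.proto = IPPROTO_ESP;
 *	x->props.family = AF_INET;
 *	... fill in id.daddr, props.saddr, lft and algorithms ...
 *	err = xfrm_init_state(x);
 *	if (!err)
 *		err = xfrm_state_add(x);
 */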
524 void __xfrm_state_destroy(struct xfrm_state *x)
526 BUG_TRAP(x->km.state == XFRM_STATE_DEAD);
528 spin_lock_bh(&xfrm_state_gc_lock);
529 hlist_add_head(&x->bydst, &xfrm_state_gc_list);
530 spin_unlock_bh(&xfrm_state_gc_lock);
531 schedule_work(&xfrm_state_gc_work);
533 EXPORT_SYMBOL(__xfrm_state_destroy);
535 int __xfrm_state_delete(struct xfrm_state *x)
539 if (x->km.state != XFRM_STATE_DEAD) {
540 x->km.state = XFRM_STATE_DEAD;
541 spin_lock(&xfrm_state_lock);
542 hlist_del(&x->bydst);
543 hlist_del(&x->bysrc);
545 hlist_del(&x->byspi);
547 spin_unlock(&xfrm_state_lock);
549 /* All xfrm_state objects are created by xfrm_state_alloc.
550 * The xfrm_state_alloc call gives a reference, and that
551 * is what we are dropping here.
559 EXPORT_SYMBOL(__xfrm_state_delete);
561 int xfrm_state_delete(struct xfrm_state *x)
565 spin_lock_bh(&x->lock);
566 err = __xfrm_state_delete(x);
567 spin_unlock_bh(&x->lock);
571 EXPORT_SYMBOL(xfrm_state_delete);
573 #ifdef CONFIG_SECURITY_NETWORK_XFRM
575 xfrm_state_flush_secctx_check(u8 proto, struct xfrm_audit *audit_info)
579 for (i = 0; i <= xfrm_state_hmask; i++) {
580 struct hlist_node *entry;
581 struct xfrm_state *x;
583 hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
584 if (xfrm_id_proto_match(x->id.proto, proto) &&
585 (err = security_xfrm_state_delete(x)) != 0) {
586 xfrm_audit_state_delete(x, 0,
587 audit_info->loginuid,
598 xfrm_state_flush_secctx_check(u8 proto, struct xfrm_audit *audit_info)
604 int xfrm_state_flush(u8 proto, struct xfrm_audit *audit_info)
608 spin_lock_bh(&xfrm_state_lock);
609 err = xfrm_state_flush_secctx_check(proto, audit_info);
613 for (i = 0; i <= xfrm_state_hmask; i++) {
614 struct hlist_node *entry;
615 struct xfrm_state *x;
617 hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
618 if (!xfrm_state_kern(x) &&
619 xfrm_id_proto_match(x->id.proto, proto)) {
621 spin_unlock_bh(&xfrm_state_lock);
623 err = xfrm_state_delete(x);
624 xfrm_audit_state_delete(x, err ? 0 : 1,
625 audit_info->loginuid,
629 spin_lock_bh(&xfrm_state_lock);
637 spin_unlock_bh(&xfrm_state_lock);
641 EXPORT_SYMBOL(xfrm_state_flush);
643 void xfrm_sad_getinfo(struct xfrmk_sadinfo *si)
645 spin_lock_bh(&xfrm_state_lock);
646 si->sadcnt = xfrm_state_num;
647 si->sadhcnt = xfrm_state_hmask;
648 si->sadhmcnt = xfrm_state_hashmax;
649 spin_unlock_bh(&xfrm_state_lock);
651 EXPORT_SYMBOL(xfrm_sad_getinfo);
654 xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
655 struct xfrm_tmpl *tmpl,
656 xfrm_address_t *daddr, xfrm_address_t *saddr,
657 unsigned short family)
659 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
662 afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
663 xfrm_state_put_afinfo(afinfo);
667 static struct xfrm_state *__xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
669 unsigned int h = xfrm_spi_hash(daddr, spi, proto, family);
670 struct xfrm_state *x;
671 struct hlist_node *entry;
673 hlist_for_each_entry(x, entry, xfrm_state_byspi+h, byspi) {
674 if (x->props.family != family ||
676 x->id.proto != proto)
681 if (x->id.daddr.a4 != daddr->a4)
685 if (!ipv6_addr_equal((struct in6_addr *)daddr,
699 static struct xfrm_state *__xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family)
701 unsigned int h = xfrm_src_hash(daddr, saddr, family);
702 struct xfrm_state *x;
703 struct hlist_node *entry;
705 hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
706 if (x->props.family != family ||
707 x->id.proto != proto)
712 if (x->id.daddr.a4 != daddr->a4 ||
713 x->props.saddr.a4 != saddr->a4)
717 if (!ipv6_addr_equal((struct in6_addr *)daddr,
720 !ipv6_addr_equal((struct in6_addr *)saddr,
734 static inline struct xfrm_state *
735 __xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
738 return __xfrm_state_lookup(&x->id.daddr, x->id.spi,
739 x->id.proto, family);
741 return __xfrm_state_lookup_byaddr(&x->id.daddr,
743 x->id.proto, family);
746 static void xfrm_hash_grow_check(int have_hash_collision)
748 if (have_hash_collision &&
749 (xfrm_state_hmask + 1) < xfrm_state_hashmax &&
750 xfrm_state_num > xfrm_state_hmask)
751 schedule_work(&xfrm_hash_work);
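/* Main output-path SA lookup.  Walks the bydst hash chain for
 * (daddr, saddr, tmpl->reqid) looking for the best VALID state that
 * matches the template, flow and policy.  If nothing usable exists and
 * no acquire is already in progress, a temporary XFRM_STATE_ACQ entry
 * is created and the key managers are asked via km_query() to negotiate
 * a real SA.
 */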
755 xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
756 struct flowi *fl, struct xfrm_tmpl *tmpl,
757 struct xfrm_policy *pol, int *err,
758 unsigned short family)
760 unsigned int h = xfrm_dst_hash(daddr, saddr, tmpl->reqid, family);
761 struct hlist_node *entry;
762 struct xfrm_state *x, *x0;
763 int acquire_in_progress = 0;
765 struct xfrm_state *best = NULL;
767 spin_lock_bh(&xfrm_state_lock);
768 hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
769 if (x->props.family == family &&
770 x->props.reqid == tmpl->reqid &&
771 !(x->props.flags & XFRM_STATE_WILDRECV) &&
772 xfrm_state_addr_check(x, daddr, saddr, family) &&
773 tmpl->mode == x->props.mode &&
774 tmpl->id.proto == x->id.proto &&
775 (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) {
777 1. There is a valid state with matching selector.
779 2. Valid state with inappropriate selector. Skip.
781 Entering area of "sysdeps".
783 3. If the state is not valid, the selector is temporary:
784 it matches only the session which triggered the
785 previous resolution. The key manager will do
786 something to install a state with a proper selector.
789 if (x->km.state == XFRM_STATE_VALID) {
790 if (!xfrm_selector_match(&x->sel, fl, x->sel.family) ||
791 !security_xfrm_state_pol_flow_match(x, pol, fl))
794 best->km.dying > x->km.dying ||
795 (best->km.dying == x->km.dying &&
796 best->curlft.add_time < x->curlft.add_time))
798 } else if (x->km.state == XFRM_STATE_ACQ) {
799 acquire_in_progress = 1;
800 } else if (x->km.state == XFRM_STATE_ERROR ||
801 x->km.state == XFRM_STATE_EXPIRED) {
802 if (xfrm_selector_match(&x->sel, fl, x->sel.family) &&
803 security_xfrm_state_pol_flow_match(x, pol, fl))
810 if (!x && !error && !acquire_in_progress) {
812 (x0 = __xfrm_state_lookup(daddr, tmpl->id.spi,
813 tmpl->id.proto, family)) != NULL) {
818 x = xfrm_state_alloc();
823 /* Initialize temporary selector matching only
824 * to current session. */
825 xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);
827 error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid);
829 x->km.state = XFRM_STATE_DEAD;
835 if (km_query(x, tmpl, pol) == 0) {
836 x->km.state = XFRM_STATE_ACQ;
837 hlist_add_head(&x->bydst, xfrm_state_bydst+h);
838 h = xfrm_src_hash(daddr, saddr, family);
839 hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
841 h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family);
842 hlist_add_head(&x->byspi, xfrm_state_byspi+h);
844 x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires;
845 x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ;
846 add_timer(&x->timer);
848 xfrm_hash_grow_check(x->bydst.next != NULL);
850 x->km.state = XFRM_STATE_DEAD;
860 *err = acquire_in_progress ? -EAGAIN : error;
861 spin_unlock_bh(&xfrm_state_lock);
866 xfrm_stateonly_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
867 unsigned short family, u8 mode, u8 proto, u32 reqid)
869 unsigned int h = xfrm_dst_hash(daddr, saddr, reqid, family);
870 struct xfrm_state *rx = NULL, *x = NULL;
871 struct hlist_node *entry;
873 spin_lock(&xfrm_state_lock);
874 hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
875 if (x->props.family == family &&
876 x->props.reqid == reqid &&
877 !(x->props.flags & XFRM_STATE_WILDRECV) &&
878 xfrm_state_addr_check(x, daddr, saddr, family) &&
879 mode == x->props.mode &&
880 proto == x->id.proto &&
881 x->km.state == XFRM_STATE_VALID) {
889 spin_unlock(&xfrm_state_lock);
894 EXPORT_SYMBOL(xfrm_stateonly_find);
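/* Link a state into all three hash tables, bump the global generation
 * counter and arm its timers.  The caller must hold xfrm_state_lock.
 */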
896 static void __xfrm_state_insert(struct xfrm_state *x)
900 x->genid = ++xfrm_state_genid;
902 h = xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
903 x->props.reqid, x->props.family);
904 hlist_add_head(&x->bydst, xfrm_state_bydst+h);
906 h = xfrm_src_hash(&x->id.daddr, &x->props.saddr, x->props.family);
907 hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
910 h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto,
913 hlist_add_head(&x->byspi, xfrm_state_byspi+h);
916 mod_timer(&x->timer, jiffies + HZ);
917 if (x->replay_maxage)
918 mod_timer(&x->rtimer, jiffies + x->replay_maxage);
924 xfrm_hash_grow_check(x->bydst.next != NULL);
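/* When a new state is about to be inserted, bump the genid of any
 * existing states with the same (family, reqid, daddr, saddr) so that
 * cached bundles built on the old states are treated as stale and get
 * re-resolved.
 */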
927 /* xfrm_state_lock is held */
928 static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
930 unsigned short family = xnew->props.family;
931 u32 reqid = xnew->props.reqid;
932 struct xfrm_state *x;
933 struct hlist_node *entry;
936 h = xfrm_dst_hash(&xnew->id.daddr, &xnew->props.saddr, reqid, family);
937 hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
938 if (x->props.family == family &&
939 x->props.reqid == reqid &&
940 !xfrm_addr_cmp(&x->id.daddr, &xnew->id.daddr, family) &&
941 !xfrm_addr_cmp(&x->props.saddr, &xnew->props.saddr, family))
942 x->genid = xfrm_state_genid;
946 void xfrm_state_insert(struct xfrm_state *x)
948 spin_lock_bh(&xfrm_state_lock);
949 __xfrm_state_bump_genids(x);
950 __xfrm_state_insert(x);
951 spin_unlock_bh(&xfrm_state_lock);
953 EXPORT_SYMBOL(xfrm_state_insert);
955 /* xfrm_state_lock is held */
956 static struct xfrm_state *__find_acq_core(unsigned short family, u8 mode, u32 reqid, u8 proto, xfrm_address_t *daddr, xfrm_address_t *saddr, int create)
958 unsigned int h = xfrm_dst_hash(daddr, saddr, reqid, family);
959 struct hlist_node *entry;
960 struct xfrm_state *x;
962 hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
963 if (x->props.reqid != reqid ||
964 x->props.mode != mode ||
965 x->props.family != family ||
966 x->km.state != XFRM_STATE_ACQ ||
968 x->id.proto != proto)
973 if (x->id.daddr.a4 != daddr->a4 ||
974 x->props.saddr.a4 != saddr->a4)
978 if (!ipv6_addr_equal((struct in6_addr *)x->id.daddr.a6,
979 (struct in6_addr *)daddr) ||
980 !ipv6_addr_equal((struct in6_addr *)
982 (struct in6_addr *)saddr))
994 x = xfrm_state_alloc();
998 x->sel.daddr.a4 = daddr->a4;
999 x->sel.saddr.a4 = saddr->a4;
1000 x->sel.prefixlen_d = 32;
1001 x->sel.prefixlen_s = 32;
1002 x->props.saddr.a4 = saddr->a4;
1003 x->id.daddr.a4 = daddr->a4;
1007 ipv6_addr_copy((struct in6_addr *)x->sel.daddr.a6,
1008 (struct in6_addr *)daddr);
1009 ipv6_addr_copy((struct in6_addr *)x->sel.saddr.a6,
1010 (struct in6_addr *)saddr);
1011 x->sel.prefixlen_d = 128;
1012 x->sel.prefixlen_s = 128;
1013 ipv6_addr_copy((struct in6_addr *)x->props.saddr.a6,
1014 (struct in6_addr *)saddr);
1015 ipv6_addr_copy((struct in6_addr *)x->id.daddr.a6,
1016 (struct in6_addr *)daddr);
1020 x->km.state = XFRM_STATE_ACQ;
1021 x->id.proto = proto;
1022 x->props.family = family;
1023 x->props.mode = mode;
1024 x->props.reqid = reqid;
1025 x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires;
1027 x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ;
1028 add_timer(&x->timer);
1029 hlist_add_head(&x->bydst, xfrm_state_bydst+h);
1030 h = xfrm_src_hash(daddr, saddr, family);
1031 hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
1035 xfrm_hash_grow_check(x->bydst.next != NULL);
1041 static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq);
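/* Install a fully specified SA.  Fails if an identical state is
 * already present; otherwise any matching XFRM_STATE_ACQ placeholder
 * (found by sequence number or by __find_acq_core()) is taken over and
 * deleted once the lock has been dropped.
 */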
1043 int xfrm_state_add(struct xfrm_state *x)
1045 struct xfrm_state *x1;
1048 int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
1050 family = x->props.family;
1052 spin_lock_bh(&xfrm_state_lock);
1054 x1 = __xfrm_state_locate(x, use_spi, family);
1062 if (use_spi && x->km.seq) {
1063 x1 = __xfrm_find_acq_byseq(x->km.seq);
1064 if (x1 && ((x1->id.proto != x->id.proto) ||
1065 xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family))) {
1072 x1 = __find_acq_core(family, x->props.mode, x->props.reqid,
1074 &x->id.daddr, &x->props.saddr, 0);
1076 __xfrm_state_bump_genids(x);
1077 __xfrm_state_insert(x);
1081 spin_unlock_bh(&xfrm_state_lock);
1084 xfrm_state_delete(x1);
1090 EXPORT_SYMBOL(xfrm_state_add);
1092 #ifdef CONFIG_XFRM_MIGRATE
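/* MIGRATE support: duplicate an existing SA so that its addresses can
 * be rewritten by xfrm_state_migrate() below without renegotiating keys.
 */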
1093 struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
1096 struct xfrm_state *x = xfrm_state_alloc();
1100 memcpy(&x->id, &orig->id, sizeof(x->id));
1101 memcpy(&x->sel, &orig->sel, sizeof(x->sel));
1102 memcpy(&x->lft, &orig->lft, sizeof(x->lft));
1103 x->props.mode = orig->props.mode;
1104 x->props.replay_window = orig->props.replay_window;
1105 x->props.reqid = orig->props.reqid;
1106 x->props.family = orig->props.family;
1107 x->props.saddr = orig->props.saddr;
1110 x->aalg = xfrm_algo_clone(orig->aalg);
1114 x->props.aalgo = orig->props.aalgo;
1117 x->ealg = xfrm_algo_clone(orig->ealg);
1121 x->props.ealgo = orig->props.ealgo;
1124 x->calg = xfrm_algo_clone(orig->calg);
1128 x->props.calgo = orig->props.calgo;
1131 x->encap = kmemdup(orig->encap, sizeof(*x->encap), GFP_KERNEL);
1137 x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr),
1143 err = xfrm_init_state(x);
1147 x->props.flags = orig->props.flags;
1149 x->curlft.add_time = orig->curlft.add_time;
1150 x->km.state = orig->km.state;
1151 x->km.seq = orig->km.seq;
1168 EXPORT_SYMBOL(xfrm_state_clone);
1170 /* xfrm_state_lock is held */
1171 struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m)
1174 struct xfrm_state *x;
1175 struct hlist_node *entry;
1178 h = xfrm_dst_hash(&m->old_daddr, &m->old_saddr,
1179 m->reqid, m->old_family);
1180 hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
1181 if (x->props.mode != m->mode ||
1182 x->id.proto != m->proto)
1184 if (m->reqid && x->props.reqid != m->reqid)
1186 if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
1188 xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
1195 h = xfrm_src_hash(&m->old_daddr, &m->old_saddr,
1197 hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
1198 if (x->props.mode != m->mode ||
1199 x->id.proto != m->proto)
1201 if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
1203 xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
1213 EXPORT_SYMBOL(xfrm_migrate_state_find);
1215 struct xfrm_state * xfrm_state_migrate(struct xfrm_state *x,
1216 struct xfrm_migrate *m)
1218 struct xfrm_state *xc;
1221 xc = xfrm_state_clone(x, &err);
1225 memcpy(&xc->id.daddr, &m->new_daddr, sizeof(xc->id.daddr));
1226 memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr));
1229 if (!xfrm_addr_cmp(&x->id.daddr, &m->new_daddr, m->new_family)) {
1230 /* care is needed when the destination address of the
1231 state is to be updated, as it is part of the (daddr, spi, proto) lookup triplet */
1232 xfrm_state_insert(xc);
1234 if ((err = xfrm_state_add(xc)) < 0)
1243 EXPORT_SYMBOL(xfrm_state_migrate);
1246 int xfrm_state_update(struct xfrm_state *x)
1248 struct xfrm_state *x1;
1250 int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
1252 spin_lock_bh(&xfrm_state_lock);
1253 x1 = __xfrm_state_locate(x, use_spi, x->props.family);
1259 if (xfrm_state_kern(x1)) {
1265 if (x1->km.state == XFRM_STATE_ACQ) {
1266 __xfrm_state_insert(x);
1272 spin_unlock_bh(&xfrm_state_lock);
1278 xfrm_state_delete(x1);
1284 spin_lock_bh(&x1->lock);
1285 if (likely(x1->km.state == XFRM_STATE_VALID)) {
1286 if (x->encap && x1->encap)
1287 memcpy(x1->encap, x->encap, sizeof(*x1->encap));
1288 if (x->coaddr && x1->coaddr) {
1289 memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
1291 if (!use_spi && memcmp(&x1->sel, &x->sel, sizeof(x1->sel)))
1292 memcpy(&x1->sel, &x->sel, sizeof(x1->sel));
1293 memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
1296 mod_timer(&x1->timer, jiffies + HZ);
1297 if (x1->curlft.use_time)
1298 xfrm_state_check_expire(x1);
1302 spin_unlock_bh(&x1->lock);
1308 EXPORT_SYMBOL(xfrm_state_update);
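/* Called from the data path for packets using this state: stamps the
 * first use time and enforces the byte/packet lifetimes.  Crossing a
 * hard limit expires the state immediately; crossing a soft limit only
 * warns the key manager (km_state_expired(x, 0, 0)).
 */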
1310 int xfrm_state_check_expire(struct xfrm_state *x)
1312 if (!x->curlft.use_time)
1313 x->curlft.use_time = get_seconds();
1315 if (x->km.state != XFRM_STATE_VALID)
1318 if (x->curlft.bytes >= x->lft.hard_byte_limit ||
1319 x->curlft.packets >= x->lft.hard_packet_limit) {
1320 x->km.state = XFRM_STATE_EXPIRED;
1321 mod_timer(&x->timer, jiffies);
1326 (x->curlft.bytes >= x->lft.soft_byte_limit ||
1327 x->curlft.packets >= x->lft.soft_packet_limit)) {
1329 km_state_expired(x, 0, 0);
1333 EXPORT_SYMBOL(xfrm_state_check_expire);
1336 xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto,
1337 unsigned short family)
1339 struct xfrm_state *x;
1341 spin_lock_bh(&xfrm_state_lock);
1342 x = __xfrm_state_lookup(daddr, spi, proto, family);
1343 spin_unlock_bh(&xfrm_state_lock);
1346 EXPORT_SYMBOL(xfrm_state_lookup);
1349 xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr,
1350 u8 proto, unsigned short family)
1352 struct xfrm_state *x;
1354 spin_lock_bh(&xfrm_state_lock);
1355 x = __xfrm_state_lookup_byaddr(daddr, saddr, proto, family);
1356 spin_unlock_bh(&xfrm_state_lock);
1359 EXPORT_SYMBOL(xfrm_state_lookup_byaddr);
1362 xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
1363 xfrm_address_t *daddr, xfrm_address_t *saddr,
1364 int create, unsigned short family)
1366 struct xfrm_state *x;
1368 spin_lock_bh(&xfrm_state_lock);
1369 x = __find_acq_core(family, mode, reqid, proto, daddr, saddr, create);
1370 spin_unlock_bh(&xfrm_state_lock);
1374 EXPORT_SYMBOL(xfrm_find_acq);
1376 #ifdef CONFIG_XFRM_SUB_POLICY
1378 xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
1379 unsigned short family)
1382 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
1384 return -EAFNOSUPPORT;
1386 spin_lock_bh(&xfrm_state_lock);
1387 if (afinfo->tmpl_sort)
1388 err = afinfo->tmpl_sort(dst, src, n);
1389 spin_unlock_bh(&xfrm_state_lock);
1390 xfrm_state_put_afinfo(afinfo);
1393 EXPORT_SYMBOL(xfrm_tmpl_sort);
1396 xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
1397 unsigned short family)
1400 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
1402 return -EAFNOSUPPORT;
1404 spin_lock_bh(&xfrm_state_lock);
1405 if (afinfo->state_sort)
1406 err = afinfo->state_sort(dst, src, n);
1407 spin_unlock_bh(&xfrm_state_lock);
1408 xfrm_state_put_afinfo(afinfo);
1411 EXPORT_SYMBOL(xfrm_state_sort);
1414 /* Silly enough, but I'm too lazy to build a resolution list */
1416 static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq)
1420 for (i = 0; i <= xfrm_state_hmask; i++) {
1421 struct hlist_node *entry;
1422 struct xfrm_state *x;
1424 hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
1425 if (x->km.seq == seq &&
1426 x->km.state == XFRM_STATE_ACQ) {
1435 struct xfrm_state *xfrm_find_acq_byseq(u32 seq)
1437 struct xfrm_state *x;
1439 spin_lock_bh(&xfrm_state_lock);
1440 x = __xfrm_find_acq_byseq(seq);
1441 spin_unlock_bh(&xfrm_state_lock);
1444 EXPORT_SYMBOL(xfrm_find_acq_byseq);
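/* Hand out a unique, non-zero sequence number for acquire requests;
 * the "++acqseq ? : ++acqseq" dance simply skips 0 on wrap-around.
 */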
1446 u32 xfrm_get_acqseq(void)
1450 static DEFINE_SPINLOCK(acqseq_lock);
1452 spin_lock_bh(&acqseq_lock);
1453 res = (++acqseq ? : ++acqseq);
1454 spin_unlock_bh(&acqseq_lock);
1457 EXPORT_SYMBOL(xfrm_get_acqseq);
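/* Assign an SPI from [low, high] to an acquire state: the exact value
 * if low == high, otherwise a randomly probed free one.  On success the
 * state is hashed into the byspi table.
 */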
1459 int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
1462 struct xfrm_state *x0;
1464 __be32 minspi = htonl(low);
1465 __be32 maxspi = htonl(high);
1467 spin_lock_bh(&x->lock);
1468 if (x->km.state == XFRM_STATE_DEAD)
1477 if (minspi == maxspi) {
1478 x0 = xfrm_state_lookup(&x->id.daddr, minspi, x->id.proto, x->props.family);
1486 for (h=0; h<high-low+1; h++) {
1487 spi = low + net_random()%(high-low+1);
1488 x0 = xfrm_state_lookup(&x->id.daddr, htonl(spi), x->id.proto, x->props.family);
1490 x->id.spi = htonl(spi);
1497 spin_lock_bh(&xfrm_state_lock);
1498 h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
1499 hlist_add_head(&x->byspi, xfrm_state_byspi+h);
1500 spin_unlock_bh(&xfrm_state_lock);
1506 spin_unlock_bh(&x->lock);
1510 EXPORT_SYMBOL(xfrm_alloc_spi);
1512 int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*),
1516 struct xfrm_state *x, *last = NULL;
1517 struct hlist_node *entry;
1521 spin_lock_bh(&xfrm_state_lock);
1522 for (i = 0; i <= xfrm_state_hmask; i++) {
1523 hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
1524 if (!xfrm_id_proto_match(x->id.proto, proto))
1527 err = func(last, count, data);
1539 err = func(last, 0, data);
1541 spin_unlock_bh(&xfrm_state_lock);
1544 EXPORT_SYMBOL(xfrm_state_walk);
1547 void xfrm_replay_notify(struct xfrm_state *x, int event)
1550 /* We send notify messages in case:
1551 * 1. we updated one of the sequence numbers, and the seqno difference
1552 * is at least x->replay_maxdiff; in this case we also update the
1553 * timeout of our timer function,
1554 * 2. x->replay_maxage has elapsed since the last update,
1555 * and there were changes.
1557 * The state structure must be locked!
1561 case XFRM_REPLAY_UPDATE:
1562 if (x->replay_maxdiff &&
1563 (x->replay.seq - x->preplay.seq < x->replay_maxdiff) &&
1564 (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff)) {
1565 if (x->xflags & XFRM_TIME_DEFER)
1566 event = XFRM_REPLAY_TIMEOUT;
1573 case XFRM_REPLAY_TIMEOUT:
1574 if ((x->replay.seq == x->preplay.seq) &&
1575 (x->replay.bitmap == x->preplay.bitmap) &&
1576 (x->replay.oseq == x->preplay.oseq)) {
1577 x->xflags |= XFRM_TIME_DEFER;
1584 memcpy(&x->preplay, &x->replay, sizeof(struct xfrm_replay_state));
1585 c.event = XFRM_MSG_NEWAE;
1586 c.data.aevent = event;
1587 km_state_notify(x, &c);
1589 if (x->replay_maxage &&
1590 !mod_timer(&x->rtimer, jiffies + x->replay_maxage))
1591 x->xflags &= ~XFRM_TIME_DEFER;
1594 static void xfrm_replay_timer_handler(unsigned long data)
1596 struct xfrm_state *x = (struct xfrm_state *)data;
1598 spin_lock(&x->lock);
1600 if (x->km.state == XFRM_STATE_VALID) {
1601 if (xfrm_aevent_is_on())
1602 xfrm_replay_notify(x, XFRM_REPLAY_TIMEOUT);
1604 x->xflags |= XFRM_TIME_DEFER;
1607 spin_unlock(&x->lock);
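/* Anti-replay window handling: xfrm_replay_check() is used on receive
 * to reject sequence numbers that are zero, too old for the window or
 * already seen; xfrm_replay_advance() records a sequence number in the
 * window once the packet has been accepted.
 */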
1610 int xfrm_replay_check(struct xfrm_state *x, __be32 net_seq)
1613 u32 seq = ntohl(net_seq);
1615 if (unlikely(seq == 0))
1618 if (likely(seq > x->replay.seq))
1621 diff = x->replay.seq - seq;
1622 if (diff >= min_t(unsigned int, x->props.replay_window,
1623 sizeof(x->replay.bitmap) * 8)) {
1624 x->stats.replay_window++;
1628 if (x->replay.bitmap & (1U << diff)) {
1634 EXPORT_SYMBOL(xfrm_replay_check);
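/* Example (hypothetical, replay_window = 32): with x->replay.seq == 100,
 * receiving seq 103 shifts the bitmap left by 3, sets bit 0 and makes
 * 103 the new right edge; a late seq 101 then just sets bit 2; a second
 * copy of 101 is caught by xfrm_replay_check() because bit 2 is already
 * set.
 */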
1636 void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq)
1639 u32 seq = ntohl(net_seq);
1641 if (seq > x->replay.seq) {
1642 diff = seq - x->replay.seq;
1643 if (diff < x->props.replay_window)
1644 x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
1646 x->replay.bitmap = 1;
1647 x->replay.seq = seq;
1649 diff = x->replay.seq - seq;
1650 x->replay.bitmap |= (1U << diff);
1653 if (xfrm_aevent_is_on())
1654 xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
1656 EXPORT_SYMBOL(xfrm_replay_advance);
1658 static struct list_head xfrm_km_list = LIST_HEAD_INIT(xfrm_km_list);
1659 static DEFINE_RWLOCK(xfrm_km_lock);
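/* Key manager plumbing: every registered xfrm_mgr (e.g. af_key and the
 * xfrm netlink interface) gets a copy of state/policy events; the list
 * above is protected by xfrm_km_lock.
 */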
1661 void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
1663 struct xfrm_mgr *km;
1665 read_lock(&xfrm_km_lock);
1666 list_for_each_entry(km, &xfrm_km_list, list)
1667 if (km->notify_policy)
1668 km->notify_policy(xp, dir, c);
1669 read_unlock(&xfrm_km_lock);
1672 void km_state_notify(struct xfrm_state *x, struct km_event *c)
1674 struct xfrm_mgr *km;
1675 read_lock(&xfrm_km_lock);
1676 list_for_each_entry(km, &xfrm_km_list, list)
1679 read_unlock(&xfrm_km_lock);
1682 EXPORT_SYMBOL(km_policy_notify);
1683 EXPORT_SYMBOL(km_state_notify);
1685 void km_state_expired(struct xfrm_state *x, int hard, u32 pid)
1691 c.event = XFRM_MSG_EXPIRE;
1692 km_state_notify(x, &c);
1698 EXPORT_SYMBOL(km_state_expired);
1700 * We send to all registered managers regardless of failure;
1701 * we are happy with one success.
1703 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
1705 int err = -EINVAL, acqret;
1706 struct xfrm_mgr *km;
1708 read_lock(&xfrm_km_lock);
1709 list_for_each_entry(km, &xfrm_km_list, list) {
1710 acqret = km->acquire(x, t, pol, XFRM_POLICY_OUT);
1714 read_unlock(&xfrm_km_lock);
1717 EXPORT_SYMBOL(km_query);
1719 int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
1722 struct xfrm_mgr *km;
1724 read_lock(&xfrm_km_lock);
1725 list_for_each_entry(km, &xfrm_km_list, list) {
1726 if (km->new_mapping)
1727 err = km->new_mapping(x, ipaddr, sport);
1731 read_unlock(&xfrm_km_lock);
1734 EXPORT_SYMBOL(km_new_mapping);
1736 void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid)
1742 c.event = XFRM_MSG_POLEXPIRE;
1743 km_policy_notify(pol, dir, &c);
1748 EXPORT_SYMBOL(km_policy_expired);
1750 int km_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
1751 struct xfrm_migrate *m, int num_migrate)
1755 struct xfrm_mgr *km;
1757 read_lock(&xfrm_km_lock);
1758 list_for_each_entry(km, &xfrm_km_list, list) {
1760 ret = km->migrate(sel, dir, type, m, num_migrate);
1765 read_unlock(&xfrm_km_lock);
1768 EXPORT_SYMBOL(km_migrate);
1770 int km_report(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr)
1774 struct xfrm_mgr *km;
1776 read_lock(&xfrm_km_lock);
1777 list_for_each_entry(km, &xfrm_km_list, list) {
1779 ret = km->report(proto, sel, addr);
1784 read_unlock(&xfrm_km_lock);
1787 EXPORT_SYMBOL(km_report);
1789 int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
1793 struct xfrm_mgr *km;
1794 struct xfrm_policy *pol = NULL;
1796 if (optlen <= 0 || optlen > PAGE_SIZE)
1799 data = kmalloc(optlen, GFP_KERNEL);
1804 if (copy_from_user(data, optval, optlen))
1808 read_lock(&xfrm_km_lock);
1809 list_for_each_entry(km, &xfrm_km_list, list) {
1810 pol = km->compile_policy(sk, optname, data,
1815 read_unlock(&xfrm_km_lock);
1818 xfrm_sk_policy_insert(sk, err, pol);
1827 EXPORT_SYMBOL(xfrm_user_policy);
1829 int xfrm_register_km(struct xfrm_mgr *km)
1831 write_lock_bh(&xfrm_km_lock);
1832 list_add_tail(&km->list, &xfrm_km_list);
1833 write_unlock_bh(&xfrm_km_lock);
1836 EXPORT_SYMBOL(xfrm_register_km);
1838 int xfrm_unregister_km(struct xfrm_mgr *km)
1840 write_lock_bh(&xfrm_km_lock);
1841 list_del(&km->list);
1842 write_unlock_bh(&xfrm_km_lock);
1845 EXPORT_SYMBOL(xfrm_unregister_km);
1847 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
1850 if (unlikely(afinfo == NULL))
1852 if (unlikely(afinfo->family >= NPROTO))
1853 return -EAFNOSUPPORT;
1854 write_lock_bh(&xfrm_state_afinfo_lock);
1855 if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
1858 xfrm_state_afinfo[afinfo->family] = afinfo;
1859 write_unlock_bh(&xfrm_state_afinfo_lock);
1862 EXPORT_SYMBOL(xfrm_state_register_afinfo);
1864 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
1867 if (unlikely(afinfo == NULL))
1869 if (unlikely(afinfo->family >= NPROTO))
1870 return -EAFNOSUPPORT;
1871 write_lock_bh(&xfrm_state_afinfo_lock);
1872 if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
1873 if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
1876 xfrm_state_afinfo[afinfo->family] = NULL;
1878 write_unlock_bh(&xfrm_state_afinfo_lock);
1881 EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
1883 static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
1885 struct xfrm_state_afinfo *afinfo;
1886 if (unlikely(family >= NPROTO))
1888 read_lock(&xfrm_state_afinfo_lock);
1889 afinfo = xfrm_state_afinfo[family];
1890 if (unlikely(!afinfo))
1891 read_unlock(&xfrm_state_afinfo_lock);
1895 static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
1897 read_unlock(&xfrm_state_afinfo_lock);
1900 /* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
1901 void xfrm_state_delete_tunnel(struct xfrm_state *x)
1904 struct xfrm_state *t = x->tunnel;
1906 if (atomic_read(&t->tunnel_users) == 2)
1907 xfrm_state_delete(t);
1908 atomic_dec(&t->tunnel_users);
1913 EXPORT_SYMBOL(xfrm_state_delete_tunnel);
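/* Payload MTU available through this SA: ask the transform type via
 * ->get_mtu() when it provides one, otherwise just subtract the
 * precomputed header length.
 */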
1915 int xfrm_state_mtu(struct xfrm_state *x, int mtu)
1919 spin_lock_bh(&x->lock);
1920 if (x->km.state == XFRM_STATE_VALID &&
1921 x->type && x->type->get_mtu)
1922 res = x->type->get_mtu(x, mtu);
1924 res = mtu - x->props.header_len;
1925 spin_unlock_bh(&x->lock);
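/* Resolve the af-specific flags, the transform type and the
 * encapsulation mode for a freshly built state (loading modules on
 * demand), then mark it XFRM_STATE_VALID.
 */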
1929 int xfrm_init_state(struct xfrm_state *x)
1931 struct xfrm_state_afinfo *afinfo;
1932 int family = x->props.family;
1935 err = -EAFNOSUPPORT;
1936 afinfo = xfrm_state_get_afinfo(family);
1941 if (afinfo->init_flags)
1942 err = afinfo->init_flags(x);
1944 xfrm_state_put_afinfo(afinfo);
1949 err = -EPROTONOSUPPORT;
1950 x->type = xfrm_get_type(x->id.proto, family);
1951 if (x->type == NULL)
1954 err = x->type->init_state(x);
1958 x->mode = xfrm_get_mode(x->props.mode, family);
1959 if (x->mode == NULL)
1962 x->km.state = XFRM_STATE_VALID;
1968 EXPORT_SYMBOL(xfrm_init_state);
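/* Boot-time initialisation: allocate the three hash tables with eight
 * buckets each and set up the garbage-collection work item; the tables
 * grow later via xfrm_hash_resize().
 */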
1970 void __init xfrm_state_init(void)
1974 sz = sizeof(struct hlist_head) * 8;
1976 xfrm_state_bydst = xfrm_hash_alloc(sz);
1977 xfrm_state_bysrc = xfrm_hash_alloc(sz);
1978 xfrm_state_byspi = xfrm_hash_alloc(sz);
1979 if (!xfrm_state_bydst || !xfrm_state_bysrc || !xfrm_state_byspi)
1980 panic("XFRM: Cannot allocate bydst/bysrc/byspi hashes.");
1981 xfrm_state_hmask = ((sz / sizeof(struct hlist_head)) - 1);
1983 INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task);
1986 #ifdef CONFIG_AUDITSYSCALL
1987 static inline void xfrm_audit_common_stateinfo(struct xfrm_state *x,
1988 struct audit_buffer *audit_buf)
1991 audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
1992 x->security->ctx_alg, x->security->ctx_doi,
1993 x->security->ctx_str);
1995 switch (x->props.family) {
1997 audit_log_format(audit_buf, " src=%u.%u.%u.%u dst=%u.%u.%u.%u",
1998 NIPQUAD(x->props.saddr.a4),
1999 NIPQUAD(x->id.daddr.a4));
2003 struct in6_addr saddr6, daddr6;
2005 memcpy(&saddr6, x->props.saddr.a6,
2006 sizeof(struct in6_addr));
2007 memcpy(&daddr6, x->id.daddr.a6,
2008 sizeof(struct in6_addr));
2009 audit_log_format(audit_buf,
2010 " src=" NIP6_FMT " dst=" NIP6_FMT,
2011 NIP6(saddr6), NIP6(daddr6));
2018 xfrm_audit_state_add(struct xfrm_state *x, int result, u32 auid, u32 sid)
2020 struct audit_buffer *audit_buf;
2021 extern int audit_enabled;
2023 if (audit_enabled == 0)
2025 audit_buf = xfrm_audit_start(sid, auid);
2026 if (audit_buf == NULL)
2028 audit_log_format(audit_buf, " op=SAD-add res=%u", result);
2029 xfrm_audit_common_stateinfo(x, audit_buf);
2030 audit_log_format(audit_buf, " spi=%lu(0x%lx)",
2031 (unsigned long)x->id.spi, (unsigned long)x->id.spi);
2032 audit_log_end(audit_buf);
2034 EXPORT_SYMBOL_GPL(xfrm_audit_state_add);
2037 xfrm_audit_state_delete(struct xfrm_state *x, int result, u32 auid, u32 sid)
2039 struct audit_buffer *audit_buf;
2040 extern int audit_enabled;
2042 if (audit_enabled == 0)
2044 audit_buf = xfrm_audit_start(sid, auid);
2045 if (audit_buf == NULL)
2047 audit_log_format(audit_buf, " op=SAD-delete res=%u", result);
2048 xfrm_audit_common_stateinfo(x, audit_buf);
2049 audit_log_format(audit_buf, " spi=%lu(0x%lx)",
2050 (unsigned long)x->id.spi, (unsigned long)x->id.spi);
2051 audit_log_end(audit_buf);
2053 EXPORT_SYMBOL_GPL(xfrm_audit_state_delete);
2054 #endif /* CONFIG_AUDITSYSCALL */