6 * Kazunori MIYAZAWA @USAGI
7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
9 * YOSHIFUJI Hideaki @USAGI
10 * Split up af-specific functions
11 * Derek Atkins <derek@ihtfp.com>
12 * Add UDP Encapsulation
16 #include <linux/workqueue.h>
18 #include <linux/pfkeyv2.h>
19 #include <linux/ipsec.h>
20 #include <linux/module.h>
21 #include <linux/cache.h>
22 #include <linux/audit.h>
23 #include <linux/uaccess.h>
24 #include <linux/ktime.h>
25 #include <linux/slab.h>
26 #include <linux/interrupt.h>
27 #include <linux/kernel.h>
29 #include "xfrm_hash.h"
31 #define xfrm_state_deref_prot(table, net) \
32 rcu_dereference_protected((table), lockdep_is_held(&(net)->xfrm.xfrm_state_lock))
34 static void xfrm_state_gc_task(struct work_struct *work);
/* Each xfrm_state may be linked to two tables:

   1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
   2. Hash table by (daddr,family,reqid) to find what SAs exist for given
      destination/tunnel endpoint. (output)
 */
43 static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
44 static __read_mostly seqcount_t xfrm_state_hash_generation = SEQCNT_ZERO(xfrm_state_hash_generation);
46 static DECLARE_WORK(xfrm_state_gc_work, xfrm_state_gc_task);
47 static HLIST_HEAD(xfrm_state_gc_list);
49 static inline bool xfrm_state_hold_rcu(struct xfrm_state __rcu *x)
51 return atomic_inc_not_zero(&x->refcnt);
54 static inline unsigned int xfrm_dst_hash(struct net *net,
55 const xfrm_address_t *daddr,
56 const xfrm_address_t *saddr,
58 unsigned short family)
60 return __xfrm_dst_hash(daddr, saddr, reqid, family, net->xfrm.state_hmask);
63 static inline unsigned int xfrm_src_hash(struct net *net,
64 const xfrm_address_t *daddr,
65 const xfrm_address_t *saddr,
66 unsigned short family)
68 return __xfrm_src_hash(daddr, saddr, family, net->xfrm.state_hmask);
71 static inline unsigned int
72 xfrm_spi_hash(struct net *net, const xfrm_address_t *daddr,
73 __be32 spi, u8 proto, unsigned short family)
75 return __xfrm_spi_hash(daddr, spi, proto, family, net->xfrm.state_hmask);
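/*
 * Illustrative sketch, not part of the original file: how the two tables
 * described at the top of the file map onto the hash helpers above.  The
 * example_*() wrappers are hypothetical and exist only to make the
 * input/output split concrete.
 */
static inline unsigned int example_input_bucket(struct net *net,
						const xfrm_address_t *daddr,
						__be32 spi, u8 proto,
						unsigned short family)
{
	/* Input path: the SA is resolved from the SPI carried in the packet,
	 * so the byspi table is keyed by (spi, daddr, proto).
	 */
	return xfrm_spi_hash(net, daddr, spi, proto, family);
}

static inline unsigned int example_output_bucket(struct net *net,
						 const xfrm_address_t *daddr,
						 const xfrm_address_t *saddr,
						 u32 reqid,
						 unsigned short family)
{
	/* Output path: policy lookup knows the endpoints and reqid, so the
	 * bydst table is keyed by (daddr, saddr, reqid, family).
	 */
	return xfrm_dst_hash(net, daddr, saddr, reqid, family);
}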
78 static void xfrm_hash_transfer(struct hlist_head *list,
79 struct hlist_head *ndsttable,
80 struct hlist_head *nsrctable,
81 struct hlist_head *nspitable,
82 unsigned int nhashmask)
84 struct hlist_node *tmp;
87 hlist_for_each_entry_safe(x, tmp, list, bydst) {
90 h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
91 x->props.reqid, x->props.family,
93 hlist_add_head_rcu(&x->bydst, ndsttable + h);
95 h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
98 hlist_add_head_rcu(&x->bysrc, nsrctable + h);
101 h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
102 x->id.proto, x->props.family,
104 hlist_add_head_rcu(&x->byspi, nspitable + h);
109 static unsigned long xfrm_hash_new_size(unsigned int state_hmask)
111 return ((state_hmask + 1) << 1) * sizeof(struct hlist_head);
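/*
 * Worked example (illustrative, not from the original file): with the
 * initial 8-bucket table (state_hmask == 7), xfrm_hash_new_size() returns
 * ((7 + 1) << 1) * sizeof(struct hlist_head), i.e. room for 16 buckets -
 * every resize doubles the table.
 */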
114 static void xfrm_hash_resize(struct work_struct *work)
116 struct net *net = container_of(work, struct net, xfrm.state_hash_work);
117 struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
118 unsigned long nsize, osize;
119 unsigned int nhashmask, ohashmask;
122 nsize = xfrm_hash_new_size(net->xfrm.state_hmask);
123 ndst = xfrm_hash_alloc(nsize);
126 nsrc = xfrm_hash_alloc(nsize);
128 xfrm_hash_free(ndst, nsize);
131 nspi = xfrm_hash_alloc(nsize);
133 xfrm_hash_free(ndst, nsize);
134 xfrm_hash_free(nsrc, nsize);
138 spin_lock_bh(&net->xfrm.xfrm_state_lock);
139 write_seqcount_begin(&xfrm_state_hash_generation);
141 nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
142 odst = xfrm_state_deref_prot(net->xfrm.state_bydst, net);
143 for (i = net->xfrm.state_hmask; i >= 0; i--)
144 xfrm_hash_transfer(odst + i, ndst, nsrc, nspi, nhashmask);
146 osrc = xfrm_state_deref_prot(net->xfrm.state_bysrc, net);
147 ospi = xfrm_state_deref_prot(net->xfrm.state_byspi, net);
148 ohashmask = net->xfrm.state_hmask;
150 rcu_assign_pointer(net->xfrm.state_bydst, ndst);
151 rcu_assign_pointer(net->xfrm.state_bysrc, nsrc);
152 rcu_assign_pointer(net->xfrm.state_byspi, nspi);
153 net->xfrm.state_hmask = nhashmask;
155 write_seqcount_end(&xfrm_state_hash_generation);
156 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
158 osize = (ohashmask + 1) * sizeof(struct hlist_head);
162 xfrm_hash_free(odst, osize);
163 xfrm_hash_free(osrc, osize);
164 xfrm_hash_free(ospi, osize);
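/*
 * Illustrative sketch, not part of the original file: the reader-side
 * pattern that pairs with the write_seqcount_begin()/end() section above.
 * Lockless lookups (e.g. xfrm_state_find()) sample the generation count
 * under rcu_read_lock() and retry if a resize raced with them.
 * example_lockless_walk() is hypothetical.
 */
static void example_lockless_walk(struct net *net)
{
	unsigned int sequence;

	rcu_read_lock();
	do {
		sequence = read_seqcount_begin(&xfrm_state_hash_generation);

		/* walk net->xfrm.state_bydst / state_byspi under RCU here */

	} while (read_seqcount_retry(&xfrm_state_hash_generation, sequence));
	rcu_read_unlock();
}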
167 static DEFINE_SPINLOCK(xfrm_state_afinfo_lock);
168 static struct xfrm_state_afinfo __rcu *xfrm_state_afinfo[NPROTO];
170 static DEFINE_SPINLOCK(xfrm_state_gc_lock);
172 int __xfrm_state_delete(struct xfrm_state *x);
174 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
175 bool km_is_alive(const struct km_event *c);
176 void km_state_expired(struct xfrm_state *x, int hard, u32 portid);
178 static DEFINE_SPINLOCK(xfrm_type_lock);
179 int xfrm_register_type(const struct xfrm_type *type, unsigned short family)
181 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
182 const struct xfrm_type **typemap;
185 if (unlikely(afinfo == NULL))
186 return -EAFNOSUPPORT;
187 typemap = afinfo->type_map;
188 spin_lock_bh(&xfrm_type_lock);
190 if (likely(typemap[type->proto] == NULL))
191 typemap[type->proto] = type;
194 spin_unlock_bh(&xfrm_type_lock);
198 EXPORT_SYMBOL(xfrm_register_type);
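/*
 * Illustrative sketch, not part of the original file: how a protocol module
 * typically registers its type from module init.  esp_example_type and the
 * esp_example_*() callbacks are hypothetical stand-ins, and the field list
 * is an assumption about struct xfrm_type.
 */
static const struct xfrm_type esp_example_type = {
	.description	= "ESP example",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.init_state	= esp_example_init_state,	/* hypothetical */
	.destructor	= esp_example_destroy,		/* hypothetical */
	.input		= esp_example_input,		/* hypothetical */
	.output		= esp_example_output,		/* hypothetical */
};

static int __init esp_example_module_init(void)
{
	/* Fails with -EEXIST if another type already owns the protocol. */
	return xfrm_register_type(&esp_example_type, AF_INET);
}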
200 int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family)
202 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
203 const struct xfrm_type **typemap;
206 if (unlikely(afinfo == NULL))
207 return -EAFNOSUPPORT;
208 typemap = afinfo->type_map;
209 spin_lock_bh(&xfrm_type_lock);
211 if (unlikely(typemap[type->proto] != type))
214 typemap[type->proto] = NULL;
215 spin_unlock_bh(&xfrm_type_lock);
219 EXPORT_SYMBOL(xfrm_unregister_type);
221 static const struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
223 struct xfrm_state_afinfo *afinfo;
224 const struct xfrm_type **typemap;
225 const struct xfrm_type *type;
226 int modload_attempted = 0;
229 afinfo = xfrm_state_get_afinfo(family);
230 if (unlikely(afinfo == NULL))
232 typemap = afinfo->type_map;
234 type = READ_ONCE(typemap[proto]);
235 if (unlikely(type && !try_module_get(type->owner)))
240 if (!type && !modload_attempted) {
241 request_module("xfrm-type-%d-%d", family, proto);
242 modload_attempted = 1;
249 static void xfrm_put_type(const struct xfrm_type *type)
251 module_put(type->owner);
254 static DEFINE_SPINLOCK(xfrm_type_offload_lock);
255 int xfrm_register_type_offload(const struct xfrm_type_offload *type,
256 unsigned short family)
258 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
259 const struct xfrm_type_offload **typemap;
262 if (unlikely(afinfo == NULL))
263 return -EAFNOSUPPORT;
264 typemap = afinfo->type_offload_map;
265 spin_lock_bh(&xfrm_type_offload_lock);
267 if (likely(typemap[type->proto] == NULL))
268 typemap[type->proto] = type;
271 spin_unlock_bh(&xfrm_type_offload_lock);
275 EXPORT_SYMBOL(xfrm_register_type_offload);
277 int xfrm_unregister_type_offload(const struct xfrm_type_offload *type,
278 unsigned short family)
280 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
281 const struct xfrm_type_offload **typemap;
284 if (unlikely(afinfo == NULL))
285 return -EAFNOSUPPORT;
286 typemap = afinfo->type_offload_map;
287 spin_lock_bh(&xfrm_type_offload_lock);
289 if (unlikely(typemap[type->proto] != type))
292 typemap[type->proto] = NULL;
293 spin_unlock_bh(&xfrm_type_offload_lock);
297 EXPORT_SYMBOL(xfrm_unregister_type_offload);
299 static const struct xfrm_type_offload *xfrm_get_type_offload(u8 proto, unsigned short family)
301 struct xfrm_state_afinfo *afinfo;
302 const struct xfrm_type_offload **typemap;
303 const struct xfrm_type_offload *type;
305 afinfo = xfrm_state_get_afinfo(family);
306 if (unlikely(afinfo == NULL))
308 typemap = afinfo->type_offload_map;
310 type = typemap[proto];
311 if ((type && !try_module_get(type->owner)))
318 static void xfrm_put_type_offload(const struct xfrm_type_offload *type)
320 module_put(type->owner);
323 static DEFINE_SPINLOCK(xfrm_mode_lock);
324 int xfrm_register_mode(struct xfrm_mode *mode, int family)
326 struct xfrm_state_afinfo *afinfo;
327 struct xfrm_mode **modemap;
330 if (unlikely(mode->encap >= XFRM_MODE_MAX))
333 afinfo = xfrm_state_get_afinfo(family);
334 if (unlikely(afinfo == NULL))
335 return -EAFNOSUPPORT;
338 modemap = afinfo->mode_map;
339 spin_lock_bh(&xfrm_mode_lock);
340 if (modemap[mode->encap])
344 if (!try_module_get(afinfo->owner))
347 mode->afinfo = afinfo;
348 modemap[mode->encap] = mode;
352 spin_unlock_bh(&xfrm_mode_lock);
356 EXPORT_SYMBOL(xfrm_register_mode);
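/*
 * Illustrative sketch, not part of the original file: the shape of a mode
 * registration at module init.  example_mode_input()/output() are
 * hypothetical callbacks and the callback fields are assumptions about
 * struct xfrm_mode.
 */
static struct xfrm_mode example_tunnel_mode = {
	.input	= example_mode_input,	/* hypothetical */
	.output	= example_mode_output,	/* hypothetical */
	.owner	= THIS_MODULE,
	.encap	= XFRM_MODE_TUNNEL,
	.flags	= XFRM_MODE_FLAG_TUNNEL,
};

static int __init example_mode_init(void)
{
	/* Fails with -EEXIST if the encap slot is already taken. */
	return xfrm_register_mode(&example_tunnel_mode, AF_INET);
}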
358 int xfrm_unregister_mode(struct xfrm_mode *mode, int family)
360 struct xfrm_state_afinfo *afinfo;
361 struct xfrm_mode **modemap;
364 if (unlikely(mode->encap >= XFRM_MODE_MAX))
367 afinfo = xfrm_state_get_afinfo(family);
368 if (unlikely(afinfo == NULL))
369 return -EAFNOSUPPORT;
372 modemap = afinfo->mode_map;
373 spin_lock_bh(&xfrm_mode_lock);
374 if (likely(modemap[mode->encap] == mode)) {
375 modemap[mode->encap] = NULL;
376 module_put(mode->afinfo->owner);
380 spin_unlock_bh(&xfrm_mode_lock);
384 EXPORT_SYMBOL(xfrm_unregister_mode);
386 static struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family)
388 struct xfrm_state_afinfo *afinfo;
389 struct xfrm_mode *mode;
390 int modload_attempted = 0;
392 if (unlikely(encap >= XFRM_MODE_MAX))
396 afinfo = xfrm_state_get_afinfo(family);
397 if (unlikely(afinfo == NULL))
400 mode = READ_ONCE(afinfo->mode_map[encap]);
401 if (unlikely(mode && !try_module_get(mode->owner)))
405 if (!mode && !modload_attempted) {
406 request_module("xfrm-mode-%d-%d", family, encap);
407 modload_attempted = 1;
414 static void xfrm_put_mode(struct xfrm_mode *mode)
416 module_put(mode->owner);
419 static void xfrm_state_gc_destroy(struct xfrm_state *x)
421 tasklet_hrtimer_cancel(&x->mtimer);
422 del_timer_sync(&x->rtimer);
429 kfree(x->replay_esn);
430 kfree(x->preplay_esn);
432 xfrm_put_mode(x->inner_mode);
433 if (x->inner_mode_iaf)
434 xfrm_put_mode(x->inner_mode_iaf);
436 xfrm_put_mode(x->outer_mode);
438 xfrm_put_type_offload(x->type_offload);
440 x->type->destructor(x);
441 xfrm_put_type(x->type);
443 xfrm_dev_state_free(x);
444 security_xfrm_state_free(x);
448 static void xfrm_state_gc_task(struct work_struct *work)
450 struct xfrm_state *x;
451 struct hlist_node *tmp;
452 struct hlist_head gc_list;
454 spin_lock_bh(&xfrm_state_gc_lock);
455 hlist_move_list(&xfrm_state_gc_list, &gc_list);
456 spin_unlock_bh(&xfrm_state_gc_lock);
460 hlist_for_each_entry_safe(x, tmp, &gc_list, gclist)
461 xfrm_state_gc_destroy(x);
464 static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
466 struct tasklet_hrtimer *thr = container_of(me, struct tasklet_hrtimer, timer);
467 struct xfrm_state *x = container_of(thr, struct xfrm_state, mtimer);
468 unsigned long now = get_seconds();
469 long next = LONG_MAX;
474 if (x->km.state == XFRM_STATE_DEAD)
476 if (x->km.state == XFRM_STATE_EXPIRED)
478 if (x->lft.hard_add_expires_seconds) {
479 long tmo = x->lft.hard_add_expires_seconds +
480 x->curlft.add_time - now;
482 if (x->xflags & XFRM_SOFT_EXPIRE) {
/* enter hard expire without soft expire first?!
 * setting a new date could trigger this.
 * workaround: fix x->curlft.add_time by below:
 */
487 x->curlft.add_time = now - x->saved_tmo - 1;
488 tmo = x->lft.hard_add_expires_seconds - x->saved_tmo;
495 if (x->lft.hard_use_expires_seconds) {
496 long tmo = x->lft.hard_use_expires_seconds +
497 (x->curlft.use_time ? : now) - now;
505 if (x->lft.soft_add_expires_seconds) {
506 long tmo = x->lft.soft_add_expires_seconds +
507 x->curlft.add_time - now;
510 x->xflags &= ~XFRM_SOFT_EXPIRE;
511 } else if (tmo < next) {
513 x->xflags |= XFRM_SOFT_EXPIRE;
517 if (x->lft.soft_use_expires_seconds) {
518 long tmo = x->lft.soft_use_expires_seconds +
519 (x->curlft.use_time ? : now) - now;
528 km_state_expired(x, 0, 0);
530 if (next != LONG_MAX) {
531 tasklet_hrtimer_start(&x->mtimer, ktime_set(next, 0), HRTIMER_MODE_REL);
537 if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0)
538 x->km.state = XFRM_STATE_EXPIRED;
540 err = __xfrm_state_delete(x);
542 km_state_expired(x, 1, 0);
544 xfrm_audit_state_delete(x, err ? 0 : 1, true);
547 spin_unlock(&x->lock);
548 return HRTIMER_NORESTART;
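/*
 * Illustrative summary, not from the original file: with
 * lft.hard_add_expires_seconds == 3600 the handler above computes
 * tmo = 3600 + curlft.add_time - now on every run; while tmo > 0 it merely
 * shrinks `next` and re-arms the tasklet hrtimer, and once tmo reaches 0
 * the state is deleted and the key manager is told via
 * km_state_expired(x, 1, 0).  Soft limits follow the same arithmetic but
 * only notify with km_state_expired(x, 0, 0) and leave the state in place.
 */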
551 static void xfrm_replay_timer_handler(unsigned long data);
553 struct xfrm_state *xfrm_state_alloc(struct net *net)
555 struct xfrm_state *x;
557 x = kzalloc(sizeof(struct xfrm_state), GFP_ATOMIC);
560 write_pnet(&x->xs_net, net);
561 atomic_set(&x->refcnt, 1);
562 atomic_set(&x->tunnel_users, 0);
563 INIT_LIST_HEAD(&x->km.all);
564 INIT_HLIST_NODE(&x->bydst);
565 INIT_HLIST_NODE(&x->bysrc);
566 INIT_HLIST_NODE(&x->byspi);
567 tasklet_hrtimer_init(&x->mtimer, xfrm_timer_handler,
568 CLOCK_BOOTTIME, HRTIMER_MODE_ABS);
569 setup_timer(&x->rtimer, xfrm_replay_timer_handler,
571 x->curlft.add_time = get_seconds();
572 x->lft.soft_byte_limit = XFRM_INF;
573 x->lft.soft_packet_limit = XFRM_INF;
574 x->lft.hard_byte_limit = XFRM_INF;
575 x->lft.hard_packet_limit = XFRM_INF;
576 x->replay_maxage = 0;
577 x->replay_maxdiff = 0;
578 x->inner_mode = NULL;
579 x->inner_mode_iaf = NULL;
580 spin_lock_init(&x->lock);
584 EXPORT_SYMBOL(xfrm_state_alloc);
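/*
 * Illustrative sketch, not part of the original file: the usual life cycle
 * of a state built by a key-manager front end.  example_install_state() is
 * hypothetical and the property setup is elided.
 */
static int example_install_state(struct net *net)
{
	struct xfrm_state *x = xfrm_state_alloc(net);
	int err;

	if (!x)
		return -ENOMEM;

	/* fill in x->id, x->props, x->lft, algorithms, etc. here */

	err = xfrm_init_state(x);	/* resolves type, modes and replay ops */
	if (!err)
		err = xfrm_state_add(x);	/* hash it into the SAD */

	if (err) {
		x->km.state = XFRM_STATE_DEAD;
		xfrm_state_put(x);
	}
	return err;
}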
586 void __xfrm_state_destroy(struct xfrm_state *x)
588 WARN_ON(x->km.state != XFRM_STATE_DEAD);
590 spin_lock_bh(&xfrm_state_gc_lock);
591 hlist_add_head(&x->gclist, &xfrm_state_gc_list);
592 spin_unlock_bh(&xfrm_state_gc_lock);
593 schedule_work(&xfrm_state_gc_work);
595 EXPORT_SYMBOL(__xfrm_state_destroy);
597 int __xfrm_state_delete(struct xfrm_state *x)
599 struct net *net = xs_net(x);
602 if (x->km.state != XFRM_STATE_DEAD) {
603 x->km.state = XFRM_STATE_DEAD;
604 spin_lock(&net->xfrm.xfrm_state_lock);
605 list_del(&x->km.all);
606 hlist_del_rcu(&x->bydst);
607 hlist_del_rcu(&x->bysrc);
609 hlist_del_rcu(&x->byspi);
610 net->xfrm.state_num--;
611 spin_unlock(&net->xfrm.xfrm_state_lock);
613 xfrm_dev_state_delete(x);
/* All xfrm_state objects are created by xfrm_state_alloc.
 * The xfrm_state_alloc call gives a reference, and that
 * is what we are dropping here.
 */
625 EXPORT_SYMBOL(__xfrm_state_delete);
627 int xfrm_state_delete(struct xfrm_state *x)
631 spin_lock_bh(&x->lock);
632 err = __xfrm_state_delete(x);
633 spin_unlock_bh(&x->lock);
637 EXPORT_SYMBOL(xfrm_state_delete);
639 #ifdef CONFIG_SECURITY_NETWORK_XFRM
641 xfrm_state_flush_secctx_check(struct net *net, u8 proto, bool task_valid)
645 for (i = 0; i <= net->xfrm.state_hmask; i++) {
646 struct xfrm_state *x;
648 hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
649 if (xfrm_id_proto_match(x->id.proto, proto) &&
650 (err = security_xfrm_state_delete(x)) != 0) {
651 xfrm_audit_state_delete(x, 0, task_valid);
661 xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool task_valid)
665 for (i = 0; i <= net->xfrm.state_hmask; i++) {
666 struct xfrm_state *x;
667 struct xfrm_state_offload *xso;
669 hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
672 if (xso->dev == dev &&
673 (err = security_xfrm_state_delete(x)) != 0) {
674 xfrm_audit_state_delete(x, 0, task_valid);
684 xfrm_state_flush_secctx_check(struct net *net, u8 proto, bool task_valid)
690 xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool task_valid)
696 int xfrm_state_flush(struct net *net, u8 proto, bool task_valid)
698 int i, err = 0, cnt = 0;
700 spin_lock_bh(&net->xfrm.xfrm_state_lock);
701 err = xfrm_state_flush_secctx_check(net, proto, task_valid);
706 for (i = 0; i <= net->xfrm.state_hmask; i++) {
707 struct xfrm_state *x;
709 hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
710 if (!xfrm_state_kern(x) &&
711 xfrm_id_proto_match(x->id.proto, proto)) {
713 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
715 err = xfrm_state_delete(x);
716 xfrm_audit_state_delete(x, err ? 0 : 1,
722 spin_lock_bh(&net->xfrm.xfrm_state_lock);
731 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
734 EXPORT_SYMBOL(xfrm_state_flush);
736 int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid)
738 int i, err = 0, cnt = 0;
740 spin_lock_bh(&net->xfrm.xfrm_state_lock);
741 err = xfrm_dev_state_flush_secctx_check(net, dev, task_valid);
746 for (i = 0; i <= net->xfrm.state_hmask; i++) {
747 struct xfrm_state *x;
748 struct xfrm_state_offload *xso;
750 hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
753 if (!xfrm_state_kern(x) && xso->dev == dev) {
755 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
757 err = xfrm_state_delete(x);
758 xfrm_audit_state_delete(x, err ? 0 : 1,
764 spin_lock_bh(&net->xfrm.xfrm_state_lock);
773 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
776 EXPORT_SYMBOL(xfrm_dev_state_flush);
778 void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si)
780 spin_lock_bh(&net->xfrm.xfrm_state_lock);
781 si->sadcnt = net->xfrm.state_num;
782 si->sadhcnt = net->xfrm.state_hmask;
783 si->sadhmcnt = xfrm_state_hashmax;
784 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
786 EXPORT_SYMBOL(xfrm_sad_getinfo);
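/*
 * Illustrative usage, not part of the original file: sampling the SAD
 * counters exported above.
 */
static void example_report_sad(struct net *net)
{
	struct xfrmk_sadinfo si;

	xfrm_sad_getinfo(net, &si);
	pr_debug("xfrm SAs: cnt=%u hash-buckets=%u hash-max=%u\n",
		 si.sadcnt, si.sadhcnt + 1, si.sadhmcnt);
}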
789 xfrm_init_tempstate(struct xfrm_state *x, const struct flowi *fl,
790 const struct xfrm_tmpl *tmpl,
791 const xfrm_address_t *daddr, const xfrm_address_t *saddr,
792 unsigned short family)
794 struct xfrm_state_afinfo *afinfo = xfrm_state_afinfo_get_rcu(family);
799 afinfo->init_tempsel(&x->sel, fl);
801 if (family != tmpl->encap_family) {
802 afinfo = xfrm_state_afinfo_get_rcu(tmpl->encap_family);
806 afinfo->init_temprop(x, tmpl, daddr, saddr);
809 static struct xfrm_state *__xfrm_state_lookup(struct net *net, u32 mark,
810 const xfrm_address_t *daddr,
811 __be32 spi, u8 proto,
812 unsigned short family)
814 unsigned int h = xfrm_spi_hash(net, daddr, spi, proto, family);
815 struct xfrm_state *x;
817 hlist_for_each_entry_rcu(x, net->xfrm.state_byspi + h, byspi) {
818 if (x->props.family != family ||
820 x->id.proto != proto ||
821 !xfrm_addr_equal(&x->id.daddr, daddr, family))
824 if ((mark & x->mark.m) != x->mark.v)
826 if (!xfrm_state_hold_rcu(x))
834 static struct xfrm_state *__xfrm_state_lookup_byaddr(struct net *net, u32 mark,
835 const xfrm_address_t *daddr,
836 const xfrm_address_t *saddr,
837 u8 proto, unsigned short family)
839 unsigned int h = xfrm_src_hash(net, daddr, saddr, family);
840 struct xfrm_state *x;
842 hlist_for_each_entry_rcu(x, net->xfrm.state_bysrc + h, bysrc) {
843 if (x->props.family != family ||
844 x->id.proto != proto ||
845 !xfrm_addr_equal(&x->id.daddr, daddr, family) ||
846 !xfrm_addr_equal(&x->props.saddr, saddr, family))
849 if ((mark & x->mark.m) != x->mark.v)
851 if (!xfrm_state_hold_rcu(x))
859 static inline struct xfrm_state *
860 __xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
862 struct net *net = xs_net(x);
863 u32 mark = x->mark.v & x->mark.m;
866 return __xfrm_state_lookup(net, mark, &x->id.daddr,
867 x->id.spi, x->id.proto, family);
869 return __xfrm_state_lookup_byaddr(net, mark,
872 x->id.proto, family);
875 static void xfrm_hash_grow_check(struct net *net, int have_hash_collision)
877 if (have_hash_collision &&
878 (net->xfrm.state_hmask + 1) < xfrm_state_hashmax &&
879 net->xfrm.state_num > net->xfrm.state_hmask)
880 schedule_work(&net->xfrm.state_hash_work);
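/*
 * Illustrative note, not from the original file: with the default 8-bucket
 * table (state_hmask == 7), the resize work above is scheduled as soon as
 * the SAD holds at least as many states as buckets (state_num > state_hmask)
 * and an insert lands in an already-occupied chain; xfrm_hash_resize() then
 * doubles the tables.
 */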
883 static void xfrm_state_look_at(struct xfrm_policy *pol, struct xfrm_state *x,
884 const struct flowi *fl, unsigned short family,
885 struct xfrm_state **best, int *acq_in_progress,
/*
 * 1. There is a valid state with matching selector. Done.
 * 2. Valid state with inappropriate selector. Skip.
 *
 * Entering area of "sysdeps".
 *
 * 3. If state is not valid, selector is temporary, it selects
 *    only session which triggered previous resolution. Key
 *    manager will do something to install a state with proper
 *    selector.
 */
899 if (x->km.state == XFRM_STATE_VALID) {
900 if ((x->sel.family &&
901 !xfrm_selector_match(&x->sel, fl, x->sel.family)) ||
902 !security_xfrm_state_pol_flow_match(x, pol, fl))
906 (*best)->km.dying > x->km.dying ||
907 ((*best)->km.dying == x->km.dying &&
908 (*best)->curlft.add_time < x->curlft.add_time))
910 } else if (x->km.state == XFRM_STATE_ACQ) {
911 *acq_in_progress = 1;
912 } else if (x->km.state == XFRM_STATE_ERROR ||
913 x->km.state == XFRM_STATE_EXPIRED) {
914 if (xfrm_selector_match(&x->sel, fl, x->sel.family) &&
915 security_xfrm_state_pol_flow_match(x, pol, fl))
921 xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
922 const struct flowi *fl, struct xfrm_tmpl *tmpl,
923 struct xfrm_policy *pol, int *err,
924 unsigned short family)
926 static xfrm_address_t saddr_wildcard = { };
927 struct net *net = xp_net(pol);
928 unsigned int h, h_wildcard;
929 struct xfrm_state *x, *x0, *to_put;
930 int acquire_in_progress = 0;
932 struct xfrm_state *best = NULL;
933 u32 mark = pol->mark.v & pol->mark.m;
934 unsigned short encap_family = tmpl->encap_family;
935 unsigned int sequence;
940 sequence = read_seqcount_begin(&xfrm_state_hash_generation);
943 h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family);
944 hlist_for_each_entry_rcu(x, net->xfrm.state_bydst + h, bydst) {
945 if (x->props.family == encap_family &&
946 x->props.reqid == tmpl->reqid &&
947 (mark & x->mark.m) == x->mark.v &&
948 !(x->props.flags & XFRM_STATE_WILDRECV) &&
949 xfrm_state_addr_check(x, daddr, saddr, encap_family) &&
950 tmpl->mode == x->props.mode &&
951 tmpl->id.proto == x->id.proto &&
952 (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
953 xfrm_state_look_at(pol, x, fl, encap_family,
954 &best, &acquire_in_progress, &error);
956 if (best || acquire_in_progress)
959 h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, encap_family);
960 hlist_for_each_entry_rcu(x, net->xfrm.state_bydst + h_wildcard, bydst) {
961 if (x->props.family == encap_family &&
962 x->props.reqid == tmpl->reqid &&
963 (mark & x->mark.m) == x->mark.v &&
964 !(x->props.flags & XFRM_STATE_WILDRECV) &&
965 xfrm_addr_equal(&x->id.daddr, daddr, encap_family) &&
966 tmpl->mode == x->props.mode &&
967 tmpl->id.proto == x->id.proto &&
968 (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
969 xfrm_state_look_at(pol, x, fl, encap_family,
970 &best, &acquire_in_progress, &error);
975 if (!x && !error && !acquire_in_progress) {
977 (x0 = __xfrm_state_lookup(net, mark, daddr, tmpl->id.spi,
978 tmpl->id.proto, encap_family)) != NULL) {
/* If the KMs have no listeners (yet...), avoid allocating an SA
 * for each and every packet - garbage collection might not
 * handle the flood.
 */
989 if (!km_is_alive(&c)) {
994 x = xfrm_state_alloc(net);
999 /* Initialize temporary state matching only
1000 * to current session. */
1001 xfrm_init_tempstate(x, fl, tmpl, daddr, saddr, family);
1002 memcpy(&x->mark, &pol->mark, sizeof(x->mark));
1004 error = security_xfrm_state_alloc_acquire(x, pol->security, fl->flowi_secid);
1006 x->km.state = XFRM_STATE_DEAD;
1012 if (km_query(x, tmpl, pol) == 0) {
1013 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1014 x->km.state = XFRM_STATE_ACQ;
1015 list_add(&x->km.all, &net->xfrm.state_all);
1016 hlist_add_head_rcu(&x->bydst, net->xfrm.state_bydst + h);
1017 h = xfrm_src_hash(net, daddr, saddr, encap_family);
1018 hlist_add_head_rcu(&x->bysrc, net->xfrm.state_bysrc + h);
1020 h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, encap_family);
1021 hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
1023 x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
1024 tasklet_hrtimer_start(&x->mtimer, ktime_set(net->xfrm.sysctl_acq_expires, 0), HRTIMER_MODE_REL);
1025 net->xfrm.state_num++;
1026 xfrm_hash_grow_check(net, x->bydst.next != NULL);
1027 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1029 x->km.state = XFRM_STATE_DEAD;
1037 if (!xfrm_state_hold_rcu(x)) {
1042 *err = acquire_in_progress ? -EAGAIN : error;
1046 xfrm_state_put(to_put);
1048 if (read_seqcount_retry(&xfrm_state_hash_generation, sequence)) {
1060 xfrm_stateonly_find(struct net *net, u32 mark,
1061 xfrm_address_t *daddr, xfrm_address_t *saddr,
1062 unsigned short family, u8 mode, u8 proto, u32 reqid)
1065 struct xfrm_state *rx = NULL, *x = NULL;
1067 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1068 h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
1069 hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
1070 if (x->props.family == family &&
1071 x->props.reqid == reqid &&
1072 (mark & x->mark.m) == x->mark.v &&
1073 !(x->props.flags & XFRM_STATE_WILDRECV) &&
1074 xfrm_state_addr_check(x, daddr, saddr, family) &&
1075 mode == x->props.mode &&
1076 proto == x->id.proto &&
1077 x->km.state == XFRM_STATE_VALID) {
1084 xfrm_state_hold(rx);
1085 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1090 EXPORT_SYMBOL(xfrm_stateonly_find);
1092 struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi,
1093 unsigned short family)
1095 struct xfrm_state *x;
1096 struct xfrm_state_walk *w;
1098 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1099 list_for_each_entry(w, &net->xfrm.state_all, all) {
1100 x = container_of(w, struct xfrm_state, km);
1101 if (x->props.family != family ||
1106 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1109 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1112 EXPORT_SYMBOL(xfrm_state_lookup_byspi);
1114 static void __xfrm_state_insert(struct xfrm_state *x)
1116 struct net *net = xs_net(x);
1119 list_add(&x->km.all, &net->xfrm.state_all);
1121 h = xfrm_dst_hash(net, &x->id.daddr, &x->props.saddr,
1122 x->props.reqid, x->props.family);
1123 hlist_add_head_rcu(&x->bydst, net->xfrm.state_bydst + h);
1125 h = xfrm_src_hash(net, &x->id.daddr, &x->props.saddr, x->props.family);
1126 hlist_add_head_rcu(&x->bysrc, net->xfrm.state_bysrc + h);
1129 h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto,
1132 hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
1135 tasklet_hrtimer_start(&x->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL);
1136 if (x->replay_maxage)
1137 mod_timer(&x->rtimer, jiffies + x->replay_maxage);
1139 net->xfrm.state_num++;
1141 xfrm_hash_grow_check(net, x->bydst.next != NULL);
1144 /* net->xfrm.xfrm_state_lock is held */
1145 static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
1147 struct net *net = xs_net(xnew);
1148 unsigned short family = xnew->props.family;
1149 u32 reqid = xnew->props.reqid;
1150 struct xfrm_state *x;
1152 u32 mark = xnew->mark.v & xnew->mark.m;
1154 h = xfrm_dst_hash(net, &xnew->id.daddr, &xnew->props.saddr, reqid, family);
1155 hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
1156 if (x->props.family == family &&
1157 x->props.reqid == reqid &&
1158 (mark & x->mark.m) == x->mark.v &&
1159 xfrm_addr_equal(&x->id.daddr, &xnew->id.daddr, family) &&
1160 xfrm_addr_equal(&x->props.saddr, &xnew->props.saddr, family))
1165 void xfrm_state_insert(struct xfrm_state *x)
1167 struct net *net = xs_net(x);
1169 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1170 __xfrm_state_bump_genids(x);
1171 __xfrm_state_insert(x);
1172 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1174 EXPORT_SYMBOL(xfrm_state_insert);
1176 /* net->xfrm.xfrm_state_lock is held */
1177 static struct xfrm_state *__find_acq_core(struct net *net,
1178 const struct xfrm_mark *m,
1179 unsigned short family, u8 mode,
1180 u32 reqid, u8 proto,
1181 const xfrm_address_t *daddr,
1182 const xfrm_address_t *saddr,
1185 unsigned int h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
1186 struct xfrm_state *x;
1187 u32 mark = m->v & m->m;
1189 hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
1190 if (x->props.reqid != reqid ||
1191 x->props.mode != mode ||
1192 x->props.family != family ||
1193 x->km.state != XFRM_STATE_ACQ ||
1195 x->id.proto != proto ||
1196 (mark & x->mark.m) != x->mark.v ||
1197 !xfrm_addr_equal(&x->id.daddr, daddr, family) ||
1198 !xfrm_addr_equal(&x->props.saddr, saddr, family))
1208 x = xfrm_state_alloc(net);
1212 x->sel.daddr.a4 = daddr->a4;
1213 x->sel.saddr.a4 = saddr->a4;
1214 x->sel.prefixlen_d = 32;
1215 x->sel.prefixlen_s = 32;
1216 x->props.saddr.a4 = saddr->a4;
1217 x->id.daddr.a4 = daddr->a4;
1221 x->sel.daddr.in6 = daddr->in6;
1222 x->sel.saddr.in6 = saddr->in6;
1223 x->sel.prefixlen_d = 128;
1224 x->sel.prefixlen_s = 128;
1225 x->props.saddr.in6 = saddr->in6;
1226 x->id.daddr.in6 = daddr->in6;
1230 x->km.state = XFRM_STATE_ACQ;
1231 x->id.proto = proto;
1232 x->props.family = family;
1233 x->props.mode = mode;
1234 x->props.reqid = reqid;
1237 x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
1239 tasklet_hrtimer_start(&x->mtimer, ktime_set(net->xfrm.sysctl_acq_expires, 0), HRTIMER_MODE_REL);
1240 list_add(&x->km.all, &net->xfrm.state_all);
1241 hlist_add_head_rcu(&x->bydst, net->xfrm.state_bydst + h);
1242 h = xfrm_src_hash(net, daddr, saddr, family);
1243 hlist_add_head_rcu(&x->bysrc, net->xfrm.state_bysrc + h);
1245 net->xfrm.state_num++;
1247 xfrm_hash_grow_check(net, x->bydst.next != NULL);
1253 static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq);
1255 int xfrm_state_add(struct xfrm_state *x)
1257 struct net *net = xs_net(x);
1258 struct xfrm_state *x1, *to_put;
1261 u32 mark = x->mark.v & x->mark.m;
1262 int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
1264 family = x->props.family;
1268 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1270 x1 = __xfrm_state_locate(x, use_spi, family);
1278 if (use_spi && x->km.seq) {
1279 x1 = __xfrm_find_acq_byseq(net, mark, x->km.seq);
1280 if (x1 && ((x1->id.proto != x->id.proto) ||
1281 !xfrm_addr_equal(&x1->id.daddr, &x->id.daddr, family))) {
1288 x1 = __find_acq_core(net, &x->mark, family, x->props.mode,
1289 x->props.reqid, x->id.proto,
1290 &x->id.daddr, &x->props.saddr, 0);
1292 __xfrm_state_bump_genids(x);
1293 __xfrm_state_insert(x);
1297 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1300 xfrm_state_delete(x1);
1305 xfrm_state_put(to_put);
1309 EXPORT_SYMBOL(xfrm_state_add);
1311 #ifdef CONFIG_XFRM_MIGRATE
1312 static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig)
1314 struct net *net = xs_net(orig);
1315 struct xfrm_state *x = xfrm_state_alloc(net);
1319 memcpy(&x->id, &orig->id, sizeof(x->id));
1320 memcpy(&x->sel, &orig->sel, sizeof(x->sel));
1321 memcpy(&x->lft, &orig->lft, sizeof(x->lft));
1322 x->props.mode = orig->props.mode;
1323 x->props.replay_window = orig->props.replay_window;
1324 x->props.reqid = orig->props.reqid;
1325 x->props.family = orig->props.family;
1326 x->props.saddr = orig->props.saddr;
1329 x->aalg = xfrm_algo_auth_clone(orig->aalg);
1333 x->props.aalgo = orig->props.aalgo;
1336 x->aead = xfrm_algo_aead_clone(orig->aead);
1341 x->ealg = xfrm_algo_clone(orig->ealg);
1345 x->props.ealgo = orig->props.ealgo;
1348 x->calg = xfrm_algo_clone(orig->calg);
1352 x->props.calgo = orig->props.calgo;
1355 x->encap = kmemdup(orig->encap, sizeof(*x->encap), GFP_KERNEL);
1361 x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr),
1367 if (orig->replay_esn) {
1368 if (xfrm_replay_clone(x, orig))
1372 memcpy(&x->mark, &orig->mark, sizeof(x->mark));
1374 if (xfrm_init_state(x) < 0)
1377 x->props.flags = orig->props.flags;
1378 x->props.extra_flags = orig->props.extra_flags;
1380 x->tfcpad = orig->tfcpad;
1381 x->replay_maxdiff = orig->replay_maxdiff;
1382 x->replay_maxage = orig->replay_maxage;
1383 x->curlft.add_time = orig->curlft.add_time;
1384 x->km.state = orig->km.state;
1385 x->km.seq = orig->km.seq;
1395 struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net)
1398 struct xfrm_state *x = NULL;
1400 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1403 h = xfrm_dst_hash(net, &m->old_daddr, &m->old_saddr,
1404 m->reqid, m->old_family);
1405 hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
1406 if (x->props.mode != m->mode ||
1407 x->id.proto != m->proto)
1409 if (m->reqid && x->props.reqid != m->reqid)
1411 if (!xfrm_addr_equal(&x->id.daddr, &m->old_daddr,
1413 !xfrm_addr_equal(&x->props.saddr, &m->old_saddr,
1420 h = xfrm_src_hash(net, &m->old_daddr, &m->old_saddr,
1422 hlist_for_each_entry(x, net->xfrm.state_bysrc+h, bysrc) {
1423 if (x->props.mode != m->mode ||
1424 x->id.proto != m->proto)
1426 if (!xfrm_addr_equal(&x->id.daddr, &m->old_daddr,
1428 !xfrm_addr_equal(&x->props.saddr, &m->old_saddr,
1436 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1440 EXPORT_SYMBOL(xfrm_migrate_state_find);
1442 struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
1443 struct xfrm_migrate *m)
1445 struct xfrm_state *xc;
1447 xc = xfrm_state_clone(x);
1451 memcpy(&xc->id.daddr, &m->new_daddr, sizeof(xc->id.daddr));
1452 memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr));
1455 if (xfrm_addr_equal(&x->id.daddr, &m->new_daddr, m->new_family)) {
/* care is needed when the destination address of the
   state is to be updated, as it is part of the SA lookup triplet */
1458 xfrm_state_insert(xc);
1460 if (xfrm_state_add(xc) < 0)
1469 EXPORT_SYMBOL(xfrm_state_migrate);
1472 int xfrm_state_update(struct xfrm_state *x)
1474 struct xfrm_state *x1, *to_put;
1476 int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
1477 struct net *net = xs_net(x);
1481 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1482 x1 = __xfrm_state_locate(x, use_spi, x->props.family);
1488 if (xfrm_state_kern(x1)) {
1494 if (x1->km.state == XFRM_STATE_ACQ) {
1495 __xfrm_state_insert(x);
1501 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1504 xfrm_state_put(to_put);
1510 xfrm_state_delete(x1);
1516 spin_lock_bh(&x1->lock);
1517 if (likely(x1->km.state == XFRM_STATE_VALID)) {
1518 if (x->encap && x1->encap)
1519 memcpy(x1->encap, x->encap, sizeof(*x1->encap));
1520 if (x->coaddr && x1->coaddr) {
1521 memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
1523 if (!use_spi && memcmp(&x1->sel, &x->sel, sizeof(x1->sel)))
1524 memcpy(&x1->sel, &x->sel, sizeof(x1->sel));
1525 memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
1528 tasklet_hrtimer_start(&x1->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL);
1529 if (x1->curlft.use_time)
1530 xfrm_state_check_expire(x1);
1533 x->km.state = XFRM_STATE_DEAD;
1534 __xfrm_state_put(x);
1536 spin_unlock_bh(&x1->lock);
1542 EXPORT_SYMBOL(xfrm_state_update);
1544 int xfrm_state_check_expire(struct xfrm_state *x)
1546 if (!x->curlft.use_time)
1547 x->curlft.use_time = get_seconds();
1549 if (x->curlft.bytes >= x->lft.hard_byte_limit ||
1550 x->curlft.packets >= x->lft.hard_packet_limit) {
1551 x->km.state = XFRM_STATE_EXPIRED;
1552 tasklet_hrtimer_start(&x->mtimer, 0, HRTIMER_MODE_REL);
1557 (x->curlft.bytes >= x->lft.soft_byte_limit ||
1558 x->curlft.packets >= x->lft.soft_packet_limit)) {
1560 km_state_expired(x, 0, 0);
1564 EXPORT_SYMBOL(xfrm_state_check_expire);
1567 xfrm_state_lookup(struct net *net, u32 mark, const xfrm_address_t *daddr, __be32 spi,
1568 u8 proto, unsigned short family)
1570 struct xfrm_state *x;
1573 x = __xfrm_state_lookup(net, mark, daddr, spi, proto, family);
1577 EXPORT_SYMBOL(xfrm_state_lookup);
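/*
 * Illustrative usage, not part of the original file: resolving an inbound
 * SA by SPI and dropping the reference when done.  The mark, protocol and
 * family values are placeholders.
 */
static bool example_sa_exists(struct net *net, const xfrm_address_t *daddr,
			      __be32 spi)
{
	struct xfrm_state *x;

	x = xfrm_state_lookup(net, 0, daddr, spi, IPPROTO_ESP, AF_INET);
	if (!x)
		return false;

	xfrm_state_put(x);
	return true;
}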
1580 xfrm_state_lookup_byaddr(struct net *net, u32 mark,
1581 const xfrm_address_t *daddr, const xfrm_address_t *saddr,
1582 u8 proto, unsigned short family)
1584 struct xfrm_state *x;
1586 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1587 x = __xfrm_state_lookup_byaddr(net, mark, daddr, saddr, proto, family);
1588 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1591 EXPORT_SYMBOL(xfrm_state_lookup_byaddr);
1594 xfrm_find_acq(struct net *net, const struct xfrm_mark *mark, u8 mode, u32 reqid,
1595 u8 proto, const xfrm_address_t *daddr,
1596 const xfrm_address_t *saddr, int create, unsigned short family)
1598 struct xfrm_state *x;
1600 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1601 x = __find_acq_core(net, mark, family, mode, reqid, proto, daddr, saddr, create);
1602 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1606 EXPORT_SYMBOL(xfrm_find_acq);
1608 #ifdef CONFIG_XFRM_SUB_POLICY
1610 xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
1611 unsigned short family, struct net *net)
1614 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
1616 return -EAFNOSUPPORT;
1618 spin_lock_bh(&net->xfrm.xfrm_state_lock); /*FIXME*/
1619 if (afinfo->tmpl_sort)
1620 err = afinfo->tmpl_sort(dst, src, n);
1621 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1625 EXPORT_SYMBOL(xfrm_tmpl_sort);
1628 xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
1629 unsigned short family)
1632 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
1633 struct net *net = xs_net(*src);
1636 return -EAFNOSUPPORT;
1638 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1639 if (afinfo->state_sort)
1640 err = afinfo->state_sort(dst, src, n);
1641 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1645 EXPORT_SYMBOL(xfrm_state_sort);
/* Silly enough, but I'm too lazy to build a resolution list */
1650 static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq)
1654 for (i = 0; i <= net->xfrm.state_hmask; i++) {
1655 struct xfrm_state *x;
1657 hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
1658 if (x->km.seq == seq &&
1659 (mark & x->mark.m) == x->mark.v &&
1660 x->km.state == XFRM_STATE_ACQ) {
1669 struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq)
1671 struct xfrm_state *x;
1673 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1674 x = __xfrm_find_acq_byseq(net, mark, seq);
1675 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1678 EXPORT_SYMBOL(xfrm_find_acq_byseq);
1680 u32 xfrm_get_acqseq(void)
1683 static atomic_t acqseq;
1686 res = atomic_inc_return(&acqseq);
1691 EXPORT_SYMBOL(xfrm_get_acqseq);
1693 int verify_spi_info(u8 proto, u32 min, u32 max)
/* IPCOMP spi is 16 bits. */
1715 EXPORT_SYMBOL(verify_spi_info);
1717 int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
1719 struct net *net = xs_net(x);
1721 struct xfrm_state *x0;
1723 __be32 minspi = htonl(low);
1724 __be32 maxspi = htonl(high);
1725 u32 mark = x->mark.v & x->mark.m;
1727 spin_lock_bh(&x->lock);
1728 if (x->km.state == XFRM_STATE_DEAD)
1737 if (minspi == maxspi) {
1738 x0 = xfrm_state_lookup(net, mark, &x->id.daddr, minspi, x->id.proto, x->props.family);
1746 for (h = 0; h < high-low+1; h++) {
1747 spi = low + prandom_u32()%(high-low+1);
1748 x0 = xfrm_state_lookup(net, mark, &x->id.daddr, htonl(spi), x->id.proto, x->props.family);
1750 x->id.spi = htonl(spi);
1757 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1758 h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, x->props.family);
1759 hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
1760 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1766 spin_unlock_bh(&x->lock);
1770 EXPORT_SYMBOL(xfrm_alloc_spi);
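/*
 * Illustrative usage, not part of the original file: the allocation step a
 * key manager performs when userspace asks for an SPI in [low, high].
 * example_reserve_spi() is hypothetical.
 */
static int example_reserve_spi(struct xfrm_state *x, u32 low, u32 high)
{
	int err = verify_spi_info(x->id.proto, low, high);

	if (err)
		return err;

	/* Picks an unused SPI and hashes x into the byspi table. */
	return xfrm_alloc_spi(x, low, high);
}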
1772 static bool __xfrm_state_filter_match(struct xfrm_state *x,
1773 struct xfrm_address_filter *filter)
1776 if ((filter->family == AF_INET ||
1777 filter->family == AF_INET6) &&
1778 x->props.family != filter->family)
1781 return addr_match(&x->props.saddr, &filter->saddr,
1783 addr_match(&x->id.daddr, &filter->daddr,
1789 int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
1790 int (*func)(struct xfrm_state *, int, void*),
1793 struct xfrm_state *state;
1794 struct xfrm_state_walk *x;
1797 if (walk->seq != 0 && list_empty(&walk->all))
1800 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1801 if (list_empty(&walk->all))
1802 x = list_first_entry(&net->xfrm.state_all, struct xfrm_state_walk, all);
1804 x = list_first_entry(&walk->all, struct xfrm_state_walk, all);
1805 list_for_each_entry_from(x, &net->xfrm.state_all, all) {
1806 if (x->state == XFRM_STATE_DEAD)
1808 state = container_of(x, struct xfrm_state, km);
1809 if (!xfrm_id_proto_match(state->id.proto, walk->proto))
1811 if (!__xfrm_state_filter_match(state, walk->filter))
1813 err = func(state, walk->seq, data);
1815 list_move_tail(&walk->all, &x->all);
1820 if (walk->seq == 0) {
1824 list_del_init(&walk->all);
1826 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1829 EXPORT_SYMBOL(xfrm_state_walk);
1831 void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto,
1832 struct xfrm_address_filter *filter)
1834 INIT_LIST_HEAD(&walk->all);
1835 walk->proto = proto;
1836 walk->state = XFRM_STATE_DEAD;
1838 walk->filter = filter;
1840 EXPORT_SYMBOL(xfrm_state_walk_init);
1842 void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net)
1844 kfree(walk->filter);
1846 if (list_empty(&walk->all))
1849 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1850 list_del(&walk->all);
1851 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1853 EXPORT_SYMBOL(xfrm_state_walk_done);
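/*
 * Illustrative sketch, not part of the original file: iterating the SAD
 * with the walker API above.  example_dump_one() is a hypothetical callback.
 */
static int example_dump_one(struct xfrm_state *x, int count, void *data)
{
	pr_debug("state %d proto %u\n", count, x->id.proto);
	return 0;	/* a non-zero return stops the walk */
}

static int example_dump_sad(struct net *net)
{
	struct xfrm_state_walk walk;
	int err;

	xfrm_state_walk_init(&walk, IPSEC_PROTO_ANY, NULL);
	err = xfrm_state_walk(net, &walk, example_dump_one, NULL);
	xfrm_state_walk_done(&walk, net);
	return err;
}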
1855 static void xfrm_replay_timer_handler(unsigned long data)
1857 struct xfrm_state *x = (struct xfrm_state *)data;
1859 spin_lock(&x->lock);
1861 if (x->km.state == XFRM_STATE_VALID) {
1862 if (xfrm_aevent_is_on(xs_net(x)))
1863 x->repl->notify(x, XFRM_REPLAY_TIMEOUT);
1865 x->xflags |= XFRM_TIME_DEFER;
1868 spin_unlock(&x->lock);
1871 static LIST_HEAD(xfrm_km_list);
1873 void km_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
1875 struct xfrm_mgr *km;
1878 list_for_each_entry_rcu(km, &xfrm_km_list, list)
1879 if (km->notify_policy)
1880 km->notify_policy(xp, dir, c);
1884 void km_state_notify(struct xfrm_state *x, const struct km_event *c)
1886 struct xfrm_mgr *km;
1888 list_for_each_entry_rcu(km, &xfrm_km_list, list)
1894 EXPORT_SYMBOL(km_policy_notify);
1895 EXPORT_SYMBOL(km_state_notify);
1897 void km_state_expired(struct xfrm_state *x, int hard, u32 portid)
1903 c.event = XFRM_MSG_EXPIRE;
1904 km_state_notify(x, &c);
1907 EXPORT_SYMBOL(km_state_expired);
/* We send to all registered managers regardless of failure;
 * we are happy with one success.
 */
1912 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
1914 int err = -EINVAL, acqret;
1915 struct xfrm_mgr *km;
1918 list_for_each_entry_rcu(km, &xfrm_km_list, list) {
1919 acqret = km->acquire(x, t, pol);
1926 EXPORT_SYMBOL(km_query);
1928 int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
1931 struct xfrm_mgr *km;
1934 list_for_each_entry_rcu(km, &xfrm_km_list, list) {
1935 if (km->new_mapping)
1936 err = km->new_mapping(x, ipaddr, sport);
1943 EXPORT_SYMBOL(km_new_mapping);
1945 void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 portid)
1951 c.event = XFRM_MSG_POLEXPIRE;
1952 km_policy_notify(pol, dir, &c);
1954 EXPORT_SYMBOL(km_policy_expired);
1956 #ifdef CONFIG_XFRM_MIGRATE
1957 int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
1958 const struct xfrm_migrate *m, int num_migrate,
1959 const struct xfrm_kmaddress *k)
1963 struct xfrm_mgr *km;
1966 list_for_each_entry_rcu(km, &xfrm_km_list, list) {
1968 ret = km->migrate(sel, dir, type, m, num_migrate, k);
1976 EXPORT_SYMBOL(km_migrate);
1979 int km_report(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr)
1983 struct xfrm_mgr *km;
1986 list_for_each_entry_rcu(km, &xfrm_km_list, list) {
1988 ret = km->report(net, proto, sel, addr);
1996 EXPORT_SYMBOL(km_report);
1998 bool km_is_alive(const struct km_event *c)
2000 struct xfrm_mgr *km;
2001 bool is_alive = false;
2004 list_for_each_entry_rcu(km, &xfrm_km_list, list) {
2005 if (km->is_alive && km->is_alive(c)) {
2014 EXPORT_SYMBOL(km_is_alive);
2016 int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
2020 struct xfrm_mgr *km;
2021 struct xfrm_policy *pol = NULL;
2023 if (optlen <= 0 || optlen > PAGE_SIZE)
2026 data = kmalloc(optlen, GFP_KERNEL);
2031 if (copy_from_user(data, optval, optlen))
2036 list_for_each_entry_rcu(km, &xfrm_km_list, list) {
2037 pol = km->compile_policy(sk, optname, data,
2045 xfrm_sk_policy_insert(sk, err, pol);
2054 EXPORT_SYMBOL(xfrm_user_policy);
2056 static DEFINE_SPINLOCK(xfrm_km_lock);
2058 int xfrm_register_km(struct xfrm_mgr *km)
2060 spin_lock_bh(&xfrm_km_lock);
2061 list_add_tail_rcu(&km->list, &xfrm_km_list);
2062 spin_unlock_bh(&xfrm_km_lock);
2065 EXPORT_SYMBOL(xfrm_register_km);
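/*
 * Illustrative sketch, not part of the original file: the shape of a key
 * manager registering itself.  The example_km_*() callbacks are
 * hypothetical; only hooks that this file actually invokes are listed.
 */
static struct xfrm_mgr example_km = {
	.notify		= example_km_notify,		/* hypothetical */
	.acquire	= example_km_acquire,		/* hypothetical */
	.compile_policy	= example_km_compile_policy,	/* hypothetical */
	.new_mapping	= example_km_new_mapping,	/* hypothetical */
	.is_alive	= example_km_is_alive,		/* hypothetical */
};

static int __init example_km_init(void)
{
	return xfrm_register_km(&example_km);
}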
2067 int xfrm_unregister_km(struct xfrm_mgr *km)
2069 spin_lock_bh(&xfrm_km_lock);
2070 list_del_rcu(&km->list);
2071 spin_unlock_bh(&xfrm_km_lock);
2075 EXPORT_SYMBOL(xfrm_unregister_km);
2077 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
2081 if (WARN_ON(afinfo->family >= NPROTO))
2082 return -EAFNOSUPPORT;
2084 spin_lock_bh(&xfrm_state_afinfo_lock);
2085 if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
2088 rcu_assign_pointer(xfrm_state_afinfo[afinfo->family], afinfo);
2089 spin_unlock_bh(&xfrm_state_afinfo_lock);
2092 EXPORT_SYMBOL(xfrm_state_register_afinfo);
2094 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
2096 int err = 0, family = afinfo->family;
2098 if (WARN_ON(family >= NPROTO))
2099 return -EAFNOSUPPORT;
2101 spin_lock_bh(&xfrm_state_afinfo_lock);
2102 if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
2103 if (rcu_access_pointer(xfrm_state_afinfo[family]) != afinfo)
2106 RCU_INIT_POINTER(xfrm_state_afinfo[afinfo->family], NULL);
2108 spin_unlock_bh(&xfrm_state_afinfo_lock);
2112 EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
2114 struct xfrm_state_afinfo *xfrm_state_afinfo_get_rcu(unsigned int family)
2116 if (unlikely(family >= NPROTO))
2119 return rcu_dereference(xfrm_state_afinfo[family]);
2122 struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
2124 struct xfrm_state_afinfo *afinfo;
2125 if (unlikely(family >= NPROTO))
2128 afinfo = rcu_dereference(xfrm_state_afinfo[family]);
2129 if (unlikely(!afinfo))
2134 /* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
2135 void xfrm_state_delete_tunnel(struct xfrm_state *x)
2138 struct xfrm_state *t = x->tunnel;
2140 if (atomic_read(&t->tunnel_users) == 2)
2141 xfrm_state_delete(t);
2142 atomic_dec(&t->tunnel_users);
2147 EXPORT_SYMBOL(xfrm_state_delete_tunnel);
2149 int xfrm_state_mtu(struct xfrm_state *x, int mtu)
2151 const struct xfrm_type *type = READ_ONCE(x->type);
2153 if (x->km.state == XFRM_STATE_VALID &&
2154 type && type->get_mtu)
2155 return type->get_mtu(x, mtu);
2157 return mtu - x->props.header_len;
2160 int __xfrm_init_state(struct xfrm_state *x, bool init_replay)
2162 struct xfrm_state_afinfo *afinfo;
2163 struct xfrm_mode *inner_mode;
2164 int family = x->props.family;
2167 err = -EAFNOSUPPORT;
2168 afinfo = xfrm_state_get_afinfo(family);
2173 if (afinfo->init_flags)
2174 err = afinfo->init_flags(x);
2181 err = -EPROTONOSUPPORT;
2183 if (x->sel.family != AF_UNSPEC) {
2184 inner_mode = xfrm_get_mode(x->props.mode, x->sel.family);
2185 if (inner_mode == NULL)
2188 if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL) &&
2189 family != x->sel.family) {
2190 xfrm_put_mode(inner_mode);
2194 x->inner_mode = inner_mode;
2196 struct xfrm_mode *inner_mode_iaf;
2197 int iafamily = AF_INET;
2199 inner_mode = xfrm_get_mode(x->props.mode, x->props.family);
2200 if (inner_mode == NULL)
2203 if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL)) {
2204 xfrm_put_mode(inner_mode);
2207 x->inner_mode = inner_mode;
2209 if (x->props.family == AF_INET)
2210 iafamily = AF_INET6;
2212 inner_mode_iaf = xfrm_get_mode(x->props.mode, iafamily);
2213 if (inner_mode_iaf) {
2214 if (inner_mode_iaf->flags & XFRM_MODE_FLAG_TUNNEL)
2215 x->inner_mode_iaf = inner_mode_iaf;
2217 xfrm_put_mode(inner_mode_iaf);
2221 x->type = xfrm_get_type(x->id.proto, family);
2222 if (x->type == NULL)
2225 x->type_offload = xfrm_get_type_offload(x->id.proto, family);
2227 err = x->type->init_state(x);
2231 x->outer_mode = xfrm_get_mode(x->props.mode, family);
2232 if (x->outer_mode == NULL) {
2233 err = -EPROTONOSUPPORT;
2238 err = xfrm_init_replay(x);
2243 x->km.state = XFRM_STATE_VALID;
2249 EXPORT_SYMBOL(__xfrm_init_state);
2251 int xfrm_init_state(struct xfrm_state *x)
2253 return __xfrm_init_state(x, true);
2256 EXPORT_SYMBOL(xfrm_init_state);
2258 int __net_init xfrm_state_init(struct net *net)
2262 INIT_LIST_HEAD(&net->xfrm.state_all);
2264 sz = sizeof(struct hlist_head) * 8;
2266 net->xfrm.state_bydst = xfrm_hash_alloc(sz);
2267 if (!net->xfrm.state_bydst)
2269 net->xfrm.state_bysrc = xfrm_hash_alloc(sz);
2270 if (!net->xfrm.state_bysrc)
2272 net->xfrm.state_byspi = xfrm_hash_alloc(sz);
2273 if (!net->xfrm.state_byspi)
2275 net->xfrm.state_hmask = ((sz / sizeof(struct hlist_head)) - 1);
2277 net->xfrm.state_num = 0;
2278 INIT_WORK(&net->xfrm.state_hash_work, xfrm_hash_resize);
2279 spin_lock_init(&net->xfrm.xfrm_state_lock);
2283 xfrm_hash_free(net->xfrm.state_bysrc, sz);
2285 xfrm_hash_free(net->xfrm.state_bydst, sz);
2290 void xfrm_state_fini(struct net *net)
2294 flush_work(&net->xfrm.state_hash_work);
2295 xfrm_state_flush(net, IPSEC_PROTO_ANY, false);
2296 flush_work(&xfrm_state_gc_work);
2298 WARN_ON(!list_empty(&net->xfrm.state_all));
2300 sz = (net->xfrm.state_hmask + 1) * sizeof(struct hlist_head);
2301 WARN_ON(!hlist_empty(net->xfrm.state_byspi));
2302 xfrm_hash_free(net->xfrm.state_byspi, sz);
2303 WARN_ON(!hlist_empty(net->xfrm.state_bysrc));
2304 xfrm_hash_free(net->xfrm.state_bysrc, sz);
2305 WARN_ON(!hlist_empty(net->xfrm.state_bydst));
2306 xfrm_hash_free(net->xfrm.state_bydst, sz);
2309 #ifdef CONFIG_AUDITSYSCALL
2310 static void xfrm_audit_helper_sainfo(struct xfrm_state *x,
2311 struct audit_buffer *audit_buf)
2313 struct xfrm_sec_ctx *ctx = x->security;
2314 u32 spi = ntohl(x->id.spi);
2317 audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
2318 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
2320 switch (x->props.family) {
2322 audit_log_format(audit_buf, " src=%pI4 dst=%pI4",
2323 &x->props.saddr.a4, &x->id.daddr.a4);
2326 audit_log_format(audit_buf, " src=%pI6 dst=%pI6",
2327 x->props.saddr.a6, x->id.daddr.a6);
2331 audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
2334 static void xfrm_audit_helper_pktinfo(struct sk_buff *skb, u16 family,
2335 struct audit_buffer *audit_buf)
2337 const struct iphdr *iph4;
2338 const struct ipv6hdr *iph6;
2343 audit_log_format(audit_buf, " src=%pI4 dst=%pI4",
2344 &iph4->saddr, &iph4->daddr);
2347 iph6 = ipv6_hdr(skb);
2348 audit_log_format(audit_buf,
2349 " src=%pI6 dst=%pI6 flowlbl=0x%x%02x%02x",
2350 &iph6->saddr, &iph6->daddr,
2351 iph6->flow_lbl[0] & 0x0f,
2358 void xfrm_audit_state_add(struct xfrm_state *x, int result, bool task_valid)
2360 struct audit_buffer *audit_buf;
2362 audit_buf = xfrm_audit_start("SAD-add");
2363 if (audit_buf == NULL)
2365 xfrm_audit_helper_usrinfo(task_valid, audit_buf);
2366 xfrm_audit_helper_sainfo(x, audit_buf);
2367 audit_log_format(audit_buf, " res=%u", result);
2368 audit_log_end(audit_buf);
2370 EXPORT_SYMBOL_GPL(xfrm_audit_state_add);
2372 void xfrm_audit_state_delete(struct xfrm_state *x, int result, bool task_valid)
2374 struct audit_buffer *audit_buf;
2376 audit_buf = xfrm_audit_start("SAD-delete");
2377 if (audit_buf == NULL)
2379 xfrm_audit_helper_usrinfo(task_valid, audit_buf);
2380 xfrm_audit_helper_sainfo(x, audit_buf);
2381 audit_log_format(audit_buf, " res=%u", result);
2382 audit_log_end(audit_buf);
2384 EXPORT_SYMBOL_GPL(xfrm_audit_state_delete);
2386 void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
2387 struct sk_buff *skb)
2389 struct audit_buffer *audit_buf;
2392 audit_buf = xfrm_audit_start("SA-replay-overflow");
2393 if (audit_buf == NULL)
2395 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2396 /* don't record the sequence number because it's inherent in this kind
2397 * of audit message */
2398 spi = ntohl(x->id.spi);
2399 audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
2400 audit_log_end(audit_buf);
2402 EXPORT_SYMBOL_GPL(xfrm_audit_state_replay_overflow);
2404 void xfrm_audit_state_replay(struct xfrm_state *x,
2405 struct sk_buff *skb, __be32 net_seq)
2407 struct audit_buffer *audit_buf;
2410 audit_buf = xfrm_audit_start("SA-replayed-pkt");
2411 if (audit_buf == NULL)
2413 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2414 spi = ntohl(x->id.spi);
2415 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2416 spi, spi, ntohl(net_seq));
2417 audit_log_end(audit_buf);
2419 EXPORT_SYMBOL_GPL(xfrm_audit_state_replay);
2421 void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family)
2423 struct audit_buffer *audit_buf;
2425 audit_buf = xfrm_audit_start("SA-notfound");
2426 if (audit_buf == NULL)
2428 xfrm_audit_helper_pktinfo(skb, family, audit_buf);
2429 audit_log_end(audit_buf);
2431 EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound_simple);
2433 void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
2434 __be32 net_spi, __be32 net_seq)
2436 struct audit_buffer *audit_buf;
2439 audit_buf = xfrm_audit_start("SA-notfound");
2440 if (audit_buf == NULL)
2442 xfrm_audit_helper_pktinfo(skb, family, audit_buf);
2443 spi = ntohl(net_spi);
2444 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2445 spi, spi, ntohl(net_seq));
2446 audit_log_end(audit_buf);
2448 EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound);
2450 void xfrm_audit_state_icvfail(struct xfrm_state *x,
2451 struct sk_buff *skb, u8 proto)
2453 struct audit_buffer *audit_buf;
2457 audit_buf = xfrm_audit_start("SA-icv-failure");
2458 if (audit_buf == NULL)
2460 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2461 if (xfrm_parse_spi(skb, proto, &net_spi, &net_seq) == 0) {
2462 u32 spi = ntohl(net_spi);
2463 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2464 spi, spi, ntohl(net_seq));
2466 audit_log_end(audit_buf);
2468 EXPORT_SYMBOL_GPL(xfrm_audit_state_icvfail);
2469 #endif /* CONFIG_AUDITSYSCALL */