6 * Kazunori MIYAZAWA @USAGI
7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
9 * YOSHIFUJI Hideaki @USAGI
10 * Split up af-specific functions
11 * Derek Atkins <derek@ihtfp.com>
12 * Add UDP Encapsulation
16 #include <linux/workqueue.h>
18 #include <linux/pfkeyv2.h>
19 #include <linux/ipsec.h>
20 #include <linux/module.h>
21 #include <asm/uaccess.h>
23 /* Each xfrm_state may be linked to two tables:
25    1. Hash table keyed by (daddr, spi, protocol) to find an SA by its SPI. (input, ctl)
26    2. Hash table keyed by daddr to find which SAs exist for a given
27       destination/tunnel endpoint. (output)
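/* Illustrative sketch only: the real hash helpers (xfrm_dst_hash(),
 * xfrm_spi_hash()) are AF-specific and defined elsewhere; the helper name
 * example_dst_hash below is hypothetical.  A per-destination bucket index
 * for an IPv4 address could look roughly like this, folding the address
 * down to XFRM_DST_HSIZE buckets:
 *
 *	static inline unsigned int example_dst_hash(u32 daddr)
 *	{
 *		unsigned int h = ntohl(daddr);
 *
 *		h = (h ^ (h >> 16)) % XFRM_DST_HSIZE;
 *		return h;
 *	}
 *
 * The SPI table would additionally mix the SPI and protocol number into h.
 */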
30 static DEFINE_SPINLOCK(xfrm_state_lock);
32 /* Hash table used to find the appropriate SA towards a given target (the
33  * endpoint of a tunnel, or the destination in transport mode) allowed by the selector.
35  * Its main use is finding an SA after the policy has selected tunnel or transport mode.
36  * It can also be used by the AH/ESP ICMP error handlers to find the offending SA.
38 static struct list_head xfrm_state_bydst[XFRM_DST_HSIZE];
39 static struct list_head xfrm_state_byspi[XFRM_DST_HSIZE];
41 DECLARE_WAIT_QUEUE_HEAD(km_waitq);
42 EXPORT_SYMBOL(km_waitq);
44 static DEFINE_RWLOCK(xfrm_state_afinfo_lock);
45 static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];
47 static struct work_struct xfrm_state_gc_work;
48 static struct list_head xfrm_state_gc_list = LIST_HEAD_INIT(xfrm_state_gc_list);
49 static DEFINE_SPINLOCK(xfrm_state_gc_lock);
51 static int xfrm_state_gc_flush_bundles;
53 static int __xfrm_state_delete(struct xfrm_state *x);
55 static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family);
56 static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
58 static int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
59 static void km_state_expired(struct xfrm_state *x, int hard);
61 static void xfrm_state_gc_destroy(struct xfrm_state *x)
63 if (del_timer(&x->timer))
74 x->type->destructor(x);
75 xfrm_put_type(x->type);
80 static void xfrm_state_gc_task(void *data)
83 struct list_head *entry, *tmp;
84 struct list_head gc_list = LIST_HEAD_INIT(gc_list);
86 if (xfrm_state_gc_flush_bundles) {
87 xfrm_state_gc_flush_bundles = 0;
91 spin_lock_bh(&xfrm_state_gc_lock);
92 list_splice_init(&xfrm_state_gc_list, &gc_list);
93 spin_unlock_bh(&xfrm_state_gc_lock);
95 list_for_each_safe(entry, tmp, &gc_list) {
96 x = list_entry(entry, struct xfrm_state, bydst);
97 xfrm_state_gc_destroy(x);
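/* Note on the GC pattern above: pending states are spliced onto a private
 * gc_list while xfrm_state_gc_lock is held, and the per-state teardown in
 * xfrm_state_gc_destroy() then runs without that lock, so new states can be
 * queued for destruction concurrently.
 */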
102 static inline unsigned long make_jiffies(long secs)
104 if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
105 return MAX_SCHEDULE_TIMEOUT-1;
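/* make_jiffies() converts a lifetime in seconds into a timer delta: values
 * at or above (MAX_SCHEDULE_TIMEOUT-1)/HZ are clamped to MAX_SCHEDULE_TIMEOUT-1
 * so the scaling by HZ cannot overflow; e.g. with HZ=1000 a 10-second
 * timeout simply becomes 10000 jiffies.
 */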
110 static void xfrm_timer_handler(unsigned long data)
112 struct xfrm_state *x = (struct xfrm_state*)data;
113 unsigned long now = (unsigned long)xtime.tv_sec;
114 long next = LONG_MAX;
118 if (x->km.state == XFRM_STATE_DEAD)
120 if (x->km.state == XFRM_STATE_EXPIRED)
122 if (x->lft.hard_add_expires_seconds) {
123 long tmo = x->lft.hard_add_expires_seconds +
124 x->curlft.add_time - now;
130 if (x->lft.hard_use_expires_seconds) {
131 long tmo = x->lft.hard_use_expires_seconds +
132 (x->curlft.use_time ? : now) - now;
140 if (x->lft.soft_add_expires_seconds) {
141 long tmo = x->lft.soft_add_expires_seconds +
142 x->curlft.add_time - now;
148 if (x->lft.soft_use_expires_seconds) {
149 long tmo = x->lft.soft_use_expires_seconds +
150 (x->curlft.use_time ? : now) - now;
158 km_state_expired(x, 0);
160 if (next != LONG_MAX &&
161 !mod_timer(&x->timer, jiffies + make_jiffies(next)))
166 if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) {
167 x->km.state = XFRM_STATE_EXPIRED;
173 km_state_expired(x, 1);
174 __xfrm_state_delete(x);
177 spin_unlock(&x->lock);
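/* Worked example of the lifetime arithmetic above (illustrative numbers):
 * with hard_add_expires_seconds = 3600 and curlft.add_time = T, the state
 * hard-expires at T+3600; if "now" is T+3000 the computed tmo is 600, the
 * smallest such tmo across the four limits becomes "next", and the timer
 * is re-armed make_jiffies(600) jiffies from now.  A non-positive tmo means
 * the corresponding limit has already been reached.
 */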
181 struct xfrm_state *xfrm_state_alloc(void)
183 struct xfrm_state *x;
185 x = kmalloc(sizeof(struct xfrm_state), GFP_ATOMIC);
188 memset(x, 0, sizeof(struct xfrm_state));
189 atomic_set(&x->refcnt, 1);
190 atomic_set(&x->tunnel_users, 0);
191 INIT_LIST_HEAD(&x->bydst);
192 INIT_LIST_HEAD(&x->byspi);
193 init_timer(&x->timer);
194 x->timer.function = xfrm_timer_handler;
195 x->timer.data = (unsigned long)x;
196 x->curlft.add_time = (unsigned long)xtime.tv_sec;
197 x->lft.soft_byte_limit = XFRM_INF;
198 x->lft.soft_packet_limit = XFRM_INF;
199 x->lft.hard_byte_limit = XFRM_INF;
200 x->lft.hard_packet_limit = XFRM_INF;
201 spin_lock_init(&x->lock);
205 EXPORT_SYMBOL(xfrm_state_alloc);
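/* Minimal usage sketch (not taken from this file): a key manager that has
 * just negotiated an SA would typically do something like the following.
 * The helper name install_sa() and the field values are hypothetical.
 *
 *	static int install_sa(xfrm_address_t *daddr, u32 spi, u8 proto,
 *			      unsigned short family)
 *	{
 *		struct xfrm_state *x = xfrm_state_alloc();
 *
 *		if (!x)
 *			return -ENOMEM;
 *		x->id.daddr = *daddr;
 *		x->id.spi = spi;
 *		x->id.proto = proto;
 *		x->props.family = family;
 *		// ... algorithms, mode, lifetimes ...
 *		return xfrm_state_add(x);	// or xfrm_state_update()
 *	}
 */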
207 void __xfrm_state_destroy(struct xfrm_state *x)
209 BUG_TRAP(x->km.state == XFRM_STATE_DEAD);
211 spin_lock_bh(&xfrm_state_gc_lock);
212 list_add(&x->bydst, &xfrm_state_gc_list);
213 spin_unlock_bh(&xfrm_state_gc_lock);
214 schedule_work(&xfrm_state_gc_work);
216 EXPORT_SYMBOL(__xfrm_state_destroy);
218 static int __xfrm_state_delete(struct xfrm_state *x)
222 if (x->km.state != XFRM_STATE_DEAD) {
223 x->km.state = XFRM_STATE_DEAD;
224 spin_lock(&xfrm_state_lock);
226 atomic_dec(&x->refcnt);
229 atomic_dec(&x->refcnt);
231 spin_unlock(&xfrm_state_lock);
232 if (del_timer(&x->timer))
233 atomic_dec(&x->refcnt);
235 /* The number two in this test is the reference
236 * mentioned in the comment below plus the reference
237 * our caller holds. A larger value means that
238 * there are DSTs attached to this xfrm_state.
240 if (atomic_read(&x->refcnt) > 2) {
241 xfrm_state_gc_flush_bundles = 1;
242 schedule_work(&xfrm_state_gc_work);
245 /* All xfrm_state objects are created by xfrm_state_alloc.
246 * The xfrm_state_alloc call gives a reference, and that
247 * is what we are dropping here.
249 atomic_dec(&x->refcnt);
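/* Reference accounting in __xfrm_state_delete(): one reference is dropped
 * for each table the state is unlinked from (bydst, byspi), one more if a
 * pending timer was removed, and finally the reference taken by
 * xfrm_state_alloc() itself.  Whatever remains after that belongs to cached
 * bundles (dst entries) and to the caller.
 */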
256 int xfrm_state_delete(struct xfrm_state *x)
260 spin_lock_bh(&x->lock);
261 err = __xfrm_state_delete(x);
262 spin_unlock_bh(&x->lock);
266 EXPORT_SYMBOL(xfrm_state_delete);
268 void xfrm_state_flush(u8 proto)
271 struct xfrm_state *x;
273 spin_lock_bh(&xfrm_state_lock);
274 for (i = 0; i < XFRM_DST_HSIZE; i++) {
276 list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
277 if (!xfrm_state_kern(x) &&
278 (proto == IPSEC_PROTO_ANY || x->id.proto == proto)) {
280 spin_unlock_bh(&xfrm_state_lock);
282 xfrm_state_delete(x);
285 spin_lock_bh(&xfrm_state_lock);
290 spin_unlock_bh(&xfrm_state_lock);
293 EXPORT_SYMBOL(xfrm_state_flush);
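/* Locking note for xfrm_state_flush(): xfrm_state_delete() takes x->lock and
 * then xfrm_state_lock itself, so it cannot be called with xfrm_state_lock
 * held.  The loop therefore drops the table lock around each deletion and,
 * after re-acquiring it, rescans the chain, since entries may have been
 * removed in the meantime.
 */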
296 xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
297 struct xfrm_tmpl *tmpl,
298 xfrm_address_t *daddr, xfrm_address_t *saddr,
299 unsigned short family)
301 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
304 afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
305 xfrm_state_put_afinfo(afinfo);
310 xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
311 struct flowi *fl, struct xfrm_tmpl *tmpl,
312 struct xfrm_policy *pol, int *err,
313 unsigned short family)
315 unsigned h = xfrm_dst_hash(daddr, family);
316 struct xfrm_state *x, *x0;
317 int acquire_in_progress = 0;
319 struct xfrm_state *best = NULL;
320 struct xfrm_state_afinfo *afinfo;
322 afinfo = xfrm_state_get_afinfo(family);
323 if (afinfo == NULL) {
324 *err = -EAFNOSUPPORT;
328 spin_lock_bh(&xfrm_state_lock);
329 list_for_each_entry(x, xfrm_state_bydst+h, bydst) {
330 if (x->props.family == family &&
331 x->props.reqid == tmpl->reqid &&
332 xfrm_state_addr_check(x, daddr, saddr, family) &&
333 tmpl->mode == x->props.mode &&
334 tmpl->id.proto == x->id.proto &&
335 (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) {
337 1. There is a valid state with matching selector.
339 2. Valid state with inappropriate selector. Skip.
341 Entering area of "sysdeps".
343 3. If the state is not valid, its selector is temporary:
344 it matches only the session which triggered the
345 previous resolution.  The key manager will install
346 a state with a proper selector.
349 if (x->km.state == XFRM_STATE_VALID) {
350 if (!xfrm_selector_match(&x->sel, fl, family))
353 best->km.dying > x->km.dying ||
354 (best->km.dying == x->km.dying &&
355 best->curlft.add_time < x->curlft.add_time))
357 } else if (x->km.state == XFRM_STATE_ACQ) {
358 acquire_in_progress = 1;
359 } else if (x->km.state == XFRM_STATE_ERROR ||
360 x->km.state == XFRM_STATE_EXPIRED) {
361 if (xfrm_selector_match(&x->sel, fl, family))
368 if (!x && !error && !acquire_in_progress) {
370 (x0 = afinfo->state_lookup(daddr, tmpl->id.spi,
371 tmpl->id.proto)) != NULL) {
376 x = xfrm_state_alloc();
381 /* Initialize the temporary selector, matching only
382  * the current session. */
383 xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);
385 if (km_query(x, tmpl, pol) == 0) {
386 x->km.state = XFRM_STATE_ACQ;
387 list_add_tail(&x->bydst, xfrm_state_bydst+h);
390 h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family);
391 list_add(&x->byspi, xfrm_state_byspi+h);
394 x->lft.hard_add_expires_seconds = XFRM_ACQ_EXPIRES;
396 x->timer.expires = jiffies + XFRM_ACQ_EXPIRES*HZ;
397 add_timer(&x->timer);
399 x->km.state = XFRM_STATE_DEAD;
409 *err = acquire_in_progress ? -EAGAIN : error;
410 spin_unlock_bh(&xfrm_state_lock);
411 xfrm_state_put_afinfo(afinfo);
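/* Summary of the lookup above: a usable XFRM_STATE_VALID entry that matches
 * the selector wins (preferring one that is not dying, or the most recently
 * added); if nothing matches, no error occurred and no acquisition is
 * already in flight, a larval XFRM_STATE_ACQ state is created, the key
 * managers are asked to negotiate a real SA via km_query(), and the larval
 * entry gets XFRM_ACQ_EXPIRES seconds to be resolved before its timer fires.
 */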
415 static void __xfrm_state_insert(struct xfrm_state *x)
417 unsigned h = xfrm_dst_hash(&x->id.daddr, x->props.family);
419 list_add(&x->bydst, xfrm_state_bydst+h);
422 h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
424 list_add(&x->byspi, xfrm_state_byspi+h);
427 if (!mod_timer(&x->timer, jiffies + HZ))
433 void xfrm_state_insert(struct xfrm_state *x)
435 spin_lock_bh(&xfrm_state_lock);
436 __xfrm_state_insert(x);
437 spin_unlock_bh(&xfrm_state_lock);
439 EXPORT_SYMBOL(xfrm_state_insert);
441 static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq);
443 int xfrm_state_add(struct xfrm_state *x)
445 struct xfrm_state_afinfo *afinfo;
446 struct xfrm_state *x1;
450 family = x->props.family;
451 afinfo = xfrm_state_get_afinfo(family);
452 if (unlikely(afinfo == NULL))
453 return -EAFNOSUPPORT;
455 spin_lock_bh(&xfrm_state_lock);
457 x1 = afinfo->state_lookup(&x->id.daddr, x->id.spi, x->id.proto);
466 x1 = __xfrm_find_acq_byseq(x->km.seq);
467 if (x1 && xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family)) {
474 x1 = afinfo->find_acq(
475 x->props.mode, x->props.reqid, x->id.proto,
476 &x->id.daddr, &x->props.saddr, 0);
478 __xfrm_state_insert(x);
482 spin_unlock_bh(&xfrm_state_lock);
483 xfrm_state_put_afinfo(afinfo);
486 xfrm_state_delete(x1);
492 EXPORT_SYMBOL(xfrm_state_add);
494 int xfrm_state_update(struct xfrm_state *x)
496 struct xfrm_state_afinfo *afinfo;
497 struct xfrm_state *x1;
500 afinfo = xfrm_state_get_afinfo(x->props.family);
501 if (unlikely(afinfo == NULL))
502 return -EAFNOSUPPORT;
504 spin_lock_bh(&xfrm_state_lock);
505 x1 = afinfo->state_lookup(&x->id.daddr, x->id.spi, x->id.proto);
511 if (xfrm_state_kern(x1)) {
517 if (x1->km.state == XFRM_STATE_ACQ) {
518 __xfrm_state_insert(x);
524 spin_unlock_bh(&xfrm_state_lock);
525 xfrm_state_put_afinfo(afinfo);
531 xfrm_state_delete(x1);
537 spin_lock_bh(&x1->lock);
538 if (likely(x1->km.state == XFRM_STATE_VALID)) {
539 if (x->encap && x1->encap)
540 memcpy(x1->encap, x->encap, sizeof(*x1->encap));
541 memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
544 if (!mod_timer(&x1->timer, jiffies + HZ))
546 if (x1->curlft.use_time)
547 xfrm_state_check_expire(x1);
551 spin_unlock_bh(&x1->lock);
557 EXPORT_SYMBOL(xfrm_state_update);
559 int xfrm_state_check_expire(struct xfrm_state *x)
561 if (!x->curlft.use_time)
562 x->curlft.use_time = (unsigned long)xtime.tv_sec;
564 if (x->km.state != XFRM_STATE_VALID)
567 if (x->curlft.bytes >= x->lft.hard_byte_limit ||
568 x->curlft.packets >= x->lft.hard_packet_limit) {
569 km_state_expired(x, 1);
570 if (!mod_timer(&x->timer, jiffies + XFRM_ACQ_EXPIRES*HZ))
576 (x->curlft.bytes >= x->lft.soft_byte_limit ||
577 x->curlft.packets >= x->lft.soft_packet_limit))
578 km_state_expired(x, 0);
581 EXPORT_SYMBOL(xfrm_state_check_expire);
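/* Example of the limit checks above: with soft_byte_limit = 1 MB and
 * hard_byte_limit = 2 MB, the first check after curlft.bytes crosses 1 MB
 * sends a soft expiry (km_state_expired(x, 0)) so the key manager can start
 * rekeying while the SA keeps working; once 2 MB is reached the hard branch
 * runs and km_state_expired(x, 1) is sent instead.
 */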
583 static int xfrm_state_check_space(struct xfrm_state *x, struct sk_buff *skb)
585 int nhead = x->props.header_len + LL_RESERVED_SPACE(skb->dst->dev)
589 return pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);
591 /* Check tail too... */
595 int xfrm_state_check(struct xfrm_state *x, struct sk_buff *skb)
597 int err = xfrm_state_check_expire(x);
600 err = xfrm_state_check_space(x, skb);
604 EXPORT_SYMBOL(xfrm_state_check);
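/* Headroom example for xfrm_state_check_space(): nhead is (roughly) the
 * space the transform and link layer need minus what the skb already has.
 * If props.header_len = 20, LL_RESERVED_SPACE() = 16 and the skb currently
 * has 16 bytes of headroom, nhead works out to 20 and pskb_expand_head()
 * grows the head by that amount; with enough existing headroom nhead is
 * non-positive and no reallocation happens.
 */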
607 xfrm_state_lookup(xfrm_address_t *daddr, u32 spi, u8 proto,
608 unsigned short family)
610 struct xfrm_state *x;
611 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
615 spin_lock_bh(&xfrm_state_lock);
616 x = afinfo->state_lookup(daddr, spi, proto);
617 spin_unlock_bh(&xfrm_state_lock);
618 xfrm_state_put_afinfo(afinfo);
621 EXPORT_SYMBOL(xfrm_state_lookup);
624 xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
625 xfrm_address_t *daddr, xfrm_address_t *saddr,
626 int create, unsigned short family)
628 struct xfrm_state *x;
629 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
633 spin_lock_bh(&xfrm_state_lock);
634 x = afinfo->find_acq(mode, reqid, proto, daddr, saddr, create);
635 spin_unlock_bh(&xfrm_state_lock);
636 xfrm_state_put_afinfo(afinfo);
639 EXPORT_SYMBOL(xfrm_find_acq);
641 /* Silly enough, but too lazy to build a proper resolution list; just scan every hash chain. */
643 static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq)
646 struct xfrm_state *x;
648 for (i = 0; i < XFRM_DST_HSIZE; i++) {
649 list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
650 if (x->km.seq == seq && x->km.state == XFRM_STATE_ACQ) {
659 struct xfrm_state *xfrm_find_acq_byseq(u32 seq)
661 struct xfrm_state *x;
663 spin_lock_bh(&xfrm_state_lock);
664 x = __xfrm_find_acq_byseq(seq);
665 spin_unlock_bh(&xfrm_state_lock);
668 EXPORT_SYMBOL(xfrm_find_acq_byseq);
670 u32 xfrm_get_acqseq(void)
674 static DEFINE_SPINLOCK(acqseq_lock);
676 spin_lock_bh(&acqseq_lock);
677 res = (++acqseq ? : ++acqseq);
678 spin_unlock_bh(&acqseq_lock);
681 EXPORT_SYMBOL(xfrm_get_acqseq);
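/* The expression (++acqseq ? : ++acqseq) uses the GCC "x ? : y" extension:
 * it increments the static counter and, only when the result has wrapped to
 * 0, increments it again, so xfrm_get_acqseq() never hands out sequence
 * number 0.  The spinlock makes the increment-and-read atomic across CPUs.
 */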
684 xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi)
687 struct xfrm_state *x0;
692 if (minspi == maxspi) {
693 x0 = xfrm_state_lookup(&x->id.daddr, minspi, x->id.proto, x->props.family);
701 minspi = ntohl(minspi);
702 maxspi = ntohl(maxspi);
703 for (h=0; h<maxspi-minspi+1; h++) {
704 spi = minspi + net_random()%(maxspi-minspi+1);
705 x0 = xfrm_state_lookup(&x->id.daddr, htonl(spi), x->id.proto, x->props.family);
707 x->id.spi = htonl(spi);
714 spin_lock_bh(&xfrm_state_lock);
715 h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
716 list_add(&x->byspi, xfrm_state_byspi+h);
718 spin_unlock_bh(&xfrm_state_lock);
722 EXPORT_SYMBOL(xfrm_alloc_spi);
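/* SPI selection above: when minspi == maxspi the caller has pinned the SPI
 * and a single lookup checks for a collision; otherwise up to
 * (maxspi - minspi + 1) random probes are made in the requested range, e.g.
 * for the range 0x100-0x1ff each attempt picks 0x100 + net_random() % 0x100
 * and the first value not already present in the byspi table is kept, after
 * which the state is linked into its byspi hash chain.
 */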
724 int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*),
728 struct xfrm_state *x;
732 spin_lock_bh(&xfrm_state_lock);
733 for (i = 0; i < XFRM_DST_HSIZE; i++) {
734 list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
735 if (proto == IPSEC_PROTO_ANY || x->id.proto == proto)
744 for (i = 0; i < XFRM_DST_HSIZE; i++) {
745 list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
746 if (proto != IPSEC_PROTO_ANY && x->id.proto != proto)
748 err = func(x, --count, data);
754 spin_unlock_bh(&xfrm_state_lock);
757 EXPORT_SYMBOL(xfrm_state_walk);
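/* xfrm_state_walk() makes two passes over the hash chains: the first pass
 * only counts the states matching 'proto', the second invokes func() with a
 * decreasing count so the callback can tell which entry is the last one
 * (count reaches 0 there), all while xfrm_state_lock is held.
 */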
759 int xfrm_replay_check(struct xfrm_state *x, u32 seq)
765 if (unlikely(seq == 0))
768 if (likely(seq > x->replay.seq))
771 diff = x->replay.seq - seq;
772 if (diff >= x->props.replay_window) {
773 x->stats.replay_window++;
777 if (x->replay.bitmap & (1U << diff)) {
783 EXPORT_SYMBOL(xfrm_replay_check);
785 void xfrm_replay_advance(struct xfrm_state *x, u32 seq)
791 if (seq > x->replay.seq) {
792 diff = seq - x->replay.seq;
793 if (diff < x->props.replay_window)
794 x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
796 x->replay.bitmap = 1;
799 diff = x->replay.seq - seq;
800 x->replay.bitmap |= (1U << diff);
803 EXPORT_SYMBOL(xfrm_replay_advance);
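/* Worked example of the sliding replay window (replay_window = 32,
 * replay.seq = 100, bitmap bit 0 == packet 100 seen):
 *
 *   - seq 105 arrives: xfrm_replay_check() passes (105 > 100);
 *     xfrm_replay_advance() shifts the bitmap left by diff = 5 and sets
 *     bit 0, so 105 becomes the new right edge of the window.
 *   - seq 98 then arrives: diff = 105 - 98 = 7 < 32, and if bit 7 is
 *     clear the packet is accepted and advance() only sets bit 7.
 *   - a second copy of seq 98: bit 7 is now set, so the check fails and
 *     the replay statistic is bumped.
 *   - seq 60 arrives: diff = 45 >= 32, outside the window, rejected and
 *     counted in x->stats.replay_window.
 */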
805 static struct list_head xfrm_km_list = LIST_HEAD_INIT(xfrm_km_list);
806 static DEFINE_RWLOCK(xfrm_km_lock);
808 void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
812 read_lock(&xfrm_km_lock);
813 list_for_each_entry(km, &xfrm_km_list, list)
814 if (km->notify_policy)
815 km->notify_policy(xp, dir, c);
816 read_unlock(&xfrm_km_lock);
819 void km_state_notify(struct xfrm_state *x, struct km_event *c)
822 read_lock(&xfrm_km_lock);
823 list_for_each_entry(km, &xfrm_km_list, list)
826 read_unlock(&xfrm_km_lock);
829 EXPORT_SYMBOL(km_policy_notify);
830 EXPORT_SYMBOL(km_state_notify);
832 static void km_state_expired(struct xfrm_state *x, int hard)
837 x->km.state = XFRM_STATE_EXPIRED;
841 c.event = XFRM_SAP_EXPIRED;
842 km_state_notify(x, &c);
849  * We send the query to all registered key managers regardless of individual failures;
850  * we are happy with one success.
852 static int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
854 int err = -EINVAL, acqret;
857 read_lock(&xfrm_km_lock);
858 list_for_each_entry(km, &xfrm_km_list, list) {
859 acqret = km->acquire(x, t, pol, XFRM_POLICY_OUT);
863 read_unlock(&xfrm_km_lock);
867 int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, u16 sport)
872 read_lock(&xfrm_km_lock);
873 list_for_each_entry(km, &xfrm_km_list, list) {
875 err = km->new_mapping(x, ipaddr, sport);
879 read_unlock(&xfrm_km_lock);
882 EXPORT_SYMBOL(km_new_mapping);
884 void km_policy_expired(struct xfrm_policy *pol, int dir, int hard)
890 c.event = XFRM_SAP_EXPIRED;
891 km_policy_notify(pol, dir, &c);
897 int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
902 struct xfrm_policy *pol = NULL;
904 if (optlen <= 0 || optlen > PAGE_SIZE)
907 data = kmalloc(optlen, GFP_KERNEL);
912 if (copy_from_user(data, optval, optlen))
916 read_lock(&xfrm_km_lock);
917 list_for_each_entry(km, &xfrm_km_list, list) {
918 pol = km->compile_policy(sk->sk_family, optname, data,
923 read_unlock(&xfrm_km_lock);
926 xfrm_sk_policy_insert(sk, err, pol);
935 EXPORT_SYMBOL(xfrm_user_policy);
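/* xfrm_user_policy() backs the per-socket policy setsockopt path: the raw
 * option buffer is copied in, each registered key manager is given a chance
 * to compile it into a struct xfrm_policy, and the first successful result
 * is attached to the socket with xfrm_sk_policy_insert().
 */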
937 int xfrm_register_km(struct xfrm_mgr *km)
939 write_lock_bh(&xfrm_km_lock);
940 list_add_tail(&km->list, &xfrm_km_list);
941 write_unlock_bh(&xfrm_km_lock);
944 EXPORT_SYMBOL(xfrm_register_km);
946 int xfrm_unregister_km(struct xfrm_mgr *km)
948 write_lock_bh(&xfrm_km_lock);
950 write_unlock_bh(&xfrm_km_lock);
953 EXPORT_SYMBOL(xfrm_unregister_km);
955 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
958 if (unlikely(afinfo == NULL))
960 if (unlikely(afinfo->family >= NPROTO))
961 return -EAFNOSUPPORT;
962 write_lock(&xfrm_state_afinfo_lock);
963 if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
966 afinfo->state_bydst = xfrm_state_bydst;
967 afinfo->state_byspi = xfrm_state_byspi;
968 xfrm_state_afinfo[afinfo->family] = afinfo;
970 write_unlock(&xfrm_state_afinfo_lock);
973 EXPORT_SYMBOL(xfrm_state_register_afinfo);
975 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
978 if (unlikely(afinfo == NULL))
980 if (unlikely(afinfo->family >= NPROTO))
981 return -EAFNOSUPPORT;
982 write_lock(&xfrm_state_afinfo_lock);
983 if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
984 if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
987 xfrm_state_afinfo[afinfo->family] = NULL;
988 afinfo->state_byspi = NULL;
989 afinfo->state_bydst = NULL;
992 write_unlock(&xfrm_state_afinfo_lock);
995 EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
997 static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family)
999 struct xfrm_state_afinfo *afinfo;
1000 if (unlikely(family >= NPROTO))
1002 read_lock(&xfrm_state_afinfo_lock);
1003 afinfo = xfrm_state_afinfo[family];
1004 if (likely(afinfo != NULL))
1005 read_lock(&afinfo->lock);
1006 read_unlock(&xfrm_state_afinfo_lock);
1010 static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
1012 if (unlikely(afinfo == NULL))
1014 read_unlock(&afinfo->lock);
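/* Note: xfrm_state_get_afinfo() returns with afinfo->lock held for reading;
 * every caller must pair it with xfrm_state_put_afinfo(), which releases
 * that lock.  This keeps the AF module from unregistering while one of its
 * callbacks is in use.
 */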
1017 /* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
1018 void xfrm_state_delete_tunnel(struct xfrm_state *x)
1021 struct xfrm_state *t = x->tunnel;
1023 if (atomic_read(&t->tunnel_users) == 2)
1024 xfrm_state_delete(t);
1025 atomic_dec(&t->tunnel_users);
1030 EXPORT_SYMBOL(xfrm_state_delete_tunnel);
1032 int xfrm_state_mtu(struct xfrm_state *x, int mtu)
1036 res -= x->props.header_len;
1044 spin_lock_bh(&x->lock);
1045 if (x->km.state == XFRM_STATE_VALID &&
1046 x->type && x->type->get_max_size)
1047 m = x->type->get_max_size(x, m);
1049 m += x->props.header_len;
1050 spin_unlock_bh(&x->lock);
1060 EXPORT_SYMBOL(xfrm_state_mtu);
1062 void __init xfrm_state_init(void)
1066 for (i=0; i<XFRM_DST_HSIZE; i++) {
1067 INIT_LIST_HEAD(&xfrm_state_bydst[i]);
1068 INIT_LIST_HEAD(&xfrm_state_byspi[i]);
1070 INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task, NULL);