6 * Kazunori MIYAZAWA @USAGI
7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
9 * Kazunori MIYAZAWA @USAGI
11 * Split up af-specific portion
12 * Derek Atkins <derek@ihtfp.com> Add the post_input processor
16 #include <linux/err.h>
17 #include <linux/slab.h>
18 #include <linux/kmod.h>
19 #include <linux/list.h>
20 #include <linux/spinlock.h>
21 #include <linux/workqueue.h>
22 #include <linux/notifier.h>
23 #include <linux/netdevice.h>
24 #include <linux/netfilter.h>
25 #include <linux/module.h>
26 #include <linux/cache.h>
27 #include <linux/audit.h>
32 #ifdef CONFIG_XFRM_STATISTICS
36 #include "xfrm_hash.h"
38 #define XFRM_QUEUE_TMO_MIN ((unsigned)(HZ/10))
39 #define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
40 #define XFRM_MAX_QUEUE_LEN 100
43 struct dst_entry *dst_orig;
47 static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
48 static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1]
51 static struct kmem_cache *xfrm_dst_cache __read_mostly;
52 static __read_mostly seqcount_t xfrm_policy_hash_generation;
54 static void xfrm_init_pmtu(struct dst_entry *dst);
55 static int stale_bundle(struct dst_entry *dst);
56 static int xfrm_bundle_ok(struct xfrm_dst *xdst);
57 static void xfrm_policy_queue_process(unsigned long arg);
59 static void __xfrm_policy_link(struct xfrm_policy *pol, int dir);
60 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
63 static inline bool xfrm_pol_hold_rcu(struct xfrm_policy *policy)
65 return atomic_inc_not_zero(&policy->refcnt);
69 __xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
71 const struct flowi4 *fl4 = &fl->u.ip4;
73 return addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
74 addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
75 !((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
76 !((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
77 (fl4->flowi4_proto == sel->proto || !sel->proto) &&
78 (fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
82 __xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
84 const struct flowi6 *fl6 = &fl->u.ip6;
86 return addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
87 addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
88 !((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
89 !((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
90 (fl6->flowi6_proto == sel->proto || !sel->proto) &&
91 (fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
94 bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
95 unsigned short family)
99 return __xfrm4_selector_match(sel, fl);
101 return __xfrm6_selector_match(sel, fl);
106 static const struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
108 const struct xfrm_policy_afinfo *afinfo;
110 if (unlikely(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
113 afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
114 if (unlikely(!afinfo))
119 struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
120 const xfrm_address_t *saddr,
121 const xfrm_address_t *daddr,
124 const struct xfrm_policy_afinfo *afinfo;
125 struct dst_entry *dst;
127 afinfo = xfrm_policy_get_afinfo(family);
128 if (unlikely(afinfo == NULL))
129 return ERR_PTR(-EAFNOSUPPORT);
131 dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr);
137 EXPORT_SYMBOL(__xfrm_dst_lookup);
139 static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
141 xfrm_address_t *prev_saddr,
142 xfrm_address_t *prev_daddr,
145 struct net *net = xs_net(x);
146 xfrm_address_t *saddr = &x->props.saddr;
147 xfrm_address_t *daddr = &x->id.daddr;
148 struct dst_entry *dst;
150 if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
154 if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
159 dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family);
162 if (prev_saddr != saddr)
163 memcpy(prev_saddr, saddr, sizeof(*prev_saddr));
164 if (prev_daddr != daddr)
165 memcpy(prev_daddr, daddr, sizeof(*prev_daddr));
171 static inline unsigned long make_jiffies(long secs)
173 if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
174 return MAX_SCHEDULE_TIMEOUT-1;
179 static void xfrm_policy_timer(unsigned long data)
181 struct xfrm_policy *xp = (struct xfrm_policy *)data;
182 unsigned long now = get_seconds();
183 long next = LONG_MAX;
187 read_lock(&xp->lock);
189 if (unlikely(xp->walk.dead))
192 dir = xfrm_policy_id2dir(xp->index);
194 if (xp->lft.hard_add_expires_seconds) {
195 long tmo = xp->lft.hard_add_expires_seconds +
196 xp->curlft.add_time - now;
202 if (xp->lft.hard_use_expires_seconds) {
203 long tmo = xp->lft.hard_use_expires_seconds +
204 (xp->curlft.use_time ? : xp->curlft.add_time) - now;
210 if (xp->lft.soft_add_expires_seconds) {
211 long tmo = xp->lft.soft_add_expires_seconds +
212 xp->curlft.add_time - now;
215 tmo = XFRM_KM_TIMEOUT;
220 if (xp->lft.soft_use_expires_seconds) {
221 long tmo = xp->lft.soft_use_expires_seconds +
222 (xp->curlft.use_time ? : xp->curlft.add_time) - now;
225 tmo = XFRM_KM_TIMEOUT;
232 km_policy_expired(xp, dir, 0, 0);
233 if (next != LONG_MAX &&
234 !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
238 read_unlock(&xp->lock);
243 read_unlock(&xp->lock);
244 if (!xfrm_policy_delete(xp, dir))
245 km_policy_expired(xp, dir, 1, 0);
249 static struct flow_cache_object *xfrm_policy_flo_get(struct flow_cache_object *flo)
251 struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);
253 if (unlikely(pol->walk.dead))
261 static int xfrm_policy_flo_check(struct flow_cache_object *flo)
263 struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);
265 return !pol->walk.dead;
268 static void xfrm_policy_flo_delete(struct flow_cache_object *flo)
270 xfrm_pol_put(container_of(flo, struct xfrm_policy, flo));
273 static const struct flow_cache_ops xfrm_policy_fc_ops = {
274 .get = xfrm_policy_flo_get,
275 .check = xfrm_policy_flo_check,
276 .delete = xfrm_policy_flo_delete,
279 /* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
283 struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
285 struct xfrm_policy *policy;
287 policy = kzalloc(sizeof(struct xfrm_policy), gfp);
290 write_pnet(&policy->xp_net, net);
291 INIT_LIST_HEAD(&policy->walk.all);
292 INIT_HLIST_NODE(&policy->bydst);
293 INIT_HLIST_NODE(&policy->byidx);
294 rwlock_init(&policy->lock);
295 atomic_set(&policy->refcnt, 1);
296 skb_queue_head_init(&policy->polq.hold_queue);
297 setup_timer(&policy->timer, xfrm_policy_timer,
298 (unsigned long)policy);
299 setup_timer(&policy->polq.hold_timer, xfrm_policy_queue_process,
300 (unsigned long)policy);
301 policy->flo.ops = &xfrm_policy_fc_ops;
305 EXPORT_SYMBOL(xfrm_policy_alloc);
307 static void xfrm_policy_destroy_rcu(struct rcu_head *head)
309 struct xfrm_policy *policy = container_of(head, struct xfrm_policy, rcu);
311 security_xfrm_policy_free(policy->security);
315 /* Destroy xfrm_policy: descendant resources must be released to this moment. */
317 void xfrm_policy_destroy(struct xfrm_policy *policy)
319 BUG_ON(!policy->walk.dead);
321 if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
324 call_rcu(&policy->rcu, xfrm_policy_destroy_rcu);
326 EXPORT_SYMBOL(xfrm_policy_destroy);
328 /* Rule must be locked. Release descendant resources, announce
329 * entry dead. The rule must be unlinked from lists to the moment.
332 static void xfrm_policy_kill(struct xfrm_policy *policy)
334 policy->walk.dead = 1;
336 atomic_inc(&policy->genid);
338 if (del_timer(&policy->polq.hold_timer))
339 xfrm_pol_put(policy);
340 skb_queue_purge(&policy->polq.hold_queue);
342 if (del_timer(&policy->timer))
343 xfrm_pol_put(policy);
345 xfrm_pol_put(policy);
348 static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;
350 static inline unsigned int idx_hash(struct net *net, u32 index)
352 return __idx_hash(index, net->xfrm.policy_idx_hmask);
355 /* calculate policy hash thresholds */
356 static void __get_hash_thresh(struct net *net,
357 unsigned short family, int dir,
358 u8 *dbits, u8 *sbits)
362 *dbits = net->xfrm.policy_bydst[dir].dbits4;
363 *sbits = net->xfrm.policy_bydst[dir].sbits4;
367 *dbits = net->xfrm.policy_bydst[dir].dbits6;
368 *sbits = net->xfrm.policy_bydst[dir].sbits6;
377 static struct hlist_head *policy_hash_bysel(struct net *net,
378 const struct xfrm_selector *sel,
379 unsigned short family, int dir)
381 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
386 __get_hash_thresh(net, family, dir, &dbits, &sbits);
387 hash = __sel_hash(sel, family, hmask, dbits, sbits);
389 if (hash == hmask + 1)
390 return &net->xfrm.policy_inexact[dir];
392 return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
393 lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
396 static struct hlist_head *policy_hash_direct(struct net *net,
397 const xfrm_address_t *daddr,
398 const xfrm_address_t *saddr,
399 unsigned short family, int dir)
401 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
406 __get_hash_thresh(net, family, dir, &dbits, &sbits);
407 hash = __addr_hash(daddr, saddr, family, hmask, dbits, sbits);
409 return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
410 lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
413 static void xfrm_dst_hash_transfer(struct net *net,
414 struct hlist_head *list,
415 struct hlist_head *ndsttable,
416 unsigned int nhashmask,
419 struct hlist_node *tmp, *entry0 = NULL;
420 struct xfrm_policy *pol;
426 hlist_for_each_entry_safe(pol, tmp, list, bydst) {
429 __get_hash_thresh(net, pol->family, dir, &dbits, &sbits);
430 h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
431 pol->family, nhashmask, dbits, sbits);
433 hlist_del_rcu(&pol->bydst);
434 hlist_add_head_rcu(&pol->bydst, ndsttable + h);
439 hlist_del_rcu(&pol->bydst);
440 hlist_add_behind_rcu(&pol->bydst, entry0);
442 entry0 = &pol->bydst;
444 if (!hlist_empty(list)) {
450 static void xfrm_idx_hash_transfer(struct hlist_head *list,
451 struct hlist_head *nidxtable,
452 unsigned int nhashmask)
454 struct hlist_node *tmp;
455 struct xfrm_policy *pol;
457 hlist_for_each_entry_safe(pol, tmp, list, byidx) {
460 h = __idx_hash(pol->index, nhashmask);
461 hlist_add_head(&pol->byidx, nidxtable+h);
465 static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
467 return ((old_hmask + 1) << 1) - 1;
470 static void xfrm_bydst_resize(struct net *net, int dir)
472 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
473 unsigned int nhashmask = xfrm_new_hash_mask(hmask);
474 unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
475 struct hlist_head *ndst = xfrm_hash_alloc(nsize);
476 struct hlist_head *odst;
482 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
483 write_seqcount_begin(&xfrm_policy_hash_generation);
485 odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
486 lockdep_is_held(&net->xfrm.xfrm_policy_lock));
488 odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
489 lockdep_is_held(&net->xfrm.xfrm_policy_lock));
491 for (i = hmask; i >= 0; i--)
492 xfrm_dst_hash_transfer(net, odst + i, ndst, nhashmask, dir);
494 rcu_assign_pointer(net->xfrm.policy_bydst[dir].table, ndst);
495 net->xfrm.policy_bydst[dir].hmask = nhashmask;
497 write_seqcount_end(&xfrm_policy_hash_generation);
498 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
502 xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
505 static void xfrm_byidx_resize(struct net *net, int total)
507 unsigned int hmask = net->xfrm.policy_idx_hmask;
508 unsigned int nhashmask = xfrm_new_hash_mask(hmask);
509 unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
510 struct hlist_head *oidx = net->xfrm.policy_byidx;
511 struct hlist_head *nidx = xfrm_hash_alloc(nsize);
517 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
519 for (i = hmask; i >= 0; i--)
520 xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);
522 net->xfrm.policy_byidx = nidx;
523 net->xfrm.policy_idx_hmask = nhashmask;
525 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
527 xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
530 static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
532 unsigned int cnt = net->xfrm.policy_count[dir];
533 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
538 if ((hmask + 1) < xfrm_policy_hashmax &&
545 static inline int xfrm_byidx_should_resize(struct net *net, int total)
547 unsigned int hmask = net->xfrm.policy_idx_hmask;
549 if ((hmask + 1) < xfrm_policy_hashmax &&
556 void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
558 si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
559 si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
560 si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
561 si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
562 si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
563 si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
564 si->spdhcnt = net->xfrm.policy_idx_hmask;
565 si->spdhmcnt = xfrm_policy_hashmax;
567 EXPORT_SYMBOL(xfrm_spd_getinfo);
569 static DEFINE_MUTEX(hash_resize_mutex);
570 static void xfrm_hash_resize(struct work_struct *work)
572 struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
575 mutex_lock(&hash_resize_mutex);
578 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
579 if (xfrm_bydst_should_resize(net, dir, &total))
580 xfrm_bydst_resize(net, dir);
582 if (xfrm_byidx_should_resize(net, total))
583 xfrm_byidx_resize(net, total);
585 mutex_unlock(&hash_resize_mutex);
588 static void xfrm_hash_rebuild(struct work_struct *work)
590 struct net *net = container_of(work, struct net,
591 xfrm.policy_hthresh.work);
593 struct xfrm_policy *pol;
594 struct xfrm_policy *policy;
595 struct hlist_head *chain;
596 struct hlist_head *odst;
597 struct hlist_node *newpos;
601 u8 lbits4, rbits4, lbits6, rbits6;
603 mutex_lock(&hash_resize_mutex);
605 /* read selector prefixlen thresholds */
607 seq = read_seqbegin(&net->xfrm.policy_hthresh.lock);
609 lbits4 = net->xfrm.policy_hthresh.lbits4;
610 rbits4 = net->xfrm.policy_hthresh.rbits4;
611 lbits6 = net->xfrm.policy_hthresh.lbits6;
612 rbits6 = net->xfrm.policy_hthresh.rbits6;
613 } while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq));
615 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
617 /* reset the bydst and inexact table in all directions */
618 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
619 INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
620 hmask = net->xfrm.policy_bydst[dir].hmask;
621 odst = net->xfrm.policy_bydst[dir].table;
622 for (i = hmask; i >= 0; i--)
623 INIT_HLIST_HEAD(odst + i);
624 if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
625 /* dir out => dst = remote, src = local */
626 net->xfrm.policy_bydst[dir].dbits4 = rbits4;
627 net->xfrm.policy_bydst[dir].sbits4 = lbits4;
628 net->xfrm.policy_bydst[dir].dbits6 = rbits6;
629 net->xfrm.policy_bydst[dir].sbits6 = lbits6;
631 /* dir in/fwd => dst = local, src = remote */
632 net->xfrm.policy_bydst[dir].dbits4 = lbits4;
633 net->xfrm.policy_bydst[dir].sbits4 = rbits4;
634 net->xfrm.policy_bydst[dir].dbits6 = lbits6;
635 net->xfrm.policy_bydst[dir].sbits6 = rbits6;
639 /* re-insert all policies by order of creation */
640 list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
641 if (xfrm_policy_id2dir(policy->index) >= XFRM_POLICY_MAX) {
642 /* skip socket policies */
646 chain = policy_hash_bysel(net, &policy->selector,
648 xfrm_policy_id2dir(policy->index));
649 hlist_for_each_entry(pol, chain, bydst) {
650 if (policy->priority >= pol->priority)
651 newpos = &pol->bydst;
656 hlist_add_behind(&policy->bydst, newpos);
658 hlist_add_head(&policy->bydst, chain);
661 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
663 mutex_unlock(&hash_resize_mutex);
666 void xfrm_policy_hash_rebuild(struct net *net)
668 schedule_work(&net->xfrm.policy_hthresh.work);
670 EXPORT_SYMBOL(xfrm_policy_hash_rebuild);
672 /* Generate new index... KAME seems to generate them ordered by cost
673 * of an absolute inpredictability of ordering of rules. This will not pass. */
674 static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
676 static u32 idx_generator;
679 struct hlist_head *list;
680 struct xfrm_policy *p;
685 idx = (idx_generator | dir);
694 list = net->xfrm.policy_byidx + idx_hash(net, idx);
696 hlist_for_each_entry(p, list, byidx) {
697 if (p->index == idx) {
707 static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
709 u32 *p1 = (u32 *) s1;
710 u32 *p2 = (u32 *) s2;
711 int len = sizeof(struct xfrm_selector) / sizeof(u32);
714 for (i = 0; i < len; i++) {
722 static void xfrm_policy_requeue(struct xfrm_policy *old,
723 struct xfrm_policy *new)
725 struct xfrm_policy_queue *pq = &old->polq;
726 struct sk_buff_head list;
728 if (skb_queue_empty(&pq->hold_queue))
731 __skb_queue_head_init(&list);
733 spin_lock_bh(&pq->hold_queue.lock);
734 skb_queue_splice_init(&pq->hold_queue, &list);
735 if (del_timer(&pq->hold_timer))
737 spin_unlock_bh(&pq->hold_queue.lock);
741 spin_lock_bh(&pq->hold_queue.lock);
742 skb_queue_splice(&list, &pq->hold_queue);
743 pq->timeout = XFRM_QUEUE_TMO_MIN;
744 if (!mod_timer(&pq->hold_timer, jiffies))
746 spin_unlock_bh(&pq->hold_queue.lock);
749 static bool xfrm_policy_mark_match(struct xfrm_policy *policy,
750 struct xfrm_policy *pol)
752 u32 mark = policy->mark.v & policy->mark.m;
754 if (policy->mark.v == pol->mark.v && policy->mark.m == pol->mark.m)
757 if ((mark & pol->mark.m) == pol->mark.v &&
758 policy->priority == pol->priority)
764 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
766 struct net *net = xp_net(policy);
767 struct xfrm_policy *pol;
768 struct xfrm_policy *delpol;
769 struct hlist_head *chain;
770 struct hlist_node *newpos;
772 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
773 chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
776 hlist_for_each_entry(pol, chain, bydst) {
777 if (pol->type == policy->type &&
778 !selector_cmp(&pol->selector, &policy->selector) &&
779 xfrm_policy_mark_match(policy, pol) &&
780 xfrm_sec_ctx_match(pol->security, policy->security) &&
783 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
787 if (policy->priority > pol->priority)
789 } else if (policy->priority >= pol->priority) {
790 newpos = &pol->bydst;
797 hlist_add_behind(&policy->bydst, newpos);
799 hlist_add_head(&policy->bydst, chain);
800 __xfrm_policy_link(policy, dir);
801 atomic_inc(&net->xfrm.flow_cache_genid);
803 /* After previous checking, family can either be AF_INET or AF_INET6 */
804 if (policy->family == AF_INET)
805 rt_genid_bump_ipv4(net);
807 rt_genid_bump_ipv6(net);
810 xfrm_policy_requeue(delpol, policy);
811 __xfrm_policy_unlink(delpol, dir);
813 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir, policy->index);
814 hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
815 policy->curlft.add_time = get_seconds();
816 policy->curlft.use_time = 0;
817 if (!mod_timer(&policy->timer, jiffies + HZ))
818 xfrm_pol_hold(policy);
819 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
822 xfrm_policy_kill(delpol);
823 else if (xfrm_bydst_should_resize(net, dir, NULL))
824 schedule_work(&net->xfrm.policy_hash_work);
828 EXPORT_SYMBOL(xfrm_policy_insert);
830 struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type,
831 int dir, struct xfrm_selector *sel,
832 struct xfrm_sec_ctx *ctx, int delete,
835 struct xfrm_policy *pol, *ret;
836 struct hlist_head *chain;
839 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
840 chain = policy_hash_bysel(net, sel, sel->family, dir);
842 hlist_for_each_entry(pol, chain, bydst) {
843 if (pol->type == type &&
844 (mark & pol->mark.m) == pol->mark.v &&
845 !selector_cmp(sel, &pol->selector) &&
846 xfrm_sec_ctx_match(ctx, pol->security)) {
849 *err = security_xfrm_policy_delete(
852 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
855 __xfrm_policy_unlink(pol, dir);
861 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
864 xfrm_policy_kill(ret);
867 EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
869 struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
870 int dir, u32 id, int delete, int *err)
872 struct xfrm_policy *pol, *ret;
873 struct hlist_head *chain;
876 if (xfrm_policy_id2dir(id) != dir)
880 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
881 chain = net->xfrm.policy_byidx + idx_hash(net, id);
883 hlist_for_each_entry(pol, chain, byidx) {
884 if (pol->type == type && pol->index == id &&
885 (mark & pol->mark.m) == pol->mark.v) {
888 *err = security_xfrm_policy_delete(
891 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
894 __xfrm_policy_unlink(pol, dir);
900 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
903 xfrm_policy_kill(ret);
906 EXPORT_SYMBOL(xfrm_policy_byid);
908 #ifdef CONFIG_SECURITY_NETWORK_XFRM
910 xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
914 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
915 struct xfrm_policy *pol;
918 hlist_for_each_entry(pol,
919 &net->xfrm.policy_inexact[dir], bydst) {
920 if (pol->type != type)
922 err = security_xfrm_policy_delete(pol->security);
924 xfrm_audit_policy_delete(pol, 0, task_valid);
928 for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
929 hlist_for_each_entry(pol,
930 net->xfrm.policy_bydst[dir].table + i,
932 if (pol->type != type)
934 err = security_xfrm_policy_delete(
937 xfrm_audit_policy_delete(pol, 0,
948 xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
954 int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
956 int dir, err = 0, cnt = 0;
958 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
960 err = xfrm_policy_flush_secctx_check(net, type, task_valid);
964 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
965 struct xfrm_policy *pol;
969 hlist_for_each_entry(pol,
970 &net->xfrm.policy_inexact[dir], bydst) {
971 if (pol->type != type)
973 __xfrm_policy_unlink(pol, dir);
974 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
977 xfrm_audit_policy_delete(pol, 1, task_valid);
979 xfrm_policy_kill(pol);
981 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
985 for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
987 hlist_for_each_entry(pol,
988 net->xfrm.policy_bydst[dir].table + i,
990 if (pol->type != type)
992 __xfrm_policy_unlink(pol, dir);
993 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
996 xfrm_audit_policy_delete(pol, 1, task_valid);
997 xfrm_policy_kill(pol);
999 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1008 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1011 xfrm_garbage_collect(net);
1015 EXPORT_SYMBOL(xfrm_policy_flush);
1017 int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
1018 int (*func)(struct xfrm_policy *, int, int, void*),
1021 struct xfrm_policy *pol;
1022 struct xfrm_policy_walk_entry *x;
1025 if (walk->type >= XFRM_POLICY_TYPE_MAX &&
1026 walk->type != XFRM_POLICY_TYPE_ANY)
1029 if (list_empty(&walk->walk.all) && walk->seq != 0)
1032 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1033 if (list_empty(&walk->walk.all))
1034 x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
1036 x = list_first_entry(&walk->walk.all,
1037 struct xfrm_policy_walk_entry, all);
1039 list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
1042 pol = container_of(x, struct xfrm_policy, walk);
1043 if (walk->type != XFRM_POLICY_TYPE_ANY &&
1044 walk->type != pol->type)
1046 error = func(pol, xfrm_policy_id2dir(pol->index),
1049 list_move_tail(&walk->walk.all, &x->all);
1054 if (walk->seq == 0) {
1058 list_del_init(&walk->walk.all);
1060 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1063 EXPORT_SYMBOL(xfrm_policy_walk);
1065 void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
1067 INIT_LIST_HEAD(&walk->walk.all);
1068 walk->walk.dead = 1;
1072 EXPORT_SYMBOL(xfrm_policy_walk_init);
1074 void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net)
1076 if (list_empty(&walk->walk.all))
1079 spin_lock_bh(&net->xfrm.xfrm_policy_lock); /*FIXME where is net? */
1080 list_del(&walk->walk.all);
1081 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1083 EXPORT_SYMBOL(xfrm_policy_walk_done);
1086 * Find policy to apply to this flow.
1088 * Returns 0 if policy found, else an -errno.
1090 static int xfrm_policy_match(const struct xfrm_policy *pol,
1091 const struct flowi *fl,
1092 u8 type, u16 family, int dir)
1094 const struct xfrm_selector *sel = &pol->selector;
1098 if (pol->family != family ||
1099 (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
1103 match = xfrm_selector_match(sel, fl, family);
1105 ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid,
1111 static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
1112 const struct flowi *fl,
1116 struct xfrm_policy *pol, *ret;
1117 const xfrm_address_t *daddr, *saddr;
1118 struct hlist_head *chain;
1119 unsigned int sequence;
1122 daddr = xfrm_flowi_daddr(fl, family);
1123 saddr = xfrm_flowi_saddr(fl, family);
1124 if (unlikely(!daddr || !saddr))
1130 sequence = read_seqcount_begin(&xfrm_policy_hash_generation);
1131 chain = policy_hash_direct(net, daddr, saddr, family, dir);
1132 } while (read_seqcount_retry(&xfrm_policy_hash_generation, sequence));
1136 hlist_for_each_entry_rcu(pol, chain, bydst) {
1137 err = xfrm_policy_match(pol, fl, type, family, dir);
1147 priority = ret->priority;
1151 chain = &net->xfrm.policy_inexact[dir];
1152 hlist_for_each_entry_rcu(pol, chain, bydst) {
1153 if ((pol->priority >= priority) && ret)
1156 err = xfrm_policy_match(pol, fl, type, family, dir);
1170 if (read_seqcount_retry(&xfrm_policy_hash_generation, sequence))
1173 if (ret && !xfrm_pol_hold_rcu(ret))
1181 static struct xfrm_policy *
1182 __xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir)
1184 #ifdef CONFIG_XFRM_SUB_POLICY
1185 struct xfrm_policy *pol;
1187 pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, dir);
1191 return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir);
1194 static int flow_to_policy_dir(int dir)
1196 if (XFRM_POLICY_IN == FLOW_DIR_IN &&
1197 XFRM_POLICY_OUT == FLOW_DIR_OUT &&
1198 XFRM_POLICY_FWD == FLOW_DIR_FWD)
1204 return XFRM_POLICY_IN;
1206 return XFRM_POLICY_OUT;
1208 return XFRM_POLICY_FWD;
1212 static struct flow_cache_object *
1213 xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family,
1214 u8 dir, struct flow_cache_object *old_obj, void *ctx)
1216 struct xfrm_policy *pol;
1219 xfrm_pol_put(container_of(old_obj, struct xfrm_policy, flo));
1221 pol = __xfrm_policy_lookup(net, fl, family, flow_to_policy_dir(dir));
1222 if (IS_ERR_OR_NULL(pol))
1223 return ERR_CAST(pol);
1225 /* Resolver returns two references:
1226 * one for cache and one for caller of flow_cache_lookup() */
1232 static inline int policy_to_flow_dir(int dir)
1234 if (XFRM_POLICY_IN == FLOW_DIR_IN &&
1235 XFRM_POLICY_OUT == FLOW_DIR_OUT &&
1236 XFRM_POLICY_FWD == FLOW_DIR_FWD)
1240 case XFRM_POLICY_IN:
1242 case XFRM_POLICY_OUT:
1243 return FLOW_DIR_OUT;
1244 case XFRM_POLICY_FWD:
1245 return FLOW_DIR_FWD;
1249 static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
1250 const struct flowi *fl, u16 family)
1252 struct xfrm_policy *pol;
1256 pol = rcu_dereference(sk->sk_policy[dir]);
1258 bool match = xfrm_selector_match(&pol->selector, fl, family);
1262 if ((sk->sk_mark & pol->mark.m) != pol->mark.v) {
1266 err = security_xfrm_policy_lookup(pol->security,
1268 policy_to_flow_dir(dir));
1270 if (!xfrm_pol_hold_rcu(pol))
1272 } else if (err == -ESRCH) {
1285 static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
1287 struct net *net = xp_net(pol);
1289 list_add(&pol->walk.all, &net->xfrm.policy_all);
1290 net->xfrm.policy_count[dir]++;
1294 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
1297 struct net *net = xp_net(pol);
1299 if (list_empty(&pol->walk.all))
1302 /* Socket policies are not hashed. */
1303 if (!hlist_unhashed(&pol->bydst)) {
1304 hlist_del_rcu(&pol->bydst);
1305 hlist_del(&pol->byidx);
1308 list_del_init(&pol->walk.all);
1309 net->xfrm.policy_count[dir]--;
1314 static void xfrm_sk_policy_link(struct xfrm_policy *pol, int dir)
1316 __xfrm_policy_link(pol, XFRM_POLICY_MAX + dir);
1319 static void xfrm_sk_policy_unlink(struct xfrm_policy *pol, int dir)
1321 __xfrm_policy_unlink(pol, XFRM_POLICY_MAX + dir);
1324 int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
1326 struct net *net = xp_net(pol);
1328 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1329 pol = __xfrm_policy_unlink(pol, dir);
1330 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1332 xfrm_policy_kill(pol);
1337 EXPORT_SYMBOL(xfrm_policy_delete);
1339 int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
1341 struct net *net = xp_net(pol);
1342 struct xfrm_policy *old_pol;
1344 #ifdef CONFIG_XFRM_SUB_POLICY
1345 if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
1349 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1350 old_pol = rcu_dereference_protected(sk->sk_policy[dir],
1351 lockdep_is_held(&net->xfrm.xfrm_policy_lock));
1353 pol->curlft.add_time = get_seconds();
1354 pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0);
1355 xfrm_sk_policy_link(pol, dir);
1357 rcu_assign_pointer(sk->sk_policy[dir], pol);
1360 xfrm_policy_requeue(old_pol, pol);
1362 /* Unlinking succeeds always. This is the only function
1363 * allowed to delete or replace socket policy.
1365 xfrm_sk_policy_unlink(old_pol, dir);
1367 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1370 xfrm_policy_kill(old_pol);
1375 static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
1377 struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);
1378 struct net *net = xp_net(old);
1381 newp->selector = old->selector;
1382 if (security_xfrm_policy_clone(old->security,
1385 return NULL; /* ENOMEM */
1387 newp->lft = old->lft;
1388 newp->curlft = old->curlft;
1389 newp->mark = old->mark;
1390 newp->action = old->action;
1391 newp->flags = old->flags;
1392 newp->xfrm_nr = old->xfrm_nr;
1393 newp->index = old->index;
1394 newp->type = old->type;
1395 memcpy(newp->xfrm_vec, old->xfrm_vec,
1396 newp->xfrm_nr*sizeof(struct xfrm_tmpl));
1397 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1398 xfrm_sk_policy_link(newp, dir);
1399 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1405 int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
1407 const struct xfrm_policy *p;
1408 struct xfrm_policy *np;
1412 for (i = 0; i < 2; i++) {
1413 p = rcu_dereference(osk->sk_policy[i]);
1415 np = clone_policy(p, i);
1416 if (unlikely(!np)) {
1420 rcu_assign_pointer(sk->sk_policy[i], np);
1428 xfrm_get_saddr(struct net *net, int oif, xfrm_address_t *local,
1429 xfrm_address_t *remote, unsigned short family)
1432 const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
1434 if (unlikely(afinfo == NULL))
1436 err = afinfo->get_saddr(net, oif, local, remote);
1441 /* Resolve list of templates for the flow, given policy. */
1444 xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
1445 struct xfrm_state **xfrm, unsigned short family)
1447 struct net *net = xp_net(policy);
1450 xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
1451 xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
1454 for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
1455 struct xfrm_state *x;
1456 xfrm_address_t *remote = daddr;
1457 xfrm_address_t *local = saddr;
1458 struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];
1460 if (tmpl->mode == XFRM_MODE_TUNNEL ||
1461 tmpl->mode == XFRM_MODE_BEET) {
1462 remote = &tmpl->id.daddr;
1463 local = &tmpl->saddr;
1464 if (xfrm_addr_any(local, tmpl->encap_family)) {
1465 error = xfrm_get_saddr(net, fl->flowi_oif,
1467 tmpl->encap_family);
1474 x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);
1476 if (x && x->km.state == XFRM_STATE_VALID) {
1483 error = (x->km.state == XFRM_STATE_ERROR ?
1486 } else if (error == -ESRCH) {
1490 if (!tmpl->optional)
1496 for (nx--; nx >= 0; nx--)
1497 xfrm_state_put(xfrm[nx]);
1502 xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
1503 struct xfrm_state **xfrm, unsigned short family)
1505 struct xfrm_state *tp[XFRM_MAX_DEPTH];
1506 struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
1512 for (i = 0; i < npols; i++) {
1513 if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
1518 ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
1526 /* found states are sorted for outbound processing */
1528 xfrm_state_sort(xfrm, tpp, cnx, family);
1533 for (cnx--; cnx >= 0; cnx--)
1534 xfrm_state_put(tpp[cnx]);
1539 static int xfrm_get_tos(const struct flowi *fl, int family)
1541 const struct xfrm_policy_afinfo *afinfo;
1544 afinfo = xfrm_policy_get_afinfo(family);
1545 tos = afinfo ? afinfo->get_tos(fl) : 0;
1552 static struct flow_cache_object *xfrm_bundle_flo_get(struct flow_cache_object *flo)
1554 struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
1555 struct dst_entry *dst = &xdst->u.dst;
1557 if (xdst->route == NULL) {
1558 /* Dummy bundle - if it has xfrms we were not
1559 * able to build bundle as template resolution failed.
1560 * It means we need to try again resolving. */
1561 if (xdst->num_xfrms > 0)
1563 } else if (dst->flags & DST_XFRM_QUEUE) {
1567 if (stale_bundle(dst))
1575 static int xfrm_bundle_flo_check(struct flow_cache_object *flo)
1577 struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
1578 struct dst_entry *dst = &xdst->u.dst;
1582 if (stale_bundle(dst))
1588 static void xfrm_bundle_flo_delete(struct flow_cache_object *flo)
1590 struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
1591 struct dst_entry *dst = &xdst->u.dst;
1596 static const struct flow_cache_ops xfrm_bundle_fc_ops = {
1597 .get = xfrm_bundle_flo_get,
1598 .check = xfrm_bundle_flo_check,
1599 .delete = xfrm_bundle_flo_delete,
1602 static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
1604 const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
1605 struct dst_ops *dst_ops;
1606 struct xfrm_dst *xdst;
1609 return ERR_PTR(-EINVAL);
1613 dst_ops = &net->xfrm.xfrm4_dst_ops;
1615 #if IS_ENABLED(CONFIG_IPV6)
1617 dst_ops = &net->xfrm.xfrm6_dst_ops;
1623 xdst = dst_alloc(dst_ops, NULL, 0, DST_OBSOLETE_NONE, 0);
1626 struct dst_entry *dst = &xdst->u.dst;
1628 memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst));
1629 xdst->flo.ops = &xfrm_bundle_fc_ops;
1631 xdst = ERR_PTR(-ENOBUFS);
1638 static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
1641 const struct xfrm_policy_afinfo *afinfo =
1642 xfrm_policy_get_afinfo(dst->ops->family);
1648 err = afinfo->init_path(path, dst, nfheader_len);
1655 static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
1656 const struct flowi *fl)
1658 const struct xfrm_policy_afinfo *afinfo =
1659 xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
1665 err = afinfo->fill_dst(xdst, dev, fl);
1673 /* Allocate chain of dst_entry's, attach known xfrm's, calculate
1674 * all the metrics... Shortly, bundle a bundle.
1677 static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
1678 struct xfrm_state **xfrm, int nx,
1679 const struct flowi *fl,
1680 struct dst_entry *dst)
1682 struct net *net = xp_net(policy);
1683 unsigned long now = jiffies;
1684 struct net_device *dev;
1685 struct xfrm_mode *inner_mode;
1686 struct dst_entry *dst_prev = NULL;
1687 struct dst_entry *dst0 = NULL;
1691 int nfheader_len = 0;
1692 int trailer_len = 0;
1694 int family = policy->selector.family;
1695 xfrm_address_t saddr, daddr;
1697 xfrm_flowi_addr_get(fl, &saddr, &daddr, family);
1699 tos = xfrm_get_tos(fl, family);
1703 for (; i < nx; i++) {
1704 struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
1705 struct dst_entry *dst1 = &xdst->u.dst;
1707 err = PTR_ERR(xdst);
1713 if (xfrm[i]->sel.family == AF_UNSPEC) {
1714 inner_mode = xfrm_ip2inner_mode(xfrm[i],
1715 xfrm_af2proto(family));
1717 err = -EAFNOSUPPORT;
1722 inner_mode = xfrm[i]->inner_mode;
1727 dst_prev->child = dst_clone(dst1);
1728 dst1->flags |= DST_NOHASH;
1732 dst_copy_metrics(dst1, dst);
1734 if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
1735 family = xfrm[i]->props.family;
1736 dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif,
1737 &saddr, &daddr, family);
1744 dst1->xfrm = xfrm[i];
1745 xdst->xfrm_genid = xfrm[i]->genid;
1747 dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
1748 dst1->flags |= DST_HOST;
1749 dst1->lastuse = now;
1751 dst1->input = dst_discard;
1752 dst1->output = inner_mode->afinfo->output;
1754 dst1->next = dst_prev;
1757 header_len += xfrm[i]->props.header_len;
1758 if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
1759 nfheader_len += xfrm[i]->props.header_len;
1760 trailer_len += xfrm[i]->props.trailer_len;
1763 dst_prev->child = dst;
1771 xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);
1772 xfrm_init_pmtu(dst_prev);
1774 for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) {
1775 struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev;
1777 err = xfrm_fill_dst(xdst, dev, fl);
1781 dst_prev->header_len = header_len;
1782 dst_prev->trailer_len = trailer_len;
1783 header_len -= xdst->u.dst.xfrm->props.header_len;
1784 trailer_len -= xdst->u.dst.xfrm->props.trailer_len;
1792 xfrm_state_put(xfrm[i]);
1796 dst0 = ERR_PTR(err);
1800 #ifdef CONFIG_XFRM_SUB_POLICY
1801 static int xfrm_dst_alloc_copy(void **target, const void *src, int size)
1804 *target = kmalloc(size, GFP_ATOMIC);
1809 memcpy(*target, src, size);
1814 static int xfrm_dst_update_parent(struct dst_entry *dst,
1815 const struct xfrm_selector *sel)
1817 #ifdef CONFIG_XFRM_SUB_POLICY
1818 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
1819 return xfrm_dst_alloc_copy((void **)&(xdst->partner),
1826 static int xfrm_dst_update_origin(struct dst_entry *dst,
1827 const struct flowi *fl)
1829 #ifdef CONFIG_XFRM_SUB_POLICY
1830 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
1831 return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl));
1837 static int xfrm_expand_policies(const struct flowi *fl, u16 family,
1838 struct xfrm_policy **pols,
1839 int *num_pols, int *num_xfrms)
1843 if (*num_pols == 0 || !pols[0]) {
1848 if (IS_ERR(pols[0]))
1849 return PTR_ERR(pols[0]);
1851 *num_xfrms = pols[0]->xfrm_nr;
1853 #ifdef CONFIG_XFRM_SUB_POLICY
1854 if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW &&
1855 pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
1856 pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
1857 XFRM_POLICY_TYPE_MAIN,
1861 if (IS_ERR(pols[1])) {
1862 xfrm_pols_put(pols, *num_pols);
1863 return PTR_ERR(pols[1]);
1866 (*num_xfrms) += pols[1]->xfrm_nr;
1870 for (i = 0; i < *num_pols; i++) {
1871 if (pols[i]->action != XFRM_POLICY_ALLOW) {
1881 static struct xfrm_dst *
1882 xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
1883 const struct flowi *fl, u16 family,
1884 struct dst_entry *dst_orig)
1886 struct net *net = xp_net(pols[0]);
1887 struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
1888 struct dst_entry *dst;
1889 struct xfrm_dst *xdst;
1892 /* Try to instantiate a bundle */
1893 err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
1895 if (err != 0 && err != -EAGAIN)
1896 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
1897 return ERR_PTR(err);
1900 dst = xfrm_bundle_create(pols[0], xfrm, err, fl, dst_orig);
1902 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
1903 return ERR_CAST(dst);
1906 xdst = (struct xfrm_dst *)dst;
1907 xdst->num_xfrms = err;
1909 err = xfrm_dst_update_parent(dst, &pols[1]->selector);
1911 err = xfrm_dst_update_origin(dst, fl);
1912 if (unlikely(err)) {
1914 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
1915 return ERR_PTR(err);
1918 xdst->num_pols = num_pols;
1919 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
1920 xdst->policy_genid = atomic_read(&pols[0]->genid);
1925 static void xfrm_policy_queue_process(unsigned long arg)
1927 struct sk_buff *skb;
1929 struct dst_entry *dst;
1930 struct xfrm_policy *pol = (struct xfrm_policy *)arg;
1931 struct net *net = xp_net(pol);
1932 struct xfrm_policy_queue *pq = &pol->polq;
1934 struct sk_buff_head list;
1936 spin_lock(&pq->hold_queue.lock);
1937 skb = skb_peek(&pq->hold_queue);
1939 spin_unlock(&pq->hold_queue.lock);
1944 xfrm_decode_session(skb, &fl, dst->ops->family);
1945 spin_unlock(&pq->hold_queue.lock);
1947 dst_hold(dst->path);
1948 dst = xfrm_lookup(net, dst->path, &fl, sk, 0);
1952 if (dst->flags & DST_XFRM_QUEUE) {
1955 if (pq->timeout >= XFRM_QUEUE_TMO_MAX)
1958 pq->timeout = pq->timeout << 1;
1959 if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout))
1966 __skb_queue_head_init(&list);
1968 spin_lock(&pq->hold_queue.lock);
1970 skb_queue_splice_init(&pq->hold_queue, &list);
1971 spin_unlock(&pq->hold_queue.lock);
1973 while (!skb_queue_empty(&list)) {
1974 skb = __skb_dequeue(&list);
1976 xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family);
1977 dst_hold(skb_dst(skb)->path);
1978 dst = xfrm_lookup(net, skb_dst(skb)->path, &fl, skb->sk, 0);
1986 skb_dst_set(skb, dst);
1988 dst_output(net, skb->sk, skb);
1997 skb_queue_purge(&pq->hold_queue);
2001 static int xdst_queue_output(struct net *net, struct sock *sk, struct sk_buff *skb)
2003 unsigned long sched_next;
2004 struct dst_entry *dst = skb_dst(skb);
2005 struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
2006 struct xfrm_policy *pol = xdst->pols[0];
2007 struct xfrm_policy_queue *pq = &pol->polq;
2009 if (unlikely(skb_fclone_busy(sk, skb))) {
2014 if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
2021 spin_lock_bh(&pq->hold_queue.lock);
2024 pq->timeout = XFRM_QUEUE_TMO_MIN;
2026 sched_next = jiffies + pq->timeout;
2028 if (del_timer(&pq->hold_timer)) {
2029 if (time_before(pq->hold_timer.expires, sched_next))
2030 sched_next = pq->hold_timer.expires;
2034 __skb_queue_tail(&pq->hold_queue, skb);
2035 if (!mod_timer(&pq->hold_timer, sched_next))
2038 spin_unlock_bh(&pq->hold_queue.lock);
2043 static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
2044 struct xfrm_flo *xflo,
2045 const struct flowi *fl,
2050 struct net_device *dev;
2051 struct dst_entry *dst;
2052 struct dst_entry *dst1;
2053 struct xfrm_dst *xdst;
2055 xdst = xfrm_alloc_dst(net, family);
2059 if (!(xflo->flags & XFRM_LOOKUP_QUEUE) ||
2060 net->xfrm.sysctl_larval_drop ||
2064 dst = xflo->dst_orig;
2065 dst1 = &xdst->u.dst;
2069 dst_copy_metrics(dst1, dst);
2071 dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
2072 dst1->flags |= DST_HOST | DST_XFRM_QUEUE;
2073 dst1->lastuse = jiffies;
2075 dst1->input = dst_discard;
2076 dst1->output = xdst_queue_output;
2082 xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);
2089 err = xfrm_fill_dst(xdst, dev, fl);
2098 xdst = ERR_PTR(err);
2102 static struct flow_cache_object *
2103 xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
2104 struct flow_cache_object *oldflo, void *ctx)
2106 struct xfrm_flo *xflo = (struct xfrm_flo *)ctx;
2107 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
2108 struct xfrm_dst *xdst, *new_xdst;
2109 int num_pols = 0, num_xfrms = 0, i, err, pol_dead;
2111 /* Check if the policies from old bundle are usable */
2114 xdst = container_of(oldflo, struct xfrm_dst, flo);
2115 num_pols = xdst->num_pols;
2116 num_xfrms = xdst->num_xfrms;
2118 for (i = 0; i < num_pols; i++) {
2119 pols[i] = xdst->pols[i];
2120 pol_dead |= pols[i]->walk.dead;
2123 dst_free(&xdst->u.dst);
2131 /* Resolve policies to use if we couldn't get them from
2132 * previous cache entry */
2135 pols[0] = __xfrm_policy_lookup(net, fl, family,
2136 flow_to_policy_dir(dir));
2137 err = xfrm_expand_policies(fl, family, pols,
2138 &num_pols, &num_xfrms);
2144 goto make_dummy_bundle;
2147 new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
2149 if (IS_ERR(new_xdst)) {
2150 err = PTR_ERR(new_xdst);
2154 goto make_dummy_bundle;
2155 dst_hold(&xdst->u.dst);
2157 } else if (new_xdst == NULL) {
2160 goto make_dummy_bundle;
2161 xdst->num_xfrms = 0;
2162 dst_hold(&xdst->u.dst);
2166 /* Kill the previous bundle */
2168 /* The policies were stolen for newly generated bundle */
2170 dst_free(&xdst->u.dst);
2173 /* Flow cache does not have reference, it dst_free()'s,
2174 * but we do need to return one reference for original caller */
2175 dst_hold(&new_xdst->u.dst);
2176 return &new_xdst->flo;
2179 /* We found policies, but there's no bundles to instantiate:
2180 * either because the policy blocks, has no transformations or
2181 * we could not build template (no xfrm_states).*/
2182 xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family);
2184 xfrm_pols_put(pols, num_pols);
2185 return ERR_CAST(xdst);
2187 xdst->num_pols = num_pols;
2188 xdst->num_xfrms = num_xfrms;
2189 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
2191 dst_hold(&xdst->u.dst);
2195 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
2198 dst_free(&xdst->u.dst);
2200 xfrm_pols_put(pols, num_pols);
2201 return ERR_PTR(err);
2204 static struct dst_entry *make_blackhole(struct net *net, u16 family,
2205 struct dst_entry *dst_orig)
2207 const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2208 struct dst_entry *ret;
2211 dst_release(dst_orig);
2212 return ERR_PTR(-EINVAL);
2214 ret = afinfo->blackhole_route(net, dst_orig);
2221 /* Main function: finds/creates a bundle for given flow.
2223 * At the moment we eat a raw IP route. Mostly to speed up lookups
2224 * on interfaces with disabled IPsec.
2226 struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
2227 const struct flowi *fl,
2228 const struct sock *sk, int flags)
2230 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
2231 struct flow_cache_object *flo;
2232 struct xfrm_dst *xdst;
2233 struct dst_entry *dst, *route;
2234 u16 family = dst_orig->ops->family;
2235 u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
2236 int i, err, num_pols, num_xfrms = 0, drop_pols = 0;
2242 sk = sk_const_to_full_sk(sk);
2243 if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
2245 pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family);
2246 err = xfrm_expand_policies(fl, family, pols,
2247 &num_pols, &num_xfrms);
2252 if (num_xfrms <= 0) {
2253 drop_pols = num_pols;
2257 xdst = xfrm_resolve_and_create_bundle(
2261 xfrm_pols_put(pols, num_pols);
2262 err = PTR_ERR(xdst);
2264 } else if (xdst == NULL) {
2266 drop_pols = num_pols;
2270 dst_hold(&xdst->u.dst);
2271 xdst->u.dst.flags |= DST_NOCACHE;
2272 route = xdst->route;
2277 struct xfrm_flo xflo;
2279 xflo.dst_orig = dst_orig;
2282 /* To accelerate a bit... */
2283 if ((dst_orig->flags & DST_NOXFRM) ||
2284 !net->xfrm.policy_count[XFRM_POLICY_OUT])
2287 flo = flow_cache_lookup(net, fl, family, dir,
2288 xfrm_bundle_lookup, &xflo);
2295 xdst = container_of(flo, struct xfrm_dst, flo);
2297 num_pols = xdst->num_pols;
2298 num_xfrms = xdst->num_xfrms;
2299 memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols);
2300 route = xdst->route;
2304 if (route == NULL && num_xfrms > 0) {
2305 /* The only case when xfrm_bundle_lookup() returns a
2306 * bundle with null route, is when the template could
2307 * not be resolved. It means policies are there, but
2308 * bundle could not be created, since we don't yet
2309 * have the xfrm_state's. We need to wait for KM to
2310 * negotiate new SA's or bail out with error.*/
2311 if (net->xfrm.sysctl_larval_drop) {
2312 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
2319 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
2327 if ((flags & XFRM_LOOKUP_ICMP) &&
2328 !(pols[0]->flags & XFRM_POLICY_ICMP)) {
2333 for (i = 0; i < num_pols; i++)
2334 pols[i]->curlft.use_time = get_seconds();
2336 if (num_xfrms < 0) {
2337 /* Prohibit the flow */
2338 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
2341 } else if (num_xfrms > 0) {
2342 /* Flow transformed */
2343 dst_release(dst_orig);
2345 /* Flow passes untransformed */
2350 xfrm_pols_put(pols, drop_pols);
2351 if (dst && dst->xfrm &&
2352 dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
2353 dst->flags |= DST_XFRM_TUNNEL;
2357 if (!(flags & XFRM_LOOKUP_ICMP)) {
2365 if (!(flags & XFRM_LOOKUP_KEEP_DST_REF))
2366 dst_release(dst_orig);
2367 xfrm_pols_put(pols, drop_pols);
2368 return ERR_PTR(err);
2370 EXPORT_SYMBOL(xfrm_lookup);
2372 /* Callers of xfrm_lookup_route() must ensure a call to dst_output().
2373 * Otherwise we may send out blackholed packets.
2375 struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
2376 const struct flowi *fl,
2377 const struct sock *sk, int flags)
2379 struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
2380 flags | XFRM_LOOKUP_QUEUE |
2381 XFRM_LOOKUP_KEEP_DST_REF);
2383 if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE)
2384 return make_blackhole(net, dst_orig->ops->family, dst_orig);
2388 EXPORT_SYMBOL(xfrm_lookup_route);
2391 xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
2393 struct xfrm_state *x;
2395 if (!skb->sp || idx < 0 || idx >= skb->sp->len)
2397 x = skb->sp->xvec[idx];
2398 if (!x->type->reject)
2400 return x->type->reject(x, skb, fl);
2403 /* When skb is transformed back to its "native" form, we have to
2404 * check policy restrictions. At the moment we make this in maximally
2405 * stupid way. Shame on me. :-) Of course, connected sockets must
2406 * have policy cached at them.
2410 xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
2411 unsigned short family)
2413 if (xfrm_state_kern(x))
2414 return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
2415 return x->id.proto == tmpl->id.proto &&
2416 (x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
2417 (x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
2418 x->props.mode == tmpl->mode &&
2419 (tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
2420 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
2421 !(x->props.mode != XFRM_MODE_TRANSPORT &&
2422 xfrm_state_addr_cmp(tmpl, x, family));
2426 * 0 or more than 0 is returned when validation is succeeded (either bypass
2427 * because of optional transport mode, or next index of the mathced secpath
2428 * state with the template.
2429 * -1 is returned when no matching template is found.
2430 * Otherwise "-2 - errored_index" is returned.
2433 xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
2434 unsigned short family)
2438 if (tmpl->optional) {
2439 if (tmpl->mode == XFRM_MODE_TRANSPORT)
2443 for (; idx < sp->len; idx++) {
2444 if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
2446 if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
2455 int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
2456 unsigned int family, int reverse)
2458 const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2461 if (unlikely(afinfo == NULL))
2462 return -EAFNOSUPPORT;
2464 afinfo->decode_session(skb, fl, reverse);
2465 err = security_xfrm_decode_session(skb, &fl->flowi_secid);
2469 EXPORT_SYMBOL(__xfrm_decode_session);
2471 static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
2473 for (; k < sp->len; k++) {
2474 if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
2483 int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
2484 unsigned short family)
2486 struct net *net = dev_net(skb->dev);
2487 struct xfrm_policy *pol;
2488 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
2497 reverse = dir & ~XFRM_POLICY_MASK;
2498 dir &= XFRM_POLICY_MASK;
2499 fl_dir = policy_to_flow_dir(dir);
2501 if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
2502 XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
2506 nf_nat_decode_session(skb, &fl, family);
2508 /* First, check used SA against their selectors. */
2512 for (i = skb->sp->len-1; i >= 0; i--) {
2513 struct xfrm_state *x = skb->sp->xvec[i];
2514 if (!xfrm_selector_match(&x->sel, &fl, family)) {
2515 XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
2522 sk = sk_to_full_sk(sk);
2523 if (sk && sk->sk_policy[dir]) {
2524 pol = xfrm_sk_policy_lookup(sk, dir, &fl, family);
2526 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2532 struct flow_cache_object *flo;
2534 flo = flow_cache_lookup(net, &fl, family, fl_dir,
2535 xfrm_policy_lookup, NULL);
2536 if (IS_ERR_OR_NULL(flo))
2537 pol = ERR_CAST(flo);
2539 pol = container_of(flo, struct xfrm_policy, flo);
2543 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2548 if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) {
2549 xfrm_secpath_reject(xerr_idx, skb, &fl);
2550 XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
2556 pol->curlft.use_time = get_seconds();
2560 #ifdef CONFIG_XFRM_SUB_POLICY
2561 if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
2562 pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
2566 if (IS_ERR(pols[1])) {
2567 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2570 pols[1]->curlft.use_time = get_seconds();
2576 if (pol->action == XFRM_POLICY_ALLOW) {
2577 struct sec_path *sp;
2578 static struct sec_path dummy;
2579 struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
2580 struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
2581 struct xfrm_tmpl **tpp = tp;
2585 if ((sp = skb->sp) == NULL)
2588 for (pi = 0; pi < npols; pi++) {
2589 if (pols[pi] != pol &&
2590 pols[pi]->action != XFRM_POLICY_ALLOW) {
2591 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
2594 if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
2595 XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
2598 for (i = 0; i < pols[pi]->xfrm_nr; i++)
2599 tpp[ti++] = &pols[pi]->xfrm_vec[i];
2603 xfrm_tmpl_sort(stp, tpp, xfrm_nr, family, net);
2607 /* For each tunnel xfrm, find the first matching tmpl.
2608 * For each tmpl before that, find corresponding xfrm.
2609 * Order is _important_. Later we will implement
2610 * some barriers, but at the moment barriers
2611 * are implied between each two transformations.
2613 for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
2614 k = xfrm_policy_ok(tpp[i], sp, k, family);
2617 /* "-2 - errored_index" returned */
2619 XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
2624 if (secpath_has_nontransport(sp, k, &xerr_idx)) {
2625 XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
2629 xfrm_pols_put(pols, npols);
2632 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
2635 xfrm_secpath_reject(xerr_idx, skb, &fl);
2637 xfrm_pols_put(pols, npols);
2640 EXPORT_SYMBOL(__xfrm_policy_check);
2642 int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
2644 struct net *net = dev_net(skb->dev);
2646 struct dst_entry *dst;
2649 if (xfrm_decode_session(skb, &fl, family) < 0) {
2650 XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
2656 dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
2661 skb_dst_set(skb, dst);
2664 EXPORT_SYMBOL(__xfrm_route_forward);
2666 /* Optimize later using cookies and generation ids. */
2668 static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
2670 /* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
2671 * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to
2672 * get validated by dst_ops->check on every use. We do this
2673 * because when a normal route referenced by an XFRM dst is
2674 * obsoleted we do not go looking around for all parent
2675 * referencing XFRM dsts so that we can invalidate them. It
2676 * is just too much work. Instead we make the checks here on
2677 * every use. For example:
2679 * XFRM dst A --> IPv4 dst X
2681 * X is the "xdst->route" of A (X is also the "dst->path" of A
2682 * in this example). If X is marked obsolete, "A" will not
2683 * notice. That's what we are validating here via the
2684 * stale_bundle() check.
2686 * When a policy's bundle is pruned, we dst_free() the XFRM
2687 * dst which causes it's ->obsolete field to be set to
2688 * DST_OBSOLETE_DEAD. If an XFRM dst has been pruned like
2689 * this, we want to force a new route lookup.
2691 if (dst->obsolete < 0 && !stale_bundle(dst))
2697 static int stale_bundle(struct dst_entry *dst)
2699 return !xfrm_bundle_ok((struct xfrm_dst *)dst);
2702 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
2704 while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
2705 dst->dev = dev_net(dev)->loopback_dev;
2710 EXPORT_SYMBOL(xfrm_dst_ifdown);
2712 static void xfrm_link_failure(struct sk_buff *skb)
2714 /* Impossible. Such dst must be popped before reaches point of failure. */
2717 static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
2720 if (dst->obsolete) {
2728 void xfrm_garbage_collect(struct net *net)
2730 flow_cache_flush(net);
2732 EXPORT_SYMBOL(xfrm_garbage_collect);
2734 void xfrm_garbage_collect_deferred(struct net *net)
2736 flow_cache_flush_deferred(net);
2738 EXPORT_SYMBOL(xfrm_garbage_collect_deferred);
2740 static void xfrm_init_pmtu(struct dst_entry *dst)
2743 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
2744 u32 pmtu, route_mtu_cached;
2746 pmtu = dst_mtu(dst->child);
2747 xdst->child_mtu_cached = pmtu;
2749 pmtu = xfrm_state_mtu(dst->xfrm, pmtu);
2751 route_mtu_cached = dst_mtu(xdst->route);
2752 xdst->route_mtu_cached = route_mtu_cached;
2754 if (pmtu > route_mtu_cached)
2755 pmtu = route_mtu_cached;
2757 dst_metric_set(dst, RTAX_MTU, pmtu);
2758 } while ((dst = dst->next));
2761 /* Check that the bundle accepts the flow and its components are
2765 static int xfrm_bundle_ok(struct xfrm_dst *first)
2767 struct dst_entry *dst = &first->u.dst;
2768 struct xfrm_dst *last;
2771 if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
2772 (dst->dev && !netif_running(dst->dev)))
2775 if (dst->flags & DST_XFRM_QUEUE)
2781 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
2783 if (dst->xfrm->km.state != XFRM_STATE_VALID)
2785 if (xdst->xfrm_genid != dst->xfrm->genid)
2787 if (xdst->num_pols > 0 &&
2788 xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
2791 mtu = dst_mtu(dst->child);
2792 if (xdst->child_mtu_cached != mtu) {
2794 xdst->child_mtu_cached = mtu;
2797 if (!dst_check(xdst->route, xdst->route_cookie))
2799 mtu = dst_mtu(xdst->route);
2800 if (xdst->route_mtu_cached != mtu) {
2802 xdst->route_mtu_cached = mtu;
2806 } while (dst->xfrm);
2811 mtu = last->child_mtu_cached;
2815 mtu = xfrm_state_mtu(dst->xfrm, mtu);
2816 if (mtu > last->route_mtu_cached)
2817 mtu = last->route_mtu_cached;
2818 dst_metric_set(dst, RTAX_MTU, mtu);
2823 last = (struct xfrm_dst *)last->u.dst.next;
2824 last->child_mtu_cached = mtu;
2830 static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
2832 return dst_metric_advmss(dst->path);
2835 static unsigned int xfrm_mtu(const struct dst_entry *dst)
2837 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
2839 return mtu ? : dst_mtu(dst->path);
2842 static const void *xfrm_get_dst_nexthop(const struct dst_entry *dst,
2845 const struct dst_entry *path = dst->path;
2847 for (; dst != path; dst = dst->child) {
2848 const struct xfrm_state *xfrm = dst->xfrm;
2850 if (xfrm->props.mode == XFRM_MODE_TRANSPORT)
2852 if (xfrm->type->flags & XFRM_TYPE_REMOTE_COADDR)
2853 daddr = xfrm->coaddr;
2854 else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR))
2855 daddr = &xfrm->id.daddr;
2860 static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
2861 struct sk_buff *skb,
2864 const struct dst_entry *path = dst->path;
2867 daddr = xfrm_get_dst_nexthop(dst, daddr);
2868 return path->ops->neigh_lookup(path, skb, daddr);
2871 static void xfrm_confirm_neigh(const struct dst_entry *dst, const void *daddr)
2873 const struct dst_entry *path = dst->path;
2875 daddr = xfrm_get_dst_nexthop(dst, daddr);
2876 path->ops->confirm_neigh(path, daddr);
2879 int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family)
2883 if (WARN_ON(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
2884 return -EAFNOSUPPORT;
2886 spin_lock(&xfrm_policy_afinfo_lock);
2887 if (unlikely(xfrm_policy_afinfo[family] != NULL))
2890 struct dst_ops *dst_ops = afinfo->dst_ops;
2891 if (likely(dst_ops->kmem_cachep == NULL))
2892 dst_ops->kmem_cachep = xfrm_dst_cache;
2893 if (likely(dst_ops->check == NULL))
2894 dst_ops->check = xfrm_dst_check;
2895 if (likely(dst_ops->default_advmss == NULL))
2896 dst_ops->default_advmss = xfrm_default_advmss;
2897 if (likely(dst_ops->mtu == NULL))
2898 dst_ops->mtu = xfrm_mtu;
2899 if (likely(dst_ops->negative_advice == NULL))
2900 dst_ops->negative_advice = xfrm_negative_advice;
2901 if (likely(dst_ops->link_failure == NULL))
2902 dst_ops->link_failure = xfrm_link_failure;
2903 if (likely(dst_ops->neigh_lookup == NULL))
2904 dst_ops->neigh_lookup = xfrm_neigh_lookup;
2905 if (likely(!dst_ops->confirm_neigh))
2906 dst_ops->confirm_neigh = xfrm_confirm_neigh;
2907 rcu_assign_pointer(xfrm_policy_afinfo[family], afinfo);
2909 spin_unlock(&xfrm_policy_afinfo_lock);
2913 EXPORT_SYMBOL(xfrm_policy_register_afinfo);
2915 void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo)
2917 struct dst_ops *dst_ops = afinfo->dst_ops;
2920 for (i = 0; i < ARRAY_SIZE(xfrm_policy_afinfo); i++) {
2921 if (xfrm_policy_afinfo[i] != afinfo)
2923 RCU_INIT_POINTER(xfrm_policy_afinfo[i], NULL);
2929 dst_ops->kmem_cachep = NULL;
2930 dst_ops->check = NULL;
2931 dst_ops->negative_advice = NULL;
2932 dst_ops->link_failure = NULL;
2934 EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
2936 #ifdef CONFIG_XFRM_STATISTICS
2937 static int __net_init xfrm_statistics_init(struct net *net)
2940 net->mib.xfrm_statistics = alloc_percpu(struct linux_xfrm_mib);
2941 if (!net->mib.xfrm_statistics)
2943 rv = xfrm_proc_init(net);
2945 free_percpu(net->mib.xfrm_statistics);
2949 static void xfrm_statistics_fini(struct net *net)
2951 xfrm_proc_fini(net);
2952 free_percpu(net->mib.xfrm_statistics);
2955 static int __net_init xfrm_statistics_init(struct net *net)
2960 static void xfrm_statistics_fini(struct net *net)
2965 static int __net_init xfrm_policy_init(struct net *net)
2967 unsigned int hmask, sz;
2970 if (net_eq(net, &init_net))
2971 xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
2972 sizeof(struct xfrm_dst),
2973 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
2977 sz = (hmask+1) * sizeof(struct hlist_head);
2979 net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
2980 if (!net->xfrm.policy_byidx)
2982 net->xfrm.policy_idx_hmask = hmask;
2984 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
2985 struct xfrm_policy_hash *htab;
2987 net->xfrm.policy_count[dir] = 0;
2988 net->xfrm.policy_count[XFRM_POLICY_MAX + dir] = 0;
2989 INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
2991 htab = &net->xfrm.policy_bydst[dir];
2992 htab->table = xfrm_hash_alloc(sz);
2995 htab->hmask = hmask;
3001 net->xfrm.policy_hthresh.lbits4 = 32;
3002 net->xfrm.policy_hthresh.rbits4 = 32;
3003 net->xfrm.policy_hthresh.lbits6 = 128;
3004 net->xfrm.policy_hthresh.rbits6 = 128;
3006 seqlock_init(&net->xfrm.policy_hthresh.lock);
3008 INIT_LIST_HEAD(&net->xfrm.policy_all);
3009 INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
3010 INIT_WORK(&net->xfrm.policy_hthresh.work, xfrm_hash_rebuild);
3011 if (net_eq(net, &init_net))
3016 for (dir--; dir >= 0; dir--) {
3017 struct xfrm_policy_hash *htab;
3019 htab = &net->xfrm.policy_bydst[dir];
3020 xfrm_hash_free(htab->table, sz);
3022 xfrm_hash_free(net->xfrm.policy_byidx, sz);
3027 static void xfrm_policy_fini(struct net *net)
3032 flush_work(&net->xfrm.policy_hash_work);
3033 #ifdef CONFIG_XFRM_SUB_POLICY
3034 xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, false);
3036 xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, false);
3038 WARN_ON(!list_empty(&net->xfrm.policy_all));
3040 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
3041 struct xfrm_policy_hash *htab;
3043 WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));
3045 htab = &net->xfrm.policy_bydst[dir];
3046 sz = (htab->hmask + 1) * sizeof(struct hlist_head);
3047 WARN_ON(!hlist_empty(htab->table));
3048 xfrm_hash_free(htab->table, sz);
3051 sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
3052 WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
3053 xfrm_hash_free(net->xfrm.policy_byidx, sz);
3056 static int __net_init xfrm_net_init(struct net *net)
3060 /* Initialize the per-net locks here */
3061 spin_lock_init(&net->xfrm.xfrm_state_lock);
3062 spin_lock_init(&net->xfrm.xfrm_policy_lock);
3063 mutex_init(&net->xfrm.xfrm_cfg_mutex);
3065 rv = xfrm_statistics_init(net);
3067 goto out_statistics;
3068 rv = xfrm_state_init(net);
3071 rv = xfrm_policy_init(net);
3074 rv = xfrm_sysctl_init(net);
3077 rv = flow_cache_init(net);
3084 xfrm_sysctl_fini(net);
3086 xfrm_policy_fini(net);
3088 xfrm_state_fini(net);
3090 xfrm_statistics_fini(net);
3095 static void __net_exit xfrm_net_exit(struct net *net)
3097 flow_cache_fini(net);
3098 xfrm_sysctl_fini(net);
3099 xfrm_policy_fini(net);
3100 xfrm_state_fini(net);
3101 xfrm_statistics_fini(net);
3104 static struct pernet_operations __net_initdata xfrm_net_ops = {
3105 .init = xfrm_net_init,
3106 .exit = xfrm_net_exit,
3109 void __init xfrm_init(void)
3111 flow_cache_hp_init();
3112 register_pernet_subsys(&xfrm_net_ops);
3113 seqcount_init(&xfrm_policy_hash_generation);
3117 #ifdef CONFIG_AUDITSYSCALL
3118 static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
3119 struct audit_buffer *audit_buf)
3121 struct xfrm_sec_ctx *ctx = xp->security;
3122 struct xfrm_selector *sel = &xp->selector;
3125 audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
3126 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
3128 switch (sel->family) {
3130 audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
3131 if (sel->prefixlen_s != 32)
3132 audit_log_format(audit_buf, " src_prefixlen=%d",
3134 audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
3135 if (sel->prefixlen_d != 32)
3136 audit_log_format(audit_buf, " dst_prefixlen=%d",
3140 audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
3141 if (sel->prefixlen_s != 128)
3142 audit_log_format(audit_buf, " src_prefixlen=%d",
3144 audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
3145 if (sel->prefixlen_d != 128)
3146 audit_log_format(audit_buf, " dst_prefixlen=%d",
3152 void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid)
3154 struct audit_buffer *audit_buf;
3156 audit_buf = xfrm_audit_start("SPD-add");
3157 if (audit_buf == NULL)
3159 xfrm_audit_helper_usrinfo(task_valid, audit_buf);
3160 audit_log_format(audit_buf, " res=%u", result);
3161 xfrm_audit_common_policyinfo(xp, audit_buf);
3162 audit_log_end(audit_buf);
3164 EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);
3166 void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
3169 struct audit_buffer *audit_buf;
3171 audit_buf = xfrm_audit_start("SPD-delete");
3172 if (audit_buf == NULL)
3174 xfrm_audit_helper_usrinfo(task_valid, audit_buf);
3175 audit_log_format(audit_buf, " res=%u", result);
3176 xfrm_audit_common_policyinfo(xp, audit_buf);
3177 audit_log_end(audit_buf);
3179 EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
3182 #ifdef CONFIG_XFRM_MIGRATE
3183 static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
3184 const struct xfrm_selector *sel_tgt)
3186 if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
3187 if (sel_tgt->family == sel_cmp->family &&
3188 xfrm_addr_equal(&sel_tgt->daddr, &sel_cmp->daddr,
3190 xfrm_addr_equal(&sel_tgt->saddr, &sel_cmp->saddr,
3192 sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
3193 sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
3197 if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
3204 static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
3205 u8 dir, u8 type, struct net *net)
3207 struct xfrm_policy *pol, *ret = NULL;
3208 struct hlist_head *chain;
3211 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
3212 chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir);
3213 hlist_for_each_entry(pol, chain, bydst) {
3214 if (xfrm_migrate_selector_match(sel, &pol->selector) &&
3215 pol->type == type) {
3217 priority = ret->priority;
3221 chain = &net->xfrm.policy_inexact[dir];
3222 hlist_for_each_entry(pol, chain, bydst) {
3223 if ((pol->priority >= priority) && ret)
3226 if (xfrm_migrate_selector_match(sel, &pol->selector) &&
3227 pol->type == type) {
3235 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
3240 static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
3244 if (t->mode == m->mode && t->id.proto == m->proto &&
3245 (m->reqid == 0 || t->reqid == m->reqid)) {
3247 case XFRM_MODE_TUNNEL:
3248 case XFRM_MODE_BEET:
3249 if (xfrm_addr_equal(&t->id.daddr, &m->old_daddr,
3251 xfrm_addr_equal(&t->saddr, &m->old_saddr,
3256 case XFRM_MODE_TRANSPORT:
3257 /* in case of transport mode, template does not store
3258 any IP addresses, hence we just compare mode and
3269 /* update endpoint address(es) of template(s) */
3270 static int xfrm_policy_migrate(struct xfrm_policy *pol,
3271 struct xfrm_migrate *m, int num_migrate)
3273 struct xfrm_migrate *mp;
3276 write_lock_bh(&pol->lock);
3277 if (unlikely(pol->walk.dead)) {
3278 /* target policy has been deleted */
3279 write_unlock_bh(&pol->lock);
3283 for (i = 0; i < pol->xfrm_nr; i++) {
3284 for (j = 0, mp = m; j < num_migrate; j++, mp++) {
3285 if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
3288 if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
3289 pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
3291 /* update endpoints */
3292 memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
3293 sizeof(pol->xfrm_vec[i].id.daddr));
3294 memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
3295 sizeof(pol->xfrm_vec[i].saddr));
3296 pol->xfrm_vec[i].encap_family = mp->new_family;
3298 atomic_inc(&pol->genid);
3302 write_unlock_bh(&pol->lock);
3310 static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
3314 if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
3317 for (i = 0; i < num_migrate; i++) {
3318 if (xfrm_addr_equal(&m[i].old_daddr, &m[i].new_daddr,
3320 xfrm_addr_equal(&m[i].old_saddr, &m[i].new_saddr,
3323 if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
3324 xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
3327 /* check if there is any duplicated entry */
3328 for (j = i + 1; j < num_migrate; j++) {
3329 if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
3330 sizeof(m[i].old_daddr)) &&
3331 !memcmp(&m[i].old_saddr, &m[j].old_saddr,
3332 sizeof(m[i].old_saddr)) &&
3333 m[i].proto == m[j].proto &&
3334 m[i].mode == m[j].mode &&
3335 m[i].reqid == m[j].reqid &&
3336 m[i].old_family == m[j].old_family)
3344 int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
3345 struct xfrm_migrate *m, int num_migrate,
3346 struct xfrm_kmaddress *k, struct net *net)
3348 int i, err, nx_cur = 0, nx_new = 0;
3349 struct xfrm_policy *pol = NULL;
3350 struct xfrm_state *x, *xc;
3351 struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
3352 struct xfrm_state *x_new[XFRM_MAX_DEPTH];
3353 struct xfrm_migrate *mp;
3355 if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
3358 /* Stage 1 - find policy */
3359 if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) {
3364 /* Stage 2 - find and update state(s) */
3365 for (i = 0, mp = m; i < num_migrate; i++, mp++) {
3366 if ((x = xfrm_migrate_state_find(mp, net))) {
3369 if ((xc = xfrm_state_migrate(x, mp))) {
3379 /* Stage 3 - update policy */
3380 if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
3383 /* Stage 4 - delete old state(s) */
3385 xfrm_states_put(x_cur, nx_cur);
3386 xfrm_states_delete(x_cur, nx_cur);
3389 /* Stage 5 - announce */
3390 km_migrate(sel, dir, type, m, num_migrate, k);
3402 xfrm_states_put(x_cur, nx_cur);
3404 xfrm_states_delete(x_new, nx_new);
3408 EXPORT_SYMBOL(xfrm_migrate);