/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		IPv4 Forwarding Information Base: semantics.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/slab.h>

#include <net/protocol.h>
#include <net/route.h>
#include <net/ip_fib.h>
#include <net/netlink.h>
#include <net/nexthop.h>
#include <net/lwtunnel.h>

#include "fib_lookup.h"
static DEFINE_SPINLOCK(fib_info_lock);
static struct hlist_head *fib_info_hash;
static struct hlist_head *fib_info_laddrhash;
static unsigned int fib_info_hash_size;
static unsigned int fib_info_cnt;

#define DEVINDEX_HASHBITS 8
#define DEVINDEX_HASHSIZE (1U << DEVINDEX_HASHBITS)
static struct hlist_head fib_info_devhash[DEVINDEX_HASHSIZE];
#ifdef CONFIG_IP_ROUTE_MULTIPATH

u32 fib_multipath_secret __read_mostly;

#define for_nexthops(fi) {						\
	int nhsel; const struct fib_nh *nh;				\
	for (nhsel = 0, nh = (fi)->fib_nh;				\
	     nhsel < (fi)->fib_nhs;					\
	     nh++, nhsel++)

#define change_nexthops(fi) {						\
	int nhsel; struct fib_nh *nexthop_nh;				\
	for (nhsel = 0, nexthop_nh = (struct fib_nh *)((fi)->fib_nh);	\
	     nhsel < (fi)->fib_nhs;					\
	     nexthop_nh++, nhsel++)

#else /* CONFIG_IP_ROUTE_MULTIPATH */

/* Hope that gcc will optimize away the dummy loop */

#define for_nexthops(fi) {						\
	int nhsel; const struct fib_nh *nh = (fi)->fib_nh;		\
	for (nhsel = 0; nhsel < 1; nhsel++)

#define change_nexthops(fi) {						\
	int nhsel;							\
	struct fib_nh *nexthop_nh = (struct fib_nh *)((fi)->fib_nh);	\
	for (nhsel = 0; nhsel < 1; nhsel++)

#endif /* CONFIG_IP_ROUTE_MULTIPATH */

#define endfor_nexthops(fi) }
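
/* Usage sketch (illustrative, mirroring how the macros are used later in
 * this file): the macros open a brace scope, so every walk must be closed
 * with endfor_nexthops().  "nh"/"nhsel" (or "nexthop_nh" for the writable
 * variant) are the loop variables declared by the macro itself:
 *
 *	int dead = 0;
 *
 *	for_nexthops(fi) {
 *		if (nh->nh_flags & RTNH_F_DEAD)
 *			dead++;
 *	} endfor_nexthops(fi);
 */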
const struct fib_prop fib_props[RTN_MAX + 1] = {
	[RTN_UNSPEC]	  = { .error = 0,		.scope = RT_SCOPE_NOWHERE },
	[RTN_UNICAST]	  = { .error = 0,		.scope = RT_SCOPE_UNIVERSE },
	[RTN_LOCAL]	  = { .error = 0,		.scope = RT_SCOPE_HOST },
	[RTN_BROADCAST]	  = { .error = 0,		.scope = RT_SCOPE_LINK },
	[RTN_ANYCAST]	  = { .error = 0,		.scope = RT_SCOPE_LINK },
	[RTN_MULTICAST]	  = { .error = 0,		.scope = RT_SCOPE_UNIVERSE },
	[RTN_BLACKHOLE]	  = { .error = -EINVAL,		.scope = RT_SCOPE_UNIVERSE },
	[RTN_UNREACHABLE] = { .error = -EHOSTUNREACH,	.scope = RT_SCOPE_UNIVERSE },
	[RTN_PROHIBIT]	  = { .error = -EACCES,		.scope = RT_SCOPE_UNIVERSE },
	[RTN_THROW]	  = { .error = -EAGAIN,		.scope = RT_SCOPE_UNIVERSE },
	[RTN_NAT]	  = { .error = -EINVAL,		.scope = RT_SCOPE_NOWHERE },
	[RTN_XRESOLVE]	  = { .error = -EINVAL,		.scope = RT_SCOPE_NOWHERE },
};
static void rt_fibinfo_free(struct rtable __rcu **rtp)
{
	struct rtable *rt = rcu_dereference_protected(*rtp, 1);

	if (!rt)
		return;

	/* Not even needed : RCU_INIT_POINTER(*rtp, NULL);
	 * because we waited an RCU grace period before calling
	 * free_fib_info_rcu()
	 */
	dst_free(&rt->dst);
}
static void free_nh_exceptions(struct fib_nh *nh)
{
	struct fnhe_hash_bucket *hash;
	int i;

	hash = rcu_dereference_protected(nh->nh_exceptions, 1);
	if (!hash)
		return;
	for (i = 0; i < FNHE_HASH_SIZE; i++) {
		struct fib_nh_exception *fnhe;

		fnhe = rcu_dereference_protected(hash[i].chain, 1);
		while (fnhe) {
			struct fib_nh_exception *next;

			next = rcu_dereference_protected(fnhe->fnhe_next, 1);

			rt_fibinfo_free(&fnhe->fnhe_rth_input);
			rt_fibinfo_free(&fnhe->fnhe_rth_output);

			kfree(fnhe);

			fnhe = next;
		}
	}
	kfree(hash);
}
static void rt_fibinfo_free_cpus(struct rtable __rcu * __percpu *rtp)
{
	int cpu;

	if (!rtp)
		return;

	for_each_possible_cpu(cpu) {
		struct rtable *rt;

		rt = rcu_dereference_protected(*per_cpu_ptr(rtp, cpu), 1);
		if (rt)
			dst_free(&rt->dst);
	}
	free_percpu(rtp);
}
/* Release a nexthop info record */
static void free_fib_info_rcu(struct rcu_head *head)
{
	struct fib_info *fi = container_of(head, struct fib_info, rcu);

	change_nexthops(fi) {
		if (nexthop_nh->nh_dev)
			dev_put(nexthop_nh->nh_dev);
		lwtstate_put(nexthop_nh->nh_lwtstate);
		free_nh_exceptions(nexthop_nh);
		rt_fibinfo_free_cpus(nexthop_nh->nh_pcpu_rth_output);
		rt_fibinfo_free(&nexthop_nh->nh_rth_input);
	} endfor_nexthops(fi);

	if (fi->fib_metrics != (u32 *) dst_default_metrics)
		kfree(fi->fib_metrics);
	kfree(fi);
}
void free_fib_info(struct fib_info *fi)
{
	if (fi->fib_dead == 0) {
		pr_warn("Freeing alive fib_info %p\n", fi);
		return;
	}
	fib_info_cnt--;
#ifdef CONFIG_IP_ROUTE_CLASSID
	change_nexthops(fi) {
		if (nexthop_nh->nh_tclassid)
			fi->fib_net->ipv4.fib_num_tclassid_users--;
	} endfor_nexthops(fi);
#endif
	call_rcu(&fi->rcu, free_fib_info_rcu);
}
void fib_release_info(struct fib_info *fi)
{
	spin_lock_bh(&fib_info_lock);
	if (fi && --fi->fib_treeref == 0) {
		hlist_del(&fi->fib_hash);
		if (fi->fib_prefsrc)
			hlist_del(&fi->fib_lhash);
		change_nexthops(fi) {
			if (!nexthop_nh->nh_dev)
				continue;
			hlist_del(&nexthop_nh->nh_hash);
		} endfor_nexthops(fi)
		fi->fib_dead = 1;
		fib_info_put(fi);
	}
	spin_unlock_bh(&fib_info_lock);
}
static inline int nh_comp(const struct fib_info *fi, const struct fib_info *ofi)
{
	const struct fib_nh *onh = ofi->fib_nh;

	for_nexthops(fi) {
		if (nh->nh_oif != onh->nh_oif ||
		    nh->nh_gw != onh->nh_gw ||
		    nh->nh_scope != onh->nh_scope ||
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		    nh->nh_weight != onh->nh_weight ||
#endif
#ifdef CONFIG_IP_ROUTE_CLASSID
		    nh->nh_tclassid != onh->nh_tclassid ||
#endif
		    lwtunnel_cmp_encap(nh->nh_lwtstate, onh->nh_lwtstate) ||
		    ((nh->nh_flags ^ onh->nh_flags) & ~RTNH_COMPARE_MASK))
			return -1;
		onh++;
	} endfor_nexthops(fi);
	return 0;
}
static inline unsigned int fib_devindex_hashfn(unsigned int val)
{
	unsigned int mask = DEVINDEX_HASHSIZE - 1;

	return (val ^
		(val >> DEVINDEX_HASHBITS) ^
		(val >> (DEVINDEX_HASHBITS * 2))) & mask;
}
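
/* Illustrative example: with DEVINDEX_HASHBITS == 8 the upper bits of the
 * ifindex are folded onto the low byte, e.g. for val == 0x12345:
 * (0x12345 ^ 0x123 ^ 0x1) & 0xff == 0x67, so large or sparse ifindex
 * values still spread across the 256 fib_info_devhash buckets.
 */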
static inline unsigned int fib_info_hashfn(const struct fib_info *fi)
{
	unsigned int mask = (fib_info_hash_size - 1);
	unsigned int val = fi->fib_nhs;

	val ^= (fi->fib_protocol << 8) | fi->fib_scope;
	val ^= (__force u32)fi->fib_prefsrc;
	val ^= fi->fib_priority;
	for_nexthops(fi) {
		val ^= fib_devindex_hashfn(nh->nh_oif);
	} endfor_nexthops(fi)

	return (val ^ (val >> 7) ^ (val >> 12)) & mask;
}
static struct fib_info *fib_find_info(const struct fib_info *nfi)
{
	struct hlist_head *head;
	struct fib_info *fi;
	unsigned int hash;

	hash = fib_info_hashfn(nfi);
	head = &fib_info_hash[hash];

	hlist_for_each_entry(fi, head, fib_hash) {
		if (!net_eq(fi->fib_net, nfi->fib_net))
			continue;
		if (fi->fib_nhs != nfi->fib_nhs)
			continue;
		if (nfi->fib_protocol == fi->fib_protocol &&
		    nfi->fib_scope == fi->fib_scope &&
		    nfi->fib_prefsrc == fi->fib_prefsrc &&
		    nfi->fib_priority == fi->fib_priority &&
		    nfi->fib_type == fi->fib_type &&
		    memcmp(nfi->fib_metrics, fi->fib_metrics,
			   sizeof(u32) * RTAX_MAX) == 0 &&
		    !((nfi->fib_flags ^ fi->fib_flags) & ~RTNH_COMPARE_MASK) &&
		    (nfi->fib_nhs == 0 || nh_comp(fi, nfi) == 0))
			return fi;
	}

	return NULL;
}
/* Check that the gateway is already configured.
 * Used only by the redirect accept routine.
 */
int ip_fib_check_default(__be32 gw, struct net_device *dev)
{
	struct hlist_head *head;
	struct fib_nh *nh;
	unsigned int hash;

	spin_lock(&fib_info_lock);

	hash = fib_devindex_hashfn(dev->ifindex);
	head = &fib_info_devhash[hash];
	hlist_for_each_entry(nh, head, nh_hash) {
		if (nh->nh_dev == dev &&
		    nh->nh_gw == gw &&
		    !(nh->nh_flags & RTNH_F_DEAD)) {
			spin_unlock(&fib_info_lock);
			return 0;
		}
	}

	spin_unlock(&fib_info_lock);

	return -1;
}
static inline size_t fib_nlmsg_size(struct fib_info *fi)
{
	size_t payload = NLMSG_ALIGN(sizeof(struct rtmsg))
			 + nla_total_size(4) /* RTA_TABLE */
			 + nla_total_size(4) /* RTA_DST */
			 + nla_total_size(4) /* RTA_PRIORITY */
			 + nla_total_size(4) /* RTA_PREFSRC */
			 + nla_total_size(TCP_CA_NAME_MAX); /* RTAX_CC_ALGO */

	/* space for nested metrics */
	payload += nla_total_size((RTAX_MAX * nla_total_size(4)));

	if (fi->fib_nhs) {
		size_t nh_encapsize = 0;
		/* Also handles the special case fib_nhs == 1 */

		/* each nexthop is packed in an attribute */
		size_t nhsize = nla_total_size(sizeof(struct rtnexthop));

		/* may contain flow and gateway attribute */
		nhsize += 2 * nla_total_size(4);

		/* grab encap info */
		for_nexthops(fi) {
			if (nh->nh_lwtstate) {
				/* Space for encapsulation header plus attrs */
				nh_encapsize += lwtunnel_get_encap_size(
						nh->nh_lwtstate);
				/* Space for nested attribute RTA_ENCAP_TYPE */
				nh_encapsize += nla_total_size(2);
			}
		} endfor_nexthops(fi);

		/* all nexthops are packed in a nested attribute */
		payload += nla_total_size((fi->fib_nhs * nhsize) +
					  nh_encapsize);
	}

	return payload;
}
void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
	       int dst_len, u32 tb_id, const struct nl_info *info,
	       unsigned int nlm_flags)
{
	struct sk_buff *skb;
	u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
	int err = -ENOBUFS;

	skb = nlmsg_new(fib_nlmsg_size(fa->fa_info), GFP_KERNEL);
	if (!skb)
		goto errout;

	err = fib_dump_info(skb, info->portid, seq, event, tb_id,
			    fa->fa_type, key, dst_len,
			    fa->fa_tos, fa->fa_info, nlm_flags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in fib_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, info->nl_net, info->portid, RTNLGRP_IPV4_ROUTE,
		    info->nlh, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(info->nl_net, RTNLGRP_IPV4_ROUTE, err);
}
static int fib_detect_death(struct fib_info *fi, int order,
			    struct fib_info **last_resort, int *last_idx,
			    int dflt)
{
	struct neighbour *n;
	int state = NUD_NONE;

	n = neigh_lookup(&arp_tbl, &fi->fib_nh[0].nh_gw, fi->fib_dev);
	if (n) {
		state = n->nud_state;
		neigh_release(n);
	} else {
		return 0;
	}
	if (state == NUD_REACHABLE)
		return 0;
	if ((state & NUD_VALID) && order != dflt)
		return 0;
	if ((state & NUD_VALID) ||
	    (*last_idx < 0 && order > dflt && state != NUD_INCOMPLETE)) {
		*last_resort = fi;
		*last_idx = order;
	}
	return 1;
}
#ifdef CONFIG_IP_ROUTE_MULTIPATH

static int fib_count_nexthops(struct rtnexthop *rtnh, int remaining)
{
	int nhs = 0;

	while (rtnh_ok(rtnh, remaining)) {
		nhs++;
		rtnh = rtnh_next(rtnh, &remaining);
	}

	/* leftover implies invalid nexthop configuration, discard it */
	return remaining > 0 ? 0 : nhs;
}
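
/* For reference (this layout comes from the rtnetlink ABI, not from this
 * file): the RTA_MULTIPATH payload walked here and in fib_get_nhs() is a
 * packed array of entries of the form
 *
 *	struct rtnexthop { rtnh_len, rtnh_flags, rtnh_hops, rtnh_ifindex }
 *	followed by optional per-nexthop attributes
 *	(RTA_GATEWAY, RTA_FLOW, RTA_ENCAP, RTA_ENCAP_TYPE, ...)
 *
 * each entry aligned to RTNH_ALIGNTO; rtnh_ok()/rtnh_next() step through
 * that array while "remaining" counts the bytes left in the attribute.
 */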
static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
		       int remaining, struct fib_config *cfg)
{
	struct net *net = cfg->fc_nlinfo.nl_net;
	int ret;

	change_nexthops(fi) {
		int attrlen;

		if (!rtnh_ok(rtnh, remaining))
			return -EINVAL;

		nexthop_nh->nh_flags =
			(cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags;
		nexthop_nh->nh_oif = rtnh->rtnh_ifindex;
		nexthop_nh->nh_weight = rtnh->rtnh_hops + 1;

		attrlen = rtnh_attrlen(rtnh);
		if (attrlen > 0) {
			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);

			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
			nexthop_nh->nh_gw = nla ? nla_get_in_addr(nla) : 0;
#ifdef CONFIG_IP_ROUTE_CLASSID
			nla = nla_find(attrs, attrlen, RTA_FLOW);
			nexthop_nh->nh_tclassid = nla ? nla_get_u32(nla) : 0;
			if (nexthop_nh->nh_tclassid)
				fi->fib_net->ipv4.fib_num_tclassid_users++;
#endif
			nla = nla_find(attrs, attrlen, RTA_ENCAP);
			if (nla) {
				struct lwtunnel_state *lwtstate;
				struct net_device *dev = NULL;
				struct nlattr *nla_entype;

				nla_entype = nla_find(attrs, attrlen,
						      RTA_ENCAP_TYPE);
				if (!nla_entype)
					goto err_inval;
				if (cfg->fc_oif)
					dev = __dev_get_by_index(net, cfg->fc_oif);
				ret = lwtunnel_build_state(dev, nla_get_u16(
							   nla_entype),
							   nla, AF_INET, cfg,
							   &lwtstate);
				if (ret)
					goto errout;
				nexthop_nh->nh_lwtstate =
					lwtstate_get(lwtstate);
			}
		}

		rtnh = rtnh_next(rtnh, &remaining);
	} endfor_nexthops(fi);

	return 0;

errout:
	return ret;

err_inval:
	ret = -EINVAL;
	goto errout;
}
static void fib_rebalance(struct fib_info *fi)
{
	int total;
	int w;
	struct in_device *in_dev;

	if (fi->fib_nhs < 2)
		return;

	total = 0;
	for_nexthops(fi) {
		if (nh->nh_flags & RTNH_F_DEAD)
			continue;

		in_dev = __in_dev_get_rtnl(nh->nh_dev);

		if (in_dev &&
		    IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
		    nh->nh_flags & RTNH_F_LINKDOWN)
			continue;

		total += nh->nh_weight;
	} endfor_nexthops(fi);

	w = 0;
	change_nexthops(fi) {
		int upper_bound;

		in_dev = __in_dev_get_rtnl(nexthop_nh->nh_dev);

		if (nexthop_nh->nh_flags & RTNH_F_DEAD) {
			upper_bound = -1;
		} else if (in_dev &&
			   IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
			   nexthop_nh->nh_flags & RTNH_F_LINKDOWN) {
			upper_bound = -1;
		} else {
			w += nexthop_nh->nh_weight;
			upper_bound = DIV_ROUND_CLOSEST_ULL((u64)w << 31,
							    total) - 1;
		}

		atomic_set(&nexthop_nh->nh_upper_bound, upper_bound);
	} endfor_nexthops(fi);

	net_get_random_once(&fib_multipath_secret,
			    sizeof(fib_multipath_secret));
}
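
/* A worked example of the thresholds computed above (illustrative): with
 * two live nexthops of weight 1 and 2, total == 3, the cumulative bounds
 * become DIV_ROUND_CLOSEST_ULL(1ULL << 31, 3) - 1 == 0x2aaaaaaa and
 * DIV_ROUND_CLOSEST_ULL(3ULL << 31, 3) - 1 == 0x7fffffff, so the 31-bit
 * flow hash used by fib_select_multipath() picks the first nexthop for
 * roughly one third of the flows and the second for the rest.  A minimal
 * model of the selection step (hypothetical helper, not kernel API):
 *
 *	static int pick_nh(int hash31, const int *upper_bound, int nhs)
 *	{
 *		int i;
 *
 *		for (i = 0; i < nhs; i++)
 *			if (hash31 <= upper_bound[i])
 *				return i;
 *		return -1;
 *	}
 *
 * Dead nexthops carry an upper bound of -1 above, so they are never
 * selected by a non-negative hash.
 */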
static inline void fib_add_weight(struct fib_info *fi,
				  const struct fib_nh *nh)
{
	fi->fib_weight += nh->nh_weight;
}

#else /* CONFIG_IP_ROUTE_MULTIPATH */

#define fib_rebalance(fi) do { } while (0)
#define fib_add_weight(fi, nh) do { } while (0)

#endif /* CONFIG_IP_ROUTE_MULTIPATH */
static int fib_encap_match(struct net *net, u16 encap_type,
			   struct nlattr *encap,
			   int oif, const struct fib_nh *nh,
			   const struct fib_config *cfg)
{
	struct lwtunnel_state *lwtstate;
	struct net_device *dev = NULL;
	int ret, result = 0;

	if (encap_type == LWTUNNEL_ENCAP_NONE)
		return 0;

	if (oif)
		dev = __dev_get_by_index(net, oif);
	ret = lwtunnel_build_state(dev, encap_type, encap,
				   AF_INET, cfg, &lwtstate);
	if (!ret) {
		result = lwtunnel_cmp_encap(lwtstate, nh->nh_lwtstate);
		lwtstate_free(lwtstate);
	}

	return result;
}
int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
{
	struct net *net = cfg->fc_nlinfo.nl_net;
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	struct rtnexthop *rtnh;
	int remaining;
#endif

	if (cfg->fc_priority && cfg->fc_priority != fi->fib_priority)
		return 1;

	if (cfg->fc_oif || cfg->fc_gw) {
		if (cfg->fc_encap) {
			if (fib_encap_match(net, cfg->fc_encap_type,
					    cfg->fc_encap, cfg->fc_oif,
					    fi->fib_nh, cfg))
			return 1;
		}
		if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->nh_oif) &&
		    (!cfg->fc_gw || cfg->fc_gw == fi->fib_nh->nh_gw))
			return 0;
		return 1;
	}

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (!cfg->fc_mp)
		return 0;

	rtnh = cfg->fc_mp;
	remaining = cfg->fc_mp_len;

	for_nexthops(fi) {
		int attrlen;

		if (!rtnh_ok(rtnh, remaining))
			return -EINVAL;

		if (rtnh->rtnh_ifindex && rtnh->rtnh_ifindex != nh->nh_oif)
			return 1;

		attrlen = rtnh_attrlen(rtnh);
		if (attrlen > 0) {
			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);

			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
			if (nla && nla_get_in_addr(nla) != nh->nh_gw)
				return 1;
#ifdef CONFIG_IP_ROUTE_CLASSID
			nla = nla_find(attrs, attrlen, RTA_FLOW);
			if (nla && nla_get_u32(nla) != nh->nh_tclassid)
				return 1;
#endif
		}

		rtnh = rtnh_next(rtnh, &remaining);
	} endfor_nexthops(fi);
#endif

	return 0;
}
/*
 * The semantics of nexthops are messy for historical reasons.
 * We have to take into account that:
 * a) the gateway can actually be a local interface address,
 *    so that the gatewayed route is in fact direct.
 * b) the gateway must be an on-link address, possibly
 *    described not by an ifaddr, but also by a direct route.
 * c) if both gateway and interface are specified, they should not
 *    contradict each other.
 * d) if we use tunnel routes, the gateway may not be on-link.
 *
 * Attempting to reconcile all of these (alas, self-contradictory)
 * conditions results in pretty ugly and hairy code with obscure logic.
 *
 * I chose to generalize it instead, so that the amount of code does
 * not really increase, but the result is much more general.
 * Every prefix is assigned a "scope" value: "host" is a local address,
 * "link" is a direct route,
 * [ ... "site" ... "interior" ... ]
 * and "universe" is a true gateway route with global meaning.
 *
 * Every prefix refers to a set of "nexthop"s (gw, oif),
 * where gw must have narrower scope. This recursion stops
 * when gw has LOCAL scope or if "nexthop" is declared ONLINK,
 * which means that gw is forced to be on link.
 *
 * The code is still hairy, but now it is apparently logically
 * consistent and very flexible. E.g. as a by-product it allows
 * independent exterior and interior routing processes to coexist
 * in peace.
 *
 * Normally it looks as follows:
 *
 * {universe prefix}  -> (gw, oif) [scope link]
 *                        |
 *                        |-> {link prefix} -> (gw, oif) [scope local]
 *                                              |
 *                                              |-> {local prefix} (terminal node)
 */
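
/* A concrete instance of the picture above (addresses are illustrative
 * only): a default route 0.0.0.0/0 via 192.0.2.1 dev eth0 has universe
 * scope; its gateway 192.0.2.1 must be covered by a narrower prefix such
 * as 192.0.2.0/24 dev eth0 [scope link], whose nexthop in turn resolves
 * to the local address of eth0 [scope host], where the recursion stops.
 * fib_check_nh() below enforces this narrowing by looking the gateway up
 * with fl4.flowi4_scope = cfg->fc_scope + 1, i.e. requiring a strictly
 * narrower-scoped route for the gateway.
 */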
static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
			struct fib_nh *nh)
{
	int err = 0;
	struct net *net;
	struct net_device *dev;

	net = cfg->fc_nlinfo.nl_net;
	if (nh->nh_gw) {
		struct fib_result res;

		if (nh->nh_flags & RTNH_F_ONLINK) {
			unsigned int addr_type;

			if (cfg->fc_scope >= RT_SCOPE_LINK)
				return -EINVAL;
			dev = __dev_get_by_index(net, nh->nh_oif);
			if (!dev)
				return -ENODEV;
			if (!(dev->flags & IFF_UP))
				return -ENETDOWN;
			addr_type = inet_addr_type_dev_table(net, dev, nh->nh_gw);
			if (addr_type != RTN_UNICAST)
				return -EINVAL;
			if (!netif_carrier_ok(dev))
				nh->nh_flags |= RTNH_F_LINKDOWN;
			nh->nh_dev = dev;
			dev_hold(dev);
			nh->nh_scope = RT_SCOPE_LINK;
			return 0;
		}
		rcu_read_lock();
		{
			struct fib_table *tbl = NULL;
			struct flowi4 fl4 = {
				.daddr = nh->nh_gw,
				.flowi4_scope = cfg->fc_scope + 1,
				.flowi4_oif = nh->nh_oif,
				.flowi4_iif = LOOPBACK_IFINDEX,
			};

			/* It is not necessary, but requires a bit of thinking */
			if (fl4.flowi4_scope < RT_SCOPE_LINK)
				fl4.flowi4_scope = RT_SCOPE_LINK;

			if (cfg->fc_table)
				tbl = fib_get_table(net, cfg->fc_table);

			if (tbl)
				err = fib_table_lookup(tbl, &fl4, &res,
						       FIB_LOOKUP_IGNORE_LINKSTATE |
						       FIB_LOOKUP_NOREF);

			/* on error or if no table given do full lookup. This
			 * is needed for example when nexthops are in the local
			 * table rather than the given table
			 */
			if (!tbl || err) {
				err = fib_lookup(net, &fl4, &res,
						 FIB_LOOKUP_IGNORE_LINKSTATE);
			}

			if (err) {
				rcu_read_unlock();
				return err;
			}
		}
		err = -EINVAL;
		if (res.type != RTN_UNICAST && res.type != RTN_LOCAL)
			goto out;
		nh->nh_scope = res.scope;
		nh->nh_oif = FIB_RES_OIF(res);
		nh->nh_dev = dev = FIB_RES_DEV(res);
		if (!dev)
			goto out;
		dev_hold(dev);
		if (!netif_carrier_ok(dev))
			nh->nh_flags |= RTNH_F_LINKDOWN;
		err = (dev->flags & IFF_UP) ? 0 : -ENETDOWN;
	} else {
		struct in_device *in_dev;

		if (nh->nh_flags & (RTNH_F_PERVASIVE | RTNH_F_ONLINK))
			return -EINVAL;

		rcu_read_lock();
		err = -ENODEV;
		in_dev = inetdev_by_index(net, nh->nh_oif);
		if (!in_dev)
			goto out;
		err = -ENETDOWN;
		if (!(in_dev->dev->flags & IFF_UP))
			goto out;
		nh->nh_dev = in_dev->dev;
		dev_hold(nh->nh_dev);
		nh->nh_scope = RT_SCOPE_HOST;
		if (!netif_carrier_ok(nh->nh_dev))
			nh->nh_flags |= RTNH_F_LINKDOWN;
		err = 0;
	}
out:
	rcu_read_unlock();
	return err;
}
static inline unsigned int fib_laddr_hashfn(__be32 val)
{
	unsigned int mask = (fib_info_hash_size - 1);

	return ((__force u32)val ^
		((__force u32)val >> 7) ^
		((__force u32)val >> 14)) & mask;
}
static struct hlist_head *fib_info_hash_alloc(int bytes)
{
	if (bytes <= PAGE_SIZE)
		return kzalloc(bytes, GFP_KERNEL);
	else
		return (struct hlist_head *)
			__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					 get_order(bytes));
}

static void fib_info_hash_free(struct hlist_head *hash, int bytes)
{
	if (!hash)
		return;

	if (bytes <= PAGE_SIZE)
		kfree(hash);
	else
		free_pages((unsigned long) hash, get_order(bytes));
}
static void fib_info_hash_move(struct hlist_head *new_info_hash,
			       struct hlist_head *new_laddrhash,
			       unsigned int new_size)
{
	struct hlist_head *old_info_hash, *old_laddrhash;
	unsigned int old_size = fib_info_hash_size;
	unsigned int i, bytes;

	spin_lock_bh(&fib_info_lock);
	old_info_hash = fib_info_hash;
	old_laddrhash = fib_info_laddrhash;
	fib_info_hash_size = new_size;

	for (i = 0; i < old_size; i++) {
		struct hlist_head *head = &fib_info_hash[i];
		struct hlist_node *n;
		struct fib_info *fi;

		hlist_for_each_entry_safe(fi, n, head, fib_hash) {
			struct hlist_head *dest;
			unsigned int new_hash;

			new_hash = fib_info_hashfn(fi);
			dest = &new_info_hash[new_hash];
			hlist_add_head(&fi->fib_hash, dest);
		}
	}
	fib_info_hash = new_info_hash;

	for (i = 0; i < old_size; i++) {
		struct hlist_head *lhead = &fib_info_laddrhash[i];
		struct hlist_node *n;
		struct fib_info *fi;

		hlist_for_each_entry_safe(fi, n, lhead, fib_lhash) {
			struct hlist_head *ldest;
			unsigned int new_hash;

			new_hash = fib_laddr_hashfn(fi->fib_prefsrc);
			ldest = &new_laddrhash[new_hash];
			hlist_add_head(&fi->fib_lhash, ldest);
		}
	}
	fib_info_laddrhash = new_laddrhash;

	spin_unlock_bh(&fib_info_lock);

	bytes = old_size * sizeof(struct hlist_head *);
	fib_info_hash_free(old_info_hash, bytes);
	fib_info_hash_free(old_laddrhash, bytes);
}
__be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
{
	nh->nh_saddr = inet_select_addr(nh->nh_dev,
					nh->nh_gw,
					nh->nh_parent->fib_scope);
	nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);

	return nh->nh_saddr;
}
static bool fib_valid_prefsrc(struct fib_config *cfg, __be32 fib_prefsrc)
{
	if (cfg->fc_type != RTN_LOCAL || !cfg->fc_dst ||
	    fib_prefsrc != cfg->fc_dst) {
		u32 tb_id = cfg->fc_table;
		int rc;

		if (tb_id == RT_TABLE_MAIN)
			tb_id = RT_TABLE_LOCAL;

		rc = inet_addr_type_table(cfg->fc_nlinfo.nl_net,
					  fib_prefsrc, tb_id);

		if (rc != RTN_LOCAL && tb_id != RT_TABLE_LOCAL) {
			rc = inet_addr_type_table(cfg->fc_nlinfo.nl_net,
						  fib_prefsrc, RT_TABLE_LOCAL);
		}

		if (rc != RTN_LOCAL)
			return false;
	}
	return true;
}
static int
fib_convert_metrics(struct fib_info *fi, const struct fib_config *cfg)
{
	bool ecn_ca = false;
	struct nlattr *nla;
	int remaining;

	if (!cfg->fc_mx)
		return 0;

	nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
		int type = nla_type(nla);
		u32 val;

		if (!type)
			continue;
		if (type > RTAX_MAX)
			return -EINVAL;

		if (type == RTAX_CC_ALGO) {
			char tmp[TCP_CA_NAME_MAX];

			nla_strlcpy(tmp, nla, sizeof(tmp));
			val = tcp_ca_get_key_by_name(tmp, &ecn_ca);
			if (val == TCP_CA_UNSPEC)
				return -EINVAL;
		} else {
			val = nla_get_u32(nla);
		}
		if (type == RTAX_ADVMSS && val > 65535 - 40)
			return -EINVAL;
		if (type == RTAX_MTU && val > 65535 - 15)
			return -EINVAL;
		if (type == RTAX_HOPLIMIT && val > 255)
			return -EINVAL;
		if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
			return -EINVAL;
		fi->fib_metrics[type - 1] = val;
	}

	if (ecn_ca)
		fi->fib_metrics[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA;

	return 0;
}
struct fib_info *fib_create_info(struct fib_config *cfg)
{
	int err;
	struct fib_info *fi = NULL;
	struct fib_info *ofi;
	int nhs = 1;
	struct net *net = cfg->fc_nlinfo.nl_net;

	if (cfg->fc_type > RTN_MAX)
		goto err_inval;

	/* Fast check to catch the most weird cases */
	if (fib_props[cfg->fc_type].scope > cfg->fc_scope)
		goto err_inval;

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (cfg->fc_mp) {
		nhs = fib_count_nexthops(cfg->fc_mp, cfg->fc_mp_len);
		if (nhs == 0)
			goto err_inval;
	}
#endif

	err = -ENOBUFS;
	if (fib_info_cnt >= fib_info_hash_size) {
		unsigned int new_size = fib_info_hash_size << 1;
		struct hlist_head *new_info_hash;
		struct hlist_head *new_laddrhash;
		unsigned int bytes;

		if (!new_size)
			new_size = 16;
		bytes = new_size * sizeof(struct hlist_head *);
		new_info_hash = fib_info_hash_alloc(bytes);
		new_laddrhash = fib_info_hash_alloc(bytes);
		if (!new_info_hash || !new_laddrhash) {
			fib_info_hash_free(new_info_hash, bytes);
			fib_info_hash_free(new_laddrhash, bytes);
		} else
			fib_info_hash_move(new_info_hash, new_laddrhash, new_size);

		if (!fib_info_hash_size)
			goto failure;
	}

	fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
	if (!fi)
		goto failure;
	fib_info_cnt++;
	if (cfg->fc_mx) {
		fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
		if (!fi->fib_metrics)
			goto failure;
	} else
		fi->fib_metrics = (u32 *) dst_default_metrics;

	fi->fib_net = net;
	fi->fib_protocol = cfg->fc_protocol;
	fi->fib_scope = cfg->fc_scope;
	fi->fib_flags = cfg->fc_flags;
	fi->fib_priority = cfg->fc_priority;
	fi->fib_prefsrc = cfg->fc_prefsrc;
	fi->fib_type = cfg->fc_type;

	fi->fib_nhs = nhs;
	change_nexthops(fi) {
		nexthop_nh->nh_parent = fi;
		nexthop_nh->nh_pcpu_rth_output = alloc_percpu(struct rtable __rcu *);
		if (!nexthop_nh->nh_pcpu_rth_output)
			goto failure;
	} endfor_nexthops(fi)

	err = fib_convert_metrics(fi, cfg);
	if (err)
		goto failure;

	if (cfg->fc_mp) {
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		err = fib_get_nhs(fi, cfg->fc_mp, cfg->fc_mp_len, cfg);
		if (err != 0)
			goto failure;
		if (cfg->fc_oif && fi->fib_nh->nh_oif != cfg->fc_oif)
			goto err_inval;
		if (cfg->fc_gw && fi->fib_nh->nh_gw != cfg->fc_gw)
			goto err_inval;
#ifdef CONFIG_IP_ROUTE_CLASSID
		if (cfg->fc_flow && fi->fib_nh->nh_tclassid != cfg->fc_flow)
			goto err_inval;
#endif
#else
		goto err_inval;
#endif
	} else {
		struct fib_nh *nh = fi->fib_nh;

		if (cfg->fc_encap) {
			struct lwtunnel_state *lwtstate;
			struct net_device *dev = NULL;

			if (cfg->fc_encap_type == LWTUNNEL_ENCAP_NONE)
				goto err_inval;
			if (cfg->fc_oif)
				dev = __dev_get_by_index(net, cfg->fc_oif);
			err = lwtunnel_build_state(dev, cfg->fc_encap_type,
						   cfg->fc_encap, AF_INET, cfg,
						   &lwtstate);
			if (err)
				goto failure;

			nh->nh_lwtstate = lwtstate_get(lwtstate);
		}
		nh->nh_oif = cfg->fc_oif;
		nh->nh_gw = cfg->fc_gw;
		nh->nh_flags = cfg->fc_flags;
#ifdef CONFIG_IP_ROUTE_CLASSID
		nh->nh_tclassid = cfg->fc_flow;
		if (nh->nh_tclassid)
			fi->fib_net->ipv4.fib_num_tclassid_users++;
#endif
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		nh->nh_weight = 1;
#endif
	}

	if (fib_props[cfg->fc_type].error) {
		if (cfg->fc_gw || cfg->fc_oif || cfg->fc_mp)
			goto err_inval;
		goto link_it;
	} else {
		switch (cfg->fc_type) {
		case RTN_UNICAST:
		case RTN_LOCAL:
		case RTN_BROADCAST:
		case RTN_ANYCAST:
		case RTN_MULTICAST:
			break;
		default:
			goto err_inval;
		}
	}

	if (cfg->fc_scope > RT_SCOPE_HOST)
		goto err_inval;

	if (cfg->fc_scope == RT_SCOPE_HOST) {
		struct fib_nh *nh = fi->fib_nh;

		/* Local address is added. */
		if (nhs != 1 || nh->nh_gw)
			goto err_inval;
		nh->nh_scope = RT_SCOPE_NOWHERE;
		nh->nh_dev = dev_get_by_index(net, fi->fib_nh->nh_oif);
		err = -ENODEV;
		if (!nh->nh_dev)
			goto failure;
	} else {
		int linkdown = 0;

		change_nexthops(fi) {
			err = fib_check_nh(cfg, fi, nexthop_nh);
			if (err != 0)
				goto failure;
			if (nexthop_nh->nh_flags & RTNH_F_LINKDOWN)
				linkdown++;
		} endfor_nexthops(fi)
		if (linkdown == fi->fib_nhs)
			fi->fib_flags |= RTNH_F_LINKDOWN;
	}

	if (fi->fib_prefsrc && !fib_valid_prefsrc(cfg, fi->fib_prefsrc))
		goto err_inval;

	change_nexthops(fi) {
		fib_info_update_nh_saddr(net, nexthop_nh);
		fib_add_weight(fi, nexthop_nh);
	} endfor_nexthops(fi)

	fib_rebalance(fi);

link_it:
	ofi = fib_find_info(fi);
	if (ofi) {
		fi->fib_dead = 1;
		free_fib_info(fi);
		ofi->fib_treeref++;
		return ofi;
	}

	fi->fib_treeref++;
	atomic_inc(&fi->fib_clntref);
	spin_lock_bh(&fib_info_lock);
	hlist_add_head(&fi->fib_hash,
		       &fib_info_hash[fib_info_hashfn(fi)]);
	if (fi->fib_prefsrc) {
		struct hlist_head *head;

		head = &fib_info_laddrhash[fib_laddr_hashfn(fi->fib_prefsrc)];
		hlist_add_head(&fi->fib_lhash, head);
	}
	change_nexthops(fi) {
		struct hlist_head *head;
		unsigned int hash;

		if (!nexthop_nh->nh_dev)
			continue;
		hash = fib_devindex_hashfn(nexthop_nh->nh_dev->ifindex);
		head = &fib_info_devhash[hash];
		hlist_add_head(&nexthop_nh->nh_hash, head);
	} endfor_nexthops(fi)
	spin_unlock_bh(&fib_info_lock);
	return fi;

err_inval:
	err = -EINVAL;

failure:
	if (fi) {
		fi->fib_dead = 1;
		free_fib_info(fi);
	}

	return ERR_PTR(err);
}
int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
		  u32 tb_id, u8 type, __be32 dst, int dst_len, u8 tos,
		  struct fib_info *fi, unsigned int flags)
{
	struct nlmsghdr *nlh;
	struct rtmsg *rtm;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), flags);
	if (!nlh)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
	rtm->rtm_family = AF_INET;
	rtm->rtm_dst_len = dst_len;
	rtm->rtm_src_len = 0;
	rtm->rtm_tos = tos;
	if (tb_id < 256)
		rtm->rtm_table = tb_id;
	else
		rtm->rtm_table = RT_TABLE_COMPAT;
	if (nla_put_u32(skb, RTA_TABLE, tb_id))
		goto nla_put_failure;
	rtm->rtm_type = type;
	rtm->rtm_flags = fi->fib_flags;
	rtm->rtm_scope = fi->fib_scope;
	rtm->rtm_protocol = fi->fib_protocol;

	if (rtm->rtm_dst_len &&
	    nla_put_in_addr(skb, RTA_DST, dst))
		goto nla_put_failure;
	if (fi->fib_priority &&
	    nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority))
		goto nla_put_failure;
	if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0)
		goto nla_put_failure;

	if (fi->fib_prefsrc &&
	    nla_put_in_addr(skb, RTA_PREFSRC, fi->fib_prefsrc))
		goto nla_put_failure;
	if (fi->fib_nhs == 1) {
		struct in_device *in_dev;

		if (fi->fib_nh->nh_gw &&
		    nla_put_in_addr(skb, RTA_GATEWAY, fi->fib_nh->nh_gw))
			goto nla_put_failure;
		if (fi->fib_nh->nh_oif &&
		    nla_put_u32(skb, RTA_OIF, fi->fib_nh->nh_oif))
			goto nla_put_failure;
		if (fi->fib_nh->nh_flags & RTNH_F_LINKDOWN) {
			in_dev = __in_dev_get_rtnl(fi->fib_nh->nh_dev);
			if (in_dev &&
			    IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev))
				rtm->rtm_flags |= RTNH_F_DEAD;
		}
#ifdef CONFIG_IP_ROUTE_CLASSID
		if (fi->fib_nh[0].nh_tclassid &&
		    nla_put_u32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid))
			goto nla_put_failure;
#endif
		if (fi->fib_nh->nh_lwtstate)
			lwtunnel_fill_encap(skb, fi->fib_nh->nh_lwtstate);
	}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (fi->fib_nhs > 1) {
		struct rtnexthop *rtnh;
		struct nlattr *mp;

		mp = nla_nest_start(skb, RTA_MULTIPATH);
		if (!mp)
			goto nla_put_failure;

		for_nexthops(fi) {
			struct in_device *in_dev;

			rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
			if (!rtnh)
				goto nla_put_failure;

			rtnh->rtnh_flags = nh->nh_flags & 0xFF;
			if (nh->nh_flags & RTNH_F_LINKDOWN) {
				in_dev = __in_dev_get_rtnl(nh->nh_dev);
				if (in_dev &&
				    IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev))
					rtnh->rtnh_flags |= RTNH_F_DEAD;
			}
			rtnh->rtnh_hops = nh->nh_weight - 1;
			rtnh->rtnh_ifindex = nh->nh_oif;

			if (nh->nh_gw &&
			    nla_put_in_addr(skb, RTA_GATEWAY, nh->nh_gw))
				goto nla_put_failure;
#ifdef CONFIG_IP_ROUTE_CLASSID
			if (nh->nh_tclassid &&
			    nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid))
				goto nla_put_failure;
#endif
			if (nh->nh_lwtstate)
				lwtunnel_fill_encap(skb, nh->nh_lwtstate);
			/* length of rtnetlink header + attributes */
			rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *) rtnh;
		} endfor_nexthops(fi);

		nla_nest_end(skb, mp);
	}
#endif
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
/* Update FIB if:
 * - local address disappeared -> we must delete all the entries
 *   referencing it as prefsrc.
 * - device went down -> we must shutdown all nexthops going via it.
 */
int fib_sync_down_addr(struct net *net, __be32 local)
{
	int ret = 0;
	unsigned int hash = fib_laddr_hashfn(local);
	struct hlist_head *head = &fib_info_laddrhash[hash];
	struct fib_info *fi;

	if (!fib_info_laddrhash || local == 0)
		return 0;

	hlist_for_each_entry(fi, head, fib_lhash) {
		if (!net_eq(fi->fib_net, net))
			continue;
		if (fi->fib_prefsrc == local) {
			fi->fib_flags |= RTNH_F_DEAD;
			ret++;
		}
	}
	return ret;
}
/* Event              force Flags           Description
 * NETDEV_CHANGE      0     LINKDOWN        Carrier OFF, not for scope host
 * NETDEV_DOWN        0     LINKDOWN|DEAD   Link down, not for scope host
 * NETDEV_DOWN        1     LINKDOWN|DEAD   Last address removed
 * NETDEV_UNREGISTER  1     LINKDOWN|DEAD   Device removed
 */
int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force)
{
	int ret = 0;
	int scope = RT_SCOPE_NOWHERE;
	struct fib_info *prev_fi = NULL;
	unsigned int hash = fib_devindex_hashfn(dev->ifindex);
	struct hlist_head *head = &fib_info_devhash[hash];
	struct fib_nh *nh;

	if (force)
		scope = -1;

	hlist_for_each_entry(nh, head, nh_hash) {
		struct fib_info *fi = nh->nh_parent;
		int dead;

		BUG_ON(!fi->fib_nhs);
		if (nh->nh_dev != dev || fi == prev_fi)
			continue;
		prev_fi = fi;
		dead = 0;
		change_nexthops(fi) {
			if (nexthop_nh->nh_flags & RTNH_F_DEAD)
				dead++;
			else if (nexthop_nh->nh_dev == dev &&
				 nexthop_nh->nh_scope != scope) {
				switch (event) {
				case NETDEV_DOWN:
				case NETDEV_UNREGISTER:
					nexthop_nh->nh_flags |= RTNH_F_DEAD;
					/* fall through */
				case NETDEV_CHANGE:
					nexthop_nh->nh_flags |= RTNH_F_LINKDOWN;
					break;
				}
				dead++;
			}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
			if (event == NETDEV_UNREGISTER &&
			    nexthop_nh->nh_dev == dev) {
				dead = fi->fib_nhs;
				break;
			}
#endif
		} endfor_nexthops(fi)
		if (dead == fi->fib_nhs) {
			switch (event) {
			case NETDEV_DOWN:
			case NETDEV_UNREGISTER:
				fi->fib_flags |= RTNH_F_DEAD;
				/* fall through */
			case NETDEV_CHANGE:
				fi->fib_flags |= RTNH_F_LINKDOWN;
				break;
			}
			ret++;
		}

		fib_rebalance(fi);
	}

	return ret;
}
/* Must be invoked inside of an RCU-protected region. */
void fib_select_default(const struct flowi4 *flp, struct fib_result *res)
{
	struct fib_info *fi = NULL, *last_resort = NULL;
	struct hlist_head *fa_head = res->fa_head;
	struct fib_table *tb = res->table;
	u8 slen = 32 - res->prefixlen;
	int order = -1, last_idx = -1;
	struct fib_alias *fa, *fa1 = NULL;
	u32 last_prio = res->fi->fib_priority;
	u8 last_tos = 0;

	hlist_for_each_entry_rcu(fa, fa_head, fa_list) {
		struct fib_info *next_fi = fa->fa_info;

		if (fa->fa_slen != slen)
			continue;
		if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos)
			continue;
		if (fa->tb_id != tb->tb_id)
			continue;
		if (next_fi->fib_priority > last_prio &&
		    fa->fa_tos == last_tos) {
			if (last_tos)
				continue;
			break;
		}
		if (next_fi->fib_flags & RTNH_F_DEAD)
			continue;
		last_tos = fa->fa_tos;
		last_prio = next_fi->fib_priority;

		if (next_fi->fib_scope != res->scope ||
		    fa->fa_type != RTN_UNICAST)
			continue;
		if (!next_fi->fib_nh[0].nh_gw ||
		    next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
			continue;

		fib_alias_accessed(fa);

		if (!fi) {
			if (next_fi != res->fi)
				break;
			fa1 = fa;
		} else if (!fib_detect_death(fi, order, &last_resort,
					     &last_idx, fa1->fa_default)) {
			fib_result_assign(res, fi);
			fa1->fa_default = order;
			goto out;
		}
		fi = next_fi;
		order++;
	}

	if (order <= 0 || !fi) {
		if (fa1)
			fa1->fa_default = -1;
		goto out;
	}

	if (!fib_detect_death(fi, order, &last_resort, &last_idx,
			      fa1->fa_default)) {
		fib_result_assign(res, fi);
		fa1->fa_default = order;
		goto out;
	}

	if (last_idx >= 0)
		fib_result_assign(res, last_resort);
	fa1->fa_default = last_idx;
out:
	return;
}
/*
 * Dead device goes up. We wake up dead nexthops.
 * It makes sense only for multipath routes.
 */
int fib_sync_up(struct net_device *dev, unsigned int nh_flags)
{
	struct fib_info *prev_fi;
	unsigned int hash;
	struct hlist_head *head;
	struct fib_nh *nh;
	int ret;

	if (!(dev->flags & IFF_UP))
		return 0;

	if (nh_flags & RTNH_F_DEAD) {
		unsigned int flags = dev_get_flags(dev);

		if (flags & (IFF_RUNNING | IFF_LOWER_UP))
			nh_flags |= RTNH_F_LINKDOWN;
	}

	prev_fi = NULL;
	hash = fib_devindex_hashfn(dev->ifindex);
	head = &fib_info_devhash[hash];
	ret = 0;

	hlist_for_each_entry(nh, head, nh_hash) {
		struct fib_info *fi = nh->nh_parent;
		int alive;

		BUG_ON(!fi->fib_nhs);
		if (nh->nh_dev != dev || fi == prev_fi)
			continue;

		prev_fi = fi;
		alive = 0;
		change_nexthops(fi) {
			if (!(nexthop_nh->nh_flags & nh_flags)) {
				alive++;
				continue;
			}
			if (!nexthop_nh->nh_dev ||
			    !(nexthop_nh->nh_dev->flags & IFF_UP))
				continue;
			if (nexthop_nh->nh_dev != dev ||
			    !__in_dev_get_rtnl(dev))
				continue;
			alive++;
			nexthop_nh->nh_flags &= ~nh_flags;
		} endfor_nexthops(fi)

		if (alive > 0) {
			fi->fib_flags &= ~nh_flags;
			ret++;
		}

		fib_rebalance(fi);
	}

	return ret;
}
#ifdef CONFIG_IP_ROUTE_MULTIPATH

void fib_select_multipath(struct fib_result *res, int hash)
{
	struct fib_info *fi = res->fi;

	for_nexthops(fi) {
		if (hash > atomic_read(&nh->nh_upper_bound))
			continue;

		res->nh_sel = nhsel;
		return;
	} endfor_nexthops(fi);

	/* Race condition: route has just become dead. */
	res->nh_sel = 0;
}
#endif
void fib_select_path(struct net *net, struct fib_result *res,
		     struct flowi4 *fl4, int mp_hash)
{
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (res->fi->fib_nhs > 1 && fl4->flowi4_oif == 0) {
		if (mp_hash < 0)
			mp_hash = get_hash_from_flowi4(fl4) >> 1;

		fib_select_multipath(res, mp_hash);
	}
	else
#endif
	if (!res->prefixlen &&
	    res->table->tb_num_default > 1 &&
	    res->type == RTN_UNICAST && !fl4->flowi4_oif)
		fib_select_default(fl4, res);

	if (!fl4->saddr)
		fl4->saddr = FIB_RES_PREFSRC(net, *res);
}
EXPORT_SYMBOL_GPL(fib_select_path);