/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		IPv4 Forwarding Information Base: semantics.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/slab.h>

#include <net/protocol.h>
#include <net/route.h>
#include <net/ip_fib.h>
#include <net/netlink.h>
#include <net/nexthop.h>
#include <net/lwtunnel.h>

#include "fib_lookup.h"
static DEFINE_SPINLOCK(fib_info_lock);
static struct hlist_head *fib_info_hash;
static struct hlist_head *fib_info_laddrhash;
static unsigned int fib_info_hash_size;
static unsigned int fib_info_cnt;

#define DEVINDEX_HASHBITS 8
#define DEVINDEX_HASHSIZE (1U << DEVINDEX_HASHBITS)
static struct hlist_head fib_info_devhash[DEVINDEX_HASHSIZE];
#ifdef CONFIG_IP_ROUTE_MULTIPATH

#define for_nexthops(fi) { \
	int nhsel; const struct fib_nh *nh; \
	for (nhsel = 0, nh = (fi)->fib_nh; \
	     nhsel < (fi)->fib_nhs; \
	     nh++, nhsel++)

#define change_nexthops(fi) { \
	int nhsel; struct fib_nh *nexthop_nh; \
	for (nhsel = 0, nexthop_nh = (struct fib_nh *)((fi)->fib_nh); \
	     nhsel < (fi)->fib_nhs; \
	     nexthop_nh++, nhsel++)

#else /* CONFIG_IP_ROUTE_MULTIPATH */

/* Hope that gcc will optimize this away and get rid of the dummy loop */

#define for_nexthops(fi) { \
	int nhsel; const struct fib_nh *nh = (fi)->fib_nh; \
	for (nhsel = 0; nhsel < 1; nhsel++)

#define change_nexthops(fi) { \
	int nhsel; \
	struct fib_nh *nexthop_nh = (struct fib_nh *)((fi)->fib_nh); \
	for (nhsel = 0; nhsel < 1; nhsel++)

#endif /* CONFIG_IP_ROUTE_MULTIPATH */

#define endfor_nexthops(fi) }
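
/*
 * Editorial note (not in the original source): for_nexthops() and
 * change_nexthops() are used together with endfor_nexthops() to walk the
 * nexthop array embedded in a fib_info, e.g.:
 *
 *	for_nexthops(fi) {
 *		if (nh->nh_flags & RTNH_F_DEAD)
 *			continue;
 *		total += nh->nh_weight;
 *	} endfor_nexthops(fi);
 *
 * With CONFIG_IP_ROUTE_MULTIPATH disabled the "loop" degenerates to a
 * single pass over the one embedded nexthop.
 */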
const struct fib_prop fib_props[RTN_MAX + 1] = {
	[RTN_UNSPEC]	  = { .error = 0,		.scope = RT_SCOPE_NOWHERE },
	[RTN_UNICAST]	  = { .error = 0,		.scope = RT_SCOPE_UNIVERSE },
	[RTN_LOCAL]	  = { .error = 0,		.scope = RT_SCOPE_HOST },
	[RTN_BROADCAST]	  = { .error = 0,		.scope = RT_SCOPE_LINK },
	[RTN_ANYCAST]	  = { .error = 0,		.scope = RT_SCOPE_LINK },
	[RTN_MULTICAST]	  = { .error = 0,		.scope = RT_SCOPE_UNIVERSE },
	[RTN_BLACKHOLE]	  = { .error = -EINVAL,		.scope = RT_SCOPE_UNIVERSE },
	[RTN_UNREACHABLE] = { .error = -EHOSTUNREACH,	.scope = RT_SCOPE_UNIVERSE },
	[RTN_PROHIBIT]	  = { .error = -EACCES,		.scope = RT_SCOPE_UNIVERSE },
	[RTN_THROW]	  = { .error = -EAGAIN,		.scope = RT_SCOPE_UNIVERSE },
	[RTN_NAT]	  = { .error = -EINVAL,		.scope = RT_SCOPE_NOWHERE },
	[RTN_XRESOLVE]	  = { .error = -EINVAL,		.scope = RT_SCOPE_NOWHERE },
};
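
/*
 * Editorial note (not in the original source): fib_props is consulted by
 * fib_create_info() below; .scope acts as a sanity bound on the scope a
 * route of that type may be configured with, and a non-zero .error marks
 * "reject"-style route types (blackhole, unreachable, prohibit, ...) that
 * must not carry a gateway, device or multipath specification.
 */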
static void rt_fibinfo_free(struct rtable __rcu **rtp)
{
	struct rtable *rt = rcu_dereference_protected(*rtp, 1);

	/* Not even needed : RCU_INIT_POINTER(*rtp, NULL);
	 * because we waited an RCU grace period before calling
	 * free_fib_info_rcu()
	 */
static void free_nh_exceptions(struct fib_nh *nh)
{
	struct fnhe_hash_bucket *hash;
	int i;

	hash = rcu_dereference_protected(nh->nh_exceptions, 1);
	if (!hash)
		return;
	for (i = 0; i < FNHE_HASH_SIZE; i++) {
		struct fib_nh_exception *fnhe;

		fnhe = rcu_dereference_protected(hash[i].chain, 1);
		while (fnhe) {
			struct fib_nh_exception *next;

			next = rcu_dereference_protected(fnhe->fnhe_next, 1);

			rt_fibinfo_free(&fnhe->fnhe_rth_input);
			rt_fibinfo_free(&fnhe->fnhe_rth_output);
static void rt_fibinfo_free_cpus(struct rtable __rcu * __percpu *rtp)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		rt = rcu_dereference_protected(*per_cpu_ptr(rtp, cpu), 1);
/* Release a nexthop info record */
static void free_fib_info_rcu(struct rcu_head *head)
{
	struct fib_info *fi = container_of(head, struct fib_info, rcu);

	change_nexthops(fi) {
		if (nexthop_nh->nh_dev)
			dev_put(nexthop_nh->nh_dev);
		lwtstate_put(nexthop_nh->nh_lwtstate);
		free_nh_exceptions(nexthop_nh);
		rt_fibinfo_free_cpus(nexthop_nh->nh_pcpu_rth_output);
		rt_fibinfo_free(&nexthop_nh->nh_rth_input);
	} endfor_nexthops(fi);

	if (fi->fib_metrics != (u32 *) dst_default_metrics)
		kfree(fi->fib_metrics);
void free_fib_info(struct fib_info *fi)
{
	if (fi->fib_dead == 0) {
		pr_warn("Freeing alive fib_info %p\n", fi);
		return;
	}
#ifdef CONFIG_IP_ROUTE_CLASSID
	change_nexthops(fi) {
		if (nexthop_nh->nh_tclassid)
			fi->fib_net->ipv4.fib_num_tclassid_users--;
	} endfor_nexthops(fi);
#endif
	call_rcu(&fi->rcu, free_fib_info_rcu);
}
EXPORT_SYMBOL_GPL(free_fib_info);
void fib_release_info(struct fib_info *fi)
{
	spin_lock_bh(&fib_info_lock);
	if (fi && --fi->fib_treeref == 0) {
		hlist_del(&fi->fib_hash);
		if (fi->fib_prefsrc)
			hlist_del(&fi->fib_lhash);
		change_nexthops(fi) {
			if (!nexthop_nh->nh_dev)
				continue;
			hlist_del(&nexthop_nh->nh_hash);
		} endfor_nexthops(fi)
	}
	spin_unlock_bh(&fib_info_lock);
}
static inline int nh_comp(const struct fib_info *fi, const struct fib_info *ofi)
{
	const struct fib_nh *onh = ofi->fib_nh;

	for_nexthops(fi) {
		if (nh->nh_oif != onh->nh_oif ||
		    nh->nh_gw != onh->nh_gw ||
		    nh->nh_scope != onh->nh_scope ||
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		    nh->nh_weight != onh->nh_weight ||
#endif
#ifdef CONFIG_IP_ROUTE_CLASSID
		    nh->nh_tclassid != onh->nh_tclassid ||
#endif
		    lwtunnel_cmp_encap(nh->nh_lwtstate, onh->nh_lwtstate) ||
		    ((nh->nh_flags ^ onh->nh_flags) & ~RTNH_COMPARE_MASK))
			return -1;
		onh++;
	} endfor_nexthops(fi);
	return 0;
}
static inline unsigned int fib_devindex_hashfn(unsigned int val)
{
	unsigned int mask = DEVINDEX_HASHSIZE - 1;

	return (val ^
		(val >> DEVINDEX_HASHBITS) ^
		(val >> (DEVINDEX_HASHBITS * 2))) & mask;
}
static inline unsigned int fib_info_hashfn(const struct fib_info *fi)
{
	unsigned int mask = (fib_info_hash_size - 1);
	unsigned int val = fi->fib_nhs;

	val ^= (fi->fib_protocol << 8) | fi->fib_scope;
	val ^= (__force u32)fi->fib_prefsrc;
	val ^= fi->fib_priority;
	for_nexthops(fi) {
		val ^= fib_devindex_hashfn(nh->nh_oif);
	} endfor_nexthops(fi)

	return (val ^ (val >> 7) ^ (val >> 12)) & mask;
}
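
/*
 * Editorial note (not in the original source): this hash mixes the fields
 * that identify a shareable fib_info (protocol, scope, preferred source,
 * priority and the nexthop devices); fib_find_info() then performs the
 * exact comparison within the selected bucket.  Because the mask depends
 * on fib_info_hash_size, fib_info_hash_move() recomputes the hash for
 * every entry when the table grows.
 */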
static struct fib_info *fib_find_info(const struct fib_info *nfi)
{
	struct hlist_head *head;
	struct fib_info *fi;
	unsigned int hash;

	hash = fib_info_hashfn(nfi);
	head = &fib_info_hash[hash];

	hlist_for_each_entry(fi, head, fib_hash) {
		if (!net_eq(fi->fib_net, nfi->fib_net))
			continue;
		if (fi->fib_nhs != nfi->fib_nhs)
			continue;
		if (nfi->fib_protocol == fi->fib_protocol &&
		    nfi->fib_scope == fi->fib_scope &&
		    nfi->fib_prefsrc == fi->fib_prefsrc &&
		    nfi->fib_priority == fi->fib_priority &&
		    nfi->fib_type == fi->fib_type &&
		    memcmp(nfi->fib_metrics, fi->fib_metrics,
			   sizeof(u32) * RTAX_MAX) == 0 &&
		    !((nfi->fib_flags ^ fi->fib_flags) & ~RTNH_COMPARE_MASK) &&
		    (nfi->fib_nhs == 0 || nh_comp(fi, nfi) == 0))
			return fi;
	}

	return NULL;
}
/* Check that the gateway is already configured.
 * Used only by the redirect accept routine.
 */
int ip_fib_check_default(__be32 gw, struct net_device *dev)
{
	struct hlist_head *head;
	struct fib_nh *nh;
	unsigned int hash;

	spin_lock(&fib_info_lock);
	hash = fib_devindex_hashfn(dev->ifindex);
	head = &fib_info_devhash[hash];
	hlist_for_each_entry(nh, head, nh_hash) {
		if (nh->nh_dev == dev &&
		    nh->nh_gw == gw &&
		    !(nh->nh_flags & RTNH_F_DEAD)) {
			spin_unlock(&fib_info_lock);
			return 0;
		}
	}
	spin_unlock(&fib_info_lock);

	return -1;
}
static inline size_t fib_nlmsg_size(struct fib_info *fi)
{
	size_t payload = NLMSG_ALIGN(sizeof(struct rtmsg))
			 + nla_total_size(4) /* RTA_TABLE */
			 + nla_total_size(4) /* RTA_DST */
			 + nla_total_size(4) /* RTA_PRIORITY */
			 + nla_total_size(4) /* RTA_PREFSRC */
			 + nla_total_size(TCP_CA_NAME_MAX); /* RTAX_CC_ALGO */

	/* space for nested metrics */
	payload += nla_total_size((RTAX_MAX * nla_total_size(4)));

	if (fi->fib_nhs) {
		size_t nh_encapsize = 0;
		/* Also handles the special case fib_nhs == 1 */

		/* each nexthop is packed in an attribute */
		size_t nhsize = nla_total_size(sizeof(struct rtnexthop));

		/* may contain flow and gateway attribute */
		nhsize += 2 * nla_total_size(4);

		/* grab encap info */
		for_nexthops(fi) {
			if (nh->nh_lwtstate) {
				nh_encapsize += lwtunnel_get_encap_size(
						nh->nh_lwtstate);
				nh_encapsize += nla_total_size(2);
			}
		} endfor_nexthops(fi);

		/* all nexthops are packed in a nested attribute */
		payload += nla_total_size((fi->fib_nhs * nhsize) +
					  nh_encapsize);
	}

	return payload;
}
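
/*
 * Editorial note (not in the original source): the value computed above is
 * only an upper bound on the netlink message size.  rtmsg_fib() uses it to
 * size the skb it allocates, and a later -EMSGSIZE from fib_dump_info() is
 * treated as a bug in this estimate (see the WARN_ON() below).
 */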
void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
	       int dst_len, u32 tb_id, const struct nl_info *info,
	       unsigned int nlm_flags)
{
	struct sk_buff *skb;
	u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
	int err = -ENOBUFS;

	skb = nlmsg_new(fib_nlmsg_size(fa->fa_info), GFP_KERNEL);
	if (!skb)
		goto errout;

	err = fib_dump_info(skb, info->portid, seq, event, tb_id,
			    fa->fa_type, key, dst_len,
			    fa->fa_tos, fa->fa_info, nlm_flags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in fib_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, info->nl_net, info->portid, RTNLGRP_IPV4_ROUTE,
		    info->nlh, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(info->nl_net, RTNLGRP_IPV4_ROUTE, err);
}
static int fib_detect_death(struct fib_info *fi, int order,
			    struct fib_info **last_resort, int *last_idx,
			    int dflt)
{
	struct neighbour *n;
	int state = NUD_NONE;

	n = neigh_lookup(&arp_tbl, &fi->fib_nh[0].nh_gw, fi->fib_dev);
	if (n) {
		state = n->nud_state;
		neigh_release(n);
	}
	if (state == NUD_REACHABLE)
		return 0;
	if ((state & NUD_VALID) && order != dflt)
		return 0;
	if ((state & NUD_VALID) ||
	    (*last_idx < 0 && order > dflt && state != NUD_INCOMPLETE)) {
		*last_resort = fi;
		*last_idx = order;
	}
	return 1;
}
#ifdef CONFIG_IP_ROUTE_MULTIPATH

static int fib_count_nexthops(struct rtnexthop *rtnh, int remaining)
{
	int nhs = 0;

	while (rtnh_ok(rtnh, remaining)) {
		nhs++;
		rtnh = rtnh_next(rtnh, &remaining);
	}
	/* leftover implies invalid nexthop configuration, discard it */
	return remaining > 0 ? 0 : nhs;
}
static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
		       int remaining, struct fib_config *cfg)
{
	int ret;

	change_nexthops(fi) {
		int attrlen;

		if (!rtnh_ok(rtnh, remaining))
			return -EINVAL;
		if (rtnh->rtnh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
			return -EINVAL;

		nexthop_nh->nh_flags =
			(cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags;
		nexthop_nh->nh_oif = rtnh->rtnh_ifindex;
		nexthop_nh->nh_weight = rtnh->rtnh_hops + 1;

		attrlen = rtnh_attrlen(rtnh);
		if (attrlen > 0) {
			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);

			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
			nexthop_nh->nh_gw = nla ? nla_get_in_addr(nla) : 0;
#ifdef CONFIG_IP_ROUTE_CLASSID
			nla = nla_find(attrs, attrlen, RTA_FLOW);
			nexthop_nh->nh_tclassid = nla ? nla_get_u32(nla) : 0;
			if (nexthop_nh->nh_tclassid)
				fi->fib_net->ipv4.fib_num_tclassid_users++;
#endif
			nla = nla_find(attrs, attrlen, RTA_ENCAP);
			if (nla) {
				struct lwtunnel_state *lwtstate;
				struct nlattr *nla_entype;

				nla_entype = nla_find(attrs, attrlen,
						      RTA_ENCAP_TYPE);
				ret = lwtunnel_build_state(nla_get_u16(
							   nla_entype),
							   nla, AF_INET, cfg,
							   &lwtstate);
				nexthop_nh->nh_lwtstate =
					lwtstate_get(lwtstate);
			}
		}

		rtnh = rtnh_next(rtnh, &remaining);
	} endfor_nexthops(fi);

	return 0;
}
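
/*
 * Editorial note (not in the original source): the walk above parses an
 * RTA_MULTIPATH attribute, which is a packed sequence of
 *
 *	struct rtnexthop { rtnh_len, rtnh_flags, rtnh_hops, rtnh_ifindex }
 *	[ optional nested attributes: RTA_GATEWAY, RTA_FLOW,
 *	  RTA_ENCAP_TYPE / RTA_ENCAP ]
 *
 * one block per nexthop; rtnh_ok()/rtnh_next() step through the blocks and
 * rtnh_attrs()/rtnh_attrlen() expose the per-nexthop attributes.
 */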
static void fib_rebalance(struct fib_info *fi)
{
	int total;
	int w;
	struct in_device *in_dev;

	total = 0;
	for_nexthops(fi) {
		if (nh->nh_flags & RTNH_F_DEAD)
			continue;

		in_dev = __in_dev_get_rtnl(nh->nh_dev);
		if (in_dev &&
		    IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
		    nh->nh_flags & RTNH_F_LINKDOWN)
			continue;

		total += nh->nh_weight;
	} endfor_nexthops(fi);

	w = 0;
	change_nexthops(fi) {
		int upper_bound;

		in_dev = __in_dev_get_rtnl(nexthop_nh->nh_dev);
		if (nexthop_nh->nh_flags & RTNH_F_DEAD) {
			upper_bound = -1;
		} else if (in_dev &&
			   IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
			   nexthop_nh->nh_flags & RTNH_F_LINKDOWN) {
			upper_bound = -1;
		} else {
			w += nexthop_nh->nh_weight;
			upper_bound = DIV_ROUND_CLOSEST_ULL((u64)w << 31,
							    total) - 1;
		}

		atomic_set(&nexthop_nh->nh_upper_bound, upper_bound);
	} endfor_nexthops(fi);
}

static inline void fib_add_weight(struct fib_info *fi,
				  const struct fib_nh *nh)
{
	fi->fib_weight += nh->nh_weight;
}
#else /* CONFIG_IP_ROUTE_MULTIPATH */

#define fib_rebalance(fi) do { } while (0)
#define fib_add_weight(fi, nh) do { } while (0)

#endif /* CONFIG_IP_ROUTE_MULTIPATH */
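
/*
 * Editorial note (not in the original source): fib_rebalance() implements
 * hash-threshold multipath.  Each usable nexthop is assigned an upper bound
 * equal to its cumulative share of the 31-bit hash space; e.g. for two
 * nexthops with weights 1 and 2 (total 3):
 *
 *	nh0: upper_bound = round(1/3 * 2^31) - 1
 *	nh1: upper_bound = round(3/3 * 2^31) - 1 = 0x7fffffff
 *
 * fib_select_multipath() picks the first nexthop whose bound is not below
 * the flow hash; dead or ignored link-down nexthops get -1 and are skipped.
 */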
static int fib_encap_match(u16 encap_type,
			   struct nlattr *encap,
			   const struct fib_nh *nh,
			   const struct fib_config *cfg)
{
	struct lwtunnel_state *lwtstate;
	int ret, result = 0;

	if (encap_type == LWTUNNEL_ENCAP_NONE)
		return 0;

	ret = lwtunnel_build_state(encap_type, encap,
				   AF_INET, cfg, &lwtstate);
	if (!ret) {
		result = lwtunnel_cmp_encap(lwtstate, nh->nh_lwtstate);
		lwtstate_free(lwtstate);
	}

	return result;
}
int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
{
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	struct rtnexthop *rtnh;
	int remaining;
#endif

	if (cfg->fc_priority && cfg->fc_priority != fi->fib_priority)
		return 1;

	if (cfg->fc_oif || cfg->fc_gw) {
		if (cfg->fc_encap) {
			if (fib_encap_match(cfg->fc_encap_type,
					    cfg->fc_encap, fi->fib_nh, cfg))
				return 1;
		}
		if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->nh_oif) &&
		    (!cfg->fc_gw || cfg->fc_gw == fi->fib_nh->nh_gw))
			return 0;
		return 1;
	}

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	rtnh = cfg->fc_mp;
	remaining = cfg->fc_mp_len;

	for_nexthops(fi) {
		int attrlen;

		if (!rtnh_ok(rtnh, remaining))
			return -EINVAL;
		if (rtnh->rtnh_ifindex && rtnh->rtnh_ifindex != nh->nh_oif)
			return 1;

		attrlen = rtnh_attrlen(rtnh);
		if (attrlen > 0) {
			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);

			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
			if (nla && nla_get_in_addr(nla) != nh->nh_gw)
				return 1;
#ifdef CONFIG_IP_ROUTE_CLASSID
			nla = nla_find(attrs, attrlen, RTA_FLOW);
			if (nla && nla_get_u32(nla) != nh->nh_tclassid)
				return 1;
#endif
		}

		rtnh = rtnh_next(rtnh, &remaining);
	} endfor_nexthops(fi);
#endif

	return 0;
}
/*
 * The semantics of nexthops are messy for historical reasons.
 * We have to take into account that:
 *
 * a) the gateway can actually be a local interface address,
 *    so that a gatewayed route is effectively direct.
 * b) the gateway must be an on-link address, possibly
 *    described not by an ifaddr but by a direct route.
 * c) if both gateway and interface are specified, they must not
 *    contradict each other.
 * d) with tunnel routes, the gateway may not be on-link.
 *
 * Attempting to reconcile all of these (alas, self-contradictory)
 * conditions results in pretty ugly and hairy code with obscure logic.
 *
 * I chose to generalize it instead, so that the amount of code barely
 * grows but the result becomes much more general.
 *
 * Every prefix is assigned a "scope" value: "host" is a local address,
 * "link" is a direct route,
 * [ ... "site" ... "interior" ... ]
 * and "universe" is a true gateway route with global meaning.
 *
 * Every prefix refers to a set of "nexthop"s (gw, oif),
 * where gw must have narrower scope. This recursion stops
 * when gw has LOCAL scope or when the "nexthop" is declared ONLINK,
 * which means that gw is forced to be on-link.
 *
 * The code is still hairy, but now it is apparently logically
 * consistent and very flexible. For example, as a by-product it allows
 * independent exterior and interior routing processes to coexist.
 *
 * Normally it looks like this:
 *
 *	{universe prefix}  -> (gw, oif) [scope link]
 *			       |
 *			       |-> {link prefix} -> (gw, oif) [scope local]
 *						     |
 *						     |-> {local prefix} (terminal node)
 */
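
/*
 * Editorial example (not in the original source): a gatewayed route such as
 * "default via 203.0.113.1" (scope universe) is only accepted if 203.0.113.1
 * is itself covered by a narrower-scope route, typically a link-scope prefix
 * like "203.0.113.0/24 dev eth0 scope link".  Declaring the nexthop "onlink"
 * short-circuits that recursion and forces the gateway to be treated as
 * directly reachable on the given device.
 */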
static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
			struct fib_nh *nh)
{
	int err = 0;
	struct net *net;
	struct net_device *dev;

	net = cfg->fc_nlinfo.nl_net;
	if (nh->nh_gw) {
		struct fib_result res;

		if (nh->nh_flags & RTNH_F_ONLINK) {
			unsigned int addr_type;

			if (cfg->fc_scope >= RT_SCOPE_LINK)
				return -EINVAL;
			dev = __dev_get_by_index(net, nh->nh_oif);
			if (!dev)
				return -ENODEV;
			if (!(dev->flags & IFF_UP))
				return -ENETDOWN;
			addr_type = inet_addr_type_dev_table(net, dev, nh->nh_gw);
			if (addr_type != RTN_UNICAST)
				return -EINVAL;
			if (!netif_carrier_ok(dev))
				nh->nh_flags |= RTNH_F_LINKDOWN;
			nh->nh_dev = dev;
			dev_hold(dev);
			nh->nh_scope = RT_SCOPE_LINK;
			return 0;
		}
		rcu_read_lock();
		{
			struct fib_table *tbl = NULL;
			struct flowi4 fl4 = {
				.daddr = nh->nh_gw,
				.flowi4_scope = cfg->fc_scope + 1,
				.flowi4_oif = nh->nh_oif,
				.flowi4_iif = LOOPBACK_IFINDEX,
			};

			/* It is not necessary, but requires a bit of thinking */
			if (fl4.flowi4_scope < RT_SCOPE_LINK)
				fl4.flowi4_scope = RT_SCOPE_LINK;

			if (cfg->fc_table)
				tbl = fib_get_table(net, cfg->fc_table);

			if (tbl)
				err = fib_table_lookup(tbl, &fl4, &res,
						       FIB_LOOKUP_IGNORE_LINKSTATE |
						       FIB_LOOKUP_NOREF);

			/* on error or if no table given do full lookup. This
			 * is needed for example when nexthops are in the local
			 * table rather than the given table
			 */
			if (!tbl || err) {
				err = fib_lookup(net, &fl4, &res,
						 FIB_LOOKUP_IGNORE_LINKSTATE);
			}

			if (err) {
				rcu_read_unlock();
				return err;
			}
		}
		err = -EINVAL;
		if (res.type != RTN_UNICAST && res.type != RTN_LOCAL)
			goto out;
		nh->nh_scope = res.scope;
		nh->nh_oif = FIB_RES_OIF(res);
		nh->nh_dev = dev = FIB_RES_DEV(res);
		if (!dev)
			goto out;
		dev_hold(dev);
		if (!netif_carrier_ok(dev))
			nh->nh_flags |= RTNH_F_LINKDOWN;
		err = (dev->flags & IFF_UP) ? 0 : -ENETDOWN;
	} else {
		struct in_device *in_dev;

		if (nh->nh_flags & (RTNH_F_PERVASIVE | RTNH_F_ONLINK))
			return -EINVAL;

		rcu_read_lock();
		err = -ENODEV;
		in_dev = inetdev_by_index(net, nh->nh_oif);
		if (!in_dev)
			goto out;
		err = -ENETDOWN;
		if (!(in_dev->dev->flags & IFF_UP))
			goto out;
		nh->nh_dev = in_dev->dev;
		dev_hold(nh->nh_dev);
		nh->nh_scope = RT_SCOPE_HOST;
		if (!netif_carrier_ok(nh->nh_dev))
			nh->nh_flags |= RTNH_F_LINKDOWN;
		err = 0;
	}
out:
	rcu_read_unlock();
	return err;
}
static inline unsigned int fib_laddr_hashfn(__be32 val)
{
	unsigned int mask = (fib_info_hash_size - 1);

	return ((__force u32)val ^
		((__force u32)val >> 7) ^
		((__force u32)val >> 14)) & mask;
}
static struct hlist_head *fib_info_hash_alloc(int bytes)
{
	if (bytes <= PAGE_SIZE)
		return kzalloc(bytes, GFP_KERNEL);
	else
		return (struct hlist_head *)
			__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					 get_order(bytes));
}

static void fib_info_hash_free(struct hlist_head *hash, int bytes)
{
	if (!hash)
		return;

	if (bytes <= PAGE_SIZE)
		kfree(hash);
	else
		free_pages((unsigned long) hash, get_order(bytes));
}
static void fib_info_hash_move(struct hlist_head *new_info_hash,
			       struct hlist_head *new_laddrhash,
			       unsigned int new_size)
{
	struct hlist_head *old_info_hash, *old_laddrhash;
	unsigned int old_size = fib_info_hash_size;
	unsigned int i, bytes;

	spin_lock_bh(&fib_info_lock);
	old_info_hash = fib_info_hash;
	old_laddrhash = fib_info_laddrhash;
	fib_info_hash_size = new_size;

	for (i = 0; i < old_size; i++) {
		struct hlist_head *head = &fib_info_hash[i];
		struct hlist_node *n;
		struct fib_info *fi;

		hlist_for_each_entry_safe(fi, n, head, fib_hash) {
			struct hlist_head *dest;
			unsigned int new_hash;

			new_hash = fib_info_hashfn(fi);
			dest = &new_info_hash[new_hash];
			hlist_add_head(&fi->fib_hash, dest);
		}
	}
	fib_info_hash = new_info_hash;

	for (i = 0; i < old_size; i++) {
		struct hlist_head *lhead = &fib_info_laddrhash[i];
		struct hlist_node *n;
		struct fib_info *fi;

		hlist_for_each_entry_safe(fi, n, lhead, fib_lhash) {
			struct hlist_head *ldest;
			unsigned int new_hash;

			new_hash = fib_laddr_hashfn(fi->fib_prefsrc);
			ldest = &new_laddrhash[new_hash];
			hlist_add_head(&fi->fib_lhash, ldest);
		}
	}
	fib_info_laddrhash = new_laddrhash;

	spin_unlock_bh(&fib_info_lock);

	bytes = old_size * sizeof(struct hlist_head *);
	fib_info_hash_free(old_info_hash, bytes);
	fib_info_hash_free(old_laddrhash, bytes);
}
__be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
{
	nh->nh_saddr = inet_select_addr(nh->nh_dev,
					nh->nh_gw,
					nh->nh_parent->fib_scope);
	nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);

	return nh->nh_saddr;
}
static bool fib_valid_prefsrc(struct fib_config *cfg, __be32 fib_prefsrc)
{
	if (cfg->fc_type != RTN_LOCAL || !cfg->fc_dst ||
	    fib_prefsrc != cfg->fc_dst) {
		u32 tb_id = cfg->fc_table;
		int rc;

		if (tb_id == RT_TABLE_MAIN)
			tb_id = RT_TABLE_LOCAL;

		rc = inet_addr_type_table(cfg->fc_nlinfo.nl_net,
					  fib_prefsrc, tb_id);
		if (rc != RTN_LOCAL && tb_id != RT_TABLE_LOCAL) {
			rc = inet_addr_type_table(cfg->fc_nlinfo.nl_net,
						  fib_prefsrc, RT_TABLE_LOCAL);
		}

		if (rc != RTN_LOCAL)
			return false;
	}
	return true;
}
static int
fib_convert_metrics(struct fib_info *fi, const struct fib_config *cfg)
{
	bool ecn_ca = false;
	struct nlattr *nla;
	int remaining;

	if (!cfg->fc_mx)
		return 0;

	nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
		int type = nla_type(nla);
		u32 val;

		if (!type)
			continue;
		if (type > RTAX_MAX)
			return -EINVAL;

		if (type == RTAX_CC_ALGO) {
			char tmp[TCP_CA_NAME_MAX];

			nla_strlcpy(tmp, nla, sizeof(tmp));
			val = tcp_ca_get_key_by_name(tmp, &ecn_ca);
			if (val == TCP_CA_UNSPEC)
				return -EINVAL;
		} else {
			val = nla_get_u32(nla);
		}
		if (type == RTAX_ADVMSS && val > 65535 - 40)
			val = 65535 - 40;
		if (type == RTAX_MTU && val > 65535 - 15)
			val = 65535 - 15;
		if (type == RTAX_HOPLIMIT && val > 255)
			val = 255;
		if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
			return -EINVAL;
		fi->fib_metrics[type - 1] = val;
	}

	if (ecn_ca)
		fi->fib_metrics[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA;

	return 0;
}
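
/*
 * Editorial note (not in the original source): cfg->fc_mx carries the nested
 * RTA_METRICS attributes (RTAX_MTU, RTAX_ADVMSS, RTAX_HOPLIMIT, ...).
 * RTAX_CC_ALGO is special-cased: userspace passes a congestion-control
 * algorithm name, which is converted to a key with tcp_ca_get_key_by_name(),
 * and an ECN-enabling algorithm additionally sets DST_FEATURE_ECN_CA in
 * RTAX_FEATURES.
 */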
struct fib_info *fib_create_info(struct fib_config *cfg)
{
	int err;
	struct fib_info *fi = NULL;
	struct fib_info *ofi;
	int nhs = 1;
	struct net *net = cfg->fc_nlinfo.nl_net;

	if (cfg->fc_type > RTN_MAX)
		goto err_inval;

	/* Fast check to catch the most weird cases */
	if (fib_props[cfg->fc_type].scope > cfg->fc_scope)
		goto err_inval;

	if (cfg->fc_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
		goto err_inval;

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (cfg->fc_mp) {
		nhs = fib_count_nexthops(cfg->fc_mp, cfg->fc_mp_len);
		if (nhs == 0)
			goto err_inval;
	}
#endif

	err = -ENOBUFS;
	if (fib_info_cnt >= fib_info_hash_size) {
		unsigned int new_size = fib_info_hash_size << 1;
		struct hlist_head *new_info_hash;
		struct hlist_head *new_laddrhash;
		unsigned int bytes;

		if (!new_size)
			new_size = 16;
		bytes = new_size * sizeof(struct hlist_head *);
		new_info_hash = fib_info_hash_alloc(bytes);
		new_laddrhash = fib_info_hash_alloc(bytes);
		if (!new_info_hash || !new_laddrhash) {
			fib_info_hash_free(new_info_hash, bytes);
			fib_info_hash_free(new_laddrhash, bytes);
		} else
			fib_info_hash_move(new_info_hash, new_laddrhash, new_size);

		if (!fib_info_hash_size)
			goto failure;
	}

	fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
	if (!fi)
		goto failure;
	fib_info_cnt++;
	if (cfg->fc_mx) {
		fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
		if (!fi->fib_metrics)
			goto failure;
	} else
		fi->fib_metrics = (u32 *) dst_default_metrics;

	fi->fib_net = net;
	fi->fib_protocol = cfg->fc_protocol;
	fi->fib_scope = cfg->fc_scope;
	fi->fib_flags = cfg->fc_flags;
	fi->fib_priority = cfg->fc_priority;
	fi->fib_prefsrc = cfg->fc_prefsrc;
	fi->fib_type = cfg->fc_type;
	fi->fib_tb_id = cfg->fc_table;

	fi->fib_nhs = nhs;
	change_nexthops(fi) {
		nexthop_nh->nh_parent = fi;
		nexthop_nh->nh_pcpu_rth_output = alloc_percpu(struct rtable __rcu *);
		if (!nexthop_nh->nh_pcpu_rth_output)
			goto failure;
	} endfor_nexthops(fi)

	err = fib_convert_metrics(fi, cfg);
	if (err)
		goto failure;

	if (cfg->fc_mp) {
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		err = fib_get_nhs(fi, cfg->fc_mp, cfg->fc_mp_len, cfg);
		if (err != 0)
			goto failure;
		if (cfg->fc_oif && fi->fib_nh->nh_oif != cfg->fc_oif)
			goto err_inval;
		if (cfg->fc_gw && fi->fib_nh->nh_gw != cfg->fc_gw)
			goto err_inval;
#ifdef CONFIG_IP_ROUTE_CLASSID
		if (cfg->fc_flow && fi->fib_nh->nh_tclassid != cfg->fc_flow)
			goto err_inval;
#endif
#else
		goto err_inval;
#endif
	} else {
		struct fib_nh *nh = fi->fib_nh;

		if (cfg->fc_encap) {
			struct lwtunnel_state *lwtstate;

			if (cfg->fc_encap_type == LWTUNNEL_ENCAP_NONE)
				goto err_inval;
			err = lwtunnel_build_state(cfg->fc_encap_type,
						   cfg->fc_encap, AF_INET, cfg,
						   &lwtstate);
			if (err)
				goto failure;

			nh->nh_lwtstate = lwtstate_get(lwtstate);
		}
		nh->nh_oif = cfg->fc_oif;
		nh->nh_gw = cfg->fc_gw;
		nh->nh_flags = cfg->fc_flags;
#ifdef CONFIG_IP_ROUTE_CLASSID
		nh->nh_tclassid = cfg->fc_flow;
		if (nh->nh_tclassid)
			fi->fib_net->ipv4.fib_num_tclassid_users++;
#endif
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		nh->nh_weight = 1;
#endif
	}

	if (fib_props[cfg->fc_type].error) {
		if (cfg->fc_gw || cfg->fc_oif || cfg->fc_mp)
			goto err_inval;
		goto link_it;
	} else {
		switch (cfg->fc_type) {
		case RTN_UNICAST:
		case RTN_LOCAL:
		case RTN_BROADCAST:
		case RTN_ANYCAST:
		case RTN_MULTICAST:
			break;
		default:
			goto err_inval;
		}
	}

	if (cfg->fc_scope > RT_SCOPE_HOST)
		goto err_inval;

	if (cfg->fc_scope == RT_SCOPE_HOST) {
		struct fib_nh *nh = fi->fib_nh;

		/* Local address is added. */
		if (nhs != 1 || nh->nh_gw)
			goto err_inval;
		nh->nh_scope = RT_SCOPE_NOWHERE;
		nh->nh_dev = dev_get_by_index(net, fi->fib_nh->nh_oif);
		err = -ENODEV;
		if (!nh->nh_dev)
			goto failure;
	} else {
		int linkdown = 0;

		change_nexthops(fi) {
			err = fib_check_nh(cfg, fi, nexthop_nh);
			if (err != 0)
				goto failure;
			if (nexthop_nh->nh_flags & RTNH_F_LINKDOWN)
				linkdown++;
		} endfor_nexthops(fi)
		if (linkdown == fi->fib_nhs)
			fi->fib_flags |= RTNH_F_LINKDOWN;
	}

	if (fi->fib_prefsrc && !fib_valid_prefsrc(cfg, fi->fib_prefsrc))
		goto err_inval;

	change_nexthops(fi) {
		fib_info_update_nh_saddr(net, nexthop_nh);
		fib_add_weight(fi, nexthop_nh);
	} endfor_nexthops(fi)

	fib_rebalance(fi);

link_it:
	ofi = fib_find_info(fi);
	if (ofi) {
		fi->fib_dead = 1;
		free_fib_info(fi);
		ofi->fib_treeref++;
		return ofi;
	}

	fi->fib_treeref++;
	atomic_inc(&fi->fib_clntref);
	spin_lock_bh(&fib_info_lock);
	hlist_add_head(&fi->fib_hash,
		       &fib_info_hash[fib_info_hashfn(fi)]);
	if (fi->fib_prefsrc) {
		struct hlist_head *head;

		head = &fib_info_laddrhash[fib_laddr_hashfn(fi->fib_prefsrc)];
		hlist_add_head(&fi->fib_lhash, head);
	}
	change_nexthops(fi) {
		struct hlist_head *head;
		unsigned int hash;

		if (!nexthop_nh->nh_dev)
			continue;
		hash = fib_devindex_hashfn(nexthop_nh->nh_dev->ifindex);
		head = &fib_info_devhash[hash];
		hlist_add_head(&nexthop_nh->nh_hash, head);
	} endfor_nexthops(fi)
	spin_unlock_bh(&fib_info_lock);
	return fi;

err_inval:
	err = -EINVAL;

failure:
	if (fi) {
		fi->fib_dead = 1;
		free_fib_info(fi);
	}

	return ERR_PTR(err);
}
int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
		  u32 tb_id, u8 type, __be32 dst, int dst_len, u8 tos,
		  struct fib_info *fi, unsigned int flags)
{
	struct nlmsghdr *nlh;
	struct rtmsg *rtm;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), flags);
	if (!nlh)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
	rtm->rtm_family = AF_INET;
	rtm->rtm_dst_len = dst_len;
	rtm->rtm_src_len = 0;
	rtm->rtm_tos = tos;
	if (tb_id < 256)
		rtm->rtm_table = tb_id;
	else
		rtm->rtm_table = RT_TABLE_COMPAT;
	if (nla_put_u32(skb, RTA_TABLE, tb_id))
		goto nla_put_failure;
	rtm->rtm_type = type;
	rtm->rtm_flags = fi->fib_flags;
	rtm->rtm_scope = fi->fib_scope;
	rtm->rtm_protocol = fi->fib_protocol;

	if (rtm->rtm_dst_len &&
	    nla_put_in_addr(skb, RTA_DST, dst))
		goto nla_put_failure;
	if (fi->fib_priority &&
	    nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority))
		goto nla_put_failure;
	if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0)
		goto nla_put_failure;

	if (fi->fib_prefsrc &&
	    nla_put_in_addr(skb, RTA_PREFSRC, fi->fib_prefsrc))
		goto nla_put_failure;
	if (fi->fib_nhs == 1) {
		struct in_device *in_dev;

		if (fi->fib_nh->nh_gw &&
		    nla_put_in_addr(skb, RTA_GATEWAY, fi->fib_nh->nh_gw))
			goto nla_put_failure;
		if (fi->fib_nh->nh_oif &&
		    nla_put_u32(skb, RTA_OIF, fi->fib_nh->nh_oif))
			goto nla_put_failure;
		if (fi->fib_nh->nh_flags & RTNH_F_LINKDOWN) {
			in_dev = __in_dev_get_rtnl(fi->fib_nh->nh_dev);
			if (in_dev &&
			    IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev))
				rtm->rtm_flags |= RTNH_F_DEAD;
		}
#ifdef CONFIG_IP_ROUTE_CLASSID
		if (fi->fib_nh[0].nh_tclassid &&
		    nla_put_u32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid))
			goto nla_put_failure;
#endif
		if (fi->fib_nh->nh_lwtstate &&
		    lwtunnel_fill_encap(skb, fi->fib_nh->nh_lwtstate) < 0)
			goto nla_put_failure;
	}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (fi->fib_nhs > 1) {
		struct rtnexthop *rtnh;
		struct nlattr *mp;

		mp = nla_nest_start(skb, RTA_MULTIPATH);
		if (!mp)
			goto nla_put_failure;

		for_nexthops(fi) {
			struct in_device *in_dev;

			rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
			if (!rtnh)
				goto nla_put_failure;

			rtnh->rtnh_flags = nh->nh_flags & 0xFF;
			if (nh->nh_flags & RTNH_F_LINKDOWN) {
				in_dev = __in_dev_get_rtnl(nh->nh_dev);
				if (in_dev &&
				    IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev))
					rtnh->rtnh_flags |= RTNH_F_DEAD;
			}
			rtnh->rtnh_hops = nh->nh_weight - 1;
			rtnh->rtnh_ifindex = nh->nh_oif;

			if (nh->nh_gw &&
			    nla_put_in_addr(skb, RTA_GATEWAY, nh->nh_gw))
				goto nla_put_failure;
#ifdef CONFIG_IP_ROUTE_CLASSID
			if (nh->nh_tclassid &&
			    nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid))
				goto nla_put_failure;
#endif
			if (nh->nh_lwtstate &&
			    lwtunnel_fill_encap(skb, nh->nh_lwtstate) < 0)
				goto nla_put_failure;

			/* length of rtnetlink header + attributes */
			rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *) rtnh;
		} endfor_nexthops(fi);

		nla_nest_end(skb, mp);
	}
#endif
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
/* Update the FIB when:
 * - a local address disappears -> we must delete all the entries
 *   referring to it.
 * - a device goes down -> we must shut down all nexthops going via it.
 */
int fib_sync_down_addr(struct net_device *dev, __be32 local)
{
	int ret = 0;
	unsigned int hash = fib_laddr_hashfn(local);
	struct hlist_head *head = &fib_info_laddrhash[hash];
	struct net *net = dev_net(dev);
	int tb_id = l3mdev_fib_table(dev);
	struct fib_info *fi;

	if (!fib_info_laddrhash || local == 0)
		return 0;

	hlist_for_each_entry(fi, head, fib_lhash) {
		if (!net_eq(fi->fib_net, net) ||
		    fi->fib_tb_id != tb_id)
			continue;
		if (fi->fib_prefsrc == local) {
			fi->fib_flags |= RTNH_F_DEAD;
			ret++;
		}
	}
	return ret;
}
static int call_fib_nh_notifiers(struct fib_nh *fib_nh,
				 enum fib_event_type event_type)
{
	struct in_device *in_dev = __in_dev_get_rtnl(fib_nh->nh_dev);
	struct fib_nh_notifier_info info = {
		.fib_nh = fib_nh,
	};

	switch (event_type) {
	case FIB_EVENT_NH_ADD:
		if (fib_nh->nh_flags & RTNH_F_DEAD)
			break;
		if (IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
		    fib_nh->nh_flags & RTNH_F_LINKDOWN)
			break;
		return call_fib_notifiers(dev_net(fib_nh->nh_dev), event_type,
					  &info.info);
	case FIB_EVENT_NH_DEL:
		if ((IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
		     fib_nh->nh_flags & RTNH_F_LINKDOWN) ||
		    (fib_nh->nh_flags & RTNH_F_DEAD))
			return call_fib_notifiers(dev_net(fib_nh->nh_dev),
						  event_type, &info.info);
	default:
		break;
	}

	return NOTIFY_DONE;
}
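
/*
 * Editorial note (not in the original source): the FIB notifier chain used
 * here is how in-kernel listeners (e.g. switchdev-based drivers that offload
 * routes to hardware) learn about nexthops becoming usable or unusable;
 * only nexthops actually eligible for forwarding are announced as added.
 */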
/* Event              force Flags           Description
 * NETDEV_CHANGE      0     LINKDOWN        Carrier OFF, not for scope host
 * NETDEV_DOWN        0     LINKDOWN|DEAD   Link down, not for scope host
 * NETDEV_DOWN        1     LINKDOWN|DEAD   Last address removed
 * NETDEV_UNREGISTER  1     LINKDOWN|DEAD   Device removed
 */
int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force)
{
	int ret = 0;
	int scope = RT_SCOPE_NOWHERE;
	struct fib_info *prev_fi = NULL;
	unsigned int hash = fib_devindex_hashfn(dev->ifindex);
	struct hlist_head *head = &fib_info_devhash[hash];
	struct fib_nh *nh;

	hlist_for_each_entry(nh, head, nh_hash) {
		struct fib_info *fi = nh->nh_parent;
		int dead = 0;

		BUG_ON(!fi->fib_nhs);
		if (nh->nh_dev != dev || fi == prev_fi)
			continue;
		prev_fi = fi;
		change_nexthops(fi) {
			if (nexthop_nh->nh_flags & RTNH_F_DEAD)
				dead++;
			else if (nexthop_nh->nh_dev == dev &&
				 nexthop_nh->nh_scope != scope) {
				switch (event) {
				case NETDEV_DOWN:
				case NETDEV_UNREGISTER:
					nexthop_nh->nh_flags |= RTNH_F_DEAD;
					/* fall through */
				case NETDEV_CHANGE:
					nexthop_nh->nh_flags |= RTNH_F_LINKDOWN;
					break;
				}
				call_fib_nh_notifiers(nexthop_nh,
						      FIB_EVENT_NH_DEL);
				dead++;
			}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
			if (event == NETDEV_UNREGISTER &&
			    nexthop_nh->nh_dev == dev) {
				dead = fi->fib_nhs;
				break;
			}
#endif
		} endfor_nexthops(fi)
		if (dead == fi->fib_nhs) {
			switch (event) {
			case NETDEV_DOWN:
			case NETDEV_UNREGISTER:
				fi->fib_flags |= RTNH_F_DEAD;
				/* fall through */
			case NETDEV_CHANGE:
				fi->fib_flags |= RTNH_F_LINKDOWN;
				break;
			}
			ret++;
		}
	}

	return ret;
}
/* Must be invoked inside of an RCU protected region. */
static void fib_select_default(const struct flowi4 *flp, struct fib_result *res)
{
	struct fib_info *fi = NULL, *last_resort = NULL;
	struct hlist_head *fa_head = res->fa_head;
	struct fib_table *tb = res->table;
	u8 slen = 32 - res->prefixlen;
	int order = -1, last_idx = -1;
	struct fib_alias *fa, *fa1 = NULL;
	u32 last_prio = res->fi->fib_priority;
	u8 last_tos = 0;

	hlist_for_each_entry_rcu(fa, fa_head, fa_list) {
		struct fib_info *next_fi = fa->fa_info;

		if (fa->fa_slen != slen)
			continue;
		if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos)
			continue;
		if (fa->tb_id != tb->tb_id)
			continue;
		if (next_fi->fib_priority > last_prio &&
		    fa->fa_tos == last_tos) {
			if (last_tos)
				continue;
			break;
		}
		if (next_fi->fib_flags & RTNH_F_DEAD)
			continue;
		last_tos = fa->fa_tos;
		last_prio = next_fi->fib_priority;

		if (next_fi->fib_scope != res->scope ||
		    fa->fa_type != RTN_UNICAST)
			continue;
		if (!next_fi->fib_nh[0].nh_gw ||
		    next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
			continue;

		fib_alias_accessed(fa);

		if (!fi) {
			if (next_fi != res->fi)
				break;
			fa1 = fa;
		} else if (!fib_detect_death(fi, order, &last_resort,
					     &last_idx, fa1->fa_default)) {
			fib_result_assign(res, fi);
			fa1->fa_default = order;
			goto out;
		}
		fi = next_fi;
		order++;
	}

	if (order <= 0 || !fi) {
		if (fa1)
			fa1->fa_default = -1;
		goto out;
	}

	if (!fib_detect_death(fi, order, &last_resort, &last_idx,
			      fa1->fa_default)) {
		fib_result_assign(res, fi);
		fa1->fa_default = order;
		goto out;
	}

	if (last_idx >= 0)
		fib_result_assign(res, last_resort);
	fa1->fa_default = last_idx;
out:
	return;
}
/*
 * Dead device goes up. We wake up dead nexthops.
 * It makes sense only for multipath routes.
 */
int fib_sync_up(struct net_device *dev, unsigned int nh_flags)
{
	struct fib_info *prev_fi;
	unsigned int hash;
	struct hlist_head *head;
	struct fib_nh *nh;

	if (!(dev->flags & IFF_UP))
		return 0;

	if (nh_flags & RTNH_F_DEAD) {
		unsigned int flags = dev_get_flags(dev);

		if (flags & (IFF_RUNNING | IFF_LOWER_UP))
			nh_flags |= RTNH_F_LINKDOWN;
	}

	prev_fi = NULL;
	hash = fib_devindex_hashfn(dev->ifindex);
	head = &fib_info_devhash[hash];

	hlist_for_each_entry(nh, head, nh_hash) {
		struct fib_info *fi = nh->nh_parent;
		int alive = 0;

		BUG_ON(!fi->fib_nhs);
		if (nh->nh_dev != dev || fi == prev_fi)
			continue;
		prev_fi = fi;

		change_nexthops(fi) {
			if (!(nexthop_nh->nh_flags & nh_flags)) {
				alive++;
				continue;
			}
			if (!nexthop_nh->nh_dev ||
			    !(nexthop_nh->nh_dev->flags & IFF_UP))
				continue;
			if (nexthop_nh->nh_dev != dev ||
			    !__in_dev_get_rtnl(dev))
				continue;
			alive++;
			nexthop_nh->nh_flags &= ~nh_flags;
			call_fib_nh_notifiers(nexthop_nh, FIB_EVENT_NH_ADD);
		} endfor_nexthops(fi)

		if (alive > 0)
			fi->fib_flags &= ~nh_flags;
	}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
static bool fib_good_nh(const struct fib_nh *nh)
{
	int state = NUD_REACHABLE;

	if (nh->nh_scope == RT_SCOPE_LINK) {
		struct neighbour *n;

		rcu_read_lock_bh();

		n = __ipv4_neigh_lookup_noref(nh->nh_dev,
					      (__force u32)nh->nh_gw);
		if (n)
			state = n->nud_state;

		rcu_read_unlock_bh();
	}

	return !!(state & NUD_VALID);
}
void fib_select_multipath(struct fib_result *res, int hash)
{
	struct fib_info *fi = res->fi;
	struct net *net = fi->fib_net;
	bool first = false;

	for_nexthops(fi) {
		if (hash > atomic_read(&nh->nh_upper_bound))
			continue;
		if (!net->ipv4.sysctl_fib_multipath_use_neigh ||
		    fib_good_nh(nh)) {
			res->nh_sel = nhsel;
			return;
		}
		if (!first) {
			res->nh_sel = nhsel;
			first = true;
		}
	} endfor_nexthops(fi);
}
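
/*
 * Editorial note (not in the original source): the flow hash passed in is
 * compared against the per-nexthop upper bounds computed by fib_rebalance(),
 * so the first nexthop whose bound covers the hash wins.  When the
 * fib_multipath_use_neigh sysctl is enabled, a candidate whose gateway lacks
 * a valid neighbour entry is skipped and the first matching nexthop is kept
 * as a fallback.
 */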
void fib_select_path(struct net *net, struct fib_result *res,
		     struct flowi4 *fl4, const struct sk_buff *skb)
{
	bool oif_check;

	oif_check = (fl4->flowi4_oif == 0 ||
		     fl4->flowi4_flags & FLOWI_FLAG_SKIP_NH_OIF);

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (res->fi->fib_nhs > 1 && oif_check) {
		int h = fib_multipath_hash(res->fi, fl4, skb);

		fib_select_multipath(res, h);
	}
	else
#endif
	if (!res->prefixlen &&
	    res->table->tb_num_default > 1 &&
	    res->type == RTN_UNICAST && oif_check)
		fib_select_default(fl4, res);

	if (!fl4->saddr)
		fl4->saddr = FIB_RES_PREFSRC(net, *res);
}
EXPORT_SYMBOL_GPL(fib_select_path);