 *		INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		IPv4 Forwarding Information Base: semantics.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/slab.h>

#include <net/protocol.h>
#include <net/route.h>
#include <net/ip_fib.h>
#include <net/netlink.h>
#include <net/nexthop.h>

#include "fib_lookup.h"
static DEFINE_SPINLOCK(fib_info_lock);
static struct hlist_head *fib_info_hash;
static struct hlist_head *fib_info_laddrhash;
static unsigned int fib_info_hash_size;
static unsigned int fib_info_cnt;

#define DEVINDEX_HASHBITS 8
#define DEVINDEX_HASHSIZE (1U << DEVINDEX_HASHBITS)
static struct hlist_head fib_info_devhash[DEVINDEX_HASHSIZE];
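
/*
 * fib_info_devhash maps a device ifindex to the nexthops using that
 * device; fib_sync_down_dev() and fib_sync_up() below walk it to mark
 * nexthops dead or alive when the device changes state.
 */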
#ifdef CONFIG_IP_ROUTE_MULTIPATH

static DEFINE_SPINLOCK(fib_multipath_lock);

#define for_nexthops(fi) {						\
	int nhsel; const struct fib_nh *nh;				\
	for (nhsel = 0, nh = (fi)->fib_nh;				\
	     nhsel < (fi)->fib_nhs;					\
	     nh++, nhsel++)

#define change_nexthops(fi) {						\
	int nhsel; struct fib_nh *nexthop_nh;				\
	for (nhsel = 0, nexthop_nh = (struct fib_nh *)((fi)->fib_nh);	\
	     nhsel < (fi)->fib_nhs;					\
	     nexthop_nh++, nhsel++)
#else /* CONFIG_IP_ROUTE_MULTIPATH */

/* Hope that gcc will optimize it to get rid of the dummy loop */

#define for_nexthops(fi) {						\
	int nhsel; const struct fib_nh *nh = (fi)->fib_nh;		\
	for (nhsel = 0; nhsel < 1; nhsel++)

#define change_nexthops(fi) {						\
	int nhsel;							\
	struct fib_nh *nexthop_nh = (struct fib_nh *)((fi)->fib_nh);	\
	for (nhsel = 0; nhsel < 1; nhsel++)

#endif /* CONFIG_IP_ROUTE_MULTIPATH */

#define endfor_nexthops(fi) }
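
/*
 * Illustrative usage of the iteration helpers above (a sketch, not a
 * quote from elsewhere in the tree): the macros open a block and
 * declare the loop cursor, so every loop must be closed with
 * endfor_nexthops():
 *
 *	change_nexthops(fi) {
 *		if (nexthop_nh->nh_dev == dev)
 *			nexthop_nh->nh_flags |= RTNH_F_DEAD;
 *	} endfor_nexthops(fi)
 */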
} fib_props[RTN_MAX + 1] = {
		.scope	= RT_SCOPE_NOWHERE,
		.scope	= RT_SCOPE_UNIVERSE,
		.scope	= RT_SCOPE_HOST,
		.scope	= RT_SCOPE_LINK,
		.scope	= RT_SCOPE_LINK,
		.scope	= RT_SCOPE_UNIVERSE,
		.scope	= RT_SCOPE_UNIVERSE,
	[RTN_UNREACHABLE] = {
		.error	= -EHOSTUNREACH,
		.scope	= RT_SCOPE_UNIVERSE,
		.scope	= RT_SCOPE_UNIVERSE,
		.scope	= RT_SCOPE_UNIVERSE,
		.scope	= RT_SCOPE_NOWHERE,
		.scope	= RT_SCOPE_NOWHERE,
/* Release a nexthop info record */

static void free_fib_info_rcu(struct rcu_head *head)
	struct fib_info *fi = container_of(head, struct fib_info, rcu);

	if (fi->fib_metrics != (u32 *) dst_default_metrics)
		kfree(fi->fib_metrics);

void free_fib_info(struct fib_info *fi)
	if (fi->fib_dead == 0) {
		pr_warning("Freeing alive fib_info %p\n", fi);

	change_nexthops(fi) {
		if (nexthop_nh->nh_dev)
			dev_put(nexthop_nh->nh_dev);
		nexthop_nh->nh_dev = NULL;
	} endfor_nexthops(fi);

	release_net(fi->fib_net);
	call_rcu(&fi->rcu, free_fib_info_rcu);
void fib_release_info(struct fib_info *fi)
	spin_lock_bh(&fib_info_lock);
	if (fi && --fi->fib_treeref == 0) {
		hlist_del(&fi->fib_hash);
		hlist_del(&fi->fib_lhash);
		change_nexthops(fi) {
			if (!nexthop_nh->nh_dev)
				continue;
			hlist_del(&nexthop_nh->nh_hash);
		} endfor_nexthops(fi)
	spin_unlock_bh(&fib_info_lock);
static inline int nh_comp(const struct fib_info *fi, const struct fib_info *ofi)
	const struct fib_nh *onh = ofi->fib_nh;

		if (nh->nh_oif != onh->nh_oif ||
		    nh->nh_gw != onh->nh_gw ||
		    nh->nh_scope != onh->nh_scope ||
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		    nh->nh_weight != onh->nh_weight ||
#ifdef CONFIG_IP_ROUTE_CLASSID
		    nh->nh_tclassid != onh->nh_tclassid ||
		    ((nh->nh_flags ^ onh->nh_flags) & ~RTNH_F_DEAD))
	} endfor_nexthops(fi);
static inline unsigned int fib_devindex_hashfn(unsigned int val)
	unsigned int mask = DEVINDEX_HASHSIZE - 1;

		(val >> DEVINDEX_HASHBITS) ^
		(val >> (DEVINDEX_HASHBITS * 2))) & mask;
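
/*
 * Worked example (illustrative): with DEVINDEX_HASHBITS == 8 the fold
 * above maps ifindex 3 to bucket 3, and ifindex 0x103 (259) to bucket
 * (0x103 ^ 0x1 ^ 0x0) & 0xff == 2.
 */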
static inline unsigned int fib_info_hashfn(const struct fib_info *fi)
	unsigned int mask = (fib_info_hash_size - 1);
	unsigned int val = fi->fib_nhs;

	val ^= fi->fib_protocol;
	val ^= (__force u32)fi->fib_prefsrc;
	val ^= fi->fib_priority;
		val ^= fib_devindex_hashfn(nh->nh_oif);
	} endfor_nexthops(fi)

	return (val ^ (val >> 7) ^ (val >> 12)) & mask;
static struct fib_info *fib_find_info(const struct fib_info *nfi)
	struct hlist_head *head;
	struct hlist_node *node;

	hash = fib_info_hashfn(nfi);
	head = &fib_info_hash[hash];

	hlist_for_each_entry(fi, node, head, fib_hash) {
		if (!net_eq(fi->fib_net, nfi->fib_net))
		if (fi->fib_nhs != nfi->fib_nhs)
		if (nfi->fib_protocol == fi->fib_protocol &&
		    nfi->fib_prefsrc == fi->fib_prefsrc &&
		    nfi->fib_priority == fi->fib_priority &&
		    memcmp(nfi->fib_metrics, fi->fib_metrics,
			   sizeof(fi->fib_metrics)) == 0 &&
		    ((nfi->fib_flags ^ fi->fib_flags) & ~RTNH_F_DEAD) == 0 &&
		    (nfi->fib_nhs == 0 || nh_comp(fi, nfi) == 0))
/* Check that the gateway is already configured.
 * Used only by the redirect accept routine.
 */
int ip_fib_check_default(__be32 gw, struct net_device *dev)
	struct hlist_head *head;
	struct hlist_node *node;

	spin_lock(&fib_info_lock);

	hash = fib_devindex_hashfn(dev->ifindex);
	head = &fib_info_devhash[hash];
	hlist_for_each_entry(nh, node, head, nh_hash) {
		if (nh->nh_dev == dev &&
		    !(nh->nh_flags & RTNH_F_DEAD)) {
			spin_unlock(&fib_info_lock);

	spin_unlock(&fib_info_lock);
static inline size_t fib_nlmsg_size(struct fib_info *fi)
	size_t payload = NLMSG_ALIGN(sizeof(struct rtmsg))
			 + nla_total_size(4) /* RTA_TABLE */
			 + nla_total_size(4) /* RTA_DST */
			 + nla_total_size(4) /* RTA_PRIORITY */
			 + nla_total_size(4); /* RTA_PREFSRC */

	/* space for nested metrics */
	payload += nla_total_size((RTAX_MAX * nla_total_size(4)));

		/* Also handles the special case fib_nhs == 1 */

		/* each nexthop is packed in an attribute */
		size_t nhsize = nla_total_size(sizeof(struct rtnexthop));

		/* may contain flow and gateway attribute */
		nhsize += 2 * nla_total_size(4);

		/* all nexthops are packed in a nested attribute */
		payload += nla_total_size(fi->fib_nhs * nhsize);
void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
	       int dst_len, u32 tb_id, struct nl_info *info,
	       unsigned int nlm_flags)
	u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;

	skb = nlmsg_new(fib_nlmsg_size(fa->fa_info), GFP_KERNEL);

	err = fib_dump_info(skb, info->pid, seq, event, tb_id,
			    fa->fa_type, fa->fa_scope, key, dst_len,
			    fa->fa_tos, fa->fa_info, nlm_flags);
	/* -EMSGSIZE implies BUG in fib_nlmsg_size() */
	WARN_ON(err == -EMSGSIZE);

	rtnl_notify(skb, info->nl_net, info->pid, RTNLGRP_IPV4_ROUTE,
		    info->nlh, GFP_KERNEL);

	rtnl_set_sk_err(info->nl_net, RTNLGRP_IPV4_ROUTE, err);
/* Return the first fib alias matching TOS with
 * priority less than or equal to PRIO.
 */
struct fib_alias *fib_find_alias(struct list_head *fah, u8 tos, u32 prio)
	struct fib_alias *fa;

	list_for_each_entry(fa, fah, fa_list) {
		if (fa->fa_tos > tos)
		if (fa->fa_info->fib_priority >= prio ||
int fib_detect_death(struct fib_info *fi, int order,
		     struct fib_info **last_resort, int *last_idx, int dflt)
	int state = NUD_NONE;

	n = neigh_lookup(&arp_tbl, &fi->fib_nh[0].nh_gw, fi->fib_dev);
		state = n->nud_state;

	if (state == NUD_REACHABLE)
	if ((state & NUD_VALID) && order != dflt)
	if ((state & NUD_VALID) ||
	    (*last_idx < 0 && order > dflt)) {
#ifdef CONFIG_IP_ROUTE_MULTIPATH

static int fib_count_nexthops(struct rtnexthop *rtnh, int remaining)
	while (rtnh_ok(rtnh, remaining)) {
		rtnh = rtnh_next(rtnh, &remaining);

	/* leftover implies invalid nexthop configuration, discard it */
	return remaining > 0 ? 0 : nhs;
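
/*
 * Sketch of the RTA_MULTIPATH payload that fib_count_nexthops() and
 * fib_get_nhs() walk with rtnh_ok()/rtnh_next() (illustrative, not a
 * dump of a real configuration): a sequence of
 *
 *	struct rtnexthop { rtnh_len, rtnh_flags, rtnh_hops, rtnh_ifindex }
 *	optional per-nexthop attributes (RTA_GATEWAY, RTA_FLOW, ...)
 *
 * repeated once per nexthop, with rtnh_len covering the header plus its
 * attributes.
 */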
static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
		       int remaining, struct fib_config *cfg)
	change_nexthops(fi) {
		if (!rtnh_ok(rtnh, remaining))

		nexthop_nh->nh_flags =
			(cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags;
		nexthop_nh->nh_oif = rtnh->rtnh_ifindex;
		nexthop_nh->nh_weight = rtnh->rtnh_hops + 1;

		attrlen = rtnh_attrlen(rtnh);
			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);

			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
			nexthop_nh->nh_gw = nla ? nla_get_be32(nla) : 0;
#ifdef CONFIG_IP_ROUTE_CLASSID
			nla = nla_find(attrs, attrlen, RTA_FLOW);
			nexthop_nh->nh_tclassid = nla ? nla_get_u32(nla) : 0;

		rtnh = rtnh_next(rtnh, &remaining);
	} endfor_nexthops(fi);
int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	struct rtnexthop *rtnh;

	if (cfg->fc_priority && cfg->fc_priority != fi->fib_priority)

	if (cfg->fc_oif || cfg->fc_gw) {
		if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->nh_oif) &&
		    (!cfg->fc_gw || cfg->fc_gw == fi->fib_nh->nh_gw))

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (cfg->fc_mp == NULL)

	remaining = cfg->fc_mp_len;

		if (!rtnh_ok(rtnh, remaining))

		if (rtnh->rtnh_ifindex && rtnh->rtnh_ifindex != nh->nh_oif)

		attrlen = rtnh_attrlen(rtnh);
			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);

			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
			if (nla && nla_get_be32(nla) != nh->nh_gw)
#ifdef CONFIG_IP_ROUTE_CLASSID
			nla = nla_find(attrs, attrlen, RTA_FLOW);
			if (nla && nla_get_u32(nla) != nh->nh_tclassid)

		rtnh = rtnh_next(rtnh, &remaining);
	} endfor_nexthops(fi);

/*
 * The semantics of nexthops are very messy for historical reasons.
 * We have to take into account that:
 * a) the gateway can actually be a local interface address,
 *    so that a gatewayed route is direct;
 * b) the gateway must be an on-link address, possibly
 *    described not by an ifaddr but by a direct route;
 * c) if both gateway and interface are specified, they should not
 *    contradict each other;
 * d) if we use tunnel routes, the gateway may not be on-link.
 *
 * Attempting to reconcile all of these (alas, self-contradictory)
 * conditions results in pretty ugly and hairy code with obscure logic.
 *
 * I chose to generalize it instead, so that the size
 * of the code does not increase practically, but it becomes
 * much more general.
 *
 * Every prefix is assigned a "scope" value: "host" is a local address,
 * "link" is a direct route,
 * [ ... "site" ... "interior" ... ]
 * and "universe" is a true gateway route with global meaning.
 *
 * Every prefix refers to a set of "nexthops" (gw, oif),
 * where the gw must have narrower scope. This recursion stops
 * when the gw has LOCAL scope or when the "nexthop" is declared ONLINK,
 * which means that the gw is forced to be on-link.
 *
 * The code is still hairy, but now it is apparently logically
 * consistent and very flexible. E.g. as a by-product it allows
 * independent exterior and interior routing processes to coexist
 * in peace.
 *
 * Normally it looks like the following:
 *
 * {universe prefix} -> (gw, oif) [scope link]
 *		  |
 *		  |-> {link prefix} -> (gw, oif) [scope local]
 *					|
 *					|-> {local prefix} (terminal node)
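 *
 * A concrete example (illustrative addresses): a route added as
 * "default via 192.0.2.1 dev eth0" has universe scope; the gateway
 * 192.0.2.1 is resolved against the connected prefix 192.0.2.0/24
 * (scope link), so the nexthop ends up with nh_scope = RT_SCOPE_LINK.
 */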
static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
	struct net_device *dev;

	net = cfg->fc_nlinfo.nl_net;
		struct fib_result res;

		if (nh->nh_flags & RTNH_F_ONLINK) {

			if (cfg->fc_scope >= RT_SCOPE_LINK)
			if (inet_addr_type(net, nh->nh_gw) != RTN_UNICAST)
			dev = __dev_get_by_index(net, nh->nh_oif);
			if (!(dev->flags & IFF_UP))
			nh->nh_scope = RT_SCOPE_LINK;

				.fl4_dst = nh->nh_gw,
				.fl4_scope = cfg->fc_scope + 1,

			/* It is not necessary, but requires a bit of thinking */
			if (fl.fl4_scope < RT_SCOPE_LINK)
				fl.fl4_scope = RT_SCOPE_LINK;
			err = fib_lookup(net, &fl, &res);

		if (res.type != RTN_UNICAST && res.type != RTN_LOCAL)
		nh->nh_scope = res.scope;
		nh->nh_oif = FIB_RES_OIF(res);
		nh->nh_dev = dev = FIB_RES_DEV(res);
		err = (dev->flags & IFF_UP) ? 0 : -ENETDOWN;
		struct in_device *in_dev;

		if (nh->nh_flags & (RTNH_F_PERVASIVE | RTNH_F_ONLINK))

		in_dev = inetdev_by_index(net, nh->nh_oif);
		if (!(in_dev->dev->flags & IFF_UP))
		nh->nh_dev = in_dev->dev;
		dev_hold(nh->nh_dev);
		nh->nh_scope = RT_SCOPE_HOST;
static inline unsigned int fib_laddr_hashfn(__be32 val)
	unsigned int mask = (fib_info_hash_size - 1);

	return ((__force u32)val ^
		((__force u32)val >> 7) ^
		((__force u32)val >> 14)) & mask;

static struct hlist_head *fib_info_hash_alloc(int bytes)
	if (bytes <= PAGE_SIZE)
		return kzalloc(bytes, GFP_KERNEL);
	return (struct hlist_head *)
		__get_free_pages(GFP_KERNEL | __GFP_ZERO,

static void fib_info_hash_free(struct hlist_head *hash, int bytes)
	if (bytes <= PAGE_SIZE)
	free_pages((unsigned long) hash, get_order(bytes));
static void fib_info_hash_move(struct hlist_head *new_info_hash,
			       struct hlist_head *new_laddrhash,
			       unsigned int new_size)
	struct hlist_head *old_info_hash, *old_laddrhash;
	unsigned int old_size = fib_info_hash_size;
	unsigned int i, bytes;

	spin_lock_bh(&fib_info_lock);
	old_info_hash = fib_info_hash;
	old_laddrhash = fib_info_laddrhash;
	fib_info_hash_size = new_size;

	for (i = 0; i < old_size; i++) {
		struct hlist_head *head = &fib_info_hash[i];
		struct hlist_node *node, *n;

		hlist_for_each_entry_safe(fi, node, n, head, fib_hash) {
			struct hlist_head *dest;
			unsigned int new_hash;

			hlist_del(&fi->fib_hash);

			new_hash = fib_info_hashfn(fi);
			dest = &new_info_hash[new_hash];
			hlist_add_head(&fi->fib_hash, dest);

	fib_info_hash = new_info_hash;

	for (i = 0; i < old_size; i++) {
		struct hlist_head *lhead = &fib_info_laddrhash[i];
		struct hlist_node *node, *n;

		hlist_for_each_entry_safe(fi, node, n, lhead, fib_lhash) {
			struct hlist_head *ldest;
			unsigned int new_hash;

			hlist_del(&fi->fib_lhash);

			new_hash = fib_laddr_hashfn(fi->fib_prefsrc);
			ldest = &new_laddrhash[new_hash];
			hlist_add_head(&fi->fib_lhash, ldest);

	fib_info_laddrhash = new_laddrhash;

	spin_unlock_bh(&fib_info_lock);

	bytes = old_size * sizeof(struct hlist_head *);
	fib_info_hash_free(old_info_hash, bytes);
	fib_info_hash_free(old_laddrhash, bytes);
struct fib_info *fib_create_info(struct fib_config *cfg)
	struct fib_info *fi = NULL;
	struct fib_info *ofi;
	struct net *net = cfg->fc_nlinfo.nl_net;

	if (cfg->fc_type > RTN_MAX)
	/* Fast check to catch the weirdest cases */
	if (fib_props[cfg->fc_type].scope > cfg->fc_scope)

#ifdef CONFIG_IP_ROUTE_MULTIPATH
		nhs = fib_count_nexthops(cfg->fc_mp, cfg->fc_mp_len);

	if (fib_info_cnt >= fib_info_hash_size) {
		unsigned int new_size = fib_info_hash_size << 1;
		struct hlist_head *new_info_hash;
		struct hlist_head *new_laddrhash;

		bytes = new_size * sizeof(struct hlist_head *);
		new_info_hash = fib_info_hash_alloc(bytes);
		new_laddrhash = fib_info_hash_alloc(bytes);
		if (!new_info_hash || !new_laddrhash) {
			fib_info_hash_free(new_info_hash, bytes);
			fib_info_hash_free(new_laddrhash, bytes);
			fib_info_hash_move(new_info_hash, new_laddrhash, new_size);

		if (!fib_info_hash_size)

	fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);

		fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
		if (!fi->fib_metrics)
		fi->fib_metrics = (u32 *) dst_default_metrics;

	fi->fib_net = hold_net(net);
	fi->fib_protocol = cfg->fc_protocol;
	fi->fib_flags = cfg->fc_flags;
	fi->fib_priority = cfg->fc_priority;
	fi->fib_prefsrc = cfg->fc_prefsrc;

	change_nexthops(fi) {
		nexthop_nh->nh_parent = fi;
	} endfor_nexthops(fi)
		nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
			int type = nla_type(nla);
				fi->fib_metrics[type - 1] = nla_get_u32(nla);

#ifdef CONFIG_IP_ROUTE_MULTIPATH
		err = fib_get_nhs(fi, cfg->fc_mp, cfg->fc_mp_len, cfg);
		if (cfg->fc_oif && fi->fib_nh->nh_oif != cfg->fc_oif)
		if (cfg->fc_gw && fi->fib_nh->nh_gw != cfg->fc_gw)
#ifdef CONFIG_IP_ROUTE_CLASSID
		if (cfg->fc_flow && fi->fib_nh->nh_tclassid != cfg->fc_flow)

		struct fib_nh *nh = fi->fib_nh;

		nh->nh_oif = cfg->fc_oif;
		nh->nh_gw = cfg->fc_gw;
		nh->nh_flags = cfg->fc_flags;
#ifdef CONFIG_IP_ROUTE_CLASSID
		nh->nh_tclassid = cfg->fc_flow;
#ifdef CONFIG_IP_ROUTE_MULTIPATH

	if (fib_props[cfg->fc_type].error) {
		if (cfg->fc_gw || cfg->fc_oif || cfg->fc_mp)
		switch (cfg->fc_type) {

	if (cfg->fc_scope > RT_SCOPE_HOST)

	if (cfg->fc_scope == RT_SCOPE_HOST) {
		struct fib_nh *nh = fi->fib_nh;

		/* Local address is added. */
		if (nhs != 1 || nh->nh_gw)
		nh->nh_scope = RT_SCOPE_NOWHERE;
		nh->nh_dev = dev_get_by_index(net, fi->fib_nh->nh_oif);
		if (nh->nh_dev == NULL)
		change_nexthops(fi) {
			err = fib_check_nh(cfg, fi, nexthop_nh);
		} endfor_nexthops(fi)

	if (fi->fib_prefsrc) {
		if (cfg->fc_type != RTN_LOCAL || !cfg->fc_dst ||
		    fi->fib_prefsrc != cfg->fc_dst)
			if (inet_addr_type(net, fi->fib_prefsrc) != RTN_LOCAL)

	ofi = fib_find_info(fi);

	atomic_inc(&fi->fib_clntref);
	spin_lock_bh(&fib_info_lock);
	hlist_add_head(&fi->fib_hash,
		       &fib_info_hash[fib_info_hashfn(fi)]);
	if (fi->fib_prefsrc) {
		struct hlist_head *head;

		head = &fib_info_laddrhash[fib_laddr_hashfn(fi->fib_prefsrc)];
		hlist_add_head(&fi->fib_lhash, head);
	change_nexthops(fi) {
		struct hlist_head *head;
		if (!nexthop_nh->nh_dev)
			continue;
		hash = fib_devindex_hashfn(nexthop_nh->nh_dev->ifindex);
		head = &fib_info_devhash[hash];
		hlist_add_head(&nexthop_nh->nh_hash, head);
	} endfor_nexthops(fi)
	spin_unlock_bh(&fib_info_lock);
/* Note! fib_semantic_match intentionally uses RCU list functions. */
int fib_semantic_match(struct fib_table *tb, struct list_head *head,
		       const struct flowi *flp, struct fib_result *res,
		       int prefixlen, int fib_flags)
	struct fib_alias *fa;

	list_for_each_entry_rcu(fa, head, fa_list) {
		    fa->fa_tos != flp->fl4_tos)

		if (fa->fa_scope < flp->fl4_scope)

		fib_alias_accessed(fa);

		err = fib_props[fa->fa_type].error;
			struct fib_info *fi = fa->fa_info;

			if (fi->fib_flags & RTNH_F_DEAD)
					if (nh->nh_flags & RTNH_F_DEAD)
					if (!flp->oif || flp->oif == nh->nh_oif)
#ifdef CONFIG_IP_ROUTE_MULTIPATH
				if (nhsel < fi->fib_nhs) {

	res->prefixlen = prefixlen;
	res->nh_sel = nh_sel;
	res->type = fa->fa_type;
	res->scope = fa->fa_scope;
	res->fi = fa->fa_info;
	if (!(fib_flags & FIB_LOOKUP_NOREF))
		atomic_inc(&res->fi->fib_clntref);
/* Find an appropriate source address for this destination */

__be32 __fib_res_prefsrc(struct fib_result *res)
	return inet_select_addr(FIB_RES_DEV(*res), FIB_RES_GW(*res), res->scope);
int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
		  u32 tb_id, u8 type, u8 scope, __be32 dst, int dst_len, u8 tos,
		  struct fib_info *fi, unsigned int flags)
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*rtm), flags);

	rtm = nlmsg_data(nlh);
	rtm->rtm_family = AF_INET;
	rtm->rtm_dst_len = dst_len;
	rtm->rtm_src_len = 0;
		rtm->rtm_table = tb_id;
		rtm->rtm_table = RT_TABLE_COMPAT;
	NLA_PUT_U32(skb, RTA_TABLE, tb_id);
	rtm->rtm_type = type;
	rtm->rtm_flags = fi->fib_flags;
	rtm->rtm_scope = scope;
	rtm->rtm_protocol = fi->fib_protocol;

	if (rtm->rtm_dst_len)
		NLA_PUT_BE32(skb, RTA_DST, dst);

	if (fi->fib_priority)
		NLA_PUT_U32(skb, RTA_PRIORITY, fi->fib_priority);

	if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0)
		goto nla_put_failure;

	if (fi->fib_prefsrc)
		NLA_PUT_BE32(skb, RTA_PREFSRC, fi->fib_prefsrc);

	if (fi->fib_nhs == 1) {
		if (fi->fib_nh->nh_gw)
			NLA_PUT_BE32(skb, RTA_GATEWAY, fi->fib_nh->nh_gw);

		if (fi->fib_nh->nh_oif)
			NLA_PUT_U32(skb, RTA_OIF, fi->fib_nh->nh_oif);
#ifdef CONFIG_IP_ROUTE_CLASSID
		if (fi->fib_nh[0].nh_tclassid)
			NLA_PUT_U32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid);

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (fi->fib_nhs > 1) {
		struct rtnexthop *rtnh;

		mp = nla_nest_start(skb, RTA_MULTIPATH);
			goto nla_put_failure;

			rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
				goto nla_put_failure;

			rtnh->rtnh_flags = nh->nh_flags & 0xFF;
			rtnh->rtnh_hops = nh->nh_weight - 1;
			rtnh->rtnh_ifindex = nh->nh_oif;

				NLA_PUT_BE32(skb, RTA_GATEWAY, nh->nh_gw);
#ifdef CONFIG_IP_ROUTE_CLASSID
			if (nh->nh_tclassid)
				NLA_PUT_U32(skb, RTA_FLOW, nh->nh_tclassid);

			/* length of rtnetlink header + attributes */
			rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *) rtnh;
		} endfor_nexthops(fi);

		nla_nest_end(skb, mp);

	return nlmsg_end(skb, nlh);

	nlmsg_cancel(skb, nlh);
/*
 * Update the FIB if:
 * - a local address disappeared -> we must delete all the entries
 *   referring to it;
 * - a device went down -> we must shut down all nexthops going via it.
 */
int fib_sync_down_addr(struct net *net, __be32 local)
	unsigned int hash = fib_laddr_hashfn(local);
	struct hlist_head *head = &fib_info_laddrhash[hash];
	struct hlist_node *node;
	struct fib_info *fi;

	if (fib_info_laddrhash == NULL || local == 0)

	hlist_for_each_entry(fi, node, head, fib_lhash) {
		if (!net_eq(fi->fib_net, net))
		if (fi->fib_prefsrc == local) {
			fi->fib_flags |= RTNH_F_DEAD;
int fib_sync_down_dev(struct net_device *dev, int force)
	int scope = RT_SCOPE_NOWHERE;
	struct fib_info *prev_fi = NULL;
	unsigned int hash = fib_devindex_hashfn(dev->ifindex);
	struct hlist_head *head = &fib_info_devhash[hash];
	struct hlist_node *node;

	hlist_for_each_entry(nh, node, head, nh_hash) {
		struct fib_info *fi = nh->nh_parent;

		BUG_ON(!fi->fib_nhs);
		if (nh->nh_dev != dev || fi == prev_fi)

		change_nexthops(fi) {
			if (nexthop_nh->nh_flags & RTNH_F_DEAD)
			else if (nexthop_nh->nh_dev == dev &&
				 nexthop_nh->nh_scope != scope) {
				nexthop_nh->nh_flags |= RTNH_F_DEAD;
#ifdef CONFIG_IP_ROUTE_MULTIPATH
				spin_lock_bh(&fib_multipath_lock);
				fi->fib_power -= nexthop_nh->nh_power;
				nexthop_nh->nh_power = 0;
				spin_unlock_bh(&fib_multipath_lock);
#ifdef CONFIG_IP_ROUTE_MULTIPATH
			if (force > 1 && nexthop_nh->nh_dev == dev) {
		} endfor_nexthops(fi)
		if (dead == fi->fib_nhs) {
			fi->fib_flags |= RTNH_F_DEAD;
/* Must be invoked inside of an RCU protected region. */
void fib_select_default(struct fib_result *res)
	struct fib_info *fi = NULL, *last_resort = NULL;
	struct list_head *fa_head = res->fa_head;
	struct fib_table *tb = res->table;
	int order = -1, last_idx = -1;
	struct fib_alias *fa;

	list_for_each_entry_rcu(fa, fa_head, fa_list) {
		struct fib_info *next_fi = fa->fa_info;

		if (fa->fa_scope != res->scope ||
		    fa->fa_type != RTN_UNICAST)

		if (next_fi->fib_priority > res->fi->fib_priority)

		if (!next_fi->fib_nh[0].nh_gw ||
		    next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)

		fib_alias_accessed(fa);

			if (next_fi != res->fi)
		} else if (!fib_detect_death(fi, order, &last_resort,
					     &last_idx, tb->tb_default)) {
			fib_result_assign(res, fi);
			tb->tb_default = order;

	if (order <= 0 || fi == NULL) {
		tb->tb_default = -1;

	if (!fib_detect_death(fi, order, &last_resort, &last_idx,
		fib_result_assign(res, fi);
		tb->tb_default = order;

	fib_result_assign(res, last_resort);
	tb->tb_default = last_idx;
#ifdef CONFIG_IP_ROUTE_MULTIPATH

/*
 * A dead device goes up. We wake up dead nexthops.
 * It makes sense only for multipath routes.
 */
int fib_sync_up(struct net_device *dev)
	struct fib_info *prev_fi;
	struct hlist_head *head;
	struct hlist_node *node;

	if (!(dev->flags & IFF_UP))

	hash = fib_devindex_hashfn(dev->ifindex);
	head = &fib_info_devhash[hash];

	hlist_for_each_entry(nh, node, head, nh_hash) {
		struct fib_info *fi = nh->nh_parent;

		BUG_ON(!fi->fib_nhs);
		if (nh->nh_dev != dev || fi == prev_fi)

		change_nexthops(fi) {
			if (!(nexthop_nh->nh_flags & RTNH_F_DEAD)) {
			if (nexthop_nh->nh_dev == NULL ||
			    !(nexthop_nh->nh_dev->flags & IFF_UP))
			if (nexthop_nh->nh_dev != dev ||
			    !__in_dev_get_rtnl(dev))

			spin_lock_bh(&fib_multipath_lock);
			nexthop_nh->nh_power = 0;
			nexthop_nh->nh_flags &= ~RTNH_F_DEAD;
			spin_unlock_bh(&fib_multipath_lock);
		} endfor_nexthops(fi)

		fi->fib_flags &= ~RTNH_F_DEAD;
/*
 * The algorithm is suboptimal, but it provides really
 * fair weighted route distribution.
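 *
 * Illustrative behaviour (a sketch, not a trace of real traffic): with
 * two live nexthops of weight 2 and 1, fib_power starts at 3, and
 * successive selections drain nh_power so that roughly two of every
 * three flows pick the first nexthop and one picks the second.
 */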
void fib_select_multipath(const struct flowi *flp, struct fib_result *res)
	struct fib_info *fi = res->fi;

	spin_lock_bh(&fib_multipath_lock);
	if (fi->fib_power <= 0) {
		change_nexthops(fi) {
			if (!(nexthop_nh->nh_flags & RTNH_F_DEAD)) {
				power += nexthop_nh->nh_weight;
				nexthop_nh->nh_power = nexthop_nh->nh_weight;
		} endfor_nexthops(fi);
		fi->fib_power = power;
			spin_unlock_bh(&fib_multipath_lock);
			/* Race condition: route has just become dead. */
	/* w should be a random number in [0..fi->fib_power-1];
	 * this is a pretty bad approximation.
	 */
	w = jiffies % fi->fib_power;

	change_nexthops(fi) {
		if (!(nexthop_nh->nh_flags & RTNH_F_DEAD) &&
		    nexthop_nh->nh_power) {
			w -= nexthop_nh->nh_power;
				nexthop_nh->nh_power--;
				res->nh_sel = nhsel;
				spin_unlock_bh(&fib_multipath_lock);
	} endfor_nexthops(fi);

	/* Race condition: route has just become dead. */
	spin_unlock_bh(&fib_multipath_lock);