2 * Linux INET6 implementation
6 * Pedro Roque <roque@di.fc.ul.pt>
8 * $Id: route.c,v 1.56 2001/10/31 21:55:55 davem Exp $
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
18 * YOSHIFUJI Hideaki @USAGI
19 * reworked default router selection.
20 * - respect outgoing interface
21 * - select from (probably) reachable routers (i.e.
22 * routers in REACHABLE, STALE, DELAY or PROBE states).
23 * - always select the same router if it is (probably)
24 * reachable. otherwise, round-robin the list.
27 #include <linux/capability.h>
28 #include <linux/errno.h>
29 #include <linux/types.h>
30 #include <linux/times.h>
31 #include <linux/socket.h>
32 #include <linux/sockios.h>
33 #include <linux/net.h>
34 #include <linux/route.h>
35 #include <linux/netdevice.h>
36 #include <linux/in6.h>
37 #include <linux/init.h>
38 #include <linux/netlink.h>
39 #include <linux/if_arp.h>
42 #include <linux/proc_fs.h>
43 #include <linux/seq_file.h>
48 #include <net/ip6_fib.h>
49 #include <net/ip6_route.h>
50 #include <net/ndisc.h>
51 #include <net/addrconf.h>
53 #include <linux/rtnetlink.h>
56 #include <net/netevent.h>
58 #include <asm/uaccess.h>
61 #include <linux/sysctl.h>
64 /* Set to 3 to get tracing. */
/* Debug/trace macros.  Both RT6_TRACE definitions appear here; in the
 * full file they are presumably the two arms of an RT6_DEBUG #if/#else
 * (the conditional lines are missing from this extract — TODO confirm). */
68 #define RDBG(x) printk x
69 #define RT6_TRACE(x...) printk(KERN_DEBUG x)
/* No-op variant used when tracing is compiled out. */
72 #define RT6_TRACE(x...) do { ; } while (0)
/* 0 = do not cache clones of off-link routes (see the #if CLONE_OFFLINK_ROUTE
 * sites in ip6_pol_route_input/output below). */
75 #define CLONE_OFFLINK_ROUTE 0
/* Strictness flags passed to rt6_select()/rt6_score_route(). */
77 #define RT6_SELECT_F_IFACE 0x1
78 #define RT6_SELECT_F_REACHABLE 0x2
/* Default tunables for the IPv6 route-cache garbage collector
 * (consumed by ip6_dst_gc() and, presumably, exported via sysctl —
 * the sysctl table is not visible in this extract). */
80 static int ip6_rt_max_size = 4096;
81 static int ip6_rt_gc_min_interval = HZ / 2;
82 static int ip6_rt_gc_timeout = 60*HZ;
83 int ip6_rt_gc_interval = 30*HZ;
84 static int ip6_rt_gc_elasticity = 9;
/* How long a PMTU-learned host route stays valid (see rt6_pmtu_discovery). */
85 static int ip6_rt_mtu_expires = 10*60*HZ;
/* Minimum advertised MSS: IPv6 minimum link MTU minus TCP (20) and
 * IPv6 (40) header sizes. */
86 static int ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
/* Forward declarations for the dst_ops callbacks and helpers below. */
88 static struct rt6_info * ip6_rt_copy(struct rt6_info *ort);
89 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
90 static struct dst_entry *ip6_negative_advice(struct dst_entry *);
91 static void ip6_dst_destroy(struct dst_entry *);
92 static void ip6_dst_ifdown(struct dst_entry *,
93 struct net_device *dev, int how);
94 static int ip6_dst_gc(void);
96 static int ip6_pkt_discard(struct sk_buff *skb);
97 static int ip6_pkt_discard_out(struct sk_buff *skb);
98 static void ip6_link_failure(struct sk_buff *skb);
99 static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
101 #ifdef CONFIG_IPV6_ROUTE_INFO
/* Prototypes for RFC 4191 Route Information option handling.
 * NOTE(review): the rt6_add_route_info prototype is missing its final
 * parameter line in this extract (likely a preference argument — see the
 * definition further down). */
102 static struct rt6_info *rt6_add_route_info(struct in6_addr *prefix, int prefixlen,
103 struct in6_addr *gwaddr, int ifindex,
105 static struct rt6_info *rt6_get_route_info(struct in6_addr *prefix, int prefixlen,
106 struct in6_addr *gwaddr, int ifindex);
/* dst_ops vtable for IPv6 routes: wires the route cache machinery
 * (validation, teardown, PMTU updates) to the handlers in this file. */
109 static struct dst_ops ip6_dst_ops = {
111 .protocol = __constant_htons(ETH_P_IPV6),
114 .check = ip6_dst_check,
115 .destroy = ip6_dst_destroy,
116 .ifdown = ip6_dst_ifdown,
117 .negative_advice = ip6_negative_advice,
118 .link_failure = ip6_link_failure,
119 .update_pmtu = ip6_rt_update_pmtu,
120 .entry_size = sizeof(struct rt6_info),
/* Sentinel "no route" entry returned by lookups when nothing matches.
 * It is a reject route (-ENETUNREACH) pinned on the loopback device with
 * the worst possible metric, and holds permanent refcounts so it is
 * never freed. */
123 struct rt6_info ip6_null_entry = {
126 .__refcnt = ATOMIC_INIT(1),
128 .dev = &loopback_dev,
130 .error = -ENETUNREACH,
131 .metrics = { [RTAX_HOPLIMIT - 1] = 255, },
132 .input = ip6_pkt_discard,
133 .output = ip6_pkt_discard_out,
135 .path = (struct dst_entry*)&ip6_null_entry,
138 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
139 .rt6i_metric = ~(u32) 0,
140 .rt6i_ref = ATOMIC_INIT(1),
143 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
/* "Prohibit" sentinel route used by policy routing rules; discards
 * traffic.  The .error initializer is not visible in this extract
 * (presumably -EACCES — TODO confirm against the full file). */
145 struct rt6_info ip6_prohibit_entry = {
148 .__refcnt = ATOMIC_INIT(1),
150 .dev = &loopback_dev,
153 .metrics = { [RTAX_HOPLIMIT - 1] = 255, },
154 .input = ip6_pkt_discard,
155 .output = ip6_pkt_discard_out,
157 .path = (struct dst_entry*)&ip6_prohibit_entry,
160 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
161 .rt6i_metric = ~(u32) 0,
162 .rt6i_ref = ATOMIC_INIT(1),
/* "Blackhole" sentinel route for policy rules: silently discards
 * packets (no error value is visible in this extract). */
165 struct rt6_info ip6_blk_hole_entry = {
168 .__refcnt = ATOMIC_INIT(1),
170 .dev = &loopback_dev,
173 .metrics = { [RTAX_HOPLIMIT - 1] = 255, },
174 .input = ip6_pkt_discard,
175 .output = ip6_pkt_discard_out,
177 .path = (struct dst_entry*)&ip6_blk_hole_entry,
180 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
181 .rt6i_metric = ~(u32) 0,
182 .rt6i_ref = ATOMIC_INIT(1),
187 /* allocate dst with ip6_dst_ops */
/* Thin wrapper: allocates a route-cache entry bound to this file's
 * dst_ops table.  Returns NULL on allocation failure (per dst_alloc). */
188 static __inline__ struct rt6_info *ip6_dst_alloc(void)
190 return (struct rt6_info *)dst_alloc(&ip6_dst_ops);
/* dst_ops.destroy callback: detaches the route from its inet6_dev.
 * The reference drop on 'idev' (in6_dev_put) is not visible in this
 * extract but is presumably what follows clearing the pointer. */
193 static void ip6_dst_destroy(struct dst_entry *dst)
195 struct rt6_info *rt = (struct rt6_info *)dst;
196 struct inet6_dev *idev = rt->rt6i_idev;
199 rt->rt6i_idev = NULL;
/* dst_ops.ifdown callback: when 'dev' goes away, re-point the route's
 * inet6_dev at loopback so the rt6_info never holds a stale idev.
 * The release of the old idev is presumably in the missing lines. */
204 static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
207 struct rt6_info *rt = (struct rt6_info *)dst;
208 struct inet6_dev *idev = rt->rt6i_idev;
210 if (dev != &loopback_dev && idev != NULL && idev->dev == dev) {
211 struct inet6_dev *loopback_idev = in6_dev_get(&loopback_dev);
212 if (loopback_idev != NULL) {
213 rt->rt6i_idev = loopback_idev;
/* True when the route carries RTF_EXPIRES and its deadline has passed. */
219 static __inline__ int rt6_check_expired(const struct rt6_info *rt)
221 return (rt->rt6i_flags & RTF_EXPIRES &&
222 time_after(jiffies, rt->rt6i_expires));
/* Multicast and link-local destinations require strict (interface-bound)
 * route matching. */
225 static inline int rt6_need_strict(struct in6_addr *daddr)
227 return (ipv6_addr_type(daddr) &
228 (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL));
232 * Route lookup. Any table->tb6_lock is implied.
/* Walk the sibling list starting at 'rt' and pick the entry whose device
 * matches 'oif'.  Loopback routes may match via their idev's ifindex
 * ('local' candidate).  Falls through to ip6_null_entry when a strict
 * match is required but none exists.  NOTE(review): several branch
 * bodies are missing from this extract; comments reflect the visible
 * skeleton only. */
235 static __inline__ struct rt6_info *rt6_device_match(struct rt6_info *rt,
239 struct rt6_info *local = NULL;
240 struct rt6_info *sprt;
243 for (sprt = rt; sprt; sprt = sprt->u.next) {
244 struct net_device *dev = sprt->rt6i_dev;
245 if (dev->ifindex == oif)
247 if (dev->flags & IFF_LOOPBACK) {
248 if (sprt->rt6i_idev == NULL ||
249 sprt->rt6i_idev->dev->ifindex != oif) {
252 if (local && (!oif ||
253 local->rt6i_idev->dev->ifindex == oif))
264 return &ip6_null_entry;
269 #ifdef CONFIG_IPV6_ROUTER_PREF
/* Router Reachability Probing (RFC 4191-style router preference support):
 * if the next hop's neighbour entry is not in a VALID state and no probe
 * was sent within rtr_probe_interval, emit a unicast-solicit NS toward
 * the router.  Rate-limited via neigh->updated. */
270 static void rt6_probe(struct rt6_info *rt)
272 struct neighbour *neigh = rt ? rt->rt6i_nexthop : NULL;
274 * Okay, this does not seem to be appropriate
275 * for now, however, we need to check if it
276 * is really so; aka Router Reachability Probing.
278 * Router Reachability Probe MUST be rate-limited
279 * to no more than one per minute.
281 if (!neigh || (neigh->nud_state & NUD_VALID))
283 read_lock_bh(&neigh->lock);
284 if (!(neigh->nud_state & NUD_VALID) &&
285 time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) {
286 struct in6_addr mcaddr;
287 struct in6_addr *target;
289 neigh->updated = jiffies;
/* Drop the lock before sending: ndisc_send_ns may sleep/allocate. */
290 read_unlock_bh(&neigh->lock);
292 target = (struct in6_addr *)&neigh->primary_key;
293 addrconf_addr_solict_mult(target, &mcaddr);
294 ndisc_send_ns(rt->rt6i_dev, NULL, target, &mcaddr, NULL);
296 read_unlock_bh(&neigh->lock);
/* No-op stub when CONFIG_IPV6_ROUTER_PREF is disabled (the #else line
 * is missing from this extract). */
299 static inline void rt6_probe(struct rt6_info *rt)
306 * Default Router Selection (RFC 2461 6.3.6)
/* Score the route's device against the requested output interface.
 * A loopback route matches through its idev's ifindex.  The return
 * values themselves are on lines missing from this extract. */
308 static int inline rt6_check_dev(struct rt6_info *rt, int oif)
310 struct net_device *dev = rt->rt6i_dev;
311 if (!oif || dev->ifindex == oif)
313 if ((dev->flags & IFF_LOOPBACK) &&
314 rt->rt6i_idev && rt->rt6i_idev->dev->ifindex == oif)
/* Score next-hop reachability: non-gateway/NONEXTHOP routes need no
 * neighbour check; otherwise consult the neighbour's NUD state under
 * its read lock. */
319 static int inline rt6_check_neigh(struct rt6_info *rt)
321 struct neighbour *neigh = rt->rt6i_nexthop;
323 if (rt->rt6i_flags & RTF_NONEXTHOP ||
324 !(rt->rt6i_flags & RTF_GATEWAY))
327 read_lock_bh(&neigh->lock);
328 if (neigh->nud_state & NUD_VALID)
330 read_unlock_bh(&neigh->lock);
/* Combined score for default-router selection: device match (low bits),
 * RFC 4191 router preference (shifted in when CONFIG_IPV6_ROUTER_PREF),
 * and neighbour reachability.  The strict flags turn a failed component
 * into an outright rejection (return paths are in missing lines). */
335 static int rt6_score_route(struct rt6_info *rt, int oif,
340 m = rt6_check_dev(rt, oif);
341 if (!m && (strict & RT6_SELECT_F_IFACE))
343 #ifdef CONFIG_IPV6_ROUTER_PREF
344 m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2;
346 n = rt6_check_neigh(rt);
349 else if (!n && strict & RT6_SELECT_F_REACHABLE)
/* Pick the best route among same-metric siblings at '*head' using
 * rt6_score_route().  If nothing reachable matched under
 * RT6_SELECT_F_REACHABLE, rotate the list (round-robin) under a local
 * spinlock so unreachable routers are tried in turn.  Falls back to
 * ip6_null_entry when no candidate scores. */
354 static struct rt6_info *rt6_select(struct rt6_info **head, int oif,
357 struct rt6_info *match = NULL, *last = NULL;
358 struct rt6_info *rt, *rt0 = *head;
362 RT6_TRACE("%s(head=%p(*head=%p), oif=%d)\n",
363 __FUNCTION__, head, head ? *head : NULL, oif);
/* Only consider entries with the same metric as the list head. */
365 for (rt = rt0, metric = rt0->rt6i_metric;
366 rt && rt->rt6i_metric == metric && (!last || rt != rt0);
370 if (rt6_check_expired(rt))
375 m = rt6_score_route(rt, oif, strict);
389 (strict & RT6_SELECT_F_REACHABLE) &&
390 last && last != rt0) {
391 /* no entries matched; do round-robin */
392 static DEFINE_SPINLOCK(lock);
395 rt0->u.next = last->u.next;
400 RT6_TRACE("%s() => %p, score=%d\n",
401 __FUNCTION__, match, mpri);
403 return (match ? match : &ip6_null_entry);
406 #ifdef CONFIG_IPV6_ROUTE_INFO
/* Process an RFC 4191 Route Information option received from router
 * 'gwaddr' on 'dev': validate the option's length/prefix-len pairing,
 * then add, refresh, or delete (lifetime == 0) the corresponding
 * RTF_ROUTEINFO route.  Error-return lines are missing from this
 * extract; only the decision skeleton is visible. */
407 int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
408 struct in6_addr *gwaddr)
410 struct route_info *rinfo = (struct route_info *) opt;
411 struct in6_addr prefix_buf, *prefix;
416 if (len < sizeof(struct route_info)) {
420 /* Sanity check for prefix_len and length */
421 if (rinfo->length > 3) {
423 } else if (rinfo->prefix_len > 128) {
425 } else if (rinfo->prefix_len > 64) {
426 if (rinfo->length < 2) {
429 } else if (rinfo->prefix_len > 0) {
430 if (rinfo->length < 1) {
435 pref = rinfo->route_pref;
436 if (pref == ICMPV6_ROUTER_PREF_INVALID)
437 pref = ICMPV6_ROUTER_PREF_MEDIUM;
/* NOTE(review): htonl on a wire field looks like it should be ntohl;
 * on big-endian they are identical, so behavior matches upstream —
 * do not change without checking the mainline history. */
439 lifetime = htonl(rinfo->lifetime);
440 if (lifetime == 0xffffffff) {
442 } else if (lifetime > 0x7fffffff/HZ) {
443 /* Avoid arithmetic overflow */
444 lifetime = 0x7fffffff/HZ - 1;
447 if (rinfo->length == 3)
448 prefix = (struct in6_addr *)rinfo->prefix;
450 /* this function is safe */
451 ipv6_addr_prefix(&prefix_buf,
452 (struct in6_addr *)rinfo->prefix,
454 prefix = &prefix_buf;
457 rt = rt6_get_route_info(prefix, rinfo->prefix_len, gwaddr, dev->ifindex);
/* Zero lifetime means "withdraw this route". */
459 if (rt && !lifetime) {
460 ip6_del_rt(rt, NULL, NULL, NULL);
465 rt = rt6_add_route_info(prefix, rinfo->prefix_len, gwaddr, dev->ifindex,
468 rt->rt6i_flags = RTF_ROUTEINFO |
469 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
/* Infinite lifetime: clear expiry; otherwise arm RTF_EXPIRES. */
472 if (lifetime == 0xffffffff) {
473 rt->rt6i_flags &= ~RTF_EXPIRES;
475 rt->rt6i_expires = jiffies + HZ * lifetime;
476 rt->rt6i_flags |= RTF_EXPIRES;
478 dst_release(&rt->u.dst);
/* Shared lookup backtracking: when a strict lookup landed on
 * ip6_null_entry, climb fib6 parents until a node carrying route info
 * (or the tree root) is found.  Expects 'rt', 'fn', and 'flags' in the
 * caller's scope; continuation lines are missing from this extract. */
484 #define BACKTRACK() \
485 if (rt == &ip6_null_entry && flags & RT6_F_STRICT) { \
486 while ((fn = fn->parent) != NULL) { \
487 if (fn->fn_flags & RTN_TL_ROOT) { \
488 dst_hold(&rt->u.dst); \
491 if (fn->fn_flags & RTN_RTINFO) \
/* Policy-rule lookup callback (no cloning): find the fib6 node for the
 * flow, filter by device via rt6_device_match, take a reference, and
 * return with lastuse refreshed.  Runs under the table's read lock. */
496 static struct rt6_info *ip6_pol_route_lookup(struct fib6_table *table,
497 struct flowi *fl, int flags)
499 struct fib6_node *fn;
502 read_lock_bh(&table->tb6_lock);
503 fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src);
506 rt = rt6_device_match(rt, fl->oif, flags & RT6_F_STRICT);
508 dst_hold(&rt->u.dst);
510 read_unlock_bh(&table->tb6_lock);
512 rt->u.dst.lastuse = jiffies;
/* Public lookup entry point: builds a flow from daddr/saddr (flow setup
 * lines are missing from this extract) and dispatches through the
 * policy-rule engine to ip6_pol_route_lookup.  Caller must release the
 * returned route. */
519 struct rt6_info *rt6_lookup(struct in6_addr *daddr, struct in6_addr *saddr,
531 struct dst_entry *dst;
532 int flags = strict ? RT6_F_STRICT : 0;
534 dst = fib6_rule_lookup(&fl, flags, ip6_pol_route_lookup);
536 return (struct rt6_info *) dst;
543 /* ip6_ins_rt is called with FREE table->tb6_lock.
544 It takes new route entry, the addition fails by any reason the
545 route is freed. In any case, if caller does not hold it, it may
/* Insert 'rt' into its fib6 table under the table write lock.
 * Ownership of 'rt' transfers to the tree on success; on failure the
 * route is freed by fib6_add (per the comment above). */
549 int ip6_ins_rt(struct rt6_info *rt, struct nlmsghdr *nlh,
550 void *_rtattr, struct netlink_skb_parms *req)
553 struct fib6_table *table;
555 table = rt->rt6i_table;
556 write_lock_bh(&table->tb6_lock);
557 err = fib6_add(&table->tb6_root, rt, nlh, _rtattr, req);
558 write_unlock_bh(&table->tb6_lock);
/* Copy-on-write clone of connected route 'ort' for a specific
 * destination: narrows the copy to a /128 RTF_CACHE host route toward
 * 'daddr', marks anycast when daddr equals the (non-host) prefix
 * address, and resolves a fresh neighbour for the gateway. */
563 static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, struct in6_addr *daddr,
564 struct in6_addr *saddr)
572 rt = ip6_rt_copy(ort);
575 if (!(rt->rt6i_flags&RTF_GATEWAY)) {
576 if (rt->rt6i_dst.plen != 128 &&
577 ipv6_addr_equal(&rt->rt6i_dst.addr, daddr))
578 rt->rt6i_flags |= RTF_ANYCAST;
/* On-link route: the destination itself becomes the next hop. */
579 ipv6_addr_copy(&rt->rt6i_gateway, daddr);
582 ipv6_addr_copy(&rt->rt6i_dst.addr, daddr);
583 rt->rt6i_dst.plen = 128;
584 rt->rt6i_flags |= RTF_CACHE;
585 rt->u.dst.flags |= DST_HOST;
587 #ifdef CONFIG_IPV6_SUBTREES
/* Source-routed subtree: pin the source key to a host entry too. */
588 if (rt->rt6i_src.plen && saddr) {
589 ipv6_addr_copy(&rt->rt6i_src.addr, saddr);
590 rt->rt6i_src.plen = 128;
594 rt->rt6i_nexthop = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway);
/* Clone 'ort' (gatewayed or NONEXTHOP route) into a /128 RTF_CACHE host
 * route for 'daddr', keeping the original gateway and cloning its
 * neighbour reference rather than resolving a new one. */
601 static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort, struct in6_addr *daddr)
603 struct rt6_info *rt = ip6_rt_copy(ort);
605 ipv6_addr_copy(&rt->rt6i_dst.addr, daddr);
606 rt->rt6i_dst.plen = 128;
607 rt->rt6i_flags |= RTF_CACHE;
/* Preserve the reject semantics of the parent route. */
608 if (rt->rt6i_flags & RTF_REJECT)
609 rt->u.dst.error = ort->u.dst.error;
610 rt->u.dst.flags |= DST_HOST;
611 rt->rt6i_nexthop = neigh_clone(ort->rt6i_nexthop);
/* Input-path policy lookup with route-cache cloning.  First pass prefers
 * reachable routers (RT6_SELECT_F_REACHABLE); on a non-cached hit the
 * table lock is dropped, a host clone is made (COW for connected routes,
 * plain clone only if CLONE_OFFLINK_ROUTE), and inserted — with a
 * relookup on the race where someone inserted it first.  Labels and the
 * retry loop are in lines missing from this extract. */
616 static struct rt6_info *ip6_pol_route_input(struct fib6_table *table,
617 struct flowi *fl, int flags)
619 struct fib6_node *fn;
620 struct rt6_info *rt, *nrt;
624 int reachable = RT6_SELECT_F_REACHABLE;
626 if (flags & RT6_F_STRICT)
627 strict = RT6_SELECT_F_IFACE;
630 read_lock_bh(&table->tb6_lock);
633 fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src);
636 rt = rt6_select(&fn->leaf, fl->iif, strict | reachable);
638 if (rt == &ip6_null_entry ||
639 rt->rt6i_flags & RTF_CACHE)
642 dst_hold(&rt->u.dst);
643 read_unlock_bh(&table->tb6_lock);
645 if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP))
646 nrt = rt6_alloc_cow(rt, &fl->fl6_dst, &fl->fl6_src);
648 #if CLONE_OFFLINK_ROUTE
649 nrt = rt6_alloc_clone(rt, &fl->fl6_dst);
655 dst_release(&rt->u.dst);
656 rt = nrt ? : &ip6_null_entry;
658 dst_hold(&rt->u.dst);
660 err = ip6_ins_rt(nrt, NULL, NULL, NULL);
669 * Race condition! In the gap, when table->tb6_lock was
670 * released someone could insert this route. Relookup.
672 dst_release(&rt->u.dst);
680 dst_hold(&rt->u.dst);
681 read_unlock_bh(&table->tb6_lock);
683 rt->u.dst.lastuse = jiffies;
/* Route an incoming packet: build a flow from the IPv6 header (several
 * flow fields are on lines missing from this extract), force strict
 * matching for multicast/link-local destinations, and attach the
 * resulting dst to the skb. */
689 void ip6_route_input(struct sk_buff *skb)
691 struct ipv6hdr *iph = skb->nh.ipv6h;
693 .iif = skb->dev->ifindex,
698 .flowlabel = (* (u32 *) iph)&IPV6_FLOWINFO_MASK,
701 .proto = iph->nexthdr,
705 if (rt6_need_strict(&iph->daddr))
706 flags |= RT6_F_STRICT;
708 skb->dst = fib6_rule_lookup(&fl, flags, ip6_pol_route_input);
/* Output-path policy lookup; mirrors ip6_pol_route_input except it
 * selects on fl->oif instead of fl->iif.  Same clone-and-insert dance
 * with a relookup on the insertion race; labels/retry lines are missing
 * from this extract. */
711 static struct rt6_info *ip6_pol_route_output(struct fib6_table *table,
712 struct flowi *fl, int flags)
714 struct fib6_node *fn;
715 struct rt6_info *rt, *nrt;
719 int reachable = RT6_SELECT_F_REACHABLE;
721 if (flags & RT6_F_STRICT)
722 strict = RT6_SELECT_F_IFACE;
725 read_lock_bh(&table->tb6_lock);
728 fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src);
731 rt = rt6_select(&fn->leaf, fl->oif, strict | reachable);
733 if (rt == &ip6_null_entry ||
734 rt->rt6i_flags & RTF_CACHE)
737 dst_hold(&rt->u.dst);
738 read_unlock_bh(&table->tb6_lock);
740 if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP))
741 nrt = rt6_alloc_cow(rt, &fl->fl6_dst, &fl->fl6_src);
743 #if CLONE_OFFLINK_ROUTE
744 nrt = rt6_alloc_clone(rt, &fl->fl6_dst);
750 dst_release(&rt->u.dst);
751 rt = nrt ? : &ip6_null_entry;
753 dst_hold(&rt->u.dst);
755 err = ip6_ins_rt(nrt, NULL, NULL, NULL);
764 * Race condition! In the gap, when table->tb6_lock was
765 * released someone could insert this route. Relookup.
767 dst_release(&rt->u.dst);
775 dst_hold(&rt->u.dst);
776 read_unlock_bh(&table->tb6_lock);
778 rt->u.dst.lastuse = jiffies;
/* Public output-route lookup for sockets: strict matching for
 * multicast/link-local destinations, then dispatch through the policy
 * engine to ip6_pol_route_output. */
783 struct dst_entry * ip6_route_output(struct sock *sk, struct flowi *fl)
787 if (rt6_need_strict(&fl->fl6_dst))
788 flags |= RT6_F_STRICT;
790 return fib6_rule_lookup(fl, flags, ip6_pol_route_output);
795 * Destination cache support functions
/* dst_ops.check: a cached route is still valid only while its fib6
 * node's serial number matches the cookie recorded at lookup time. */
798 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
802 rt = (struct rt6_info *) dst;
804 if (rt && rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie))
/* dst_ops.negative_advice: on negative feedback, evict cached clones
 * from the tree (the final release/return lines are missing here). */
810 static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
812 struct rt6_info *rt = (struct rt6_info *) dst;
815 if (rt->rt6i_flags & RTF_CACHE)
816 ip6_del_rt(rt, NULL, NULL, NULL);
/* dst_ops.link_failure: report address-unreachable to the sender, then
 * expire the cached clone immediately, or invalidate the fib6 node's
 * serial number for default routes so lookups re-evaluate. */
823 static void ip6_link_failure(struct sk_buff *skb)
827 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0, skb->dev);
829 rt = (struct rt6_info *) skb->dst;
831 if (rt->rt6i_flags&RTF_CACHE) {
832 dst_set_expires(&rt->u.dst, 0);
833 rt->rt6i_flags |= RTF_EXPIRES;
834 } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT))
835 rt->rt6i_node->fn_sernum = -1;
/* dst_ops.update_pmtu: shrink the MTU metric on host (/128) routes only,
 * marking RTF_MODIFIED; below the IPv6 minimum MTU, keep 1280 but set
 * ALLFRAG so a fragment header is always emitted.  Notifies netevent
 * listeners of the change. */
839 static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
841 struct rt6_info *rt6 = (struct rt6_info*)dst;
843 if (mtu < dst_mtu(dst) && rt6->rt6i_dst.plen == 128) {
844 rt6->rt6i_flags |= RTF_MODIFIED;
845 if (mtu < IPV6_MIN_MTU) {
847 dst->metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG;
849 dst->metrics[RTAX_MTU-1] = mtu;
850 call_netevent_notifiers(NETEVENT_PMTU_UPDATE, dst);
854 static int ipv6_get_mtu(struct net_device *dev);
/* Derive the advertised MSS from a path MTU: subtract IPv6+TCP headers,
 * clamp to the configured minimum, and cap per the IPV6_MAXPLEN note
 * below (the cap assignment itself is on a missing line). */
856 static inline unsigned int ipv6_advmss(unsigned int mtu)
858 mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
860 if (mtu < ip6_rt_min_advmss)
861 mtu = ip6_rt_min_advmss;
864 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
865 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
866 * IPV6_MAXPLEN is also valid and means: "any MSS,
867 * rely only on pmtu discovery"
869 if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
/* Per-CPU-less global list of dsts allocated for neighbour discovery,
 * reaped by ndisc_dst_gc() under ndisc_lock. */
874 static struct dst_entry *ndisc_dst_gc_list;
875 static DEFINE_SPINLOCK(ndisc_lock);
/* Build a standalone dst for an NDISC transmission toward 'addr' on
 * 'dev': resolves a neighbour if none was supplied, initializes metrics
 * from the device MTU, installs the caller's output handler, threads the
 * entry onto ndisc_dst_gc_list, and kicks the fib6 GC so it is reaped.
 * Returns NULL-ish paths on allocation failure (error lines missing). */
877 struct dst_entry *ndisc_dst_alloc(struct net_device *dev,
878 struct neighbour *neigh,
879 struct in6_addr *addr,
880 int (*output)(struct sk_buff *))
883 struct inet6_dev *idev = in6_dev_get(dev);
885 if (unlikely(idev == NULL))
888 rt = ip6_dst_alloc();
889 if (unlikely(rt == NULL)) {
898 neigh = ndisc_get_neigh(dev, addr);
901 rt->rt6i_idev = idev;
902 rt->rt6i_nexthop = neigh;
903 atomic_set(&rt->u.dst.__refcnt, 1);
904 rt->u.dst.metrics[RTAX_HOPLIMIT-1] = 255;
905 rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev);
906 rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dst_mtu(&rt->u.dst));
907 rt->u.dst.output = output;
909 #if 0 /* there's no chance to use these for ndisc */
910 rt->u.dst.flags = ipv6_addr_type(addr) & IPV6_ADDR_UNICAST
913 ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
914 rt->rt6i_dst.plen = 128;
917 spin_lock_bh(&ndisc_lock);
918 rt->u.dst.next = ndisc_dst_gc_list;
919 ndisc_dst_gc_list = &rt->u.dst;
920 spin_unlock_bh(&ndisc_lock);
922 fib6_force_start_gc();
925 return (struct dst_entry *)rt;
/* Reap unreferenced entries from ndisc_dst_gc_list; '*more' presumably
 * reports whether entries remain (the counting/unlink lines are missing
 * from this extract — TODO confirm against the full file). */
928 int ndisc_dst_gc(int *more)
930 struct dst_entry *dst, *next, **pprev;
936 spin_lock_bh(&ndisc_lock);
937 pprev = &ndisc_dst_gc_list;
939 while ((dst = *pprev) != NULL) {
940 if (!atomic_read(&dst->__refcnt)) {
950 spin_unlock_bh(&ndisc_lock);
/* dst_ops.gc callback: adaptive route-cache garbage collection.
 * Skips work when invoked again too soon and the cache is small; the
 * 'expire' horizon shrinks geometrically (elasticity) while pressure
 * persists and resets when the cache dips under gc_thresh.  Returns
 * nonzero when the cache is still over ip6_rt_max_size. */
955 static int ip6_dst_gc(void)
957 static unsigned expire = 30*HZ;
958 static unsigned long last_gc;
959 unsigned long now = jiffies;
961 if (time_after(last_gc + ip6_rt_gc_min_interval, now) &&
962 atomic_read(&ip6_dst_ops.entries) <= ip6_rt_max_size)
968 if (atomic_read(&ip6_dst_ops.entries) < ip6_dst_ops.gc_thresh)
969 expire = ip6_rt_gc_timeout>>1;
972 expire -= expire>>ip6_rt_gc_elasticity;
973 return (atomic_read(&ip6_dst_ops.entries) > ip6_rt_max_size);
976 /* Clean host part of a prefix. Not necessary in radix tree,
977 but results in cleaner routing tables.
979 Remove it only when all the things will work!
/* Return the device's IPv6 MTU (cnf.mtu6) if it has an inet6_dev,
 * falling back to IPV6_MIN_MTU; the in6_dev_put/return lines are
 * missing from this extract. */
982 static int ipv6_get_mtu(struct net_device *dev)
984 int mtu = IPV6_MIN_MTU;
985 struct inet6_dev *idev;
987 idev = in6_dev_get(dev);
989 mtu = idev->cnf.mtu6;
/* Return the device's configured hop limit, defaulting to the global
 * ipv6_devconf value when the device has no inet6_dev. */
995 int ipv6_get_hoplimit(struct net_device *dev)
997 int hoplimit = ipv6_devconf.hop_limit;
998 struct inet6_dev *idev;
1000 idev = in6_dev_get(dev);
1002 hoplimit = idev->cnf.hop_limit;
/* Create and insert a route described by an in6_rtmsg into 'table_id'.
 * Validates prefix lengths, resolves the output device (directly by
 * ifindex, or via a gateway lookup), promotes loopback routes to reject
 * routes, applies netlink RTA_METRICS, fills default HOPLIMIT/MTU/ADVMSS
 * metrics, and hands the finished rt6_info to ip6_ins_rt (which owns it
 * from then on).  Error/cleanup labels are largely in lines missing from
 * this extract. */
1012 int ip6_route_add(struct in6_rtmsg *rtmsg, struct nlmsghdr *nlh,
1013 void *_rtattr, struct netlink_skb_parms *req,
1018 struct rtattr **rta;
1019 struct rt6_info *rt = NULL;
1020 struct net_device *dev = NULL;
1021 struct inet6_dev *idev = NULL;
1022 struct fib6_table *table;
1025 rta = (struct rtattr **) _rtattr;
/* Prefix lengths beyond 128 bits are invalid. */
1027 if (rtmsg->rtmsg_dst_len > 128 || rtmsg->rtmsg_src_len > 128)
1029 #ifndef CONFIG_IPV6_SUBTREES
/* Source-based routing requires subtree support. */
1030 if (rtmsg->rtmsg_src_len)
1033 if (rtmsg->rtmsg_ifindex) {
1035 dev = dev_get_by_index(rtmsg->rtmsg_ifindex);
1038 idev = in6_dev_get(dev);
1043 if (rtmsg->rtmsg_metric == 0)
1044 rtmsg->rtmsg_metric = IP6_RT_PRIO_USER;
1046 table = fib6_new_table(table_id);
1047 if (table == NULL) {
1052 rt = ip6_dst_alloc();
1059 rt->u.dst.obsolete = -1;
1060 rt->rt6i_expires = jiffies + clock_t_to_jiffies(rtmsg->rtmsg_info);
/* Netlink callers supply the protocol; ioctl callers get RTPROT_BOOT. */
1061 if (nlh && (r = NLMSG_DATA(nlh))) {
1062 rt->rt6i_protocol = r->rtm_protocol;
1064 rt->rt6i_protocol = RTPROT_BOOT;
1067 addr_type = ipv6_addr_type(&rtmsg->rtmsg_dst);
1069 if (addr_type & IPV6_ADDR_MULTICAST)
1070 rt->u.dst.input = ip6_mc_input;
1072 rt->u.dst.input = ip6_forward;
1074 rt->u.dst.output = ip6_output;
1076 ipv6_addr_prefix(&rt->rt6i_dst.addr,
1077 &rtmsg->rtmsg_dst, rtmsg->rtmsg_dst_len);
1078 rt->rt6i_dst.plen = rtmsg->rtmsg_dst_len;
1079 if (rt->rt6i_dst.plen == 128)
1080 rt->u.dst.flags = DST_HOST;
1082 #ifdef CONFIG_IPV6_SUBTREES
1083 ipv6_addr_prefix(&rt->rt6i_src.addr,
1084 &rtmsg->rtmsg_src, rtmsg->rtmsg_src_len);
1085 rt->rt6i_src.plen = rtmsg->rtmsg_src_len;
1088 rt->rt6i_metric = rtmsg->rtmsg_metric;
1090 /* We cannot add true routes via loopback here,
1091 they would result in kernel looping; promote them to reject routes
1093 if ((rtmsg->rtmsg_flags&RTF_REJECT) ||
1094 (dev && (dev->flags&IFF_LOOPBACK) && !(addr_type&IPV6_ADDR_LOOPBACK))) {
1095 /* hold loopback dev/idev if we haven't done so. */
1096 if (dev != &loopback_dev) {
1101 dev = &loopback_dev;
1103 idev = in6_dev_get(dev);
1109 rt->u.dst.output = ip6_pkt_discard_out;
1110 rt->u.dst.input = ip6_pkt_discard;
1111 rt->u.dst.error = -ENETUNREACH;
1112 rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
1116 if (rtmsg->rtmsg_flags & RTF_GATEWAY) {
1117 struct in6_addr *gw_addr;
1120 gw_addr = &rtmsg->rtmsg_gateway;
1121 ipv6_addr_copy(&rt->rt6i_gateway, &rtmsg->rtmsg_gateway);
1122 gwa_type = ipv6_addr_type(gw_addr);
1124 if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
1125 struct rt6_info *grt;
1127 /* IPv6 strictly inhibits using not link-local
1128 addresses as nexthop address.
1129 Otherwise, router will not able to send redirects.
1130 It is very good, but in some (rare!) circumstances
1131 (SIT, PtP, NBMA NOARP links) it is handy to allow
1132 some exceptions. --ANK
1135 if (!(gwa_type&IPV6_ADDR_UNICAST))
/* Resolve the (non-link-local) gateway recursively to find its
 * device; the gateway itself must not be reached via a gateway. */
1138 grt = rt6_lookup(gw_addr, NULL, rtmsg->rtmsg_ifindex, 1);
1140 err = -EHOSTUNREACH;
1144 if (dev != grt->rt6i_dev) {
1145 dst_release(&grt->u.dst);
1149 dev = grt->rt6i_dev;
1150 idev = grt->rt6i_idev;
1152 in6_dev_hold(grt->rt6i_idev);
1154 if (!(grt->rt6i_flags&RTF_GATEWAY))
1156 dst_release(&grt->u.dst);
1162 if (dev == NULL || (dev->flags&IFF_LOOPBACK))
1170 if (rtmsg->rtmsg_flags & (RTF_GATEWAY|RTF_NONEXTHOP)) {
1171 rt->rt6i_nexthop = __neigh_lookup_errno(&nd_tbl, &rt->rt6i_gateway, dev);
1172 if (IS_ERR(rt->rt6i_nexthop)) {
1173 err = PTR_ERR(rt->rt6i_nexthop);
1174 rt->rt6i_nexthop = NULL;
1179 rt->rt6i_flags = rtmsg->rtmsg_flags;
/* Apply caller-supplied metrics (netlink RTA_METRICS nest). */
1182 if (rta && rta[RTA_METRICS-1]) {
1183 int attrlen = RTA_PAYLOAD(rta[RTA_METRICS-1]);
1184 struct rtattr *attr = RTA_DATA(rta[RTA_METRICS-1]);
1186 while (RTA_OK(attr, attrlen)) {
1187 unsigned flavor = attr->rta_type;
1189 if (flavor > RTAX_MAX) {
1193 rt->u.dst.metrics[flavor-1] =
1194 *(u32 *)RTA_DATA(attr);
1196 attr = RTA_NEXT(attr, attrlen);
/* Fill any metrics the caller left unset with sane defaults. */
1200 if (rt->u.dst.metrics[RTAX_HOPLIMIT-1] == 0)
1201 rt->u.dst.metrics[RTAX_HOPLIMIT-1] = -1;
1202 if (!rt->u.dst.metrics[RTAX_MTU-1])
1203 rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(dev);
1204 if (!rt->u.dst.metrics[RTAX_ADVMSS-1])
1205 rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dst_mtu(&rt->u.dst));
1206 rt->u.dst.dev = dev;
1207 rt->rt6i_idev = idev;
1208 rt->rt6i_table = table;
1209 return ip6_ins_rt(rt, nlh, _rtattr, req);
/* Error path: free the half-built route (label lines missing here). */
1217 dst_free((struct dst_entry *) rt);
/* Remove 'rt' from its fib6 table under the table write lock and drop
 * the caller's reference.  The null-entry sentinel may never be deleted
 * (the error return for that case is on a missing line). */
1221 int ip6_del_rt(struct rt6_info *rt, struct nlmsghdr *nlh, void *_rtattr, struct netlink_skb_parms *req)
1224 struct fib6_table *table;
1226 if (rt == &ip6_null_entry)
1229 table = rt->rt6i_table;
1230 write_lock_bh(&table->tb6_lock);
1232 err = fib6_del(rt, nlh, _rtattr, req);
1233 dst_release(&rt->u.dst);
1235 write_unlock_bh(&table->tb6_lock);
/* Delete the route matching the in6_rtmsg's dst/src prefixes and the
 * optional ifindex / gateway / metric filters from 'table_id'.  On a
 * match, takes a reference and delegates to ip6_del_rt outside the read
 * lock.  The not-found return is on lines missing from this extract. */
1240 static int ip6_route_del(struct in6_rtmsg *rtmsg, struct nlmsghdr *nlh,
1241 void *_rtattr, struct netlink_skb_parms *req,
1244 struct fib6_table *table;
1245 struct fib6_node *fn;
1246 struct rt6_info *rt;
1249 table = fib6_get_table(table_id);
1253 read_lock_bh(&table->tb6_lock);
1255 fn = fib6_locate(&table->tb6_root,
1256 &rtmsg->rtmsg_dst, rtmsg->rtmsg_dst_len,
1257 &rtmsg->rtmsg_src, rtmsg->rtmsg_src_len);
1260 for (rt = fn->leaf; rt; rt = rt->u.next) {
/* Each filter only applies when the caller specified it. */
1261 if (rtmsg->rtmsg_ifindex &&
1262 (rt->rt6i_dev == NULL ||
1263 rt->rt6i_dev->ifindex != rtmsg->rtmsg_ifindex))
1265 if (rtmsg->rtmsg_flags&RTF_GATEWAY &&
1266 !ipv6_addr_equal(&rtmsg->rtmsg_gateway, &rt->rt6i_gateway))
1268 if (rtmsg->rtmsg_metric &&
1269 rtmsg->rtmsg_metric != rt->rt6i_metric)
1271 dst_hold(&rt->u.dst);
1272 read_unlock_bh(&table->tb6_lock);
1274 return ip6_del_rt(rt, nlh, _rtattr, req);
1277 read_unlock_bh(&table->tb6_lock);
/* Handle an NDISC Redirect for 'dest' claimed by 'saddr'/'neigh'.
 * Validates that the redirect came from the current next hop (RFC 2461),
 * updates the neighbour cache from 'lladdr', then installs a /128
 * RTF_CACHE clone via the new router and evicts the old cached entry.
 * Only the main table is searched (see TODO below).  Several labels and
 * exits are in lines missing from this extract. */
1285 void rt6_redirect(struct in6_addr *dest, struct in6_addr *saddr,
1286 struct neighbour *neigh, u8 *lladdr, int on_link)
1288 struct rt6_info *rt, *nrt = NULL;
1289 struct fib6_node *fn;
1290 struct fib6_table *table;
1291 struct netevent_redirect netevent;
1293 /* TODO: Very lazy, might need to check all tables */
1294 table = fib6_get_table(RT6_TABLE_MAIN);
1299 * Get the "current" route for this destination and
1300 * check if the redirect has come from approriate router.
1302 * RFC 2461 specifies that redirects should only be
1303 * accepted if they come from the nexthop to the target.
1304 * Due to the way the routes are chosen, this notion
1305 * is a bit fuzzy and one might need to check all possible
1309 read_lock_bh(&table->tb6_lock);
1310 fn = fib6_lookup(&table->tb6_root, dest, NULL);
1312 for (rt = fn->leaf; rt; rt = rt->u.next) {
1314 * Current route is on-link; redirect is always invalid.
1316 * Seems, previous statement is not true. It could
1317 * be node, which looks for us as on-link (f.e. proxy ndisc)
1318 * But then router serving it might decide, that we should
1319 * know truth 8)8) --ANK (980726).
1321 if (rt6_check_expired(rt))
1323 if (!(rt->rt6i_flags & RTF_GATEWAY))
1325 if (neigh->dev != rt->rt6i_dev)
1327 if (!ipv6_addr_equal(saddr, &rt->rt6i_gateway))
1332 dst_hold(&rt->u.dst);
/* Strict destinations: backtrack up the tree for a usable node. */
1333 else if (rt6_need_strict(dest)) {
1334 while ((fn = fn->parent) != NULL) {
1335 if (fn->fn_flags & RTN_ROOT)
1337 if (fn->fn_flags & RTN_RTINFO)
1341 read_unlock_bh(&table->tb6_lock);
1344 if (net_ratelimit())
1345 printk(KERN_DEBUG "rt6_redirect: source isn't a valid nexthop "
1346 "for redirect target\n");
1351 * We have finally decided to accept it.
1354 neigh_update(neigh, lladdr, NUD_STALE,
1355 NEIGH_UPDATE_F_WEAK_OVERRIDE|
1356 NEIGH_UPDATE_F_OVERRIDE|
1357 (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
1358 NEIGH_UPDATE_F_ISROUTER))
1362 * Redirect received -> path was valid.
1363 * Look, redirects are sent only in response to data packets,
1364 * so that this nexthop apparently is reachable. --ANK
1366 dst_confirm(&rt->u.dst);
1368 /* Duplicate redirect: silently ignore. */
1369 if (neigh == rt->u.dst.neighbour)
1372 nrt = ip6_rt_copy(rt);
1376 nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
1378 nrt->rt6i_flags &= ~RTF_GATEWAY;
1380 ipv6_addr_copy(&nrt->rt6i_dst.addr, dest);
1381 nrt->rt6i_dst.plen = 128;
1382 nrt->u.dst.flags |= DST_HOST;
1384 ipv6_addr_copy(&nrt->rt6i_gateway, (struct in6_addr*)neigh->primary_key);
1385 nrt->rt6i_nexthop = neigh_clone(neigh);
1386 /* Reset pmtu, it may be better */
1387 nrt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(neigh->dev);
1388 nrt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dst_mtu(&nrt->u.dst));
1390 if (ip6_ins_rt(nrt, NULL, NULL, NULL))
1393 netevent.old = &rt->u.dst;
1394 netevent.new = &nrt->u.dst;
1395 call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
/* Evict the superseded cached entry now that the new one is in place. */
1397 if (rt->rt6i_flags&RTF_CACHE) {
1398 ip6_del_rt(rt, NULL, NULL, NULL);
1403 dst_release(&rt->u.dst);
1408 * Handle ICMP "packet too big" messages
1409 * i.e. Path MTU discovery
/* Apply a Packet Too Big report: look up the route for (daddr, saddr),
 * ignore non-shrinking PMTUs, clamp below-minimum values to
 * IPV6_MIN_MTU (setting ALLFRAG per RFC 2460), then either update an
 * existing RTF_CACHE host route in place or create an expiring clone
 * (COW for connected routes, plain clone otherwise). */
1412 void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr,
1413 struct net_device *dev, u32 pmtu)
1415 struct rt6_info *rt, *nrt;
1418 rt = rt6_lookup(daddr, saddr, dev->ifindex, 0);
1422 if (pmtu >= dst_mtu(&rt->u.dst))
1425 if (pmtu < IPV6_MIN_MTU) {
1427 * According to RFC2460, PMTU is set to the IPv6 Minimum Link
1428 * MTU (1280) and a fragment header should always be included
1429 * after a node receiving Too Big message reporting PMTU is
1430 * less than the IPv6 Minimum Link MTU.
1432 pmtu = IPV6_MIN_MTU;
1436 /* New mtu received -> path was valid.
1437 They are sent only in response to data packets,
1438 so that this nexthop apparently is reachable. --ANK
1440 dst_confirm(&rt->u.dst);
1442 /* Host route. If it is static, it would be better
1443 not to override it, but add new one, so that
1444 when cache entry will expire old pmtu
1445 would return automatically.
1447 if (rt->rt6i_flags & RTF_CACHE) {
1448 rt->u.dst.metrics[RTAX_MTU-1] = pmtu;
1450 rt->u.dst.metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG;
1451 dst_set_expires(&rt->u.dst, ip6_rt_mtu_expires);
1452 rt->rt6i_flags |= RTF_MODIFIED|RTF_EXPIRES;
1457 Two cases are possible:
1458 1. It is connected route. Action: COW
1459 2. It is gatewayed route or NONEXTHOP route. Action: clone it.
1461 if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP))
1462 nrt = rt6_alloc_cow(rt, daddr, saddr);
1464 nrt = rt6_alloc_clone(rt, daddr);
1467 nrt->u.dst.metrics[RTAX_MTU-1] = pmtu;
1469 nrt->u.dst.metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG;
1471 /* According to RFC 1981, detecting PMTU increase shouldn't be
1472 * happened within 5 mins, the recommended timer is 10 mins.
1473 * Here this route expiration time is set to ip6_rt_mtu_expires
1474 * which is 10 mins. After 10 mins the decreased pmtu is expired
1475 * and detecting PMTU increase will be automatically happened.
1477 dst_set_expires(&nrt->u.dst, ip6_rt_mtu_expires);
1478 nrt->rt6i_flags |= RTF_DYNAMIC|RTF_EXPIRES;
1480 ip6_ins_rt(nrt, NULL, NULL, NULL);
1483 dst_release(&rt->u.dst);
1487 * Misc support functions
1490 static struct rt6_info * ip6_rt_copy(struct rt6_info *ort)
1492 struct rt6_info *rt = ip6_dst_alloc();
1495 rt->u.dst.input = ort->u.dst.input;
1496 rt->u.dst.output = ort->u.dst.output;
1498 memcpy(rt->u.dst.metrics, ort->u.dst.metrics, RTAX_MAX*sizeof(u32));
1499 rt->u.dst.dev = ort->u.dst.dev;
1501 dev_hold(rt->u.dst.dev);
1502 rt->rt6i_idev = ort->rt6i_idev;
1504 in6_dev_hold(rt->rt6i_idev);
1505 rt->u.dst.lastuse = jiffies;
1506 rt->rt6i_expires = 0;
1508 ipv6_addr_copy(&rt->rt6i_gateway, &ort->rt6i_gateway);
1509 rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES;
1510 rt->rt6i_metric = 0;
1512 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
1513 #ifdef CONFIG_IPV6_SUBTREES
1514 memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
1516 rt->rt6i_table = ort->rt6i_table;
1521 #ifdef CONFIG_IPV6_ROUTE_INFO
/* Find an existing RFC 4191 route-info route for (prefix/prefixlen,
 * gwaddr, ifindex) in the RT6_TABLE_INFO table.  Returns a held route
 * or NULL. */
1522 static struct rt6_info *rt6_get_route_info(struct in6_addr *prefix, int prefixlen,
1523 struct in6_addr *gwaddr, int ifindex)
1525 struct fib6_node *fn;
1526 struct rt6_info *rt = NULL;
1527 struct fib6_table *table;
1529 table = fib6_get_table(RT6_TABLE_INFO);
1533 write_lock_bh(&table->tb6_lock);
1534 fn = fib6_locate(&table->tb6_root, prefix ,prefixlen, NULL, 0);
1538 for (rt = fn->leaf; rt; rt = rt->u.next) {
1539 if (rt->rt6i_dev->ifindex != ifindex)
1541 if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
1543 if (!ipv6_addr_equal(&rt->rt6i_gateway, gwaddr))
1545 dst_hold(&rt->u.dst);
1549 write_unlock_bh(&table->tb6_lock);
/* Install an RFC 4191 route-info route via ip6_route_add in the
 * RT6_TABLE_INFO table, then return the freshly added (held) entry by
 * re-looking it up — ip6_route_add does not hand back the rt6_info. */
1553 static struct rt6_info *rt6_add_route_info(struct in6_addr *prefix, int prefixlen,
1554 struct in6_addr *gwaddr, int ifindex,
1557 struct in6_rtmsg rtmsg;
1559 memset(&rtmsg, 0, sizeof(rtmsg));
1560 rtmsg.rtmsg_type = RTMSG_NEWROUTE;
1561 ipv6_addr_copy(&rtmsg.rtmsg_dst, prefix);
1562 rtmsg.rtmsg_dst_len = prefixlen;
1563 ipv6_addr_copy(&rtmsg.rtmsg_gateway, gwaddr);
1564 rtmsg.rtmsg_metric = 1024;
1565 rtmsg.rtmsg_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO | RTF_UP | RTF_PREF(pref);
1566 /* We should treat it as a default route if prefix length is 0. */
1568 rtmsg.rtmsg_flags |= RTF_DEFAULT;
1569 rtmsg.rtmsg_ifindex = ifindex;
1571 ip6_route_add(&rtmsg, NULL, NULL, NULL, RT6_TABLE_INFO);
1573 return rt6_get_route_info(prefix, prefixlen, gwaddr, ifindex);
1577 struct rt6_info *rt6_get_dflt_router(struct in6_addr *addr, struct net_device *dev)
1579 struct rt6_info *rt;
1580 struct fib6_table *table;
1582 table = fib6_get_table(RT6_TABLE_DFLT);
1586 write_lock_bh(&table->tb6_lock);
1587 for (rt = table->tb6_root.leaf; rt; rt=rt->u.next) {
1588 if (dev == rt->rt6i_dev &&
1589 ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
1590 ipv6_addr_equal(&rt->rt6i_gateway, addr))
1594 dst_hold(&rt->u.dst);
1595 write_unlock_bh(&table->tb6_lock);
1599 struct rt6_info *rt6_add_dflt_router(struct in6_addr *gwaddr,
1600 struct net_device *dev,
1603 struct in6_rtmsg rtmsg;
1605 memset(&rtmsg, 0, sizeof(struct in6_rtmsg));
1606 rtmsg.rtmsg_type = RTMSG_NEWROUTE;
1607 ipv6_addr_copy(&rtmsg.rtmsg_gateway, gwaddr);
1608 rtmsg.rtmsg_metric = 1024;
1609 rtmsg.rtmsg_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT | RTF_UP | RTF_EXPIRES |
1612 rtmsg.rtmsg_ifindex = dev->ifindex;
1614 ip6_route_add(&rtmsg, NULL, NULL, NULL, RT6_TABLE_DFLT);
1615 return rt6_get_dflt_router(gwaddr, dev);
1618 void rt6_purge_dflt_routers(void)
1620 struct rt6_info *rt;
1621 struct fib6_table *table;
1623 /* NOTE: Keep consistent with rt6_get_dflt_router */
1624 table = fib6_get_table(RT6_TABLE_DFLT);
1629 read_lock_bh(&table->tb6_lock);
1630 for (rt = table->tb6_root.leaf; rt; rt = rt->u.next) {
1631 if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) {
1632 dst_hold(&rt->u.dst);
1633 read_unlock_bh(&table->tb6_lock);
1634 ip6_del_rt(rt, NULL, NULL, NULL);
1638 read_unlock_bh(&table->tb6_lock);
1641 int ipv6_route_ioctl(unsigned int cmd, void __user *arg)
1643 struct in6_rtmsg rtmsg;
1647 case SIOCADDRT: /* Add a route */
1648 case SIOCDELRT: /* Delete a route */
1649 if (!capable(CAP_NET_ADMIN))
1651 err = copy_from_user(&rtmsg, arg,
1652 sizeof(struct in6_rtmsg));
1659 err = ip6_route_add(&rtmsg, NULL, NULL, NULL,
1663 err = ip6_route_del(&rtmsg, NULL, NULL, NULL,
1678 * Drop the packet on the floor
1681 static int ip6_pkt_discard(struct sk_buff *skb)
1683 int type = ipv6_addr_type(&skb->nh.ipv6h->daddr);
1684 if (type == IPV6_ADDR_ANY || type == IPV6_ADDR_RESERVED)
1685 IP6_INC_STATS(IPSTATS_MIB_INADDRERRORS);
1687 IP6_INC_STATS(IPSTATS_MIB_OUTNOROUTES);
1688 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_NOROUTE, 0, skb->dev);
1693 static int ip6_pkt_discard_out(struct sk_buff *skb)
1695 skb->dev = skb->dst->dev;
1696 return ip6_pkt_discard(skb);
1700 * Allocate a dst for local (unicast / anycast) address.
1703 struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
1704 const struct in6_addr *addr,
1707 struct rt6_info *rt = ip6_dst_alloc();
1710 return ERR_PTR(-ENOMEM);
1712 dev_hold(&loopback_dev);
1715 rt->u.dst.flags = DST_HOST;
1716 rt->u.dst.input = ip6_input;
1717 rt->u.dst.output = ip6_output;
1718 rt->rt6i_dev = &loopback_dev;
1719 rt->rt6i_idev = idev;
1720 rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev);
1721 rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dst_mtu(&rt->u.dst));
1722 rt->u.dst.metrics[RTAX_HOPLIMIT-1] = -1;
1723 rt->u.dst.obsolete = -1;
1725 rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;
1727 rt->rt6i_flags |= RTF_ANYCAST;
1729 rt->rt6i_flags |= RTF_LOCAL;
1730 rt->rt6i_nexthop = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway);
1731 if (rt->rt6i_nexthop == NULL) {
1732 dst_free((struct dst_entry *) rt);
1733 return ERR_PTR(-ENOMEM);
1736 ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
1737 rt->rt6i_dst.plen = 128;
1738 rt->rt6i_table = fib6_get_table(RT6_TABLE_LOCAL);
1740 atomic_set(&rt->u.dst.__refcnt, 1);
1745 static int fib6_ifdown(struct rt6_info *rt, void *arg)
1747 if (((void*)rt->rt6i_dev == arg || arg == NULL) &&
1748 rt != &ip6_null_entry) {
1749 RT6_TRACE("deleted by ifdown %p\n", rt);
1755 void rt6_ifdown(struct net_device *dev)
1757 fib6_clean_all(fib6_ifdown, 0, dev);
1760 struct rt6_mtu_change_arg
1762 struct net_device *dev;
1766 static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
1768 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
1769 struct inet6_dev *idev;
1771 /* In IPv6 pmtu discovery is not optional,
1772 so that RTAX_MTU lock cannot disable it.
1773 We still use this lock to block changes
1774 caused by addrconf/ndisc.
1777 idev = __in6_dev_get(arg->dev);
1781 /* For administrative MTU increase, there is no way to discover
1782 IPv6 PMTU increase, so PMTU increase should be updated here.
1783 Since RFC 1981 doesn't include administrative MTU increase
1784 update PMTU increase is a MUST. (i.e. jumbo frame)
1787 If new MTU is less than route PMTU, this new MTU will be the
1788 lowest MTU in the path, update the route PMTU to reflect PMTU
1789 decreases; if new MTU is greater than route PMTU, and the
1790 old MTU is the lowest MTU in the path, update the route PMTU
1791 to reflect the increase. In this case if the other nodes' MTU
1792 also have the lowest MTU, TOO BIG MESSAGE will be lead to
1795 if (rt->rt6i_dev == arg->dev &&
1796 !dst_metric_locked(&rt->u.dst, RTAX_MTU) &&
1797 (dst_mtu(&rt->u.dst) > arg->mtu ||
1798 (dst_mtu(&rt->u.dst) < arg->mtu &&
1799 dst_mtu(&rt->u.dst) == idev->cnf.mtu6)))
1800 rt->u.dst.metrics[RTAX_MTU-1] = arg->mtu;
1801 rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(arg->mtu);
1805 void rt6_mtu_change(struct net_device *dev, unsigned mtu)
1807 struct rt6_mtu_change_arg arg = {
1812 fib6_clean_all(rt6_mtu_change_route, 0, &arg);
1815 static int inet6_rtm_to_rtmsg(struct rtmsg *r, struct rtattr **rta,
1816 struct in6_rtmsg *rtmsg)
1818 memset(rtmsg, 0, sizeof(*rtmsg));
1820 rtmsg->rtmsg_dst_len = r->rtm_dst_len;
1821 rtmsg->rtmsg_src_len = r->rtm_src_len;
1822 rtmsg->rtmsg_flags = RTF_UP;
1823 if (r->rtm_type == RTN_UNREACHABLE)
1824 rtmsg->rtmsg_flags |= RTF_REJECT;
1826 if (rta[RTA_GATEWAY-1]) {
1827 if (rta[RTA_GATEWAY-1]->rta_len != RTA_LENGTH(16))
1829 memcpy(&rtmsg->rtmsg_gateway, RTA_DATA(rta[RTA_GATEWAY-1]), 16);
1830 rtmsg->rtmsg_flags |= RTF_GATEWAY;
1832 if (rta[RTA_DST-1]) {
1833 if (RTA_PAYLOAD(rta[RTA_DST-1]) < ((r->rtm_dst_len+7)>>3))
1835 memcpy(&rtmsg->rtmsg_dst, RTA_DATA(rta[RTA_DST-1]), ((r->rtm_dst_len+7)>>3));
1837 if (rta[RTA_SRC-1]) {
1838 if (RTA_PAYLOAD(rta[RTA_SRC-1]) < ((r->rtm_src_len+7)>>3))
1840 memcpy(&rtmsg->rtmsg_src, RTA_DATA(rta[RTA_SRC-1]), ((r->rtm_src_len+7)>>3));
1842 if (rta[RTA_OIF-1]) {
1843 if (rta[RTA_OIF-1]->rta_len != RTA_LENGTH(sizeof(int)))
1845 memcpy(&rtmsg->rtmsg_ifindex, RTA_DATA(rta[RTA_OIF-1]), sizeof(int));
1847 if (rta[RTA_PRIORITY-1]) {
1848 if (rta[RTA_PRIORITY-1]->rta_len != RTA_LENGTH(4))
1850 memcpy(&rtmsg->rtmsg_metric, RTA_DATA(rta[RTA_PRIORITY-1]), 4);
1855 int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
1857 struct rtmsg *r = NLMSG_DATA(nlh);
1858 struct in6_rtmsg rtmsg;
1860 if (inet6_rtm_to_rtmsg(r, arg, &rtmsg))
1862 return ip6_route_del(&rtmsg, nlh, arg, &NETLINK_CB(skb),
1863 rtm_get_table(arg, r->rtm_table));
1866 int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
1868 struct rtmsg *r = NLMSG_DATA(nlh);
1869 struct in6_rtmsg rtmsg;
1871 if (inet6_rtm_to_rtmsg(r, arg, &rtmsg))
1873 return ip6_route_add(&rtmsg, nlh, arg, &NETLINK_CB(skb),
1874 rtm_get_table(arg, r->rtm_table));
1877 static int rt6_fill_node(struct sk_buff *skb, struct rt6_info *rt,
1878 struct in6_addr *dst, struct in6_addr *src,
1879 int iif, int type, u32 pid, u32 seq,
1880 int prefix, unsigned int flags)
1883 struct nlmsghdr *nlh;
1884 unsigned char *b = skb->tail;
1885 struct rta_cacheinfo ci;
1888 if (prefix) { /* user wants prefix routes only */
1889 if (!(rt->rt6i_flags & RTF_PREFIX_RT)) {
1890 /* success since this is not a prefix route */
1895 nlh = NLMSG_NEW(skb, pid, seq, type, sizeof(*rtm), flags);
1896 rtm = NLMSG_DATA(nlh);
1897 rtm->rtm_family = AF_INET6;
1898 rtm->rtm_dst_len = rt->rt6i_dst.plen;
1899 rtm->rtm_src_len = rt->rt6i_src.plen;
1902 table = rt->rt6i_table->tb6_id;
1904 table = RT6_TABLE_UNSPEC;
1905 rtm->rtm_table = table;
1906 RTA_PUT_U32(skb, RTA_TABLE, table);
1907 if (rt->rt6i_flags&RTF_REJECT)
1908 rtm->rtm_type = RTN_UNREACHABLE;
1909 else if (rt->rt6i_dev && (rt->rt6i_dev->flags&IFF_LOOPBACK))
1910 rtm->rtm_type = RTN_LOCAL;
1912 rtm->rtm_type = RTN_UNICAST;
1914 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
1915 rtm->rtm_protocol = rt->rt6i_protocol;
1916 if (rt->rt6i_flags&RTF_DYNAMIC)
1917 rtm->rtm_protocol = RTPROT_REDIRECT;
1918 else if (rt->rt6i_flags & RTF_ADDRCONF)
1919 rtm->rtm_protocol = RTPROT_KERNEL;
1920 else if (rt->rt6i_flags&RTF_DEFAULT)
1921 rtm->rtm_protocol = RTPROT_RA;
1923 if (rt->rt6i_flags&RTF_CACHE)
1924 rtm->rtm_flags |= RTM_F_CLONED;
1927 RTA_PUT(skb, RTA_DST, 16, dst);
1928 rtm->rtm_dst_len = 128;
1929 } else if (rtm->rtm_dst_len)
1930 RTA_PUT(skb, RTA_DST, 16, &rt->rt6i_dst.addr);
1931 #ifdef CONFIG_IPV6_SUBTREES
1933 RTA_PUT(skb, RTA_SRC, 16, src);
1934 rtm->rtm_src_len = 128;
1935 } else if (rtm->rtm_src_len)
1936 RTA_PUT(skb, RTA_SRC, 16, &rt->rt6i_src.addr);
1939 RTA_PUT(skb, RTA_IIF, 4, &iif);
1941 struct in6_addr saddr_buf;
1942 if (ipv6_get_saddr(&rt->u.dst, dst, &saddr_buf) == 0)
1943 RTA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
1945 if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0)
1946 goto rtattr_failure;
1947 if (rt->u.dst.neighbour)
1948 RTA_PUT(skb, RTA_GATEWAY, 16, &rt->u.dst.neighbour->primary_key);
1950 RTA_PUT(skb, RTA_OIF, sizeof(int), &rt->rt6i_dev->ifindex);
1951 RTA_PUT(skb, RTA_PRIORITY, 4, &rt->rt6i_metric);
1952 ci.rta_lastuse = jiffies_to_clock_t(jiffies - rt->u.dst.lastuse);
1953 if (rt->rt6i_expires)
1954 ci.rta_expires = jiffies_to_clock_t(rt->rt6i_expires - jiffies);
1957 ci.rta_used = rt->u.dst.__use;
1958 ci.rta_clntref = atomic_read(&rt->u.dst.__refcnt);
1959 ci.rta_error = rt->u.dst.error;
1963 RTA_PUT(skb, RTA_CACHEINFO, sizeof(ci), &ci);
1964 nlh->nlmsg_len = skb->tail - b;
1969 skb_trim(skb, b - skb->data);
1973 int rt6_dump_route(struct rt6_info *rt, void *p_arg)
1975 struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
1978 if (arg->cb->nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(struct rtmsg))) {
1979 struct rtmsg *rtm = NLMSG_DATA(arg->cb->nlh);
1980 prefix = (rtm->rtm_flags & RTM_F_PREFIX) != 0;
1984 return rt6_fill_node(arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE,
1985 NETLINK_CB(arg->cb->skb).pid, arg->cb->nlh->nlmsg_seq,
1986 prefix, NLM_F_MULTI);
1989 int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
1991 struct rtattr **rta = arg;
1994 struct sk_buff *skb;
1996 struct rt6_info *rt;
1998 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2002 /* Reserve room for dummy headers, this skb can pass
2003 through good chunk of routing engine.
2005 skb->mac.raw = skb->data;
2006 skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr));
2008 memset(&fl, 0, sizeof(fl));
2010 ipv6_addr_copy(&fl.fl6_src,
2011 (struct in6_addr*)RTA_DATA(rta[RTA_SRC-1]));
2013 ipv6_addr_copy(&fl.fl6_dst,
2014 (struct in6_addr*)RTA_DATA(rta[RTA_DST-1]));
2017 memcpy(&iif, RTA_DATA(rta[RTA_IIF-1]), sizeof(int));
2020 struct net_device *dev;
2021 dev = __dev_get_by_index(iif);
2030 memcpy(&fl.oif, RTA_DATA(rta[RTA_OIF-1]), sizeof(int));
2032 rt = (struct rt6_info*)ip6_route_output(NULL, &fl);
2034 skb->dst = &rt->u.dst;
2036 NETLINK_CB(skb).dst_pid = NETLINK_CB(in_skb).pid;
2037 err = rt6_fill_node(skb, rt,
2038 &fl.fl6_dst, &fl.fl6_src,
2040 RTM_NEWROUTE, NETLINK_CB(in_skb).pid,
2041 nlh->nlmsg_seq, 0, 0);
2047 err = rtnl_unicast(skb, NETLINK_CB(in_skb).pid);
2055 void inet6_rt_notify(int event, struct rt6_info *rt, struct nlmsghdr *nlh,
2056 struct netlink_skb_parms *req)
2058 struct sk_buff *skb;
2059 int size = NLMSG_SPACE(sizeof(struct rtmsg)+256);
2060 u32 pid = current->pid;
2066 seq = nlh->nlmsg_seq;
2068 skb = alloc_skb(size, gfp_any());
2070 netlink_set_err(rtnl, 0, RTNLGRP_IPV6_ROUTE, ENOBUFS);
2073 if (rt6_fill_node(skb, rt, NULL, NULL, 0, event, pid, seq, 0, 0) < 0) {
2075 netlink_set_err(rtnl, 0, RTNLGRP_IPV6_ROUTE, EINVAL);
2078 NETLINK_CB(skb).dst_group = RTNLGRP_IPV6_ROUTE;
2079 netlink_broadcast(rtnl, skb, 0, RTNLGRP_IPV6_ROUTE, gfp_any());
2086 #ifdef CONFIG_PROC_FS
2088 #define RT6_INFO_LEN (32 + 4 + 32 + 4 + 32 + 40 + 5 + 1)
2099 static int rt6_info_route(struct rt6_info *rt, void *p_arg)
2101 struct rt6_proc_arg *arg = (struct rt6_proc_arg *) p_arg;
2104 if (arg->skip < arg->offset / RT6_INFO_LEN) {
2109 if (arg->len >= arg->length)
2112 for (i=0; i<16; i++) {
2113 sprintf(arg->buffer + arg->len, "%02x",
2114 rt->rt6i_dst.addr.s6_addr[i]);
2117 arg->len += sprintf(arg->buffer + arg->len, " %02x ",
2120 #ifdef CONFIG_IPV6_SUBTREES
2121 for (i=0; i<16; i++) {
2122 sprintf(arg->buffer + arg->len, "%02x",
2123 rt->rt6i_src.addr.s6_addr[i]);
2126 arg->len += sprintf(arg->buffer + arg->len, " %02x ",
2129 sprintf(arg->buffer + arg->len,
2130 "00000000000000000000000000000000 00 ");
2134 if (rt->rt6i_nexthop) {
2135 for (i=0; i<16; i++) {
2136 sprintf(arg->buffer + arg->len, "%02x",
2137 rt->rt6i_nexthop->primary_key[i]);
2141 sprintf(arg->buffer + arg->len,
2142 "00000000000000000000000000000000");
2145 arg->len += sprintf(arg->buffer + arg->len,
2146 " %08x %08x %08x %08x %8s\n",
2147 rt->rt6i_metric, atomic_read(&rt->u.dst.__refcnt),
2148 rt->u.dst.__use, rt->rt6i_flags,
2149 rt->rt6i_dev ? rt->rt6i_dev->name : "");
2153 static int rt6_proc_info(char *buffer, char **start, off_t offset, int length)
2155 struct rt6_proc_arg arg = {
2161 fib6_clean_all(rt6_info_route, 0, &arg);
2165 *start += offset % RT6_INFO_LEN;
2167 arg.len -= offset % RT6_INFO_LEN;
2169 if (arg.len > length)
2177 static int rt6_stats_seq_show(struct seq_file *seq, void *v)
2179 seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
2180 rt6_stats.fib_nodes, rt6_stats.fib_route_nodes,
2181 rt6_stats.fib_rt_alloc, rt6_stats.fib_rt_entries,
2182 rt6_stats.fib_rt_cache,
2183 atomic_read(&ip6_dst_ops.entries),
2184 rt6_stats.fib_discarded_routes);
2189 static int rt6_stats_seq_open(struct inode *inode, struct file *file)
2191 return single_open(file, rt6_stats_seq_show, NULL);
2194 static struct file_operations rt6_stats_seq_fops = {
2195 .owner = THIS_MODULE,
2196 .open = rt6_stats_seq_open,
2198 .llseek = seq_lseek,
2199 .release = single_release,
2201 #endif /* CONFIG_PROC_FS */
2203 #ifdef CONFIG_SYSCTL
2205 static int flush_delay;
2208 int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write, struct file * filp,
2209 void __user *buffer, size_t *lenp, loff_t *ppos)
2212 proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
2213 fib6_run_gc(flush_delay <= 0 ? ~0UL : (unsigned long)flush_delay);
2219 ctl_table ipv6_route_table[] = {
2221 .ctl_name = NET_IPV6_ROUTE_FLUSH,
2222 .procname = "flush",
2223 .data = &flush_delay,
2224 .maxlen = sizeof(int),
2226 .proc_handler = &ipv6_sysctl_rtcache_flush
2229 .ctl_name = NET_IPV6_ROUTE_GC_THRESH,
2230 .procname = "gc_thresh",
2231 .data = &ip6_dst_ops.gc_thresh,
2232 .maxlen = sizeof(int),
2234 .proc_handler = &proc_dointvec,
2237 .ctl_name = NET_IPV6_ROUTE_MAX_SIZE,
2238 .procname = "max_size",
2239 .data = &ip6_rt_max_size,
2240 .maxlen = sizeof(int),
2242 .proc_handler = &proc_dointvec,
2245 .ctl_name = NET_IPV6_ROUTE_GC_MIN_INTERVAL,
2246 .procname = "gc_min_interval",
2247 .data = &ip6_rt_gc_min_interval,
2248 .maxlen = sizeof(int),
2250 .proc_handler = &proc_dointvec_jiffies,
2251 .strategy = &sysctl_jiffies,
2254 .ctl_name = NET_IPV6_ROUTE_GC_TIMEOUT,
2255 .procname = "gc_timeout",
2256 .data = &ip6_rt_gc_timeout,
2257 .maxlen = sizeof(int),
2259 .proc_handler = &proc_dointvec_jiffies,
2260 .strategy = &sysctl_jiffies,
2263 .ctl_name = NET_IPV6_ROUTE_GC_INTERVAL,
2264 .procname = "gc_interval",
2265 .data = &ip6_rt_gc_interval,
2266 .maxlen = sizeof(int),
2268 .proc_handler = &proc_dointvec_jiffies,
2269 .strategy = &sysctl_jiffies,
2272 .ctl_name = NET_IPV6_ROUTE_GC_ELASTICITY,
2273 .procname = "gc_elasticity",
2274 .data = &ip6_rt_gc_elasticity,
2275 .maxlen = sizeof(int),
2277 .proc_handler = &proc_dointvec_jiffies,
2278 .strategy = &sysctl_jiffies,
2281 .ctl_name = NET_IPV6_ROUTE_MTU_EXPIRES,
2282 .procname = "mtu_expires",
2283 .data = &ip6_rt_mtu_expires,
2284 .maxlen = sizeof(int),
2286 .proc_handler = &proc_dointvec_jiffies,
2287 .strategy = &sysctl_jiffies,
2290 .ctl_name = NET_IPV6_ROUTE_MIN_ADVMSS,
2291 .procname = "min_adv_mss",
2292 .data = &ip6_rt_min_advmss,
2293 .maxlen = sizeof(int),
2295 .proc_handler = &proc_dointvec_jiffies,
2296 .strategy = &sysctl_jiffies,
2299 .ctl_name = NET_IPV6_ROUTE_GC_MIN_INTERVAL_MS,
2300 .procname = "gc_min_interval_ms",
2301 .data = &ip6_rt_gc_min_interval,
2302 .maxlen = sizeof(int),
2304 .proc_handler = &proc_dointvec_ms_jiffies,
2305 .strategy = &sysctl_ms_jiffies,
2312 void __init ip6_route_init(void)
2314 struct proc_dir_entry *p;
2316 ip6_dst_ops.kmem_cachep = kmem_cache_create("ip6_dst_cache",
2317 sizeof(struct rt6_info),
2318 0, SLAB_HWCACHE_ALIGN,
2320 if (!ip6_dst_ops.kmem_cachep)
2321 panic("cannot create ip6_dst_cache");
2324 #ifdef CONFIG_PROC_FS
2325 p = proc_net_create("ipv6_route", 0, rt6_proc_info);
2327 p->owner = THIS_MODULE;
2329 proc_net_fops_create("rt6_stats", S_IRUGO, &rt6_stats_seq_fops);
2334 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2339 void ip6_route_cleanup(void)
2341 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2342 fib6_rules_cleanup();
2344 #ifdef CONFIG_PROC_FS
2345 proc_net_remove("ipv6_route");
2346 proc_net_remove("rt6_stats");
2353 kmem_cache_destroy(ip6_dst_ops.kmem_cachep);