2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * ROUTE - implementation of the IP router.
9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
10 * Alan Cox, <gw4pts@gw4pts.ampr.org>
11 * Linus Torvalds, <Linus.Torvalds@helsinki.fi>
12 * Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
15 * Alan Cox : Verify area fixes.
16 * Alan Cox : cli() protects routing changes
17 * Rui Oliveira : ICMP routing table updates
18 * (rco@di.uminho.pt) Routing table insertion and update
19 * Linus Torvalds : Rewrote bits to be sensible
20 * Alan Cox : Added BSD route gw semantics
21 * Alan Cox : Super /proc >4K
22 * Alan Cox : MTU in route table
23 * Alan Cox : MSS actually. Also added the window
25 * Sam Lantinga : Fixed route matching in rt_del()
26 * Alan Cox : Routing cache support.
27 * Alan Cox : Removed compatibility cruft.
28 * Alan Cox : RTF_REJECT support.
29 * Alan Cox : TCP irtt support.
30 * Jonathan Naylor : Added Metric support.
31 * Miquel van Smoorenburg : BSD API fixes.
32 * Miquel van Smoorenburg : Metrics.
33 * Alan Cox : Use __u32 properly
34 * Alan Cox : Aligned routing errors more closely with BSD
35 * our system is still very different.
36 * Alan Cox : Faster /proc handling
37 * Alexey Kuznetsov : Massive rework to support tree based routing,
38 * routing caches and better behaviour.
40 * Olaf Erb : irtt wasn't being copied right.
41 * Bjorn Ekwall : Kerneld route support.
42 * Alan Cox : Multicast fixed (I hope)
43 * Pavel Krauz : Limited broadcast fixed
44 * Mike McLagan : Routing by source
45 * Alexey Kuznetsov : End of old history. Split to fib.c and
46 * route.c and rewritten from scratch.
47 * Andi Kleen : Load-limit warning messages.
48 * Vitaly E. Lavrov : Transparent proxy revived after year coma.
49 * Vitaly E. Lavrov : Race condition in ip_route_input_slow.
50 * Tobias Ringstrom : Uninitialized res.type in ip_route_output_slow.
51 * Vladimir V. Ivanov : IP rule info (flowid) is really useful.
52 * Marc Boucher : routing by fwmark
53 * Robert Olsson : Added rt_cache statistics
54 * Arnaldo C. Melo : Convert proc stuff to seq_file
55 * Eric Dumazet : hashed spinlocks and rt_check_expire() fixes.
56 * Ilia Sotnikov : Ignore TOS on PMTUD and Redirect
57 * Ilia Sotnikov : Removed TOS from hash calculations
59 * This program is free software; you can redistribute it and/or
60 * modify it under the terms of the GNU General Public License
61 * as published by the Free Software Foundation; either version
62 * 2 of the License, or (at your option) any later version.
65 #define pr_fmt(fmt) "IPv4: " fmt
67 #include <linux/module.h>
68 #include <asm/uaccess.h>
69 #include <linux/bitops.h>
70 #include <linux/types.h>
71 #include <linux/kernel.h>
73 #include <linux/bootmem.h>
74 #include <linux/string.h>
75 #include <linux/socket.h>
76 #include <linux/sockios.h>
77 #include <linux/errno.h>
79 #include <linux/inet.h>
80 #include <linux/netdevice.h>
81 #include <linux/proc_fs.h>
82 #include <linux/init.h>
83 #include <linux/workqueue.h>
84 #include <linux/skbuff.h>
85 #include <linux/inetdevice.h>
86 #include <linux/igmp.h>
87 #include <linux/pkt_sched.h>
88 #include <linux/mroute.h>
89 #include <linux/netfilter_ipv4.h>
90 #include <linux/random.h>
91 #include <linux/jhash.h>
92 #include <linux/rcupdate.h>
93 #include <linux/times.h>
94 #include <linux/slab.h>
95 #include <linux/prefetch.h>
97 #include <net/net_namespace.h>
98 #include <net/protocol.h>
100 #include <net/route.h>
101 #include <net/inetpeer.h>
102 #include <net/sock.h>
103 #include <net/ip_fib.h>
106 #include <net/icmp.h>
107 #include <net/xfrm.h>
108 #include <net/netevent.h>
109 #include <net/rtnetlink.h>
111 #include <linux/sysctl.h>
112 #include <linux/kmemleak.h>
114 #include <net/secure_seq.h>
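/* Editor's note (added comment, not in the original file): RT_FL_TOS()
 * below extracts the routing-relevant TOS bits from a flow, i.e. the
 * IPTOS_RT_MASK bits plus the RTO_ONLINK flag carried in the same field.
 */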
116 #define RT_FL_TOS(oldflp4) \
117 ((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
119 #define IP_MAX_MTU 0xFFF0
121 #define RT_GC_TIMEOUT (300*HZ)
123 static int ip_rt_max_size;
124 static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
125 static int ip_rt_gc_interval __read_mostly = 60 * HZ;
126 static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
127 static int ip_rt_redirect_number __read_mostly = 9;
128 static int ip_rt_redirect_load __read_mostly = HZ / 50;
129 static int ip_rt_redirect_silence __read_mostly = ((HZ / 50) << (9 + 1));
130 static int ip_rt_error_cost __read_mostly = HZ;
131 static int ip_rt_error_burst __read_mostly = 5 * HZ;
132 static int ip_rt_gc_elasticity __read_mostly = 8;
133 static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
134 static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
135 static int ip_rt_min_advmss __read_mostly = 256;
138 * Interface to generic destination cache.
141 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
142 static unsigned int ipv4_default_advmss(const struct dst_entry *dst);
143 static unsigned int ipv4_mtu(const struct dst_entry *dst);
144 static void ipv4_dst_destroy(struct dst_entry *dst);
145 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
146 static void ipv4_link_failure(struct sk_buff *skb);
147 static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
148 struct sk_buff *skb, u32 mtu);
149 static void ip_do_redirect(struct dst_entry *dst, struct sock *sk,
150 struct sk_buff *skb);
152 static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
157 static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
163 static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
167 static struct dst_ops ipv4_dst_ops = {
169 .protocol = cpu_to_be16(ETH_P_IP),
170 .check = ipv4_dst_check,
171 .default_advmss = ipv4_default_advmss,
173 .cow_metrics = ipv4_cow_metrics,
174 .destroy = ipv4_dst_destroy,
175 .ifdown = ipv4_dst_ifdown,
176 .negative_advice = ipv4_negative_advice,
177 .link_failure = ipv4_link_failure,
178 .update_pmtu = ip_rt_update_pmtu,
179 .redirect = ip_do_redirect,
180 .local_out = __ip_local_out,
181 .neigh_lookup = ipv4_neigh_lookup,
184 #define ECN_OR_COST(class) TC_PRIO_##class
186 const __u8 ip_tos2prio[16] = {
188 ECN_OR_COST(BESTEFFORT),
190 ECN_OR_COST(BESTEFFORT),
196 ECN_OR_COST(INTERACTIVE),
198 ECN_OR_COST(INTERACTIVE),
199 TC_PRIO_INTERACTIVE_BULK,
200 ECN_OR_COST(INTERACTIVE_BULK),
201 TC_PRIO_INTERACTIVE_BULK,
202 ECN_OR_COST(INTERACTIVE_BULK)
204 EXPORT_SYMBOL(ip_tos2prio);
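/* Editor's note (illustrative sketch, not part of the original file):
 * ip_tos2prio maps the 4-bit IPv4 TOS field to a traffic-control priority
 * band.  A lookup of this table looks roughly like the hypothetical helper
 * below, which mirrors what the in-tree rt_tos2priority() helper does; the
 * name example_tos2prio itself is made up for illustration.
 */
#if 0	/* illustrative only, never compiled */
static inline char example_tos2prio(u8 tos)
{
	/* Mask to the TOS bits and drop the low bit to index the 16 entries. */
	return ip_tos2prio[IPTOS_TOS(tos) >> 1];
}
#endif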
206 static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
207 #define RT_CACHE_STAT_INC(field) __this_cpu_inc(rt_cache_stat.field)
209 static inline int rt_genid(struct net *net)
211 return atomic_read(&net->ipv4.rt_genid);
214 #ifdef CONFIG_PROC_FS
215 static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
219 return SEQ_START_TOKEN;
222 static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
228 static void rt_cache_seq_stop(struct seq_file *seq, void *v)
232 static int rt_cache_seq_show(struct seq_file *seq, void *v)
234 if (v == SEQ_START_TOKEN)
235 seq_printf(seq, "%-127s\n",
236 "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
237 "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
242 static const struct seq_operations rt_cache_seq_ops = {
243 .start = rt_cache_seq_start,
244 .next = rt_cache_seq_next,
245 .stop = rt_cache_seq_stop,
246 .show = rt_cache_seq_show,
249 static int rt_cache_seq_open(struct inode *inode, struct file *file)
251 return seq_open(file, &rt_cache_seq_ops);
254 static const struct file_operations rt_cache_seq_fops = {
255 .owner = THIS_MODULE,
256 .open = rt_cache_seq_open,
259 .release = seq_release,
263 static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
268 return SEQ_START_TOKEN;
270 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
271 if (!cpu_possible(cpu))
274 return &per_cpu(rt_cache_stat, cpu);
279 static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
283 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
284 if (!cpu_possible(cpu))
287 return &per_cpu(rt_cache_stat, cpu);
293 static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
298 static int rt_cpu_seq_show(struct seq_file *seq, void *v)
300 struct rt_cache_stat *st = v;
302 if (v == SEQ_START_TOKEN) {
303 seq_printf(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
307 seq_printf(seq,"%08x %08x %08x %08x %08x %08x %08x %08x "
308 " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
309 dst_entries_get_slow(&ipv4_dst_ops),
332 static const struct seq_operations rt_cpu_seq_ops = {
333 .start = rt_cpu_seq_start,
334 .next = rt_cpu_seq_next,
335 .stop = rt_cpu_seq_stop,
336 .show = rt_cpu_seq_show,
340 static int rt_cpu_seq_open(struct inode *inode, struct file *file)
342 return seq_open(file, &rt_cpu_seq_ops);
345 static const struct file_operations rt_cpu_seq_fops = {
346 .owner = THIS_MODULE,
347 .open = rt_cpu_seq_open,
350 .release = seq_release,
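/* Editor's note (hypothetical userspace sketch, not part of this file):
 * rt_cpu_seq_fops above backs the per-CPU statistics file that
 * ip_rt_do_proc_init() registers as "rt_cache" under net->proc_net_stat,
 * conventionally /proc/net/stat/rt_cache.  A minimal reader, assuming that
 * path, could look like this:
 */
#if 0	/* illustrative only, never compiled */
#include <stdio.h>

int main(void)
{
	char line[512];
	FILE *f = fopen("/proc/net/stat/rt_cache", "r");

	if (!f)
		return 1;
	/* First line is the header printed by rt_cpu_seq_show(),
	 * followed by one row of hex counters per possible CPU.
	 */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}
#endif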
353 #ifdef CONFIG_IP_ROUTE_CLASSID
354 static int rt_acct_proc_show(struct seq_file *m, void *v)
356 struct ip_rt_acct *dst, *src;
359 dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
363 for_each_possible_cpu(i) {
364 src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
365 for (j = 0; j < 256; j++) {
366 dst[j].o_bytes += src[j].o_bytes;
367 dst[j].o_packets += src[j].o_packets;
368 dst[j].i_bytes += src[j].i_bytes;
369 dst[j].i_packets += src[j].i_packets;
373 seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
378 static int rt_acct_proc_open(struct inode *inode, struct file *file)
380 return single_open(file, rt_acct_proc_show, NULL);
383 static const struct file_operations rt_acct_proc_fops = {
384 .owner = THIS_MODULE,
385 .open = rt_acct_proc_open,
388 .release = single_release,
392 static int __net_init ip_rt_do_proc_init(struct net *net)
394 struct proc_dir_entry *pde;
396 pde = proc_net_fops_create(net, "rt_cache", S_IRUGO,
401 pde = proc_create("rt_cache", S_IRUGO,
402 net->proc_net_stat, &rt_cpu_seq_fops);
406 #ifdef CONFIG_IP_ROUTE_CLASSID
407 pde = proc_create("rt_acct", 0, net->proc_net, &rt_acct_proc_fops);
413 #ifdef CONFIG_IP_ROUTE_CLASSID
415 remove_proc_entry("rt_cache", net->proc_net_stat);
418 remove_proc_entry("rt_cache", net->proc_net);
423 static void __net_exit ip_rt_do_proc_exit(struct net *net)
425 remove_proc_entry("rt_cache", net->proc_net_stat);
426 remove_proc_entry("rt_cache", net->proc_net);
427 #ifdef CONFIG_IP_ROUTE_CLASSID
428 remove_proc_entry("rt_acct", net->proc_net);
432 static struct pernet_operations ip_rt_proc_ops __net_initdata = {
433 .init = ip_rt_do_proc_init,
434 .exit = ip_rt_do_proc_exit,
437 static int __init ip_rt_proc_init(void)
439 return register_pernet_subsys(&ip_rt_proc_ops);
443 static inline int ip_rt_proc_init(void)
447 #endif /* CONFIG_PROC_FS */
449 static inline int rt_is_expired(struct rtable *rth)
451 return rth->rt_genid != rt_genid(dev_net(rth->dst.dev));
455 * Perturbation of rt_genid by a small quantity [1..256].
456 * Using 8 bits of shuffling ensures we can call rt_cache_invalidate()
457 * many times (2^24) without reusing a recent rt_genid.
458 * The Jenkins hash is strong enough that little changes of rt_genid are OK.
460 static void rt_cache_invalidate(struct net *net)
462 unsigned char shuffle;
464 get_random_bytes(&shuffle, sizeof(shuffle));
465 atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
469 * delay < 0 : invalidate cache (fast : entries will be deleted later)
470 * delay >= 0 : invalidate & flush cache (can be long)
472 void rt_cache_flush(struct net *net, int delay)
474 rt_cache_invalidate(net);
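/* Editor's note (added comment, not in the original file): bumping
 * rt_genid does not free anything immediately; entries created under an
 * older generation simply fail the rt_is_expired() check above and are
 * discarded lazily the next time they are looked at.
 */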
477 static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
481 struct net_device *dev = dst->dev;
482 const __be32 *pkey = daddr;
483 const struct rtable *rt;
486 rt = (const struct rtable *) dst;
488 pkey = (const __be32 *) &rt->rt_gateway;
490 pkey = &ip_hdr(skb)->daddr;
492 n = __ipv4_neigh_lookup(dev, *(__force u32 *)pkey);
495 return neigh_create(&arp_tbl, pkey, dev);
499 * Peer allocation may fail only in serious out-of-memory conditions. However
500 * we can still generate some output.
501 * Random ID selection looks a bit dangerous because we have no chance of
502 * selecting an ID that is unique within a reasonable period of time.
503 * But a broken packet identifier may be better than no packet at all.
505 static void ip_select_fb_ident(struct iphdr *iph)
507 static DEFINE_SPINLOCK(ip_fb_id_lock);
508 static u32 ip_fallback_id;
511 spin_lock_bh(&ip_fb_id_lock);
512 salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr);
513 iph->id = htons(salt & 0xFFFF);
514 ip_fallback_id = salt;
515 spin_unlock_bh(&ip_fb_id_lock);
518 void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
520 struct net *net = dev_net(dst->dev);
521 struct inet_peer *peer;
523 peer = inet_getpeer_v4(net->ipv4.peers, iph->daddr, 1);
525 iph->id = htons(inet_getid(peer, more));
530 ip_select_fb_ident(iph);
532 EXPORT_SYMBOL(__ip_select_ident);
534 static void __build_flow_key(struct flowi4 *fl4, const struct sock *sk,
535 const struct iphdr *iph,
537 u8 prot, u32 mark, int flow_flags)
540 const struct inet_sock *inet = inet_sk(sk);
542 oif = sk->sk_bound_dev_if;
544 tos = RT_CONN_FLAGS(sk);
545 prot = inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol;
547 flowi4_init_output(fl4, oif, mark, tos,
548 RT_SCOPE_UNIVERSE, prot,
550 iph->daddr, iph->saddr, 0, 0);
553 static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
554 const struct sock *sk)
556 const struct iphdr *iph = ip_hdr(skb);
557 int oif = skb->dev->ifindex;
558 u8 tos = RT_TOS(iph->tos);
559 u8 prot = iph->protocol;
560 u32 mark = skb->mark;
562 __build_flow_key(fl4, sk, iph, oif, tos, prot, mark, 0);
565 static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
567 const struct inet_sock *inet = inet_sk(sk);
568 const struct ip_options_rcu *inet_opt;
569 __be32 daddr = inet->inet_daddr;
572 inet_opt = rcu_dereference(inet->inet_opt);
573 if (inet_opt && inet_opt->opt.srr)
574 daddr = inet_opt->opt.faddr;
575 flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
576 RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
577 inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
578 inet_sk_flowi_flags(sk),
579 daddr, inet->inet_saddr, 0, 0);
583 static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
584 const struct sk_buff *skb)
587 build_skb_flow_key(fl4, skb, sk);
589 build_sk_flow_key(fl4, sk);
592 static DEFINE_SEQLOCK(fnhe_seqlock);
594 static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash)
596 struct fib_nh_exception *fnhe, *oldest;
598 oldest = rcu_dereference(hash->chain);
599 for (fnhe = rcu_dereference(oldest->fnhe_next); fnhe;
600 fnhe = rcu_dereference(fnhe->fnhe_next)) {
601 if (time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp))
607 static inline u32 fnhe_hashfun(__be32 daddr)
611 hval = (__force u32) daddr;
612 hval ^= (hval >> 11) ^ (hval >> 22);
614 return hval & (FNHE_HASH_SIZE - 1);
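/* Editor's note (added comment, not in the original file): the xor-fold
 * above mixes the high bits of the destination address into the low bits
 * before masking with FNHE_HASH_SIZE - 1, so addresses that differ only in
 * their upper octets still spread across the exception hash buckets.
 */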
617 static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
618 u32 pmtu, unsigned long expires)
620 struct fnhe_hash_bucket *hash;
621 struct fib_nh_exception *fnhe;
623 u32 hval = fnhe_hashfun(daddr);
625 write_seqlock_bh(&fnhe_seqlock);
627 hash = nh->nh_exceptions;
629 hash = kzalloc(FNHE_HASH_SIZE * sizeof(*hash), GFP_ATOMIC);
632 nh->nh_exceptions = hash;
638 for (fnhe = rcu_dereference(hash->chain); fnhe;
639 fnhe = rcu_dereference(fnhe->fnhe_next)) {
640 if (fnhe->fnhe_daddr == daddr)
649 fnhe->fnhe_pmtu = pmtu;
650 fnhe->fnhe_expires = expires;
653 if (depth > FNHE_RECLAIM_DEPTH)
654 fnhe = fnhe_oldest(hash);
656 fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC);
660 fnhe->fnhe_next = hash->chain;
661 rcu_assign_pointer(hash->chain, fnhe);
663 fnhe->fnhe_daddr = daddr;
665 fnhe->fnhe_pmtu = pmtu;
666 fnhe->fnhe_expires = expires;
669 fnhe->fnhe_stamp = jiffies;
672 write_sequnlock_bh(&fnhe_seqlock);
676 static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4,
679 __be32 new_gw = icmp_hdr(skb)->un.gateway;
680 __be32 old_gw = ip_hdr(skb)->saddr;
681 struct net_device *dev = skb->dev;
682 struct in_device *in_dev;
683 struct fib_result res;
687 switch (icmp_hdr(skb)->code & 7) {
689 case ICMP_REDIR_NETTOS:
690 case ICMP_REDIR_HOST:
691 case ICMP_REDIR_HOSTTOS:
698 if (rt->rt_gateway != old_gw)
701 in_dev = __in_dev_get_rcu(dev);
706 if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
707 ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
708 ipv4_is_zeronet(new_gw))
709 goto reject_redirect;
711 if (!IN_DEV_SHARED_MEDIA(in_dev)) {
712 if (!inet_addr_onlink(in_dev, new_gw, old_gw))
713 goto reject_redirect;
714 if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
715 goto reject_redirect;
717 if (inet_addr_type(net, new_gw) != RTN_UNICAST)
718 goto reject_redirect;
721 n = ipv4_neigh_lookup(&rt->dst, NULL, &new_gw);
723 if (!(n->nud_state & NUD_VALID)) {
724 neigh_event_send(n, NULL);
726 if (fib_lookup(net, fl4, &res) == 0) {
727 struct fib_nh *nh = &FIB_RES_NH(res);
729 update_or_create_fnhe(nh, fl4->daddr, new_gw,
733 rt->dst.obsolete = DST_OBSOLETE_KILL;
734 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
741 #ifdef CONFIG_IP_ROUTE_VERBOSE
742 if (IN_DEV_LOG_MARTIANS(in_dev)) {
743 const struct iphdr *iph = (const struct iphdr *) skb->data;
744 __be32 daddr = iph->daddr;
745 __be32 saddr = iph->saddr;
747 net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n"
748 " Advised path = %pI4 -> %pI4\n",
749 &old_gw, dev->name, &new_gw,
756 static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
761 rt = (struct rtable *) dst;
763 ip_rt_build_flow_key(&fl4, sk, skb);
764 __ip_do_redirect(rt, skb, &fl4, true);
767 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
769 struct rtable *rt = (struct rtable *)dst;
770 struct dst_entry *ret = dst;
773 if (dst->obsolete > 0) {
776 } else if ((rt->rt_flags & RTCF_REDIRECTED) ||
787 * 1. The first ip_rt_redirect_number redirects are sent
788 * with exponential backoff; after that we stop sending them entirely,
789 * assuming that the host ignores our redirects.
790 * 2. If we did not see packets requiring redirects
791 * during ip_rt_redirect_silence, we assume that the host has
792 * forgotten the redirected route, and we start sending redirects again.
794 * This algorithm is much cheaper and more intelligent than dumb load limiting
797 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
798 * and "frag. need" (breaks PMTU discovery) in icmp.c.
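 *
 * (Editor's worked example, assuming the defaults above and HZ=1000:
 * ip_rt_redirect_load is HZ/50 = 20 ms, so after k redirects have been
 * sent the next one is held back until 20 ms << k have elapsed; after
 * ip_rt_redirect_number (9) unanswered redirects we stop entirely until
 * ip_rt_redirect_silence, (HZ/50) << 10, i.e. roughly 20 s of quiet,
 * has passed.)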
801 void ip_rt_send_redirect(struct sk_buff *skb)
803 struct rtable *rt = skb_rtable(skb);
804 struct in_device *in_dev;
805 struct inet_peer *peer;
810 in_dev = __in_dev_get_rcu(rt->dst.dev);
811 if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
815 log_martians = IN_DEV_LOG_MARTIANS(in_dev);
818 net = dev_net(rt->dst.dev);
819 peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 1);
821 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
825 /* No redirected packets during ip_rt_redirect_silence;
826 * reset the algorithm.
828 if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence))
829 peer->rate_tokens = 0;
831 /* Too many ignored redirects; do not send anything;
832 * set dst.rate_last to the last seen redirected packet.
834 if (peer->rate_tokens >= ip_rt_redirect_number) {
835 peer->rate_last = jiffies;
839 /* Check for load limit; set rate_last to the latest sent
842 if (peer->rate_tokens == 0 ||
845 (ip_rt_redirect_load << peer->rate_tokens)))) {
846 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
847 peer->rate_last = jiffies;
849 #ifdef CONFIG_IP_ROUTE_VERBOSE
851 peer->rate_tokens == ip_rt_redirect_number)
852 net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
853 &ip_hdr(skb)->saddr, rt->rt_iif,
854 &ip_hdr(skb)->daddr, &rt->rt_gateway);
861 static int ip_error(struct sk_buff *skb)
863 struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
864 struct rtable *rt = skb_rtable(skb);
865 struct inet_peer *peer;
871 net = dev_net(rt->dst.dev);
872 if (!IN_DEV_FORWARD(in_dev)) {
873 switch (rt->dst.error) {
875 IP_INC_STATS_BH(net, IPSTATS_MIB_INADDRERRORS);
879 IP_INC_STATS_BH(net, IPSTATS_MIB_INNOROUTES);
885 switch (rt->dst.error) {
890 code = ICMP_HOST_UNREACH;
893 code = ICMP_NET_UNREACH;
894 IP_INC_STATS_BH(net, IPSTATS_MIB_INNOROUTES);
897 code = ICMP_PKT_FILTERED;
901 peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 1);
906 peer->rate_tokens += now - peer->rate_last;
907 if (peer->rate_tokens > ip_rt_error_burst)
908 peer->rate_tokens = ip_rt_error_burst;
909 peer->rate_last = now;
910 if (peer->rate_tokens >= ip_rt_error_cost)
911 peer->rate_tokens -= ip_rt_error_cost;
917 icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
923 static u32 __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
925 struct fib_result res;
927 if (mtu < ip_rt_min_pmtu)
928 mtu = ip_rt_min_pmtu;
930 if (fib_lookup(dev_net(rt->dst.dev), fl4, &res) == 0) {
931 struct fib_nh *nh = &FIB_RES_NH(res);
933 update_or_create_fnhe(nh, fl4->daddr, 0, mtu,
934 jiffies + ip_rt_mtu_expires);
939 static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
940 struct sk_buff *skb, u32 mtu)
942 struct rtable *rt = (struct rtable *) dst;
945 ip_rt_build_flow_key(&fl4, sk, skb);
946 mtu = __ip_rt_update_pmtu(rt, &fl4, mtu);
949 dst->obsolete = DST_OBSOLETE_KILL;
952 dst_set_expires(&rt->dst, ip_rt_mtu_expires);
956 void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
957 int oif, u32 mark, u8 protocol, int flow_flags)
959 const struct iphdr *iph = (const struct iphdr *) skb->data;
963 __build_flow_key(&fl4, NULL, iph, oif,
964 RT_TOS(iph->tos), protocol, mark, flow_flags);
965 rt = __ip_route_output_key(net, &fl4);
967 __ip_rt_update_pmtu(rt, &fl4, mtu);
971 EXPORT_SYMBOL_GPL(ipv4_update_pmtu);
973 void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
975 const struct iphdr *iph = (const struct iphdr *) skb->data;
979 __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
980 rt = __ip_route_output_key(sock_net(sk), &fl4);
982 __ip_rt_update_pmtu(rt, &fl4, mtu);
986 EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
988 void ipv4_redirect(struct sk_buff *skb, struct net *net,
989 int oif, u32 mark, u8 protocol, int flow_flags)
991 const struct iphdr *iph = (const struct iphdr *) skb->data;
995 __build_flow_key(&fl4, NULL, iph, oif,
996 RT_TOS(iph->tos), protocol, mark, flow_flags);
997 rt = __ip_route_output_key(net, &fl4);
999 __ip_do_redirect(rt, skb, &fl4, false);
1003 EXPORT_SYMBOL_GPL(ipv4_redirect);
1005 void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk)
1007 const struct iphdr *iph = (const struct iphdr *) skb->data;
1011 __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
1012 rt = __ip_route_output_key(sock_net(sk), &fl4);
1014 __ip_do_redirect(rt, skb, &fl4, false);
1018 EXPORT_SYMBOL_GPL(ipv4_sk_redirect);
1020 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
1022 struct rtable *rt = (struct rtable *) dst;
1024 /* All IPV4 dsts are created with ->obsolete set to the value
1025 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
1026 * into this function always.
1028 * When a PMTU/redirect information update invalidates a
1029 * route, this is indicated by setting obsolete to
1030 * DST_OBSOLETE_KILL.
1032 if (dst->obsolete == DST_OBSOLETE_KILL || rt_is_expired(rt))
1037 static void ipv4_dst_destroy(struct dst_entry *dst)
1039 struct rtable *rt = (struct rtable *) dst;
1042 fib_info_put(rt->fi);
1048 static void ipv4_link_failure(struct sk_buff *skb)
1052 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
1054 rt = skb_rtable(skb);
1056 dst_set_expires(&rt->dst, 0);
1059 static int ip_rt_bug(struct sk_buff *skb)
1061 pr_debug("%s: %pI4 -> %pI4, %s\n",
1062 __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
1063 skb->dev ? skb->dev->name : "?");
1070 We do not cache the source address of the outgoing interface,
1071 because it is used only by the IP RR, TS and SRR options,
1072 so it is out of the fast path.
1074 BTW remember: "addr" is allowed to be unaligned.
1078 void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
1082 if (rt_is_output_route(rt))
1083 src = ip_hdr(skb)->saddr;
1085 struct fib_result res;
1091 memset(&fl4, 0, sizeof(fl4));
1092 fl4.daddr = iph->daddr;
1093 fl4.saddr = iph->saddr;
1094 fl4.flowi4_tos = RT_TOS(iph->tos);
1095 fl4.flowi4_oif = rt->dst.dev->ifindex;
1096 fl4.flowi4_iif = skb->dev->ifindex;
1097 fl4.flowi4_mark = skb->mark;
1100 if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res) == 0)
1101 src = FIB_RES_PREFSRC(dev_net(rt->dst.dev), res);
1103 src = inet_select_addr(rt->dst.dev,
1104 rt_nexthop(rt, iph->daddr),
1108 memcpy(addr, &src, 4);
1111 #ifdef CONFIG_IP_ROUTE_CLASSID
1112 static void set_class_tag(struct rtable *rt, u32 tag)
1114 if (!(rt->dst.tclassid & 0xFFFF))
1115 rt->dst.tclassid |= tag & 0xFFFF;
1116 if (!(rt->dst.tclassid & 0xFFFF0000))
1117 rt->dst.tclassid |= tag & 0xFFFF0000;
1121 static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
1123 unsigned int advmss = dst_metric_raw(dst, RTAX_ADVMSS);
1126 advmss = max_t(unsigned int, dst->dev->mtu - 40,
1128 if (advmss > 65535 - 40)
1129 advmss = 65535 - 40;
1134 static unsigned int ipv4_mtu(const struct dst_entry *dst)
1136 const struct rtable *rt = (const struct rtable *) dst;
1137 unsigned int mtu = rt->rt_pmtu;
1139 if (mtu && time_after_eq(jiffies, rt->dst.expires))
1143 mtu = dst_metric_raw(dst, RTAX_MTU);
1145 if (mtu && rt_is_output_route(rt))
1148 mtu = dst->dev->mtu;
1150 if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
1151 if (rt->rt_gateway && mtu > 576)
1155 if (mtu > IP_MAX_MTU)
1161 static void rt_init_metrics(struct rtable *rt, const struct flowi4 *fl4,
1162 struct fib_info *fi)
1164 if (fi->fib_metrics != (u32 *) dst_default_metrics) {
1166 atomic_inc(&fi->fib_clntref);
1168 dst_init_metrics(&rt->dst, fi->fib_metrics, true);
1171 static void rt_bind_exception(struct rtable *rt, struct fib_nh *nh, __be32 daddr)
1173 struct fnhe_hash_bucket *hash = nh->nh_exceptions;
1174 struct fib_nh_exception *fnhe;
1177 hval = fnhe_hashfun(daddr);
1180 for (fnhe = rcu_dereference(hash[hval].chain); fnhe;
1181 fnhe = rcu_dereference(fnhe->fnhe_next)) {
1182 __be32 fnhe_daddr, gw;
1183 unsigned long expires;
1187 seq = read_seqbegin(&fnhe_seqlock);
1188 fnhe_daddr = fnhe->fnhe_daddr;
1190 pmtu = fnhe->fnhe_pmtu;
1191 expires = fnhe->fnhe_expires;
1192 if (read_seqretry(&fnhe_seqlock, seq))
1194 if (daddr != fnhe_daddr)
1197 unsigned long diff = expires - jiffies;
1199 if (time_before(jiffies, expires)) {
1201 dst_set_expires(&rt->dst, diff);
1205 rt->rt_flags |= RTCF_REDIRECTED;
1206 rt->rt_gateway = gw;
1208 fnhe->fnhe_stamp = jiffies;
1213 static void rt_set_nexthop(struct rtable *rt, const struct flowi4 *fl4,
1214 const struct fib_result *res,
1215 struct fib_info *fi, u16 type, u32 itag)
1218 struct fib_nh *nh = &FIB_RES_NH(*res);
1220 if (nh->nh_gw && nh->nh_scope == RT_SCOPE_LINK)
1221 rt->rt_gateway = nh->nh_gw;
1222 if (unlikely(nh->nh_exceptions))
1223 rt_bind_exception(rt, nh, fl4->daddr);
1224 rt_init_metrics(rt, fl4, fi);
1225 #ifdef CONFIG_IP_ROUTE_CLASSID
1226 rt->dst.tclassid = FIB_RES_NH(*res).nh_tclassid;
1230 #ifdef CONFIG_IP_ROUTE_CLASSID
1231 #ifdef CONFIG_IP_MULTIPLE_TABLES
1232 set_class_tag(rt, res->tclassid);
1234 set_class_tag(rt, itag);
1238 static struct rtable *rt_dst_alloc(struct net_device *dev,
1239 bool nopolicy, bool noxfrm)
1241 return dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
1242 DST_HOST | DST_NOCACHE |
1243 (nopolicy ? DST_NOPOLICY : 0) |
1244 (noxfrm ? DST_NOXFRM : 0));
1247 /* called in rcu_read_lock() section */
1248 static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1249 u8 tos, struct net_device *dev, int our)
1252 struct in_device *in_dev = __in_dev_get_rcu(dev);
1256 /* Primary sanity checks. */
1261 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
1262 skb->protocol != htons(ETH_P_IP))
1265 if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
1266 if (ipv4_is_loopback(saddr))
1269 if (ipv4_is_zeronet(saddr)) {
1270 if (!ipv4_is_local_multicast(daddr))
1273 err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
1278 rth = rt_dst_alloc(dev_net(dev)->loopback_dev,
1279 IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
1283 #ifdef CONFIG_IP_ROUTE_CLASSID
1284 rth->dst.tclassid = itag;
1286 rth->dst.output = ip_rt_bug;
1288 rth->rt_genid = rt_genid(dev_net(dev));
1289 rth->rt_flags = RTCF_MULTICAST;
1290 rth->rt_type = RTN_MULTICAST;
1291 rth->rt_route_iif = dev->ifindex;
1292 rth->rt_iif = dev->ifindex;
1295 rth->rt_gateway = 0;
1298 rth->dst.input= ip_local_deliver;
1299 rth->rt_flags |= RTCF_LOCAL;
1302 #ifdef CONFIG_IP_MROUTE
1303 if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
1304 rth->dst.input = ip_mr_input;
1306 RT_CACHE_STAT_INC(in_slow_mc);
1308 skb_dst_set(skb, &rth->dst);
1320 static void ip_handle_martian_source(struct net_device *dev,
1321 struct in_device *in_dev,
1322 struct sk_buff *skb,
1326 RT_CACHE_STAT_INC(in_martian_src);
1327 #ifdef CONFIG_IP_ROUTE_VERBOSE
1328 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
1330 * Per the RFC1812 recommendation: if the source is martian,
1331 * the only hint is the MAC header.
1333 pr_warn("martian source %pI4 from %pI4, on dev %s\n",
1334 &daddr, &saddr, dev->name);
1335 if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
1336 print_hex_dump(KERN_WARNING, "ll header: ",
1337 DUMP_PREFIX_OFFSET, 16, 1,
1338 skb_mac_header(skb),
1339 dev->hard_header_len, true);
1345 /* called in rcu_read_lock() section */
1346 static int __mkroute_input(struct sk_buff *skb,
1347 const struct fib_result *res,
1348 struct in_device *in_dev,
1349 __be32 daddr, __be32 saddr, u32 tos,
1350 struct rtable **result)
1354 struct in_device *out_dev;
1355 unsigned int flags = 0;
1358 /* get a working reference to the output device */
1359 out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
1360 if (out_dev == NULL) {
1361 net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
1366 err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
1367 in_dev->dev, in_dev, &itag);
1369 ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
1376 flags |= RTCF_DIRECTSRC;
1378 if (out_dev == in_dev && err &&
1379 (IN_DEV_SHARED_MEDIA(out_dev) ||
1380 inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
1381 flags |= RTCF_DOREDIRECT;
1383 if (skb->protocol != htons(ETH_P_IP)) {
1384 /* Not IP (i.e. ARP). Do not create a route if it is
1385 * invalid for proxy ARP. DNAT routes are always valid.
1387 * The proxy ARP feature has been extended to allow ARP
1388 * replies back on the same interface, to support
1389 * Private VLAN switch technologies. See arp.c.
1391 if (out_dev == in_dev &&
1392 IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
1398 rth = rt_dst_alloc(out_dev->dev,
1399 IN_DEV_CONF_GET(in_dev, NOPOLICY),
1400 IN_DEV_CONF_GET(out_dev, NOXFRM));
1406 rth->rt_genid = rt_genid(dev_net(rth->dst.dev));
1407 rth->rt_flags = flags;
1408 rth->rt_type = res->type;
1409 rth->rt_route_iif = in_dev->dev->ifindex;
1410 rth->rt_iif = in_dev->dev->ifindex;
1413 rth->rt_gateway = 0;
1416 rth->dst.input = ip_forward;
1417 rth->dst.output = ip_output;
1419 rt_set_nexthop(rth, NULL, res, res->fi, res->type, itag);
1427 static int ip_mkroute_input(struct sk_buff *skb,
1428 struct fib_result *res,
1429 const struct flowi4 *fl4,
1430 struct in_device *in_dev,
1431 __be32 daddr, __be32 saddr, u32 tos)
1433 struct rtable *rth = NULL;
1436 #ifdef CONFIG_IP_ROUTE_MULTIPATH
1437 if (res->fi && res->fi->fib_nhs > 1)
1438 fib_select_multipath(res);
1441 /* create a routing cache entry */
1442 err = __mkroute_input(skb, res, in_dev, daddr, saddr, tos, &rth);
1446 skb_dst_set(skb, &rth->dst);
1451 * NOTE. We drop all packets that have local source
1452 * addresses, because every properly looped-back packet
1453 * must already have the correct destination attached by the output routine.
1455 * This approach solves two big problems:
1456 * 1. Non-simplex devices are handled properly.
1457 * 2. IP spoofing attempts are filtered with a 100% guarantee.
1458 * called with rcu_read_lock()
1461 static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1462 u8 tos, struct net_device *dev)
1464 struct fib_result res;
1465 struct in_device *in_dev = __in_dev_get_rcu(dev);
1467 unsigned int flags = 0;
1471 struct net *net = dev_net(dev);
1473 /* IP on this device is disabled. */
1478 /* Check for the most weird martians, which cannot be detected
1482 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
1483 goto martian_source;
1485 if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
1488 /* Accept zero addresses only to limited broadcast;
1489 * I do not even know whether to fix this or not. Waiting for complaints :-)
1491 if (ipv4_is_zeronet(saddr))
1492 goto martian_source;
1494 if (ipv4_is_zeronet(daddr))
1495 goto martian_destination;
1497 if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev))) {
1498 if (ipv4_is_loopback(daddr))
1499 goto martian_destination;
1501 if (ipv4_is_loopback(saddr))
1502 goto martian_source;
1506 * Now we are ready to route the packet.
1509 fl4.flowi4_iif = dev->ifindex;
1510 fl4.flowi4_mark = skb->mark;
1511 fl4.flowi4_tos = tos;
1512 fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
1515 err = fib_lookup(net, &fl4, &res);
1519 RT_CACHE_STAT_INC(in_slow_tot);
1521 if (res.type == RTN_BROADCAST)
1524 if (res.type == RTN_LOCAL) {
1525 err = fib_validate_source(skb, saddr, daddr, tos,
1526 net->loopback_dev->ifindex,
1527 dev, in_dev, &itag);
1529 goto martian_source_keep_err;
1531 flags |= RTCF_DIRECTSRC;
1535 if (!IN_DEV_FORWARD(in_dev))
1537 if (res.type != RTN_UNICAST)
1538 goto martian_destination;
1540 err = ip_mkroute_input(skb, &res, &fl4, in_dev, daddr, saddr, tos);
1544 if (skb->protocol != htons(ETH_P_IP))
1547 if (!ipv4_is_zeronet(saddr)) {
1548 err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
1551 goto martian_source_keep_err;
1553 flags |= RTCF_DIRECTSRC;
1555 flags |= RTCF_BROADCAST;
1556 res.type = RTN_BROADCAST;
1557 RT_CACHE_STAT_INC(in_brd);
1560 rth = rt_dst_alloc(net->loopback_dev,
1561 IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
1565 rth->dst.input= ip_local_deliver;
1566 rth->dst.output= ip_rt_bug;
1567 #ifdef CONFIG_IP_ROUTE_CLASSID
1568 rth->dst.tclassid = itag;
1571 rth->rt_genid = rt_genid(net);
1572 rth->rt_flags = flags|RTCF_LOCAL;
1573 rth->rt_type = res.type;
1574 rth->rt_route_iif = dev->ifindex;
1575 rth->rt_iif = dev->ifindex;
1578 rth->rt_gateway = 0;
1580 if (res.type == RTN_UNREACHABLE) {
1581 rth->dst.input= ip_error;
1582 rth->dst.error= -err;
1583 rth->rt_flags &= ~RTCF_LOCAL;
1585 skb_dst_set(skb, &rth->dst);
1590 RT_CACHE_STAT_INC(in_no_route);
1591 res.type = RTN_UNREACHABLE;
1597 * Do not cache martian addresses: they should be logged (RFC1812)
1599 martian_destination:
1600 RT_CACHE_STAT_INC(in_martian_dst);
1601 #ifdef CONFIG_IP_ROUTE_VERBOSE
1602 if (IN_DEV_LOG_MARTIANS(in_dev))
1603 net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n",
1604 &daddr, &saddr, dev->name);
1617 martian_source_keep_err:
1618 ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
1622 int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1623 u8 tos, struct net_device *dev)
1629 /* Multicast recognition logic has moved from the route cache to here.
1630 The problem was that too many Ethernet cards have broken/missing
1631 hardware multicast filters :-( As a result, a host on a multicast
1632 network acquires a lot of useless route cache entries, essentially
1633 SDR messages from all over the world. Now we try to get rid of them.
1634 Really, provided the software IP multicast filter is organized
1635 reasonably (at least, hashed), it does not result in a slowdown
1636 compared with route cache reject entries.
1637 Note that multicast routers are not affected, because a
1638 route cache entry is created eventually.
1640 if (ipv4_is_multicast(daddr)) {
1641 struct in_device *in_dev = __in_dev_get_rcu(dev);
1644 int our = ip_check_mc_rcu(in_dev, daddr, saddr,
1645 ip_hdr(skb)->protocol);
1647 #ifdef CONFIG_IP_MROUTE
1649 (!ipv4_is_local_multicast(daddr) &&
1650 IN_DEV_MFORWARD(in_dev))
1653 int res = ip_route_input_mc(skb, daddr, saddr,
1662 res = ip_route_input_slow(skb, daddr, saddr, tos, dev);
1666 EXPORT_SYMBOL(ip_route_input);
1668 /* called with rcu_read_lock() */
1669 static struct rtable *__mkroute_output(const struct fib_result *res,
1670 const struct flowi4 *fl4, int orig_oif,
1671 struct net_device *dev_out,
1674 struct fib_info *fi = res->fi;
1675 struct in_device *in_dev;
1676 u16 type = res->type;
1679 in_dev = __in_dev_get_rcu(dev_out);
1681 return ERR_PTR(-EINVAL);
1683 if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
1684 if (ipv4_is_loopback(fl4->saddr) && !(dev_out->flags & IFF_LOOPBACK))
1685 return ERR_PTR(-EINVAL);
1687 if (ipv4_is_lbcast(fl4->daddr))
1688 type = RTN_BROADCAST;
1689 else if (ipv4_is_multicast(fl4->daddr))
1690 type = RTN_MULTICAST;
1691 else if (ipv4_is_zeronet(fl4->daddr))
1692 return ERR_PTR(-EINVAL);
1694 if (dev_out->flags & IFF_LOOPBACK)
1695 flags |= RTCF_LOCAL;
1697 if (type == RTN_BROADCAST) {
1698 flags |= RTCF_BROADCAST | RTCF_LOCAL;
1700 } else if (type == RTN_MULTICAST) {
1701 flags |= RTCF_MULTICAST | RTCF_LOCAL;
1702 if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
1704 flags &= ~RTCF_LOCAL;
1705 /* If a multicast route does not exist, use
1706 * the default one, but do not use a gateway in this case.
1709 if (fi && res->prefixlen < 4)
1713 rth = rt_dst_alloc(dev_out,
1714 IN_DEV_CONF_GET(in_dev, NOPOLICY),
1715 IN_DEV_CONF_GET(in_dev, NOXFRM));
1717 return ERR_PTR(-ENOBUFS);
1719 rth->dst.output = ip_output;
1721 rth->rt_genid = rt_genid(dev_net(dev_out));
1722 rth->rt_flags = flags;
1723 rth->rt_type = type;
1724 rth->rt_route_iif = 0;
1725 rth->rt_iif = orig_oif ? : dev_out->ifindex;
1726 rth->rt_oif = orig_oif;
1728 rth->rt_gateway = 0;
1731 RT_CACHE_STAT_INC(out_slow_tot);
1733 if (flags & RTCF_LOCAL)
1734 rth->dst.input = ip_local_deliver;
1735 if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
1736 if (flags & RTCF_LOCAL &&
1737 !(dev_out->flags & IFF_LOOPBACK)) {
1738 rth->dst.output = ip_mc_output;
1739 RT_CACHE_STAT_INC(out_slow_mc);
1741 #ifdef CONFIG_IP_MROUTE
1742 if (type == RTN_MULTICAST) {
1743 if (IN_DEV_MFORWARD(in_dev) &&
1744 !ipv4_is_local_multicast(fl4->daddr)) {
1745 rth->dst.input = ip_mr_input;
1746 rth->dst.output = ip_mc_output;
1752 rt_set_nexthop(rth, fl4, res, fi, type, 0);
1754 if (fl4->flowi4_flags & FLOWI_FLAG_RT_NOCACHE)
1755 rth->dst.flags |= DST_NOCACHE;
1761 * Major route resolver routine.
1764 struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
1766 struct net_device *dev_out = NULL;
1767 __u8 tos = RT_FL_TOS(fl4);
1768 unsigned int flags = 0;
1769 struct fib_result res;
1777 orig_oif = fl4->flowi4_oif;
1779 fl4->flowi4_iif = net->loopback_dev->ifindex;
1780 fl4->flowi4_tos = tos & IPTOS_RT_MASK;
1781 fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
1782 RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);
1786 rth = ERR_PTR(-EINVAL);
1787 if (ipv4_is_multicast(fl4->saddr) ||
1788 ipv4_is_lbcast(fl4->saddr) ||
1789 ipv4_is_zeronet(fl4->saddr))
1792 /* I removed the check for oif == dev_out->oif here.
1793 It was wrong for two reasons:
1794 1. ip_dev_find(net, saddr) can return the wrong iface if saddr
1795 is assigned to multiple interfaces.
1796 2. Moreover, we are allowed to send packets with the saddr
1797 of another iface. --ANK
1800 if (fl4->flowi4_oif == 0 &&
1801 (ipv4_is_multicast(fl4->daddr) ||
1802 ipv4_is_lbcast(fl4->daddr))) {
1803 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
1804 dev_out = __ip_dev_find(net, fl4->saddr, false);
1805 if (dev_out == NULL)
1808 /* Special hack: the user can direct multicasts
1809 and limited broadcast via the necessary interface
1810 without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
1811 This hack is not just for fun; it allows
1812 vic, vat and friends to work.
1813 They bind a socket to loopback, set the ttl to zero
1814 and expect that it will work.
1815 From the viewpoint of the routing cache they are broken,
1816 because we are not allowed to build a multicast path
1817 with a loopback source addr (look, the routing cache
1818 cannot know that the ttl is zero, so the packet
1819 will not leave this host and the route is valid).
1820 Luckily, this hack is a good workaround.
1823 fl4->flowi4_oif = dev_out->ifindex;
1827 if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
1828 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
1829 if (!__ip_dev_find(net, fl4->saddr, false))
1835 if (fl4->flowi4_oif) {
1836 dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
1837 rth = ERR_PTR(-ENODEV);
1838 if (dev_out == NULL)
1841 /* RACE: Check return value of inet_select_addr instead. */
1842 if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
1843 rth = ERR_PTR(-ENETUNREACH);
1846 if (ipv4_is_local_multicast(fl4->daddr) ||
1847 ipv4_is_lbcast(fl4->daddr)) {
1849 fl4->saddr = inet_select_addr(dev_out, 0,
1854 if (ipv4_is_multicast(fl4->daddr))
1855 fl4->saddr = inet_select_addr(dev_out, 0,
1857 else if (!fl4->daddr)
1858 fl4->saddr = inet_select_addr(dev_out, 0,
1864 fl4->daddr = fl4->saddr;
1866 fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
1867 dev_out = net->loopback_dev;
1868 fl4->flowi4_oif = net->loopback_dev->ifindex;
1869 res.type = RTN_LOCAL;
1870 flags |= RTCF_LOCAL;
1874 if (fib_lookup(net, fl4, &res)) {
1877 if (fl4->flowi4_oif) {
1878 /* Apparently, the routing tables are wrong. Assume
1879 that the destination is on-link.
1882 Because we are allowed to send to an iface
1883 even if it has NO routes and NO assigned
1884 addresses. When oif is specified, the routing
1885 tables are looked up with only one purpose:
1886 to catch whether the destination is gatewayed rather than
1887 direct. Moreover, if MSG_DONTROUTE is set,
1888 we send the packet, ignoring both the routing tables
1889 and ifaddr state. --ANK
1892 We could do this even when oif is unknown,
1893 likely the IPv6 case, but we do not.
1896 if (fl4->saddr == 0)
1897 fl4->saddr = inet_select_addr(dev_out, 0,
1899 res.type = RTN_UNICAST;
1902 rth = ERR_PTR(-ENETUNREACH);
1906 if (res.type == RTN_LOCAL) {
1908 if (res.fi->fib_prefsrc)
1909 fl4->saddr = res.fi->fib_prefsrc;
1911 fl4->saddr = fl4->daddr;
1913 dev_out = net->loopback_dev;
1914 fl4->flowi4_oif = dev_out->ifindex;
1916 flags |= RTCF_LOCAL;
1920 #ifdef CONFIG_IP_ROUTE_MULTIPATH
1921 if (res.fi->fib_nhs > 1 && fl4->flowi4_oif == 0)
1922 fib_select_multipath(&res);
1925 if (!res.prefixlen &&
1926 res.table->tb_num_default > 1 &&
1927 res.type == RTN_UNICAST && !fl4->flowi4_oif)
1928 fib_select_default(&res);
1931 fl4->saddr = FIB_RES_PREFSRC(net, res);
1933 dev_out = FIB_RES_DEV(res);
1934 fl4->flowi4_oif = dev_out->ifindex;
1938 rth = __mkroute_output(&res, fl4, orig_oif, dev_out, flags);
1944 EXPORT_SYMBOL_GPL(__ip_route_output_key);
1946 static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
1951 static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
1953 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
1955 return mtu ? : dst->dev->mtu;
1958 static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
1959 struct sk_buff *skb, u32 mtu)
1963 static void ipv4_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
1964 struct sk_buff *skb)
1968 static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst,
1974 static struct dst_ops ipv4_dst_blackhole_ops = {
1976 .protocol = cpu_to_be16(ETH_P_IP),
1977 .destroy = ipv4_dst_destroy,
1978 .check = ipv4_blackhole_dst_check,
1979 .mtu = ipv4_blackhole_mtu,
1980 .default_advmss = ipv4_default_advmss,
1981 .update_pmtu = ipv4_rt_blackhole_update_pmtu,
1982 .redirect = ipv4_rt_blackhole_redirect,
1983 .cow_metrics = ipv4_rt_blackhole_cow_metrics,
1984 .neigh_lookup = ipv4_neigh_lookup,
1987 struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
1989 struct rtable *ort = (struct rtable *) dst_orig;
1992 rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_NONE, 0);
1994 struct dst_entry *new = &rt->dst;
1997 new->input = dst_discard;
1998 new->output = dst_discard;
2000 new->dev = ort->dst.dev;
2004 rt->rt_route_iif = ort->rt_route_iif;
2005 rt->rt_iif = ort->rt_iif;
2006 rt->rt_oif = ort->rt_oif;
2007 rt->rt_pmtu = ort->rt_pmtu;
2009 rt->rt_genid = rt_genid(net);
2010 rt->rt_flags = ort->rt_flags;
2011 rt->rt_type = ort->rt_type;
2012 rt->rt_gateway = ort->rt_gateway;
2015 atomic_inc(&rt->fi->fib_clntref);
2020 dst_release(dst_orig);
2022 return rt ? &rt->dst : ERR_PTR(-ENOMEM);
2025 struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
2028 struct rtable *rt = __ip_route_output_key(net, flp4);
2033 if (flp4->flowi4_proto)
2034 rt = (struct rtable *) xfrm_lookup(net, &rt->dst,
2035 flowi4_to_flowi(flp4),
2040 EXPORT_SYMBOL_GPL(ip_route_output_flow);
2042 static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
2043 struct flowi4 *fl4, struct sk_buff *skb, u32 pid,
2044 u32 seq, int event, int nowait, unsigned int flags)
2046 struct rtable *rt = skb_rtable(skb);
2048 struct nlmsghdr *nlh;
2049 unsigned long expires = 0;
2051 u32 metrics[RTAX_MAX];
2053 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
2057 r = nlmsg_data(nlh);
2058 r->rtm_family = AF_INET;
2059 r->rtm_dst_len = 32;
2061 r->rtm_tos = fl4->flowi4_tos;
2062 r->rtm_table = RT_TABLE_MAIN;
2063 if (nla_put_u32(skb, RTA_TABLE, RT_TABLE_MAIN))
2064 goto nla_put_failure;
2065 r->rtm_type = rt->rt_type;
2066 r->rtm_scope = RT_SCOPE_UNIVERSE;
2067 r->rtm_protocol = RTPROT_UNSPEC;
2068 r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
2069 if (rt->rt_flags & RTCF_NOTIFY)
2070 r->rtm_flags |= RTM_F_NOTIFY;
2072 if (nla_put_be32(skb, RTA_DST, dst))
2073 goto nla_put_failure;
2075 r->rtm_src_len = 32;
2076 if (nla_put_be32(skb, RTA_SRC, src))
2077 goto nla_put_failure;
2080 nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
2081 goto nla_put_failure;
2082 #ifdef CONFIG_IP_ROUTE_CLASSID
2083 if (rt->dst.tclassid &&
2084 nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
2085 goto nla_put_failure;
2087 if (!rt_is_input_route(rt) &&
2088 fl4->saddr != src) {
2089 if (nla_put_be32(skb, RTA_PREFSRC, fl4->saddr))
2090 goto nla_put_failure;
2092 if (rt->rt_gateway &&
2093 nla_put_be32(skb, RTA_GATEWAY, rt->rt_gateway))
2094 goto nla_put_failure;
2096 memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
2098 metrics[RTAX_MTU - 1] = rt->rt_pmtu;
2099 if (rtnetlink_put_metrics(skb, metrics) < 0)
2100 goto nla_put_failure;
2102 if (fl4->flowi4_mark &&
2103 nla_put_be32(skb, RTA_MARK, fl4->flowi4_mark))
2104 goto nla_put_failure;
2106 error = rt->dst.error;
2107 expires = rt->dst.expires;
2109 if (time_before(jiffies, expires))
2115 if (rt_is_input_route(rt)) {
2116 if (nla_put_u32(skb, RTA_IIF, rt->rt_iif))
2117 goto nla_put_failure;
2120 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
2121 goto nla_put_failure;
2123 return nlmsg_end(skb, nlh);
2126 nlmsg_cancel(skb, nlh);
2130 static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg)
2132 struct net *net = sock_net(in_skb->sk);
2134 struct nlattr *tb[RTA_MAX+1];
2135 struct rtable *rt = NULL;
2142 struct sk_buff *skb;
2144 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
2148 rtm = nlmsg_data(nlh);
2150 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2156 /* Reserve room for dummy headers; this skb can pass
2157 through a good chunk of the routing engine.
2159 skb_reset_mac_header(skb);
2160 skb_reset_network_header(skb);
2162 /* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
2163 ip_hdr(skb)->protocol = IPPROTO_ICMP;
2164 skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
2166 src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0;
2167 dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
2168 iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
2169 mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
2171 memset(&fl4, 0, sizeof(fl4));
2174 fl4.flowi4_tos = rtm->rtm_tos;
2175 fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
2176 fl4.flowi4_mark = mark;
2179 struct net_device *dev;
2181 dev = __dev_get_by_index(net, iif);
2187 skb->protocol = htons(ETH_P_IP);
2191 err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
2194 rt = skb_rtable(skb);
2195 if (err == 0 && rt->dst.error)
2196 err = -rt->dst.error;
2198 rt = ip_route_output_key(net, &fl4);
2208 skb_dst_set(skb, &rt->dst);
2209 if (rtm->rtm_flags & RTM_F_NOTIFY)
2210 rt->rt_flags |= RTCF_NOTIFY;
2212 err = rt_fill_info(net, dst, src, &fl4, skb,
2213 NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
2214 RTM_NEWROUTE, 0, 0);
2218 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
2227 int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
2232 void ip_rt_multicast_event(struct in_device *in_dev)
2234 rt_cache_flush(dev_net(in_dev->dev), 0);
2237 #ifdef CONFIG_SYSCTL
2238 static int ipv4_sysctl_rtcache_flush(ctl_table *__ctl, int write,
2239 void __user *buffer,
2240 size_t *lenp, loff_t *ppos)
2247 memcpy(&ctl, __ctl, sizeof(ctl));
2248 ctl.data = &flush_delay;
2249 proc_dointvec(&ctl, write, buffer, lenp, ppos);
2251 net = (struct net *)__ctl->extra1;
2252 rt_cache_flush(net, flush_delay);
2259 static ctl_table ipv4_route_table[] = {
2261 .procname = "gc_thresh",
2262 .data = &ipv4_dst_ops.gc_thresh,
2263 .maxlen = sizeof(int),
2265 .proc_handler = proc_dointvec,
2268 .procname = "max_size",
2269 .data = &ip_rt_max_size,
2270 .maxlen = sizeof(int),
2272 .proc_handler = proc_dointvec,
2275 /* Deprecated. Use gc_min_interval_ms */
2277 .procname = "gc_min_interval",
2278 .data = &ip_rt_gc_min_interval,
2279 .maxlen = sizeof(int),
2281 .proc_handler = proc_dointvec_jiffies,
2284 .procname = "gc_min_interval_ms",
2285 .data = &ip_rt_gc_min_interval,
2286 .maxlen = sizeof(int),
2288 .proc_handler = proc_dointvec_ms_jiffies,
2291 .procname = "gc_timeout",
2292 .data = &ip_rt_gc_timeout,
2293 .maxlen = sizeof(int),
2295 .proc_handler = proc_dointvec_jiffies,
2298 .procname = "gc_interval",
2299 .data = &ip_rt_gc_interval,
2300 .maxlen = sizeof(int),
2302 .proc_handler = proc_dointvec_jiffies,
2305 .procname = "redirect_load",
2306 .data = &ip_rt_redirect_load,
2307 .maxlen = sizeof(int),
2309 .proc_handler = proc_dointvec,
2312 .procname = "redirect_number",
2313 .data = &ip_rt_redirect_number,
2314 .maxlen = sizeof(int),
2316 .proc_handler = proc_dointvec,
2319 .procname = "redirect_silence",
2320 .data = &ip_rt_redirect_silence,
2321 .maxlen = sizeof(int),
2323 .proc_handler = proc_dointvec,
2326 .procname = "error_cost",
2327 .data = &ip_rt_error_cost,
2328 .maxlen = sizeof(int),
2330 .proc_handler = proc_dointvec,
2333 .procname = "error_burst",
2334 .data = &ip_rt_error_burst,
2335 .maxlen = sizeof(int),
2337 .proc_handler = proc_dointvec,
2340 .procname = "gc_elasticity",
2341 .data = &ip_rt_gc_elasticity,
2342 .maxlen = sizeof(int),
2344 .proc_handler = proc_dointvec,
2347 .procname = "mtu_expires",
2348 .data = &ip_rt_mtu_expires,
2349 .maxlen = sizeof(int),
2351 .proc_handler = proc_dointvec_jiffies,
2354 .procname = "min_pmtu",
2355 .data = &ip_rt_min_pmtu,
2356 .maxlen = sizeof(int),
2358 .proc_handler = proc_dointvec,
2361 .procname = "min_adv_mss",
2362 .data = &ip_rt_min_advmss,
2363 .maxlen = sizeof(int),
2365 .proc_handler = proc_dointvec,
2370 static struct ctl_table ipv4_route_flush_table[] = {
2372 .procname = "flush",
2373 .maxlen = sizeof(int),
2375 .proc_handler = ipv4_sysctl_rtcache_flush,
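/* Editor's note (hypothetical userspace sketch, not part of this file):
 * the "flush" entry above is registered by sysctl_route_net_init() under
 * "net/ipv4/route", so the cache can be invalidated from userspace roughly
 * like this, assuming the conventional /proc/sys mount point:
 */
#if 0	/* illustrative only, never compiled */
#include <fcntl.h>
#include <unistd.h>

static int flush_route_cache(void)
{
	int fd = open("/proc/sys/net/ipv4/route/flush", O_WRONLY);

	if (fd < 0)
		return -1;
	/* The written integer is parsed as the flush delay by
	 * ipv4_sysctl_rtcache_flush().
	 */
	if (write(fd, "-1\n", 3) != 3) {
		close(fd);
		return -1;
	}
	return close(fd);
}
#endif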
2380 static __net_init int sysctl_route_net_init(struct net *net)
2382 struct ctl_table *tbl;
2384 tbl = ipv4_route_flush_table;
2385 if (!net_eq(net, &init_net)) {
2386 tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
2390 tbl[0].extra1 = net;
2392 net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
2393 if (net->ipv4.route_hdr == NULL)
2398 if (tbl != ipv4_route_flush_table)
2404 static __net_exit void sysctl_route_net_exit(struct net *net)
2406 struct ctl_table *tbl;
2408 tbl = net->ipv4.route_hdr->ctl_table_arg;
2409 unregister_net_sysctl_table(net->ipv4.route_hdr);
2410 BUG_ON(tbl == ipv4_route_flush_table);
2414 static __net_initdata struct pernet_operations sysctl_route_ops = {
2415 .init = sysctl_route_net_init,
2416 .exit = sysctl_route_net_exit,
2420 static __net_init int rt_genid_init(struct net *net)
2422 get_random_bytes(&net->ipv4.rt_genid,
2423 sizeof(net->ipv4.rt_genid));
2424 get_random_bytes(&net->ipv4.dev_addr_genid,
2425 sizeof(net->ipv4.dev_addr_genid));
2429 static __net_initdata struct pernet_operations rt_genid_ops = {
2430 .init = rt_genid_init,
2433 static int __net_init ipv4_inetpeer_init(struct net *net)
2435 struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
2439 inet_peer_base_init(bp);
2440 net->ipv4.peers = bp;
2444 static void __net_exit ipv4_inetpeer_exit(struct net *net)
2446 struct inet_peer_base *bp = net->ipv4.peers;
2448 net->ipv4.peers = NULL;
2449 inetpeer_invalidate_tree(bp);
2453 static __net_initdata struct pernet_operations ipv4_inetpeer_ops = {
2454 .init = ipv4_inetpeer_init,
2455 .exit = ipv4_inetpeer_exit,
2458 #ifdef CONFIG_IP_ROUTE_CLASSID
2459 struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
2460 #endif /* CONFIG_IP_ROUTE_CLASSID */
2462 int __init ip_rt_init(void)
2466 #ifdef CONFIG_IP_ROUTE_CLASSID
2467 ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
2469 panic("IP: failed to allocate ip_rt_acct\n");
2472 ipv4_dst_ops.kmem_cachep =
2473 kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
2474 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
2476 ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;
2478 if (dst_entries_init(&ipv4_dst_ops) < 0)
2479 panic("IP: failed to allocate ipv4_dst_ops counter\n");
2481 if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
2482 panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");
2484 ipv4_dst_ops.gc_thresh = ~0;
2485 ip_rt_max_size = INT_MAX;
2490 if (ip_rt_proc_init())
2491 pr_err("Unable to create route proc files\n");
2494 xfrm4_init(ip_rt_max_size);
2496 rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL, NULL);
2498 #ifdef CONFIG_SYSCTL
2499 register_pernet_subsys(&sysctl_route_ops);
2501 register_pernet_subsys(&rt_genid_ops);
2502 register_pernet_subsys(&ipv4_inetpeer_ops);
2506 #ifdef CONFIG_SYSCTL
2508 * We really need to sanitize the damn IPv4 init order; then all
2509 * this nonsense will go away.
2511 void __init ip_static_sysctl_init(void)
2513 register_net_sysctl(&init_net, "net/ipv4/route", ipv4_route_table);