/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		ROUTE - implementation of the IP router.
 *
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Linus Torvalds, <Linus.Torvalds@helsinki.fi>
 *		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *	Alan Cox	:	Verify area fixes.
 *	Alan Cox	:	cli() protects routing changes
 *	Rui Oliveira	:	ICMP routing table updates
 *		(rco@di.uminho.pt)	Routing table insertion and update
 *	Linus Torvalds	:	Rewrote bits to be sensible
 *	Alan Cox	:	Added BSD route gw semantics
 *	Alan Cox	:	Super /proc >4K
 *	Alan Cox	:	MTU in route table
 *	Alan Cox	:	MSS actually. Also added the window
 *				clamper.
 *	Sam Lantinga	:	Fixed route matching in rt_del()
 *	Alan Cox	:	Routing cache support.
 *	Alan Cox	:	Removed compatibility cruft.
 *	Alan Cox	:	RTF_REJECT support.
 *	Alan Cox	:	TCP irtt support.
 *	Jonathan Naylor	:	Added Metric support.
 *	Miquel van Smoorenburg	:	BSD API fixes.
 *	Miquel van Smoorenburg	:	Metrics.
 *	Alan Cox	:	Use __u32 properly
 *	Alan Cox	:	Aligned routing errors more closely with BSD
 *				our system is still very different.
 *	Alan Cox	:	Faster /proc handling
 *	Alexey Kuznetsov	:	Massive rework to support tree based routing,
 *				routing caches and better behaviour.
 *	Olaf Erb	:	irtt wasn't being copied right.
 *	Bjorn Ekwall	:	Kerneld route support.
 *	Alan Cox	:	Multicast fixed (I hope)
 *	Pavel Krauz	:	Limited broadcast fixed
 *	Mike McLagan	:	Routing by source
 *	Alexey Kuznetsov	:	End of old history. Split to fib.c and
 *				route.c and rewritten from scratch.
 *	Andi Kleen	:	Load-limit warning messages.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *	Vitaly E. Lavrov	:	Race condition in ip_route_input_slow.
 *	Tobias Ringstrom	:	Uninitialized res.type in ip_route_output_slow.
 *	Vladimir V. Ivanov	:	IP rule info (flowid) is really useful.
 *	Marc Boucher	:	routing by fwmark
 *	Robert Olsson	:	Added rt_cache statistics
 *	Arnaldo C. Melo	:	Convert proc stuff to seq_file
 *	Eric Dumazet	:	hashed spinlocks and rt_check_expire() fixes.
 *	Ilia Sotnikov	:	Ignore TOS on PMTUD and Redirect
 *	Ilia Sotnikov	:	Removed TOS from hash calculations
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/bootmem.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/pkt_sched.h>
#include <linux/mroute.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/netevent.h>
#include <net/rtnetlink.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#define RT_FL_TOS(oldflp) \
    ((u32)(oldflp->fl4_tos & (IPTOS_RT_MASK | RTO_ONLINK)))
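/*
 * Illustrative example (not part of the original source): IPTOS_RT_MASK
 * keeps only the routing-relevant TOS bits and RTO_ONLINK is the extra
 * flag bit that requests a scope-link ("on-link") lookup, so for a flow
 * whose fl4_tos is (IPTOS_LOWDELAY | RTO_ONLINK) both bits survive the
 * mask while everything else is cleared.
 */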
#define IP_MAX_MTU	0xFFF0

#define RT_GC_TIMEOUT (300*HZ)
static int ip_rt_max_size;
static int ip_rt_gc_timeout __read_mostly	= RT_GC_TIMEOUT;
static int ip_rt_gc_interval __read_mostly	= 60 * HZ;
static int ip_rt_gc_min_interval __read_mostly	= HZ / 2;
static int ip_rt_redirect_number __read_mostly	= 9;
static int ip_rt_redirect_load __read_mostly	= HZ / 50;
static int ip_rt_redirect_silence __read_mostly	= ((HZ / 50) << (9 + 1));
static int ip_rt_error_cost __read_mostly	= HZ;
static int ip_rt_error_burst __read_mostly	= 5 * HZ;
static int ip_rt_gc_elasticity __read_mostly	= 8;
static int ip_rt_mtu_expires __read_mostly	= 10 * 60 * HZ;
static int ip_rt_min_pmtu __read_mostly		= 512 + 20 + 20;
static int ip_rt_min_advmss __read_mostly	= 256;
static int rt_chain_length_max __read_mostly	= 20;
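/*
 * Rough values of the defaults above, assuming HZ == 1000 (illustrative):
 * gc_timeout 300 s, gc_min_interval 0.5 s, redirect_load 20 ms,
 * redirect_silence (20 ms << 10) ~= 20.5 s, mtu_expires 10 min;
 * min_pmtu is 512 bytes of payload plus 20 bytes each for the IP and
 * TCP headers.
 */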
static struct delayed_work expires_work;
static unsigned long expires_ljiffies;
/*
 *	Interface to generic destination cache.
 */
static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static void ipv4_dst_destroy(struct dst_entry *dst);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void ipv4_link_failure(struct sk_buff *skb);
static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
static int rt_garbage_collect(struct dst_ops *ops);

static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
                            int how);
static struct dst_ops ipv4_dst_ops = {
    .protocol =		cpu_to_be16(ETH_P_IP),
    .gc =		rt_garbage_collect,
    .check =		ipv4_dst_check,
    .destroy =		ipv4_dst_destroy,
    .ifdown =		ipv4_dst_ifdown,
    .negative_advice =	ipv4_negative_advice,
    .link_failure =	ipv4_link_failure,
    .update_pmtu =	ip_rt_update_pmtu,
    .local_out =	__ip_local_out,
};
#define ECN_OR_COST(class)	TC_PRIO_##class

const __u8 ip_tos2prio[16] = {
    TC_PRIO_BESTEFFORT,
    ECN_OR_COST(FILLER),
    TC_PRIO_BESTEFFORT,
    ECN_OR_COST(BESTEFFORT),
    TC_PRIO_BULK,
    ECN_OR_COST(BULK),
    TC_PRIO_BULK,
    ECN_OR_COST(BULK),
    TC_PRIO_INTERACTIVE,
    ECN_OR_COST(INTERACTIVE),
    TC_PRIO_INTERACTIVE,
    ECN_OR_COST(INTERACTIVE),
    TC_PRIO_INTERACTIVE_BULK,
    ECN_OR_COST(INTERACTIVE_BULK),
    TC_PRIO_INTERACTIVE_BULK,
    ECN_OR_COST(INTERACTIVE_BULK)
};
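/*
 * Illustrative use (a sketch, not part of this file): callers index this
 * table with the four TOS bits, roughly as rt_tos2priority() in
 * <net/route.h> does:
 *
 *	return ip_tos2prio[IPTOS_TOS(tos) >> 1];
 */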
/* The locking scheme is rather straightforward:
 *
 * 1) Read-Copy Update protects the buckets of the central route hash.
 * 2) Only writers remove entries, and they hold the lock
 *    as they look at rtable reference counts.
 * 3) Only readers acquire references to rtable entries,
 *    they do so with atomic increments and with the
 *    RCU read-side lock held during the lookup.
 */
struct rt_hash_bucket {
    struct rtable __rcu *chain;
};
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
    defined(CONFIG_PROVE_LOCKING)
/*
 * Instead of using one spinlock for each rt_hash_bucket, we use a table of
 * spinlocks. The size of this table is a power of two and depends on the
 * number of CPUs. (on lockdep we have a quite big spinlock_t, so keep the
 * size down there)
 */
#ifdef CONFIG_LOCKDEP
# define RT_HASH_LOCK_SZ	256
#else
# if NR_CPUS >= 32
#  define RT_HASH_LOCK_SZ	4096
# elif NR_CPUS >= 16
#  define RT_HASH_LOCK_SZ	2048
# elif NR_CPUS >= 8
#  define RT_HASH_LOCK_SZ	1024
# elif NR_CPUS >= 4
#  define RT_HASH_LOCK_SZ	512
# else
#  define RT_HASH_LOCK_SZ	256
# endif
#endif
static spinlock_t	*rt_hash_locks;
# define rt_hash_lock_addr(slot) &rt_hash_locks[(slot) & (RT_HASH_LOCK_SZ - 1)]
static __init void rt_hash_lock_init(void)
{
    int i;

    rt_hash_locks = kmalloc(sizeof(spinlock_t) * RT_HASH_LOCK_SZ,
                            GFP_KERNEL);
    if (!rt_hash_locks)
        panic("IP: failed to allocate rt_hash_locks\n");

    for (i = 0; i < RT_HASH_LOCK_SZ; i++)
        spin_lock_init(&rt_hash_locks[i]);
}
#else
# define rt_hash_lock_addr(slot) NULL

static inline void rt_hash_lock_init(void)
{
}
#endif
static struct rt_hash_bucket	*rt_hash_table __read_mostly;
static unsigned			rt_hash_mask __read_mostly;
static unsigned int		rt_hash_log __read_mostly;

static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
#define RT_CACHE_STAT_INC(field) __this_cpu_inc(rt_cache_stat.field)
static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
                                   int genid)
{
    return jhash_3words((__force u32)daddr, (__force u32)saddr,
                        idx, genid) & rt_hash_mask;
}
static inline int rt_genid(struct net *net)
{
    return atomic_read(&net->ipv4.rt_genid);
}
#ifdef CONFIG_PROC_FS
struct rt_cache_iter_state {
    struct seq_net_private p;
    int bucket;
    int genid;
};
static struct rtable *rt_cache_get_first(struct seq_file *seq)
{
    struct rt_cache_iter_state *st = seq->private;
    struct rtable *r = NULL;

    for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) {
        if (!rcu_dereference_raw(rt_hash_table[st->bucket].chain))
            continue;
        rcu_read_lock_bh();
        r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
        while (r) {
            if (dev_net(r->dst.dev) == seq_file_net(seq) &&
                r->rt_genid == st->genid)
                return r;
            r = rcu_dereference_bh(r->dst.rt_next);
        }
        rcu_read_unlock_bh();
    }
    return r;
}
static struct rtable *__rt_cache_get_next(struct seq_file *seq,
                                          struct rtable *r)
{
    struct rt_cache_iter_state *st = seq->private;

    r = rcu_dereference_bh(r->dst.rt_next);
    while (!r) {
        rcu_read_unlock_bh();
        do {
            if (--st->bucket < 0)
                return NULL;
        } while (!rcu_dereference_raw(rt_hash_table[st->bucket].chain));
        rcu_read_lock_bh();
        r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
    }
    return r;
}
static struct rtable *rt_cache_get_next(struct seq_file *seq,
                                        struct rtable *r)
{
    struct rt_cache_iter_state *st = seq->private;
    while ((r = __rt_cache_get_next(seq, r)) != NULL) {
        if (dev_net(r->dst.dev) != seq_file_net(seq))
            continue;
        if (r->rt_genid == st->genid)
            break;
    }
    return r;
}
static struct rtable *rt_cache_get_idx(struct seq_file *seq, loff_t pos)
{
    struct rtable *r = rt_cache_get_first(seq);

    if (r)
        while (pos && (r = rt_cache_get_next(seq, r)))
            --pos;
    return pos ? NULL : r;
}
static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
{
    struct rt_cache_iter_state *st = seq->private;
    if (*pos)
        return rt_cache_get_idx(seq, *pos - 1);
    st->genid = rt_genid(seq_file_net(seq));
    return SEQ_START_TOKEN;
}
static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
    struct rtable *r;

    if (v == SEQ_START_TOKEN)
        r = rt_cache_get_first(seq);
    else
        r = rt_cache_get_next(seq, v);
    ++*pos;
    return r;
}
static void rt_cache_seq_stop(struct seq_file *seq, void *v)
{
    if (v && v != SEQ_START_TOKEN)
        rcu_read_unlock_bh();
}
static int rt_cache_seq_show(struct seq_file *seq, void *v)
{
    if (v == SEQ_START_TOKEN)
        seq_printf(seq, "%-127s\n",
                   "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
                   "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
                   "HHUptod\tSpecDst");
    else {
        struct rtable *r = v;
        int len;

        seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t"
                   "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
                   r->dst.dev ? r->dst.dev->name : "*",
                   (__force u32)r->rt_dst,
                   (__force u32)r->rt_gateway,
                   r->rt_flags, atomic_read(&r->dst.__refcnt),
                   r->dst.__use, 0, (__force u32)r->rt_src,
                   (dst_metric(&r->dst, RTAX_ADVMSS) ?
                    (int)dst_metric(&r->dst, RTAX_ADVMSS) + 40 : 0),
                   dst_metric(&r->dst, RTAX_WINDOW),
                   (int)((dst_metric(&r->dst, RTAX_RTT) >> 3) +
                         dst_metric(&r->dst, RTAX_RTTVAR)),
                   r->fl.fl4_tos,
                   r->dst.hh ? atomic_read(&r->dst.hh->hh_refcnt) : -1,
                   r->dst.hh ? (r->dst.hh->hh_output ==
                                dev_queue_xmit) : 0,
                   r->rt_spec_dst, &len);

        seq_printf(seq, "%*s\n", 127 - len, "");
    }
    return 0;
}
static const struct seq_operations rt_cache_seq_ops = {
    .start  = rt_cache_seq_start,
    .next   = rt_cache_seq_next,
    .stop   = rt_cache_seq_stop,
    .show   = rt_cache_seq_show,
};
static int rt_cache_seq_open(struct inode *inode, struct file *file)
{
    return seq_open_net(inode, file, &rt_cache_seq_ops,
                        sizeof(struct rt_cache_iter_state));
}
static const struct file_operations rt_cache_seq_fops = {
    .owner	 = THIS_MODULE,
    .open	 = rt_cache_seq_open,
    .read	 = seq_read,
    .llseek	 = seq_lseek,
    .release = seq_release_net,
};
static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
    int cpu;

    if (*pos == 0)
        return SEQ_START_TOKEN;

    for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
        if (!cpu_possible(cpu))
            continue;
        *pos = cpu+1;
        return &per_cpu(rt_cache_stat, cpu);
    }
    return NULL;
}
static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
    int cpu;

    for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
        if (!cpu_possible(cpu))
            continue;
        *pos = cpu+1;
        return &per_cpu(rt_cache_stat, cpu);
    }
    return NULL;
}
static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
{
}
static int rt_cpu_seq_show(struct seq_file *seq, void *v)
{
    struct rt_cache_stat *st = v;

    if (v == SEQ_START_TOKEN) {
        seq_printf(seq, "entries  in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src  out_hit out_slow_tot out_slow_mc  gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
        return 0;
    }

    seq_printf(seq, "%08x  %08x %08x %08x %08x %08x %08x %08x "
               " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
               dst_entries_get_slow(&ipv4_dst_ops),
               st->in_hit, st->in_slow_tot, st->in_slow_mc,
               st->in_no_route, st->in_brd, st->in_martian_dst,
               st->in_martian_src, st->out_hit, st->out_slow_tot,
               st->out_slow_mc, st->gc_total, st->gc_ignored,
               st->gc_goal_miss, st->gc_dst_overflow,
               st->in_hlist_search, st->out_hlist_search);
    return 0;
}
static const struct seq_operations rt_cpu_seq_ops = {
    .start  = rt_cpu_seq_start,
    .next   = rt_cpu_seq_next,
    .stop   = rt_cpu_seq_stop,
    .show   = rt_cpu_seq_show,
};
static int rt_cpu_seq_open(struct inode *inode, struct file *file)
{
    return seq_open(file, &rt_cpu_seq_ops);
}
static const struct file_operations rt_cpu_seq_fops = {
    .owner	 = THIS_MODULE,
    .open	 = rt_cpu_seq_open,
    .read	 = seq_read,
    .llseek	 = seq_lseek,
    .release = seq_release,
};
#ifdef CONFIG_NET_CLS_ROUTE
static int rt_acct_proc_show(struct seq_file *m, void *v)
{
    struct ip_rt_acct *dst, *src;
    unsigned int i, j;

    dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
    if (!dst)
        return -ENOMEM;

    for_each_possible_cpu(i) {
        src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
        for (j = 0; j < 256; j++) {
            dst[j].o_bytes   += src[j].o_bytes;
            dst[j].o_packets += src[j].o_packets;
            dst[j].i_bytes   += src[j].i_bytes;
            dst[j].i_packets += src[j].i_packets;
        }
    }

    seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
    kfree(dst);
    return 0;
}
static int rt_acct_proc_open(struct inode *inode, struct file *file)
{
    return single_open(file, rt_acct_proc_show, NULL);
}
static const struct file_operations rt_acct_proc_fops = {
    .owner	 = THIS_MODULE,
    .open	 = rt_acct_proc_open,
    .read	 = seq_read,
    .llseek	 = seq_lseek,
    .release = single_release,
};
#endif
static int __net_init ip_rt_do_proc_init(struct net *net)
{
    struct proc_dir_entry *pde;

    pde = proc_net_fops_create(net, "rt_cache", S_IRUGO,
                               &rt_cache_seq_fops);
    if (!pde)
        goto err1;

    pde = proc_create("rt_cache", S_IRUGO,
                      net->proc_net_stat, &rt_cpu_seq_fops);
    if (!pde)
        goto err2;

#ifdef CONFIG_NET_CLS_ROUTE
    pde = proc_create("rt_acct", 0, net->proc_net, &rt_acct_proc_fops);
    if (!pde)
        goto err3;
#endif
    return 0;

#ifdef CONFIG_NET_CLS_ROUTE
err3:
    remove_proc_entry("rt_cache", net->proc_net_stat);
#endif
err2:
    remove_proc_entry("rt_cache", net->proc_net);
err1:
    return -ENOMEM;
}
static void __net_exit ip_rt_do_proc_exit(struct net *net)
{
    remove_proc_entry("rt_cache", net->proc_net_stat);
    remove_proc_entry("rt_cache", net->proc_net);
#ifdef CONFIG_NET_CLS_ROUTE
    remove_proc_entry("rt_acct", net->proc_net);
#endif
}
static struct pernet_operations ip_rt_proc_ops __net_initdata = {
    .init = ip_rt_do_proc_init,
    .exit = ip_rt_do_proc_exit,
};
static int __init ip_rt_proc_init(void)
{
    return register_pernet_subsys(&ip_rt_proc_ops);
}
#else
static inline int ip_rt_proc_init(void)
{
    return 0;
}
#endif /* CONFIG_PROC_FS */
static inline void rt_free(struct rtable *rt)
{
    call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
}
static inline void rt_drop(struct rtable *rt)
{
    ip_rt_put(rt);
    call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
}
static inline int rt_fast_clean(struct rtable *rth)
{
    /* Kill broadcast/multicast entries very aggressively, if they
       collide in hash table with more useful entries */
    return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
        rt_is_input_route(rth) && rth->dst.rt_next;
}
static inline int rt_valuable(struct rtable *rth)
{
    return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) ||
        rth->dst.expires;
}
static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long tmo2)
{
    unsigned long age;
    int ret = 0;

    if (atomic_read(&rth->dst.__refcnt))
        goto out;

    ret = 1;
    if (rth->dst.expires &&
        time_after_eq(jiffies, rth->dst.expires))
        goto out;

    age = jiffies - rth->dst.lastuse;
    ret = 0;
    if ((age <= tmo1 && !rt_fast_clean(rth)) ||
        (age <= tmo2 && rt_valuable(rth)))
        goto out;
    ret = 1;
out:
    return ret;
}
/* Bits of score are:
 * 31: very valuable
 * 30: not quite useless
 * 29..0: usage counter
 */
static inline u32 rt_score(struct rtable *rt)
{
    u32 score = jiffies - rt->dst.lastuse;

    score = ~score & ~(3<<30);

    if (rt->rt_flags & RTCF_REDIRECTED)
        score |= (1<<31);

    if (rt_is_output_route(rt) ||
        !(rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL)))
        score |= (1<<30);

    return score;
}
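/*
 * Worked example (illustrative): given two unreferenced entries, an
 * output route last used 1000 jiffies ago and an input broadcast route
 * last used 10 jiffies ago, the output route gets bit 30 set and scores
 * higher, so the GC in rt_intern_hash() evicts the broadcast entry
 * first even though it is fresher; within the same class, a smaller age
 * yields a larger 29..0 field and thus a higher score.
 */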
static inline bool rt_caching(const struct net *net)
{
    return net->ipv4.current_rt_cache_rebuild_count <=
        net->ipv4.sysctl_rt_cache_rebuild_count;
}
static inline bool compare_hash_inputs(const struct flowi *fl1,
                                       const struct flowi *fl2)
{
    return ((((__force u32)fl1->fl4_dst ^ (__force u32)fl2->fl4_dst) |
             ((__force u32)fl1->fl4_src ^ (__force u32)fl2->fl4_src) |
             (fl1->iif ^ fl2->iif)) == 0);
}
static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
{
    return (((__force u32)fl1->fl4_dst ^ (__force u32)fl2->fl4_dst) |
            ((__force u32)fl1->fl4_src ^ (__force u32)fl2->fl4_src) |
            (fl1->mark ^ fl2->mark) |
            (*(u16 *)&fl1->fl4_tos ^ *(u16 *)&fl2->fl4_tos) |
            (fl1->oif ^ fl2->oif) |
            (fl1->iif ^ fl2->iif)) == 0;
}
static inline int compare_netns(struct rtable *rt1, struct rtable *rt2)
{
    return net_eq(dev_net(rt1->dst.dev), dev_net(rt2->dst.dev));
}
static inline int rt_is_expired(struct rtable *rth)
{
    return rth->rt_genid != rt_genid(dev_net(rth->dst.dev));
}
/*
 * Perform a full scan of hash table and free all entries.
 * Can be called by a softirq or a process.
 * In the latter case, we want to be rescheduled if necessary.
 */
static void rt_do_flush(int process_context)
{
    unsigned int i;
    struct rtable *rth, *next;
    struct rtable *tail;

    for (i = 0; i <= rt_hash_mask; i++) {
        if (process_context && need_resched())
            cond_resched();
        rth = rcu_dereference_raw(rt_hash_table[i].chain);
        if (!rth)
            continue;

        spin_lock_bh(rt_hash_lock_addr(i));
#ifdef CONFIG_NET_NS
        {
        struct rtable __rcu **prev;
        struct rtable *p;

        rth = rcu_dereference_protected(rt_hash_table[i].chain,
                lockdep_is_held(rt_hash_lock_addr(i)));

        /* defer releasing the head of the list after spin_unlock */
        for (tail = rth; tail;
             tail = rcu_dereference_protected(tail->dst.rt_next,
                     lockdep_is_held(rt_hash_lock_addr(i))))
            if (!rt_is_expired(tail))
                break;
        if (rth != tail)
            rt_hash_table[i].chain = tail;

        /* call rt_free on entries after the tail requiring flush */
        prev = &rt_hash_table[i].chain;
        for (p = rcu_dereference_protected(*prev,
                lockdep_is_held(rt_hash_lock_addr(i)));
             p != NULL;
             p = next) {
            next = rcu_dereference_protected(p->dst.rt_next,
                    lockdep_is_held(rt_hash_lock_addr(i)));
            if (!rt_is_expired(p)) {
                prev = &p->dst.rt_next;
            } else {
                *prev = next;
                rt_free(p);
            }
        }
        }
#else
        rth = rcu_dereference_protected(rt_hash_table[i].chain,
                lockdep_is_held(rt_hash_lock_addr(i)));
        rcu_assign_pointer(rt_hash_table[i].chain, NULL);
        tail = NULL;
#endif
        spin_unlock_bh(rt_hash_lock_addr(i));

        for (; rth != tail; rth = next) {
            next = rcu_dereference_protected(rth->dst.rt_next, 1);
            rt_free(rth);
        }
    }
}
/*
 * While freeing expired entries, we compute average chain length
 * and standard deviation, using fixed-point arithmetic.
 * This is to have an estimation of rt_chain_length_max:
 *   rt_chain_length_max = max(elasticity, AVG + 4*SD)
 * We use 3 bits for the fractional part, and 29 (or 61) for the magnitude.
 */

#define FRACT_BITS 3
#define ONE (1UL << FRACT_BITS)
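/*
 * Worked example (illustrative): with FRACT_BITS == 3, ONE == 8, so a
 * bucket holding two distinct flows accumulates length 2*ONE == 16;
 * avg and sd in rt_check_expire() are therefore in eighths of an entry,
 * and (avg + 4*sd) >> FRACT_BITS converts the bound back to whole
 * entries before it is stored in rt_chain_length_max.
 */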
/*
 * Given a hash chain and an item in this hash chain,
 * find if a previous entry has the same hash_inputs
 * (but differs on tos, mark or oif).
 * Returns 0 if an alias is found.
 * Returns ONE if rth has no alias before itself.
 */
static int has_noalias(const struct rtable *head, const struct rtable *rth)
{
    const struct rtable *aux = head;

    while (aux != rth) {
        if (compare_hash_inputs(&aux->fl, &rth->fl))
            return 0;
        aux = rcu_dereference_protected(aux->dst.rt_next, 1);
    }
    return ONE;
}
static void rt_check_expire(void)
{
    static unsigned int rover;
    unsigned int i = rover, goal;
    struct rtable *rth;
    struct rtable __rcu **rthp;
    unsigned long samples = 0;
    unsigned long sum = 0, sum2 = 0;
    unsigned long delta;
    u64 mult;

    delta = jiffies - expires_ljiffies;
    expires_ljiffies = jiffies;
    mult = ((u64)delta) << rt_hash_log;
    if (ip_rt_gc_timeout > 1)
        do_div(mult, ip_rt_gc_timeout);
    goal = (unsigned int)mult;
    if (goal > rt_hash_mask)
        goal = rt_hash_mask + 1;
    for (; goal > 0; goal--) {
        unsigned long tmo = ip_rt_gc_timeout;
        unsigned long length;

        i = (i + 1) & rt_hash_mask;
        rthp = &rt_hash_table[i].chain;

        if (need_resched())
            cond_resched();

        samples++;

        if (rcu_dereference_raw(*rthp) == NULL)
            continue;
        length = 0;
        spin_lock_bh(rt_hash_lock_addr(i));
        while ((rth = rcu_dereference_protected(*rthp,
                    lockdep_is_held(rt_hash_lock_addr(i)))) != NULL) {
            prefetch(rth->dst.rt_next);
            if (rt_is_expired(rth)) {
                *rthp = rth->dst.rt_next;
                rt_free(rth);
                continue;
            }
            if (rth->dst.expires) {
                /* Entry is expired even if it is in use */
                if (time_before_eq(jiffies, rth->dst.expires)) {
nofree:
                    tmo >>= 1;
                    rthp = &rth->dst.rt_next;
                    /*
                     * We only count entries on
                     * a chain with equal hash inputs once
                     * so that entries for different QOS
                     * levels, and other non-hash input
                     * attributes don't unfairly skew
                     * the length computation
                     */
                    length += has_noalias(rt_hash_table[i].chain, rth);
                    continue;
                }
            } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout))
                goto nofree;

            /* Cleanup aged off entries. */
            *rthp = rth->dst.rt_next;
            rt_free(rth);
        }
        spin_unlock_bh(rt_hash_lock_addr(i));
        sum += length;
        sum2 += length*length;
    }
    if (samples) {
        unsigned long avg = sum / samples;
        unsigned long sd = int_sqrt(sum2 / samples - avg*avg);
        rt_chain_length_max = max_t(unsigned long,
                                    ip_rt_gc_elasticity,
                                    (avg + 4*sd) >> FRACT_BITS);
    }
    rover = i;
}
/*
 * rt_worker_func() is run in process context.
 * We call rt_check_expire() to scan part of the hash table.
 */
static void rt_worker_func(struct work_struct *work)
{
    rt_check_expire();
    schedule_delayed_work(&expires_work, ip_rt_gc_interval);
}
/*
 * Perturbation of rt_genid by a small quantity [1..256].
 * Using 8 bits of shuffling ensures we can call rt_cache_invalidate()
 * many times (2^24) without reusing a recent rt_genid.
 * Jenkins hash is strong enough that little changes of rt_genid are OK.
 */
static void rt_cache_invalidate(struct net *net)
{
    unsigned char shuffle;

    get_random_bytes(&shuffle, sizeof(shuffle));
    atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
}
/*
 * delay < 0  : invalidate cache (fast : entries will be deleted later)
 * delay >= 0 : invalidate & flush cache (can be long)
 */
void rt_cache_flush(struct net *net, int delay)
{
    rt_cache_invalidate(net);
    if (delay >= 0)
        rt_do_flush(!in_softirq());
}
/* Flush previously cache-invalidated entries from the cache */
void rt_cache_flush_batch(void)
{
    rt_do_flush(!in_softirq());
}
static void rt_emergency_hash_rebuild(struct net *net)
{
    if (net_ratelimit())
        printk(KERN_WARNING "Route hash chain too long!\n");
    rt_cache_invalidate(net);
}
/*
   Short description of GC goals.

   We want to build an algorithm which will keep the routing cache
   at some equilibrium point, where the number of aged-off entries
   is kept approximately equal to the newly generated ones.

   Current expiration strength is the variable "expire".
   We try to adjust it dynamically, so that if networking
   is idle expire is large enough to keep enough warm entries,
   and when load increases it reduces to limit the cache size.
 */
static int rt_garbage_collect(struct dst_ops *ops)
{
    static unsigned long expire = RT_GC_TIMEOUT;
    static unsigned long last_gc;
    static int rover;
    static int equilibrium;
    struct rtable *rth;
    struct rtable __rcu **rthp;
    unsigned long now = jiffies;
    int goal;
    int entries = dst_entries_get_fast(&ipv4_dst_ops);

    /*
     * Garbage collection is pretty expensive,
     * do not run it too frequently.
     */

    RT_CACHE_STAT_INC(gc_total);

    if (now - last_gc < ip_rt_gc_min_interval &&
        entries < ip_rt_max_size) {
        RT_CACHE_STAT_INC(gc_ignored);
        goto out;
    }

    entries = dst_entries_get_slow(&ipv4_dst_ops);
    /* Calculate number of entries, which we want to expire now. */
    goal = entries - (ip_rt_gc_elasticity << rt_hash_log);
    if (goal <= 0) {
        if (equilibrium < ipv4_dst_ops.gc_thresh)
            equilibrium = ipv4_dst_ops.gc_thresh;
        goal = entries - equilibrium;
        if (goal > 0) {
            equilibrium += min_t(unsigned int, goal >> 1, rt_hash_mask + 1);
            goal = entries - equilibrium;
        }
    } else {
        /* We are in a dangerous area. Try to reduce the cache really
         * aggressively.
         */
        goal = max_t(unsigned int, goal >> 1, rt_hash_mask + 1);
        equilibrium = entries - goal;
    }

    if (now - last_gc >= ip_rt_gc_min_interval)
        last_gc = now;

    if (goal <= 0) {
        equilibrium += goal;
        goto work_done;
    }

    do {
        int i, k;

        for (i = rt_hash_mask, k = rover; i >= 0; i--) {
            unsigned long tmo = expire;

            k = (k + 1) & rt_hash_mask;
            rthp = &rt_hash_table[k].chain;
            spin_lock_bh(rt_hash_lock_addr(k));
            while ((rth = rcu_dereference_protected(*rthp,
                        lockdep_is_held(rt_hash_lock_addr(k)))) != NULL) {
                if (!rt_is_expired(rth) &&
                    !rt_may_expire(rth, tmo, expire)) {
                    tmo >>= 1;
                    rthp = &rth->dst.rt_next;
                    continue;
                }
                *rthp = rth->dst.rt_next;
                rt_free(rth);
                goal--;
            }
            spin_unlock_bh(rt_hash_lock_addr(k));
            if (goal <= 0)
                break;
        }
        rover = k;

        if (goal <= 0)
            goto work_done;

        /* Goal is not achieved. We stop the process if:

           - expire was reduced to zero; otherwise, expire is halved.
           - the table is not full.
           - we are called from interrupt context.
           The jiffies check is just a fallback/debug loop breaker;
           we will not spin here for a long time in any case.
         */

        RT_CACHE_STAT_INC(gc_goal_miss);

        if (expire == 0)
            break;

        expire >>= 1;
#if RT_CACHE_DEBUG >= 2
        printk(KERN_DEBUG "expire>> %u %d %d %d\n", expire,
               dst_entries_get_fast(&ipv4_dst_ops), goal, i);
#endif

        if (dst_entries_get_fast(&ipv4_dst_ops) < ip_rt_max_size)
            goto out;
    } while (!in_softirq() && time_before_eq(jiffies, now));

    if (dst_entries_get_fast(&ipv4_dst_ops) < ip_rt_max_size)
        goto out;
    if (dst_entries_get_slow(&ipv4_dst_ops) < ip_rt_max_size)
        goto out;
    if (net_ratelimit())
        printk(KERN_WARNING "dst cache overflow\n");
    RT_CACHE_STAT_INC(gc_dst_overflow);
    return 1;

work_done:
    expire += ip_rt_gc_min_interval;
    if (expire > ip_rt_gc_timeout ||
        dst_entries_get_fast(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh ||
        dst_entries_get_slow(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh)
        expire = ip_rt_gc_timeout;
#if RT_CACHE_DEBUG >= 2
    printk(KERN_DEBUG "expire++ %u %d %d %d\n", expire,
           dst_entries_get_fast(&ipv4_dst_ops), goal, rover);
#endif
out:
    return 0;
}
/*
 * Returns the number of entries in a hash chain that have different hash_inputs
 */
static int slow_chain_length(const struct rtable *head)
{
    int length = 0;
    const struct rtable *rth = head;

    while (rth) {
        length += has_noalias(head, rth);
        rth = rcu_dereference_protected(rth->dst.rt_next, 1);
    }
    return length >> FRACT_BITS;
}
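/*
 * Worked example (illustrative): a chain of 5 entries that all share the
 * same (daddr, saddr, iif) hash inputs -- e.g. the same flow under 5
 * different TOS values -- counts as length 1 here, because only the
 * first entry of the alias group contributes ONE; 5 unrelated flows
 * count as 5.
 */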
static int rt_intern_hash(unsigned hash, struct rtable *rt,
                          struct rtable **rp, struct sk_buff *skb, int ifindex)
{
    struct rtable *rth, *cand;
    struct rtable __rcu **rthp, **candp;
    unsigned long now;
    u32 min_score;
    int chain_length;
    int attempts = !in_softirq();

restart:
    chain_length = 0;
    min_score = ~(u32)0;
    cand = NULL;
    candp = NULL;
    now = jiffies;

    if (!rt_caching(dev_net(rt->dst.dev))) {
        /*
         * If we're not caching, just tell the caller we
         * were successful and don't touch the route. The
         * caller holds the sole reference to the cache entry, and
         * it will be released when the caller is done with it.
         * If we drop it here, the callers have no way to resolve routes
         * when we're not caching. Instead, just point *rp at rt, so
         * the caller gets a single use out of the route.
         * Note that we do rt_free on this new route entry, so that
         * once its refcount hits zero, we are still able to reap it.
         * Note: To avoid expensive rcu stuff for this uncached dst,
         * we set DST_NOCACHE so that dst_release() can free dst without
         * waiting for a grace period.
         */

        rt->dst.flags |= DST_NOCACHE;
        if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
            int err = arp_bind_neighbour(&rt->dst);
            if (err) {
                if (net_ratelimit())
                    printk(KERN_WARNING
                           "Neighbour table failure & not caching routes.\n");
                ip_rt_put(rt);
                return err;
            }
        }

        goto skip_hashing;
    }

    rthp = &rt_hash_table[hash].chain;

    spin_lock_bh(rt_hash_lock_addr(hash));
    while ((rth = rcu_dereference_protected(*rthp,
                lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) {
        if (rt_is_expired(rth)) {
            *rthp = rth->dst.rt_next;
            rt_free(rth);
            continue;
        }
        if (compare_keys(&rth->fl, &rt->fl) && compare_netns(rth, rt)) {
            /* Put it first */
            *rthp = rth->dst.rt_next;
            /*
             * Since lookup is lockfree, the deletion
             * must be visible to another weakly ordered CPU before
             * the insertion at the start of the hash chain.
             */
            rcu_assign_pointer(rth->dst.rt_next,
                               rt_hash_table[hash].chain);
            /*
             * Since lookup is lockfree, the update writes
             * must be ordered for consistency on SMP.
             */
            rcu_assign_pointer(rt_hash_table[hash].chain, rth);

            dst_use(&rth->dst, now);
            spin_unlock_bh(rt_hash_lock_addr(hash));

            rt_drop(rt);
            if (rp)
                *rp = rth;
            else
                skb_dst_set(skb, &rth->dst);
            return 0;
        }

        if (!atomic_read(&rth->dst.__refcnt)) {
            u32 score = rt_score(rth);

            if (score <= min_score) {
                cand = rth;
                candp = rthp;
                min_score = score;
            }
        }

        chain_length++;

        rthp = &rth->dst.rt_next;
    }

    if (cand) {
        /* ip_rt_gc_elasticity used to be average length of chain
         * length, when exceeded gc becomes really aggressive.
         *
         * The second limit is less certain. At the moment it allows
         * only 2 entries per bucket. We will see.
         */
        if (chain_length > ip_rt_gc_elasticity) {
            *candp = cand->dst.rt_next;
            rt_free(cand);
        }
    } else {
        if (chain_length > rt_chain_length_max &&
            slow_chain_length(rt_hash_table[hash].chain) > rt_chain_length_max) {
            struct net *net = dev_net(rt->dst.dev);
            int num = ++net->ipv4.current_rt_cache_rebuild_count;
            if (!rt_caching(net)) {
                printk(KERN_WARNING "%s: %d rebuilds is over limit, route caching disabled\n",
                       rt->dst.dev->name, num);
            }
            rt_emergency_hash_rebuild(net);
            spin_unlock_bh(rt_hash_lock_addr(hash));

            hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src,
                           ifindex, rt_genid(net));
            goto restart;
        }
    }

    /* Try to bind route to arp only if it is output
       route or unicast forwarding path.
     */
    if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
        int err = arp_bind_neighbour(&rt->dst);
        if (err) {
            spin_unlock_bh(rt_hash_lock_addr(hash));

            if (err != -ENOBUFS) {
                rt_drop(rt);
                return err;
            }

            /* Neighbour tables are full and nothing
               can be released. Try to shrink the route cache;
               it most likely holds some neighbour records.
             */
            if (attempts-- > 0) {
                int saved_elasticity = ip_rt_gc_elasticity;
                int saved_int = ip_rt_gc_min_interval;
                ip_rt_gc_elasticity = 1;
                ip_rt_gc_min_interval = 0;
                rt_garbage_collect(&ipv4_dst_ops);
                ip_rt_gc_min_interval = saved_int;
                ip_rt_gc_elasticity = saved_elasticity;
                goto restart;
            }

            if (net_ratelimit())
                printk(KERN_WARNING "ipv4: Neighbour table overflow.\n");
            rt_drop(rt);
            return -ENOBUFS;
        }
    }

    rt->dst.rt_next = rt_hash_table[hash].chain;

#if RT_CACHE_DEBUG >= 2
    if (rt->dst.rt_next) {
        struct rtable *trt;
        printk(KERN_DEBUG "rt_cache @%02x: %pI4",
               hash, &rt->rt_dst);
        for (trt = rt->dst.rt_next; trt; trt = trt->dst.rt_next)
            printk(" . %pI4", &trt->rt_dst);
        printk("\n");
    }
#endif
    /*
     * Since lookup is lockfree, we must make sure
     * previous writes to rt are committed to memory
     * before making rt visible to other CPUS.
     */
    rcu_assign_pointer(rt_hash_table[hash].chain, rt);

    spin_unlock_bh(rt_hash_lock_addr(hash));

skip_hashing:
    if (rp)
        *rp = rt;
    else
        skb_dst_set(skb, &rt->dst);
    return 0;
}
void rt_bind_peer(struct rtable *rt, int create)
{
    struct inet_peer *peer;

    peer = inet_getpeer_v4(rt->rt_dst, create);

    if (peer && cmpxchg(&rt->peer, NULL, peer) != NULL)
        inet_putpeer(peer);
}
/*
 * Peer allocation may fail only in serious out-of-memory conditions. However
 * we can still generate some output.
 * Random ID selection looks a bit dangerous because we have no chance to
 * select an ID that is unique over a reasonable period of time.
 * But a broken packet identifier may be better than no packet at all.
 */
static void ip_select_fb_ident(struct iphdr *iph)
{
    static DEFINE_SPINLOCK(ip_fb_id_lock);
    static u32 ip_fallback_id;
    u32 salt;

    spin_lock_bh(&ip_fb_id_lock);
    salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr);
    iph->id = htons(salt & 0xFFFF);
    ip_fallback_id = salt;
    spin_unlock_bh(&ip_fb_id_lock);
}
void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
{
    struct rtable *rt = (struct rtable *) dst;

    if (rt) {
        if (rt->peer == NULL)
            rt_bind_peer(rt, 1);

        /* If peer is attached to destination, it is never detached,
           so we need not grab a lock to dereference it.
         */
        if (rt->peer) {
            iph->id = htons(inet_getid(rt->peer, more));
            return;
        }
    } else
        printk(KERN_DEBUG "rt_bind_peer(0) @%p\n",
               __builtin_return_address(0));

    ip_select_fb_ident(iph);
}
EXPORT_SYMBOL(__ip_select_ident);
static void rt_del(unsigned hash, struct rtable *rt)
{
    struct rtable __rcu **rthp;
    struct rtable *aux;

    rthp = &rt_hash_table[hash].chain;
    spin_lock_bh(rt_hash_lock_addr(hash));
    ip_rt_put(rt);
    while ((aux = rcu_dereference_protected(*rthp,
                lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) {
        if (aux == rt || rt_is_expired(aux)) {
            *rthp = aux->dst.rt_next;
            rt_free(aux);
            continue;
        }
        rthp = &aux->dst.rt_next;
    }
    spin_unlock_bh(rt_hash_lock_addr(hash));
}
/* called in rcu_read_lock() section */
void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
                    __be32 saddr, struct net_device *dev)
{
    int i, k;
    struct in_device *in_dev = __in_dev_get_rcu(dev);
    struct rtable *rth;
    struct rtable __rcu **rthp;
    __be32 skeys[2] = { saddr, 0 };
    int ikeys[2] = { dev->ifindex, 0 };
    struct netevent_redirect netevent;
    struct net *net;

    if (!in_dev)
        return;

    net = dev_net(dev);
    if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
        ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
        ipv4_is_zeronet(new_gw))
        goto reject_redirect;

    if (!rt_caching(net))
        goto reject_redirect;

    if (!IN_DEV_SHARED_MEDIA(in_dev)) {
        if (!inet_addr_onlink(in_dev, new_gw, old_gw))
            goto reject_redirect;
        if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
            goto reject_redirect;
    } else {
        if (inet_addr_type(net, new_gw) != RTN_UNICAST)
            goto reject_redirect;
    }

    for (i = 0; i < 2; i++) {
        for (k = 0; k < 2; k++) {
            unsigned hash = rt_hash(daddr, skeys[i], ikeys[k],
                                    rt_genid(net));

            rthp = &rt_hash_table[hash].chain;

            while ((rth = rcu_dereference(*rthp)) != NULL) {
                struct rtable *rt;

                if (rth->fl.fl4_dst != daddr ||
                    rth->fl.fl4_src != skeys[i] ||
                    rth->fl.oif != ikeys[k] ||
                    rt_is_input_route(rth) ||
                    rt_is_expired(rth) ||
                    !net_eq(dev_net(rth->dst.dev), net)) {
                    rthp = &rth->dst.rt_next;
                    continue;
                }

                if (rth->rt_dst != daddr ||
                    rth->rt_src != saddr ||
                    rth->dst.error ||
                    rth->rt_gateway != old_gw ||
                    rth->dst.dev != dev)
                    break;

                dst_hold(&rth->dst);

                rt = dst_alloc(&ipv4_dst_ops);
                if (rt == NULL) {
                    ip_rt_put(rth);
                    return;
                }

                /* Copy all the information. */
                *rt = *rth;
                rt->dst.__use = 1;
                atomic_set(&rt->dst.__refcnt, 1);
                rt->dst.child = NULL;
                if (rt->dst.dev)
                    dev_hold(rt->dst.dev);
                rt->dst.obsolete = -1;
                rt->dst.lastuse = jiffies;
                rt->dst.path = &rt->dst;
                rt->dst.neighbour = NULL;
                rt->dst.hh = NULL;
#ifdef CONFIG_XFRM
                rt->dst.xfrm = NULL;
#endif
                rt->rt_genid = rt_genid(net);
                rt->rt_flags |= RTCF_REDIRECTED;

                /* Gateway is different ... */
                rt->rt_gateway = new_gw;

                /* Redirect received -> path was valid */
                dst_confirm(&rth->dst);

                if (rt->peer)
                    atomic_inc(&rt->peer->refcnt);

                if (arp_bind_neighbour(&rt->dst) ||
                    !(rt->dst.neighbour->nud_state &
                      NUD_VALID)) {
                    if (rt->dst.neighbour)
                        neigh_event_send(rt->dst.neighbour, NULL);
                    ip_rt_put(rth);
                    rt_drop(rt);
                    goto do_next;
                }

                netevent.old = &rth->dst;
                netevent.new = &rt->dst;
                call_netevent_notifiers(NETEVENT_REDIRECT,
                                        &netevent);

                rt_del(hash, rth);
                if (!rt_intern_hash(hash, rt, &rt, NULL, rt->fl.oif))
                    ip_rt_put(rt);
                goto do_next;
            }
        do_next:
            ;
        }
    }
    return;

reject_redirect:
#ifdef CONFIG_IP_ROUTE_VERBOSE
    if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
        printk(KERN_INFO "Redirect from %pI4 on %s about %pI4 ignored.\n"
               "  Advised path = %pI4 -> %pI4\n",
               &old_gw, dev->name, &new_gw,
               &saddr, &daddr);
#endif
    ;
}
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
{
    struct rtable *rt = (struct rtable *)dst;
    struct dst_entry *ret = dst;

    if (rt) {
        if (dst->obsolete > 0) {
            ip_rt_put(rt);
            ret = NULL;
        } else if ((rt->rt_flags & RTCF_REDIRECTED) ||
                   (rt->dst.expires &&
                    time_after_eq(jiffies, rt->dst.expires))) {
            unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src,
                                    rt->fl.oif,
                                    rt_genid(dev_net(dst->dev)));
#if RT_CACHE_DEBUG >= 1
            printk(KERN_DEBUG "ipv4_negative_advice: redirect to %pI4/%02x dropped\n",
                   &rt->rt_dst, rt->fl.fl4_tos);
#endif
            rt_del(hash, rt);
            ret = NULL;
        }
    }
    return ret;
}
/*
 * Algorithm:
 *	1. The first ip_rt_redirect_number redirects are sent
 *	   with exponential backoff, then we stop sending them at all,
 *	   assuming that the host ignores our redirects.
 *	2. If we did not see packets requiring redirects
 *	   during ip_rt_redirect_silence, we assume that the host
 *	   forgot the redirected route and start to send redirects again.
 *
 * This algorithm is much cheaper and more intelligent than dumb load limiting
 * in icmp.c.
 *
 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
 * and "frag. need" (breaks PMTU discovery) in icmp.c.
 */
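/*
 * Worked timeline, assuming HZ == 1000 and the defaults above
 * (illustrative): after k redirects have been sent, the next one is
 * allowed only once rate_last + (ip_rt_redirect_load << k) has passed,
 * i.e. gaps of 20 ms, 40 ms, ... up to ~5.1 s; once
 * ip_rt_redirect_number (9) redirects have been ignored we stay silent
 * until ip_rt_redirect_silence (~20.5 s) elapses without
 * redirect-worthy traffic, which resets rate_tokens to zero below.
 */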
void ip_rt_send_redirect(struct sk_buff *skb)
{
    struct rtable *rt = skb_rtable(skb);
    struct in_device *in_dev;
    int log_martians;

    rcu_read_lock();
    in_dev = __in_dev_get_rcu(rt->dst.dev);
    if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
        rcu_read_unlock();
        return;
    }
    log_martians = IN_DEV_LOG_MARTIANS(in_dev);
    rcu_read_unlock();

    /* No redirected packets during ip_rt_redirect_silence;
     * reset the algorithm.
     */
    if (time_after(jiffies, rt->dst.rate_last + ip_rt_redirect_silence))
        rt->dst.rate_tokens = 0;

    /* Too many ignored redirects; do not send anything;
     * set dst.rate_last to the last seen redirected packet.
     */
    if (rt->dst.rate_tokens >= ip_rt_redirect_number) {
        rt->dst.rate_last = jiffies;
        return;
    }

    /* Check for load limit; set rate_last to the latest sent
     * redirect.
     */
    if (rt->dst.rate_tokens == 0 ||
        time_after(jiffies,
                   (rt->dst.rate_last +
                    (ip_rt_redirect_load << rt->dst.rate_tokens)))) {
        icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
        rt->dst.rate_last = jiffies;
        ++rt->dst.rate_tokens;
#ifdef CONFIG_IP_ROUTE_VERBOSE
        if (log_martians &&
            rt->dst.rate_tokens == ip_rt_redirect_number &&
            net_ratelimit())
            printk(KERN_WARNING "host %pI4/if%d ignores redirects for %pI4 to %pI4.\n",
                   &rt->rt_src, rt->rt_iif,
                   &rt->rt_dst, &rt->rt_gateway);
#endif
    }
}
static int ip_error(struct sk_buff *skb)
{
    struct rtable *rt = skb_rtable(skb);
    unsigned long now;
    int code;

    switch (rt->dst.error) {
    case EINVAL:
    default:
        goto out;
    case EHOSTUNREACH:
        code = ICMP_HOST_UNREACH;
        break;
    case ENETUNREACH:
        code = ICMP_NET_UNREACH;
        IP_INC_STATS_BH(dev_net(rt->dst.dev),
                        IPSTATS_MIB_INNOROUTES);
        break;
    case EACCES:
        code = ICMP_PKT_FILTERED;
        break;
    }

    now = jiffies;
    rt->dst.rate_tokens += now - rt->dst.rate_last;
    if (rt->dst.rate_tokens > ip_rt_error_burst)
        rt->dst.rate_tokens = ip_rt_error_burst;
    rt->dst.rate_last = now;
    if (rt->dst.rate_tokens >= ip_rt_error_cost) {
        rt->dst.rate_tokens -= ip_rt_error_cost;
        icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
    }

out:
    kfree_skb(skb);
    return 0;
}
/*
 *	The last two values are not from the RFC but
 *	are needed for AMPRnet AX.25 paths.
 */

static const unsigned short mtu_plateau[] =
{32000, 17914, 8166, 4352, 2002, 1492, 576, 296, 216, 128 };

static inline unsigned short guess_mtu(unsigned short old_mtu)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(mtu_plateau); i++)
        if (old_mtu > mtu_plateau[i])
            return mtu_plateau[i];

    return 68;
}
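/*
 * Worked example (illustrative): guess_mtu(1500) returns 1492 (the
 * next lower plateau), guess_mtu(576) returns 296, and anything at or
 * below the smallest plateau (128) falls through to the minimum of 68.
 */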
unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
                                 unsigned short new_mtu,
                                 struct net_device *dev)
{
    int i, k;
    unsigned short old_mtu = ntohs(iph->tot_len);
    struct rtable *rth;
    int ikeys[2] = { dev->ifindex, 0 };
    __be32 skeys[2] = { iph->saddr, 0, };
    __be32 daddr = iph->daddr;
    unsigned short est_mtu = 0;

    for (k = 0; k < 2; k++) {
        for (i = 0; i < 2; i++) {
            unsigned hash = rt_hash(daddr, skeys[i], ikeys[k],
                                    rt_genid(net));

            rcu_read_lock();
            for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
                 rth = rcu_dereference(rth->dst.rt_next)) {
                unsigned short mtu = new_mtu;

                if (rth->fl.fl4_dst != daddr ||
                    rth->fl.fl4_src != skeys[i] ||
                    rth->rt_dst != daddr ||
                    rth->rt_src != iph->saddr ||
                    rth->fl.oif != ikeys[k] ||
                    rt_is_input_route(rth) ||
                    dst_metric_locked(&rth->dst, RTAX_MTU) ||
                    !net_eq(dev_net(rth->dst.dev), net) ||
                    rt_is_expired(rth))
                    continue;

                if (new_mtu < 68 || new_mtu >= old_mtu) {

                    /* BSD 4.2 compatibility hack :-( */
                    if (mtu == 0 &&
                        old_mtu >= dst_mtu(&rth->dst) &&
                        old_mtu >= 68 + (iph->ihl << 2))
                        old_mtu -= iph->ihl << 2;

                    mtu = guess_mtu(old_mtu);
                }
                if (mtu <= dst_mtu(&rth->dst)) {
                    if (mtu < dst_mtu(&rth->dst)) {
                        dst_confirm(&rth->dst);
                        if (mtu < ip_rt_min_pmtu) {
                            u32 lock = dst_metric(&rth->dst,
                                                  RTAX_LOCK);
                            mtu = ip_rt_min_pmtu;
                            lock |= (1 << RTAX_MTU);
                            dst_metric_set(&rth->dst, RTAX_LOCK,
                                           lock);
                        }
                        dst_metric_set(&rth->dst, RTAX_MTU, mtu);
                        dst_set_expires(&rth->dst,
                                        ip_rt_mtu_expires);
                    }
                    est_mtu = mtu;
                }
            }
            rcu_read_unlock();
        }
    }
    return est_mtu ? : new_mtu;
}
static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
{
    if (dst_mtu(dst) > mtu && mtu >= 68 &&
        !(dst_metric_locked(dst, RTAX_MTU))) {
        if (mtu < ip_rt_min_pmtu) {
            u32 lock = dst_metric(dst, RTAX_LOCK);
            mtu = ip_rt_min_pmtu;
            dst_metric_set(dst, RTAX_LOCK, lock | (1 << RTAX_MTU));
        }
        dst_metric_set(dst, RTAX_MTU, mtu);
        dst_set_expires(dst, ip_rt_mtu_expires);
        call_netevent_notifiers(NETEVENT_PMTU_UPDATE, dst);
    }
}
static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
{
    if (rt_is_expired((struct rtable *)dst))
        return NULL;
    return dst;
}
static void ipv4_dst_destroy(struct dst_entry *dst)
{
    struct rtable *rt = (struct rtable *) dst;
    struct inet_peer *peer = rt->peer;

    if (peer) {
        rt->peer = NULL;
        inet_putpeer(peer);
    }
}
static void ipv4_link_failure(struct sk_buff *skb)
{
    struct rtable *rt;

    icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);

    rt = skb_rtable(skb);
    if (rt)
        dst_set_expires(&rt->dst, 0);
}
static int ip_rt_bug(struct sk_buff *skb)
{
    printk(KERN_DEBUG "ip_rt_bug: %pI4 -> %pI4, %s\n",
           &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
           skb->dev ? skb->dev->name : "?");
    kfree_skb(skb);
    return 0;
}
/*
   We do not cache the source address of the outgoing interface,
   because it is used only by IP RR, TS and SRR options,
   so that it is out of the fast path.

   BTW remember: "addr" is allowed to be unaligned.
 */

void ip_rt_get_source(u8 *addr, struct rtable *rt)
{
    __be32 src;
    struct fib_result res;

    if (rt_is_output_route(rt))
        src = rt->rt_src;
    else {
        rcu_read_lock();
        if (fib_lookup(dev_net(rt->dst.dev), &rt->fl, &res) == 0)
            src = FIB_RES_PREFSRC(res);
        else
            src = inet_select_addr(rt->dst.dev, rt->rt_gateway,
                                   RT_SCOPE_UNIVERSE);
        rcu_read_unlock();
    }
    memcpy(addr, &src, 4);
}
#ifdef CONFIG_NET_CLS_ROUTE
static void set_class_tag(struct rtable *rt, u32 tag)
{
    if (!(rt->dst.tclassid & 0xFFFF))
        rt->dst.tclassid |= tag & 0xFFFF;
    if (!(rt->dst.tclassid & 0xFFFF0000))
        rt->dst.tclassid |= tag & 0xFFFF0000;
}
#endif
static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag)
{
    struct dst_entry *dst = &rt->dst;
    struct fib_info *fi = res->fi;

    if (fi) {
        if (FIB_RES_GW(*res) &&
            FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
            rt->rt_gateway = FIB_RES_GW(*res);
        dst_import_metrics(dst, fi->fib_metrics);
        if (fi->fib_mtu == 0) {
            dst_metric_set(dst, RTAX_MTU, dst->dev->mtu);
            if (dst_metric_locked(dst, RTAX_MTU) &&
                rt->rt_gateway != rt->rt_dst &&
                dst->dev->mtu > 576)
                dst_metric_set(dst, RTAX_MTU, 576);
        }
#ifdef CONFIG_NET_CLS_ROUTE
        dst->tclassid = FIB_RES_NH(*res).nh_tclassid;
#endif
    } else
        dst_metric_set(dst, RTAX_MTU, dst->dev->mtu);

    if (dst_metric(dst, RTAX_HOPLIMIT) == 0)
        dst_metric_set(dst, RTAX_HOPLIMIT, sysctl_ip_default_ttl);
    if (dst_mtu(dst) > IP_MAX_MTU)
        dst_metric_set(dst, RTAX_MTU, IP_MAX_MTU);
    if (dst_metric(dst, RTAX_ADVMSS) == 0)
        dst_metric_set(dst, RTAX_ADVMSS,
                       max_t(unsigned int, dst->dev->mtu - 40,
                             ip_rt_min_advmss));
    if (dst_metric(dst, RTAX_ADVMSS) > 65535 - 40)
        dst_metric_set(dst, RTAX_ADVMSS, 65535 - 40);

#ifdef CONFIG_NET_CLS_ROUTE
#ifdef CONFIG_IP_MULTIPLE_TABLES
    set_class_tag(rt, fib_rules_tclass(res));
#endif
    set_class_tag(rt, itag);
#endif
    rt->rt_type = res->type;
}
/* called in rcu_read_lock() section */
static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
                             u8 tos, struct net_device *dev, int our)
{
    unsigned hash;
    struct rtable *rth;
    __be32 spec_dst;
    struct in_device *in_dev = __in_dev_get_rcu(dev);
    u32 itag = 0;
    int err;

    /* Primary sanity checks. */

    if (in_dev == NULL)
        return -EINVAL;

    if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
        ipv4_is_loopback(saddr) || skb->protocol != htons(ETH_P_IP))
        goto e_inval;

    if (ipv4_is_zeronet(saddr)) {
        if (!ipv4_is_local_multicast(daddr))
            goto e_inval;
        spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
    } else {
        err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst,
                                  &itag, 0);
        if (err < 0)
            goto e_err;
    }
    rth = dst_alloc(&ipv4_dst_ops);
    if (!rth)
        goto e_nobufs;

    rth->dst.output = ip_rt_bug;
    rth->dst.obsolete = -1;

    atomic_set(&rth->dst.__refcnt, 1);
    rth->dst.flags = DST_HOST;
    if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
        rth->dst.flags |= DST_NOPOLICY;
    rth->fl.fl4_dst	= daddr;
    rth->rt_dst	= daddr;
    rth->fl.fl4_tos	= tos;
    rth->fl.mark	= skb->mark;
    rth->fl.fl4_src	= saddr;
    rth->rt_src	= saddr;
#ifdef CONFIG_NET_CLS_ROUTE
    rth->dst.tclassid = itag;
#endif
    rth->rt_iif	=
    rth->fl.iif	= dev->ifindex;
    rth->dst.dev	= init_net.loopback_dev;
    dev_hold(rth->dst.dev);
    rth->fl.oif	= 0;
    rth->rt_gateway	= daddr;
    rth->rt_spec_dst = spec_dst;
    rth->rt_genid	= rt_genid(dev_net(dev));
    rth->rt_flags	= RTCF_MULTICAST;
    rth->rt_type	= RTN_MULTICAST;
    if (our) {
        rth->dst.input = ip_local_deliver;
        rth->rt_flags |= RTCF_LOCAL;
    }

#ifdef CONFIG_IP_MROUTE
    if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
        rth->dst.input = ip_mr_input;
#endif
    RT_CACHE_STAT_INC(in_slow_mc);

    hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev)));
    return rt_intern_hash(hash, rth, NULL, skb, dev->ifindex);

e_nobufs:
    return -ENOBUFS;
e_inval:
    return -EINVAL;
e_err:
    return err;
}
static void ip_handle_martian_source(struct net_device *dev,
                                     struct in_device *in_dev,
                                     struct sk_buff *skb,
                                     __be32 daddr,
                                     __be32 saddr)
{
    RT_CACHE_STAT_INC(in_martian_src);
#ifdef CONFIG_IP_ROUTE_VERBOSE
    if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
        /*
         * RFC1812 recommendation: if the source is martian,
         * the only hint is the MAC header.
         */
        printk(KERN_WARNING "martian source %pI4 from %pI4, on dev %s\n",
               &daddr, &saddr, dev->name);
        if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
            int i;
            const unsigned char *p = skb_mac_header(skb);
            printk(KERN_WARNING "ll header: ");
            for (i = 0; i < dev->hard_header_len; i++, p++) {
                printk("%02x", *p);
                if (i < (dev->hard_header_len - 1))
                    printk(":");
            }
            printk("\n");
        }
    }
#endif
}
/* called in rcu_read_lock() section */
static int __mkroute_input(struct sk_buff *skb,
                           struct fib_result *res,
                           struct in_device *in_dev,
                           __be32 daddr, __be32 saddr, u32 tos,
                           struct rtable **result)
{
    struct rtable *rth;
    int err;
    struct in_device *out_dev;
    unsigned int flags = 0;
    __be32 spec_dst;
    u32 itag;

    /* get a working reference to the output device */
    out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
    if (out_dev == NULL) {
        if (net_ratelimit())
            printk(KERN_CRIT "Bug in ip_route_input" \
                   "_slow(). Please, report\n");
        return -EINVAL;
    }

    err = fib_validate_source(saddr, daddr, tos, FIB_RES_OIF(*res),
                              in_dev->dev, &spec_dst, &itag, skb->mark);
    if (err < 0) {
        ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
                                 saddr);

        goto cleanup;
    }

    if (err)
        flags |= RTCF_DIRECTSRC;

    if (out_dev == in_dev && err &&
        (IN_DEV_SHARED_MEDIA(out_dev) ||
         inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
        flags |= RTCF_DOREDIRECT;

    if (skb->protocol != htons(ETH_P_IP)) {
        /* Not IP (i.e. ARP). Do not create route, if it is
         * invalid for proxy arp. DNAT routes are always valid.
         *
         * The proxy arp feature has been extended to allow ARP
         * replies back to the same interface, to support
         * Private VLAN switch technologies. See arp.c.
         */
        if (out_dev == in_dev &&
            IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
            err = -EINVAL;
            goto cleanup;
        }
    }

    rth = dst_alloc(&ipv4_dst_ops);
    if (!rth) {
        err = -ENOBUFS;
        goto cleanup;
    }

    atomic_set(&rth->dst.__refcnt, 1);
    rth->dst.flags = DST_HOST;
    if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
        rth->dst.flags |= DST_NOPOLICY;
    if (IN_DEV_CONF_GET(out_dev, NOXFRM))
        rth->dst.flags |= DST_NOXFRM;
    rth->fl.fl4_dst	= daddr;
    rth->rt_dst	= daddr;
    rth->fl.fl4_tos	= tos;
    rth->fl.mark	= skb->mark;
    rth->fl.fl4_src	= saddr;
    rth->rt_src	= saddr;
    rth->rt_gateway	= daddr;
    rth->rt_iif	=
    rth->fl.iif	= in_dev->dev->ifindex;
    rth->dst.dev	= (out_dev)->dev;
    dev_hold(rth->dst.dev);
    rth->fl.oif	= 0;
    rth->rt_spec_dst = spec_dst;

    rth->dst.obsolete = -1;
    rth->dst.input = ip_forward;
    rth->dst.output = ip_output;
    rth->rt_genid = rt_genid(dev_net(rth->dst.dev));

    rt_set_nexthop(rth, res, itag);

    rth->rt_flags = flags;

    *result = rth;
    err = 0;
 cleanup:
    return err;
}
static int ip_mkroute_input(struct sk_buff *skb,
                            struct fib_result *res,
                            const struct flowi *fl,
                            struct in_device *in_dev,
                            __be32 daddr, __be32 saddr, u32 tos)
{
    struct rtable *rth = NULL;
    int err;
    unsigned hash;

#ifdef CONFIG_IP_ROUTE_MULTIPATH
    if (res->fi && res->fi->fib_nhs > 1 && fl->oif == 0)
        fib_select_multipath(fl, res);
#endif

    /* create a routing cache entry */
    err = __mkroute_input(skb, res, in_dev, daddr, saddr, tos, &rth);
    if (err)
        return err;

    /* put it into the cache */
    hash = rt_hash(daddr, saddr, fl->iif,
                   rt_genid(dev_net(rth->dst.dev)));
    return rt_intern_hash(hash, rth, NULL, skb, fl->iif);
}
/*
 *	NOTE. We drop all packets that have a local source
 *	address, because every properly looped-back packet
 *	must already have the correct destination attached by the output routine.
 *
 *	Such an approach solves two big problems:
 *	1. Non-simplex devices are handled properly.
 *	2. IP spoofing attempts are filtered with 100% guarantee.
 *	called with rcu_read_lock()
 */
static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
                               u8 tos, struct net_device *dev)
{
    struct fib_result res;
    struct in_device *in_dev = __in_dev_get_rcu(dev);
    struct flowi fl = { .fl4_dst	= daddr,
                        .fl4_src	= saddr,
                        .fl4_tos	= tos,
                        .fl4_scope	= RT_SCOPE_UNIVERSE,
                        .mark		= skb->mark,
                        .iif		= dev->ifindex };
    unsigned flags = 0;
    u32 itag = 0;
    struct rtable *rth;
    unsigned hash;
    __be32 spec_dst;
    int err = -EINVAL;
    struct net *net = dev_net(dev);

    /* IP on this device is disabled. */

    if (!in_dev)
        goto out;

    /* Check for the most weird martians, which can be not detected
       by fib_lookup.
     */

    if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
        ipv4_is_loopback(saddr))
        goto martian_source;

    if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
        goto brd_input;

    /* Accept zero addresses only to limited broadcast;
     * I am not even sure whether to fix this or not. Waiting for complaints :-)
     */
    if (ipv4_is_zeronet(saddr))
        goto martian_source;

    if (ipv4_is_zeronet(daddr) || ipv4_is_loopback(daddr))
        goto martian_destination;

    /*
     *	Now we are ready to route the packet.
     */
    err = fib_lookup(net, &fl, &res);
    if (err != 0) {
        if (!IN_DEV_FORWARD(in_dev))
            goto e_hostunreach;
        goto no_route;
    }

    RT_CACHE_STAT_INC(in_slow_tot);

    if (res.type == RTN_BROADCAST)
        goto brd_input;

    if (res.type == RTN_LOCAL) {
        err = fib_validate_source(saddr, daddr, tos,
                                  net->loopback_dev->ifindex,
                                  dev, &spec_dst, &itag, skb->mark);
        if (err < 0)
            goto martian_source_keep_err;
        if (err)
            flags |= RTCF_DIRECTSRC;
        spec_dst = daddr;
        goto local_input;
    }

    if (!IN_DEV_FORWARD(in_dev))
        goto e_hostunreach;
    if (res.type != RTN_UNICAST)
        goto martian_destination;

    err = ip_mkroute_input(skb, &res, &fl, in_dev, daddr, saddr, tos);
out:
    return err;

brd_input:
    if (skb->protocol != htons(ETH_P_IP))
        goto e_inval;

    if (ipv4_is_zeronet(saddr))
        spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
    else {
        err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst,
                                  &itag, skb->mark);
        if (err < 0)
            goto martian_source_keep_err;
        if (err)
            flags |= RTCF_DIRECTSRC;
    }
    flags |= RTCF_BROADCAST;
    res.type = RTN_BROADCAST;
    RT_CACHE_STAT_INC(in_brd);

local_input:
    rth = dst_alloc(&ipv4_dst_ops);
    if (!rth)
        goto e_nobufs;

    rth->dst.output = ip_rt_bug;
    rth->dst.obsolete = -1;
    rth->rt_genid = rt_genid(net);

    atomic_set(&rth->dst.__refcnt, 1);
    rth->dst.flags = DST_HOST;
    if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
        rth->dst.flags |= DST_NOPOLICY;
    rth->fl.fl4_dst	= daddr;
    rth->rt_dst	= daddr;
    rth->fl.fl4_tos	= tos;
    rth->fl.mark	= skb->mark;
    rth->fl.fl4_src	= saddr;
    rth->rt_src	= saddr;
#ifdef CONFIG_NET_CLS_ROUTE
    rth->dst.tclassid = itag;
#endif
    rth->rt_iif	=
    rth->fl.iif	= dev->ifindex;
    rth->dst.dev	= net->loopback_dev;
    dev_hold(rth->dst.dev);
    rth->rt_gateway	= daddr;
    rth->rt_spec_dst = spec_dst;
    rth->dst.input = ip_local_deliver;
    rth->rt_flags	= flags|RTCF_LOCAL;
    if (res.type == RTN_UNREACHABLE) {
        rth->dst.input = ip_error;
        rth->dst.error = -err;
        rth->rt_flags &= ~RTCF_LOCAL;
    }
    rth->rt_type = res.type;
    hash = rt_hash(daddr, saddr, fl.iif, rt_genid(net));
    err = rt_intern_hash(hash, rth, NULL, skb, fl.iif);
    goto out;

no_route:
    RT_CACHE_STAT_INC(in_no_route);
    spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
    res.type = RTN_UNREACHABLE;
    if (err == -ESRCH)
        err = -ENETUNREACH;
    goto local_input;

    /*
     *	Do not cache martian addresses: they should be logged (RFC1812)
     */
martian_destination:
    RT_CACHE_STAT_INC(in_martian_dst);
#ifdef CONFIG_IP_ROUTE_VERBOSE
    if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
        printk(KERN_WARNING "martian destination %pI4 from %pI4, dev %s\n",
               &daddr, &saddr, dev->name);
#endif

e_hostunreach:
    err = -EHOSTUNREACH;
    goto out;

e_inval:
    err = -EINVAL;
    goto out;

e_nobufs:
    err = -ENOBUFS;
    goto out;

martian_source:
    err = -EINVAL;
martian_source_keep_err:
    ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
    goto out;
}
int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
                          u8 tos, struct net_device *dev, bool noref)
{
    struct rtable *rth;
    unsigned hash;
    int iif = dev->ifindex;
    struct net *net;
    int res;

    net = dev_net(dev);

    rcu_read_lock();

    if (!rt_caching(net))
        goto skip_cache;

    tos &= IPTOS_RT_MASK;
    hash = rt_hash(daddr, saddr, iif, rt_genid(net));

    for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
         rth = rcu_dereference(rth->dst.rt_next)) {
        if ((((__force u32)rth->fl.fl4_dst ^ (__force u32)daddr) |
             ((__force u32)rth->fl.fl4_src ^ (__force u32)saddr) |
             (rth->fl.iif ^ iif) |
             rth->fl.oif |
             (rth->fl.fl4_tos ^ tos)) == 0 &&
            rth->fl.mark == skb->mark &&
            net_eq(dev_net(rth->dst.dev), net) &&
            !rt_is_expired(rth)) {
            if (noref) {
                dst_use_noref(&rth->dst, jiffies);
                skb_dst_set_noref(skb, &rth->dst);
            } else {
                dst_use(&rth->dst, jiffies);
                skb_dst_set(skb, &rth->dst);
            }
            RT_CACHE_STAT_INC(in_hit);
            rcu_read_unlock();
            return 0;
        }
        RT_CACHE_STAT_INC(in_hlist_search);
    }

skip_cache:
    /* Multicast recognition logic is moved from the route cache to here.
       The problem was that too many Ethernet cards have broken/missing
       hardware multicast filters :-( As a result the host on a multicasting
       network acquires a lot of useless route cache entries, sort of
       SDR messages from all the world. Now we try to get rid of them.
       Really, provided the software IP multicast filter is organized
       reasonably (at least, hashed), it does not result in a slowdown
       compared with route cache reject entries.
       Note that multicast routers are not affected, because a
       route cache entry is created eventually.
     */
    if (ipv4_is_multicast(daddr)) {
        struct in_device *in_dev = __in_dev_get_rcu(dev);

        if (in_dev) {
            int our = ip_check_mc(in_dev, daddr, saddr,
                                  ip_hdr(skb)->protocol);
            if (our
#ifdef CONFIG_IP_MROUTE
                ||
                (!ipv4_is_local_multicast(daddr) &&
                 IN_DEV_MFORWARD(in_dev))
#endif
               ) {
                int res = ip_route_input_mc(skb, daddr, saddr,
                                            tos, dev, our);
                rcu_read_unlock();
                return res;
            }
        }
        rcu_read_unlock();
        return -EINVAL;
    }
    res = ip_route_input_slow(skb, daddr, saddr, tos, dev);
    rcu_read_unlock();
    return res;
}
EXPORT_SYMBOL(ip_route_input_common);
/* called with rcu_read_lock() */
static int __mkroute_output(struct rtable **result,
                            struct fib_result *res,
                            const struct flowi *fl,
                            const struct flowi *oldflp,
                            struct net_device *dev_out,
                            unsigned flags)
{
    struct rtable *rth;
    struct in_device *in_dev;
    u32 tos = RT_FL_TOS(oldflp);

    if (ipv4_is_loopback(fl->fl4_src) && !(dev_out->flags & IFF_LOOPBACK))
        return -EINVAL;

    if (ipv4_is_lbcast(fl->fl4_dst))
        res->type = RTN_BROADCAST;
    else if (ipv4_is_multicast(fl->fl4_dst))
        res->type = RTN_MULTICAST;
    else if (ipv4_is_zeronet(fl->fl4_dst))
        return -EINVAL;

    if (dev_out->flags & IFF_LOOPBACK)
        flags |= RTCF_LOCAL;

    in_dev = __in_dev_get_rcu(dev_out);
    if (!in_dev)
        return -EINVAL;

    if (res->type == RTN_BROADCAST) {
        flags |= RTCF_BROADCAST | RTCF_LOCAL;
        res->fi = NULL;
    } else if (res->type == RTN_MULTICAST) {
        flags |= RTCF_MULTICAST | RTCF_LOCAL;
        if (!ip_check_mc(in_dev, oldflp->fl4_dst, oldflp->fl4_src,
                         oldflp->proto))
            flags &= ~RTCF_LOCAL;
        /* If the multicast route does not exist, use
         * the default one, but do not gateway in this case.
         * Yes, it is a hack.
         */
        if (res->fi && res->prefixlen < 4)
            res->fi = NULL;
    }

    rth = dst_alloc(&ipv4_dst_ops);
    if (!rth)
        return -ENOBUFS;

    atomic_set(&rth->dst.__refcnt, 1);
    rth->dst.flags = DST_HOST;
    if (IN_DEV_CONF_GET(in_dev, NOXFRM))
        rth->dst.flags |= DST_NOXFRM;
    if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
        rth->dst.flags |= DST_NOPOLICY;

    rth->fl.fl4_dst	= oldflp->fl4_dst;
    rth->fl.fl4_tos	= tos;
    rth->fl.fl4_src	= oldflp->fl4_src;
    rth->fl.oif	= oldflp->oif;
    rth->fl.mark	= oldflp->mark;
    rth->rt_dst	= fl->fl4_dst;
    rth->rt_src	= fl->fl4_src;
    rth->rt_iif	= oldflp->oif ? : dev_out->ifindex;
    /* get references to the devices that are to be held by the routing
       cache entry */
    rth->dst.dev	= dev_out;
    dev_hold(dev_out);
    rth->rt_gateway	= fl->fl4_dst;
    rth->rt_spec_dst = fl->fl4_src;

    rth->dst.output = ip_output;
    rth->dst.obsolete = -1;
    rth->rt_genid = rt_genid(dev_net(dev_out));

    RT_CACHE_STAT_INC(out_slow_tot);

    if (flags & RTCF_LOCAL) {
        rth->dst.input = ip_local_deliver;
        rth->rt_spec_dst = fl->fl4_dst;
    }
    if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
        rth->rt_spec_dst = fl->fl4_src;
        if (flags & RTCF_LOCAL &&
            !(dev_out->flags & IFF_LOOPBACK)) {
            rth->dst.output = ip_mc_output;
            RT_CACHE_STAT_INC(out_slow_mc);
        }
#ifdef CONFIG_IP_MROUTE
        if (res->type == RTN_MULTICAST) {
            if (IN_DEV_MFORWARD(in_dev) &&
                !ipv4_is_local_multicast(oldflp->fl4_dst)) {
                rth->dst.input = ip_mr_input;
                rth->dst.output = ip_mc_output;
            }
        }
#endif
    }

    rt_set_nexthop(rth, res, 0);

    rth->rt_flags = flags;
    *result = rth;
    return 0;
}
/* called with rcu_read_lock() */
static int ip_mkroute_output(struct rtable **rp,
                             struct fib_result *res,
                             const struct flowi *fl,
                             const struct flowi *oldflp,
                             struct net_device *dev_out,
                             unsigned flags)
{
    struct rtable *rth = NULL;
    int err = __mkroute_output(&rth, res, fl, oldflp, dev_out, flags);
    unsigned hash;
    if (err == 0) {
        hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif,
                       rt_genid(dev_net(dev_out)));
        err = rt_intern_hash(hash, rth, rp, NULL, oldflp->oif);
    }

    return err;
}
2478 * Major route resolver routine.
2479 * called with rcu_read_lock();
2482 static int ip_route_output_slow(struct net *net, struct rtable **rp,
2483 const struct flowi *oldflp)
2484 {
2485 u32 tos = RT_FL_TOS(oldflp);
2486 struct flowi fl = { .fl4_dst = oldflp->fl4_dst,
2487 .fl4_src = oldflp->fl4_src,
2488 .fl4_tos = tos & IPTOS_RT_MASK,
2489 .fl4_scope = ((tos & RTO_ONLINK) ?
2490 RT_SCOPE_LINK : RT_SCOPE_UNIVERSE),
2491 .mark = oldflp->mark,
2492 .iif = net->loopback_dev->ifindex,
2493 .oif = oldflp->oif };
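/* RT_FL_TOS() above preserves the caller's RTO_ONLINK bit alongside
 * the TOS bits: IPTOS_RT_MASK strips it (and the ECN bits) before the
 * TOS is used as a lookup key, while RTO_ONLINK narrows the lookup
 * scope to RT_SCOPE_LINK so that only directly connected routes can
 * match.
 */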
2494 struct fib_result res;
2495 unsigned int flags = 0;
2496 struct net_device *dev_out = NULL;
2497 int err;
2499 res.fi = NULL;
2501 #ifdef CONFIG_IP_MULTIPLE_TABLES
2502 res.r = NULL;
2503 #endif
2505 if (oldflp->fl4_src) {
2506 err = -EINVAL;
2507 if (ipv4_is_multicast(oldflp->fl4_src) ||
2508 ipv4_is_lbcast(oldflp->fl4_src) ||
2509 ipv4_is_zeronet(oldflp->fl4_src))
2510 goto out;
2512 /* I removed the check for oif == dev_out->oif here.
2513 It was wrong for two reasons:
2514 1. ip_dev_find(net, saddr) can return the wrong iface, if saddr
2515 is assigned to multiple interfaces.
2516 2. Moreover, we are allowed to send packets with a saddr
2517 that belongs to another iface. --ANK
2518 */
2520 if (oldflp->oif == 0 &&
2521 (ipv4_is_multicast(oldflp->fl4_dst) ||
2522 ipv4_is_lbcast(oldflp->fl4_dst))) {
2523 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2524 dev_out = __ip_dev_find(net, oldflp->fl4_src, false);
2525 if (dev_out == NULL)
2526 goto out;
2528 /* Special hack: the user can direct multicasts
2529 and limited broadcast via the necessary interface
2530 without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
2531 This hack is not just for fun, it allows
2532 vic, vat and friends to work.
2533 They bind a socket to loopback, set ttl to zero
2534 and expect that it will work.
2535 From the viewpoint of the routing cache they are broken,
2536 because we are not allowed to build a multicast path
2537 with a loopback source addr (look: the routing cache
2538 cannot know that ttl is zero, so that the packet
2539 will not leave this host and the route is valid).
2540 Luckily, this hack is a good workaround.
2541 */
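/* A hypothetical user-space fragment of the pattern described above
 * (illustrative only, not part of this file): vic/vat-style tools
 * bind to the loopback address and rely on the saddr-based device
 * selection performed here.
 *
 *	int ttl = 0;
 *	struct sockaddr_in local = { .sin_family = AF_INET };
 *	local.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
 *	bind(fd, (struct sockaddr *)&local, sizeof(local));
 *	setsockopt(fd, IPPROTO_IP, IP_MULTICAST_TTL, &ttl, sizeof(ttl));
 *	// sendto() a 224.0.0.0/4 destination now resolves via the
 *	// device owning the bound source address, as coded below.
 */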
2543 fl.oif = dev_out->ifindex;
2544 goto make_route;
2545 }
2547 if (!(oldflp->flags & FLOWI_FLAG_ANYSRC)) {
2548 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2549 if (!__ip_dev_find(net, oldflp->fl4_src, false))
2550 goto out;
2551 }
2552 }
2555 if (oldflp->oif) {
2556 dev_out = dev_get_by_index_rcu(net, oldflp->oif);
2557 err = -ENODEV;
2558 if (dev_out == NULL)
2559 goto out;
2561 /* RACE: Check return value of inet_select_addr instead. */
2562 if (rcu_dereference(dev_out->ip_ptr) == NULL)
2563 goto out; /* Wrong error code */
2565 if (ipv4_is_local_multicast(oldflp->fl4_dst) ||
2566 ipv4_is_lbcast(oldflp->fl4_dst)) {
2567 if (!fl.fl4_src)
2568 fl.fl4_src = inet_select_addr(dev_out, 0,
2569 RT_SCOPE_LINK);
2570 goto make_route;
2571 }
2572 if (!fl.fl4_src) {
2573 if (ipv4_is_multicast(oldflp->fl4_dst))
2574 fl.fl4_src = inet_select_addr(dev_out, 0,
2575 fl.fl4_scope);
2576 else if (!oldflp->fl4_dst)
2577 fl.fl4_src = inet_select_addr(dev_out, 0,
2578 RT_SCOPE_HOST);
2579 }
2580 }
2582 if (!fl.fl4_dst) {
2583 fl.fl4_dst = fl.fl4_src;
2584 if (!fl.fl4_dst)
2585 fl.fl4_dst = fl.fl4_src = htonl(INADDR_LOOPBACK);
2586 dev_out = net->loopback_dev;
2587 fl.oif = net->loopback_dev->ifindex;
2588 res.type = RTN_LOCAL;
2589 flags |= RTCF_LOCAL;
2590 goto make_route;
2591 }
2593 if (fib_lookup(net, &fl, &res)) {
2594 res.fi = NULL;
2595 if (oldflp->oif) {
2596 /* Apparently, the routing tables are wrong. Assume
2597 that the destination is on-link.
2600 We do this because we are allowed to send to an iface
2601 even if it has NO routes and NO assigned
2602 addresses. When an oif is specified, the routing
2603 tables are looked up with only one purpose:
2604 to catch whether the destination is gatewayed rather than
2605 direct. Moreover, if MSG_DONTROUTE is set,
2606 we send the packet, ignoring both the routing tables
2607 and the ifaddr state. --ANK
2610 We could do the same even if the oif is unknown,
2611 as is likely for IPv6, but we do not.
2612 */
2614 if (fl.fl4_src == 0)
2615 fl.fl4_src = inet_select_addr(dev_out, 0,
2616 RT_SCOPE_LINK);
2617 res.type = RTN_UNICAST;
2618 goto make_route;
2619 }
2620 err = -ENETUNREACH;
2621 goto out;
2622 }
2624 if (res.type == RTN_LOCAL) {
2625 if (!fl.fl4_src)
2626 fl.fl4_src = fl.fl4_dst;
2627 dev_out = net->loopback_dev;
2628 fl.oif = dev_out->ifindex;
2629 res.fi = NULL;
2630 flags |= RTCF_LOCAL;
2631 goto make_route;
2632 }
2634 #ifdef CONFIG_IP_ROUTE_MULTIPATH
2635 if (res.fi->fib_nhs > 1 && fl.oif == 0)
2636 fib_select_multipath(&fl, &res);
2637 else
2638 #endif
2639 if (!res.prefixlen && res.type == RTN_UNICAST && !fl.oif)
2640 fib_select_default(net, &fl, &res);
2642 if (!fl.fl4_src)
2643 fl.fl4_src = FIB_RES_PREFSRC(res);
2645 dev_out = FIB_RES_DEV(res);
2646 fl.oif = dev_out->ifindex;
2649 make_route:
2650 err = ip_mkroute_output(rp, &res, &fl, oldflp, dev_out, flags);
2652 out: return err;
2653 }
2655 int __ip_route_output_key(struct net *net, struct rtable **rp,
2656 const struct flowi *flp)
2657 {
2658 unsigned int hash;
2659 int res;
2660 struct rtable *rth;
2662 if (!rt_caching(net))
2663 goto slow_output;
2665 hash = rt_hash(flp->fl4_dst, flp->fl4_src, flp->oif, rt_genid(net));
2667 rcu_read_lock_bh();
2668 for (rth = rcu_dereference_bh(rt_hash_table[hash].chain); rth;
2669 rth = rcu_dereference_bh(rth->dst.rt_next)) {
2670 if (rth->fl.fl4_dst == flp->fl4_dst &&
2671 rth->fl.fl4_src == flp->fl4_src &&
2672 rt_is_output_route(rth) &&
2673 rth->fl.oif == flp->oif &&
2674 rth->fl.mark == flp->mark &&
2675 !((rth->fl.fl4_tos ^ flp->fl4_tos) &
2676 (IPTOS_RT_MASK | RTO_ONLINK)) &&
2677 net_eq(dev_net(rth->dst.dev), net) &&
2678 !rt_is_expired(rth)) {
2679 dst_use(&rth->dst, jiffies);
2680 RT_CACHE_STAT_INC(out_hit);
2681 rcu_read_unlock_bh();
2682 *rp = rth;
2683 return 0;
2684 }
2685 RT_CACHE_STAT_INC(out_hlist_search);
2686 }
2687 rcu_read_unlock_bh();
2689 slow_output:
2690 rcu_read_lock();
2691 res = ip_route_output_slow(net, rp, flp);
2692 rcu_read_unlock();
2693 return res;
2694 }
2695 EXPORT_SYMBOL_GPL(__ip_route_output_key);
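/* Blackhole dst entries, below, are stand-in routes whose input and
 * output handlers simply discard packets.  ip_route_output_flow()
 * falls back to one when __xfrm_lookup() returns -EREMOTE, i.e. the
 * IPsec state required by policy is not resolved yet and the caller
 * cannot wait, so traffic is dropped instead of being sent in the
 * clear.
 */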
2697 static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
2698 {
2699 return NULL;
2700 }
2702 static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
2703 {
2704 }
2706 static struct dst_ops ipv4_dst_blackhole_ops = {
2707 .family = AF_INET,
2708 .protocol = cpu_to_be16(ETH_P_IP),
2709 .destroy = ipv4_dst_destroy,
2710 .check = ipv4_blackhole_dst_check,
2711 .update_pmtu = ipv4_rt_blackhole_update_pmtu,
2712 };
2715 static int ipv4_dst_blackhole(struct net *net, struct rtable **rp, struct flowi *flp)
2716 {
2717 struct rtable *ort = *rp;
2718 struct rtable *rt = (struct rtable *)
2719 dst_alloc(&ipv4_dst_blackhole_ops);
2721 if (rt) {
2722 struct dst_entry *new = &rt->dst;
2724 atomic_set(&new->__refcnt, 1);
2726 new->input = dst_discard;
2727 new->output = dst_discard;
2728 dst_copy_metrics(new, &ort->dst);
2730 new->dev = ort->dst.dev;
2731 if (new->dev)
2732 dev_hold(new->dev);
2734 rt->fl = ort->fl;
2736 rt->rt_genid = rt_genid(net);
2737 rt->rt_flags = ort->rt_flags;
2738 rt->rt_type = ort->rt_type;
2739 rt->rt_dst = ort->rt_dst;
2740 rt->rt_src = ort->rt_src;
2741 rt->rt_iif = ort->rt_iif;
2742 rt->rt_gateway = ort->rt_gateway;
2743 rt->rt_spec_dst = ort->rt_spec_dst;
2744 rt->peer = ort->peer;
2745 if (rt->peer)
2746 atomic_inc(&rt->peer->refcnt);
2748 dst_free(new);
2749 }
2751 dst_release(&(*rp)->dst);
2752 *rp = rt;
2753 return rt ? 0 : -ENOMEM;
2754 }
2756 int ip_route_output_flow(struct net *net, struct rtable **rp, struct flowi *flp,
2757 struct sock *sk, int flags)
2758 {
2759 int err;
2761 if ((err = __ip_route_output_key(net, rp, flp)) != 0)
2762 return err;
2764 if (flp->proto) {
2765 if (!flp->fl4_src)
2766 flp->fl4_src = (*rp)->rt_src;
2767 if (!flp->fl4_dst)
2768 flp->fl4_dst = (*rp)->rt_dst;
2769 err = __xfrm_lookup(net, (struct dst_entry **)rp, flp, sk,
2770 flags ? XFRM_LOOKUP_WAIT : 0);
2771 if (err == -EREMOTE)
2772 err = ipv4_dst_blackhole(net, rp, flp);
2774 return err;
2775 }
2777 return 0;
2778 }
2779 EXPORT_SYMBOL_GPL(ip_route_output_flow);
2781 int ip_route_output_key(struct net *net, struct rtable **rp, struct flowi *flp)
2782 {
2783 return ip_route_output_flow(net, rp, flp, NULL, 0);
2784 }
2785 EXPORT_SYMBOL(ip_route_output_key);
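/* Illustrative in-kernel usage sketch (not from this file; daddr,
 * saddr and tos stand for caller-supplied values): a typical caller
 * fills a flow key and lets ip_route_output_key() resolve it.
 *
 *	struct flowi fl = {
 *		.fl4_dst = daddr,
 *		.fl4_src = saddr,	// may be 0, chosen by the lookup
 *		.fl4_tos = RT_TOS(tos),
 *		.oif	 = 0,		// 0 means any output device
 *	};
 *	struct rtable *rt;
 *
 *	if (ip_route_output_key(&init_net, &rt, &fl))
 *		return -EHOSTUNREACH;
 *	// ... use rt->dst.dev, rt->rt_gateway, rt->rt_src ...
 *	ip_rt_put(rt);			// drop the reference we hold
 */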
2787 static int rt_fill_info(struct net *net,
2788 struct sk_buff *skb, u32 pid, u32 seq, int event,
2789 int nowait, unsigned int flags)
2790 {
2791 struct rtable *rt = skb_rtable(skb);
2792 struct rtmsg *r;
2793 struct nlmsghdr *nlh;
2794 long expires;
2795 u32 id = 0, ts = 0, tsage = 0, error;
2797 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
2798 if (nlh == NULL)
2799 return -EMSGSIZE;
2801 r = nlmsg_data(nlh);
2802 r->rtm_family = AF_INET;
2803 r->rtm_dst_len = 32;
2804 r->rtm_src_len = 0;
2805 r->rtm_tos = rt->fl.fl4_tos;
2806 r->rtm_table = RT_TABLE_MAIN;
2807 NLA_PUT_U32(skb, RTA_TABLE, RT_TABLE_MAIN);
2808 r->rtm_type = rt->rt_type;
2809 r->rtm_scope = RT_SCOPE_UNIVERSE;
2810 r->rtm_protocol = RTPROT_UNSPEC;
2811 r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
2812 if (rt->rt_flags & RTCF_NOTIFY)
2813 r->rtm_flags |= RTM_F_NOTIFY;
2815 NLA_PUT_BE32(skb, RTA_DST, rt->rt_dst);
2817 if (rt->fl.fl4_src) {
2818 r->rtm_src_len = 32;
2819 NLA_PUT_BE32(skb, RTA_SRC, rt->fl.fl4_src);
2820 }
2821 if (rt->dst.dev)
2822 NLA_PUT_U32(skb, RTA_OIF, rt->dst.dev->ifindex);
2823 #ifdef CONFIG_NET_CLS_ROUTE
2824 if (rt->dst.tclassid)
2825 NLA_PUT_U32(skb, RTA_FLOW, rt->dst.tclassid);
2826 #endif
2827 if (rt_is_input_route(rt))
2828 NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst);
2829 else if (rt->rt_src != rt->fl.fl4_src)
2830 NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_src);
2832 if (rt->rt_dst != rt->rt_gateway)
2833 NLA_PUT_BE32(skb, RTA_GATEWAY, rt->rt_gateway);
2835 if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
2836 goto nla_put_failure;
2838 if (rt->fl.mark)
2839 NLA_PUT_BE32(skb, RTA_MARK, rt->fl.mark);
2841 error = rt->dst.error;
2842 expires = rt->dst.expires ? rt->dst.expires - jiffies : 0;
2843 if (rt->peer) {
2844 inet_peer_refcheck(rt->peer);
2845 id = atomic_read(&rt->peer->ip_id_count) & 0xffff;
2846 if (rt->peer->tcp_ts_stamp) {
2847 ts = rt->peer->tcp_ts;
2848 tsage = get_seconds() - rt->peer->tcp_ts_stamp;
2849 }
2850 }
2852 if (rt_is_input_route(rt)) {
2853 #ifdef CONFIG_IP_MROUTE
2854 __be32 dst = rt->rt_dst;
2856 if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
2857 IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
2858 int err = ipmr_get_route(net, skb, r, nowait);
2859 if (err <= 0) {
2860 if (!nowait) {
2861 if (err == 0)
2862 return 0;
2863 goto nla_put_failure;
2864 } else {
2865 if (err == -EMSGSIZE)
2866 goto nla_put_failure;
2867 error = err;
2868 }
2869 }
2870 } else
2871 #endif
2872 NLA_PUT_U32(skb, RTA_IIF, rt->fl.iif);
2873 }
2875 if (rtnl_put_cacheinfo(skb, &rt->dst, id, ts, tsage,
2876 expires, error) < 0)
2877 goto nla_put_failure;
2879 return nlmsg_end(skb, nlh);
2881 nla_put_failure:
2882 nlmsg_cancel(skb, nlh);
2883 return -EMSGSIZE;
2884 }
2886 static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg)
2887 {
2888 struct net *net = sock_net(in_skb->sk);
2889 struct rtmsg *rtm;
2890 struct nlattr *tb[RTA_MAX+1];
2891 struct rtable *rt = NULL;
2892 __be32 dst = 0;
2893 __be32 src = 0;
2894 u32 iif;
2895 int err;
2896 int mark;
2897 struct sk_buff *skb;
2899 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
2900 if (err < 0)
2901 goto errout;
2903 rtm = nlmsg_data(nlh);
2905 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2906 if (skb == NULL) {
2907 err = -ENOBUFS;
2908 goto errout;
2909 }
2911 /* Reserve room for dummy headers; this skb can pass
2912 * through a good chunk of the routing engine.
2913 */
2914 skb_reset_mac_header(skb);
2915 skb_reset_network_header(skb);
2917 /* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
2918 ip_hdr(skb)->protocol = IPPROTO_ICMP;
2919 skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
2921 src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0;
2922 dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
2923 iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
2924 mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
2926 if (iif) {
2927 struct net_device *dev;
2929 dev = __dev_get_by_index(net, iif);
2930 if (dev == NULL) {
2931 err = -ENODEV;
2932 goto errout_free;
2933 }
2935 skb->protocol = htons(ETH_P_IP);
2936 skb->dev = dev;
2937 skb->mark = mark;
2938 local_bh_disable();
2939 err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
2940 local_bh_enable();
2942 rt = skb_rtable(skb);
2943 if (err == 0 && rt->dst.error)
2944 err = -rt->dst.error;
2945 } else {
2946 struct flowi fl = {
2947 .fl4_dst = dst,
2948 .fl4_src = src,
2949 .fl4_tos = rtm->rtm_tos,
2950 .oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0,
2951 .mark = mark,
2952 };
2953 err = ip_route_output_key(net, &rt, &fl);
2954 }
2956 if (err)
2957 goto errout_free;
2959 skb_dst_set(skb, &rt->dst);
2960 if (rtm->rtm_flags & RTM_F_NOTIFY)
2961 rt->rt_flags |= RTCF_NOTIFY;
2963 err = rt_fill_info(net, skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
2964 RTM_NEWROUTE, 0, 0);
2965 if (err <= 0)
2966 goto errout_free;
2968 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
2969 errout:
2970 return err;
2972 errout_free:
2973 kfree_skb(skb);
2974 goto errout;
2975 }
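/* Hypothetical user-space sketch (not part of this file): the handler
 * above is what answers "ip route get".  A minimal RTM_GETROUTE
 * request carries only an RTA_DST attribute; dst_addr stands for the
 * queried destination in network byte order.
 *
 *	#include <linux/rtnetlink.h>
 *	struct {
 *		struct nlmsghdr nh;
 *		struct rtmsg	rtm;
 *		struct rtattr	rta;
 *		__be32		dst;
 *	} req = {
 *		.nh.nlmsg_len	 = sizeof(req),
 *		.nh.nlmsg_type	 = RTM_GETROUTE,
 *		.nh.nlmsg_flags	 = NLM_F_REQUEST,
 *		.rtm.rtm_family	 = AF_INET,
 *		.rtm.rtm_dst_len = 32,
 *		.rta.rta_type	 = RTA_DST,
 *		.rta.rta_len	 = RTA_LENGTH(4),
 *		.dst		 = dst_addr,
 *	};
 *	// send() this on a socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE)
 *	// and recv() the RTM_NEWROUTE reply built by rt_fill_info().
 */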
2977 int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
2978 {
2979 struct rtable *rt;
2980 int h, s_h;
2981 int idx, s_idx;
2982 struct net *net;
2984 net = sock_net(skb->sk);
2986 s_h = cb->args[0];
2987 if (s_h < 0)
2988 s_h = 0;
2989 s_idx = idx = cb->args[1];
2990 for (h = s_h; h <= rt_hash_mask; h++, s_idx = 0) {
2991 if (!rt_hash_table[h].chain)
2992 continue;
2993 rcu_read_lock_bh();
2994 for (rt = rcu_dereference_bh(rt_hash_table[h].chain), idx = 0; rt;
2995 rt = rcu_dereference_bh(rt->dst.rt_next), idx++) {
2996 if (!net_eq(dev_net(rt->dst.dev), net) || idx < s_idx)
2997 continue;
2998 if (rt_is_expired(rt))
2999 continue;
3000 skb_dst_set_noref(skb, &rt->dst);
3001 if (rt_fill_info(net, skb, NETLINK_CB(cb->skb).pid,
3002 cb->nlh->nlmsg_seq, RTM_NEWROUTE,
3003 1, NLM_F_MULTI) <= 0) {
3004 skb_dst_drop(skb);
3005 rcu_read_unlock_bh();
3006 goto done;
3007 }
3008 skb_dst_drop(skb);
3009 }
3010 rcu_read_unlock_bh();
3011 }
3013 done:
3014 cb->args[0] = h;
3015 cb->args[1] = idx;
3016 return skb->len;
3017 }
3019 void ip_rt_multicast_event(struct in_device *in_dev)
3020 {
3021 rt_cache_flush(dev_net(in_dev->dev), 0);
3022 }
3024 #ifdef CONFIG_SYSCTL
3025 static int ipv4_sysctl_rtcache_flush(ctl_table *__ctl, int write,
3026 void __user *buffer,
3027 size_t *lenp, loff_t *ppos)
3028 {
3029 if (write) {
3030 int flush_delay;
3031 ctl_table ctl;
3032 struct net *net;
3034 memcpy(&ctl, __ctl, sizeof(ctl));
3035 ctl.data = &flush_delay;
3036 proc_dointvec(&ctl, write, buffer, lenp, ppos);
3038 net = (struct net *)__ctl->extra1;
3039 rt_cache_flush(net, flush_delay);
3040 return 0;
3041 }
3043 return -EINVAL;
3044 }
3046 static ctl_table ipv4_route_table[] = {
3048 .procname = "gc_thresh",
3049 .data = &ipv4_dst_ops.gc_thresh,
3050 .maxlen = sizeof(int),
3052 .proc_handler = proc_dointvec,
3055 .procname = "max_size",
3056 .data = &ip_rt_max_size,
3057 .maxlen = sizeof(int),
3059 .proc_handler = proc_dointvec,
3062 /* Deprecated. Use gc_min_interval_ms */
3064 .procname = "gc_min_interval",
3065 .data = &ip_rt_gc_min_interval,
3066 .maxlen = sizeof(int),
3068 .proc_handler = proc_dointvec_jiffies,
3071 .procname = "gc_min_interval_ms",
3072 .data = &ip_rt_gc_min_interval,
3073 .maxlen = sizeof(int),
3075 .proc_handler = proc_dointvec_ms_jiffies,
3078 .procname = "gc_timeout",
3079 .data = &ip_rt_gc_timeout,
3080 .maxlen = sizeof(int),
3082 .proc_handler = proc_dointvec_jiffies,
3085 .procname = "gc_interval",
3086 .data = &ip_rt_gc_interval,
3087 .maxlen = sizeof(int),
3089 .proc_handler = proc_dointvec_jiffies,
3092 .procname = "redirect_load",
3093 .data = &ip_rt_redirect_load,
3094 .maxlen = sizeof(int),
3096 .proc_handler = proc_dointvec,
3099 .procname = "redirect_number",
3100 .data = &ip_rt_redirect_number,
3101 .maxlen = sizeof(int),
3103 .proc_handler = proc_dointvec,
3106 .procname = "redirect_silence",
3107 .data = &ip_rt_redirect_silence,
3108 .maxlen = sizeof(int),
3110 .proc_handler = proc_dointvec,
3113 .procname = "error_cost",
3114 .data = &ip_rt_error_cost,
3115 .maxlen = sizeof(int),
3117 .proc_handler = proc_dointvec,
3120 .procname = "error_burst",
3121 .data = &ip_rt_error_burst,
3122 .maxlen = sizeof(int),
3124 .proc_handler = proc_dointvec,
3127 .procname = "gc_elasticity",
3128 .data = &ip_rt_gc_elasticity,
3129 .maxlen = sizeof(int),
3131 .proc_handler = proc_dointvec,
3134 .procname = "mtu_expires",
3135 .data = &ip_rt_mtu_expires,
3136 .maxlen = sizeof(int),
3138 .proc_handler = proc_dointvec_jiffies,
3141 .procname = "min_pmtu",
3142 .data = &ip_rt_min_pmtu,
3143 .maxlen = sizeof(int),
3145 .proc_handler = proc_dointvec,
3148 .procname = "min_adv_mss",
3149 .data = &ip_rt_min_advmss,
3150 .maxlen = sizeof(int),
3152 .proc_handler = proc_dointvec,
3153 },
3154 { }
3155 };
3157 static struct ctl_table empty[1];
3159 static struct ctl_table ipv4_skeleton[] =
3160 {
3161 { .procname = "route",
3162 .mode = 0555, .child = ipv4_route_table},
3163 { .procname = "neigh",
3164 .mode = 0555, .child = empty},
3165 { }
3166 };
3168 static __net_initdata struct ctl_path ipv4_path[] = {
3169 { .procname = "net", },
3170 { .procname = "ipv4", },
3174 static struct ctl_table ipv4_route_flush_table[] = {
3176 .procname = "flush",
3177 .maxlen = sizeof(int),
3178 .mode = 0200,
3179 .proc_handler = ipv4_sysctl_rtcache_flush,
3180 },
3181 { },
3182 };
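/* Illustrative user-space sketch (not part of this file): writing an
 * integer to the file registered below hands that value to
 * rt_cache_flush() as the delay argument; writing "0" flushes the
 * cache immediately.
 *
 *	int fd = open("/proc/sys/net/ipv4/route/flush", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "0", 1);	// flush the route cache now
 *		close(fd);
 *	}
 */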
3184 static __net_initdata struct ctl_path ipv4_route_path[] = {
3185 { .procname = "net", },
3186 { .procname = "ipv4", },
3187 { .procname = "route", },
3191 static __net_init int sysctl_route_net_init(struct net *net)
3193 struct ctl_table *tbl;
3195 tbl = ipv4_route_flush_table;
3196 if (!net_eq(net, &init_net)) {
3197 tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
3198 if (tbl == NULL)
3199 goto err_dup;
3200 }
3201 tbl[0].extra1 = net;
3203 net->ipv4.route_hdr =
3204 register_net_sysctl_table(net, ipv4_route_path, tbl);
3205 if (net->ipv4.route_hdr == NULL)
3206 goto err_reg;
3207 return 0;
3209 err_reg:
3210 if (tbl != ipv4_route_flush_table)
3211 kfree(tbl);
3212 err_dup:
3213 return -ENOMEM;
3214 }
3216 static __net_exit void sysctl_route_net_exit(struct net *net)
3218 struct ctl_table *tbl;
3220 tbl = net->ipv4.route_hdr->ctl_table_arg;
3221 unregister_net_sysctl_table(net->ipv4.route_hdr);
3222 BUG_ON(tbl == ipv4_route_flush_table);
3223 kfree(tbl);
3224 }
3226 static __net_initdata struct pernet_operations sysctl_route_ops = {
3227 .init = sysctl_route_net_init,
3228 .exit = sysctl_route_net_exit,
3229 };
3232 static __net_init int rt_genid_init(struct net *net)
3233 {
3234 get_random_bytes(&net->ipv4.rt_genid,
3235 sizeof(net->ipv4.rt_genid));
3236 return 0;
3237 }
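/* The per-namespace generation id randomized above is bumped on every
 * cache flush; rt_is_expired() compares it with the id stored in each
 * cached rtable (rt_genid at allocation time), so a flush lazily
 * invalidates all cached routes without walking the hash table.
 */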
3239 static __net_initdata struct pernet_operations rt_genid_ops = {
3240 .init = rt_genid_init,
3241 };
3244 #ifdef CONFIG_NET_CLS_ROUTE
3245 struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
3246 #endif /* CONFIG_NET_CLS_ROUTE */
3248 static __initdata unsigned long rhash_entries;
3249 static int __init set_rhash_entries(char *str)
3250 {
3251 if (!str)
3252 return 0;
3253 rhash_entries = simple_strtoul(str, &str, 0);
3254 return 1;
3255 }
3256 __setup("rhash_entries=", set_rhash_entries);
3258 int __init ip_rt_init(void)
3259 {
3260 int rc = 0;
3262 #ifdef CONFIG_NET_CLS_ROUTE
3263 ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
3264 if (!ip_rt_acct)
3265 panic("IP: failed to allocate ip_rt_acct\n");
3266 #endif
3268 ipv4_dst_ops.kmem_cachep =
3269 kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
3270 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
3272 ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;
3274 if (dst_entries_init(&ipv4_dst_ops) < 0)
3275 panic("IP: failed to allocate ipv4_dst_ops counter\n");
3277 if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
3278 panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");
3280 rt_hash_table = (struct rt_hash_bucket *)
3281 alloc_large_system_hash("IP route cache",
3282 sizeof(struct rt_hash_bucket),
3283 rhash_entries,
3284 (totalram_pages >= 128 * 1024) ?
3285 15 : 17,
3286 0,
3287 &rt_hash_log,
3288 &rt_hash_mask,
3289 rhash_entries ? 0 : 512 * 1024);
3290 memset(rt_hash_table, 0, (rt_hash_mask + 1) * sizeof(struct rt_hash_bucket));
3291 rt_hash_lock_init();
3293 ipv4_dst_ops.gc_thresh = (rt_hash_mask + 1);
3294 ip_rt_max_size = (rt_hash_mask + 1) * 16;
3296 devinet_init();
3297 ip_fib_init();
3299 /* All the timers started at system startup tend
3300 * to synchronize. Perturb them a bit.
3301 */
3302 INIT_DELAYED_WORK_DEFERRABLE(&expires_work, rt_worker_func);
3303 expires_ljiffies = jiffies;
3304 schedule_delayed_work(&expires_work,
3305 net_random() % ip_rt_gc_interval + ip_rt_gc_interval);
3307 if (ip_rt_proc_init())
3308 printk(KERN_ERR "Unable to create route proc files\n");
3309 #ifdef CONFIG_XFRM
3310 xfrm_init();
3311 xfrm4_init(ip_rt_max_size);
3312 #endif
3313 rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL);
3315 #ifdef CONFIG_SYSCTL
3316 register_pernet_subsys(&sysctl_route_ops);
3317 #endif
3318 register_pernet_subsys(&rt_genid_ops);
3320 return rc;
3321 }
3322 #ifdef CONFIG_SYSCTL
3324 * We really need to sanitize the damn ipv4 init order, then all
3325 * this nonsense will go away.
3327 void __init ip_static_sysctl_init(void)
3328 {
3329 register_sysctl_paths(ipv4_path, ipv4_skeleton);
3330 }
3331 #endif