1 /*
2  * INET         An implementation of the TCP/IP protocol suite for the LINUX
3  *              operating system.  INET is implemented using the  BSD Socket
4  *              interface as the means of communication with the user level.
5  *
6  *              ROUTE - implementation of the IP router.
7  *
8  * Authors:     Ross Biro
9  *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
10  *              Alan Cox, <gw4pts@gw4pts.ampr.org>
11  *              Linus Torvalds, <Linus.Torvalds@helsinki.fi>
12  *              Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
13  *
14  * Fixes:
15  *              Alan Cox        :       Verify area fixes.
16  *              Alan Cox        :       cli() protects routing changes
17  *              Rui Oliveira    :       ICMP routing table updates
18  *              (rco@di.uminho.pt)      Routing table insertion and update
19  *              Linus Torvalds  :       Rewrote bits to be sensible
20  *              Alan Cox        :       Added BSD route gw semantics
21  *              Alan Cox        :       Super /proc >4K
22  *              Alan Cox        :       MTU in route table
23  *              Alan Cox        :       MSS actually. Also added the window
24  *                                      clamper.
25  *              Sam Lantinga    :       Fixed route matching in rt_del()
26  *              Alan Cox        :       Routing cache support.
27  *              Alan Cox        :       Removed compatibility cruft.
28  *              Alan Cox        :       RTF_REJECT support.
29  *              Alan Cox        :       TCP irtt support.
30  *              Jonathan Naylor :       Added Metric support.
31  *      Miquel van Smoorenburg  :       BSD API fixes.
32  *      Miquel van Smoorenburg  :       Metrics.
33  *              Alan Cox        :       Use __u32 properly
34  *              Alan Cox        :       Aligned routing errors more closely with BSD;
35  *                                      our system is still very different.
36  *              Alan Cox        :       Faster /proc handling
37  *      Alexey Kuznetsov        :       Massive rework to support tree based routing,
38  *                                      routing caches and better behaviour.
39  *
40  *              Olaf Erb        :       irtt wasn't being copied right.
41  *              Bjorn Ekwall    :       Kerneld route support.
42  *              Alan Cox        :       Multicast fixed (I hope)
43  *              Pavel Krauz     :       Limited broadcast fixed
44  *              Mike McLagan    :       Routing by source
45  *      Alexey Kuznetsov        :       End of old history. Split to fib.c and
46  *                                      route.c and rewritten from scratch.
47  *              Andi Kleen      :       Load-limit warning messages.
48  *      Vitaly E. Lavrov        :       Transparent proxy revived after year coma.
49  *      Vitaly E. Lavrov        :       Race condition in ip_route_input_slow.
50  *      Tobias Ringstrom        :       Uninitialized res.type in ip_route_output_slow.
51  *      Vladimir V. Ivanov      :       IP rule info (flowid) is really useful.
52  *              Marc Boucher    :       routing by fwmark
53  *      Robert Olsson           :       Added rt_cache statistics
54  *      Arnaldo C. Melo         :       Convert proc stuff to seq_file
55  *      Eric Dumazet            :       hashed spinlocks and rt_check_expire() fixes.
56  *      Ilia Sotnikov           :       Ignore TOS on PMTUD and Redirect
57  *      Ilia Sotnikov           :       Removed TOS from hash calculations
58  *
59  *              This program is free software; you can redistribute it and/or
60  *              modify it under the terms of the GNU General Public License
61  *              as published by the Free Software Foundation; either version
62  *              2 of the License, or (at your option) any later version.
63  */
64
65 #define pr_fmt(fmt) "IPv4: " fmt
66
67 #include <linux/module.h>
68 #include <asm/uaccess.h>
69 #include <linux/bitops.h>
70 #include <linux/types.h>
71 #include <linux/kernel.h>
72 #include <linux/mm.h>
73 #include <linux/bootmem.h>
74 #include <linux/string.h>
75 #include <linux/socket.h>
76 #include <linux/sockios.h>
77 #include <linux/errno.h>
78 #include <linux/in.h>
79 #include <linux/inet.h>
80 #include <linux/netdevice.h>
81 #include <linux/proc_fs.h>
82 #include <linux/init.h>
83 #include <linux/workqueue.h>
84 #include <linux/skbuff.h>
85 #include <linux/inetdevice.h>
86 #include <linux/igmp.h>
87 #include <linux/pkt_sched.h>
88 #include <linux/mroute.h>
89 #include <linux/netfilter_ipv4.h>
90 #include <linux/random.h>
91 #include <linux/jhash.h>
92 #include <linux/rcupdate.h>
93 #include <linux/times.h>
94 #include <linux/slab.h>
95 #include <linux/prefetch.h>
96 #include <net/dst.h>
97 #include <net/net_namespace.h>
98 #include <net/protocol.h>
99 #include <net/ip.h>
100 #include <net/route.h>
101 #include <net/inetpeer.h>
102 #include <net/sock.h>
103 #include <net/ip_fib.h>
104 #include <net/arp.h>
105 #include <net/tcp.h>
106 #include <net/icmp.h>
107 #include <net/xfrm.h>
108 #include <net/netevent.h>
109 #include <net/rtnetlink.h>
110 #ifdef CONFIG_SYSCTL
111 #include <linux/sysctl.h>
112 #include <linux/kmemleak.h>
113 #endif
114 #include <net/secure_seq.h>
115
116 #define RT_FL_TOS(oldflp4) \
117         ((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
118
119 #define IP_MAX_MTU      0xFFF0
120
121 #define RT_GC_TIMEOUT (300*HZ)
122
123 static int ip_rt_max_size;
124 static int ip_rt_gc_timeout __read_mostly       = RT_GC_TIMEOUT;
125 static int ip_rt_gc_interval __read_mostly  = 60 * HZ;
126 static int ip_rt_gc_min_interval __read_mostly  = HZ / 2;
127 static int ip_rt_redirect_number __read_mostly  = 9;
128 static int ip_rt_redirect_load __read_mostly    = HZ / 50;
129 static int ip_rt_redirect_silence __read_mostly = ((HZ / 50) << (9 + 1));
130 static int ip_rt_error_cost __read_mostly       = HZ;
131 static int ip_rt_error_burst __read_mostly      = 5 * HZ;
132 static int ip_rt_gc_elasticity __read_mostly    = 8;
133 static int ip_rt_mtu_expires __read_mostly      = 10 * 60 * HZ;
134 static int ip_rt_min_pmtu __read_mostly         = 512 + 20 + 20;
135 static int ip_rt_min_advmss __read_mostly       = 256;
136 static int rt_chain_length_max __read_mostly    = 20;
137
138 static struct delayed_work expires_work;
139 static unsigned long expires_ljiffies;
140
141 /*
142  *      Interface to generic destination cache.
143  */
144
145 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
146 static unsigned int      ipv4_default_advmss(const struct dst_entry *dst);
147 static unsigned int      ipv4_mtu(const struct dst_entry *dst);
148 static void              ipv4_dst_destroy(struct dst_entry *dst);
149 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
150 static void              ipv4_link_failure(struct sk_buff *skb);
151 static void              ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
152 static int rt_garbage_collect(struct dst_ops *ops);
153
154 static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
155                             int how)
156 {
157 }
158
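/* Copy-on-write of dst metrics: when a writable metrics array is needed,
 * take (or create) the inet_peer for the destination and use its metrics
 * storage, seeding it from the current read-only array.  cmpxchg() guards
 * against a concurrent writer; on success the route no longer needs the
 * fib_info that backed the old metrics, so it is dropped.
 */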
159 static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
160 {
161         struct rtable *rt = (struct rtable *) dst;
162         struct inet_peer *peer;
163         u32 *p = NULL;
164
165         peer = rt_get_peer_create(rt, rt->rt_dst);
166         if (peer) {
167                 u32 *old_p = __DST_METRICS_PTR(old);
168                 unsigned long prev, new;
169
170                 p = peer->metrics;
171                 if (inet_metrics_new(peer))
172                         memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
173
174                 new = (unsigned long) p;
175                 prev = cmpxchg(&dst->_metrics, old, new);
176
177                 if (prev != old) {
178                         p = __DST_METRICS_PTR(prev);
179                         if (prev & DST_METRICS_READ_ONLY)
180                                 p = NULL;
181                 } else {
182                         if (rt->fi) {
183                                 fib_info_put(rt->fi);
184                                 rt->fi = NULL;
185                         }
186                 }
187         }
188         return p;
189 }
190
191 static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
192                                            struct sk_buff *skb,
193                                            const void *daddr);
194
195 static struct dst_ops ipv4_dst_ops = {
196         .family =               AF_INET,
197         .protocol =             cpu_to_be16(ETH_P_IP),
198         .gc =                   rt_garbage_collect,
199         .check =                ipv4_dst_check,
200         .default_advmss =       ipv4_default_advmss,
201         .mtu =                  ipv4_mtu,
202         .cow_metrics =          ipv4_cow_metrics,
203         .destroy =              ipv4_dst_destroy,
204         .ifdown =               ipv4_dst_ifdown,
205         .negative_advice =      ipv4_negative_advice,
206         .link_failure =         ipv4_link_failure,
207         .update_pmtu =          ip_rt_update_pmtu,
208         .local_out =            __ip_local_out,
209         .neigh_lookup =         ipv4_neigh_lookup,
210 };
211
212 #define ECN_OR_COST(class)      TC_PRIO_##class
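/* Map the IPv4 TOS value to packet scheduler priority bands (see
 * rt_tos2priority()).  ECN_OR_COST() expands to the same band as its
 * argument: the lowest TOS bit may be either the old "minimize cost" bit
 * or an ECN bit, and gets no special treatment here.
 */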
213
214 const __u8 ip_tos2prio[16] = {
215         TC_PRIO_BESTEFFORT,
216         ECN_OR_COST(BESTEFFORT),
217         TC_PRIO_BESTEFFORT,
218         ECN_OR_COST(BESTEFFORT),
219         TC_PRIO_BULK,
220         ECN_OR_COST(BULK),
221         TC_PRIO_BULK,
222         ECN_OR_COST(BULK),
223         TC_PRIO_INTERACTIVE,
224         ECN_OR_COST(INTERACTIVE),
225         TC_PRIO_INTERACTIVE,
226         ECN_OR_COST(INTERACTIVE),
227         TC_PRIO_INTERACTIVE_BULK,
228         ECN_OR_COST(INTERACTIVE_BULK),
229         TC_PRIO_INTERACTIVE_BULK,
230         ECN_OR_COST(INTERACTIVE_BULK)
231 };
232 EXPORT_SYMBOL(ip_tos2prio);
233
234 /*
235  * Route cache.
236  */
237
238 /* The locking scheme is rather straightforward:
239  *
240  * 1) Read-Copy Update protects the buckets of the central route hash.
241  * 2) Only writers remove entries, and they hold the lock
242  *    as they look at rtable reference counts.
243  * 3) Only readers acquire references to rtable entries,
244  *    they do so with atomic increments and with the
245  *    lock held.
246  */
247
248 struct rt_hash_bucket {
249         struct rtable __rcu     *chain;
250 };
251
252 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
253         defined(CONFIG_PROVE_LOCKING)
254 /*
255  * Instead of using one spinlock for each rt_hash_bucket, we use a table of spinlocks.
256  * The size of this table is a power of two and depends on the number of CPUs.
257  * (on lockdep we have a quite big spinlock_t, so keep the size down there)
258  */
259 #ifdef CONFIG_LOCKDEP
260 # define RT_HASH_LOCK_SZ        256
261 #else
262 # if NR_CPUS >= 32
263 #  define RT_HASH_LOCK_SZ       4096
264 # elif NR_CPUS >= 16
265 #  define RT_HASH_LOCK_SZ       2048
266 # elif NR_CPUS >= 8
267 #  define RT_HASH_LOCK_SZ       1024
268 # elif NR_CPUS >= 4
269 #  define RT_HASH_LOCK_SZ       512
270 # else
271 #  define RT_HASH_LOCK_SZ       256
272 # endif
273 #endif
274
275 static spinlock_t       *rt_hash_locks;
276 # define rt_hash_lock_addr(slot) &rt_hash_locks[(slot) & (RT_HASH_LOCK_SZ - 1)]
277
278 static __init void rt_hash_lock_init(void)
279 {
280         int i;
281
282         rt_hash_locks = kmalloc(sizeof(spinlock_t) * RT_HASH_LOCK_SZ,
283                         GFP_KERNEL);
284         if (!rt_hash_locks)
285                 panic("IP: failed to allocate rt_hash_locks\n");
286
287         for (i = 0; i < RT_HASH_LOCK_SZ; i++)
288                 spin_lock_init(&rt_hash_locks[i]);
289 }
290 #else
291 # define rt_hash_lock_addr(slot) NULL
292
293 static inline void rt_hash_lock_init(void)
294 {
295 }
296 #endif
297
298 static struct rt_hash_bucket    *rt_hash_table __read_mostly;
299 static unsigned int             rt_hash_mask __read_mostly;
300 static unsigned int             rt_hash_log  __read_mostly;
301
302 static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
303 #define RT_CACHE_STAT_INC(field) __this_cpu_inc(rt_cache_stat.field)
304
305 static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
306                                    int genid)
307 {
308         return jhash_3words((__force u32)daddr, (__force u32)saddr,
309                             idx, genid)
310                 & rt_hash_mask;
311 }
312
313 static inline int rt_genid(struct net *net)
314 {
315         return atomic_read(&net->ipv4.rt_genid);
316 }
317
318 #ifdef CONFIG_PROC_FS
319 struct rt_cache_iter_state {
320         struct seq_net_private p;
321         int bucket;
322         int genid;
323 };
324
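/* /proc/net/rt_cache iterator helpers: walk the hash table from the highest
 * bucket down, skipping entries that belong to another namespace or an older
 * generation.  The RCU BH read lock is held while a chain is walked and is
 * dropped/re-taken when moving between buckets; rt_cache_seq_stop() releases
 * it at the end of the dump.
 */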
325 static struct rtable *rt_cache_get_first(struct seq_file *seq)
326 {
327         struct rt_cache_iter_state *st = seq->private;
328         struct rtable *r = NULL;
329
330         for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) {
331                 if (!rcu_access_pointer(rt_hash_table[st->bucket].chain))
332                         continue;
333                 rcu_read_lock_bh();
334                 r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
335                 while (r) {
336                         if (dev_net(r->dst.dev) == seq_file_net(seq) &&
337                             r->rt_genid == st->genid)
338                                 return r;
339                         r = rcu_dereference_bh(r->dst.rt_next);
340                 }
341                 rcu_read_unlock_bh();
342         }
343         return r;
344 }
345
346 static struct rtable *__rt_cache_get_next(struct seq_file *seq,
347                                           struct rtable *r)
348 {
349         struct rt_cache_iter_state *st = seq->private;
350
351         r = rcu_dereference_bh(r->dst.rt_next);
352         while (!r) {
353                 rcu_read_unlock_bh();
354                 do {
355                         if (--st->bucket < 0)
356                                 return NULL;
357                 } while (!rcu_access_pointer(rt_hash_table[st->bucket].chain));
358                 rcu_read_lock_bh();
359                 r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
360         }
361         return r;
362 }
363
364 static struct rtable *rt_cache_get_next(struct seq_file *seq,
365                                         struct rtable *r)
366 {
367         struct rt_cache_iter_state *st = seq->private;
368         while ((r = __rt_cache_get_next(seq, r)) != NULL) {
369                 if (dev_net(r->dst.dev) != seq_file_net(seq))
370                         continue;
371                 if (r->rt_genid == st->genid)
372                         break;
373         }
374         return r;
375 }
376
377 static struct rtable *rt_cache_get_idx(struct seq_file *seq, loff_t pos)
378 {
379         struct rtable *r = rt_cache_get_first(seq);
380
381         if (r)
382                 while (pos && (r = rt_cache_get_next(seq, r)))
383                         --pos;
384         return pos ? NULL : r;
385 }
386
387 static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
388 {
389         struct rt_cache_iter_state *st = seq->private;
390         if (*pos)
391                 return rt_cache_get_idx(seq, *pos - 1);
392         st->genid = rt_genid(seq_file_net(seq));
393         return SEQ_START_TOKEN;
394 }
395
396 static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
397 {
398         struct rtable *r;
399
400         if (v == SEQ_START_TOKEN)
401                 r = rt_cache_get_first(seq);
402         else
403                 r = rt_cache_get_next(seq, v);
404         ++*pos;
405         return r;
406 }
407
408 static void rt_cache_seq_stop(struct seq_file *seq, void *v)
409 {
410         if (v && v != SEQ_START_TOKEN)
411                 rcu_read_unlock_bh();
412 }
413
414 static int rt_cache_seq_show(struct seq_file *seq, void *v)
415 {
416         if (v == SEQ_START_TOKEN)
417                 seq_printf(seq, "%-127s\n",
418                            "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
419                            "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
420                            "HHUptod\tSpecDst");
421         else {
422                 struct rtable *r = v;
423                 int len;
424
425                 seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t"
426                            "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
427                            r->dst.dev ? r->dst.dev->name : "*",
428                            (__force u32)r->rt_dst,
429                            (__force u32)r->rt_gateway,
430                            r->rt_flags, atomic_read(&r->dst.__refcnt),
431                            r->dst.__use, 0, (__force u32)r->rt_src,
432                            dst_metric_advmss(&r->dst) + 40,
433                            dst_metric(&r->dst, RTAX_WINDOW), 0,
434                            r->rt_key_tos,
435                            -1, 0, 0, &len);
436
437                 seq_printf(seq, "%*s\n", 127 - len, "");
438         }
439         return 0;
440 }
441
442 static const struct seq_operations rt_cache_seq_ops = {
443         .start  = rt_cache_seq_start,
444         .next   = rt_cache_seq_next,
445         .stop   = rt_cache_seq_stop,
446         .show   = rt_cache_seq_show,
447 };
448
449 static int rt_cache_seq_open(struct inode *inode, struct file *file)
450 {
451         return seq_open_net(inode, file, &rt_cache_seq_ops,
452                         sizeof(struct rt_cache_iter_state));
453 }
454
455 static const struct file_operations rt_cache_seq_fops = {
456         .owner   = THIS_MODULE,
457         .open    = rt_cache_seq_open,
458         .read    = seq_read,
459         .llseek  = seq_lseek,
460         .release = seq_release_net,
461 };
462
463
464 static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
465 {
466         int cpu;
467
468         if (*pos == 0)
469                 return SEQ_START_TOKEN;
470
471         for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
472                 if (!cpu_possible(cpu))
473                         continue;
474                 *pos = cpu+1;
475                 return &per_cpu(rt_cache_stat, cpu);
476         }
477         return NULL;
478 }
479
480 static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
481 {
482         int cpu;
483
484         for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
485                 if (!cpu_possible(cpu))
486                         continue;
487                 *pos = cpu+1;
488                 return &per_cpu(rt_cache_stat, cpu);
489         }
490         return NULL;
491
492 }
493
494 static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
495 {
496
497 }
498
499 static int rt_cpu_seq_show(struct seq_file *seq, void *v)
500 {
501         struct rt_cache_stat *st = v;
502
503         if (v == SEQ_START_TOKEN) {
504                 seq_printf(seq, "entries  in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src  out_hit out_slow_tot out_slow_mc  gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
505                 return 0;
506         }
507
508         seq_printf(seq,"%08x  %08x %08x %08x %08x %08x %08x %08x "
509                    " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
510                    dst_entries_get_slow(&ipv4_dst_ops),
511                    st->in_hit,
512                    st->in_slow_tot,
513                    st->in_slow_mc,
514                    st->in_no_route,
515                    st->in_brd,
516                    st->in_martian_dst,
517                    st->in_martian_src,
518
519                    st->out_hit,
520                    st->out_slow_tot,
521                    st->out_slow_mc,
522
523                    st->gc_total,
524                    st->gc_ignored,
525                    st->gc_goal_miss,
526                    st->gc_dst_overflow,
527                    st->in_hlist_search,
528                    st->out_hlist_search
529                 );
530         return 0;
531 }
532
533 static const struct seq_operations rt_cpu_seq_ops = {
534         .start  = rt_cpu_seq_start,
535         .next   = rt_cpu_seq_next,
536         .stop   = rt_cpu_seq_stop,
537         .show   = rt_cpu_seq_show,
538 };
539
540
541 static int rt_cpu_seq_open(struct inode *inode, struct file *file)
542 {
543         return seq_open(file, &rt_cpu_seq_ops);
544 }
545
546 static const struct file_operations rt_cpu_seq_fops = {
547         .owner   = THIS_MODULE,
548         .open    = rt_cpu_seq_open,
549         .read    = seq_read,
550         .llseek  = seq_lseek,
551         .release = seq_release,
552 };
553
554 #ifdef CONFIG_IP_ROUTE_CLASSID
555 static int rt_acct_proc_show(struct seq_file *m, void *v)
556 {
557         struct ip_rt_acct *dst, *src;
558         unsigned int i, j;
559
560         dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
561         if (!dst)
562                 return -ENOMEM;
563
564         for_each_possible_cpu(i) {
565                 src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
566                 for (j = 0; j < 256; j++) {
567                         dst[j].o_bytes   += src[j].o_bytes;
568                         dst[j].o_packets += src[j].o_packets;
569                         dst[j].i_bytes   += src[j].i_bytes;
570                         dst[j].i_packets += src[j].i_packets;
571                 }
572         }
573
574         seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
575         kfree(dst);
576         return 0;
577 }
578
579 static int rt_acct_proc_open(struct inode *inode, struct file *file)
580 {
581         return single_open(file, rt_acct_proc_show, NULL);
582 }
583
584 static const struct file_operations rt_acct_proc_fops = {
585         .owner          = THIS_MODULE,
586         .open           = rt_acct_proc_open,
587         .read           = seq_read,
588         .llseek         = seq_lseek,
589         .release        = single_release,
590 };
591 #endif
592
593 static int __net_init ip_rt_do_proc_init(struct net *net)
594 {
595         struct proc_dir_entry *pde;
596
597         pde = proc_net_fops_create(net, "rt_cache", S_IRUGO,
598                         &rt_cache_seq_fops);
599         if (!pde)
600                 goto err1;
601
602         pde = proc_create("rt_cache", S_IRUGO,
603                           net->proc_net_stat, &rt_cpu_seq_fops);
604         if (!pde)
605                 goto err2;
606
607 #ifdef CONFIG_IP_ROUTE_CLASSID
608         pde = proc_create("rt_acct", 0, net->proc_net, &rt_acct_proc_fops);
609         if (!pde)
610                 goto err3;
611 #endif
612         return 0;
613
614 #ifdef CONFIG_IP_ROUTE_CLASSID
615 err3:
616         remove_proc_entry("rt_cache", net->proc_net_stat);
617 #endif
618 err2:
619         remove_proc_entry("rt_cache", net->proc_net);
620 err1:
621         return -ENOMEM;
622 }
623
624 static void __net_exit ip_rt_do_proc_exit(struct net *net)
625 {
626         remove_proc_entry("rt_cache", net->proc_net_stat);
627         remove_proc_entry("rt_cache", net->proc_net);
628 #ifdef CONFIG_IP_ROUTE_CLASSID
629         remove_proc_entry("rt_acct", net->proc_net);
630 #endif
631 }
632
633 static struct pernet_operations ip_rt_proc_ops __net_initdata =  {
634         .init = ip_rt_do_proc_init,
635         .exit = ip_rt_do_proc_exit,
636 };
637
638 static int __init ip_rt_proc_init(void)
639 {
640         return register_pernet_subsys(&ip_rt_proc_ops);
641 }
642
643 #else
644 static inline int ip_rt_proc_init(void)
645 {
646         return 0;
647 }
648 #endif /* CONFIG_PROC_FS */
649
650 static inline void rt_free(struct rtable *rt)
651 {
652         call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
653 }
654
655 static inline void rt_drop(struct rtable *rt)
656 {
657         ip_rt_put(rt);
658         call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
659 }
660
661 static inline int rt_fast_clean(struct rtable *rth)
662 {
663         /* Kill broadcast/multicast entries very aggressively, if they
664            collide in hash table with more useful entries */
665         return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
666                 rt_is_input_route(rth) && rth->dst.rt_next;
667 }
668
669 static inline int rt_valuable(struct rtable *rth)
670 {
671         return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) ||
672                 rth->dst.expires;
673 }
674
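/* Decide whether an unreferenced cache entry may be reclaimed.  An entry is
 * kept if it is younger than tmo1 and not a fast-clean candidate (a
 * broadcast/multicast input route colliding with other entries), or younger
 * than tmo2 and "valuable" (redirected, notify, or carrying an expiry);
 * everything else may be expired.
 */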
675 static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long tmo2)
676 {
677         unsigned long age;
678         int ret = 0;
679
680         if (atomic_read(&rth->dst.__refcnt))
681                 goto out;
682
683         age = jiffies - rth->dst.lastuse;
684         if ((age <= tmo1 && !rt_fast_clean(rth)) ||
685             (age <= tmo2 && rt_valuable(rth)))
686                 goto out;
687         ret = 1;
688 out:    return ret;
689 }
690
691 /* Bits of score are:
692  * 31: very valuable
693  * 30: not quite useless
694  * 29..0: usage counter
695  */
696 static inline u32 rt_score(struct rtable *rt)
697 {
698         u32 score = jiffies - rt->dst.lastuse;
699
700         score = ~score & ~(3<<30);
701
702         if (rt_valuable(rt))
703                 score |= (1<<31);
704
705         if (rt_is_output_route(rt) ||
706             !(rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL)))
707                 score |= (1<<30);
708
709         return score;
710 }
711
712 static inline bool rt_caching(const struct net *net)
713 {
714         return net->ipv4.current_rt_cache_rebuild_count <=
715                 net->ipv4.sysctl_rt_cache_rebuild_count;
716 }
717
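/* compare_hash_inputs() checks only destination, source and input interface,
 * i.e. the identity used to detect aliases within one chain; compare_keys()
 * is the full lookup match and also compares the mark, TOS and output
 * interface.
 */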
718 static inline bool compare_hash_inputs(const struct rtable *rt1,
719                                        const struct rtable *rt2)
720 {
721         return ((((__force u32)rt1->rt_key_dst ^ (__force u32)rt2->rt_key_dst) |
722                 ((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) |
723                 (rt1->rt_route_iif ^ rt2->rt_route_iif)) == 0);
724 }
725
726 static inline int compare_keys(struct rtable *rt1, struct rtable *rt2)
727 {
728         return (((__force u32)rt1->rt_key_dst ^ (__force u32)rt2->rt_key_dst) |
729                 ((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) |
730                 (rt1->rt_mark ^ rt2->rt_mark) |
731                 (rt1->rt_key_tos ^ rt2->rt_key_tos) |
732                 (rt1->rt_route_iif ^ rt2->rt_route_iif) |
733                 (rt1->rt_oif ^ rt2->rt_oif)) == 0;
734 }
735
736 static inline int compare_netns(struct rtable *rt1, struct rtable *rt2)
737 {
738         return net_eq(dev_net(rt1->dst.dev), dev_net(rt2->dst.dev));
739 }
740
741 static inline int rt_is_expired(struct rtable *rth)
742 {
743         return rth->rt_genid != rt_genid(dev_net(rth->dst.dev));
744 }
745
746 /*
747  * Perform a full scan of the hash table and free all entries.
748  * Can be called by a softirq or a process.
749  * In the latter case, we want to reschedule if necessary.
750  */
751 static void rt_do_flush(struct net *net, int process_context)
752 {
753         unsigned int i;
754         struct rtable *rth, *next;
755
756         for (i = 0; i <= rt_hash_mask; i++) {
757                 struct rtable __rcu **pprev;
758                 struct rtable *list;
759
760                 if (process_context && need_resched())
761                         cond_resched();
762                 rth = rcu_access_pointer(rt_hash_table[i].chain);
763                 if (!rth)
764                         continue;
765
766                 spin_lock_bh(rt_hash_lock_addr(i));
767
768                 list = NULL;
769                 pprev = &rt_hash_table[i].chain;
770                 rth = rcu_dereference_protected(*pprev,
771                         lockdep_is_held(rt_hash_lock_addr(i)));
772
773                 while (rth) {
774                         next = rcu_dereference_protected(rth->dst.rt_next,
775                                 lockdep_is_held(rt_hash_lock_addr(i)));
776
777                         if (!net ||
778                             net_eq(dev_net(rth->dst.dev), net)) {
779                                 rcu_assign_pointer(*pprev, next);
780                                 rcu_assign_pointer(rth->dst.rt_next, list);
781                                 list = rth;
782                         } else {
783                                 pprev = &rth->dst.rt_next;
784                         }
785                         rth = next;
786                 }
787
788                 spin_unlock_bh(rt_hash_lock_addr(i));
789
790                 for (; list; list = next) {
791                         next = rcu_dereference_protected(list->dst.rt_next, 1);
792                         rt_free(list);
793                 }
794         }
795 }
796
797 /*
798  * While freeing expired entries, we compute average chain length
799  * and standard deviation, using fixed-point arithmetic.
800  * This gives an estimate of rt_chain_length_max:
801  *  rt_chain_length_max = max(elasticity, AVG + 4*SD)
802  * We use 3 bits for the fractional part, and 29 (or 61) for the magnitude.
803  */
804
805 #define FRACT_BITS 3
806 #define ONE (1UL << FRACT_BITS)
807
808 /*
809  * Given a hash chain and an item in this hash chain,
810  * find if a previous entry has the same hash_inputs
811  * (but differs on tos, mark or oif)
812  * Returns 0 if an alias is found.
813  * Returns ONE if rth has no alias before itself.
814  */
815 static int has_noalias(const struct rtable *head, const struct rtable *rth)
816 {
817         const struct rtable *aux = head;
818
819         while (aux != rth) {
820                 if (compare_hash_inputs(aux, rth))
821                         return 0;
822                 aux = rcu_dereference_protected(aux->dst.rt_next, 1);
823         }
824         return ONE;
825 }
826
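/* Age a time-proportional slice of the hash table.  With delta jiffies
 * elapsed since the previous run we scan delta * table_size / ip_rt_gc_timeout
 * buckets, so the whole table is covered roughly once per gc timeout.  Stale
 * and expired entries are freed, and the observed chain lengths feed the
 * avg + 4*sd estimate of rt_chain_length_max described above.
 */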
827 static void rt_check_expire(void)
828 {
829         static unsigned int rover;
830         unsigned int i = rover, goal;
831         struct rtable *rth;
832         struct rtable __rcu **rthp;
833         unsigned long samples = 0;
834         unsigned long sum = 0, sum2 = 0;
835         unsigned long delta;
836         u64 mult;
837
838         delta = jiffies - expires_ljiffies;
839         expires_ljiffies = jiffies;
840         mult = ((u64)delta) << rt_hash_log;
841         if (ip_rt_gc_timeout > 1)
842                 do_div(mult, ip_rt_gc_timeout);
843         goal = (unsigned int)mult;
844         if (goal > rt_hash_mask)
845                 goal = rt_hash_mask + 1;
846         for (; goal > 0; goal--) {
847                 unsigned long tmo = ip_rt_gc_timeout;
848                 unsigned long length;
849
850                 i = (i + 1) & rt_hash_mask;
851                 rthp = &rt_hash_table[i].chain;
852
853                 if (need_resched())
854                         cond_resched();
855
856                 samples++;
857
858                 if (rcu_dereference_raw(*rthp) == NULL)
859                         continue;
860                 length = 0;
861                 spin_lock_bh(rt_hash_lock_addr(i));
862                 while ((rth = rcu_dereference_protected(*rthp,
863                                         lockdep_is_held(rt_hash_lock_addr(i)))) != NULL) {
864                         prefetch(rth->dst.rt_next);
865                         if (rt_is_expired(rth) ||
866                             rt_may_expire(rth, tmo, ip_rt_gc_timeout)) {
867                                 *rthp = rth->dst.rt_next;
868                                 rt_free(rth);
869                                 continue;
870                         }
871
872                         /* We only count entries on a chain with equal
873                          * hash inputs once so that entries for
874                          * different QOS levels, and other non-hash
875                          * input attributes don't unfairly skew the
876                          * length computation
877                          */
878                         tmo >>= 1;
879                         rthp = &rth->dst.rt_next;
880                         length += has_noalias(rt_hash_table[i].chain, rth);
881                 }
882                 spin_unlock_bh(rt_hash_lock_addr(i));
883                 sum += length;
884                 sum2 += length*length;
885         }
886         if (samples) {
887                 unsigned long avg = sum / samples;
888                 unsigned long sd = int_sqrt(sum2 / samples - avg*avg);
889                 rt_chain_length_max = max_t(unsigned long,
890                                         ip_rt_gc_elasticity,
891                                         (avg + 4*sd) >> FRACT_BITS);
892         }
893         rover = i;
894 }
895
896 /*
897  * rt_worker_func() is run in process context.
898  * We call rt_check_expire() to scan part of the hash table.
899  */
900 static void rt_worker_func(struct work_struct *work)
901 {
902         rt_check_expire();
903         schedule_delayed_work(&expires_work, ip_rt_gc_interval);
904 }
905
906 /*
907  * Perturbation of rt_genid by a small quantity [1..256]
908  * Using 8 bits of shuffling ensures we can call rt_cache_invalidate()
909  * many times (2^24) before a recently used rt_genid value can recur.
910  * The Jenkins hash is strong enough that little changes of rt_genid are OK.
911  */
912 static void rt_cache_invalidate(struct net *net)
913 {
914         unsigned char shuffle;
915
916         get_random_bytes(&shuffle, sizeof(shuffle));
917         atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
918         inetpeer_invalidate_family(AF_INET);
919 }
920
921 /*
922  * delay < 0  : invalidate cache (fast : entries will be deleted later)
923  * delay >= 0 : invalidate & flush cache (can be long)
924  */
925 void rt_cache_flush(struct net *net, int delay)
926 {
927         rt_cache_invalidate(net);
928         if (delay >= 0)
929                 rt_do_flush(net, !in_softirq());
930 }
931
932 /* Flush previously invalidated entries from the cache */
933 void rt_cache_flush_batch(struct net *net)
934 {
935         rt_do_flush(net, !in_softirq());
936 }
937
938 static void rt_emergency_hash_rebuild(struct net *net)
939 {
940         net_warn_ratelimited("Route hash chain too long!\n");
941         rt_cache_invalidate(net);
942 }
943
944 /*
945    Short description of GC goals.
946
947    We want to build an algorithm which keeps the routing cache
948    at some equilibrium point, where the number of aged-off entries
949    is approximately equal to the number of newly generated ones.
950
951    The current expiration strength is the variable "expire".
952    We try to adjust it dynamically, so that when networking
953    is idle, expire is large enough to keep enough warm entries,
954    and when load increases it shrinks to limit the cache size.
955  */
956
957 static int rt_garbage_collect(struct dst_ops *ops)
958 {
959         static unsigned long expire = RT_GC_TIMEOUT;
960         static unsigned long last_gc;
961         static int rover;
962         static int equilibrium;
963         struct rtable *rth;
964         struct rtable __rcu **rthp;
965         unsigned long now = jiffies;
966         int goal;
967         int entries = dst_entries_get_fast(&ipv4_dst_ops);
968
969         /*
970          * Garbage collection is pretty expensive,
971          * do not make it too frequently.
972          */
973
974         RT_CACHE_STAT_INC(gc_total);
975
976         if (now - last_gc < ip_rt_gc_min_interval &&
977             entries < ip_rt_max_size) {
978                 RT_CACHE_STAT_INC(gc_ignored);
979                 goto out;
980         }
981
982         entries = dst_entries_get_slow(&ipv4_dst_ops);
983         /* Calculate number of entries, which we want to expire now. */
984         goal = entries - (ip_rt_gc_elasticity << rt_hash_log);
985         if (goal <= 0) {
986                 if (equilibrium < ipv4_dst_ops.gc_thresh)
987                         equilibrium = ipv4_dst_ops.gc_thresh;
988                 goal = entries - equilibrium;
989                 if (goal > 0) {
990                         equilibrium += min_t(unsigned int, goal >> 1, rt_hash_mask + 1);
991                         goal = entries - equilibrium;
992                 }
993         } else {
994                 /* We are in a dangerous area. Try to reduce the cache really
995                  * aggressively.
996                  */
997                 goal = max_t(unsigned int, goal >> 1, rt_hash_mask + 1);
998                 equilibrium = entries - goal;
999         }
1000
1001         if (now - last_gc >= ip_rt_gc_min_interval)
1002                 last_gc = now;
1003
1004         if (goal <= 0) {
1005                 equilibrium += goal;
1006                 goto work_done;
1007         }
1008
1009         do {
1010                 int i, k;
1011
1012                 for (i = rt_hash_mask, k = rover; i >= 0; i--) {
1013                         unsigned long tmo = expire;
1014
1015                         k = (k + 1) & rt_hash_mask;
1016                         rthp = &rt_hash_table[k].chain;
1017                         spin_lock_bh(rt_hash_lock_addr(k));
1018                         while ((rth = rcu_dereference_protected(*rthp,
1019                                         lockdep_is_held(rt_hash_lock_addr(k)))) != NULL) {
1020                                 if (!rt_is_expired(rth) &&
1021                                         !rt_may_expire(rth, tmo, expire)) {
1022                                         tmo >>= 1;
1023                                         rthp = &rth->dst.rt_next;
1024                                         continue;
1025                                 }
1026                                 *rthp = rth->dst.rt_next;
1027                                 rt_free(rth);
1028                                 goal--;
1029                         }
1030                         spin_unlock_bh(rt_hash_lock_addr(k));
1031                         if (goal <= 0)
1032                                 break;
1033                 }
1034                 rover = k;
1035
1036                 if (goal <= 0)
1037                         goto work_done;
1038
1039                 /* Goal is not achieved. We stop the process if:
1040
1041                    - expire has been reduced to zero; otherwise, expire is halved.
1042                    - the table is not full.
1043                    - we are called from interrupt context.
1044                    - the jiffies check is just a fallback/debug loop breaker.
1045                      We will not spin here for a long time in any case.
1046                  */
1047
1048                 RT_CACHE_STAT_INC(gc_goal_miss);
1049
1050                 if (expire == 0)
1051                         break;
1052
1053                 expire >>= 1;
1054
1055                 if (dst_entries_get_fast(&ipv4_dst_ops) < ip_rt_max_size)
1056                         goto out;
1057         } while (!in_softirq() && time_before_eq(jiffies, now));
1058
1059         if (dst_entries_get_fast(&ipv4_dst_ops) < ip_rt_max_size)
1060                 goto out;
1061         if (dst_entries_get_slow(&ipv4_dst_ops) < ip_rt_max_size)
1062                 goto out;
1063         net_warn_ratelimited("dst cache overflow\n");
1064         RT_CACHE_STAT_INC(gc_dst_overflow);
1065         return 1;
1066
1067 work_done:
1068         expire += ip_rt_gc_min_interval;
1069         if (expire > ip_rt_gc_timeout ||
1070             dst_entries_get_fast(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh ||
1071             dst_entries_get_slow(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh)
1072                 expire = ip_rt_gc_timeout;
1073 out:    return 0;
1074 }
1075
1076 /*
1077  * Returns number of entries in a hash chain that have different hash_inputs
1078  */
1079 static int slow_chain_length(const struct rtable *head)
1080 {
1081         int length = 0;
1082         const struct rtable *rth = head;
1083
1084         while (rth) {
1085                 length += has_noalias(head, rth);
1086                 rth = rcu_dereference_protected(rth->dst.rt_next, 1);
1087         }
1088         return length >> FRACT_BITS;
1089 }
1090
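/* Resolve the neighbour this route actually transmits to: prefer the cached
 * gateway address, fall back to the packet's destination address, and create
 * a new ARP entry if none exists yet.
 */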
1091 static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
1092                                            struct sk_buff *skb,
1093                                            const void *daddr)
1094 {
1095         struct net_device *dev = dst->dev;
1096         const __be32 *pkey = daddr;
1097         const struct rtable *rt;
1098         struct neighbour *n;
1099
1100         rt = (const struct rtable *) dst;
1101         if (rt->rt_gateway)
1102                 pkey = (const __be32 *) &rt->rt_gateway;
1103         else if (skb)
1104                 pkey = &ip_hdr(skb)->daddr;
1105
1106         n = __ipv4_neigh_lookup(dev, *(__force u32 *)pkey);
1107         if (n)
1108                 return n;
1109         return neigh_create(&arp_tbl, pkey, dev);
1110 }
1111
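/* Insert @rt into hash bucket @hash, or return an existing entry with the
 * same keys.  A key match moves the existing entry to the head of its chain
 * and drops @rt.  While walking the chain we remember the lowest-scored
 * unreferenced entry so it can be evicted if the chain exceeds
 * ip_rt_gc_elasticity, and we trigger an emergency rebuild (generation bump)
 * when even chains of distinct keys exceed rt_chain_length_max.
 */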
1112 static struct rtable *rt_intern_hash(unsigned int hash, struct rtable *rt,
1113                                      struct sk_buff *skb, int ifindex)
1114 {
1115         struct rtable   *rth, *cand;
1116         struct rtable __rcu **rthp, **candp;
1117         unsigned long   now;
1118         u32             min_score;
1119         int             chain_length;
1120
1121 restart:
1122         chain_length = 0;
1123         min_score = ~(u32)0;
1124         cand = NULL;
1125         candp = NULL;
1126         now = jiffies;
1127
1128         if (!rt_caching(dev_net(rt->dst.dev)) || (rt->dst.flags & DST_NOCACHE)) {
1129                 /*
1130                  * If we're not caching, just tell the caller we
1131                  * were successful and don't touch the route.  The
1132                  * caller holds the sole reference to the cache entry, and
1133                  * it will be released when the caller is done with it.
1134                  * If we drop it here, the callers have no way to resolve routes
1135                  * when we're not caching.  Instead, just point *rp at rt, so
1136                  * the caller gets a single use out of the route
1137                  * Note that we do rt_free on this new route entry, so that
1138                  * once its refcount hits zero, we are still able to reap it
1139                  * (Thanks Alexey)
1140                  * Note: To avoid expensive rcu stuff for this uncached dst,
1141                  * we set DST_NOCACHE so that dst_release() can free dst without
1142                  * waiting for a grace period.
1143                  */
1144
1145                 rt->dst.flags |= DST_NOCACHE;
1146                 goto skip_hashing;
1147         }
1148
1149         rthp = &rt_hash_table[hash].chain;
1150
1151         spin_lock_bh(rt_hash_lock_addr(hash));
1152         while ((rth = rcu_dereference_protected(*rthp,
1153                         lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) {
1154                 if (rt_is_expired(rth)) {
1155                         *rthp = rth->dst.rt_next;
1156                         rt_free(rth);
1157                         continue;
1158                 }
1159                 if (compare_keys(rth, rt) && compare_netns(rth, rt)) {
1160                         /* Put it first */
1161                         *rthp = rth->dst.rt_next;
1162                         /*
1163                          * Since lookup is lockfree, the deletion
1164                          * must be visible to another weakly ordered CPU before
1165                          * the insertion at the start of the hash chain.
1166                          */
1167                         rcu_assign_pointer(rth->dst.rt_next,
1168                                            rt_hash_table[hash].chain);
1169                         /*
1170                          * Since lookup is lockfree, the update writes
1171                          * must be ordered for consistency on SMP.
1172                          */
1173                         rcu_assign_pointer(rt_hash_table[hash].chain, rth);
1174
1175                         dst_use(&rth->dst, now);
1176                         spin_unlock_bh(rt_hash_lock_addr(hash));
1177
1178                         rt_drop(rt);
1179                         if (skb)
1180                                 skb_dst_set(skb, &rth->dst);
1181                         return rth;
1182                 }
1183
1184                 if (!atomic_read(&rth->dst.__refcnt)) {
1185                         u32 score = rt_score(rth);
1186
1187                         if (score <= min_score) {
1188                                 cand = rth;
1189                                 candp = rthp;
1190                                 min_score = score;
1191                         }
1192                 }
1193
1194                 chain_length++;
1195
1196                 rthp = &rth->dst.rt_next;
1197         }
1198
1199         if (cand) {
1200                 /* ip_rt_gc_elasticity used to be the average chain length;
1201                  * when exceeded, gc becomes really aggressive.
1202                  *
1203                  * The second limit is less certain. At the moment it allows
1204                  * only 2 entries per bucket. We will see.
1205                  */
1206                 if (chain_length > ip_rt_gc_elasticity) {
1207                         *candp = cand->dst.rt_next;
1208                         rt_free(cand);
1209                 }
1210         } else {
1211                 if (chain_length > rt_chain_length_max &&
1212                     slow_chain_length(rt_hash_table[hash].chain) > rt_chain_length_max) {
1213                         struct net *net = dev_net(rt->dst.dev);
1214                         int num = ++net->ipv4.current_rt_cache_rebuild_count;
1215                         if (!rt_caching(net)) {
1216                                 pr_warn("%s: %d rebuilds is over limit, route caching disabled\n",
1217                                         rt->dst.dev->name, num);
1218                         }
1219                         rt_emergency_hash_rebuild(net);
1220                         spin_unlock_bh(rt_hash_lock_addr(hash));
1221
1222                         hash = rt_hash(rt->rt_key_dst, rt->rt_key_src,
1223                                         ifindex, rt_genid(net));
1224                         goto restart;
1225                 }
1226         }
1227
1228         rt->dst.rt_next = rt_hash_table[hash].chain;
1229
1230         /*
1231          * Since lookup is lockfree, we must make sure
1232          * previous writes to rt are committed to memory
1233          * before making rt visible to other CPUS.
1234          */
1235         rcu_assign_pointer(rt_hash_table[hash].chain, rt);
1236
1237         spin_unlock_bh(rt_hash_lock_addr(hash));
1238
1239 skip_hashing:
1240         if (skb)
1241                 skb_dst_set(skb, &rt->dst);
1242         return rt;
1243 }
1244
1245 void rt_bind_peer(struct rtable *rt, __be32 daddr, int create)
1246 {
1247         struct inet_peer_base *base;
1248         struct inet_peer *peer;
1249
1250         base = inetpeer_base_ptr(rt->_peer);
1251         if (!base)
1252                 return;
1253
1254         peer = inet_getpeer_v4(base, daddr, create);
1255         if (peer) {
1256                 if (!rt_set_peer(rt, peer))
1257                         inet_putpeer(peer);
1258         }
1259 }
1260
1261 /*
1262  * Peer allocation may fail only in serious out-of-memory conditions.  However
1263  * we can still generate some output.
1264  * Random ID selection looks a bit dangerous because we have no chance of
1265  * selecting an ID that is unique over a reasonable period of time.
1266  * But a broken packet identifier may be better than no packet at all.
1267  */
1268 static void ip_select_fb_ident(struct iphdr *iph)
1269 {
1270         static DEFINE_SPINLOCK(ip_fb_id_lock);
1271         static u32 ip_fallback_id;
1272         u32 salt;
1273
1274         spin_lock_bh(&ip_fb_id_lock);
1275         salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr);
1276         iph->id = htons(salt & 0xFFFF);
1277         ip_fallback_id = salt;
1278         spin_unlock_bh(&ip_fb_id_lock);
1279 }
1280
1281 void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
1282 {
1283         struct net *net = dev_net(dst->dev);
1284         struct inet_peer *peer;
1285
1286         peer = inet_getpeer_v4(net->ipv4.peers, iph->daddr, 1);
1287         if (peer) {
1288                 iph->id = htons(inet_getid(peer, more));
1289                 inet_putpeer(peer);
1290                 return;
1291         }
1292
1293         ip_select_fb_ident(iph);
1294 }
1295 EXPORT_SYMBOL(__ip_select_ident);
1296
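/* Unlink @rt from hash chain @hash and drop the caller's reference; any
 * expired entries found while walking the chain are freed as well.
 */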
1297 static void rt_del(unsigned int hash, struct rtable *rt)
1298 {
1299         struct rtable __rcu **rthp;
1300         struct rtable *aux;
1301
1302         rthp = &rt_hash_table[hash].chain;
1303         spin_lock_bh(rt_hash_lock_addr(hash));
1304         ip_rt_put(rt);
1305         while ((aux = rcu_dereference_protected(*rthp,
1306                         lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) {
1307                 if (aux == rt || rt_is_expired(aux)) {
1308                         *rthp = aux->dst.rt_next;
1309                         rt_free(aux);
1310                         continue;
1311                 }
1312                 rthp = &aux->dst.rt_next;
1313         }
1314         spin_unlock_bh(rt_hash_lock_addr(hash));
1315 }
1316
1317 /* called in rcu_read_lock() section */
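/* Handle an ICMP redirect learned from @old_gw for @daddr: after sanity
 * checks on the advertised gateway (not multicast/broadcast/zeronet and,
 * for non-shared media, on-link), rewrite rt_gateway on every matching
 * cached output route once the new gateway's neighbour entry is NUD_VALID,
 * and flag those routes RTCF_REDIRECTED.
 */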
1318 void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1319                     __be32 saddr, struct net_device *dev)
1320 {
1321         int s, i;
1322         struct in_device *in_dev = __in_dev_get_rcu(dev);
1323         __be32 skeys[2] = { saddr, 0 };
1324         int    ikeys[2] = { dev->ifindex, 0 };
1325         struct net *net;
1326
1327         if (!in_dev)
1328                 return;
1329
1330         net = dev_net(dev);
1331         if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
1332             ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
1333             ipv4_is_zeronet(new_gw))
1334                 goto reject_redirect;
1335
1336         if (!IN_DEV_SHARED_MEDIA(in_dev)) {
1337                 if (!inet_addr_onlink(in_dev, new_gw, old_gw))
1338                         goto reject_redirect;
1339                 if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
1340                         goto reject_redirect;
1341         } else {
1342                 if (inet_addr_type(net, new_gw) != RTN_UNICAST)
1343                         goto reject_redirect;
1344         }
1345
1346         for (s = 0; s < 2; s++) {
1347                 for (i = 0; i < 2; i++) {
1348                         unsigned int hash;
1349                         struct rtable __rcu **rthp;
1350                         struct rtable *rt;
1351
1352                         hash = rt_hash(daddr, skeys[s], ikeys[i], rt_genid(net));
1353
1354                         rthp = &rt_hash_table[hash].chain;
1355
1356                         while ((rt = rcu_dereference(*rthp)) != NULL) {
1357                                 struct neighbour *n;
1358
1359                                 rthp = &rt->dst.rt_next;
1360
1361                                 if (rt->rt_key_dst != daddr ||
1362                                     rt->rt_key_src != skeys[s] ||
1363                                     rt->rt_oif != ikeys[i] ||
1364                                     rt_is_input_route(rt) ||
1365                                     rt_is_expired(rt) ||
1366                                     !net_eq(dev_net(rt->dst.dev), net) ||
1367                                     rt->dst.error ||
1368                                     rt->dst.dev != dev ||
1369                                     rt->rt_gateway != old_gw)
1370                                         continue;
1371
1372                                 n = ipv4_neigh_lookup(&rt->dst, NULL, &new_gw);
1373                                 if (n) {
1374                                         if (!(n->nud_state & NUD_VALID)) {
1375                                                 neigh_event_send(n, NULL);
1376                                         } else {
1377                                                 rt->rt_gateway = new_gw;
1378                                                 rt->rt_flags |= RTCF_REDIRECTED;
1379                                                 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
1380                                         }
1381                                         neigh_release(n);
1382                                 }
1383                         }
1384                 }
1385         }
1386         return;
1387
1388 reject_redirect:
1389 #ifdef CONFIG_IP_ROUTE_VERBOSE
1390         if (IN_DEV_LOG_MARTIANS(in_dev))
1391                 net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n"
1392                                      "  Advised path = %pI4 -> %pI4\n",
1393                                      &old_gw, dev->name, &new_gw,
1394                                      &saddr, &daddr);
1395 #endif
1396         ;
1397 }
1398
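/* dst_ops->negative_advice handler: called when a socket suspects its cached
 * route is stale.  Obsolete entries are simply released; redirected or
 * expiring entries are also unhashed so the next lookup builds a fresh one.
 */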
1399 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
1400 {
1401         struct rtable *rt = (struct rtable *)dst;
1402         struct dst_entry *ret = dst;
1403
1404         if (rt) {
1405                 if (dst->obsolete > 0) {
1406                         ip_rt_put(rt);
1407                         ret = NULL;
1408                 } else if ((rt->rt_flags & RTCF_REDIRECTED) ||
1409                            rt->dst.expires) {
1410                         unsigned int hash = rt_hash(rt->rt_key_dst, rt->rt_key_src,
1411                                                 rt->rt_oif,
1412                                                 rt_genid(dev_net(dst->dev)));
1413                         rt_del(hash, rt);
1414                         ret = NULL;
1415                 }
1416         }
1417         return ret;
1418 }
1419
1420 /*
1421  * Algorithm:
1422  *      1. The first ip_rt_redirect_number redirects are sent
1423  *         with exponential backoff, then we stop sending them at all,
1424  *         assuming that the host ignores our redirects.
1425  *      2. If we did not see packets requiring redirects
1426  *         during ip_rt_redirect_silence, we assume that the host
1427  *         has forgotten the redirected route and we start sending redirects again.
1428  *
1429  * This algorithm is much cheaper and more intelligent than dumb load limiting
1430  * in icmp.c.
1431  *
1432  * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
1433  * and "frag. need" (breaks PMTU discovery) in icmp.c.
1434  */
1435
1436 void ip_rt_send_redirect(struct sk_buff *skb)
1437 {
1438         struct rtable *rt = skb_rtable(skb);
1439         struct in_device *in_dev;
1440         struct inet_peer *peer;
1441         struct net *net;
1442         int log_martians;
1443
1444         rcu_read_lock();
1445         in_dev = __in_dev_get_rcu(rt->dst.dev);
1446         if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
1447                 rcu_read_unlock();
1448                 return;
1449         }
1450         log_martians = IN_DEV_LOG_MARTIANS(in_dev);
1451         rcu_read_unlock();
1452
1453         net = dev_net(rt->dst.dev);
1454         peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 1);
1455         if (!peer) {
1456                 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
1457                 return;
1458         }
1459
1460         /* No redirected packets during ip_rt_redirect_silence;
1461          * reset the algorithm.
1462          */
1463         if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence))
1464                 peer->rate_tokens = 0;
1465
1466         /* Too many ignored redirects; do not send anything and
1467          * record the time of the last redirect-worthy packet in peer->rate_last.
1468          */
1469         if (peer->rate_tokens >= ip_rt_redirect_number) {
1470                 peer->rate_last = jiffies;
1471                 goto out_put_peer;
1472         }
1473
1474         /* Check for load limit; set rate_last to the latest sent
1475          * redirect.
1476          */
1477         if (peer->rate_tokens == 0 ||
1478             time_after(jiffies,
1479                        (peer->rate_last +
1480                         (ip_rt_redirect_load << peer->rate_tokens)))) {
1481                 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
1482                 peer->rate_last = jiffies;
1483                 ++peer->rate_tokens;
1484 #ifdef CONFIG_IP_ROUTE_VERBOSE
1485                 if (log_martians &&
1486                     peer->rate_tokens == ip_rt_redirect_number)
1487                         net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
1488                                              &ip_hdr(skb)->saddr, rt->rt_iif,
1489                                              &rt->rt_dst, &rt->rt_gateway);
1490 #endif
1491         }
1492 out_put_peer:
1493         inet_putpeer(peer);
1494 }
1495
1496 static int ip_error(struct sk_buff *skb)
1497 {
1498         struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
1499         struct rtable *rt = skb_rtable(skb);
1500         struct inet_peer *peer;
1501         unsigned long now;
1502         struct net *net;
1503         bool send;
1504         int code;
1505
1506         net = dev_net(rt->dst.dev);
1507         if (!IN_DEV_FORWARD(in_dev)) {
1508                 switch (rt->dst.error) {
1509                 case EHOSTUNREACH:
1510                         IP_INC_STATS_BH(net, IPSTATS_MIB_INADDRERRORS);
1511                         break;
1512
1513                 case ENETUNREACH:
1514                         IP_INC_STATS_BH(net, IPSTATS_MIB_INNOROUTES);
1515                         break;
1516                 }
1517                 goto out;
1518         }
1519
1520         switch (rt->dst.error) {
1521         case EINVAL:
1522         default:
1523                 goto out;
1524         case EHOSTUNREACH:
1525                 code = ICMP_HOST_UNREACH;
1526                 break;
1527         case ENETUNREACH:
1528                 code = ICMP_NET_UNREACH;
1529                 IP_INC_STATS_BH(net, IPSTATS_MIB_INNOROUTES);
1530                 break;
1531         case EACCES:
1532                 code = ICMP_PKT_FILTERED;
1533                 break;
1534         }
1535
1536         peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 1);
1537
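        /* Classic token bucket: the peer accrues one token per jiffy of
         * elapsed time, capped at ip_rt_error_burst; each ICMP error
         * sent costs ip_rt_error_cost tokens.
         */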
1538         send = true;
1539         if (peer) {
1540                 now = jiffies;
1541                 peer->rate_tokens += now - peer->rate_last;
1542                 if (peer->rate_tokens > ip_rt_error_burst)
1543                         peer->rate_tokens = ip_rt_error_burst;
1544                 peer->rate_last = now;
1545                 if (peer->rate_tokens >= ip_rt_error_cost)
1546                         peer->rate_tokens -= ip_rt_error_cost;
1547                 else
1548                         send = false;
1549                 inet_putpeer(peer);
1550         }
1551         if (send)
1552                 icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
1553
1554 out:    kfree_skb(skb);
1555         return 0;
1556 }
1557
1558 static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
1559 {
1560         struct rtable *rt = (struct rtable *) dst;
1561
1562         dst_confirm(dst);
1563
1564         if (mtu < ip_rt_min_pmtu)
1565                 mtu = ip_rt_min_pmtu;
1566
1567         rt->rt_pmtu = mtu;
1568         dst_set_expires(&rt->dst, ip_rt_mtu_expires);
1569 }
1570
1571 void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
1572                       int oif, u32 mark, u8 protocol, int flow_flags)
1573 {
1574         const struct iphdr *iph = (const struct iphdr *)skb->data;
1575         struct flowi4 fl4;
1576         struct rtable *rt;
1577
1578         flowi4_init_output(&fl4, oif, mark, RT_TOS(iph->tos), RT_SCOPE_UNIVERSE,
1579                            protocol, flow_flags,
1580                            iph->daddr, iph->saddr, 0, 0);
1581         rt = __ip_route_output_key(net, &fl4);
1582         if (!IS_ERR(rt)) {
1583                 ip_rt_update_pmtu(&rt->dst, mtu);
1584                 ip_rt_put(rt);
1585         }
1586 }
1587 EXPORT_SYMBOL_GPL(ipv4_update_pmtu);
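/* A minimal usage sketch, assuming a hypothetical tunnel error handler
 * (the name sample_tunnel_err is illustrative only) that receives an
 * ICMP "fragmentation needed" report with the new MTU in 'info' and
 * whose skb->data points at the affected packet's IPv4 header, as
 * ipv4_update_pmtu() expects.
 */
#if 0
static void sample_tunnel_err(struct sk_buff *skb, u32 info)
{
	ipv4_update_pmtu(skb, dev_net(skb->dev), info, 0, skb->mark,
			 IPPROTO_IPIP, 0);
}
#endif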
1588
1589 void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
1590 {
1591         const struct inet_sock *inet = inet_sk(sk);
1592
1593         return ipv4_update_pmtu(skb, sock_net(sk), mtu,
1594                                 sk->sk_bound_dev_if, sk->sk_mark,
1595                                 inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
1596                                 inet_sk_flowi_flags(sk));
1597 }
1598 EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
1599
1600 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
1601 {
1602         struct rtable *rt = (struct rtable *) dst;
1603
1604         if (rt_is_expired(rt))
1605                 return NULL;
1606         return dst;
1607 }
1608
1609 static void ipv4_dst_destroy(struct dst_entry *dst)
1610 {
1611         struct rtable *rt = (struct rtable *) dst;
1612
1613         if (rt->fi) {
1614                 fib_info_put(rt->fi);
1615                 rt->fi = NULL;
1616         }
1617         if (rt_has_peer(rt)) {
1618                 struct inet_peer *peer = rt_peer_ptr(rt);
1619                 inet_putpeer(peer);
1620         }
1621 }
1622
1623
1624 static void ipv4_link_failure(struct sk_buff *skb)
1625 {
1626         struct rtable *rt;
1627
1628         icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
1629
1630         rt = skb_rtable(skb);
1631         if (rt)
1632                 dst_set_expires(&rt->dst, 0);
1633 }
1634
1635 static int ip_rt_bug(struct sk_buff *skb)
1636 {
1637         pr_debug("%s: %pI4 -> %pI4, %s\n",
1638                  __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
1639                  skb->dev ? skb->dev->name : "?");
1640         kfree_skb(skb);
1641         WARN_ON(1);
1642         return 0;
1643 }
1644
1645 /*
1646    We do not cache the source address of the outgoing interface,
1647    because it is used only by the IP RR, TS and SRR options,
1648    so it is out of the fast path.
1649
1650    BTW remember: "addr" is allowed to be unaligned
1651    in IP options!
1652  */
1653
1654 void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
1655 {
1656         __be32 src;
1657
1658         if (rt_is_output_route(rt))
1659                 src = ip_hdr(skb)->saddr;
1660         else {
1661                 struct fib_result res;
1662                 struct flowi4 fl4;
1663                 struct iphdr *iph;
1664
1665                 iph = ip_hdr(skb);
1666
1667                 memset(&fl4, 0, sizeof(fl4));
1668                 fl4.daddr = iph->daddr;
1669                 fl4.saddr = iph->saddr;
1670                 fl4.flowi4_tos = RT_TOS(iph->tos);
1671                 fl4.flowi4_oif = rt->dst.dev->ifindex;
1672                 fl4.flowi4_iif = skb->dev->ifindex;
1673                 fl4.flowi4_mark = skb->mark;
1674
1675                 rcu_read_lock();
1676                 if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res) == 0)
1677                         src = FIB_RES_PREFSRC(dev_net(rt->dst.dev), res);
1678                 else
1679                         src = inet_select_addr(rt->dst.dev, rt->rt_gateway,
1680                                         RT_SCOPE_UNIVERSE);
1681                 rcu_read_unlock();
1682         }
1683         memcpy(addr, &src, 4);
1684 }
1685
1686 #ifdef CONFIG_IP_ROUTE_CLASSID
1687 static void set_class_tag(struct rtable *rt, u32 tag)
1688 {
1689         if (!(rt->dst.tclassid & 0xFFFF))
1690                 rt->dst.tclassid |= tag & 0xFFFF;
1691         if (!(rt->dst.tclassid & 0xFFFF0000))
1692                 rt->dst.tclassid |= tag & 0xFFFF0000;
1693 }
1694 #endif
1695
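/* The advertised MSS defaults to the device MTU minus 40 bytes (a
 * minimal 20-byte IPv4 header plus a minimal 20-byte TCP header),
 * bounded below by ip_rt_min_advmss and above by 65535 - 40.
 */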
1696 static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
1697 {
1698         unsigned int advmss = dst_metric_raw(dst, RTAX_ADVMSS);
1699
1700         if (advmss == 0) {
1701                 advmss = max_t(unsigned int, dst->dev->mtu - 40,
1702                                ip_rt_min_advmss);
1703                 if (advmss > 65535 - 40)
1704                         advmss = 65535 - 40;
1705         }
1706         return advmss;
1707 }
1708
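/* MTU reported for the route: an unexpired learned rt_pmtu wins, then
 * the RTAX_MTU metric; output routes return that value directly.
 * Otherwise the device MTU is used, capped at 576 for gatewayed routes
 * whose MTU metric is locked, and everything is bounded by IP_MAX_MTU.
 */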
1709 static unsigned int ipv4_mtu(const struct dst_entry *dst)
1710 {
1711         const struct rtable *rt = (const struct rtable *) dst;
1712         unsigned int mtu = rt->rt_pmtu;
1713
1714         if (mtu && time_after_eq(jiffies, rt->dst.expires))
1715                 mtu = 0;
1716
1717         if (!mtu)
1718                 mtu = dst_metric_raw(dst, RTAX_MTU);
1719
1720         if (mtu && rt_is_output_route(rt))
1721                 return mtu;
1722
1723         mtu = dst->dev->mtu;
1724
1725         if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
1726
1727                 if (rt->rt_gateway != rt->rt_dst && mtu > 576)
1728                         mtu = 576;
1729         }
1730
1731         if (mtu > IP_MAX_MTU)
1732                 mtu = IP_MAX_MTU;
1733
1734         return mtu;
1735 }
1736
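/* Route metrics are shared per destination via the inet_peer cache
 * when a peer entry can be obtained; otherwise the fib_info's metrics
 * are used read-only, pinning the fib_info (via fib_clntref) when they
 * are not the shared defaults.
 */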
1737 static void rt_init_metrics(struct rtable *rt, const struct flowi4 *fl4,
1738                             struct fib_info *fi)
1739 {
1740         struct inet_peer_base *base;
1741         struct inet_peer *peer;
1742
1743         base = inetpeer_base_ptr(rt->_peer);
1744         BUG_ON(!base);
1745
1746         peer = inet_getpeer_v4(base, rt->rt_dst, 0);
1747         if (peer) {
1748                 __rt_set_peer(rt, peer);
1749                 if (inet_metrics_new(peer))
1750                         memcpy(peer->metrics, fi->fib_metrics,
1751                                sizeof(u32) * RTAX_MAX);
1752                 dst_init_metrics(&rt->dst, peer->metrics, false);
1753         } else {
1754                 if (fi->fib_metrics != (u32 *) dst_default_metrics) {
1755                         rt->fi = fi;
1756                         atomic_inc(&fi->fib_clntref);
1757                 }
1758                 dst_init_metrics(&rt->dst, fi->fib_metrics, true);
1759         }
1760 }
1761
1762 static void rt_set_nexthop(struct rtable *rt, const struct flowi4 *fl4,
1763                            const struct fib_result *res,
1764                            struct fib_info *fi, u16 type, u32 itag)
1765 {
1766         struct dst_entry *dst = &rt->dst;
1767
1768         if (fi) {
1769                 if (FIB_RES_GW(*res) &&
1770                     FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
1771                         rt->rt_gateway = FIB_RES_GW(*res);
1772                 rt_init_metrics(rt, fl4, fi);
1773 #ifdef CONFIG_IP_ROUTE_CLASSID
1774                 dst->tclassid = FIB_RES_NH(*res).nh_tclassid;
1775 #endif
1776         }
1777
1778         if (dst_mtu(dst) > IP_MAX_MTU)
1779                 dst_metric_set(dst, RTAX_MTU, IP_MAX_MTU);
1780
1781 #ifdef CONFIG_IP_ROUTE_CLASSID
1782 #ifdef CONFIG_IP_MULTIPLE_TABLES
1783         set_class_tag(rt, fib_rules_tclass(res));
1784 #endif
1785         set_class_tag(rt, itag);
1786 #endif
1787 }
1788
1789 static struct rtable *rt_dst_alloc(struct net_device *dev,
1790                                    bool nopolicy, bool noxfrm)
1791 {
1792         return dst_alloc(&ipv4_dst_ops, dev, 1, -1,
1793                          DST_HOST |
1794                          (nopolicy ? DST_NOPOLICY : 0) |
1795                          (noxfrm ? DST_NOXFRM : 0));
1796 }
1797
1798 /* called in rcu_read_lock() section */
1799 static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1800                                 u8 tos, struct net_device *dev, int our)
1801 {
1802         unsigned int hash;
1803         struct rtable *rth;
1804         struct in_device *in_dev = __in_dev_get_rcu(dev);
1805         u32 itag = 0;
1806         int err;
1807
1808         /* Primary sanity checks. */
1809
1810         if (in_dev == NULL)
1811                 return -EINVAL;
1812
1813         if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
1814             skb->protocol != htons(ETH_P_IP))
1815                 goto e_inval;
1816
1817         if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
1818                 if (ipv4_is_loopback(saddr))
1819                         goto e_inval;
1820
1821         if (ipv4_is_zeronet(saddr)) {
1822                 if (!ipv4_is_local_multicast(daddr))
1823                         goto e_inval;
1824         } else {
1825                 err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
1826                                           in_dev, &itag);
1827                 if (err < 0)
1828                         goto e_err;
1829         }
1830         rth = rt_dst_alloc(dev_net(dev)->loopback_dev,
1831                            IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
1832         if (!rth)
1833                 goto e_nobufs;
1834
1835 #ifdef CONFIG_IP_ROUTE_CLASSID
1836         rth->dst.tclassid = itag;
1837 #endif
1838         rth->dst.output = ip_rt_bug;
1839
1840         rth->rt_key_dst = daddr;
1841         rth->rt_key_src = saddr;
1842         rth->rt_genid   = rt_genid(dev_net(dev));
1843         rth->rt_flags   = RTCF_MULTICAST;
1844         rth->rt_type    = RTN_MULTICAST;
1845         rth->rt_key_tos = tos;
1846         rth->rt_dst     = daddr;
1847         rth->rt_src     = saddr;
1848         rth->rt_route_iif = dev->ifindex;
1849         rth->rt_iif     = dev->ifindex;
1850         rth->rt_oif     = 0;
1851         rth->rt_mark    = skb->mark;
1852         rth->rt_pmtu    = 0;
1853         rth->rt_gateway = daddr;
1854         rt_init_peer(rth, dev_net(dev)->ipv4.peers);
1855         rth->fi = NULL;
1856         if (our) {
1857                 rth->dst.input= ip_local_deliver;
1858                 rth->rt_flags |= RTCF_LOCAL;
1859         }
1860
1861 #ifdef CONFIG_IP_MROUTE
1862         if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
1863                 rth->dst.input = ip_mr_input;
1864 #endif
1865         RT_CACHE_STAT_INC(in_slow_mc);
1866
1867         hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev)));
1868         rth = rt_intern_hash(hash, rth, skb, dev->ifindex);
1869         return IS_ERR(rth) ? PTR_ERR(rth) : 0;
1870
1871 e_nobufs:
1872         return -ENOBUFS;
1873 e_inval:
1874         return -EINVAL;
1875 e_err:
1876         return err;
1877 }
1878
1879
1880 static void ip_handle_martian_source(struct net_device *dev,
1881                                      struct in_device *in_dev,
1882                                      struct sk_buff *skb,
1883                                      __be32 daddr,
1884                                      __be32 saddr)
1885 {
1886         RT_CACHE_STAT_INC(in_martian_src);
1887 #ifdef CONFIG_IP_ROUTE_VERBOSE
1888         if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
1889                 /*
1890                  *      RFC1812 recommendation: if the source is martian,
1891                  *      the only hint is the MAC header.
1892                  */
1893                 pr_warn("martian source %pI4 from %pI4, on dev %s\n",
1894                         &daddr, &saddr, dev->name);
1895                 if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
1896                         print_hex_dump(KERN_WARNING, "ll header: ",
1897                                        DUMP_PREFIX_OFFSET, 16, 1,
1898                                        skb_mac_header(skb),
1899                                        dev->hard_header_len, true);
1900                 }
1901         }
1902 #endif
1903 }
1904
1905 /* called in rcu_read_lock() section */
1906 static int __mkroute_input(struct sk_buff *skb,
1907                            const struct fib_result *res,
1908                            struct in_device *in_dev,
1909                            __be32 daddr, __be32 saddr, u32 tos,
1910                            struct rtable **result)
1911 {
1912         struct rtable *rth;
1913         int err;
1914         struct in_device *out_dev;
1915         unsigned int flags = 0;
1916         u32 itag;
1917
1918         /* get a working reference to the output device */
1919         out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
1920         if (out_dev == NULL) {
1921                 net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
1922                 return -EINVAL;
1923         }
1924
1925
1926         err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
1927                                   in_dev->dev, in_dev, &itag);
1928         if (err < 0) {
1929                 ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
1930                                          saddr);
1931
1932                 goto cleanup;
1933         }
1934
1935         if (err)
1936                 flags |= RTCF_DIRECTSRC;
1937
1938         if (out_dev == in_dev && err &&
1939             (IN_DEV_SHARED_MEDIA(out_dev) ||
1940              inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
1941                 flags |= RTCF_DOREDIRECT;
1942
1943         if (skb->protocol != htons(ETH_P_IP)) {
1944                 /* Not IP (i.e. ARP). Do not create a route if it is
1945                  * invalid for proxy arp. DNAT routes are always valid.
1946                  *
1947                  * The proxy arp feature has been extended to allow ARP
1948                  * replies back on the same interface, to support
1949                  * Private VLAN switch technologies. See arp.c.
1950                  */
1951                 if (out_dev == in_dev &&
1952                     IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
1953                         err = -EINVAL;
1954                         goto cleanup;
1955                 }
1956         }
1957
1958         rth = rt_dst_alloc(out_dev->dev,
1959                            IN_DEV_CONF_GET(in_dev, NOPOLICY),
1960                            IN_DEV_CONF_GET(out_dev, NOXFRM));
1961         if (!rth) {
1962                 err = -ENOBUFS;
1963                 goto cleanup;
1964         }
1965
1966         rth->rt_key_dst = daddr;
1967         rth->rt_key_src = saddr;
1968         rth->rt_genid = rt_genid(dev_net(rth->dst.dev));
1969         rth->rt_flags = flags;
1970         rth->rt_type = res->type;
1971         rth->rt_key_tos = tos;
1972         rth->rt_dst     = daddr;
1973         rth->rt_src     = saddr;
1974         rth->rt_route_iif = in_dev->dev->ifindex;
1975         rth->rt_iif     = in_dev->dev->ifindex;
1976         rth->rt_oif     = 0;
1977         rth->rt_mark    = skb->mark;
1978         rth->rt_pmtu    = 0;
1979         rth->rt_gateway = daddr;
1980         rt_init_peer(rth, &res->table->tb_peers);
1981         rth->fi = NULL;
1982
1983         rth->dst.input = ip_forward;
1984         rth->dst.output = ip_output;
1985
1986         rt_set_nexthop(rth, NULL, res, res->fi, res->type, itag);
1987
1988         *result = rth;
1989         err = 0;
1990  cleanup:
1991         return err;
1992 }
1993
1994 static int ip_mkroute_input(struct sk_buff *skb,
1995                             struct fib_result *res,
1996                             const struct flowi4 *fl4,
1997                             struct in_device *in_dev,
1998                             __be32 daddr, __be32 saddr, u32 tos)
1999 {
2000         struct rtable *rth = NULL;
2001         int err;
2002         unsigned int hash;
2003
2004 #ifdef CONFIG_IP_ROUTE_MULTIPATH
2005         if (res->fi && res->fi->fib_nhs > 1)
2006                 fib_select_multipath(res);
2007 #endif
2008
2009         /* create a routing cache entry */
2010         err = __mkroute_input(skb, res, in_dev, daddr, saddr, tos, &rth);
2011         if (err)
2012                 return err;
2013
2014         /* put it into the cache */
2015         hash = rt_hash(daddr, saddr, fl4->flowi4_iif,
2016                        rt_genid(dev_net(rth->dst.dev)));
2017         rth = rt_intern_hash(hash, rth, skb, fl4->flowi4_iif);
2018         if (IS_ERR(rth))
2019                 return PTR_ERR(rth);
2020         return 0;
2021 }
2022
2023 /*
2024  *      NOTE. We drop all packets that have local source
2025  *      addresses, because every properly looped-back packet
2026  *      must already have the correct destination attached by the output routine.
2027  *
2028  *      This approach solves two big problems:
2029  *      1. Non-simplex devices are handled properly.
2030  *      2. IP spoofing attempts are filtered with a 100% guarantee.
2031  *      Called with rcu_read_lock().
2032  */
2033
2034 static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2035                                u8 tos, struct net_device *dev)
2036 {
2037         struct fib_result res;
2038         struct in_device *in_dev = __in_dev_get_rcu(dev);
2039         struct flowi4   fl4;
2040         unsigned int    flags = 0;
2041         u32             itag = 0;
2042         struct rtable   *rth;
2043         unsigned int    hash;
2044         int             err = -EINVAL;
2045         struct net    *net = dev_net(dev);
2046
2047         /* IP on this device is disabled. */
2048
2049         if (!in_dev)
2050                 goto out;
2051
2052         /* Check for the most weird martians, which may not be detected
2053            by fib_lookup.
2054          */
2055
2056         if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
2057                 goto martian_source;
2058
2059         if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
2060                 goto brd_input;
2061
2062         /* Accept zero addresses only for limited broadcast;
2063          * I do not even know whether to fix this or not. Waiting for complaints :-)
2064          */
2065         if (ipv4_is_zeronet(saddr))
2066                 goto martian_source;
2067
2068         if (ipv4_is_zeronet(daddr))
2069                 goto martian_destination;
2070
2071         if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev))) {
2072                 if (ipv4_is_loopback(daddr))
2073                         goto martian_destination;
2074
2075                 if (ipv4_is_loopback(saddr))
2076                         goto martian_source;
2077         }
2078
2079         /*
2080          *      Now we are ready to route the packet.
2081          */
2082         fl4.flowi4_oif = 0;
2083         fl4.flowi4_iif = dev->ifindex;
2084         fl4.flowi4_mark = skb->mark;
2085         fl4.flowi4_tos = tos;
2086         fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
2087         fl4.daddr = daddr;
2088         fl4.saddr = saddr;
2089         err = fib_lookup(net, &fl4, &res);
2090         if (err != 0)
2091                 goto no_route;
2092
2093         RT_CACHE_STAT_INC(in_slow_tot);
2094
2095         if (res.type == RTN_BROADCAST)
2096                 goto brd_input;
2097
2098         if (res.type == RTN_LOCAL) {
2099                 err = fib_validate_source(skb, saddr, daddr, tos,
2100                                           net->loopback_dev->ifindex,
2101                                           dev, in_dev, &itag);
2102                 if (err < 0)
2103                         goto martian_source_keep_err;
2104                 if (err)
2105                         flags |= RTCF_DIRECTSRC;
2106                 goto local_input;
2107         }
2108
2109         if (!IN_DEV_FORWARD(in_dev))
2110                 goto no_route;
2111         if (res.type != RTN_UNICAST)
2112                 goto martian_destination;
2113
2114         err = ip_mkroute_input(skb, &res, &fl4, in_dev, daddr, saddr, tos);
2115 out:    return err;
2116
2117 brd_input:
2118         if (skb->protocol != htons(ETH_P_IP))
2119                 goto e_inval;
2120
2121         if (!ipv4_is_zeronet(saddr)) {
2122                 err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
2123                                           in_dev, &itag);
2124                 if (err < 0)
2125                         goto martian_source_keep_err;
2126                 if (err)
2127                         flags |= RTCF_DIRECTSRC;
2128         }
2129         flags |= RTCF_BROADCAST;
2130         res.type = RTN_BROADCAST;
2131         RT_CACHE_STAT_INC(in_brd);
2132
2133 local_input:
2134         rth = rt_dst_alloc(net->loopback_dev,
2135                            IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
2136         if (!rth)
2137                 goto e_nobufs;
2138
2139         rth->dst.input= ip_local_deliver;
2140         rth->dst.output= ip_rt_bug;
2141 #ifdef CONFIG_IP_ROUTE_CLASSID
2142         rth->dst.tclassid = itag;
2143 #endif
2144
2145         rth->rt_key_dst = daddr;
2146         rth->rt_key_src = saddr;
2147         rth->rt_genid = rt_genid(net);
2148         rth->rt_flags   = flags|RTCF_LOCAL;
2149         rth->rt_type    = res.type;
2150         rth->rt_key_tos = tos;
2151         rth->rt_dst     = daddr;
2152         rth->rt_src     = saddr;
2153         rth->rt_route_iif = dev->ifindex;
2154         rth->rt_iif     = dev->ifindex;
2155         rth->rt_oif     = 0;
2156         rth->rt_mark    = skb->mark;
2157         rth->rt_pmtu    = 0;
2158         rth->rt_gateway = daddr;
2159         rt_init_peer(rth, net->ipv4.peers);
2160         rth->fi = NULL;
2161         if (res.type == RTN_UNREACHABLE) {
2162                 rth->dst.input= ip_error;
2163                 rth->dst.error= -err;
2164                 rth->rt_flags   &= ~RTCF_LOCAL;
2165         }
2166         hash = rt_hash(daddr, saddr, fl4.flowi4_iif, rt_genid(net));
2167         rth = rt_intern_hash(hash, rth, skb, fl4.flowi4_iif);
2168         err = 0;
2169         if (IS_ERR(rth))
2170                 err = PTR_ERR(rth);
2171         goto out;
2172
2173 no_route:
2174         RT_CACHE_STAT_INC(in_no_route);
2175         res.type = RTN_UNREACHABLE;
2176         if (err == -ESRCH)
2177                 err = -ENETUNREACH;
2178         goto local_input;
2179
2180         /*
2181          *      Do not cache martian addresses: they should be logged (RFC1812)
2182          */
2183 martian_destination:
2184         RT_CACHE_STAT_INC(in_martian_dst);
2185 #ifdef CONFIG_IP_ROUTE_VERBOSE
2186         if (IN_DEV_LOG_MARTIANS(in_dev))
2187                 net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n",
2188                                      &daddr, &saddr, dev->name);
2189 #endif
2190
2191 e_inval:
2192         err = -EINVAL;
2193         goto out;
2194
2195 e_nobufs:
2196         err = -ENOBUFS;
2197         goto out;
2198
2199 martian_source:
2200         err = -EINVAL;
2201 martian_source_keep_err:
2202         ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
2203         goto out;
2204 }
2205
2206 int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2207                            u8 tos, struct net_device *dev, bool noref)
2208 {
2209         struct rtable   *rth;
2210         unsigned int    hash;
2211         int iif = dev->ifindex;
2212         struct net *net;
2213         int res;
2214
2215         net = dev_net(dev);
2216
2217         rcu_read_lock();
2218
2219         if (!rt_caching(net))
2220                 goto skip_cache;
2221
2222         tos &= IPTOS_RT_MASK;
2223         hash = rt_hash(daddr, saddr, iif, rt_genid(net));
2224
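        /* Compare all four lookup keys in one branch: XOR each cached
         * key with the packet's value and OR the results; only an exact
         * match of daddr, saddr, iif and tos yields zero.
         */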
2225         for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
2226              rth = rcu_dereference(rth->dst.rt_next)) {
2227                 if ((((__force u32)rth->rt_key_dst ^ (__force u32)daddr) |
2228                      ((__force u32)rth->rt_key_src ^ (__force u32)saddr) |
2229                      (rth->rt_route_iif ^ iif) |
2230                      (rth->rt_key_tos ^ tos)) == 0 &&
2231                     rth->rt_mark == skb->mark &&
2232                     net_eq(dev_net(rth->dst.dev), net) &&
2233                     !rt_is_expired(rth)) {
2234                         if (noref) {
2235                                 dst_use_noref(&rth->dst, jiffies);
2236                                 skb_dst_set_noref(skb, &rth->dst);
2237                         } else {
2238                                 dst_use(&rth->dst, jiffies);
2239                                 skb_dst_set(skb, &rth->dst);
2240                         }
2241                         RT_CACHE_STAT_INC(in_hit);
2242                         rcu_read_unlock();
2243                         return 0;
2244                 }
2245                 RT_CACHE_STAT_INC(in_hlist_search);
2246         }
2247
2248 skip_cache:
2249         /* Multicast recognition logic has been moved from the route cache to here.
2250            The problem was that too many Ethernet cards have broken/missing
2251            hardware multicast filters :-( As a result, a host on a multicast
2252            network acquires a lot of useless route cache entries, e.g. from
2253            SDR messages from all over the world. Now we try to get rid of them.
2254            Really, provided the software IP multicast filter is organized
2255            reasonably (at least, hashed), it does not result in a slowdown
2256            compared with route cache reject entries.
2257            Note that multicast routers are not affected, because a
2258            route cache entry is created eventually.
2259          */
2260         if (ipv4_is_multicast(daddr)) {
2261                 struct in_device *in_dev = __in_dev_get_rcu(dev);
2262
2263                 if (in_dev) {
2264                         int our = ip_check_mc_rcu(in_dev, daddr, saddr,
2265                                                   ip_hdr(skb)->protocol);
2266                         if (our
2267 #ifdef CONFIG_IP_MROUTE
2268                                 ||
2269                             (!ipv4_is_local_multicast(daddr) &&
2270                              IN_DEV_MFORWARD(in_dev))
2271 #endif
2272                            ) {
2273                                 int res = ip_route_input_mc(skb, daddr, saddr,
2274                                                             tos, dev, our);
2275                                 rcu_read_unlock();
2276                                 return res;
2277                         }
2278                 }
2279                 rcu_read_unlock();
2280                 return -EINVAL;
2281         }
2282         res = ip_route_input_slow(skb, daddr, saddr, tos, dev);
2283         rcu_read_unlock();
2284         return res;
2285 }
2286 EXPORT_SYMBOL(ip_route_input_common);
2287
2288 /* called with rcu_read_lock() */
2289 static struct rtable *__mkroute_output(const struct fib_result *res,
2290                                        const struct flowi4 *fl4,
2291                                        __be32 orig_daddr, __be32 orig_saddr,
2292                                        int orig_oif, __u8 orig_rtos,
2293                                        struct net_device *dev_out,
2294                                        unsigned int flags)
2295 {
2296         struct fib_info *fi = res->fi;
2297         struct in_device *in_dev;
2298         u16 type = res->type;
2299         struct rtable *rth;
2300
2301         in_dev = __in_dev_get_rcu(dev_out);
2302         if (!in_dev)
2303                 return ERR_PTR(-EINVAL);
2304
2305         if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
2306                 if (ipv4_is_loopback(fl4->saddr) && !(dev_out->flags & IFF_LOOPBACK))
2307                         return ERR_PTR(-EINVAL);
2308
2309         if (ipv4_is_lbcast(fl4->daddr))
2310                 type = RTN_BROADCAST;
2311         else if (ipv4_is_multicast(fl4->daddr))
2312                 type = RTN_MULTICAST;
2313         else if (ipv4_is_zeronet(fl4->daddr))
2314                 return ERR_PTR(-EINVAL);
2315
2316         if (dev_out->flags & IFF_LOOPBACK)
2317                 flags |= RTCF_LOCAL;
2318
2319         if (type == RTN_BROADCAST) {
2320                 flags |= RTCF_BROADCAST | RTCF_LOCAL;
2321                 fi = NULL;
2322         } else if (type == RTN_MULTICAST) {
2323                 flags |= RTCF_MULTICAST | RTCF_LOCAL;
2324                 if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
2325                                      fl4->flowi4_proto))
2326                         flags &= ~RTCF_LOCAL;
2327                 /* If a multicast route does not exist, use the
2328                  * default one, but do not gateway in this case.
2329                  * Yes, it is a hack.
2330                  */
2331                 if (fi && res->prefixlen < 4)
2332                         fi = NULL;
2333         }
2334
2335         rth = rt_dst_alloc(dev_out,
2336                            IN_DEV_CONF_GET(in_dev, NOPOLICY),
2337                            IN_DEV_CONF_GET(in_dev, NOXFRM));
2338         if (!rth)
2339                 return ERR_PTR(-ENOBUFS);
2340
2341         rth->dst.output = ip_output;
2342
2343         rth->rt_key_dst = orig_daddr;
2344         rth->rt_key_src = orig_saddr;
2345         rth->rt_genid = rt_genid(dev_net(dev_out));
2346         rth->rt_flags   = flags;
2347         rth->rt_type    = type;
2348         rth->rt_key_tos = orig_rtos;
2349         rth->rt_dst     = fl4->daddr;
2350         rth->rt_src     = fl4->saddr;
2351         rth->rt_route_iif = 0;
2352         rth->rt_iif     = orig_oif ? : dev_out->ifindex;
2353         rth->rt_oif     = orig_oif;
2354         rth->rt_mark    = fl4->flowi4_mark;
2355         rth->rt_pmtu    = 0;
2356         rth->rt_gateway = fl4->daddr;
2357         rt_init_peer(rth, (res->table ?
2358                            &res->table->tb_peers :
2359                            dev_net(dev_out)->ipv4.peers));
2360         rth->fi = NULL;
2361
2362         RT_CACHE_STAT_INC(out_slow_tot);
2363
2364         if (flags & RTCF_LOCAL)
2365                 rth->dst.input = ip_local_deliver;
2366         if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
2367                 if (flags & RTCF_LOCAL &&
2368                     !(dev_out->flags & IFF_LOOPBACK)) {
2369                         rth->dst.output = ip_mc_output;
2370                         RT_CACHE_STAT_INC(out_slow_mc);
2371                 }
2372 #ifdef CONFIG_IP_MROUTE
2373                 if (type == RTN_MULTICAST) {
2374                         if (IN_DEV_MFORWARD(in_dev) &&
2375                             !ipv4_is_local_multicast(fl4->daddr)) {
2376                                 rth->dst.input = ip_mr_input;
2377                                 rth->dst.output = ip_mc_output;
2378                         }
2379                 }
2380 #endif
2381         }
2382
2383         rt_set_nexthop(rth, fl4, res, fi, type, 0);
2384
2385         if (fl4->flowi4_flags & FLOWI_FLAG_RT_NOCACHE)
2386                 rth->dst.flags |= DST_NOCACHE;
2387
2388         return rth;
2389 }
2390
2391 /*
2392  * Major route resolver routine.
2393  * called with rcu_read_lock();
2394  */
2395
2396 static struct rtable *ip_route_output_slow(struct net *net, struct flowi4 *fl4)
2397 {
2398         struct net_device *dev_out = NULL;
2399         __u8 tos = RT_FL_TOS(fl4);
2400         unsigned int flags = 0;
2401         struct fib_result res;
2402         struct rtable *rth;
2403         __be32 orig_daddr;
2404         __be32 orig_saddr;
2405         int orig_oif;
2406
2407         res.fi          = NULL;
2408         res.table       = NULL;
2409 #ifdef CONFIG_IP_MULTIPLE_TABLES
2410         res.r           = NULL;
2411 #endif
2412
2413         orig_daddr = fl4->daddr;
2414         orig_saddr = fl4->saddr;
2415         orig_oif = fl4->flowi4_oif;
2416
2417         fl4->flowi4_iif = net->loopback_dev->ifindex;
2418         fl4->flowi4_tos = tos & IPTOS_RT_MASK;
2419         fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
2420                          RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);
2421
2422         rcu_read_lock();
2423         if (fl4->saddr) {
2424                 rth = ERR_PTR(-EINVAL);
2425                 if (ipv4_is_multicast(fl4->saddr) ||
2426                     ipv4_is_lbcast(fl4->saddr) ||
2427                     ipv4_is_zeronet(fl4->saddr))
2428                         goto out;
2429
2430                 /* I removed the check for oif == dev_out->oif here.
2431                    It was wrong for two reasons:
2432                    1. ip_dev_find(net, saddr) can return the wrong iface if saddr
2433                       is assigned to multiple interfaces.
2434                    2. Moreover, we are allowed to send packets with the saddr
2435                       of another iface. --ANK
2436                  */
2437
2438                 if (fl4->flowi4_oif == 0 &&
2439                     (ipv4_is_multicast(fl4->daddr) ||
2440                      ipv4_is_lbcast(fl4->daddr))) {
2441                         /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2442                         dev_out = __ip_dev_find(net, fl4->saddr, false);
2443                         if (dev_out == NULL)
2444                                 goto out;
2445
2446                         /* Special hack: the user can direct multicasts
2447                            and limited broadcast via the necessary interface
2448                            without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
2449                            This hack is not just for fun, it allows
2450                            vic, vat and friends to work.
2451                            They bind the socket to loopback, set ttl to zero
2452                            and expect that it will work.
2453                            From the viewpoint of the routing cache they are broken,
2454                            because we are not allowed to build a multicast path
2455                            with a loopback source addr (look, the routing cache
2456                            cannot know that ttl is zero, so the packet
2457                            will not leave this host and the route is valid).
2458                            Luckily, this hack is a good workaround.
2459                          */
2460
2461                         fl4->flowi4_oif = dev_out->ifindex;
2462                         goto make_route;
2463                 }
2464
2465                 if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
2466                         /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2467                         if (!__ip_dev_find(net, fl4->saddr, false))
2468                                 goto out;
2469                 }
2470         }
2471
2472
2473         if (fl4->flowi4_oif) {
2474                 dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
2475                 rth = ERR_PTR(-ENODEV);
2476                 if (dev_out == NULL)
2477                         goto out;
2478
2479                 /* RACE: Check return value of inet_select_addr instead. */
2480                 if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
2481                         rth = ERR_PTR(-ENETUNREACH);
2482                         goto out;
2483                 }
2484                 if (ipv4_is_local_multicast(fl4->daddr) ||
2485                     ipv4_is_lbcast(fl4->daddr)) {
2486                         if (!fl4->saddr)
2487                                 fl4->saddr = inet_select_addr(dev_out, 0,
2488                                                               RT_SCOPE_LINK);
2489                         goto make_route;
2490                 }
2491                 if (fl4->saddr) {
2492                         if (ipv4_is_multicast(fl4->daddr))
2493                                 fl4->saddr = inet_select_addr(dev_out, 0,
2494                                                               fl4->flowi4_scope);
2495                         else if (!fl4->daddr)
2496                                 fl4->saddr = inet_select_addr(dev_out, 0,
2497                                                               RT_SCOPE_HOST);
2498                 }
2499         }
2500
2501         if (!fl4->daddr) {
2502                 fl4->daddr = fl4->saddr;
2503                 if (!fl4->daddr)
2504                         fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
2505                 dev_out = net->loopback_dev;
2506                 fl4->flowi4_oif = net->loopback_dev->ifindex;
2507                 res.type = RTN_LOCAL;
2508                 flags |= RTCF_LOCAL;
2509                 goto make_route;
2510         }
2511
2512         if (fib_lookup(net, fl4, &res)) {
2513                 res.fi = NULL;
2514                 res.table = NULL;
2515                 if (fl4->flowi4_oif) {
2516                         /* Apparently, the routing tables are wrong. Assume
2517                            that the destination is on-link.
2518
2519                            WHY? DW.
2520                            Because we are allowed to send to an iface
2521                            even if it has NO routes and NO assigned
2522                            addresses. When oif is specified, the routing
2523                            tables are looked up with only one purpose:
2524                            to catch whether the destination is gatewayed rather than
2525                            direct. Moreover, if MSG_DONTROUTE is set,
2526                            we send the packet, ignoring both the routing tables
2527                            and the ifaddr state. --ANK
2528
2529
2530                            We could do this even when oif is unknown,
2531                            as IPv6 likely does, but we do not.
2532                          */
2533
2534                         if (fl4->saddr == 0)
2535                                 fl4->saddr = inet_select_addr(dev_out, 0,
2536                                                               RT_SCOPE_LINK);
2537                         res.type = RTN_UNICAST;
2538                         goto make_route;
2539                 }
2540                 rth = ERR_PTR(-ENETUNREACH);
2541                 goto out;
2542         }
2543
2544         if (res.type == RTN_LOCAL) {
2545                 if (!fl4->saddr) {
2546                         if (res.fi->fib_prefsrc)
2547                                 fl4->saddr = res.fi->fib_prefsrc;
2548                         else
2549                                 fl4->saddr = fl4->daddr;
2550                 }
2551                 dev_out = net->loopback_dev;
2552                 fl4->flowi4_oif = dev_out->ifindex;
2553                 res.fi = NULL;
2554                 flags |= RTCF_LOCAL;
2555                 goto make_route;
2556         }
2557
2558 #ifdef CONFIG_IP_ROUTE_MULTIPATH
2559         if (res.fi->fib_nhs > 1 && fl4->flowi4_oif == 0)
2560                 fib_select_multipath(&res);
2561         else
2562 #endif
2563         if (!res.prefixlen &&
2564             res.table->tb_num_default > 1 &&
2565             res.type == RTN_UNICAST && !fl4->flowi4_oif)
2566                 fib_select_default(&res);
2567
2568         if (!fl4->saddr)
2569                 fl4->saddr = FIB_RES_PREFSRC(net, res);
2570
2571         dev_out = FIB_RES_DEV(res);
2572         fl4->flowi4_oif = dev_out->ifindex;
2573
2574
2575 make_route:
2576         rth = __mkroute_output(&res, fl4, orig_daddr, orig_saddr, orig_oif,
2577                                tos, dev_out, flags);
2578         if (!IS_ERR(rth)) {
2579                 unsigned int hash;
2580
2581                 hash = rt_hash(orig_daddr, orig_saddr, orig_oif,
2582                                rt_genid(dev_net(dev_out)));
2583                 rth = rt_intern_hash(hash, rth, NULL, orig_oif);
2584         }
2585
2586 out:
2587         rcu_read_unlock();
2588         return rth;
2589 }
2590
2591 struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *flp4)
2592 {
2593         struct rtable *rth;
2594         unsigned int hash;
2595
2596         if (!rt_caching(net))
2597                 goto slow_output;
2598
2599         hash = rt_hash(flp4->daddr, flp4->saddr, flp4->flowi4_oif, rt_genid(net));
2600
2601         rcu_read_lock_bh();
2602         for (rth = rcu_dereference_bh(rt_hash_table[hash].chain); rth;
2603                 rth = rcu_dereference_bh(rth->dst.rt_next)) {
2604                 if (rth->rt_key_dst == flp4->daddr &&
2605                     rth->rt_key_src == flp4->saddr &&
2606                     rt_is_output_route(rth) &&
2607                     rth->rt_oif == flp4->flowi4_oif &&
2608                     rth->rt_mark == flp4->flowi4_mark &&
2609                     !((rth->rt_key_tos ^ flp4->flowi4_tos) &
2610                             (IPTOS_RT_MASK | RTO_ONLINK)) &&
2611                     net_eq(dev_net(rth->dst.dev), net) &&
2612                     !rt_is_expired(rth)) {
2613                         dst_use(&rth->dst, jiffies);
2614                         RT_CACHE_STAT_INC(out_hit);
2615                         rcu_read_unlock_bh();
2616                         if (!flp4->saddr)
2617                                 flp4->saddr = rth->rt_src;
2618                         if (!flp4->daddr)
2619                                 flp4->daddr = rth->rt_dst;
2620                         return rth;
2621                 }
2622                 RT_CACHE_STAT_INC(out_hlist_search);
2623         }
2624         rcu_read_unlock_bh();
2625
2626 slow_output:
2627         return ip_route_output_slow(net, flp4);
2628 }
2629 EXPORT_SYMBOL_GPL(__ip_route_output_key);
2630
2631 static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
2632 {
2633         return NULL;
2634 }
2635
2636 static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
2637 {
2638         unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
2639
2640         return mtu ? : dst->dev->mtu;
2641 }
2642
2643 static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
2644 {
2645 }
2646
2647 static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst,
2648                                           unsigned long old)
2649 {
2650         return NULL;
2651 }
2652
2653 static struct dst_ops ipv4_dst_blackhole_ops = {
2654         .family                 =       AF_INET,
2655         .protocol               =       cpu_to_be16(ETH_P_IP),
2656         .destroy                =       ipv4_dst_destroy,
2657         .check                  =       ipv4_blackhole_dst_check,
2658         .mtu                    =       ipv4_blackhole_mtu,
2659         .default_advmss         =       ipv4_default_advmss,
2660         .update_pmtu            =       ipv4_rt_blackhole_update_pmtu,
2661         .cow_metrics            =       ipv4_rt_blackhole_cow_metrics,
2662         .neigh_lookup           =       ipv4_neigh_lookup,
2663 };
2664
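/* Build a stand-in copy of an existing route whose input and output
 * handlers simply discard packets (dst_discard), while the lookup keys,
 * metrics, peer and flags of the original are carried over so callers
 * can keep holding a dst that will never emit traffic.
 */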
2665 struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
2666 {
2667         struct rtable *rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, 0, 0);
2668         struct rtable *ort = (struct rtable *) dst_orig;
2669
2670         if (rt) {
2671                 struct dst_entry *new = &rt->dst;
2672
2673                 new->__use = 1;
2674                 new->input = dst_discard;
2675                 new->output = dst_discard;
2676                 dst_copy_metrics(new, &ort->dst);
2677
2678                 new->dev = ort->dst.dev;
2679                 if (new->dev)
2680                         dev_hold(new->dev);
2681
2682                 rt->rt_key_dst = ort->rt_key_dst;
2683                 rt->rt_key_src = ort->rt_key_src;
2684                 rt->rt_key_tos = ort->rt_key_tos;
2685                 rt->rt_route_iif = ort->rt_route_iif;
2686                 rt->rt_iif = ort->rt_iif;
2687                 rt->rt_oif = ort->rt_oif;
2688                 rt->rt_mark = ort->rt_mark;
2689                 rt->rt_pmtu = ort->rt_pmtu;
2690
2691                 rt->rt_genid = rt_genid(net);
2692                 rt->rt_flags = ort->rt_flags;
2693                 rt->rt_type = ort->rt_type;
2694                 rt->rt_dst = ort->rt_dst;
2695                 rt->rt_src = ort->rt_src;
2696                 rt->rt_gateway = ort->rt_gateway;
2697                 rt_transfer_peer(rt, ort);
2698                 rt->fi = ort->fi;
2699                 if (rt->fi)
2700                         atomic_inc(&rt->fi->fib_clntref);
2701
2702                 dst_free(new);
2703         }
2704
2705         dst_release(dst_orig);
2706
2707         return rt ? &rt->dst : ERR_PTR(-ENOMEM);
2708 }
2709
2710 struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
2711                                     struct sock *sk)
2712 {
2713         struct rtable *rt = __ip_route_output_key(net, flp4);
2714
2715         if (IS_ERR(rt))
2716                 return rt;
2717
2718         if (flp4->flowi4_proto)
2719                 rt = (struct rtable *) xfrm_lookup(net, &rt->dst,
2720                                                    flowi4_to_flowi(flp4),
2721                                                    sk, 0);
2722
2723         return rt;
2724 }
2725 EXPORT_SYMBOL_GPL(ip_route_output_flow);
2726
2727 static int rt_fill_info(struct net *net,
2728                         struct sk_buff *skb, u32 pid, u32 seq, int event,
2729                         int nowait, unsigned int flags)
2730 {
2731         struct rtable *rt = skb_rtable(skb);
2732         struct rtmsg *r;
2733         struct nlmsghdr *nlh;
2734         unsigned long expires = 0;
2735         u32 id = 0, error;
2736
2737         nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
2738         if (nlh == NULL)
2739                 return -EMSGSIZE;
2740
2741         r = nlmsg_data(nlh);
2742         r->rtm_family    = AF_INET;
2743         r->rtm_dst_len  = 32;
2744         r->rtm_src_len  = 0;
2745         r->rtm_tos      = rt->rt_key_tos;
2746         r->rtm_table    = RT_TABLE_MAIN;
2747         if (nla_put_u32(skb, RTA_TABLE, RT_TABLE_MAIN))
2748                 goto nla_put_failure;
2749         r->rtm_type     = rt->rt_type;
2750         r->rtm_scope    = RT_SCOPE_UNIVERSE;
2751         r->rtm_protocol = RTPROT_UNSPEC;
2752         r->rtm_flags    = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
2753         if (rt->rt_flags & RTCF_NOTIFY)
2754                 r->rtm_flags |= RTM_F_NOTIFY;
2755
2756         if (nla_put_be32(skb, RTA_DST, rt->rt_dst))
2757                 goto nla_put_failure;
2758         if (rt->rt_key_src) {
2759                 r->rtm_src_len = 32;
2760                 if (nla_put_be32(skb, RTA_SRC, rt->rt_key_src))
2761                         goto nla_put_failure;
2762         }
2763         if (rt->dst.dev &&
2764             nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
2765                 goto nla_put_failure;
2766 #ifdef CONFIG_IP_ROUTE_CLASSID
2767         if (rt->dst.tclassid &&
2768             nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
2769                 goto nla_put_failure;
2770 #endif
2771         if (!rt_is_input_route(rt) &&
2772             rt->rt_src != rt->rt_key_src) {
2773                 if (nla_put_be32(skb, RTA_PREFSRC, rt->rt_src))
2774                         goto nla_put_failure;
2775         }
2776         if (rt->rt_dst != rt->rt_gateway &&
2777             nla_put_be32(skb, RTA_GATEWAY, rt->rt_gateway))
2778                 goto nla_put_failure;
2779
2780         if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
2781                 goto nla_put_failure;
2782
2783         if (rt->rt_mark &&
2784             nla_put_be32(skb, RTA_MARK, rt->rt_mark))
2785                 goto nla_put_failure;
2786
2787         error = rt->dst.error;
2788         if (rt_has_peer(rt)) {
2789                 const struct inet_peer *peer = rt_peer_ptr(rt);
2790                 inet_peer_refcheck(peer);
2791                 id = atomic_read(&peer->ip_id_count) & 0xffff;
2792         }
2793         expires = rt->dst.expires;
2794         if (expires) {
2795                 if (time_before(jiffies, expires))
2796                         expires -= jiffies;
2797                 else
2798                         expires = 0;
2799         }
2800
2801         if (rt_is_input_route(rt)) {
2802 #ifdef CONFIG_IP_MROUTE
2803                 __be32 dst = rt->rt_dst;
2804
2805                 if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
2806                     IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
2807                         int err = ipmr_get_route(net, skb,
2808                                                  rt->rt_src, rt->rt_dst,
2809                                                  r, nowait);
2810                         if (err <= 0) {
2811                                 if (!nowait) {
2812                                         if (err == 0)
2813                                                 return 0;
2814                                         goto nla_put_failure;
2815                                 } else {
2816                                         if (err == -EMSGSIZE)
2817                                                 goto nla_put_failure;
2818                                         error = err;
2819                                 }
2820                         }
2821                 } else
2822 #endif
2823                         if (nla_put_u32(skb, RTA_IIF, rt->rt_iif))
2824                                 goto nla_put_failure;
2825         }
2826
2827         if (rtnl_put_cacheinfo(skb, &rt->dst, id, expires, error) < 0)
2828                 goto nla_put_failure;
2829
2830         return nlmsg_end(skb, nlh);
2831
2832 nla_put_failure:
2833         nlmsg_cancel(skb, nlh);
2834         return -EMSGSIZE;
2835 }
2836
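/* Handle an RTM_GETROUTE request: build a dummy skb, perform either an
 * input route lookup (when RTA_IIF is supplied) or an output lookup,
 * and return the resulting route to the requester via rt_fill_info()
 * and rtnl_unicast().
 */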
2837 static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg)
2838 {
2839         struct net *net = sock_net(in_skb->sk);
2840         struct rtmsg *rtm;
2841         struct nlattr *tb[RTA_MAX+1];
2842         struct rtable *rt = NULL;
2843         __be32 dst = 0;
2844         __be32 src = 0;
2845         u32 iif;
2846         int err;
2847         int mark;
2848         struct sk_buff *skb;
2849
2850         err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
2851         if (err < 0)
2852                 goto errout;
2853
2854         rtm = nlmsg_data(nlh);
2855
2856         skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2857         if (skb == NULL) {
2858                 err = -ENOBUFS;
2859                 goto errout;
2860         }
2861
2862         /* Reserve room for dummy headers; this skb can pass
2863            through a good chunk of the routing engine.
2864          */
2865         skb_reset_mac_header(skb);
2866         skb_reset_network_header(skb);
2867
2868         /* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
2869         ip_hdr(skb)->protocol = IPPROTO_ICMP;
2870         skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
2871
2872         src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0;
2873         dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
2874         iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
2875         mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
2876
2877         if (iif) {
2878                 struct net_device *dev;
2879
2880                 dev = __dev_get_by_index(net, iif);
2881                 if (dev == NULL) {
2882                         err = -ENODEV;
2883                         goto errout_free;
2884                 }
2885
2886                 skb->protocol   = htons(ETH_P_IP);
2887                 skb->dev        = dev;
2888                 skb->mark       = mark;
2889                 local_bh_disable();
2890                 err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
2891                 local_bh_enable();
2892
2893                 rt = skb_rtable(skb);
2894                 if (err == 0 && rt->dst.error)
2895                         err = -rt->dst.error;
2896         } else {
2897                 struct flowi4 fl4 = {
2898                         .daddr = dst,
2899                         .saddr = src,
2900                         .flowi4_tos = rtm->rtm_tos,
2901                         .flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0,
2902                         .flowi4_mark = mark,
2903                 };
2904                 rt = ip_route_output_key(net, &fl4);
2905
2906                 err = 0;
2907                 if (IS_ERR(rt))
2908                         err = PTR_ERR(rt);
2909         }
2910
2911         if (err)
2912                 goto errout_free;
2913
2914         skb_dst_set(skb, &rt->dst);
2915         if (rtm->rtm_flags & RTM_F_NOTIFY)
2916                 rt->rt_flags |= RTCF_NOTIFY;
2917
2918         err = rt_fill_info(net, skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
2919                            RTM_NEWROUTE, 0, 0);
2920         if (err <= 0)
2921                 goto errout_free;
2922
2923         err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
2924 errout:
2925         return err;
2926
2927 errout_free:
2928         kfree_skb(skb);
2929         goto errout;
2930 }
2931
2932 int ip_rt_dump(struct sk_buff *skb,  struct netlink_callback *cb)
2933 {
2934         struct rtable *rt;
2935         int h, s_h;
2936         int idx, s_idx;
2937         struct net *net;
2938
2939         net = sock_net(skb->sk);
2940
2941         s_h = cb->args[0];
2942         if (s_h < 0)
2943                 s_h = 0;
2944         s_idx = idx = cb->args[1];
2945         for (h = s_h; h <= rt_hash_mask; h++, s_idx = 0) {
2946                 if (!rt_hash_table[h].chain)
2947                         continue;
2948                 rcu_read_lock_bh();
2949                 for (rt = rcu_dereference_bh(rt_hash_table[h].chain), idx = 0; rt;
2950                      rt = rcu_dereference_bh(rt->dst.rt_next), idx++) {
2951                         if (!net_eq(dev_net(rt->dst.dev), net) || idx < s_idx)
2952                                 continue;
2953                         if (rt_is_expired(rt))
2954                                 continue;
2955                         skb_dst_set_noref(skb, &rt->dst);
2956                         if (rt_fill_info(net, skb, NETLINK_CB(cb->skb).pid,
2957                                          cb->nlh->nlmsg_seq, RTM_NEWROUTE,
2958                                          1, NLM_F_MULTI) <= 0) {
2959                                 skb_dst_drop(skb);
2960                                 rcu_read_unlock_bh();
2961                                 goto done;
2962                         }
2963                         skb_dst_drop(skb);
2964                 }
2965                 rcu_read_unlock_bh();
2966         }
2967
2968 done:
2969         cb->args[0] = h;
2970         cb->args[1] = idx;
2971         return skb->len;
2972 }
2973
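ip_rt_dump() serves NLM_F_DUMP requests: it walks every chain of rt_hash_table under rcu_read_lock_bh() and emits one RTM_NEWROUTE with NLM_F_MULTI for each non-expired entry belonging to the requesting namespace, recording the bucket and chain index in cb->args[] so the dump resumes where it left off once the reply skb fills. The sketch below shows how a reader would drain such a multi-part reply; it is illustrative only, the helper name and buffer size are assumptions, and it expects a NETLINK_ROUTE socket on which an RTM_GETROUTE dump request (NLM_F_REQUEST | NLM_F_DUMP) has already been sent.

/*
 * Illustrative only -- not part of route.c.  Drains the multi-part
 * reply produced by ip_rt_dump(); the rtnetlink core terminates the
 * dump with an NLMSG_DONE message.
 */
#include <stdio.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

static void drain_route_dump(int fd)
{
        char buf[8192];

        for (;;) {
                ssize_t len = recv(fd, buf, sizeof(buf), 0);
                struct nlmsghdr *nlh = (struct nlmsghdr *)buf;

                if (len <= 0)
                        return;
                for (; NLMSG_OK(nlh, len); nlh = NLMSG_NEXT(nlh, len)) {
                        if (nlh->nlmsg_type == NLMSG_DONE)
                                return;
                        if (nlh->nlmsg_type == RTM_NEWROUTE)
                                printf("cached route, %u bytes\n",
                                       nlh->nlmsg_len);
                }
        }
}
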
2974 void ip_rt_multicast_event(struct in_device *in_dev)
2975 {
2976         rt_cache_flush(dev_net(in_dev->dev), 0);
2977 }
2978
2979 #ifdef CONFIG_SYSCTL
2980 static int ipv4_sysctl_rtcache_flush(ctl_table *__ctl, int write,
2981                                         void __user *buffer,
2982                                         size_t *lenp, loff_t *ppos)
2983 {
2984         if (write) {
2985                 int flush_delay;
2986                 ctl_table ctl;
2987                 struct net *net;
2988
2989                 memcpy(&ctl, __ctl, sizeof(ctl));
2990                 ctl.data = &flush_delay;
2991                 proc_dointvec(&ctl, write, buffer, lenp, ppos);
2992
2993                 net = (struct net *)__ctl->extra1;
2994                 rt_cache_flush(net, flush_delay);
2995                 return 0;
2996         }
2997
2998         return -EINVAL;
2999 }
3000
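ipv4_sysctl_rtcache_flush() above backs the write-only "flush" entry registered in ipv4_route_flush_table below: on a write it copies the ctl_table so that proc_dointvec() parses the written integer into a local flush_delay, then passes that to rt_cache_flush() for the namespace stashed in extra1, while reads are rejected with -EINVAL. A userspace sketch of triggering it follows; it is illustrative only, and the written delay value is arbitrary and interpreted by rt_cache_flush().

/*
 * Illustrative only -- not part of route.c.  Writes a delay value to
 * the per-namespace flush file; the handler above forwards it to
 * rt_cache_flush().
 */
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/sys/net/ipv4/route/flush", "w");

        if (!f)
                return 1;               /* mode is 0200, so this needs privilege */
        fprintf(f, "%d\n", 0);          /* arbitrary example delay */
        fclose(f);
        return 0;
}
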
3001 static ctl_table ipv4_route_table[] = {
3002         {
3003                 .procname       = "gc_thresh",
3004                 .data           = &ipv4_dst_ops.gc_thresh,
3005                 .maxlen         = sizeof(int),
3006                 .mode           = 0644,
3007                 .proc_handler   = proc_dointvec,
3008         },
3009         {
3010                 .procname       = "max_size",
3011                 .data           = &ip_rt_max_size,
3012                 .maxlen         = sizeof(int),
3013                 .mode           = 0644,
3014                 .proc_handler   = proc_dointvec,
3015         },
3016         {
3017                 /* Deprecated. Use gc_min_interval_ms instead. */
3019                 .procname       = "gc_min_interval",
3020                 .data           = &ip_rt_gc_min_interval,
3021                 .maxlen         = sizeof(int),
3022                 .mode           = 0644,
3023                 .proc_handler   = proc_dointvec_jiffies,
3024         },
3025         {
3026                 .procname       = "gc_min_interval_ms",
3027                 .data           = &ip_rt_gc_min_interval,
3028                 .maxlen         = sizeof(int),
3029                 .mode           = 0644,
3030                 .proc_handler   = proc_dointvec_ms_jiffies,
3031         },
3032         {
3033                 .procname       = "gc_timeout",
3034                 .data           = &ip_rt_gc_timeout,
3035                 .maxlen         = sizeof(int),
3036                 .mode           = 0644,
3037                 .proc_handler   = proc_dointvec_jiffies,
3038         },
3039         {
3040                 .procname       = "gc_interval",
3041                 .data           = &ip_rt_gc_interval,
3042                 .maxlen         = sizeof(int),
3043                 .mode           = 0644,
3044                 .proc_handler   = proc_dointvec_jiffies,
3045         },
3046         {
3047                 .procname       = "redirect_load",
3048                 .data           = &ip_rt_redirect_load,
3049                 .maxlen         = sizeof(int),
3050                 .mode           = 0644,
3051                 .proc_handler   = proc_dointvec,
3052         },
3053         {
3054                 .procname       = "redirect_number",
3055                 .data           = &ip_rt_redirect_number,
3056                 .maxlen         = sizeof(int),
3057                 .mode           = 0644,
3058                 .proc_handler   = proc_dointvec,
3059         },
3060         {
3061                 .procname       = "redirect_silence",
3062                 .data           = &ip_rt_redirect_silence,
3063                 .maxlen         = sizeof(int),
3064                 .mode           = 0644,
3065                 .proc_handler   = proc_dointvec,
3066         },
3067         {
3068                 .procname       = "error_cost",
3069                 .data           = &ip_rt_error_cost,
3070                 .maxlen         = sizeof(int),
3071                 .mode           = 0644,
3072                 .proc_handler   = proc_dointvec,
3073         },
3074         {
3075                 .procname       = "error_burst",
3076                 .data           = &ip_rt_error_burst,
3077                 .maxlen         = sizeof(int),
3078                 .mode           = 0644,
3079                 .proc_handler   = proc_dointvec,
3080         },
3081         {
3082                 .procname       = "gc_elasticity",
3083                 .data           = &ip_rt_gc_elasticity,
3084                 .maxlen         = sizeof(int),
3085                 .mode           = 0644,
3086                 .proc_handler   = proc_dointvec,
3087         },
3088         {
3089                 .procname       = "mtu_expires",
3090                 .data           = &ip_rt_mtu_expires,
3091                 .maxlen         = sizeof(int),
3092                 .mode           = 0644,
3093                 .proc_handler   = proc_dointvec_jiffies,
3094         },
3095         {
3096                 .procname       = "min_pmtu",
3097                 .data           = &ip_rt_min_pmtu,
3098                 .maxlen         = sizeof(int),
3099                 .mode           = 0644,
3100                 .proc_handler   = proc_dointvec,
3101         },
3102         {
3103                 .procname       = "min_adv_mss",
3104                 .data           = &ip_rt_min_advmss,
3105                 .maxlen         = sizeof(int),
3106                 .mode           = 0644,
3107                 .proc_handler   = proc_dointvec,
3108         },
3109         { }
3110 };
3111
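Every entry in ipv4_route_table above is a plain integer knob exported under /proc/sys/net/ipv4/route/. Note that gc_min_interval and gc_min_interval_ms both point at ip_rt_gc_min_interval: the deprecated entry converts the jiffies value to and from seconds via proc_dointvec_jiffies, the newer one to and from milliseconds via proc_dointvec_ms_jiffies. The sketch below reads both views; it is illustrative only and read_long() is a hypothetical helper, not part of route.c.

/*
 * Illustrative only -- not part of route.c.  Reads the same kernel
 * variable through its seconds and milliseconds sysctl views.
 */
#include <stdio.h>

static long read_long(const char *path)
{
        long v = -1;
        FILE *f = fopen(path, "r");

        if (f) {
                if (fscanf(f, "%ld", &v) != 1)
                        v = -1;
                fclose(f);
        }
        return v;
}

int main(void)
{
        printf("gc_min_interval    = %ld (seconds)\n",
               read_long("/proc/sys/net/ipv4/route/gc_min_interval"));
        printf("gc_min_interval_ms = %ld (milliseconds)\n",
               read_long("/proc/sys/net/ipv4/route/gc_min_interval_ms"));
        return 0;
}

Since the proc handlers do the HZ conversion, the millisecond view should read roughly a thousand times the seconds view.
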
3112 static struct ctl_table ipv4_route_flush_table[] = {
3113         {
3114                 .procname       = "flush",
3115                 .maxlen         = sizeof(int),
3116                 .mode           = 0200,
3117                 .proc_handler   = ipv4_sysctl_rtcache_flush,
3118         },
3119         { },
3120 };
3121
3122 static __net_init int sysctl_route_net_init(struct net *net)
3123 {
3124         struct ctl_table *tbl;
3125
3126         tbl = ipv4_route_flush_table;
3127         if (!net_eq(net, &init_net)) {
3128                 tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
3129                 if (tbl == NULL)
3130                         goto err_dup;
3131         }
3132         tbl[0].extra1 = net;
3133
3134         net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
3135         if (net->ipv4.route_hdr == NULL)
3136                 goto err_reg;
3137         return 0;
3138
3139 err_reg:
3140         if (tbl != ipv4_route_flush_table)
3141                 kfree(tbl);
3142 err_dup:
3143         return -ENOMEM;
3144 }
3145
3146 static __net_exit void sysctl_route_net_exit(struct net *net)
3147 {
3148         struct ctl_table *tbl;
3149
3150         tbl = net->ipv4.route_hdr->ctl_table_arg;
3151         unregister_net_sysctl_table(net->ipv4.route_hdr);
3152         BUG_ON(tbl == ipv4_route_flush_table);
3153         kfree(tbl);
3154 }
3155
3156 static __net_initdata struct pernet_operations sysctl_route_ops = {
3157         .init = sysctl_route_net_init,
3158         .exit = sysctl_route_net_exit,
3159 };
3160 #endif
3161
3162 static __net_init int rt_genid_init(struct net *net)
3163 {
3164         get_random_bytes(&net->ipv4.rt_genid,
3165                          sizeof(net->ipv4.rt_genid));
3166         get_random_bytes(&net->ipv4.dev_addr_genid,
3167                          sizeof(net->ipv4.dev_addr_genid));
3168         return 0;
3169 }
3170
3171 static __net_initdata struct pernet_operations rt_genid_ops = {
3172         .init = rt_genid_init,
3173 };
3174
3175 static int __net_init ipv4_inetpeer_init(struct net *net)
3176 {
3177         struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
3178
3179         if (!bp)
3180                 return -ENOMEM;
3181         inet_peer_base_init(bp);
3182         net->ipv4.peers = bp;
3183         return 0;
3184 }
3185
3186 static void __net_exit ipv4_inetpeer_exit(struct net *net)
3187 {
3188         struct inet_peer_base *bp = net->ipv4.peers;
3189
3190         net->ipv4.peers = NULL;
3191         inetpeer_invalidate_tree(bp);
3192         kfree(bp);
3193 }
3194
3195 static __net_initdata struct pernet_operations ipv4_inetpeer_ops = {
3196         .init   =       ipv4_inetpeer_init,
3197         .exit   =       ipv4_inetpeer_exit,
3198 };
3199
3200 #ifdef CONFIG_IP_ROUTE_CLASSID
3201 struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
3202 #endif /* CONFIG_IP_ROUTE_CLASSID */
3203
3204 static __initdata unsigned long rhash_entries;
3205 static int __init set_rhash_entries(char *str)
3206 {
3207         int ret;
3208
3209         if (!str)
3210                 return 0;
3211
3212         ret = kstrtoul(str, 0, &rhash_entries);
3213         if (ret)
3214                 return 0;
3215
3216         return 1;
3217 }
3218 __setup("rhash_entries=", set_rhash_entries);
3219
3220 int __init ip_rt_init(void)
3221 {
3222         int rc = 0;
3223
3224 #ifdef CONFIG_IP_ROUTE_CLASSID
3225         ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
3226         if (!ip_rt_acct)
3227                 panic("IP: failed to allocate ip_rt_acct\n");
3228 #endif
3229
3230         ipv4_dst_ops.kmem_cachep =
3231                 kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
3232                                   SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
3233
3234         ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;
3235
3236         if (dst_entries_init(&ipv4_dst_ops) < 0)
3237                 panic("IP: failed to allocate ipv4_dst_ops counter\n");
3238
3239         if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
3240                 panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");
3241
3242         rt_hash_table = (struct rt_hash_bucket *)
3243                 alloc_large_system_hash("IP route cache",
3244                                         sizeof(struct rt_hash_bucket),
3245                                         rhash_entries,
3246                                         (totalram_pages >= 128 * 1024) ?
3247                                         15 : 17,
3248                                         0,
3249                                         &rt_hash_log,
3250                                         &rt_hash_mask,
3251                                         0,
3252                                         rhash_entries ? 0 : 512 * 1024);
3253         memset(rt_hash_table, 0, (rt_hash_mask + 1) * sizeof(struct rt_hash_bucket));
3254         rt_hash_lock_init();
3255
3256         ipv4_dst_ops.gc_thresh = (rt_hash_mask + 1);
3257         ip_rt_max_size = (rt_hash_mask + 1) * 16;
3258
3259         devinet_init();
3260         ip_fib_init();
3261
3262         INIT_DELAYED_WORK_DEFERRABLE(&expires_work, rt_worker_func);
3263         expires_ljiffies = jiffies;
3264         schedule_delayed_work(&expires_work,
3265                 net_random() % ip_rt_gc_interval + ip_rt_gc_interval);
3266
3267         if (ip_rt_proc_init())
3268                 pr_err("Unable to create route proc files\n");
3269 #ifdef CONFIG_XFRM
3270         xfrm_init();
3271         xfrm4_init(ip_rt_max_size);
3272 #endif
3273         rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL, NULL);
3274
3275 #ifdef CONFIG_SYSCTL
3276         register_pernet_subsys(&sysctl_route_ops);
3277 #endif
3278         register_pernet_subsys(&rt_genid_ops);
3279         register_pernet_subsys(&ipv4_inetpeer_ops);
3280         return rc;
3281 }
3282
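In ip_rt_init() above, the bucket count chosen by alloc_large_system_hash() (reported back through rt_hash_mask) directly sets the garbage-collection threshold and the hard cap on cached routes: one entry per bucket for ipv4_dst_ops.gc_thresh and sixteen per bucket for ip_rt_max_size. A sketch of that arithmetic follows; it is illustrative only, and the 512 * 1024 bucket count is a hypothetical figure mirroring the upper limit passed when no rhash_entries= boot parameter is supplied (the real count depends on available memory).

/*
 * Illustrative only -- not part of route.c.  Shows how gc_thresh and
 * ip_rt_max_size follow from the route cache bucket count; the bucket
 * count used here is hypothetical.
 */
#include <stdio.h>

int main(void)
{
        unsigned int buckets   = 512 * 1024;            /* hypothetical */
        unsigned int hash_mask = buckets - 1;
        unsigned int gc_thresh = hash_mask + 1;         /* 1 entry per bucket */
        unsigned int max_size  = (hash_mask + 1) * 16;  /* 16 entries per bucket */

        printf("buckets=%u gc_thresh=%u max_size=%u\n",
               buckets, gc_thresh, max_size);
        return 0;
}
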
3283 #ifdef CONFIG_SYSCTL
3284 /*
3285  * We really need to sanitize the damn ipv4 init order, then all
3286  * this nonsense will go away.
3287  */
3288 void __init ip_static_sysctl_init(void)
3289 {
3290         register_net_sysctl(&init_net, "net/ipv4/route", ipv4_route_table);
3291 }
3292 #endif