/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		IPv4 FIB: lookup engine and maintenance routines.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/init.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/ip_fib.h>

#include "fib_lookup.h"

static struct kmem_cache *fn_hash_kmem __read_mostly;
static struct kmem_cache *fn_alias_kmem __read_mostly;

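/*
 * A fib_node represents one destination prefix (fn_key) within a zone.
 * All routes sharing that prefix hang off fn_alias as fib_alias entries;
 * the first alias is normally stored in fn_embedded_alias so that the
 * common single-route case needs no separate fn_alias_kmem allocation
 * (see fib_fast_alloc() and fn_free_alias() below).
 */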
struct fib_node {
	struct hlist_node	fn_hash;
	struct list_head	fn_alias;
	__be32			fn_key;
	struct fib_alias	fn_embedded_alias;
};

#define EMBEDDED_HASH_SIZE (L1_CACHE_BYTES / sizeof(struct hlist_head))

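/*
 * A fn_zone holds every route with the same prefix length (fz_order).
 * Nodes are kept in a hash table keyed by the masked destination; the
 * table starts out as the embedded, cache-line sized fz_embedded_hash
 * and is grown by fn_rehash_zone() as entries accumulate.  Readers are
 * protected by RCU plus the fz_lock seqlock taken by resizes.
 */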
struct fn_zone {
	struct fn_zone __rcu	*fz_next;	/* Next not empty zone	*/
	struct hlist_head __rcu	*fz_hash;	/* Hash table pointer	*/
	seqlock_t		fz_lock;
	u32			fz_hashmask;	/* (fz_divisor - 1)	*/

	u8			fz_order;	/* Zone order (0..32)	*/
	u8			fz_revorder;	/* 32 - fz_order	*/
	__be32			fz_mask;	/* inet_make_mask(order) */
#define FZ_MASK(fz)		((fz)->fz_mask)

	struct hlist_head	fz_embedded_hash[EMBEDDED_HASH_SIZE];

	int			fz_nent;	/* Number of entries	*/
	int			fz_divisor;	/* Hash size (mask+1)	*/
};

struct fn_hash {
	struct fn_zone		*fn_zones[33];
	struct fn_zone __rcu	*fn_zone_list;
};

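/*
 * Per-table state: fn_zones[] is indexed by prefix length (0..32) for
 * direct access on insert/delete, while fn_zone_list chains only the
 * non-empty zones, most specific mask first, which is the order the
 * lookup below walks them (longest-prefix match).
 */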
static inline u32 fn_hash(__be32 key, struct fn_zone *fz)
{
	u32 h = ntohl(key) >> fz->fz_revorder;
	h ^= (h>>20);
	h ^= (h>>10);
	h ^= (h>>5);
	h &= fz->fz_hashmask;
	return h;
}

static inline __be32 fz_key(__be32 dst, struct fn_zone *fz)
{
	return dst & FZ_MASK(fz);
}

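/*
 * Example (illustrative): in the /24 zone, fz_revorder is 8 and fz_mask
 * is 255.255.255.0, so a destination of 192.0.2.77 is first masked to
 * the key 192.0.2.0 by fz_key() and then hashed by fn_hash() using only
 * the 24 prefix bits that remain after the right shift.
 */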
static unsigned int fib_hash_genid;

#define FZ_MAX_DIVISOR ((PAGE_SIZE<<MAX_ORDER) / sizeof(struct hlist_head))

static struct hlist_head *fz_hash_alloc(int divisor)
{
	unsigned long size = divisor * sizeof(struct hlist_head);

	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);

	return (struct hlist_head *)
		__get_free_pages(GFP_KERNEL | __GFP_ZERO, get_order(size));
}

/* The fib hash lock must be held when this is called. */
static inline void fn_rebuild_zone(struct fn_zone *fz,
				   struct hlist_head *old_ht,
				   int old_divisor)
{
	int i;

	for (i = 0; i < old_divisor; i++) {
		struct hlist_node *node, *n;
		struct fib_node *f;

		hlist_for_each_entry_safe(f, node, n, &old_ht[i], fn_hash) {
			struct hlist_head __rcu *new_head;

			hlist_del_rcu(&f->fn_hash);

			new_head = &fz->fz_hash[fn_hash(f->fn_key, fz)];
			hlist_add_head_rcu(&f->fn_hash, new_head);
		}
	}
}

static void fz_hash_free(struct hlist_head *hash, int divisor)
{
	unsigned long size = divisor * sizeof(struct hlist_head);

	if (size <= PAGE_SIZE)
		kfree(hash);
	else
		free_pages((unsigned long)hash, get_order(size));
}

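/*
 * Grow a zone's hash table once it holds more than two entries per
 * bucket on average (see the check in fib_table_insert()).  The
 * embedded table is grown in larger steps first, then doubled on each
 * further resize, bounded by FZ_MAX_DIVISOR.  The seqlock lets lockless
 * readers detect a resize in progress and retry.
 */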
static void fn_rehash_zone(struct fn_zone *fz)
{
	struct hlist_head *ht, *old_ht;
	int old_divisor, new_divisor;
	u32 new_hashmask;

	new_divisor = old_divisor = fz->fz_divisor;

	switch (old_divisor) {
	case EMBEDDED_HASH_SIZE:
		new_divisor *= EMBEDDED_HASH_SIZE;
		break;
	case EMBEDDED_HASH_SIZE*EMBEDDED_HASH_SIZE:
		new_divisor *= (EMBEDDED_HASH_SIZE/2);
		break;
	default:
		if ((old_divisor << 1) > FZ_MAX_DIVISOR) {
			printk(KERN_CRIT "route.c: bad divisor %d!\n", old_divisor);
			return;
		}
		new_divisor = (old_divisor << 1);
		break;
	}

	new_hashmask = (new_divisor - 1);

#if RT_CACHE_DEBUG >= 2
	printk(KERN_DEBUG "fn_rehash_zone: hash for zone %d grows from %d\n",
	       fz->fz_order, old_divisor);
#endif

	ht = fz_hash_alloc(new_divisor);

	if (ht) {
		struct fn_zone nfz;

		memcpy(&nfz, fz, sizeof(nfz));

		write_seqlock_bh(&fz->fz_lock);
		old_ht = fz->fz_hash;
		nfz.fz_hash = ht;
		nfz.fz_hashmask = new_hashmask;
		nfz.fz_divisor = new_divisor;
		fn_rebuild_zone(&nfz, old_ht, old_divisor);
		fib_hash_genid++;
		rcu_assign_pointer(fz->fz_hash, ht);
		fz->fz_hashmask = new_hashmask;
		fz->fz_divisor = new_divisor;
		write_sequnlock_bh(&fz->fz_lock);

		if (old_ht != fz->fz_embedded_hash) {
			synchronize_rcu();
			fz_hash_free(old_ht, old_divisor);
		}
	}
}

static void fn_free_node_rcu(struct rcu_head *head)
{
	struct fib_node *f = container_of(head, struct fib_node, fn_embedded_alias.rcu);

	kmem_cache_free(fn_hash_kmem, f);
}

static inline void fn_free_node(struct fib_node *f)
{
	call_rcu(&f->fn_embedded_alias.rcu, fn_free_node_rcu);
}

static void fn_free_alias_rcu(struct rcu_head *head)
{
	struct fib_alias *fa = container_of(head, struct fib_alias, rcu);

	kmem_cache_free(fn_alias_kmem, fa);
}

static inline void fn_free_alias(struct fib_alias *fa, struct fib_node *f)
{
	fib_release_info(fa->fa_info);
	if (fa == &f->fn_embedded_alias)
		fa->fa_info = NULL;
	else
		call_rcu(&fa->rcu, fn_free_alias_rcu);
}

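/*
 * Allocate the zone for prefix length z on first use and splice it into
 * fn_zone_list so that the list stays ordered from the most specific
 * mask to the least specific one.
 */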
static struct fn_zone *
fn_new_zone(struct fn_hash *table, int z)
{
	int i;
	struct fn_zone *fz = kzalloc(sizeof(struct fn_zone), GFP_KERNEL);

	if (!fz)
		return NULL;

	seqlock_init(&fz->fz_lock);
	fz->fz_divisor = z ? EMBEDDED_HASH_SIZE : 1;
	fz->fz_hashmask = fz->fz_divisor - 1;
	fz->fz_hash = fz->fz_embedded_hash;
	fz->fz_order = z;
	fz->fz_revorder = 32 - z;
	fz->fz_mask = inet_make_mask(z);

	/* Find the first not empty zone with more specific mask */
	for (i = z + 1; i <= 32; i++)
		if (table->fn_zones[i])
			break;
	if (i > 32) {
		/* No more specific masks, we are the first. */
		rcu_assign_pointer(fz->fz_next,
				   rtnl_dereference(table->fn_zone_list));
		rcu_assign_pointer(table->fn_zone_list, fz);
	} else {
		rcu_assign_pointer(fz->fz_next,
				   rtnl_dereference(table->fn_zones[i]->fz_next));
		rcu_assign_pointer(table->fn_zones[i]->fz_next, fz);
	}
	table->fn_zones[z] = fz;
	fib_hash_genid++;

	return fz;
}

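/*
 * Longest-prefix lookup: walk the non-empty zones from the most to the
 * least specific mask, mask the destination down to each zone's prefix,
 * and hand the matching hash chain to fib_semantic_match().  The first
 * zone that produces a usable result wins.  For example, a lookup of
 * 192.0.2.1 consults the /32 zone, then /24, then /16, ... down to the
 * default /0 zone, skipping prefix lengths that hold no routes.
 */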
int fib_table_lookup(struct fib_table *tb,
		     const struct flowi *flp, struct fib_result *res,
		     int fib_flags)
{
	int err;
	struct fn_zone *fz;
	struct fn_hash *t = (struct fn_hash *)tb->tb_data;

	rcu_read_lock();
	for (fz = rcu_dereference(t->fn_zone_list);
	     fz != NULL;
	     fz = rcu_dereference(fz->fz_next)) {
		struct hlist_head __rcu *head;
		struct hlist_node *node;
		struct fib_node *f;
		__be32 k;
		unsigned int seq;

		do {
			seq = read_seqbegin(&fz->fz_lock);
			k = fz_key(flp->fl4_dst, fz);

			head = &fz->fz_hash[fn_hash(k, fz)];
			hlist_for_each_entry_rcu(f, node, head, fn_hash) {
				if (f->fn_key != k)
					continue;

				err = fib_semantic_match(&f->fn_alias,
							 flp, res,
							 fz->fz_order, fib_flags);
				if (err <= 0)
					goto out;
			}
		} while (read_seqretry(&fz->fz_lock, seq));
	}
	err = 1;
out:
	rcu_read_unlock();
	return err;
}

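/*
 * Pick a default route.  Only the /0 zone is examined: among the
 * aliases whose scope and type match the current result, prefer the
 * first gateway route whose nexthop is still alive (fib_detect_death()),
 * falling back to the last-resort entry recorded during the scan.
 */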
void fib_table_select_default(struct fib_table *tb,
			      const struct flowi *flp, struct fib_result *res)
{
	int order, last_idx;
	struct hlist_node *node;
	struct fib_node *f;
	struct fib_info *fi = NULL;
	struct fib_info *last_resort;
	struct fn_hash *t = (struct fn_hash *)tb->tb_data;
	struct fn_zone *fz = t->fn_zones[0];

	if (fz == NULL)
		return;

	last_idx = -1;
	last_resort = NULL;
	order = -1;

	rcu_read_lock();
	hlist_for_each_entry_rcu(f, node, &fz->fz_hash[0], fn_hash) {
		struct fib_alias *fa;

		list_for_each_entry_rcu(fa, &f->fn_alias, fa_list) {
			struct fib_info *next_fi = fa->fa_info;

			if (fa->fa_scope != res->scope ||
			    fa->fa_type != RTN_UNICAST)
				continue;

			if (next_fi->fib_priority > res->fi->fib_priority)
				break;
			if (!next_fi->fib_nh[0].nh_gw ||
			    next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
				continue;

			fib_alias_accessed(fa);

			if (fi == NULL) {
				if (next_fi != res->fi)
					break;
			} else if (!fib_detect_death(fi, order, &last_resort,
						     &last_idx, tb->tb_default)) {
				fib_result_assign(res, fi);
				tb->tb_default = order;
				goto out;
			}
			fi = next_fi;
			order++;
		}
	}

	if (order <= 0 || fi == NULL) {
		tb->tb_default = -1;
		goto out;
	}

	if (!fib_detect_death(fi, order, &last_resort, &last_idx,
			      tb->tb_default)) {
		fib_result_assign(res, fi);
		tb->tb_default = order;
		goto out;
	}

	if (last_idx >= 0)
		fib_result_assign(res, last_resort);
	tb->tb_default = last_idx;
out:
	rcu_read_unlock();
}

/* Insert node F to FZ. */
static inline void fib_insert_node(struct fn_zone *fz, struct fib_node *f)
{
	struct hlist_head *head = &fz->fz_hash[fn_hash(f->fn_key, fz)];

	hlist_add_head_rcu(&f->fn_hash, head);
}

/* Return the node in FZ matching KEY. */
static struct fib_node *fib_find_node(struct fn_zone *fz, __be32 key)
{
	struct hlist_head *head = &fz->fz_hash[fn_hash(key, fz)];
	struct hlist_node *node;
	struct fib_node *f;

	hlist_for_each_entry_rcu(f, node, head, fn_hash) {
		if (f->fn_key == key)
			return f;
	}

	return NULL;
}

static struct fib_alias *fib_fast_alloc(struct fib_node *f)
{
	struct fib_alias *fa = &f->fn_embedded_alias;

	if (fa->fa_info != NULL)
		fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
	return fa;
}

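/*
 * Route insertion in outline: find (or create) the zone for the prefix
 * length, rehash it if it has grown too dense, locate the fib_node for
 * the key and the alias position for (tos, priority), then honour the
 * netlink flags: NLM_F_EXCL refuses duplicates, NLM_F_REPLACE swaps the
 * existing alias in place via RCU, NLM_F_APPEND controls where in the
 * alias list the new entry lands, and NLM_F_CREATE is required before a
 * genuinely new entry is added.
 */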
/* Caller must hold RTNL. */
int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
{
	struct fn_hash *table = (struct fn_hash *) tb->tb_data;
	struct fib_node *new_f = NULL;
	struct fib_node *f;
	struct fib_alias *fa, *new_fa;
	struct fn_zone *fz;
	struct fib_info *fi;
	u8 tos = cfg->fc_tos;
	__be32 key;
	int err;

	if (cfg->fc_dst_len > 32)
		return -EINVAL;

	fz = table->fn_zones[cfg->fc_dst_len];
	if (!fz && !(fz = fn_new_zone(table, cfg->fc_dst_len)))
		return -ENOBUFS;

	if (cfg->fc_dst & ~FZ_MASK(fz))
		return -EINVAL;
	key = fz_key(cfg->fc_dst, fz);

	fi = fib_create_info(cfg);
	if (IS_ERR(fi))
		return PTR_ERR(fi);

	if (fz->fz_nent > (fz->fz_divisor<<1) &&
	    fz->fz_divisor < FZ_MAX_DIVISOR &&
	    (cfg->fc_dst_len == 32 ||
	     (1 << cfg->fc_dst_len) > fz->fz_divisor))
		fn_rehash_zone(fz);

	f = fib_find_node(fz, key);

	if (!f)
		fa = NULL;
	else
		fa = fib_find_alias(&f->fn_alias, tos, fi->fib_priority);

	/* Now fa, if non-NULL, points to the first fib alias
	 * with the same keys [prefix,tos,priority], if such key already
	 * exists or to the node before which we will insert new one.
	 *
	 * If fa is NULL, we will need to allocate a new one and
	 * insert to the head of f.
	 *
	 * If f is NULL, no fib node matched the destination key
	 * and we need to allocate a new one of those as well.
	 */

	if (fa && fa->fa_tos == tos &&
	    fa->fa_info->fib_priority == fi->fib_priority) {
		struct fib_alias *fa_first, *fa_match;

		err = -EEXIST;
		if (cfg->fc_nlflags & NLM_F_EXCL)
			goto out;

		/* We have 2 goals:
		 * 1. Find exact match for type, scope, fib_info to avoid
		 * duplicate routes
		 * 2. Find next 'fa' (or head), NLM_F_APPEND inserts before it
		 */
		fa_match = NULL;
		fa_first = fa;
		fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
		list_for_each_entry_continue(fa, &f->fn_alias, fa_list) {
			if (fa->fa_tos != tos)
				break;
			if (fa->fa_info->fib_priority != fi->fib_priority)
				break;
			if (fa->fa_type == cfg->fc_type &&
			    fa->fa_scope == cfg->fc_scope &&
			    fa->fa_info == fi) {
				fa_match = fa;
				break;
			}
		}

		if (cfg->fc_nlflags & NLM_F_REPLACE) {
			u8 state;

			fa = fa_first;
			if (fa_match) {
				if (fa == fa_match)
					err = 0;
				goto out;
			}
			err = -ENOBUFS;
			new_fa = fib_fast_alloc(f);
			if (new_fa == NULL)
				goto out;

			new_fa->fa_tos = fa->fa_tos;
			new_fa->fa_info = fi;
			new_fa->fa_type = cfg->fc_type;
			new_fa->fa_scope = cfg->fc_scope;
			state = fa->fa_state;
			new_fa->fa_state = state & ~FA_S_ACCESSED;
			fib_hash_genid++;
			list_replace_rcu(&fa->fa_list, &new_fa->fa_list);

			fn_free_alias(fa, f);
			if (state & FA_S_ACCESSED)
				rt_cache_flush(cfg->fc_nlinfo.nl_net, -1);
			rtmsg_fib(RTM_NEWROUTE, key, new_fa, cfg->fc_dst_len,
				  tb->tb_id, &cfg->fc_nlinfo, NLM_F_REPLACE);
			return 0;
		}

		/* Error if we find a perfect match which
		 * uses the same scope, type, and nexthop
		 * information.
		 */
		if (fa_match)
			goto out;

		if (!(cfg->fc_nlflags & NLM_F_APPEND))
			fa = fa_first;
	}

	err = -ENOENT;
	if (!(cfg->fc_nlflags & NLM_F_CREATE))
		goto out;

	err = -ENOBUFS;

	if (!f) {
		new_f = kmem_cache_zalloc(fn_hash_kmem, GFP_KERNEL);
		if (new_f == NULL)
			goto out;

		INIT_HLIST_NODE(&new_f->fn_hash);
		INIT_LIST_HEAD(&new_f->fn_alias);
		new_f->fn_key = key;
		f = new_f;
	}

	new_fa = fib_fast_alloc(f);
	if (new_fa == NULL)
		goto out;

	new_fa->fa_info = fi;
	new_fa->fa_tos = tos;
	new_fa->fa_type = cfg->fc_type;
	new_fa->fa_scope = cfg->fc_scope;
	new_fa->fa_state = 0;

	/*
	 * Insert new entry to the list.
	 */
	if (new_f)
		fib_insert_node(fz, new_f);
	list_add_tail_rcu(&new_fa->fa_list,
			  (fa ? &fa->fa_list : &f->fn_alias));
	fib_hash_genid++;

	if (new_f)
		fz->fz_nent++;
	rt_cache_flush(cfg->fc_nlinfo.nl_net, -1);

	rtmsg_fib(RTM_NEWROUTE, key, new_fa, cfg->fc_dst_len, tb->tb_id,
		  &cfg->fc_nlinfo, 0);
	return 0;

out:
	if (new_f)
		kmem_cache_free(fn_hash_kmem, new_f);
	fib_release_info(fi);
	return err;
}

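/*
 * Route deletion: locate the node for the key, then scan its alias list
 * for an entry matching the requested tos, type, scope, protocol and
 * nexthop.  The alias (and the node, if it becomes empty) is unlinked
 * under RTNL and the memory is reclaimed after an RCU grace period via
 * fn_free_alias()/fn_free_node().
 */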
int fib_table_delete(struct fib_table *tb, struct fib_config *cfg)
{
	struct fn_hash *table = (struct fn_hash *)tb->tb_data;
	struct fib_node *f;
	struct fib_alias *fa, *fa_to_delete;
	struct fn_zone *fz;
	__be32 key;

	if (cfg->fc_dst_len > 32)
		return -EINVAL;

	if ((fz = table->fn_zones[cfg->fc_dst_len]) == NULL)
		return -ESRCH;

	if (cfg->fc_dst & ~FZ_MASK(fz))
		return -EINVAL;
	key = fz_key(cfg->fc_dst, fz);

	f = fib_find_node(fz, key);
	if (!f)
		return -ESRCH;

	fa = fib_find_alias(&f->fn_alias, cfg->fc_tos, 0);
	if (!fa)
		return -ESRCH;

	fa_to_delete = NULL;
	fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
	list_for_each_entry_continue(fa, &f->fn_alias, fa_list) {
		struct fib_info *fi = fa->fa_info;

		if (fa->fa_tos != cfg->fc_tos)
			break;

		if ((!cfg->fc_type ||
		     fa->fa_type == cfg->fc_type) &&
		    (cfg->fc_scope == RT_SCOPE_NOWHERE ||
		     fa->fa_scope == cfg->fc_scope) &&
		    (!cfg->fc_protocol ||
		     fi->fib_protocol == cfg->fc_protocol) &&
		    fib_nh_match(cfg, fi) == 0) {
			fa_to_delete = fa;
			break;
		}
	}

	if (fa_to_delete) {
		int kill_fn = 0;

		fa = fa_to_delete;
		rtmsg_fib(RTM_DELROUTE, key, fa, cfg->fc_dst_len,
			  tb->tb_id, &cfg->fc_nlinfo, 0);

		list_del_rcu(&fa->fa_list);
		if (list_empty(&f->fn_alias)) {
			hlist_del_rcu(&f->fn_hash);
			kill_fn = 1;
		}
		fib_hash_genid++;

		if (fa->fa_state & FA_S_ACCESSED)
			rt_cache_flush(cfg->fc_nlinfo.nl_net, -1);
		fn_free_alias(fa, f);
		if (kill_fn) {
			fn_free_node(f);
			fz->fz_nent--;
		}
		return 0;
	}
	return -ESRCH;
}

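/*
 * Drop every alias whose fib_info is marked RTNH_F_DEAD from one hash
 * bucket, removing nodes that become empty.  Used by fib_table_flush(),
 * typically when addresses or devices go away.  Returns the number of
 * entries removed.
 */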
static int fn_flush_list(struct fn_zone *fz, int idx)
{
	struct hlist_head *head = &fz->fz_hash[idx];
	struct hlist_node *node, *n;
	struct fib_node *f;
	int found = 0;

	hlist_for_each_entry_safe(f, node, n, head, fn_hash) {
		struct fib_alias *fa, *fa_node;
		int kill_f = 0;

		list_for_each_entry_safe(fa, fa_node, &f->fn_alias, fa_list) {
			struct fib_info *fi = fa->fa_info;

			if (fi && (fi->fib_flags&RTNH_F_DEAD)) {
				list_del_rcu(&fa->fa_list);
				if (list_empty(&f->fn_alias)) {
					hlist_del_rcu(&f->fn_hash);
					kill_f = 1;
				}
				fib_hash_genid++;

				fn_free_alias(fa, f);
				found++;
			}
		}
		if (kill_f) {
			fn_free_node(f);
			fz->fz_nent--;
		}
	}
	return found;
}

/* caller must hold RTNL. */
int fib_table_flush(struct fib_table *tb)
{
	struct fn_hash *table = (struct fn_hash *) tb->tb_data;
	struct fn_zone *fz;
	int found = 0;

	for (fz = rtnl_dereference(table->fn_zone_list);
	     fz != NULL;
	     fz = rtnl_dereference(fz->fz_next)) {
		int i;

		for (i = fz->fz_divisor - 1; i >= 0; i--)
			found += fn_flush_list(fz, i);
	}
	return found;
}

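/*
 * Netlink dump helpers.  A dump may be split across several recvmsg()
 * calls, so the position is parked in the callback args between calls:
 * cb->args[2] remembers the zone, cb->args[3] the hash bucket and
 * cb->args[4] the entry within the bucket.
 */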
static inline int
fn_hash_dump_bucket(struct sk_buff *skb, struct netlink_callback *cb,
		    struct fib_table *tb,
		    struct fn_zone *fz,
		    struct hlist_head *head)
{
	struct hlist_node *node;
	struct fib_node *f;

	hlist_for_each_entry_rcu(f, node, head, fn_hash) {
		struct fib_alias *fa;

		list_for_each_entry_rcu(fa, &f->fn_alias, fa_list) {
			if (fib_dump_info(skb, NETLINK_CB(cb->skb).pid,
static inline int
fn_hash_dump_zone(struct sk_buff *skb, struct netlink_callback *cb,
		  struct fib_table *tb,
		  struct fn_zone *fz)
{
	int h, s_h;

	if (fz->fz_hash == NULL)
		return skb->len;
	s_h = cb->args[3];
	for (h = s_h; h < fz->fz_divisor; h++) {
		if (hlist_empty(&fz->fz_hash[h]))
			continue;
		if (fn_hash_dump_bucket(skb, cb, tb, fz, &fz->fz_hash[h]) < 0) {
			cb->args[3] = h;
			return -1;
		}
		memset(&cb->args[4], 0,
		       sizeof(cb->args) - 4*sizeof(cb->args[0]));
	}
	cb->args[3] = h;
	return skb->len;
}

int fib_table_dump(struct fib_table *tb, struct sk_buff *skb,
		   struct netlink_callback *cb)
{
	int m = 0, s_m;
	struct fn_zone *fz;
	struct fn_hash *table = (struct fn_hash *)tb->tb_data;

	s_m = cb->args[2];
	rcu_read_lock();
	for (fz = rcu_dereference(table->fn_zone_list);
	     fz != NULL;
	     fz = rcu_dereference(fz->fz_next), m++) {
		if (m < s_m)
			continue;
		if (fn_hash_dump_zone(skb, cb, tb, fz) < 0) {
			cb->args[2] = m;
			rcu_read_unlock();
			return -1;
		}
		memset(&cb->args[3], 0,
		       sizeof(cb->args) - 3*sizeof(cb->args[0]));
	}
	rcu_read_unlock();
	cb->args[2] = m;
	return skb->len;
}

void __init fib_hash_init(void)
{
	fn_hash_kmem = kmem_cache_create("ip_fib_hash", sizeof(struct fib_node),
					 0, SLAB_PANIC, NULL);

	fn_alias_kmem = kmem_cache_create("ip_fib_alias", sizeof(struct fib_alias),
					  0, SLAB_PANIC, NULL);
}

struct fib_table *fib_hash_table(u32 id)
{
	struct fib_table *tb;

	tb = kmalloc(sizeof(struct fib_table) + sizeof(struct fn_hash),
		     GFP_KERNEL);
	if (tb == NULL)
		return NULL;

	tb->tb_id = id;
	tb->tb_default = -1;

	memset(tb->tb_data, 0, sizeof(struct fn_hash));
	return tb;
}

/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS

struct fib_iter_state {
	struct seq_net_private p;
	struct fn_zone		*zone;
	int			bucket;
	struct hlist_head	*hash_head;
	struct fib_node		*fn;
	struct fib_alias	*fa;
	loff_t			pos;
	unsigned int		genid;
	int			valid;
};

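/*
 * seq_file iterator over the main table for /proc/net/route.  The
 * cursor (zone, bucket, node, alias) is cached together with pos and
 * the generation count; fib_get_idx() reuses it only while genid still
 * matches fib_hash_genid, i.e. while the table has not been modified.
 */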
static struct fib_alias *fib_get_first(struct seq_file *seq)
{
	struct fib_iter_state *iter = seq->private;
	struct fib_table *main_table;
	struct fn_hash *table;

	main_table = fib_get_table(seq_file_net(seq), RT_TABLE_MAIN);
	table = (struct fn_hash *)main_table->tb_data;

	iter->bucket    = 0;
	iter->hash_head = NULL;
	iter->fn        = NULL;
	iter->fa        = NULL;
	iter->pos       = 0;
	iter->genid     = fib_hash_genid;
	iter->valid     = 1;

	for (iter->zone = rcu_dereference(table->fn_zone_list);
	     iter->zone != NULL;
	     iter->zone = rcu_dereference(iter->zone->fz_next)) {
		int maxslot;

		if (!iter->zone->fz_nent)
			continue;

		iter->hash_head = iter->zone->fz_hash;
		maxslot = iter->zone->fz_divisor;

		for (iter->bucket = 0; iter->bucket < maxslot;
		     ++iter->bucket, ++iter->hash_head) {
			struct hlist_node *node;
			struct fib_node *fn;

			hlist_for_each_entry(fn, node, iter->hash_head, fn_hash) {
				struct fib_alias *fa;

				list_for_each_entry(fa, &fn->fn_alias, fa_list) {
					iter->fn = fn;
					iter->fa = fa;
					goto out;
				}
			}
		}
	}
out:
	return iter->fa;
}

static struct fib_alias *fib_get_next(struct seq_file *seq)
{
	struct fib_iter_state *iter = seq->private;
	struct fib_node *fn;
	struct fib_alias *fa;

	/* Advance FA, if any. */
	fn = iter->fn;
	fa = iter->fa;
	if (fa) {
		list_for_each_entry_continue(fa, &fn->fn_alias, fa_list) {
			iter->fa = fa;
			goto out;
		}
	}

	fa = iter->fa = NULL;

	/* Advance FN. */
	if (fn) {
		struct hlist_node *node = &fn->fn_hash;
		hlist_for_each_entry_continue(fn, node, fn_hash) {
			iter->fn = fn;
			list_for_each_entry(fa, &fn->fn_alias, fa_list) {
				iter->fa = fa;
				goto out;
			}
		}
	}

	fn = iter->fn = NULL;

	/* Advance hash chain. */
	if (!iter->zone)
		goto out;

	for (;;) {
		struct hlist_node *node;
		int maxslot;

		maxslot = iter->zone->fz_divisor;

		while (++iter->bucket < maxslot) {
			iter->hash_head++;
			hlist_for_each_entry(fn, node, iter->hash_head, fn_hash) {
				list_for_each_entry(fa, &fn->fn_alias, fa_list) {
					iter->fn = fn;
					iter->fa = fa;
					goto out;
				}
			}
		}

		iter->zone = rcu_dereference(iter->zone->fz_next);
		if (!iter->zone)
			goto out;

		iter->bucket = 0;
		iter->hash_head = iter->zone->fz_hash;
		hlist_for_each_entry(fn, node, iter->hash_head, fn_hash) {
			list_for_each_entry(fa, &fn->fn_alias, fa_list) {
				iter->fn = fn;
				iter->fa = fa;
				goto out;
			}
		}
	}
out:
	iter->pos++;
	return fa;
}

static struct fib_alias *fib_get_idx(struct seq_file *seq, loff_t pos)
{
	struct fib_iter_state *iter = seq->private;
	struct fib_alias *fa;

	if (iter->valid && pos >= iter->pos && iter->genid == fib_hash_genid) {
		fa   = iter->fa;
		pos -= iter->pos;
	} else
		fa = fib_get_first(seq);

	if (fa)
		while (pos && (fa = fib_get_next(seq)))
			--pos;
	return pos ? NULL : fa;
}

static void *fib_seq_start(struct seq_file *seq, loff_t *pos)
{
	void *v = NULL;

	rcu_read_lock();
	if (fib_get_table(seq_file_net(seq), RT_TABLE_MAIN))
		v = *pos ? fib_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
	return v;
}

static void *fib_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return v == SEQ_START_TOKEN ? fib_get_first(seq) : fib_get_next(seq);
}

static void fib_seq_stop(struct seq_file *seq, void *v)
{
	rcu_read_unlock();
}

static unsigned fib_flag_trans(int type, __be32 mask, struct fib_info *fi)
{
	static const unsigned type2flags[RTN_MAX + 1] = {
		[7] = RTF_REJECT, [8] = RTF_REJECT,
	};
	unsigned flags = type2flags[type];

	if (fi && fi->fib_nh->nh_gw)
		flags |= RTF_GATEWAY;
	if (mask == htonl(0xFFFFFFFF))
		flags |= RTF_HOST;
	flags |= RTF_UP;
	return flags;
}

/*
 *	This outputs /proc/net/route.
 *
 *	It always works in backward compatibility mode.
 *	The format of the file is not supposed to be changed.
 */
static int fib_seq_show(struct seq_file *seq, void *v)
{
	struct fib_iter_state *iter;
	int len;
	__be32 prefix, mask;
	unsigned flags;
	struct fib_node *f;
	struct fib_alias *fa;
	struct fib_info *fi;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "%-127s\n", "Iface\tDestination\tGateway "
			   "\tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU"
			   "\tWindow\tIRTT");
		goto out;
	}

	iter	= seq->private;
	f	= iter->fn;
	fa	= iter->fa;
	fi	= fa->fa_info;
	prefix	= f->fn_key;
	mask	= FZ_MASK(iter->zone);
	flags	= fib_flag_trans(fa->fa_type, mask, fi);
	if (fi)
		seq_printf(seq,
			 "%s\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u%n",
			 fi->fib_dev ? fi->fib_dev->name : "*", prefix,
			 fi->fib_nh->nh_gw, flags, 0, 0, fi->fib_priority,
			 mask, (fi->fib_advmss ? fi->fib_advmss + 40 : 0),
			 fi->fib_window,
			 fi->fib_rtt >> 3, &len);
	else
		seq_printf(seq,
			 "*\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u%n",
			 prefix, 0, flags, 0, 0, 0, mask, 0, 0, 0, &len);

	seq_printf(seq, "%*s\n", 127 - len, "");
out:
	return 0;
}

static const struct seq_operations fib_seq_ops = {
	.start  = fib_seq_start,
	.next   = fib_seq_next,
	.stop   = fib_seq_stop,
	.show   = fib_seq_show,
};

static int fib_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &fib_seq_ops,
			    sizeof(struct fib_iter_state));
}

static const struct file_operations fib_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= fib_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

int __net_init fib_proc_init(struct net *net)
{
	if (!proc_net_fops_create(net, "route", S_IRUGO, &fib_seq_fops))
		return -ENOMEM;
	return 0;
}

void __net_exit fib_proc_exit(struct net *net)
{
	proc_net_remove(net, "route");
}

#endif /* CONFIG_PROC_FS */