2 * DECnet An implementation of the DECnet protocol suite for the LINUX
3 * operating system. DECnet is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * DECnet Routing Functions (Endnode and Router)
8 * Authors: Steve Whitehouse <SteveW@ACM.org>
9 * Eduardo Marcelo Serrat <emserrat@geocities.com>
 * Steve Whitehouse : Fixes to allow "intra-ethernet" and
 *                    "return-to-sender" bits on outgoing packets
15 * Steve Whitehouse : Timeouts for cached routes.
16 * Steve Whitehouse : Use dst cache for input routes too.
17 * Steve Whitehouse : Fixed error values in dn_send_skb.
18 * Steve Whitehouse : Rework routing functions to better fit
19 * DECnet routing design
20 * Alexey Kuznetsov : New SMP locking
21 * Steve Whitehouse : More SMP locking changes & dn_cache_dump()
22 * Steve Whitehouse : Prerouting NF hook, now really is prerouting.
23 * Fixed possible skb leak in rtnetlink funcs.
24 * Steve Whitehouse : Dave Miller's dynamic hash table sizing and
25 * Alexey Kuznetsov's finer grained locking
 * Steve Whitehouse : Routing is now starting to look like a
 *                    sensible set of code, mainly due to
29 * my copying the IPv4 routing code. The
30 * hooks here are modified and will continue
31 * to evolve for a while.
32 * Steve Whitehouse : Real SMP at last :-) Also new netfilter
 *                    stuff. Look out raw sockets, your days are numbered.
35 * Steve Whitehouse : Added return-to-sender functions. Added
36 * backlog congestion level return codes.
37 * Steve Whitehouse : Fixed bug where routes were set up with
38 * no ref count on net devices.
39 * Steve Whitehouse : RCU for the route cache
40 * Steve Whitehouse : Preparations for the flow cache
41 * Steve Whitehouse : Prepare for nonlinear skbs
44 /******************************************************************************
45 (c) 1995-1998 E.M. Serrat emserrat@geocities.com
47 This program is free software; you can redistribute it and/or modify
48 it under the terms of the GNU General Public License as published by
49 the Free Software Foundation; either version 2 of the License, or
52 This program is distributed in the hope that it will be useful,
53 but WITHOUT ANY WARRANTY; without even the implied warranty of
54 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
55 GNU General Public License for more details.
56 *******************************************************************************/
58 #include <linux/errno.h>
59 #include <linux/types.h>
60 #include <linux/socket.h>
62 #include <linux/kernel.h>
63 #include <linux/sockios.h>
64 #include <linux/net.h>
65 #include <linux/netdevice.h>
66 #include <linux/inet.h>
67 #include <linux/route.h>
68 #include <linux/in_route.h>
69 #include <linux/slab.h>
72 #include <linux/proc_fs.h>
73 #include <linux/seq_file.h>
74 #include <linux/init.h>
75 #include <linux/rtnetlink.h>
76 #include <linux/string.h>
77 #include <linux/netfilter_decnet.h>
78 #include <linux/rcupdate.h>
79 #include <linux/times.h>
80 #include <linux/export.h>
81 #include <asm/errno.h>
82 #include <net/net_namespace.h>
83 #include <net/netlink.h>
84 #include <net/neighbour.h>
87 #include <net/fib_rules.h>
89 #include <net/dn_dev.h>
90 #include <net/dn_nsp.h>
91 #include <net/dn_route.h>
92 #include <net/dn_neigh.h>
93 #include <net/dn_fib.h>
struct dn_rt_hash_bucket {
	struct dn_route __rcu *chain;
	spinlock_t lock;
};
101 extern struct neigh_table dn_neigh_table;
104 static unsigned char dn_hiord_addr[6] = {0xAA,0x00,0x04,0x00,0x00,0x00};
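/*
 * Illustrative note: DECnet Phase IV nodes derive their Ethernet MAC
 * address from this HIORD prefix (AA-00-04-00) followed by the 16-bit
 * node address in little-endian order.  For example, node 1.100 has
 * address value 1*1024 + 100 = 0x0464 and therefore MAC AA:00:04:00:64:04.
 * The prefix is checked against the 6 byte addresses carried in
 * long-format routing headers further down in this file.
 */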
106 static const int dn_rt_min_delay = 2 * HZ;
107 static const int dn_rt_max_delay = 10 * HZ;
108 static const int dn_rt_mtu_expires = 10 * 60 * HZ;
110 static unsigned long dn_rt_deadline;
112 static int dn_dst_gc(struct dst_ops *ops);
113 static struct dst_entry *dn_dst_check(struct dst_entry *, __u32);
114 static unsigned int dn_dst_default_advmss(const struct dst_entry *dst);
115 static unsigned int dn_dst_mtu(const struct dst_entry *dst);
116 static void dn_dst_destroy(struct dst_entry *);
117 static struct dst_entry *dn_dst_negative_advice(struct dst_entry *);
118 static void dn_dst_link_failure(struct sk_buff *);
119 static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu);
120 static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst, const void *daddr);
121 static int dn_route_input(struct sk_buff *);
122 static void dn_run_flush(unsigned long dummy);
124 static struct dn_rt_hash_bucket *dn_rt_hash_table;
125 static unsigned int dn_rt_hash_mask;
127 static struct timer_list dn_route_timer;
128 static DEFINE_TIMER(dn_rt_flush_timer, dn_run_flush, 0, 0);
129 int decnet_dst_gc_interval = 2;
131 static struct dst_ops dn_dst_ops = {
133 .protocol = cpu_to_be16(ETH_P_DNA_RT),
136 .check = dn_dst_check,
137 .default_advmss = dn_dst_default_advmss,
139 .cow_metrics = dst_cow_metrics_generic,
140 .destroy = dn_dst_destroy,
141 .negative_advice = dn_dst_negative_advice,
142 .link_failure = dn_dst_link_failure,
143 .update_pmtu = dn_dst_update_pmtu,
144 .neigh_lookup = dn_dst_neigh_lookup,
147 static void dn_dst_destroy(struct dst_entry *dst)
149 dst_destroy_metrics_generic(dst);
152 static __inline__ unsigned int dn_hash(__le16 src, __le16 dst)
154 __u16 tmp = (__u16 __force)(src ^ dst);
158 return dn_rt_hash_mask & (unsigned int)tmp;
161 static inline void dnrt_free(struct dn_route *rt)
163 call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
166 static inline void dnrt_drop(struct dn_route *rt)
168 dst_release(&rt->dst);
169 call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
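/*
 * Route cache ageing: dn_dst_check_expire() runs periodically from
 * dn_route_timer and walks every hash chain, unlinking entries that are
 * unreferenced and have not been used recently.  dn_dst_gc() below is
 * the dst_ops garbage collector and does the same walk (with a much
 * shorter expiry) once the cache has grown past gc_thresh.
 */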
172 static void dn_dst_check_expire(unsigned long dummy)
176 struct dn_route __rcu **rtp;
177 unsigned long now = jiffies;
178 unsigned long expire = 120 * HZ;
180 for (i = 0; i <= dn_rt_hash_mask; i++) {
181 rtp = &dn_rt_hash_table[i].chain;
183 spin_lock(&dn_rt_hash_table[i].lock);
184 while ((rt = rcu_dereference_protected(*rtp,
185 lockdep_is_held(&dn_rt_hash_table[i].lock))) != NULL) {
186 if (atomic_read(&rt->dst.__refcnt) ||
187 (now - rt->dst.lastuse) < expire) {
188 rtp = &rt->dst.dn_next;
191 *rtp = rt->dst.dn_next;
192 rt->dst.dn_next = NULL;
195 spin_unlock(&dn_rt_hash_table[i].lock);
		if ((jiffies - now) > 0)
			break;
201 mod_timer(&dn_route_timer, now + decnet_dst_gc_interval * HZ);
204 static int dn_dst_gc(struct dst_ops *ops)
207 struct dn_route __rcu **rtp;
209 unsigned long now = jiffies;
210 unsigned long expire = 10 * HZ;
212 for (i = 0; i <= dn_rt_hash_mask; i++) {
214 spin_lock_bh(&dn_rt_hash_table[i].lock);
215 rtp = &dn_rt_hash_table[i].chain;
217 while ((rt = rcu_dereference_protected(*rtp,
218 lockdep_is_held(&dn_rt_hash_table[i].lock))) != NULL) {
219 if (atomic_read(&rt->dst.__refcnt) ||
220 (now - rt->dst.lastuse) < expire) {
221 rtp = &rt->dst.dn_next;
224 *rtp = rt->dst.dn_next;
225 rt->dst.dn_next = NULL;
229 spin_unlock_bh(&dn_rt_hash_table[i].lock);
 * The DECnet standards don't impose a particular minimum MTU; what they
 * do insist on is that the routing layer accepts datagrams of at least
 * 230 bytes. Here we have to subtract the routing header length from
 * 230 to get the minimum acceptable MTU. If there is no neighbour, then we
 * assume the worst and use the long header size.
 *
 * We update both the MTU and the advertised MSS (i.e. the segment size we
 * advertise to the other end).
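 *
 * Worked example (illustrative): with the 21 byte long-format routing
 * header the minimum acceptable MTU works out as 230 - 21 = 209 bytes,
 * while the 6 byte short-format header gives 230 - 6 = 224 bytes.  The
 * advertised MSS is then the MTU less DN_MAX_NSP_DATA_HEADER.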
245 static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu)
247 struct neighbour *n = dst_get_neighbour_noref(dst);
251 dn = n ? rcu_dereference_raw(n->dev->dn_ptr) : NULL;
253 if (dn && dn->use_long == 0)
258 if (dst_metric(dst, RTAX_MTU) > mtu && mtu >= min_mtu) {
259 if (!(dst_metric_locked(dst, RTAX_MTU))) {
260 dst_metric_set(dst, RTAX_MTU, mtu);
261 dst_set_expires(dst, dn_rt_mtu_expires);
263 if (!(dst_metric_locked(dst, RTAX_ADVMSS))) {
264 u32 mss = mtu - DN_MAX_NSP_DATA_HEADER;
265 u32 existing_mss = dst_metric_raw(dst, RTAX_ADVMSS);
266 if (!existing_mss || existing_mss > mss)
267 dst_metric_set(dst, RTAX_ADVMSS, mss);
 * Called when a route has been marked obsolete (e.g. by a routing cache flush).
275 static struct dst_entry *dn_dst_check(struct dst_entry *dst, __u32 cookie)
280 static struct dst_entry *dn_dst_negative_advice(struct dst_entry *dst)
286 static void dn_dst_link_failure(struct sk_buff *skb)
290 static inline int compare_keys(struct flowidn *fl1, struct flowidn *fl2)
292 return ((fl1->daddr ^ fl2->daddr) |
293 (fl1->saddr ^ fl2->saddr) |
294 (fl1->flowidn_mark ^ fl2->flowidn_mark) |
295 (fl1->flowidn_scope ^ fl2->flowidn_scope) |
296 (fl1->flowidn_oif ^ fl2->flowidn_oif) |
297 (fl1->flowidn_iif ^ fl2->flowidn_iif)) == 0;
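/*
 * compare_keys() above ORs together the XOR of each pair of flow
 * fields, so the result is zero exactly when every field matches.
 *
 * dn_insert_route() adds a route to the cache bucket selected by @hash.
 * If an entry with identical keys is already present it is moved to the
 * head of the chain and handed back through @rp instead of inserting a
 * duplicate; otherwise the new route is linked in at the head.
 */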
300 static int dn_insert_route(struct dn_route *rt, unsigned int hash, struct dn_route **rp)
302 struct dn_route *rth;
303 struct dn_route __rcu **rthp;
304 unsigned long now = jiffies;
306 rthp = &dn_rt_hash_table[hash].chain;
308 spin_lock_bh(&dn_rt_hash_table[hash].lock);
309 while ((rth = rcu_dereference_protected(*rthp,
310 lockdep_is_held(&dn_rt_hash_table[hash].lock))) != NULL) {
311 if (compare_keys(&rth->fld, &rt->fld)) {
313 *rthp = rth->dst.dn_next;
314 rcu_assign_pointer(rth->dst.dn_next,
315 dn_rt_hash_table[hash].chain);
316 rcu_assign_pointer(dn_rt_hash_table[hash].chain, rth);
318 dst_use(&rth->dst, now);
319 spin_unlock_bh(&dn_rt_hash_table[hash].lock);
325 rthp = &rth->dst.dn_next;
328 rcu_assign_pointer(rt->dst.dn_next, dn_rt_hash_table[hash].chain);
329 rcu_assign_pointer(dn_rt_hash_table[hash].chain, rt);
331 dst_use(&rt->dst, now);
332 spin_unlock_bh(&dn_rt_hash_table[hash].lock);
337 static void dn_run_flush(unsigned long dummy)
340 struct dn_route *rt, *next;
342 for (i = 0; i < dn_rt_hash_mask; i++) {
343 spin_lock_bh(&dn_rt_hash_table[i].lock);
345 if ((rt = xchg((struct dn_route **)&dn_rt_hash_table[i].chain, NULL)) == NULL)
346 goto nothing_to_declare;
348 for(; rt; rt = next) {
349 next = rcu_dereference_raw(rt->dst.dn_next);
350 RCU_INIT_POINTER(rt->dst.dn_next, NULL);
351 dst_free((struct dst_entry *)rt);
355 spin_unlock_bh(&dn_rt_hash_table[i].lock);
359 static DEFINE_SPINLOCK(dn_rt_flush_lock);
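/*
 * Request a flush of the whole route cache.  A zero delay runs
 * dn_run_flush() (almost) immediately; negative values are treated as
 * dn_rt_min_delay.  Larger delays are deferred behind dn_rt_flush_timer,
 * with dn_rt_deadline bounding how far repeated requests can keep
 * pushing the flush back (at most dn_rt_max_delay).
 */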
361 void dn_rt_cache_flush(int delay)
363 unsigned long now = jiffies;
364 int user_mode = !in_interrupt();
367 delay = dn_rt_min_delay;
369 spin_lock_bh(&dn_rt_flush_lock);
371 if (del_timer(&dn_rt_flush_timer) && delay > 0 && dn_rt_deadline) {
372 long tmo = (long)(dn_rt_deadline - now);
374 if (user_mode && tmo < dn_rt_max_delay - dn_rt_min_delay)
382 spin_unlock_bh(&dn_rt_flush_lock);
387 if (dn_rt_deadline == 0)
388 dn_rt_deadline = now + dn_rt_max_delay;
390 dn_rt_flush_timer.expires = now + delay;
391 add_timer(&dn_rt_flush_timer);
392 spin_unlock_bh(&dn_rt_flush_lock);
396 * dn_return_short - Return a short packet to its sender
397 * @skb: The packet to return
400 static int dn_return_short(struct sk_buff *skb)
402 struct dn_skb_cb *cb;
407 /* Add back headers */
408 skb_push(skb, skb->data - skb_network_header(skb));
410 if ((skb = skb_unshare(skb, GFP_ATOMIC)) == NULL)
414 /* Skip packet length and point to flags */
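	/*
	 * Clear the "return request" bit and set the "return to sender"
	 * bit so that, on arrival back at the originator, the packet is
	 * recognised as a returned packet rather than a fresh one.
	 */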
416 *ptr++ = (cb->rt_flags & ~DN_RT_F_RQR) | DN_RT_F_RTS;
422 *ptr = 0; /* Zero hop count */
426 skb->pkt_type = PACKET_OUTGOING;
427 dn_rt_finish_output(skb, NULL, NULL);
428 return NET_RX_SUCCESS;
432 * dn_return_long - Return a long packet to its sender
433 * @skb: The long format packet to return
436 static int dn_return_long(struct sk_buff *skb)
438 struct dn_skb_cb *cb;
440 unsigned char *src_addr, *dst_addr;
441 unsigned char tmp[ETH_ALEN];
443 /* Add back all headers */
444 skb_push(skb, skb->data - skb_network_header(skb));
446 if ((skb = skb_unshare(skb, GFP_ATOMIC)) == NULL)
450 /* Ignore packet length and point to flags */
454 if (*ptr & DN_RT_F_PF) {
455 char padlen = (*ptr & ~DN_RT_F_PF);
459 *ptr++ = (cb->rt_flags & ~DN_RT_F_RQR) | DN_RT_F_RTS;
465 *ptr = 0; /* Zero hop count */
467 /* Swap source and destination */
468 memcpy(tmp, src_addr, ETH_ALEN);
469 memcpy(src_addr, dst_addr, ETH_ALEN);
470 memcpy(dst_addr, tmp, ETH_ALEN);
472 skb->pkt_type = PACKET_OUTGOING;
473 dn_rt_finish_output(skb, dst_addr, src_addr);
474 return NET_RX_SUCCESS;
478 * dn_route_rx_packet - Try and find a route for an incoming packet
479 * @skb: The packet to find a route for
481 * Returns: result of input function if route is found, error code otherwise
483 static int dn_route_rx_packet(struct sk_buff *skb)
485 struct dn_skb_cb *cb;
488 if ((err = dn_route_input(skb)) == 0)
489 return dst_input(skb);
492 if (decnet_debug_level & 4) {
493 char *devname = skb->dev ? skb->dev->name : "???";
496 "DECnet: dn_route_rx_packet: rt_flags=0x%02x dev=%s len=%d src=0x%04hx dst=0x%04hx err=%d type=%d\n",
497 (int)cb->rt_flags, devname, skb->len,
498 le16_to_cpu(cb->src), le16_to_cpu(cb->dst),
502 if ((skb->pkt_type == PACKET_HOST) && (cb->rt_flags & DN_RT_F_RQR)) {
503 switch (cb->rt_flags & DN_RT_PKT_MSK) {
504 case DN_RT_PKT_SHORT:
505 return dn_return_short(skb);
507 return dn_return_long(skb);
515 static int dn_route_rx_long(struct sk_buff *skb)
517 struct dn_skb_cb *cb = DN_SKB_CB(skb);
518 unsigned char *ptr = skb->data;
520 if (!pskb_may_pull(skb, 21)) /* 20 for long header, 1 for shortest nsp */
524 skb_reset_transport_header(skb);
526 /* Destination info */
528 cb->dst = dn_eth2dn(ptr);
529 if (memcmp(ptr, dn_hiord_addr, 4) != 0)
536 cb->src = dn_eth2dn(ptr);
537 if (memcmp(ptr, dn_hiord_addr, 4) != 0)
542 cb->hops = *ptr++; /* Visit Count */
544 return NF_HOOK(NFPROTO_DECNET, NF_DN_PRE_ROUTING, skb, skb->dev, NULL,
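/*
 * Short-format data packets carry (after the flags byte) a 2 byte
 * destination address, a 2 byte source address and a single forwarding
 * byte whose low six bits are the visit (hop) count -- 5 bytes in all,
 * parsed below.
 */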
554 static int dn_route_rx_short(struct sk_buff *skb)
556 struct dn_skb_cb *cb = DN_SKB_CB(skb);
557 unsigned char *ptr = skb->data;
559 if (!pskb_may_pull(skb, 6)) /* 5 for short header + 1 for shortest nsp */
563 skb_reset_transport_header(skb);
565 cb->dst = *(__le16 *)ptr;
567 cb->src = *(__le16 *)ptr;
569 cb->hops = *ptr & 0x3f;
571 return NF_HOOK(NFPROTO_DECNET, NF_DN_PRE_ROUTING, skb, skb->dev, NULL,
579 static int dn_route_discard(struct sk_buff *skb)
 * I know we drop the packet here, but that's considered success in this case.
586 return NET_RX_SUCCESS;
589 static int dn_route_ptp_hello(struct sk_buff *skb)
592 dn_neigh_pointopoint_hello(skb);
593 return NET_RX_SUCCESS;
596 int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
598 struct dn_skb_cb *cb;
599 unsigned char flags = 0;
600 __u16 len = le16_to_cpu(*(__le16 *)skb->data);
601 struct dn_dev *dn = rcu_dereference(dev->dn_ptr);
602 unsigned char padlen = 0;
604 if (!net_eq(dev_net(dev), &init_net))
610 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
613 if (!pskb_may_pull(skb, 3))
627 cb->iif = dev->ifindex;
630 * If we have padding, remove it.
632 if (flags & DN_RT_F_PF) {
633 padlen = flags & ~DN_RT_F_PF;
634 if (!pskb_may_pull(skb, padlen + 1))
636 skb_pull(skb, padlen);
640 skb_reset_network_header(skb);
 * Weed out packets from future DECnet versions.
645 if (flags & DN_RT_F_VER)
648 cb->rt_flags = flags;
650 if (decnet_debug_level & 1)
652 "dn_route_rcv: got 0x%02x from %s [%d %d %d]\n",
653 (int)flags, (dev) ? dev->name : "???", len, skb->len,
656 if (flags & DN_RT_PKT_CNTL) {
657 if (unlikely(skb_linearize(skb)))
660 switch (flags & DN_RT_CNTL_MSK) {
662 dn_dev_init_pkt(skb);
665 dn_dev_veri_pkt(skb);
669 if (dn->parms.state != DN_DEV_S_RU)
672 switch (flags & DN_RT_CNTL_MSK) {
674 return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
680 return NF_HOOK(NFPROTO_DECNET, NF_DN_ROUTE,
684 return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
686 dn_neigh_router_hello);
689 return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
691 dn_neigh_endnode_hello);
694 if (dn->parms.state != DN_DEV_S_RU)
697 skb_pull(skb, 1); /* Pull flags */
699 switch (flags & DN_RT_PKT_MSK) {
701 return dn_route_rx_long(skb);
702 case DN_RT_PKT_SHORT:
703 return dn_route_rx_short(skb);
713 static int dn_to_neigh_output(struct sk_buff *skb)
715 struct dst_entry *dst = skb_dst(skb);
716 struct neighbour *n = dst_get_neighbour_noref(dst);
718 return n->output(n, skb);
721 static int dn_output(struct sk_buff *skb)
723 struct dst_entry *dst = skb_dst(skb);
724 struct dn_route *rt = (struct dn_route *)dst;
725 struct net_device *dev = dst->dev;
726 struct dn_skb_cb *cb = DN_SKB_CB(skb);
730 if (dst_get_neighbour_noref(dst) == NULL)
735 cb->src = rt->rt_saddr;
736 cb->dst = rt->rt_daddr;
 * Always set the Intra-Ethernet bit on all outgoing packets
 * originated on this node. The only valid flag from upper layers
 * is return-to-sender-requested. Set the hop count to 0 too.
743 cb->rt_flags &= ~DN_RT_F_RQR;
744 cb->rt_flags |= DN_RT_F_IE;
747 return NF_HOOK(NFPROTO_DECNET, NF_DN_LOCAL_OUT, skb, NULL, dev,
751 net_dbg_ratelimited("dn_output: This should not happen\n");
758 static int dn_forward(struct sk_buff *skb)
760 struct dn_skb_cb *cb = DN_SKB_CB(skb);
761 struct dst_entry *dst = skb_dst(skb);
762 struct dn_dev *dn_db = rcu_dereference(dst->dev->dn_ptr);
765 #ifdef CONFIG_NETFILTER
766 struct net_device *dev = skb->dev;
769 if (skb->pkt_type != PACKET_HOST)
772 /* Ensure that we have enough space for headers */
773 rt = (struct dn_route *)skb_dst(skb);
774 header_len = dn_db->use_long ? 21 : 6;
775 if (skb_cow(skb, LL_RESERVED_SPACE(rt->dst.dev)+header_len))
779 * Hop count exceeded.
784 skb->dev = rt->dst.dev;
 * If the packet goes out on the same interface it came in on, then set
 * the Intra-Ethernet bit. This has no effect for short format
 * packets, so we don't need to test for them here.
791 cb->rt_flags &= ~DN_RT_F_IE;
792 if (rt->rt_flags & RTCF_DOREDIRECT)
793 cb->rt_flags |= DN_RT_F_IE;
795 return NF_HOOK(NFPROTO_DECNET, NF_DN_FORWARD, skb, dev, skb->dev,
 * Used to catch bugs. This should never normally get called.
807 static int dn_rt_bug(struct sk_buff *skb)
809 struct dn_skb_cb *cb = DN_SKB_CB(skb);
811 net_dbg_ratelimited("dn_rt_bug: skb from:%04x to:%04x\n",
812 le16_to_cpu(cb->src), le16_to_cpu(cb->dst));
819 static unsigned int dn_dst_default_advmss(const struct dst_entry *dst)
821 return dn_mss_from_pmtu(dst->dev, dst_mtu(dst));
824 static unsigned int dn_dst_mtu(const struct dst_entry *dst)
826 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
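	/* Fall back to the device MTU when no RTAX_MTU metric has been set. */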
828 return mtu ? : dst->dev->mtu;
831 static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst, const void *daddr)
833 return __neigh_lookup_errno(&dn_neigh_table, daddr, dst->dev);
836 static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
838 struct dn_fib_info *fi = res->fi;
839 struct net_device *dev = rt->dst.dev;
840 unsigned int mss_metric;
844 if (DN_FIB_RES_GW(*res) &&
845 DN_FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
846 rt->rt_gateway = DN_FIB_RES_GW(*res);
847 dst_init_metrics(&rt->dst, fi->fib_metrics, true);
849 rt->rt_type = res->type;
851 if (dev != NULL && dst_get_neighbour_noref(&rt->dst) == NULL) {
852 n = __neigh_lookup_errno(&dn_neigh_table, &rt->rt_gateway, dev);
855 dst_set_neighbour(&rt->dst, n);
858 if (dst_metric(&rt->dst, RTAX_MTU) > rt->dst.dev->mtu)
859 dst_metric_set(&rt->dst, RTAX_MTU, rt->dst.dev->mtu);
860 mss_metric = dst_metric_raw(&rt->dst, RTAX_ADVMSS);
862 unsigned int mss = dn_mss_from_pmtu(dev, dst_mtu(&rt->dst));
863 if (mss_metric > mss)
864 dst_metric_set(&rt->dst, RTAX_ADVMSS, mss);
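/*
 * Score how closely two DECnet addresses match: the return value is the
 * number of leading bits the addresses share, so 16 means an exact
 * match.  dnet_select_source() below uses this to prefer the local
 * address "closest" to the destination.
 */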
869 static inline int dn_match_addr(__le16 addr1, __le16 addr2)
871 __u16 tmp = le16_to_cpu(addr1) ^ le16_to_cpu(addr2);
880 static __le16 dnet_select_source(const struct net_device *dev, __le16 daddr, int scope)
883 struct dn_dev *dn_db;
884 struct dn_ifaddr *ifa;
889 dn_db = rcu_dereference(dev->dn_ptr);
890 for (ifa = rcu_dereference(dn_db->ifa_list);
892 ifa = rcu_dereference(ifa->ifa_next)) {
893 if (ifa->ifa_scope > scope)
896 saddr = ifa->ifa_local;
899 ret = dn_match_addr(daddr, ifa->ifa_local);
900 if (ret > best_match)
901 saddr = ifa->ifa_local;
903 saddr = ifa->ifa_local;
910 static inline __le16 __dn_fib_res_prefsrc(struct dn_fib_res *res)
912 return dnet_select_source(DN_FIB_RES_DEV(*res), DN_FIB_RES_GW(*res), res->scope);
915 static inline __le16 dn_fib_rules_map_destination(__le16 daddr, struct dn_fib_res *res)
917 __le16 mask = dnet_make_mask(res->prefixlen);
918 return (daddr&~mask)|res->fi->fib_nh->nh_gw;
921 static int dn_route_output_slow(struct dst_entry **pprt, const struct flowidn *oldflp, int try_hard)
923 struct flowidn fld = {
924 .daddr = oldflp->daddr,
925 .saddr = oldflp->saddr,
926 .flowidn_scope = RT_SCOPE_UNIVERSE,
927 .flowidn_mark = oldflp->flowidn_mark,
928 .flowidn_iif = init_net.loopback_dev->ifindex,
929 .flowidn_oif = oldflp->flowidn_oif,
931 struct dn_route *rt = NULL;
932 struct net_device *dev_out = NULL, *dev;
933 struct neighbour *neigh = NULL;
935 unsigned int flags = 0;
936 struct dn_fib_res res = { .fi = NULL, .type = RTN_UNICAST };
941 if (decnet_debug_level & 16)
943 "dn_route_output_slow: dst=%04x src=%04x mark=%d"
944 " iif=%d oif=%d\n", le16_to_cpu(oldflp->daddr),
945 le16_to_cpu(oldflp->saddr),
946 oldflp->flowidn_mark, init_net.loopback_dev->ifindex,
947 oldflp->flowidn_oif);
	/* If we have an output interface, verify it's a DECnet device */
950 if (oldflp->flowidn_oif) {
951 dev_out = dev_get_by_index(&init_net, oldflp->flowidn_oif);
953 if (dev_out && dev_out->dn_ptr == NULL) {
	/* If we have a source address, verify that it's a local address */
963 err = -EADDRNOTAVAIL;
966 if (dn_dev_islocal(dev_out, oldflp->saddr))
972 for_each_netdev_rcu(&init_net, dev) {
975 if (!dn_dev_islocal(dev, oldflp->saddr))
977 if ((dev->flags & IFF_LOOPBACK) &&
979 !dn_dev_islocal(dev, oldflp->daddr))
	/* No destination? Assume it's local */
995 fld.daddr = fld.saddr;
997 err = -EADDRNOTAVAIL;
1000 dev_out = init_net.loopback_dev;
1004 fld.saddr = dnet_select_source(dev_out, 0,
1009 fld.flowidn_oif = init_net.loopback_dev->ifindex;
1010 res.type = RTN_LOCAL;
1014 if (decnet_debug_level & 16)
1016 "dn_route_output_slow: initial checks complete."
	       " dst=%04x src=%04x oif=%d try_hard=%d\n",
1018 le16_to_cpu(fld.daddr), le16_to_cpu(fld.saddr),
1019 fld.flowidn_oif, try_hard);
1022 * N.B. If the kernel is compiled without router support then
1023 * dn_fib_lookup() will evaluate to non-zero so this if () block
1024 * will always be executed.
1027 if (try_hard || (err = dn_fib_lookup(&fld, &res)) != 0) {
1028 struct dn_dev *dn_db;
1032 * Here the fallback is basically the standard algorithm for
 * routing in endnodes which is described in the DECnet routing documentation.
1036 * If we are not trying hard, look in neighbour cache.
1037 * The result is tested to ensure that if a specific output
 * device/source address was requested, then we honour that here.
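 *
 * In short, the fallback order is: neighbour cache, then a local
 * address match, then the configured default router, and finally the
 * assumption that the destination is directly connected.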
1042 neigh = neigh_lookup_nodev(&dn_neigh_table, &init_net, &fld.daddr);
1044 if ((oldflp->flowidn_oif &&
1045 (neigh->dev->ifindex != oldflp->flowidn_oif)) ||
1047 (!dn_dev_islocal(neigh->dev,
1049 neigh_release(neigh);
1054 if (dn_dev_islocal(neigh->dev, fld.daddr)) {
1055 dev_out = init_net.loopback_dev;
1056 res.type = RTN_LOCAL;
1058 dev_out = neigh->dev;
	/* Not there? Perhaps it's a local address */
1067 if (dev_out == NULL)
1068 dev_out = dn_dev_get_default();
1070 if (dev_out == NULL)
1072 dn_db = rcu_dereference_raw(dev_out->dn_ptr);
1073 /* Possible improvement - check all devices for local addr */
1074 if (dn_dev_islocal(dev_out, fld.daddr)) {
1076 dev_out = init_net.loopback_dev;
1078 res.type = RTN_LOCAL;
1081 /* Not local either.... try sending it to the default router */
1082 neigh = neigh_clone(dn_db->router);
1083 BUG_ON(neigh && neigh->dev != dev_out);
	/* OK then, we assume it's directly connected and move on */
1088 gateway = ((struct dn_neigh *)neigh)->addr;
1090 gateway = fld.daddr;
1091 if (fld.saddr == 0) {
1092 fld.saddr = dnet_select_source(dev_out, gateway,
1093 res.type == RTN_LOCAL ?
1096 if (fld.saddr == 0 && res.type != RTN_LOCAL)
1099 fld.flowidn_oif = dev_out->ifindex;
1104 if (res.type == RTN_NAT)
1107 if (res.type == RTN_LOCAL) {
1109 fld.saddr = fld.daddr;
1112 dev_out = init_net.loopback_dev;
1114 fld.flowidn_oif = dev_out->ifindex;
1116 dn_fib_info_put(res.fi);
1121 if (res.fi->fib_nhs > 1 && fld.flowidn_oif == 0)
1122 dn_fib_select_multipath(&fld, &res);
1125 * We could add some logic to deal with default routes here and
1126 * get rid of some of the special casing above.
1130 fld.saddr = DN_FIB_RES_PREFSRC(res);
1134 dev_out = DN_FIB_RES_DEV(res);
1136 fld.flowidn_oif = dev_out->ifindex;
1137 gateway = DN_FIB_RES_GW(res);
1140 if (dev_out->flags & IFF_LOOPBACK)
1141 flags |= RTCF_LOCAL;
1143 rt = dst_alloc(&dn_dst_ops, dev_out, 1, 0, DST_HOST);
1147 memset(&rt->fld, 0, sizeof(rt->fld));
1148 rt->fld.saddr = oldflp->saddr;
1149 rt->fld.daddr = oldflp->daddr;
1150 rt->fld.flowidn_oif = oldflp->flowidn_oif;
1151 rt->fld.flowidn_iif = 0;
1152 rt->fld.flowidn_mark = oldflp->flowidn_mark;
1154 rt->rt_saddr = fld.saddr;
1155 rt->rt_daddr = fld.daddr;
1156 rt->rt_gateway = gateway ? gateway : fld.daddr;
1157 rt->rt_local_src = fld.saddr;
1159 rt->rt_dst_map = fld.daddr;
1160 rt->rt_src_map = fld.saddr;
1162 dst_set_neighbour(&rt->dst, neigh);
1165 rt->dst.lastuse = jiffies;
1166 rt->dst.output = dn_output;
1167 rt->dst.input = dn_rt_bug;
1168 rt->rt_flags = flags;
1169 if (flags & RTCF_LOCAL)
1170 rt->dst.input = dn_nsp_rx;
1172 err = dn_rt_set_next_hop(rt, &res);
1176 hash = dn_hash(rt->fld.saddr, rt->fld.daddr);
1177 dn_insert_route(rt, hash, (struct dn_route **)pprt);
1181 neigh_release(neigh);
1183 dn_fib_res_put(&res);
1190 err = -EADDRNOTAVAIL;
1205 * N.B. The flags may be moved into the flowi at some future stage.
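/*
 * Fast path for output route resolution: scan the relevant cache bucket
 * for an entry whose source, destination, mark and oif all match, bump
 * its use count and return it.  On a miss (or when MSG_TRYHARD forces a
 * full lookup) fall through to dn_route_output_slow().
 */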
1207 static int __dn_route_output_key(struct dst_entry **pprt, const struct flowidn *flp, int flags)
1209 unsigned int hash = dn_hash(flp->saddr, flp->daddr);
1210 struct dn_route *rt = NULL;
1212 if (!(flags & MSG_TRYHARD)) {
1214 for (rt = rcu_dereference_bh(dn_rt_hash_table[hash].chain); rt;
1215 rt = rcu_dereference_bh(rt->dst.dn_next)) {
1216 if ((flp->daddr == rt->fld.daddr) &&
1217 (flp->saddr == rt->fld.saddr) &&
1218 (flp->flowidn_mark == rt->fld.flowidn_mark) &&
1219 dn_is_output_route(rt) &&
1220 (rt->fld.flowidn_oif == flp->flowidn_oif)) {
1221 dst_use(&rt->dst, jiffies);
1222 rcu_read_unlock_bh();
1227 rcu_read_unlock_bh();
1230 return dn_route_output_slow(pprt, flp, flags);
1233 static int dn_route_output_key(struct dst_entry **pprt, struct flowidn *flp, int flags)
1237 err = __dn_route_output_key(pprt, flp, flags);
1238 if (err == 0 && flp->flowidn_proto) {
1239 *pprt = xfrm_lookup(&init_net, *pprt,
1240 flowidn_to_flowi(flp), NULL, 0);
1241 if (IS_ERR(*pprt)) {
1242 err = PTR_ERR(*pprt);
1249 int dn_route_output_sock(struct dst_entry **pprt, struct flowidn *fl, struct sock *sk, int flags)
1253 err = __dn_route_output_key(pprt, fl, flags & MSG_TRYHARD);
1254 if (err == 0 && fl->flowidn_proto) {
1255 if (!(flags & MSG_DONTWAIT))
1256 fl->flowidn_flags |= FLOWI_FLAG_CAN_SLEEP;
1257 *pprt = xfrm_lookup(&init_net, *pprt,
1258 flowidn_to_flowi(fl), sk, 0);
1259 if (IS_ERR(*pprt)) {
1260 err = PTR_ERR(*pprt);
1267 static int dn_route_input_slow(struct sk_buff *skb)
1269 struct dn_route *rt = NULL;
1270 struct dn_skb_cb *cb = DN_SKB_CB(skb);
1271 struct net_device *in_dev = skb->dev;
1272 struct net_device *out_dev = NULL;
1273 struct dn_dev *dn_db;
1274 struct neighbour *neigh = NULL;
1278 __le16 local_src = 0;
1279 struct flowidn fld = {
1282 .flowidn_scope = RT_SCOPE_UNIVERSE,
1283 .flowidn_mark = skb->mark,
1284 .flowidn_iif = skb->dev->ifindex,
1286 struct dn_fib_res res = { .fi = NULL, .type = RTN_UNREACHABLE };
1292 if ((dn_db = rcu_dereference(in_dev->dn_ptr)) == NULL)
1295 /* Zero source addresses are not allowed */
1300 * In this case we've just received a packet from a source
1301 * outside ourselves pretending to come from us. We don't
1302 * allow it any further to prevent routing loops, spoofing and
1303 * other nasties. Loopback packets already have the dst attached
1304 * so this only affects packets which have originated elsewhere.
1307 if (dn_dev_islocal(in_dev, cb->src))
1310 err = dn_fib_lookup(&fld, &res);
1315 * Is the destination us ?
1317 if (!dn_dev_islocal(in_dev, cb->dst))
1320 res.type = RTN_LOCAL;
1322 __le16 src_map = fld.saddr;
1325 out_dev = DN_FIB_RES_DEV(res);
1326 if (out_dev == NULL) {
1327 net_crit_ratelimited("Bug in dn_route_input_slow() No output device\n");
1333 src_map = fld.saddr; /* no NAT support for now */
1335 gateway = DN_FIB_RES_GW(res);
1336 if (res.type == RTN_NAT) {
1337 fld.daddr = dn_fib_rules_map_destination(fld.daddr, &res);
1338 dn_fib_res_put(&res);
1340 if (dn_fib_lookup(&fld, &res))
1343 if (res.type != RTN_UNICAST)
1346 gateway = fld.daddr;
1348 fld.saddr = src_map;
 * Forwarding check here; we only check for forwarding
 * being turned off. If you want to forward only intra-area,
 * it's up to you to set up the routing tables appropriately.
1359 if (dn_db->parms.forwarding == 0)
1362 if (res.fi->fib_nhs > 1 && fld.flowidn_oif == 0)
1363 dn_fib_select_multipath(&fld, &res);
1366 * Check for out_dev == in_dev. We use the RTCF_DOREDIRECT
1367 * flag as a hint to set the intra-ethernet bit when
1368 * forwarding. If we've got NAT in operation, we don't do
1369 * this optimisation.
1371 if (out_dev == in_dev && !(flags & RTCF_NAT))
1372 flags |= RTCF_DOREDIRECT;
1374 local_src = DN_FIB_RES_PREFSRC(res);
1377 case RTN_UNREACHABLE:
1380 flags |= RTCF_LOCAL;
1381 fld.saddr = cb->dst;
1382 fld.daddr = cb->src;
1384 /* Routing tables gave us a gateway */
		/* Packet was intra-ethernet, so we know it's on-link */
1389 if (cb->rt_flags & DN_RT_F_IE) {
1391 flags |= RTCF_DIRECTSRC;
1395 /* Use the default router if there is one */
1396 neigh = neigh_clone(dn_db->router);
1398 gateway = ((struct dn_neigh *)neigh)->addr;
1402 /* Close eyes and pray */
1404 flags |= RTCF_DIRECTSRC;
1411 rt = dst_alloc(&dn_dst_ops, out_dev, 0, 0, DST_HOST);
1415 memset(&rt->fld, 0, sizeof(rt->fld));
1416 rt->rt_saddr = fld.saddr;
1417 rt->rt_daddr = fld.daddr;
1418 rt->rt_gateway = fld.daddr;
1420 rt->rt_gateway = gateway;
1421 rt->rt_local_src = local_src ? local_src : rt->rt_saddr;
1423 rt->rt_dst_map = fld.daddr;
1424 rt->rt_src_map = fld.saddr;
1426 rt->fld.saddr = cb->src;
1427 rt->fld.daddr = cb->dst;
1428 rt->fld.flowidn_oif = 0;
1429 rt->fld.flowidn_iif = in_dev->ifindex;
1430 rt->fld.flowidn_mark = fld.flowidn_mark;
1432 dst_set_neighbour(&rt->dst, neigh);
1433 rt->dst.lastuse = jiffies;
1434 rt->dst.output = dn_rt_bug;
1437 rt->dst.input = dn_forward;
1440 rt->dst.output = dn_output;
1441 rt->dst.input = dn_nsp_rx;
1442 rt->dst.dev = in_dev;
1443 flags |= RTCF_LOCAL;
1446 case RTN_UNREACHABLE:
1448 rt->dst.input = dst_discard;
1450 rt->rt_flags = flags;
1452 err = dn_rt_set_next_hop(rt, &res);
1456 hash = dn_hash(rt->fld.saddr, rt->fld.daddr);
1457 dn_insert_route(rt, hash, &rt);
1458 skb_dst_set(skb, &rt->dst);
1462 neigh_release(neigh);
1464 dn_fib_res_put(&res);
1484 static int dn_route_input(struct sk_buff *skb)
1486 struct dn_route *rt;
1487 struct dn_skb_cb *cb = DN_SKB_CB(skb);
1488 unsigned int hash = dn_hash(cb->src, cb->dst);
1494 for(rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt != NULL;
1495 rt = rcu_dereference(rt->dst.dn_next)) {
1496 if ((rt->fld.saddr == cb->src) &&
1497 (rt->fld.daddr == cb->dst) &&
1498 (rt->fld.flowidn_oif == 0) &&
1499 (rt->fld.flowidn_mark == skb->mark) &&
1500 (rt->fld.flowidn_iif == cb->iif)) {
1501 dst_use(&rt->dst, jiffies);
1503 skb_dst_set(skb, (struct dst_entry *)rt);
1509 return dn_route_input_slow(skb);
1512 static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
1513 int event, int nowait, unsigned int flags)
1515 struct dn_route *rt = (struct dn_route *)skb_dst(skb);
1517 struct nlmsghdr *nlh;
1520 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
1524 r = nlmsg_data(nlh);
1525 r->rtm_family = AF_DECnet;
1526 r->rtm_dst_len = 16;
1529 r->rtm_table = RT_TABLE_MAIN;
1530 r->rtm_type = rt->rt_type;
1531 r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
1532 r->rtm_scope = RT_SCOPE_UNIVERSE;
1533 r->rtm_protocol = RTPROT_UNSPEC;
1535 if (rt->rt_flags & RTCF_NOTIFY)
1536 r->rtm_flags |= RTM_F_NOTIFY;
1538 if (nla_put_u32(skb, RTA_TABLE, RT_TABLE_MAIN) < 0 ||
1539 nla_put_le16(skb, RTA_DST, rt->rt_daddr) < 0)
1542 if (rt->fld.saddr) {
1543 r->rtm_src_len = 16;
1544 if (nla_put_le16(skb, RTA_SRC, rt->fld.saddr) < 0)
1548 nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex) < 0)
1552 * Note to self - change this if input routes reverse direction when
 * they deal only with inputs and not with replies like they do currently.
1556 if (nla_put_le16(skb, RTA_PREFSRC, rt->rt_local_src) < 0)
1559 if (rt->rt_daddr != rt->rt_gateway &&
1560 nla_put_le16(skb, RTA_GATEWAY, rt->rt_gateway) < 0)
1563 if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
1566 expires = rt->dst.expires ? rt->dst.expires - jiffies : 0;
1567 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, 0, 0, expires,
1571 if (dn_is_input_route(rt) &&
1572 nla_put_u32(skb, RTA_IIF, rt->fld.flowidn_iif) < 0)
1575 return nlmsg_end(skb, nlh);
1578 nlmsg_cancel(skb, nlh);
1583 * This is called by both endnodes and routers now.
1585 static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg)
1587 struct net *net = sock_net(in_skb->sk);
1588 struct rtattr **rta = arg;
1589 struct rtmsg *rtm = nlmsg_data(nlh);
1590 struct dn_route *rt = NULL;
1591 struct dn_skb_cb *cb;
1593 struct sk_buff *skb;
1596 if (!net_eq(net, &init_net))
1599 memset(&fld, 0, sizeof(fld));
1600 fld.flowidn_proto = DNPROTO_NSP;
1602 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1605 skb_reset_mac_header(skb);
1606 cb = DN_SKB_CB(skb);
1609 memcpy(&fld.saddr, RTA_DATA(rta[RTA_SRC-1]), 2);
1611 memcpy(&fld.daddr, RTA_DATA(rta[RTA_DST-1]), 2);
1613 memcpy(&fld.flowidn_iif, RTA_DATA(rta[RTA_IIF-1]), sizeof(int));
1615 if (fld.flowidn_iif) {
1616 struct net_device *dev;
1617 if ((dev = dev_get_by_index(&init_net, fld.flowidn_iif)) == NULL) {
1626 skb->protocol = htons(ETH_P_DNA_RT);
1628 cb->src = fld.saddr;
1629 cb->dst = fld.daddr;
1631 err = dn_route_input(skb);
1633 memset(cb, 0, sizeof(struct dn_skb_cb));
1634 rt = (struct dn_route *)skb_dst(skb);
		if (!err && rt->dst.error)
1636 err = rt->dst.error;
1639 if (rta[RTA_OIF - 1])
1640 memcpy(&oif, RTA_DATA(rta[RTA_OIF - 1]), sizeof(int));
1641 fld.flowidn_oif = oif;
1642 err = dn_route_output_key((struct dst_entry **)&rt, &fld, 0);
1650 skb_dst_set(skb, &rt->dst);
1651 if (rtm->rtm_flags & RTM_F_NOTIFY)
1652 rt->rt_flags |= RTCF_NOTIFY;
1654 err = dn_rt_fill_info(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq, RTM_NEWROUTE, 0, 0);
1663 return rtnl_unicast(skb, &init_net, NETLINK_CB(in_skb).pid);
 * For routers, this is called from dn_fib_dump, but for endnodes it's
1672 * called directly from the rtnetlink dispatch table.
1674 int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb)
1676 struct net *net = sock_net(skb->sk);
1677 struct dn_route *rt;
1682 if (!net_eq(net, &init_net))
1685 if (nlmsg_len(cb->nlh) < sizeof(struct rtmsg))
1688 rtm = nlmsg_data(cb->nlh);
1689 if (!(rtm->rtm_flags & RTM_F_CLONED))
1693 s_idx = idx = cb->args[1];
1694 for(h = 0; h <= dn_rt_hash_mask; h++) {
1700 for(rt = rcu_dereference_bh(dn_rt_hash_table[h].chain), idx = 0;
1702 rt = rcu_dereference_bh(rt->dst.dn_next), idx++) {
1705 skb_dst_set(skb, dst_clone(&rt->dst));
1706 if (dn_rt_fill_info(skb, NETLINK_CB(cb->skb).pid,
1707 cb->nlh->nlmsg_seq, RTM_NEWROUTE,
1708 1, NLM_F_MULTI) <= 0) {
1710 rcu_read_unlock_bh();
1715 rcu_read_unlock_bh();
1724 #ifdef CONFIG_PROC_FS
1725 struct dn_rt_cache_iter_state {
1729 static struct dn_route *dn_rt_cache_get_first(struct seq_file *seq)
1731 struct dn_route *rt = NULL;
1732 struct dn_rt_cache_iter_state *s = seq->private;
1734 for(s->bucket = dn_rt_hash_mask; s->bucket >= 0; --s->bucket) {
1736 rt = rcu_dereference_bh(dn_rt_hash_table[s->bucket].chain);
1739 rcu_read_unlock_bh();
1744 static struct dn_route *dn_rt_cache_get_next(struct seq_file *seq, struct dn_route *rt)
1746 struct dn_rt_cache_iter_state *s = seq->private;
1748 rt = rcu_dereference_bh(rt->dst.dn_next);
1750 rcu_read_unlock_bh();
1751 if (--s->bucket < 0)
1754 rt = rcu_dereference_bh(dn_rt_hash_table[s->bucket].chain);
1759 static void *dn_rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
1761 struct dn_route *rt = dn_rt_cache_get_first(seq);
1764 while(*pos && (rt = dn_rt_cache_get_next(seq, rt)))
1767 return *pos ? NULL : rt;
1770 static void *dn_rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1772 struct dn_route *rt = dn_rt_cache_get_next(seq, v);
1777 static void dn_rt_cache_seq_stop(struct seq_file *seq, void *v)
1780 rcu_read_unlock_bh();
1783 static int dn_rt_cache_seq_show(struct seq_file *seq, void *v)
1785 struct dn_route *rt = v;
1786 char buf1[DN_ASCBUF_LEN], buf2[DN_ASCBUF_LEN];
1788 seq_printf(seq, "%-8s %-7s %-7s %04d %04d %04d\n",
1789 rt->dst.dev ? rt->dst.dev->name : "*",
1790 dn_addr2asc(le16_to_cpu(rt->rt_daddr), buf1),
1791 dn_addr2asc(le16_to_cpu(rt->rt_saddr), buf2),
1792 atomic_read(&rt->dst.__refcnt),
1794 (int) dst_metric(&rt->dst, RTAX_RTT));
1798 static const struct seq_operations dn_rt_cache_seq_ops = {
1799 .start = dn_rt_cache_seq_start,
1800 .next = dn_rt_cache_seq_next,
1801 .stop = dn_rt_cache_seq_stop,
1802 .show = dn_rt_cache_seq_show,
1805 static int dn_rt_cache_seq_open(struct inode *inode, struct file *file)
1807 return seq_open_private(file, &dn_rt_cache_seq_ops,
1808 sizeof(struct dn_rt_cache_iter_state));
1811 static const struct file_operations dn_rt_cache_seq_fops = {
1812 .owner = THIS_MODULE,
1813 .open = dn_rt_cache_seq_open,
1815 .llseek = seq_lseek,
1816 .release = seq_release_private,
1819 #endif /* CONFIG_PROC_FS */
1821 void __init dn_route_init(void)
1825 dn_dst_ops.kmem_cachep =
1826 kmem_cache_create("dn_dst_cache", sizeof(struct dn_route), 0,
1827 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
1828 dst_entries_init(&dn_dst_ops);
1829 setup_timer(&dn_route_timer, dn_dst_check_expire, 0);
1830 dn_route_timer.expires = jiffies + decnet_dst_gc_interval * HZ;
1831 add_timer(&dn_route_timer);
1833 goal = totalram_pages >> (26 - PAGE_SHIFT);
1835 for(order = 0; (1UL << order) < goal; order++)
1839 * Only want 1024 entries max, since the table is very, very unlikely
1840 * to be larger than that.
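	/*
	 * Illustrative sizing (assuming 4KB pages and 16 byte buckets on
	 * a 64-bit build): a machine with 1GB of RAM has 262144 pages,
	 * so goal = 262144 >> 14 = 16 pages and order starts out as 4.
	 * Sixteen pages would hold 4096 buckets, so the loop below drops
	 * the order to 2, giving 1024 hash buckets.
	 */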
1842 while(order && ((((1UL << order) * PAGE_SIZE) /
1843 sizeof(struct dn_rt_hash_bucket)) >= 2048))
1847 dn_rt_hash_mask = (1UL << order) * PAGE_SIZE /
1848 sizeof(struct dn_rt_hash_bucket);
1849 while(dn_rt_hash_mask & (dn_rt_hash_mask - 1))
1851 dn_rt_hash_table = (struct dn_rt_hash_bucket *)
1852 __get_free_pages(GFP_ATOMIC, order);
1853 } while (dn_rt_hash_table == NULL && --order > 0);
1855 if (!dn_rt_hash_table)
1856 panic("Failed to allocate DECnet route cache hash table\n");
1859 "DECnet: Routing cache hash table of %u buckets, %ldKbytes\n",
1861 (long)(dn_rt_hash_mask*sizeof(struct dn_rt_hash_bucket))/1024);
1864 for(i = 0; i <= dn_rt_hash_mask; i++) {
1865 spin_lock_init(&dn_rt_hash_table[i].lock);
1866 dn_rt_hash_table[i].chain = NULL;
1869 dn_dst_ops.gc_thresh = (dn_rt_hash_mask + 1);
1871 proc_net_fops_create(&init_net, "decnet_cache", S_IRUGO, &dn_rt_cache_seq_fops);
1873 #ifdef CONFIG_DECNET_ROUTER
1874 rtnl_register(PF_DECnet, RTM_GETROUTE, dn_cache_getroute,
1877 rtnl_register(PF_DECnet, RTM_GETROUTE, dn_cache_getroute,
1878 dn_cache_dump, NULL);
1882 void __exit dn_route_cleanup(void)
1884 del_timer(&dn_route_timer);
1887 proc_net_remove(&init_net, "decnet_cache");
1888 dst_entries_destroy(&dn_dst_ops);