/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Copyright Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
 * Copyright Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
 * Copyright Tomi Manninen OH2BNS (oh2bns@sral.fi)
 */
11 #include <linux/errno.h>
12 #include <linux/types.h>
13 #include <linux/socket.h>
15 #include <linux/kernel.h>
16 #include <linux/timer.h>
17 #include <linux/string.h>
18 #include <linux/sockios.h>
19 #include <linux/net.h>
20 #include <linux/slab.h>
22 #include <linux/inet.h>
23 #include <linux/netdevice.h>
25 #include <linux/if_arp.h>
26 #include <linux/skbuff.h>
28 #include <asm/uaccess.h>
29 #include <asm/system.h>
30 #include <linux/fcntl.h>
31 #include <linux/termios.h> /* For TIOCINQ/OUTQ */
33 #include <linux/interrupt.h>
34 #include <linux/notifier.h>
35 #include <linux/netfilter.h>
36 #include <linux/init.h>
37 #include <linux/spinlock.h>
38 #include <net/netrom.h>
39 #include <linux/seq_file.h>
40 #include <linux/export.h>
/* Next neighbour number to hand out; neighbour 0 is never used. */
static unsigned int nr_neigh_no = 1;

/* Global node and neighbour tables, each guarded by its own BH spinlock. */
static HLIST_HEAD(nr_node_list);
static DEFINE_SPINLOCK(nr_node_list_lock);
static HLIST_HEAD(nr_neigh_list);
static DEFINE_SPINLOCK(nr_neigh_list_lock);
49 static struct nr_node *nr_node_get(ax25_address *callsign)
51 struct nr_node *found = NULL;
52 struct nr_node *nr_node;
53 struct hlist_node *node;
55 spin_lock_bh(&nr_node_list_lock);
56 nr_node_for_each(nr_node, node, &nr_node_list)
57 if (ax25cmp(callsign, &nr_node->callsign) == 0) {
58 nr_node_hold(nr_node);
62 spin_unlock_bh(&nr_node_list_lock);
66 static struct nr_neigh *nr_neigh_get_dev(ax25_address *callsign,
67 struct net_device *dev)
69 struct nr_neigh *found = NULL;
70 struct nr_neigh *nr_neigh;
71 struct hlist_node *node;
73 spin_lock_bh(&nr_neigh_list_lock);
74 nr_neigh_for_each(nr_neigh, node, &nr_neigh_list)
75 if (ax25cmp(callsign, &nr_neigh->callsign) == 0 &&
76 nr_neigh->dev == dev) {
77 nr_neigh_hold(nr_neigh);
81 spin_unlock_bh(&nr_neigh_list_lock);
85 static void nr_remove_neigh(struct nr_neigh *);
88 * Add a new route to a node, and in the process add the node and the
89 * neighbour if it is new.
91 static int __must_check nr_add_node(ax25_address *nr, const char *mnemonic,
92 ax25_address *ax25, ax25_digi *ax25_digi, struct net_device *dev,
93 int quality, int obs_count)
95 struct nr_node *nr_node;
96 struct nr_neigh *nr_neigh;
97 struct nr_route nr_route;
99 struct net_device *odev;
101 if ((odev=nr_dev_get(nr)) != NULL) { /* Can't add routes to ourself */
106 nr_node = nr_node_get(nr);
108 nr_neigh = nr_neigh_get_dev(ax25, dev);
111 * The L2 link to a neighbour has failed in the past
112 * and now a frame comes from this neighbour. We assume
113 * it was a temporary trouble with the link and reset the
114 * routes now (and not wait for a node broadcast).
116 if (nr_neigh != NULL && nr_neigh->failed != 0 && quality == 0) {
117 struct nr_node *nr_nodet;
118 struct hlist_node *node;
120 spin_lock_bh(&nr_node_list_lock);
121 nr_node_for_each(nr_nodet, node, &nr_node_list) {
122 nr_node_lock(nr_nodet);
123 for (i = 0; i < nr_nodet->count; i++)
124 if (nr_nodet->routes[i].neighbour == nr_neigh)
125 if (i < nr_nodet->which)
127 nr_node_unlock(nr_nodet);
129 spin_unlock_bh(&nr_node_list_lock);
132 if (nr_neigh != NULL)
133 nr_neigh->failed = 0;
135 if (quality == 0 && nr_neigh != NULL && nr_node != NULL) {
136 nr_neigh_put(nr_neigh);
137 nr_node_put(nr_node);
141 if (nr_neigh == NULL) {
142 if ((nr_neigh = kmalloc(sizeof(*nr_neigh), GFP_ATOMIC)) == NULL) {
144 nr_node_put(nr_node);
148 nr_neigh->callsign = *ax25;
149 nr_neigh->digipeat = NULL;
150 nr_neigh->ax25 = NULL;
152 nr_neigh->quality = sysctl_netrom_default_path_quality;
153 nr_neigh->locked = 0;
155 nr_neigh->number = nr_neigh_no++;
156 nr_neigh->failed = 0;
157 atomic_set(&nr_neigh->refcount, 1);
159 if (ax25_digi != NULL && ax25_digi->ndigi > 0) {
160 nr_neigh->digipeat = kmemdup(ax25_digi,
163 if (nr_neigh->digipeat == NULL) {
166 nr_node_put(nr_node);
171 spin_lock_bh(&nr_neigh_list_lock);
172 hlist_add_head(&nr_neigh->neigh_node, &nr_neigh_list);
173 nr_neigh_hold(nr_neigh);
174 spin_unlock_bh(&nr_neigh_list_lock);
177 if (quality != 0 && ax25cmp(nr, ax25) == 0 && !nr_neigh->locked)
178 nr_neigh->quality = quality;
180 if (nr_node == NULL) {
181 if ((nr_node = kmalloc(sizeof(*nr_node), GFP_ATOMIC)) == NULL) {
183 nr_neigh_put(nr_neigh);
187 nr_node->callsign = *nr;
188 strcpy(nr_node->mnemonic, mnemonic);
192 atomic_set(&nr_node->refcount, 1);
193 spin_lock_init(&nr_node->node_lock);
195 nr_node->routes[0].quality = quality;
196 nr_node->routes[0].obs_count = obs_count;
197 nr_node->routes[0].neighbour = nr_neigh;
199 nr_neigh_hold(nr_neigh);
202 spin_lock_bh(&nr_node_list_lock);
203 hlist_add_head(&nr_node->node_node, &nr_node_list);
204 /* refcount initialized at 1 */
205 spin_unlock_bh(&nr_node_list_lock);
209 nr_node_lock(nr_node);
212 strcpy(nr_node->mnemonic, mnemonic);
214 for (found = 0, i = 0; i < nr_node->count; i++) {
215 if (nr_node->routes[i].neighbour == nr_neigh) {
216 nr_node->routes[i].quality = quality;
217 nr_node->routes[i].obs_count = obs_count;
224 /* We have space at the bottom, slot it in */
225 if (nr_node->count < 3) {
226 nr_node->routes[2] = nr_node->routes[1];
227 nr_node->routes[1] = nr_node->routes[0];
229 nr_node->routes[0].quality = quality;
230 nr_node->routes[0].obs_count = obs_count;
231 nr_node->routes[0].neighbour = nr_neigh;
235 nr_neigh_hold(nr_neigh);
238 /* It must be better than the worst */
239 if (quality > nr_node->routes[2].quality) {
240 nr_node->routes[2].neighbour->count--;
241 nr_neigh_put(nr_node->routes[2].neighbour);
243 if (nr_node->routes[2].neighbour->count == 0 && !nr_node->routes[2].neighbour->locked)
244 nr_remove_neigh(nr_node->routes[2].neighbour);
246 nr_node->routes[2].quality = quality;
247 nr_node->routes[2].obs_count = obs_count;
248 nr_node->routes[2].neighbour = nr_neigh;
250 nr_neigh_hold(nr_neigh);
256 /* Now re-sort the routes in quality order */
257 switch (nr_node->count) {
259 if (nr_node->routes[1].quality > nr_node->routes[0].quality) {
260 switch (nr_node->which) {
268 nr_route = nr_node->routes[0];
269 nr_node->routes[0] = nr_node->routes[1];
270 nr_node->routes[1] = nr_route;
272 if (nr_node->routes[2].quality > nr_node->routes[1].quality) {
273 switch (nr_node->which) {
274 case 1: nr_node->which = 2;
277 case 2: nr_node->which = 1;
283 nr_route = nr_node->routes[1];
284 nr_node->routes[1] = nr_node->routes[2];
285 nr_node->routes[2] = nr_route;
288 if (nr_node->routes[1].quality > nr_node->routes[0].quality) {
289 switch (nr_node->which) {
290 case 0: nr_node->which = 1;
293 case 1: nr_node->which = 0;
298 nr_route = nr_node->routes[0];
299 nr_node->routes[0] = nr_node->routes[1];
300 nr_node->routes[1] = nr_route;
306 for (i = 0; i < nr_node->count; i++) {
307 if (nr_node->routes[i].neighbour == nr_neigh) {
308 if (i < nr_node->which)
314 nr_neigh_put(nr_neigh);
315 nr_node_unlock(nr_node);
316 nr_node_put(nr_node);
320 static inline void __nr_remove_node(struct nr_node *nr_node)
322 hlist_del_init(&nr_node->node_node);
323 nr_node_put(nr_node);
326 #define nr_remove_node_locked(__node) \
327 __nr_remove_node(__node)
329 static void nr_remove_node(struct nr_node *nr_node)
331 spin_lock_bh(&nr_node_list_lock);
332 __nr_remove_node(nr_node);
333 spin_unlock_bh(&nr_node_list_lock);
336 static inline void __nr_remove_neigh(struct nr_neigh *nr_neigh)
338 hlist_del_init(&nr_neigh->neigh_node);
339 nr_neigh_put(nr_neigh);
342 #define nr_remove_neigh_locked(__neigh) \
343 __nr_remove_neigh(__neigh)
345 static void nr_remove_neigh(struct nr_neigh *nr_neigh)
347 spin_lock_bh(&nr_neigh_list_lock);
348 __nr_remove_neigh(nr_neigh);
349 spin_unlock_bh(&nr_neigh_list_lock);
353 * "Delete" a node. Strictly speaking remove a route to a node. The node
354 * is only deleted if no routes are left to it.
356 static int nr_del_node(ax25_address *callsign, ax25_address *neighbour, struct net_device *dev)
358 struct nr_node *nr_node;
359 struct nr_neigh *nr_neigh;
362 nr_node = nr_node_get(callsign);
367 nr_neigh = nr_neigh_get_dev(neighbour, dev);
369 if (nr_neigh == NULL) {
370 nr_node_put(nr_node);
374 nr_node_lock(nr_node);
375 for (i = 0; i < nr_node->count; i++) {
376 if (nr_node->routes[i].neighbour == nr_neigh) {
378 nr_neigh_put(nr_neigh);
380 if (nr_neigh->count == 0 && !nr_neigh->locked)
381 nr_remove_neigh(nr_neigh);
382 nr_neigh_put(nr_neigh);
386 if (nr_node->count == 0) {
387 nr_remove_node(nr_node);
391 nr_node->routes[0] = nr_node->routes[1];
393 nr_node->routes[1] = nr_node->routes[2];
397 nr_node_put(nr_node);
399 nr_node_unlock(nr_node);
404 nr_neigh_put(nr_neigh);
405 nr_node_unlock(nr_node);
406 nr_node_put(nr_node);
412 * Lock a neighbour with a quality.
414 static int __must_check nr_add_neigh(ax25_address *callsign,
415 ax25_digi *ax25_digi, struct net_device *dev, unsigned int quality)
417 struct nr_neigh *nr_neigh;
419 nr_neigh = nr_neigh_get_dev(callsign, dev);
421 nr_neigh->quality = quality;
422 nr_neigh->locked = 1;
423 nr_neigh_put(nr_neigh);
427 if ((nr_neigh = kmalloc(sizeof(*nr_neigh), GFP_ATOMIC)) == NULL)
430 nr_neigh->callsign = *callsign;
431 nr_neigh->digipeat = NULL;
432 nr_neigh->ax25 = NULL;
434 nr_neigh->quality = quality;
435 nr_neigh->locked = 1;
437 nr_neigh->number = nr_neigh_no++;
438 nr_neigh->failed = 0;
439 atomic_set(&nr_neigh->refcount, 1);
441 if (ax25_digi != NULL && ax25_digi->ndigi > 0) {
442 nr_neigh->digipeat = kmemdup(ax25_digi, sizeof(*ax25_digi),
444 if (nr_neigh->digipeat == NULL) {
450 spin_lock_bh(&nr_neigh_list_lock);
451 hlist_add_head(&nr_neigh->neigh_node, &nr_neigh_list);
452 /* refcount is initialized at 1 */
453 spin_unlock_bh(&nr_neigh_list_lock);
459 * "Delete" a neighbour. The neighbour is only removed if the number
460 * of nodes that may use it is zero.
462 static int nr_del_neigh(ax25_address *callsign, struct net_device *dev, unsigned int quality)
464 struct nr_neigh *nr_neigh;
466 nr_neigh = nr_neigh_get_dev(callsign, dev);
468 if (nr_neigh == NULL) return -EINVAL;
470 nr_neigh->quality = quality;
471 nr_neigh->locked = 0;
473 if (nr_neigh->count == 0)
474 nr_remove_neigh(nr_neigh);
475 nr_neigh_put(nr_neigh);
481 * Decrement the obsolescence count by one. If a route is reduced to a
482 * count of zero, remove it. Also remove any unlocked neighbours with
483 * zero nodes routing via it.
485 static int nr_dec_obs(void)
487 struct nr_neigh *nr_neigh;
489 struct hlist_node *node, *nodet;
492 spin_lock_bh(&nr_node_list_lock);
493 nr_node_for_each_safe(s, node, nodet, &nr_node_list) {
495 for (i = 0; i < s->count; i++) {
496 switch (s->routes[i].obs_count) {
497 case 0: /* A locked entry */
500 case 1: /* From 1 -> 0 */
501 nr_neigh = s->routes[i].neighbour;
504 nr_neigh_put(nr_neigh);
506 if (nr_neigh->count == 0 && !nr_neigh->locked)
507 nr_remove_neigh(nr_neigh);
513 s->routes[0] = s->routes[1];
516 s->routes[1] = s->routes[2];
523 s->routes[i].obs_count--;
530 nr_remove_node_locked(s);
533 spin_unlock_bh(&nr_node_list_lock);
539 * A device has been removed. Remove its routes and neighbours.
541 void nr_rt_device_down(struct net_device *dev)
544 struct hlist_node *node, *nodet, *node2, *node2t;
548 spin_lock_bh(&nr_neigh_list_lock);
549 nr_neigh_for_each_safe(s, node, nodet, &nr_neigh_list) {
551 spin_lock_bh(&nr_node_list_lock);
552 nr_node_for_each_safe(t, node2, node2t, &nr_node_list) {
554 for (i = 0; i < t->count; i++) {
555 if (t->routes[i].neighbour == s) {
560 t->routes[0] = t->routes[1];
562 t->routes[1] = t->routes[2];
570 nr_remove_node_locked(t);
573 spin_unlock_bh(&nr_node_list_lock);
575 nr_remove_neigh_locked(s);
578 spin_unlock_bh(&nr_neigh_list_lock);
582 * Check that the device given is a valid AX.25 interface that is "up".
583 * Or a valid ethernet interface with an AX.25 callsign binding.
585 static struct net_device *nr_ax25_dev_get(char *devname)
587 struct net_device *dev;
589 if ((dev = dev_get_by_name(&init_net, devname)) == NULL)
592 if ((dev->flags & IFF_UP) && dev->type == ARPHRD_AX25)
600 * Find the first active NET/ROM device, usually "nr0".
602 struct net_device *nr_dev_first(void)
604 struct net_device *dev, *first = NULL;
607 for_each_netdev_rcu(&init_net, dev) {
608 if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM)
609 if (first == NULL || strncmp(dev->name, first->name, 3) < 0)
620 * Find the NET/ROM device for the given callsign.
622 struct net_device *nr_dev_get(ax25_address *addr)
624 struct net_device *dev;
627 for_each_netdev_rcu(&init_net, dev) {
628 if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM &&
629 ax25cmp(addr, (ax25_address *)dev->dev_addr) == 0) {
640 static ax25_digi *nr_call_to_digi(ax25_digi *digi, int ndigis,
641 ax25_address *digipeaters)
648 for (i = 0; i < ndigis; i++) {
649 digi->calls[i] = digipeaters[i];
650 digi->repeated[i] = 0;
653 digi->ndigi = ndigis;
654 digi->lastrepeat = -1;
660 * Handle the ioctls that control the routing functions.
662 int nr_rt_ioctl(unsigned int cmd, void __user *arg)
664 struct nr_route_struct nr_route;
665 struct net_device *dev;
671 if (copy_from_user(&nr_route, arg, sizeof(struct nr_route_struct)))
673 if ((dev = nr_ax25_dev_get(nr_route.device)) == NULL)
675 if (nr_route.ndigis < 0 || nr_route.ndigis > AX25_MAX_DIGIS) {
679 switch (nr_route.type) {
681 ret = nr_add_node(&nr_route.callsign,
684 nr_call_to_digi(&digi, nr_route.ndigis,
685 nr_route.digipeaters),
686 dev, nr_route.quality,
690 ret = nr_add_neigh(&nr_route.callsign,
691 nr_call_to_digi(&digi, nr_route.ndigis,
692 nr_route.digipeaters),
693 dev, nr_route.quality);
702 if (copy_from_user(&nr_route, arg, sizeof(struct nr_route_struct)))
704 if ((dev = nr_ax25_dev_get(nr_route.device)) == NULL)
706 switch (nr_route.type) {
708 ret = nr_del_node(&nr_route.callsign,
709 &nr_route.neighbour, dev);
712 ret = nr_del_neigh(&nr_route.callsign,
713 dev, nr_route.quality);
732 * A level 2 link has timed out, therefore it appears to be a poor link,
733 * then don't use that neighbour until it is reset.
735 void nr_link_failed(ax25_cb *ax25, int reason)
737 struct nr_neigh *s, *nr_neigh = NULL;
738 struct hlist_node *node;
739 struct nr_node *nr_node = NULL;
741 spin_lock_bh(&nr_neigh_list_lock);
742 nr_neigh_for_each(s, node, &nr_neigh_list) {
743 if (s->ax25 == ax25) {
749 spin_unlock_bh(&nr_neigh_list_lock);
751 if (nr_neigh == NULL)
754 nr_neigh->ax25 = NULL;
757 if (++nr_neigh->failed < sysctl_netrom_link_fails_count) {
758 nr_neigh_put(nr_neigh);
761 spin_lock_bh(&nr_node_list_lock);
762 nr_node_for_each(nr_node, node, &nr_node_list) {
763 nr_node_lock(nr_node);
764 if (nr_node->which < nr_node->count &&
765 nr_node->routes[nr_node->which].neighbour == nr_neigh)
767 nr_node_unlock(nr_node);
769 spin_unlock_bh(&nr_node_list_lock);
770 nr_neigh_put(nr_neigh);
774 * Route a frame to an appropriate AX.25 connection. A NULL ax25_cb
775 * indicates an internally generated frame.
777 int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
779 ax25_address *nr_src, *nr_dest;
780 struct nr_neigh *nr_neigh;
781 struct nr_node *nr_node;
782 struct net_device *dev;
786 struct sk_buff *skbn;
789 nr_src = (ax25_address *)(skb->data + 0);
790 nr_dest = (ax25_address *)(skb->data + 7);
793 ret = nr_add_node(nr_src, "", &ax25->dest_addr, ax25->digipeat,
794 ax25->ax25_dev->dev, 0,
795 sysctl_netrom_obsolescence_count_initialiser);
800 if ((dev = nr_dev_get(nr_dest)) != NULL) { /* Its for me */
801 if (ax25 == NULL) /* Its from me */
802 ret = nr_loopback_queue(skb);
804 ret = nr_rx_frame(skb, dev);
809 if (!sysctl_netrom_routing_control && ax25 != NULL)
812 /* Its Time-To-Live has expired */
813 if (skb->data[14] == 1) {
817 nr_node = nr_node_get(nr_dest);
820 nr_node_lock(nr_node);
822 if (nr_node->which >= nr_node->count) {
823 nr_node_unlock(nr_node);
824 nr_node_put(nr_node);
828 nr_neigh = nr_node->routes[nr_node->which].neighbour;
830 if ((dev = nr_dev_first()) == NULL) {
831 nr_node_unlock(nr_node);
832 nr_node_put(nr_node);
836 /* We are going to change the netrom headers so we should get our
837 own skb, we also did not know until now how much header space
838 we had to reserve... - RXQ */
839 if ((skbn=skb_copy_expand(skb, dev->hard_header_len, 0, GFP_ATOMIC)) == NULL) {
840 nr_node_unlock(nr_node);
841 nr_node_put(nr_node);
849 dptr = skb_push(skb, 1);
850 *dptr = AX25_P_NETROM;
852 ax25s = nr_neigh->ax25;
853 nr_neigh->ax25 = ax25_send_frame(skb, 256,
854 (ax25_address *)dev->dev_addr,
856 nr_neigh->digipeat, nr_neigh->dev);
861 ret = (nr_neigh->ax25 != NULL);
862 nr_node_unlock(nr_node);
863 nr_node_put(nr_node);
868 #ifdef CONFIG_PROC_FS
870 static void *nr_node_start(struct seq_file *seq, loff_t *pos)
872 spin_lock_bh(&nr_node_list_lock);
873 return seq_hlist_start_head(&nr_node_list, *pos);
876 static void *nr_node_next(struct seq_file *seq, void *v, loff_t *pos)
878 return seq_hlist_next(v, &nr_node_list, pos);
881 static void nr_node_stop(struct seq_file *seq, void *v)
883 spin_unlock_bh(&nr_node_list_lock);
886 static int nr_node_show(struct seq_file *seq, void *v)
891 if (v == SEQ_START_TOKEN)
893 "callsign mnemonic w n qual obs neigh qual obs neigh qual obs neigh\n");
895 struct nr_node *nr_node = hlist_entry(v, struct nr_node,
898 nr_node_lock(nr_node);
899 seq_printf(seq, "%-9s %-7s %d %d",
900 ax2asc(buf, &nr_node->callsign),
901 (nr_node->mnemonic[0] == '\0') ? "*" : nr_node->mnemonic,
905 for (i = 0; i < nr_node->count; i++) {
906 seq_printf(seq, " %3d %d %05d",
907 nr_node->routes[i].quality,
908 nr_node->routes[i].obs_count,
909 nr_node->routes[i].neighbour->number);
911 nr_node_unlock(nr_node);
918 static const struct seq_operations nr_node_seqops = {
919 .start = nr_node_start,
920 .next = nr_node_next,
921 .stop = nr_node_stop,
922 .show = nr_node_show,
925 static int nr_node_info_open(struct inode *inode, struct file *file)
927 return seq_open(file, &nr_node_seqops);
930 const struct file_operations nr_nodes_fops = {
931 .owner = THIS_MODULE,
932 .open = nr_node_info_open,
935 .release = seq_release,
938 static void *nr_neigh_start(struct seq_file *seq, loff_t *pos)
940 spin_lock_bh(&nr_neigh_list_lock);
941 return seq_hlist_start_head(&nr_neigh_list, *pos);
944 static void *nr_neigh_next(struct seq_file *seq, void *v, loff_t *pos)
946 return seq_hlist_next(v, &nr_neigh_list, pos);
949 static void nr_neigh_stop(struct seq_file *seq, void *v)
951 spin_unlock_bh(&nr_neigh_list_lock);
954 static int nr_neigh_show(struct seq_file *seq, void *v)
959 if (v == SEQ_START_TOKEN)
960 seq_puts(seq, "addr callsign dev qual lock count failed digipeaters\n");
962 struct nr_neigh *nr_neigh;
964 nr_neigh = hlist_entry(v, struct nr_neigh, neigh_node);
965 seq_printf(seq, "%05d %-9s %-4s %3d %d %3d %3d",
967 ax2asc(buf, &nr_neigh->callsign),
968 nr_neigh->dev ? nr_neigh->dev->name : "???",
974 if (nr_neigh->digipeat != NULL) {
975 for (i = 0; i < nr_neigh->digipeat->ndigi; i++)
976 seq_printf(seq, " %s",
977 ax2asc(buf, &nr_neigh->digipeat->calls[i]));
985 static const struct seq_operations nr_neigh_seqops = {
986 .start = nr_neigh_start,
987 .next = nr_neigh_next,
988 .stop = nr_neigh_stop,
989 .show = nr_neigh_show,
992 static int nr_neigh_info_open(struct inode *inode, struct file *file)
994 return seq_open(file, &nr_neigh_seqops);
997 const struct file_operations nr_neigh_fops = {
998 .owner = THIS_MODULE,
999 .open = nr_neigh_info_open,
1001 .llseek = seq_lseek,
1002 .release = seq_release,
1008 * Free all memory associated with the nodes and routes lists.
1010 void __exit nr_rt_free(void)
1012 struct nr_neigh *s = NULL;
1013 struct nr_node *t = NULL;
1014 struct hlist_node *node, *nodet;
1016 spin_lock_bh(&nr_neigh_list_lock);
1017 spin_lock_bh(&nr_node_list_lock);
1018 nr_node_for_each_safe(t, node, nodet, &nr_node_list) {
1020 nr_remove_node_locked(t);
1023 nr_neigh_for_each_safe(s, node, nodet, &nr_neigh_list) {
1028 nr_remove_neigh_locked(s);
1030 spin_unlock_bh(&nr_node_list_lock);
1031 spin_unlock_bh(&nr_neigh_list_lock);