]> git.karo-electronics.de Git - karo-tx-linux.git/blob - net/batman-adv/main.c
3e1bb7a1f8b469d554d7492404cd86e9320b4602
[karo-tx-linux.git] / net / batman-adv / main.c
1 /*
2  * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3  *
4  * Marek Lindner, Simon Wunderlich
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of version 2 of the GNU General Public
8  * License as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13  * General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program; if not, write to the Free Software
17  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18  * 02110-1301, USA
19  *
20  */
21
22 #include "main.h"
23 #include "bat_sysfs.h"
24 #include "bat_debugfs.h"
25 #include "routing.h"
26 #include "send.h"
27 #include "originator.h"
28 #include "soft-interface.h"
29 #include "icmp_socket.h"
30 #include "translation-table.h"
31 #include "hard-interface.h"
32 #include "gateway_client.h"
33 #include "bridge_loop_avoidance.h"
34 #include "vis.h"
35 #include "hash.h"
36 #include "bat_algo.h"
37
38
/* List manipulations on hardif_list have to be rtnl_lock()'ed,
 * list traversals just rcu-locked */
struct list_head hardif_list;

/* One receive handler per possible packet type (type is one byte, hence
 * 256 slots); slots nobody registered point at recv_unhandled_packet. */
static int (*recv_packet_handler[256])(struct sk_buff *, struct hard_iface *);

/* Name of the selected routing algorithm (settable via the
 * "routing_algo" module parameter); defaults to B.A.T.M.A.N. IV. */
char bat_routing_algo[20] = "BATMAN_IV";

/* Registry of available routing algorithms (struct bat_algo_ops entries). */
static struct hlist_head bat_algo_list;

/* Ethernet broadcast address used for batman broadcast frames. */
unsigned char broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

/* Single-threaded workqueue for deferred batman-adv work. */
struct workqueue_struct *bat_event_workqueue;

static void recv_handler_init(void);
/* Module init: set up global lists, the receive-handler dispatch table,
 * the default routing algorithm, the event workqueue and the debug
 * facilities, then subscribe to netdevice events.
 *
 * Returns 0 on success or -ENOMEM if the workqueue cannot be created.
 */
static int __init batman_init(void)
{
	INIT_LIST_HEAD(&hardif_list);
	INIT_HLIST_HEAD(&bat_algo_list);

	/* fill the dispatch table with the "unhandled" fallback before any
	 * handler registration can happen (recv_handler_register() tests
	 * slots against recv_unhandled_packet) */
	recv_handler_init();

	batadv_iv_init();

	/* the name should not be longer than 10 chars - see
	 * http://lwn.net/Articles/23634/ */
	bat_event_workqueue = create_singlethread_workqueue("bat_events");

	if (!bat_event_workqueue)
		return -ENOMEM;

	bat_socket_init();
	batadv_debugfs_init();

	register_netdevice_notifier(&hard_if_notifier);

	pr_info("B.A.T.M.A.N. advanced %s (compatibility version %i) loaded\n",
		SOURCE_VERSION, COMPAT_VERSION);

	return 0;
}
78
/* Module exit: undo batman_init() in reverse order and wait for all
 * pending RCU callbacks so no batman-adv memory outlives the module.
 */
static void __exit batman_exit(void)
{
	batadv_debugfs_destroy();
	unregister_netdevice_notifier(&hard_if_notifier);
	hardif_remove_interfaces();

	/* drain queued work items before the workqueue goes away */
	flush_workqueue(bat_event_workqueue);
	destroy_workqueue(bat_event_workqueue);
	bat_event_workqueue = NULL;

	/* wait for outstanding RCU callbacks (e.g. deferred frees) */
	rcu_barrier();
}
91
/* Initialize the per-mesh private data of a soft interface: all locks
 * and list heads first, then the mesh subsystems (originators,
 * translation table, vis, bridge loop avoidance).  On success the mesh
 * is marked MESH_ACTIVE.
 *
 * Returns 0 on success or a negative error code from the failing
 * subsystem; on failure, already initialized state is torn down again
 * via mesh_free().
 */
int mesh_init(struct net_device *soft_iface)
{
	struct bat_priv *bat_priv = netdev_priv(soft_iface);
	int ret;

	spin_lock_init(&bat_priv->forw_bat_list_lock);
	spin_lock_init(&bat_priv->forw_bcast_list_lock);
	spin_lock_init(&bat_priv->tt_changes_list_lock);
	spin_lock_init(&bat_priv->tt_req_list_lock);
	spin_lock_init(&bat_priv->tt_roam_list_lock);
	spin_lock_init(&bat_priv->tt_buff_lock);
	spin_lock_init(&bat_priv->gw_list_lock);
	spin_lock_init(&bat_priv->vis_hash_lock);
	spin_lock_init(&bat_priv->vis_list_lock);

	INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
	INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
	INIT_HLIST_HEAD(&bat_priv->gw_list);
	INIT_LIST_HEAD(&bat_priv->tt_changes_list);
	INIT_LIST_HEAD(&bat_priv->tt_req_list);
	INIT_LIST_HEAD(&bat_priv->tt_roam_list);

	ret = originator_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = tt_init(bat_priv);
	if (ret < 0)
		goto err;

	/* announce the soft interface's own MAC in the translation table */
	tt_local_add(soft_iface, soft_iface->dev_addr, NULL_IFINDEX);

	ret = vis_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_bla_init(bat_priv);
	if (ret < 0)
		goto err;

	atomic_set(&bat_priv->gw_reselect, 0);
	atomic_set(&bat_priv->mesh_state, MESH_ACTIVE);

	return 0;

err:
	/* NOTE(review): assumes mesh_free() tolerates subsystems that were
	 * never initialized on this partial-failure path — confirm */
	mesh_free(soft_iface);
	return ret;
}
141
/* Tear down a mesh: flag it MESH_DEACTIVATING, cancel outstanding
 * packets, free every subsystem set up by mesh_init() plus the per-cpu
 * counters, and finally mark it MESH_INACTIVE.  Also serves as the
 * error path of mesh_init().
 */
void mesh_free(struct net_device *soft_iface)
{
	struct bat_priv *bat_priv = netdev_priv(soft_iface);

	atomic_set(&bat_priv->mesh_state, MESH_DEACTIVATING);

	/* NULL interface: purge packets queued on all interfaces */
	purge_outstanding_packets(bat_priv, NULL);

	vis_quit(bat_priv);

	gw_node_purge(bat_priv);
	originator_free(bat_priv);

	tt_free(bat_priv);

	batadv_bla_free(bat_priv);

	free_percpu(bat_priv->bat_counters);

	atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
}
163
/* Take a reference on this module so it cannot be unloaded while in use.
 * NOTE(review): the try_module_get() return value is ignored, so a
 * failed grab (module already being removed) goes unnoticed here.
 */
void inc_module_count(void)
{
	try_module_get(THIS_MODULE);
}
168
/* Drop a module reference taken by inc_module_count(). */
void dec_module_count(void)
{
	module_put(THIS_MODULE);
}
173
174 int is_my_mac(const uint8_t *addr)
175 {
176         const struct hard_iface *hard_iface;
177
178         rcu_read_lock();
179         list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
180                 if (hard_iface->if_status != IF_ACTIVE)
181                         continue;
182
183                 if (compare_eth(hard_iface->net_dev->dev_addr, addr)) {
184                         rcu_read_unlock();
185                         return 1;
186                 }
187         }
188         rcu_read_unlock();
189         return 0;
190 }
191
/* Fallback receive handler for packet types nobody registered for:
 * unconditionally signal that the skb should be dropped.
 */
static int recv_unhandled_packet(struct sk_buff *skb,
				 struct hard_iface *recv_if)
{
	return NET_RX_DROP;
}
197
/* incoming packets with the batman ethertype received on any active hard
 * interface
 *
 * Validates the skb (shared-clone check, minimum length, sane ethernet
 * header), checks that the receiving interface belongs to an active
 * mesh and that the batman version matches, then dispatches the packet
 * to the handler registered for its type.
 *
 * Always returns NET_RX_SUCCESS once the skb has been consumed or
 * freed; NET_RX_DROP only on the early error paths.
 */
int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
		    struct packet_type *ptype, struct net_device *orig_dev)
{
	struct bat_priv *bat_priv;
	struct batman_ogm_packet *batman_ogm_packet;
	struct hard_iface *hard_iface;
	uint8_t idx;
	int ret;

	hard_iface = container_of(ptype, struct hard_iface, batman_adv_ptype);
	skb = skb_share_check(skb, GFP_ATOMIC);

	/* skb was released by skb_share_check() */
	if (!skb)
		goto err_out;

	/* packet should hold at least type and version */
	if (unlikely(!pskb_may_pull(skb, 2)))
		goto err_free;

	/* expect a valid ethernet header here. */
	if (unlikely(skb->mac_len != ETH_HLEN || !skb_mac_header(skb)))
		goto err_free;

	if (!hard_iface->soft_iface)
		goto err_free;

	bat_priv = netdev_priv(hard_iface->soft_iface);

	if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
		goto err_free;

	/* discard frames on not active interfaces */
	if (hard_iface->if_status != IF_ACTIVE)
		goto err_free;

	batman_ogm_packet = (struct batman_ogm_packet *)skb->data;

	if (batman_ogm_packet->header.version != COMPAT_VERSION) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: incompatible batman version (%i)\n",
			batman_ogm_packet->header.version);
		goto err_free;
	}

	/* all receive handlers return whether they received or reused
	 * the supplied skb. if not, we have to free the skb.
	 */
	/* packet_type is one byte, so it always indexes within the table */
	idx = batman_ogm_packet->header.packet_type;
	ret = (*recv_packet_handler[idx])(skb, hard_iface);

	if (ret == NET_RX_DROP)
		kfree_skb(skb);

	/* return NET_RX_SUCCESS in any case as we
	 * most probably dropped the packet for
	 * routing-logical reasons.
	 */
	return NET_RX_SUCCESS;

err_free:
	kfree_skb(skb);
err_out:
	return NET_RX_DROP;
}
266
267 static void recv_handler_init(void)
268 {
269         int i;
270
271         for (i = 0; i < ARRAY_SIZE(recv_packet_handler); i++)
272                 recv_packet_handler[i] = recv_unhandled_packet;
273
274         /* batman icmp packet */
275         recv_packet_handler[BAT_ICMP] = recv_icmp_packet;
276         /* unicast packet */
277         recv_packet_handler[BAT_UNICAST] = recv_unicast_packet;
278         /* fragmented unicast packet */
279         recv_packet_handler[BAT_UNICAST_FRAG] = recv_ucast_frag_packet;
280         /* broadcast packet */
281         recv_packet_handler[BAT_BCAST] = recv_bcast_packet;
282         /* vis packet */
283         recv_packet_handler[BAT_VIS] = recv_vis_packet;
284         /* Translation table query (request or response) */
285         recv_packet_handler[BAT_TT_QUERY] = recv_tt_query;
286         /* Roaming advertisement */
287         recv_packet_handler[BAT_ROAM_ADV] = recv_roam_adv;
288 }
289
290 int recv_handler_register(uint8_t packet_type,
291                           int (*recv_handler)(struct sk_buff *,
292                                               struct hard_iface *))
293 {
294         if (recv_packet_handler[packet_type] != &recv_unhandled_packet)
295                 return -EBUSY;
296
297         recv_packet_handler[packet_type] = recv_handler;
298         return 0;
299 }
300
/* Reset the handler slot for the given packet type back to the
 * "unhandled" fallback.
 */
void recv_handler_unregister(uint8_t packet_type)
{
	recv_packet_handler[packet_type] = recv_unhandled_packet;
}
305
306 static struct bat_algo_ops *bat_algo_get(char *name)
307 {
308         struct bat_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp;
309         struct hlist_node *node;
310
311         hlist_for_each_entry(bat_algo_ops_tmp, node, &bat_algo_list, list) {
312                 if (strcmp(bat_algo_ops_tmp->name, name) != 0)
313                         continue;
314
315                 bat_algo_ops = bat_algo_ops_tmp;
316                 break;
317         }
318
319         return bat_algo_ops;
320 }
321
322 int bat_algo_register(struct bat_algo_ops *bat_algo_ops)
323 {
324         struct bat_algo_ops *bat_algo_ops_tmp;
325         int ret;
326
327         bat_algo_ops_tmp = bat_algo_get(bat_algo_ops->name);
328         if (bat_algo_ops_tmp) {
329                 pr_info("Trying to register already registered routing algorithm: %s\n",
330                         bat_algo_ops->name);
331                 ret = -EEXIST;
332                 goto out;
333         }
334
335         /* all algorithms must implement all ops (for now) */
336         if (!bat_algo_ops->bat_iface_enable ||
337             !bat_algo_ops->bat_iface_disable ||
338             !bat_algo_ops->bat_iface_update_mac ||
339             !bat_algo_ops->bat_primary_iface_set ||
340             !bat_algo_ops->bat_ogm_schedule ||
341             !bat_algo_ops->bat_ogm_emit) {
342                 pr_info("Routing algo '%s' does not implement required ops\n",
343                         bat_algo_ops->name);
344                 ret = -EINVAL;
345                 goto out;
346         }
347
348         INIT_HLIST_NODE(&bat_algo_ops->list);
349         hlist_add_head(&bat_algo_ops->list, &bat_algo_list);
350         ret = 0;
351
352 out:
353         return ret;
354 }
355
356 int bat_algo_select(struct bat_priv *bat_priv, char *name)
357 {
358         struct bat_algo_ops *bat_algo_ops;
359         int ret = -EINVAL;
360
361         bat_algo_ops = bat_algo_get(name);
362         if (!bat_algo_ops)
363                 goto out;
364
365         bat_priv->bat_algo_ops = bat_algo_ops;
366         ret = 0;
367
368 out:
369         return ret;
370 }
371
372 int bat_algo_seq_print_text(struct seq_file *seq, void *offset)
373 {
374         struct bat_algo_ops *bat_algo_ops;
375         struct hlist_node *node;
376
377         seq_printf(seq, "Available routing algorithms:\n");
378
379         hlist_for_each_entry(bat_algo_ops, node, &bat_algo_list, list) {
380                 seq_printf(seq, "%s\n", bat_algo_ops->name);
381         }
382
383         return 0;
384 }
385
386 static int param_set_ra(const char *val, const struct kernel_param *kp)
387 {
388         struct bat_algo_ops *bat_algo_ops;
389         char *algo_name = (char *)val;
390         size_t name_len = strlen(algo_name);
391
392         if (algo_name[name_len - 1] == '\n')
393                 algo_name[name_len - 1] = '\0';
394
395         bat_algo_ops = bat_algo_get(algo_name);
396         if (!bat_algo_ops) {
397                 pr_err("Routing algorithm '%s' is not supported\n", algo_name);
398                 return -EINVAL;
399         }
400
401         return param_set_copystring(algo_name, kp);
402 }
403
/* Ops backing the "routing_algo" parameter: validated set, plain get. */
static const struct kernel_param_ops param_ops_ra = {
	.set = param_set_ra,
	.get = param_get_string,
};

/* String storage descriptor handed to the module parameter core. */
static struct kparam_string __param_string_ra = {
	.maxlen = sizeof(bat_routing_algo),
	.string = bat_routing_algo,
};

/* world-readable, root-writable parameter with a custom setter */
module_param_cb(routing_algo, &param_ops_ra, &__param_string_ra, 0644);
module_init(batman_init);
module_exit(batman_exit);

MODULE_LICENSE("GPL");

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_SUPPORTED_DEVICE(DRIVER_DEVICE);
MODULE_VERSION(SOURCE_VERSION);