/* net/batman-adv/translation-table.c */
/*
 * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */
21
22 #include "main.h"
23 #include "translation-table.h"
24 #include "soft-interface.h"
25 #include "types.h"
26 #include "hash.h"
27 #include "originator.h"
28
29 static void hna_local_purge(struct work_struct *work);
30 static void _hna_global_del_orig(struct bat_priv *bat_priv,
31                                  struct hna_global_entry *hna_global_entry,
32                                  char *message);
33
/* (re)arm the periodic purge: hna_local_purge() will run on
 * bat_event_workqueue 10 seconds from now */
static void hna_local_start_timer(struct bat_priv *bat_priv)
{
	INIT_DELAYED_WORK(&bat_priv->hna_work, hna_local_purge);
	queue_delayed_work(bat_event_workqueue, &bat_priv->hna_work, 10 * HZ);
}
39
40 int hna_local_init(struct bat_priv *bat_priv)
41 {
42         if (bat_priv->hna_local_hash)
43                 return 1;
44
45         bat_priv->hna_local_hash = hash_new(1024);
46
47         if (!bat_priv->hna_local_hash)
48                 return 0;
49
50         atomic_set(&bat_priv->hna_local_changed, 0);
51         hna_local_start_timer(bat_priv);
52
53         return 1;
54 }
55
/**
 * hna_local_add - announce a client MAC address as locally reachable
 * @soft_iface: the batman soft interface the address belongs to
 * @addr: the MAC address to announce via HNA
 *
 * If the address is already known only its last_seen stamp is
 * refreshed.  Otherwise a new local entry is created - unless the
 * resulting HNA list would no longer fit into one batman packet - and
 * any matching *global* entry is dropped, since a locally reachable
 * host must not also be routed through the mesh.
 */
void hna_local_add(struct net_device *soft_iface, uint8_t *addr)
{
	struct bat_priv *bat_priv = netdev_priv(soft_iface);
	struct hna_local_entry *hna_local_entry;
	struct hna_global_entry *hna_global_entry;
	int required_bytes;

	spin_lock_bh(&bat_priv->hna_lhash_lock);
	hna_local_entry =
		((struct hna_local_entry *)hash_find(bat_priv->hna_local_hash,
						     compare_orig, choose_orig,
						     addr));
	spin_unlock_bh(&bat_priv->hna_lhash_lock);

	/* known address: just refresh the purge timestamp */
	if (hna_local_entry) {
		hna_local_entry->last_seen = jiffies;
		return;
	}

	/* only announce as many hosts as possible in the batman-packet and
	   space in batman_packet->num_hna That also should give a limit to
	   MAC-flooding. */
	required_bytes = (bat_priv->num_local_hna + 1) * ETH_ALEN;
	required_bytes += BAT_PACKET_LEN;

	/* refuse when the HNA list would exceed the frame size, the OGM
	 * aggregation buffer, or 255 entries (presumably the width of
	 * batman_packet->num_hna - see comment above) */
	if ((required_bytes > ETH_DATA_LEN) ||
	    (atomic_read(&bat_priv->aggregated_ogms) &&
	     required_bytes > MAX_AGGREGATION_BYTES) ||
	    (bat_priv->num_local_hna + 1 > 255)) {
		bat_dbg(DBG_ROUTES, bat_priv,
			"Can't add new local hna entry (%pM): "
			"number of local hna entries exceeds packet size\n",
			addr);
		return;
	}

	bat_dbg(DBG_ROUTES, bat_priv,
		"Creating new local hna entry: %pM\n", addr);

	/* NOTE(review): the lhash lock was dropped after the lookup
	 * above; a concurrent hna_local_add() for the same address in
	 * this window could insert a duplicate - confirm callers are
	 * serialized */
	hna_local_entry = kmalloc(sizeof(struct hna_local_entry), GFP_ATOMIC);
	if (!hna_local_entry)
		return;

	memcpy(hna_local_entry->addr, addr, ETH_ALEN);
	hna_local_entry->last_seen = jiffies;

	/* the batman interface mac address should never be purged */
	if (compare_orig(addr, soft_iface->dev_addr))
		hna_local_entry->never_purge = 1;
	else
		hna_local_entry->never_purge = 0;

	spin_lock_bh(&bat_priv->hna_lhash_lock);

	hash_add(bat_priv->hna_local_hash, compare_orig, choose_orig,
		 hna_local_entry);
	bat_priv->num_local_hna++;
	/* signal the OGM sender that the HNA set must be re-announced */
	atomic_set(&bat_priv->hna_local_changed, 1);

	spin_unlock_bh(&bat_priv->hna_lhash_lock);

	/* remove address from global hash if present */
	spin_lock_bh(&bat_priv->hna_ghash_lock);

	hna_global_entry = ((struct hna_global_entry *)
				hash_find(bat_priv->hna_global_hash,
					  compare_orig, choose_orig, addr));

	if (hna_global_entry)
		_hna_global_del_orig(bat_priv, hna_global_entry,
				     "local hna received");

	spin_unlock_bh(&bat_priv->hna_ghash_lock);
}
130
/**
 * hna_local_fill_buffer - copy local HNA addresses into a packet buffer
 * @bat_priv: per soft-interface private data
 * @buff: destination buffer
 * @buff_len: size of @buff in bytes
 *
 * Writes as many local addresses (ETH_ALEN bytes each) as fit into
 * @buff.  The hna_local_changed flag is cleared only when every entry
 * fit, so a truncated announcement will be retried later.
 *
 * Returns the number of addresses copied.
 */
int hna_local_fill_buffer(struct bat_priv *bat_priv,
			  unsigned char *buff, int buff_len)
{
	struct hashtable_t *hash = bat_priv->hna_local_hash;
	struct hna_local_entry *hna_local_entry;
	struct element_t *bucket;
	int i;
	struct hlist_node *walk;
	struct hlist_head *head;
	int count = 0;

	spin_lock_bh(&bat_priv->hna_lhash_lock);

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each_entry(bucket, walk, head, hlist) {

			/* buffer full: stop writing (note this only
			 * breaks the inner loop; remaining buckets are
			 * still scanned but nothing more is copied) */
			if (buff_len < (count + 1) * ETH_ALEN)
				break;

			hna_local_entry = bucket->data;
			memcpy(buff + (count * ETH_ALEN), hna_local_entry->addr,
			       ETH_ALEN);

			count++;
		}
	}

	/* if we did not get all new local hnas see you next time  ;-) */
	if (count == bat_priv->num_local_hna)
		atomic_set(&bat_priv->hna_local_changed, 0);

	spin_unlock_bh(&bat_priv->hna_lhash_lock);
	return count;
}
167
/**
 * hna_local_seq_print_text - debugfs show handler for the local HNA table
 * @seq: seq_file to print into (its private pointer is the net_device)
 * @offset: unused seq_file offset
 *
 * Dumps every locally announced address.  The output is first
 * formatted into a temporary kmalloc'd buffer while holding the lhash
 * lock, then handed to seq_printf() after unlocking.
 *
 * Returns 0 on success, -ENOMEM if the temporary buffer allocation
 * fails.
 */
int hna_local_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hashtable_t *hash = bat_priv->hna_local_hash;
	struct hna_local_entry *hna_local_entry;
	int i;
	struct hlist_node *walk;
	struct hlist_head *head;
	struct element_t *bucket;
	size_t buf_size, pos;
	char *buff;

	if (!bat_priv->primary_if) {
		return seq_printf(seq, "BATMAN mesh %s disabled - "
			       "please specify interfaces to enable it\n",
			       net_dev->name);
	}

	seq_printf(seq, "Locally retrieved addresses (from %s) "
		   "announced via HNA:\n",
		   net_dev->name);

	spin_lock_bh(&bat_priv->hna_lhash_lock);

	/* 1 byte for the trailing NUL */
	buf_size = 1;
	/* Estimate length for: " * xx:xx:xx:xx:xx:xx\n" */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each(walk, head)
			buf_size += 21;
	}

	buff = kmalloc(buf_size, GFP_ATOMIC);
	if (!buff) {
		spin_unlock_bh(&bat_priv->hna_lhash_lock);
		return -ENOMEM;
	}
	buff[0] = '\0';
	pos = 0;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each_entry(bucket, walk, head, hlist) {
			hna_local_entry = bucket->data;

			/* bound of 22 = 21 formatted chars + NUL,
			 * matching the per-entry estimate above */
			pos += snprintf(buff + pos, 22, " * %pM\n",
					hna_local_entry->addr);
		}
	}

	spin_unlock_bh(&bat_priv->hna_lhash_lock);

	seq_printf(seq, "%s", buff);
	kfree(buff);
	return 0;
}
227
/* free one local entry and account for its removal; used directly by
 * hna_local_del() and as the hash_delete() callback in
 * hna_local_free().  @data is the hna_local_entry, @arg the owning
 * bat_priv. */
static void _hna_local_del(void *data, void *arg)
{
	struct bat_priv *bat_priv = (struct bat_priv *)arg;

	kfree(data);
	bat_priv->num_local_hna--;
	/* the announced HNA set shrank - flag it for re-announcement */
	atomic_set(&bat_priv->hna_local_changed, 1);
}
236
/* unlink @hna_local_entry from the local hash and free it.
 * NOTE(review): every caller in this file (hna_local_remove,
 * hna_local_purge, hna_global_add_orig) holds hna_lhash_lock around
 * this call - keep it that way. */
static void hna_local_del(struct bat_priv *bat_priv,
			  struct hna_local_entry *hna_local_entry,
			  char *message)
{
	bat_dbg(DBG_ROUTES, bat_priv, "Deleting local hna entry (%pM): %s\n",
		hna_local_entry->addr, message);

	hash_remove(bat_priv->hna_local_hash, compare_orig, choose_orig,
		    hna_local_entry->addr);
	_hna_local_del(hna_local_entry, bat_priv);
}
248
249 void hna_local_remove(struct bat_priv *bat_priv,
250                       uint8_t *addr, char *message)
251 {
252         struct hna_local_entry *hna_local_entry;
253
254         spin_lock_bh(&bat_priv->hna_lhash_lock);
255
256         hna_local_entry = (struct hna_local_entry *)
257                 hash_find(bat_priv->hna_local_hash, compare_orig, choose_orig,
258                           addr);
259
260         if (hna_local_entry)
261                 hna_local_del(bat_priv, hna_local_entry, message);
262
263         spin_unlock_bh(&bat_priv->hna_lhash_lock);
264 }
265
/**
 * hna_local_purge - delayed-work handler expiring stale local entries
 * @work: the work_struct embedded in bat_priv->hna_work
 *
 * Walks the whole local table and removes every entry whose last_seen
 * is older than LOCAL_HNA_TIMEOUT seconds, except entries flagged
 * never_purge (the soft interface's own address).  Re-arms itself for
 * another run via hna_local_start_timer().
 */
static void hna_local_purge(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct bat_priv *bat_priv =
		container_of(delayed_work, struct bat_priv, hna_work);
	struct hashtable_t *hash = bat_priv->hna_local_hash;
	struct hna_local_entry *hna_local_entry;
	int i;
	struct hlist_node *walk, *safe;
	struct hlist_head *head;
	struct element_t *bucket;
	unsigned long timeout;

	spin_lock_bh(&bat_priv->hna_lhash_lock);

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		/* _safe variant: hna_local_del() removes and frees the
		 * node we are standing on */
		hlist_for_each_entry_safe(bucket, walk, safe, head, hlist) {
			hna_local_entry = bucket->data;

			timeout = hna_local_entry->last_seen;
			timeout += LOCAL_HNA_TIMEOUT * HZ;

			if ((!hna_local_entry->never_purge) &&
			    time_after(jiffies, timeout))
				hna_local_del(bat_priv, hna_local_entry,
					"address timed out");
		}
	}

	spin_unlock_bh(&bat_priv->hna_lhash_lock);
	/* schedule the next purge round */
	hna_local_start_timer(bat_priv);
}
301
/* tear down the local HNA table: stop the purge worker, free every
 * entry and drop the hash.  No lhash lock is taken here - presumably
 * this only runs at interface teardown after the purge work has been
 * cancelled; TODO confirm no other path touches the table then. */
void hna_local_free(struct bat_priv *bat_priv)
{
	if (!bat_priv->hna_local_hash)
		return;

	cancel_delayed_work_sync(&bat_priv->hna_work);
	hash_delete(bat_priv->hna_local_hash, _hna_local_del, bat_priv);
	bat_priv->hna_local_hash = NULL;
}
311
312 int hna_global_init(struct bat_priv *bat_priv)
313 {
314         if (bat_priv->hna_global_hash)
315                 return 1;
316
317         bat_priv->hna_global_hash = hash_new(1024);
318
319         if (!bat_priv->hna_global_hash)
320                 return 0;
321
322         return 1;
323 }
324
/**
 * hna_global_add_orig - import the HNA list announced by an originator
 * @bat_priv: per soft-interface private data
 * @orig_node: the mesh node announcing the addresses
 * @hna_buff: packed array of MAC addresses (ETH_ALEN bytes each)
 * @hna_buff_len: length of @hna_buff in bytes
 *
 * For each announced address: create a global entry if none exists,
 * point it at @orig_node, and drop any matching *local* entry (the
 * host is now reachable through the mesh, not locally).  Finally a
 * private copy of @hna_buff is stored on @orig_node.
 */
void hna_global_add_orig(struct bat_priv *bat_priv,
			 struct orig_node *orig_node,
			 unsigned char *hna_buff, int hna_buff_len)
{
	struct hna_global_entry *hna_global_entry;
	struct hna_local_entry *hna_local_entry;
	int hna_buff_count = 0;
	unsigned char *hna_ptr;

	/* iterate over whole ETH_ALEN-sized addresses only; a trailing
	 * partial address is ignored */
	while ((hna_buff_count + 1) * ETH_ALEN <= hna_buff_len) {
		spin_lock_bh(&bat_priv->hna_ghash_lock);

		hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN);
		hna_global_entry = (struct hna_global_entry *)
			hash_find(bat_priv->hna_global_hash, compare_orig,
				  choose_orig, hna_ptr);

		if (!hna_global_entry) {
			/* drop the lock for the allocation; re-taken
			 * below before hash_add so the new entry is
			 * published and its orig_node set under the
			 * same lock hold */
			spin_unlock_bh(&bat_priv->hna_ghash_lock);

			hna_global_entry =
				kmalloc(sizeof(struct hna_global_entry),
					GFP_ATOMIC);

			/* NOTE(review): on allocation failure the loop
			 * stops, yet the full hna_buff is still copied
			 * onto orig_node below - the remaining
			 * addresses end up in the buffer but not in
			 * the hash; TODO confirm this is intended */
			if (!hna_global_entry)
				break;

			memcpy(hna_global_entry->addr, hna_ptr, ETH_ALEN);

			bat_dbg(DBG_ROUTES, bat_priv,
				"Creating new global hna entry: "
				"%pM (via %pM)\n",
				hna_global_entry->addr, orig_node->orig);

			spin_lock_bh(&bat_priv->hna_ghash_lock);
			hash_add(bat_priv->hna_global_hash, compare_orig,
				 choose_orig, hna_global_entry);

		}

		/* existing entries are re-pointed: the most recent
		 * announcer wins the address */
		hna_global_entry->orig_node = orig_node;
		spin_unlock_bh(&bat_priv->hna_ghash_lock);

		/* remove address from local hash if present */
		spin_lock_bh(&bat_priv->hna_lhash_lock);

		hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN);
		hna_local_entry = (struct hna_local_entry *)
			hash_find(bat_priv->hna_local_hash, compare_orig,
				  choose_orig, hna_ptr);

		if (hna_local_entry)
			hna_local_del(bat_priv, hna_local_entry,
				      "global hna received");

		spin_unlock_bh(&bat_priv->hna_lhash_lock);

		hna_buff_count++;
	}

	/* initialize, and overwrite if malloc succeeds */
	/* NOTE(review): any previous orig_node->hna_buff is overwritten
	 * without kfree here - presumably callers free it first (e.g.
	 * via hna_global_del_orig); verify against callers */
	orig_node->hna_buff = NULL;
	orig_node->hna_buff_len = 0;

	if (hna_buff_len > 0) {
		orig_node->hna_buff = kmalloc(hna_buff_len, GFP_ATOMIC);
		if (orig_node->hna_buff) {
			memcpy(orig_node->hna_buff, hna_buff, hna_buff_len);
			orig_node->hna_buff_len = hna_buff_len;
		}
	}
}
397
/**
 * hna_global_seq_print_text - debugfs show handler for the global HNA table
 * @seq: seq_file to print into (its private pointer is the net_device)
 * @offset: unused seq_file offset
 *
 * Dumps every globally announced address together with the originator
 * it is reachable through.  Formatting happens into a temporary
 * buffer under the ghash lock; seq_printf() runs after unlocking.
 *
 * Returns 0 on success, -ENOMEM if the temporary buffer allocation
 * fails.
 */
int hna_global_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hashtable_t *hash = bat_priv->hna_global_hash;
	struct hna_global_entry *hna_global_entry;
	int i;
	struct hlist_node *walk;
	struct hlist_head *head;
	struct element_t *bucket;
	size_t buf_size, pos;
	char *buff;

	if (!bat_priv->primary_if) {
		return seq_printf(seq, "BATMAN mesh %s disabled - "
				  "please specify interfaces to enable it\n",
				  net_dev->name);
	}

	seq_printf(seq, "Globally announced HNAs received via the mesh %s\n",
		   net_dev->name);

	spin_lock_bh(&bat_priv->hna_ghash_lock);

	/* 1 byte for the trailing NUL */
	buf_size = 1;
	/* Estimate length for: " * xx:xx:xx:xx:xx:xx via xx:xx:xx:xx:xx:xx\n"*/
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each(walk, head)
			buf_size += 43;
	}

	buff = kmalloc(buf_size, GFP_ATOMIC);
	if (!buff) {
		spin_unlock_bh(&bat_priv->hna_ghash_lock);
		return -ENOMEM;
	}
	buff[0] = '\0';
	pos = 0;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each_entry(bucket, walk, head, hlist) {
			hna_global_entry = bucket->data;

			/* bound of 44 = 43 formatted chars + NUL,
			 * matching the per-entry estimate above */
			pos += snprintf(buff + pos, 44,
					" * %pM via %pM\n",
					hna_global_entry->addr,
					hna_global_entry->orig_node->orig);
		}
	}

	spin_unlock_bh(&bat_priv->hna_ghash_lock);

	seq_printf(seq, "%s", buff);
	kfree(buff);
	return 0;
}
458
/* unlink @hna_global_entry from the global hash and free it.  Both
 * callers in this file (hna_local_add, hna_global_del_orig) hold
 * hna_ghash_lock around this call. */
static void _hna_global_del_orig(struct bat_priv *bat_priv,
				 struct hna_global_entry *hna_global_entry,
				 char *message)
{
	bat_dbg(DBG_ROUTES, bat_priv,
		"Deleting global hna entry %pM (via %pM): %s\n",
		hna_global_entry->addr, hna_global_entry->orig_node->orig,
		message);

	hash_remove(bat_priv->hna_global_hash, compare_orig, choose_orig,
		    hna_global_entry->addr);
	kfree(hna_global_entry);
}
472
/**
 * hna_global_del_orig - retract all HNA entries owned by an originator
 * @bat_priv: per soft-interface private data
 * @orig_node: the mesh node whose announcements are withdrawn
 * @message: reason string for the debug log
 *
 * Walks the address list stored on @orig_node and removes each
 * matching global entry - but only if it still points at @orig_node,
 * since another originator may have claimed the address in the
 * meantime.  Finally frees the stored address buffer.
 */
void hna_global_del_orig(struct bat_priv *bat_priv,
			 struct orig_node *orig_node, char *message)
{
	struct hna_global_entry *hna_global_entry;
	int hna_buff_count = 0;
	unsigned char *hna_ptr;

	if (orig_node->hna_buff_len == 0)
		return;

	spin_lock_bh(&bat_priv->hna_ghash_lock);

	while ((hna_buff_count + 1) * ETH_ALEN <= orig_node->hna_buff_len) {
		hna_ptr = orig_node->hna_buff + (hna_buff_count * ETH_ALEN);
		hna_global_entry = (struct hna_global_entry *)
			hash_find(bat_priv->hna_global_hash, compare_orig,
				  choose_orig, hna_ptr);

		/* skip entries meanwhile re-claimed by another node */
		if ((hna_global_entry) &&
		    (hna_global_entry->orig_node == orig_node))
			_hna_global_del_orig(bat_priv, hna_global_entry,
					     message);

		hna_buff_count++;
	}

	spin_unlock_bh(&bat_priv->hna_ghash_lock);

	orig_node->hna_buff_len = 0;
	kfree(orig_node->hna_buff);
	orig_node->hna_buff = NULL;
}
505
/* hash_delete() callback for hna_global_free(): free one global
 * entry; @arg is unused */
static void hna_global_del(void *data, void *arg)
{
	kfree(data);
}
510
511 void hna_global_free(struct bat_priv *bat_priv)
512 {
513         if (!bat_priv->hna_global_hash)
514                 return;
515
516         hash_delete(bat_priv->hna_global_hash, hna_global_del, NULL);
517         bat_priv->hna_global_hash = NULL;
518 }
519
520 struct orig_node *transtable_search(struct bat_priv *bat_priv, uint8_t *addr)
521 {
522         struct hna_global_entry *hna_global_entry;
523
524         spin_lock_bh(&bat_priv->hna_ghash_lock);
525         hna_global_entry = (struct hna_global_entry *)
526                                 hash_find(bat_priv->hna_global_hash,
527                                           compare_orig, choose_orig, addr);
528         spin_unlock_bh(&bat_priv->hna_ghash_lock);
529
530         if (!hna_global_entry)
531                 return NULL;
532
533         return hna_global_entry->orig_node;
534 }