/* Copyright (C) 2009-2013 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "main.h"
#include "distributed-arp-table.h"
#include "originator.h"
#include "hash.h"
#include "translation-table.h"
#include "routing.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "soft-interface.h"
#include "bridge_loop_avoidance.h"
#include "network-coding.h"
#include "fragmentation.h"

/* hash class keys */
static struct lock_class_key batadv_orig_hash_lock_class_key;

static void batadv_purge_orig(struct work_struct *work);

/* returns 1 if they are the same originator */
static int batadv_compare_orig(const struct hlist_node *node, const void *data2)
{
        const void *data1 = container_of(node, struct batadv_orig_node,
                                         hash_entry);

        return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
}

/**
 * batadv_orig_node_vlan_get - get an orig_node_vlan object
 * @orig_node: the originator serving the VLAN
 * @vid: the VLAN identifier
 *
 * Returns the vlan object identified by vid and belonging to orig_node or NULL
 * if it does not exist. The object is returned with its refcounter increased
 * by 1.
 */
struct batadv_orig_node_vlan *
batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
                          unsigned short vid)
{
        struct batadv_orig_node_vlan *vlan = NULL, *tmp;

        rcu_read_lock();
        list_for_each_entry_rcu(tmp, &orig_node->vlan_list, list) {
                if (tmp->vid != vid)
                        continue;

                if (!atomic_inc_not_zero(&tmp->refcount))
                        continue;

                vlan = tmp;

                break;
        }
        rcu_read_unlock();

        return vlan;
}
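
/* A minimal usage sketch (hypothetical caller, not part of this file): look
 * the per-VLAN object up, use it while holding the reference taken by
 * batadv_orig_node_vlan_get() and release that reference when done:
 *
 *	vlan = batadv_orig_node_vlan_get(orig_node, vid);
 *	if (vlan) {
 *		do_something_with(vlan);
 *		batadv_orig_node_vlan_free_ref(vlan);
 *	}
 *
 * do_something_with() stands in for any caller-specific use of the object.
 */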

/**
 * batadv_orig_node_vlan_new - search and possibly create an orig_node_vlan
 *  object
 * @orig_node: the originator serving the VLAN
 * @vid: the VLAN identifier
 *
 * Returns NULL in case of failure or the vlan object identified by vid and
 * belonging to orig_node otherwise. The object is created and added to the
 * list if it does not exist.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_orig_node_vlan *
batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
                          unsigned short vid)
{
        struct batadv_orig_node_vlan *vlan;

        spin_lock_bh(&orig_node->vlan_list_lock);

        /* first look if an object for this vid already exists */
        vlan = batadv_orig_node_vlan_get(orig_node, vid);
        if (vlan)
                goto out;

        vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
        if (!vlan)
                goto out;

        atomic_set(&vlan->refcount, 2);
        vlan->vid = vid;

        list_add_rcu(&vlan->list, &orig_node->vlan_list);

out:
        spin_unlock_bh(&orig_node->vlan_list_lock);

        return vlan;
}

/**
 * batadv_orig_node_vlan_free_ref - decrement the refcounter and possibly free
 *  the originator-vlan object
 * @orig_vlan: the originator-vlan object to release
 */
void batadv_orig_node_vlan_free_ref(struct batadv_orig_node_vlan *orig_vlan)
{
        if (atomic_dec_and_test(&orig_vlan->refcount))
                kfree_rcu(orig_vlan, rcu);
}

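/**
 * batadv_originator_init - initialise the originator hash table and start the
 *  periodic purge worker
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Returns 0 on success or a negative error code in case of failure.
 */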
int batadv_originator_init(struct batadv_priv *bat_priv)
{
        if (bat_priv->orig_hash)
                return 0;

        bat_priv->orig_hash = batadv_hash_new(1024);

        if (!bat_priv->orig_hash)
                goto err;

        batadv_hash_set_lock_class(bat_priv->orig_hash,
                                   &batadv_orig_hash_lock_class_key);

        INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig);
        queue_delayed_work(batadv_event_workqueue,
                           &bat_priv->orig_work,
                           msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));

        return 0;

err:
        return -ENOMEM;
}

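/**
 * batadv_neigh_node_free_ref - decrement the neighbor refcounter and possibly
 *  free the neighbor object
 * @neigh_node: the neighbor object to release
 */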
void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node)
{
        if (atomic_dec_and_test(&neigh_node->refcount))
                kfree_rcu(neigh_node, rcu);
}

/* increases the refcounter of a found router */
struct batadv_neigh_node *
batadv_orig_node_get_router(struct batadv_orig_node *orig_node)
{
        struct batadv_neigh_node *router;

        rcu_read_lock();
        router = rcu_dereference(orig_node->router);

        if (router && !atomic_inc_not_zero(&router->refcount))
                router = NULL;

        rcu_read_unlock();
        return router;
}

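/**
 * batadv_neigh_node_new - create and initialise a new neighbor object
 * @hard_iface: the interface this neighbor has been seen on
 * @neigh_addr: the MAC address of the neighbor
 *
 * Returns the newly allocated neighbor object (with an extra reference for
 * the caller) or NULL in case of an allocation failure.
 */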
struct batadv_neigh_node *
batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
                      const uint8_t *neigh_addr)
{
        struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
        struct batadv_neigh_node *neigh_node;

        neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
        if (!neigh_node)
                goto out;

        INIT_HLIST_NODE(&neigh_node->list);

        memcpy(neigh_node->addr, neigh_addr, ETH_ALEN);
        spin_lock_init(&neigh_node->lq_update_lock);

        /* extra reference for return */
        atomic_set(&neigh_node->refcount, 2);

        batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                   "Creating new neighbor %pM on interface %s\n", neigh_addr,
                   hard_iface->net_dev->name);

out:
        return neigh_node;
}

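/**
 * batadv_orig_node_free_rcu - free the originator once all references have
 *  been dropped and the RCU grace period has elapsed
 * @rcu: the rcu_head embedded in the originator to free
 *
 * Releases all neighbors, bonding members, network coding nodes, fragment
 * buffers and global translation table entries associated with the originator
 * before freeing it.
 */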
static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
{
        struct hlist_node *node_tmp;
        struct batadv_neigh_node *neigh_node, *tmp_neigh_node;
        struct batadv_orig_node *orig_node;

        orig_node = container_of(rcu, struct batadv_orig_node, rcu);

        spin_lock_bh(&orig_node->neigh_list_lock);

        /* for all bonding members ... */
        list_for_each_entry_safe(neigh_node, tmp_neigh_node,
                                 &orig_node->bond_list, bonding_list) {
                list_del_rcu(&neigh_node->bonding_list);
                batadv_neigh_node_free_ref(neigh_node);
        }

        /* for all neighbors towards this originator ... */
        hlist_for_each_entry_safe(neigh_node, node_tmp,
                                  &orig_node->neigh_list, list) {
                hlist_del_rcu(&neigh_node->list);
                batadv_neigh_node_free_ref(neigh_node);
        }

        spin_unlock_bh(&orig_node->neigh_list_lock);

        /* Free nc_nodes */
        batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);

        batadv_frag_purge_orig(orig_node, NULL);

        batadv_tt_global_del_orig(orig_node->bat_priv, orig_node,
                                  "originator timed out");

        kfree(orig_node->tt_buff);
        kfree(orig_node->bcast_own);
        kfree(orig_node->bcast_own_sum);
        kfree(orig_node);
}

/**
 * batadv_orig_node_free_ref - decrement the orig node refcounter and possibly
 * schedule an rcu callback for freeing it
 * @orig_node: the orig node to free
 */
void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
{
        if (atomic_dec_and_test(&orig_node->refcount))
                call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
}

/**
 * batadv_orig_node_free_ref_now - decrement the orig node refcounter and
 * possibly free it (without rcu callback)
 * @orig_node: the orig node to free
 */
void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node)
{
        if (atomic_dec_and_test(&orig_node->refcount))
                batadv_orig_node_free_rcu(&orig_node->rcu);
}

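/**
 * batadv_originator_free - release all originators and destroy the originator
 *  hash table
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Cancels the pending purge work, removes every originator from the hash
 * table and drops the references held by the table.
 */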
void batadv_originator_free(struct batadv_priv *bat_priv)
{
        struct batadv_hashtable *hash = bat_priv->orig_hash;
        struct hlist_node *node_tmp;
        struct hlist_head *head;
        spinlock_t *list_lock; /* spinlock to protect write access */
        struct batadv_orig_node *orig_node;
        uint32_t i;

        if (!hash)
                return;

        cancel_delayed_work_sync(&bat_priv->orig_work);

        bat_priv->orig_hash = NULL;

        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
                list_lock = &hash->list_locks[i];

                spin_lock_bh(list_lock);
                hlist_for_each_entry_safe(orig_node, node_tmp,
                                          head, hash_entry) {
                        hlist_del_rcu(&orig_node->hash_entry);
                        batadv_orig_node_free_ref(orig_node);
                }
                spin_unlock_bh(list_lock);
        }

        batadv_hash_destroy(hash);
}

/**
 * batadv_get_orig_node - find or create an originator entry for the given
 *  address
 * @bat_priv: the bat priv with all the soft interface information
 * @addr: the originator MAC address
 *
 * Returns the existing originator entry for addr or a newly created one if it
 * does not exist yet. Returns NULL in case of failure.
 */
struct batadv_orig_node *batadv_get_orig_node(struct batadv_priv *bat_priv,
                                              const uint8_t *addr)
{
        struct batadv_orig_node *orig_node;
        struct batadv_orig_node_vlan *vlan;
        int size, i;
        int hash_added;
        unsigned long reset_time;

        orig_node = batadv_orig_hash_find(bat_priv, addr);
        if (orig_node)
                return orig_node;

        batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                   "Creating new originator: %pM\n", addr);

        orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC);
        if (!orig_node)
                return NULL;

        INIT_HLIST_HEAD(&orig_node->neigh_list);
        INIT_LIST_HEAD(&orig_node->bond_list);
        INIT_LIST_HEAD(&orig_node->vlan_list);
        spin_lock_init(&orig_node->ogm_cnt_lock);
        spin_lock_init(&orig_node->bcast_seqno_lock);
        spin_lock_init(&orig_node->neigh_list_lock);
        spin_lock_init(&orig_node->tt_buff_lock);
        spin_lock_init(&orig_node->tt_lock);
        spin_lock_init(&orig_node->vlan_list_lock);

        batadv_nc_init_orig(orig_node);

        /* extra reference for return */
        atomic_set(&orig_node->refcount, 2);

        orig_node->tt_initialised = false;
        orig_node->bat_priv = bat_priv;
        memcpy(orig_node->orig, addr, ETH_ALEN);
        batadv_dat_init_orig_node_addr(orig_node);
        orig_node->router = NULL;
        atomic_set(&orig_node->last_ttvn, 0);
        orig_node->tt_buff = NULL;
        orig_node->tt_buff_len = 0;
        reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
        orig_node->bcast_seqno_reset = reset_time;
        orig_node->batman_seqno_reset = reset_time;

        atomic_set(&orig_node->bond_candidates, 0);

        /* create a vlan object for the "untagged" LAN */
        vlan = batadv_orig_node_vlan_new(orig_node, BATADV_NO_FLAGS);
        if (!vlan)
                goto free_orig_node;
        /* batadv_orig_node_vlan_new() increases the refcounter.
         * Immediately release vlan since it is not needed anymore in this
         * context
         */
        batadv_orig_node_vlan_free_ref(vlan);

        size = bat_priv->num_ifaces * sizeof(unsigned long) * BATADV_NUM_WORDS;

        orig_node->bcast_own = kzalloc(size, GFP_ATOMIC);
        if (!orig_node->bcast_own)
                goto free_vlan;

        size = bat_priv->num_ifaces * sizeof(uint8_t);
        orig_node->bcast_own_sum = kzalloc(size, GFP_ATOMIC);

        for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
                INIT_HLIST_HEAD(&orig_node->fragments[i].head);
                spin_lock_init(&orig_node->fragments[i].lock);
                orig_node->fragments[i].size = 0;
        }

        if (!orig_node->bcast_own_sum)
                goto free_bcast_own;

        hash_added = batadv_hash_add(bat_priv->orig_hash, batadv_compare_orig,
                                     batadv_choose_orig, orig_node,
                                     &orig_node->hash_entry);
        if (hash_added != 0)
                goto free_bcast_own_sum;

        return orig_node;
free_bcast_own_sum:
        kfree(orig_node->bcast_own_sum);
free_bcast_own:
        kfree(orig_node->bcast_own);
free_vlan:
        batadv_orig_node_vlan_free_ref(vlan);
free_orig_node:
        kfree(orig_node);
        return NULL;
}

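/**
 * batadv_purge_orig_neighbors - remove outdated neighbors of an originator
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: the originator whose neighbor list is purged
 * @best_neigh_node: pointer used to return the best remaining neighbor
 *
 * Removes neighbors that have timed out or whose incoming interface is no
 * longer usable and remembers the remaining neighbor with the highest tq_avg
 * in *best_neigh_node.
 *
 * Returns true if at least one neighbor was purged, false otherwise.
 */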
static bool
batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
                            struct batadv_orig_node *orig_node,
                            struct batadv_neigh_node **best_neigh_node)
{
        struct hlist_node *node_tmp;
        struct batadv_neigh_node *neigh_node;
        bool neigh_purged = false;
        unsigned long last_seen;
        struct batadv_hard_iface *if_incoming;

        *best_neigh_node = NULL;

        spin_lock_bh(&orig_node->neigh_list_lock);

        /* for all neighbors towards this originator ... */
        hlist_for_each_entry_safe(neigh_node, node_tmp,
                                  &orig_node->neigh_list, list) {
                last_seen = neigh_node->last_seen;
                if_incoming = neigh_node->if_incoming;

                if ((batadv_has_timed_out(last_seen, BATADV_PURGE_TIMEOUT)) ||
                    (if_incoming->if_status == BATADV_IF_INACTIVE) ||
                    (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
                    (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED)) {
                        if ((if_incoming->if_status == BATADV_IF_INACTIVE) ||
                            (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
                            (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED))
                                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                                           "neighbor purge: originator %pM, neighbor: %pM, iface: %s\n",
                                           orig_node->orig, neigh_node->addr,
                                           if_incoming->net_dev->name);
                        else
                                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                                           "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n",
                                           orig_node->orig, neigh_node->addr,
                                           jiffies_to_msecs(last_seen));

                        neigh_purged = true;

                        hlist_del_rcu(&neigh_node->list);
                        batadv_bonding_candidate_del(orig_node, neigh_node);
                        batadv_neigh_node_free_ref(neigh_node);
                } else {
                        if ((!*best_neigh_node) ||
                            (neigh_node->tq_avg > (*best_neigh_node)->tq_avg))
                                *best_neigh_node = neigh_node;
                }
        }

        spin_unlock_bh(&orig_node->neigh_list_lock);
        return neigh_purged;
}

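/**
 * batadv_purge_orig_node - check whether an originator has to be removed
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: the originator to check
 *
 * Purges outdated neighbors and, if needed, switches the route to the best
 * remaining neighbor.
 *
 * Returns true if the originator itself has timed out and should be deleted,
 * false otherwise.
 */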
static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
                                   struct batadv_orig_node *orig_node)
{
        struct batadv_neigh_node *best_neigh_node;

        if (batadv_has_timed_out(orig_node->last_seen,
                                 2 * BATADV_PURGE_TIMEOUT)) {
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                           "Originator timeout: originator %pM, last_seen %u\n",
                           orig_node->orig,
                           jiffies_to_msecs(orig_node->last_seen));
                return true;
        } else {
                if (batadv_purge_orig_neighbors(bat_priv, orig_node,
                                                &best_neigh_node))
                        batadv_update_route(bat_priv, orig_node,
                                            best_neigh_node);
        }

        return false;
}

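/**
 * _batadv_purge_orig - remove all timed out originators from the hash table
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Walks the originator hash table, deletes originators that have expired,
 * purges their fragment buffers and finally updates the gateway selection.
 */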
static void _batadv_purge_orig(struct batadv_priv *bat_priv)
{
        struct batadv_hashtable *hash = bat_priv->orig_hash;
        struct hlist_node *node_tmp;
        struct hlist_head *head;
        spinlock_t *list_lock; /* spinlock to protect write access */
        struct batadv_orig_node *orig_node;
        uint32_t i;

        if (!hash)
                return;

        /* for all origins... */
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
                list_lock = &hash->list_locks[i];

                spin_lock_bh(list_lock);
                hlist_for_each_entry_safe(orig_node, node_tmp,
                                          head, hash_entry) {
                        if (batadv_purge_orig_node(bat_priv, orig_node)) {
                                batadv_gw_node_delete(bat_priv, orig_node);
                                hlist_del_rcu(&orig_node->hash_entry);
                                batadv_orig_node_free_ref(orig_node);
                                continue;
                        }

                        batadv_frag_purge_orig(orig_node,
                                               batadv_frag_check_entry);
                }
                spin_unlock_bh(list_lock);
        }

        batadv_gw_node_purge(bat_priv);
        batadv_gw_election(bat_priv);
}

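/**
 * batadv_purge_orig - periodic originator purge worker
 * @work: the work item embedded in bat_priv->orig_work
 *
 * Purges the originator table and re-arms itself to run again after
 * BATADV_ORIG_WORK_PERIOD milliseconds.
 */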
static void batadv_purge_orig(struct work_struct *work)
{
        struct delayed_work *delayed_work;
        struct batadv_priv *bat_priv;

        delayed_work = container_of(work, struct delayed_work, work);
        bat_priv = container_of(delayed_work, struct batadv_priv, orig_work);
        _batadv_purge_orig(bat_priv);
        queue_delayed_work(batadv_event_workqueue,
                           &bat_priv->orig_work,
                           msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
}

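/**
 * batadv_purge_orig_ref - immediately trigger an originator table purge
 * @bat_priv: the bat priv with all the soft interface information
 */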
void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
{
        _batadv_purge_orig(bat_priv);
}

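/**
 * batadv_orig_seq_print_text - print the originator table to a debugfs seq
 *  file
 * @seq: the seq file descriptor
 * @offset: not used
 *
 * Returns 0 in all cases.
 */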
int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
{
        struct net_device *net_dev = (struct net_device *)seq->private;
        struct batadv_priv *bat_priv = netdev_priv(net_dev);
        struct batadv_hashtable *hash = bat_priv->orig_hash;
        struct hlist_head *head;
        struct batadv_hard_iface *primary_if;
        struct batadv_orig_node *orig_node;
        struct batadv_neigh_node *neigh_node, *neigh_node_tmp;
        int batman_count = 0;
        int last_seen_secs;
        int last_seen_msecs;
        unsigned long last_seen_jiffies;
        uint32_t i;

        primary_if = batadv_seq_print_text_primary_if_get(seq);
        if (!primary_if)
                goto out;

        seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n",
                   BATADV_SOURCE_VERSION, primary_if->net_dev->name,
                   primary_if->net_dev->dev_addr, net_dev->name);
        seq_printf(seq, "  %-15s %s (%s/%i) %17s [%10s]: %20s ...\n",
                   "Originator", "last-seen", "#", BATADV_TQ_MAX_VALUE,
                   "Nexthop", "outgoingIF", "Potential nexthops");

        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];

                rcu_read_lock();
                hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
                        neigh_node = batadv_orig_node_get_router(orig_node);
                        if (!neigh_node)
                                continue;

                        if (neigh_node->tq_avg == 0)
                                goto next;

                        last_seen_jiffies = jiffies - orig_node->last_seen;
                        last_seen_msecs = jiffies_to_msecs(last_seen_jiffies);
                        last_seen_secs = last_seen_msecs / 1000;
                        last_seen_msecs = last_seen_msecs % 1000;

                        seq_printf(seq, "%pM %4i.%03is   (%3i) %pM [%10s]:",
                                   orig_node->orig, last_seen_secs,
                                   last_seen_msecs, neigh_node->tq_avg,
                                   neigh_node->addr,
                                   neigh_node->if_incoming->net_dev->name);

                        hlist_for_each_entry_rcu(neigh_node_tmp,
                                                 &orig_node->neigh_list, list) {
                                seq_printf(seq, " %pM (%3i)",
                                           neigh_node_tmp->addr,
                                           neigh_node_tmp->tq_avg);
                        }

                        seq_puts(seq, "\n");
                        batman_count++;

next:
                        batadv_neigh_node_free_ref(neigh_node);
                }
                rcu_read_unlock();
        }

        if (batman_count == 0)
                seq_puts(seq, "No batman nodes in range ...\n");

out:
        if (primary_if)
                batadv_hardif_free_ref(primary_if);
        return 0;
}

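/**
 * batadv_orig_node_add_if - grow the per-interface OGM counters of an
 *  originator
 * @orig_node: the originator to resize
 * @max_if_num: the new total number of interfaces
 *
 * Reallocates the bcast_own and bcast_own_sum buffers so they can hold the
 * counters for one additional interface.
 *
 * Returns 0 on success or -ENOMEM in case of an allocation failure.
 */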
static int batadv_orig_node_add_if(struct batadv_orig_node *orig_node,
                                   int max_if_num)
{
        void *data_ptr;
        size_t data_size, old_size;

        data_size = max_if_num * sizeof(unsigned long) * BATADV_NUM_WORDS;
        old_size = (max_if_num - 1) * sizeof(unsigned long) * BATADV_NUM_WORDS;
        data_ptr = kmalloc(data_size, GFP_ATOMIC);
        if (!data_ptr)
                return -ENOMEM;

        memcpy(data_ptr, orig_node->bcast_own, old_size);
        kfree(orig_node->bcast_own);
        orig_node->bcast_own = data_ptr;

        data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
        if (!data_ptr)
                return -ENOMEM;

        memcpy(data_ptr, orig_node->bcast_own_sum,
               (max_if_num - 1) * sizeof(uint8_t));
        kfree(orig_node->bcast_own_sum);
        orig_node->bcast_own_sum = data_ptr;

        return 0;
}

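/**
 * batadv_orig_hash_add_if - resize all originators after a new interface has
 *  been added
 * @hard_iface: the newly added hard interface
 * @max_if_num: the new total number of interfaces
 *
 * Returns 0 on success or -ENOMEM in case of an allocation failure.
 */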
int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
                            int max_if_num)
{
        struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
        struct batadv_hashtable *hash = bat_priv->orig_hash;
        struct hlist_head *head;
        struct batadv_orig_node *orig_node;
        uint32_t i;
        int ret;

        /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
         * if_num
         */
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];

                rcu_read_lock();
                hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
                        spin_lock_bh(&orig_node->ogm_cnt_lock);
                        ret = batadv_orig_node_add_if(orig_node, max_if_num);
                        spin_unlock_bh(&orig_node->ogm_cnt_lock);

                        if (ret == -ENOMEM)
                                goto err;
                }
                rcu_read_unlock();
        }

        return 0;

err:
        rcu_read_unlock();
        return -ENOMEM;
}

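/**
 * batadv_orig_node_del_if - shrink the per-interface OGM counters of an
 *  originator
 * @orig_node: the originator to resize
 * @max_if_num: the remaining number of interfaces
 * @del_if_num: the index of the interface being removed
 *
 * Reallocates the bcast_own and bcast_own_sum buffers without the entries of
 * the removed interface, or frees them entirely if no interface is left.
 *
 * Returns 0 on success or -ENOMEM in case of an allocation failure.
 */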
static int batadv_orig_node_del_if(struct batadv_orig_node *orig_node,
                                   int max_if_num, int del_if_num)
{
        void *data_ptr = NULL;
        int chunk_size;

        /* last interface was removed */
        if (max_if_num == 0)
                goto free_bcast_own;

        chunk_size = sizeof(unsigned long) * BATADV_NUM_WORDS;
        data_ptr = kmalloc(max_if_num * chunk_size, GFP_ATOMIC);
        if (!data_ptr)
                return -ENOMEM;

        /* copy first part */
        memcpy(data_ptr, orig_node->bcast_own, del_if_num * chunk_size);

        /* copy second part */
        memcpy((char *)data_ptr + del_if_num * chunk_size,
               orig_node->bcast_own + ((del_if_num + 1) * chunk_size),
               (max_if_num - del_if_num) * chunk_size);

free_bcast_own:
        kfree(orig_node->bcast_own);
        orig_node->bcast_own = data_ptr;

        if (max_if_num == 0)
                goto free_own_sum;

        data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
        if (!data_ptr)
                return -ENOMEM;

        memcpy(data_ptr, orig_node->bcast_own_sum,
               del_if_num * sizeof(uint8_t));

        memcpy((char *)data_ptr + del_if_num * sizeof(uint8_t),
               orig_node->bcast_own_sum + ((del_if_num + 1) * sizeof(uint8_t)),
               (max_if_num - del_if_num) * sizeof(uint8_t));

free_own_sum:
        kfree(orig_node->bcast_own_sum);
        orig_node->bcast_own_sum = data_ptr;

        return 0;
}

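/**
 * batadv_orig_hash_del_if - resize all originators after an interface has
 *  been removed
 * @hard_iface: the hard interface being removed
 * @max_if_num: the remaining number of interfaces
 *
 * Shrinks the per-interface counters of every originator and renumbers the
 * remaining interfaces attached to the same soft interface.
 *
 * Returns 0 on success or -ENOMEM in case of an allocation failure.
 */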
int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
                            int max_if_num)
{
        struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
        struct batadv_hashtable *hash = bat_priv->orig_hash;
        struct hlist_head *head;
        struct batadv_hard_iface *hard_iface_tmp;
        struct batadv_orig_node *orig_node;
        uint32_t i;
        int ret;

        /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
         * if_num
         */
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];

                rcu_read_lock();
                hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
                        spin_lock_bh(&orig_node->ogm_cnt_lock);
                        ret = batadv_orig_node_del_if(orig_node, max_if_num,
                                                      hard_iface->if_num);
                        spin_unlock_bh(&orig_node->ogm_cnt_lock);

                        if (ret == -ENOMEM)
                                goto err;
                }
                rcu_read_unlock();
        }

        /* renumber remaining batman interfaces _inside_ of orig_hash_lock */
        rcu_read_lock();
        list_for_each_entry_rcu(hard_iface_tmp, &batadv_hardif_list, list) {
                if (hard_iface_tmp->if_status == BATADV_IF_NOT_IN_USE)
                        continue;

                if (hard_iface == hard_iface_tmp)
                        continue;

                if (hard_iface->soft_iface != hard_iface_tmp->soft_iface)
                        continue;

                if (hard_iface_tmp->if_num > hard_iface->if_num)
                        hard_iface_tmp->if_num--;
        }
        rcu_read_unlock();

        hard_iface->if_num = -1;
        return 0;

err:
        rcu_read_unlock();
        return -ENOMEM;
}