/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Author:     Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "wme.h"
#include "ieee80211_i.h"
#include "mesh.h"

/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
#define INIT_PATHS_SIZE_ORDER   2

/* Keep the mean chain length below this constant */
#define MEAN_CHAIN_LEN          2

#define MPATH_EXPIRED(mpath) ((mpath->flags & MESH_PATH_ACTIVE) && \
                                time_after(jiffies, mpath->exp_time) && \
                                !(mpath->flags & MESH_PATH_FIXED))

struct mpath_node {
        struct hlist_node list;
        struct rcu_head rcu;
        /* This indirection allows two different tables to point to the same
         * mesh_path structure, useful when resizing
         */
        struct mesh_path *mpath;
};

static struct mesh_table __rcu *mesh_paths;
static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */

int mesh_paths_generation;

/* This lock will have the grow table function as writer and add / delete nodes
 * as readers. RCU provides sufficient protection only when reading the table
 * (i.e. doing lookups).  Adding or removing nodes requires that we take the
 * read lock or we risk operating on an old table.  The write lock is only
 * needed when modifying the number of buckets in a table.
 */
static DEFINE_RWLOCK(pathtbl_resize_lock);

static inline struct mesh_table *resize_dereference_mesh_paths(void)
{
        return rcu_dereference_protected(mesh_paths,
                lockdep_is_held(&pathtbl_resize_lock));
}

static inline struct mesh_table *resize_dereference_mpp_paths(void)
{
        return rcu_dereference_protected(mpp_paths,
                lockdep_is_held(&pathtbl_resize_lock));
}

/*
 * CAREFUL -- "tbl" must not be an expression,
 * in particular not an rcu_dereference(), since
 * it's used twice. So it is illegal to do
 *      for_each_mesh_entry(rcu_dereference(...), ...)
 */
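/*
 * A correct caller (sketch, matching the pattern used by the iterators
 * below, e.g. mesh_plink_broken()) dereferences the RCU pointer into a
 * local variable first and passes that:
 *
 *      tbl = rcu_dereference(mesh_paths);
 *      for_each_mesh_entry(tbl, p, node, i)
 *              mpath = node->mpath;
 */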
#define for_each_mesh_entry(tbl, p, node, i) \
        for (i = 0; i <= tbl->hash_mask; i++) \
                hlist_for_each_entry_rcu(node, p, &tbl->hash_buckets[i], list)

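/*
 * mesh_table_alloc - allocate and initialize a new hash table
 *
 * @size_order: log2 of the number of buckets to allocate
 *
 * Allocates the bucket array and per-bucket write locks, seeds the hash
 * with random bytes and zeroes the entry count.  Returns NULL if any
 * allocation fails.
 */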
static struct mesh_table *mesh_table_alloc(int size_order)
{
        int i;
        struct mesh_table *newtbl;

        newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
        if (!newtbl)
                return NULL;

        newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
                        (1 << size_order), GFP_ATOMIC);

        if (!newtbl->hash_buckets) {
                kfree(newtbl);
                return NULL;
        }

        newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
                        (1 << size_order), GFP_ATOMIC);
        if (!newtbl->hashwlock) {
                kfree(newtbl->hash_buckets);
                kfree(newtbl);
                return NULL;
        }

        newtbl->size_order = size_order;
        newtbl->hash_mask = (1 << size_order) - 1;
        atomic_set(&newtbl->entries, 0);
        get_random_bytes(&newtbl->hash_rnd,
                        sizeof(newtbl->hash_rnd));
        for (i = 0; i <= newtbl->hash_mask; i++)
                spin_lock_init(&newtbl->hashwlock[i]);
        spin_lock_init(&newtbl->gates_lock);

        return newtbl;
}

static void __mesh_table_free(struct mesh_table *tbl)
{
        kfree(tbl->hash_buckets);
        kfree(tbl->hashwlock);
        kfree(tbl);
}

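/*
 * mesh_table_free - empty and free a hash table
 *
 * @tbl: table to free
 * @free_leafs: when true, also free the mesh_path structures the nodes
 * point to, along with the known_gates list
 *
 * Walks every bucket under its write lock and releases each node via the
 * table's free_node hook.
 */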
static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
{
        struct hlist_head *mesh_hash;
        struct hlist_node *p, *q;
        struct mpath_node *gate;
        int i;

        mesh_hash = tbl->hash_buckets;
        for (i = 0; i <= tbl->hash_mask; i++) {
                spin_lock_bh(&tbl->hashwlock[i]);
                hlist_for_each_safe(p, q, &mesh_hash[i]) {
                        tbl->free_node(p, free_leafs);
                        atomic_dec(&tbl->entries);
                }
                spin_unlock_bh(&tbl->hashwlock[i]);
        }
        if (free_leafs) {
                spin_lock_bh(&tbl->gates_lock);
                hlist_for_each_entry_safe(gate, p, q,
                                         tbl->known_gates, list) {
                        hlist_del(&gate->list);
                        kfree(gate);
                }
                kfree(tbl->known_gates);
                spin_unlock_bh(&tbl->gates_lock);
        }

        __mesh_table_free(tbl);
}

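/*
 * mesh_table_grow - copy all nodes from one table into a larger one
 *
 * @oldtbl: table to copy from
 * @newtbl: table to copy into, already allocated with double the number
 * of buckets
 *
 * Returns -EAGAIN if the old table is not yet full enough to be worth
 * growing, -ENOMEM if copying a node fails (the partial copy is undone),
 * and 0 on success.
 */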
static int mesh_table_grow(struct mesh_table *oldtbl,
                           struct mesh_table *newtbl)
{
        struct hlist_head *oldhash;
        struct hlist_node *p, *q;
        int i;

        if (atomic_read(&oldtbl->entries)
                        < oldtbl->mean_chain_len * (oldtbl->hash_mask + 1))
                return -EAGAIN;

        newtbl->free_node = oldtbl->free_node;
        newtbl->mean_chain_len = oldtbl->mean_chain_len;
        newtbl->copy_node = oldtbl->copy_node;
        newtbl->known_gates = oldtbl->known_gates;
        atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));

        oldhash = oldtbl->hash_buckets;
        for (i = 0; i <= oldtbl->hash_mask; i++)
                hlist_for_each(p, &oldhash[i])
                        if (oldtbl->copy_node(p, newtbl) < 0)
                                goto errcopy;

        return 0;

errcopy:
        for (i = 0; i <= newtbl->hash_mask; i++) {
                hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
                        oldtbl->free_node(p, 0);
        }
        return -ENOMEM;
}

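/*
 * mesh_table_hash - compute the bucket index for a destination address
 *
 * Mixes the last four bytes of the address with the interface index and
 * the table's random seed, so entries redistribute whenever a new table
 * is allocated.
 */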
static u32 mesh_table_hash(const u8 *addr, struct ieee80211_sub_if_data *sdata,
                           struct mesh_table *tbl)
{
        /* Use last four bytes of hw addr and interface index as hash index */
        return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex, tbl->hash_rnd)
                & tbl->hash_mask;
}

/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
        struct sk_buff *skb;
        struct ieee80211_hdr *hdr;
        unsigned long flags;

        rcu_assign_pointer(mpath->next_hop, sta);

        spin_lock_irqsave(&mpath->frame_queue.lock, flags);
        skb_queue_walk(&mpath->frame_queue, skb) {
                hdr = (struct ieee80211_hdr *) skb->data;
                memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
                memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
                ieee80211_mps_set_frame_flags(sta->sdata, sta, hdr);
        }
        spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}

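/*
 * prepare_for_gate - rewrite a queued frame so it can be sent via a gate
 *
 * Adds the Address Extension field (mesh addresses 5 and 6) if the frame
 * does not already carry one, then points the next hop (addr1) at the
 * gate's next_hop station and addr3 at the final destination.
 */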
static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
                             struct mesh_path *gate_mpath)
{
        struct ieee80211_hdr *hdr;
        struct ieee80211s_hdr *mshdr;
        int mesh_hdrlen, hdrlen;
        char *next_hop;

        hdr = (struct ieee80211_hdr *) skb->data;
        hdrlen = ieee80211_hdrlen(hdr->frame_control);
        mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);

        if (!(mshdr->flags & MESH_FLAGS_AE)) {
                /* size of the fixed part of the mesh header */
                mesh_hdrlen = 6;

                /* make room for the two extended addresses */
                skb_push(skb, 2 * ETH_ALEN);
                memmove(skb->data, hdr, hdrlen + mesh_hdrlen);

                hdr = (struct ieee80211_hdr *) skb->data;

                /* we preserve the previous mesh header and only add
                 * the new addresses */
                mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
                mshdr->flags = MESH_FLAGS_AE_A5_A6;
                memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
                memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
        }

        /* update next hop */
        hdr = (struct ieee80211_hdr *) skb->data;
        rcu_read_lock();
        next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
        memcpy(hdr->addr1, next_hop, ETH_ALEN);
        rcu_read_unlock();
        memcpy(hdr->addr2, gate_mpath->sdata->vif.addr, ETH_ALEN);
        memcpy(hdr->addr3, dst_addr, ETH_ALEN);
}

/**
 * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
 *
 * This function is used to transfer or copy frames from an unresolved mpath to
 * a gate mpath.  The function also adds the Address Extension field and
 * updates the next hop.
 *
 * If a frame already has an Address Extension field, only the next hop and
 * destination addresses are updated.
 *
 * The gate mpath must be an active mpath with a valid mpath->next_hop.
 *
 * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate)
 * @from_mpath: The failed mpath
 * @copy: When true, copy all the frames to the new mpath queue.  When false,
 * move them.
 */
static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
                                    struct mesh_path *from_mpath,
                                    bool copy)
{
        struct sk_buff *skb, *fskb, *tmp;
        struct sk_buff_head failq;
        unsigned long flags;

        BUG_ON(gate_mpath == from_mpath);
        BUG_ON(!gate_mpath->next_hop);

        __skb_queue_head_init(&failq);

        spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
        skb_queue_splice_init(&from_mpath->frame_queue, &failq);
        spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);

        skb_queue_walk_safe(&failq, fskb, tmp) {
                if (skb_queue_len(&gate_mpath->frame_queue) >=
                                  MESH_FRAME_QUEUE_LEN) {
                        mpath_dbg(gate_mpath->sdata, "mpath queue full!\n");
                        break;
                }

                skb = skb_copy(fskb, GFP_ATOMIC);
                if (WARN_ON(!skb))
                        break;

                prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
                skb_queue_tail(&gate_mpath->frame_queue, skb);

                if (copy)
                        continue;

                __skb_unlink(fskb, &failq);
                kfree_skb(fskb);
        }

        mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n",
                  gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue));

        if (!copy)
                return;

        spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
        skb_queue_splice(&failq, &from_mpath->frame_queue);
        spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
}

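/*
 * mpath_lookup - common lookup helper for both path tables
 *
 * Hashes the destination into a bucket and walks it under RCU, clearing
 * MESH_PATH_ACTIVE on the matching path if it has expired.
 */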
static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,
                                      struct ieee80211_sub_if_data *sdata)
{
        struct mesh_path *mpath;
        struct hlist_node *n;
        struct hlist_head *bucket;
        struct mpath_node *node;

        bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
        hlist_for_each_entry_rcu(node, n, bucket, list) {
                mpath = node->mpath;
                if (mpath->sdata == sdata &&
                    ether_addr_equal(dst, mpath->dst)) {
                        if (MPATH_EXPIRED(mpath)) {
                                spin_lock_bh(&mpath->state_lock);
                                mpath->flags &= ~MESH_PATH_ACTIVE;
                                spin_unlock_bh(&mpath->state_lock);
                        }
                        return mpath;
                }
        }
        return NULL;
}

/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @dst: hardware address (ETH_ALEN length) of destination
 * @sdata: local subif
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within an RCU read-side section.
 */
struct mesh_path *mesh_path_lookup(const u8 *dst,
                                   struct ieee80211_sub_if_data *sdata)
{
        return mpath_lookup(rcu_dereference(mesh_paths), dst, sdata);
}

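/*
 * mpp_path_lookup - look up a proxy path in the mesh portal path table
 *
 * Same contract as mesh_path_lookup(), but against the MPP table.
 */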
struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
        return mpath_lookup(rcu_dereference(mpp_paths), dst, sdata);
}

/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within an RCU read-side section.
 */
struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata)
{
        struct mesh_table *tbl = rcu_dereference(mesh_paths);
        struct mpath_node *node;
        struct hlist_node *p;
        int i;
        int j = 0;

        for_each_mesh_entry(tbl, p, node, i) {
                if (sdata && node->mpath->sdata != sdata)
                        continue;
                if (j++ == idx) {
                        if (MPATH_EXPIRED(node->mpath)) {
                                spin_lock_bh(&node->mpath->state_lock);
                                node->mpath->flags &= ~MESH_PATH_ACTIVE;
                                spin_unlock_bh(&node->mpath->state_lock);
                        }
                        return node->mpath;
                }
        }

        return NULL;
}

/**
 * mesh_path_add_gate - add the given mpath as a mesh gate to our path table
 * @mpath: gate path to add to table
 */
int mesh_path_add_gate(struct mesh_path *mpath)
{
        struct mesh_table *tbl;
        struct mpath_node *gate, *new_gate;
        struct hlist_node *n;
        int err;

        rcu_read_lock();
        tbl = rcu_dereference(mesh_paths);

        hlist_for_each_entry_rcu(gate, n, tbl->known_gates, list)
                if (gate->mpath == mpath) {
                        err = -EEXIST;
                        goto err_rcu;
                }

        new_gate = kzalloc(sizeof(struct mpath_node), GFP_ATOMIC);
        if (!new_gate) {
                err = -ENOMEM;
                goto err_rcu;
        }

        mpath->is_gate = true;
        mpath->sdata->u.mesh.num_gates++;
        new_gate->mpath = mpath;
        spin_lock_bh(&tbl->gates_lock);
        hlist_add_head_rcu(&new_gate->list, tbl->known_gates);
        spin_unlock_bh(&tbl->gates_lock);
        rcu_read_unlock();
        mpath_dbg(mpath->sdata,
                  "Mesh path: Recorded new gate: %pM. %d known gates\n",
                  mpath->dst, mpath->sdata->u.mesh.num_gates);
        return 0;
err_rcu:
        rcu_read_unlock();
        return err;
}

/**
 * mesh_gate_del - remove a mesh gate from the list of known gates
 * @tbl: table which holds our list of known gates
 * @mpath: gate mpath
 *
 * Returns: 0 on success
 *
 * Locking: must be called inside rcu_read_lock() section
 */
static int mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
        struct mpath_node *gate;
        struct hlist_node *p, *q;

        hlist_for_each_entry_safe(gate, p, q, tbl->known_gates, list)
                if (gate->mpath == mpath) {
                        spin_lock_bh(&tbl->gates_lock);
                        hlist_del_rcu(&gate->list);
                        kfree_rcu(gate, rcu);
                        spin_unlock_bh(&tbl->gates_lock);
                        mpath->sdata->u.mesh.num_gates--;
                        mpath->is_gate = false;
                        mpath_dbg(mpath->sdata,
                                  "Mesh path: Deleted gate: %pM. %d known gates\n",
                                  mpath->dst, mpath->sdata->u.mesh.num_gates);
                        break;
                }

        return 0;
}

/**
 * mesh_gate_num - number of gates known to this interface
 * @sdata: subif data
 */
int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
{
        return sdata->u.mesh.num_gates;
}

/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @dst: destination address of the path (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 on success
 *
 * State: the initial state of the new path is set to 0
 */
int mesh_path_add(const u8 *dst, struct ieee80211_sub_if_data *sdata)
{
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
        struct ieee80211_local *local = sdata->local;
        struct mesh_table *tbl;
        struct mesh_path *mpath, *new_mpath;
        struct mpath_node *node, *new_node;
        struct hlist_head *bucket;
        struct hlist_node *n;
        int grow = 0;
        int err = 0;
        u32 hash_idx;

        if (ether_addr_equal(dst, sdata->vif.addr))
                /* never add ourselves as neighbours */
                return -ENOTSUPP;

        if (is_multicast_ether_addr(dst))
                return -ENOTSUPP;

        if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
                return -ENOSPC;

        err = -ENOMEM;
        new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
        if (!new_mpath)
                goto err_path_alloc;

        new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
        if (!new_node)
                goto err_node_alloc;

        read_lock_bh(&pathtbl_resize_lock);
        memcpy(new_mpath->dst, dst, ETH_ALEN);
        eth_broadcast_addr(new_mpath->rann_snd_addr);
        new_mpath->is_root = false;
        new_mpath->sdata = sdata;
        new_mpath->flags = 0;
        skb_queue_head_init(&new_mpath->frame_queue);
        new_node->mpath = new_mpath;
        new_mpath->timer.data = (unsigned long) new_mpath;
        new_mpath->timer.function = mesh_path_timer;
        new_mpath->exp_time = jiffies;
        spin_lock_init(&new_mpath->state_lock);
        init_timer(&new_mpath->timer);

        tbl = resize_dereference_mesh_paths();

        hash_idx = mesh_table_hash(dst, sdata, tbl);
        bucket = &tbl->hash_buckets[hash_idx];

        spin_lock(&tbl->hashwlock[hash_idx]);

        err = -EEXIST;
        hlist_for_each_entry(node, n, bucket, list) {
                mpath = node->mpath;
                if (mpath->sdata == sdata &&
                    ether_addr_equal(dst, mpath->dst))
                        goto err_exists;
        }

        hlist_add_head_rcu(&new_node->list, bucket);
        if (atomic_inc_return(&tbl->entries) >=
            tbl->mean_chain_len * (tbl->hash_mask + 1))
                grow = 1;

        mesh_paths_generation++;

        spin_unlock(&tbl->hashwlock[hash_idx]);
        read_unlock_bh(&pathtbl_resize_lock);
        if (grow) {
                set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
                ieee80211_queue_work(&local->hw, &sdata->work);
        }
        return 0;

err_exists:
        spin_unlock(&tbl->hashwlock[hash_idx]);
        read_unlock_bh(&pathtbl_resize_lock);
        kfree(new_node);
err_node_alloc:
        kfree(new_mpath);
err_path_alloc:
        atomic_dec(&sdata->u.mesh.mpaths);
        return err;
}

static void mesh_table_free_rcu(struct rcu_head *rcu)
{
        struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head);

        mesh_table_free(tbl, false);
}

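/*
 * mesh_mpath_table_grow - double the size of the mesh path table
 *
 * Allocates a table with twice as many buckets, copies the old entries
 * over while holding the resize write lock, publishes the new table via
 * RCU and frees the old one after a grace period.
 */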
void mesh_mpath_table_grow(void)
{
        struct mesh_table *oldtbl, *newtbl;

        write_lock_bh(&pathtbl_resize_lock);
        oldtbl = resize_dereference_mesh_paths();
        newtbl = mesh_table_alloc(oldtbl->size_order + 1);
        if (!newtbl)
                goto out;
        if (mesh_table_grow(oldtbl, newtbl) < 0) {
                __mesh_table_free(newtbl);
                goto out;
        }
        rcu_assign_pointer(mesh_paths, newtbl);

        call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

 out:
        write_unlock_bh(&pathtbl_resize_lock);
}

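/*
 * mesh_mpp_table_grow - double the size of the mesh portal path table
 *
 * Identical to mesh_mpath_table_grow(), but for the MPP table.
 */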
void mesh_mpp_table_grow(void)
{
        struct mesh_table *oldtbl, *newtbl;

        write_lock_bh(&pathtbl_resize_lock);
        oldtbl = resize_dereference_mpp_paths();
        newtbl = mesh_table_alloc(oldtbl->size_order + 1);
        if (!newtbl)
                goto out;
        if (mesh_table_grow(oldtbl, newtbl) < 0) {
                __mesh_table_free(newtbl);
                goto out;
        }
        rcu_assign_pointer(mpp_paths, newtbl);
        call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

 out:
        write_unlock_bh(&pathtbl_resize_lock);
}

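/*
 * mpp_path_add - add a mesh proxy path to the MPP table
 *
 * @dst: address of the proxied destination
 * @mpp: address of the mesh portal that proxies it
 * @sdata: local subif
 *
 * Follows the same pattern as mesh_path_add(), but records which mesh
 * portal the destination sits behind instead of a next hop.
 */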
int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
{
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
        struct ieee80211_local *local = sdata->local;
        struct mesh_table *tbl;
        struct mesh_path *mpath, *new_mpath;
        struct mpath_node *node, *new_node;
        struct hlist_head *bucket;
        struct hlist_node *n;
        int grow = 0;
        int err = 0;
        u32 hash_idx;

        if (ether_addr_equal(dst, sdata->vif.addr))
                /* never add ourselves as neighbours */
                return -ENOTSUPP;

        if (is_multicast_ether_addr(dst))
                return -ENOTSUPP;

        err = -ENOMEM;
        new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
        if (!new_mpath)
                goto err_path_alloc;

        new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
        if (!new_node)
                goto err_node_alloc;

        read_lock_bh(&pathtbl_resize_lock);
        memcpy(new_mpath->dst, dst, ETH_ALEN);
        memcpy(new_mpath->mpp, mpp, ETH_ALEN);
        new_mpath->sdata = sdata;
        new_mpath->flags = 0;
        skb_queue_head_init(&new_mpath->frame_queue);
        new_node->mpath = new_mpath;
        init_timer(&new_mpath->timer);
        new_mpath->exp_time = jiffies;
        spin_lock_init(&new_mpath->state_lock);

        tbl = resize_dereference_mpp_paths();

        hash_idx = mesh_table_hash(dst, sdata, tbl);
        bucket = &tbl->hash_buckets[hash_idx];

        spin_lock(&tbl->hashwlock[hash_idx]);

        err = -EEXIST;
        hlist_for_each_entry(node, n, bucket, list) {
                mpath = node->mpath;
                if (mpath->sdata == sdata &&
                    ether_addr_equal(dst, mpath->dst))
                        goto err_exists;
        }

        hlist_add_head_rcu(&new_node->list, bucket);
        if (atomic_inc_return(&tbl->entries) >=
            tbl->mean_chain_len * (tbl->hash_mask + 1))
                grow = 1;

        spin_unlock(&tbl->hashwlock[hash_idx]);
        read_unlock_bh(&pathtbl_resize_lock);
        if (grow) {
                set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
                ieee80211_queue_work(&local->hw, &sdata->work);
        }
        return 0;

err_exists:
        spin_unlock(&tbl->hashwlock[hash_idx]);
        read_unlock_bh(&pathtbl_resize_lock);
        kfree(new_node);
err_node_alloc:
        kfree(new_mpath);
err_path_alloc:
        return err;
}

/**
 * mesh_plink_broken - deactivates paths and sends perr when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
        struct mesh_table *tbl;
        static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
        struct mesh_path *mpath;
        struct mpath_node *node;
        struct hlist_node *p;
        struct ieee80211_sub_if_data *sdata = sta->sdata;
        int i;
        __le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_DEST_UNREACHABLE);

        rcu_read_lock();
        tbl = rcu_dereference(mesh_paths);
        for_each_mesh_entry(tbl, p, node, i) {
                mpath = node->mpath;
                if (rcu_dereference(mpath->next_hop) == sta &&
                    mpath->flags & MESH_PATH_ACTIVE &&
                    !(mpath->flags & MESH_PATH_FIXED)) {
                        spin_lock_bh(&mpath->state_lock);
                        mpath->flags &= ~MESH_PATH_ACTIVE;
                        ++mpath->sn;
                        spin_unlock_bh(&mpath->state_lock);
                        mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl,
                                        mpath->dst, cpu_to_le32(mpath->sn),
                                        reason, bcast, sdata);
                }
        }
        rcu_read_unlock();
}

static void mesh_path_node_reclaim(struct rcu_head *rp)
{
        struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
        struct ieee80211_sub_if_data *sdata = node->mpath->sdata;

        del_timer_sync(&node->mpath->timer);
        atomic_dec(&sdata->u.mesh.mpaths);
        kfree(node->mpath);
        kfree(node);
}

/* needs to be called with the corresponding hashwlock taken */
static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
{
        struct mesh_path *mpath;

        mpath = node->mpath;
        spin_lock(&mpath->state_lock);
        mpath->flags |= MESH_PATH_RESOLVING;
        if (mpath->is_gate)
                mesh_gate_del(tbl, mpath);
        hlist_del_rcu(&node->list);
        call_rcu(&node->rcu, mesh_path_node_reclaim);
        spin_unlock(&mpath->state_lock);
        atomic_dec(&tbl->entries);
}

/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in an RCU read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
        struct mesh_table *tbl;
        struct mesh_path *mpath;
        struct mpath_node *node;
        struct hlist_node *p;
        int i;

        rcu_read_lock();
        read_lock_bh(&pathtbl_resize_lock);
        tbl = resize_dereference_mesh_paths();
        for_each_mesh_entry(tbl, p, node, i) {
                mpath = node->mpath;
                if (rcu_dereference(mpath->next_hop) == sta) {
                        spin_lock(&tbl->hashwlock[i]);
                        __mesh_path_del(tbl, node);
                        spin_unlock(&tbl->hashwlock[i]);
                }
        }
        read_unlock_bh(&pathtbl_resize_lock);
        rcu_read_unlock();
}

static void table_flush_by_iface(struct mesh_table *tbl,
                                 struct ieee80211_sub_if_data *sdata)
{
        struct mesh_path *mpath;
        struct mpath_node *node;
        struct hlist_node *p;
        int i;

        WARN_ON(!rcu_read_lock_held());
        for_each_mesh_entry(tbl, p, node, i) {
                mpath = node->mpath;
                if (mpath->sdata != sdata)
                        continue;
                spin_lock_bh(&tbl->hashwlock[i]);
                __mesh_path_del(tbl, node);
                spin_unlock_bh(&tbl->hashwlock[i]);
        }
}

/**
 * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
 *
 * This function deletes both mesh paths and mesh portal paths.
 *
 * @sdata: interface data to match
 */
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
        struct mesh_table *tbl;

        rcu_read_lock();
        read_lock_bh(&pathtbl_resize_lock);
        tbl = resize_dereference_mesh_paths();
        table_flush_by_iface(tbl, sdata);
        tbl = resize_dereference_mpp_paths();
        table_flush_by_iface(tbl, sdata);
        read_unlock_bh(&pathtbl_resize_lock);
        rcu_read_unlock();
}

/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: dst address (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 if successful
 */
int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
{
        struct mesh_table *tbl;
        struct mesh_path *mpath;
        struct mpath_node *node;
        struct hlist_head *bucket;
        struct hlist_node *n;
        int hash_idx;
        int err = 0;

        read_lock_bh(&pathtbl_resize_lock);
        tbl = resize_dereference_mesh_paths();
        hash_idx = mesh_table_hash(addr, sdata, tbl);
        bucket = &tbl->hash_buckets[hash_idx];

        spin_lock(&tbl->hashwlock[hash_idx]);
        hlist_for_each_entry(node, n, bucket, list) {
                mpath = node->mpath;
                if (mpath->sdata == sdata &&
                    ether_addr_equal(addr, mpath->dst)) {
                        __mesh_path_del(tbl, node);
                        goto enddel;
                }
        }

        err = -ENXIO;
enddel:
        mesh_paths_generation++;
        spin_unlock(&tbl->hashwlock[hash_idx]);
        read_unlock_bh(&pathtbl_resize_lock);
        return err;
}

/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
        if (mpath->flags & MESH_PATH_ACTIVE)
                ieee80211_add_pending_skbs(mpath->sdata->local,
                                &mpath->frame_queue);
}

/**
 * mesh_path_send_to_gates - sends pending frames to all known mesh gates
 *
 * @mpath: mesh path whose queue will be emptied
 *
 * If there is only one gate, the frames are transferred from the failed mpath
 * queue to that gate's queue.  If there is more than one gate, the frames
 * are copied from each gate to the next.  After frames are copied, the
 * mpath queues are emptied onto the transmission queue.
 */
int mesh_path_send_to_gates(struct mesh_path *mpath)
{
        struct ieee80211_sub_if_data *sdata = mpath->sdata;
        struct hlist_node *n;
        struct mesh_table *tbl;
        struct mesh_path *from_mpath = mpath;
        struct mpath_node *gate = NULL;
        bool copy = false;
        struct hlist_head *known_gates;

        rcu_read_lock();
        tbl = rcu_dereference(mesh_paths);
        known_gates = tbl->known_gates;
        rcu_read_unlock();

        if (!known_gates)
                return -EHOSTUNREACH;

        hlist_for_each_entry_rcu(gate, n, known_gates, list) {
                if (gate->mpath->sdata != sdata)
                        continue;

                if (gate->mpath->flags & MESH_PATH_ACTIVE) {
                        mpath_dbg(sdata, "Forwarding to %pM\n", gate->mpath->dst);
                        mesh_path_move_to_queue(gate->mpath, from_mpath, copy);
                        from_mpath = gate->mpath;
                        copy = true;
                } else {
                        mpath_dbg(sdata,
                                  "Not forwarding %p (flags %#x)\n",
                                  gate->mpath, gate->mpath->flags);
                }
        }

        hlist_for_each_entry_rcu(gate, n, known_gates, list)
                if (gate->mpath->sdata == sdata) {
                        mpath_dbg(sdata, "Sending to %pM\n", gate->mpath->dst);
                        mesh_path_tx_pending(gate->mpath);
                }

        return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
}

/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @skb: frame to discard
 * @sdata: network subif the frame was to be sent through
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_discard_frame(struct sk_buff *skb,
                             struct ieee80211_sub_if_data *sdata)
{
        kfree_skb(skb);
        sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
        struct sk_buff *skb;

        while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
                mesh_path_discard_frame(skb, mpath->sdata);
}

/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: this function must NOT be called with mpath->state_lock held;
 * it takes the lock itself.
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
        spin_lock_bh(&mpath->state_lock);
        mesh_path_assign_nexthop(mpath, next_hop);
        mpath->sn = 0xffff;
        mpath->metric = 0;
        mpath->hop_count = 0;
        mpath->exp_time = 0;
        mpath->flags |= MESH_PATH_FIXED;
        mesh_path_activate(mpath);
        spin_unlock_bh(&mpath->state_lock);
        mesh_path_tx_pending(mpath);
}

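/*
 * mesh_path_node_free - free_node hook for the path tables
 *
 * Unlinks the node from its bucket; when @free_leafs is true the
 * mesh_path itself is freed too (after stopping its timer).
 */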
static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
{
        struct mesh_path *mpath;
        struct mpath_node *node = hlist_entry(p, struct mpath_node, list);

        mpath = node->mpath;
        hlist_del_rcu(p);
        if (free_leafs) {
                del_timer_sync(&mpath->timer);
                kfree(mpath);
        }
        kfree(node);
}

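/*
 * mesh_path_node_copy - copy_node hook used when growing a table
 *
 * Allocates a fresh mpath_node pointing at the same mesh_path and hashes
 * it into @newtbl; the mesh_path itself is shared between tables.
 */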
static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
{
        struct mesh_path *mpath;
        struct mpath_node *node, *new_node;
        u32 hash_idx;

        new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
        if (new_node == NULL)
                return -ENOMEM;

        node = hlist_entry(p, struct mpath_node, list);
        mpath = node->mpath;
        new_node->mpath = mpath;
        hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl);
        hlist_add_head(&new_node->list,
                        &newtbl->hash_buckets[hash_idx]);
        return 0;
}

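/*
 * mesh_pathtbl_init - allocate the initial mesh and MPP path tables
 *
 * Returns 0 on success or -ENOMEM; no locking is needed because this
 * runs before the tables are published.
 */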
int mesh_pathtbl_init(void)
{
        struct mesh_table *tbl_path, *tbl_mpp;
        int ret;

        tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
        if (!tbl_path)
                return -ENOMEM;
        tbl_path->free_node = &mesh_path_node_free;
        tbl_path->copy_node = &mesh_path_node_copy;
        tbl_path->mean_chain_len = MEAN_CHAIN_LEN;
        tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
        if (!tbl_path->known_gates) {
                ret = -ENOMEM;
                goto free_path;
        }
        INIT_HLIST_HEAD(tbl_path->known_gates);

        tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
        if (!tbl_mpp) {
                ret = -ENOMEM;
                goto free_path;
        }
        tbl_mpp->free_node = &mesh_path_node_free;
        tbl_mpp->copy_node = &mesh_path_node_copy;
        tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN;
        tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
        if (!tbl_mpp->known_gates) {
                ret = -ENOMEM;
                goto free_mpp;
        }
        INIT_HLIST_HEAD(tbl_mpp->known_gates);

        /* Need no locking since this is during init */
        RCU_INIT_POINTER(mesh_paths, tbl_path);
        RCU_INIT_POINTER(mpp_paths, tbl_mpp);

        return 0;

free_mpp:
        mesh_table_free(tbl_mpp, true);
free_path:
        mesh_table_free(tbl_path, true);
        return ret;
}

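/*
 * mesh_path_expire - delete stale paths belonging to an interface
 *
 * Removes every path on @sdata that is neither being resolved nor fixed
 * and whose expiry time plus MESH_PATH_EXPIRE has passed.
 */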
void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
        struct mesh_table *tbl;
        struct mesh_path *mpath;
        struct mpath_node *node;
        struct hlist_node *p;
        int i;

        rcu_read_lock();
        tbl = rcu_dereference(mesh_paths);
        for_each_mesh_entry(tbl, p, node, i) {
                if (node->mpath->sdata != sdata)
                        continue;
                mpath = node->mpath;
                if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
                    (!(mpath->flags & MESH_PATH_FIXED)) &&
                     time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
                        mesh_path_del(mpath->dst, mpath->sdata);
        }
        rcu_read_unlock();
}

void mesh_pathtbl_unregister(void)
{
        /* no need for locking during exit path */
        mesh_table_free(rcu_dereference_protected(mesh_paths, 1), true);
        mesh_table_free(rcu_dereference_protected(mpp_paths, 1), true);
}