/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Author:     Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "wme.h"
#include "ieee80211_i.h"
#include "mesh.h"
/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
#define INIT_PATHS_SIZE_ORDER	2

/* Keep the mean chain length below this constant */
#define MEAN_CHAIN_LEN		2

#define MPATH_EXPIRED(mpath) ((mpath->flags & MESH_PATH_ACTIVE) && \
				time_after(jiffies, mpath->exp_time) && \
				!(mpath->flags & MESH_PATH_FIXED))
struct mpath_node {
	struct hlist_node list;
	struct rcu_head rcu;
	/* This indirection allows two different tables to point to the same
	 * mesh_path structure, useful when resizing
	 */
	struct mesh_path *mpath;
};
static struct mesh_table __rcu *mesh_paths;
static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */

int mesh_paths_generation;
/* This lock will have the grow table function as writer and add / delete nodes
 * as readers. RCU provides sufficient protection only when reading the table
 * (i.e. doing lookups). Adding or removing nodes requires we take the read
 * lock or we risk operating on an old table. The write lock is only needed
 * when modifying the number of buckets of a table.
 */
static DEFINE_RWLOCK(pathtbl_resize_lock);
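
/*
 * Example (illustrative sketch, not a call site from this file): adding or
 * removing a node is expected to take the read side of the resize lock plus
 * the per-bucket spinlock:
 *
 *	read_lock_bh(&pathtbl_resize_lock);
 *	tbl = resize_dereference_mesh_paths();
 *	spin_lock(&tbl->hashwlock[hash_idx]);
 *	...add or remove an mpath_node in tbl...
 *	spin_unlock(&tbl->hashwlock[hash_idx]);
 *	read_unlock_bh(&pathtbl_resize_lock);
 *
 * Only the table-grow functions below take the write side.
 */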
static inline struct mesh_table *resize_dereference_mesh_paths(void)
{
	return rcu_dereference_protected(mesh_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}

static inline struct mesh_table *resize_dereference_mpp_paths(void)
{
	return rcu_dereference_protected(mpp_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}
/*
 * CAREFUL -- "tbl" must not be an expression,
 * in particular not an rcu_dereference(), since
 * it's used twice. So it is illegal to do
 *	for_each_mesh_entry(rcu_dereference(...), ...)
 */
#define for_each_mesh_entry(tbl, p, node, i) \
	for (i = 0; i <= tbl->hash_mask; i++) \
		hlist_for_each_entry_rcu(node, p, &tbl->hash_buckets[i], list)
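
/*
 * Example (illustrative sketch): dereference the table into a plain local
 * variable first, then hand that to the macro:
 *
 *	struct mesh_table *tbl;
 *	struct mpath_node *node;
 *	struct hlist_node *p;
 *	int i;
 *
 *	rcu_read_lock();
 *	tbl = rcu_dereference(mesh_paths);
 *	for_each_mesh_entry(tbl, p, node, i)
 *		...use node->mpath...
 *	rcu_read_unlock();
 */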
static struct mesh_table *mesh_table_alloc(int size_order)
{
	int i;
	struct mesh_table *newtbl;

	newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
	if (!newtbl)
		return NULL;

	newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
			(1 << size_order), GFP_ATOMIC);
	if (!newtbl->hash_buckets) {
		kfree(newtbl);
		return NULL;
	}

	newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
			(1 << size_order), GFP_ATOMIC);
	if (!newtbl->hashwlock) {
		kfree(newtbl->hash_buckets);
		kfree(newtbl);
		return NULL;
	}

	newtbl->size_order = size_order;
	newtbl->hash_mask = (1 << size_order) - 1;
	atomic_set(&newtbl->entries, 0);
	get_random_bytes(&newtbl->hash_rnd,
			sizeof(newtbl->hash_rnd));
	for (i = 0; i <= newtbl->hash_mask; i++)
		spin_lock_init(&newtbl->hashwlock[i]);
	spin_lock_init(&newtbl->gates_lock);

	return newtbl;
}
static void __mesh_table_free(struct mesh_table *tbl)
{
	kfree(tbl->hash_buckets);
	kfree(tbl->hashwlock);
	kfree(tbl);
}
static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
{
	struct hlist_head *mesh_hash;
	struct hlist_node *p, *q;
	struct mpath_node *gate;
	int i;

	mesh_hash = tbl->hash_buckets;
	for (i = 0; i <= tbl->hash_mask; i++) {
		spin_lock_bh(&tbl->hashwlock[i]);
		hlist_for_each_safe(p, q, &mesh_hash[i]) {
			tbl->free_node(p, free_leafs);
			atomic_dec(&tbl->entries);
		}
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
	if (free_leafs) {
		spin_lock_bh(&tbl->gates_lock);
		hlist_for_each_entry_safe(gate, p, q,
					  tbl->known_gates, list) {
			hlist_del(&gate->list);
			kfree(gate);
		}
		kfree(tbl->known_gates);
		spin_unlock_bh(&tbl->gates_lock);
	}

	__mesh_table_free(tbl);
}
static int mesh_table_grow(struct mesh_table *oldtbl,
			   struct mesh_table *newtbl)
{
	struct hlist_head *oldhash;
	struct hlist_node *p, *q;
	int i;

	if (atomic_read(&oldtbl->entries)
			< oldtbl->mean_chain_len * (oldtbl->hash_mask + 1))
		return -EAGAIN;

	newtbl->free_node = oldtbl->free_node;
	newtbl->mean_chain_len = oldtbl->mean_chain_len;
	newtbl->copy_node = oldtbl->copy_node;
	newtbl->known_gates = oldtbl->known_gates;
	atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));

	oldhash = oldtbl->hash_buckets;
	for (i = 0; i <= oldtbl->hash_mask; i++)
		hlist_for_each(p, &oldhash[i])
			if (oldtbl->copy_node(p, newtbl) < 0)
				goto errcopy;

	return 0;

errcopy:
	for (i = 0; i <= newtbl->hash_mask; i++) {
		hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
			oldtbl->free_node(p, 0);
	}
	return -ENOMEM;
}
static u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata,
			   struct mesh_table *tbl)
{
	/* Use last four bytes of hw addr and interface index as hash index */
	return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex, tbl->hash_rnd)
			& tbl->hash_mask;
}
/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	unsigned long flags;

	rcu_assign_pointer(mpath->next_hop, sta);

	spin_lock_irqsave(&mpath->frame_queue.lock, flags);
	skb_queue_walk(&mpath->frame_queue, skb) {
		hdr = (struct ieee80211_hdr *) skb->data;
		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
		memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
		ieee80211_mps_set_frame_flags(sta->sdata, sta, hdr);
	}
	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}
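
/*
 * Example (illustrative sketch): a caller serializes the next hop update
 * with the path's state lock, as mesh_path_fix_nexthop() below does:
 *
 *	spin_lock_bh(&mpath->state_lock);
 *	mesh_path_assign_nexthop(mpath, sta);
 *	spin_unlock_bh(&mpath->state_lock);
 */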
static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
			     struct mesh_path *gate_mpath)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211s_hdr *mshdr;
	int mesh_hdrlen, hdrlen;
	char *next_hop;

	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);

	if (!(mshdr->flags & MESH_FLAGS_AE)) {
		/* size of the fixed part of the mesh header */
		mesh_hdrlen = 6;

		/* make room for the two extended addresses */
		skb_push(skb, 2 * ETH_ALEN);
		memmove(skb->data, hdr, hdrlen + mesh_hdrlen);

		hdr = (struct ieee80211_hdr *) skb->data;

		/* we preserve the previous mesh header and only add
		 * the new addresses */
		mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
		mshdr->flags = MESH_FLAGS_AE_A5_A6;
		memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
		memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
	}

	/* update next hop */
	hdr = (struct ieee80211_hdr *) skb->data;
	rcu_read_lock();
	next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
	memcpy(hdr->addr1, next_hop, ETH_ALEN);
	rcu_read_unlock();
	memcpy(hdr->addr2, gate_mpath->sdata->vif.addr, ETH_ALEN);
	memcpy(hdr->addr3, dst_addr, ETH_ALEN);
}
/**
 * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
 *
 * This function is used to transfer or copy frames from an unresolved mpath to
 * a gate mpath. The function also adds the Address Extension field and
 * updates the next hop.
 *
 * If a frame already has an Address Extension field, only the next hop and
 * destination addresses are updated.
 *
 * The gate mpath must be an active mpath with a valid mpath->next_hop.
 *
 * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate)
 * @from_mpath: The failed mpath
 * @copy: When true, copy all the frames to the new mpath queue. When false,
 * move them.
 */
static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
				    struct mesh_path *from_mpath,
				    bool copy)
{
	struct sk_buff *skb, *fskb, *tmp;
	struct sk_buff_head failq;
	unsigned long flags;

	BUG_ON(gate_mpath == from_mpath);
	BUG_ON(!gate_mpath->next_hop);

	__skb_queue_head_init(&failq);

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice_init(&from_mpath->frame_queue, &failq);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);

	skb_queue_walk_safe(&failq, fskb, tmp) {
		if (skb_queue_len(&gate_mpath->frame_queue) >=
				  MESH_FRAME_QUEUE_LEN) {
			mpath_dbg(gate_mpath->sdata, "mpath queue full!\n");
			break;
		}
		skb = skb_copy(fskb, GFP_ATOMIC);
		if (WARN_ON(!skb))
			break;
		prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
		skb_queue_tail(&gate_mpath->frame_queue, skb);
		if (copy)
			continue;
		__skb_unlink(fskb, &failq);
		kfree_skb(fskb);
	}

	mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n",
		  gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue));

	if (!copy)
		return;

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice(&failq, &from_mpath->frame_queue);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
}
static struct mesh_path *mpath_lookup(struct mesh_table *tbl, u8 *dst,
				      struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct hlist_node *n;
	struct hlist_head *bucket;
	struct mpath_node *node;

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
	hlist_for_each_entry_rcu(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(dst, mpath->dst)) {
			if (MPATH_EXPIRED(mpath)) {
				spin_lock_bh(&mpath->state_lock);
				mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&mpath->state_lock);
			}
			return mpath;
		}
	}
	return NULL;
}
/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @dst: hardware address (ETH_ALEN length) of destination
 * @sdata: local subif
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within an RCU read lock section.
 */
struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	return mpath_lookup(rcu_dereference(mesh_paths), dst, sdata);
}
struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	return mpath_lookup(rcu_dereference(mpp_paths), dst, sdata);
}
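
/*
 * Example (illustrative sketch): both lookup flavours return an RCU
 * protected mpath, so the result may only be used inside the same RCU
 * read section:
 *
 *	rcu_read_lock();
 *	mpath = mesh_path_lookup(dst, sdata);
 *	if (mpath && (mpath->flags & MESH_PATH_ACTIVE))
 *		...read mpath fields, e.g. mpath->next_hop...
 *	rcu_read_unlock();
 */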
/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within an RCU read lock section.
 */
struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl = rcu_dereference(mesh_paths);
	struct mpath_node *node;
	struct hlist_node *p;
	int i, j = 0;

	for_each_mesh_entry(tbl, p, node, i) {
		if (sdata && node->mpath->sdata != sdata)
			continue;
		if (j++ == idx) {
			if (MPATH_EXPIRED(node->mpath)) {
				spin_lock_bh(&node->mpath->state_lock);
				node->mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&node->mpath->state_lock);
			}
			return node->mpath;
		}
	}
	return NULL;
}
/**
 * mesh_path_add_gate - add the given mpath as a mesh gate to our path table
 * @mpath: gate path to add to table
 */
int mesh_path_add_gate(struct mesh_path *mpath)
{
	struct mesh_table *tbl;
	struct mpath_node *gate, *new_gate;
	struct hlist_node *n;
	int err;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	hlist_for_each_entry_rcu(gate, n, tbl->known_gates, list)
		if (gate->mpath == mpath) {
			err = -EEXIST;
			goto err_rcu;
		}

	new_gate = kzalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_gate) {
		err = -ENOMEM;
		goto err_rcu;
	}

	mpath->is_gate = true;
	mpath->sdata->u.mesh.num_gates++;
	new_gate->mpath = mpath;
	spin_lock_bh(&tbl->gates_lock);
	hlist_add_head_rcu(&new_gate->list, tbl->known_gates);
	spin_unlock_bh(&tbl->gates_lock);
	rcu_read_unlock();
	mpath_dbg(mpath->sdata,
		  "Mesh path: Recorded new gate: %pM. %d known gates\n",
		  mpath->dst, mpath->sdata->u.mesh.num_gates);
	return 0;
err_rcu:
	rcu_read_unlock();
	return err;
}
/**
 * mesh_gate_del - remove a mesh gate from the list of known gates
 * @tbl: table which holds our list of known gates
 * @mpath: gate mpath
 *
 * Returns: 0 on success
 *
 * Locking: must be called inside an rcu_read_lock() section
 */
static int mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
	struct mpath_node *gate;
	struct hlist_node *p, *q;

	hlist_for_each_entry_safe(gate, p, q, tbl->known_gates, list)
		if (gate->mpath == mpath) {
			spin_lock_bh(&tbl->gates_lock);
			hlist_del_rcu(&gate->list);
			kfree_rcu(gate, rcu);
			spin_unlock_bh(&tbl->gates_lock);
			mpath->sdata->u.mesh.num_gates--;
			mpath->is_gate = false;
			mpath_dbg(mpath->sdata,
				  "Mesh path: Deleted gate: %pM. %d known gates\n",
				  mpath->dst, mpath->sdata->u.mesh.num_gates);
			break;
		}

	return 0;
}

/**
 * mesh_gate_num - number of gates known to this interface
 * @sdata: subif data
 */
int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
{
	return sdata->u.mesh.num_gates;
}
/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @dst: destination address of the path (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 on success
 *
 * State: the initial state of the new path is set to 0
 */
int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return -ENOSPC;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	read_lock_bh(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	eth_broadcast_addr(new_mpath->rann_snd_addr);
	new_mpath->is_root = false;
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	new_mpath->timer.data = (unsigned long) new_mpath;
	new_mpath->timer.function = mesh_path_timer;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	init_timer(&new_mpath->timer);

	tbl = resize_dereference_mesh_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(dst, mpath->dst))
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    tbl->mean_chain_len * (tbl->hash_mask + 1))
		grow = 1;

	mesh_paths_generation++;

	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	if (grow) {
		set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	atomic_dec(&sdata->u.mesh.mpaths);
	return err;
}
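
/*
 * Example (illustrative sketch of a typical caller, not code from this
 * file): path discovery looks the destination up first and only allocates
 * on a miss; -EEXIST from a racing mesh_path_add() is harmless because the
 * second lookup finds the winner's entry:
 *
 *	rcu_read_lock();
 *	mpath = mesh_path_lookup(dst, sdata);
 *	if (!mpath) {
 *		mesh_path_add(dst, sdata);
 *		mpath = mesh_path_lookup(dst, sdata);
 *	}
 *	rcu_read_unlock();
 */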
static void mesh_table_free_rcu(struct rcu_head *rcu)
{
	struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head);

	mesh_table_free(tbl, false);
}
void mesh_mpath_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mesh_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(mesh_paths, newtbl);

	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

 out:
	write_unlock_bh(&pathtbl_resize_lock);
}
void mesh_mpp_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mpp_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(mpp_paths, newtbl);
	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

 out:
	write_unlock_bh(&pathtbl_resize_lock);
}
int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	read_lock_bh(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	init_timer(&new_mpath->timer);
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);

	tbl = resize_dereference_mpp_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(dst, mpath->dst))
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    tbl->mean_chain_len * (tbl->hash_mask + 1))
		grow = 1;

	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	if (grow) {
		set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	return err;
}
/**
 * mesh_plink_broken - deactivates paths and sends perr when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	struct mesh_table *tbl;
	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	int i;
	__le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_DEST_UNREACHABLE);

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		if (rcu_dereference(mpath->next_hop) == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			spin_lock_bh(&mpath->state_lock);
			mpath->flags &= ~MESH_PATH_ACTIVE;
			++mpath->sn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl,
					   mpath->dst, cpu_to_le32(mpath->sn),
					   reason, bcast, sdata);
		}
	}
	rcu_read_unlock();
}
static void mesh_path_node_reclaim(struct rcu_head *rp)
{
	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
	struct ieee80211_sub_if_data *sdata = node->mpath->sdata;

	del_timer_sync(&node->mpath->timer);
	atomic_dec(&sdata->u.mesh.mpaths);
	kfree(node->mpath);
	kfree(node);
}
/* needs to be called with the corresponding hashwlock taken */
static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
{
	struct mesh_path *mpath;

	mpath = node->mpath;
	spin_lock(&mpath->state_lock);
	mpath->flags |= MESH_PATH_RESOLVING;
	if (mpath->is_gate)
		mesh_gate_del(tbl, mpath);
	hlist_del_rcu(&node->list);
	call_rcu(&node->rcu, mesh_path_node_reclaim);
	spin_unlock(&mpath->state_lock);
	atomic_dec(&tbl->entries);
}
/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in a rcu read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	rcu_read_lock();
	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		if (rcu_dereference(mpath->next_hop) == sta) {
			spin_lock(&tbl->hashwlock[i]);
			__mesh_path_del(tbl, node);
			spin_unlock(&tbl->hashwlock[i]);
		}
	}
	read_unlock_bh(&pathtbl_resize_lock);
	rcu_read_unlock();
}
static void table_flush_by_iface(struct mesh_table *tbl,
				 struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	WARN_ON(!rcu_read_lock_held());
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		if (mpath->sdata != sdata)
			continue;
		spin_lock_bh(&tbl->hashwlock[i]);
		__mesh_path_del(tbl, node);
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
}
/**
 * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
 *
 * This function deletes both mesh paths and mesh portal paths.
 *
 * @sdata: interface data to match
 */
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;

	rcu_read_lock();
	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	table_flush_by_iface(tbl, sdata);
	tbl = resize_dereference_mpp_paths();
	table_flush_by_iface(tbl, sdata);
	read_unlock_bh(&pathtbl_resize_lock);
	rcu_read_unlock();
}
/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: dst address (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 if successful
 */
int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int hash_idx;
	int err = 0;

	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	hash_idx = mesh_table_hash(addr, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(addr, mpath->dst)) {
			__mesh_path_del(tbl, node);
			goto enddel;
		}
	}

	err = -ENXIO;
enddel:
	mesh_paths_generation++;
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	return err;
}
/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
	if (mpath->flags & MESH_PATH_ACTIVE)
		ieee80211_add_pending_skbs(mpath->sdata->local,
					   &mpath->frame_queue);
}
/**
 * mesh_path_send_to_gates - sends pending frames to all known mesh gates
 *
 * @mpath: mesh path whose queue will be emptied
 *
 * If there is only one gate, the frames are transferred from the failed mpath
 * queue to that gate's queue. If there is more than one gate, the frames are
 * copied from each gate to the next. After frames are copied, the mpath
 * queues are emptied onto the transmission queue.
 */
int mesh_path_send_to_gates(struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct hlist_node *n;
	struct mesh_table *tbl;
	struct mesh_path *from_mpath = mpath;
	struct mpath_node *gate = NULL;
	bool copy = false;
	struct hlist_head *known_gates;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	known_gates = tbl->known_gates;
	rcu_read_unlock();

	if (!known_gates)
		return -EHOSTUNREACH;

	hlist_for_each_entry_rcu(gate, n, known_gates, list) {
		if (gate->mpath->sdata != sdata)
			continue;

		if (gate->mpath->flags & MESH_PATH_ACTIVE) {
			mpath_dbg(sdata, "Forwarding to %pM\n", gate->mpath->dst);
			mesh_path_move_to_queue(gate->mpath, from_mpath, copy);
			from_mpath = gate->mpath;
			copy = true;
		} else {
			mpath_dbg(sdata,
				  "Not forwarding %p (flags %#x)\n",
				  gate->mpath, gate->mpath->flags);
		}
	}

	hlist_for_each_entry_rcu(gate, n, known_gates, list)
		if (gate->mpath->sdata == sdata) {
			mpath_dbg(sdata, "Sending to %pM\n", gate->mpath->dst);
			mesh_path_tx_pending(gate->mpath);
		}

	return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
}
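
/*
 * Example (illustrative sketch of the expected caller pattern, not code
 * from this file): when path discovery finally fails, the queued frames
 * are offered to the gates and only dropped if no gate could take them:
 *
 *	if (mesh_path_send_to_gates(mpath))
 *		mesh_path_flush_pending(mpath);
 */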
/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @skb: frame to discard
 * @sdata: network subif the frame was to be sent through
 *
 * Locking: the function must be called within an RCU read lock section.
 */
void mesh_path_discard_frame(struct sk_buff *skb,
			     struct ieee80211_sub_if_data *sdata)
{
	kfree_skb(skb);
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}
/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within an RCU read lock section.
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
		mesh_path_discard_frame(skb, mpath->sdata);
}
/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: this function takes mpath->state_lock itself, so it must be
 * called without holding it.
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	mpath->sn = 0xffff;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags |= MESH_PATH_FIXED;
	mesh_path_activate(mpath);
	spin_unlock_bh(&mpath->state_lock);
	mesh_path_tx_pending(mpath);
}
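
/*
 * Example (illustrative sketch, assuming a cfg80211-style caller; names
 * are not taken from this file): resolve the station for the requested
 * next hop, then pin it:
 *
 *	sta = sta_info_get(sdata, next_hop_addr);
 *	if (sta)
 *		mesh_path_fix_nexthop(mpath, sta);
 */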
static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
{
	struct mesh_path *mpath;
	struct mpath_node *node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	hlist_del_rcu(p);
	if (free_leafs) {
		del_timer_sync(&mpath->timer);
		kfree(mpath);
	}
	kfree(node);
}

static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
{
	struct mesh_path *mpath;
	struct mpath_node *node, *new_node;
	u32 hash_idx;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (new_node == NULL)
		return -ENOMEM;

	node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	new_node->mpath = mpath;
	hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl);
	hlist_add_head(&new_node->list,
			&newtbl->hash_buckets[hash_idx]);
	return 0;
}
int mesh_pathtbl_init(void)
{
	struct mesh_table *tbl_path, *tbl_mpp;
	int ret;

	tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_path)
		return -ENOMEM;
	tbl_path->free_node = &mesh_path_node_free;
	tbl_path->copy_node = &mesh_path_node_copy;
	tbl_path->mean_chain_len = MEAN_CHAIN_LEN;
	tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
	if (!tbl_path->known_gates) {
		ret = -ENOMEM;
		goto free_path;
	}
	INIT_HLIST_HEAD(tbl_path->known_gates);

	tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_mpp) {
		ret = -ENOMEM;
		goto free_path;
	}
	tbl_mpp->free_node = &mesh_path_node_free;
	tbl_mpp->copy_node = &mesh_path_node_copy;
	tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN;
	tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
	if (!tbl_mpp->known_gates) {
		ret = -ENOMEM;
		goto free_mpp;
	}
	INIT_HLIST_HEAD(tbl_mpp->known_gates);

	/* Need no locking since this is during init */
	RCU_INIT_POINTER(mesh_paths, tbl_path);
	RCU_INIT_POINTER(mpp_paths, tbl_mpp);
	return 0;

free_mpp:
	mesh_table_free(tbl_mpp, true);
free_path:
	mesh_table_free(tbl_path, true);
	return ret;
}
void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, p, node, i) {
		if (node->mpath->sdata != sdata)
			continue;
		mpath = node->mpath;
		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
		    (!(mpath->flags & MESH_PATH_FIXED)) &&
		     time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
			mesh_path_del(mpath->dst, mpath->sdata);
	}
	rcu_read_unlock();
}
void mesh_pathtbl_unregister(void)
{
	/* no need for locking during exit path */
	mesh_table_free(rcu_dereference_protected(mesh_paths, 1), true);
	mesh_table_free(rcu_dereference_protected(mpp_paths, 1), true);
}