/*
 * Copyright (c) 2008 open80211s Ltd.
 * Author:     Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "mesh.h"

/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
#define INIT_PATHS_SIZE_ORDER	2

/* Keep the mean chain length below this constant */
#define MEAN_CHAIN_LEN		2

#define MPATH_EXPIRED(mpath) ((mpath->flags & MESH_PATH_ACTIVE) && \
				time_after(jiffies, mpath->exp_time) && \
				!(mpath->flags & MESH_PATH_FIXED))
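
/*
 * MPATH_EXPIRED() treats a path as stale when it is marked active, its
 * expiration time has passed and it has not been pinned with MESH_PATH_FIXED;
 * both lookup functions below downgrade such a path by clearing
 * MESH_PATH_ACTIVE under state_lock.
 */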

struct mpath_node {
	struct hlist_node list;
	struct rcu_head rcu;
	/* This indirection allows two different tables to point to the same
	 * mesh_path structure, useful when resizing
	 */
	struct mesh_path *mpath;
};

static struct mesh_table *mesh_paths;

/* This lock will have the grow table function as writer and add / delete nodes
 * as readers. When reading the table (i.e. doing lookups) we are well protected
 * by RCU
 */
static DEFINE_RWLOCK(pathtbl_resize_lock);
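
/*
 * In addition to pathtbl_resize_lock, each hash bucket has its own hashwlock
 * spinlock (taken in mesh_path_add() and mesh_path_del()), so concurrent
 * add/delete operations on the same bucket are serialized while lookups only
 * need rcu_read_lock().
 */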

/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	rcu_assign_pointer(mpath->next_hop, sta);
}

/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @dst: hardware address (ETH_ALEN length) of destination
 * @dev: local interface
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup(u8 *dst, struct net_device *dev)
{
	struct mesh_path *mpath;
	struct hlist_node *n;
	struct hlist_head *bucket;
	struct mesh_table *tbl;
	struct mpath_node *node;

	tbl = rcu_dereference(mesh_paths);

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, dev, tbl)];
	hlist_for_each_entry_rcu(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->dev == dev &&
		    memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
			if (MPATH_EXPIRED(mpath)) {
				spin_lock_bh(&mpath->state_lock);
				if (MPATH_EXPIRED(mpath))
					mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&mpath->state_lock);
			}
			return mpath;
		}
	}
	return NULL;
}
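
/*
 * Illustrative caller pattern (a sketch, not part of the original code): the
 * lookup and any use of the returned path must stay inside a single RCU
 * read-side critical section, e.g.
 *
 *	rcu_read_lock();
 *	mpath = mesh_path_lookup(dst, dev);
 *	if (mpath && (mpath->flags & MESH_PATH_ACTIVE))
 *		next_hop = rcu_dereference(mpath->next_hop);
 *	rcu_read_unlock();
 */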

/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index
 * @dev: local interface
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup_by_idx(int idx, struct net_device *dev)
{
	struct mpath_node *node;
	struct hlist_node *p;
	int i, j = 0;

	for_each_mesh_entry(mesh_paths, p, node, i)
		if (j++ == idx) {
			if (MPATH_EXPIRED(node->mpath)) {
				spin_lock_bh(&node->mpath->state_lock);
				if (MPATH_EXPIRED(node->mpath))
					node->mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&node->mpath->state_lock);
			}
			return node->mpath;
		}
	return NULL;
}

/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @dst: destination address of the path (ETH_ALEN length)
 * @dev: local interface
 *
 * Returns: 0 on success
 *
 * State: the initial state of the new path is set to 0
 */
int mesh_path_add(u8 *dst, struct net_device *dev)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (memcmp(dst, dev->dev_addr, ETH_ALEN) == 0)
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	if (atomic_add_unless(&sdata->u.sta.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return -ENOSPC;

	read_lock(&pathtbl_resize_lock);

	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_KERNEL);
	if (!new_mpath) {
		atomic_dec(&sdata->u.sta.mpaths);
		err = -ENOMEM;
		goto endadd2;
	}
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	new_mpath->dev = dev;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL);
	new_node->mpath = new_mpath;
	new_mpath->timer.data = (unsigned long) new_mpath;
	new_mpath->timer.function = mesh_path_timer;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	init_timer(&new_mpath->timer);

	hash_idx = mesh_table_hash(dst, dev, mesh_paths);
	bucket = &mesh_paths->hash_buckets[hash_idx];

	spin_lock(&mesh_paths->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->dev == dev && memcmp(dst, mpath->dst, ETH_ALEN)
				== 0) {
			/* an entry for this destination already exists */
			atomic_dec(&sdata->u.sta.mpaths);
			kfree(new_mpath);
			kfree(new_node);
			goto endadd;
		}
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&mesh_paths->entries) >=
	    mesh_paths->mean_chain_len * (mesh_paths->hash_mask + 1))
		grow = 1;
	err = 0;

endadd:
	spin_unlock(&mesh_paths->hashwlock[hash_idx]);
endadd2:
	read_unlock(&pathtbl_resize_lock);
	if (!err && grow) {
		struct mesh_table *oldtbl, *newtbl;

		write_lock(&pathtbl_resize_lock);
		oldtbl = mesh_paths;
		newtbl = mesh_table_grow(mesh_paths);
		if (!newtbl) {
			write_unlock(&pathtbl_resize_lock);
			return err;
		}
		rcu_assign_pointer(mesh_paths, newtbl);
		synchronize_rcu();
		mesh_table_free(oldtbl, false);
		write_unlock(&pathtbl_resize_lock);
	}
	return err;
}
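
/*
 * Note on the resize step above: the replacement table is populated through
 * the copy_node callback (which duplicates only the small mpath_node
 * wrappers), the mesh_paths pointer is switched with rcu_assign_pointer(),
 * and once synchronize_rcu() guarantees that no reader still walks the old
 * buckets the old table is freed with free_leafs == false, so the mesh_path
 * structures themselves survive the resize.
 */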

/**
 * mesh_plink_broken - deactivates paths and sends perr when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	struct net_device *dev = sta->sdata->dev;
	int i;

	rcu_read_lock();
	for_each_mesh_entry(mesh_paths, p, node, i) {
		mpath = node->mpath;
		spin_lock_bh(&mpath->state_lock);
		if (mpath->next_hop == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			mpath->flags &= ~MESH_PATH_ACTIVE;
			++mpath->dsn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(mpath->dst,
					   cpu_to_le32(mpath->dsn),
					   dev->broadcast, dev);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(mesh_plink_broken);
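
/*
 * Illustrative use from a rate control / link monitoring context (a sketch
 * only; the fail_avg field and FAIL_AVG_THRESHOLD are assumptions, not
 * defined in this file):
 *
 *	if (sta->fail_avg > FAIL_AVG_THRESHOLD)
 *		mesh_plink_broken(sta);
 */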

/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from ESTAB
 * to any other state, since ESTAB state is the only one that allows path
 * creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in a rcu read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	for_each_mesh_entry(mesh_paths, p, node, i) {
		mpath = node->mpath;
		if (mpath->next_hop == sta)
			mesh_path_del(mpath->dst, mpath->dev, true);
	}
}

void mesh_path_flush(struct net_device *dev)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	for_each_mesh_entry(mesh_paths, p, node, i) {
		mpath = node->mpath;
		if (mpath->dev == dev)
			mesh_path_del(mpath->dst, mpath->dev, false);
	}
}
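
/*
 * mesh_path_node_reclaim() runs as an RCU callback (see the call_rcu() in
 * mesh_path_del()): releasing the node and its mesh_path is deferred until
 * every pre-existing reader that may still hold a pointer obtained from
 * mesh_path_lookup() has left its RCU read-side critical section.
 */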

static void mesh_path_node_reclaim(struct rcu_head *rp)
{
	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
	struct ieee80211_sub_if_data *sdata =
			IEEE80211_DEV_TO_SUB_IF(node->mpath->dev);

	del_timer_sync(&node->mpath->timer);
	atomic_dec(&sdata->u.sta.mpaths);
	kfree(node->mpath);
	kfree(node);
}

/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: dst address (ETH_ALEN length)
 * @dev: local interface
 * @force: also delete the path if it is still being resolved
 *
 * Returns: 0 if successful
 *
 * State: if the path is being resolved, the deletion will be postponed until
 * the path resolution completes or times out, unless the force parameter
 * is given.
 */
int mesh_path_del(u8 *addr, struct net_device *dev, bool force)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int hash_idx;
	int err = 0;

	read_lock(&pathtbl_resize_lock);
	hash_idx = mesh_table_hash(addr, dev, mesh_paths);
	bucket = &mesh_paths->hash_buckets[hash_idx];

	spin_lock(&mesh_paths->hashwlock[hash_idx]);
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->dev == dev &&
		    memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
			spin_lock_bh(&mpath->state_lock);
			if (!force && mpath->flags & MESH_PATH_RESOLVING) {
				mpath->flags |= MESH_PATH_DELETE;
			} else {
				mpath->flags |= MESH_PATH_RESOLVING;
				hlist_del_rcu(&node->list);
				call_rcu(&node->rcu, mesh_path_node_reclaim);
				atomic_dec(&mesh_paths->entries);
			}
			spin_unlock_bh(&mpath->state_lock);
			goto enddel;
		}
	}

	err = -ENXIO;
enddel:
	spin_unlock(&mesh_paths->hashwlock[hash_idx]);
	read_unlock(&pathtbl_resize_lock);
	return err;
}
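
/*
 * Within this file, forced deletion (force == true) is used only by
 * mesh_path_flush_by_nexthop(), where the next hop station is about to go
 * away; mesh_path_flush() and mesh_path_expire() pass force == false, so a
 * path that is still resolving is merely marked MESH_PATH_DELETE and is
 * expected to be removed later, once resolution completes or times out
 * (handled outside this file).
 */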

/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&mpath->frame_queue)) &&
	       (mpath->flags & MESH_PATH_ACTIVE))
		dev_queue_xmit(skb);
}
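
/*
 * mesh_path_tx_pending() drains frames that were queued on mpath->frame_queue
 * while the path was still being resolved; it is invoked once the path is
 * usable again (see mesh_path_fix_nexthop() below for one such caller).
 */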

/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @skb: frame to discard
 * @dev: network device the frame was to be sent through
 *
 * If the frame was being forwarded from another MP, a PERR frame will be sent
 * to the precursor.
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_discard_frame(struct sk_buff *skb, struct net_device *dev)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct mesh_path *mpath;
	u32 dsn = 0;

	if (skb->pkt_type == PACKET_OTHERHOST) {
		struct ieee80211s_hdr *prev_meshhdr;
		int mshhdrlen;
		u8 *ra, *da;

		prev_meshhdr = ((struct ieee80211s_hdr *)skb->cb);
		mshhdrlen = ieee80211_get_mesh_hdrlen(prev_meshhdr);
		da = skb->data;
		/* precursor (previous hop) address: assumed here to be
		 * recovered via the MESH_PREQ() helper from mesh.h */
		ra = MESH_PREQ(skb);
		mpath = mesh_path_lookup(da, dev);
		if (mpath)
			dsn = ++mpath->dsn;
		mesh_path_error_tx(skb->data, cpu_to_le32(dsn), ra, dev);
	}

	kfree_skb(skb);
	sdata->u.sta.mshstats.dropped_frames_no_route++;
}

/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata;
	struct sk_buff *skb;

	sdata = IEEE80211_DEV_TO_SUB_IF(mpath->dev);

	while ((skb = skb_dequeue(&mpath->frame_queue)) &&
	       (mpath->flags & MESH_PATH_ACTIVE))
		mesh_path_discard_frame(skb, mpath->dev);
}

/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: this function takes mpath->state_lock itself, so it must not be
 * called with that lock already held.
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	mpath->dsn = 0;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags |= MESH_PATH_FIXED;
	mesh_path_activate(mpath);
	spin_unlock_bh(&mpath->state_lock);
	mesh_path_tx_pending(mpath);
}
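
/*
 * Illustrative use (a sketch only, assuming the caller already runs under
 * rcu_read_lock() and holds a valid sta entry for the desired peer):
 *
 *	mpath = mesh_path_lookup(dst, dev);
 *	if (mpath)
 *		mesh_path_fix_nexthop(mpath, sta);
 */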

static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
{
	struct mesh_path *mpath;
	struct mpath_node *node = hlist_entry(p, struct mpath_node, list);

	mpath = node->mpath;
	hlist_del_rcu(p);
	if (free_leafs)
		kfree(mpath);
	kfree(node);
}

static void mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
{
	struct mesh_path *mpath;
	struct mpath_node *node, *new_node;
	u32 hash_idx;

	node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL);
	new_node->mpath = mpath;
	hash_idx = mesh_table_hash(mpath->dst, mpath->dev, newtbl);
	hlist_add_head(&new_node->list,
			&newtbl->hash_buckets[hash_idx]);
}

int mesh_pathtbl_init(void)
{
	mesh_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!mesh_paths)
		return -ENOMEM;
	mesh_paths->free_node = &mesh_path_node_free;
	mesh_paths->copy_node = &mesh_path_node_copy;
	mesh_paths->mean_chain_len = MEAN_CHAIN_LEN;
	return 0;
}

void mesh_path_expire(struct net_device *dev)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	read_lock(&pathtbl_resize_lock);
	for_each_mesh_entry(mesh_paths, p, node, i) {
		if (node->mpath->dev != dev)
			continue;
		mpath = node->mpath;
		spin_lock_bh(&mpath->state_lock);
		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
		    (!(mpath->flags & MESH_PATH_FIXED)) &&
		    time_after(jiffies,
			       mpath->exp_time + MESH_PATH_EXPIRE)) {
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_del(mpath->dst, mpath->dev, false);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}
	read_unlock(&pathtbl_resize_lock);
}
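
/*
 * mesh_pathtbl_init() above and mesh_pathtbl_unregister() below are expected
 * to be called once each, at mac80211 setup and teardown; the final
 * mesh_table_free() with free_leafs == true also releases the mesh_path
 * structures still left in the table (see mesh_path_node_free()).
 */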

void mesh_pathtbl_unregister(void)
{
	mesh_table_free(mesh_paths, true);
}