/*
 * Copyright (c) 2008 open80211s Ltd.
 * Author:     Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "mesh.h"
/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
#define INIT_PATHS_SIZE_ORDER	2

/* Keep the mean chain length below this constant */
#define MEAN_CHAIN_LEN		2
#define MPATH_EXPIRED(mpath) ((mpath->flags & MESH_PATH_ACTIVE) && \
				time_after(jiffies, mpath->exp_time) && \
				!(mpath->flags & MESH_PATH_FIXED))
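/*
 * MPATH_EXPIRED() is true only for a path that is still marked active, whose
 * expiration time has passed, and that has not been pinned to a fixed next
 * hop with mesh_path_fix_nexthop() (MESH_PATH_FIXED).
 */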
struct mpath_node {
	struct hlist_node list;
	struct rcu_head rcu;
	/* This indirection allows two different tables to point to the same
	 * mesh_path structure, useful when resizing
	 */
	struct mesh_path *mpath;
};
static struct mesh_table *mesh_paths;
static struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */
/* This lock will have the grow table function as writer and add / delete nodes
 * as readers. When reading the table (i.e. doing lookups) we are well protected
 * by RCU
 */
static DEFINE_RWLOCK(pathtbl_resize_lock);
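/*
 * Writers to a given bucket additionally serialize on that bucket's
 * hashwlock spinlock in struct mesh_table, so adds and deletes in different
 * buckets can proceed in parallel.
 */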
/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	struct sk_buff_head tmpq;
	unsigned long flags;

	rcu_assign_pointer(mpath->next_hop, sta);

	__skb_queue_head_init(&tmpq);

	spin_lock_irqsave(&mpath->frame_queue.lock, flags);
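	/*
	 * Frames queued while the path was unresolved may still carry a
	 * stale RA: rewrite addr1 of every pending frame to the new next
	 * hop, collecting the frames on a temporary queue so they can be
	 * spliced back in their original order.
	 */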
	while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) {
		hdr = (struct ieee80211_hdr *) skb->data;
		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
		__skb_queue_tail(&tmpq, skb);
	}

	skb_queue_splice(&tmpq, &mpath->frame_queue);
	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}
/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @dst: hardware address (ETH_ALEN length) of destination
 * @sdata: local subif
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct hlist_node *n;
	struct hlist_head *bucket;
	struct mesh_table *tbl;
	struct mpath_node *node;

	tbl = rcu_dereference(mesh_paths);

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
	hlist_for_each_entry_rcu(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
				memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
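			/*
			 * Expire the path lazily: recheck under the state
			 * lock, since another CPU may have already cleared
			 * the flag between our unlocked test and here.
			 */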
			if (MPATH_EXPIRED(mpath)) {
				spin_lock_bh(&mpath->state_lock);
				if (MPATH_EXPIRED(mpath))
					mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&mpath->state_lock);
			}
			return mpath;
		}
	}
	return NULL;
}
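/**
 * mpp_path_lookup - look up a proxy path in the MPP paths table
 * @dst: hardware address (ETH_ALEN length) of the proxied destination
 * @sdata: local subif
 *
 * Same scheme as mesh_path_lookup(), but against mpp_paths, the table of
 * destinations reachable through a mesh proxy.
 *
 * Locking: must be called within a read rcu section.
 */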
struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct hlist_node *n;
	struct hlist_head *bucket;
	struct mesh_table *tbl;
	struct mpath_node *node;

	tbl = rcu_dereference(mpp_paths);

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
	hlist_for_each_entry_rcu(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
				memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
			if (MPATH_EXPIRED(mpath)) {
				spin_lock_bh(&mpath->state_lock);
				if (MPATH_EXPIRED(mpath))
					mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&mpath->state_lock);
			}
			return mpath;
		}
	}
	return NULL;
}
/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata)
{
	struct mpath_node *node;
	struct hlist_node *p;
	int i;
	int j = 0;
	for_each_mesh_entry(mesh_paths, p, node, i) {
		if (sdata && node->mpath->sdata != sdata)
			continue;
		if (j++ == idx) {
			if (MPATH_EXPIRED(node->mpath)) {
				spin_lock_bh(&node->mpath->state_lock);
				if (MPATH_EXPIRED(node->mpath))
					node->mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&node->mpath->state_lock);
			}
			return node->mpath;
		}
	}

	return NULL;
}
/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @dst: destination address of the path (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 on success
 *
 * State: the initial state of the new path is set to 0
 */
int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0)
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return -ENOSPC;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_KERNEL);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL);
	if (!new_node)
		goto err_node_alloc;
	read_lock(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	new_mpath->timer.data = (unsigned long) new_mpath;
	new_mpath->timer.function = mesh_path_timer;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	init_timer(&new_mpath->timer);

	hash_idx = mesh_table_hash(dst, sdata, mesh_paths);
	bucket = &mesh_paths->hash_buckets[hash_idx];

	spin_lock(&mesh_paths->hashwlock[hash_idx]);
	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&mesh_paths->entries) >=
		mesh_paths->mean_chain_len * (mesh_paths->hash_mask + 1))
		grow = 1;

	spin_unlock(&mesh_paths->hashwlock[hash_idx]);
	read_unlock(&pathtbl_resize_lock);
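	/*
	 * Grow the table outside the bucket lock and outside the resize
	 * read lock: the threshold check above only set a flag, so RCU
	 * readers are never blocked by the resize.
	 */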
	if (grow) {
		struct mesh_table *oldtbl, *newtbl;

		write_lock(&pathtbl_resize_lock);
		oldtbl = mesh_paths;
		newtbl = mesh_table_grow(mesh_paths);
		if (!newtbl) {
			write_unlock(&pathtbl_resize_lock);
			return 0;
		}
		rcu_assign_pointer(mesh_paths, newtbl);
		write_unlock(&pathtbl_resize_lock);

		synchronize_rcu();
		mesh_table_free(oldtbl, false);
	}
	return 0;

err_exists:
	spin_unlock(&mesh_paths->hashwlock[hash_idx]);
	read_unlock(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	atomic_dec(&sdata->u.mesh.mpaths);
	return err;
}
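/*
 * mpp_path_add - add a proxy path (a destination reachable via the mesh
 * proxy @mpp) to the MPP paths table. Mirrors mesh_path_add(), except that
 * proxy paths are not counted against MESH_MAX_MPATHS and carry no timer.
 */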
int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0)
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_KERNEL);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL);
	if (!new_node)
		goto err_node_alloc;
	read_lock(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);

	hash_idx = mesh_table_hash(dst, sdata, mpp_paths);
	bucket = &mpp_paths->hash_buckets[hash_idx];

	spin_lock(&mpp_paths->hashwlock[hash_idx]);
	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&mpp_paths->entries) >=
		mpp_paths->mean_chain_len * (mpp_paths->hash_mask + 1))
		grow = 1;

	spin_unlock(&mpp_paths->hashwlock[hash_idx]);
	read_unlock(&pathtbl_resize_lock);
	if (grow) {
		struct mesh_table *oldtbl, *newtbl;

		write_lock(&pathtbl_resize_lock);
		oldtbl = mpp_paths;
		newtbl = mesh_table_grow(mpp_paths);
		if (!newtbl) {
			write_unlock(&pathtbl_resize_lock);
			return 0;
		}
		rcu_assign_pointer(mpp_paths, newtbl);
		write_unlock(&pathtbl_resize_lock);

		synchronize_rcu();
		mesh_table_free(oldtbl, false);
	}
	return 0;

err_exists:
	spin_unlock(&mpp_paths->hashwlock[hash_idx]);
	read_unlock(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	return err;
}
/**
 * mesh_plink_broken - deactivates paths and sends PERR when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	int i;

	rcu_read_lock();
	for_each_mesh_entry(mesh_paths, p, node, i) {
		mpath = node->mpath;
		spin_lock_bh(&mpath->state_lock);
		if (mpath->next_hop == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
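			/*
			 * Deactivate the path and advance its destination
			 * sequence number so that the broadcast PERR below
			 * supersedes stale routing state held by our peers.
			 */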
			mpath->flags &= ~MESH_PATH_ACTIVE;
			++mpath->dsn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(mpath->dst,
					cpu_to_le32(mpath->dsn),
					sdata->dev->broadcast, sdata);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}
	rcu_read_unlock();
}
/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in an rcu read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	for_each_mesh_entry(mesh_paths, p, node, i) {
		mpath = node->mpath;
		if (mpath->next_hop == sta)
			mesh_path_del(mpath->dst, mpath->sdata);
	}
}
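/* Delete every mesh path that belongs to the given interface */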
void mesh_path_flush(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	for_each_mesh_entry(mesh_paths, p, node, i) {
		mpath = node->mpath;
		if (mpath->sdata == sdata)
			mesh_path_del(mpath->dst, mpath->sdata);
	}
}
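/*
 * RCU callback for mesh_path_del(): runs once no reader can still hold a
 * reference to the unlinked node, so it is safe to stop the path timer and
 * free the path and its node.
 */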
static void mesh_path_node_reclaim(struct rcu_head *rp)
{
	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
	struct ieee80211_sub_if_data *sdata = node->mpath->sdata;

	del_timer_sync(&node->mpath->timer);
	atomic_dec(&sdata->u.mesh.mpaths);
	kfree(node->mpath);
	kfree(node);
}
/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: dst address (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 if successful
 */
int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int hash_idx;
	int err = 0;

	read_lock(&pathtbl_resize_lock);
	hash_idx = mesh_table_hash(addr, sdata, mesh_paths);
	bucket = &mesh_paths->hash_buckets[hash_idx];

	spin_lock(&mesh_paths->hashwlock[hash_idx]);
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
				memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
			spin_lock_bh(&mpath->state_lock);
			mpath->flags |= MESH_PATH_RESOLVING;
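			/*
			 * RESOLVING (set above) keeps readers that still
			 * hold this entry from starting a fresh discovery on
			 * it; the node is unlinked now but freed only after
			 * an RCU grace period, in mesh_path_node_reclaim().
			 */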
			hlist_del_rcu(&node->list);
			call_rcu(&node->rcu, mesh_path_node_reclaim);
			atomic_dec(&mesh_paths->entries);
			spin_unlock_bh(&mpath->state_lock);
			goto enddel;
		}
	}

	err = -ENXIO;
enddel:
	spin_unlock(&mesh_paths->hashwlock[hash_idx]);
	read_unlock(&pathtbl_resize_lock);
	return err;
}
/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&mpath->frame_queue)) &&
			(mpath->flags & MESH_PATH_ACTIVE))
		dev_queue_xmit(skb);
}
/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @skb: frame to discard
 * @sdata: network subif the frame was to be sent through
 *
 * If the frame was being forwarded from another MP, a PERR frame will be sent
 * to the precursor. The precursor's address (i.e. the previous hop) was saved
 * in addr1 of the frame-to-be-forwarded, and would only be overwritten once
 * the destination is successfully resolved.
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_discard_frame(struct sk_buff *skb,
			     struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct mesh_path *mpath;
	u32 dsn = 0;

	if (memcmp(hdr->addr4, sdata->dev->dev_addr, ETH_ALEN) != 0) {
		u8 *ra, *da;

		da = hdr->addr3;
		ra = hdr->addr1;
		mpath = mesh_path_lookup(da, sdata);
		if (mpath)
			dsn = ++mpath->dsn;
		mesh_path_error_tx(skb->data, cpu_to_le32(dsn), ra, sdata);
	}

	kfree_skb(skb);
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}
/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&mpath->frame_queue)) &&
			(mpath->flags & MESH_PATH_ACTIVE))
		mesh_path_discard_frame(skb, mpath->sdata);
}
/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: this function takes mpath->state_lock itself, so it must be
 * called without holding it.
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	mpath->dsn = 0;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags |= MESH_PATH_FIXED;
	mesh_path_activate(mpath);
	spin_unlock_bh(&mpath->state_lock);
	mesh_path_tx_pending(mpath);
}
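/*
 * Table callbacks: mesh_path_node_free and mesh_path_node_copy are installed
 * below in mesh_pathtbl_init() as free_node and copy_node, and are invoked
 * by mesh_table_free() and mesh_table_grow() respectively.
 */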
static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
{
	struct mesh_path *mpath;
	struct mpath_node *node = hlist_entry(p, struct mpath_node, list);

	mpath = node->mpath;
	hlist_del_rcu(p);
	if (free_leafs)
		kfree(mpath);
	kfree(node);
}
static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
{
	struct mesh_path *mpath;
	struct mpath_node *node, *new_node;
	u32 hash_idx;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (new_node == NULL)
		return -ENOMEM;

	node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	new_node->mpath = mpath;
	hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl);
	hlist_add_head(&new_node->list,
			&newtbl->hash_buckets[hash_idx]);
	return 0;
}
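/*
 * Allocate both path tables at their initial size (2^INIT_PATHS_SIZE_ORDER
 * buckets) and install the node callbacks and resize threshold.
 */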
int mesh_pathtbl_init(void)
{
	mesh_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!mesh_paths)
		return -ENOMEM;
	mesh_paths->free_node = &mesh_path_node_free;
	mesh_paths->copy_node = &mesh_path_node_copy;
	mesh_paths->mean_chain_len = MEAN_CHAIN_LEN;

	mpp_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!mpp_paths) {
		mesh_table_free(mesh_paths, true);
		return -ENOMEM;
	}
	mpp_paths->free_node = &mesh_path_node_free;
	mpp_paths->copy_node = &mesh_path_node_copy;
	mpp_paths->mean_chain_len = MEAN_CHAIN_LEN;

	return 0;
}
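/*
 * mesh_path_expire - delete unused paths on this interface; a path qualifies
 * once it is neither being resolved nor fixed and MESH_PATH_EXPIRE has
 * elapsed since its expiration time. Meant to run periodically, e.g. from
 * the mesh housekeeping work.
 */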
void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	read_lock(&pathtbl_resize_lock);
	for_each_mesh_entry(mesh_paths, p, node, i) {
		if (node->mpath->sdata != sdata)
			continue;
		mpath = node->mpath;
		spin_lock_bh(&mpath->state_lock);
		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
		    (!(mpath->flags & MESH_PATH_FIXED)) &&
		    time_after(jiffies,
			       mpath->exp_time + MESH_PATH_EXPIRE)) {
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_del(mpath->dst, mpath->sdata);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}
	read_unlock(&pathtbl_resize_lock);
}
void mesh_pathtbl_unregister(void)
{
	mesh_table_free(mesh_paths, true);
	mesh_table_free(mpp_paths, true);
}