2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License
4 * as published by the Free Software Foundation; either version
5 * 2 of the License, or (at your option) any later version.
7 * Robert Olsson <robert.olsson@its.uu.se> Uppsala Universitet
8 * & Swedish University of Agricultural Sciences.
10 * Jens Laas <jens.laas@data.slu.se> Swedish University of
11 * Agricultural Sciences.
13 * Hans Liss <hans.liss@its.uu.se> Uppsala Universitet
15 * This work is based on the LPC-trie which is originally described in:
17 * An experimental study of compression methods for dynamic tries
18 * Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002.
19 * http://www.nada.kth.se/~snilsson/public/papers/dyntrie2/
22 * IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson
23 * IEEE Journal on Selected Areas in Communications, 17(6):1083-1092, June 1999
25 * Version: $Id: fib_trie.c,v 1.3 2005/06/08 14:20:01 robert Exp $
28 * Code from fib_hash has been reused which includes the following header:
31 * INET An implementation of the TCP/IP protocol suite for the LINUX
32 * operating system. INET is implemented using the BSD Socket
33 * interface as the means of communication with the user level.
35 * IPv4 FIB: lookup engine and maintenance routines.
38 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
40 * This program is free software; you can redistribute it and/or
41 * modify it under the terms of the GNU General Public License
42 * as published by the Free Software Foundation; either version
43 * 2 of the License, or (at your option) any later version.
45 * Substantial contributions to this work comes from:
47 * David S. Miller, <davem@davemloft.net>
48 * Stephen Hemminger <shemminger@osdl.org>
49 * Paul E. McKenney <paulmck@us.ibm.com>
50 * Patrick McHardy <kaber@trash.net>
53 #define VERSION "0.408"
55 #include <asm/uaccess.h>
56 #include <asm/system.h>
57 #include <linux/bitops.h>
58 #include <linux/types.h>
59 #include <linux/kernel.h>
61 #include <linux/string.h>
62 #include <linux/socket.h>
63 #include <linux/sockios.h>
64 #include <linux/errno.h>
66 #include <linux/inet.h>
67 #include <linux/inetdevice.h>
68 #include <linux/netdevice.h>
69 #include <linux/if_arp.h>
70 #include <linux/proc_fs.h>
71 #include <linux/rcupdate.h>
72 #include <linux/skbuff.h>
73 #include <linux/netlink.h>
74 #include <linux/init.h>
75 #include <linux/list.h>
76 #include <net/net_namespace.h>
78 #include <net/protocol.h>
79 #include <net/route.h>
82 #include <net/ip_fib.h>
83 #include "fib_lookup.h"
/*
 * Core constants and data-structure fragments for the LC-trie FIB.
 * NOTE(review): this extract is missing lines -- the struct headers
 * (struct node / struct leaf / struct leaf_info / struct tnode) and
 * their closing braces were dropped; comments below mark where each
 * structure's fields appear to begin and should be confirmed against
 * the full file.
 */
85 #undef CONFIG_IP_FIB_TRIE_STATS	/* force per-lookup stats off */
86 #define MAX_STAT_DEPTH 32		/* depth histogram size for trie stats */
88 #define KEYLENGTH (8*sizeof(t_key))	/* key width in bits (32 for u32) */
90 typedef unsigned int t_key;		/* trie key: host-order IPv4 address */
94 #define NODE_TYPE_MASK 0x1UL		/* low bit of ->parent tags node type */
95 #define NODE_TYPE(node) ((node)->parent & NODE_TYPE_MASK)
97 #define IS_TNODE(n) (!(n->parent & T_LEAF))
98 #define IS_LEAF(n) (n->parent & T_LEAF)
/* struct node (common header): tagged parent pointer, type bit in bit 0 */
102 unsigned long parent;
/* struct leaf: parent word plus the per-prefix-length leaf_info hlist */
107 unsigned long parent;
108 struct hlist_head list;
/* struct leaf_info: one node per prefix length, anchoring fib aliases */
113 struct hlist_node hlist;
116 struct list_head falh;
/* struct tnode: internal node with a 2^bits child array */
121 unsigned long parent;
122 unsigned short pos:5; /* 2log(KEYLENGTH) bits needed */
123 unsigned short bits:5; /* 2log(KEYLENGTH) bits needed */
124 unsigned short full_children; /* KEYLENGTH bits needed */
125 unsigned short empty_children; /* KEYLENGTH bits needed */
127 struct node *child[0];	/* old-style flexible array of children */
130 #ifdef CONFIG_IP_FIB_TRIE_STATS
131 struct trie_use_stats {
/* lookup instrumentation counters (compiled out by the #undef above) */
133 unsigned int backtrack;
134 unsigned int semantic_match_passed;
135 unsigned int semantic_match_miss;
136 unsigned int null_node_hit;
137 unsigned int resize_node_skipped;
/* aggregate trie-shape statistics (struct trie_stat, header missing) */
142 unsigned int totdepth;
143 unsigned int maxdepth;
146 unsigned int nullpointers;
147 unsigned int nodesizes[MAX_STAT_DEPTH];
152 #ifdef CONFIG_IP_FIB_TRIE_STATS
153 struct trie_use_stats stats;
/* forward declarations for the resize machinery defined below */
158 static void put_child(struct trie *t, struct tnode *tn, int i, struct node *n);
159 static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n, int wasfull);
160 static struct node *resize(struct trie *t, struct tnode *tn);
161 static struct tnode *inflate(struct trie *t, struct tnode *tn);
162 static struct tnode *halve(struct trie *t, struct tnode *tn);
163 static void tnode_free(struct tnode *tn);
/* slab cache for struct fib_alias allocations */
165 static struct kmem_cache *fn_alias_kmem __read_mostly;
167 static inline struct tnode *node_parent(struct node *node)
171 ret = (struct tnode *)(node->parent & ~NODE_TYPE_MASK);
172 return rcu_dereference(ret);
175 static inline void node_set_parent(struct node *node, struct tnode *ptr)
177 rcu_assign_pointer(node->parent,
178 (unsigned long)ptr | NODE_TYPE(node));
181 /* rcu_read_lock needs to be hold by caller from readside */
183 static inline struct node *tnode_get_child(struct tnode *tn, int i)
185 BUG_ON(i >= 1 << tn->bits);
187 return rcu_dereference(tn->child[i]);
190 static inline int tnode_child_length(const struct tnode *tn)
192 return 1 << tn->bits;
195 static inline t_key mask_pfx(t_key k, unsigned short l)
197 return (l == 0) ? 0 : k >> (KEYLENGTH-l) << (KEYLENGTH-l);
200 static inline t_key tkey_extract_bits(t_key a, int offset, int bits)
202 if (offset < KEYLENGTH)
203 return ((t_key)(a << offset)) >> (KEYLENGTH - bits);
208 static inline int tkey_equals(t_key a, t_key b)
213 static inline int tkey_sub_equals(t_key a, int offset, int bits, t_key b)
215 if (bits == 0 || offset >= KEYLENGTH)
217 bits = bits > KEYLENGTH ? KEYLENGTH : bits;
218 return ((a ^ b) << offset) >> (KEYLENGTH - bits) == 0;
221 static inline int tkey_mismatch(t_key a, int offset, t_key b)
228 while ((diff << i) >> (KEYLENGTH-1) == 0)
234 To understand this stuff, an understanding of keys and all their bits is
235 necessary. Every node in the trie has a key associated with it, but not
236 all of the bits in that key are significant.
238 Consider a node 'n' and its parent 'tp'.
240 If n is a leaf, every bit in its key is significant. Its presence is
241 necessitated by path compression, since during a tree traversal (when
242 searching for a leaf - unless we are doing an insertion) we will completely
243 ignore all skipped bits we encounter. Thus we need to verify, at the end of
244 a potentially successful search, that we have indeed been walking the
247 Note that we can never "miss" the correct key in the tree if present by
248 following the wrong path. Path compression ensures that segments of the key
249 that are the same for all keys with a given prefix are skipped, but the
250 skipped part *is* identical for each node in the subtrie below the skipped
251 bit! trie_insert() in this implementation takes care of that - note the
252 call to tkey_sub_equals() in trie_insert().
254 if n is an internal node - a 'tnode' here, the various parts of its key
255 have many different meanings.
258 _________________________________________________________________
259 | i | i | i | i | i | i | i | N | N | N | S | S | S | S | S | C |
260 -----------------------------------------------------------------
261 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
263 _________________________________________________________________
264 | C | C | C | u | u | u | u | u | u | u | u | u | u | u | u | u |
265 -----------------------------------------------------------------
266 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
273 First, let's just ignore the bits that come before the parent tp, that is
274 the bits from 0 to (tp->pos-1). They are *known* but at this point we do
275 not use them for anything.
277 The bits from (tp->pos) to (tp->pos + tp->bits - 1) - "N", above - are the
278 index into the parent's child array. That is, they will be used to find
279 'n' among tp's children.
281 The bits from (tp->pos + tp->bits) to (n->pos - 1) - "S" - are skipped bits
284 All the bits we have seen so far are significant to the node n. The rest
285 of the bits are really not needed or indeed known in n->key.
287 The bits from (n->pos) to (n->pos + n->bits - 1) - "C" - are the index into
288 n's child array, and will of course be different for each child.
291 The rest of the bits, from (n->pos + n->bits) onward, are completely unknown
296 static inline void check_tnode(const struct tnode *tn)
298 WARN_ON(tn && tn->pos+tn->bits > 32);
/*
 * Resize thresholds, expressed as fill-factor percentages (see the
 * long derivation inside resize() below).  The root gets much laxer
 * thresholds so it stays large and lookups fan out quickly.
 */
301 static const int halve_threshold = 25;
302 static const int inflate_threshold = 50;
303 static const int halve_threshold_root = 8;
304 static const int inflate_threshold_root = 15;
/*
 * RCU-deferred free helpers and node constructors.
 * NOTE(review): this extract is missing lines (braces, NULL checks and
 * some return statements); code is kept verbatim, comments only added.
 */
/* RCU callback: return a fib_alias to its slab cache. */
307 static void __alias_free_mem(struct rcu_head *head)
309 struct fib_alias *fa = container_of(head, struct fib_alias, rcu);
310 kmem_cache_free(fn_alias_kmem, fa);
/* Defer freeing @fa until all current RCU readers have finished. */
313 static inline void alias_free_mem_rcu(struct fib_alias *fa)
315 call_rcu(&fa->rcu, __alias_free_mem);
/* RCU callbacks freeing leaves and leaf_infos (plain kmalloc memory). */
318 static void __leaf_free_rcu(struct rcu_head *head)
320 kfree(container_of(head, struct leaf, rcu));
323 static void __leaf_info_free_rcu(struct rcu_head *head)
325 kfree(container_of(head, struct leaf_info, rcu));
328 static inline void free_leaf_info(struct leaf_info *leaf)
330 call_rcu(&leaf->rcu, __leaf_info_free_rcu);
/*
 * Allocate a zeroed tnode: small ones from the slab via kcalloc,
 * large ones (child array > PAGE_SIZE) as whole pages.
 */
333 static struct tnode *tnode_alloc(unsigned int size)
337 if (size <= PAGE_SIZE)
338 return kcalloc(size, 1, GFP_KERNEL)
340 pages = alloc_pages(GFP_KERNEL|__GFP_ZERO, get_order(size));
344 return page_address(pages);
/* RCU callback: free a tnode by the same policy it was allocated with. */
347 static void __tnode_free_rcu(struct rcu_head *head)
349 struct tnode *tn = container_of(head, struct tnode, rcu);
350 unsigned int size = sizeof(struct tnode) +
351 (1 << tn->bits) * sizeof(struct node *);
353 if (size <= PAGE_SIZE)
356 free_pages((unsigned long)tn, get_order(size));
/*
 * Defer freeing @tn; leaves (tagged in ->parent) go through the leaf
 * path on the _bh RCU flavor, real tnodes through __tnode_free_rcu.
 */
359 static inline void tnode_free(struct tnode *tn)
362 struct leaf *l = (struct leaf *) tn;
363 call_rcu_bh(&l->rcu, __leaf_free_rcu);
365 call_rcu(&tn->rcu, __tnode_free_rcu);
/* Allocate a new leaf with an empty leaf_info list. */
368 static struct leaf *leaf_new(void)
370 struct leaf *l = kmalloc(sizeof(struct leaf), GFP_KERNEL);
373 INIT_HLIST_HEAD(&l->list);
/* Allocate a leaf_info for prefix length @plen with an empty alias list. */
378 static struct leaf_info *leaf_info_new(int plen)
380 struct leaf_info *li = kmalloc(sizeof(struct leaf_info), GFP_KERNEL);
383 INIT_LIST_HEAD(&li->falh);
/*
 * Allocate and initialize a tnode covering bits [pos, pos+bits) of
 * @key; all 2^bits child slots start empty.
 */
388 static struct tnode* tnode_new(t_key key, int pos, int bits)
390 int nchildren = 1<<bits;
391 int sz = sizeof(struct tnode) + nchildren * sizeof(struct node *);
392 struct tnode *tn = tnode_alloc(sz);
396 tn->parent = T_TNODE;
400 tn->full_children = 0;
401 tn->empty_children = 1<<bits;
404 pr_debug("AT %p s=%u %u\n", tn, (unsigned int) sizeof(struct tnode),
405 (unsigned int) (sizeof(struct node) * 1<<bits));
410 * Check whether a tnode 'n' is "full", i.e. it is an internal node
411 * and no bits are skipped. See discussion in dyntree paper p. 6
414 static inline int tnode_full(const struct tnode *tn, const struct node *n)
416 if (n == NULL || IS_LEAF(n))
419 return ((struct tnode *) n)->pos == tn->pos + tn->bits;
/*
 * put_child - store child @n at slot @i of @tn.
 * Passing wasfull == -1 appears to make tnode_put_child_reorg()
 * recompute the old child's "full" state itself -- confirm against
 * the full tnode_put_child_reorg() body.
 */
static inline void put_child(struct trie *t, struct tnode *tn, int i, struct node *n)
{
	tnode_put_child_reorg(tn, i, n, -1);
}
/*
428 * Add a child at position i overwriting the old value.
429 * Update the value of full_children and empty_children.
 *
 * NOTE(review): extract is missing lines (the isfull declaration, the
 * full_children increments/decrements and braces); kept verbatim.
 */
432 static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n, int wasfull)
434 struct node *chi = tn->child[i];
437 BUG_ON(i >= 1<<tn->bits);
440 /* update emptyChildren */
441 if (n == NULL && chi != NULL)
442 tn->empty_children++;
443 else if (n != NULL && chi == NULL)
444 tn->empty_children--;
446 /* update fullChildren */
/* wasfull == -1 means the caller did not precompute it; derive it here */
448 wasfull = tnode_full(tn, chi);
450 isfull = tnode_full(tn, n);
451 if (wasfull && !isfull)
453 else if (!wasfull && isfull)
/* re-tag the child's parent word, then publish the slot for readers */
457 node_set_parent(n, tn);
459 rcu_assign_pointer(tn->child[i], n);
/*
 * resize - rebalance a single tnode after a change below it.
 * Drops a fully-empty node, inflates (doubles) while the fill factor
 * is above inflate_threshold, halves while below halve_threshold, and
 * finally compresses away a node left with a single child.  Returns
 * the (possibly replaced) node to splice into the parent.
 * NOTE(review): this extract is missing lines (locals, braces, the
 * inflate()/halve() calls and error handling); kept verbatim.
 */
462 static struct node *resize(struct trie *t, struct tnode *tn)
466 struct tnode *old_tn;
467 int inflate_threshold_use;
468 int halve_threshold_use;
474 pr_debug("In tnode_resize %p inflate_threshold=%d threshold=%d\n",
475 tn, inflate_threshold, halve_threshold);
/* No children left: the node itself can be freed (free path elided). */
478 if (tn->empty_children == tnode_child_length(tn)) {
/* Exactly one child left: find it and splice it into our place. */
483 if (tn->empty_children == tnode_child_length(tn) - 1)
484 for (i = 0; i < tnode_child_length(tn); i++) {
491 /* compress one level */
492 node_set_parent(n, NULL);
/*
497 * Double as long as the resulting node has a number of
498 * nonempty nodes that are above the threshold.
 */
/*
502 * From "Implementing a dynamic compressed trie" by Stefan Nilsson of
503 * the Helsinki University of Technology and Matti Tikkanen of Nokia
504 * Telecommunications, page 6:
505 * "A node is doubled if the ratio of non-empty children to all
506 * children in the *doubled* node is at least 'high'."
508 * 'high' in this instance is the variable 'inflate_threshold'. It
509 * is expressed as a percentage, so we multiply it with
510 * tnode_child_length() and instead of multiplying by 2 (since the
511 * child array will be doubled by inflate()) and multiplying
512 * the left-hand side by 100 (to handle the percentage thing) we
513 * multiply the left-hand side by 50.
515 * The left-hand side may look a bit weird: tnode_child_length(tn)
516 * - tn->empty_children is of course the number of non-null children
517 * in the current node. tn->full_children is the number of "full"
518 * children, that is non-null tnodes with a skip value of 0.
519 * All of those will be doubled in the resulting inflated tnode, so
520 * we just count them one extra time here.
522 * A clearer way to write this would be:
524 * to_be_doubled = tn->full_children;
525 * not_to_be_doubled = tnode_child_length(tn) - tn->empty_children -
528 * new_child_length = tnode_child_length(tn) * 2;
530 * new_fill_factor = 100 * (not_to_be_doubled + 2*to_be_doubled) /
532 * if (new_fill_factor >= inflate_threshold)
534 * ...and so on, tho it would mess up the while () loop.
537 * 100 * (not_to_be_doubled + 2*to_be_doubled) / new_child_length >=
541 * 100 * (not_to_be_doubled + 2*to_be_doubled) >=
542 * inflate_threshold * new_child_length
544 * expand not_to_be_doubled and to_be_doubled, and shorten:
545 * 100 * (tnode_child_length(tn) - tn->empty_children +
546 * tn->full_children) >= inflate_threshold * new_child_length
548 * expand new_child_length:
549 * 100 * (tnode_child_length(tn) - tn->empty_children +
550 * tn->full_children) >=
551 * inflate_threshold * tnode_child_length(tn) * 2
554 * 50 * (tn->full_children + tnode_child_length(tn) -
555 * tn->empty_children) >= inflate_threshold *
556 * tnode_child_length(tn)
 */
562 /* Keep root node larger */
565 inflate_threshold_use = inflate_threshold_root;
567 inflate_threshold_use = inflate_threshold;
/* max_resize bounds the number of inflate passes (decl elided). */
571 while ((tn->full_children > 0 && max_resize-- &&
572 50 * (tn->full_children + tnode_child_length(tn) - tn->empty_children) >=
573 inflate_threshold_use * tnode_child_length(tn))) {
/* inflate() failure path: keep the old node, count the skip. */
579 #ifdef CONFIG_IP_FIB_TRIE_STATS
580 t->stats.resize_node_skipped++;
/* Threshold never satisfied within max_resize passes: warn loudly. */
586 if (max_resize < 0) {
588 printk(KERN_WARNING "Fix inflate_threshold_root. Now=%d size=%d bits\n",
589 inflate_threshold_root, tn->bits);
591 printk(KERN_WARNING "Fix inflate_threshold. Now=%d size=%d bits\n",
592 inflate_threshold, tn->bits);
/*
598 * Halve as long as the number of empty children in this
599 * node is above threshold.
 */
603 /* Keep root node larger */
606 halve_threshold_use = halve_threshold_root;
608 halve_threshold_use = halve_threshold;
612 while (tn->bits > 1 && max_resize-- &&
613 100 * (tnode_child_length(tn) - tn->empty_children) <
614 halve_threshold_use * tnode_child_length(tn)) {
/* halve() failure path mirrors the inflate one above. */
620 #ifdef CONFIG_IP_FIB_TRIE_STATS
621 t->stats.resize_node_skipped++;
627 if (max_resize < 0) {
629 printk(KERN_WARNING "Fix halve_threshold_root. Now=%d size=%d bits\n",
630 halve_threshold_root, tn->bits);
632 printk(KERN_WARNING "Fix halve_threshold. Now=%d size=%d bits\n",
633 halve_threshold, tn->bits);
636 /* Only one child remains */
637 if (tn->empty_children == tnode_child_length(tn) - 1)
638 for (i = 0; i < tnode_child_length(tn); i++) {
645 /* compress one level */
647 node_set_parent(n, NULL);
652 return (struct node *) tn;
/*
 * inflate - replace @tn with a node of twice as many children
 * (bits + 1), redistributing the old children.  Full children are
 * split into two half-size tnodes; children with skipped bits drop
 * one level; 1-bit tnodes dissolve into their two children.
 * Returns the new node or ERR_PTR(-ENOMEM) with @tn left untouched.
 * NOTE(review): extract is missing lines (loop braces, error gotos,
 * the tkey_extract_bits branch bodies); kept verbatim.
 */
655 static struct tnode *inflate(struct trie *t, struct tnode *tn)
658 struct tnode *oldtnode = tn;
659 int olen = tnode_child_length(tn);
662 pr_debug("In inflate\n");
664 tn = tnode_new(oldtnode->key, oldtnode->pos, oldtnode->bits + 1);
667 return ERR_PTR(-ENOMEM);
/*
670 * Preallocate and store tnodes before the actual work so we
671 * don't get into an inconsistent state if memory allocation
672 * fails. In case of failure we return the oldnode and inflate
673 * of tnode is ignored.
 */
676 for (i = 0; i < olen; i++) {
677 struct tnode *inode = (struct tnode *) tnode_get_child(oldtnode, i);
/* Only full multi-bit tnodes need a pre-made left/right pair. */
681 inode->pos == oldtnode->pos + oldtnode->bits &&
683 struct tnode *left, *right;
/* m: single bit at inode->pos -- the bit that splits left/right. */
684 t_key m = ~0U << (KEYLENGTH - 1) >> inode->pos;
686 left = tnode_new(inode->key&(~m), inode->pos + 1,
691 right = tnode_new(inode->key|m, inode->pos + 1,
699 put_child(t, tn, 2*i, (struct node *) left);
700 put_child(t, tn, 2*i+1, (struct node *) right);
/* Second pass: actually move every old child into the new node. */
704 for (i = 0; i < olen; i++) {
705 struct node *node = tnode_get_child(oldtnode, i);
706 struct tnode *left, *right;
713 /* A leaf or an internal node with skipped bits */
715 if (IS_LEAF(node) || ((struct tnode *) node)->pos >
716 tn->pos + tn->bits - 1) {
717 if (tkey_extract_bits(node->key, oldtnode->pos + oldtnode->bits,
719 put_child(t, tn, 2*i, node);
721 put_child(t, tn, 2*i+1, node);
725 /* An internal node with two children */
726 inode = (struct tnode *) node;
728 if (inode->bits == 1) {
729 put_child(t, tn, 2*i, inode->child[0]);
730 put_child(t, tn, 2*i+1, inode->child[1]);
736 /* An internal node with more than two children */
738 /* We will replace this node 'inode' with two new
739 * ones, 'left' and 'right', each with half of the
740 * original children. The two new nodes will have
741 * a position one bit further down the key and this
742 * means that the "significant" part of their keys
743 * (see the discussion near the top of this file)
744 * will differ by one bit, which will be "0" in
745 * left's key and "1" in right's key. Since we are
746 * moving the key position by one step, the bit that
747 * we are moving away from - the bit at position
748 * (inode->pos) - is the one that will differ between
749 * left and right. So... we synthesize that bit in the
751 * The mask 'm' below will be a single "one" bit at
752 * the position (inode->pos)
 */
755 /* Use the old key, but set the new significant
 */
/* Fetch the tnodes preallocated in the first pass. */
759 left = (struct tnode *) tnode_get_child(tn, 2*i);
760 put_child(t, tn, 2*i, NULL);
764 right = (struct tnode *) tnode_get_child(tn, 2*i+1);
765 put_child(t, tn, 2*i+1, NULL);
/* Split inode's children evenly between left and right. */
769 size = tnode_child_length(left);
770 for (j = 0; j < size; j++) {
771 put_child(t, left, j, inode->child[j]);
772 put_child(t, right, j, inode->child[j + size]);
774 put_child(t, tn, 2*i, resize(t, left));
775 put_child(t, tn, 2*i+1, resize(t, right));
779 tnode_free(oldtnode);
/* nomem path: free whatever was preallocated, then bail out. */
783 int size = tnode_child_length(tn);
786 for (j = 0; j < size; j++)
788 tnode_free((struct tnode *)tn->child[j]);
792 return ERR_PTR(-ENOMEM);
/*
 * halve - replace @tn with a node of half as many children
 * (bits - 1).  Each pair of old slots collapses into one: an empty
 * pair member lets the other move up directly; two nonempty members
 * get a freshly allocated 1-bit binary tnode holding both.
 * Returns the new node or ERR_PTR(-ENOMEM) with @tn left untouched.
 * NOTE(review): extract is missing lines (braces, error gotos,
 * continue statements); kept verbatim.
 */
796 static struct tnode *halve(struct trie *t, struct tnode *tn)
798 struct tnode *oldtnode = tn;
799 struct node *left, *right;
801 int olen = tnode_child_length(tn);
803 pr_debug("In halve\n");
805 tn = tnode_new(oldtnode->key, oldtnode->pos, oldtnode->bits - 1);
808 return ERR_PTR(-ENOMEM);
/*
811 * Preallocate and store tnodes before the actual work so we
812 * don't get into an inconsistent state if memory allocation
813 * fails. In case of failure we return the oldnode and halve
814 * of tnode is ignored.
 */
817 for (i = 0; i < olen; i += 2) {
818 left = tnode_get_child(oldtnode, i);
819 right = tnode_get_child(oldtnode, i+1);
821 /* Two nonempty children */
825 newn = tnode_new(left->key, tn->pos + tn->bits, 1);
830 put_child(t, tn, i/2, (struct node *)newn);
/* Second pass: move the children into the halved node. */
835 for (i = 0; i < olen; i += 2) {
836 struct tnode *newBinNode;
838 left = tnode_get_child(oldtnode, i);
839 right = tnode_get_child(oldtnode, i+1);
841 /* At least one of the children is empty */
843 if (right == NULL) /* Both are empty */
845 put_child(t, tn, i/2, right);
850 put_child(t, tn, i/2, left);
854 /* Two nonempty children */
855 newBinNode = (struct tnode *) tnode_get_child(tn, i/2);
856 put_child(t, tn, i/2, NULL);
857 put_child(t, newBinNode, 0, left);
858 put_child(t, newBinNode, 1, right);
859 put_child(t, tn, i/2, resize(t, newBinNode));
861 tnode_free(oldtnode);
/* nomem path: free the preallocated binary tnodes, then bail out. */
865 int size = tnode_child_length(tn);
868 for (j = 0; j < size; j++)
870 tnode_free((struct tnode *)tn->child[j]);
874 return ERR_PTR(-ENOMEM);
878 /* readside must use rcu_read_lock currently dump routines
879 via get_fa_head and dump */
881 static struct leaf_info *find_leaf_info(struct leaf *l, int plen)
883 struct hlist_head *head = &l->list;
884 struct hlist_node *node;
885 struct leaf_info *li;
887 hlist_for_each_entry_rcu(li, node, head, hlist)
888 if (li->plen == plen)
894 static inline struct list_head * get_fa_head(struct leaf *l, int plen)
896 struct leaf_info *li = find_leaf_info(l, plen);
904 static void insert_leaf_info(struct hlist_head *head, struct leaf_info *new)
906 struct leaf_info *li = NULL, *last = NULL;
907 struct hlist_node *node;
909 if (hlist_empty(head)) {
910 hlist_add_head_rcu(&new->hlist, head);
912 hlist_for_each_entry(li, node, head, hlist) {
913 if (new->plen > li->plen)
919 hlist_add_after_rcu(&last->hlist, &new->hlist);
921 hlist_add_before_rcu(&new->hlist, &li->hlist);
925 /* rcu_read_lock needs to be held by caller from readside */
/*
 * fib_find_node - exact-match walk for @key; descends while tnodes
 * match, then verifies the final leaf's full key (skipped bits are
 * only checked here, at the end).  Returns the leaf or NULL.
 * NOTE(review): the return-type line and some locals are missing from
 * this extract; code kept verbatim.
 */
928 fib_find_node(struct trie *t, u32 key)
935 n = rcu_dereference(t->trie);
937 while (n != NULL && NODE_TYPE(n) == T_TNODE) {
938 tn = (struct tnode *) n;
/* descend only while the bits up to tn->pos agree with our key */
942 if (tkey_sub_equals(tn->key, pos, tn->pos-pos, key)) {
943 pos = tn->pos + tn->bits;
944 n = tnode_get_child(tn, tkey_extract_bits(key, tn->pos, tn->bits));
948 /* Case we have found a leaf. Compare prefixes */
950 if (n != NULL && IS_LEAF(n) && tkey_equals(key, n->key))
951 return (struct leaf *)n;
/*
 * trie_rebalance - after an insert/remove at @tn, walk back up to the
 * root resizing each tnode and re-linking it into its parent, then
 * resize the root itself.  Returns the (possibly new) root node.
 * NOTE(review): declarations of tp/wasfull and loop braces are
 * missing from this extract; code kept verbatim.
 */
956 static struct node *trie_rebalance(struct trie *t, struct tnode *tn)
959 t_key cindex, key = tn->key;
962 while (tn != NULL && (tp = node_parent((struct node *)tn)) != NULL) {
963 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
/* remember the pre-resize "full" state so reorg can fix the count */
964 wasfull = tnode_full(tp, tnode_get_child(tp, cindex));
965 tn = (struct tnode *) resize (t, (struct tnode *)tn);
966 tnode_put_child_reorg((struct tnode *)tp, cindex,(struct node*)tn, wasfull);
968 tp = node_parent((struct node *) tn);
974 /* Handle last (top) tnode */
976 tn = (struct tnode*) resize(t, (struct tnode *)tn);
978 return (struct node*) tn;
981 /* only used from updater-side */
/*
 * fib_insert_node - find or create the leaf for @key and the
 * fib_alias list head for prefix length @plen, creating leaf_info,
 * leaf and (if needed) a new linking tnode along the way; finishes by
 * rebalancing the trie.  On failure *err is set and NULL is returned.
 * NOTE(review): many lines are missing from this extract (locals,
 * braces, error paths, leaf_new() calls); code kept verbatim.
 */
983 static struct list_head *
984 fib_insert_node(struct trie *t, int *err, u32 key, int plen)
987 struct tnode *tp = NULL, *tn = NULL;
991 struct list_head *fa_head = NULL;
992 struct leaf_info *li;
/*
998 /* If we point to NULL, stop. Either the tree is empty and we should
999 * just put a new leaf in if, or we have reached an empty child slot,
1000 * and we should just put our new leaf in that.
1001 * If we point to a T_TNODE, check if it matches our key. Note that
1002 * a T_TNODE might be skipping any number of bits - its 'pos' need
1003 * not be the parent's 'pos'+'bits'!
1005 * If it does match the current key, get pos/bits from it, extract
1006 * the index from our key, push the T_TNODE and walk the tree.
1008 * If it doesn't, we have to replace it with a new T_TNODE.
1010 * If we point to a T_LEAF, it might or might not have the same key
1011 * as we do. If it does, just change the value, update the T_LEAF's
1012 * value, and return it.
1013 * If it doesn't, we need to replace it with a T_TNODE.
 */
1016 while (n != NULL && NODE_TYPE(n) == T_TNODE) {
1017 tn = (struct tnode *) n;
1021 if (tkey_sub_equals(tn->key, pos, tn->pos-pos, key)) {
1023 pos = tn->pos + tn->bits;
1024 n = tnode_get_child(tn, tkey_extract_bits(key, tn->pos, tn->bits));
1026 BUG_ON(n && node_parent(n) != tn);
/*
1032 * n ----> NULL, LEAF or TNODE
1034 * tp is n's (parent) ----> NULL or TNODE
 */
1037 BUG_ON(tp && IS_LEAF(tp));
1039 /* Case 1: n is a leaf. Compare prefixes */
1041 if (n != NULL && IS_LEAF(n) && tkey_equals(key, n->key)) {
1042 struct leaf *l = (struct leaf *) n;
1044 li = leaf_info_new(plen);
1051 fa_head = &li->falh;
1052 insert_leaf_info(&l->list, li);
/* No exact leaf: allocate a fresh leaf + leaf_info (alloc elided). */
1064 li = leaf_info_new(plen);
1067 tnode_free((struct tnode *) l);
1072 fa_head = &li->falh;
1073 insert_leaf_info(&l->list, li);
1075 if (t->trie && n == NULL) {
1076 /* Case 2: n is NULL, and will just insert a new leaf */
1078 node_set_parent((struct node *)l, tp);
1080 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
1081 put_child(t, (struct tnode *)tp, cindex, (struct node *)l);
1083 /* Case 3: n is a LEAF or a TNODE and the key doesn't match. */
/*
1085 * Add a new tnode here
1086 * first tnode need some special handling
 */
1090 pos = tp->pos+tp->bits;
/* new tnode sits at the first bit where key and n->key disagree */
1095 newpos = tkey_mismatch(key, pos, n->key);
1096 tn = tnode_new(n->key, newpos, 1);
1099 tn = tnode_new(key, newpos, 1); /* First tnode */
1104 tnode_free((struct tnode *) l);
1109 node_set_parent((struct node *)tn, tp);
/* route the new leaf and the old node down opposite sides */
1111 missbit = tkey_extract_bits(key, newpos, 1);
1112 put_child(t, tn, missbit, (struct node *)l);
1113 put_child(t, tn, 1-missbit, n);
1116 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
1117 put_child(t, (struct tnode *)tp, cindex, (struct node *)tn);
1119 rcu_assign_pointer(t->trie, (struct node *)tn); /* First tnode */
1124 if (tp && tp->pos + tp->bits > 32)
1125 printk(KERN_WARNING "fib_trie tp=%p pos=%d, bits=%d, key=%0x plen=%d\n",
1126 tp, tp->pos, tp->bits, key, plen);
1128 /* Rebalance the trie */
1130 rcu_assign_pointer(t->trie, trie_rebalance(t, tp));
/*
1136 * Caller must hold RTNL.
 *
 * fn_trie_insert - add/replace a route described by @cfg in table
 * @tb: build the fib_info, find or create the leaf + alias list,
 * then honor NLM_F_EXCL / NLM_F_REPLACE / NLM_F_APPEND / NLM_F_CREATE
 * semantics before linking the new fib_alias and sending the netlink
 * notification.
 * NOTE(review): many lines are missing from this extract (locals,
 * braces, error labels, rt_cache_flush calls); code kept verbatim.
 */
1138 static int fn_trie_insert(struct fib_table *tb, struct fib_config *cfg)
1140 struct trie *t = (struct trie *) tb->tb_data;
1141 struct fib_alias *fa, *new_fa;
1142 struct list_head *fa_head = NULL;
1143 struct fib_info *fi;
1144 int plen = cfg->fc_dst_len;
1145 u8 tos = cfg->fc_tos;
1153 key = ntohl(cfg->fc_dst);
1155 pr_debug("Insert table=%u %08x/%d\n", tb->tb_id, key, plen);
/* destination must not have host bits set below the mask (check elided) */
1157 mask = ntohl(inet_make_mask(plen));
1164 fi = fib_create_info(cfg);
1170 l = fib_find_node(t, key);
1174 fa_head = get_fa_head(l, plen);
1175 fa = fib_find_alias(fa_head, tos, fi->fib_priority);
/*
1178 /* Now fa, if non-NULL, points to the first fib alias
1179 * with the same keys [prefix,tos,priority], if such key already
1180 * exists or to the node before which we will insert new one.
1182 * If fa is NULL, we will need to allocate a new one and
1183 * insert to the head of f.
1185 * If f is NULL, no fib node matched the destination key
1186 * and we need to allocate a new one of those as well.
 */
1189 if (fa && fa->fa_info->fib_priority == fi->fib_priority) {
1190 struct fib_alias *fa_orig;
1193 if (cfg->fc_nlflags & NLM_F_EXCL)
1196 if (cfg->fc_nlflags & NLM_F_REPLACE) {
1197 struct fib_info *fi_drop;
1200 if (fi->fib_treeref > 1)
1204 new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
/* swap the alias in place; old fib_info dropped after RCU replace */
1208 fi_drop = fa->fa_info;
1209 new_fa->fa_tos = fa->fa_tos;
1210 new_fa->fa_info = fi;
1211 new_fa->fa_type = cfg->fc_type;
1212 new_fa->fa_scope = cfg->fc_scope;
1213 state = fa->fa_state;
1214 new_fa->fa_state &= ~FA_S_ACCESSED;
1216 list_replace_rcu(&fa->fa_list, &new_fa->fa_list);
1217 alias_free_mem_rcu(fa);
1219 fib_release_info(fi_drop);
1220 if (state & FA_S_ACCESSED)
1222 rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen,
1223 tb->tb_id, &cfg->fc_nlinfo, NLM_F_REPLACE);
/*
1227 /* Error if we find a perfect match which
1228 * uses the same scope, type, and nexthop
 */
1232 list_for_each_entry(fa, fa_orig->fa_list.prev, fa_list) {
1233 if (fa->fa_tos != tos)
1235 if (fa->fa_info->fib_priority != fi->fib_priority)
1237 if (fa->fa_type == cfg->fc_type &&
1238 fa->fa_scope == cfg->fc_scope &&
1239 fa->fa_info == fi) {
1243 if (!(cfg->fc_nlflags & NLM_F_APPEND))
1247 if (!(cfg->fc_nlflags & NLM_F_CREATE))
1251 new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
1255 new_fa->fa_info = fi;
1256 new_fa->fa_tos = tos;
1257 new_fa->fa_type = cfg->fc_type;
1258 new_fa->fa_scope = cfg->fc_scope;
1259 new_fa->fa_state = 0;
/*
1261 * Insert new entry to the list.
 */
/* no leaf yet for this key: create it (and its alias head) now */
1266 fa_head = fib_insert_node(t, &err, key, plen);
1268 goto out_free_new_fa;
1271 list_add_tail_rcu(&new_fa->fa_list,
1272 (fa ? &fa->fa_list : fa_head));
1275 rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id,
1276 &cfg->fc_nlinfo, 0);
/* error unwinding labels (partially elided in this extract) */
1281 kmem_cache_free(fn_alias_kmem, new_fa);
1283 fib_release_info(fi);
1289 /* should be called with rcu_read_lock */
/*
 * check_leaf - try to semantically match flow @flp against every
 * prefix length stored on leaf @l (longest first).  Returns <= 0 when
 * a match (or hard error) was found, with *plen updated; positive
 * means "keep searching".
 * NOTE(review): locals i/err and some braces are missing from this
 * extract; code kept verbatim.
 */
1290 static inline int check_leaf(struct trie *t, struct leaf *l,
1291 t_key key, int *plen, const struct flowi *flp,
1292 struct fib_result *res)
1296 struct leaf_info *li;
1297 struct hlist_head *hhead = &l->list;
1298 struct hlist_node *node;
1300 hlist_for_each_entry_rcu(li, node, hhead, hlist) {
/* i is the prefix length of this leaf_info (assignment elided) */
1302 mask = inet_make_mask(i);
1303 if (l->key != (key & ntohl(mask)))
1306 if ((err = fib_semantic_match(&li->falh, flp, res, htonl(l->key), mask, i)) <= 0) {
1308 #ifdef CONFIG_IP_FIB_TRIE_STATS
1309 t->stats.semantic_match_passed++;
1313 #ifdef CONFIG_IP_FIB_TRIE_STATS
1314 t->stats.semantic_match_miss++;
1321 fn_trie_lookup(struct fib_table *tb, const struct flowi *flp, struct fib_result *res)
1323 struct trie *t = (struct trie *) tb->tb_data;
1328 t_key key = ntohl(flp->fl4_dst);
1331 int current_prefix_length = KEYLENGTH;
1333 t_key node_prefix, key_prefix, pref_mismatch;
1338 n = rcu_dereference(t->trie);
1342 #ifdef CONFIG_IP_FIB_TRIE_STATS
1348 if ((ret = check_leaf(t, (struct leaf *)n, key, &plen, flp, res)) <= 0)
1352 pn = (struct tnode *) n;
1360 cindex = tkey_extract_bits(mask_pfx(key, current_prefix_length),
1363 n = tnode_get_child(pn, cindex);
1366 #ifdef CONFIG_IP_FIB_TRIE_STATS
1367 t->stats.null_node_hit++;
1373 if ((ret = check_leaf(t, (struct leaf *)n, key, &plen, flp, res)) <= 0)
1381 cn = (struct tnode *)n;
1384 * It's a tnode, and we can do some extra checks here if we
1385 * like, to avoid descending into a dead-end branch.
1386 * This tnode is in the parent's child array at index
1387 * key[p_pos..p_pos+p_bits] but potentially with some bits
1388 * chopped off, so in reality the index may be just a
1389 * subprefix, padded with zero at the end.
1390 * We can also take a look at any skipped bits in this
1391 * tnode - everything up to p_pos is supposed to be ok,
1392 * and the non-chopped bits of the index (se previous
1393 * paragraph) are also guaranteed ok, but the rest is
1394 * considered unknown.
1396 * The skipped bits are key[pos+bits..cn->pos].
1399 /* If current_prefix_length < pos+bits, we are already doing
1400 * actual prefix matching, which means everything from
1401 * pos+(bits-chopped_off) onward must be zero along some
1402 * branch of this subtree - otherwise there is *no* valid
1403 * prefix present. Here we can only check the skipped
1404 * bits. Remember, since we have already indexed into the
1405 * parent's child array, we know that the bits we chopped of
1409 /* NOTA BENE: CHECKING ONLY SKIPPED BITS FOR THE NEW NODE HERE */
1411 if (current_prefix_length < pos+bits) {
1412 if (tkey_extract_bits(cn->key, current_prefix_length,
1413 cn->pos - current_prefix_length) != 0 ||
1419 * If chopped_off=0, the index is fully validated and we
1420 * only need to look at the skipped bits for this, the new,
1421 * tnode. What we actually want to do is to find out if
1422 * these skipped bits match our key perfectly, or if we will
1423 * have to count on finding a matching prefix further down,
1424 * because if we do, we would like to have some way of
1425 * verifying the existence of such a prefix at this point.
1428 /* The only thing we can do at this point is to verify that
1429 * any such matching prefix can indeed be a prefix to our
1430 * key, and if the bits in the node we are inspecting that
1431 * do not match our key are not ZERO, this cannot be true.
1432 * Thus, find out where there is a mismatch (before cn->pos)
1433 * and verify that all the mismatching bits are zero in the
1437 /* Note: We aren't very concerned about the piece of the key
1438 * that precede pn->pos+pn->bits, since these have already been
1439 * checked. The bits after cn->pos aren't checked since these are
1440 * by definition "unknown" at this point. Thus, what we want to
1441 * see is if we are about to enter the "prefix matching" state,
1442 * and in that case verify that the skipped bits that will prevail
1443 * throughout this subtree are zero, as they have to be if we are
1444 * to find a matching prefix.
1447 node_prefix = mask_pfx(cn->key, cn->pos);
1448 key_prefix = mask_pfx(key, cn->pos);
1449 pref_mismatch = key_prefix^node_prefix;
1452 /* In short: If skipped bits in this node do not match the search
1453 * key, enter the "prefix matching" state directly.
1455 if (pref_mismatch) {
1456 while (!(pref_mismatch & (1<<(KEYLENGTH-1)))) {
1458 pref_mismatch = pref_mismatch <<1;
1460 key_prefix = tkey_extract_bits(cn->key, mp, cn->pos-mp);
1462 if (key_prefix != 0)
1465 if (current_prefix_length >= cn->pos)
1466 current_prefix_length = mp;
1469 pn = (struct tnode *)n; /* Descend */
1476 /* As zero don't change the child key (cindex) */
1477 while ((chopped_off <= pn->bits) && !(cindex & (1<<(chopped_off-1))))
1480 /* Decrease current_... with bits chopped off */
1481 if (current_prefix_length > pn->pos + pn->bits - chopped_off)
1482 current_prefix_length = pn->pos + pn->bits - chopped_off;
1485 * Either we do the actual chop off according or if we have
1486 * chopped off all bits in this tnode walk up to our parent.
1489 if (chopped_off <= pn->bits) {
1490 cindex &= ~(1 << (chopped_off-1));
1492 struct tnode *parent = node_parent((struct node *) pn);
1496 /* Get Child's index */
1497 cindex = tkey_extract_bits(pn->key, parent->pos, parent->bits);
1501 #ifdef CONFIG_IP_FIB_TRIE_STATS
1502 t->stats.backtrack++;
1514 /* only called from updater side */
/*
 * trie_leaf_remove - walk down from the trie root following @key and,
 * if the leaf reached matches the full key, free it, clear the parent's
 * child slot and rebalance (or NULL out t->trie when the leaf was the
 * root). NOTE(review): this listing elides interior lines, so the
 * declarations of 'l'/'cindex' and the return statements are not
 * visible here — return semantics TODO confirm against callers.
 */
1515 static int trie_leaf_remove(struct trie *t, t_key key)
1518 struct tnode *tp = NULL;
1519 struct node *n = t->trie;
1522 pr_debug("entering trie_leaf_remove(%p)\n", n);
1524 /* Note that in the case skipped bits, those bits are *not* checked!
1525 * When we finish this, we will have NULL or a T_LEAF, and the
1526 * T_LEAF may or may not match our key.
/* Descend: since skipped bits are not verified on the way down, the
 * final leaf must be compared against the full key below. */
1529 while (n != NULL && IS_TNODE(n)) {
1530 struct tnode *tn = (struct tnode *) n;
1532 n = tnode_get_child(tn ,tkey_extract_bits(key, tn->pos, tn->bits));
1534 BUG_ON(n && node_parent(n) != tn);
1536 l = (struct leaf *) n;
1538 if (!n || !tkey_equals(l->key, key))
1543 * Remove the leaf and rebalance the tree
1548 tp = node_parent(n);
1549 tnode_free((struct tnode *) n);
/* With a parent: drop the child pointer and publish the rebalanced
 * subtree via RCU; without one, the trie becomes empty. */
1552 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
1553 put_child(t, (struct tnode *)tp, cindex, NULL);
1554 rcu_assign_pointer(t->trie, trie_rebalance(t, tp));
1556 rcu_assign_pointer(t->trie, NULL);
1562 * Caller must hold RTNL.
/*
 * fn_trie_delete - remove the route described by @cfg from table @tb.
 * Locates the leaf for the destination prefix, scans its alias list for
 * a TOS/type/scope/protocol/nexthop match, unlinks the alias RCU-safely
 * and removes the leaf_info/leaf once they become empty.
 * NOTE(review): the listing elides lines (key/mask/l declarations,
 * early-exit error checks, rt-cache flush), so failure paths are not
 * fully visible here.
 */
1564 static int fn_trie_delete(struct fib_table *tb, struct fib_config *cfg)
1566 struct trie *t = (struct trie *) tb->tb_data;
1568 int plen = cfg->fc_dst_len;
1569 u8 tos = cfg->fc_tos;
1570 struct fib_alias *fa, *fa_to_delete;
1571 struct list_head *fa_head;
1573 struct leaf_info *li;
1578 key = ntohl(cfg->fc_dst);
1579 mask = ntohl(inet_make_mask(plen));
1585 l = fib_find_node(t, key);
1590 fa_head = get_fa_head(l, plen);
1591 fa = fib_find_alias(fa_head, tos, 0);
1596 pr_debug("Deleting %08x/%d tos=%d t=%p\n", key, plen, tos, t);
1598 fa_to_delete = NULL;
/* Resume the scan from the element before 'fa' so list_for_each_entry
 * starts at the first alias with the requested TOS. */
1599 fa_head = fa->fa_list.prev;
1601 list_for_each_entry(fa, fa_head, fa_list) {
1602 struct fib_info *fi = fa->fa_info;
1604 if (fa->fa_tos != tos)
/* Wildcards: zero fc_type/fc_protocol and RT_SCOPE_NOWHERE each
 * match any alias value for that field. */
1607 if ((!cfg->fc_type || fa->fa_type == cfg->fc_type) &&
1608 (cfg->fc_scope == RT_SCOPE_NOWHERE ||
1609 fa->fa_scope == cfg->fc_scope) &&
1610 (!cfg->fc_protocol ||
1611 fi->fib_protocol == cfg->fc_protocol) &&
1612 fib_nh_match(cfg, fi) == 0) {
/* Notify netlink listeners before tearing the entry down. */
1622 rtmsg_fib(RTM_DELROUTE, htonl(key), fa, plen, tb->tb_id,
1623 &cfg->fc_nlinfo, 0);
1625 l = fib_find_node(t, key);
1626 li = find_leaf_info(l, plen);
1628 list_del_rcu(&fa->fa_list);
/* Last alias for this prefix length: drop the leaf_info too. */
1630 if (list_empty(fa_head)) {
1631 hlist_del_rcu(&li->hlist);
/* No leaf_info left at all: remove the leaf from the trie. */
1635 if (hlist_empty(&l->list))
1636 trie_leaf_remove(t, key);
1638 if (fa->fa_state & FA_S_ACCESSED)
1641 fib_release_info(fa->fa_info);
1642 alias_free_mem_rcu(fa);
/*
 * trie_flush_list - purge aliases whose fib_info is marked RTNH_F_DEAD
 * from @head. Presumably returns the number of entries removed (the
 * 'found' counter lines are elided in this listing — TODO confirm).
 */
1646 static int trie_flush_list(struct trie *t, struct list_head *head)
1648 struct fib_alias *fa, *fa_node;
1651 list_for_each_entry_safe(fa, fa_node, head, fa_list) {
1652 struct fib_info *fi = fa->fa_info;
1654 if (fi && (fi->fib_flags & RTNH_F_DEAD)) {
/* Unlink RCU-safely, then release the fib_info reference and
 * schedule the alias for RCU-deferred freeing. */
1655 list_del_rcu(&fa->fa_list);
1656 fib_release_info(fa->fa_info);
1657 alias_free_mem_rcu(fa);
/*
 * trie_flush_leaf - flush dead aliases from every leaf_info hanging off
 * leaf @l, dropping leaf_infos whose alias list becomes empty.
 * Accumulates trie_flush_list() results in 'found' (declaration and
 * return are elided in this listing).
 */
1664 static int trie_flush_leaf(struct trie *t, struct leaf *l)
1667 struct hlist_head *lih = &l->list;
1668 struct hlist_node *node, *tmp;
1669 struct leaf_info *li = NULL;
1671 hlist_for_each_entry_safe(li, node, tmp, lih, hlist) {
1672 found += trie_flush_list(t, &li->falh);
1674 if (list_empty(&li->falh)) {
1675 hlist_del_rcu(&li->hlist);
1682 /* rcu_read_lock needs to be held by caller from readside */
/*
 * nextleaf - in-order successor of @thisleaf in the trie, or the first
 * leaf when @thisleaf is NULL. Used by flush/dump to iterate all
 * leaves. Returns NULL when the whole trie has been visited.
 * NOTE(review): elided lines hide the handling of an empty trie and
 * the NULL-thisleaf start case.
 */
1684 static struct leaf *nextleaf(struct trie *t, struct leaf *thisleaf)
1686 struct node *c = (struct node *) thisleaf;
1689 struct node *trie = rcu_dereference(t->trie);
1695 if (IS_LEAF(trie)) /* trie w. just a leaf */
1696 return (struct leaf *) trie;
1698 p = (struct tnode*) trie; /* Start */
1705 /* Find the next child of the parent */
1707 pos = 1 + tkey_extract_bits(c->key, p->pos, p->bits);
1711 last = 1 << p->bits;
1712 for (idx = pos; idx < last ; idx++) {
1713 c = rcu_dereference(p->child[idx]);
/* Descend if tnode */
1719 while (IS_TNODE(c)) {
1720 p = (struct tnode *) c;
1723 /* Rightmost non-NULL branch */
/* NOTE(review): the loop condition reads p->child[idx] BEFORE
 * checking idx < (1<<p->bits); on the last NULL slot this reads
 * one element past the child array — verify against the current
 * upstream implementation. */
1724 if (p && IS_TNODE(p))
1725 while (!(c = rcu_dereference(p->child[idx]))
1726 && idx < (1<<p->bits)) idx++;
1728 /* Done with this tnode? */
1729 if (idx >= (1 << p->bits) || !c)
1732 return (struct leaf *) c;
1735 /* No more children go up one step */
1736 c = (struct node *) p;
1739 return NULL; /* Ready. Root of trie */
1743 * Caller must hold RTNL.
/*
 * fn_trie_flush - remove all RTNH_F_DEAD routes from the table.
 * 'll' trails one leaf behind the iterator so that a leaf emptied by
 * trie_flush_leaf() can be removed without disturbing nextleaf()'s
 * walk; the final trailing leaf is handled after the loop.
 * (The 'll = l' update and the return of 'found' are elided here.)
 */
1745 static int fn_trie_flush(struct fib_table *tb)
1747 struct trie *t = (struct trie *) tb->tb_data;
1748 struct leaf *ll = NULL, *l = NULL;
1751 for (h = 0; (l = nextleaf(t, l)) != NULL; h++) {
1752 found += trie_flush_leaf(t, l);
1754 if (ll && hlist_empty(&ll->list))
1755 trie_leaf_remove(t, ll->key);
/* Handle the last leaf, which the loop body never trailed past. */
1759 if (ll && hlist_empty(&ll->list))
1760 trie_leaf_remove(t, ll->key);
1762 pr_debug("trie_flush found=%d\n", found);
/*
 * fn_trie_select_default - choose a default route among the aliases of
 * the /0 prefix (leaf with key 0), preferring live unicast gateways at
 * matching scope/priority; falls back to 'last_resort' when all
 * candidates look dead. Updates tb->tb_default as a cache of the
 * chosen order. (Return type line and several declarations/early
 * returns are elided in this listing.)
 */
1767 fn_trie_select_default(struct fib_table *tb, const struct flowi *flp, struct fib_result *res)
1769 struct trie *t = (struct trie *) tb->tb_data;
1770 int order, last_idx;
1771 struct fib_info *fi = NULL;
1772 struct fib_info *last_resort;
1773 struct fib_alias *fa = NULL;
1774 struct list_head *fa_head;
/* key 0 / plen 0 == the default-route leaf. */
1783 l = fib_find_node(t, 0);
1787 fa_head = get_fa_head(l, 0);
1791 if (list_empty(fa_head))
1794 list_for_each_entry_rcu(fa, fa_head, fa_list) {
1795 struct fib_info *next_fi = fa->fa_info;
1797 if (fa->fa_scope != res->scope ||
1798 fa->fa_type != RTN_UNICAST)
1801 if (next_fi->fib_priority > res->fi->fib_priority)
/* Only consider entries with a link-scope gateway nexthop. */
1803 if (!next_fi->fib_nh[0].nh_gw ||
1804 next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
1806 fa->fa_state |= FA_S_ACCESSED;
1809 if (next_fi != res->fi)
1811 } else if (!fib_detect_death(fi, order, &last_resort,
1812 &last_idx, tb->tb_default)) {
1813 fib_result_assign(res, fi);
1814 tb->tb_default = order;
1820 if (order <= 0 || fi == NULL) {
1821 tb->tb_default = -1;
1825 if (!fib_detect_death(fi, order, &last_resort, &last_idx,
1827 fib_result_assign(res, fi);
1828 tb->tb_default = order;
/* Everything looked dead: fall back to the last resort entry. */
1832 fib_result_assign(res, last_resort);
1833 tb->tb_default = last_idx;
/*
 * fn_trie_dump_fa - emit one netlink route record per alias in @fah
 * for prefix @key/@plen into @skb. Returns <0 when the skb fills up
 * so the dump can resume later (resume bookkeeping via cb->args is
 * elided in this listing).
 */
1838 static int fn_trie_dump_fa(t_key key, int plen, struct list_head *fah, struct fib_table *tb,
1839 struct sk_buff *skb, struct netlink_callback *cb)
1842 struct fib_alias *fa;
1844 __be32 xkey = htonl(key);
1849 /* rcu_read_lock is held by caller */
1851 list_for_each_entry_rcu(fa, fah, fa_list) {
1856 BUG_ON(!fa->fa_info);
1858 if (fib_dump_info(skb, NETLINK_CB(cb->skb).pid,
1867 fa->fa_info, 0) < 0) {
/*
 * fn_trie_dump_plen - dump every alias list of prefix length @plen by
 * walking all leaves with nextleaf(). Clears the deeper cb->args slots
 * when moving to a new leaf so a resumed dump starts that leaf from
 * scratch. (Skip-to-resume-point logic is elided in this listing.)
 */
1877 static int fn_trie_dump_plen(struct trie *t, int plen, struct fib_table *tb, struct sk_buff *skb,
1878 struct netlink_callback *cb)
1881 struct list_head *fa_head;
1882 struct leaf *l = NULL;
1886 for (h = 0; (l = nextleaf(t, l)) != NULL; h++) {
/* New leaf: reset per-leaf resume state (args[4..]). */
1890 memset(&cb->args[4], 0,
1891 sizeof(cb->args) - 4*sizeof(cb->args[0]));
1893 fa_head = get_fa_head(l, plen);
1898 if (list_empty(fa_head))
1901 if (fn_trie_dump_fa(l->key, plen, fa_head, tb, skb, cb)<0) {
/*
 * fn_trie_dump - netlink dump entry point for the whole table; iterates
 * prefix lengths from /32 down to /0 (32-m), resetting per-plen resume
 * state (args[3..]) between lengths.
 */
1910 static int fn_trie_dump(struct fib_table *tb, struct sk_buff *skb, struct netlink_callback *cb)
1913 struct trie *t = (struct trie *) tb->tb_data;
1918 for (m = 0; m <= 32; m++) {
1922 memset(&cb->args[3], 0,
1923 sizeof(cb->args) - 3*sizeof(cb->args[0]));
1925 if (fn_trie_dump_plen(t, 32-m, tb, skb, cb)<0) {
1938 /* Fix more generic FIB names for init later */
/*
 * fib_hash_init - allocate and initialize a fib_table backed by an
 * LC-trie (the name is kept for compatibility with the fib_hash
 * backend, per the comment above). Lazily creates the fib_alias slab
 * on first call, then wires the trie operations into the table vtable.
 * NOTE(review): the NULL-checks after kmem_cache_create/kmalloc are on
 * lines elided from this listing — TODO confirm.
 */
1940 struct fib_table *fib_hash_init(u32 id)
1942 struct fib_table *tb;
1945 if (fn_alias_kmem == NULL)
1946 fn_alias_kmem = kmem_cache_create("ip_fib_alias",
1947 sizeof(struct fib_alias),
1948 0, SLAB_HWCACHE_ALIGN,
/* The trie lives in the flexible tb_data area after the table. */
1951 tb = kmalloc(sizeof(struct fib_table) + sizeof(struct trie),
1957 tb->tb_default = -1;
1958 tb->tb_lookup = fn_trie_lookup;
1959 tb->tb_insert = fn_trie_insert;
1960 tb->tb_delete = fn_trie_delete;
1961 tb->tb_flush = fn_trie_flush;
1962 tb->tb_select_default = fn_trie_select_default;
1963 tb->tb_dump = fn_trie_dump;
1965 t = (struct trie *) tb->tb_data;
1966 memset(t, 0, sizeof(*t));
/* Announce once, when the (always-present) local table is created. */
1968 if (id == RT_TABLE_LOCAL)
1969 printk(KERN_INFO "IPv4 FIB: Using LC-trie version %s\n", VERSION);
1974 #ifdef CONFIG_PROC_FS
1975 /* Depth first Trie walk iterator */
/* Cursor for the /proc seq_file walks: current tnode plus the index of
 * the next child to visit (index/depth fields are on elided lines). */
1976 struct fib_trie_iter {
1977 struct seq_net_private p;
1978 struct trie *trie_local, *trie_main;
1979 struct tnode *tnode;
/*
 * fib_trie_get_next - advance the depth-first iterator one node.
 * Scans the current tnode's children from iter->index; descends into
 * a tnode child, returns a leaf child, and pops to the parent when the
 * children are exhausted. Returns NULL at end of walk (the pop loop
 * and depth updates are partly elided in this listing).
 */
1985 static struct node *fib_trie_get_next(struct fib_trie_iter *iter)
1987 struct tnode *tn = iter->tnode;
1988 unsigned cindex = iter->index;
1991 /* A single entry routing table */
1995 pr_debug("get_next iter={node=%p index=%d depth=%d}\n",
1996 iter->tnode, iter->index, iter->depth);
1998 while (cindex < (1<<tn->bits)) {
1999 struct node *n = tnode_get_child(tn, cindex);
/* Remember where to resume in this tnode. */
2004 iter->index = cindex + 1;
2006 /* push down one level */
2007 iter->tnode = (struct tnode *) n;
2017 /* Current node exhausted, pop back up */
2018 p = node_parent((struct node *)tn);
/* Resume the parent just after the slot we came from. */
2020 cindex = tkey_extract_bits(tn->key, p->pos, p->bits)+1;
/*
 * fib_trie_get_first - position the iterator at the trie root and
 * return the first node (parameter list and the leaf/empty-trie cases
 * are on elided lines).
 */
2030 static struct node *fib_trie_get_first(struct fib_trie_iter *iter,
2038 n = rcu_dereference(t->trie);
2045 iter->tnode = (struct tnode *) n;
2060 static void trie_collect_stats(struct trie *t, struct trie_stat *s)
2063 struct fib_trie_iter iter;
2065 memset(s, 0, sizeof(*s));
2068 for (n = fib_trie_get_first(&iter, t); n;
2069 n = fib_trie_get_next(&iter)) {
2072 s->totdepth += iter.depth;
2073 if (iter.depth > s->maxdepth)
2074 s->maxdepth = iter.depth;
2076 const struct tnode *tn = (const struct tnode *) n;
2080 if (tn->bits < MAX_STAT_DEPTH)
2081 s->nodesizes[tn->bits]++;
2083 for (i = 0; i < (1<<tn->bits); i++)
2092 * This outputs /proc/net/fib_triestats
/*
 * trie_show_stats - render a trie_stat snapshot (average/max depth,
 * leaf and internal node counts, node-size histogram, pointer and
 * memory totals) into @seq.
 */
2094 static void trie_show_stats(struct seq_file *seq, struct trie_stat *stat)
2096 unsigned i, max, pointers, bytes, avdepth;
/* Depth scaled by 100 so it can be printed with two decimals below.
 * NOTE(review): division by stat->leaves — the zero-leaves guard, if
 * any, is on an elided line. */
2099 avdepth = stat->totdepth*100 / stat->leaves;
2103 seq_printf(seq, "\tAver depth: %u.%02d\n", avdepth / 100, avdepth % 100 );
2104 seq_printf(seq, "\tMax depth: %u\n", stat->maxdepth);
2106 seq_printf(seq, "\tLeaves: %u\n", stat->leaves);
2108 bytes = sizeof(struct leaf) * stat->leaves;
2109 seq_printf(seq, "\tInternal nodes: %u\n\t", stat->tnodes);
2110 bytes += sizeof(struct tnode) * stat->tnodes;
/* Trim trailing empty buckets from the histogram. */
2112 max = MAX_STAT_DEPTH;
2113 while (max > 0 && stat->nodesizes[max-1] == 0)
2117 for (i = 1; i <= max; i++)
2118 if (stat->nodesizes[i] != 0) {
2119 seq_printf(seq, " %u: %u", i, stat->nodesizes[i]);
2120 pointers += (1<<i) * stat->nodesizes[i];
2122 seq_putc(seq, '\n');
2123 seq_printf(seq, "\tPointers: %u\n", pointers);
2125 bytes += sizeof(struct node *) * pointers;
2126 seq_printf(seq, "Null ptrs: %u\n", stat->nullpointers);
2127 seq_printf(seq, "Total size: %u kB\n", (bytes + 1023) / 1024);
2129 #ifdef CONFIG_IP_FIB_TRIE_STATS
/* NOTE(review): 't' is not a parameter of this function — this
 * counters block likely belongs to a separate function whose header
 * was elided from this listing; verify before editing. */
2130 seq_printf(seq, "Counters:\n---------\n");
2131 seq_printf(seq,"gets = %d\n", t->stats.gets);
2132 seq_printf(seq,"backtracks = %d\n", t->stats.backtrack);
2133 seq_printf(seq,"semantic match passed = %d\n", t->stats.semantic_match_passed);
2134 seq_printf(seq,"semantic match miss = %d\n", t->stats.semantic_match_miss);
2135 seq_printf(seq,"null node hit= %d\n", t->stats.null_node_hit);
2136 seq_printf(seq,"skipped node resize = %d\n", t->stats.resize_node_skipped);
/* Counters are reset after each read. */
2138 memset(&(t->stats), 0, sizeof(t->stats));
2140 #endif /* CONFIG_IP_FIB_TRIE_STATS */
/*
 * fib_triestat_seq_show - single_open show handler for
 * /proc/net/fib_triestat: collect and print stats for the local and
 * main tables of this netns. One trie_stat buffer is reused for both
 * tables. (kfree and NULL-table guards are on elided lines.)
 */
2143 static int fib_triestat_seq_show(struct seq_file *seq, void *v)
2145 struct net *net = (struct net *)seq->private;
2146 struct trie *trie_local, *trie_main;
2147 struct trie_stat *stat;
2148 struct fib_table *tb;
2151 tb = fib_get_table(net, RT_TABLE_LOCAL);
2153 trie_local = (struct trie *) tb->tb_data;
2156 tb = fib_get_table(net, RT_TABLE_MAIN);
2158 trie_main = (struct trie *) tb->tb_data;
2161 stat = kmalloc(sizeof(*stat), GFP_KERNEL);
2165 seq_printf(seq, "Basic info: size of leaf: %Zd bytes, size of tnode: %Zd bytes.\n",
2166 sizeof(struct leaf), sizeof(struct tnode));
2169 seq_printf(seq, "Local:\n");
2170 trie_collect_stats(trie_local, stat);
2171 trie_show_stats(seq, stat);
2175 seq_printf(seq, "Main:\n");
2176 trie_collect_stats(trie_main, stat);
2177 trie_show_stats(seq, stat);
/*
 * fib_triestat_seq_open - open handler: grab a netns reference from
 * the proc inode and hand it to single_open as seq->private. The
 * reference is dropped in fib_triestat_seq_release().
 */
2184 static int fib_triestat_seq_open(struct inode *inode, struct file *file)
2189 net = get_proc_net(inode);
2192 err = single_open(file, fib_triestat_seq_show, net);
/* Release handler: drop the netns reference taken in open, then let
 * single_release tear down the seq_file. */
2200 static int fib_triestat_seq_release(struct inode *ino, struct file *f)
2202 struct seq_file *seq = f->private_data;
2203 put_net(seq->private);
2204 return single_release(ino, f);
/* File operations for /proc/net/fib_triestat (read line elided). */
2207 static const struct file_operations fib_triestat_fops = {
2208 .owner = THIS_MODULE,
2209 .open = fib_triestat_seq_open,
2211 .llseek = seq_lseek,
2212 .release = fib_triestat_seq_release,
/*
 * fib_trie_get_idx - return the node at linear position 'pos' of the
 * combined walk: local trie first, then main trie (position matching
 * and parameter list are on elided lines).
 */
2215 static struct node *fib_trie_get_idx(struct fib_trie_iter *iter,
2221 for (n = fib_trie_get_first(iter, iter->trie_local);
2222 n; ++idx, n = fib_trie_get_next(iter)) {
2227 for (n = fib_trie_get_first(iter, iter->trie_main);
2228 n; ++idx, n = fib_trie_get_next(iter)) {
/*
 * fib_trie_seq_start - seq_file ->start: lazily cache the local/main
 * tries for this netns in the iterator, then return SEQ_START_TOKEN
 * at *pos == 0 or reposition to *pos - 1. (The rcu_read_lock and the
 * *pos test are on elided lines — TODO confirm locking here.)
 */
2235 static void *fib_trie_seq_start(struct seq_file *seq, loff_t *pos)
2237 struct fib_trie_iter *iter = seq->private;
2238 struct fib_table *tb;
2240 if (!iter->trie_local) {
2241 tb = fib_get_table(iter->p.net, RT_TABLE_LOCAL);
2243 iter->trie_local = (struct trie *) tb->tb_data;
2245 if (!iter->trie_main) {
2246 tb = fib_get_table(iter->p.net, RT_TABLE_MAIN);
2248 iter->trie_main = (struct trie *) tb->tb_data;
2252 return SEQ_START_TOKEN;
2253 return fib_trie_get_idx(iter, *pos - 1);
/*
 * fib_trie_seq_next - seq_file ->next: advance the DFS walk; when the
 * local trie is exhausted, continue into the main trie.
 */
2256 static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2258 struct fib_trie_iter *iter = seq->private;
2262 if (v == SEQ_START_TOKEN)
2263 return fib_trie_get_idx(iter, 0);
2265 v = fib_trie_get_next(iter);
2270 /* continue scan in next trie */
2271 if (iter->trie == iter->trie_local)
2272 return fib_trie_get_first(iter, iter->trie_main);
/* seq_file ->stop hook; the body is elided in this listing
 * (NOTE(review): presumably it drops the RCU read lock taken at
 * ->start — confirm against the full source). */
2277 static void fib_trie_seq_stop(struct seq_file *seq, void *v)
/* Emit @n levels of three-space indentation into @seq. */
2282 static void seq_indent(struct seq_file *seq, int n)
2284 while (n-- > 0) seq_puts(seq, "   ");
/*
 * rtn_scope - human-readable name for a route scope; unknown values
 * are formatted into a shared static buffer (not reentrant — fine for
 * the single-threaded seq_file readers this serves, but callers must
 * not cache the returned pointer across calls).
 */
2287 static inline const char *rtn_scope(enum rt_scope_t s)
2289 static char buf[32];
2292 case RT_SCOPE_UNIVERSE: return "universe";
2293 case RT_SCOPE_SITE: return "site";
2294 case RT_SCOPE_LINK: return "link";
2295 case RT_SCOPE_HOST: return "host";
2296 case RT_SCOPE_NOWHERE: return "nowhere";
2298 snprintf(buf, sizeof(buf), "scope=%d", s);
/* Display names for route types, indexed by the RTN_* enum values;
 * gaps (e.g. RTN_NAT) stay NULL and are handled by rtn_type(). */
2303 static const char *rtn_type_names[__RTN_MAX] = {
2304 [RTN_UNSPEC] = "UNSPEC",
2305 [RTN_UNICAST] = "UNICAST",
2306 [RTN_LOCAL] = "LOCAL",
2307 [RTN_BROADCAST] = "BROADCAST",
2308 [RTN_ANYCAST] = "ANYCAST",
2309 [RTN_MULTICAST] = "MULTICAST",
2310 [RTN_BLACKHOLE] = "BLACKHOLE",
2311 [RTN_UNREACHABLE] = "UNREACHABLE",
2312 [RTN_PROHIBIT] = "PROHIBIT",
2313 [RTN_THROW] = "THROW",
2315 [RTN_XRESOLVE] = "XRESOLVE",
/* Name for a route type; unknown/unnamed values are formatted into a
 * shared static buffer (same non-reentrancy caveat as rtn_scope). */
2318 static inline const char *rtn_type(unsigned t)
2320 static char buf[32];
2322 if (t < __RTN_MAX && rtn_type_names[t])
2323 return rtn_type_names[t];
2324 snprintf(buf, sizeof(buf), "type %u", t);
2328 /* Pretty print the trie */
/*
 * fib_trie_seq_show - show handler for /proc/net/fib_trie: prints a
 * "<local>:"/"<main>:" banner at each trie root, "+--" lines for
 * internal tnodes (prefix/pos/bits/child counts) and "|--" lines for
 * leaves, followed by one "/plen scope type" line per alias.
 */
2329 static int fib_trie_seq_show(struct seq_file *seq, void *v)
2331 const struct fib_trie_iter *iter = seq->private;
2334 if (v == SEQ_START_TOKEN)
/* A node with no parent is a trie root: label which table. */
2337 if (!node_parent(n)) {
2338 if (iter->trie == iter->trie_local)
2339 seq_puts(seq, "<local>:\n");
2341 seq_puts(seq, "<main>:\n");
2345 struct tnode *tn = (struct tnode *) n;
2346 __be32 prf = htonl(mask_pfx(tn->key, tn->pos));
2348 seq_indent(seq, iter->depth-1);
2349 seq_printf(seq, " +-- %d.%d.%d.%d/%d %d %d %d\n",
2350 NIPQUAD(prf), tn->pos, tn->bits, tn->full_children,
2351 tn->empty_children);
2354 struct leaf *l = (struct leaf *) n;
2356 __be32 val = htonl(l->key);
2358 seq_indent(seq, iter->depth);
2359 seq_printf(seq, " |-- %d.%d.%d.%d\n", NIPQUAD(val));
/* List aliases from longest to shortest prefix length. */
2360 for (i = 32; i >= 0; i--) {
2361 struct leaf_info *li = find_leaf_info(l, i);
2363 struct fib_alias *fa;
2364 list_for_each_entry_rcu(fa, &li->falh, fa_list) {
2365 seq_indent(seq, iter->depth+1);
2366 seq_printf(seq, " /%d %s %s", i,
2367 rtn_scope(fa->fa_scope),
2368 rtn_type(fa->fa_type));
2370 seq_printf(seq, "tos =%d\n",
2372 seq_putc(seq, '\n');
/* seq_file operations for /proc/net/fib_trie. */
2381 static const struct seq_operations fib_trie_seq_ops = {
2382 .start = fib_trie_seq_start,
2383 .next = fib_trie_seq_next,
2384 .stop = fib_trie_seq_stop,
2385 .show = fib_trie_seq_show,
/* Open /proc/net/fib_trie with a per-netns fib_trie_iter as the
 * seq_file private data. */
2388 static int fib_trie_seq_open(struct inode *inode, struct file *file)
2390 return seq_open_net(inode, file, &fib_trie_seq_ops,
2391 sizeof(struct fib_trie_iter));
/* File operations for /proc/net/fib_trie (read line elided). */
2394 static const struct file_operations fib_trie_fops = {
2395 .owner = THIS_MODULE,
2396 .open = fib_trie_seq_open,
2398 .llseek = seq_lseek,
2399 .release = seq_release_net,
/*
 * fib_flag_trans - map a route type/mask/nexthop to the RTF_* flags
 * shown in /proc/net/route. Indices [7] and [8] presumably correspond
 * to RTN_UNREACHABLE and RTN_PROHIBIT — TODO confirm against the
 * rtnetlink enum. Host routes (/32 mask, checked below on an elided
 * line's flag) and gateway routes gain extra flags.
 */
2402 static unsigned fib_flag_trans(int type, __be32 mask, const struct fib_info *fi)
2404 static unsigned type2flags[RTN_MAX + 1] = {
2405 [7] = RTF_REJECT, [8] = RTF_REJECT,
2407 unsigned flags = type2flags[type];
2409 if (fi && fi->fib_nh->nh_gw)
2410 flags |= RTF_GATEWAY;
2411 if (mask == htonl(0xFFFFFFFF))
2418 * This outputs /proc/net/route.
2419 * The format of the file is not supposed to be changed
2420 * and needs to be same as fib_hash output to avoid breaking
/*
 * fib_route_seq_show - emit classic /proc/net/route rows. Skips the
 * header token, the local table, and broadcast/multicast entries;
 * prints one tab-separated row per remaining alias, with "*" columns
 * when no fib_info/device is attached.
 */
2423 static int fib_route_seq_show(struct seq_file *seq, void *v)
2425 const struct fib_trie_iter *iter = seq->private;
2430 if (v == SEQ_START_TOKEN) {
2431 seq_printf(seq, "%-127s\n", "Iface\tDestination\tGateway "
2432 "\tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU"
/* Only the main table is exported through this file. */
2437 if (iter->trie == iter->trie_local)
2442 for (i=32; i>=0; i--) {
2443 struct leaf_info *li = find_leaf_info(l, i);
2444 struct fib_alias *fa;
2445 __be32 mask, prefix;
2450 mask = inet_make_mask(li->plen);
2451 prefix = htonl(l->key);
2453 list_for_each_entry_rcu(fa, &li->falh, fa_list) {
2454 const struct fib_info *fi = fa->fa_info;
2455 unsigned flags = fib_flag_trans(fa->fa_type, mask, fi);
2457 if (fa->fa_type == RTN_BROADCAST
2458 || fa->fa_type == RTN_MULTICAST)
2462 snprintf(bf, sizeof(bf),
2463 "%s\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u",
2464 fi->fib_dev ? fi->fib_dev->name : "*",
2466 fi->fib_nh->nh_gw, flags, 0, 0,
/* Historical convention: advmss excludes the 40-byte TCP/IP header. */
2469 (fi->fib_advmss ? fi->fib_advmss + 40 : 0),
2473 snprintf(bf, sizeof(bf),
2474 "*\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u",
2475 prefix, 0, flags, 0, 0, 0,
/* Rows are padded to 127 chars for fixed-size records. */
2478 seq_printf(seq, "%-127s\n", bf);
/* seq_file operations for /proc/net/route (shares the trie iterator
 * with /proc/net/fib_trie, only the show callback differs). */
2485 static const struct seq_operations fib_route_seq_ops = {
2486 .start = fib_trie_seq_start,
2487 .next = fib_trie_seq_next,
2488 .stop = fib_trie_seq_stop,
2489 .show = fib_route_seq_show,
/* Open /proc/net/route with a per-netns fib_trie_iter. */
2492 static int fib_route_seq_open(struct inode *inode, struct file *file)
2494 return seq_open_net(inode, file, &fib_route_seq_ops,
2495 sizeof(struct fib_trie_iter));
/* File operations for /proc/net/route (read line elided). */
2498 static const struct file_operations fib_route_fops = {
2499 .owner = THIS_MODULE,
2500 .open = fib_route_seq_open,
2502 .llseek = seq_lseek,
2503 .release = seq_release_net,
/*
 * fib_proc_init - create the three per-netns proc entries
 * (fib_trie, fib_triestat, route), unwinding the already-created ones
 * on failure (goto labels and -ENOMEM returns are on elided lines).
 */
2506 int __net_init fib_proc_init(struct net *net)
2508 if (!proc_net_fops_create(net, "fib_trie", S_IRUGO, &fib_trie_fops))
2511 if (!proc_net_fops_create(net, "fib_triestat", S_IRUGO,
2512 &fib_triestat_fops))
2515 if (!proc_net_fops_create(net, "route", S_IRUGO, &fib_route_fops))
/* Error unwind: remove in reverse order of creation. */
2521 proc_net_remove(net, "fib_triestat");
2523 proc_net_remove(net, "fib_trie");
/* Remove the per-netns proc entries created by fib_proc_init(). */
2528 void __net_exit fib_proc_exit(struct net *net)
2530 proc_net_remove(net, "fib_trie");
2531 proc_net_remove(net, "fib_triestat");
2532 proc_net_remove(net, "route");
2535 #endif /* CONFIG_PROC_FS */