/*
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation; either version
 *   2 of the License, or (at your option) any later version.
 *
 *   Robert Olsson <robert.olsson@its.uu.se> Uppsala Universitet
 *     & Swedish University of Agricultural Sciences.
 *
 *   Jens Laas <jens.laas@data.slu.se> Swedish University of
 *     Agricultural Sciences.
 *
 *   Hans Liss <hans.liss@its.uu.se>  Uppsala Universitet
 *
 * This work is based on the LPC-trie which is originally described in:
 *
 * An experimental study of compression methods for dynamic tries
 * Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002.
 * http://www.csc.kth.se/~snilsson/software/dyntrie2/
 *
 *
 * IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson
 * IEEE Journal on Selected Areas in Communications, 17(6):1083-1092, June 1999
 *
 *
 * Code from fib_hash has been reused which includes the following header:
 *
 *
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		IPv4 FIB: lookup engine and maintenance routines.
 *
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Substantial contributions to this work comes from:
 *
 *		David S. Miller, <davem@davemloft.net>
 *		Stephen Hemminger <shemminger@osdl.org>
 *		Paul E. McKenney <paulmck@us.ibm.com>
 *		Patrick McHardy <kaber@trash.net>
 */

#define VERSION "0.409"

#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include "fib_lookup.h"

#define MAX_STAT_DEPTH 32

#define KEYLENGTH (8*sizeof(t_key))

typedef unsigned int t_key;

#define T_TNODE 0
#define T_LEAF  1
#define NODE_TYPE_MASK	0x1UL
#define NODE_TYPE(node) ((node)->parent & NODE_TYPE_MASK)

#define IS_TNODE(n) (!(n->parent & T_LEAF))
#define IS_LEAF(n) (n->parent & T_LEAF)

struct rt_trie_node {
	unsigned long parent;
	t_key key;
};

struct leaf {
	unsigned long parent;
	t_key key;
	struct hlist_head list;
	struct rcu_head rcu;
};

struct leaf_info {
	struct hlist_node hlist;
	int plen;
	u32 mask_plen; /* ntohl(inet_make_mask(plen)) */
	struct list_head falh;
	struct rcu_head rcu;
};

struct tnode {
	unsigned long parent;
	t_key key;
	unsigned char pos;		/* 2log(KEYLENGTH) bits needed */
	unsigned char bits;		/* 2log(KEYLENGTH) bits needed */
	unsigned int full_children;	/* KEYLENGTH bits needed */
	unsigned int empty_children;	/* KEYLENGTH bits needed */
	union {
		struct rcu_head rcu;
		struct work_struct work;
		struct tnode *tnode_free;
	};
	struct rt_trie_node __rcu *child[0];
};

#ifdef CONFIG_IP_FIB_TRIE_STATS
struct trie_use_stats {
	unsigned int gets;
	unsigned int backtrack;
	unsigned int semantic_match_passed;
	unsigned int semantic_match_miss;
	unsigned int null_node_hit;
	unsigned int resize_node_skipped;
};
#endif

struct trie_stat {
	unsigned int totdepth;
	unsigned int maxdepth;
	unsigned int tnodes;
	unsigned int leaves;
	unsigned int nullpointers;
	unsigned int prefixes;
	unsigned int nodesizes[MAX_STAT_DEPTH];
};

struct trie {
	struct rt_trie_node __rcu *trie;
#ifdef CONFIG_IP_FIB_TRIE_STATS
	struct trie_use_stats stats;
#endif
};

static void put_child(struct trie *t, struct tnode *tn, int i, struct rt_trie_node *n);
static void tnode_put_child_reorg(struct tnode *tn, int i, struct rt_trie_node *n,
				  int wasfull);
static struct rt_trie_node *resize(struct trie *t, struct tnode *tn);
static struct tnode *inflate(struct trie *t, struct tnode *tn);
static struct tnode *halve(struct trie *t, struct tnode *tn);
/* tnodes to free after resize(); protected by RTNL */
static struct tnode *tnode_free_head;
static size_t tnode_free_size;

/*
 * synchronize_rcu after call_rcu for that many pages; it should be especially
 * useful before resizing the root node with PREEMPT_NONE configs; the value was
 * obtained experimentally, aiming to avoid visible slowdown.
 */
static const int sync_pages = 128;

static struct kmem_cache *fn_alias_kmem __read_mostly;
static struct kmem_cache *trie_leaf_kmem __read_mostly;

/*
 * caller must hold RTNL
 */
static inline struct tnode *node_parent(const struct rt_trie_node *node)
{
	unsigned long parent;

	parent = rcu_dereference_index_check(node->parent, lockdep_rtnl_is_held());

	return (struct tnode *)(parent & ~NODE_TYPE_MASK);
}

/*
 * caller must hold RCU read lock or RTNL
 */
static inline struct tnode *node_parent_rcu(const struct rt_trie_node *node)
{
	unsigned long parent;

	parent = rcu_dereference_index_check(node->parent, rcu_read_lock_held() ||
							   lockdep_rtnl_is_held());

	return (struct tnode *)(parent & ~NODE_TYPE_MASK);
}

/* Same as rcu_assign_pointer
 * but that macro() assumes that value is a pointer.
 */
static inline void node_set_parent(struct rt_trie_node *node, struct tnode *ptr)
{
	smp_wmb();
	node->parent = (unsigned long)ptr | NODE_TYPE(node);
}
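
/*
 * Illustrative note (added): the low bit of ->parent doubles as the node
 * type tag. struct leaf and struct tnode both start with the same 'parent'
 * and 'key' fields as struct rt_trie_node, and leaf_new()/tnode_new() seed
 * ->parent with T_LEAF/T_TNODE, so IS_LEAF()/IS_TNODE() work even before a
 * node is linked into the trie. node_set_parent() above preserves that tag
 * while storing the parent pointer in the remaining bits, and node_parent()
 * masks the tag off again with ~NODE_TYPE_MASK.
 */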

/*
 * caller must hold RTNL
 */
static inline struct rt_trie_node *tnode_get_child(const struct tnode *tn, unsigned int i)
{
	BUG_ON(i >= 1U << tn->bits);

	return rtnl_dereference(tn->child[i]);
}

/*
 * caller must hold RCU read lock or RTNL
 */
static inline struct rt_trie_node *tnode_get_child_rcu(const struct tnode *tn, unsigned int i)
{
	BUG_ON(i >= 1U << tn->bits);

	return rcu_dereference_rtnl(tn->child[i]);
}

static inline int tnode_child_length(const struct tnode *tn)
{
	return 1 << tn->bits;
}

static inline t_key mask_pfx(t_key k, unsigned int l)
{
	return (l == 0) ? 0 : k >> (KEYLENGTH-l) << (KEYLENGTH-l);
}

static inline t_key tkey_extract_bits(t_key a, unsigned int offset, unsigned int bits)
{
	if (offset < KEYLENGTH)
		return ((t_key)(a << offset)) >> (KEYLENGTH - bits);
	else
		return 0;
}

static inline int tkey_equals(t_key a, t_key b)
{
	return a == b;
}

static inline int tkey_sub_equals(t_key a, int offset, int bits, t_key b)
{
	if (bits == 0 || offset >= KEYLENGTH)
		return 1;
	bits = bits > KEYLENGTH ? KEYLENGTH : bits;
	return ((a ^ b) << offset) >> (KEYLENGTH - bits) == 0;
}

static inline int tkey_mismatch(t_key a, int offset, t_key b)
{
	t_key diff = a ^ b;
	int i = offset;

	if (!diff)
		return 0;
	while ((diff << i) >> (KEYLENGTH-1) == 0)
		i++;
	return i;
}

/*
  To understand this stuff, an understanding of keys and all their bits is
  necessary. Every node in the trie has a key associated with it, but not
  all of the bits in that key are significant.

  Consider a node 'n' and its parent 'tp'.

  If n is a leaf, every bit in its key is significant. Its presence is
  necessitated by path compression, since during a tree traversal (when
  searching for a leaf - unless we are doing an insertion) we will completely
  ignore all skipped bits we encounter. Thus we need to verify, at the end of
  a potentially successful search, that we have indeed been walking the
  correct key path.

  Note that we can never "miss" the correct key in the tree if present by
  following the wrong path. Path compression ensures that segments of the key
  that are the same for all keys with a given prefix are skipped, but the
  skipped part *is* identical for each node in the subtrie below the skipped
  bit! trie_insert() in this implementation takes care of that - note the
  call to tkey_sub_equals() in trie_insert().

  if n is an internal node - a 'tnode' here, the various parts of its key
  have many different meanings.

  Example:
  _________________________________________________________________
  | i | i | i | i | i | i | i | N | N | N | S | S | S | S | S | C |
  -----------------------------------------------------------------
    0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15

  _________________________________________________________________
  | C | C | C | u | u | u | u | u | u | u | u | u | u | u | u | u |
  -----------------------------------------------------------------
   16  17  18  19  20  21  22  23  24  25  26  27  28  29  30  31

  tp->pos = 7
  tp->bits = 3
  n->pos = 15
  n->bits = 4

  First, let's just ignore the bits that come before the parent tp, that is
  the bits from 0 to (tp->pos-1). They are *known* but at this point we do
  not use them for anything.

  The bits from (tp->pos) to (tp->pos + tp->bits - 1) - "N", above - are the
  index into the parent's child array. That is, they will be used to find
  'n' among tp's children.

  The bits from (tp->pos + tp->bits) to (n->pos - 1) - "S" - are skipped bits
  for the node n.

  All the bits we have seen so far are significant to the node n. The rest
  of the bits are really not needed or indeed known in n->key.

  The bits from (n->pos) to (n->pos + n->bits - 1) - "C" - are the index into
  n's child array, and will of course be different for each child.


  The rest of the bits, from (n->pos + n->bits) onward, are completely unknown
  at this point.

*/
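
/*
  A short worked example, added for illustration using the values from the
  diagram above: with tp->pos = 7 and tp->bits = 3, the parent indexes its
  2^3 = 8 children with key bits 7-9, i.e. cindex = tkey_extract_bits(key, 7, 3).
  The child n then skips bits 10-14, and with n->pos = 15 and n->bits = 4 it
  indexes its own 16 children with bits 15-18. Since the skipped bits are
  never examined on the way down, a lookup finally has to compare the full
  key of the leaf it reaches, which is why fib_find_node() below ends with a
  tkey_equals() check.
*/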

static inline void check_tnode(const struct tnode *tn)
{
	WARN_ON(tn && tn->pos+tn->bits > 32);
}

static const int halve_threshold = 25;
static const int inflate_threshold = 50;
static const int halve_threshold_root = 15;
static const int inflate_threshold_root = 30;

static void __alias_free_mem(struct rcu_head *head)
{
	struct fib_alias *fa = container_of(head, struct fib_alias, rcu);
	kmem_cache_free(fn_alias_kmem, fa);
}

static inline void alias_free_mem_rcu(struct fib_alias *fa)
{
	call_rcu(&fa->rcu, __alias_free_mem);
}

static void __leaf_free_rcu(struct rcu_head *head)
{
	struct leaf *l = container_of(head, struct leaf, rcu);
	kmem_cache_free(trie_leaf_kmem, l);
}

static inline void free_leaf(struct leaf *l)
{
	call_rcu_bh(&l->rcu, __leaf_free_rcu);
}

static inline void free_leaf_info(struct leaf_info *leaf)
{
	kfree_rcu(leaf, rcu);
}

static struct tnode *tnode_alloc(size_t size)
{
	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);
	else
		return vzalloc(size);
}

static void __tnode_vfree(struct work_struct *arg)
{
	struct tnode *tn = container_of(arg, struct tnode, work);
	vfree(tn);
}

static void __tnode_free_rcu(struct rcu_head *head)
{
	struct tnode *tn = container_of(head, struct tnode, rcu);
	size_t size = sizeof(struct tnode) +
		      (sizeof(struct rt_trie_node *) << tn->bits);

	if (size <= PAGE_SIZE)
		kfree(tn);
	else {
		INIT_WORK(&tn->work, __tnode_vfree);
		schedule_work(&tn->work);
	}
}

static inline void tnode_free(struct tnode *tn)
{
	if (IS_LEAF(tn))
		free_leaf((struct leaf *) tn);
	else
		call_rcu(&tn->rcu, __tnode_free_rcu);
}

static void tnode_free_safe(struct tnode *tn)
{
	BUG_ON(IS_LEAF(tn));
	tn->tnode_free = tnode_free_head;
	tnode_free_head = tn;
	tnode_free_size += sizeof(struct tnode) +
			   (sizeof(struct rt_trie_node *) << tn->bits);
}

static void tnode_free_flush(void)
{
	struct tnode *tn;

	while ((tn = tnode_free_head)) {
		tnode_free_head = tn->tnode_free;
		tn->tnode_free = NULL;
		tnode_free(tn);
	}

	if (tnode_free_size >= PAGE_SIZE * sync_pages) {
		tnode_free_size = 0;
		synchronize_rcu();
	}
}
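
/*
 * Note added for clarity: tnode_free_safe() only queues an old tnode on the
 * RTNL-protected tnode_free_head list; tnode_free_flush() then hands the
 * queued nodes to call_rcu() via tnode_free() and, once roughly sync_pages
 * (128) pages worth of tnodes are pending, also calls synchronize_rcu() so
 * the amount of memory waiting for a grace period stays bounded.
 * trie_rebalance() below calls tnode_free_flush() after each resize step.
 */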

static struct leaf *leaf_new(void)
{
	struct leaf *l = kmem_cache_alloc(trie_leaf_kmem, GFP_KERNEL);
	if (l) {
		l->parent = T_LEAF;
		INIT_HLIST_HEAD(&l->list);
	}
	return l;
}

static struct leaf_info *leaf_info_new(int plen)
{
	struct leaf_info *li = kmalloc(sizeof(struct leaf_info), GFP_KERNEL);
	if (li) {
		li->plen = plen;
		li->mask_plen = ntohl(inet_make_mask(plen));
		INIT_LIST_HEAD(&li->falh);
	}
	return li;
}

static struct tnode *tnode_new(t_key key, int pos, int bits)
{
	size_t sz = sizeof(struct tnode) + (sizeof(struct rt_trie_node *) << bits);
	struct tnode *tn = tnode_alloc(sz);

	if (tn) {
		tn->parent = T_TNODE;
		tn->pos = pos;
		tn->bits = bits;
		tn->key = key;
		tn->full_children = 0;
		tn->empty_children = 1<<bits;
	}

	pr_debug("AT %p s=%zu %zu\n", tn, sizeof(struct tnode),
		 sizeof(struct rt_trie_node) << bits);
	return tn;
}

/*
 * Check whether a tnode 'n' is "full", i.e. it is an internal node
 * and no bits are skipped. See discussion in dyntree paper p. 6
 */

static inline int tnode_full(const struct tnode *tn, const struct rt_trie_node *n)
{
	if (n == NULL || IS_LEAF(n))
		return 0;

	return ((struct tnode *) n)->pos == tn->pos + tn->bits;
}

static inline void put_child(struct trie *t, struct tnode *tn, int i,
			     struct rt_trie_node *n)
{
	tnode_put_child_reorg(tn, i, n, -1);
}

 /*
  * Add a child at position i overwriting the old value.
  * Update the value of full_children and empty_children.
  */

static void tnode_put_child_reorg(struct tnode *tn, int i, struct rt_trie_node *n,
				  int wasfull)
{
	struct rt_trie_node *chi = rtnl_dereference(tn->child[i]);
	int isfull;

	BUG_ON(i >= 1<<tn->bits);

	/* update emptyChildren */
	if (n == NULL && chi != NULL)
		tn->empty_children++;
	else if (n != NULL && chi == NULL)
		tn->empty_children--;

	/* update fullChildren */
	if (wasfull == -1)
		wasfull = tnode_full(tn, chi);

	isfull = tnode_full(tn, n);
	if (wasfull && !isfull)
		tn->full_children--;
	else if (!wasfull && isfull)
		tn->full_children++;

	if (n)
		node_set_parent(n, tn);

	rcu_assign_pointer(tn->child[i], n);
}

#define MAX_WORK 10
static struct rt_trie_node *resize(struct trie *t, struct tnode *tn)
{
	int i;
	struct tnode *old_tn;
	int inflate_threshold_use;
	int halve_threshold_use;
	int max_work;

	if (!tn)
		return NULL;

	pr_debug("In tnode_resize %p inflate_threshold=%d threshold=%d\n",
		 tn, inflate_threshold, halve_threshold);

	/* No children */
	if (tn->empty_children == tnode_child_length(tn)) {
		tnode_free_safe(tn);
		return NULL;
	}
	/* One child */
	if (tn->empty_children == tnode_child_length(tn) - 1)
		goto one_child;
	/*
	 * Double as long as the resulting node has a number of
	 * nonempty nodes that are above the threshold.
	 */

	/*
	 * From "Implementing a dynamic compressed trie" by Stefan Nilsson of
	 * the Helsinki University of Technology and Matti Tikkanen of Nokia
	 * Telecommunications, page 6:
	 * "A node is doubled if the ratio of non-empty children to all
	 * children in the *doubled* node is at least 'high'."
	 *
	 * 'high' in this instance is the variable 'inflate_threshold'. It
	 * is expressed as a percentage, so we multiply it with
	 * tnode_child_length() and instead of multiplying by 2 (since the
	 * child array will be doubled by inflate()) and multiplying
	 * the left-hand side by 100 (to handle the percentage thing) we
	 * multiply the left-hand side by 50.
	 *
	 * The left-hand side may look a bit weird: tnode_child_length(tn)
	 * - tn->empty_children is of course the number of non-null children
	 * in the current node. tn->full_children is the number of "full"
	 * children, that is non-null tnodes with a skip value of 0.
	 * All of those will be doubled in the resulting inflated tnode, so
	 * we just count them one extra time here.
	 *
	 * A clearer way to write this would be:
	 *
	 * to_be_doubled = tn->full_children;
	 * not_to_be_doubled = tnode_child_length(tn) - tn->empty_children -
	 *     tn->full_children;
	 *
	 * new_child_length = tnode_child_length(tn) * 2;
	 *
	 * new_fill_factor = 100 * (not_to_be_doubled + 2*to_be_doubled) /
	 *      new_child_length;
	 * if (new_fill_factor >= inflate_threshold)
	 *
	 * ...and so on, tho it would mess up the while () loop.
	 *
	 * anyway,
	 * 100 * (not_to_be_doubled + 2*to_be_doubled) / new_child_length >=
	 *      inflate_threshold
	 *
	 * avoid a division:
	 * 100 * (not_to_be_doubled + 2*to_be_doubled) >=
	 *      inflate_threshold * new_child_length
	 *
	 * expand not_to_be_doubled and to_be_doubled, and shorten:
	 * 100 * (tnode_child_length(tn) - tn->empty_children +
	 *    tn->full_children) >= inflate_threshold * new_child_length
	 *
	 * expand new_child_length:
	 * 100 * (tnode_child_length(tn) - tn->empty_children +
	 *    tn->full_children) >=
	 *      inflate_threshold * tnode_child_length(tn) * 2
	 *
	 * shorten again:
	 * 50 * (tn->full_children + tnode_child_length(tn) -
	 *    tn->empty_children) >= inflate_threshold *
	 *    tnode_child_length(tn)
	 *
	 */
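
	/*
	 * Numeric sanity check, added for illustration: for a tnode with
	 * bits = 4, tnode_child_length(tn) = 16. With 10 non-empty children
	 * of which 4 are full, the left-hand side is 50 * (4 + 16 - 6) = 700
	 * versus 50 * 16 = 800 on the right (default inflate_threshold = 50),
	 * so the node is left alone; this matches a doubled fill factor of
	 * 100 * (6 + 2*4) / 32 = ~44%. With 14 non-empty children of which
	 * 8 are full, the left-hand side becomes 50 * (8 + 16 - 2) = 1100,
	 * which exceeds 800, so the node is inflated (fill factor ~69%).
	 */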

	check_tnode(tn);

	/* Keep root node larger */

	if (!node_parent((struct rt_trie_node *)tn)) {
		inflate_threshold_use = inflate_threshold_root;
		halve_threshold_use = halve_threshold_root;
	} else {
		inflate_threshold_use = inflate_threshold;
		halve_threshold_use = halve_threshold;
	}

	max_work = MAX_WORK;
	while ((tn->full_children > 0 && max_work-- &&
		50 * (tn->full_children + tnode_child_length(tn)
		      - tn->empty_children)
		>= inflate_threshold_use * tnode_child_length(tn))) {

		old_tn = tn;
		tn = inflate(t, tn);

		if (IS_ERR(tn)) {
			tn = old_tn;
#ifdef CONFIG_IP_FIB_TRIE_STATS
			t->stats.resize_node_skipped++;
#endif
			break;
		}
	}

	check_tnode(tn);

	/* Return if at least one inflate is run */
	if (max_work != MAX_WORK)
		return (struct rt_trie_node *) tn;

	/*
	 * Halve as long as the number of empty children in this
	 * node is above threshold.
	 */

	max_work = MAX_WORK;
	while (tn->bits > 1 && max_work-- &&
	       100 * (tnode_child_length(tn) - tn->empty_children) <
	       halve_threshold_use * tnode_child_length(tn)) {

		old_tn = tn;
		tn = halve(t, tn);
		if (IS_ERR(tn)) {
			tn = old_tn;
#ifdef CONFIG_IP_FIB_TRIE_STATS
			t->stats.resize_node_skipped++;
#endif
			break;
		}
	}


	/* Only one child remains */
	if (tn->empty_children == tnode_child_length(tn) - 1) {
one_child:
		for (i = 0; i < tnode_child_length(tn); i++) {
			struct rt_trie_node *n;

			n = rtnl_dereference(tn->child[i]);
			if (!n)
				continue;

			/* compress one level */

			node_set_parent(n, NULL);
			tnode_free_safe(tn);
			return n;
		}
	}
	return (struct rt_trie_node *) tn;
}


static void tnode_clean_free(struct tnode *tn)
{
	int i;
	struct tnode *tofree;

	for (i = 0; i < tnode_child_length(tn); i++) {
		tofree = (struct tnode *)rtnl_dereference(tn->child[i]);
		if (tofree)
			tnode_free(tofree);
	}
	tnode_free(tn);
}

static struct tnode *inflate(struct trie *t, struct tnode *tn)
{
	struct tnode *oldtnode = tn;
	int olen = tnode_child_length(tn);
	int i;

	pr_debug("In inflate\n");

	tn = tnode_new(oldtnode->key, oldtnode->pos, oldtnode->bits + 1);

	if (!tn)
		return ERR_PTR(-ENOMEM);

	/*
	 * Preallocate and store tnodes before the actual work so we
	 * don't get into an inconsistent state if memory allocation
	 * fails. In case of failure we return the oldnode and inflate
	 * of tnode is ignored.
	 */

	for (i = 0; i < olen; i++) {
		struct tnode *inode;

		inode = (struct tnode *) tnode_get_child(oldtnode, i);
		if (inode &&
		    IS_TNODE(inode) &&
		    inode->pos == oldtnode->pos + oldtnode->bits &&
		    inode->bits > 1) {
			struct tnode *left, *right;
			t_key m = ~0U << (KEYLENGTH - 1) >> inode->pos;

			left = tnode_new(inode->key&(~m), inode->pos + 1,
					 inode->bits - 1);
			if (!left)
				goto nomem;

			right = tnode_new(inode->key|m, inode->pos + 1,
					  inode->bits - 1);

			if (!right) {
				tnode_free(left);
				goto nomem;
			}

			put_child(t, tn, 2*i, (struct rt_trie_node *) left);
			put_child(t, tn, 2*i+1, (struct rt_trie_node *) right);
		}
	}

	for (i = 0; i < olen; i++) {
		struct tnode *inode;
		struct rt_trie_node *node = tnode_get_child(oldtnode, i);
		struct tnode *left, *right;
		int size, j;

		/* An empty child */
		if (node == NULL)
			continue;

		/* A leaf or an internal node with skipped bits */

		if (IS_LEAF(node) || ((struct tnode *) node)->pos >
		   tn->pos + tn->bits - 1) {
			if (tkey_extract_bits(node->key,
					      oldtnode->pos + oldtnode->bits,
					      1) == 0)
				put_child(t, tn, 2*i, node);
			else
				put_child(t, tn, 2*i+1, node);
			continue;
		}

		/* An internal node with two children */
		inode = (struct tnode *) node;

		if (inode->bits == 1) {
			put_child(t, tn, 2*i, rtnl_dereference(inode->child[0]));
			put_child(t, tn, 2*i+1, rtnl_dereference(inode->child[1]));

			tnode_free_safe(inode);
			continue;
		}

		/* An internal node with more than two children */

		/* We will replace this node 'inode' with two new
		 * ones, 'left' and 'right', each with half of the
		 * original children. The two new nodes will have
		 * a position one bit further down the key and this
		 * means that the "significant" part of their keys
		 * (see the discussion near the top of this file)
		 * will differ by one bit, which will be "0" in
		 * left's key and "1" in right's key. Since we are
		 * moving the key position by one step, the bit that
		 * we are moving away from - the bit at position
		 * (inode->pos) - is the one that will differ between
		 * left and right. So... we synthesize that bit in the
		 * two new keys.
		 * The mask 'm' below will be a single "one" bit at
		 * the position (inode->pos)
		 */

		/* Use the old key, but set the new significant
		 * bit to zero.
		 */

		left = (struct tnode *) tnode_get_child(tn, 2*i);
		put_child(t, tn, 2*i, NULL);

		BUG_ON(!left);

		right = (struct tnode *) tnode_get_child(tn, 2*i+1);
		put_child(t, tn, 2*i+1, NULL);

		BUG_ON(!right);

		size = tnode_child_length(left);
		for (j = 0; j < size; j++) {
			put_child(t, left, j, rtnl_dereference(inode->child[j]));
			put_child(t, right, j, rtnl_dereference(inode->child[j + size]));
		}
		put_child(t, tn, 2*i, resize(t, left));
		put_child(t, tn, 2*i+1, resize(t, right));

		tnode_free_safe(inode);
	}
	tnode_free_safe(oldtnode);
	return tn;
nomem:
	tnode_clean_free(tn);
	return ERR_PTR(-ENOMEM);
}

static struct tnode *halve(struct trie *t, struct tnode *tn)
{
	struct tnode *oldtnode = tn;
	struct rt_trie_node *left, *right;
	int i;
	int olen = tnode_child_length(tn);

	pr_debug("In halve\n");

	tn = tnode_new(oldtnode->key, oldtnode->pos, oldtnode->bits - 1);

	if (!tn)
		return ERR_PTR(-ENOMEM);

	/*
	 * Preallocate and store tnodes before the actual work so we
	 * don't get into an inconsistent state if memory allocation
	 * fails. In case of failure we return the oldnode and halve
	 * of tnode is ignored.
	 */

	for (i = 0; i < olen; i += 2) {
		left = tnode_get_child(oldtnode, i);
		right = tnode_get_child(oldtnode, i+1);

		/* Two nonempty children */
		if (left && right) {
			struct tnode *newn;

			newn = tnode_new(left->key, tn->pos + tn->bits, 1);

			if (!newn)
				goto nomem;

			put_child(t, tn, i/2, (struct rt_trie_node *)newn);
		}

	}

	for (i = 0; i < olen; i += 2) {
		struct tnode *newBinNode;

		left = tnode_get_child(oldtnode, i);
		right = tnode_get_child(oldtnode, i+1);

		/* At least one of the children is empty */
		if (left == NULL) {
			if (right == NULL)    /* Both are empty */
				continue;
			put_child(t, tn, i/2, right);
			continue;
		}

		if (right == NULL) {
			put_child(t, tn, i/2, left);
			continue;
		}

		/* Two nonempty children */
		newBinNode = (struct tnode *) tnode_get_child(tn, i/2);
		put_child(t, tn, i/2, NULL);
		put_child(t, newBinNode, 0, left);
		put_child(t, newBinNode, 1, right);
		put_child(t, tn, i/2, resize(t, newBinNode));
	}
	tnode_free_safe(oldtnode);
	return tn;
nomem:
	tnode_clean_free(tn);
	return ERR_PTR(-ENOMEM);
}

/* The read side must use rcu_read_lock; currently the dump routines
   do so via get_fa_head and dump */

static struct leaf_info *find_leaf_info(struct leaf *l, int plen)
{
	struct hlist_head *head = &l->list;
	struct hlist_node *node;
	struct leaf_info *li;

	hlist_for_each_entry_rcu(li, node, head, hlist)
		if (li->plen == plen)
			return li;

	return NULL;
}

static inline struct list_head *get_fa_head(struct leaf *l, int plen)
{
	struct leaf_info *li = find_leaf_info(l, plen);

	if (!li)
		return NULL;

	return &li->falh;
}

static void insert_leaf_info(struct hlist_head *head, struct leaf_info *new)
{
	struct leaf_info *li = NULL, *last = NULL;
	struct hlist_node *node;

	if (hlist_empty(head)) {
		hlist_add_head_rcu(&new->hlist, head);
	} else {
		hlist_for_each_entry(li, node, head, hlist) {
			if (new->plen > li->plen)
				break;

			last = li;
		}
		if (last)
			hlist_add_after_rcu(&last->hlist, &new->hlist);
		else
			hlist_add_before_rcu(&new->hlist, &li->hlist);
	}
}

/* rcu_read_lock needs to be held by the caller on the read side */

static struct leaf *
fib_find_node(struct trie *t, u32 key)
{
	int pos;
	struct tnode *tn;
	struct rt_trie_node *n;

	pos = 0;
	n = rcu_dereference_rtnl(t->trie);

	while (n != NULL && NODE_TYPE(n) == T_TNODE) {
		tn = (struct tnode *) n;

		check_tnode(tn);

		if (tkey_sub_equals(tn->key, pos, tn->pos-pos, key)) {
			pos = tn->pos + tn->bits;
			n = tnode_get_child_rcu(tn,
						tkey_extract_bits(key,
								  tn->pos,
								  tn->bits));
		} else
			break;
	}
	/* Case we have found a leaf. Compare prefixes */

	if (n != NULL && IS_LEAF(n) && tkey_equals(key, n->key))
		return (struct leaf *)n;

	return NULL;
}
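
/*
 * Overview comment (added): trie_rebalance() is called after an insert or
 * delete has changed a leaf somewhere below 'tn'. It walks from 'tn' back up
 * towards the root, resizing each tnode on the path (which may inflate,
 * halve or collapse it) and re-linking the result into its parent with
 * tnode_put_child_reorg(); when the top is reached the (possibly new) root
 * is published with rcu_assign_pointer(), and tnode_free_flush() releases
 * the replaced tnodes once readers can no longer see them.
 */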
static void trie_rebalance(struct trie *t, struct tnode *tn)
{
	int wasfull;
	t_key cindex, key;
	struct tnode *tp;

	key = tn->key;

	while (tn != NULL && (tp = node_parent((struct rt_trie_node *)tn)) != NULL) {
		cindex = tkey_extract_bits(key, tp->pos, tp->bits);
		wasfull = tnode_full(tp, tnode_get_child(tp, cindex));
		tn = (struct tnode *) resize(t, (struct tnode *)tn);

		tnode_put_child_reorg((struct tnode *)tp, cindex,
				      (struct rt_trie_node *)tn, wasfull);

		tp = node_parent((struct rt_trie_node *) tn);
		if (!tp)
			rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);

		tnode_free_flush();
		if (!tp)
			break;
		tn = tp;
	}

	/* Handle last (top) tnode */
	if (IS_TNODE(tn))
		tn = (struct tnode *)resize(t, (struct tnode *)tn);

	rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
	tnode_free_flush();
}
Robert Olsson2373ce12005-08-25 13:01:29 -07001032/* only used from updater-side */
1033
Stephen Hemmingerfea86ad2008-01-12 20:57:07 -08001034static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
Robert Olsson19baf832005-06-21 12:43:18 -07001035{
1036 int pos, newpos;
1037 struct tnode *tp = NULL, *tn = NULL;
David S. Millerb299e4f2011-02-02 20:48:10 -08001038 struct rt_trie_node *n;
Robert Olsson19baf832005-06-21 12:43:18 -07001039 struct leaf *l;
1040 int missbit;
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001041 struct list_head *fa_head = NULL;
Robert Olsson19baf832005-06-21 12:43:18 -07001042 struct leaf_info *li;
1043 t_key cindex;
1044
1045 pos = 0;
Eric Dumazet0a5c0472011-03-31 01:51:35 -07001046 n = rtnl_dereference(t->trie);
Robert Olsson19baf832005-06-21 12:43:18 -07001047
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001048 /* If we point to NULL, stop. Either the tree is empty and we should
1049 * just put a new leaf in if, or we have reached an empty child slot,
Robert Olsson19baf832005-06-21 12:43:18 -07001050 * and we should just put our new leaf in that.
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001051 * If we point to a T_TNODE, check if it matches our key. Note that
1052 * a T_TNODE might be skipping any number of bits - its 'pos' need
Robert Olsson19baf832005-06-21 12:43:18 -07001053 * not be the parent's 'pos'+'bits'!
1054 *
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001055 * If it does match the current key, get pos/bits from it, extract
Robert Olsson19baf832005-06-21 12:43:18 -07001056 * the index from our key, push the T_TNODE and walk the tree.
1057 *
1058 * If it doesn't, we have to replace it with a new T_TNODE.
1059 *
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001060 * If we point to a T_LEAF, it might or might not have the same key
1061 * as we do. If it does, just change the value, update the T_LEAF's
1062 * value, and return it.
Robert Olsson19baf832005-06-21 12:43:18 -07001063 * If it doesn't, we need to replace it with a T_TNODE.
1064 */
1065
1066 while (n != NULL && NODE_TYPE(n) == T_TNODE) {
1067 tn = (struct tnode *) n;
Olof Johansson91b9a272005-08-09 20:24:39 -07001068
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001069 check_tnode(tn);
Olof Johansson91b9a272005-08-09 20:24:39 -07001070
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001071 if (tkey_sub_equals(tn->key, pos, tn->pos-pos, key)) {
Robert Olsson19baf832005-06-21 12:43:18 -07001072 tp = tn;
Olof Johansson91b9a272005-08-09 20:24:39 -07001073 pos = tn->pos + tn->bits;
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001074 n = tnode_get_child(tn,
1075 tkey_extract_bits(key,
1076 tn->pos,
1077 tn->bits));
Robert Olsson19baf832005-06-21 12:43:18 -07001078
Stephen Hemminger06801912007-08-10 15:22:13 -07001079 BUG_ON(n && node_parent(n) != tn);
Olof Johansson91b9a272005-08-09 20:24:39 -07001080 } else
Robert Olsson19baf832005-06-21 12:43:18 -07001081 break;
1082 }
1083
1084 /*
1085 * n ----> NULL, LEAF or TNODE
1086 *
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001087 * tp is n's (parent) ----> NULL or TNODE
Robert Olsson19baf832005-06-21 12:43:18 -07001088 */
1089
Olof Johansson91b9a272005-08-09 20:24:39 -07001090 BUG_ON(tp && IS_LEAF(tp));
Robert Olsson19baf832005-06-21 12:43:18 -07001091
1092 /* Case 1: n is a leaf. Compare prefixes */
1093
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001094 if (n != NULL && IS_LEAF(n) && tkey_equals(key, n->key)) {
Stephen Hemmingerc95aaf92008-01-12 21:25:02 -08001095 l = (struct leaf *) n;
Robert Olsson19baf832005-06-21 12:43:18 -07001096 li = leaf_info_new(plen);
Olof Johansson91b9a272005-08-09 20:24:39 -07001097
Stephen Hemmingerfea86ad2008-01-12 20:57:07 -08001098 if (!li)
1099 return NULL;
Robert Olsson19baf832005-06-21 12:43:18 -07001100
1101 fa_head = &li->falh;
1102 insert_leaf_info(&l->list, li);
1103 goto done;
1104 }
Robert Olsson19baf832005-06-21 12:43:18 -07001105 l = leaf_new();
1106
Stephen Hemmingerfea86ad2008-01-12 20:57:07 -08001107 if (!l)
1108 return NULL;
Robert Olsson19baf832005-06-21 12:43:18 -07001109
1110 l->key = key;
1111 li = leaf_info_new(plen);
1112
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001113 if (!li) {
Stephen Hemminger387a5482008-04-10 03:47:34 -07001114 free_leaf(l);
Stephen Hemmingerfea86ad2008-01-12 20:57:07 -08001115 return NULL;
Robert Olssonf835e472005-06-28 15:00:39 -07001116 }
Robert Olsson19baf832005-06-21 12:43:18 -07001117
1118 fa_head = &li->falh;
1119 insert_leaf_info(&l->list, li);
1120
Robert Olsson19baf832005-06-21 12:43:18 -07001121 if (t->trie && n == NULL) {
Olof Johansson91b9a272005-08-09 20:24:39 -07001122 /* Case 2: n is NULL, and will just insert a new leaf */
Robert Olsson19baf832005-06-21 12:43:18 -07001123
David S. Millerb299e4f2011-02-02 20:48:10 -08001124 node_set_parent((struct rt_trie_node *)l, tp);
Robert Olsson19baf832005-06-21 12:43:18 -07001125
Olof Johansson91b9a272005-08-09 20:24:39 -07001126 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
David S. Millerb299e4f2011-02-02 20:48:10 -08001127 put_child(t, (struct tnode *)tp, cindex, (struct rt_trie_node *)l);
Olof Johansson91b9a272005-08-09 20:24:39 -07001128 } else {
1129 /* Case 3: n is a LEAF or a TNODE and the key doesn't match. */
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001130 /*
1131 * Add a new tnode here
Robert Olsson19baf832005-06-21 12:43:18 -07001132	 * the first tnode needs some special handling
1133 */
1134
1135 if (tp)
Olof Johansson91b9a272005-08-09 20:24:39 -07001136 pos = tp->pos+tp->bits;
Robert Olsson19baf832005-06-21 12:43:18 -07001137 else
Olof Johansson91b9a272005-08-09 20:24:39 -07001138 pos = 0;
1139
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001140 if (n) {
Robert Olsson19baf832005-06-21 12:43:18 -07001141 newpos = tkey_mismatch(key, pos, n->key);
1142 tn = tnode_new(n->key, newpos, 1);
Olof Johansson91b9a272005-08-09 20:24:39 -07001143 } else {
Robert Olsson19baf832005-06-21 12:43:18 -07001144 newpos = 0;
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001145 tn = tnode_new(key, newpos, 1); /* First tnode */
Robert Olsson19baf832005-06-21 12:43:18 -07001146 }
Robert Olsson19baf832005-06-21 12:43:18 -07001147
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001148 if (!tn) {
Robert Olssonf835e472005-06-28 15:00:39 -07001149 free_leaf_info(li);
Stephen Hemminger387a5482008-04-10 03:47:34 -07001150 free_leaf(l);
Stephen Hemmingerfea86ad2008-01-12 20:57:07 -08001151 return NULL;
Olof Johansson91b9a272005-08-09 20:24:39 -07001152 }
1153
David S. Millerb299e4f2011-02-02 20:48:10 -08001154 node_set_parent((struct rt_trie_node *)tn, tp);
Robert Olsson19baf832005-06-21 12:43:18 -07001155
Olof Johansson91b9a272005-08-09 20:24:39 -07001156 missbit = tkey_extract_bits(key, newpos, 1);
David S. Millerb299e4f2011-02-02 20:48:10 -08001157 put_child(t, tn, missbit, (struct rt_trie_node *)l);
Robert Olsson19baf832005-06-21 12:43:18 -07001158 put_child(t, tn, 1-missbit, n);
1159
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001160 if (tp) {
Robert Olsson19baf832005-06-21 12:43:18 -07001161 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001162 put_child(t, (struct tnode *)tp, cindex,
David S. Millerb299e4f2011-02-02 20:48:10 -08001163 (struct rt_trie_node *)tn);
Olof Johansson91b9a272005-08-09 20:24:39 -07001164 } else {
Eric Dumazetcf778b02012-01-12 04:41:32 +00001165 rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
Robert Olsson19baf832005-06-21 12:43:18 -07001166 tp = tn;
1167 }
1168 }
Olof Johansson91b9a272005-08-09 20:24:39 -07001169
1170 if (tp && tp->pos + tp->bits > 32)
Joe Perches058bd4d2012-03-11 18:36:11 +00001171 pr_warn("fib_trie tp=%p pos=%d, bits=%d, key=%0x plen=%d\n",
1172 tp, tp->pos, tp->bits, key, plen);
Olof Johansson91b9a272005-08-09 20:24:39 -07001173
Robert Olsson19baf832005-06-21 12:43:18 -07001174 /* Rebalance the trie */
Robert Olsson2373ce12005-08-25 13:01:29 -07001175
Jarek Poplawski7b855762009-06-18 00:28:51 -07001176 trie_rebalance(t, tp);
Robert Olssonf835e472005-06-28 15:00:39 -07001177done:
Robert Olsson19baf832005-06-21 12:43:18 -07001178 return fa_head;
1179}
1180
Robert Olssond562f1f2007-03-26 14:22:22 -07001181/*
1182 * Caller must hold RTNL.
1183 */
Stephen Hemminger16c6cf82009-09-20 10:35:36 +00001184int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
Robert Olsson19baf832005-06-21 12:43:18 -07001185{
1186 struct trie *t = (struct trie *) tb->tb_data;
1187 struct fib_alias *fa, *new_fa;
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001188 struct list_head *fa_head = NULL;
Robert Olsson19baf832005-06-21 12:43:18 -07001189 struct fib_info *fi;
Thomas Graf4e902c52006-08-17 18:14:52 -07001190 int plen = cfg->fc_dst_len;
1191 u8 tos = cfg->fc_tos;
Robert Olsson19baf832005-06-21 12:43:18 -07001192 u32 key, mask;
1193 int err;
1194 struct leaf *l;
1195
1196 if (plen > 32)
1197 return -EINVAL;
1198
Thomas Graf4e902c52006-08-17 18:14:52 -07001199 key = ntohl(cfg->fc_dst);
Robert Olsson19baf832005-06-21 12:43:18 -07001200
Patrick McHardy2dfe55b2006-08-10 23:08:33 -07001201 pr_debug("Insert table=%u %08x/%d\n", tb->tb_id, key, plen);
Robert Olsson19baf832005-06-21 12:43:18 -07001202
Olof Johansson91b9a272005-08-09 20:24:39 -07001203 mask = ntohl(inet_make_mask(plen));
Robert Olsson19baf832005-06-21 12:43:18 -07001204
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001205 if (key & ~mask)
Robert Olsson19baf832005-06-21 12:43:18 -07001206 return -EINVAL;
1207
1208 key = key & mask;
1209
Thomas Graf4e902c52006-08-17 18:14:52 -07001210 fi = fib_create_info(cfg);
1211 if (IS_ERR(fi)) {
1212 err = PTR_ERR(fi);
Robert Olsson19baf832005-06-21 12:43:18 -07001213 goto err;
Thomas Graf4e902c52006-08-17 18:14:52 -07001214 }
Robert Olsson19baf832005-06-21 12:43:18 -07001215
1216 l = fib_find_node(t, key);
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001217 fa = NULL;
Robert Olsson19baf832005-06-21 12:43:18 -07001218
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001219 if (l) {
Robert Olsson19baf832005-06-21 12:43:18 -07001220 fa_head = get_fa_head(l, plen);
1221 fa = fib_find_alias(fa_head, tos, fi->fib_priority);
1222 }
1223
1224 /* Now fa, if non-NULL, points to the first fib alias
1225 * with the same keys [prefix,tos,priority], if such key already
1226 * exists or to the node before which we will insert new one.
1227 *
1228 * If fa is NULL, we will need to allocate a new one and
	1229	 * insert to the head of fa_head.
	1230	 *
	1231	 * If fa_head is NULL, no fib node matched the destination key
1232 * and we need to allocate a new one of those as well.
1233 */
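	/* For orientation, the typical iproute2 flag mappings (assumed
	 * here, not defined in this file): "ip route add" sends
	 * NLM_F_CREATE|NLM_F_EXCL, so a duplicate [prefix,tos,priority]
	 * fails with -EEXIST below; "ip route replace" sends
	 * NLM_F_CREATE|NLM_F_REPLACE and takes the in-place replacement
	 * path; "ip route append" sends NLM_F_CREATE|NLM_F_APPEND and the
	 * new alias is added after the existing matching ones.
	 */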
1234
Julian Anastasov936f6f82008-01-28 21:18:06 -08001235 if (fa && fa->fa_tos == tos &&
1236 fa->fa_info->fib_priority == fi->fib_priority) {
1237 struct fib_alias *fa_first, *fa_match;
Robert Olsson19baf832005-06-21 12:43:18 -07001238
1239 err = -EEXIST;
Thomas Graf4e902c52006-08-17 18:14:52 -07001240 if (cfg->fc_nlflags & NLM_F_EXCL)
Robert Olsson19baf832005-06-21 12:43:18 -07001241 goto out;
1242
Julian Anastasov936f6f82008-01-28 21:18:06 -08001243 /* We have 2 goals:
1244 * 1. Find exact match for type, scope, fib_info to avoid
1245 * duplicate routes
1246 * 2. Find next 'fa' (or head), NLM_F_APPEND inserts before it
1247 */
1248 fa_match = NULL;
1249 fa_first = fa;
1250 fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
1251 list_for_each_entry_continue(fa, fa_head, fa_list) {
1252 if (fa->fa_tos != tos)
1253 break;
1254 if (fa->fa_info->fib_priority != fi->fib_priority)
1255 break;
1256 if (fa->fa_type == cfg->fc_type &&
Julian Anastasov936f6f82008-01-28 21:18:06 -08001257 fa->fa_info == fi) {
1258 fa_match = fa;
1259 break;
1260 }
1261 }
1262
Thomas Graf4e902c52006-08-17 18:14:52 -07001263 if (cfg->fc_nlflags & NLM_F_REPLACE) {
Robert Olsson19baf832005-06-21 12:43:18 -07001264 struct fib_info *fi_drop;
1265 u8 state;
1266
Julian Anastasov936f6f82008-01-28 21:18:06 -08001267 fa = fa_first;
1268 if (fa_match) {
1269 if (fa == fa_match)
1270 err = 0;
Joonwoo Park67250332008-01-18 03:45:18 -08001271 goto out;
Julian Anastasov936f6f82008-01-28 21:18:06 -08001272 }
Robert Olsson2373ce12005-08-25 13:01:29 -07001273 err = -ENOBUFS;
Christoph Lametere94b1762006-12-06 20:33:17 -08001274 new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
Robert Olsson2373ce12005-08-25 13:01:29 -07001275 if (new_fa == NULL)
1276 goto out;
Robert Olsson19baf832005-06-21 12:43:18 -07001277
1278 fi_drop = fa->fa_info;
Robert Olsson2373ce12005-08-25 13:01:29 -07001279 new_fa->fa_tos = fa->fa_tos;
1280 new_fa->fa_info = fi;
Thomas Graf4e902c52006-08-17 18:14:52 -07001281 new_fa->fa_type = cfg->fc_type;
Robert Olsson19baf832005-06-21 12:43:18 -07001282 state = fa->fa_state;
Julian Anastasov936f6f82008-01-28 21:18:06 -08001283 new_fa->fa_state = state & ~FA_S_ACCESSED;
Robert Olsson19baf832005-06-21 12:43:18 -07001284
Robert Olsson2373ce12005-08-25 13:01:29 -07001285 list_replace_rcu(&fa->fa_list, &new_fa->fa_list);
1286 alias_free_mem_rcu(fa);
Robert Olsson19baf832005-06-21 12:43:18 -07001287
1288 fib_release_info(fi_drop);
1289 if (state & FA_S_ACCESSED)
Denis V. Lunev76e6ebf2008-07-05 19:00:44 -07001290 rt_cache_flush(cfg->fc_nlinfo.nl_net, -1);
Milan Kocianb8f55832007-05-23 14:55:06 -07001291 rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen,
1292 tb->tb_id, &cfg->fc_nlinfo, NLM_F_REPLACE);
Robert Olsson19baf832005-06-21 12:43:18 -07001293
Olof Johansson91b9a272005-08-09 20:24:39 -07001294 goto succeeded;
Robert Olsson19baf832005-06-21 12:43:18 -07001295 }
1296 /* Error if we find a perfect match which
1297 * uses the same scope, type, and nexthop
1298 * information.
1299 */
Julian Anastasov936f6f82008-01-28 21:18:06 -08001300 if (fa_match)
1301 goto out;
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001302
Thomas Graf4e902c52006-08-17 18:14:52 -07001303 if (!(cfg->fc_nlflags & NLM_F_APPEND))
Julian Anastasov936f6f82008-01-28 21:18:06 -08001304 fa = fa_first;
Robert Olsson19baf832005-06-21 12:43:18 -07001305 }
1306 err = -ENOENT;
Thomas Graf4e902c52006-08-17 18:14:52 -07001307 if (!(cfg->fc_nlflags & NLM_F_CREATE))
Robert Olsson19baf832005-06-21 12:43:18 -07001308 goto out;
1309
1310 err = -ENOBUFS;
Christoph Lametere94b1762006-12-06 20:33:17 -08001311 new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
Robert Olsson19baf832005-06-21 12:43:18 -07001312 if (new_fa == NULL)
1313 goto out;
1314
1315 new_fa->fa_info = fi;
1316 new_fa->fa_tos = tos;
Thomas Graf4e902c52006-08-17 18:14:52 -07001317 new_fa->fa_type = cfg->fc_type;
Robert Olsson19baf832005-06-21 12:43:18 -07001318 new_fa->fa_state = 0;
Robert Olsson19baf832005-06-21 12:43:18 -07001319 /*
	1320	 * Insert the new entry into the list.
1321 */
1322
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001323 if (!fa_head) {
Stephen Hemmingerfea86ad2008-01-12 20:57:07 -08001324 fa_head = fib_insert_node(t, key, plen);
1325 if (unlikely(!fa_head)) {
1326 err = -ENOMEM;
Robert Olssonf835e472005-06-28 15:00:39 -07001327 goto out_free_new_fa;
Stephen Hemmingerfea86ad2008-01-12 20:57:07 -08001328 }
Robert Olssonf835e472005-06-28 15:00:39 -07001329 }
Robert Olsson19baf832005-06-21 12:43:18 -07001330
David S. Miller21d8c492011-04-14 14:49:37 -07001331 if (!plen)
1332 tb->tb_num_default++;
1333
Robert Olsson2373ce12005-08-25 13:01:29 -07001334 list_add_tail_rcu(&new_fa->fa_list,
1335 (fa ? &fa->fa_list : fa_head));
Robert Olsson19baf832005-06-21 12:43:18 -07001336
Denis V. Lunev76e6ebf2008-07-05 19:00:44 -07001337 rt_cache_flush(cfg->fc_nlinfo.nl_net, -1);
Thomas Graf4e902c52006-08-17 18:14:52 -07001338 rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id,
Milan Kocianb8f55832007-05-23 14:55:06 -07001339 &cfg->fc_nlinfo, 0);
Robert Olsson19baf832005-06-21 12:43:18 -07001340succeeded:
1341 return 0;
Robert Olssonf835e472005-06-28 15:00:39 -07001342
1343out_free_new_fa:
1344 kmem_cache_free(fn_alias_kmem, new_fa);
Robert Olsson19baf832005-06-21 12:43:18 -07001345out:
1346 fib_release_info(fi);
Olof Johansson91b9a272005-08-09 20:24:39 -07001347err:
Robert Olsson19baf832005-06-21 12:43:18 -07001348 return err;
1349}
1350
Robert Olsson772cb712005-09-19 15:31:18 -07001351/* should be called with rcu_read_lock */
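/* Return convention, as relied on by fib_table_lookup() below: 0 when a
 * usable route has been filled into *res, a negative fib_props[] error
 * for special route types (e.g. blackhole or prohibit), and 1 when
 * nothing in this leaf matches and the caller should keep backtracking.
 */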
David S. Miller5b470442011-01-31 16:10:03 -08001352static int check_leaf(struct fib_table *tb, struct trie *t, struct leaf *l,
David S. Miller22bd5b92011-03-11 19:54:08 -05001353 t_key key, const struct flowi4 *flp,
Eric Dumazetebc0ffa2010-10-05 10:41:36 +00001354 struct fib_result *res, int fib_flags)
Robert Olsson19baf832005-06-21 12:43:18 -07001355{
Robert Olsson19baf832005-06-21 12:43:18 -07001356 struct leaf_info *li;
1357 struct hlist_head *hhead = &l->list;
1358 struct hlist_node *node;
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001359
Robert Olsson2373ce12005-08-25 13:01:29 -07001360 hlist_for_each_entry_rcu(li, node, hhead, hlist) {
David S. Miller3be06862011-03-07 15:01:10 -08001361 struct fib_alias *fa;
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001362
Eric Dumazet5c745012011-07-18 03:16:33 +00001363 if (l->key != (key & li->mask_plen))
Robert Olsson19baf832005-06-21 12:43:18 -07001364 continue;
1365
David S. Miller3be06862011-03-07 15:01:10 -08001366 list_for_each_entry_rcu(fa, &li->falh, fa_list) {
1367 struct fib_info *fi = fa->fa_info;
1368 int nhsel, err;
1369
David S. Miller22bd5b92011-03-11 19:54:08 -05001370 if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos)
David S. Miller3be06862011-03-07 15:01:10 -08001371 continue;
David S. Millerdccd9ec2012-05-10 22:16:32 -04001372 if (fi->fib_dead)
1373 continue;
David S. Miller37e826c2011-03-24 18:06:47 -07001374 if (fa->fa_info->fib_scope < flp->flowi4_scope)
David S. Miller3be06862011-03-07 15:01:10 -08001375 continue;
1376 fib_alias_accessed(fa);
1377 err = fib_props[fa->fa_type].error;
1378 if (err) {
1379#ifdef CONFIG_IP_FIB_TRIE_STATS
Julian Anastasov1fbc7842011-03-25 20:33:23 -07001380 t->stats.semantic_match_passed++;
David S. Miller3be06862011-03-07 15:01:10 -08001381#endif
Julian Anastasov1fbc7842011-03-25 20:33:23 -07001382 return err;
David S. Miller3be06862011-03-07 15:01:10 -08001383 }
1384 if (fi->fib_flags & RTNH_F_DEAD)
1385 continue;
1386 for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) {
1387 const struct fib_nh *nh = &fi->fib_nh[nhsel];
1388
1389 if (nh->nh_flags & RTNH_F_DEAD)
1390 continue;
David S. Miller22bd5b92011-03-11 19:54:08 -05001391 if (flp->flowi4_oif && flp->flowi4_oif != nh->nh_oif)
David S. Miller3be06862011-03-07 15:01:10 -08001392 continue;
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001393
Robert Olsson19baf832005-06-21 12:43:18 -07001394#ifdef CONFIG_IP_FIB_TRIE_STATS
David S. Miller3be06862011-03-07 15:01:10 -08001395 t->stats.semantic_match_passed++;
Robert Olsson19baf832005-06-21 12:43:18 -07001396#endif
Eric Dumazet5c745012011-07-18 03:16:33 +00001397 res->prefixlen = li->plen;
David S. Miller3be06862011-03-07 15:01:10 -08001398 res->nh_sel = nhsel;
1399 res->type = fa->fa_type;
David S. Miller37e826c2011-03-24 18:06:47 -07001400 res->scope = fa->fa_info->fib_scope;
David S. Miller3be06862011-03-07 15:01:10 -08001401 res->fi = fi;
1402 res->table = tb;
1403 res->fa_head = &li->falh;
1404 if (!(fib_flags & FIB_LOOKUP_NOREF))
Eric Dumazet5c745012011-07-18 03:16:33 +00001405 atomic_inc(&fi->fib_clntref);
David S. Miller3be06862011-03-07 15:01:10 -08001406 return 0;
1407 }
1408 }
1409
1410#ifdef CONFIG_IP_FIB_TRIE_STATS
1411 t->stats.semantic_match_miss++;
1412#endif
Robert Olsson19baf832005-06-21 12:43:18 -07001413 }
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001414
Ben Hutchings2e655572008-07-10 16:52:52 -07001415 return 1;
Robert Olsson19baf832005-06-21 12:43:18 -07001416}
1417
David S. Miller22bd5b92011-03-11 19:54:08 -05001418int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
Eric Dumazetebc0ffa2010-10-05 10:41:36 +00001419 struct fib_result *res, int fib_flags)
Robert Olsson19baf832005-06-21 12:43:18 -07001420{
1421 struct trie *t = (struct trie *) tb->tb_data;
Ben Hutchings2e655572008-07-10 16:52:52 -07001422 int ret;
David S. Millerb299e4f2011-02-02 20:48:10 -08001423 struct rt_trie_node *n;
Robert Olsson19baf832005-06-21 12:43:18 -07001424 struct tnode *pn;
David S. Miller3b004562011-02-16 14:56:22 -08001425 unsigned int pos, bits;
David S. Miller22bd5b92011-03-11 19:54:08 -05001426 t_key key = ntohl(flp->daddr);
David S. Miller3b004562011-02-16 14:56:22 -08001427 unsigned int chopped_off;
Robert Olsson19baf832005-06-21 12:43:18 -07001428 t_key cindex = 0;
David S. Miller3b004562011-02-16 14:56:22 -08001429 unsigned int current_prefix_length = KEYLENGTH;
Olof Johansson91b9a272005-08-09 20:24:39 -07001430 struct tnode *cn;
Eric Dumazet874ffa82010-10-13 06:56:11 +00001431 t_key pref_mismatch;
Olof Johansson91b9a272005-08-09 20:24:39 -07001432
Robert Olsson2373ce12005-08-25 13:01:29 -07001433 rcu_read_lock();
Robert Olsson19baf832005-06-21 12:43:18 -07001434
Robert Olsson2373ce12005-08-25 13:01:29 -07001435 n = rcu_dereference(t->trie);
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001436 if (!n)
Robert Olsson19baf832005-06-21 12:43:18 -07001437 goto failed;
1438
1439#ifdef CONFIG_IP_FIB_TRIE_STATS
1440 t->stats.gets++;
1441#endif
1442
1443 /* Just a leaf? */
1444 if (IS_LEAF(n)) {
David S. Miller5b470442011-01-31 16:10:03 -08001445 ret = check_leaf(tb, t, (struct leaf *)n, key, flp, res, fib_flags);
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001446 goto found;
Robert Olsson19baf832005-06-21 12:43:18 -07001447 }
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001448
Robert Olsson19baf832005-06-21 12:43:18 -07001449 pn = (struct tnode *) n;
1450 chopped_off = 0;
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001451
Olof Johansson91b9a272005-08-09 20:24:39 -07001452 while (pn) {
Robert Olsson19baf832005-06-21 12:43:18 -07001453 pos = pn->pos;
1454 bits = pn->bits;
1455
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001456 if (!chopped_off)
Stephen Hemmingerab66b4a2007-08-10 15:22:58 -07001457 cindex = tkey_extract_bits(mask_pfx(key, current_prefix_length),
1458 pos, bits);
Robert Olsson19baf832005-06-21 12:43:18 -07001459
Jarek Poplawskib902e572009-07-14 11:20:32 +00001460 n = tnode_get_child_rcu(pn, cindex);
Robert Olsson19baf832005-06-21 12:43:18 -07001461
1462 if (n == NULL) {
1463#ifdef CONFIG_IP_FIB_TRIE_STATS
1464 t->stats.null_node_hit++;
1465#endif
1466 goto backtrace;
1467 }
1468
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001469 if (IS_LEAF(n)) {
David S. Miller5b470442011-01-31 16:10:03 -08001470 ret = check_leaf(tb, t, (struct leaf *)n, key, flp, res, fib_flags);
Ben Hutchings2e655572008-07-10 16:52:52 -07001471 if (ret > 0)
Olof Johansson91b9a272005-08-09 20:24:39 -07001472 goto backtrace;
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001473 goto found;
Olof Johansson91b9a272005-08-09 20:24:39 -07001474 }
1475
Olof Johansson91b9a272005-08-09 20:24:39 -07001476 cn = (struct tnode *)n;
1477
1478 /*
1479 * It's a tnode, and we can do some extra checks here if we
1480 * like, to avoid descending into a dead-end branch.
1481 * This tnode is in the parent's child array at index
1482 * key[p_pos..p_pos+p_bits] but potentially with some bits
1483 * chopped off, so in reality the index may be just a
1484 * subprefix, padded with zero at the end.
1485 * We can also take a look at any skipped bits in this
1486 * tnode - everything up to p_pos is supposed to be ok,
1487 * and the non-chopped bits of the index (se previous
1488 * paragraph) are also guaranteed ok, but the rest is
1489 * considered unknown.
1490 *
1491 * The skipped bits are key[pos+bits..cn->pos].
1492 */
1493
1494 /* If current_prefix_length < pos+bits, we are already doing
1495 * actual prefix matching, which means everything from
1496 * pos+(bits-chopped_off) onward must be zero along some
1497 * branch of this subtree - otherwise there is *no* valid
1498 * prefix present. Here we can only check the skipped
1499 * bits. Remember, since we have already indexed into the
	1500		 * parent's child array, we know that the bits we chopped off
1501 * *are* zero.
1502 */
1503
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001504 /* NOTA BENE: Checking only skipped bits
1505 for the new node here */
Olof Johansson91b9a272005-08-09 20:24:39 -07001506
1507 if (current_prefix_length < pos+bits) {
1508 if (tkey_extract_bits(cn->key, current_prefix_length,
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001509 cn->pos - current_prefix_length)
1510 || !(cn->child[0]))
Olof Johansson91b9a272005-08-09 20:24:39 -07001511 goto backtrace;
1512 }
1513
1514 /*
1515 * If chopped_off=0, the index is fully validated and we
1516 * only need to look at the skipped bits for this, the new,
1517 * tnode. What we actually want to do is to find out if
1518 * these skipped bits match our key perfectly, or if we will
1519 * have to count on finding a matching prefix further down,
1520 * because if we do, we would like to have some way of
1521 * verifying the existence of such a prefix at this point.
1522 */
1523
1524 /* The only thing we can do at this point is to verify that
1525 * any such matching prefix can indeed be a prefix to our
1526 * key, and if the bits in the node we are inspecting that
1527 * do not match our key are not ZERO, this cannot be true.
1528 * Thus, find out where there is a mismatch (before cn->pos)
1529 * and verify that all the mismatching bits are zero in the
1530 * new tnode's key.
1531 */
1532
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001533 /*
	1534		 * Note: We aren't very concerned about the bits of
1535 * the key that precede pn->pos+pn->bits, since these
1536 * have already been checked. The bits after cn->pos
1537 * aren't checked since these are by definition
1538 * "unknown" at this point. Thus, what we want to see
1539 * is if we are about to enter the "prefix matching"
1540 * state, and in that case verify that the skipped
1541 * bits that will prevail throughout this subtree are
1542 * zero, as they have to be if we are to find a
1543 * matching prefix.
Olof Johansson91b9a272005-08-09 20:24:39 -07001544 */
1545
Eric Dumazet874ffa82010-10-13 06:56:11 +00001546 pref_mismatch = mask_pfx(cn->key ^ key, cn->pos);
Olof Johansson91b9a272005-08-09 20:24:39 -07001547
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001548 /*
1549 * In short: If skipped bits in this node do not match
1550 * the search key, enter the "prefix matching"
	1551		 * state directly.
Olof Johansson91b9a272005-08-09 20:24:39 -07001552 */
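		/* Worked example (values assumed): cn->key = 0xc0a80000,
		 * key = 0xc0a90000 and cn->pos = 24.  The XOR is
		 * 0x00010000, mask_pfx() keeps the top 24 bits so
		 * pref_mismatch stays 0x00010000, fls() returns 17 and
		 * mp = 32 - 17 = 15, i.e. the first mismatch is bit 15
		 * counting from the most significant bit.
		 */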
1553 if (pref_mismatch) {
Eric Dumazet874ffa82010-10-13 06:56:11 +00001554 int mp = KEYLENGTH - fls(pref_mismatch);
Olof Johansson91b9a272005-08-09 20:24:39 -07001555
Eric Dumazet874ffa82010-10-13 06:56:11 +00001556 if (tkey_extract_bits(cn->key, mp, cn->pos - mp) != 0)
Olof Johansson91b9a272005-08-09 20:24:39 -07001557 goto backtrace;
1558
1559 if (current_prefix_length >= cn->pos)
1560 current_prefix_length = mp;
1561 }
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001562
Olof Johansson91b9a272005-08-09 20:24:39 -07001563 pn = (struct tnode *)n; /* Descend */
1564 chopped_off = 0;
1565 continue;
1566
Robert Olsson19baf832005-06-21 12:43:18 -07001567backtrace:
1568 chopped_off++;
1569
	1570		/* Zero bits don't change the child index (cindex), so skip them */
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001571 while ((chopped_off <= pn->bits)
1572 && !(cindex & (1<<(chopped_off-1))))
Robert Olsson19baf832005-06-21 12:43:18 -07001573 chopped_off++;
Robert Olsson19baf832005-06-21 12:43:18 -07001574
	1575		/* Decrease current_prefix_length to exclude the chopped-off bits */
1576 if (current_prefix_length > pn->pos + pn->bits - chopped_off)
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001577 current_prefix_length = pn->pos + pn->bits
1578 - chopped_off;
Olof Johansson91b9a272005-08-09 20:24:39 -07001579
Robert Olsson19baf832005-06-21 12:43:18 -07001580 /*
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001581		 * Either we do the actual chop off accordingly, or, if we have
Robert Olsson19baf832005-06-21 12:43:18 -07001582		 * chopped off all bits in this tnode, walk up to our parent.
1583 */
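		/* Example (values assumed): with pn->bits = 3 and
		 * cindex = 0b110, backtracking skips the already-zero
		 * bit 0, clears bit 1 (cindex becomes 0b100) and retries
		 * that child; if that also fails it clears bit 2
		 * (cindex 0b000), and only once chopped_off exceeds
		 * pn->bits does the walk move up to the parent tnode.
		 */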
1584
Olof Johansson91b9a272005-08-09 20:24:39 -07001585 if (chopped_off <= pn->bits) {
Robert Olsson19baf832005-06-21 12:43:18 -07001586 cindex &= ~(1 << (chopped_off-1));
Olof Johansson91b9a272005-08-09 20:24:39 -07001587 } else {
David S. Millerb299e4f2011-02-02 20:48:10 -08001588 struct tnode *parent = node_parent_rcu((struct rt_trie_node *) pn);
Stephen Hemminger06801912007-08-10 15:22:13 -07001589 if (!parent)
Robert Olsson19baf832005-06-21 12:43:18 -07001590 goto failed;
Olof Johansson91b9a272005-08-09 20:24:39 -07001591
Robert Olsson19baf832005-06-21 12:43:18 -07001592 /* Get Child's index */
Stephen Hemminger06801912007-08-10 15:22:13 -07001593 cindex = tkey_extract_bits(pn->key, parent->pos, parent->bits);
1594 pn = parent;
Robert Olsson19baf832005-06-21 12:43:18 -07001595 chopped_off = 0;
1596
1597#ifdef CONFIG_IP_FIB_TRIE_STATS
1598 t->stats.backtrack++;
1599#endif
1600 goto backtrace;
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001601 }
Robert Olsson19baf832005-06-21 12:43:18 -07001602 }
1603failed:
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001604 ret = 1;
Robert Olsson19baf832005-06-21 12:43:18 -07001605found:
Robert Olsson2373ce12005-08-25 13:01:29 -07001606 rcu_read_unlock();
Robert Olsson19baf832005-06-21 12:43:18 -07001607 return ret;
1608}
Florian Westphal6fc01432011-08-25 13:46:12 +02001609EXPORT_SYMBOL_GPL(fib_table_lookup);
Robert Olsson19baf832005-06-21 12:43:18 -07001610
Stephen Hemminger9195bef2008-01-22 21:56:34 -08001611/*
	1612	 * Remove the leaf and rebalance the trie from its parent.
1613 */
1614static void trie_leaf_remove(struct trie *t, struct leaf *l)
Robert Olsson19baf832005-06-21 12:43:18 -07001615{
David S. Millerb299e4f2011-02-02 20:48:10 -08001616 struct tnode *tp = node_parent((struct rt_trie_node *) l);
Robert Olsson19baf832005-06-21 12:43:18 -07001617
Stephen Hemminger9195bef2008-01-22 21:56:34 -08001618 pr_debug("entering trie_leaf_remove(%p)\n", l);
Robert Olsson19baf832005-06-21 12:43:18 -07001619
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001620 if (tp) {
Stephen Hemminger9195bef2008-01-22 21:56:34 -08001621 t_key cindex = tkey_extract_bits(l->key, tp->pos, tp->bits);
Robert Olsson19baf832005-06-21 12:43:18 -07001622 put_child(t, (struct tnode *)tp, cindex, NULL);
Jarek Poplawski7b855762009-06-18 00:28:51 -07001623 trie_rebalance(t, tp);
Olof Johansson91b9a272005-08-09 20:24:39 -07001624 } else
Stephen Hemmingera9b3cd72011-08-01 16:19:00 +00001625 RCU_INIT_POINTER(t->trie, NULL);
Robert Olsson19baf832005-06-21 12:43:18 -07001626
Stephen Hemminger387a5482008-04-10 03:47:34 -07001627 free_leaf(l);
Robert Olsson19baf832005-06-21 12:43:18 -07001628}
1629
Robert Olssond562f1f2007-03-26 14:22:22 -07001630/*
1631 * Caller must hold RTNL.
1632 */
Stephen Hemminger16c6cf82009-09-20 10:35:36 +00001633int fib_table_delete(struct fib_table *tb, struct fib_config *cfg)
Robert Olsson19baf832005-06-21 12:43:18 -07001634{
1635 struct trie *t = (struct trie *) tb->tb_data;
1636 u32 key, mask;
Thomas Graf4e902c52006-08-17 18:14:52 -07001637 int plen = cfg->fc_dst_len;
1638 u8 tos = cfg->fc_tos;
Robert Olsson19baf832005-06-21 12:43:18 -07001639 struct fib_alias *fa, *fa_to_delete;
1640 struct list_head *fa_head;
1641 struct leaf *l;
Olof Johansson91b9a272005-08-09 20:24:39 -07001642 struct leaf_info *li;
1643
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001644 if (plen > 32)
Robert Olsson19baf832005-06-21 12:43:18 -07001645 return -EINVAL;
1646
Thomas Graf4e902c52006-08-17 18:14:52 -07001647 key = ntohl(cfg->fc_dst);
Olof Johansson91b9a272005-08-09 20:24:39 -07001648 mask = ntohl(inet_make_mask(plen));
Robert Olsson19baf832005-06-21 12:43:18 -07001649
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001650 if (key & ~mask)
Robert Olsson19baf832005-06-21 12:43:18 -07001651 return -EINVAL;
1652
1653 key = key & mask;
1654 l = fib_find_node(t, key);
1655
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001656 if (!l)
Robert Olsson19baf832005-06-21 12:43:18 -07001657 return -ESRCH;
1658
1659 fa_head = get_fa_head(l, plen);
1660 fa = fib_find_alias(fa_head, tos, 0);
1661
1662 if (!fa)
1663 return -ESRCH;
1664
Stephen Hemminger0c7770c2005-08-23 21:59:41 -07001665 pr_debug("Deleting %08x/%d tos=%d t=%p\n", key, plen, tos, t);
Robert Olsson19baf832005-06-21 12:43:18 -07001666
1667 fa_to_delete = NULL;
Julian Anastasov936f6f82008-01-28 21:18:06 -08001668 fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
1669 list_for_each_entry_continue(fa, fa_head, fa_list) {
Robert Olsson19baf832005-06-21 12:43:18 -07001670 struct fib_info *fi = fa->fa_info;
1671
1672 if (fa->fa_tos != tos)
1673 break;
1674
Thomas Graf4e902c52006-08-17 18:14:52 -07001675 if ((!cfg->fc_type || fa->fa_type == cfg->fc_type) &&
1676 (cfg->fc_scope == RT_SCOPE_NOWHERE ||
David S. Miller37e826c2011-03-24 18:06:47 -07001677 fa->fa_info->fib_scope == cfg->fc_scope) &&
Julian Anastasov74cb3c12011-03-19 12:13:46 +00001678 (!cfg->fc_prefsrc ||
1679 fi->fib_prefsrc == cfg->fc_prefsrc) &&
Thomas Graf4e902c52006-08-17 18:14:52 -07001680 (!cfg->fc_protocol ||
1681 fi->fib_protocol == cfg->fc_protocol) &&
1682 fib_nh_match(cfg, fi) == 0) {
Robert Olsson19baf832005-06-21 12:43:18 -07001683 fa_to_delete = fa;
1684 break;
1685 }
1686 }
1687
Olof Johansson91b9a272005-08-09 20:24:39 -07001688 if (!fa_to_delete)
1689 return -ESRCH;
Robert Olsson19baf832005-06-21 12:43:18 -07001690
Olof Johansson91b9a272005-08-09 20:24:39 -07001691 fa = fa_to_delete;
Thomas Graf4e902c52006-08-17 18:14:52 -07001692 rtmsg_fib(RTM_DELROUTE, htonl(key), fa, plen, tb->tb_id,
Milan Kocianb8f55832007-05-23 14:55:06 -07001693 &cfg->fc_nlinfo, 0);
Robert Olsson19baf832005-06-21 12:43:18 -07001694
Olof Johansson91b9a272005-08-09 20:24:39 -07001695 l = fib_find_node(t, key);
Robert Olsson772cb712005-09-19 15:31:18 -07001696 li = find_leaf_info(l, plen);
Robert Olsson19baf832005-06-21 12:43:18 -07001697
Robert Olsson2373ce12005-08-25 13:01:29 -07001698 list_del_rcu(&fa->fa_list);
Robert Olsson19baf832005-06-21 12:43:18 -07001699
David S. Miller21d8c492011-04-14 14:49:37 -07001700 if (!plen)
1701 tb->tb_num_default--;
1702
Olof Johansson91b9a272005-08-09 20:24:39 -07001703 if (list_empty(fa_head)) {
Robert Olsson2373ce12005-08-25 13:01:29 -07001704 hlist_del_rcu(&li->hlist);
Olof Johansson91b9a272005-08-09 20:24:39 -07001705 free_leaf_info(li);
Robert Olsson2373ce12005-08-25 13:01:29 -07001706 }
Olof Johansson91b9a272005-08-09 20:24:39 -07001707
1708 if (hlist_empty(&l->list))
Stephen Hemminger9195bef2008-01-22 21:56:34 -08001709 trie_leaf_remove(t, l);
Olof Johansson91b9a272005-08-09 20:24:39 -07001710
1711 if (fa->fa_state & FA_S_ACCESSED)
Denis V. Lunev76e6ebf2008-07-05 19:00:44 -07001712 rt_cache_flush(cfg->fc_nlinfo.nl_net, -1);
Olof Johansson91b9a272005-08-09 20:24:39 -07001713
Robert Olsson2373ce12005-08-25 13:01:29 -07001714 fib_release_info(fa->fa_info);
1715 alias_free_mem_rcu(fa);
Olof Johansson91b9a272005-08-09 20:24:39 -07001716 return 0;
Robert Olsson19baf832005-06-21 12:43:18 -07001717}
1718
Stephen Hemmingeref3660c2008-04-10 03:46:12 -07001719static int trie_flush_list(struct list_head *head)
Robert Olsson19baf832005-06-21 12:43:18 -07001720{
1721 struct fib_alias *fa, *fa_node;
1722 int found = 0;
1723
1724 list_for_each_entry_safe(fa, fa_node, head, fa_list) {
1725 struct fib_info *fi = fa->fa_info;
Robert Olsson19baf832005-06-21 12:43:18 -07001726
Robert Olsson2373ce12005-08-25 13:01:29 -07001727 if (fi && (fi->fib_flags & RTNH_F_DEAD)) {
1728 list_del_rcu(&fa->fa_list);
1729 fib_release_info(fa->fa_info);
1730 alias_free_mem_rcu(fa);
Robert Olsson19baf832005-06-21 12:43:18 -07001731 found++;
1732 }
1733 }
1734 return found;
1735}
1736
Stephen Hemmingeref3660c2008-04-10 03:46:12 -07001737static int trie_flush_leaf(struct leaf *l)
Robert Olsson19baf832005-06-21 12:43:18 -07001738{
1739 int found = 0;
1740 struct hlist_head *lih = &l->list;
1741 struct hlist_node *node, *tmp;
1742 struct leaf_info *li = NULL;
1743
1744 hlist_for_each_entry_safe(li, node, tmp, lih, hlist) {
Stephen Hemmingeref3660c2008-04-10 03:46:12 -07001745 found += trie_flush_list(&li->falh);
Robert Olsson19baf832005-06-21 12:43:18 -07001746
1747 if (list_empty(&li->falh)) {
Robert Olsson2373ce12005-08-25 13:01:29 -07001748 hlist_del_rcu(&li->hlist);
Robert Olsson19baf832005-06-21 12:43:18 -07001749 free_leaf_info(li);
1750 }
1751 }
1752 return found;
1753}
1754
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001755/*
1756 * Scan for the next right leaf starting at node p->child[idx]
	1757	 * Since we have back pointers, no recursion is necessary.
1758 */
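/* Example (tree shape assumed): for a root tnode with children
 * {leaf A, tnode {leaf B, leaf C}, NULL, leaf D}, starting from
 * leaf_walk_rcu(root, NULL) and feeding each result back in visits
 * A, B, C, D in order and then returns NULL.
 */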
David S. Millerb299e4f2011-02-02 20:48:10 -08001759static struct leaf *leaf_walk_rcu(struct tnode *p, struct rt_trie_node *c)
Robert Olsson19baf832005-06-21 12:43:18 -07001760{
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001761 do {
1762 t_key idx;
Robert Olsson19baf832005-06-21 12:43:18 -07001763
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001764 if (c)
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001765 idx = tkey_extract_bits(c->key, p->pos, p->bits) + 1;
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07001766 else
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001767 idx = 0;
Robert Olsson19baf832005-06-21 12:43:18 -07001768
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001769 while (idx < 1u << p->bits) {
1770 c = tnode_get_child_rcu(p, idx++);
Robert Olsson2373ce12005-08-25 13:01:29 -07001771 if (!c)
Olof Johansson91b9a272005-08-09 20:24:39 -07001772 continue;
Robert Olsson19baf832005-06-21 12:43:18 -07001773
Eric Dumazet07d5c262013-08-05 11:18:49 -07001774 if (IS_LEAF(c))
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001775 return (struct leaf *) c;
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001776
	1777			/* Restart scanning in the new node */
1778 p = (struct tnode *) c;
1779 idx = 0;
Robert Olsson19baf832005-06-21 12:43:18 -07001780 }
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001781
1782 /* Node empty, walk back up to parent */
David S. Millerb299e4f2011-02-02 20:48:10 -08001783 c = (struct rt_trie_node *) p;
Eric Dumazeta034ee32010-09-09 23:32:28 +00001784 } while ((p = node_parent_rcu(c)) != NULL);
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001785
1786 return NULL; /* Root of trie */
1787}
1788
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001789static struct leaf *trie_firstleaf(struct trie *t)
1790{
Eric Dumazeta034ee32010-09-09 23:32:28 +00001791 struct tnode *n = (struct tnode *)rcu_dereference_rtnl(t->trie);
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001792
1793 if (!n)
1794 return NULL;
1795
1796 if (IS_LEAF(n)) /* trie is just a leaf */
1797 return (struct leaf *) n;
1798
1799 return leaf_walk_rcu(n, NULL);
1800}
1801
1802static struct leaf *trie_nextleaf(struct leaf *l)
1803{
David S. Millerb299e4f2011-02-02 20:48:10 -08001804 struct rt_trie_node *c = (struct rt_trie_node *) l;
Jarek Poplawskib902e572009-07-14 11:20:32 +00001805 struct tnode *p = node_parent_rcu(c);
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001806
1807 if (!p)
1808 return NULL; /* trie with just one leaf */
1809
1810 return leaf_walk_rcu(p, c);
Robert Olsson19baf832005-06-21 12:43:18 -07001811}
1812
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001813static struct leaf *trie_leafindex(struct trie *t, int index)
1814{
1815 struct leaf *l = trie_firstleaf(t);
1816
Stephen Hemmingerec28cf72008-02-11 21:12:49 -08001817 while (l && index-- > 0)
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001818 l = trie_nextleaf(l);
Stephen Hemmingerec28cf72008-02-11 21:12:49 -08001819
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001820 return l;
1821}
1822
1823
Robert Olssond562f1f2007-03-26 14:22:22 -07001824/*
1825 * Caller must hold RTNL.
1826 */
Stephen Hemminger16c6cf82009-09-20 10:35:36 +00001827int fib_table_flush(struct fib_table *tb)
Robert Olsson19baf832005-06-21 12:43:18 -07001828{
1829 struct trie *t = (struct trie *) tb->tb_data;
Stephen Hemminger9195bef2008-01-22 21:56:34 -08001830 struct leaf *l, *ll = NULL;
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001831 int found = 0;
Robert Olsson19baf832005-06-21 12:43:18 -07001832
Stephen Hemminger82cfbb02008-01-22 21:55:32 -08001833 for (l = trie_firstleaf(t); l; l = trie_nextleaf(l)) {
Stephen Hemmingeref3660c2008-04-10 03:46:12 -07001834 found += trie_flush_leaf(l);
Robert Olsson19baf832005-06-21 12:43:18 -07001835
1836 if (ll && hlist_empty(&ll->list))
Stephen Hemminger9195bef2008-01-22 21:56:34 -08001837 trie_leaf_remove(t, ll);
Robert Olsson19baf832005-06-21 12:43:18 -07001838 ll = l;
1839 }
1840
1841 if (ll && hlist_empty(&ll->list))
Stephen Hemminger9195bef2008-01-22 21:56:34 -08001842 trie_leaf_remove(t, ll);
Robert Olsson19baf832005-06-21 12:43:18 -07001843
Stephen Hemminger0c7770c2005-08-23 21:59:41 -07001844 pr_debug("trie_flush found=%d\n", found);
Robert Olsson19baf832005-06-21 12:43:18 -07001845 return found;
1846}
1847
Pavel Emelyanov4aa2c462010-10-28 02:00:43 +00001848void fib_free_table(struct fib_table *tb)
1849{
1850 kfree(tb);
1851}
1852
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001853static int fn_trie_dump_fa(t_key key, int plen, struct list_head *fah,
1854 struct fib_table *tb,
Robert Olsson19baf832005-06-21 12:43:18 -07001855 struct sk_buff *skb, struct netlink_callback *cb)
1856{
1857 int i, s_i;
1858 struct fib_alias *fa;
Al Viro32ab5f82006-09-26 22:21:45 -07001859 __be32 xkey = htonl(key);
Robert Olsson19baf832005-06-21 12:43:18 -07001860
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001861 s_i = cb->args[5];
Robert Olsson19baf832005-06-21 12:43:18 -07001862 i = 0;
1863
Robert Olsson2373ce12005-08-25 13:01:29 -07001864	/* rcu_read_lock is held by the caller */
1865
1866 list_for_each_entry_rcu(fa, fah, fa_list) {
Robert Olsson19baf832005-06-21 12:43:18 -07001867 if (i < s_i) {
1868 i++;
1869 continue;
1870 }
Robert Olsson19baf832005-06-21 12:43:18 -07001871
1872 if (fib_dump_info(skb, NETLINK_CB(cb->skb).pid,
1873 cb->nlh->nlmsg_seq,
1874 RTM_NEWROUTE,
1875 tb->tb_id,
1876 fa->fa_type,
Thomas Grafbe403ea2006-08-17 18:15:17 -07001877 xkey,
Robert Olsson19baf832005-06-21 12:43:18 -07001878 plen,
1879 fa->fa_tos,
Stephen Hemminger64347f72008-01-22 21:55:01 -08001880 fa->fa_info, NLM_F_MULTI) < 0) {
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001881 cb->args[5] = i;
Robert Olsson19baf832005-06-21 12:43:18 -07001882 return -1;
Olof Johansson91b9a272005-08-09 20:24:39 -07001883 }
Robert Olsson19baf832005-06-21 12:43:18 -07001884 i++;
1885 }
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001886 cb->args[5] = i;
Robert Olsson19baf832005-06-21 12:43:18 -07001887 return skb->len;
1888}
1889
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001890static int fn_trie_dump_leaf(struct leaf *l, struct fib_table *tb,
1891 struct sk_buff *skb, struct netlink_callback *cb)
Robert Olsson19baf832005-06-21 12:43:18 -07001892{
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001893 struct leaf_info *li;
1894 struct hlist_node *node;
1895 int i, s_i;
Robert Olsson19baf832005-06-21 12:43:18 -07001896
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001897 s_i = cb->args[4];
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001898 i = 0;
Robert Olsson19baf832005-06-21 12:43:18 -07001899
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001900	/* rcu_read_lock is held by the caller */
1901 hlist_for_each_entry_rcu(li, node, &l->list, hlist) {
1902 if (i < s_i) {
1903 i++;
Robert Olsson19baf832005-06-21 12:43:18 -07001904 continue;
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001905 }
Robert Olsson19baf832005-06-21 12:43:18 -07001906
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001907 if (i > s_i)
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001908 cb->args[5] = 0;
Olof Johansson91b9a272005-08-09 20:24:39 -07001909
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001910 if (list_empty(&li->falh))
Robert Olsson19baf832005-06-21 12:43:18 -07001911 continue;
1912
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001913 if (fn_trie_dump_fa(l->key, li->plen, &li->falh, tb, skb, cb) < 0) {
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001914 cb->args[4] = i;
Robert Olsson19baf832005-06-21 12:43:18 -07001915 return -1;
1916 }
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001917 i++;
Robert Olsson19baf832005-06-21 12:43:18 -07001918 }
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001919
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001920 cb->args[4] = i;
Robert Olsson19baf832005-06-21 12:43:18 -07001921 return skb->len;
1922}
1923
Stephen Hemminger16c6cf82009-09-20 10:35:36 +00001924int fib_table_dump(struct fib_table *tb, struct sk_buff *skb,
1925 struct netlink_callback *cb)
Robert Olsson19baf832005-06-21 12:43:18 -07001926{
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001927 struct leaf *l;
Robert Olsson19baf832005-06-21 12:43:18 -07001928 struct trie *t = (struct trie *) tb->tb_data;
Stephen Hemmingerd5ce8a02008-01-22 21:57:22 -08001929 t_key key = cb->args[2];
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001930 int count = cb->args[3];
Robert Olsson19baf832005-06-21 12:43:18 -07001931
Robert Olsson2373ce12005-08-25 13:01:29 -07001932 rcu_read_lock();
Stephen Hemmingerd5ce8a02008-01-22 21:57:22 -08001933 /* Dump starting at last key.
	1934	 * Note: 0.0.0.0/0 (i.e. the default route) is the first key.
1935 */
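	/* Resume state kept in cb->args, as used below: args[2] is the key
	 * of the last leaf visited, args[3] the number of leaves already
	 * dumped, args[4] the index into the leaf's leaf_info list and
	 * args[5] the index into that prefix's alias list.
	 */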
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001936 if (count == 0)
Stephen Hemmingerd5ce8a02008-01-22 21:57:22 -08001937 l = trie_firstleaf(t);
1938 else {
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001939 /* Normally, continue from last key, but if that is missing
	1940	 * fall back to using a slow rescan
1941 */
Stephen Hemmingerd5ce8a02008-01-22 21:57:22 -08001942 l = fib_find_node(t, key);
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001943 if (!l)
1944 l = trie_leafindex(t, count);
Stephen Hemmingerd5ce8a02008-01-22 21:57:22 -08001945 }
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001946
Stephen Hemmingerd5ce8a02008-01-22 21:57:22 -08001947 while (l) {
1948 cb->args[2] = l->key;
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001949 if (fn_trie_dump_leaf(l, tb, skb, cb) < 0) {
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001950 cb->args[3] = count;
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001951 rcu_read_unlock();
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001952 return -1;
Robert Olsson19baf832005-06-21 12:43:18 -07001953 }
Stephen Hemmingerd5ce8a02008-01-22 21:57:22 -08001954
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001955 ++count;
Stephen Hemmingerd5ce8a02008-01-22 21:57:22 -08001956 l = trie_nextleaf(l);
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001957 memset(&cb->args[4], 0,
1958 sizeof(cb->args) - 4*sizeof(cb->args[0]));
Robert Olsson19baf832005-06-21 12:43:18 -07001959 }
Stephen Hemminger71d67e62008-01-31 16:45:47 -08001960 cb->args[3] = count;
Robert Olsson2373ce12005-08-25 13:01:29 -07001961 rcu_read_unlock();
Stephen Hemmingera88ee222008-01-22 21:56:11 -08001962
Robert Olsson19baf832005-06-21 12:43:18 -07001963 return skb->len;
Robert Olsson19baf832005-06-21 12:43:18 -07001964}
1965
David S. Miller5348ba82011-02-01 15:30:56 -08001966void __init fib_trie_init(void)
Stephen Hemminger7f9b8052008-01-14 23:14:20 -08001967{
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08001968 fn_alias_kmem = kmem_cache_create("ip_fib_alias",
1969 sizeof(struct fib_alias),
Stephen Hemmingerbc3c8c12008-01-22 21:51:50 -08001970 0, SLAB_PANIC, NULL);
1971
1972 trie_leaf_kmem = kmem_cache_create("ip_fib_trie",
1973 max(sizeof(struct leaf),
1974 sizeof(struct leaf_info)),
1975 0, SLAB_PANIC, NULL);
Stephen Hemminger7f9b8052008-01-14 23:14:20 -08001976}
Robert Olsson19baf832005-06-21 12:43:18 -07001977
Stephen Hemminger7f9b8052008-01-14 23:14:20 -08001978
David S. Miller5348ba82011-02-01 15:30:56 -08001979struct fib_table *fib_trie_table(u32 id)
Robert Olsson19baf832005-06-21 12:43:18 -07001980{
1981 struct fib_table *tb;
1982 struct trie *t;
1983
Robert Olsson19baf832005-06-21 12:43:18 -07001984 tb = kmalloc(sizeof(struct fib_table) + sizeof(struct trie),
1985 GFP_KERNEL);
1986 if (tb == NULL)
1987 return NULL;
1988
1989 tb->tb_id = id;
Denis V. Lunev971b8932007-12-08 00:32:23 -08001990 tb->tb_default = -1;
David S. Miller21d8c492011-04-14 14:49:37 -07001991 tb->tb_num_default = 0;
Robert Olsson19baf832005-06-21 12:43:18 -07001992
1993 t = (struct trie *) tb->tb_data;
Stephen Hemmingerc28a1cf2008-01-12 20:49:13 -08001994 memset(t, 0, sizeof(*t));
Robert Olsson19baf832005-06-21 12:43:18 -07001995
Robert Olsson19baf832005-06-21 12:43:18 -07001996 return tb;
1997}
1998
Robert Olsson19baf832005-06-21 12:43:18 -07001999#ifdef CONFIG_PROC_FS
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002000/* Depth-first trie walk iterator */
2001struct fib_trie_iter {
Denis V. Lunev1c340b22008-01-10 03:27:17 -08002002 struct seq_net_private p;
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002003 struct fib_table *tb;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002004 struct tnode *tnode;
Eric Dumazeta034ee32010-09-09 23:32:28 +00002005 unsigned int index;
2006 unsigned int depth;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002007};
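/* The iterator above carries the walk state: tb is the table being
 * walked, tnode and index identify the next child slot to visit, and
 * depth tracks how far below the root the walk currently is.
 */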
Robert Olsson19baf832005-06-21 12:43:18 -07002008
David S. Millerb299e4f2011-02-02 20:48:10 -08002009static struct rt_trie_node *fib_trie_get_next(struct fib_trie_iter *iter)
Robert Olsson19baf832005-06-21 12:43:18 -07002010{
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002011 struct tnode *tn = iter->tnode;
Eric Dumazeta034ee32010-09-09 23:32:28 +00002012 unsigned int cindex = iter->index;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002013 struct tnode *p;
2014
Eric W. Biederman6640e692007-01-24 14:42:04 -08002015 /* A single entry routing table */
2016 if (!tn)
2017 return NULL;
2018
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002019 pr_debug("get_next iter={node=%p index=%d depth=%d}\n",
2020 iter->tnode, iter->index, iter->depth);
2021rescan:
2022 while (cindex < (1<<tn->bits)) {
David S. Millerb299e4f2011-02-02 20:48:10 -08002023 struct rt_trie_node *n = tnode_get_child_rcu(tn, cindex);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002024
2025 if (n) {
2026 if (IS_LEAF(n)) {
2027 iter->tnode = tn;
2028 iter->index = cindex + 1;
2029 } else {
2030 /* push down one level */
2031 iter->tnode = (struct tnode *) n;
2032 iter->index = 0;
2033 ++iter->depth;
2034 }
2035 return n;
2036 }
2037
2038 ++cindex;
2039 }
2040
2041 /* Current node exhausted, pop back up */
David S. Millerb299e4f2011-02-02 20:48:10 -08002042 p = node_parent_rcu((struct rt_trie_node *)tn);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002043 if (p) {
2044 cindex = tkey_extract_bits(tn->key, p->pos, p->bits)+1;
2045 tn = p;
2046 --iter->depth;
2047 goto rescan;
2048 }
2049
2050 /* got root? */
Robert Olsson19baf832005-06-21 12:43:18 -07002051 return NULL;
2052}
2053
David S. Millerb299e4f2011-02-02 20:48:10 -08002054static struct rt_trie_node *fib_trie_get_first(struct fib_trie_iter *iter,
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002055 struct trie *t)
Robert Olsson19baf832005-06-21 12:43:18 -07002056{
David S. Millerb299e4f2011-02-02 20:48:10 -08002057 struct rt_trie_node *n;
Robert Olsson5ddf0eb2006-03-20 21:34:12 -08002058
Stephen Hemminger132adf52007-03-08 20:44:43 -08002059 if (!t)
Robert Olsson5ddf0eb2006-03-20 21:34:12 -08002060 return NULL;
2061
2062 n = rcu_dereference(t->trie);
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002063 if (!n)
Robert Olsson5ddf0eb2006-03-20 21:34:12 -08002064 return NULL;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002065
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002066 if (IS_TNODE(n)) {
2067 iter->tnode = (struct tnode *) n;
2068 iter->index = 0;
2069 iter->depth = 1;
2070 } else {
2071 iter->tnode = NULL;
2072 iter->index = 0;
2073 iter->depth = 0;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002074 }
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002075
2076 return n;
Robert Olsson19baf832005-06-21 12:43:18 -07002077}
2078
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002079static void trie_collect_stats(struct trie *t, struct trie_stat *s)
Robert Olsson19baf832005-06-21 12:43:18 -07002080{
David S. Millerb299e4f2011-02-02 20:48:10 -08002081 struct rt_trie_node *n;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002082 struct fib_trie_iter iter;
Robert Olsson19baf832005-06-21 12:43:18 -07002083
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002084 memset(s, 0, sizeof(*s));
Robert Olsson19baf832005-06-21 12:43:18 -07002085
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002086 rcu_read_lock();
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002087 for (n = fib_trie_get_first(&iter, t); n; n = fib_trie_get_next(&iter)) {
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002088 if (IS_LEAF(n)) {
Stephen Hemminger93672292008-01-22 21:54:05 -08002089 struct leaf *l = (struct leaf *)n;
2090 struct leaf_info *li;
2091 struct hlist_node *tmp;
2092
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002093 s->leaves++;
2094 s->totdepth += iter.depth;
2095 if (iter.depth > s->maxdepth)
2096 s->maxdepth = iter.depth;
Stephen Hemminger93672292008-01-22 21:54:05 -08002097
2098 hlist_for_each_entry_rcu(li, tmp, &l->list, hlist)
2099 ++s->prefixes;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002100 } else {
2101 const struct tnode *tn = (const struct tnode *) n;
2102 int i;
Robert Olsson19baf832005-06-21 12:43:18 -07002103
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002104 s->tnodes++;
Stephen Hemminger132adf52007-03-08 20:44:43 -08002105 if (tn->bits < MAX_STAT_DEPTH)
Robert Olsson06ef9212006-03-20 21:35:01 -08002106 s->nodesizes[tn->bits]++;
2107
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002108 for (i = 0; i < (1<<tn->bits); i++)
2109 if (!tn->child[i])
2110 s->nullpointers++;
2111 }
2112 }
2113 rcu_read_unlock();
Robert Olsson19baf832005-06-21 12:43:18 -07002114}
2115
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07002116/*
Robert Olsson19baf832005-06-21 12:43:18 -07002117 * This outputs /proc/net/fib_triestats
Robert Olsson19baf832005-06-21 12:43:18 -07002118 */
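/* Illustrative fragment of that output (numbers invented for the sake
 * of the example; the exact formatting comes from fib_table_print()
 * and trie_show_stats() below):
 *
 *	Main:
 *	Aver depth:     2.31
 *	Max depth:      3
 *	Leaves:         190
 *	Prefixes:       197
 *	Internal nodes: 122
 *	  1: 93  2: 25  3: 4
 *	Pointers:       318
 * Null ptrs: 7
 * Total size: 16 kB
 */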
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002119static void trie_show_stats(struct seq_file *seq, struct trie_stat *stat)
Robert Olsson19baf832005-06-21 12:43:18 -07002120{
Eric Dumazeta034ee32010-09-09 23:32:28 +00002121 unsigned int i, max, pointers, bytes, avdepth;
Robert Olsson19baf832005-06-21 12:43:18 -07002122
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002123 if (stat->leaves)
2124 avdepth = stat->totdepth*100 / stat->leaves;
2125 else
2126 avdepth = 0;
Robert Olsson19baf832005-06-21 12:43:18 -07002127
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08002128 seq_printf(seq, "\tAver depth: %u.%02d\n",
2129 avdepth / 100, avdepth % 100);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002130 seq_printf(seq, "\tMax depth: %u\n", stat->maxdepth);
Robert Olsson19baf832005-06-21 12:43:18 -07002131
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002132 seq_printf(seq, "\tLeaves: %u\n", stat->leaves);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002133 bytes = sizeof(struct leaf) * stat->leaves;
Stephen Hemminger93672292008-01-22 21:54:05 -08002134
2135 seq_printf(seq, "\tPrefixes: %u\n", stat->prefixes);
2136 bytes += sizeof(struct leaf_info) * stat->prefixes;
2137
Stephen Hemminger187b5182008-01-12 20:55:55 -08002138 seq_printf(seq, "\tInternal nodes: %u\n\t", stat->tnodes);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002139 bytes += sizeof(struct tnode) * stat->tnodes;
Robert Olsson19baf832005-06-21 12:43:18 -07002140
Robert Olsson06ef9212006-03-20 21:35:01 -08002141 max = MAX_STAT_DEPTH;
2142 while (max > 0 && stat->nodesizes[max-1] == 0)
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002143 max--;
Robert Olsson19baf832005-06-21 12:43:18 -07002144
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002145 pointers = 0;
2146 for (i = 1; i <= max; i++)
2147 if (stat->nodesizes[i] != 0) {
Stephen Hemminger187b5182008-01-12 20:55:55 -08002148 seq_printf(seq, " %u: %u", i, stat->nodesizes[i]);
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002149 pointers += (1<<i) * stat->nodesizes[i];
2150 }
2151 seq_putc(seq, '\n');
Stephen Hemminger187b5182008-01-12 20:55:55 -08002152 seq_printf(seq, "\tPointers: %u\n", pointers);
Robert Olsson19baf832005-06-21 12:43:18 -07002153
David S. Millerb299e4f2011-02-02 20:48:10 -08002154 bytes += sizeof(struct rt_trie_node *) * pointers;
Stephen Hemminger187b5182008-01-12 20:55:55 -08002155 seq_printf(seq, "Null ptrs: %u\n", stat->nullpointers);
2156 seq_printf(seq, "Total size: %u kB\n", (bytes + 1023) / 1024);
Stephen Hemminger66a2f7f2008-01-12 21:23:17 -08002157}
Robert Olsson19baf832005-06-21 12:43:18 -07002158
2159#ifdef CONFIG_IP_FIB_TRIE_STATS
Stephen Hemminger66a2f7f2008-01-12 21:23:17 -08002160static void trie_show_usage(struct seq_file *seq,
2161 const struct trie_use_stats *stats)
2162{
2163 seq_printf(seq, "\nCounters:\n---------\n");
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08002164 seq_printf(seq, "gets = %u\n", stats->gets);
2165 seq_printf(seq, "backtracks = %u\n", stats->backtrack);
2166 seq_printf(seq, "semantic match passed = %u\n",
2167 stats->semantic_match_passed);
2168 seq_printf(seq, "semantic match miss = %u\n",
2169 stats->semantic_match_miss);
2170 seq_printf(seq, "null node hit= %u\n", stats->null_node_hit);
2171 seq_printf(seq, "skipped node resize = %u\n\n",
2172 stats->resize_node_skipped);
Robert Olsson19baf832005-06-21 12:43:18 -07002173}
Stephen Hemminger66a2f7f2008-01-12 21:23:17 -08002174#endif /* CONFIG_IP_FIB_TRIE_STATS */
2175
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002176static void fib_table_print(struct seq_file *seq, struct fib_table *tb)
Stephen Hemmingerd717a9a2008-01-14 23:11:54 -08002177{
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002178 if (tb->tb_id == RT_TABLE_LOCAL)
2179 seq_puts(seq, "Local:\n");
2180 else if (tb->tb_id == RT_TABLE_MAIN)
2181 seq_puts(seq, "Main:\n");
2182 else
2183 seq_printf(seq, "Id %d:\n", tb->tb_id);
Stephen Hemmingerd717a9a2008-01-14 23:11:54 -08002184}
Robert Olsson19baf832005-06-21 12:43:18 -07002185
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002186
Robert Olsson19baf832005-06-21 12:43:18 -07002187static int fib_triestat_seq_show(struct seq_file *seq, void *v)
2188{
Denis V. Lunev1c340b22008-01-10 03:27:17 -08002189 struct net *net = (struct net *)seq->private;
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002190 unsigned int h;
Eric W. Biederman877a9bf2007-12-07 00:47:47 -08002191
Stephen Hemmingerd717a9a2008-01-14 23:11:54 -08002192 seq_printf(seq,
Stephen Hemmingera07f5f52008-01-22 21:53:36 -08002193 "Basic info: size of leaf:"
2194 " %Zd bytes, size of tnode: %Zd bytes.\n",
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002195 sizeof(struct leaf), sizeof(struct tnode));
Olof Johansson91b9a272005-08-09 20:24:39 -07002196
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002197 for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
2198 struct hlist_head *head = &net->ipv4.fib_table_hash[h];
2199 struct hlist_node *node;
2200 struct fib_table *tb;
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002201
Stephen Hemminger3d3b2d22008-03-23 22:43:56 -07002202 hlist_for_each_entry_rcu(tb, node, head, tb_hlist) {
2203 struct trie *t = (struct trie *) tb->tb_data;
2204 struct trie_stat stat;
2205
2206 if (!t)
2207 continue;
2208
2209 fib_table_print(seq, tb);
2210
2211 trie_collect_stats(t, &stat);
2212 trie_show_stats(seq, &stat);
2213#ifdef CONFIG_IP_FIB_TRIE_STATS
2214 trie_show_usage(seq, &t->stats);
2215#endif
2216 }
2217 }
Stephen Hemmingercb7b5932005-09-09 13:35:42 -07002218
Robert Olsson19baf832005-06-21 12:43:18 -07002219 return 0;
2220}
2221
Robert Olsson19baf832005-06-21 12:43:18 -07002222static int fib_triestat_seq_open(struct inode *inode, struct file *file)
2223{
Pavel Emelyanovde05c552008-07-18 04:07:21 -07002224 return single_open_net(inode, file, fib_triestat_seq_show);
Denis V. Lunev1c340b22008-01-10 03:27:17 -08002225}
2226
Arjan van de Ven9a321442007-02-12 00:55:35 -08002227static const struct file_operations fib_triestat_fops = {
Stephen Hemmingerc877efb2005-07-19 14:01:51 -07002228 .owner = THIS_MODULE,
2229 .open = fib_triestat_seq_open,
2230 .read = seq_read,
2231 .llseek = seq_lseek,
Pavel Emelyanovb6fcbdb2008-07-18 04:07:44 -07002232 .release = single_release_net,
Robert Olsson19baf832005-06-21 12:43:18 -07002233};
2234
static struct rt_trie_node *fib_trie_get_idx(struct seq_file *seq, loff_t pos)
{
	struct fib_trie_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	loff_t idx = 0;
	unsigned int h;

	for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
		struct hlist_head *head = &net->ipv4.fib_table_hash[h];
		struct hlist_node *node;
		struct fib_table *tb;

		hlist_for_each_entry_rcu(tb, node, head, tb_hlist) {
			struct rt_trie_node *n;

			for (n = fib_trie_get_first(iter,
						    (struct trie *) tb->tb_data);
			     n; n = fib_trie_get_next(iter))
				if (pos == idx++) {
					iter->tb = tb;
					return n;
				}
		}
	}

	return NULL;
}

static void *fib_trie_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return fib_trie_get_idx(seq, *pos);
}

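/*
 * Advance the iterator: try the next node in the current trie first,
 * then the remaining tables on this hash chain, then the following
 * hash chains.  Runs under the rcu_read_lock() taken in _seq_start().
 */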
static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct fib_trie_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct fib_table *tb = iter->tb;
	struct hlist_node *tb_node;
	unsigned int h;
	struct rt_trie_node *n;

	++*pos;
	/* next node in same table */
	n = fib_trie_get_next(iter);
	if (n)
		return n;

	/* walk rest of this hash chain */
	h = tb->tb_id & (FIB_TABLE_HASHSZ - 1);
	while ((tb_node = rcu_dereference(hlist_next_rcu(&tb->tb_hlist)))) {
		tb = hlist_entry(tb_node, struct fib_table, tb_hlist);
		n = fib_trie_get_first(iter, (struct trie *) tb->tb_data);
		if (n)
			goto found;
	}

	/* new hash chain */
	while (++h < FIB_TABLE_HASHSZ) {
		struct hlist_head *head = &net->ipv4.fib_table_hash[h];
		hlist_for_each_entry_rcu(tb, tb_node, head, tb_hlist) {
			n = fib_trie_get_first(iter, (struct trie *) tb->tb_data);
			if (n)
				goto found;
		}
	}
	return NULL;

found:
	iter->tb = tb;
	return n;
}

static void fib_trie_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static void seq_indent(struct seq_file *seq, int n)
{
	while (n-- > 0)
		seq_puts(seq, "   ");
}

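/*
 * Helpers that turn scope and type numbers into the names printed
 * below; unknown values fall back to a numeric string formatted into
 * the caller-supplied buffer.
 */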
static inline const char *rtn_scope(char *buf, size_t len, enum rt_scope_t s)
{
	switch (s) {
	case RT_SCOPE_UNIVERSE: return "universe";
	case RT_SCOPE_SITE:	return "site";
	case RT_SCOPE_LINK:	return "link";
	case RT_SCOPE_HOST:	return "host";
	case RT_SCOPE_NOWHERE:	return "nowhere";
	default:
		snprintf(buf, len, "scope=%d", s);
		return buf;
	}
}

static const char *const rtn_type_names[__RTN_MAX] = {
	[RTN_UNSPEC] = "UNSPEC",
	[RTN_UNICAST] = "UNICAST",
	[RTN_LOCAL] = "LOCAL",
	[RTN_BROADCAST] = "BROADCAST",
	[RTN_ANYCAST] = "ANYCAST",
	[RTN_MULTICAST] = "MULTICAST",
	[RTN_BLACKHOLE] = "BLACKHOLE",
	[RTN_UNREACHABLE] = "UNREACHABLE",
	[RTN_PROHIBIT] = "PROHIBIT",
	[RTN_THROW] = "THROW",
	[RTN_NAT] = "NAT",
	[RTN_XRESOLVE] = "XRESOLVE",
};

static inline const char *rtn_type(char *buf, size_t len, unsigned int t)
{
	if (t < __RTN_MAX && rtn_type_names[t])
		return rtn_type_names[t];
	snprintf(buf, len, "type %u", t);
	return buf;
}

/* Pretty print the trie */
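/*
 * Tnodes come out as "+-- key/pos bits full_children empty_children",
 * leaves as "|-- key", and each fib_alias as "/plen scope type",
 * indented by depth.  A made-up sample:
 *
 * Main:
 *   +-- 192.168.0.0/24 2 0 2
 *      |-- 192.168.0.1
 *         /32 host LOCAL
 */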
static int fib_trie_seq_show(struct seq_file *seq, void *v)
{
	const struct fib_trie_iter *iter = seq->private;
	struct rt_trie_node *n = v;

	if (!node_parent_rcu(n))
		fib_table_print(seq, iter->tb);

	if (IS_TNODE(n)) {
		struct tnode *tn = (struct tnode *) n;
		__be32 prf = htonl(mask_pfx(tn->key, tn->pos));

		seq_indent(seq, iter->depth-1);
		seq_printf(seq, "  +-- %pI4/%d %d %d %d\n",
			   &prf, tn->pos, tn->bits, tn->full_children,
			   tn->empty_children);

	} else {
		struct leaf *l = (struct leaf *) n;
		struct leaf_info *li;
		struct hlist_node *node;
		__be32 val = htonl(l->key);

		seq_indent(seq, iter->depth);
		seq_printf(seq, "  |-- %pI4\n", &val);

		hlist_for_each_entry_rcu(li, node, &l->list, hlist) {
			struct fib_alias *fa;

			list_for_each_entry_rcu(fa, &li->falh, fa_list) {
				char buf1[32], buf2[32];

				seq_indent(seq, iter->depth+1);
				seq_printf(seq, "  /%d %s %s", li->plen,
					   rtn_scope(buf1, sizeof(buf1),
						     fa->fa_info->fib_scope),
					   rtn_type(buf2, sizeof(buf2),
						    fa->fa_type));
				if (fa->fa_tos)
					seq_printf(seq, " tos=%d", fa->fa_tos);
				seq_putc(seq, '\n');
			}
		}
	}

	return 0;
}

static const struct seq_operations fib_trie_seq_ops = {
	.start  = fib_trie_seq_start,
	.next   = fib_trie_seq_next,
	.stop   = fib_trie_seq_stop,
	.show   = fib_trie_seq_show,
};

static int fib_trie_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &fib_trie_seq_ops,
			    sizeof(struct fib_trie_iter));
}

static const struct file_operations fib_trie_fops = {
	.owner  = THIS_MODULE,
	.open   = fib_trie_seq_open,
	.read   = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_net,
};

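/*
 * Iterator state for /proc/net/route: remembers the last position and
 * the key of the leaf found there, so a continued read can resume via
 * fib_find_node() instead of rescanning the trie from the first leaf.
 */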
struct fib_route_iter {
	struct seq_net_private p;
	struct trie *main_trie;
	loff_t	pos;
	t_key	key;
};

static struct leaf *fib_route_get_idx(struct fib_route_iter *iter, loff_t pos)
{
	struct leaf *l = NULL;
	struct trie *t = iter->main_trie;

	/* use cache location of last found key */
	if (iter->pos > 0 && pos >= iter->pos && (l = fib_find_node(t, iter->key)))
		pos -= iter->pos;
	else {
		iter->pos = 0;
		l = trie_firstleaf(t);
	}

	while (l && pos-- > 0) {
		iter->pos++;
		l = trie_nextleaf(l);
	}

	if (l)
		iter->key = l->key;	/* remember it */
	else
		iter->pos = 0;		/* forget it */

	return l;
}

static void *fib_route_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	struct fib_route_iter *iter = seq->private;
	struct fib_table *tb;

	rcu_read_lock();
	tb = fib_get_table(seq_file_net(seq), RT_TABLE_MAIN);
	if (!tb)
		return NULL;

	iter->main_trie = (struct trie *) tb->tb_data;
	if (*pos == 0)
		return SEQ_START_TOKEN;
	else
		return fib_route_get_idx(iter, *pos - 1);
}

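/*
 * Step to the next leaf of the main trie, keeping the pos/key cache
 * that fib_route_get_idx() uses to resume restarted reads up to date.
 */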
static void *fib_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct fib_route_iter *iter = seq->private;
	struct leaf *l = v;

	++*pos;
	if (v == SEQ_START_TOKEN) {
		iter->pos = 0;
		l = trie_firstleaf(iter->main_trie);
	} else {
		iter->pos++;
		l = trie_nextleaf(l);
	}

	if (l)
		iter->key = l->key;
	else
		iter->pos = 0;
	return l;
}

static void fib_route_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

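/*
 * Translate a route's type, netmask and nexthop information into the
 * legacy RTF_* flag bits shown in the /proc/net/route Flags column.
 */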
static unsigned int fib_flag_trans(int type, __be32 mask, const struct fib_info *fi)
{
	unsigned int flags = 0;

	if (type == RTN_UNREACHABLE || type == RTN_PROHIBIT)
		flags = RTF_REJECT;
	if (fi && fi->fib_nh->nh_gw)
		flags |= RTF_GATEWAY;
	if (mask == htonl(0xFFFFFFFF))
		flags |= RTF_HOST;
	flags |= RTF_UP;
	return flags;
}

/*
 *	This outputs /proc/net/route.
 *	The format of the file is not supposed to change and needs to
 *	stay the same as the old fib_hash output so that legacy
 *	utilities are not broken.
 */
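/*
 * Destination, Gateway and Mask are printed with %08X straight from the
 * __be32 value, so on a little-endian host the bytes read reversed
 * (192.168.1.1 comes out as 0101A8C0).  Made-up sample row:
 *
 * eth0	0001A8C0	00000000	0001	0	0	0	00FFFFFF	0	0	0
 */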
static int fib_route_seq_show(struct seq_file *seq, void *v)
{
	struct leaf *l = v;
	struct leaf_info *li;
	struct hlist_node *node;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "%-127s\n", "Iface\tDestination\tGateway "
			   "\tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU"
			   "\tWindow\tIRTT");
		return 0;
	}

	hlist_for_each_entry_rcu(li, node, &l->list, hlist) {
		struct fib_alias *fa;
		__be32 mask, prefix;

		mask = inet_make_mask(li->plen);
		prefix = htonl(l->key);

		list_for_each_entry_rcu(fa, &li->falh, fa_list) {
			const struct fib_info *fi = fa->fa_info;
			unsigned int flags = fib_flag_trans(fa->fa_type, mask, fi);
			int len;

			if (fa->fa_type == RTN_BROADCAST
			    || fa->fa_type == RTN_MULTICAST)
				continue;

			if (fi)
				seq_printf(seq,
					   "%s\t%08X\t%08X\t%04X\t%d\t%u\t"
					   "%d\t%08X\t%d\t%u\t%u%n",
					   fi->fib_dev ? fi->fib_dev->name : "*",
					   prefix,
					   fi->fib_nh->nh_gw, flags, 0, 0,
					   fi->fib_priority,
					   mask,
					   (fi->fib_advmss ?
					    fi->fib_advmss + 40 : 0),
					   fi->fib_window,
					   fi->fib_rtt >> 3, &len);
			else
				seq_printf(seq,
					   "*\t%08X\t%08X\t%04X\t%d\t%u\t"
					   "%d\t%08X\t%d\t%u\t%u%n",
					   prefix, 0, flags, 0, 0, 0,
					   mask, 0, 0, 0, &len);

			seq_printf(seq, "%*s\n", 127 - len, "");
		}
	}

	return 0;
}

static const struct seq_operations fib_route_seq_ops = {
	.start  = fib_route_seq_start,
	.next   = fib_route_seq_next,
	.stop   = fib_route_seq_stop,
	.show   = fib_route_seq_show,
};

static int fib_route_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &fib_route_seq_ops,
			    sizeof(struct fib_route_iter));
}

static const struct file_operations fib_route_fops = {
	.owner  = THIS_MODULE,
	.open   = fib_route_seq_open,
	.read   = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_net,
};

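/*
 * Create the per-namespace /proc/net entries: "fib_trie" (pretty
 * printed tries), "fib_triestat" (statistics) and "route" (legacy
 * routing table dump).  On failure, whatever was already created is
 * removed again and -ENOMEM is returned.
 */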
int __net_init fib_proc_init(struct net *net)
{
	if (!proc_net_fops_create(net, "fib_trie", S_IRUGO, &fib_trie_fops))
		goto out1;

	if (!proc_net_fops_create(net, "fib_triestat", S_IRUGO,
				  &fib_triestat_fops))
		goto out2;

	if (!proc_net_fops_create(net, "route", S_IRUGO, &fib_route_fops))
		goto out3;

	return 0;

out3:
	proc_net_remove(net, "fib_triestat");
out2:
	proc_net_remove(net, "fib_trie");
out1:
	return -ENOMEM;
}

void __net_exit fib_proc_exit(struct net *net)
{
	proc_net_remove(net, "fib_trie");
	proc_net_remove(net, "fib_triestat");
	proc_net_remove(net, "route");
}

#endif /* CONFIG_PROC_FS */