/*
 *		INETPEER - A storage for permanent information about peers
 *
 *  This source is covered by the GNU GPL, the same as all kernel sources.
 *
 *  Authors:	Andrey V. Savochkin <saw@msu.ru>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <net/ip.h>
#include <net/inetpeer.h>

/*
 *  Theory of operations.
 *  We keep one entry for each peer IP address.  Each node contains
 *  long-lived information about the peer which doesn't depend on routes.
 *  At this moment this information consists only of the ID field for the
 *  next outgoing IP packet.  This field is incremented with each packet
 *  as encoded in the inet_getid() function (include/net/inetpeer.h).
 *  At the time of writing these notes, IP packet identifiers are
 *  generated unpredictably by this code only for packets subject
 *  (actually or potentially) to defragmentation.  I.e. DF packets
 *  smaller than the PMTU use a constant ID and do not use this code
 *  (see ip_select_ident() in include/net/ip.h).
 *
 *  Route cache entries hold references to our nodes.
 *  New cache entries get references via lookup by destination IP address
 *  in the AVL tree.  The reference is grabbed only when it's needed, i.e.
 *  only when we try to output an IP packet which needs an unpredictable
 *  ID (see __ip_select_ident() in net/ipv4/route.c).
 *  Nodes are removed only when the reference counter goes to 0.
 *  Once that has happened, the node may be removed after a sufficient
 *  amount of time has passed since its last use.  The least-recently-used
 *  entry can also be removed if the pool is overloaded, i.e. if the total
 *  number of entries is greater than or equal to the threshold.
 *
 *  The node pool is organised as an AVL tree.
 *  Such an implementation has been chosen not just for fun.  It's a way
 *  to prevent easy and efficient DoS attacks by creating hash collisions.
 *  A huge number of long-lived nodes in a single hash slot would
 *  significantly delay lookups performed with BHs disabled.
 *
 *  Serialisation issues.
 *  1.  Nodes may appear in the tree only with the pool write lock held.
 *  2.  Nodes may disappear from the tree only with the pool write lock
 *      held AND the reference count being 0.
 *  3.  Nodes appear on and disappear from the unused node list only
 *      under "inet_peer_unused_lock".
 *  4.  The global variable peer_total is modified under the pool lock.
 *  5.  struct inet_peer fields modification:
 *		avl_left, avl_right, avl_parent, avl_height: pool lock
 *		unused: unused node list lock
 *		refcnt: atomically against modifications on other CPUs;
 *		   usually under some other lock to prevent node disappearing
 *		dtime: unused node list lock
 *		v4daddr: unchangeable
 *		ip_id_count: atomically incremented
 */
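
/*
 * A minimal usage sketch, compiled out: roughly how a caller such as
 * __ip_select_ident() is expected to use this pool -- look the peer up
 * by destination address, take an ID from the per-peer counter, and
 * drop the reference when done.  example_select_id() is a made-up name,
 * and the two-argument inet_getid() helper is assumed to be the one
 * from include/net/inetpeer.h.
 */
#if 0
static __u16 example_select_id(__be32 daddr)
{
	struct inet_peer *peer;
	__u16 id = 0;

	peer = inet_getpeer(daddr, 1);		/* create if not found */
	if (peer != NULL) {
		id = inet_getid(peer, 0);	/* advance the ID counter */
		inet_putpeer(peer);		/* release our reference */
	}
	return id;
}
#endif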

static struct kmem_cache *peer_cachep __read_mostly;

#define node_height(x) x->avl_height
static struct inet_peer peer_fake_node = {
	.avl_left	= &peer_fake_node,
	.avl_right	= &peer_fake_node,
	.avl_height	= 0
};
#define peer_avl_empty (&peer_fake_node)
static struct inet_peer *peer_root = peer_avl_empty;
static DEFINE_RWLOCK(peer_pool_lock);
#define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */
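/*
 * A rough bound on why 40 slots suffice: an AVL tree of height h has at
 * least N(h) = N(h-1) + N(h-2) + 1 nodes, which grows like phi^h with
 * phi ~= 1.618.  For h = 40 that is roughly 2^27.7 nodes, so a 40-entry
 * path stack covers any tree of up to about 2^27 entries.
 */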

static int peer_total;
/* Exported for sysctl_net_ipv4.  */
int inet_peer_threshold __read_mostly = 65536 + 128;	/* start to throw entries more
							 * aggressively at this stage */
int inet_peer_minttl __read_mostly = 120 * HZ;	/* TTL under high load: 120 sec */
int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;	/* usual time to live: 10 min */
int inet_peer_gc_mintime __read_mostly = 10 * HZ;
int inet_peer_gc_maxtime __read_mostly = 120 * HZ;

static LIST_HEAD(unused_peers);
static DEFINE_SPINLOCK(inet_peer_unused_lock);

static void peer_check_expire(unsigned long dummy);
static DEFINE_TIMER(peer_periodic_timer, peer_check_expire, 0, 0);


/* Called from ip_output.c:ip_init  */
void __init inet_initpeers(void)
{
	struct sysinfo si;

	/* Use the straightforward interface to memory information. */
	si_meminfo(&si);
	/* The values below were suggested by Alexey Kuznetsov
	 * <kuznet@ms2.inr.ac.ru>.  I don't have any opinion about the values
	 * myself.  --SAW
	 */
	if (si.totalram <= (32768*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 1; /* max pool size about 1MB on IA32 */
	if (si.totalram <= (16384*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 1; /* about 512KB */
	if (si.totalram <= (8192*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 2; /* about 128KB */
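	/*
	 * Note: si.totalram is in pages, so the three comparisons above
	 * select machines with at most 32MB, 16MB and 8MB of RAM
	 * respectively (e.g. 32768*1024 bytes / PAGE_SIZE == 32MB in pages).
	 */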

	peer_cachep = kmem_cache_create("inet_peer_cache",
			sizeof(struct inet_peer),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
			NULL);

	/* All the timers started at system startup tend to
	 * synchronize.  Perturb this one a bit.
	 */
	peer_periodic_timer.expires = jiffies
		+ net_random() % inet_peer_gc_maxtime
		+ inet_peer_gc_maxtime;
	add_timer(&peer_periodic_timer);
}

/* Called with or without local BH being disabled. */
static void unlink_from_unused(struct inet_peer *p)
{
	spin_lock_bh(&inet_peer_unused_lock);
	list_del_init(&p->unused);
	spin_unlock_bh(&inet_peer_unused_lock);
}

/*
 * Called with local BH disabled and the pool lock held.
 * _stack is known to be NULL or not at compile time,
 * so the compiler will optimize away the if (_stack) tests.
 */
#define lookup(_daddr, _stack)					\
({								\
	struct inet_peer *u, **v;				\
	if (_stack != NULL) {					\
		stackptr = _stack;				\
		*stackptr++ = &peer_root;			\
	}							\
	for (u = peer_root; u != peer_avl_empty; ) {		\
		if (_daddr == u->v4daddr)			\
			break;					\
		if ((__force __u32)_daddr < (__force __u32)u->v4daddr)	\
			v = &u->avl_left;			\
		else						\
			v = &u->avl_right;			\
		if (_stack != NULL)				\
			*stackptr++ = v;			\
		u = *v;						\
	}							\
	u;							\
})
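
/*
 * For illustration, the two ways lookup() is invoked below:
 *
 *	p = lookup(daddr, NULL);	read side: plain search, the
 *					NULL-stack branches compile out
 *	p = lookup(daddr, stack);	write side: the traversal path
 *					is recorded in stack[] for
 *					peer_avl_rebalance()
 */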

/* Called with local BH disabled and the pool write lock held.
 * Returns the in-order predecessor of "start", i.e. the rightmost node
 * of start's left subtree; it is used when deleting a node that has a
 * non-empty left child. */
#define lookup_rightempty(start)				\
({								\
	struct inet_peer *u, **v;				\
	*stackptr++ = &start->avl_left;				\
	v = &start->avl_left;					\
	for (u = *v; u->avl_right != peer_avl_empty; ) {	\
		v = &u->avl_right;				\
		*stackptr++ = v;				\
		u = *v;						\
	}							\
	u;							\
})

/* Called with local BH disabled and the pool write lock held.
 * Variable names serve as the proof of operation correctness.
 * See mm/map_avl.c for a more detailed description of the ideas.  */
static void peer_avl_rebalance(struct inet_peer **stack[],
		struct inet_peer ***stackend)
{
	struct inet_peer **nodep, *node, *l, *r;
	int lh, rh;

	while (stackend > stack) {
		nodep = *--stackend;
		node = *nodep;
		l = node->avl_left;
		r = node->avl_right;
		lh = node_height(l);
		rh = node_height(r);
		if (lh > rh + 1) { /* l: RH+2 */
			struct inet_peer *ll, *lr, *lrl, *lrr;
			int lrh;
			ll = l->avl_left;
			lr = l->avl_right;
			lrh = node_height(lr);
			if (lrh <= node_height(ll)) {	/* ll: RH+1 */
				node->avl_left = lr;	/* lr: RH or RH+1 */
				node->avl_right = r;	/* r: RH */
				node->avl_height = lrh + 1; /* RH+1 or RH+2 */
				l->avl_left = ll;	/* ll: RH+1 */
				l->avl_right = node;	/* node: RH+1 or RH+2 */
				l->avl_height = node->avl_height + 1;
				*nodep = l;
			} else { /* ll: RH, lr: RH+1 */
				lrl = lr->avl_left;	/* lrl: RH or RH-1 */
				lrr = lr->avl_right;	/* lrr: RH or RH-1 */
				node->avl_left = lrr;	/* lrr: RH or RH-1 */
				node->avl_right = r;	/* r: RH */
				node->avl_height = rh + 1; /* node: RH+1 */
				l->avl_left = ll;	/* ll: RH */
				l->avl_right = lrl;	/* lrl: RH or RH-1 */
				l->avl_height = rh + 1;	/* l: RH+1 */
				lr->avl_left = l;	/* l: RH+1 */
				lr->avl_right = node;	/* node: RH+1 */
				lr->avl_height = rh + 2;
				*nodep = lr;
			}
		} else if (rh > lh + 1) { /* r: LH+2 */
			struct inet_peer *rr, *rl, *rlr, *rll;
			int rlh;
			rr = r->avl_right;
			rl = r->avl_left;
			rlh = node_height(rl);
			if (rlh <= node_height(rr)) {	/* rr: LH+1 */
				node->avl_right = rl;	/* rl: LH or LH+1 */
				node->avl_left = l;	/* l: LH */
				node->avl_height = rlh + 1; /* LH+1 or LH+2 */
				r->avl_right = rr;	/* rr: LH+1 */
				r->avl_left = node;	/* node: LH+1 or LH+2 */
				r->avl_height = node->avl_height + 1;
				*nodep = r;
			} else { /* rr: LH, rl: LH+1 */
				rlr = rl->avl_right;	/* rlr: LH or LH-1 */
				rll = rl->avl_left;	/* rll: LH or LH-1 */
				node->avl_right = rll;	/* rll: LH or LH-1 */
				node->avl_left = l;	/* l: LH */
				node->avl_height = lh + 1; /* node: LH+1 */
				r->avl_right = rr;	/* rr: LH */
				r->avl_left = rlr;	/* rlr: LH or LH-1 */
				r->avl_height = lh + 1;	/* r: LH+1 */
				rl->avl_right = r;	/* r: LH+1 */
				rl->avl_left = node;	/* node: LH+1 */
				rl->avl_height = lh + 2;
				*nodep = rl;
			}
		} else {
			node->avl_height = (lh > rh ? lh : rh) + 1;
		}
	}
}
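
/*
 * Illustration of the two left-heavy cases handled above (the
 * right-heavy cases are mirror images): the first branch is a single
 * rotation, which reshapes
 *
 *	    node                          l
 *	   /    \                       /   \
 *	  l      r      becomes       ll    node
 *	 / \                                /  \
 *	ll  lr                             lr   r
 *
 * while the second branch is a double rotation, which promotes lr (the
 * taller inner grandchild) to the subtree root, with l and node as its
 * children.
 */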

/* Called with local BH disabled and the pool write lock held. */
#define link_to_pool(n)						\
do {								\
	n->avl_height = 1;					\
	n->avl_left = peer_avl_empty;				\
	n->avl_right = peer_avl_empty;				\
	**--stackptr = n;					\
	peer_avl_rebalance(stack, stackptr);			\
} while (0)

/* May be called with local BH enabled. */
static void unlink_from_pool(struct inet_peer *p)
{
	int do_free;

	do_free = 0;

	write_lock_bh(&peer_pool_lock);
	/* Check the reference counter.  It was artificially incremented by 1
	 * in cleanup_once() to prevent it from suddenly disappearing.  If the
	 * reference count is still 1 then the node is referenced only as `p'
	 * here and from the pool.  So under the exclusive pool lock it's safe
	 * to remove the node and free it later. */
	if (atomic_read(&p->refcnt) == 1) {
		struct inet_peer **stack[PEER_MAXDEPTH];
		struct inet_peer ***stackptr, ***delp;
		if (lookup(p->v4daddr, stack) != p)
			BUG();
		delp = stackptr - 1; /* *delp[0] == p */
		if (p->avl_left == peer_avl_empty) {
			*delp[0] = p->avl_right;
			--stackptr;
		} else {
			/* look for a node to insert instead of p */
			struct inet_peer *t;
			t = lookup_rightempty(p);
			BUG_ON(*stackptr[-1] != t);
			**--stackptr = t->avl_left;
			/* t is removed, t->v4daddr > x->v4daddr for any
			 * x in p->avl_left subtree.
			 * Put t in the old place of p. */
			*delp[0] = t;
			t->avl_left = p->avl_left;
			t->avl_right = p->avl_right;
			t->avl_height = p->avl_height;
			BUG_ON(delp[1] != &p->avl_left);
			delp[1] = &t->avl_left; /* was &p->avl_left */
		}
		peer_avl_rebalance(stack, stackptr);
		peer_total--;
		do_free = 1;
	}
	write_unlock_bh(&peer_pool_lock);

	if (do_free)
		kmem_cache_free(peer_cachep, p);
	else
		/* The node is used again.  Decrease the reference counter
		 * back.  The loop "cleanup_once -> unlink_from_unused
		 *   -> unlink_from_pool -> putpeer -> link_to_unused
		 *   -> cleanup_once (for the same node)"
		 * doesn't really exist because the entry will have a
		 * recent deletion time and will not be cleaned again soon. */
		inet_putpeer(p);
}

/* May be called with local BH enabled. */
static int cleanup_once(unsigned long ttl)
{
	struct inet_peer *p = NULL;

	/* Remove the first entry from the list of unused nodes. */
	spin_lock_bh(&inet_peer_unused_lock);
	if (!list_empty(&unused_peers)) {
		__u32 delta;

		p = list_first_entry(&unused_peers, struct inet_peer, unused);
		delta = (__u32)jiffies - p->dtime;

		if (delta < ttl) {
			/* Do not prune fresh entries. */
			spin_unlock_bh(&inet_peer_unused_lock);
			return -1;
		}

		list_del_init(&p->unused);

		/* Grab an extra reference to prevent the node from
		 * disappearing before the unlink_from_pool() call. */
		atomic_inc(&p->refcnt);
	}
	spin_unlock_bh(&inet_peer_unused_lock);

	if (p == NULL)
		/* This means that the total number of USED entries has
		 * grown above inet_peer_threshold.  It shouldn't really
		 * happen because of the entry limits in the route cache. */
		return -1;

	unlink_from_pool(p);
	return 0;
}

/* Called with or without local BH being disabled. */
struct inet_peer *inet_getpeer(__be32 daddr, int create)
{
	struct inet_peer *p, *n;
	struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;

	/* Look up the address quickly. */
	read_lock_bh(&peer_pool_lock);
	p = lookup(daddr, NULL);
	if (p != peer_avl_empty)
		atomic_inc(&p->refcnt);
	read_unlock_bh(&peer_pool_lock);

	if (p != peer_avl_empty) {
		/* The existing node has been found.
		 * Remove the entry from the unused list if it was there. */
		unlink_from_unused(p);
		return p;
	}

	if (!create)
		return NULL;

	/* Allocate the space outside the locked region. */
	n = kmem_cache_alloc(peer_cachep, GFP_ATOMIC);
	if (n == NULL)
		return NULL;
	n->v4daddr = daddr;
	atomic_set(&n->refcnt, 1);
	atomic_set(&n->rid, 0);
	atomic_set(&n->ip_id_count, secure_ip_id(daddr));
	n->tcp_ts_stamp = 0;

	write_lock_bh(&peer_pool_lock);
	/* Check if an entry has suddenly appeared. */
	p = lookup(daddr, stack);
	if (p != peer_avl_empty)
		goto out_free;

	/* Link the node. */
	link_to_pool(n);
	INIT_LIST_HEAD(&n->unused);
	peer_total++;
	write_unlock_bh(&peer_pool_lock);

	if (peer_total >= inet_peer_threshold)
		/* Remove one least-recently-used entry. */
		cleanup_once(0);

	return n;

out_free:
	/* The appropriate node is already in the pool. */
	atomic_inc(&p->refcnt);
	write_unlock_bh(&peer_pool_lock);
	/* Remove the entry from the unused list if it was there. */
	unlink_from_unused(p);
	/* Free the preallocated node. */
	kmem_cache_free(peer_cachep, n);
	return p;
}

/* Called with local BH disabled. */
static void peer_check_expire(unsigned long dummy)
{
	unsigned long now = jiffies;
	int ttl;

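	/* The else branch below interpolates the TTL linearly between
	 * inet_peer_maxttl (empty pool) and inet_peer_minttl (pool at
	 * the threshold).  Dividing by HZ before multiplying by
	 * peer_total keeps the intermediate product within an int. */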
	if (peer_total >= inet_peer_threshold)
		ttl = inet_peer_minttl;
	else
		ttl = inet_peer_maxttl
				- (inet_peer_maxttl - inet_peer_minttl) / HZ *
					peer_total / inet_peer_threshold * HZ;
	while (!cleanup_once(ttl)) {
		if (jiffies != now)
			break;
	}

	/* Trigger the timer after an inet_peer_gc_mintime ..
	 * inet_peer_gc_maxtime interval, depending on the total number
	 * of entries (more entries, shorter interval). */
	if (peer_total >= inet_peer_threshold)
		peer_periodic_timer.expires = jiffies + inet_peer_gc_mintime;
	else
		peer_periodic_timer.expires = jiffies
			+ inet_peer_gc_maxtime
			- (inet_peer_gc_maxtime - inet_peer_gc_mintime) / HZ *
				peer_total / inet_peer_threshold * HZ;
	add_timer(&peer_periodic_timer);
}

void inet_putpeer(struct inet_peer *p)
{
	spin_lock_bh(&inet_peer_unused_lock);
	if (atomic_dec_and_test(&p->refcnt)) {
		list_add_tail(&p->unused, &unused_peers);
		p->dtime = (__u32)jiffies;
	}
	spin_unlock_bh(&inet_peer_unused_lock);
}