/*
 *		INETPEER - A storage for permanent information about peers
 *
 *  This source is covered by the GNU GPL, the same as all kernel sources.
 *
 *  Version:	$Id: inetpeer.c,v 1.7 2001/09/20 21:22:50 davem Exp $
 *
 *  Authors:	Andrey V. Savochkin <saw@msu.ru>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <net/inetpeer.h>

/*
 *  Theory of operations.
 *  We keep one entry for each peer IP address.  The nodes contain
 *  long-living information about the peer which doesn't depend on routes.
 *  At this moment this information consists only of the ID field for the
 *  next outgoing IP packet.  This field is incremented with each packet
 *  as encoded in the inet_getid() function (include/net/inetpeer.h).
 *  At the moment of writing these notes, the identifier of an IP packet
 *  is generated to be unpredictable using this code only for packets
 *  subjected (actually or potentially) to defragmentation.  I.e. DF
 *  packets smaller than the PMTU use a constant ID and do not use this
 *  code (see ip_select_ident() in include/net/ip.h).
 *
 *  Route cache entries hold references to our nodes.
 *  New cache entries get references via lookup by destination IP address
 *  in the AVL tree.  The reference is grabbed only when it's needed, i.e.
 *  only when we try to output an IP packet which needs an unpredictable
 *  ID (see __ip_select_ident() in net/ipv4/route.c).
 *  Nodes are removed only when the reference counter goes to 0.
 *  When that happens, the node may be removed once a sufficient amount
 *  of time has passed since its last use.  The less-recently-used entry
 *  can also be removed if the pool is overloaded, i.e. if the total
 *  number of entries is greater than or equal to the threshold.
 *
 *  The node pool is organised as an AVL tree.
 *  Such an implementation has been chosen not just for fun.  It's a way
 *  to prevent easy and efficient DoS attacks by creating hash collisions.
 *  A huge number of long-living nodes in a single hash slot would
 *  significantly delay lookups performed with disabled BHs.
 *
 *  Serialisation issues.
 *  1.  Nodes may appear in the tree only with the pool write lock held.
 *  2.  Nodes may disappear from the tree only with the pool write lock
 *      held AND the reference count being 0.
 *  3.  Nodes appear on and disappear from the unused node list only under
 *      "inet_peer_unused_lock".
 *  4.  The global variable peer_total is modified under the pool lock.
 *  5.  struct inet_peer fields modification:
 *		avl_left, avl_right, avl_parent, avl_height: pool lock
 *		unused_next, unused_prevp: unused node list lock
 *		refcnt: atomically against modifications on other CPUs;
 *		   usually under some other lock to prevent node disappearing
 *		dtime: unused node list lock
 *		v4daddr: unchangeable
 *		ip_id_count: idlock
 */
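
/*
 *  For reference, the per-peer ID generation this file serialises looks
 *  roughly like the sketch below; the authoritative definition lives in
 *  include/net/inetpeer.h and its exact signature there may differ:
 *
 *	static inline __u16 inet_getid(struct inet_peer *p)
 *	{
 *		__u16 id;
 *
 *		spin_lock_bh(&inet_peer_idlock);
 *		id = p->ip_id_count++;
 *		spin_unlock_bh(&inet_peer_idlock);
 *		return id;
 *	}
 *
 *  i.e. a per-peer counter protected by inet_peer_idlock, which is why
 *  that lock is defined and exported from this file.
 */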

/* Exported for inet_getid inline function.  */
DEFINE_SPINLOCK(inet_peer_idlock);

static kmem_cache_t *peer_cachep;

#define node_height(x) x->avl_height
static struct inet_peer peer_fake_node = {
	.avl_left	= &peer_fake_node,
	.avl_right	= &peer_fake_node,
	.avl_height	= 0
};
#define peer_avl_empty (&peer_fake_node)
static struct inet_peer *peer_root = peer_avl_empty;
static DEFINE_RWLOCK(peer_pool_lock);
#define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */

static volatile int peer_total;
/* Exported for sysctl_net_ipv4.  */
int inet_peer_threshold = 65536 + 128;	/* start to throw entries more
					 * aggressively at this stage */
int inet_peer_minttl = 120 * HZ;	/* TTL under high load: 120 sec */
int inet_peer_maxttl = 10 * 60 * HZ;	/* usual time to live: 10 min */

static struct inet_peer *inet_peer_unused_head;
/* Exported for inet_putpeer inline function.  */
struct inet_peer **inet_peer_unused_tailp = &inet_peer_unused_head;
DEFINE_SPINLOCK(inet_peer_unused_lock);
#define PEER_MAX_CLEANUP_WORK 30

static void peer_check_expire(unsigned long dummy);
static struct timer_list peer_periodic_timer =
	TIMER_INITIALIZER(peer_check_expire, 0, 0);

/* Exported for sysctl_net_ipv4.  */
int inet_peer_gc_mintime = 10 * HZ,
    inet_peer_gc_maxtime = 120 * HZ;

/* Called from ip_output.c:ip_init  */
void __init inet_initpeers(void)
{
	struct sysinfo si;

	/* Use the straight interface to information about memory. */
	si_meminfo(&si);
	/* The values below were suggested by Alexey Kuznetsov
	 * <kuznet@ms2.inr.ac.ru>.  I don't have any opinion about the values
	 * myself.  --SAW
	 */
	if (si.totalram <= (32768*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 1; /* max pool size about 1MB on IA32 */
	if (si.totalram <= (16384*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 1; /* about 512KB */
	if (si.totalram <= (8192*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 2; /* about 128KB */
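	/* Note that the tests above cascade: a box with e.g. 8MB of RAM
	 * matches all three, so the threshold drops by a total factor of
	 * 16, i.e. 65664 -> 32832 -> 16416 -> 4104 entries.  The KB
	 * annotations imply roughly 32 bytes per entry (32832 * 32B ~=
	 * 1MB); the real slab object is somewhat larger, so treat the
	 * figures as order-of-magnitude estimates. */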

	peer_cachep = kmem_cache_create("inet_peer_cache",
			sizeof(struct inet_peer),
			0, SLAB_HWCACHE_ALIGN,
			NULL, NULL);

	if (!peer_cachep)
		panic("cannot create inet_peer_cache");

	/* All the timers started at system startup tend to
	 * synchronize.  Perturb this one a bit.
	 */
	peer_periodic_timer.expires = jiffies
		+ net_random() % inet_peer_gc_maxtime
		+ inet_peer_gc_maxtime;
	add_timer(&peer_periodic_timer);
}

/* Called with or without local BH being disabled. */
static void unlink_from_unused(struct inet_peer *p)
{
	spin_lock_bh(&inet_peer_unused_lock);
	if (p->unused_prevp != NULL) {
		/* On unused list. */
		*p->unused_prevp = p->unused_next;
		if (p->unused_next != NULL)
			p->unused_next->unused_prevp = p->unused_prevp;
		else
			inet_peer_unused_tailp = p->unused_prevp;
		p->unused_prevp = NULL; /* mark it as removed */
	}
	spin_unlock_bh(&inet_peer_unused_lock);
}

/* Called with local BH disabled and the pool lock held. */
#define lookup(daddr)						\
({								\
	struct inet_peer *u, **v;				\
	stackptr = stack;					\
	*stackptr++ = &peer_root;				\
	for (u = peer_root; u != peer_avl_empty; ) {		\
		if (daddr == u->v4daddr)			\
			break;					\
		if (daddr < u->v4daddr)				\
			v = &u->avl_left;			\
		else						\
			v = &u->avl_right;			\
		*stackptr++ = v;				\
		u = *v;						\
	}							\
	u;							\
})
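
/* The macro above assumes 'stack' and 'stackptr' in the caller's scope;
 * the pattern used by inet_getpeer() and unlink_from_pool() below is:
 *
 *	struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
 *	struct inet_peer *p;
 *
 *	read_lock_bh(&peer_pool_lock);
 *	p = lookup(daddr);
 *	read_unlock_bh(&peer_pool_lock);
 *
 * On exit, stack[] holds the addresses of the child links followed from
 * &peer_root down to the result, which is exactly the path that
 * link_to_pool() and peer_avl_rebalance() expect to consume. */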

/* Called with local BH disabled and the pool write lock held. */
#define lookup_rightempty(start)				\
({								\
	struct inet_peer *u, **v;				\
	*stackptr++ = &start->avl_left;				\
	v = &start->avl_left;					\
	for (u = *v; u->avl_right != peer_avl_empty; ) {	\
		v = &u->avl_right;				\
		*stackptr++ = v;				\
		u = *v;						\
	}							\
	u;							\
})
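
/* In AVL terms, lookup_rightempty(start) locates the in-order
 * predecessor of 'start': it steps once into start->avl_left and then
 * follows avl_right links to the bottom, pushing each followed edge on
 * the lookup stack so that unlink_from_pool() can splice the node out
 * and rebalance along the recorded path. */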

/* Called with local BH disabled and the pool write lock held.
 * Variable names are the proof of operation correctness.
 * Look into mm/map_avl.c for a more detailed description of the ideas.  */
static void peer_avl_rebalance(struct inet_peer **stack[],
		struct inet_peer ***stackend)
{
	struct inet_peer **nodep, *node, *l, *r;
	int lh, rh;

	while (stackend > stack) {
		nodep = *--stackend;
		node = *nodep;
		l = node->avl_left;
		r = node->avl_right;
		lh = node_height(l);
		rh = node_height(r);
		if (lh > rh + 1) { /* l: RH+2 */
			struct inet_peer *ll, *lr, *lrl, *lrr;
			int lrh;
			ll = l->avl_left;
			lr = l->avl_right;
			lrh = node_height(lr);
			if (lrh <= node_height(ll)) {	/* ll: RH+1 */
				node->avl_left = lr;	/* lr: RH or RH+1 */
				node->avl_right = r;	/* r: RH */
				node->avl_height = lrh + 1; /* RH+1 or RH+2 */
				l->avl_left = ll;	/* ll: RH+1 */
				l->avl_right = node;	/* node: RH+1 or RH+2 */
				l->avl_height = node->avl_height + 1;
				*nodep = l;
			} else { /* ll: RH, lr: RH+1 */
				lrl = lr->avl_left;	/* lrl: RH or RH-1 */
				lrr = lr->avl_right;	/* lrr: RH or RH-1 */
				node->avl_left = lrr;	/* lrr: RH or RH-1 */
				node->avl_right = r;	/* r: RH */
				node->avl_height = rh + 1; /* node: RH+1 */
				l->avl_left = ll;	/* ll: RH */
				l->avl_right = lrl;	/* lrl: RH or RH-1 */
				l->avl_height = rh + 1;	/* l: RH+1 */
				lr->avl_left = l;	/* l: RH+1 */
				lr->avl_right = node;	/* node: RH+1 */
				lr->avl_height = rh + 2;
				*nodep = lr;
			}
		} else if (rh > lh + 1) { /* r: LH+2 */
			struct inet_peer *rr, *rl, *rlr, *rll;
			int rlh;
			rr = r->avl_right;
			rl = r->avl_left;
			rlh = node_height(rl);
			if (rlh <= node_height(rr)) {	/* rr: LH+1 */
				node->avl_right = rl;	/* rl: LH or LH+1 */
				node->avl_left = l;	/* l: LH */
				node->avl_height = rlh + 1; /* LH+1 or LH+2 */
				r->avl_right = rr;	/* rr: LH+1 */
				r->avl_left = node;	/* node: LH+1 or LH+2 */
				r->avl_height = node->avl_height + 1;
				*nodep = r;
			} else { /* rr: LH, rl: LH+1 */
				rlr = rl->avl_right;	/* rlr: LH or LH-1 */
				rll = rl->avl_left;	/* rll: LH or LH-1 */
				node->avl_right = rll;	/* rll: LH or LH-1 */
				node->avl_left = l;	/* l: LH */
				node->avl_height = lh + 1; /* node: LH+1 */
				r->avl_right = rr;	/* rr: LH */
				r->avl_left = rlr;	/* rlr: LH or LH-1 */
				r->avl_height = lh + 1;	/* r: LH+1 */
				rl->avl_right = r;	/* r: LH+1 */
				rl->avl_left = node;	/* node: LH+1 */
				rl->avl_height = lh + 2;
				*nodep = rl;
			}
		} else {
			node->avl_height = (lh > rh ? lh : rh) + 1;
		}
	}
}

/* Called with local BH disabled and the pool write lock held. */
#define link_to_pool(n)						\
do {								\
	n->avl_height = 1;					\
	n->avl_left = peer_avl_empty;				\
	n->avl_right = peer_avl_empty;				\
	**--stackptr = n;					\
	peer_avl_rebalance(stack, stackptr);			\
} while(0)
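
/* link_to_pool() deliberately reuses the 'stack'/'stackptr' state left
 * behind by a failed lookup() in the same scope: the new leaf is written
 * through the last followed child link (**--stackptr = n) and the tree
 * is then rebalanced along the recorded path. */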

/* May be called with local BH enabled. */
static void unlink_from_pool(struct inet_peer *p)
{
	int do_free;

	do_free = 0;

	write_lock_bh(&peer_pool_lock);
	/* Check the reference counter.  It was artificially incremented by 1
	 * in cleanup_once() to prevent it from suddenly disappearing.  If the
	 * reference count is still 1 then the node is referenced only as `p'
	 * here and from the pool.  So under the exclusive pool lock it's safe
	 * to remove the node and free it later. */
	if (atomic_read(&p->refcnt) == 1) {
		struct inet_peer **stack[PEER_MAXDEPTH];
		struct inet_peer ***stackptr, ***delp;
		if (lookup(p->v4daddr) != p)
			BUG();
		delp = stackptr - 1; /* *delp[0] == p */
		if (p->avl_left == peer_avl_empty) {
			*delp[0] = p->avl_right;
			--stackptr;
		} else {
			/* look for a node to insert instead of p */
			struct inet_peer *t;
			t = lookup_rightempty(p);
			if (*stackptr[-1] != t)
				BUG();
			**--stackptr = t->avl_left;
			/* t is removed, t->v4daddr > x->v4daddr for any
			 * x in p->avl_left subtree.
			 * Put t in the old place of p. */
			*delp[0] = t;
			t->avl_left = p->avl_left;
			t->avl_right = p->avl_right;
			t->avl_height = p->avl_height;
			if (delp[1] != &p->avl_left)
				BUG();
			delp[1] = &t->avl_left; /* was &p->avl_left */
		}
		peer_avl_rebalance(stack, stackptr);
		peer_total--;
		do_free = 1;
	}
	write_unlock_bh(&peer_pool_lock);

	if (do_free)
		kmem_cache_free(peer_cachep, p);
	else
		/* The node is used again.  Decrease the reference counter
		 * back.  The loop "cleanup -> unlink_from_unused
		 *   -> unlink_from_pool -> putpeer -> link_to_unused
		 *   -> cleanup (for the same node)"
		 * doesn't really exist because the entry will have a
		 * recent deletion time and will not be cleaned again soon. */
		inet_putpeer(p);
}

/* May be called with local BH enabled. */
static int cleanup_once(unsigned long ttl)
{
	struct inet_peer *p;

	/* Remove the first entry from the list of unused nodes. */
	spin_lock_bh(&inet_peer_unused_lock);
	p = inet_peer_unused_head;
	if (p != NULL) {
		if (time_after(p->dtime + ttl, jiffies)) {
			/* Do not prune fresh entries. */
			spin_unlock_bh(&inet_peer_unused_lock);
			return -1;
		}
		inet_peer_unused_head = p->unused_next;
		if (p->unused_next != NULL)
			p->unused_next->unused_prevp = p->unused_prevp;
		else
			inet_peer_unused_tailp = p->unused_prevp;
		p->unused_prevp = NULL; /* mark as not on the list */
		/* Grab an extra reference to prevent node disappearing
		 * before unlink_from_pool() call. */
		atomic_inc(&p->refcnt);
	}
	spin_unlock_bh(&inet_peer_unused_lock);

	if (p == NULL)
		/* It means that the total number of USED entries has
		 * grown over inet_peer_threshold.  It shouldn't really
		 * happen because of entry limits in route cache. */
		return -1;

	unlink_from_pool(p);
	return 0;
}
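
/* The return convention above (0 == freed one entry, -1 == nothing old
 * enough to free) lets callers use cleanup_once() directly as a loop
 * condition; see the PEER_MAX_CLEANUP_WORK loop in peer_check_expire()
 * below. */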

/* Called with or without local BH being disabled. */
struct inet_peer *inet_getpeer(__u32 daddr, int create)
{
	struct inet_peer *p, *n;
	struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;

	/* Look up the address quickly. */
	read_lock_bh(&peer_pool_lock);
	p = lookup(daddr);
	if (p != peer_avl_empty)
		atomic_inc(&p->refcnt);
	read_unlock_bh(&peer_pool_lock);

	if (p != peer_avl_empty) {
		/* The existing node has been found. */
		/* Remove the entry from unused list if it was there. */
		unlink_from_unused(p);
		return p;
	}

	if (!create)
		return NULL;

	/* Allocate the space outside the locked region. */
	n = kmem_cache_alloc(peer_cachep, GFP_ATOMIC);
	if (n == NULL)
		return NULL;
	n->v4daddr = daddr;
	atomic_set(&n->refcnt, 1);
	n->ip_id_count = secure_ip_id(daddr);
	n->tcp_ts_stamp = 0;

	write_lock_bh(&peer_pool_lock);
	/* Check if an entry has suddenly appeared. */
	p = lookup(daddr);
	if (p != peer_avl_empty)
		goto out_free;

	/* Link the node. */
	link_to_pool(n);
	n->unused_prevp = NULL; /* not on the list */
	peer_total++;
	write_unlock_bh(&peer_pool_lock);

	if (peer_total >= inet_peer_threshold)
		/* Remove one less-recently-used entry. */
		cleanup_once(0);

	return n;

out_free:
	/* The appropriate node is already in the pool. */
	atomic_inc(&p->refcnt);
	write_unlock_bh(&peer_pool_lock);
	/* Remove the entry from unused list if it was there. */
	unlink_from_unused(p);
	/* Free the preallocated node. */
	kmem_cache_free(peer_cachep, n);
	return p;
}
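
/* A sketch of typical use, modelled on the route cache (see
 * rt_bind_peer() in net/ipv4/route.c; the surrounding code here is
 * illustrative only):
 *
 *	struct inet_peer *peer;
 *
 *	peer = inet_getpeer(daddr, 1);
 *	if (peer != NULL) {
 *		... use the peer, e.g. via inet_getid() ...
 *		inet_putpeer(peer);
 *	}
 *
 * inet_putpeer() drops the reference and puts the node on the unused
 * list once the count reaches 0.  With create == 0 the call is a pure
 * lookup and returns NULL for an unknown address; with create == 1 it
 * fails only if the GFP_ATOMIC allocation fails. */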

/* Called with local BH disabled. */
static void peer_check_expire(unsigned long dummy)
{
	int i;
	int ttl;

	if (peer_total >= inet_peer_threshold)
		ttl = inet_peer_minttl;
	else
		ttl = inet_peer_maxttl
				- (inet_peer_maxttl - inet_peer_minttl) / HZ *
					peer_total / inet_peer_threshold * HZ;
	for (i = 0; i < PEER_MAX_CLEANUP_WORK && !cleanup_once(ttl); i++);

	/* Trigger the timer after inet_peer_gc_mintime .. inet_peer_gc_maxtime
	 * interval depending on the total number of entries (more entries,
	 * less interval). */
	peer_periodic_timer.expires = jiffies
		+ inet_peer_gc_maxtime
		- (inet_peer_gc_maxtime - inet_peer_gc_mintime) / HZ *
			peer_total / inet_peer_threshold * HZ;
	add_timer(&peer_periodic_timer);
}
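
/* Both computations above are the linear interpolation
 *
 *	value = max - (max - min) * peer_total / inet_peer_threshold
 *
 * with the division by HZ done first and the multiplication by HZ done
 * last so the intermediate product stays within 32 bits.  For example,
 * with HZ == 1000 and the default threshold of 65536 + 128 == 65664:
 * at peer_total == 32832 (half full) the ttl is
 *	600*HZ - 480 * 32832 / 65664 * HZ == 600*HZ - 240*HZ == 360*HZ,
 * i.e. 360 seconds, halfway between maxttl (600s) and minttl (120s). */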

EXPORT_SYMBOL(inet_peer_idlock);