/*
 * SLOB Allocator: Simple List Of Blocks
 *
 * Matt Mackall <mpm@selenic.com> 12/30/03
 *
 * How SLOB works:
 *
 * The core of SLOB is a traditional K&R style heap allocator, with
 * support for returning aligned objects. The granularity of this
 * allocator is 8 bytes on x86, though it's perhaps possible to reduce
 * this to 4 if it's deemed worth the effort. The slob heap is a
 * singly-linked list of pages from __get_free_page, grown on demand,
 * and allocation from the heap is currently first-fit.
 *
 * Above this is an implementation of kmalloc/kfree. Blocks returned
 * from kmalloc are 8-byte aligned and prepended with an 8-byte header.
 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
 * __get_free_pages directly so that it can return page-aligned blocks
 * and keeps a linked list of such pages and their orders. These
 * objects are detected in kfree() by their page alignment.
 *
 * SLAB is emulated on top of SLOB by simply calling constructors and
 * destructors for every SLAB allocation. Objects are returned with
 * the 8-byte alignment unless the SLAB_HWCACHE_ALIGN flag is
 * set, in which case the low-level allocator will fragment blocks to
 * create the proper alignment. Again, objects of page-size or greater
 * are allocated by calling __get_free_pages. As SLAB objects know
 * their size, no separate size bookkeeping is necessary and there is
 * essentially no allocation space overhead.
 */
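
/*
 * For example, from a caller's point of view (illustrative only):
 *
 *	p = kmalloc(64, GFP_KERNEL);             - served from the slob heap
 *	q = kmalloc(2 * PAGE_SIZE, GFP_KERNEL);  - served by __get_free_pages
 *	kfree(p);
 *	kfree(q);
 */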

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/rcupdate.h>

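/*
 * A free block starts with its length in units followed by a pointer
 * to the next free block; in an allocated block only the units field
 * remains meaningful, acting as a size header in front of the payload.
 */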
struct slob_block {
	int units;
	struct slob_block *next;
};
typedef struct slob_block slob_t;

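/*
 * All bookkeeping is done in multiples of SLOB_UNIT, the size of a
 * slob_t header.  On a typical 32-bit build sizeof(slob_t) is 8, so
 * SLOB_UNITS(13), for example, rounds a 13-byte request up to 2 units.
 */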
#define SLOB_UNIT sizeof(slob_t)
#define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1)/SLOB_UNIT)
#define SLOB_ALIGN L1_CACHE_BYTES

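/*
 * Allocations of PAGE_SIZE or more bypass the slob heap and come
 * straight from __get_free_pages; a bigblock record remembers the
 * page order so kfree() and ksize() can handle them later.
 */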
struct bigblock {
	int order;
	void *pages;
	struct bigblock *next;
};
typedef struct bigblock bigblock_t;

/*
 * struct slob_rcu is inserted at the tail of allocated slob blocks, which
 * were created with a SLAB_DESTROY_BY_RCU slab. slob_rcu is used to free
 * the block using call_rcu.
 */
struct slob_rcu {
	struct rcu_head head;
	int size;
};

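/*
 * The heap is a circular, address-ordered free list threaded through
 * the free blocks themselves.  "arena" is a permanent one-unit
 * sentinel, and "slobfree" is the roving pointer left behind the most
 * recent allocation or free.
 */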
static slob_t arena = { .next = &arena, .units = 1 };
static slob_t *slobfree = &arena;
static bigblock_t *bigblocks;
static DEFINE_SPINLOCK(slob_lock);
static DEFINE_SPINLOCK(block_lock);

static void slob_free(void *b, int size);
static void slob_timer_cbk(void);

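/*
 * First-fit allocation from the free list.  A nonzero "align" may
 * split the head of a candidate block so the returned fragment starts
 * on an align boundary.  If a full scan finds nothing, a fresh page
 * is donated to the heap and the scan restarts; a failing PAGE_SIZE
 * request is the shrink probe from slob_timer_cbk() and gives up
 * instead of growing the heap.
 */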
static void *slob_alloc(size_t size, gfp_t gfp, int align)
{
	slob_t *prev, *cur, *aligned = NULL;
	int delta = 0, units = SLOB_UNITS(size);
	unsigned long flags;

	spin_lock_irqsave(&slob_lock, flags);
	prev = slobfree;
	for (cur = prev->next; ; prev = cur, cur = cur->next) {
		if (align) {
			aligned = (slob_t *)ALIGN((unsigned long)cur, align);
			delta = aligned - cur;
		}
		if (cur->units >= units + delta) { /* room enough? */
			if (delta) { /* need to fragment head to align? */
				aligned->units = cur->units - delta;
				aligned->next = cur->next;
				cur->next = aligned;
				cur->units = delta;
				prev = cur;
				cur = aligned;
			}

			if (cur->units == units) /* exact fit? */
				prev->next = cur->next; /* unlink */
			else { /* fragment */
				prev->next = cur + units;
				prev->next->units = cur->units - units;
				prev->next->next = cur->next;
				cur->units = units;
			}

			slobfree = prev;
			spin_unlock_irqrestore(&slob_lock, flags);
			return cur;
		}
		if (cur == slobfree) { /* completed a full scan of the list */
			spin_unlock_irqrestore(&slob_lock, flags);

			if (size == PAGE_SIZE) /* trying to shrink arena? */
				return NULL;

			cur = (slob_t *)__get_free_page(gfp);
			if (!cur)
				return NULL;

			slob_free(cur, PAGE_SIZE);
			spin_lock_irqsave(&slob_lock, flags);
			cur = slobfree;
		}
	}
}

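/*
 * Reinsert a block at its address-ordered place in the free list,
 * coalescing with its neighbours where they are contiguous.  A zero
 * "size" means the units header stored in the block is still valid.
 */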
static void slob_free(void *block, int size)
{
	slob_t *cur, *b = (slob_t *)block;
	unsigned long flags;

	if (!block)
		return;

	if (size)
		b->units = SLOB_UNITS(size);

	/* Find reinsertion point */
	spin_lock_irqsave(&slob_lock, flags);
	for (cur = slobfree; !(b > cur && b < cur->next); cur = cur->next)
		if (cur >= cur->next && (b > cur || b < cur->next))
			break;

	if (b + b->units == cur->next) { /* coalesce with next block */
		b->units += cur->next->units;
		b->next = cur->next->next;
	} else
		b->next = cur->next;

	if (cur + cur->units == b) { /* coalesce with previous block */
		cur->units += b->units;
		cur->next = b->next;
	} else
		cur->next = b;

	slobfree = cur;

	spin_unlock_irqrestore(&slob_lock, flags);
}

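/*
 * Small requests are padded by one SLOB_UNIT so the size header sits
 * immediately in front of the returned pointer; anything approaching
 * PAGE_SIZE goes to the page allocator via a bigblock record instead.
 */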
void *__kmalloc(size_t size, gfp_t gfp)
{
	slob_t *m;
	bigblock_t *bb;
	unsigned long flags;

	if (size < PAGE_SIZE - SLOB_UNIT) {
		m = slob_alloc(size + SLOB_UNIT, gfp, 0);
		return m ? (void *)(m + 1) : NULL;
	}

	bb = slob_alloc(sizeof(bigblock_t), gfp, 0);
	if (!bb)
		return NULL;

	bb->order = get_order(size);
	bb->pages = (void *)__get_free_pages(gfp, bb->order);

	if (bb->pages) {
		spin_lock_irqsave(&block_lock, flags);
		bb->next = bigblocks;
		bigblocks = bb;
		spin_unlock_irqrestore(&block_lock, flags);
		return bb->pages;
	}

	slob_free(bb, sizeof(bigblock_t));
	return NULL;
}
EXPORT_SYMBOL(__kmalloc);

/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 *
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes.  If @p is %NULL, krealloc()
 * behaves exactly like kmalloc().  If @new_size is 0 and @p is not a
 * %NULL pointer, the object pointed to is freed.
 */
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;

	if (unlikely(!p))
		return kmalloc_track_caller(new_size, flags);

	if (unlikely(!new_size)) {
		kfree(p);
		return NULL;
	}

	ret = kmalloc_track_caller(new_size, flags);
	if (ret) {
		memcpy(ret, p, min(new_size, ksize(p)));
		kfree(p);
	}
	return ret;
}
EXPORT_SYMBOL(krealloc);

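/*
 * Page-aligned pointers may be bigblock allocations, so the bigblocks
 * list is checked first; anything else is a slob block whose header
 * sits one slob_t before the payload.
 */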
void kfree(const void *block)
{
	bigblock_t *bb, **last = &bigblocks;
	unsigned long flags;

	if (!block)
		return;

	if (!((unsigned long)block & (PAGE_SIZE-1))) {
		/* might be on the big block list */
		spin_lock_irqsave(&block_lock, flags);
		for (bb = bigblocks; bb; last = &bb->next, bb = bb->next) {
			if (bb->pages == block) {
				*last = bb->next;
				spin_unlock_irqrestore(&block_lock, flags);
				free_pages((unsigned long)block, bb->order);
				slob_free(bb, sizeof(bigblock_t));
				return;
			}
		}
		spin_unlock_irqrestore(&block_lock, flags);
	}

	/* size 0: the header in front of the block still holds the units */
	slob_free((slob_t *)block - 1, 0);
}
EXPORT_SYMBOL(kfree);

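/*
 * Usable size of an allocation: the whole page span for a bigblock,
 * otherwise whatever the units header in front of the block records.
 */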
size_t ksize(const void *block)
{
	bigblock_t *bb;
	unsigned long flags;

	if (!block)
		return 0;

	if (!((unsigned long)block & (PAGE_SIZE-1))) {
		spin_lock_irqsave(&block_lock, flags);
		for (bb = bigblocks; bb; bb = bb->next)
			if (bb->pages == block) {
				spin_unlock_irqrestore(&block_lock, flags);
				return PAGE_SIZE << bb->order;
			}
		spin_unlock_irqrestore(&block_lock, flags);
	}

	return ((slob_t *)block - 1)->units * SLOB_UNIT;
}

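/*
 * SLOB keeps almost no per-cache state: only the (possibly padded)
 * object size, the alignment, the creation flags, the name and the
 * constructor.
 */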
struct kmem_cache {
	unsigned int size, align;
	unsigned long flags;
	const char *name;
	void (*ctor)(void *, struct kmem_cache *, unsigned long);
};

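/*
 * A cache is itself just a small slob allocation.  SLAB_DESTROY_BY_RCU
 * caches get sizeof(struct slob_rcu) extra bytes per object for the
 * footer consumed at free time.
 */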
struct kmem_cache *kmem_cache_create(const char *name, size_t size,
	size_t align, unsigned long flags,
	void (*ctor)(void*, struct kmem_cache *, unsigned long),
	void (*dtor)(void*, struct kmem_cache *, unsigned long))
{
	struct kmem_cache *c;

	c = slob_alloc(sizeof(struct kmem_cache), flags, 0);

	if (c) {
		c->name = name;
		c->size = size;
		if (flags & SLAB_DESTROY_BY_RCU) {
			/* leave room for rcu footer at the end of object */
			c->size += sizeof(struct slob_rcu);
		}
		c->flags = flags;
		c->ctor = ctor;
		/* ignore alignment unless it's forced */
		c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
		if (c->align < align)
			c->align = align;
	} else if (flags & SLAB_PANIC)
		panic("Cannot create slab cache %s\n", name);

	return c;
}
EXPORT_SYMBOL(kmem_cache_create);

void kmem_cache_destroy(struct kmem_cache *c)
{
	slob_free(c, sizeof(struct kmem_cache));
}
EXPORT_SYMBOL(kmem_cache_destroy);

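/*
 * Objects smaller than a page come from the slob heap with the
 * cache's alignment; larger objects come straight from the page
 * allocator.  The constructor, when present, runs on every
 * allocation, as noted in the header comment.
 */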
void *kmem_cache_alloc(struct kmem_cache *c, gfp_t flags)
{
	void *b;

	if (c->size < PAGE_SIZE)
		b = slob_alloc(c->size, flags, c->align);
	else
		b = (void *)__get_free_pages(flags, get_order(c->size));

	if (c->ctor)
		c->ctor(b, c, 0);

	return b;
}
EXPORT_SYMBOL(kmem_cache_alloc);

void *kmem_cache_zalloc(struct kmem_cache *c, gfp_t flags)
{
	void *ret = kmem_cache_alloc(c, flags);
	if (ret)
		memset(ret, 0, c->size);

	return ret;
}
EXPORT_SYMBOL(kmem_cache_zalloc);

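/*
 * Common free path for cache objects: small ones return to the slob
 * heap, page-sized and larger ones return to the page allocator.
 */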
static void __kmem_cache_free(void *b, int size)
{
	if (size < PAGE_SIZE)
		slob_free(b, size);
	else
		free_pages((unsigned long)b, get_order(size));
}

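/*
 * RCU callback: the rcu_head is the first field of the slob_rcu
 * footer, so stepping back (size - sizeof(struct slob_rcu)) bytes
 * recovers the start of the object.
 */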
static void kmem_rcu_free(struct rcu_head *head)
{
	struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
	void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));

	__kmem_cache_free(b, slob_rcu->size);
}

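/*
 * SLAB_DESTROY_BY_RCU objects are queued and freed only after an RCU
 * grace period; everything else is freed immediately.
 */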
void kmem_cache_free(struct kmem_cache *c, void *b)
{
	if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
		struct slob_rcu *slob_rcu;
		slob_rcu = b + (c->size - sizeof(struct slob_rcu));
		INIT_RCU_HEAD(&slob_rcu->head);
		slob_rcu->size = c->size;
		call_rcu(&slob_rcu->head, kmem_rcu_free);
	} else {
		__kmem_cache_free(b, c->size);
	}
}
EXPORT_SYMBOL(kmem_cache_free);

unsigned int kmem_cache_size(struct kmem_cache *c)
{
	return c->size;
}
EXPORT_SYMBOL(kmem_cache_size);

const char *kmem_cache_name(struct kmem_cache *c)
{
	return c->name;
}
EXPORT_SYMBOL(kmem_cache_name);

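/*
 * Housekeeping timer; the cast adapts the argument-less callback to
 * the (unsigned long) argument that timer callbacks take.
 */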
static struct timer_list slob_timer = TIMER_INITIALIZER(
	(void (*)(unsigned long))slob_timer_cbk, 0, 0);

int kmem_cache_shrink(struct kmem_cache *d)
{
	return 0;
}
EXPORT_SYMBOL(kmem_cache_shrink);

int kmem_ptr_validate(struct kmem_cache *a, const void *b)
{
	return 0;
}

void __init kmem_cache_init(void)
{
	slob_timer_cbk();
}

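/*
 * Once a second, try to carve a whole page-aligned page out of the
 * heap and give it back to the page allocator, gradually shrinking an
 * arena that has grown beyond its needs.
 */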
static void slob_timer_cbk(void)
{
	void *p = slob_alloc(PAGE_SIZE, 0, PAGE_SIZE-1);

	if (p)
		free_page((unsigned long)p);

	mod_timer(&slob_timer, jiffies + HZ);
}