/*
 * linux/mm/slab.c
 * Written by Mark Hemment, 1996/97.
 * (markhe@nextd.demon.co.uk)
 *
 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
 *
 * Major cleanup, different bufctl logic, per-cpu arrays
 *	(c) 2000 Manfred Spraul
 *
 * Cleanup, make the head arrays unconditional, preparation for NUMA
 *	(c) 2002 Manfred Spraul
 *
 * An implementation of the Slab Allocator as described in outline in:
 *	UNIX Internals: The New Frontiers by Uresh Vahalia
 *	Pub: Prentice Hall	ISBN 0-13-101908-2
 * or with a little more detail in:
 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
 *	Jeff Bonwick (Sun Microsystems).
 *	Presented at: USENIX Summer 1994 Technical Conference
 *
 * The memory is organized in caches, one cache for each object type.
 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
 * Each cache consists of many slabs (they are small (usually one
 * page long) and always contiguous), and each slab contains multiple
 * initialized objects.
 *
 * This means that your constructor is used only for newly allocated
 * slabs and you must pass objects with the same initializations to
 * kmem_cache_free.
 *
 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
 * normal). If you need a special memory type, then you must create a
 * new cache for that memory type.
 *
 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
 *   full slabs with 0 free objects
 *   partial slabs
 *   empty slabs with no allocated objects
 *
 * If partial slabs exist, then new allocations come from these slabs,
 * otherwise from empty slabs, or new slabs are allocated.
 *
 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
 *
 * Each cache has a short per-cpu head array, most allocs
 * and frees go into that array, and if that array overflows, then 1/2
 * of the entries in the array are given back into the global cache.
 * The head array is strictly LIFO and should improve the cache hit rates.
 * On SMP, it additionally reduces the spinlock operations.
 *
 * The c_cpuarray may not be read with local interrupts enabled -
 * it's changed with a smp_call_function().
 *
 * SMP synchronization:
 *  constructors and destructors are called without any locking.
 *  Several members in struct kmem_cache and struct slab never change, they
 *	are accessed without any locking.
 *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
 *	and local interrupts are disabled so slab code is preempt-safe.
 *  The non-constant members are protected with a per-cache irq spinlock.
 *
 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
 * in 2000 - many ideas in the current implementation are derived from
 * his patch.
 *
 * Further notes from the original documentation:
 *
 * 11 April '97.  Started multi-threading - markhe
 *	The global cache-chain is protected by the mutex 'cache_chain_mutex'.
 *	The mutex is only needed when accessing/extending the cache-chain, which
 *	can never happen inside an interrupt (kmem_cache_create(),
 *	kmem_cache_shrink() and kmem_cache_reap()).
 *
 *	At present, each engine can be growing a cache.  This should be blocked.
 *
 * 15 March 2005. NUMA slab allocator.
 *	Shai Fultheim <shai@scalex86.org>.
 *	Shobhit Dayal <shobhit@calsoftinc.com>
 *	Alok N Kataria <alokk@calsoftinc.com>
 *	Christoph Lameter <christoph@lameter.com>
 *
 *	Modified the slab allocator to be node aware on NUMA systems.
 *	Each node has its own list of partial, free and full slabs.
 *	All object allocations for a node occur from node specific slab lists.
 */
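
/*
 * Illustrative usage of the cache API described above (a hedged sketch,
 * not part of slab.c itself; 'struct foo', foo_ctor() and foo_cachep are
 * hypothetical names, the signatures match the ctor/dtor pointers in
 * struct kmem_cache below):
 *
 *	static struct kmem_cache *foo_cachep;
 *
 *	static void foo_ctor(void *obj, struct kmem_cache *cachep,
 *			     unsigned long flags)
 *	{
 *		memset(obj, 0, sizeof(struct foo));
 *	}
 *
 *	foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				       SLAB_HWCACHE_ALIGN, foo_ctor, NULL);
 *	struct foo *f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cachep, f);	// must be as-initialized, see above
 */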

#include	<linux/config.h>
#include	<linux/slab.h>
#include	<linux/mm.h>
#include	<linux/swap.h>
#include	<linux/cache.h>
#include	<linux/interrupt.h>
#include	<linux/init.h>
#include	<linux/compiler.h>
#include	<linux/seq_file.h>
#include	<linux/notifier.h>
#include	<linux/kallsyms.h>
#include	<linux/cpu.h>
#include	<linux/sysctl.h>
#include	<linux/module.h>
#include	<linux/rcupdate.h>
#include	<linux/string.h>
#include	<linux/nodemask.h>
#include	<linux/mempolicy.h>
#include	<linux/mutex.h>

#include	<asm/uaccess.h>
#include	<asm/cacheflush.h>
#include	<asm/tlbflush.h>
#include	<asm/page.h>

/*
 * DEBUG	- 1 for kmem_cache_create() to honour: SLAB_DEBUG_INITIAL,
 *		  SLAB_RED_ZONE & SLAB_POISON.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * STATS	- 1 to collect stats for /proc/slabinfo.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
 */

#ifdef CONFIG_DEBUG_SLAB
#define	DEBUG		1
#define	STATS		1
#define	FORCED_DEBUG	1
#else
#define	DEBUG		0
#define	STATS		0
#define	FORCED_DEBUG	0
#endif

/* Shouldn't this be in a header file somewhere? */
#define	BYTES_PER_WORD		sizeof(void *)

#ifndef cache_line_size
#define cache_line_size()	L1_CACHE_BYTES
#endif

#ifndef ARCH_KMALLOC_MINALIGN
/*
 * Enforce a minimum alignment for the kmalloc caches.
 * Usually, the kmalloc caches are cache_line_size() aligned, except when
 * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned.
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than BYTES_PER_WORD. ARCH_KMALLOC_MINALIGN allows that.
 * Note that this flag disables some debug features.
 */
#define ARCH_KMALLOC_MINALIGN 0
#endif

#ifndef ARCH_SLAB_MINALIGN
/*
 * Enforce a minimum alignment for all caches.
 * Intended for archs that get misalignment faults even for BYTES_PER_WORD
 * aligned buffers. Includes ARCH_KMALLOC_MINALIGN.
 * If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables
 * some debug features.
 */
#define ARCH_SLAB_MINALIGN 0
#endif

#ifndef ARCH_KMALLOC_FLAGS
#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
#endif

/* Legal flag mask for kmem_cache_create(). */
#if DEBUG
# define CREATE_MASK	(SLAB_DEBUG_INITIAL | SLAB_RED_ZONE | \
			 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
			 SLAB_NO_REAP | SLAB_CACHE_DMA | \
			 SLAB_MUST_HWCACHE_ALIGN | SLAB_STORE_USER | \
			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU)
#else
# define CREATE_MASK	(SLAB_HWCACHE_ALIGN | SLAB_NO_REAP | \
			 SLAB_CACHE_DMA | SLAB_MUST_HWCACHE_ALIGN | \
			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU)
#endif

/*
 * kmem_bufctl_t:
 *
 * Bufctls are used for linking objs within a slab, as linked offsets.
 *
 * This implementation relies on "struct page" for locating the cache &
 * slab an object belongs to.
 * This allows the bufctl structure to be small (one int), but limits
 * the number of objects a slab (not a cache) can contain when off-slab
 * bufctls are used. The limit is the size of the largest general cache
 * that does not use off-slab slabs.
 * For 32-bit archs with 4 kB pages, this is 56.
 * This is not serious, as it is only for large objects, when it is unwise
 * to have too many per slab.
 * Note: This limit can be raised by introducing a general cache whose size
 * is less than 512 (PAGE_SIZE<<3), but greater than 256.
 */

typedef unsigned int kmem_bufctl_t;
#define BUFCTL_END	(((kmem_bufctl_t)(~0U))-0)
#define BUFCTL_FREE	(((kmem_bufctl_t)(~0U))-1)
#define	SLAB_LIMIT	(((kmem_bufctl_t)(~0U))-2)

/* Max number of objs-per-slab for caches which use off-slab slabs.
 * Needed to avoid a possible looping condition in cache_grow().
 */
static unsigned long offslab_limit;

/*
 * struct slab
 *
 * Manages the objs in a slab. Placed either at the beginning of mem allocated
 * for a slab, or allocated from a general cache.
 * Slabs are chained into three lists: fully used, partial, fully free slabs.
 */
struct slab {
	struct list_head list;
	unsigned long colouroff;
	void *s_mem;		/* including colour offset */
	unsigned int inuse;	/* num of objs active in slab */
	kmem_bufctl_t free;
	unsigned short nodeid;
};
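
/*
 * Free objects within a slab are chained through their kmem_bufctl_t
 * entries: slabp->free holds the index of the first free object, the
 * bufctl entry of that object holds the index of the next free one, and
 * the chain ends with BUFCTL_END.  A hedged sketch (this assumes the
 * bufctl array sits right behind struct slab, which is what the
 * slab_bufctl() helper defined later in this file relies on):
 *
 *	kmem_bufctl_t *bufctl = (kmem_bufctl_t *)(slabp + 1);
 *	void *obj = slabp->s_mem + cachep->buffer_size * slabp->free;
 *	kmem_bufctl_t next = bufctl[slabp->free];  // next index or BUFCTL_END
 */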

/*
 * struct slab_rcu
 *
 * slab_destroy on a SLAB_DESTROY_BY_RCU cache uses this structure to
 * arrange for kmem_freepages to be called via RCU.  This is useful if
 * we need to approach a kernel structure obliquely, from its address
 * obtained without the usual locking.  We can lock the structure to
 * stabilize it and check it's still at the given address, only if we
 * can be sure that the memory has not been meanwhile reused for some
 * other kind of object (which our subsystem's lock might corrupt).
 *
 * rcu_read_lock before reading the address, then rcu_read_unlock after
 * taking the spinlock within the structure expected at that address.
 *
 * We assume struct slab_rcu can overlay struct slab when destroying.
 */
struct slab_rcu {
	struct rcu_head head;
	struct kmem_cache *cachep;
	void *addr;
};
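
/*
 * A sketch of the lookup pattern the comment above describes (illustrative
 * only; 'obj', its 'lock' member, some_lockless_lookup() and still_valid()
 * are hypothetical):
 *
 *	rcu_read_lock();
 *	obj = some_lockless_lookup(key);	// address read under RCU
 *	if (obj) {
 *		spin_lock(&obj->lock);		// stabilize the object
 *		rcu_read_unlock();
 *		if (!still_valid(obj, key)) {	// may have been recycled
 *			spin_unlock(&obj->lock);
 *			obj = NULL;
 *		}
 *	} else
 *		rcu_read_unlock();
 *
 * SLAB_DESTROY_BY_RCU only guarantees the memory remains an object of the
 * same cache for the RCU grace period, hence the re-check after locking.
 */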

/*
 * struct array_cache
 *
 * Purpose:
 * - LIFO ordering, to hand out cache-warm objects from _alloc
 * - reduce the number of linked list operations
 * - reduce spinlock operations
 *
 * The limit is stored in the per-cpu structure to reduce the data cache
 * footprint.
 *
 */
struct array_cache {
	unsigned int avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int touched;
	spinlock_t lock;
	void *entry[0];		/*
				 * Must have this definition in here for the proper
				 * alignment of array_cache. Also simplifies accessing
				 * the entries.
				 * [0] is for gcc 2.95. It should really be [].
				 */
};
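
/*
 * In the fast path, allocation and free reduce to LIFO stack operations
 * on this array (a sketch of what the __cache_alloc/__cache_free paths
 * later in this file effectively do, with local interrupts disabled):
 *
 *	objp = ac->entry[--ac->avail];		// alloc: pop cache-warm obj
 *	ac->entry[ac->avail++] = objp;		// free: push it back
 */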

/* bootstrap: The caches do not work without cpuarrays anymore,
 * but the cpuarrays are allocated from the generic caches...
 */
#define BOOT_CPUCACHE_ENTRIES	1
struct arraycache_init {
	struct array_cache cache;
	void *entries[BOOT_CPUCACHE_ENTRIES];
};

/*
 * The slab lists for all objects.
 */
struct kmem_list3 {
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long free_objects;
	unsigned long next_reap;
	int free_touched;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	spinlock_t list_lock;
	struct array_cache *shared;	/* shared per node */
	struct array_cache **alien;	/* on other nodes */
};

/*
 * Need this for bootstrapping a per node allocator.
 */
#define NUM_INIT_LISTS (2 * MAX_NUMNODES + 1)
struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
#define	CACHE_CACHE 0
#define	SIZE_AC 1
#define	SIZE_L3 (1 + MAX_NUMNODES)

/*
 * This function must be completely optimized away if
 * a constant is passed to it. Mostly the same as
 * what is in linux/slab.h except it returns an
 * index.
 */
static __always_inline int index_of(const size_t size)
{
	extern void __bad_size(void);

	if (__builtin_constant_p(size)) {
		int i = 0;

#define CACHE(x) \
	if (size <= x) \
		return i; \
	else \
		i++;
#include "linux/kmalloc_sizes.h"
#undef CACHE
		__bad_size();
	} else
		__bad_size();
	return 0;
}

#define INDEX_AC index_of(sizeof(struct arraycache_init))
#define INDEX_L3 index_of(sizeof(struct kmem_list3))

static void kmem_list3_init(struct kmem_list3 *parent)
{
	INIT_LIST_HEAD(&parent->slabs_full);
	INIT_LIST_HEAD(&parent->slabs_partial);
	INIT_LIST_HEAD(&parent->slabs_free);
	parent->shared = NULL;
	parent->alien = NULL;
	parent->colour_next = 0;
	spin_lock_init(&parent->list_lock);
	parent->free_objects = 0;
	parent->free_touched = 0;
}

#define MAKE_LIST(cachep, listp, slab, nodeid)				\
	do {								\
		INIT_LIST_HEAD(listp);					\
		list_splice(&(cachep->nodelists[nodeid]->slab), listp); \
	} while (0)

#define	MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
	do {								\
	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
	} while (0)

/*
 * struct kmem_cache
 *
 * manages a cache.
 */

struct kmem_cache {
/* 1) per-cpu data, touched during every alloc/free */
	struct array_cache *array[NR_CPUS];
	unsigned int batchcount;
	unsigned int limit;
	unsigned int shared;
	unsigned int buffer_size;
/* 2) touched by every alloc & free from the backend */
	struct kmem_list3 *nodelists[MAX_NUMNODES];
	unsigned int flags;	/* constant flags */
	unsigned int num;	/* # of objs per slab */
	spinlock_t spinlock;

/* 3) cache_grow/shrink */
	/* order of pgs per slab (2^n) */
	unsigned int gfporder;

	/* force GFP flags, e.g. GFP_DMA */
	gfp_t gfpflags;

	size_t colour;		/* cache colouring range */
	unsigned int colour_off;	/* colour offset */
	struct kmem_cache *slabp_cache;
	unsigned int slab_size;
	unsigned int dflags;	/* dynamic flags */

	/* constructor func */
	void (*ctor)(void *, struct kmem_cache *, unsigned long);

	/* de-constructor func */
	void (*dtor)(void *, struct kmem_cache *, unsigned long);

/* 4) cache creation/removal */
	const char *name;
	struct list_head next;

/* 5) statistics */
#if STATS
	unsigned long num_active;
	unsigned long num_allocations;
	unsigned long high_mark;
	unsigned long grown;
	unsigned long reaped;
	unsigned long errors;
	unsigned long max_freeable;
	unsigned long node_allocs;
	unsigned long node_frees;
	atomic_t allochit;
	atomic_t allocmiss;
	atomic_t freehit;
	atomic_t freemiss;
#endif
#if DEBUG
	/*
	 * If debugging is enabled, then the allocator can add additional
	 * fields and/or padding to every object. buffer_size contains the total
	 * object size including these internal fields, the following two
	 * variables contain the offset to the user object and its size.
	 */
	int obj_offset;
	int obj_size;
#endif
};

#define CFLGS_OFF_SLAB		(0x80000000UL)
#define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)

#define BATCHREFILL_LIMIT	16
/* Optimization question: fewer reaps means a lower probability
 * of unnecessary cpucache drain/refill cycles.
 *
 * OTOH the cpuarrays can contain lots of objects,
 * which could lock up otherwise freeable slabs.
 */
#define REAPTIMEOUT_CPUC	(2*HZ)
#define REAPTIMEOUT_LIST3	(4*HZ)

#if STATS
#define	STATS_INC_ACTIVE(x)	((x)->num_active++)
#define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
#define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
#define	STATS_INC_GROWN(x)	((x)->grown++)
#define	STATS_INC_REAPED(x)	((x)->reaped++)
#define	STATS_SET_HIGH(x)	do { if ((x)->num_active > (x)->high_mark) \
					(x)->high_mark = (x)->num_active; \
				} while (0)
#define	STATS_INC_ERR(x)	((x)->errors++)
#define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
#define	STATS_INC_NODEFREES(x)	((x)->node_frees++)
#define	STATS_SET_FREEABLE(x, i) \
				do { if ((x)->max_freeable < i) \
					(x)->max_freeable = i; \
				} while (0)

#define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
#define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
#define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
#define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
#else
#define	STATS_INC_ACTIVE(x)	do { } while (0)
#define	STATS_DEC_ACTIVE(x)	do { } while (0)
#define	STATS_INC_ALLOCED(x)	do { } while (0)
#define	STATS_INC_GROWN(x)	do { } while (0)
#define	STATS_INC_REAPED(x)	do { } while (0)
#define	STATS_SET_HIGH(x)	do { } while (0)
#define	STATS_INC_ERR(x)	do { } while (0)
#define	STATS_INC_NODEALLOCS(x)	do { } while (0)
#define	STATS_INC_NODEFREES(x)	do { } while (0)
#define	STATS_SET_FREEABLE(x, i) \
				do { } while (0)

#define STATS_INC_ALLOCHIT(x)	do { } while (0)
#define STATS_INC_ALLOCMISS(x)	do { } while (0)
#define STATS_INC_FREEHIT(x)	do { } while (0)
#define STATS_INC_FREEMISS(x)	do { } while (0)
#endif

#if DEBUG
/* Magic nums for obj red zoning.
 * Placed in the first word before and the first word after an obj.
 */
#define	RED_INACTIVE	0x5A2CF071UL	/* when obj is inactive */
#define	RED_ACTIVE	0x170FC2A5UL	/* when obj is active */

/* ...and for poisoning */
#define	POISON_INUSE	0x5a	/* for use-uninitialised poisoning */
#define POISON_FREE	0x6b	/* for use-after-free poisoning */
#define	POISON_END	0xa5	/* end-byte of poisoning */

/* memory layout of objects:
 * 0		: objp
 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
 * 		the end of an object is aligned with the end of the real
 * 		allocation. Catches writes behind the end of the allocation.
 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
 * 		redzone word.
 * cachep->obj_offset: The real object.
 * cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
 * cachep->buffer_size - 1* BYTES_PER_WORD: last caller address [BYTES_PER_WORD long]
 */
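
/*
 * A worked example of that layout (illustrative, assuming a 32-bit arch
 * where BYTES_PER_WORD == 4, a 16-byte object and SLAB_RED_ZONE |
 * SLAB_STORE_USER, so obj_offset == 4 and buffer_size == 28):
 *
 *	offset  0.. 3: redzone word 1 (RED_ACTIVE/RED_INACTIVE)
 *	offset  4..19: the real object (returned pointer = objp + 4)
 *	offset 20..23: redzone word 2
 *	offset 24..27: address of the last caller
 *
 * dbg_redzone1/dbg_redzone2/dbg_userword below compute exactly these
 * three hidden words.
 */
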
static int obj_offset(struct kmem_cache *cachep)
{
	return cachep->obj_offset;
}

static int obj_size(struct kmem_cache *cachep)
{
	return cachep->obj_size;
}

static unsigned long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	return (unsigned long *)(objp + obj_offset(cachep) - BYTES_PER_WORD);
}

static unsigned long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	if (cachep->flags & SLAB_STORE_USER)
		return (unsigned long *)(objp + cachep->buffer_size -
					 2 * BYTES_PER_WORD);
	return (unsigned long *)(objp + cachep->buffer_size - BYTES_PER_WORD);
}

static void **dbg_userword(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
	return (void **)(objp + cachep->buffer_size - BYTES_PER_WORD);
}

#else

#define obj_offset(x)			0
#define obj_size(cachep)		(cachep->buffer_size)
#define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long *)NULL;})
#define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long *)NULL;})
#define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})

#endif

/*
 * Maximum size of an obj (in 2^order pages)
 * and absolute limit for the gfp order.
 */
#if defined(CONFIG_LARGE_ALLOCS)
#define	MAX_OBJ_ORDER	13	/* up to 32Mb */
#define	MAX_GFP_ORDER	13	/* up to 32Mb */
#elif defined(CONFIG_MMU)
#define	MAX_OBJ_ORDER	5	/* 32 pages */
#define	MAX_GFP_ORDER	5	/* 32 pages */
#else
#define	MAX_OBJ_ORDER	8	/* up to 1Mb */
#define	MAX_GFP_ORDER	8	/* up to 1Mb */
#endif

/*
 * Do not go above this order unless 0 objects fit into the slab.
 */
#define	BREAK_GFP_ORDER_HI	1
#define	BREAK_GFP_ORDER_LO	0
static int slab_break_gfp_order = BREAK_GFP_ORDER_LO;

/* Functions for storing/retrieving the cachep and/or slab from the
 * global 'mem_map'. These are used to find the slab an obj belongs to.
 * With kfree(), these are used to find the cache which an obj belongs to.
 */
static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
{
	page->lru.next = (struct list_head *)cache;
}

static inline struct kmem_cache *page_get_cache(struct page *page)
{
	return (struct kmem_cache *)page->lru.next;
}

static inline void page_set_slab(struct page *page, struct slab *slab)
{
	page->lru.prev = (struct list_head *)slab;
}

static inline struct slab *page_get_slab(struct page *page)
{
	return (struct slab *)page->lru.prev;
}

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page = virt_to_page(obj);
	return page_get_cache(page);
}

static inline struct slab *virt_to_slab(const void *obj)
{
	struct page *page = virt_to_page(obj);
	return page_get_slab(page);
}

/* These are the default caches for kmalloc. Custom caches can have other sizes. */
struct cache_sizes malloc_sizes[] = {
#define CACHE(x) { .cs_size = (x) },
#include <linux/kmalloc_sizes.h>
	CACHE(ULONG_MAX)
#undef CACHE
};
EXPORT_SYMBOL(malloc_sizes);

/* Must match cache_sizes above. Out of line to keep cache footprint low. */
struct cache_names {
	char *name;
	char *name_dma;
};

static struct cache_names __initdata cache_names[] = {
#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
#include <linux/kmalloc_sizes.h>
	{NULL,}
#undef CACHE
};

static struct arraycache_init initarray_cache __initdata =
	{ {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
static struct arraycache_init initarray_generic =
	{ {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };

/* internal cache of cache description objs */
static struct kmem_cache cache_cache = {
	.batchcount = 1,
	.limit = BOOT_CPUCACHE_ENTRIES,
	.shared = 1,
	.buffer_size = sizeof(struct kmem_cache),
	.flags = SLAB_NO_REAP,
	.spinlock = SPIN_LOCK_UNLOCKED,
	.name = "kmem_cache",
#if DEBUG
	.obj_size = sizeof(struct kmem_cache),
#endif
};

/* Guard access to the cache-chain. */
static DEFINE_MUTEX(cache_chain_mutex);
static struct list_head cache_chain;

/*
 * vm_enough_memory() looks at this to determine how many
 * slab-allocated pages are possibly freeable under pressure
 *
 * SLAB_RECLAIM_ACCOUNT turns this on per-slab
 */
atomic_t slab_reclaim_pages;

/*
 * chicken and egg problem: delay the per-cpu array allocation
 * until the general caches are up.
 */
static enum {
	NONE,
	PARTIAL_AC,
	PARTIAL_L3,
	FULL
} g_cpucache_up;

static DEFINE_PER_CPU(struct work_struct, reap_work);

static void free_block(struct kmem_cache *cachep, void **objpp, int len, int node);
static void enable_cpucache(struct kmem_cache *cachep);
static void cache_reap(void *unused);
static int __node_shrink(struct kmem_cache *cachep, int node);

static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
{
	return cachep->array[smp_processor_id()];
}

static inline struct kmem_cache *__find_general_cachep(size_t size, gfp_t gfpflags)
{
	struct cache_sizes *csizep = malloc_sizes;

#if DEBUG
	/* This happens if someone tries to call
	 * kmem_cache_create(), or __kmalloc(), before
	 * the generic caches are initialized.
	 */
	BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
#endif
	while (size > csizep->cs_size)
		csizep++;

	/*
	 * Really subtle: The last entry with cs->cs_size==ULONG_MAX
	 * has cs_{dma,}cachep==NULL. Thus no special case
	 * for large kmalloc calls required.
	 */
	if (unlikely(gfpflags & GFP_DMA))
		return csizep->cs_dmacachep;
	return csizep->cs_cachep;
}

struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
{
	return __find_general_cachep(size, gfpflags);
}
EXPORT_SYMBOL(kmem_find_general_cachep);
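
/*
 * For example (illustrative): kmem_find_general_cachep(100, GFP_KERNEL)
 * walks malloc_sizes[] until it hits the first entry with cs_size >= 100
 * and returns the "size-128" cache (or "size-128(DMA)" for GFP_DMA
 * requests), per the cache_names[] table above.
 */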

static size_t slab_mgmt_size(size_t nr_objs, size_t align)
{
	return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
}

/* Calculate the number of objects and left-over bytes for a given
   buffer size. */
static void cache_estimate(unsigned long gfporder, size_t buffer_size,
			   size_t align, int flags, size_t *left_over,
			   unsigned int *num)
{
	int nr_objs;
	size_t mgmt_size;
	size_t slab_size = PAGE_SIZE << gfporder;

	/*
	 * The slab management structure can be either off the slab or
	 * on it. For the latter case, the memory allocated for a
	 * slab is used for:
	 *
	 * - The struct slab
	 * - One kmem_bufctl_t for each object
	 * - Padding to respect alignment of @align
	 * - @buffer_size bytes for each object
	 *
	 * If the slab management structure is off the slab, then the
	 * alignment will already be calculated into the size. Because
	 * the slabs are all pages aligned, the objects will be at the
	 * correct alignment when allocated.
	 */
	if (flags & CFLGS_OFF_SLAB) {
		mgmt_size = 0;
		nr_objs = slab_size / buffer_size;

		if (nr_objs > SLAB_LIMIT)
			nr_objs = SLAB_LIMIT;
	} else {
		/*
		 * Ignore padding for the initial guess. The padding
		 * is at most @align-1 bytes, and @buffer_size is at
		 * least @align. In the worst case, this result will
		 * be one greater than the number of objects that fit
		 * into the memory allocation when taking the padding
		 * into account.
		 */
		nr_objs = (slab_size - sizeof(struct slab)) /
			  (buffer_size + sizeof(kmem_bufctl_t));

		/*
		 * This calculated number will be either the right
		 * amount, or one greater than what we want.
		 */
		if (slab_mgmt_size(nr_objs, align) + nr_objs*buffer_size
		    > slab_size)
			nr_objs--;

		if (nr_objs > SLAB_LIMIT)
			nr_objs = SLAB_LIMIT;

		mgmt_size = slab_mgmt_size(nr_objs, align);
	}
	*num = nr_objs;
	*left_over = slab_size - nr_objs*buffer_size - mgmt_size;
}

#define slab_error(cachep, msg) __slab_error(__FUNCTION__, cachep, msg)

static void __slab_error(const char *function, struct kmem_cache *cachep, char *msg)
{
	printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
	       function, cachep->name, msg);
	dump_stack();
}

#ifdef CONFIG_NUMA
/*
 * Special reaping functions for NUMA systems called from cache_reap().
 * These take care of doing round robin flushing of alien caches (containing
 * objects freed on different nodes from which they were allocated) and the
 * flushing of remote pcps by calling drain_node_pages.
 */
static DEFINE_PER_CPU(unsigned long, reap_node);

static void init_reap_node(int cpu)
{
	int node;

	node = next_node(cpu_to_node(cpu), node_online_map);
	if (node == MAX_NUMNODES)
		node = 0;

	__get_cpu_var(reap_node) = node;
}

static void next_reap_node(void)
{
	int node = __get_cpu_var(reap_node);

	/*
	 * Also drain per cpu pages on remote zones
	 */
	if (node != numa_node_id())
		drain_node_pages(node);

	node = next_node(node, node_online_map);
	if (unlikely(node >= MAX_NUMNODES))
		node = first_node(node_online_map);
	__get_cpu_var(reap_node) = node;
}

#else
#define init_reap_node(cpu) do { } while (0)
#define next_reap_node(void) do { } while (0)
#endif

/*
 * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
 * via the workqueue/eventd.
 * Add the CPU number into the expiration time to minimize the possibility of
 * the CPUs getting into lockstep and contending for the global cache chain
 * lock.
 */
static void __devinit start_cpu_timer(int cpu)
{
	struct work_struct *reap_work = &per_cpu(reap_work, cpu);

	/*
	 * When this gets called from do_initcalls via cpucache_init(),
	 * init_workqueues() has already run, so keventd will be setup
	 * at that time.
	 */
	if (keventd_up() && reap_work->func == NULL) {
		init_reap_node(cpu);
		INIT_WORK(reap_work, cache_reap, NULL);
		schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu);
	}
}

static struct array_cache *alloc_arraycache(int node, int entries,
					    int batchcount)
{
	int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
	struct array_cache *nc = NULL;

	nc = kmalloc_node(memsize, GFP_KERNEL, node);
	if (nc) {
		nc->avail = 0;
		nc->limit = entries;
		nc->batchcount = batchcount;
		nc->touched = 0;
		spin_lock_init(&nc->lock);
	}
	return nc;
}

#ifdef CONFIG_NUMA
static void *__cache_alloc_node(struct kmem_cache *, gfp_t, int);

static struct array_cache **alloc_alien_cache(int node, int limit)
{
	struct array_cache **ac_ptr;
	int memsize = sizeof(void *) * MAX_NUMNODES;
	int i;

	if (limit > 1)
		limit = 12;
	ac_ptr = kmalloc_node(memsize, GFP_KERNEL, node);
	if (ac_ptr) {
		for_each_node(i) {
			if (i == node || !node_online(i)) {
				ac_ptr[i] = NULL;
				continue;
			}
			ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d);
			if (!ac_ptr[i]) {
				/* unwind the caches allocated so far */
				for (i--; i >= 0; i--)
					kfree(ac_ptr[i]);
				kfree(ac_ptr);
				return NULL;
			}
		}
	}
	return ac_ptr;
}

static void free_alien_cache(struct array_cache **ac_ptr)
{
	int i;

	if (!ac_ptr)
		return;

	for_each_node(i)
		kfree(ac_ptr[i]);

	kfree(ac_ptr);
}

static void __drain_alien_cache(struct kmem_cache *cachep,
				struct array_cache *ac, int node)
{
	struct kmem_list3 *rl3 = cachep->nodelists[node];

	if (ac->avail) {
		spin_lock(&rl3->list_lock);
		free_block(cachep, ac->entry, ac->avail, node);
		ac->avail = 0;
		spin_unlock(&rl3->list_lock);
	}
}

/*
 * Called from cache_reap() to regularly drain alien caches round robin.
 */
static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
{
	int node = __get_cpu_var(reap_node);

	if (l3->alien) {
		struct array_cache *ac = l3->alien[node];
		if (ac && ac->avail) {
			spin_lock_irq(&ac->lock);
			__drain_alien_cache(cachep, ac, node);
			spin_unlock_irq(&ac->lock);
		}
	}
}

static void drain_alien_cache(struct kmem_cache *cachep, struct array_cache **alien)
{
	int i = 0;
	struct array_cache *ac;
	unsigned long flags;

	for_each_online_node(i) {
		ac = alien[i];
		if (ac) {
			spin_lock_irqsave(&ac->lock, flags);
			__drain_alien_cache(cachep, ac, i);
			spin_unlock_irqrestore(&ac->lock, flags);
		}
	}
}
#else

#define drain_alien_cache(cachep, alien) do { } while (0)
#define reap_alien(cachep, l3) do { } while (0)

static inline struct array_cache **alloc_alien_cache(int node, int limit)
{
	return (struct array_cache **)0x01020304ul;
}

static inline void free_alien_cache(struct array_cache **ac_ptr)
{
}

#endif
|  | 976 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 977 | static int __devinit cpuup_callback(struct notifier_block *nfb, | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 978 | unsigned long action, void *hcpu) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 979 | { | 
|  | 980 | long cpu = (long)hcpu; | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 981 | struct kmem_cache *cachep; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 982 | struct kmem_list3 *l3 = NULL; | 
|  | 983 | int node = cpu_to_node(cpu); | 
|  | 984 | int memsize = sizeof(struct kmem_list3); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 985 |  | 
|  | 986 | switch (action) { | 
|  | 987 | case CPU_UP_PREPARE: | 
| Ingo Molnar | fc0abb1 | 2006-01-18 17:42:33 -0800 | [diff] [blame] | 988 | mutex_lock(&cache_chain_mutex); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 989 | /* We need to do this right at the beginning, since the | 
|  | 990 | * alloc_arraycache() calls are going to use this list. | 
|  | 991 | * kmalloc_node() allows us to add the slab to the right | 
|  | 992 | * kmem_list3, and not to this cpu's kmem_list3. | 
|  | 993 | */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 994 |  | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 995 | list_for_each_entry(cachep, &cache_chain, next) { | 
|  | 996 | /* Set up the kmem_list3 for this node before we can | 
|  | 997 | * begin anything. Make sure some other cpu on this | 
|  | 998 | * node has not already allocated it. | 
|  | 999 | */ | 
|  | 1000 | if (!cachep->nodelists[node]) { | 
|  | 1001 | if (!(l3 = kmalloc_node(memsize, | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1002 | GFP_KERNEL, node))) | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1003 | goto bad; | 
|  | 1004 | kmem_list3_init(l3); | 
|  | 1005 | l3->next_reap = jiffies + REAPTIMEOUT_LIST3 + | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1006 | ((unsigned long)cachep) % REAPTIMEOUT_LIST3; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1007 |  | 
| Ravikiran G Thirumalai | 4484ebf | 2006-02-04 23:27:59 -0800 | [diff] [blame] | 1008 | /* | 
|  | 1009 | * The l3s don't come and go as CPUs come and | 
|  | 1010 | * go.  cache_chain_mutex is sufficient | 
|  | 1011 | * protection here. | 
|  | 1012 | */ | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1013 | cachep->nodelists[node] = l3; | 
|  | 1014 | } | 
|  | 1015 |  | 
|  | 1016 | spin_lock_irq(&cachep->nodelists[node]->list_lock); | 
|  | 1017 | cachep->nodelists[node]->free_limit = | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1018 | (1 + nr_cpus_node(node)) * | 
|  | 1019 | cachep->batchcount + cachep->num; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1020 | spin_unlock_irq(&cachep->nodelists[node]->list_lock); | 
|  | 1021 | } | 
|  | 1022 |  | 
|  | 1023 | /* Now we can go ahead with allocating the shared arrays | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1024 | and array caches */ | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1025 | list_for_each_entry(cachep, &cache_chain, next) { | 
| Tobias Klauser | cd105df | 2006-01-08 01:00:59 -0800 | [diff] [blame] | 1026 | struct array_cache *nc; | 
| Ravikiran G Thirumalai | 4484ebf | 2006-02-04 23:27:59 -0800 | [diff] [blame] | 1027 | struct array_cache *shared; | 
|  | 1028 | struct array_cache **alien; | 
| Tobias Klauser | cd105df | 2006-01-08 01:00:59 -0800 | [diff] [blame] | 1029 |  | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1030 | nc = alloc_arraycache(node, cachep->limit, | 
| Ravikiran G Thirumalai | 4484ebf | 2006-02-04 23:27:59 -0800 | [diff] [blame] | 1031 | cachep->batchcount); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1032 | if (!nc) | 
|  | 1033 | goto bad; | 
| Ravikiran G Thirumalai | 4484ebf | 2006-02-04 23:27:59 -0800 | [diff] [blame] | 1034 | shared = alloc_arraycache(node, | 
|  | 1035 | cachep->shared * cachep->batchcount, | 
|  | 1036 | 0xbaadf00d); | 
|  | 1037 | if (!shared) | 
|  | 1038 | goto bad; | 
| Linus Torvalds | 7a21ef6 | 2006-02-05 11:26:38 -0800 | [diff] [blame] | 1039 |  | 
| Ravikiran G Thirumalai | 4484ebf | 2006-02-04 23:27:59 -0800 | [diff] [blame] | 1040 | alien = alloc_alien_cache(node, cachep->limit); | 
|  | 1041 | if (!alien) | 
|  | 1042 | goto bad; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1043 | cachep->array[cpu] = nc; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1044 |  | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1045 | l3 = cachep->nodelists[node]; | 
|  | 1046 | BUG_ON(!l3); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1047 |  | 
| Ravikiran G Thirumalai | 4484ebf | 2006-02-04 23:27:59 -0800 | [diff] [blame] | 1048 | spin_lock_irq(&l3->list_lock); | 
|  | 1049 | if (!l3->shared) { | 
|  | 1050 | /* | 
|  | 1051 | * We are serialised from CPU_DEAD or | 
|  | 1052 | * CPU_UP_CANCELLED by the cpucontrol lock | 
|  | 1053 | */ | 
|  | 1054 | l3->shared = shared; | 
|  | 1055 | shared = NULL; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1056 | } | 
| Ravikiran G Thirumalai | 4484ebf | 2006-02-04 23:27:59 -0800 | [diff] [blame] | 1057 | #ifdef CONFIG_NUMA | 
|  | 1058 | if (!l3->alien) { | 
|  | 1059 | l3->alien = alien; | 
|  | 1060 | alien = NULL; | 
|  | 1061 | } | 
|  | 1062 | #endif | 
|  | 1063 | spin_unlock_irq(&l3->list_lock); | 
|  | 1064 |  | 
|  | 1065 | kfree(shared); | 
|  | 1066 | free_alien_cache(alien); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1067 | } | 
| Ingo Molnar | fc0abb1 | 2006-01-18 17:42:33 -0800 | [diff] [blame] | 1068 | mutex_unlock(&cache_chain_mutex); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1069 | break; | 
|  | 1070 | case CPU_ONLINE: | 
|  | 1071 | start_cpu_timer(cpu); | 
|  | 1072 | break; | 
|  | 1073 | #ifdef CONFIG_HOTPLUG_CPU | 
|  | 1074 | case CPU_DEAD: | 
| Ravikiran G Thirumalai | 4484ebf | 2006-02-04 23:27:59 -0800 | [diff] [blame] | 1075 | /* | 
|  | 1076 | * Even if all the cpus of a node are down, we don't free the | 
|  | 1077 | * kmem_list3 of any cache. This is to avoid a race between | 
|  | 1078 | * cpu_down() and a kmalloc() allocation from another cpu for | 
|  | 1079 | * memory from the node of the cpu going down.  The list3 | 
|  | 1080 | * structure is usually allocated from kmem_cache_create() and | 
|  | 1081 | * gets destroyed at kmem_cache_destroy(). | 
|  | 1082 | */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1083 | /* fall thru */ | 
|  | 1084 | case CPU_UP_CANCELED: | 
| Ingo Molnar | fc0abb1 | 2006-01-18 17:42:33 -0800 | [diff] [blame] | 1085 | mutex_lock(&cache_chain_mutex); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1086 |  | 
|  | 1087 | list_for_each_entry(cachep, &cache_chain, next) { | 
|  | 1088 | struct array_cache *nc; | 
| Ravikiran G Thirumalai | 4484ebf | 2006-02-04 23:27:59 -0800 | [diff] [blame] | 1089 | struct array_cache *shared; | 
|  | 1090 | struct array_cache **alien; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1091 | cpumask_t mask; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1092 |  | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1093 | mask = node_to_cpumask(node); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1094 | /* cpu is dead; no one can alloc from it. */ | 
|  | 1095 | nc = cachep->array[cpu]; | 
|  | 1096 | cachep->array[cpu] = NULL; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1097 | l3 = cachep->nodelists[node]; | 
|  | 1098 |  | 
|  | 1099 | if (!l3) | 
| Ravikiran G Thirumalai | 4484ebf | 2006-02-04 23:27:59 -0800 | [diff] [blame] | 1100 | goto free_array_cache; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1101 |  | 
| Ravikiran G Thirumalai | ca3b9b9 | 2006-02-04 23:27:58 -0800 | [diff] [blame] | 1102 | spin_lock_irq(&l3->list_lock); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1103 |  | 
|  | 1104 | /* Free limit for this kmem_list3 */ | 
|  | 1105 | l3->free_limit -= cachep->batchcount; | 
|  | 1106 | if (nc) | 
| Christoph Lameter | ff69416 | 2005-09-22 21:44:02 -0700 | [diff] [blame] | 1107 | free_block(cachep, nc->entry, nc->avail, node); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1108 |  | 
|  | 1109 | if (!cpus_empty(mask)) { | 
| Ravikiran G Thirumalai | ca3b9b9 | 2006-02-04 23:27:58 -0800 | [diff] [blame] | 1110 | spin_unlock_irq(&l3->list_lock); | 
| Ravikiran G Thirumalai | 4484ebf | 2006-02-04 23:27:59 -0800 | [diff] [blame] | 1111 | goto free_array_cache; | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1112 | } | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1113 |  | 
| Ravikiran G Thirumalai | 4484ebf | 2006-02-04 23:27:59 -0800 | [diff] [blame] | 1114 | shared = l3->shared; | 
|  | 1115 | if (shared) { | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1116 | free_block(cachep, l3->shared->entry, | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1117 | l3->shared->avail, node); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1118 | l3->shared = NULL; | 
|  | 1119 | } | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1120 |  | 
| Ravikiran G Thirumalai | 4484ebf | 2006-02-04 23:27:59 -0800 | [diff] [blame] | 1121 | alien = l3->alien; | 
|  | 1122 | l3->alien = NULL; | 
|  | 1123 |  | 
|  | 1124 | spin_unlock_irq(&l3->list_lock); | 
|  | 1125 |  | 
|  | 1126 | kfree(shared); | 
|  | 1127 | if (alien) { | 
|  | 1128 | drain_alien_cache(cachep, alien); | 
|  | 1129 | free_alien_cache(alien); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1130 | } | 
| Ravikiran G Thirumalai | 4484ebf | 2006-02-04 23:27:59 -0800 | [diff] [blame] | 1131 | free_array_cache: | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1132 | kfree(nc); | 
|  | 1133 | } | 
| Ravikiran G Thirumalai | 4484ebf | 2006-02-04 23:27:59 -0800 | [diff] [blame] | 1134 | /* | 
|  | 1135 | * In the previous loop, all the objects were freed to | 
|  | 1136 | * the respective cache's slabs; now we can go ahead and | 
|  | 1137 | * shrink each nodelist to its limit. | 
|  | 1138 | */ | 
|  | 1139 | list_for_each_entry(cachep, &cache_chain, next) { | 
|  | 1140 | l3 = cachep->nodelists[node]; | 
|  | 1141 | if (!l3) | 
|  | 1142 | continue; | 
|  | 1143 | spin_lock_irq(&l3->list_lock); | 
|  | 1144 | /* free slabs belonging to this node */ | 
|  | 1145 | __node_shrink(cachep, node); | 
|  | 1146 | spin_unlock_irq(&l3->list_lock); | 
|  | 1147 | } | 
| Ingo Molnar | fc0abb1 | 2006-01-18 17:42:33 -0800 | [diff] [blame] | 1148 | mutex_unlock(&cache_chain_mutex); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1149 | break; | 
|  | 1150 | #endif | 
|  | 1151 | } | 
|  | 1152 | return NOTIFY_OK; | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1153 | bad: | 
| Ingo Molnar | fc0abb1 | 2006-01-18 17:42:33 -0800 | [diff] [blame] | 1154 | mutex_unlock(&cache_chain_mutex); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1155 | return NOTIFY_BAD; | 
|  | 1156 | } | 
|  | 1157 |  | 
|  | 1158 | static struct notifier_block cpucache_notifier = { &cpuup_callback, NULL, 0 }; | 
|  | 1159 |  | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1160 | /* | 
|  | 1161 | * swap the static kmem_list3 with kmalloc'ed memory | 
|  | 1162 | */ | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 1163 | static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list, int nodeid) | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1164 | { | 
|  | 1165 | struct kmem_list3 *ptr; | 
|  | 1166 |  | 
|  | 1167 | BUG_ON(cachep->nodelists[nodeid] != list); | 
|  | 1168 | ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, nodeid); | 
|  | 1169 | BUG_ON(!ptr); | 
|  | 1170 |  | 
|  | 1171 | local_irq_disable(); | 
|  | 1172 | memcpy(ptr, list, sizeof(struct kmem_list3)); | 
|  | 1173 | MAKE_ALL_LISTS(cachep, ptr, nodeid); | 
|  | 1174 | cachep->nodelists[nodeid] = ptr; | 
|  | 1175 | local_irq_enable(); | 
|  | 1176 | } | 
|  | 1177 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1178 | /* Initialisation. | 
|  | 1179 | * Called after the gfp() functions have been enabled, and before smp_init(). | 
|  | 1180 | */ | 
|  | 1181 | void __init kmem_cache_init(void) | 
|  | 1182 | { | 
|  | 1183 | size_t left_over; | 
|  | 1184 | struct cache_sizes *sizes; | 
|  | 1185 | struct cache_names *names; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1186 | int i; | 
| Jack Steiner | 07ed76b | 2006-03-07 21:55:46 -0800 | [diff] [blame] | 1187 | int order; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1188 |  | 
|  | 1189 | for (i = 0; i < NUM_INIT_LISTS; i++) { | 
|  | 1190 | kmem_list3_init(&initkmem_list3[i]); | 
|  | 1191 | if (i < MAX_NUMNODES) | 
|  | 1192 | cache_cache.nodelists[i] = NULL; | 
|  | 1193 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1194 |  | 
|  | 1195 | /* | 
|  | 1196 | * Fragmentation resistance on low memory - only use bigger | 
|  | 1197 | * page orders on machines with more than 32MB of memory. | 
|  | 1198 | */ | 
|  | 1199 | if (num_physpages > (32 << 20) >> PAGE_SHIFT) | 
|  | 1200 | slab_break_gfp_order = BREAK_GFP_ORDER_HI; | 
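|  |  | /* | 
|  |  |  * Illustration: with 4K pages (PAGE_SHIFT == 12) the threshold | 
|  |  |  * (32 << 20) >> PAGE_SHIFT evaluates to 8192 pages, i.e. 32MB. | 
|  |  |  */ | 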
|  | 1201 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1202 | /* Bootstrap is tricky, because several objects are allocated | 
|  | 1203 | * from caches that do not exist yet: | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 1204 | * 1) Initialize the cache_cache cache: it contains the struct kmem_cache | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1205 | *    structures of all caches, except cache_cache itself: cache_cache | 
|  | 1206 | *    is statically allocated. | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1207 | *    Initially an __init data area is used for the head array and the | 
|  | 1208 | *    kmem_list3 structures; it's replaced with a kmalloc-allocated | 
|  | 1209 | *    array at the end of the bootstrap. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1210 | * 2) Create the first kmalloc cache. | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 1211 | *    The struct kmem_cache for the new cache is allocated normally. | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1212 | *    An __init data area is used for the head array. | 
|  | 1213 | * 3) Create the remaining kmalloc caches, with minimally sized | 
|  | 1214 | *    head arrays. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1215 | * 4) Replace the __init data head arrays for cache_cache and the first | 
|  | 1216 | *    kmalloc cache with kmalloc allocated arrays. | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1217 | * 5) Replace the __init data for kmem_list3 for cache_cache and | 
|  | 1218 | *    the other caches with kmalloc-allocated memory. | 
|  | 1219 | * 6) Resize the head arrays of the kmalloc caches to their final sizes. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1220 | */ | 
|  | 1221 |  | 
|  | 1222 | /* 1) create the cache_cache */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1223 | INIT_LIST_HEAD(&cache_chain); | 
|  | 1224 | list_add(&cache_cache.next, &cache_chain); | 
|  | 1225 | cache_cache.colour_off = cache_line_size(); | 
|  | 1226 | cache_cache.array[smp_processor_id()] = &initarray_cache.cache; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1227 | cache_cache.nodelists[numa_node_id()] = &initkmem_list3[CACHE_CACHE]; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1228 |  | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 1229 | cache_cache.buffer_size = ALIGN(cache_cache.buffer_size, cache_line_size()); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1230 |  | 
| Jack Steiner | 07ed76b | 2006-03-07 21:55:46 -0800 | [diff] [blame] | 1231 | for (order = 0; order < MAX_ORDER; order++) { | 
|  | 1232 | cache_estimate(order, cache_cache.buffer_size, | 
|  | 1233 | cache_line_size(), 0, &left_over, &cache_cache.num); | 
|  | 1234 | if (cache_cache.num) | 
|  | 1235 | break; | 
|  | 1236 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1237 | if (!cache_cache.num) | 
|  | 1238 | BUG(); | 
| Jack Steiner | 07ed76b | 2006-03-07 21:55:46 -0800 | [diff] [blame] | 1239 | cache_cache.gfporder = order; | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1240 | cache_cache.colour = left_over / cache_cache.colour_off; | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1241 | cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) + | 
|  | 1242 | sizeof(struct slab), cache_line_size()); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1243 |  | 
|  | 1244 | /* 2+3) create the kmalloc caches */ | 
|  | 1245 | sizes = malloc_sizes; | 
|  | 1246 | names = cache_names; | 
|  | 1247 |  | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1248 | /* Initialize the caches that provide memory for the array caches | 
|  | 1249 | * and the kmem_list3 structures first. | 
|  | 1250 | * Without this, further allocations will BUG(). | 
|  | 1251 | */ | 
|  | 1252 |  | 
|  | 1253 | sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name, | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1254 | sizes[INDEX_AC].cs_size, | 
|  | 1255 | ARCH_KMALLOC_MINALIGN, | 
|  | 1256 | (ARCH_KMALLOC_FLAGS | | 
|  | 1257 | SLAB_PANIC), NULL, NULL); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1258 |  | 
|  | 1259 | if (INDEX_AC != INDEX_L3) | 
|  | 1260 | sizes[INDEX_L3].cs_cachep = | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1261 | kmem_cache_create(names[INDEX_L3].name, | 
|  | 1262 | sizes[INDEX_L3].cs_size, | 
|  | 1263 | ARCH_KMALLOC_MINALIGN, | 
|  | 1264 | (ARCH_KMALLOC_FLAGS | SLAB_PANIC), NULL, | 
|  | 1265 | NULL); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1266 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1267 | while (sizes->cs_size != ULONG_MAX) { | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1268 | /* | 
|  | 1269 | * For performance, all the general caches are L1 aligned. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1270 | * This should be particularly beneficial on SMP boxes, as it | 
|  | 1271 | * eliminates "false sharing". | 
|  | 1272 | * Note: for systems short on memory, removing the alignment will | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1273 | * allow tighter packing of the smaller caches. | 
|  | 1274 | */ | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1275 | if (!sizes->cs_cachep) | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1276 | sizes->cs_cachep = kmem_cache_create(names->name, | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1277 | sizes->cs_size, | 
|  | 1278 | ARCH_KMALLOC_MINALIGN, | 
|  | 1279 | (ARCH_KMALLOC_FLAGS | 
|  | 1280 | | SLAB_PANIC), | 
|  | 1281 | NULL, NULL); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1282 |  | 
|  | 1283 | /* Inc off-slab bufctl limit until the ceiling is hit. */ | 
|  | 1284 | if (!(OFF_SLAB(sizes->cs_cachep))) { | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1285 | offslab_limit = sizes->cs_size - sizeof(struct slab); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1286 | offslab_limit /= sizeof(kmem_bufctl_t); | 
|  | 1287 | } | 
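|  |  | /* | 
|  |  |  * Illustration (assuming a 32-bit box with a 4-byte kmem_bufctl_t | 
|  |  |  * and a struct slab of roughly 28 bytes): for the 4096-byte general | 
|  |  |  * cache this gives offslab_limit = (4096 - 28) / 4, about 1017 | 
|  |  |  * bufctl entries per off-slab slab descriptor. | 
|  |  |  */ | 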
|  | 1288 |  | 
|  | 1289 | sizes->cs_dmacachep = kmem_cache_create(names->name_dma, | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1290 | sizes->cs_size, | 
|  | 1291 | ARCH_KMALLOC_MINALIGN, | 
|  | 1292 | (ARCH_KMALLOC_FLAGS | | 
|  | 1293 | SLAB_CACHE_DMA | | 
|  | 1294 | SLAB_PANIC), NULL, | 
|  | 1295 | NULL); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1296 |  | 
|  | 1297 | sizes++; | 
|  | 1298 | names++; | 
|  | 1299 | } | 
|  | 1300 | /* 4) Replace the bootstrap head arrays */ | 
|  | 1301 | { | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1302 | void *ptr; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1303 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1304 | ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1305 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1306 | local_irq_disable(); | 
| Pekka Enberg | 9a2dba4 | 2006-02-01 03:05:49 -0800 | [diff] [blame] | 1307 | BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache); | 
|  | 1308 | memcpy(ptr, cpu_cache_get(&cache_cache), | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1309 | sizeof(struct arraycache_init)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1310 | cache_cache.array[smp_processor_id()] = ptr; | 
|  | 1311 | local_irq_enable(); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1312 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1313 | ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1314 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1315 | local_irq_disable(); | 
| Pekka Enberg | 9a2dba4 | 2006-02-01 03:05:49 -0800 | [diff] [blame] | 1316 | BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep) | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1317 | != &initarray_generic.cache); | 
| Pekka Enberg | 9a2dba4 | 2006-02-01 03:05:49 -0800 | [diff] [blame] | 1318 | memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep), | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1319 | sizeof(struct arraycache_init)); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1320 | malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] = | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1321 | ptr; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1322 | local_irq_enable(); | 
|  | 1323 | } | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1324 | /* 5) Replace the bootstrap kmem_list3's */ | 
|  | 1325 | { | 
|  | 1326 | int node; | 
|  | 1327 | /* Replace the static kmem_list3 structures for the boot cpu */ | 
|  | 1328 | init_list(&cache_cache, &initkmem_list3[CACHE_CACHE], | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1329 | numa_node_id()); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1330 |  | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1331 | for_each_online_node(node) { | 
|  | 1332 | init_list(malloc_sizes[INDEX_AC].cs_cachep, | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1333 | &initkmem_list3[SIZE_AC + node], node); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1334 |  | 
|  | 1335 | if (INDEX_AC != INDEX_L3) { | 
|  | 1336 | init_list(malloc_sizes[INDEX_L3].cs_cachep, | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1337 | &initkmem_list3[SIZE_L3 + node], | 
|  | 1338 | node); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1339 | } | 
|  | 1340 | } | 
|  | 1341 | } | 
|  | 1342 |  | 
|  | 1343 | /* 6) resize the head arrays to their final sizes */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1344 | { | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 1345 | struct kmem_cache *cachep; | 
| Ingo Molnar | fc0abb1 | 2006-01-18 17:42:33 -0800 | [diff] [blame] | 1346 | mutex_lock(&cache_chain_mutex); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1347 | list_for_each_entry(cachep, &cache_chain, next) | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1348 | enable_cpucache(cachep); | 
| Ingo Molnar | fc0abb1 | 2006-01-18 17:42:33 -0800 | [diff] [blame] | 1349 | mutex_unlock(&cache_chain_mutex); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1350 | } | 
|  | 1351 |  | 
|  | 1352 | /* Done! */ | 
|  | 1353 | g_cpucache_up = FULL; | 
|  | 1354 |  | 
|  | 1355 | /* Register a cpu startup notifier callback | 
| Pekka Enberg | 9a2dba4 | 2006-02-01 03:05:49 -0800 | [diff] [blame] | 1356 | * that initializes the per-cpu arrays behind cpu_cache_get() for all new cpus | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1357 | */ | 
|  | 1358 | register_cpu_notifier(&cpucache_notifier); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1359 |  | 
|  | 1360 | /* The reap timers are started later, with a module init call: | 
|  | 1361 | * That part of the kernel is not yet operational. | 
|  | 1362 | */ | 
|  | 1363 | } | 
|  | 1364 |  | 
|  | 1365 | static int __init cpucache_init(void) | 
|  | 1366 | { | 
|  | 1367 | int cpu; | 
|  | 1368 |  | 
|  | 1369 | /* | 
|  | 1370 | * Register the timers that return unneeded | 
|  | 1371 | * pages to gfp. | 
|  | 1372 | */ | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1373 | for_each_online_cpu(cpu) | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1374 | start_cpu_timer(cpu); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1375 |  | 
|  | 1376 | return 0; | 
|  | 1377 | } | 
|  | 1378 |  | 
|  | 1379 | __initcall(cpucache_init); | 
|  | 1380 |  | 
|  | 1381 | /* | 
|  | 1382 | * Interface to system's page allocator. No need to hold the cache-lock. | 
|  | 1383 | * | 
|  | 1384 | * If we requested dmaable memory, we will get it. Even if we | 
|  | 1385 | * did not request dmaable memory, we might get it, but that | 
|  | 1386 | * would be relatively rare and ignorable. | 
|  | 1387 | */ | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 1388 | static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1389 | { | 
|  | 1390 | struct page *page; | 
|  | 1391 | void *addr; | 
|  | 1392 | int i; | 
|  | 1393 |  | 
|  | 1394 | flags |= cachep->gfpflags; | 
| Christoph Lameter | 50c85a1 | 2005-11-13 16:06:47 -0800 | [diff] [blame] | 1395 | page = alloc_pages_node(nodeid, flags, cachep->gfporder); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1396 | if (!page) | 
|  | 1397 | return NULL; | 
|  | 1398 | addr = page_address(page); | 
|  | 1399 |  | 
|  | 1400 | i = (1 << cachep->gfporder); | 
|  | 1401 | if (cachep->flags & SLAB_RECLAIM_ACCOUNT) | 
|  | 1402 | atomic_add(i, &slab_reclaim_pages); | 
|  | 1403 | add_page_state(nr_slab, i); | 
|  | 1404 | while (i--) { | 
|  | 1405 | SetPageSlab(page); | 
|  | 1406 | page++; | 
|  | 1407 | } | 
|  | 1408 | return addr; | 
|  | 1409 | } | 
|  | 1410 |  | 
|  | 1411 | /* | 
|  | 1412 | * Interface to system's page release. | 
|  | 1413 | */ | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 1414 | static void kmem_freepages(struct kmem_cache *cachep, void *addr) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1415 | { | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1416 | unsigned long i = (1 << cachep->gfporder); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1417 | struct page *page = virt_to_page(addr); | 
|  | 1418 | const unsigned long nr_freed = i; | 
|  | 1419 |  | 
|  | 1420 | while (i--) { | 
|  | 1421 | if (!TestClearPageSlab(page)) | 
|  | 1422 | BUG(); | 
|  | 1423 | page++; | 
|  | 1424 | } | 
|  | 1425 | sub_page_state(nr_slab, nr_freed); | 
|  | 1426 | if (current->reclaim_state) | 
|  | 1427 | current->reclaim_state->reclaimed_slab += nr_freed; | 
|  | 1428 | free_pages((unsigned long)addr, cachep->gfporder); | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1429 | if (cachep->flags & SLAB_RECLAIM_ACCOUNT) | 
|  | 1430 | atomic_sub(1 << cachep->gfporder, &slab_reclaim_pages); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1431 | } | 
|  | 1432 |  | 
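|  |  | /* | 
|  |  |  * RCU callback: actually release the slab pages once a grace period | 
|  |  |  * has elapsed.  Scheduled via call_rcu() from slab_destroy() for | 
|  |  |  * caches created with SLAB_DESTROY_BY_RCU. | 
|  |  |  */ | 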
|  | 1433 | static void kmem_rcu_free(struct rcu_head *head) | 
|  | 1434 | { | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1435 | struct slab_rcu *slab_rcu = (struct slab_rcu *)head; | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 1436 | struct kmem_cache *cachep = slab_rcu->cachep; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1437 |  | 
|  | 1438 | kmem_freepages(cachep, slab_rcu->addr); | 
|  | 1439 | if (OFF_SLAB(cachep)) | 
|  | 1440 | kmem_cache_free(cachep->slabp_cache, slab_rcu); | 
|  | 1441 | } | 
|  | 1442 |  | 
|  | 1443 | #if DEBUG | 
|  | 1444 |  | 
|  | 1445 | #ifdef CONFIG_DEBUG_PAGEALLOC | 
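|  |  | /* | 
|  |  |  * Store a partial stack trace into the object: an opening marker | 
|  |  |  * (0x12345678), the caller, the cpu, then as many return addresses | 
|  |  |  * as fit, terminated by a closing marker (0x87654321). | 
|  |  |  */ | 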
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 1446 | static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr, | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1447 | unsigned long caller) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1448 | { | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 1449 | int size = obj_size(cachep); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1450 |  | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 1451 | addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)]; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1452 |  | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1453 | if (size < 5 * sizeof(unsigned long)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1454 | return; | 
|  | 1455 |  | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1456 | *addr++ = 0x12345678; | 
|  | 1457 | *addr++ = caller; | 
|  | 1458 | *addr++ = smp_processor_id(); | 
|  | 1459 | size -= 3 * sizeof(unsigned long); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1460 | { | 
|  | 1461 | unsigned long *sptr = &caller; | 
|  | 1462 | unsigned long svalue; | 
|  | 1463 |  | 
|  | 1464 | while (!kstack_end(sptr)) { | 
|  | 1465 | svalue = *sptr++; | 
|  | 1466 | if (kernel_text_address(svalue)) { | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1467 | *addr++ = svalue; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1468 | size -= sizeof(unsigned long); | 
|  | 1469 | if (size <= sizeof(unsigned long)) | 
|  | 1470 | break; | 
|  | 1471 | } | 
|  | 1472 | } | 
|  | 1473 |  | 
|  | 1474 | } | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1475 | *addr++ = 0x87654321; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1476 | } | 
|  | 1477 | #endif | 
|  | 1478 |  | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 1479 | static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1480 | { | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 1481 | int size = obj_size(cachep); | 
|  | 1482 | addr = &((char *)addr)[obj_offset(cachep)]; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1483 |  | 
|  | 1484 | memset(addr, val, size); | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1485 | *(unsigned char *)(addr + size - 1) = POISON_END; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1486 | } | 
|  | 1487 |  | 
|  | 1488 | static void dump_line(char *data, int offset, int limit) | 
|  | 1489 | { | 
|  | 1490 | int i; | 
|  | 1491 | printk(KERN_ERR "%03x:", offset); | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1492 | for (i = 0; i < limit; i++) { | 
|  | 1493 | printk(" %02x", (unsigned char)data[offset + i]); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1494 | } | 
|  | 1495 | printk("\n"); | 
|  | 1496 | } | 
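|  |  | /* | 
|  |  |  * Illustration: a freed, poisoned object dumped with dump_line() | 
|  |  |  * looks like this (assuming the usual 0x6b POISON_FREE fill and | 
|  |  |  * the 0xa5 POISON_END last byte): | 
|  |  |  * | 
|  |  |  *	000: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b a5 | 
|  |  |  */ | 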
|  | 1497 | #endif | 
|  | 1498 |  | 
|  | 1499 | #if DEBUG | 
|  | 1500 |  | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 1501 | static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1502 | { | 
|  | 1503 | int i, size; | 
|  | 1504 | char *realobj; | 
|  | 1505 |  | 
|  | 1506 | if (cachep->flags & SLAB_RED_ZONE) { | 
|  | 1507 | printk(KERN_ERR "Redzone: 0x%lx/0x%lx.\n", | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1508 | *dbg_redzone1(cachep, objp), | 
|  | 1509 | *dbg_redzone2(cachep, objp)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1510 | } | 
|  | 1511 |  | 
|  | 1512 | if (cachep->flags & SLAB_STORE_USER) { | 
|  | 1513 | printk(KERN_ERR "Last user: [<%p>]", | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1514 | *dbg_userword(cachep, objp)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1515 | print_symbol("(%s)", | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1516 | (unsigned long)*dbg_userword(cachep, objp)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1517 | printk("\n"); | 
|  | 1518 | } | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 1519 | realobj = (char *)objp + obj_offset(cachep); | 
|  | 1520 | size = obj_size(cachep); | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1521 | for (i = 0; i < size && lines; i += 16, lines--) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1522 | int limit; | 
|  | 1523 | limit = 16; | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1524 | if (i + limit > size) | 
|  | 1525 | limit = size - i; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1526 | dump_line(realobj, i, limit); | 
|  | 1527 | } | 
|  | 1528 | } | 
|  | 1529 |  | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 1530 | static void check_poison_obj(struct kmem_cache *cachep, void *objp) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1531 | { | 
|  | 1532 | char *realobj; | 
|  | 1533 | int size, i; | 
|  | 1534 | int lines = 0; | 
|  | 1535 |  | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 1536 | realobj = (char *)objp + obj_offset(cachep); | 
|  | 1537 | size = obj_size(cachep); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1538 |  | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1539 | for (i = 0; i < size; i++) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1540 | char exp = POISON_FREE; | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1541 | if (i == size - 1) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1542 | exp = POISON_END; | 
|  | 1543 | if (realobj[i] != exp) { | 
|  | 1544 | int limit; | 
|  | 1545 | /* Mismatch ! */ | 
|  | 1546 | /* Print header */ | 
|  | 1547 | if (lines == 0) { | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1548 | printk(KERN_ERR | 
|  | 1549 | "Slab corruption: start=%p, len=%d\n", | 
|  | 1550 | realobj, size); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1551 | print_objinfo(cachep, objp, 0); | 
|  | 1552 | } | 
|  | 1553 | /* Hexdump the affected line */ | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1554 | i = (i / 16) * 16; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1555 | limit = 16; | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1556 | if (i + limit > size) | 
|  | 1557 | limit = size - i; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1558 | dump_line(realobj, i, limit); | 
|  | 1559 | i += 16; | 
|  | 1560 | lines++; | 
|  | 1561 | /* Limit to 5 lines */ | 
|  | 1562 | if (lines > 5) | 
|  | 1563 | break; | 
|  | 1564 | } | 
|  | 1565 | } | 
|  | 1566 | if (lines != 0) { | 
|  | 1567 | /* Print some data about the neighboring objects, if they | 
|  | 1568 | * exist: | 
|  | 1569 | */ | 
| Pekka Enberg | 6ed5eb221 | 2006-02-01 03:05:49 -0800 | [diff] [blame] | 1570 | struct slab *slabp = virt_to_slab(objp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1571 | int objnr; | 
|  | 1572 |  | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 1573 | objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1574 | if (objnr) { | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 1575 | objp = slabp->s_mem + (objnr - 1) * cachep->buffer_size; | 
|  | 1576 | realobj = (char *)objp + obj_offset(cachep); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1577 | printk(KERN_ERR "Prev obj: start=%p, len=%d\n", | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1578 | realobj, size); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1579 | print_objinfo(cachep, objp, 2); | 
|  | 1580 | } | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1581 | if (objnr + 1 < cachep->num) { | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 1582 | objp = slabp->s_mem + (objnr + 1) * cachep->buffer_size; | 
|  | 1583 | realobj = (char *)objp + obj_offset(cachep); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1584 | printk(KERN_ERR "Next obj: start=%p, len=%d\n", | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1585 | realobj, size); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1586 | print_objinfo(cachep, objp, 2); | 
|  | 1587 | } | 
|  | 1588 | } | 
|  | 1589 | } | 
|  | 1590 | #endif | 
|  | 1591 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1592 | #if DEBUG | 
| Matthew Dobson | 12dd36f | 2006-02-01 03:05:46 -0800 | [diff] [blame] | 1593 | /** | 
|  | 1594 | * slab_destroy_objs - call the registered destructor for each object in | 
|  | 1595 | *      a slab that is to be destroyed | 
|  |  | * @cachep: cache the slab belongs to | 
|  |  | * @slabp: slab whose objects are to be destroyed | 
|  | 1596 | */ | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 1597 | static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp) | 
| Matthew Dobson | 12dd36f | 2006-02-01 03:05:46 -0800 | [diff] [blame] | 1598 | { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1599 | int i; | 
|  | 1600 | for (i = 0; i < cachep->num; i++) { | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 1601 | void *objp = slabp->s_mem + cachep->buffer_size * i; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1602 |  | 
|  | 1603 | if (cachep->flags & SLAB_POISON) { | 
|  | 1604 | #ifdef CONFIG_DEBUG_PAGEALLOC | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 1605 | if ((cachep->buffer_size % PAGE_SIZE) == 0 | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1606 | && OFF_SLAB(cachep)) | 
|  | 1607 | kernel_map_pages(virt_to_page(objp), | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 1608 | cachep->buffer_size / PAGE_SIZE, | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1609 | 1); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1610 | else | 
|  | 1611 | check_poison_obj(cachep, objp); | 
|  | 1612 | #else | 
|  | 1613 | check_poison_obj(cachep, objp); | 
|  | 1614 | #endif | 
|  | 1615 | } | 
|  | 1616 | if (cachep->flags & SLAB_RED_ZONE) { | 
|  | 1617 | if (*dbg_redzone1(cachep, objp) != RED_INACTIVE) | 
|  | 1618 | slab_error(cachep, "start of a freed object " | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1619 | "was overwritten"); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1620 | if (*dbg_redzone2(cachep, objp) != RED_INACTIVE) | 
|  | 1621 | slab_error(cachep, "end of a freed object " | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1622 | "was overwritten"); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1623 | } | 
|  | 1624 | if (cachep->dtor && !(cachep->flags & SLAB_POISON)) | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 1625 | (cachep->dtor) (objp + obj_offset(cachep), cachep, 0); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1626 | } | 
| Matthew Dobson | 12dd36f | 2006-02-01 03:05:46 -0800 | [diff] [blame] | 1627 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1628 | #else | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 1629 | static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp) | 
| Matthew Dobson | 12dd36f | 2006-02-01 03:05:46 -0800 | [diff] [blame] | 1630 | { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1631 | if (cachep->dtor) { | 
|  | 1632 | int i; | 
|  | 1633 | for (i = 0; i < cachep->num; i++) { | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 1634 | void *objp = slabp->s_mem + cachep->buffer_size * i; | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1635 | (cachep->dtor) (objp, cachep, 0); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1636 | } | 
|  | 1637 | } | 
| Matthew Dobson | 12dd36f | 2006-02-01 03:05:46 -0800 | [diff] [blame] | 1638 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1639 | #endif | 
|  | 1640 |  | 
| Matthew Dobson | 12dd36f | 2006-02-01 03:05:46 -0800 | [diff] [blame] | 1641 | /** | 
|  | 1642 | * slab_destroy - destroy all the objects in a slab and release the memory | 
|  |  | * @cachep: cache the slab belongs to | 
|  |  | * @slabp: slab to destroy | 
|  |  | * | 
|  | 1643 | * Before calling, the slab must have been unlinked from the cache. | 
|  | 1644 | * The cache-lock is not held/needed. | 
|  | 1645 | */ | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 1646 | static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp) | 
| Matthew Dobson | 12dd36f | 2006-02-01 03:05:46 -0800 | [diff] [blame] | 1647 | { | 
|  | 1648 | void *addr = slabp->s_mem - slabp->colouroff; | 
|  | 1649 |  | 
|  | 1650 | slab_destroy_objs(cachep, slabp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1651 | if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) { | 
|  | 1652 | struct slab_rcu *slab_rcu; | 
|  | 1653 |  | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1654 | slab_rcu = (struct slab_rcu *)slabp; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1655 | slab_rcu->cachep = cachep; | 
|  | 1656 | slab_rcu->addr = addr; | 
|  | 1657 | call_rcu(&slab_rcu->head, kmem_rcu_free); | 
|  | 1658 | } else { | 
|  | 1659 | kmem_freepages(cachep, addr); | 
|  | 1660 | if (OFF_SLAB(cachep)) | 
|  | 1661 | kmem_cache_free(cachep->slabp_cache, slabp); | 
|  | 1662 | } | 
|  | 1663 | } | 
|  | 1664 |  | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 1665 | /* Set up all the kmem_list3s for a cache whose buffer_size is the same | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1666 | as the size of kmem_list3. */ | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 1667 | static void set_up_list3s(struct kmem_cache *cachep, int index) | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1668 | { | 
|  | 1669 | int node; | 
|  | 1670 |  | 
|  | 1671 | for_each_online_node(node) { | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1672 | cachep->nodelists[node] = &initkmem_list3[index + node]; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1673 | cachep->nodelists[node]->next_reap = jiffies + | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1674 | REAPTIMEOUT_LIST3 + | 
|  | 1675 | ((unsigned long)cachep) % REAPTIMEOUT_LIST3; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1676 | } | 
|  | 1677 | } | 
|  | 1678 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1679 | /** | 
| Randy.Dunlap | a70773d | 2006-02-01 03:05:52 -0800 | [diff] [blame] | 1680 | * calculate_slab_order - calculate size (page order) of slabs | 
|  | 1681 | * @cachep: pointer to the cache that is being created | 
|  | 1682 | * @size: size of objects to be created in this cache. | 
|  | 1683 | * @align: required alignment for the objects. | 
|  | 1684 | * @flags: slab allocation flags | 
|  | 1685 | * | 
|  | 1686 | * Also calculates the number of objects per slab. | 
| Pekka Enberg | 4d268eb | 2006-01-08 01:00:36 -0800 | [diff] [blame] | 1687 | * | 
|  | 1688 | * This could be made much more intelligent.  For now, try to avoid using | 
|  | 1689 | * high order pages for slabs.  When the gfp() functions are more friendly | 
|  | 1690 | * towards high-order requests, this should be changed. | 
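|  |  |  * | 
|  |  |  * The acceptance test (left_over * 8) <= (PAGE_SIZE << gfporder) | 
|  |  |  * stops the search once no more than 1/8th of the slab would be | 
|  |  |  * wasted; e.g. at order 0 with 4K pages, up to 512 leftover bytes | 
|  |  |  * are tolerated (illustrative figures). | 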
|  | 1691 | */ | 
| Randy Dunlap | ee13d78 | 2006-02-01 03:05:53 -0800 | [diff] [blame] | 1692 | static inline size_t calculate_slab_order(struct kmem_cache *cachep, | 
|  | 1693 | size_t size, size_t align, unsigned long flags) | 
| Pekka Enberg | 4d268eb | 2006-01-08 01:00:36 -0800 | [diff] [blame] | 1694 | { | 
|  | 1695 | size_t left_over = 0; | 
| Linus Torvalds | 9888e6f | 2006-03-06 17:44:43 -0800 | [diff] [blame] | 1696 | int gfporder; | 
| Pekka Enberg | 4d268eb | 2006-01-08 01:00:36 -0800 | [diff] [blame] | 1697 |  | 
| Linus Torvalds | 9888e6f | 2006-03-06 17:44:43 -0800 | [diff] [blame] | 1698 | for (gfporder = 0 ; gfporder <= MAX_GFP_ORDER; gfporder++) { | 
| Pekka Enberg | 4d268eb | 2006-01-08 01:00:36 -0800 | [diff] [blame] | 1699 | unsigned int num; | 
|  | 1700 | size_t remainder; | 
|  | 1701 |  | 
| Linus Torvalds | 9888e6f | 2006-03-06 17:44:43 -0800 | [diff] [blame] | 1702 | cache_estimate(gfporder, size, align, flags, &remainder, &num); | 
| Pekka Enberg | 4d268eb | 2006-01-08 01:00:36 -0800 | [diff] [blame] | 1703 | if (!num) | 
|  | 1704 | continue; | 
| Linus Torvalds | 9888e6f | 2006-03-06 17:44:43 -0800 | [diff] [blame] | 1705 |  | 
| Pekka Enberg | 4d268eb | 2006-01-08 01:00:36 -0800 | [diff] [blame] | 1706 | /* More than offslab_limit objects will cause problems */ | 
| Linus Torvalds | 9888e6f | 2006-03-06 17:44:43 -0800 | [diff] [blame] | 1707 | if ((flags & CFLGS_OFF_SLAB) && num > offslab_limit) | 
| Pekka Enberg | 4d268eb | 2006-01-08 01:00:36 -0800 | [diff] [blame] | 1708 | break; | 
|  | 1709 |  | 
| Linus Torvalds | 9888e6f | 2006-03-06 17:44:43 -0800 | [diff] [blame] | 1710 | /* Found something acceptable - save it away */ | 
| Pekka Enberg | 4d268eb | 2006-01-08 01:00:36 -0800 | [diff] [blame] | 1711 | cachep->num = num; | 
| Linus Torvalds | 9888e6f | 2006-03-06 17:44:43 -0800 | [diff] [blame] | 1712 | cachep->gfporder = gfporder; | 
| Pekka Enberg | 4d268eb | 2006-01-08 01:00:36 -0800 | [diff] [blame] | 1713 | left_over = remainder; | 
|  | 1714 |  | 
|  | 1715 | /* | 
| Linus Torvalds | f78bb8a | 2006-03-08 10:33:05 -0800 | [diff] [blame] | 1716 | * A VFS-reclaimable slab tends to have most allocations | 
|  | 1717 | * as GFP_NOFS and we really don't want to have to be allocating | 
|  | 1718 | * higher-order pages when we are unable to shrink dcache. | 
|  | 1719 | */ | 
|  | 1720 | if (flags & SLAB_RECLAIM_ACCOUNT) | 
|  | 1721 | break; | 
|  | 1722 |  | 
|  | 1723 | /* | 
| Pekka Enberg | 4d268eb | 2006-01-08 01:00:36 -0800 | [diff] [blame] | 1724 | * Large number of objects is good, but very large slabs are | 
|  | 1725 | * currently bad for the gfp()s. | 
|  | 1726 | */ | 
| Linus Torvalds | 9888e6f | 2006-03-06 17:44:43 -0800 | [diff] [blame] | 1727 | if (gfporder >= slab_break_gfp_order) | 
| Pekka Enberg | 4d268eb | 2006-01-08 01:00:36 -0800 | [diff] [blame] | 1728 | break; | 
|  | 1729 |  | 
| Linus Torvalds | 9888e6f | 2006-03-06 17:44:43 -0800 | [diff] [blame] | 1730 | /* | 
|  | 1731 | * Acceptable internal fragmentation? | 
|  | 1732 | */ | 
|  | 1733 | if ((left_over * 8) <= (PAGE_SIZE << gfporder)) | 
| Pekka Enberg | 4d268eb | 2006-01-08 01:00:36 -0800 | [diff] [blame] | 1734 | break; | 
|  | 1735 | } | 
|  | 1736 | return left_over; | 
|  | 1737 | } | 
|  | 1738 |  | 
|  | 1739 | /** | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1740 | * kmem_cache_create - Create a cache. | 
|  | 1741 | * @name: A string which is used in /proc/slabinfo to identify this cache. | 
|  | 1742 | * @size: The size of objects to be created in this cache. | 
|  | 1743 | * @align: The required alignment for the objects. | 
|  | 1744 | * @flags: SLAB flags | 
|  | 1745 | * @ctor: A constructor for the objects. | 
|  | 1746 | * @dtor: A destructor for the objects. | 
|  | 1747 | * | 
|  | 1748 | * Returns a ptr to the cache on success, NULL on failure. | 
|  | 1749 | * Cannot be called within an interrupt, but can be interrupted. | 
|  | 1750 | * The @ctor is run when new pages are allocated by the cache | 
|  | 1751 | * and the @dtor is run before the pages are handed back. | 
|  | 1752 | * | 
|  | 1753 | * @name must be valid until the cache is destroyed. This implies that | 
|  | 1754 | * the module calling this has to destroy the cache before getting | 
|  | 1755 | * unloaded. | 
|  | 1756 | * | 
|  | 1757 | * The flags are | 
|  | 1758 | * | 
|  | 1759 | * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5) | 
|  | 1760 | * to catch references to uninitialised memory. | 
|  | 1761 | * | 
|  | 1762 | * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check | 
|  | 1763 | * for buffer overruns. | 
|  | 1764 | * | 
|  | 1765 | * %SLAB_NO_REAP - Don't automatically reap this cache when we're under | 
|  | 1766 | * memory pressure. | 
|  | 1767 | * | 
|  | 1768 | * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware | 
|  | 1769 | * cacheline.  This can be beneficial if you're counting cycles as closely | 
|  | 1770 | * as davem. | 
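|  |  |  * | 
|  |  |  * Typical usage (an illustrative sketch; struct foo and foo_cachep | 
|  |  |  * are made-up names): | 
|  |  |  * | 
|  |  |  *	static struct kmem_cache *foo_cachep; | 
|  |  |  * | 
|  |  |  *	foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0, | 
|  |  |  *				       SLAB_HWCACHE_ALIGN, NULL, NULL); | 
|  |  |  *	struct foo *f = kmem_cache_alloc(foo_cachep, GFP_KERNEL); | 
|  |  |  *	... | 
|  |  |  *	kmem_cache_free(foo_cachep, f); | 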
|  | 1771 | */ | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 1772 | struct kmem_cache * | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1773 | kmem_cache_create (const char *name, size_t size, size_t align, | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 1774 | unsigned long flags, void (*ctor)(void*, struct kmem_cache *, unsigned long), | 
|  | 1775 | void (*dtor)(void*, struct kmem_cache *, unsigned long)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1776 | { | 
|  | 1777 | size_t left_over, slab_size, ralign; | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 1778 | struct kmem_cache *cachep = NULL; | 
| Andrew Morton | 4f12bb4 | 2005-11-07 00:58:00 -0800 | [diff] [blame] | 1779 | struct list_head *p; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1780 |  | 
|  | 1781 | /* | 
|  | 1782 | * Sanity checks... these are all serious usage bugs. | 
|  | 1783 | */ | 
|  | 1784 | if ((!name) || | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1785 | in_interrupt() || | 
|  | 1786 | (size < BYTES_PER_WORD) || | 
|  | 1787 | (size > (1 << MAX_OBJ_ORDER) * PAGE_SIZE) || (dtor && !ctor)) { | 
|  | 1788 | printk(KERN_ERR "%s: Early error in slab %s\n", | 
|  | 1789 | __FUNCTION__, name); | 
|  | 1790 | BUG(); | 
|  | 1791 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1792 |  | 
| Ravikiran G Thirumalai | f0188f4 | 2006-02-10 01:51:13 -0800 | [diff] [blame] | 1793 | /* | 
|  | 1794 | * Prevent CPUs from coming and going. | 
|  | 1795 | * lock_cpu_hotplug() nests outside cache_chain_mutex | 
|  | 1796 | */ | 
|  | 1797 | lock_cpu_hotplug(); | 
|  | 1798 |  | 
| Ingo Molnar | fc0abb1 | 2006-01-18 17:42:33 -0800 | [diff] [blame] | 1799 | mutex_lock(&cache_chain_mutex); | 
| Andrew Morton | 4f12bb4 | 2005-11-07 00:58:00 -0800 | [diff] [blame] | 1800 |  | 
|  | 1801 | list_for_each(p, &cache_chain) { | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 1802 | struct kmem_cache *pc = list_entry(p, struct kmem_cache, next); | 
| Andrew Morton | 4f12bb4 | 2005-11-07 00:58:00 -0800 | [diff] [blame] | 1803 | mm_segment_t old_fs = get_fs(); | 
|  | 1804 | char tmp; | 
|  | 1805 | int res; | 
|  | 1806 |  | 
|  | 1807 | /* | 
|  | 1808 | * This happens when the module gets unloaded and doesn't | 
|  | 1809 | * destroy its slab cache and no-one else reuses the vmalloc | 
|  | 1810 | * area of the module.  Print a warning. | 
|  | 1811 | */ | 
|  | 1812 | set_fs(KERNEL_DS); | 
|  | 1813 | res = __get_user(tmp, pc->name); | 
|  | 1814 | set_fs(old_fs); | 
|  | 1815 | if (res) { | 
|  | 1816 | printk("SLAB: cache with size %d has lost its name\n", | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 1817 | pc->buffer_size); | 
| Andrew Morton | 4f12bb4 | 2005-11-07 00:58:00 -0800 | [diff] [blame] | 1818 | continue; | 
|  | 1819 | } | 
|  | 1820 |  | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1821 | if (!strcmp(pc->name, name)) { | 
| Andrew Morton | 4f12bb4 | 2005-11-07 00:58:00 -0800 | [diff] [blame] | 1822 | printk("kmem_cache_create: duplicate cache %s\n", name); | 
|  | 1823 | dump_stack(); | 
|  | 1824 | goto oops; | 
|  | 1825 | } | 
|  | 1826 | } | 
|  | 1827 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1828 | #if DEBUG | 
|  | 1829 | WARN_ON(strchr(name, ' '));	/* It confuses parsers */ | 
|  | 1830 | if ((flags & SLAB_DEBUG_INITIAL) && !ctor) { | 
|  | 1831 | /* No constructor, but initial state check requested */ | 
|  | 1832 | printk(KERN_ERR "%s: No con, but init state check " | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1833 | "requested - %s\n", __FUNCTION__, name); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1834 | flags &= ~SLAB_DEBUG_INITIAL; | 
|  | 1835 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1836 | #if FORCED_DEBUG | 
|  | 1837 | /* | 
|  | 1838 | * Enable redzoning and last user accounting, except for caches with | 
|  | 1839 | * large objects, if the increased size would increase the object size | 
|  | 1840 | * above the next power of two: caches with object sizes just above a | 
|  | 1841 | * power of two have a significant amount of internal fragmentation. | 
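|  |  |  * | 
|  |  |  * Worked example (assuming BYTES_PER_WORD == 4, so the debug fields | 
|  |  |  * add 3 * 4 = 12 bytes): every size below 4096 gets the flags; at | 
|  |  |  * size 4096, fls(4095) == 12 but fls(4107) == 13 -- the padded | 
|  |  |  * object would cross into the next power of two -- so the flags are | 
|  |  |  * skipped, while at size 5000, fls(4999) == fls(5011) == 13 and | 
|  |  |  * they are set. | 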
|  | 1842 | */ | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1843 | if ((size < 4096 | 
|  | 1844 | || fls(size - 1) == fls(size - 1 + 3 * BYTES_PER_WORD))) | 
|  | 1845 | flags |= SLAB_RED_ZONE | SLAB_STORE_USER; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1846 | if (!(flags & SLAB_DESTROY_BY_RCU)) | 
|  | 1847 | flags |= SLAB_POISON; | 
|  | 1848 | #endif | 
|  | 1849 | if (flags & SLAB_DESTROY_BY_RCU) | 
|  | 1850 | BUG_ON(flags & SLAB_POISON); | 
|  | 1851 | #endif | 
|  | 1852 | if (flags & SLAB_DESTROY_BY_RCU) | 
|  | 1853 | BUG_ON(dtor); | 
|  | 1854 |  | 
|  | 1855 | /* | 
|  | 1856 | * Always checks flags, a caller might be expecting debug | 
|  | 1857 | * support which isn't available. | 
|  | 1858 | */ | 
|  | 1859 | if (flags & ~CREATE_MASK) | 
|  | 1860 | BUG(); | 
|  | 1861 |  | 
|  | 1862 | /* Check that size is in terms of words.  This is needed to avoid | 
|  | 1863 | * unaligned accesses for some archs when redzoning is used, and makes | 
|  | 1864 | * sure any on-slab bufctl's are also correctly aligned. | 
|  | 1865 | */ | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1866 | if (size & (BYTES_PER_WORD - 1)) { | 
|  | 1867 | size += (BYTES_PER_WORD - 1); | 
|  | 1868 | size &= ~(BYTES_PER_WORD - 1); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1869 | } | 
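|  |  | /* e.g. (editor's illustration) with BYTES_PER_WORD == 4, a requested | 
|  |  | * size of 33 becomes (33 + 3) & ~3 == 36, the next word multiple. | 
|  |  | */ | 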
|  | 1870 |  | 
|  | 1871 | /* Calculate the final buffer alignment: */ | 
|  | 1872 | /* 1) arch recommendation: can be overridden for debug */ | 
|  | 1873 | if (flags & SLAB_HWCACHE_ALIGN) { | 
|  | 1874 | /* Default alignment: as specified by the arch code. | 
|  | 1875 | * Unless an object is really small, in which case multiple | 
|  | 1876 | * objects are squeezed into one cacheline. | 
|  | 1877 | */ | 
|  | 1878 | ralign = cache_line_size(); | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1879 | while (size <= ralign / 2) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1880 | ralign /= 2; | 
|  | 1881 | } else { | 
|  | 1882 | ralign = BYTES_PER_WORD; | 
|  | 1883 | } | 
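|  |  | /* Sketch with assumed values: cache_line_size() == 64 and size == 20 | 
|  |  | * halves ralign 64 -> 32 and stops (20 > 32 / 2), so two objects | 
|  |  | * share each 64-byte cache line instead of wasting 44 bytes apiece. | 
|  |  | */ | 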
|  | 1884 | /* 2) arch mandated alignment: disables debug if necessary */ | 
|  | 1885 | if (ralign < ARCH_SLAB_MINALIGN) { | 
|  | 1886 | ralign = ARCH_SLAB_MINALIGN; | 
|  | 1887 | if (ralign > BYTES_PER_WORD) | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1888 | flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1889 | } | 
|  | 1890 | /* 3) caller mandated alignment: disables debug if necessary */ | 
|  | 1891 | if (ralign < align) { | 
|  | 1892 | ralign = align; | 
|  | 1893 | if (ralign > BYTES_PER_WORD) | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1894 | flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1895 | } | 
|  | 1896 | /* 4) Store it. Note that the debug code below can reduce | 
|  | 1897 | *    the alignment to BYTES_PER_WORD. | 
|  | 1898 | */ | 
|  | 1899 | align = ralign; | 
|  | 1900 |  | 
|  | 1901 | /* Get cache's description obj. */ | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 1902 | cachep = kmem_cache_alloc(&cache_cache, SLAB_KERNEL); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1903 | if (!cachep) | 
| Andrew Morton | 4f12bb4 | 2005-11-07 00:58:00 -0800 | [diff] [blame] | 1904 | goto oops; | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 1905 | memset(cachep, 0, sizeof(struct kmem_cache)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1906 |  | 
|  | 1907 | #if DEBUG | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 1908 | cachep->obj_size = size; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1909 |  | 
|  | 1910 | if (flags & SLAB_RED_ZONE) { | 
|  | 1911 | /* redzoning only works with word aligned caches */ | 
|  | 1912 | align = BYTES_PER_WORD; | 
|  | 1913 |  | 
|  | 1914 | /* add space for red zone words */ | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 1915 | cachep->obj_offset += BYTES_PER_WORD; | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1916 | size += 2 * BYTES_PER_WORD; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1917 | } | 
|  | 1918 | if (flags & SLAB_STORE_USER) { | 
|  | 1919 | /* user store requires word alignment and | 
|  | 1920 | * one word of storage past the end of the real | 
|  | 1921 | * object. | 
|  | 1922 | */ | 
|  | 1923 | align = BYTES_PER_WORD; | 
|  | 1924 | size += BYTES_PER_WORD; | 
|  | 1925 | } | 
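|  |  | /* With both debug flags the effective layout is, conceptually | 
|  |  | * (editor's sketch): | 
|  |  | *   [redzone1][object, obj_offset() bytes in][redzone2][last-user word] | 
|  |  | * i.e. size has grown by 3 * BYTES_PER_WORD in total. | 
|  |  | */ | 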
|  | 1926 | #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC) | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1927 | if (size >= malloc_sizes[INDEX_L3 + 1].cs_size | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 1928 | && cachep->obj_size > cache_line_size() && size < PAGE_SIZE) { | 
|  | 1929 | cachep->obj_offset += PAGE_SIZE - size; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1930 | size = PAGE_SIZE; | 
|  | 1931 | } | 
|  | 1932 | #endif | 
|  | 1933 | #endif | 
|  | 1934 |  | 
|  | 1935 | /* Determine if the slab management is 'on' or 'off' slab. */ | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1936 | if (size >= (PAGE_SIZE >> 3)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1937 | /* | 
|  | 1938 | * Size is large, assume best to place the slab management obj | 
|  | 1939 | * off-slab (should allow better packing of objs). | 
|  | 1940 | */ | 
|  | 1941 | flags |= CFLGS_OFF_SLAB; | 
|  | 1942 |  | 
|  | 1943 | size = ALIGN(size, align); | 
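|  |  | /* Example (editor's note): with 4 KiB pages the threshold above is | 
|  |  | * PAGE_SIZE >> 3 == 512 bytes, so e.g. a 600-byte object keeps its | 
|  |  | * struct slab + kmem_bufctl_t array in a separate general cache. | 
|  |  | */ | 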
|  | 1944 |  | 
| Linus Torvalds | f78bb8a | 2006-03-08 10:33:05 -0800 | [diff] [blame] | 1945 | left_over = calculate_slab_order(cachep, size, align, flags); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1946 |  | 
|  | 1947 | if (!cachep->num) { | 
|  | 1948 | printk("kmem_cache_create: couldn't create cache %s.\n", name); | 
|  | 1949 | kmem_cache_free(&cache_cache, cachep); | 
|  | 1950 | cachep = NULL; | 
| Andrew Morton | 4f12bb4 | 2005-11-07 00:58:00 -0800 | [diff] [blame] | 1951 | goto oops; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1952 | } | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1953 | slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t) | 
|  | 1954 | + sizeof(struct slab), align); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1955 |  | 
|  | 1956 | /* | 
|  | 1957 | * If the slab has been placed off-slab, and we have enough space then | 
|  | 1958 | * move it on-slab. This is at the expense of any extra colouring. | 
|  | 1959 | */ | 
|  | 1960 | if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) { | 
|  | 1961 | flags &= ~CFLGS_OFF_SLAB; | 
|  | 1962 | left_over -= slab_size; | 
|  | 1963 | } | 
|  | 1964 |  | 
|  | 1965 | if (flags & CFLGS_OFF_SLAB) { | 
|  | 1966 | /* really off slab. No need for manual alignment */ | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1967 | slab_size = | 
|  | 1968 | cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1969 | } | 
|  | 1970 |  | 
|  | 1971 | cachep->colour_off = cache_line_size(); | 
|  | 1972 | /* Offset must be a multiple of the alignment. */ | 
|  | 1973 | if (cachep->colour_off < align) | 
|  | 1974 | cachep->colour_off = align; | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 1975 | cachep->colour = left_over / cachep->colour_off; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1976 | cachep->slab_size = slab_size; | 
|  | 1977 | cachep->flags = flags; | 
|  | 1978 | cachep->gfpflags = 0; | 
|  | 1979 | if (flags & SLAB_CACHE_DMA) | 
|  | 1980 | cachep->gfpflags |= GFP_DMA; | 
|  | 1981 | spin_lock_init(&cachep->spinlock); | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 1982 | cachep->buffer_size = size; | 
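|  |  | /* Colouring example (illustrative numbers): left_over == 400 and | 
|  |  | * colour_off == 64 give colour == 6, so successive slabs start at | 
|  |  | * offsets 0, 64, ..., 320 to spread objects across cache lines. | 
|  |  | */ | 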
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1983 |  | 
|  | 1984 | if (flags & CFLGS_OFF_SLAB) | 
| Victor Fusco | b2d5507 | 2005-09-10 00:26:36 -0700 | [diff] [blame] | 1985 | cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1986 | cachep->ctor = ctor; | 
|  | 1987 | cachep->dtor = dtor; | 
|  | 1988 | cachep->name = name; | 
|  | 1989 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1990 |  | 
|  | 1991 | if (g_cpucache_up == FULL) { | 
|  | 1992 | enable_cpucache(cachep); | 
|  | 1993 | } else { | 
|  | 1994 | if (g_cpucache_up == NONE) { | 
|  | 1995 | /* Note: the first kmem_cache_create must create | 
|  | 1996 | * the cache that's used by kmalloc(24), otherwise | 
|  | 1997 | * the creation of further caches will BUG(). | 
|  | 1998 | */ | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 1999 | cachep->array[smp_processor_id()] = | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2000 | &initarray_generic.cache; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2001 |  | 
|  | 2002 | /* If the cache that's used by | 
|  | 2003 | * kmalloc(sizeof(kmem_list3)) is the first cache, | 
|  | 2004 | * then we need to set up all its list3s, otherwise | 
|  | 2005 | * the creation of further caches will BUG(). | 
|  | 2006 | */ | 
|  | 2007 | set_up_list3s(cachep, SIZE_AC); | 
|  | 2008 | if (INDEX_AC == INDEX_L3) | 
|  | 2009 | g_cpucache_up = PARTIAL_L3; | 
|  | 2010 | else | 
|  | 2011 | g_cpucache_up = PARTIAL_AC; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2012 | } else { | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2013 | cachep->array[smp_processor_id()] = | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2014 | kmalloc(sizeof(struct arraycache_init), GFP_KERNEL); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2015 |  | 
|  | 2016 | if (g_cpucache_up == PARTIAL_AC) { | 
|  | 2017 | set_up_list3s(cachep, SIZE_L3); | 
|  | 2018 | g_cpucache_up = PARTIAL_L3; | 
|  | 2019 | } else { | 
|  | 2020 | int node; | 
|  | 2021 | for_each_online_node(node) { | 
|  | 2022 |  | 
|  | 2023 | cachep->nodelists[node] = | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2024 | kmalloc_node(sizeof | 
|  | 2025 | (struct kmem_list3), | 
|  | 2026 | GFP_KERNEL, node); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2027 | BUG_ON(!cachep->nodelists[node]); | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2028 | kmem_list3_init(cachep-> | 
|  | 2029 | nodelists[node]); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2030 | } | 
|  | 2031 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2032 | } | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2033 | cachep->nodelists[numa_node_id()]->next_reap = | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2034 | jiffies + REAPTIMEOUT_LIST3 + | 
|  | 2035 | ((unsigned long)cachep) % REAPTIMEOUT_LIST3; | 
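|  |  | /* The "% REAPTIMEOUT_LIST3" term derives a per-cache phase from the | 
|  |  | * cache's address, so caches created together do not all reap on | 
|  |  | * the same jiffy. | 
|  |  | */ | 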
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2036 |  | 
| Pekka Enberg | 9a2dba4 | 2006-02-01 03:05:49 -0800 | [diff] [blame] | 2037 | BUG_ON(!cpu_cache_get(cachep)); | 
|  | 2038 | cpu_cache_get(cachep)->avail = 0; | 
|  | 2039 | cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES; | 
|  | 2040 | cpu_cache_get(cachep)->batchcount = 1; | 
|  | 2041 | cpu_cache_get(cachep)->touched = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2042 | cachep->batchcount = 1; | 
|  | 2043 | cachep->limit = BOOT_CPUCACHE_ENTRIES; | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2044 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2045 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2046 | /* cache setup completed, link it into the list */ | 
|  | 2047 | list_add(&cachep->next, &cache_chain); | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2048 | oops: | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2049 | if (!cachep && (flags & SLAB_PANIC)) | 
|  | 2050 | panic("kmem_cache_create(): failed to create slab `%s'\n", | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2051 | name); | 
| Ingo Molnar | fc0abb1 | 2006-01-18 17:42:33 -0800 | [diff] [blame] | 2052 | mutex_unlock(&cache_chain_mutex); | 
| Ravikiran G Thirumalai | f0188f4 | 2006-02-10 01:51:13 -0800 | [diff] [blame] | 2053 | unlock_cpu_hotplug(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2054 | return cachep; | 
|  | 2055 | } | 
|  | 2056 | EXPORT_SYMBOL(kmem_cache_create); | 
|  | 2057 |  | 
|  | 2058 | #if DEBUG | 
|  | 2059 | static void check_irq_off(void) | 
|  | 2060 | { | 
|  | 2061 | BUG_ON(!irqs_disabled()); | 
|  | 2062 | } | 
|  | 2063 |  | 
|  | 2064 | static void check_irq_on(void) | 
|  | 2065 | { | 
|  | 2066 | BUG_ON(irqs_disabled()); | 
|  | 2067 | } | 
|  | 2068 |  | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 2069 | static void check_spinlock_acquired(struct kmem_cache *cachep) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2070 | { | 
|  | 2071 | #ifdef CONFIG_SMP | 
|  | 2072 | check_irq_off(); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2073 | assert_spin_locked(&cachep->nodelists[numa_node_id()]->list_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2074 | #endif | 
|  | 2075 | } | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2076 |  | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 2077 | static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node) | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2078 | { | 
|  | 2079 | #ifdef CONFIG_SMP | 
|  | 2080 | check_irq_off(); | 
|  | 2081 | assert_spin_locked(&cachep->nodelists[node]->list_lock); | 
|  | 2082 | #endif | 
|  | 2083 | } | 
|  | 2084 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2085 | #else | 
|  | 2086 | #define check_irq_off()	do { } while(0) | 
|  | 2087 | #define check_irq_on()	do { } while(0) | 
|  | 2088 | #define check_spinlock_acquired(x) do { } while(0) | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2089 | #define check_spinlock_acquired_node(x, y) do { } while(0) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2090 | #endif | 
|  | 2091 |  | 
|  | 2092 | /* | 
|  | 2093 | * Waits for all CPUs to execute func(). | 
|  | 2094 | */ | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2095 | static void smp_call_function_all_cpus(void (*func)(void *arg), void *arg) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2096 | { | 
|  | 2097 | check_irq_on(); | 
|  | 2098 | preempt_disable(); | 
|  | 2099 |  | 
|  | 2100 | local_irq_disable(); | 
|  | 2101 | func(arg); | 
|  | 2102 | local_irq_enable(); | 
|  | 2103 |  | 
|  | 2104 | if (smp_call_function(func, arg, 1, 1)) | 
|  | 2105 | BUG(); | 
|  | 2106 |  | 
|  | 2107 | preempt_enable(); | 
|  | 2108 | } | 
|  | 2109 |  | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 2110 | static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac, | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2111 | int force, int node); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2112 |  | 
|  | 2113 | static void do_drain(void *arg) | 
|  | 2114 | { | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 2115 | struct kmem_cache *cachep = (struct kmem_cache *) arg; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2116 | struct array_cache *ac; | 
| Christoph Lameter | ff69416 | 2005-09-22 21:44:02 -0700 | [diff] [blame] | 2117 | int node = numa_node_id(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2118 |  | 
|  | 2119 | check_irq_off(); | 
| Pekka Enberg | 9a2dba4 | 2006-02-01 03:05:49 -0800 | [diff] [blame] | 2120 | ac = cpu_cache_get(cachep); | 
| Christoph Lameter | ff69416 | 2005-09-22 21:44:02 -0700 | [diff] [blame] | 2121 | spin_lock(&cachep->nodelists[node]->list_lock); | 
|  | 2122 | free_block(cachep, ac->entry, ac->avail, node); | 
|  | 2123 | spin_unlock(&cachep->nodelists[node]->list_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2124 | ac->avail = 0; | 
|  | 2125 | } | 
|  | 2126 |  | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 2127 | static void drain_cpu_caches(struct kmem_cache *cachep) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2128 | { | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2129 | struct kmem_list3 *l3; | 
|  | 2130 | int node; | 
|  | 2131 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2132 | smp_call_function_all_cpus(do_drain, cachep); | 
|  | 2133 | check_irq_on(); | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2134 | for_each_online_node(node) { | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2135 | l3 = cachep->nodelists[node]; | 
|  | 2136 | if (l3) { | 
| Ravikiran G Thirumalai | ca3b9b9 | 2006-02-04 23:27:58 -0800 | [diff] [blame] | 2137 | spin_lock_irq(&l3->list_lock); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2138 | drain_array_locked(cachep, l3->shared, 1, node); | 
| Ravikiran G Thirumalai | ca3b9b9 | 2006-02-04 23:27:58 -0800 | [diff] [blame] | 2139 | spin_unlock_irq(&l3->list_lock); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2140 | if (l3->alien) | 
| Ravikiran G Thirumalai | 4484ebf | 2006-02-04 23:27:59 -0800 | [diff] [blame] | 2141 | drain_alien_cache(cachep, l3->alien); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2142 | } | 
|  | 2143 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2144 | } | 
|  | 2145 |  | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 2146 | static int __node_shrink(struct kmem_cache *cachep, int node) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2147 | { | 
|  | 2148 | struct slab *slabp; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2149 | struct kmem_list3 *l3 = cachep->nodelists[node]; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2150 | int ret; | 
|  | 2151 |  | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2152 | for (;;) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2153 | struct list_head *p; | 
|  | 2154 |  | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2155 | p = l3->slabs_free.prev; | 
|  | 2156 | if (p == &l3->slabs_free) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2157 | break; | 
|  | 2158 |  | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2159 | slabp = list_entry(l3->slabs_free.prev, struct slab, list); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2160 | #if DEBUG | 
|  | 2161 | if (slabp->inuse) | 
|  | 2162 | BUG(); | 
|  | 2163 | #endif | 
|  | 2164 | list_del(&slabp->list); | 
|  | 2165 |  | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2166 | l3->free_objects -= cachep->num; | 
|  | 2167 | spin_unlock_irq(&l3->list_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2168 | slab_destroy(cachep, slabp); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2169 | spin_lock_irq(&l3->list_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2170 | } | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2171 | ret = !list_empty(&l3->slabs_full) || !list_empty(&l3->slabs_partial); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2172 | return ret; | 
|  | 2173 | } | 
|  | 2174 |  | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 2175 | static int __cache_shrink(struct kmem_cache *cachep) | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2176 | { | 
|  | 2177 | int ret = 0, i = 0; | 
|  | 2178 | struct kmem_list3 *l3; | 
|  | 2179 |  | 
|  | 2180 | drain_cpu_caches(cachep); | 
|  | 2181 |  | 
|  | 2182 | check_irq_on(); | 
|  | 2183 | for_each_online_node(i) { | 
|  | 2184 | l3 = cachep->nodelists[i]; | 
|  | 2185 | if (l3) { | 
|  | 2186 | spin_lock_irq(&l3->list_lock); | 
|  | 2187 | ret += __node_shrink(cachep, i); | 
|  | 2188 | spin_unlock_irq(&l3->list_lock); | 
|  | 2189 | } | 
|  | 2190 | } | 
|  | 2191 | return (ret ? 1 : 0); | 
|  | 2192 | } | 
|  | 2193 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2194 | /** | 
|  | 2195 | * kmem_cache_shrink - Shrink a cache. | 
|  | 2196 | * @cachep: The cache to shrink. | 
|  | 2197 | * | 
|  | 2198 | * Releases as many slabs as possible for a cache. | 
|  | 2199 | * To help debugging, a zero exit status indicates all slabs were released. | 
|  | 2200 | */ | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 2201 | int kmem_cache_shrink(struct kmem_cache *cachep) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2202 | { | 
|  | 2203 | if (!cachep || in_interrupt()) | 
|  | 2204 | BUG(); | 
|  | 2205 |  | 
|  | 2206 | return __cache_shrink(cachep); | 
|  | 2207 | } | 
|  | 2208 | EXPORT_SYMBOL(kmem_cache_shrink); | 
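|  |  | /* | 
|  |  | * Minimal usage sketch (hypothetical caller, not part of this file): | 
|  |  | * | 
|  |  | *	if (kmem_cache_shrink(my_cachep)) | 
|  |  | *		printk(KERN_INFO "foo: some slabs still in use\n"); | 
|  |  | * | 
|  |  | * A nonzero return means full or partial slabs remain on some node. | 
|  |  | */ | 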
|  | 2209 |  | 
|  | 2210 | /** | 
|  | 2211 | * kmem_cache_destroy - delete a cache | 
|  | 2212 | * @cachep: the cache to destroy | 
|  | 2213 | * | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 2214 | * Remove a struct kmem_cache object from the slab cache. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2215 | * Returns 0 on success. | 
|  | 2216 | * | 
|  | 2217 | * It is expected this function will be called by a module when it is | 
|  | 2218 | * unloaded.  This will remove the cache completely, and avoid a duplicate | 
|  | 2219 | * cache being allocated each time a module is loaded and unloaded, if the | 
|  | 2220 | * module doesn't have persistent in-kernel storage across loads and unloads. | 
|  | 2221 | * | 
|  | 2222 | * The cache must be empty before calling this function. | 
|  | 2223 | * | 
|  | 2224 | * The caller must guarantee that no one will allocate memory from the cache | 
|  | 2225 | * during the kmem_cache_destroy(). | 
|  | 2226 | */ | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 2227 | int kmem_cache_destroy(struct kmem_cache *cachep) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2228 | { | 
|  | 2229 | int i; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2230 | struct kmem_list3 *l3; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2231 |  | 
|  | 2232 | if (!cachep || in_interrupt()) | 
|  | 2233 | BUG(); | 
|  | 2234 |  | 
|  | 2235 | /* Don't let CPUs come and go */ | 
|  | 2236 | lock_cpu_hotplug(); | 
|  | 2237 |  | 
|  | 2238 | /* Find the cache in the chain of caches. */ | 
| Ingo Molnar | fc0abb1 | 2006-01-18 17:42:33 -0800 | [diff] [blame] | 2239 | mutex_lock(&cache_chain_mutex); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2240 | /* | 
|  | 2241 | * The chain is never empty; cache_cache is never destroyed. | 
|  | 2242 | */ | 
|  | 2243 | list_del(&cachep->next); | 
| Ingo Molnar | fc0abb1 | 2006-01-18 17:42:33 -0800 | [diff] [blame] | 2244 | mutex_unlock(&cache_chain_mutex); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2245 |  | 
|  | 2246 | if (__cache_shrink(cachep)) { | 
|  | 2247 | slab_error(cachep, "Can't free all objects"); | 
| Ingo Molnar | fc0abb1 | 2006-01-18 17:42:33 -0800 | [diff] [blame] | 2248 | mutex_lock(&cache_chain_mutex); | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2249 | list_add(&cachep->next, &cache_chain); | 
| Ingo Molnar | fc0abb1 | 2006-01-18 17:42:33 -0800 | [diff] [blame] | 2250 | mutex_unlock(&cache_chain_mutex); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2251 | unlock_cpu_hotplug(); | 
|  | 2252 | return 1; | 
|  | 2253 | } | 
|  | 2254 |  | 
|  | 2255 | if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) | 
| Paul E. McKenney | fbd568a3e | 2005-05-01 08:59:04 -0700 | [diff] [blame] | 2256 | synchronize_rcu(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2257 |  | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2258 | for_each_online_cpu(i) | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2259 | kfree(cachep->array[i]); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2260 |  | 
|  | 2261 | /* NUMA: free the list3 structures */ | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2262 | for_each_online_node(i) { | 
|  | 2263 | if ((l3 = cachep->nodelists[i])) { | 
|  | 2264 | kfree(l3->shared); | 
|  | 2265 | free_alien_cache(l3->alien); | 
|  | 2266 | kfree(l3); | 
|  | 2267 | } | 
|  | 2268 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2269 | kmem_cache_free(&cache_cache, cachep); | 
|  | 2270 |  | 
|  | 2271 | unlock_cpu_hotplug(); | 
|  | 2272 |  | 
|  | 2273 | return 0; | 
|  | 2274 | } | 
|  | 2275 | EXPORT_SYMBOL(kmem_cache_destroy); | 
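|  |  | /* | 
|  |  | * Typical module lifetime sketch (hypothetical names, editor's note): | 
|  |  | * | 
|  |  | *	static struct kmem_cache *foo_cachep; | 
|  |  | * | 
|  |  | *	foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo), | 
|  |  | *				       0, SLAB_HWCACHE_ALIGN, NULL, NULL); | 
|  |  | *	... | 
|  |  | *	kmem_cache_destroy(foo_cachep);	(at module exit, cache empty) | 
|  |  | */ | 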
|  | 2276 |  | 
|  | 2277 | /* Get the memory for a slab management obj. */ | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 2278 | static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp, | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2279 | int colour_off, gfp_t local_flags) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2280 | { | 
|  | 2281 | struct slab *slabp; | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2282 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2283 | if (OFF_SLAB(cachep)) { | 
|  | 2284 | /* Slab management obj is off-slab. */ | 
|  | 2285 | slabp = kmem_cache_alloc(cachep->slabp_cache, local_flags); | 
|  | 2286 | if (!slabp) | 
|  | 2287 | return NULL; | 
|  | 2288 | } else { | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2289 | slabp = objp + colour_off; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2290 | colour_off += cachep->slab_size; | 
|  | 2291 | } | 
|  | 2292 | slabp->inuse = 0; | 
|  | 2293 | slabp->colouroff = colour_off; | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2294 | slabp->s_mem = objp + colour_off; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2295 |  | 
|  | 2296 | return slabp; | 
|  | 2297 | } | 
|  | 2298 |  | 
|  | 2299 | static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp) | 
|  | 2300 | { | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2301 | return (kmem_bufctl_t *) (slabp + 1); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2302 | } | 
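|  |  | /* The kmem_bufctl_t free-list array lives immediately after the | 
|  |  | * struct slab header, which is why slab_size in kmem_cache_create() | 
|  |  | * is sizeof(struct slab) + num * sizeof(kmem_bufctl_t). | 
|  |  | */ | 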
|  | 2303 |  | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 2304 | static void cache_init_objs(struct kmem_cache *cachep, | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2305 | struct slab *slabp, unsigned long ctor_flags) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2306 | { | 
|  | 2307 | int i; | 
|  | 2308 |  | 
|  | 2309 | for (i = 0; i < cachep->num; i++) { | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 2310 | void *objp = slabp->s_mem + cachep->buffer_size * i; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2311 | #if DEBUG | 
|  | 2312 | /* need to poison the objs? */ | 
|  | 2313 | if (cachep->flags & SLAB_POISON) | 
|  | 2314 | poison_obj(cachep, objp, POISON_FREE); | 
|  | 2315 | if (cachep->flags & SLAB_STORE_USER) | 
|  | 2316 | *dbg_userword(cachep, objp) = NULL; | 
|  | 2317 |  | 
|  | 2318 | if (cachep->flags & SLAB_RED_ZONE) { | 
|  | 2319 | *dbg_redzone1(cachep, objp) = RED_INACTIVE; | 
|  | 2320 | *dbg_redzone2(cachep, objp) = RED_INACTIVE; | 
|  | 2321 | } | 
|  | 2322 | /* | 
|  | 2323 | * Constructors are not allowed to allocate memory from | 
|  | 2324 | * the same cache that they are a constructor for. | 
|  | 2325 | * Otherwise, deadlock. They must also be threaded. | 
|  | 2326 | */ | 
|  | 2327 | if (cachep->ctor && !(cachep->flags & SLAB_POISON)) | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 2328 | cachep->ctor(objp + obj_offset(cachep), cachep, | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2329 | ctor_flags); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2330 |  | 
|  | 2331 | if (cachep->flags & SLAB_RED_ZONE) { | 
|  | 2332 | if (*dbg_redzone2(cachep, objp) != RED_INACTIVE) | 
|  | 2333 | slab_error(cachep, "constructor overwrote the" | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2334 | " end of an object"); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2335 | if (*dbg_redzone1(cachep, objp) != RED_INACTIVE) | 
|  | 2336 | slab_error(cachep, "constructor overwrote the" | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2337 | " start of an object"); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2338 | } | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 2339 | if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep) | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2340 | && cachep->flags & SLAB_POISON) | 
|  | 2341 | kernel_map_pages(virt_to_page(objp), | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 2342 | cachep->buffer_size / PAGE_SIZE, 0); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2343 | #else | 
|  | 2344 | if (cachep->ctor) | 
|  | 2345 | cachep->ctor(objp, cachep, ctor_flags); | 
|  | 2346 | #endif | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2347 | slab_bufctl(slabp)[i] = i + 1; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2348 | } | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2349 | slab_bufctl(slabp)[i - 1] = BUFCTL_END; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2350 | slabp->free = 0; | 
|  | 2351 | } | 
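|  |  | /* Freelist example (editor's sketch) for a 4-object slab: after | 
|  |  | * cache_init_objs(), slab_bufctl(slabp) == { 1, 2, 3, BUFCTL_END } | 
|  |  | * and slabp->free == 0, so slab_get_obj() below hands out objects | 
|  |  | * 0, 1, 2, 3 in order. | 
|  |  | */ | 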
|  | 2352 |  | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 2353 | static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2354 | { | 
|  | 2355 | if (flags & SLAB_DMA) { | 
|  | 2356 | if (!(cachep->gfpflags & GFP_DMA)) | 
|  | 2357 | BUG(); | 
|  | 2358 | } else { | 
|  | 2359 | if (cachep->gfpflags & GFP_DMA) | 
|  | 2360 | BUG(); | 
|  | 2361 | } | 
|  | 2362 | } | 
|  | 2363 |  | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 2364 | static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp, int nodeid) | 
| Matthew Dobson | 78d382d | 2006-02-01 03:05:47 -0800 | [diff] [blame] | 2365 | { | 
|  | 2366 | void *objp = slabp->s_mem + (slabp->free * cachep->buffer_size); | 
|  | 2367 | kmem_bufctl_t next; | 
|  | 2368 |  | 
|  | 2369 | slabp->inuse++; | 
|  | 2370 | next = slab_bufctl(slabp)[slabp->free]; | 
|  | 2371 | #if DEBUG | 
|  | 2372 | slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE; | 
|  | 2373 | WARN_ON(slabp->nodeid != nodeid); | 
|  | 2374 | #endif | 
|  | 2375 | slabp->free = next; | 
|  | 2376 |  | 
|  | 2377 | return objp; | 
|  | 2378 | } | 
|  | 2379 |  | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 2380 | static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp, void *objp, | 
| Matthew Dobson | 78d382d | 2006-02-01 03:05:47 -0800 | [diff] [blame] | 2381 | int nodeid) | 
|  | 2382 | { | 
|  | 2383 | unsigned int objnr = (unsigned)(objp-slabp->s_mem) / cachep->buffer_size; | 
|  | 2384 |  | 
|  | 2385 | #if DEBUG | 
|  | 2386 | /* Verify that the slab belongs to the intended node */ | 
|  | 2387 | WARN_ON(slabp->nodeid != nodeid); | 
|  | 2388 |  | 
|  | 2389 | if (slab_bufctl(slabp)[objnr] != BUFCTL_FREE) { | 
|  | 2390 | printk(KERN_ERR "slab: double free detected in cache " | 
|  | 2391 | "'%s', objp %p\n", cachep->name, objp); | 
|  | 2392 | BUG(); | 
|  | 2393 | } | 
|  | 2394 | #endif | 
|  | 2395 | slab_bufctl(slabp)[objnr] = slabp->free; | 
|  | 2396 | slabp->free = objnr; | 
|  | 2397 | slabp->inuse--; | 
|  | 2398 | } | 
|  | 2399 |  | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 2400 | static void set_slab_attr(struct kmem_cache *cachep, struct slab *slabp, void *objp) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2401 | { | 
|  | 2402 | int i; | 
|  | 2403 | struct page *page; | 
|  | 2404 |  | 
|  | 2405 | /* Nasty!!!!!! I hope this is OK. */ | 
|  | 2406 | i = 1 << cachep->gfporder; | 
|  | 2407 | page = virt_to_page(objp); | 
|  | 2408 | do { | 
| Pekka Enberg | 065d41c | 2005-11-13 16:06:46 -0800 | [diff] [blame] | 2409 | page_set_cache(page, cachep); | 
|  | 2410 | page_set_slab(page, slabp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2411 | page++; | 
|  | 2412 | } while (--i); | 
|  | 2413 | } | 
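|  |  | /* Every constituent page now maps back to its cache and slab, which | 
|  |  | * is what lets the free path recover both from a bare pointer via | 
|  |  | * virt_to_page() in cache_free_debugcheck() and friends. | 
|  |  | */ | 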
|  | 2414 |  | 
|  | 2415 | /* | 
|  | 2416 | * Grow (by 1) the number of slabs within a cache.  This is called by | 
|  | 2417 | * kmem_cache_alloc() when there are no active objs left in a cache. | 
|  | 2418 | */ | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 2419 | static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2420 | { | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2421 | struct slab *slabp; | 
|  | 2422 | void *objp; | 
|  | 2423 | size_t offset; | 
|  | 2424 | gfp_t local_flags; | 
|  | 2425 | unsigned long ctor_flags; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2426 | struct kmem_list3 *l3; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2427 |  | 
|  | 2428 | /* Be lazy and only check for valid flags here, | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2429 | * keeping it out of the critical path in kmem_cache_alloc(). | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2430 | */ | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2431 | if (flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2432 | BUG(); | 
|  | 2433 | if (flags & SLAB_NO_GROW) | 
|  | 2434 | return 0; | 
|  | 2435 |  | 
|  | 2436 | ctor_flags = SLAB_CTOR_CONSTRUCTOR; | 
|  | 2437 | local_flags = (flags & SLAB_LEVEL_MASK); | 
|  | 2438 | if (!(local_flags & __GFP_WAIT)) | 
|  | 2439 | /* | 
|  | 2440 | * Not allowed to sleep.  Need to tell a constructor about | 
|  | 2441 | * this - it might need to know... | 
|  | 2442 | */ | 
|  | 2443 | ctor_flags |= SLAB_CTOR_ATOMIC; | 
|  | 2444 |  | 
| Ravikiran G Thirumalai | 2e1217c | 2006-02-04 23:27:56 -0800 | [diff] [blame] | 2445 | /* Take the l3 list lock to change the colour_next on this node */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2446 | check_irq_off(); | 
| Ravikiran G Thirumalai | 2e1217c | 2006-02-04 23:27:56 -0800 | [diff] [blame] | 2447 | l3 = cachep->nodelists[nodeid]; | 
|  | 2448 | spin_lock(&l3->list_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2449 |  | 
|  | 2450 | /* Get colour for the slab, and calculate the next value. */ | 
| Ravikiran G Thirumalai | 2e1217c | 2006-02-04 23:27:56 -0800 | [diff] [blame] | 2451 | offset = l3->colour_next; | 
|  | 2452 | l3->colour_next++; | 
|  | 2453 | if (l3->colour_next >= cachep->colour) | 
|  | 2454 | l3->colour_next = 0; | 
|  | 2455 | spin_unlock(&l3->list_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2456 |  | 
| Ravikiran G Thirumalai | 2e1217c | 2006-02-04 23:27:56 -0800 | [diff] [blame] | 2457 | offset *= cachep->colour_off; | 
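|  |  | /* e.g. colour == 4, colour_off == 64: slabs grown on this node use | 
|  |  | * offsets 0, 64, 128, 192, 0, ... as colour_next wraps (editor's | 
|  |  | * illustration). | 
|  |  | */ | 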
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2458 |  | 
|  | 2459 | if (local_flags & __GFP_WAIT) | 
|  | 2460 | local_irq_enable(); | 
|  | 2461 |  | 
|  | 2462 | /* | 
|  | 2463 | * The test for missing atomic flag is performed here, rather than | 
|  | 2464 | * the more obvious place, simply to reduce the critical path length | 
|  | 2465 | * in kmem_cache_alloc(). If a caller is seriously mis-behaving they | 
|  | 2466 | * will eventually be caught here (where it matters). | 
|  | 2467 | */ | 
|  | 2468 | kmem_flagcheck(cachep, flags); | 
|  | 2469 |  | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2470 | /* Get mem for the objs. | 
|  | 2471 | * Attempt to allocate a physical page from 'nodeid'. | 
|  | 2472 | */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2473 | if (!(objp = kmem_getpages(cachep, flags, nodeid))) | 
|  | 2474 | goto failed; | 
|  | 2475 |  | 
|  | 2476 | /* Get slab management. */ | 
|  | 2477 | if (!(slabp = alloc_slabmgmt(cachep, objp, offset, local_flags))) | 
|  | 2478 | goto opps1; | 
|  | 2479 |  | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2480 | slabp->nodeid = nodeid; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2481 | set_slab_attr(cachep, slabp, objp); | 
|  | 2482 |  | 
|  | 2483 | cache_init_objs(cachep, slabp, ctor_flags); | 
|  | 2484 |  | 
|  | 2485 | if (local_flags & __GFP_WAIT) | 
|  | 2486 | local_irq_disable(); | 
|  | 2487 | check_irq_off(); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2488 | spin_lock(&l3->list_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2489 |  | 
|  | 2490 | /* Make slab active. */ | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2491 | list_add_tail(&slabp->list, &(l3->slabs_free)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2492 | STATS_INC_GROWN(cachep); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2493 | l3->free_objects += cachep->num; | 
|  | 2494 | spin_unlock(&l3->list_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2495 | return 1; | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2496 | opps1: | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2497 | kmem_freepages(cachep, objp); | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2498 | failed: | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2499 | if (local_flags & __GFP_WAIT) | 
|  | 2500 | local_irq_disable(); | 
|  | 2501 | return 0; | 
|  | 2502 | } | 
|  | 2503 |  | 
|  | 2504 | #if DEBUG | 
|  | 2505 |  | 
|  | 2506 | /* | 
|  | 2507 | * Perform extra freeing checks: | 
|  | 2508 | * - detect bad pointers. | 
|  | 2509 | * - POISON/RED_ZONE checking | 
|  | 2510 | * - destructor calls, for caches with POISON+dtor | 
|  | 2511 | */ | 
|  | 2512 | static void kfree_debugcheck(const void *objp) | 
|  | 2513 | { | 
|  | 2514 | struct page *page; | 
|  | 2515 |  | 
|  | 2516 | if (!virt_addr_valid(objp)) { | 
|  | 2517 | printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n", | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2518 | (unsigned long)objp); | 
|  | 2519 | BUG(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2520 | } | 
|  | 2521 | page = virt_to_page(objp); | 
|  | 2522 | if (!PageSlab(page)) { | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2523 | printk(KERN_ERR "kfree_debugcheck: bad ptr %lxh.\n", | 
|  | 2524 | (unsigned long)objp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2525 | BUG(); | 
|  | 2526 | } | 
|  | 2527 | } | 
|  | 2528 |  | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 2529 | static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp, | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2530 | void *caller) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2531 | { | 
|  | 2532 | struct page *page; | 
|  | 2533 | unsigned int objnr; | 
|  | 2534 | struct slab *slabp; | 
|  | 2535 |  | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 2536 | objp -= obj_offset(cachep); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2537 | kfree_debugcheck(objp); | 
|  | 2538 | page = virt_to_page(objp); | 
|  | 2539 |  | 
| Pekka Enberg | 065d41c | 2005-11-13 16:06:46 -0800 | [diff] [blame] | 2540 | if (page_get_cache(page) != cachep) { | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2541 | printk(KERN_ERR | 
|  | 2542 | "mismatch in kmem_cache_free: expected cache %p, got %p\n", | 
|  | 2543 | page_get_cache(page), cachep); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2544 | printk(KERN_ERR "%p is %s.\n", cachep, cachep->name); | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2545 | printk(KERN_ERR "%p is %s.\n", page_get_cache(page), | 
|  | 2546 | page_get_cache(page)->name); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2547 | WARN_ON(1); | 
|  | 2548 | } | 
| Pekka Enberg | 065d41c | 2005-11-13 16:06:46 -0800 | [diff] [blame] | 2549 | slabp = page_get_slab(page); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2550 |  | 
|  | 2551 | if (cachep->flags & SLAB_RED_ZONE) { | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2552 | if (*dbg_redzone1(cachep, objp) != RED_ACTIVE | 
|  | 2553 | || *dbg_redzone2(cachep, objp) != RED_ACTIVE) { | 
|  | 2554 | slab_error(cachep, | 
|  | 2555 | "double free, or memory outside" | 
|  | 2556 | " object was overwritten"); | 
|  | 2557 | printk(KERN_ERR | 
|  | 2558 | "%p: redzone 1: 0x%lx, redzone 2: 0x%lx.\n", | 
|  | 2559 | objp, *dbg_redzone1(cachep, objp), | 
|  | 2560 | *dbg_redzone2(cachep, objp)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2561 | } | 
|  | 2562 | *dbg_redzone1(cachep, objp) = RED_INACTIVE; | 
|  | 2563 | *dbg_redzone2(cachep, objp) = RED_INACTIVE; | 
|  | 2564 | } | 
|  | 2565 | if (cachep->flags & SLAB_STORE_USER) | 
|  | 2566 | *dbg_userword(cachep, objp) = caller; | 
|  | 2567 |  | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 2568 | objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2569 |  | 
|  | 2570 | BUG_ON(objnr >= cachep->num); | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 2571 | BUG_ON(objp != slabp->s_mem + objnr * cachep->buffer_size); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2572 |  | 
|  | 2573 | if (cachep->flags & SLAB_DEBUG_INITIAL) { | 
|  | 2574 | /* Need to call the slab's constructor so the | 
|  | 2575 | * caller can verify its state (debugging). | 
|  | 2576 | * Called without the cache-lock held. | 
|  | 2577 | */ | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 2578 | cachep->ctor(objp + obj_offset(cachep), | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2579 | cachep, SLAB_CTOR_CONSTRUCTOR | SLAB_CTOR_VERIFY); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2580 | } | 
|  | 2581 | if (cachep->flags & SLAB_POISON && cachep->dtor) { | 
|  | 2582 | /* we want to cache-poison the object; | 
|  | 2583 | * call the destruction callback | 
|  | 2584 | */ | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 2585 | cachep->dtor(objp + obj_offset(cachep), cachep, 0); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2586 | } | 
|  | 2587 | if (cachep->flags & SLAB_POISON) { | 
|  | 2588 | #ifdef CONFIG_DEBUG_PAGEALLOC | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 2589 | if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2590 | store_stackinfo(cachep, objp, (unsigned long)caller); | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2591 | kernel_map_pages(virt_to_page(objp), | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 2592 | cachep->buffer_size / PAGE_SIZE, 0); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2593 | } else { | 
|  | 2594 | poison_obj(cachep, objp, POISON_FREE); | 
|  | 2595 | } | 
|  | 2596 | #else | 
|  | 2597 | poison_obj(cachep, objp, POISON_FREE); | 
|  | 2598 | #endif | 
|  | 2599 | } | 
|  | 2600 | return objp; | 
|  | 2601 | } | 
|  | 2602 |  | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 2603 | static void check_slabp(struct kmem_cache *cachep, struct slab *slabp) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2604 | { | 
|  | 2605 | kmem_bufctl_t i; | 
|  | 2606 | int entries = 0; | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2607 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2608 | /* Check slab's freelist to see if this obj is there. */ | 
|  | 2609 | for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) { | 
|  | 2610 | entries++; | 
|  | 2611 | if (entries > cachep->num || i >= cachep->num) | 
|  | 2612 | goto bad; | 
|  | 2613 | } | 
|  | 2614 | if (entries != cachep->num - slabp->inuse) { | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2615 | bad: | 
|  | 2616 | printk(KERN_ERR | 
|  | 2617 | "slab: Internal list corruption detected in cache '%s'(%d), slabp %p(%d). Hexdump:\n", | 
|  | 2618 | cachep->name, cachep->num, slabp, slabp->inuse); | 
|  | 2619 | for (i = 0; | 
| Linus Torvalds | 264132b | 2006-03-06 12:10:07 -0800 | [diff] [blame] | 2620 | i < sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t); | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2621 | i++) { | 
|  | 2622 | if ((i % 16) == 0) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2623 | printk("\n%03x:", i); | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2624 | printk(" %02x", ((unsigned char *)slabp)[i]); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2625 | } | 
|  | 2626 | printk("\n"); | 
|  | 2627 | BUG(); | 
|  | 2628 | } | 
|  | 2629 | } | 
|  | 2630 | #else | 
|  | 2631 | #define kfree_debugcheck(x) do { } while(0) | 
|  | 2632 | #define cache_free_debugcheck(x,objp,z) (objp) | 
|  | 2633 | #define check_slabp(x,y) do { } while(0) | 
|  | 2634 | #endif | 
|  | 2635 |  | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 2636 | static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2637 | { | 
|  | 2638 | int batchcount; | 
|  | 2639 | struct kmem_list3 *l3; | 
|  | 2640 | struct array_cache *ac; | 
|  | 2641 |  | 
|  | 2642 | check_irq_off(); | 
| Pekka Enberg | 9a2dba4 | 2006-02-01 03:05:49 -0800 | [diff] [blame] | 2643 | ac = cpu_cache_get(cachep); | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2644 | retry: | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2645 | batchcount = ac->batchcount; | 
|  | 2646 | if (!ac->touched && batchcount > BATCHREFILL_LIMIT) { | 
|  | 2647 | /* if there was little recent activity on this | 
|  | 2648 | * cache, then perform only a partial refill. | 
|  | 2649 | * Otherwise we could generate refill bouncing. | 
|  | 2650 | */ | 
|  | 2651 | batchcount = BATCHREFILL_LIMIT; | 
|  | 2652 | } | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2653 | l3 = cachep->nodelists[numa_node_id()]; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2654 |  | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2655 | BUG_ON(ac->avail > 0 || !l3); | 
|  | 2656 | spin_lock(&l3->list_lock); | 
|  | 2657 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2658 | if (l3->shared) { | 
|  | 2659 | struct array_cache *shared_array = l3->shared; | 
|  | 2660 | if (shared_array->avail) { | 
|  | 2661 | if (batchcount > shared_array->avail) | 
|  | 2662 | batchcount = shared_array->avail; | 
|  | 2663 | shared_array->avail -= batchcount; | 
|  | 2664 | ac->avail = batchcount; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2665 | memcpy(ac->entry, | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2666 | &(shared_array->entry[shared_array->avail]), | 
|  | 2667 | sizeof(void *) * batchcount); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2668 | shared_array->touched = 1; | 
|  | 2669 | goto alloc_done; | 
|  | 2670 | } | 
|  | 2671 | } | 
|  | 2672 | while (batchcount > 0) { | 
|  | 2673 | struct list_head *entry; | 
|  | 2674 | struct slab *slabp; | 
|  | 2675 | /* Get the slab the allocation is to come from. */ | 
|  | 2676 | entry = l3->slabs_partial.next; | 
|  | 2677 | if (entry == &l3->slabs_partial) { | 
|  | 2678 | l3->free_touched = 1; | 
|  | 2679 | entry = l3->slabs_free.next; | 
|  | 2680 | if (entry == &l3->slabs_free) | 
|  | 2681 | goto must_grow; | 
|  | 2682 | } | 
|  | 2683 |  | 
|  | 2684 | slabp = list_entry(entry, struct slab, list); | 
|  | 2685 | check_slabp(cachep, slabp); | 
|  | 2686 | check_spinlock_acquired(cachep); | 
|  | 2687 | while (slabp->inuse < cachep->num && batchcount--) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2688 | STATS_INC_ALLOCED(cachep); | 
|  | 2689 | STATS_INC_ACTIVE(cachep); | 
|  | 2690 | STATS_SET_HIGH(cachep); | 
|  | 2691 |  | 
| Matthew Dobson | 78d382d | 2006-02-01 03:05:47 -0800 | [diff] [blame] | 2692 | ac->entry[ac->avail++] = slab_get_obj(cachep, slabp, | 
|  | 2693 | numa_node_id()); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2694 | } | 
|  | 2695 | check_slabp(cachep, slabp); | 
|  | 2696 |  | 
|  | 2697 | /* move slabp to correct slabp list: */ | 
|  | 2698 | list_del(&slabp->list); | 
|  | 2699 | if (slabp->free == BUFCTL_END) | 
|  | 2700 | list_add(&slabp->list, &l3->slabs_full); | 
|  | 2701 | else | 
|  | 2702 | list_add(&slabp->list, &l3->slabs_partial); | 
|  | 2703 | } | 
|  | 2704 |  | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2705 | must_grow: | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2706 | l3->free_objects -= ac->avail; | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2707 | alloc_done: | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2708 | spin_unlock(&l3->list_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2709 |  | 
|  | 2710 | if (unlikely(!ac->avail)) { | 
|  | 2711 | int x; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2712 | x = cache_grow(cachep, flags, numa_node_id()); | 
|  | 2713 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2714 | /* cache_grow can reenable interrupts, then ac could change. */ | 
| Pekka Enberg | 9a2dba4 | 2006-02-01 03:05:49 -0800 | [diff] [blame] | 2715 | ac = cpu_cache_get(cachep); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2716 | if (!x && ac->avail == 0)	/* no objects in sight? abort */ | 
|  | 2717 | return NULL; | 
|  | 2718 |  | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2719 | if (!ac->avail)	/* objects refilled by interrupt? */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2720 | goto retry; | 
|  | 2721 | } | 
|  | 2722 | ac->touched = 1; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2723 | return ac->entry[--ac->avail]; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2724 | } | 
|  | 2725 |  | 
|  | 2726 | static inline void | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 2727 | cache_alloc_debugcheck_before(struct kmem_cache *cachep, gfp_t flags) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2728 | { | 
|  | 2729 | might_sleep_if(flags & __GFP_WAIT); | 
|  | 2730 | #if DEBUG | 
|  | 2731 | kmem_flagcheck(cachep, flags); | 
|  | 2732 | #endif | 
|  | 2733 | } | 
|  | 2734 |  | 
|  | 2735 | #if DEBUG | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 2736 | static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, gfp_t flags, | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2737 | void *objp, void *caller) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2738 | { | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2739 | if (!objp) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2740 | return objp; | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2741 | if (cachep->flags & SLAB_POISON) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2742 | #ifdef CONFIG_DEBUG_PAGEALLOC | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 2743 | if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2744 | kernel_map_pages(virt_to_page(objp), | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 2745 | cachep->buffer_size / PAGE_SIZE, 1); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2746 | else | 
|  | 2747 | check_poison_obj(cachep, objp); | 
|  | 2748 | #else | 
|  | 2749 | check_poison_obj(cachep, objp); | 
|  | 2750 | #endif | 
|  | 2751 | poison_obj(cachep, objp, POISON_INUSE); | 
|  | 2752 | } | 
|  | 2753 | if (cachep->flags & SLAB_STORE_USER) | 
|  | 2754 | *dbg_userword(cachep, objp) = caller; | 
|  | 2755 |  | 
|  | 2756 | if (cachep->flags & SLAB_RED_ZONE) { | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2757 | if (*dbg_redzone1(cachep, objp) != RED_INACTIVE | 
|  | 2758 | || *dbg_redzone2(cachep, objp) != RED_INACTIVE) { | 
|  | 2759 | slab_error(cachep, | 
|  | 2760 | "double free, or memory outside" | 
|  | 2761 | " object was overwritten"); | 
|  | 2762 | printk(KERN_ERR | 
|  | 2763 | "%p: redzone 1: 0x%lx, redzone 2: 0x%lx.\n", | 
|  | 2764 | objp, *dbg_redzone1(cachep, objp), | 
|  | 2765 | *dbg_redzone2(cachep, objp)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2766 | } | 
|  | 2767 | *dbg_redzone1(cachep, objp) = RED_ACTIVE; | 
|  | 2768 | *dbg_redzone2(cachep, objp) = RED_ACTIVE; | 
|  | 2769 | } | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 2770 | objp += obj_offset(cachep); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2771 | if (cachep->ctor && cachep->flags & SLAB_POISON) { | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2772 | unsigned long ctor_flags = SLAB_CTOR_CONSTRUCTOR; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2773 |  | 
|  | 2774 | if (!(flags & __GFP_WAIT)) | 
|  | 2775 | ctor_flags |= SLAB_CTOR_ATOMIC; | 
|  | 2776 |  | 
|  | 2777 | cachep->ctor(objp, cachep, ctor_flags); | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2778 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2779 | return objp; | 
|  | 2780 | } | 
|  | 2781 | #else | 
|  | 2782 | #define cache_alloc_debugcheck_after(a,b,objp,d) (objp) | 
|  | 2783 | #endif | 
|  | 2784 |  | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 2785 | static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2786 | { | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2787 | void *objp; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2788 | struct array_cache *ac; | 
|  | 2789 |  | 
| Christoph Lameter | dc85da1 | 2006-01-18 17:42:36 -0800 | [diff] [blame] | 2790 | #ifdef CONFIG_NUMA | 
| Christoph Lameter | 86c562a | 2006-01-18 17:42:37 -0800 | [diff] [blame] | 2791 | if (unlikely(current->mempolicy && !in_interrupt())) { | 
| Christoph Lameter | dc85da1 | 2006-01-18 17:42:36 -0800 | [diff] [blame] | 2792 | int nid = slab_node(current->mempolicy); | 
|  | 2793 |  | 
|  | 2794 | if (nid != numa_node_id()) | 
|  | 2795 | return __cache_alloc_node(cachep, flags, nid); | 
|  | 2796 | } | 
|  | 2797 | #endif | 
|  | 2798 |  | 
| Alok N Kataria | 5c38230 | 2005-09-27 21:45:46 -0700 | [diff] [blame] | 2799 | check_irq_off(); | 
| Pekka Enberg | 9a2dba4 | 2006-02-01 03:05:49 -0800 | [diff] [blame] | 2800 | ac = cpu_cache_get(cachep); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2801 | if (likely(ac->avail)) { | 
|  | 2802 | STATS_INC_ALLOCHIT(cachep); | 
|  | 2803 | ac->touched = 1; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2804 | objp = ac->entry[--ac->avail]; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2805 | } else { | 
|  | 2806 | STATS_INC_ALLOCMISS(cachep); | 
|  | 2807 | objp = cache_alloc_refill(cachep, flags); | 
|  | 2808 | } | 
| Alok N Kataria | 5c38230 | 2005-09-27 21:45:46 -0700 | [diff] [blame] | 2809 | return objp; | 
|  | 2810 | } | 
|  | 2811 |  | 
| Pekka Enberg | 7fd6b14 | 2006-02-01 03:05:52 -0800 | [diff] [blame] | 2812 | static __always_inline void * | 
|  | 2813 | __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller) | 
| Alok N Kataria | 5c38230 | 2005-09-27 21:45:46 -0700 | [diff] [blame] | 2814 | { | 
|  | 2815 | unsigned long save_flags; | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2816 | void *objp; | 
| Alok N Kataria | 5c38230 | 2005-09-27 21:45:46 -0700 | [diff] [blame] | 2817 |  | 
|  | 2818 | cache_alloc_debugcheck_before(cachep, flags); | 
|  | 2819 |  | 
|  | 2820 | local_irq_save(save_flags); | 
|  | 2821 | objp = ____cache_alloc(cachep, flags); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2822 | local_irq_restore(save_flags); | 
| Eric Dumazet | 34342e8 | 2005-09-03 15:55:06 -0700 | [diff] [blame] | 2823 | objp = cache_alloc_debugcheck_after(cachep, flags, objp, | 
| Pekka Enberg | 7fd6b14 | 2006-02-01 03:05:52 -0800 | [diff] [blame] | 2824 | caller); | 
| Eric Dumazet | 34342e8 | 2005-09-03 15:55:06 -0700 | [diff] [blame] | 2825 | prefetchw(objp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2826 | return objp; | 
|  | 2827 | } | 
|  | 2828 |  | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2829 | #ifdef CONFIG_NUMA | 
|  | 2830 | /* | 
|  | 2831 | * An interface to enable slab creation on nodeid | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2832 | */ | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 2833 | static void *__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2834 | { | 
|  | 2835 | struct list_head *entry; | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2836 | struct slab *slabp; | 
|  | 2837 | struct kmem_list3 *l3; | 
|  | 2838 | void *obj; | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2839 | int x; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2840 |  | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2841 | l3 = cachep->nodelists[nodeid]; | 
|  | 2842 | BUG_ON(!l3); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2843 |  | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2844 | retry: | 
| Ravikiran G Thirumalai | ca3b9b9 | 2006-02-04 23:27:58 -0800 | [diff] [blame] | 2845 | check_irq_off(); | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2846 | spin_lock(&l3->list_lock); | 
|  | 2847 | entry = l3->slabs_partial.next; | 
|  | 2848 | if (entry == &l3->slabs_partial) { | 
|  | 2849 | l3->free_touched = 1; | 
|  | 2850 | entry = l3->slabs_free.next; | 
|  | 2851 | if (entry == &l3->slabs_free) | 
|  | 2852 | goto must_grow; | 
|  | 2853 | } | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2854 |  | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2855 | slabp = list_entry(entry, struct slab, list); | 
|  | 2856 | check_spinlock_acquired_node(cachep, nodeid); | 
|  | 2857 | check_slabp(cachep, slabp); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2858 |  | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2859 | STATS_INC_NODEALLOCS(cachep); | 
|  | 2860 | STATS_INC_ACTIVE(cachep); | 
|  | 2861 | STATS_SET_HIGH(cachep); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2862 |  | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2863 | BUG_ON(slabp->inuse == cachep->num); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2864 |  | 
| Matthew Dobson | 78d382d | 2006-02-01 03:05:47 -0800 | [diff] [blame] | 2865 | obj = slab_get_obj(cachep, slabp, nodeid); | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2866 | check_slabp(cachep, slabp); | 
|  | 2867 | l3->free_objects--; | 
|  | 2868 | /* move slabp to correct slabp list: */ | 
|  | 2869 | list_del(&slabp->list); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2870 |  | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2871 | if (slabp->free == BUFCTL_END) { | 
|  | 2872 | list_add(&slabp->list, &l3->slabs_full); | 
|  | 2873 | } else { | 
|  | 2874 | list_add(&slabp->list, &l3->slabs_partial); | 
|  | 2875 | } | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2876 |  | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2877 | spin_unlock(&l3->list_lock); | 
|  | 2878 | goto done; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2879 |  | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2880 | must_grow: | 
|  | 2881 | spin_unlock(&l3->list_lock); | 
|  | 2882 | x = cache_grow(cachep, flags, nodeid); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2883 |  | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2884 | if (!x) | 
|  | 2885 | return NULL; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2886 |  | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2887 | goto retry; | 
|  | 2888 | done: | 
|  | 2889 | return obj; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2890 | } | 
|  | 2891 | #endif | 
|  | 2892 |  | 
|  | 2893 | /* | 
|  | 2894 | * Caller needs to acquire correct kmem_list's list_lock | 
|  | 2895 | */ | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 2896 | static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects, | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2897 | int node) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2898 | { | 
|  | 2899 | int i; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2900 | struct kmem_list3 *l3; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2901 |  | 
|  | 2902 | for (i = 0; i < nr_objects; i++) { | 
|  | 2903 | void *objp = objpp[i]; | 
|  | 2904 | struct slab *slabp; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2905 |  | 
| Pekka Enberg | 6ed5eb221 | 2006-02-01 03:05:49 -0800 | [diff] [blame] | 2906 | slabp = virt_to_slab(objp); | 
| Christoph Lameter | ff69416 | 2005-09-22 21:44:02 -0700 | [diff] [blame] | 2907 | l3 = cachep->nodelists[node]; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2908 | list_del(&slabp->list); | 
| Christoph Lameter | ff69416 | 2005-09-22 21:44:02 -0700 | [diff] [blame] | 2909 | check_spinlock_acquired_node(cachep, node); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2910 | check_slabp(cachep, slabp); | 
| Matthew Dobson | 78d382d | 2006-02-01 03:05:47 -0800 | [diff] [blame] | 2911 | slab_put_obj(cachep, slabp, objp, node); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2912 | STATS_DEC_ACTIVE(cachep); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2913 | l3->free_objects++; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2914 | check_slabp(cachep, slabp); | 
|  | 2915 |  | 
|  | 2916 | /* fixup slab chains */ | 
|  | 2917 | if (slabp->inuse == 0) { | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2918 | if (l3->free_objects > l3->free_limit) { | 
|  | 2919 | l3->free_objects -= cachep->num; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2920 | slab_destroy(cachep, slabp); | 
|  | 2921 | } else { | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2922 | list_add(&slabp->list, &l3->slabs_free); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2923 | } | 
|  | 2924 | } else { | 
|  | 2925 | /* Unconditionally move a slab to the end of the | 
|  | 2926 | * partial list on free - maximum time for the | 
|  | 2927 | * other objects to be freed, too. | 
|  | 2928 | */ | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2929 | list_add_tail(&slabp->list, &l3->slabs_partial); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2930 | } | 
|  | 2931 | } | 
|  | 2932 | } | 
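|  |  |  | 
|  |  | /* | 
|  |  |  * Locking sketch for free_block() (illustrative only): callers hold | 
|  |  |  * the node's list_lock with local interrupts disabled, exactly as | 
|  |  |  * cache_flusharray() below does: | 
|  |  |  * | 
|  |  |  *	spin_lock(&l3->list_lock); | 
|  |  |  *	free_block(cachep, ac->entry, batchcount, node); | 
|  |  |  *	spin_unlock(&l3->list_lock); | 
|  |  |  */ | 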
|  | 2933 |  | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 2934 | static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2935 | { | 
|  | 2936 | int batchcount; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2937 | struct kmem_list3 *l3; | 
| Christoph Lameter | ff69416 | 2005-09-22 21:44:02 -0700 | [diff] [blame] | 2938 | int node = numa_node_id(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2939 |  | 
|  | 2940 | batchcount = ac->batchcount; | 
|  | 2941 | #if DEBUG | 
|  | 2942 | BUG_ON(!batchcount || batchcount > ac->avail); | 
|  | 2943 | #endif | 
|  | 2944 | check_irq_off(); | 
| Christoph Lameter | ff69416 | 2005-09-22 21:44:02 -0700 | [diff] [blame] | 2945 | l3 = cachep->nodelists[node]; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2946 | spin_lock(&l3->list_lock); | 
|  | 2947 | if (l3->shared) { | 
|  | 2948 | struct array_cache *shared_array = l3->shared; | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2949 | int max = shared_array->limit - shared_array->avail; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2950 | if (max) { | 
|  | 2951 | if (batchcount > max) | 
|  | 2952 | batchcount = max; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2953 | memcpy(&(shared_array->entry[shared_array->avail]), | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2954 | ac->entry, sizeof(void *) * batchcount); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2955 | shared_array->avail += batchcount; | 
|  | 2956 | goto free_done; | 
|  | 2957 | } | 
|  | 2958 | } | 
|  | 2959 |  | 
| Christoph Lameter | ff69416 | 2005-09-22 21:44:02 -0700 | [diff] [blame] | 2960 | free_block(cachep, ac->entry, batchcount, node); | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2961 | free_done: | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2962 | #if STATS | 
|  | 2963 | { | 
|  | 2964 | int i = 0; | 
|  | 2965 | struct list_head *p; | 
|  | 2966 |  | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2967 | p = l3->slabs_free.next; | 
|  | 2968 | while (p != &(l3->slabs_free)) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2969 | struct slab *slabp; | 
|  | 2970 |  | 
|  | 2971 | slabp = list_entry(p, struct slab, list); | 
|  | 2972 | BUG_ON(slabp->inuse); | 
|  | 2973 |  | 
|  | 2974 | i++; | 
|  | 2975 | p = p->next; | 
|  | 2976 | } | 
|  | 2977 | STATS_SET_FREEABLE(cachep, i); | 
|  | 2978 | } | 
|  | 2979 | #endif | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2980 | spin_unlock(&l3->list_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2981 | ac->avail -= batchcount; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 2982 | memmove(ac->entry, &(ac->entry[batchcount]), | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 2983 | sizeof(void *) * ac->avail); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2984 | } | 
|  | 2985 |  | 
|  | 2986 | /* | 
|  | 2987 | * __cache_free | 
|  | 2988 | * Release an obj back to its cache. If the obj has a constructed | 
|  | 2989 | * state, it must be in this state _before_ it is released. | 
|  | 2990 | * | 
|  | 2991 | * Called with local interrupts disabled. | 
|  | 2992 | */ | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 2993 | static inline void __cache_free(struct kmem_cache *cachep, void *objp) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2994 | { | 
| Pekka Enberg | 9a2dba4 | 2006-02-01 03:05:49 -0800 | [diff] [blame] | 2995 | struct array_cache *ac = cpu_cache_get(cachep); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2996 |  | 
|  | 2997 | check_irq_off(); | 
|  | 2998 | objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0)); | 
|  | 2999 |  | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3000 | /* Make sure we are not freeing an object from another | 
|  | 3001 | * node to the array cache on this cpu. | 
|  | 3002 | */ | 
|  | 3003 | #ifdef CONFIG_NUMA | 
|  | 3004 | { | 
|  | 3005 | struct slab *slabp; | 
| Pekka Enberg | 6ed5eb221 | 2006-02-01 03:05:49 -0800 | [diff] [blame] | 3006 | slabp = virt_to_slab(objp); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3007 | if (unlikely(slabp->nodeid != numa_node_id())) { | 
|  | 3008 | struct array_cache *alien = NULL; | 
|  | 3009 | int nodeid = slabp->nodeid; | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3010 | struct kmem_list3 *l3 = | 
|  | 3011 | cachep->nodelists[numa_node_id()]; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3012 |  | 
|  | 3013 | STATS_INC_NODEFREES(cachep); | 
|  | 3014 | if (l3->alien && l3->alien[nodeid]) { | 
|  | 3015 | alien = l3->alien[nodeid]; | 
|  | 3016 | spin_lock(&alien->lock); | 
|  | 3017 | if (unlikely(alien->avail == alien->limit)) | 
|  | 3018 | __drain_alien_cache(cachep, | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3019 | alien, nodeid); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3020 | alien->entry[alien->avail++] = objp; | 
|  | 3021 | spin_unlock(&alien->lock); | 
|  | 3022 | } else { | 
|  | 3023 | spin_lock(&(cachep->nodelists[nodeid])-> | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3024 | list_lock); | 
| Christoph Lameter | ff69416 | 2005-09-22 21:44:02 -0700 | [diff] [blame] | 3025 | free_block(cachep, &objp, 1, nodeid); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3026 | spin_unlock(&(cachep->nodelists[nodeid])-> | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3027 | list_lock); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3028 | } | 
|  | 3029 | return; | 
|  | 3030 | } | 
|  | 3031 | } | 
|  | 3032 | #endif | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3033 | if (likely(ac->avail < ac->limit)) { | 
|  | 3034 | STATS_INC_FREEHIT(cachep); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3035 | ac->entry[ac->avail++] = objp; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3036 | return; | 
|  | 3037 | } else { | 
|  | 3038 | STATS_INC_FREEMISS(cachep); | 
|  | 3039 | cache_flusharray(cachep, ac); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3040 | ac->entry[ac->avail++] = objp; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3041 | } | 
|  | 3042 | } | 
|  | 3043 |  | 
|  | 3044 | /** | 
|  | 3045 | * kmem_cache_alloc - Allocate an object | 
|  | 3046 | * @cachep: The cache to allocate from. | 
|  | 3047 | * @flags: See kmalloc(). | 
|  | 3048 | * | 
|  | 3049 | * Allocate an object from this cache.  The flags are only relevant | 
|  | 3050 | * if the cache has no available objects. | 
|  | 3051 | */ | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 3052 | void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3053 | { | 
| Pekka Enberg | 7fd6b14 | 2006-02-01 03:05:52 -0800 | [diff] [blame] | 3054 | return __cache_alloc(cachep, flags, __builtin_return_address(0)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3055 | } | 
|  | 3056 | EXPORT_SYMBOL(kmem_cache_alloc); | 
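|  |  |  | 
|  |  | /* | 
|  |  |  * Usage sketch (illustrative, not part of slab.c): a typical | 
|  |  |  * allocate/free cycle against a dedicated cache.  'struct foo' and | 
|  |  |  * 'foo_cache' are hypothetical names. | 
|  |  |  * | 
|  |  |  *	static struct kmem_cache *foo_cache; | 
|  |  |  * | 
|  |  |  *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0, | 
|  |  |  *				      SLAB_HWCACHE_ALIGN, NULL, NULL); | 
|  |  |  *	if (foo_cache) { | 
|  |  |  *		struct foo *p = kmem_cache_alloc(foo_cache, GFP_KERNEL); | 
|  |  |  * | 
|  |  |  *		if (p) | 
|  |  |  *			kmem_cache_free(foo_cache, p); | 
|  |  |  *	} | 
|  |  |  */ | 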
|  | 3057 |  | 
|  | 3058 | /** | 
|  | 3059 | * kmem_ptr_validate - check if an untrusted pointer might | 
|  | 3060 | *	be a slab entry. | 
|  | 3061 | * @cachep: the cache we're checking against | 
|  | 3062 | * @ptr: pointer to validate | 
|  | 3063 | * | 
|  | 3064 | * This verifies that the untrusted pointer looks sane: | 
|  | 3065 | * it is _not_ a guarantee that the pointer is actually | 
|  | 3066 | * part of the slab cache in question, but it at least | 
|  | 3067 | * validates that the pointer can be dereferenced and | 
|  | 3068 | * looks half-way sane. | 
|  | 3069 | * | 
|  | 3070 | * Currently only used for dentry validation. | 
|  | 3071 | */ | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 3072 | int fastcall kmem_ptr_validate(struct kmem_cache *cachep, void *ptr) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3073 | { | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3074 | unsigned long addr = (unsigned long)ptr; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3075 | unsigned long min_addr = PAGE_OFFSET; | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3076 | unsigned long align_mask = BYTES_PER_WORD - 1; | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 3077 | unsigned long size = cachep->buffer_size; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3078 | struct page *page; | 
|  | 3079 |  | 
|  | 3080 | if (unlikely(addr < min_addr)) | 
|  | 3081 | goto out; | 
|  | 3082 | if (unlikely(addr > (unsigned long)high_memory - size)) | 
|  | 3083 | goto out; | 
|  | 3084 | if (unlikely(addr & align_mask)) | 
|  | 3085 | goto out; | 
|  | 3086 | if (unlikely(!kern_addr_valid(addr))) | 
|  | 3087 | goto out; | 
|  | 3088 | if (unlikely(!kern_addr_valid(addr + size - 1))) | 
|  | 3089 | goto out; | 
|  | 3090 | page = virt_to_page(ptr); | 
|  | 3091 | if (unlikely(!PageSlab(page))) | 
|  | 3092 | goto out; | 
| Pekka Enberg | 065d41c | 2005-11-13 16:06:46 -0800 | [diff] [blame] | 3093 | if (unlikely(page_get_cache(page) != cachep)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3094 | goto out; | 
|  | 3095 | return 1; | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3096 | out: | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3097 | return 0; | 
|  | 3098 | } | 
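|  |  |  | 
|  |  | /* | 
|  |  |  * Usage sketch (illustrative): a lockless reader can sanity-check an | 
|  |  |  * unreferenced pointer before touching it, as the dcache does with | 
|  |  |  * dentry_cache.  Remember the result is a plausibility check only: | 
|  |  |  * | 
|  |  |  *	if (!kmem_ptr_validate(dentry_cache, ptr)) | 
|  |  |  *		return NULL; | 
|  |  |  */ | 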
|  | 3099 |  | 
|  | 3100 | #ifdef CONFIG_NUMA | 
|  | 3101 | /** | 
|  | 3102 | * kmem_cache_alloc_node - Allocate an object on the specified node | 
|  | 3103 | * @cachep: The cache to allocate from. | 
|  | 3104 | * @flags: See kmalloc(). | 
|  | 3105 | * @nodeid: node number of the target node. | 
|  | 3106 | * | 
|  | 3107 | * Identical to kmem_cache_alloc, except that this function is slow | 
|  | 3108 | * and can sleep.  It allocates memory on the given node, which | 
|  | 3109 | * can improve performance for cpu-bound structures. | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3110 | * It also makes sure that the object gets | 
|  | 3111 | * put on the correct node list so that there is no false sharing. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3112 | */ | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 3113 | void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3114 | { | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3115 | unsigned long save_flags; | 
|  | 3116 | void *ptr; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3117 |  | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3118 | cache_alloc_debugcheck_before(cachep, flags); | 
|  | 3119 | local_irq_save(save_flags); | 
| Christoph Lameter | 18f820f | 2006-02-01 03:05:43 -0800 | [diff] [blame] | 3120 |  | 
|  | 3121 | if (nodeid == -1 || nodeid == numa_node_id() || | 
|  | 3122 | !cachep->nodelists[nodeid]) | 
| Alok N Kataria | 5c38230 | 2005-09-27 21:45:46 -0700 | [diff] [blame] | 3123 | ptr = ____cache_alloc(cachep, flags); | 
|  | 3124 | else | 
|  | 3125 | ptr = __cache_alloc_node(cachep, flags, nodeid); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3126 | local_irq_restore(save_flags); | 
| Christoph Lameter | 18f820f | 2006-02-01 03:05:43 -0800 | [diff] [blame] | 3127 |  | 
|  | 3128 | ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, | 
|  | 3129 | __builtin_return_address(0)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3130 |  | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3131 | return ptr; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3132 | } | 
|  | 3133 | EXPORT_SYMBOL(kmem_cache_alloc_node); | 
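|  |  |  | 
|  |  | /* | 
|  |  |  * Usage sketch (illustrative): place an object on the node a given | 
|  |  |  * cpu belongs to.  'foo_cache' is a hypothetical cache. | 
|  |  |  * | 
|  |  |  *	struct foo *p = kmem_cache_alloc_node(foo_cache, GFP_KERNEL, | 
|  |  |  *					      cpu_to_node(cpu)); | 
|  |  |  */ | 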
|  | 3134 |  | 
| Al Viro | dd0fc66 | 2005-10-07 07:46:04 +0100 | [diff] [blame] | 3135 | void *kmalloc_node(size_t size, gfp_t flags, int node) | 
| Manfred Spraul | 97e2bde | 2005-05-01 08:58:38 -0700 | [diff] [blame] | 3136 | { | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 3137 | struct kmem_cache *cachep; | 
| Manfred Spraul | 97e2bde | 2005-05-01 08:58:38 -0700 | [diff] [blame] | 3138 |  | 
|  | 3139 | cachep = kmem_find_general_cachep(size, flags); | 
|  | 3140 | if (unlikely(cachep == NULL)) | 
|  | 3141 | return NULL; | 
|  | 3142 | return kmem_cache_alloc_node(cachep, flags, node); | 
|  | 3143 | } | 
|  | 3144 | EXPORT_SYMBOL(kmalloc_node); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3145 | #endif | 
|  | 3146 |  | 
|  | 3147 | /** | 
|  | 3148 | * kmalloc - allocate memory | 
|  | 3149 | * @size: how many bytes of memory are required. | 
|  | 3150 | * @flags: the type of memory to allocate. | 
|  | 3151 | * | 
|  | 3152 | * kmalloc is the normal method of allocating memory | 
|  | 3153 | * in the kernel. | 
|  | 3154 | * | 
|  | 3155 | * The @flags argument may be one of: | 
|  | 3156 | * | 
|  | 3157 | * %GFP_USER - Allocate memory on behalf of user.  May sleep. | 
|  | 3158 | * | 
|  | 3159 | * %GFP_KERNEL - Allocate normal kernel ram.  May sleep. | 
|  | 3160 | * | 
|  | 3161 | * %GFP_ATOMIC - Allocation will not sleep.  Use inside interrupt handlers. | 
|  | 3162 | * | 
|  | 3163 | * Additionally, the %GFP_DMA flag may be set to indicate the memory | 
|  | 3164 | * must be suitable for DMA.  This can mean different things on different | 
|  | 3165 | * platforms.  For example, on i386, it means that the memory must come | 
|  | 3166 | * from the first 16MB. | 
|  | 3167 | */ | 
| Pekka Enberg | 7fd6b14 | 2006-02-01 03:05:52 -0800 | [diff] [blame] | 3168 | static __always_inline void *__do_kmalloc(size_t size, gfp_t flags, | 
|  | 3169 | void *caller) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3170 | { | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 3171 | struct kmem_cache *cachep; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3172 |  | 
| Manfred Spraul | 97e2bde | 2005-05-01 08:58:38 -0700 | [diff] [blame] | 3173 | /* If you want to save a few bytes of .text space: replace | 
|  | 3174 | * __ with kmem_. | 
|  | 3175 | * Then kmalloc uses the uninlined functions instead of the inline | 
|  | 3176 | * functions. | 
|  | 3177 | */ | 
|  | 3178 | cachep = __find_general_cachep(size, flags); | 
| Andrew Morton | dbdb904 | 2005-09-23 13:24:10 -0700 | [diff] [blame] | 3179 | if (unlikely(cachep == NULL)) | 
|  | 3180 | return NULL; | 
| Pekka Enberg | 7fd6b14 | 2006-02-01 03:05:52 -0800 | [diff] [blame] | 3181 | return __cache_alloc(cachep, flags, caller); | 
|  | 3182 | } | 
|  | 3183 |  | 
|  | 3184 | #ifndef CONFIG_DEBUG_SLAB | 
|  | 3185 |  | 
|  | 3186 | void *__kmalloc(size_t size, gfp_t flags) | 
|  | 3187 | { | 
|  | 3188 | return __do_kmalloc(size, flags, NULL); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3189 | } | 
|  | 3190 | EXPORT_SYMBOL(__kmalloc); | 
|  | 3191 |  | 
| Pekka Enberg | 7fd6b14 | 2006-02-01 03:05:52 -0800 | [diff] [blame] | 3192 | #else | 
|  | 3193 |  | 
|  | 3194 | void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller) | 
|  | 3195 | { | 
|  | 3196 | return __do_kmalloc(size, flags, caller); | 
|  | 3197 | } | 
|  | 3198 | EXPORT_SYMBOL(__kmalloc_track_caller); | 
|  | 3199 |  | 
|  | 3200 | #endif | 
|  | 3201 |  | 
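|  |  | /* | 
|  |  |  * Usage sketch (illustrative): the common kmalloc()/kfree() pair. | 
|  |  |  * | 
|  |  |  *	struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL); | 
|  |  |  * | 
|  |  |  *	if (!p) | 
|  |  |  *		return -ENOMEM; | 
|  |  |  *	... | 
|  |  |  *	kfree(p); | 
|  |  |  */ | 
|  |  |  | 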
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3202 | #ifdef CONFIG_SMP | 
|  | 3203 | /** | 
|  | 3204 | * __alloc_percpu - allocate one zeroed copy of the object per present cpu | 
|  | 3205 | * @size: how many bytes of memory are required. | 
|  | 3206 | * | 
|  | 3207 | * Objects should be dereferenced using the per_cpu_ptr | 
|  | 3208 | * macro only. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3209 | */ | 
| Pekka Enberg | f9f7500 | 2006-01-08 01:00:33 -0800 | [diff] [blame] | 3210 | void *__alloc_percpu(size_t size) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3211 | { | 
|  | 3212 | int i; | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3213 | struct percpu_data *pdata = kmalloc(sizeof(*pdata), GFP_KERNEL); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3214 |  | 
|  | 3215 | if (!pdata) | 
|  | 3216 | return NULL; | 
|  | 3217 |  | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3218 | /* | 
|  | 3219 | * Cannot use for_each_online_cpu since a cpu may come online | 
|  | 3220 | * later, and we would have no way of fixing up the array | 
|  | 3221 | * we had already allocated by then. | 
|  | 3222 | */ | 
|  | 3223 | for_each_cpu(i) { | 
|  | 3224 | int node = cpu_to_node(i); | 
|  | 3225 |  | 
|  | 3226 | if (node_online(node)) | 
|  | 3227 | pdata->ptrs[i] = kmalloc_node(size, GFP_KERNEL, node); | 
|  | 3228 | else | 
|  | 3229 | pdata->ptrs[i] = kmalloc(size, GFP_KERNEL); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3230 |  | 
|  | 3231 | if (!pdata->ptrs[i]) | 
|  | 3232 | goto unwind_oom; | 
|  | 3233 | memset(pdata->ptrs[i], 0, size); | 
|  | 3234 | } | 
|  | 3235 |  | 
|  | 3236 | /* Catch derefs w/o wrappers */ | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3237 | return (void *)(~(unsigned long)pdata); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3238 |  | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3239 | unwind_oom: | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3240 | while (--i >= 0) { | 
|  | 3241 | if (!cpu_possible(i)) | 
|  | 3242 | continue; | 
|  | 3243 | kfree(pdata->ptrs[i]); | 
|  | 3244 | } | 
|  | 3245 | kfree(pdata); | 
|  | 3246 | return NULL; | 
|  | 3247 | } | 
|  | 3248 | EXPORT_SYMBOL(__alloc_percpu); | 
|  | 3249 | #endif | 
|  | 3250 |  | 
|  | 3251 | /** | 
|  | 3252 | * kmem_cache_free - Deallocate an object | 
|  | 3253 | * @cachep: The cache the allocation was from. | 
|  | 3254 | * @objp: The previously allocated object. | 
|  | 3255 | * | 
|  | 3256 | * Free an object which was previously allocated from this | 
|  | 3257 | * cache. | 
|  | 3258 | */ | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 3259 | void kmem_cache_free(struct kmem_cache *cachep, void *objp) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3260 | { | 
|  | 3261 | unsigned long flags; | 
|  | 3262 |  | 
|  | 3263 | local_irq_save(flags); | 
|  | 3264 | __cache_free(cachep, objp); | 
|  | 3265 | local_irq_restore(flags); | 
|  | 3266 | } | 
|  | 3267 | EXPORT_SYMBOL(kmem_cache_free); | 
|  | 3268 |  | 
|  | 3269 | /** | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3270 | * kfree - free previously allocated memory | 
|  | 3271 | * @objp: pointer returned by kmalloc. | 
|  | 3272 | * | 
| Pekka Enberg | 80e93ef | 2005-09-09 13:10:16 -0700 | [diff] [blame] | 3273 | * If @objp is NULL, no operation is performed. | 
|  | 3274 | * | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3275 | * Don't free memory not originally allocated by kmalloc() | 
|  | 3276 | * or you will run into trouble. | 
|  | 3277 | */ | 
|  | 3278 | void kfree(const void *objp) | 
|  | 3279 | { | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 3280 | struct kmem_cache *c; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3281 | unsigned long flags; | 
|  | 3282 |  | 
|  | 3283 | if (unlikely(!objp)) | 
|  | 3284 | return; | 
|  | 3285 | local_irq_save(flags); | 
|  | 3286 | kfree_debugcheck(objp); | 
| Pekka Enberg | 6ed5eb221 | 2006-02-01 03:05:49 -0800 | [diff] [blame] | 3287 | c = virt_to_cache(objp); | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 3288 | mutex_debug_check_no_locks_freed(objp, obj_size(c)); | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3289 | __cache_free(c, (void *)objp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3290 | local_irq_restore(flags); | 
|  | 3291 | } | 
|  | 3292 | EXPORT_SYMBOL(kfree); | 
|  | 3293 |  | 
|  | 3294 | #ifdef CONFIG_SMP | 
|  | 3295 | /** | 
|  | 3296 | * free_percpu - free previously allocated percpu memory | 
|  | 3297 | * @objp: pointer returned by alloc_percpu. | 
|  | 3298 | * | 
|  | 3299 | * Don't free memory not originally allocated by alloc_percpu(); | 
|  | 3300 | * the complemented @objp is used to check for that. | 
|  | 3301 | */ | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3302 | void free_percpu(const void *objp) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3303 | { | 
|  | 3304 | int i; | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3305 | struct percpu_data *p = (struct percpu_data *)(~(unsigned long)objp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3306 |  | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3307 | /* | 
|  | 3308 | * We allocate for all cpus, so we cannot use for_each_online_cpu() here. | 
|  | 3309 | */ | 
|  | 3310 | for_each_cpu(i) | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3311 | kfree(p->ptrs[i]); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3312 | kfree(p); | 
|  | 3313 | } | 
|  | 3314 | EXPORT_SYMBOL(free_percpu); | 
|  | 3315 | #endif | 
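|  |  |  | 
|  |  | /* | 
|  |  |  * Usage sketch for the percpu pair above (illustrative), assuming the | 
|  |  |  * usual alloc_percpu()/per_cpu_ptr() wrappers from <linux/percpu.h> | 
|  |  |  * that hide the pointer complementing: | 
|  |  |  * | 
|  |  |  *	long *counters = alloc_percpu(long); | 
|  |  |  * | 
|  |  |  *	if (counters) { | 
|  |  |  *		int cpu = get_cpu(); | 
|  |  |  * | 
|  |  |  *		(*per_cpu_ptr(counters, cpu))++; | 
|  |  |  *		put_cpu(); | 
|  |  |  *		free_percpu(counters); | 
|  |  |  *	} | 
|  |  |  */ | 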
|  | 3316 |  | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 3317 | unsigned int kmem_cache_size(struct kmem_cache *cachep) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3318 | { | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 3319 | return obj_size(cachep); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3320 | } | 
|  | 3321 | EXPORT_SYMBOL(kmem_cache_size); | 
|  | 3322 |  | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 3323 | const char *kmem_cache_name(struct kmem_cache *cachep) | 
| Arnaldo Carvalho de Melo | 1944972 | 2005-06-18 22:46:19 -0700 | [diff] [blame] | 3324 | { | 
|  | 3325 | return cachep->name; | 
|  | 3326 | } | 
|  | 3327 | EXPORT_SYMBOL_GPL(kmem_cache_name); | 
|  | 3328 |  | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3329 | /* | 
|  | 3330 | * This initializes kmem_list3 for all nodes. | 
|  | 3331 | */ | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 3332 | static int alloc_kmemlist(struct kmem_cache *cachep) | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3333 | { | 
|  | 3334 | int node; | 
|  | 3335 | struct kmem_list3 *l3; | 
|  | 3336 | int err = 0; | 
|  | 3337 |  | 
|  | 3338 | for_each_online_node(node) { | 
|  | 3339 | struct array_cache *nc = NULL, *new; | 
|  | 3340 | struct array_cache **new_alien = NULL; | 
|  | 3341 | #ifdef CONFIG_NUMA | 
|  | 3342 | if (!(new_alien = alloc_alien_cache(node, cachep->limit))) | 
|  | 3343 | goto fail; | 
|  | 3344 | #endif | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3345 | if (!(new = alloc_arraycache(node, (cachep->shared * | 
|  | 3346 | cachep->batchcount), | 
|  | 3347 | 0xbaadf00d))) | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3348 | goto fail; | 
|  | 3349 | if ((l3 = cachep->nodelists[node])) { | 
|  | 3350 |  | 
|  | 3351 | spin_lock_irq(&l3->list_lock); | 
|  | 3352 |  | 
|  | 3353 | if ((nc = cachep->nodelists[node]->shared)) | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3354 | free_block(cachep, nc->entry, nc->avail, node); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3355 |  | 
|  | 3356 | l3->shared = new; | 
|  | 3357 | if (!cachep->nodelists[node]->alien) { | 
|  | 3358 | l3->alien = new_alien; | 
|  | 3359 | new_alien = NULL; | 
|  | 3360 | } | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3361 | l3->free_limit = (1 + nr_cpus_node(node)) * | 
|  | 3362 | cachep->batchcount + cachep->num; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3363 | spin_unlock_irq(&l3->list_lock); | 
|  | 3364 | kfree(nc); | 
|  | 3365 | free_alien_cache(new_alien); | 
|  | 3366 | continue; | 
|  | 3367 | } | 
|  | 3368 | if (!(l3 = kmalloc_node(sizeof(struct kmem_list3), | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3369 | GFP_KERNEL, node))) | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3370 | goto fail; | 
|  | 3371 |  | 
|  | 3372 | kmem_list3_init(l3); | 
|  | 3373 | l3->next_reap = jiffies + REAPTIMEOUT_LIST3 + | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3374 | ((unsigned long)cachep) % REAPTIMEOUT_LIST3; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3375 | l3->shared = new; | 
|  | 3376 | l3->alien = new_alien; | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3377 | l3->free_limit = (1 + nr_cpus_node(node)) * | 
|  | 3378 | cachep->batchcount + cachep->num; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3379 | cachep->nodelists[node] = l3; | 
|  | 3380 | } | 
|  | 3381 | return err; | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3382 | fail: | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3383 | err = -ENOMEM; | 
|  | 3384 | return err; | 
|  | 3385 | } | 
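|  |  |  | 
|  |  | /* | 
|  |  |  * Worked example for the free_limit set above (hypothetical numbers): | 
|  |  |  * a node with 4 cpus, batchcount 27 and 8 objects per slab keeps up | 
|  |  |  * to (1 + 4) * 27 + 8 = 143 free objects before free_block() starts | 
|  |  |  * destroying empty slabs. | 
|  |  |  */ | 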
|  | 3386 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3387 | struct ccupdate_struct { | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 3388 | struct kmem_cache *cachep; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3389 | struct array_cache *new[NR_CPUS]; | 
|  | 3390 | }; | 
|  | 3391 |  | 
|  | 3392 | static void do_ccupdate_local(void *info) | 
|  | 3393 | { | 
|  | 3394 | struct ccupdate_struct *new = (struct ccupdate_struct *)info; | 
|  | 3395 | struct array_cache *old; | 
|  | 3396 |  | 
|  | 3397 | check_irq_off(); | 
| Pekka Enberg | 9a2dba4 | 2006-02-01 03:05:49 -0800 | [diff] [blame] | 3398 | old = cpu_cache_get(new->cachep); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3399 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3400 | new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()]; | 
|  | 3401 | new->new[smp_processor_id()] = old; | 
|  | 3402 | } | 
|  | 3403 |  | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 3404 | static int do_tune_cpucache(struct kmem_cache *cachep, int limit, int batchcount, | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3405 | int shared) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3406 | { | 
|  | 3407 | struct ccupdate_struct new; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3408 | int i, err; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3409 |  | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3410 | memset(&new.new, 0, sizeof(new.new)); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3411 | for_each_online_cpu(i) { | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3412 | new.new[i] = | 
|  | 3413 | alloc_arraycache(cpu_to_node(i), limit, batchcount); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3414 | if (!new.new[i]) { | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3415 | for (i--; i >= 0; i--) | 
|  | 3416 | kfree(new.new[i]); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3417 | return -ENOMEM; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3418 | } | 
|  | 3419 | } | 
|  | 3420 | new.cachep = cachep; | 
|  | 3421 |  | 
|  | 3422 | smp_call_function_all_cpus(do_ccupdate_local, (void *)&new); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3423 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3424 | check_irq_on(); | 
| Ravikiran G Thirumalai | ca3b9b9 | 2006-02-04 23:27:58 -0800 | [diff] [blame] | 3425 | spin_lock(&cachep->spinlock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3426 | cachep->batchcount = batchcount; | 
|  | 3427 | cachep->limit = limit; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3428 | cachep->shared = shared; | 
| Ravikiran G Thirumalai | ca3b9b9 | 2006-02-04 23:27:58 -0800 | [diff] [blame] | 3429 | spin_unlock(&cachep->spinlock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3430 |  | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3431 | for_each_online_cpu(i) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3432 | struct array_cache *ccold = new.new[i]; | 
|  | 3433 | if (!ccold) | 
|  | 3434 | continue; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3435 | spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock); | 
| Christoph Lameter | ff69416 | 2005-09-22 21:44:02 -0700 | [diff] [blame] | 3436 | free_block(cachep, ccold->entry, ccold->avail, cpu_to_node(i)); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3437 | spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3438 | kfree(ccold); | 
|  | 3439 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3440 |  | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3441 | err = alloc_kmemlist(cachep); | 
|  | 3442 | if (err) { | 
|  | 3443 | printk(KERN_ERR "alloc_kmemlist failed for %s, error %d.\n", | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3444 | cachep->name, -err); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3445 | BUG(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3446 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3447 | return 0; | 
|  | 3448 | } | 
|  | 3449 |  | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 3450 | static void enable_cpucache(struct kmem_cache *cachep) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3451 | { | 
|  | 3452 | int err; | 
|  | 3453 | int limit, shared; | 
|  | 3454 |  | 
|  | 3455 | /* The head array serves three purposes: | 
|  | 3456 | * - create a LIFO ordering, i.e. return objects that are cache-warm | 
|  | 3457 | * - reduce the number of spinlock operations. | 
|  | 3458 | * - reduce the number of linked list operations on the slab and | 
|  | 3459 | *   bufctl chains: array operations are cheaper. | 
|  | 3460 | * The numbers are guessed, we should auto-tune as described by | 
|  | 3461 | * Bonwick. | 
|  | 3462 | */ | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 3463 | if (cachep->buffer_size > 131072) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3464 | limit = 1; | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 3465 | else if (cachep->buffer_size > PAGE_SIZE) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3466 | limit = 8; | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 3467 | else if (cachep->buffer_size > 1024) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3468 | limit = 24; | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 3469 | else if (cachep->buffer_size > 256) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3470 | limit = 54; | 
|  | 3471 | else | 
|  | 3472 | limit = 120; | 
|  | 3473 |  | 
|  | 3474 | /* Cpu bound tasks (e.g. network routing) can exhibit cpu bound | 
|  | 3475 | * allocation behaviour: Most allocs on one cpu, most free operations | 
|  | 3476 | * on another cpu. For these cases, an efficient object passing between | 
|  | 3477 | * cpus is necessary. This is provided by a shared array. The array | 
|  | 3478 | * replaces Bonwick's magazine layer. | 
|  | 3479 | * On uniprocessor, it's functionally equivalent (but less efficient) | 
|  | 3480 | * to a larger limit. Thus disabled by default. | 
|  | 3481 | */ | 
|  | 3482 | shared = 0; | 
|  | 3483 | #ifdef CONFIG_SMP | 
| Manfred Spraul | 3dafccf | 2006-02-01 03:05:42 -0800 | [diff] [blame] | 3484 | if (cachep->buffer_size <= PAGE_SIZE) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3485 | shared = 8; | 
|  | 3486 | #endif | 
|  | 3487 |  | 
|  | 3488 | #if DEBUG | 
|  | 3489 | /* With debugging enabled, a large batchcount leads to excessively | 
|  | 3490 | * long periods with local interrupts disabled. Limit the | 
|  | 3491 | * batchcount. | 
|  | 3492 | */ | 
|  | 3493 | if (limit > 32) | 
|  | 3494 | limit = 32; | 
|  | 3495 | #endif | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3496 | err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3497 | if (err) | 
|  | 3498 | printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n", | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3499 | cachep->name, -err); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3500 | } | 
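|  |  |  | 
|  |  | /* | 
|  |  |  * Worked example for the tuning above: a 512 byte cache falls into | 
|  |  |  * the "> 256" bucket, so limit = 54 and batchcount = (54 + 1) / 2 = | 
|  |  |  * 27; on SMP, since 512 <= PAGE_SIZE, the shared array factor is 8. | 
|  |  |  */ | 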
|  | 3501 |  | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 3502 | static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac, | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3503 | int force, int node) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3504 | { | 
|  | 3505 | int tofree; | 
|  | 3506 |  | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3507 | check_spinlock_acquired_node(cachep, node); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3508 | if (ac->touched && !force) { | 
|  | 3509 | ac->touched = 0; | 
|  | 3510 | } else if (ac->avail) { | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3511 | tofree = force ? ac->avail : (ac->limit + 4) / 5; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3512 | if (tofree > ac->avail) { | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3513 | tofree = (ac->avail + 1) / 2; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3514 | } | 
| Christoph Lameter | ff69416 | 2005-09-22 21:44:02 -0700 | [diff] [blame] | 3515 | free_block(cachep, ac->entry, tofree, node); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3516 | ac->avail -= tofree; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3517 | memmove(ac->entry, &(ac->entry[tofree]), | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3518 | sizeof(void *) * ac->avail); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3519 | } | 
|  | 3520 | } | 
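|  |  |  | 
|  |  | /* | 
|  |  |  * Worked example for drain_array_locked() (hypothetical numbers): | 
|  |  |  * with limit = 120 and an untouched array holding 100 objects, | 
|  |  |  * tofree = (120 + 4) / 5 = 24, so about a fifth of the capacity is | 
|  |  |  * trimmed per call; a forced drain frees all 100. | 
|  |  |  */ | 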
|  | 3521 |  | 
|  | 3522 | /** | 
|  | 3523 | * cache_reap - Reclaim memory from caches. | 
| Randy Dunlap | 1e5d533 | 2005-11-07 01:01:06 -0800 | [diff] [blame] | 3524 | * @unused: unused parameter | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3525 | * | 
|  | 3526 | * Called from workqueue/eventd every few seconds. | 
|  | 3527 | * Purpose: | 
|  | 3528 | * - clear the per-cpu caches for this CPU. | 
|  | 3529 | * - return freeable pages to the main free memory pool. | 
|  | 3530 | * | 
| Ingo Molnar | fc0abb1 | 2006-01-18 17:42:33 -0800 | [diff] [blame] | 3531 | * If we cannot acquire the cache chain mutex then just give up - we'll | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3532 | * try again on the next iteration. | 
|  | 3533 | */ | 
|  | 3534 | static void cache_reap(void *unused) | 
|  | 3535 | { | 
|  | 3536 | struct list_head *walk; | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3537 | struct kmem_list3 *l3; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3538 |  | 
| Ingo Molnar | fc0abb1 | 2006-01-18 17:42:33 -0800 | [diff] [blame] | 3539 | if (!mutex_trylock(&cache_chain_mutex)) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3540 | /* Give up. Setup the next iteration. */ | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3541 | schedule_delayed_work(&__get_cpu_var(reap_work), | 
|  | 3542 | REAPTIMEOUT_CPUC); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3543 | return; | 
|  | 3544 | } | 
|  | 3545 |  | 
|  | 3546 | list_for_each(walk, &cache_chain) { | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 3547 | struct kmem_cache *searchp; | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3548 | struct list_head *p; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3549 | int tofree; | 
|  | 3550 | struct slab *slabp; | 
|  | 3551 |  | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 3552 | searchp = list_entry(walk, struct kmem_cache, next); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3553 |  | 
|  | 3554 | if (searchp->flags & SLAB_NO_REAP) | 
|  | 3555 | goto next; | 
|  | 3556 |  | 
|  | 3557 | check_irq_on(); | 
|  | 3558 |  | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3559 | l3 = searchp->nodelists[numa_node_id()]; | 
| Christoph Lameter | 8fce4d8 | 2006-03-09 17:33:54 -0800 | [diff] [blame] | 3560 | reap_alien(searchp, l3); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3561 | spin_lock_irq(&l3->list_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3562 |  | 
| Pekka Enberg | 9a2dba4 | 2006-02-01 03:05:49 -0800 | [diff] [blame] | 3563 | drain_array_locked(searchp, cpu_cache_get(searchp), 0, | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3564 | numa_node_id()); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3565 |  | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3566 | if (time_after(l3->next_reap, jiffies)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3567 | goto next_unlock; | 
|  | 3568 |  | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3569 | l3->next_reap = jiffies + REAPTIMEOUT_LIST3; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3570 |  | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3571 | if (l3->shared) | 
|  | 3572 | drain_array_locked(searchp, l3->shared, 0, | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3573 | numa_node_id()); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3574 |  | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3575 | if (l3->free_touched) { | 
|  | 3576 | l3->free_touched = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3577 | goto next_unlock; | 
|  | 3578 | } | 
|  | 3579 |  | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3580 | tofree = | 
|  | 3581 | (l3->free_limit + 5 * searchp->num - | 
|  | 3582 | 1) / (5 * searchp->num); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3583 | do { | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3584 | p = l3->slabs_free.next; | 
|  | 3585 | if (p == &(l3->slabs_free)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3586 | break; | 
|  | 3587 |  | 
|  | 3588 | slabp = list_entry(p, struct slab, list); | 
|  | 3589 | BUG_ON(slabp->inuse); | 
|  | 3590 | list_del(&slabp->list); | 
|  | 3591 | STATS_INC_REAPED(searchp); | 
|  | 3592 |  | 
|  | 3593 | /* Safe to drop the lock. The slab is no longer | 
|  | 3594 | * linked to the cache. | 
|  | 3595 | * searchp cannot disappear, we hold | 
|  | 3596 | * cache_chain_mutex | 
|  | 3597 | */ | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3598 | l3->free_objects -= searchp->num; | 
|  | 3599 | spin_unlock_irq(&l3->list_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3600 | slab_destroy(searchp, slabp); | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3601 | spin_lock_irq(&l3->list_lock); | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3602 | } while (--tofree > 0); | 
|  | 3603 | next_unlock: | 
| Christoph Lameter | e498be7 | 2005-09-09 13:03:32 -0700 | [diff] [blame] | 3604 | spin_unlock_irq(&l3->list_lock); | 
| Pekka Enberg | b28a02d | 2006-01-08 01:00:37 -0800 | [diff] [blame] | 3605 | next: | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3606 | cond_resched(); | 
|  | 3607 | } | 
|  | 3608 | check_irq_on(); | 
| Ingo Molnar | fc0abb1 | 2006-01-18 17:42:33 -0800 | [diff] [blame] | 3609 | mutex_unlock(&cache_chain_mutex); | 
| Christoph Lameter | 8fce4d8 | 2006-03-09 17:33:54 -0800 | [diff] [blame] | 3610 | next_reap_node(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3611 | /* Setup the next iteration */ | 
| Manfred Spraul | cd61ef6 | 2005-11-07 00:58:02 -0800 | [diff] [blame] | 3612 | schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3613 | } | 
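|  |  |  | 
|  |  | /* | 
|  |  |  * Worked example for the reap pacing above (hypothetical numbers): | 
|  |  |  * with free_limit 143 and 8 objects per slab, | 
|  |  |  * tofree = (143 + 5*8 - 1) / (5*8) = 4, so at most 4 empty slabs | 
|  |  |  * are destroyed per pass, keeping reclaim gradual. | 
|  |  |  */ | 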
|  | 3614 |  | 
|  | 3615 | #ifdef CONFIG_PROC_FS | 
|  | 3616 |  | 
| Pekka Enberg | 85289f9 | 2006-01-08 01:00:36 -0800 | [diff] [blame] | 3617 | static void print_slabinfo_header(struct seq_file *m) | 
|  | 3618 | { | 
|  | 3619 | /* | 
|  | 3620 | * Output format version, so at least we can change it | 
|  | 3621 | * without _too_ many complaints. | 
|  | 3622 | */ | 
|  | 3623 | #if STATS | 
|  | 3624 | seq_puts(m, "slabinfo - version: 2.1 (statistics)\n"); | 
|  | 3625 | #else | 
|  | 3626 | seq_puts(m, "slabinfo - version: 2.1\n"); | 
|  | 3627 | #endif | 
|  | 3628 | seq_puts(m, "# name            <active_objs> <num_objs> <objsize> " | 
|  | 3629 | "<objperslab> <pagesperslab>"); | 
|  | 3630 | seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>"); | 
|  | 3631 | seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>"); | 
|  | 3632 | #if STATS | 
|  | 3633 | seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> " | 
|  | 3634 | "<error> <maxfreeable> <nodeallocs> <remotefrees>"); | 
|  | 3635 | seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>"); | 
|  | 3636 | #endif | 
|  | 3637 | seq_putc(m, '\n'); | 
|  | 3638 | } | 
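|  |  |  | 
|  |  | /* | 
|  |  |  * For reference, the non-STATS header printed above comes out as two | 
|  |  |  * lines, the second wrapped here: | 
|  |  |  * | 
|  |  |  *	slabinfo - version: 2.1 | 
|  |  |  *	# name            <active_objs> <num_objs> <objsize> | 
|  |  |  *	  <objperslab> <pagesperslab> : tunables <limit> <batchcount> | 
|  |  |  *	  <sharedfactor> : slabdata <active_slabs> <num_slabs> | 
|  |  |  *	  <sharedavail> | 
|  |  |  */ | 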
|  | 3639 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3640 | static void *s_start(struct seq_file *m, loff_t *pos) | 
|  | 3641 | { | 
|  | 3642 | loff_t n = *pos; | 
|  | 3643 | struct list_head *p; | 
|  | 3644 |  | 
| Ingo Molnar | fc0abb1 | 2006-01-18 17:42:33 -0800 | [diff] [blame] | 3645 | mutex_lock(&cache_chain_mutex); | 
| Pekka Enberg | 85289f9 | 2006-01-08 01:00:36 -0800 | [diff] [blame] | 3646 | if (!n) | 
|  | 3647 | print_slabinfo_header(m); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3648 | p = cache_chain.next; | 
|  | 3649 | while (n--) { | 
|  | 3650 | p = p->next; | 
|  | 3651 | if (p == &cache_chain) | 
|  | 3652 | return NULL; | 
|  | 3653 | } | 
| Pekka Enberg | 343e0d7 | 2006-02-01 03:05:50 -0800 | [diff] [blame] | 3654 | return list_entry(p, struct kmem_cache, next); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3655 | } | 

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct kmem_cache *cachep = p;

	++*pos;
	return cachep->next.next == &cache_chain ? NULL
		: list_entry(cachep->next.next, struct kmem_cache, next);
}

static void s_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&cache_chain_mutex);
}

static int s_show(struct seq_file *m, void *p)
{
	struct kmem_cache *cachep = p;
	struct list_head *q;
	struct slab *slabp;
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs = 0;
	unsigned long num_slabs, free_objects = 0, shared_avail = 0;
	const char *name;
	char *error = NULL;
	int node;
	struct kmem_list3 *l3;

	spin_lock(&cachep->spinlock);
	active_objs = 0;
	num_slabs = 0;
	for_each_online_node(node) {
		l3 = cachep->nodelists[node];
		if (!l3)
			continue;

		check_irq_on();
		spin_lock_irq(&l3->list_lock);

		list_for_each(q, &l3->slabs_full) {
			slabp = list_entry(q, struct slab, list);
			if (slabp->inuse != cachep->num && !error)
				error = "slabs_full accounting error";
			active_objs += cachep->num;
			active_slabs++;
		}
		list_for_each(q, &l3->slabs_partial) {
			slabp = list_entry(q, struct slab, list);
			if (slabp->inuse == cachep->num && !error)
				error = "slabs_partial inuse accounting error";
			if (!slabp->inuse && !error)
				error = "slabs_partial/inuse accounting error";
			active_objs += slabp->inuse;
			active_slabs++;
		}
		list_for_each(q, &l3->slabs_free) {
			slabp = list_entry(q, struct slab, list);
			if (slabp->inuse && !error)
				error = "slabs_free/inuse accounting error";
			num_slabs++;
		}
		free_objects += l3->free_objects;
		if (l3->shared)
			shared_avail += l3->shared->avail;

		spin_unlock_irq(&l3->list_lock);
	}
	num_slabs += active_slabs;
	num_objs = num_slabs * cachep->num;
	if (num_objs - active_objs != free_objects && !error)
		error = "free_objects accounting error";

	name = cachep->name;
	if (error)
		printk(KERN_ERR "slab: cache %s error: %s\n", name, error);

	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
		   name, active_objs, num_objs, cachep->buffer_size,
		   cachep->num, (1 << cachep->gfporder));
	seq_printf(m, " : tunables %4u %4u %4u",
		   cachep->limit, cachep->batchcount, cachep->shared);
	seq_printf(m, " : slabdata %6lu %6lu %6lu",
		   active_slabs, num_slabs, shared_avail);
#if STATS
	{	/* list3 stats */
		unsigned long high = cachep->high_mark;
		unsigned long allocs = cachep->num_allocations;
		unsigned long grown = cachep->grown;
		unsigned long reaped = cachep->reaped;
		unsigned long errors = cachep->errors;
		unsigned long max_freeable = cachep->max_freeable;
		unsigned long node_allocs = cachep->node_allocs;
		unsigned long node_frees = cachep->node_frees;

		seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu "
			   "%4lu %4lu %4lu %4lu",
			   allocs, high, grown, reaped, errors,
			   max_freeable, node_allocs, node_frees);
	}
	/* cpu stats */
	{
		unsigned long allochit = atomic_read(&cachep->allochit);
		unsigned long allocmiss = atomic_read(&cachep->allocmiss);
		unsigned long freehit = atomic_read(&cachep->freehit);
		unsigned long freemiss = atomic_read(&cachep->freemiss);

		seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
			   allochit, allocmiss, freehit, freemiss);
	}
#endif
	seq_putc(m, '\n');
	spin_unlock(&cachep->spinlock);
	return 0;
}

/*
 * slabinfo_op - iterator that generates /proc/slabinfo
 *
 * Output layout:
 * cache-name
 * num-active-objs
 * total-objs
 * object size
 * num-objs-per-slab
 * num-pages-per-slab
 * + further values on SMP and with statistics enabled
 */

struct seq_operations slabinfo_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};
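
/*
 * Illustrative sketch, not part of slab.c: slabinfo_op and
 * slabinfo_write() are wired up to /proc/slabinfo from fs/proc (in this
 * kernel generation, fs/proc/proc_misc.c).  The glue is roughly the
 * following; the names slabinfo_open and proc_slabinfo_operations are
 * reproduced from memory and may differ:
 */
static int slabinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &slabinfo_op);
}

static struct file_operations proc_slabinfo_operations = {
	.open		= slabinfo_open,
	.read		= seq_read,
	.write		= slabinfo_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};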

#define MAX_SLABINFO_WRITE 128
/**
 * slabinfo_write - Tuning for the slab allocator
 * @file: unused
 * @buffer: user buffer
 * @count: data length
 * @ppos: unused
 */
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos)
{
	char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
	int limit, batchcount, shared, res;
	struct list_head *p;

	if (count > MAX_SLABINFO_WRITE)
		return -EINVAL;
	if (copy_from_user(kbuf, buffer, count))
		return -EFAULT;
	kbuf[MAX_SLABINFO_WRITE] = '\0';

	tmp = strchr(kbuf, ' ');
	if (!tmp)
		return -EINVAL;
	*tmp = '\0';
	tmp++;
	if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
		return -EINVAL;

	/* Find the cache in the chain of caches. */
	mutex_lock(&cache_chain_mutex);
	res = -EINVAL;
	list_for_each(p, &cache_chain) {
		struct kmem_cache *cachep = list_entry(p, struct kmem_cache,
						       next);

		if (!strcmp(cachep->name, kbuf)) {
			if (limit < 1 || batchcount < 1 ||
			    batchcount > limit || shared < 0) {
				res = 0;
			} else {
				res = do_tune_cpucache(cachep, limit,
						       batchcount, shared);
			}
			break;
		}
	}
	mutex_unlock(&cache_chain_mutex);
	if (res >= 0)
		res = count;
	return res;
}
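
/*
 * For illustration (cache name and numbers are made up): the tunables of
 * a live cache can be adjusted from user space with
 *
 *	echo "dentry_cache 120 60 8" > /proc/slabinfo
 *
 * which slabinfo_write() parses as <name> <limit> <batchcount> <shared>
 * and hands to do_tune_cpucache().
 */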
#endif

/**
 * ksize - get the actual amount of memory allocated for a given object
 * @objp: Pointer to the object
 *
 * kmalloc may internally round up allocations and return more memory
 * than requested. ksize() can be used to determine the actual amount of
 * memory allocated. The caller may use this additional memory, even though
 * a smaller amount of memory was initially specified with the kmalloc call.
 * The caller must guarantee that objp points to a valid object previously
 * allocated with either kmalloc() or kmem_cache_alloc(). The object
 * must not be freed for the duration of the call.
 */
unsigned int ksize(const void *objp)
{
	if (unlikely(objp == NULL))
		return 0;

	return obj_size(virt_to_cache(objp));
}
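
/*
 * Illustrative sketch, not part of slab.c: because kmalloc() rounds the
 * request up to the size of the backing cache's objects, ksize() lets a
 * resize helper skip the reallocation entirely when the old object is
 * already big enough.  The name example_resize is hypothetical; assumes
 * <linux/slab.h> and <linux/string.h>.
 */
static void *example_resize(void *old, size_t new_len)
{
	void *new;

	/* The slab may have given us more room than was asked for. */
	if (old && ksize(old) >= new_len)
		return old;

	new = kmalloc(new_len, GFP_KERNEL);
	if (new && old) {
		/* Safe: this branch only runs when new_len > ksize(old). */
		memcpy(new, old, ksize(old));
		kfree(old);
	}
	return new;
}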