/*
 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
 *
 * (C) SGI 2006, Christoph Lameter <clameter@sgi.com>
 *	Cleaned up and restructured to ease the addition of alternative
 *	implementations of SLAB allocators.
 */

#ifndef _LINUX_SLAB_H
#define	_LINUX_SLAB_H

#ifdef __KERNEL__

#include <linux/gfp.h>
#include <linux/types.h>

/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
 */
#define SLAB_DEBUG_FREE		0x00000100UL	/* DEBUG: Perform (expensive) checks on free */
#define SLAB_RED_ZONE		0x00000400UL	/* DEBUG: Red zone objs in a cache */
#define SLAB_POISON		0x00000800UL	/* DEBUG: Poison objects */
#define SLAB_HWCACHE_ALIGN	0x00002000UL	/* Align objs on cache lines */
#define SLAB_CACHE_DMA		0x00004000UL	/* Use GFP_DMA memory */
#define SLAB_STORE_USER		0x00010000UL	/* DEBUG: Store the last owner for bug hunting */
#define SLAB_PANIC		0x00040000UL	/* Panic if kmem_cache_create() fails */
#define SLAB_DESTROY_BY_RCU	0x00080000UL	/* Defer freeing slabs to RCU */
#define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
#define SLAB_TRACE		0x00200000UL	/* Trace allocations and frees */

/* The following flags affect the page allocator grouping pages by mobility */
#define SLAB_RECLAIM_ACCOUNT	0x00020000UL		/* Objects are reclaimable */
#define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree in the same way that NULL can;
 * both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)
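
/*
 * A brief illustration (hypothetical snippet, not part of the original
 * header text):
 *
 *	void *p = kmalloc(0, GFP_KERNEL);
 *
 * Here ZERO_OR_NULL_PTR(p) is true because p == ZERO_SIZE_PTR, and
 * kfree(p) is a safe no-op, just like kfree(NULL). Since 16 can never
 * be a valid object address, dereferencing p faults in a way that is
 * easy to tell apart from a NULL pointer dereference.
 */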

/*
 * struct kmem_cache related prototypes
 */
void __init kmem_cache_init(void);
int slab_is_available(void);

struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
			unsigned long,
			void (*)(struct kmem_cache *, void *));
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);
void kmem_cache_free(struct kmem_cache *, void *);
unsigned int kmem_cache_size(struct kmem_cache *);
const char *kmem_cache_name(struct kmem_cache *);
int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr);

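/*
 * A minimal lifecycle sketch for the prototypes above ("foo" and
 * foo_cache are hypothetical names):
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, NULL);
 *	...
 *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, f);
 *	kmem_cache_destroy(foo_cache);
 *
 * Passing SLAB_PANIC instead would remove the need to check the
 * kmem_cache_create() return value.
 */
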
/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment. If you,
 * e.g., add ____cacheline_aligned_in_smp to the struct declaration,
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
		sizeof(struct __struct), __alignof__(struct __struct),\
		(__flags), NULL)

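/*
 * A minimal usage sketch of KMEM_CACHE() (struct foo and foo_cache are
 * hypothetical):
 *
 *	struct foo {
 *		int bar;
 *	} ____cacheline_aligned_in_smp;
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = KMEM_CACHE(foo, SLAB_PANIC);
 *
 * This expands to kmem_cache_create("foo", sizeof(struct foo),
 * __alignof__(struct foo), SLAB_PANIC, NULL), so the cache name and
 * object alignment automatically track the structure definition.
 */
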
/*
 * The largest kmalloc size supported by the slab allocators is
 * 32 MB (2^25), or the maximum allocatable page order if that is
 * less than 32 MB.
 *
 * WARNING: It's not easy to increase this value since the allocators have
 * to do various tricks to work around compiler limitations in order to
 * ensure proper constant folding.
 */
#define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
				(MAX_ORDER + PAGE_SHIFT - 1) : 25)

#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_HIGH - PAGE_SHIFT)

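/*
 * Worked example, assuming the common x86 defaults of PAGE_SHIFT = 12
 * and MAX_ORDER = 11 (other configurations will differ):
 *
 *	MAX_ORDER + PAGE_SHIFT - 1 = 11 + 12 - 1 = 22, which is <= 25,
 *	so KMALLOC_SHIFT_HIGH = 22,
 *	KMALLOC_MAX_SIZE  = 1UL << 22 = 4 MB, and
 *	KMALLOC_MAX_ORDER = 22 - 12 = 10.
 *
 * The 2^25 ceiling therefore only applies to configurations with large
 * pages or a high MAX_ORDER.
 */
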
/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
size_t ksize(const void *);

/*
 * Allocator specific definitions. These are mainly used to establish optimized
 * ways to convert kmalloc() calls to kmem_cache_alloc() invocations by
 * selecting the appropriate general cache at compile time.
 *
 * Allocators must define at least:
 *
 *	kmem_cache_alloc()
 *	__kmalloc()
 *	kmalloc()
 *
 * Those wishing to support NUMA must also define:
 *
 *	kmem_cache_alloc_node()
 *	kmalloc_node()
 *
 * See each allocator definition file for additional comments and
 * implementation notes.
 */
#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#elif defined(CONFIG_SLOB)
#include <linux/slob_def.h>
#else
#include <linux/slab_def.h>
#endif

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate.
 *
 * The @flags argument may be one of:
 *
 * %GFP_USER - Allocate memory on behalf of user.  May sleep.
 *
 * %GFP_KERNEL - Allocate normal kernel RAM.  May sleep.
 *
 * %GFP_ATOMIC - Allocation will not sleep.  May use emergency pools.
 *   For example, use this inside interrupt handlers.
 *
 * %GFP_HIGHUSER - Allocate pages from high memory.
 *
 * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
 *
 * %GFP_NOFS - Do not make any fs calls while trying to get memory.
 *
 * %GFP_NOWAIT - Allocation will not sleep.
 *
 * %GFP_THISNODE - Allocate node-local memory only.
 *
 * %GFP_DMA - Allocation suitable for DMA.
 *   Should only be used for kmalloc() caches. Otherwise, use a
 *   slab created with SLAB_CACHE_DMA.
 *
 * It is also possible to set different flags by OR'ing
 * in one or more of the following additional @flags:
 *
 * %__GFP_COLD - Request cache-cold pages instead of
 *   trying to return cache-warm pages.
 *
 * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
 *
 * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
 *   (think twice before using).
 *
 * %__GFP_NORETRY - If memory is not immediately available,
 *   then give up at once.
 *
 * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
 *
 * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
 *
 * There are other flags available as well, but these are not intended
 * for general use, and so are not documented here. For a full list of
 * potential flags, always refer to linux/gfp.h.
 */
static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	if (n != 0 && size > ULONG_MAX / n)
		return NULL;
	return __kmalloc(n * size, flags | __GFP_ZERO);
}
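
/*
 * A minimal usage sketch of kcalloc() (struct item and nr_items are
 * hypothetical):
 *
 *	struct item *table;
 *
 *	table = kcalloc(nr_items, sizeof(*table), GFP_KERNEL);
 *	if (!table)
 *		return -ENOMEM;
 *
 * The n != 0 && size > ULONG_MAX / n test above rejects requests whose
 * n * size product would overflow, which would otherwise silently
 * allocate a buffer smaller than the caller expects.
 */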

#if !defined(CONFIG_NUMA) && !defined(CONFIG_SLOB)
/**
 * kmalloc_node - allocate memory from a specific node
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kcalloc).
 * @node: node to allocate from.
 *
 * kmalloc() for non-local nodes, used to allocate from a specific node
 * if available. Equivalent to kmalloc() in the non-NUMA single-node
 * case.
 */
static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc(size, flags);
}

static inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __kmalloc(size, flags);
}

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);

static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
					gfp_t flags, int node)
{
	return kmem_cache_alloc(cachep, flags);
}
#endif /* !CONFIG_NUMA && !CONFIG_SLOB */
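
/*
 * A usage sketch for kmalloc_node() (illustrative only; "dev" is a
 * hypothetical struct device pointer):
 *
 *	void *buf = kmalloc_node(4096, GFP_KERNEL, dev_to_node(dev));
 *
 * On NUMA builds this allocates from the node closest to the device;
 * with the fallback definitions above it degenerates to plain kmalloc().
 */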

/*
 * kmalloc_track_caller is a special version of kmalloc that records the
 * caller of the routine invoking it, rather than its immediate caller,
 * for slab leak tracking.
 * It's useful when the call to kmalloc comes from a widely-used standard
 * allocator where we care about the real place the memory allocation
 * request comes from.
 */
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
extern void *__kmalloc_track_caller(size_t, gfp_t, void*);
#define kmalloc_track_caller(size, flags) \
	__kmalloc_track_caller(size, flags, __builtin_return_address(0))
#else
#define kmalloc_track_caller(size, flags) \
	__kmalloc(size, flags)
#endif /* CONFIG_DEBUG_SLAB || CONFIG_SLUB */

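/*
 * A sketch of the intended use: a widely-shared wrapper that should not
 * itself show up as the allocation site in slab debugging. The wrapper
 * below is hypothetical; kstrdup() in mm/util.c follows this pattern.
 *
 *	static char *my_strdup(const char *s, gfp_t gfp)
 *	{
 *		size_t len = strlen(s) + 1;
 *		char *buf = kmalloc_track_caller(len, gfp);
 *
 *		if (buf)
 *			memcpy(buf, s, len);
 *		return buf;
 *	}
 *
 * Because the macro expands inside the wrapper, the recorded
 * __builtin_return_address(0) identifies the wrapper's caller, i.e. the
 * code that actually requested the memory.
 */
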
#ifdef CONFIG_NUMA
/*
 * kmalloc_node_track_caller is a special version of kmalloc_node that
 * records the caller of the routine invoking it, rather than its
 * immediate caller, for slab leak tracking.
 * It's useful when the call to kmalloc_node comes from a widely-used
 * standard allocator where we care about the real place the memory
 * allocation request comes from.
 */
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *);
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node_track_caller(size, flags, node, \
			__builtin_return_address(0))
#else
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node(size, flags, node)
#endif

#else /* CONFIG_NUMA */

#define kmalloc_node_track_caller(size, flags, node) \
	kmalloc_track_caller(size, flags)

#endif /* CONFIG_NUMA */

/*
 * Shortcuts
 */
static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
	return kmem_cache_alloc(k, flags | __GFP_ZERO);
}

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kzalloc(size_t size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}

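/*
 * Both shortcuts simply OR in __GFP_ZERO so the allocator zeroes the
 * object on the allocation path. A brief sketch (struct foo and
 * foo_cache are hypothetical):
 *
 *	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
 *	struct foo *g = kmem_cache_zalloc(foo_cache, GFP_KERNEL);
 *
 * Each is equivalent to the corresponding kmalloc() or
 * kmem_cache_alloc() call followed by a memset() of the object to zero.
 */
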
#ifdef CONFIG_SLABINFO
extern const struct seq_operations slabinfo_op;
ssize_t slabinfo_write(struct file *, const char __user *, size_t, loff_t *);
#endif

#endif	/* __KERNEL__ */
#endif	/* _LINUX_SLAB_H */