#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB: A slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter
 */
#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/workqueue.h>
#include <linux/kobject.h>
#include <linux/kmemtrace.h>
#include <linux/kmemleak.h>

enum stat_item {
	ALLOC_FASTPATH,		/* Allocation from cpu slab */
	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
	FREE_FASTPATH,		/* Free to cpu slab */
	FREE_SLOWPATH,		/* Freeing not to cpu slab */
	FREE_FROZEN,		/* Freeing to frozen slab */
	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from partial list */
	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
	FREE_SLAB,		/* Slab freed to the page allocator */
	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
	ORDER_FALLBACK,		/* Number of times fallback was necessary */
	NR_SLUB_STAT_ITEMS };
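/*
 * Note (summary, not part of the original header): with CONFIG_SLUB_STATS
 * enabled, each counter above is exposed as a read-only sysfs file of the
 * same lower-case name under /sys/kernel/slab/<cache>/, e.g.
 * alloc_fastpath or free_slowpath; see the STAT_ATTR definitions in
 * mm/slub.c.
 */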

struct kmem_cache_cpu {
	void **freelist;	/* Pointer to first free per cpu object */
	struct page *page;	/* The slab from which we are allocating */
	int node;		/* The node of the page (or -1 for debug) */
#ifdef CONFIG_SLUB_STATS
	unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};
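/*
 * One kmem_cache_cpu instance is maintained per cpu for each cache,
 * reached through the cpu_slab pointer in struct kmem_cache below.  The
 * allocation and free fast paths work on this structure alone, without
 * touching the per node partial lists or their locks.
 */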

struct kmem_cache_node {
	spinlock_t list_lock;	/* Protect partial list and nr_partial */
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
};

/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
	unsigned long x;
};
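/*
 * Illustrative sketch (not part of this header): the helpers that pack and
 * unpack this word live in mm/slub.c.  Assuming a 16 bit split between the
 * two fields, they look roughly like:
 *
 *	#define OO_SHIFT	16
 *	#define OO_MASK		((1 << OO_SHIFT) - 1)
 *
 *	static inline int oo_order(struct kmem_cache_order_objects x)
 *	{
 *		return x.x >> OO_SHIFT;
 *	}
 *
 *	static inline int oo_objects(struct kmem_cache_order_objects x)
 *	{
 *		return x.x & OO_MASK;
 *	}
 *
 * Because both values share a single word, a slab's order and object count
 * can be read or updated with one store.
 */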

/*
 * Slab cache management.
 */
struct kmem_cache {
	struct kmem_cache_cpu *cpu_slab;
	/* Used for retrieving partial slabs, etc. */
	unsigned long flags;
	int size;		/* The size of an object including meta data */
	int objsize;		/* The size of an object without meta data */
	int offset;		/* Free pointer offset. */
	struct kmem_cache_order_objects oo;

	/*
	 * Avoid an extra cache line for UP, SMP and for the node local to
	 * struct kmem_cache.
	 */
	struct kmem_cache_node local_node;

	/* Allocation and freeing of slabs */
	struct kmem_cache_order_objects max;
	struct kmem_cache_order_objects min;
	gfp_t allocflags;	/* gfp flags to use on each alloc */
	int refcount;		/* Refcount for slab cache destroy */
	void (*ctor)(void *);
	int inuse;		/* Offset to metadata */
	int align;		/* Alignment */
	unsigned long min_partial;
	const char *name;	/* Name (only for display!) */
	struct list_head list;	/* List of slab caches */
#ifdef CONFIG_SLUB_DEBUG
	struct kobject kobj;	/* For sysfs */
#endif

#ifdef CONFIG_NUMA
	/*
	 * Defragmentation by allocating from a remote node.
	 */
	int remote_node_defrag_ratio;
	struct kmem_cache_node *node[MAX_NUMNODES];
#endif
};

/*
 * Kmalloc subsystem.
 */
#if defined(ARCH_KMALLOC_MINALIGN) && ARCH_KMALLOC_MINALIGN > 8
#define KMALLOC_MIN_SIZE ARCH_KMALLOC_MINALIGN
#else
#define KMALLOC_MIN_SIZE 8
#endif

#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
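/*
 * Worked example: with the default minimum of 8, KMALLOC_SHIFT_LOW is
 * ilog2(8) = 3, i.e. the smallest general cache is the 8 byte cache at
 * index 3.  An architecture that defines ARCH_KMALLOC_MINALIGN as 64
 * instead gets KMALLOC_MIN_SIZE = 64 and KMALLOC_SHIFT_LOW = 6, so every
 * request of 64 bytes or less is served from the 64 byte cache.
 */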

/*
 * Maximum kmalloc object size handled by SLUB. Larger object allocations
 * are passed through to the page allocator. The page allocator "fastpath"
 * is relatively slow, so we keep this value high enough that performance
 * critical objects are still allocated through the SLUB fastpath.
 *
 * This should be dropped to PAGE_SIZE / 2 once the page allocator
 * "fastpath" becomes competitive with the slab allocator fastpaths.
 */
#define SLUB_MAX_SIZE (2 * PAGE_SIZE)

#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 2)

#ifdef CONFIG_ZONE_DMA
#define SLUB_DMA __GFP_DMA
/* Reserve extra caches for potential DMA use */
#define KMALLOC_CACHES (2 * SLUB_PAGE_SHIFT - 6)
#else
/* Disable DMA functionality */
#define SLUB_DMA (__force gfp_t)0
#define KMALLOC_CACHES SLUB_PAGE_SHIFT
#endif
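/*
 * Worked example, assuming 4 KiB pages (PAGE_SHIFT = 12): SLUB_MAX_SIZE is
 * 8192 bytes and SLUB_PAGE_SHIFT is 14, so the kmalloc array covers cache
 * indices 0 .. 13.  Without CONFIG_ZONE_DMA that gives 14 array slots;
 * with it, 2 * 14 - 6 = 22 slots are reserved so that DMA variants of the
 * larger caches can be created on demand.
 */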

/*
 * We keep the general caches in an array of slab caches that are used for
 * 2^x bytes of allocations.
 */
extern struct kmem_cache kmalloc_caches[KMALLOC_CACHES];

/*
 * Sorry that the following has to be that ugly but some versions of GCC
 * have trouble with constant propagation and loops.
 */
static __always_inline int kmalloc_index(size_t size)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <=          8) return 3;
	if (size <=         16) return 4;
	if (size <=         32) return 5;
	if (size <=         64) return 6;
	if (size <=        128) return 7;
	if (size <=        256) return 8;
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
	if (size <=   4 * 1024) return 12;
/*
 * The following is only needed to support architectures with a larger page
 * size than 4k.
 */
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
	if (size <=  64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <=  2 * 1024 * 1024) return 21;
	return -1;

/*
 * What we really wanted to do and cannot do because of compiler issues is:
 *	int i;
 *	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
 *		if (size <= (1 << i))
 *			return i;
 */
}
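/*
 * Worked examples with the default KMALLOC_MIN_SIZE of 8:
 *
 *	kmalloc_index(8)   == 3	(smallest general cache)
 *	kmalloc_index(100) == 7	(rounded up to the 128 byte cache)
 *	kmalloc_index(96)  == 1	(the special 96 byte cache)
 *	kmalloc_index(192) == 2	(the special 192 byte cache)
 *	kmalloc_index(0)   == 0	(callers map this to ZERO_SIZE_PTR)
 *
 * Because the function is __always_inline, a constant size folds down to a
 * constant index at compile time.
 */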

/*
 * Find the slab cache for a given combination of allocation flags and size.
 *
 * This ought to end up with a global pointer to the right cache
 * in kmalloc_caches.
 */
static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
{
	int index = kmalloc_index(size);

	if (index == 0)
		return NULL;

	return &kmalloc_caches[index];
}

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
#else
static __always_inline void *
kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
{
	return kmem_cache_alloc(s, gfpflags);
}
#endif

static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
{
	unsigned int order = get_order(size);
	void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);

	kmemleak_alloc(ret, size, 1, flags);
	trace_kmalloc(_THIS_IP_, ret, size, PAGE_SIZE << order, flags);

	return ret;
}
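/*
 * Example (assuming 4 KiB pages): a constant kmalloc(16384, GFP_KERNEL)
 * exceeds SLUB_MAX_SIZE (8192), so kmalloc() below routes it here;
 * get_order(16384) is 2 and the request is satisfied by a compound
 * allocation of four pages taken straight from the page allocator.
 * kfree() recognizes such non-slab pages and hands them back to the page
 * allocator.
 */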

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	void *ret;

	if (__builtin_constant_p(size)) {
		if (size > SLUB_MAX_SIZE)
			return kmalloc_large(size, flags);

		if (!(flags & SLUB_DMA)) {
			struct kmem_cache *s = kmalloc_slab(size);

			if (!s)
				return ZERO_SIZE_PTR;

			ret = kmem_cache_alloc_notrace(s, flags);

			trace_kmalloc(_THIS_IP_, ret, size, s->size, flags);

			return ret;
		}
	}
	return __kmalloc(size, flags);
}
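/*
 * Typical use (struct foo is a made-up example type):
 *
 *	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);
 *	if (!f)
 *		return -ENOMEM;
 *	...
 *	kfree(f);
 *
 * sizeof(*f) is a compile time constant, so the lookup above folds into a
 * direct kmem_cache_alloc_notrace() call on the matching kmalloc cache;
 * only variable sizes or DMA requests take the out-of-line __kmalloc()
 * path.
 */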

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node);
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
					   gfp_t gfpflags,
					   int node);
#else
static __always_inline void *
kmem_cache_alloc_node_notrace(struct kmem_cache *s,
			      gfp_t gfpflags,
			      int node)
{
	return kmem_cache_alloc_node(s, gfpflags, node);
}
#endif

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	void *ret;

	if (__builtin_constant_p(size) &&
	    size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) {
		struct kmem_cache *s = kmalloc_slab(size);

		if (!s)
			return ZERO_SIZE_PTR;

		ret = kmem_cache_alloc_node_notrace(s, flags, node);

		trace_kmalloc_node(_THIS_IP_, ret,
				   size, s->size, flags, node);

		return ret;
	}
	return __kmalloc_node(size, flags, node);
}
#endif

#endif /* _LINUX_SLUB_DEF_H */