#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB: A Slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter <clameter@sgi.com>
 */
#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/workqueue.h>
#include <linux/kobject.h>

struct kmem_cache_cpu {
	void **freelist;	/* Pointer to the first free per-cpu object */
	struct page *page;	/* The slab page we are allocating from */
	int node;		/* The node of the page */
	unsigned int offset;	/* Freepointer offset (in word units) */
	unsigned int objsize;	/* Size of an object (cached from kmem_cache) */
};
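
/*
 * Illustrative allocation fast path (a simplified sketch of what
 * mm/slub.c does, not something defined by this header): the per-cpu
 * structure lets the allocator pop the first free object without
 * taking any locks:
 *
 *	object = c->freelist;
 *	c->freelist = object[c->offset];
 *
 * where object[c->offset] is the free pointer stored inside the object
 * itself, at the word offset cached above.
 */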

struct kmem_cache_node {
	spinlock_t list_lock;		/* Protect partial list and nr_partial */
	unsigned long nr_partial;	/* Number of partial slabs on this node */
	atomic_long_t nr_slabs;		/* Total number of slabs on this node */
	struct list_head partial;	/* List of partially allocated slabs */
#ifdef CONFIG_SLUB_DEBUG
	struct list_head full;		/* List of full slabs (debugging only) */
#endif
};
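
/*
 * Illustrative sketch (simplified from the add_partial() helper in
 * mm/slub.c of this era, not part of this header): the partial list is
 * only ever touched under list_lock, e.g. when a slab is added to it:
 *
 *	spin_lock(&n->list_lock);
 *	n->nr_partial++;
 *	list_add(&page->lru, &n->partial);
 *	spin_unlock(&n->list_lock);
 */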

/*
 * Slab cache management.
 */
struct kmem_cache {
	/* Used for retrieving partial slabs etc */
	unsigned long flags;
	int size;		/* The size of an object including metadata */
	int objsize;		/* The size of an object without metadata */
	int offset;		/* Free pointer offset. */
	int order;		/* Page allocation order of a slab */

	/*
	 * Avoid an extra cache line for UP, SMP and for the node local to
	 * struct kmem_cache.
	 */
	struct kmem_cache_node local_node;

	/* Allocation and freeing of slabs */
	int objects;		/* Number of objects in slab */
	int refcount;		/* Refcount for slab cache destroy */
	void (*ctor)(struct kmem_cache *, void *);	/* Object constructor */
	int inuse;		/* Offset to metadata */
	int align;		/* Alignment */
	const char *name;	/* Name (only for display!) */
	struct list_head list;	/* List of slab caches */
#ifdef CONFIG_SLUB_DEBUG
	struct kobject kobj;	/* For sysfs */
#endif

#ifdef CONFIG_NUMA
	int defrag_ratio;
	struct kmem_cache_node *node[MAX_NUMNODES];
#endif
#ifdef CONFIG_SMP
	struct kmem_cache_cpu *cpu_slab[NR_CPUS];
#else
	struct kmem_cache_cpu cpu_slab;
#endif
};
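
/*
 * Illustrative sketch (this mirrors the get_cpu_slab() helper in
 * mm/slub.c of this era; the helper is not declared by this header):
 * because of the #ifdef above, per-cpu state is reached differently on
 * SMP and UP:
 *
 *	#ifdef CONFIG_SMP
 *		c = s->cpu_slab[cpu];
 *	#else
 *		c = &s->cpu_slab;
 *	#endif
 */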

/*
 * Kmalloc subsystem.
 */
#if defined(ARCH_KMALLOC_MINALIGN) && ARCH_KMALLOC_MINALIGN > 8
#define KMALLOC_MIN_SIZE ARCH_KMALLOC_MINALIGN
#else
#define KMALLOC_MIN_SIZE 8
#endif

#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
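
/*
 * Worked example (assuming no ARCH_KMALLOC_MINALIGN override):
 * KMALLOC_MIN_SIZE is 8, so KMALLOC_SHIFT_LOW is ilog2(8) == 3 and the
 * smallest general cache serves 8-byte allocations. An architecture
 * that sets ARCH_KMALLOC_MINALIGN to 64 would instead start at
 * ilog2(64) == 6.
 */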

/*
 * We keep the general caches in an array of slab caches that are used
 * for allocations of 2^x bytes.
 */
extern struct kmem_cache kmalloc_caches[PAGE_SHIFT];

/*
 * Sorry that the following has to be that ugly, but some versions of GCC
 * have trouble with constant propagation and loops.
 */
static __always_inline int kmalloc_index(size_t size)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (size > 64 && size <= 96)
		return 1;
	if (size > 128 && size <= 192)
		return 2;
	if (size <=          8) return 3;
	if (size <=         16) return 4;
	if (size <=         32) return 5;
	if (size <=         64) return 6;
	if (size <=        128) return 7;
	if (size <=        256) return 8;
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
/*
 * The following is only needed to support architectures with a page size
 * larger than 4k.
 */
	if (size <=   4 * 1024) return 12;
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
	if (size <=  64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <=  2 * 1024 * 1024) return 21;
	return -1;

/*
 * What we really wanted to do, and cannot do because of compiler issues, is:
 *	int i;
 *	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
 *		if (size <= (1 << i))
 *			return i;
 */
}
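
/*
 * Worked examples (illustrative, assuming KMALLOC_MIN_SIZE == 8):
 * kmalloc_index(8) == 3, kmalloc_index(96) == 1 (the special 96-byte
 * cache), kmalloc_index(100) == 7 (rounded up to the 128-byte cache),
 * and kmalloc_index(0) == 0, which kmalloc_slab() below maps to NULL.
 */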

/*
 * Find the slab cache for a given combination of allocation flags and size.
 *
 * This ought to compile down to a global pointer to the right cache
 * in kmalloc_caches.
 */
static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
{
	int index = kmalloc_index(size);

	if (index == 0)
		return NULL;	/* Zero-sized allocation */

	return &kmalloc_caches[index];
}
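
/*
 * Illustrative sketch (an assumption about the intended constant
 * folding, not a guarantee): for a compile-time constant size such as
 * kmalloc_slab(128), the compiler should reduce the whole call to the
 * equivalent of
 *
 *	&kmalloc_caches[7]
 *
 * leaving no runtime branches.
 */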

#ifdef CONFIG_ZONE_DMA
#define SLUB_DMA __GFP_DMA
#else
/* Disable DMA functionality */
#define SLUB_DMA (__force gfp_t)0
#endif

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
		/* Sizes above PAGE_SIZE/2 go straight to the page allocator */
		if (size > PAGE_SIZE / 2)
			return (void *)__get_free_pages(flags | __GFP_COMP,
							get_order(size));

		if (!(flags & SLUB_DMA)) {
			struct kmem_cache *s = kmalloc_slab(size);

			if (!s)
				return ZERO_SIZE_PTR;

			return kmem_cache_alloc(s, flags);
		}
	}
	/* Non-constant sizes and DMA allocations take the slow path */
	return __kmalloc(size, flags);
}
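
/*
 * Illustrative usage (a hypothetical caller, not part of this header):
 *
 *	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);
 *
 *	if (!f)
 *		return -ENOMEM;
 *
 * Because sizeof(*f) is a compile-time constant, the cache lookup in
 * kmalloc() above should resolve at compile time to a direct
 * kmem_cache_alloc() call on the matching general cache.
 */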

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node);
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	if (__builtin_constant_p(size) &&
			size <= PAGE_SIZE / 2 && !(flags & SLUB_DMA)) {
		struct kmem_cache *s = kmalloc_slab(size);

		if (!s)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_node(s, flags, node);
	}
	return __kmalloc_node(size, flags, node);
}
#endif
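
/*
 * Illustrative usage (a hypothetical caller, not part of this header):
 * allocating memory on a specific NUMA node, e.g. the node of a device:
 *
 *	buf = kmalloc_node(len, GFP_KERNEL, dev_to_node(dev));
 *
 * dev_to_node() is assumed here for illustration; any valid node id
 * works. With a constant size, the same compile-time cache lookup as
 * in kmalloc() applies.
 */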

#endif /* _LINUX_SLUB_DEF_H */