/*
 * SLOB Allocator: Simple List Of Blocks
 *
 * Matt Mackall <mpm@selenic.com> 12/30/03
 *
 * NUMA support by Paul Mundt, 2007.
 *
 * How SLOB works:
 *
 * The core of SLOB is a traditional K&R style heap allocator, with
 * support for returning aligned objects. The granularity of this
 * allocator is as little as 2 bytes; however, most architectures will
 * typically require 4 bytes on 32-bit and 8 bytes on 64-bit.
 *
 * The slob heap is a set of linked lists of pages from alloc_pages(),
 * and within each page, there is a singly-linked list of free blocks
 * (slob_t). The heap is grown on demand. To reduce fragmentation,
 * heap pages are segregated into three lists, with objects less than
 * 256 bytes, objects less than 1024 bytes, and all other objects.
 *
 * Allocation from the heap involves first searching for a page with
 * sufficient free blocks (using a next-fit-like approach) followed by
 * a first-fit scan of the page. Deallocation inserts objects back
 * into the free list in address order, so this is effectively an
 * address-ordered first fit.
 *
 * Above this is an implementation of kmalloc/kfree. Blocks returned
 * from kmalloc are prepended with a 4-byte header with the kmalloc size.
 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
 * alloc_pages() directly, allocating compound pages so the page order
 * does not have to be separately tracked, and also stores the exact
 * allocation size in page->private so that it can be used to accurately
 * provide ksize(). These objects are detected in kfree() because
 * is_slob_page() is false for them.
 *
 * SLAB is emulated on top of SLOB by simply calling constructors and
 * destructors for every SLAB allocation. Objects are returned with
 * 4-byte alignment unless the SLAB_HWCACHE_ALIGN flag is set, in which
 * case the low-level allocator will fragment blocks to create the proper
 * alignment. Again, objects of page-size or greater are allocated by
 * calling alloc_pages(). As SLAB objects know their size, no separate
 * size bookkeeping is necessary and there is essentially no allocation
 * space overhead, and compound pages aren't needed for multi-page
 * allocations.
 *
 * NUMA support in SLOB is fairly simplistic, pushing most of the real
 * logic down to the page allocator, and simply doing the node accounting
 * on the upper levels. In the event that a node id is explicitly
 * provided, alloc_pages_exact_node() with the specified node id is used
 * instead. The common case (or when the node id isn't explicitly provided)
 * will default to the current node, as per numa_node_id().
 *
 * Node aware pages are still inserted into the global freelist, and
 * these are scanned for by matching against the node id encoded in the
 * page flags. As a result, block allocations that can be satisfied from
 * the freelist will only be satisfied from pages residing on the same
 * node, in order to prevent random node placement.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/swap.h> /* struct reclaim_state */
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/kmemtrace.h>
#include <linux/kmemleak.h>
#include <asm/atomic.h>

/*
 * slob_block has a field 'units', which holds the size of the block if
 * positive, or the offset of the next free block if negative (both
 * measured in SLOB_UNITs).
 *
 * Free blocks of size 1 unit simply contain the offset of the next block.
 * Those with larger size contain their size in the first SLOB_UNIT of
 * memory, and the offset of the next free block in the second SLOB_UNIT.
 */
#if PAGE_SIZE <= (32767 * 2)
typedef s16 slobidx_t;
#else
typedef s32 slobidx_t;
#endif
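
/*
 * For example, with a 4096-byte page and a 2-byte slob_t, a page holds
 * 2048 SLOB_UNITs, so every in-page offset fits comfortably in an s16;
 * only pages larger than 32767 * 2 bytes need the s32 variant.
 */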

struct slob_block {
	slobidx_t units;
};
typedef struct slob_block slob_t;

/*
 * We use struct page fields to manage some slob allocation aspects,
 * however to avoid the horrible mess in include/linux/mm_types.h, we'll
 * just define our own struct page type variant here.
 */
struct slob_page {
	union {
		struct {
			unsigned long flags;	/* mandatory */
			atomic_t _count;	/* mandatory */
			slobidx_t units;	/* free units left in page */
			unsigned long pad[2];
			slob_t *free;		/* first free slob_t in page */
			struct list_head list;	/* linked list of free pages */
		};
		struct page page;
	};
};
static inline void struct_slob_page_wrong_size(void)
{ BUILD_BUG_ON(sizeof(struct slob_page) != sizeof(struct page)); }

/*
 * free_slob_page: call before a slob_page is returned to the page allocator.
 */
static inline void free_slob_page(struct slob_page *sp)
{
	reset_page_mapcount(&sp->page);
	sp->page.mapping = NULL;
}

/*
 * All partially free slob pages go on these lists.
 */
#define SLOB_BREAK1 256
#define SLOB_BREAK2 1024
static LIST_HEAD(free_slob_small);
static LIST_HEAD(free_slob_medium);
static LIST_HEAD(free_slob_large);
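
/*
 * For example, a 100-byte request is served from free_slob_small, a
 * 512-byte request from free_slob_medium, and a 2048-byte request from
 * free_slob_large; see the size checks at the top of slob_alloc().
 */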

/*
 * is_slob_page: True for all slob pages (false for bigblock pages)
 */
static inline int is_slob_page(struct slob_page *sp)
{
	return PageSlab((struct page *)sp);
}

static inline void set_slob_page(struct slob_page *sp)
{
	__SetPageSlab((struct page *)sp);
}

static inline void clear_slob_page(struct slob_page *sp)
{
	__ClearPageSlab((struct page *)sp);
}

static inline struct slob_page *slob_page(const void *addr)
{
	return (struct slob_page *)virt_to_page(addr);
}

/*
 * slob_page_free: true for pages on free_slob_pages list.
 */
static inline int slob_page_free(struct slob_page *sp)
{
	return PageSlobFree((struct page *)sp);
}

static void set_slob_page_free(struct slob_page *sp, struct list_head *list)
{
	list_add(&sp->list, list);
	__SetPageSlobFree((struct page *)sp);
}

static inline void clear_slob_page_free(struct slob_page *sp)
{
	list_del(&sp->list);
	__ClearPageSlobFree((struct page *)sp);
}

#define SLOB_UNIT sizeof(slob_t)
#define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1)/SLOB_UNIT)
#define SLOB_ALIGN L1_CACHE_BYTES

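/*
 * SLOB_UNITS rounds a byte count up to whole units. With a 2-byte
 * SLOB_UNIT, SLOB_UNITS(100) == 50 and SLOB_UNITS(101) == 51, so a
 * 101-byte request actually consumes 102 bytes of heap.
 */
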
/*
 * struct slob_rcu is inserted at the tail of allocated slob blocks, which
 * were created with a SLAB_DESTROY_BY_RCU slab. slob_rcu is used to free
 * the block using call_rcu.
 */
struct slob_rcu {
	struct rcu_head head;
	int size;
};

/*
 * slob_lock protects all slob allocator structures.
 */
static DEFINE_SPINLOCK(slob_lock);

/*
 * Encode the given size and next info into a free slob block s.
 */
static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t offset = next - base;

	if (size > 1) {
		s[0].units = size;
		s[1].units = offset;
	} else
		s[0].units = -offset;
}
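
/*
 * For example, encoding a 3-unit free block whose successor lives 20
 * units from the page base sets s[0].units = 3 and s[1].units = 20;
 * a 1-unit block at the same spot would instead set s[0].units = -20.
 */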

/*
 * Return the size of a slob block.
 */
static slobidx_t slob_units(slob_t *s)
{
	if (s->units > 0)
		return s->units;
	return 1;
}

/*
 * Return the next free slob block pointer after this one.
 */
static slob_t *slob_next(slob_t *s)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t next;

	if (s[0].units < 0)
		next = -s[0].units;
	else
		next = s[1].units;
	return base+next;
}

/*
 * Returns true if s is the last free block in its page.
 */
static int slob_last(slob_t *s)
{
	return !((unsigned long)slob_next(s) & ~PAGE_MASK);
}
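
/*
 * A page's terminal free block has its "next" pointer aimed at a
 * page-aligned address (the start of the following page), so the
 * low-order bits tested above are zero exactly for the last block.
 */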

static void *slob_new_pages(gfp_t gfp, int order, int node)
{
	void *page;

#ifdef CONFIG_NUMA
	if (node != -1)
		page = alloc_pages_exact_node(node, gfp, order);
	else
#endif
		page = alloc_pages(gfp, order);

	if (!page)
		return NULL;

	return page_address(page);
}

static void slob_free_pages(void *b, int order)
{
	if (current->reclaim_state)
		current->reclaim_state->reclaimed_slab += 1 << order;
	free_pages((unsigned long)b, order);
}

/*
 * Allocate a slob block within a given slob_page sp.
 */
static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
{
	slob_t *prev, *cur, *aligned = NULL;
	int delta = 0, units = SLOB_UNITS(size);

	for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) {
		slobidx_t avail = slob_units(cur);

		if (align) {
			aligned = (slob_t *)ALIGN((unsigned long)cur, align);
			delta = aligned - cur;
		}
		if (avail >= units + delta) { /* room enough? */
			slob_t *next;

			if (delta) { /* need to fragment head to align? */
				next = slob_next(cur);
				set_slob(aligned, avail - delta, next);
				set_slob(cur, delta, aligned);
				prev = cur;
				cur = aligned;
				avail = slob_units(cur);
			}

			next = slob_next(cur);
			if (avail == units) { /* exact fit? unlink. */
				if (prev)
					set_slob(prev, slob_units(prev), next);
				else
					sp->free = next;
			} else { /* fragment */
				if (prev)
					set_slob(prev, slob_units(prev), cur + units);
				else
					sp->free = cur + units;
				set_slob(cur + units, avail - units, next);
			}

			sp->units -= units;
			if (!sp->units)
				clear_slob_page_free(sp);
			return cur;
		}
		if (slob_last(cur))
			return NULL;
	}
}
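
/*
 * For example, carving 2 units out of a 5-unit free block leaves a
 * 3-unit remainder at cur + 2: the previous block (or sp->free) is
 * re-pointed at the remainder, and the remainder inherits the old
 * block's "next" link via set_slob().
 */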

/*
 * slob_alloc: entry point into the slob allocator.
 */
static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
{
	struct slob_page *sp;
	struct list_head *prev;
	struct list_head *slob_list;
	slob_t *b = NULL;
	unsigned long flags;

	if (size < SLOB_BREAK1)
		slob_list = &free_slob_small;
	else if (size < SLOB_BREAK2)
		slob_list = &free_slob_medium;
	else
		slob_list = &free_slob_large;

	spin_lock_irqsave(&slob_lock, flags);
	/* Iterate through each partially free page, try to find room */
	list_for_each_entry(sp, slob_list, list) {
#ifdef CONFIG_NUMA
		/*
		 * If there's a node specification, search for a partial
		 * page with a matching node id in the freelist.
		 */
		if (node != -1 && page_to_nid(&sp->page) != node)
			continue;
#endif
		/* Enough room on this page? */
		if (sp->units < SLOB_UNITS(size))
			continue;

		/* Attempt to alloc */
		prev = sp->list.prev;
		b = slob_page_alloc(sp, size, align);
		if (!b)
			continue;

		/* Improve fragment distribution and reduce our average
		 * search time by starting our next search here. (see
		 * Knuth vol 1, sec 2.5, pg 449) */
		if (prev != slob_list->prev &&
				slob_list->next != prev->next)
			list_move_tail(slob_list, prev->next);
		break;
	}
	spin_unlock_irqrestore(&slob_lock, flags);

	/* Not enough space: must allocate a new page */
	if (!b) {
		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
		if (!b)
			return NULL;
		sp = slob_page(b);
		set_slob_page(sp);

		spin_lock_irqsave(&slob_lock, flags);
		sp->units = SLOB_UNITS(PAGE_SIZE);
		sp->free = b;
		INIT_LIST_HEAD(&sp->list);
		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
		set_slob_page_free(sp, slob_list);
		b = slob_page_alloc(sp, size, align);
		BUG_ON(!b);
		spin_unlock_irqrestore(&slob_lock, flags);
	}
	if (unlikely((gfp & __GFP_ZERO) && b))
		memset(b, 0, size);
	return b;
}
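
/*
 * Note that __GFP_ZERO is masked off before slob_new_pages(): the
 * free-list metadata written into a fresh page would defeat page-level
 * zeroing anyway, so the returned block is cleared here instead, with a
 * memset() of exactly 'size' bytes.
 */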

/*
 * slob_free: entry point into the slob allocator.
 */
static void slob_free(void *block, int size)
{
	struct slob_page *sp;
	slob_t *prev, *next, *b = (slob_t *)block;
	slobidx_t units;
	unsigned long flags;

	if (unlikely(ZERO_OR_NULL_PTR(block)))
		return;
	BUG_ON(!size);

	sp = slob_page(block);
	units = SLOB_UNITS(size);

	spin_lock_irqsave(&slob_lock, flags);

	if (sp->units + units == SLOB_UNITS(PAGE_SIZE)) {
		/* Go directly to page allocator. Do not pass slob allocator */
		if (slob_page_free(sp))
			clear_slob_page_free(sp);
		spin_unlock_irqrestore(&slob_lock, flags);
		clear_slob_page(sp);
		free_slob_page(sp);
		slob_free_pages(b, 0);
		return;
	}

	if (!slob_page_free(sp)) {
		/* This slob page is about to become partially free. Easy! */
		sp->units = units;
		sp->free = b;
		set_slob(b, units,
			(void *)((unsigned long)(b +
					SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
		set_slob_page_free(sp, &free_slob_small);
		goto out;
	}

	/*
	 * Otherwise the page is already partially free, so find reinsertion
	 * point.
	 */
	sp->units += units;

	if (b < sp->free) {
		if (b + units == sp->free) {
			units += slob_units(sp->free);
			sp->free = slob_next(sp->free);
		}
		set_slob(b, units, sp->free);
		sp->free = b;
	} else {
		prev = sp->free;
		next = slob_next(prev);
		while (b > next) {
			prev = next;
			next = slob_next(prev);
		}

		if (!slob_last(prev) && b + units == next) {
			units += slob_units(next);
			set_slob(b, units, slob_next(next));
		} else
			set_slob(b, units, next);

		if (prev + slob_units(prev) == b) {
			units = slob_units(b) + slob_units(prev);
			set_slob(prev, units, slob_next(b));
		} else
			set_slob(prev, slob_units(prev), b);
	}
out:
	spin_unlock_irqrestore(&slob_lock, flags);
}
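
/*
 * The reinsertion above keeps the in-page free list in address order
 * and eagerly coalesces: a block whose end touches the following free
 * block (b + units == next) is merged forward, and one whose start
 * touches the preceding block (prev + slob_units(prev) == b) is merged
 * backward, so adjacent free space never stays split.
 */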

/*
 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
 */

#ifndef ARCH_KMALLOC_MINALIGN
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long)
#endif

#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
#endif

void *__kmalloc_node(size_t size, gfp_t gfp, int node)
{
	unsigned int *m;
	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
	void *ret;

	lockdep_trace_alloc(gfp);

	if (size < PAGE_SIZE - align) {
		if (!size)
			return ZERO_SIZE_PTR;

		m = slob_alloc(size + align, gfp, align, node);

		if (!m)
			return NULL;
		*m = size;
		ret = (void *)m + align;

		trace_kmalloc_node(_RET_IP_, ret,
				   size, size + align, gfp, node);
	} else {
		unsigned int order = get_order(size);

		ret = slob_new_pages(gfp | __GFP_COMP, order, node);
		if (ret) {
			struct page *page;
			page = virt_to_page(ret);
			page->private = size;
		}

		trace_kmalloc_node(_RET_IP_, ret,
				   size, PAGE_SIZE << order, gfp, node);
	}

	kmemleak_alloc(ret, size, 1, gfp);
	return ret;
}
EXPORT_SYMBOL(__kmalloc_node);
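
/*
 * For example, kmalloc(100, GFP_KERNEL) on a machine where the minimum
 * alignment works out to 8 asks slob_alloc() for 108 bytes, records the
 * requested size (100) in the first 4 bytes of the block, and hands the
 * caller the address 8 bytes in; kfree() later steps back by the same
 * alignment to recover that size header.
 */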

void kfree(const void *block)
{
	struct slob_page *sp;

	trace_kfree(_RET_IP_, block);

	if (unlikely(ZERO_OR_NULL_PTR(block)))
		return;
	kmemleak_free(block);

	sp = slob_page(block);
	if (is_slob_page(sp)) {
		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
		unsigned int *m = (unsigned int *)(block - align);
		slob_free(m, *m + align);
	} else
		put_page(&sp->page);
}
EXPORT_SYMBOL(kfree);

/* can't use ksize for kmem_cache_alloc memory, only kmalloc */
size_t ksize(const void *block)
{
	struct slob_page *sp;

	BUG_ON(!block);
	if (unlikely(block == ZERO_SIZE_PTR))
		return 0;

	sp = slob_page(block);
	if (is_slob_page(sp)) {
		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
		unsigned int *m = (unsigned int *)(block - align);
		return SLOB_UNITS(*m) * SLOB_UNIT;
	} else
		return sp->page.private;
}
EXPORT_SYMBOL(ksize);

struct kmem_cache {
	unsigned int size, align;
	unsigned long flags;
	const char *name;
	void (*ctor)(void *);
};

struct kmem_cache *kmem_cache_create(const char *name, size_t size,
	size_t align, unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *c;

	c = slob_alloc(sizeof(struct kmem_cache),
		GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);

	if (c) {
		c->name = name;
		c->size = size;
		if (flags & SLAB_DESTROY_BY_RCU) {
			/* leave room for rcu footer at the end of object */
			c->size += sizeof(struct slob_rcu);
		}
		c->flags = flags;
		c->ctor = ctor;
		/* ignore alignment unless it's forced */
		c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
		if (c->align < ARCH_SLAB_MINALIGN)
			c->align = ARCH_SLAB_MINALIGN;
		if (c->align < align)
			c->align = align;
	} else if (flags & SLAB_PANIC)
		panic("Cannot create slab cache %s\n", name);

	kmemleak_alloc(c, sizeof(struct kmem_cache), 1, GFP_KERNEL);
	return c;
}
EXPORT_SYMBOL(kmem_cache_create);
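
/*
 * For example, kmem_cache_create("foo", 64, 0, SLAB_DESTROY_BY_RCU, NULL)
 * yields a cache whose objects occupy 64 + sizeof(struct slob_rcu) bytes,
 * with the RCU footer living in the tail of each object; the descriptor
 * itself is carved out of the slob heap rather than a dedicated cache.
 */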

void kmem_cache_destroy(struct kmem_cache *c)
{
	kmemleak_free(c);
	slob_free(c, sizeof(struct kmem_cache));
}
EXPORT_SYMBOL(kmem_cache_destroy);

void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
{
	void *b;

	if (c->size < PAGE_SIZE) {
		b = slob_alloc(c->size, flags, c->align, node);
		trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
					    SLOB_UNITS(c->size) * SLOB_UNIT,
					    flags, node);
	} else {
		b = slob_new_pages(flags, get_order(c->size), node);
		trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
					    PAGE_SIZE << get_order(c->size),
					    flags, node);
	}

	if (c->ctor)
		c->ctor(b);

	kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags);
	return b;
}
EXPORT_SYMBOL(kmem_cache_alloc_node);

static void __kmem_cache_free(void *b, int size)
{
	if (size < PAGE_SIZE)
		slob_free(b, size);
	else
		slob_free_pages(b, get_order(size));
}

static void kmem_rcu_free(struct rcu_head *head)
{
	struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
	void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));

	__kmem_cache_free(b, slob_rcu->size);
}
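
/*
 * kmem_rcu_free() recovers the object's base address from the footer:
 * the struct slob_rcu sits in the last sizeof(struct slob_rcu) bytes of
 * the object, so stepping back (size - sizeof(struct slob_rcu)) bytes
 * from the rcu_head lands on the start of the block to free.
 */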

void kmem_cache_free(struct kmem_cache *c, void *b)
{
	kmemleak_free_recursive(b, c->flags);
	if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
		struct slob_rcu *slob_rcu;
		slob_rcu = b + (c->size - sizeof(struct slob_rcu));
		INIT_RCU_HEAD(&slob_rcu->head);
		slob_rcu->size = c->size;
		call_rcu(&slob_rcu->head, kmem_rcu_free);
	} else {
		__kmem_cache_free(b, c->size);
	}

	trace_kmem_cache_free(_RET_IP_, b);
}
EXPORT_SYMBOL(kmem_cache_free);

unsigned int kmem_cache_size(struct kmem_cache *c)
{
	return c->size;
}
EXPORT_SYMBOL(kmem_cache_size);

const char *kmem_cache_name(struct kmem_cache *c)
{
	return c->name;
}
EXPORT_SYMBOL(kmem_cache_name);

int kmem_cache_shrink(struct kmem_cache *d)
{
	return 0;
}
EXPORT_SYMBOL(kmem_cache_shrink);

int kmem_ptr_validate(struct kmem_cache *a, const void *b)
{
	return 0;
}

static unsigned int slob_ready __read_mostly;

int slab_is_available(void)
{
	return slob_ready;
}

void __init kmem_cache_init(void)
{
	slob_ready = 1;
}