blob: 455f9affea9abafe24c6be97a861d1839ee770a0 [file] [log] [blame]
Christoph Lameter2e892f42006-12-13 00:34:23 -08001#ifndef _LINUX_SLAB_DEF_H
2#define _LINUX_SLAB_DEF_H
3
4/*
5 * Definitions unique to the original Linux SLAB allocator.
6 *
7 * What we provide here is a way to optimize the frequent kmalloc
8 * calls in the kernel by selecting the appropriate general cache
9 * if kmalloc was called with a size that can be established at
10 * compile time.
11 */
12
13#include <linux/init.h>
14#include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */
15#include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */
16#include <linux/compiler.h>
Frederic Weisbecker36994e52008-12-29 13:42:23 -080017#include <trace/kmemtrace.h>
Christoph Lameter2e892f42006-12-13 00:34:23 -080018
/*
 * Size description struct for general caches.
 *
 * One entry per general (kmalloc) cache.  Entries parallel the CACHE()
 * list in <linux/kmalloc_sizes.h> — kmalloc() below relies on that
 * correspondence when it computes an index into malloc_sizes[].
 */
struct cache_sizes {
	size_t cs_size;			/* object size this cache serves */
	struct kmem_cache *cs_cachep;	/* cache for normal allocations */
#ifdef CONFIG_ZONE_DMA
	struct kmem_cache *cs_dmacachep; /* cache for GFP_DMA allocations */
#endif
};
/* Table of general caches, indexed by kmalloc() / kmalloc_node() below. */
extern struct cache_sizes malloc_sizes[];
28
Paul Mundt6193a2f2007-07-15 23:38:22 -070029void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
30void *__kmalloc(size_t size, gfp_t flags);
31
Eduard - Gabriel Munteanu36555752008-08-10 20:14:05 +030032#ifdef CONFIG_KMEMTRACE
33extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
34extern size_t slab_buffer_size(struct kmem_cache *cachep);
35#else
36static __always_inline void *
37kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
Christoph Lameter2e892f42006-12-13 00:34:23 -080038{
Eduard - Gabriel Munteanu36555752008-08-10 20:14:05 +030039 return kmem_cache_alloc(cachep, flags);
40}
41static inline size_t slab_buffer_size(struct kmem_cache *cachep)
42{
43 return 0;
44}
45#endif
46
/*
 * kmalloc - allocate memory
 * @size: number of bytes required
 * @flags: gfp allocation flags
 *
 * When @size is a compile-time constant, the matching general cache is
 * selected at compile time and the allocation goes straight to
 * kmem_cache_alloc_notrace(), bypassing the runtime size lookup.
 * Non-constant sizes fall through to __kmalloc().
 */
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	struct kmem_cache *cachep;
	void *ret;

	if (__builtin_constant_p(size)) {
		int i = 0;

		/* Zero-byte requests get the distinguished ZERO_SIZE_PTR. */
		if (!size)
			return ZERO_SIZE_PTR;

		/*
		 * Expand the CACHE(x) list from kmalloc_sizes.h into a
		 * cascade of compile-time size checks; i ends up as the
		 * index of the first cache large enough for @size.
		 */
#define CACHE(x) \
	if (size <= x) \
		goto found; \
	else \
		i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
		{
			/*
			 * @size exceeds the largest general cache: force a
			 * link-time error via a deliberately undefined symbol.
			 */
			extern void __you_cannot_kmalloc_that_much(void);
			__you_cannot_kmalloc_that_much();
		}
found:
#ifdef CONFIG_ZONE_DMA
		/* Route GFP_DMA requests to the parallel DMA cache table. */
		if (flags & GFP_DMA)
			cachep = malloc_sizes[i].cs_dmacachep;
		else
#endif
			cachep = malloc_sizes[i].cs_cachep;

		ret = kmem_cache_alloc_notrace(cachep, flags);

		/* No-op stub unless CONFIG_KMEMTRACE records the event. */
		kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_, ret,
				     size, slab_buffer_size(cachep), flags);

		return ret;
	}
	return __kmalloc(size, flags);
}
86
Christoph Lameter2e892f42006-12-13 00:34:23 -080087#ifdef CONFIG_NUMA
88extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
Paul Mundt6193a2f2007-07-15 23:38:22 -070089extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
Christoph Lameter2e892f42006-12-13 00:34:23 -080090
Eduard - Gabriel Munteanu36555752008-08-10 20:14:05 +030091#ifdef CONFIG_KMEMTRACE
92extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
93 gfp_t flags,
94 int nodeid);
95#else
96static __always_inline void *
97kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
98 gfp_t flags,
99 int nodeid)
Christoph Lameter2e892f42006-12-13 00:34:23 -0800100{
Eduard - Gabriel Munteanu36555752008-08-10 20:14:05 +0300101 return kmem_cache_alloc_node(cachep, flags, nodeid);
102}
103#endif
104
/*
 * kmalloc_node - allocate memory from a specific NUMA node
 * @size: number of bytes required
 * @flags: gfp allocation flags
 * @node: NUMA node to allocate from
 *
 * Node-aware counterpart of kmalloc() above: identical compile-time
 * cache selection for constant @size, but the allocation itself goes
 * through kmem_cache_alloc_node_notrace() / __kmalloc_node().
 */
static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	struct kmem_cache *cachep;
	void *ret;

	if (__builtin_constant_p(size)) {
		int i = 0;

		/* Zero-byte requests get the distinguished ZERO_SIZE_PTR. */
		if (!size)
			return ZERO_SIZE_PTR;

		/*
		 * Expand the CACHE(x) list from kmalloc_sizes.h into a
		 * cascade of compile-time size checks; i ends up as the
		 * index of the first cache large enough for @size.
		 */
#define CACHE(x) \
	if (size <= x) \
		goto found; \
	else \
		i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
		{
			/*
			 * @size exceeds the largest general cache: force a
			 * link-time error via a deliberately undefined symbol.
			 */
			extern void __you_cannot_kmalloc_that_much(void);
			__you_cannot_kmalloc_that_much();
		}
found:
#ifdef CONFIG_ZONE_DMA
		/* Route GFP_DMA requests to the parallel DMA cache table. */
		if (flags & GFP_DMA)
			cachep = malloc_sizes[i].cs_dmacachep;
		else
#endif
			cachep = malloc_sizes[i].cs_cachep;

		ret = kmem_cache_alloc_node_notrace(cachep, flags, node);

		/* No-op stub unless CONFIG_KMEMTRACE records the event. */
		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_,
					  ret, size, slab_buffer_size(cachep),
					  flags, node);

		return ret;
	}
	return __kmalloc_node(size, flags, node);
}
145
146#endif /* CONFIG_NUMA */
147
Christoph Lameter2e892f42006-12-13 00:34:23 -0800148#endif /* _LINUX_SLAB_DEF_H */