#ifndef _LINUX_SLAB_DEF_H
#define	_LINUX_SLAB_DEF_H

/*
 * Definitions unique to the original Linux SLAB allocator.
 *
 * What we provide here is a way to optimize the frequent kmalloc
 * calls in the kernel by selecting the appropriate general cache
 * when kmalloc() is called with a size that can be determined at
 * compile time.
 */
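
/*
 * Illustrative example (not part of this header): a call such as
 *
 *	buf = kmalloc(64, GFP_KERNEL);
 *
 * has a compile-time constant size, so the inline kmalloc() below can
 * resolve it directly to kmem_cache_alloc() on the matching general
 * cache instead of taking the runtime __kmalloc() path.
 */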

#include <linux/init.h>
#include <asm/page.h>		/* kmalloc_sizes.h needs PAGE_SIZE */
#include <asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */
#include <linux/compiler.h>
#include <linux/kmemtrace.h>

/* Size description struct for general caches. */
struct cache_sizes {
	size_t		 	cs_size;
	struct kmem_cache	*cs_cachep;
#ifdef CONFIG_ZONE_DMA
	struct kmem_cache	*cs_dmacachep;
#endif
};
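/*
 * malloc_sizes[] has one entry per CACHE(x) line in
 * <linux/kmalloc_sizes.h>, in the same order; the inline kmalloc()
 * and kmalloc_node() below rely on that ordering when indexing it.
 */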
extern struct cache_sizes malloc_sizes[];

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

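/*
 * When kmemtrace is enabled, the inline kmalloc()/kmalloc_node() below
 * emit their own trace events and therefore allocate through the
 * _notrace variants, so a compile-time-resolved kmalloc() shows up as
 * a kmalloc event rather than as a kmem_cache_alloc event.
 * slab_buffer_size() supplies the object size of the selected cache
 * for that event; the stub returns 0 when kmemtrace is compiled out.
 */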
#ifdef CONFIG_KMEMTRACE
extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
extern size_t slab_buffer_size(struct kmem_cache *cachep);
#else
static __always_inline void *
kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
{
	return kmem_cache_alloc(cachep, flags);
}
static inline size_t slab_buffer_size(struct kmem_cache *cachep)
{
	return 0;
}
#endif

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	struct kmem_cache *cachep;
	void *ret;

	if (__builtin_constant_p(size)) {
		int i = 0;

		if (!size)
			return ZERO_SIZE_PTR;

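/*
 * Each CACHE(x) line in <linux/kmalloc_sizes.h> expands to a size
 * check below; the first general cache large enough for the request
 * wins, leaving i as its index into malloc_sizes[].
 */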
#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
		return NULL;
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = malloc_sizes[i].cs_dmacachep;
		else
#endif
			cachep = malloc_sizes[i].cs_cachep;

		ret = kmem_cache_alloc_notrace(cachep, flags);

		trace_kmalloc(_THIS_IP_, ret,
			      size, slab_buffer_size(cachep), flags);

		return ret;
	}
	return __kmalloc(size, flags);
}

#ifdef CONFIG_NUMA
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

#ifdef CONFIG_KMEMTRACE
extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
					   gfp_t flags,
					   int nodeid);
#else
static __always_inline void *
kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
			      gfp_t flags,
			      int nodeid)
{
	return kmem_cache_alloc_node(cachep, flags, nodeid);
}
#endif

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	struct kmem_cache *cachep;
	void *ret;

	if (__builtin_constant_p(size)) {
		int i = 0;

		if (!size)
			return ZERO_SIZE_PTR;

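/* Same compile-time size lookup as in kmalloc() above. */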
#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
		return NULL;
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = malloc_sizes[i].cs_dmacachep;
		else
#endif
			cachep = malloc_sizes[i].cs_cachep;

		ret = kmem_cache_alloc_node_notrace(cachep, flags, node);

		trace_kmalloc_node(_THIS_IP_, ret,
				   size, slab_buffer_size(cachep),
				   flags, node);

		return ret;
	}
	return __kmalloc_node(size, flags, node);
}

#endif	/* CONFIG_NUMA */

#endif	/* _LINUX_SLAB_DEF_H */