#ifndef LINUX_KMEMCHECK_H
#define LINUX_KMEMCHECK_H

#include <linux/mm_types.h>
#include <linux/types.h>

#ifdef CONFIG_KMEMCHECK
extern int kmemcheck_enabled;

/* The slab-related functions. */
void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node);
void kmemcheck_free_shadow(struct page *page, int order);
void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
			  size_t size);
void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size);

void kmemcheck_pagealloc_alloc(struct page *p, unsigned int order,
			       gfp_t gfpflags);

void kmemcheck_show_pages(struct page *p, unsigned int n);
void kmemcheck_hide_pages(struct page *p, unsigned int n);

bool kmemcheck_page_is_tracked(struct page *p);

void kmemcheck_mark_unallocated(void *address, unsigned int n);
void kmemcheck_mark_uninitialized(void *address, unsigned int n);
void kmemcheck_mark_initialized(void *address, unsigned int n);
void kmemcheck_mark_freed(void *address, unsigned int n);

void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n);
void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n);
void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n);

int kmemcheck_show_addr(unsigned long address);
int kmemcheck_hide_addr(unsigned long address);

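/*
 * Illustrative sketch: a caller that fills an allocation by means kmemcheck
 * cannot observe (say, a hypothetical fill_from_hw() that lets a device
 * write into the buffer) can use the marking functions above to tell
 * kmemcheck about it, so that later reads are not reported as uses of
 * uninitialized memory:
 *
 *	void *buf = kmalloc(len, GFP_KERNEL);
 *
 *	fill_from_hw(buf, len);
 *	kmemcheck_mark_initialized(buf, len);
 */
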
#else
#define kmemcheck_enabled 0

static inline void
kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
{
}

static inline void
kmemcheck_free_shadow(struct page *page, int order)
{
}

static inline void
kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
		     size_t size)
{
}

static inline void kmemcheck_slab_free(struct kmem_cache *s, void *object,
				       size_t size)
{
}

static inline void kmemcheck_pagealloc_alloc(struct page *p,
					     unsigned int order, gfp_t gfpflags)
{
}

static inline bool kmemcheck_page_is_tracked(struct page *p)
{
	return false;
}

static inline void kmemcheck_mark_unallocated(void *address, unsigned int n)
{
}

static inline void kmemcheck_mark_uninitialized(void *address, unsigned int n)
{
}

static inline void kmemcheck_mark_initialized(void *address, unsigned int n)
{
}

static inline void kmemcheck_mark_freed(void *address, unsigned int n)
{
}

static inline void kmemcheck_mark_unallocated_pages(struct page *p,
						    unsigned int n)
{
}

static inline void kmemcheck_mark_uninitialized_pages(struct page *p,
						      unsigned int n)
{
}

static inline void kmemcheck_mark_initialized_pages(struct page *p,
						    unsigned int n)
{
}

#endif /* CONFIG_KMEMCHECK */

/*
 * Bitfield annotations
 *
 * How to use: If you have a struct using bitfields, for example
 *
 *	struct a {
 *		int x:8, y:8;
 *	};
 *
 * then this should be rewritten as
 *
 *	struct a {
 *		kmemcheck_bitfield_begin(flags);
 *		int x:8, y:8;
 *		kmemcheck_bitfield_end(flags);
 *	};
 *
 * Now the "flags_begin" and "flags_end" members may be used to refer to the
 * beginning and end, respectively, of the bitfield (and things like
 * &x.flags_begin are allowed). As soon as the struct is allocated, the
 * bitfields should be annotated:
 *
 *	struct a *a = kmalloc(sizeof(struct a), GFP_KERNEL);
 *	kmemcheck_annotate_bitfield(a, flags);
 *
 * Note: We provide the same definitions for both kmemcheck and
 * non-kmemcheck kernels. This makes it harder to introduce accidental
 * errors. It is also allowed to pass NULL pointers to
 * kmemcheck_annotate_bitfield().
 */
#define kmemcheck_bitfield_begin(name)	\
	int name##_begin[0];

#define kmemcheck_bitfield_end(name)	\
	int name##_end[0];
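/*
 * The markers above are zero-sized array members: they give the bitfield
 * region named, addressable boundaries while normally adding no storage of
 * their own to the structure.
 */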

#define kmemcheck_annotate_bitfield(ptr, name)				\
	do if (ptr) {							\
		int _n = (long) &((ptr)->name##_end)			\
			- (long) &((ptr)->name##_begin);		\
		BUILD_BUG_ON(_n < 0);					\
									\
		kmemcheck_mark_initialized(&((ptr)->name##_begin), _n);	\
	} while (0)

#define kmemcheck_annotate_variable(var)				\
	do {								\
		kmemcheck_mark_initialized(&(var), sizeof(var));	\
	} while (0)
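
/*
 * Illustrative sketch: kmemcheck_annotate_variable() simply marks the whole
 * object as initialized, which is useful for e.g. an on-stack variable whose
 * bitfields are only partially written before the containing word is read.
 * "struct key" and use_key() are made-up names for this example:
 *
 *	struct key key;
 *
 *	kmemcheck_annotate_variable(key);
 *	key.type = 1;
 *	use_key(&key);
 */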

#endif /* LINUX_KMEMCHECK_H */