#include <linux/kmemcheck.h>
#include <linux/module.h>
#include <linux/mm.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include "pte.h"
#include "shadow.h"

/*
 * Return the shadow address for the given address. Returns NULL if the
 * address is not tracked.
 *
 * We need to be extremely careful not to follow any invalid pointers,
 * because this function can be called for *any* possible address.
 */
void *kmemcheck_shadow_lookup(unsigned long address)
{
	pte_t *pte;
	struct page *page;

	if (!virt_addr_valid(address))
		return NULL;

	pte = kmemcheck_pte_lookup(address);
	if (!pte)
		return NULL;

	page = virt_to_page(address);
	if (!page->shadow)
		return NULL;
	return page->shadow + (address & (PAGE_SIZE - 1));
}
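
/*
 * Illustrative example (assuming the usual 4 KiB x86 pages, address is
 * made up): for a tracked address such as 0xffff880000001234,
 * virt_to_page() yields the page covering 0xffff880000001000, and the
 * result is page->shadow + 0x234, i.e. the shadow byte at the same
 * offset within the page.
 */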

static void mark_shadow(void *address, unsigned int n,
			enum kmemcheck_shadow status)
{
	unsigned long addr = (unsigned long) address;
	unsigned long last_addr = addr + n - 1;
	unsigned long page = addr & PAGE_MASK;
	unsigned long last_page = last_addr & PAGE_MASK;
	unsigned int first_n;
	void *shadow;

	/* If the memory range crosses a page boundary, stop there. */
	if (page == last_page)
		first_n = n;
	else
		first_n = page + PAGE_SIZE - addr;

	shadow = kmemcheck_shadow_lookup(addr);
	if (shadow)
		memset(shadow, status, first_n);

	addr += first_n;
	n -= first_n;

	/* Do full-page memset()s. */
	while (n >= PAGE_SIZE) {
		shadow = kmemcheck_shadow_lookup(addr);
		if (shadow)
			memset(shadow, status, PAGE_SIZE);

		addr += PAGE_SIZE;
		n -= PAGE_SIZE;
	}

	/* Do the remaining page, if any. */
	if (n > 0) {
		shadow = kmemcheck_shadow_lookup(addr);
		if (shadow)
			memset(shadow, status, n);
	}
}
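
/*
 * Worked example for mark_shadow() (illustrative, assuming 4 KiB pages):
 * marking n = 0x1400 bytes starting at page offset 0xf00 is split into
 * three memset()s on the shadow: 0x100 bytes up to the first page
 * boundary, one full 0x1000-byte page, and the remaining 0x300 bytes.
 * Each chunk does its own lookup because the shadow pages of adjacent
 * data pages are not necessarily contiguous.
 */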

void kmemcheck_mark_unallocated(void *address, unsigned int n)
{
	mark_shadow(address, n, KMEMCHECK_SHADOW_UNALLOCATED);
}

void kmemcheck_mark_uninitialized(void *address, unsigned int n)
{
	mark_shadow(address, n, KMEMCHECK_SHADOW_UNINITIALIZED);
}

/*
 * Fill the shadow memory of the given address such that the memory at that
 * address is marked as being initialized.
 */
void kmemcheck_mark_initialized(void *address, unsigned int n)
{
	mark_shadow(address, n, KMEMCHECK_SHADOW_INITIALIZED);
}
EXPORT_SYMBOL_GPL(kmemcheck_mark_initialized);

void kmemcheck_mark_freed(void *address, unsigned int n)
{
	mark_shadow(address, n, KMEMCHECK_SHADOW_FREED);
}
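
/*
 * Sketch of how an allocator hook might drive the helpers above
 * (hypothetical caller, for illustration only):
 *
 *	kmemcheck_mark_uninitialized(object, size);	(on allocation)
 *	...
 *	kmemcheck_mark_freed(object, size);		(on release)
 *
 * A later read of a byte that was never written then shows up as
 * uninitialized (or freed) when the shadow is tested.
 */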

void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; ++i)
		kmemcheck_mark_unallocated(page_address(&p[i]), PAGE_SIZE);
}

void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; ++i)
		kmemcheck_mark_uninitialized(page_address(&p[i]), PAGE_SIZE);
}

void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; ++i)
		kmemcheck_mark_initialized(page_address(&p[i]), PAGE_SIZE);
}
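
/*
 * The *_pages() variants simply apply the byte-granular helpers one page
 * at a time. A page-allocator hook might use them like this (illustrative
 * sketch, assuming an order-'order' allocation):
 *
 *	kmemcheck_mark_uninitialized_pages(page, 1 << order);
 */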

enum kmemcheck_shadow kmemcheck_shadow_test(void *shadow, unsigned int size)
{
	uint8_t *x;
	unsigned int i;

	x = shadow;

#ifdef CONFIG_KMEMCHECK_PARTIAL_OK
	/*
	 * Make sure _some_ bytes are initialized. Gcc frequently generates
	 * code to access neighboring bytes.
	 */
	for (i = 0; i < size; ++i) {
		if (x[i] == KMEMCHECK_SHADOW_INITIALIZED)
			return x[i];
	}
#else
	/* All bytes must be initialized. */
	for (i = 0; i < size; ++i) {
		if (x[i] != KMEMCHECK_SHADOW_INITIALIZED)
			return x[i];
	}
#endif

	return x[0];
}
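
/*
 * Note on the return value: KMEMCHECK_SHADOW_INITIALIZED means the access
 * is acceptable; any other value is the status of an offending byte, which
 * the caller can use in its error report. With CONFIG_KMEMCHECK_PARTIAL_OK
 * a single initialized byte is enough to accept the whole access, which
 * avoids false positives when gcc touches neighboring bytes it does not
 * actually use.
 */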

void kmemcheck_shadow_set(void *shadow, unsigned int size)
{
	uint8_t *x;
	unsigned int i;

	x = shadow;
	for (i = 0; i < size; ++i)
		x[i] = KMEMCHECK_SHADOW_INITIALIZED;
}
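
/*
 * Illustrative caller (a sketch of an access hook, not code in this file;
 * report_error() is a hypothetical stand-in): test first, report if
 * needed, then mark the range initialized:
 *
 *	status = kmemcheck_shadow_test(shadow, size);
 *	if (status == KMEMCHECK_SHADOW_INITIALIZED)
 *		return;
 *	report_error(status, addr, size);
 *	kmemcheck_shadow_set(shadow, size);
 *
 * Setting the shadow afterwards keeps the same access from being
 * reported more than once.
 */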