/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Interval tree (augmented rbtree) used to store the PAT memory type
 * reservations.
 */

#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/gfp.h>

#include <asm/pgtable.h>
#include <asm/pat.h>

#include "pat_internal.h"

/*
 * The memtype tree keeps track of the memory type for specific
 * physical memory areas. Without proper tracking, conflicting memory
 * types in different mappings can cause CPU cache corruption.
 *
 * The tree is an interval tree (augmented rbtree) ordered by
 * starting address. It can contain multiple entries for
 * different regions which overlap. All the aliases have the same
 * cache attributes, of course.
 *
 * memtype_lock protects the rbtree.
 */
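/*
 * Usage sketch (illustrative only; the variable names below are made up and
 * the real callers may differ): with memtype_lock held, a caller reserves a
 * range by filling in a struct memtype and handing it to
 * rbt_memtype_check_insert(), and later releases it with rbt_memtype_erase():
 *
 *	new->start = range_start;
 *	new->end   = range_end;
 *	new->type  = req_type;
 *	err = rbt_memtype_check_insert(new, &actual_type);
 *	...
 *	old = rbt_memtype_erase(range_start, range_end);
 */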

static struct rb_root memtype_rbroot = RB_ROOT;

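/* Does [start, end) intersect the range covered by @node? */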
static int is_node_overlap(struct memtype *node, u64 start, u64 end)
{
        if (node->start >= end || node->end <= start)
                return 0;

        return 1;
}

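/* Maximum 'end' over the subtree rooted at @node (0 for an empty subtree) */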
static u64 get_subtree_max_end(struct rb_node *node)
{
        u64 ret = 0;
        if (node) {
                struct memtype *data = container_of(node, struct memtype, rb);
                ret = data->subtree_max_end;
        }
        return ret;
}

/* Update 'subtree_max_end' for a node, based on node and its children */
static void memtype_rb_augment_cb(struct rb_node *node, void *__unused)
{
        struct memtype *data;
        u64 max_end, child_max_end;

        if (!node)
                return;

        data = container_of(node, struct memtype, rb);
        max_end = data->end;

        child_max_end = get_subtree_max_end(node->rb_right);
        if (child_max_end > max_end)
                max_end = child_max_end;

        child_max_end = get_subtree_max_end(node->rb_left);
        if (child_max_end > max_end)
                max_end = child_max_end;

        data->subtree_max_end = max_end;
}

/* Find the first (lowest start addr) overlapping range from rb tree */
static struct memtype *memtype_rb_lowest_match(struct rb_root *root,
                                u64 start, u64 end)
{
        struct rb_node *node = root->rb_node;
        struct memtype *last_lower = NULL;

        while (node) {
                struct memtype *data = container_of(node, struct memtype, rb);

                if (get_subtree_max_end(node->rb_left) > start) {
                        /* Lowest overlap if any must be on left side */
                        node = node->rb_left;
                } else if (is_node_overlap(data, start, end)) {
                        last_lower = data;
                        break;
                } else if (start >= data->start) {
                        /* Lowest overlap if any must be on right side */
                        node = node->rb_right;
                } else {
                        break;
                }
        }
        return last_lower; /* Returns NULL if there is no overlap */
}

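/*
 * Walk the overlapping entries, starting from the lowest one, and return the
 * node that covers exactly [start, end), or NULL if there is none.
 */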
static struct memtype *memtype_rb_exact_match(struct rb_root *root,
                                u64 start, u64 end)
{
        struct memtype *match;

        match = memtype_rb_lowest_match(root, start, end);
        while (match != NULL && match->start < end) {
                struct rb_node *node;

                if (match->start == start && match->end == end)
                        return match;

                node = rb_next(&match->rb);
                if (node)
                        match = container_of(node, struct memtype, rb);
                else
                        match = NULL;
        }

        return NULL; /* Returns NULL if there is no exact match */
}

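/*
 * Check that every existing entry overlapping [start, end) has the same
 * memory type as the request. On success, return 0 and report the effective
 * type (the type of the first overlapping entry, or reqtype if there is no
 * overlap) through *newtype. Return -EBUSY on a type conflict.
 */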
static int memtype_rb_check_conflict(struct rb_root *root,
                                u64 start, u64 end,
                                unsigned long reqtype, unsigned long *newtype)
{
        struct rb_node *node;
        struct memtype *match;
        int found_type = reqtype;

        match = memtype_rb_lowest_match(&memtype_rbroot, start, end);
        if (match == NULL)
                goto success;

        if (match->type != found_type && newtype == NULL)
                goto failure;

        dprintk("Overlap at 0x%Lx-0x%Lx\n", match->start, match->end);
        found_type = match->type;

        node = rb_next(&match->rb);
        while (node) {
                match = container_of(node, struct memtype, rb);

                if (match->start >= end) /* Checked all possible matches */
                        goto success;

                if (is_node_overlap(match, start, end) &&
                    match->type != found_type) {
                        goto failure;
                }

                node = rb_next(&match->rb);
        }
success:
        if (newtype)
                *newtype = found_type;

        return 0;

failure:
        printk(KERN_INFO "%s:%d conflicting memory types "
                "%Lx-%Lx %s<->%s\n", current->comm, current->pid, start,
                end, cattr_name(found_type), cattr_name(match->type));
        return -EBUSY;
}

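/* Insert a new entry, keyed on ->start, and update the augmented metadata. */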
static void memtype_rb_insert(struct rb_root *root, struct memtype *newdata)
{
        struct rb_node **node = &(root->rb_node);
        struct rb_node *parent = NULL;

        while (*node) {
                struct memtype *data = container_of(*node, struct memtype, rb);

                parent = *node;
                if (newdata->start <= data->start)
                        node = &((*node)->rb_left);
                else if (newdata->start > data->start)
                        node = &((*node)->rb_right);
        }

        rb_link_node(&newdata->rb, parent, node);
        rb_insert_color(&newdata->rb, root);
        rb_augment_insert(&newdata->rb, memtype_rb_augment_cb, NULL);
}

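/*
 * Insert @new unless it conflicts with an existing reservation of a
 * different memory type. Callers are expected to hold memtype_lock.
 */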
int rbt_memtype_check_insert(struct memtype *new, unsigned long *ret_type)
{
        int err = 0;

        err = memtype_rb_check_conflict(&memtype_rbroot, new->start, new->end,
                                        new->type, ret_type);

        if (!err) {
                if (ret_type)
                        new->type = *ret_type;

                new->subtree_max_end = new->end;
                memtype_rb_insert(&memtype_rbroot, new);
        }
        return err;
}

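/*
 * Remove and return the entry that exactly matches [start, end), or return
 * NULL if no such entry exists.
 */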
struct memtype *rbt_memtype_erase(u64 start, u64 end)
{
        struct rb_node *deepest;
        struct memtype *data;

        data = memtype_rb_exact_match(&memtype_rbroot, start, end);
        if (!data)
                goto out;

        deepest = rb_augment_erase_begin(&data->rb);
        rb_erase(&data->rb, &memtype_rbroot);
        rb_augment_erase_end(deepest, memtype_rb_augment_cb, NULL);
out:
        return data;
}

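/* Return the lowest-starting entry overlapping the page at @addr, if any. */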
struct memtype *rbt_memtype_lookup(u64 addr)
{
        struct memtype *data;
        data = memtype_rb_lowest_match(&memtype_rbroot, addr, addr + PAGE_SIZE);
        return data;
}

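/*
 * debugfs helper: copy the pos'th entry (1-based, in start-address order)
 * into *out. Returns 0 on success, 1 when pos is past the last entry.
 */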
#if defined(CONFIG_DEBUG_FS)
int rbt_memtype_copy_nth_element(struct memtype *out, loff_t pos)
{
        struct rb_node *node;
        int i = 1;

        node = rb_first(&memtype_rbroot);
        while (node && pos != i) {
                node = rb_next(node);
                i++;
        }

        if (node) { /* pos == i */
                struct memtype *this = container_of(node, struct memtype, rb);
                *out = *this;
                return 0;
        } else {
                return 1;
        }
}
#endif