/* Support for MMIO probes.
 * Borrows heavily from the kprobes code.
 * (C) 2002 Louis Zhuang <louis.zhuang@intel.com>.
 * 2007 Alexander Eichner
 * 2008 Pekka Paalanen <pq@iki.fi>
 */

#include <linux/version.h>
#include <linux/spinlock.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/percpu.h>
#include <asm/io.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>

#include "kmmio.h"

#define KMMIO_HASH_BITS 6
#define KMMIO_TABLE_SIZE (1 << KMMIO_HASH_BITS)
#define KMMIO_PAGE_HASH_BITS 4
#define KMMIO_PAGE_TABLE_SIZE (1 << KMMIO_PAGE_HASH_BITS)

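/*
 * Per-cpu bookkeeping for a fault currently being handled: the armed
 * page that faulted, the probe covering the faulting address (if any),
 * the saved TF/IF flags to restore after single-stepping, and a flag
 * marking a trace as in progress.
 */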
struct kmmio_context {
	struct kmmio_fault_page *fpage;
	struct kmmio_probe *probe;
	unsigned long saved_flags;
	int active;
};

static int kmmio_page_fault(struct pt_regs *regs, unsigned long error_code,
						unsigned long address);
static int kmmio_die_notifier(struct notifier_block *nb, unsigned long val,
						void *args);

static DEFINE_SPINLOCK(kmmio_lock);

/* These are protected by kmmio_lock */
unsigned int kmmio_count;
static unsigned int handler_registered;
static struct list_head kmmio_page_table[KMMIO_PAGE_TABLE_SIZE];
static LIST_HEAD(kmmio_probes);

/* Accessed per-cpu */
static DEFINE_PER_CPU(struct kmmio_context, kmmio_ctx);

static struct notifier_block nb_die = {
	.notifier_call = kmmio_die_notifier
};

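/*
 * Initialize the fault page hash table and hook the die notifier chain
 * to catch the debug exception. Must run before any probes are
 * registered.
 */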
int init_kmmio(void)
{
	int i;
	for (i = 0; i < KMMIO_PAGE_TABLE_SIZE; i++)
		INIT_LIST_HEAD(&kmmio_page_table[i]);

	register_die_notifier(&nb_die);
	return 0;
}

void cleanup_kmmio(void)
{
	/*
	 * Assume the following have already been cleaned up by calling
	 * unregister_kmmio_probe() appropriately:
	 * kmmio_page_table, kmmio_probes
	 */
	if (handler_registered) {
		if (mmiotrace_unregister_pf(&kmmio_page_fault))
			BUG();
		synchronize_rcu();
	}
	unregister_die_notifier(&nb_die);
}

/*
 * This is basically a dynamic stabbing problem:
 * we could use the existing prio tree code, or one of these possibly
 * better alternatives:
 * "The Interval Skip List: A Data Structure for Finding All Intervals
 * That Overlap a Point" (might be simple)
 * "Space Efficient Dynamic Stabbing with Fast Queries" - Mikkel Thorup
 */
/* Get the kmmio at this addr (if any). You must be holding kmmio_lock. */
static struct kmmio_probe *get_kmmio_probe(unsigned long addr)
{
	struct kmmio_probe *p;
	list_for_each_entry(p, &kmmio_probes, list) {
		/* a probe covers [addr, addr + len), hence '<' not '<=' */
		if (addr >= p->addr && addr < (p->addr + p->len))
			return p;
	}
	return NULL;
}

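/* Find the fault page covering this address. You must be holding kmmio_lock. */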
static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long page)
{
	struct list_head *head;
	struct kmmio_fault_page *p;

	page &= PAGE_MASK;
	head = &kmmio_page_table[hash_long(page, KMMIO_PAGE_HASH_BITS)];
	list_for_each_entry(p, head, list) {
		if (p->page == page)
			return p;
	}

	return NULL;
}

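/*
 * Arm the page: clear the present bit in its pte (or pmd for a 2M
 * mapping), so that any access raises a page fault which
 * kmmio_page_fault() can intercept. Optionally reports the page table
 * level via *page_level.
 */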
static void arm_kmmio_fault_page(unsigned long page, int *page_level)
{
	unsigned long address = page & PAGE_MASK;
	int level;
	pte_t *pte = lookup_address(address, &level);

	if (!pte) {
		printk(KERN_ERR "Error in %s: no pte for page 0x%08lx\n",
							__func__, page);
		return;
	}

	if (level == PG_LEVEL_2M) {
		pmd_t *pmd = (pmd_t *)pte;
		set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_PRESENT));
	} else {
		/* PG_LEVEL_4K */
		set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
	}

	if (page_level)
		*page_level = level;

	__flush_tlb_one(page);
}

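/*
 * Disarm the page: restore the present bit so the faulting access can
 * be re-executed and complete under single-stepping.
 */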
static void disarm_kmmio_fault_page(unsigned long page, int *page_level)
{
	unsigned long address = page & PAGE_MASK;
	int level;
	pte_t *pte = lookup_address(address, &level);

	if (!pte) {
		printk(KERN_ERR "Error in %s: no pte for page 0x%08lx\n",
							__func__, page);
		return;
	}

	if (level == PG_LEVEL_2M) {
		pmd_t *pmd = (pmd_t *)pte;
		set_pmd(pmd, __pmd(pmd_val(*pmd) | _PAGE_PRESENT));
	} else {
		/* PG_LEVEL_4K */
		set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
	}

	if (page_level)
		*page_level = level;

	__flush_tlb_one(page);
}

/*
 * Interrupts are disabled on entry, as the page fault is taken through
 * an interrupt gate, and they remain disabled throughout this function.
 */
static int kmmio_handler(struct pt_regs *regs, unsigned long addr)
{
	struct kmmio_context *ctx = &get_cpu_var(kmmio_ctx);

	/*
	 * Preemption is now disabled to prevent a process switch during
	 * single stepping. We can only handle one active kmmio trace
	 * per cpu, so ensure that we finish it before something else
	 * gets to run.
	 *
	 * XXX what if an interrupt occurs between returning from
	 * do_page_fault() and entering the single-step exception handler?
	 * And that interrupt triggers a kmmio trap?
	 */
	preempt_disable();

	/* interrupts disabled and CPU-local data => atomicity guaranteed. */
	if (ctx->active) {
		/*
		 * This avoids a deadlock with kmmio_lock.
		 * If this page fault really was due to a kmmio trap,
		 * all hell breaks loose.
		 */
		printk(KERN_EMERG "mmiotrace: recursive probe hit on CPU %d, "
					"for address 0x%08lx. Ignoring.\n",
					smp_processor_id(), addr);
		goto no_kmmio;
	}
	ctx->active++;

	/*
	 * Acquire the kmmio lock to prevent changes affecting
	 * get_kmmio_fault_page() and get_kmmio_probe(), since we save their
	 * returned pointers.
	 * The lock is released in post_kmmio_handler().
	 * XXX: could/should get_kmmio_*() be using RCU instead of a spinlock?
	 */
	spin_lock(&kmmio_lock);

	ctx->fpage = get_kmmio_fault_page(addr);
	if (!ctx->fpage) {
		/* this page fault was not caused by kmmio */
		goto no_kmmio_locked;
	}

	ctx->probe = get_kmmio_probe(addr);
	ctx->saved_flags = (regs->flags & (TF_MASK | IF_MASK));

	if (ctx->probe && ctx->probe->pre_handler)
		ctx->probe->pre_handler(ctx->probe, regs, addr);

	/* Single-step the faulting instruction with interrupts kept off. */
	regs->flags |= TF_MASK;
	regs->flags &= ~IF_MASK;

	/* We hold the lock; now set the present bit in the PTE and single step. */
	disarm_kmmio_fault_page(ctx->fpage->page, NULL);

	put_cpu_var(kmmio_ctx);
	return 1;

no_kmmio_locked:
	spin_unlock(&kmmio_lock);
	ctx->active--;
no_kmmio:
	preempt_enable_no_resched();
	put_cpu_var(kmmio_ctx);
	/* page fault not handled by kmmio */
	return 0;
}

/*
 * Interrupts are disabled on entry, as trap 1 (the debug exception)
 * is an interrupt gate, and they remain disabled throughout this
 * function. We also still hold kmmio_lock, taken in kmmio_handler().
 */
static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
{
	int ret = 0;
	struct kmmio_context *ctx = &get_cpu_var(kmmio_ctx);

	if (!ctx->active)
		goto out;

	if (ctx->probe && ctx->probe->post_handler)
		ctx->probe->post_handler(ctx->probe, condition, regs);

	/* Re-arm the page so the next access faults again. */
	arm_kmmio_fault_page(ctx->fpage->page, NULL);

	regs->flags &= ~TF_MASK;
	regs->flags |= ctx->saved_flags;

	/* These were acquired in kmmio_handler(). */
	ctx->active--;
	spin_unlock(&kmmio_lock);
	preempt_enable_no_resched();

	/*
	 * If somebody else is single-stepping across a probe point, flags
	 * will have TF set; in that case, continue the remaining processing
	 * of do_debug, as if this were not a probe hit.
	 */
	if (!(regs->flags & TF_MASK))
		ret = 1;

out:
	put_cpu_var(kmmio_ctx);
	return ret;
}

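/*
 * Reference-count the fault page: several probes may share one page.
 * Arm the page on the first reference. Caller must hold kmmio_lock.
 */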
static int add_kmmio_fault_page(unsigned long page)
{
	struct kmmio_fault_page *f;

	page &= PAGE_MASK;
	f = get_kmmio_fault_page(page);
	if (f) {
		f->count++;
		return 0;
	}

	f = kmalloc(sizeof(*f), GFP_ATOMIC);
	if (!f)
		return -1;

	f->count = 1;
	f->page = page;
	list_add(&f->list,
		&kmmio_page_table[hash_long(f->page, KMMIO_PAGE_HASH_BITS)]);

	arm_kmmio_fault_page(f->page, NULL);

	return 0;
}

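/*
 * Drop one reference to the fault page; disarm and unlink it when the
 * last reference goes away. Caller must hold kmmio_lock.
 */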
static void release_kmmio_fault_page(unsigned long page)
{
	struct kmmio_fault_page *f;

	page &= PAGE_MASK;
	f = get_kmmio_fault_page(page);
	if (!f)
		return;

	f->count--;
	if (!f->count) {
		disarm_kmmio_fault_page(f->page, NULL);
		list_del(&f->list);
		/*
		 * No one can hold a stale pointer: kmmio_lock is held both
		 * here and across any fault currently being handled.
		 */
		kfree(f);
	}
}

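/*
 * register_kmmio_probe() - arm the pages covering
 * [p->addr, p->addr + p->len) so that p->pre_handler and
 * p->post_handler are called around each access to that range.
 * Returns -EEXIST if a probe already covers p->addr.
 */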
int register_kmmio_probe(struct kmmio_probe *p)
{
	int ret = 0;
	unsigned long size = 0;

	spin_lock_irq(&kmmio_lock);
	if (get_kmmio_probe(p->addr)) {
		ret = -EEXIST;
		goto out;
	}
	kmmio_count++;
	list_add(&p->list, &kmmio_probes);
	while (size < p->len) {
		if (add_kmmio_fault_page(p->addr + size))
			printk(KERN_ERR "mmio: Unable to set page fault.\n");
		size += PAGE_SIZE;
	}

	if (!handler_registered) {
		if (mmiotrace_register_pf(&kmmio_page_fault))
			printk(KERN_ERR "mmiotrace: Cannot register page "
					"fault handler.\n");
		else
			handler_registered++;
	}

out:
	spin_unlock_irq(&kmmio_lock);
	/*
	 * XXX: What should I do here?
	 * Here was a call to global_flush_tlb(), but it does not exist
	 * anymore.
	 */
	return ret;
}

void unregister_kmmio_probe(struct kmmio_probe *p)
{
	unsigned long size = 0;

	spin_lock_irq(&kmmio_lock);
	while (size < p->len) {
		release_kmmio_fault_page(p->addr + size);
		size += PAGE_SIZE;
	}
	list_del(&p->list);
	kmmio_count--;
	spin_unlock_irq(&kmmio_lock);
}

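/*
 * Usage sketch (illustrative only; my_pre, my_probe and my_iomem are
 * hypothetical names, not part of this API). With my_iomem an
 * ioremap()'d MMIO address:
 *
 *	static void my_pre(struct kmmio_probe *p, struct pt_regs *regs,
 *						unsigned long addr)
 *	{
 *		pr_info("MMIO access at 0x%08lx\n", addr);
 *	}
 *
 *	static struct kmmio_probe my_probe = {
 *		.addr = (unsigned long)my_iomem,
 *		.len = PAGE_SIZE,
 *		.pre_handler = my_pre,
 *	};
 *
 *	init_kmmio();
 *	register_kmmio_probe(&my_probe);
 *	...
 *	unregister_kmmio_probe(&my_probe);
 *	cleanup_kmmio();
 */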
/*
 * The following holds as of 2.6.20, mainly for the x86_64 arch:
 * This is being called from do_page_fault(), via the page fault notifier
 * chain. The chain is called for both user space faults and kernel space
 * faults (address >= TASK_SIZE64), except not on faults serviced by
 * vmalloc_fault().
 *
 * We may be in an interrupt or a critical section. Also prefetching may
 * trigger a page fault. We may be in the middle of a process switch.
 * The page fault hook functionality has put us inside an RCU read-side
 * critical section.
 *
 * Local interrupts are disabled, so preemption cannot happen.
 * Do not enable interrupts, do not sleep, and watch out for other CPUs.
 */
static int kmmio_page_fault(struct pt_regs *regs, unsigned long error_code,
						unsigned long address)
{
	if (is_kmmio_active())
		if (kmmio_handler(regs, address) == 1)
			return -1;
	return 0;
}

static int kmmio_die_notifier(struct notifier_block *nb, unsigned long val,
						void *args)
{
	struct die_args *arg = args;

	if (val == DIE_DEBUG)
		if (post_kmmio_handler(arg->err, arg->regs) == 1)
			return NOTIFY_STOP;

	return NOTIFY_DONE;
}