/* Support for MMIO probes.
 * Benefits from much kprobes code.
 * (C) 2002 Louis Zhuang <louis.zhuang@intel.com>.
 *     2007 Alexander Eichner
 *     2008 Pekka Paalanen <pq@iki.fi>
 */

#include <linux/version.h>
#include <linux/spinlock.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <asm/io.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>

#include "kmmio.h"

#define KMMIO_HASH_BITS 6
#define KMMIO_TABLE_SIZE (1 << KMMIO_HASH_BITS)
#define KMMIO_PAGE_HASH_BITS 4
#define KMMIO_PAGE_TABLE_SIZE (1 << KMMIO_PAGE_HASH_BITS)

struct kmmio_context {
        struct kmmio_fault_page *fpage;
        struct kmmio_probe *probe;
        unsigned long saved_flags;
        int active;
};

static int kmmio_page_fault(struct pt_regs *regs, unsigned long error_code,
                            unsigned long address);
static int kmmio_die_notifier(struct notifier_block *nb, unsigned long val,
                              void *args);

static DEFINE_SPINLOCK(kmmio_lock);

/* These are protected by kmmio_lock */
unsigned int kmmio_count;
static unsigned int handler_registered;
static struct list_head kmmio_page_table[KMMIO_PAGE_TABLE_SIZE];
static LIST_HEAD(kmmio_probes);

static struct kmmio_context kmmio_ctx[NR_CPUS];

static struct notifier_block nb_die = {
        .notifier_call = kmmio_die_notifier
};

int init_kmmio(void)
{
        int i;
        for (i = 0; i < KMMIO_PAGE_TABLE_SIZE; i++)
                INIT_LIST_HEAD(&kmmio_page_table[i]);

        register_die_notifier(&nb_die);
        return 0;
}

void cleanup_kmmio(void)
{
        /*
         * Assume the following have already been cleaned up by calling
         * unregister_kmmio_probe() appropriately:
         * kmmio_page_table, kmmio_probes
         */
        if (handler_registered) {
                if (mmiotrace_unregister_pf(&kmmio_page_fault))
                        BUG();
                synchronize_rcu();
        }
        unregister_die_notifier(&nb_die);
}

/*
 * This is basically a dynamic stabbing problem:
 * we could use the existing prio tree code, or one of the possibly
 * better alternatives:
 * "The Interval Skip List: A Data Structure for Finding All Intervals
 * That Overlap a Point" (might be simple)
 * "Space Efficient Dynamic Stabbing with Fast Queries" - Mikkel Thorup
 */
/* Get the kmmio at this addr (if any). You must be holding kmmio_lock. */
static struct kmmio_probe *get_kmmio_probe(unsigned long addr)
{
        struct kmmio_probe *p;
        list_for_each_entry(p, &kmmio_probes, list) {
                if (addr >= p->addr && addr < (p->addr + p->len))
                        return p;
        }
        return NULL;
}

static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long page)
{
        struct list_head *head, *tmp;

        page &= PAGE_MASK;
        head = &kmmio_page_table[hash_long(page, KMMIO_PAGE_HASH_BITS)];
        list_for_each(tmp, head) {
                struct kmmio_fault_page *p
                        = list_entry(tmp, struct kmmio_fault_page, list);
                if (p->page == page)
                        return p;
        }

        return NULL;
}

static void arm_kmmio_fault_page(unsigned long page, int *page_level)
{
        unsigned long address = page & PAGE_MASK;
        int level;
        pte_t *pte = lookup_address(address, &level);

        if (!pte) {
                printk(KERN_ERR "Error in %s: no pte for page 0x%08lx\n",
                       __FUNCTION__, page);
                return;
        }

        if (level == PG_LEVEL_2M) {
                pmd_t *pmd = (pmd_t *)pte;
                set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_PRESENT));
        } else {
                /* PG_LEVEL_4K */
                set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
        }

        if (page_level)
                *page_level = level;

        __flush_tlb_one(page);
}

static void disarm_kmmio_fault_page(unsigned long page, int *page_level)
{
        unsigned long address = page & PAGE_MASK;
        int level;
        pte_t *pte = lookup_address(address, &level);

        if (!pte) {
                printk(KERN_ERR "Error in %s: no pte for page 0x%08lx\n",
                       __FUNCTION__, page);
                return;
        }

        if (level == PG_LEVEL_2M) {
                pmd_t *pmd = (pmd_t *)pte;
                set_pmd(pmd, __pmd(pmd_val(*pmd) | _PAGE_PRESENT));
        } else {
                /* PG_LEVEL_4K */
                set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
        }

        if (page_level)
                *page_level = level;

        __flush_tlb_one(page);
}

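/*
 * In short, the trap cycle implemented below works like this:
 * arm_kmmio_fault_page() clears _PAGE_PRESENT, so any access to an armed
 * page faults into kmmio_page_fault() -> kmmio_handler(). The handler runs
 * the probe's pre_handler, disarms the page and sets TF, so the faulting
 * instruction is re-executed and single-stepped with the page present.
 * The debug exception then reaches post_kmmio_handler() through the die
 * notifier, which runs the post_handler and re-arms the page.
 */
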
/*
 * Interrupts are disabled on entry as the page fault is taken through an
 * interrupt gate, and they remain disabled throughout this function.
 */
static int kmmio_handler(struct pt_regs *regs, unsigned long addr)
{
        struct kmmio_context *ctx;
        int cpu;

        /*
         * Preemption is now disabled to prevent process switch during
         * single stepping. We can only handle one active kmmio trace
         * per cpu, so ensure that we finish it before something else
         * gets to run.
         *
         * XXX what if an interrupt occurs between returning from
         * do_page_fault() and entering the single-step exception handler?
         * And that interrupt triggers a kmmio trap?
         */
        preempt_disable();
        cpu = smp_processor_id();
        ctx = &kmmio_ctx[cpu];

        /* interrupts disabled and CPU-local data => atomicity guaranteed. */
        if (ctx->active) {
                /*
                 * This avoids a deadlock with kmmio_lock.
                 * If this page fault really was due to a kmmio trap,
                 * all hell breaks loose.
                 */
                printk(KERN_EMERG "mmiotrace: recursive probe hit on CPU %d, "
                                  "for address 0x%08lx. Ignoring.\n",
                                  cpu, addr);
                goto no_kmmio;
        }
        ctx->active++;

        /*
         * Acquire the kmmio lock to prevent changes affecting
         * get_kmmio_fault_page() and get_kmmio_probe(), since we save their
         * returned pointers.
         * The lock is released in post_kmmio_handler().
         * XXX: could/should get_kmmio_*() be using RCU instead of a spinlock?
         */
        spin_lock(&kmmio_lock);

        ctx->fpage = get_kmmio_fault_page(addr);
        if (!ctx->fpage) {
                /* this page fault is not caused by kmmio */
                goto no_kmmio_locked;
        }

        ctx->probe = get_kmmio_probe(addr);
        ctx->saved_flags = (regs->flags & (TF_MASK | IF_MASK));

        if (ctx->probe && ctx->probe->pre_handler)
                ctx->probe->pre_handler(ctx->probe, regs, addr);

        regs->flags |= TF_MASK;
        regs->flags &= ~IF_MASK;

        /* We hold the lock, now set the present bit in the PTE and single step. */
        disarm_kmmio_fault_page(ctx->fpage->page, NULL);

        return 1;

no_kmmio_locked:
        spin_unlock(&kmmio_lock);
        ctx->active--;
no_kmmio:
        preempt_enable_no_resched();
        /* page fault not handled by kmmio */
        return 0;
}

/*
 * Interrupts are disabled on entry as trap 1 (the debug exception) is an
 * interrupt gate, and they remain disabled throughout this function.
 * We also hold the kmmio lock.
 */
static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
{
        int cpu = smp_processor_id();
        struct kmmio_context *ctx = &kmmio_ctx[cpu];

        if (!ctx->active)
                return 0;

        if (ctx->probe && ctx->probe->post_handler)
                ctx->probe->post_handler(ctx->probe, condition, regs);

        arm_kmmio_fault_page(ctx->fpage->page, NULL);

        regs->flags &= ~TF_MASK;
        regs->flags |= ctx->saved_flags;

        /* These were acquired in kmmio_handler(). */
        ctx->active--;
        spin_unlock(&kmmio_lock);
        preempt_enable_no_resched();

        /*
         * If somebody else is single-stepping across a probe point, flags
         * will have TF set, in which case continue the remaining processing
         * of do_debug, as if this were not a probe hit.
         */
        if (regs->flags & TF_MASK)
                return 0;

        return 1;
}

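/*
 * Fault pages are reference counted: probes whose ranges share a page only
 * bump f->count instead of arming the page again, and
 * release_kmmio_fault_page() disarms and unhashes a page only when the last
 * reference is dropped.
 */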
static int add_kmmio_fault_page(unsigned long page)
{
        struct kmmio_fault_page *f;

        page &= PAGE_MASK;
        f = get_kmmio_fault_page(page);
        if (f) {
                f->count++;
                return 0;
        }

        f = kmalloc(sizeof(*f), GFP_ATOMIC);
        if (!f)
                return -1;

        f->count = 1;
        f->page = page;
        list_add(&f->list,
                 &kmmio_page_table[hash_long(f->page, KMMIO_PAGE_HASH_BITS)]);

        arm_kmmio_fault_page(f->page, NULL);

        return 0;
}

static void release_kmmio_fault_page(unsigned long page)
{
        struct kmmio_fault_page *f;

        page &= PAGE_MASK;
        f = get_kmmio_fault_page(page);
        if (!f)
                return;

        f->count--;
        if (!f->count) {
                disarm_kmmio_fault_page(f->page, NULL);
                list_del(&f->list);
        }
}

int register_kmmio_probe(struct kmmio_probe *p)
{
        int ret = 0;
        unsigned long size = 0;

        spin_lock_irq(&kmmio_lock);
        if (get_kmmio_probe(p->addr)) {
                ret = -EEXIST;
                goto out;
        }
        kmmio_count++;
        list_add(&p->list, &kmmio_probes);
        /*printk("adding fault pages...\n");*/
        while (size < p->len) {
                if (add_kmmio_fault_page(p->addr + size))
                        printk(KERN_ERR "mmio: Unable to set page fault.\n");
                size += PAGE_SIZE;
        }

        if (!handler_registered) {
                if (mmiotrace_register_pf(&kmmio_page_fault))
                        printk(KERN_ERR "mmiotrace: Cannot register page "
                                        "fault handler.\n");
                else
                        handler_registered++;
        }

out:
        spin_unlock_irq(&kmmio_lock);
        /*
         * XXX: What should I do here?
         * Here was a call to global_flush_tlb(), but it does not exist
         * anymore.
         */
        return ret;
}

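/*
 * A minimal usage sketch, for illustration only. The struct fields and
 * handler signatures are inferred from the call sites in this file; the
 * handler return type is assumed to be void, and the "my_*" names and the
 * probed address are hypothetical.
 *
 *	static void my_pre(struct kmmio_probe *p, struct pt_regs *regs,
 *						unsigned long addr)
 *	{
 *		printk(KERN_INFO "mmio access at 0x%08lx\n", addr);
 *	}
 *
 *	static void my_post(struct kmmio_probe *p, unsigned long condition,
 *						struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "mmio access completed\n");
 *	}
 *
 *	static struct kmmio_probe my_probe = {
 *		.addr = (unsigned long)my_ioremapped_base,
 *		.len = PAGE_SIZE,
 *		.pre_handler = my_pre,
 *		.post_handler = my_post,
 *	};
 *
 * Register with register_kmmio_probe(&my_probe) after init_kmmio(), and
 * tear down with unregister_kmmio_probe(&my_probe).
 */
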
void unregister_kmmio_probe(struct kmmio_probe *p)
{
        unsigned long size = 0;

        spin_lock_irq(&kmmio_lock);
        while (size < p->len) {
                release_kmmio_fault_page(p->addr + size);
                size += PAGE_SIZE;
        }
        list_del(&p->list);
        kmmio_count--;
        spin_unlock_irq(&kmmio_lock);
}

/*
 * According to 2.6.20, mainly the x86_64 arch:
 * This is being called from do_page_fault(), via the page fault notifier
 * chain. The chain is called for both user space faults and kernel space
 * faults (address >= TASK_SIZE64), except not on faults serviced by
 * vmalloc_fault().
 *
 * We may be in an interrupt or a critical section. Also prefetching may
 * trigger a page fault. We may be in the middle of a process switch.
 * The page fault hook functionality has put us inside an RCU read lock.
 *
 * Local interrupts are disabled, so preemption cannot happen.
 * Do not enable interrupts, do not sleep, and watch out for other CPUs.
 */
static int kmmio_page_fault(struct pt_regs *regs, unsigned long error_code,
                            unsigned long address)
{
        if (is_kmmio_active())
                if (kmmio_handler(regs, address) == 1)
                        return -1;
        return 0;
}

static int kmmio_die_notifier(struct notifier_block *nb, unsigned long val,
                              void *args)
{
        struct die_args *arg = args;

        if (val == DIE_DEBUG)
                if (post_kmmio_handler(arg->err, arg->regs) == 1)
                        return NOTIFY_STOP;

        return NOTIFY_DONE;
}