/* Support for MMIO probes.
 * Borrows much code from kprobes.
 * (C) 2002 Louis Zhuang <louis.zhuang@intel.com>.
 *     2007 Alexander Eichner
 *     2008 Pekka Paalanen <pq@iki.fi>
 */

#include <linux/version.h>
#include <linux/spinlock.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <asm/io.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>

#include "kmmio.h"

#define KMMIO_HASH_BITS 6
#define KMMIO_TABLE_SIZE (1 << KMMIO_HASH_BITS)
#define KMMIO_PAGE_HASH_BITS 4
#define KMMIO_PAGE_TABLE_SIZE (1 << KMMIO_PAGE_HASH_BITS)

struct kmmio_context {
	struct kmmio_fault_page *fpage;
	struct kmmio_probe *probe;
	unsigned long saved_flags;
	int active;
};

static int kmmio_page_fault(struct pt_regs *regs, unsigned long error_code,
						unsigned long address);
static int kmmio_die_notifier(struct notifier_block *nb, unsigned long val,
						void *args);

static DEFINE_SPINLOCK(kmmio_lock);

/* These are protected by kmmio_lock */
unsigned int kmmio_count;
static unsigned int handler_registered;
static struct list_head kmmio_page_table[KMMIO_PAGE_TABLE_SIZE];
static LIST_HEAD(kmmio_probes);

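/* Per-CPU state for the single MMIO access currently being single-stepped. */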
static struct kmmio_context kmmio_ctx[NR_CPUS];

static struct pf_handler kmmio_pf_hook = {
	.handler = kmmio_page_fault
};

static struct notifier_block nb_die = {
	.notifier_call = kmmio_die_notifier
};

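/* Set up the fault page hash table and hook into the die notifier chain. */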
int init_kmmio(void)
{
	int i;
	for (i = 0; i < KMMIO_PAGE_TABLE_SIZE; i++)
		INIT_LIST_HEAD(&kmmio_page_table[i]);

	register_die_notifier(&nb_die);
	return 0;
}

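/* Unhook the page fault handler (if it was installed) and the die notifier. */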
void cleanup_kmmio(void)
{
	/*
	 * Assume the following have already been cleaned up by calling
	 * unregister_kmmio_probe() as appropriate:
	 * kmmio_page_table, kmmio_probes
	 */
	if (handler_registered) {
		unregister_page_fault_handler(&kmmio_pf_hook);
		synchronize_rcu();
	}
	unregister_die_notifier(&nb_die);
}

/*
 * This is basically a dynamic stabbing problem:
 * could use the existing prio tree code, or one of the possibly better
 * alternatives:
 * - The Interval Skip List: A Data Structure for Finding All Intervals
 *   That Overlap a Point (might be simple)
 * - Space Efficient Dynamic Stabbing with Fast Queries - Mikkel Thorup
 */
/* Get the kmmio probe at this addr (if any). You must be holding kmmio_lock. */
static struct kmmio_probe *get_kmmio_probe(unsigned long addr)
{
	struct kmmio_probe *p;
	list_for_each_entry(p, &kmmio_probes, list) {
		if (addr >= p->addr && addr <= (p->addr + p->len))
			return p;
	}
	return NULL;
}

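/*
 * Look up the kmmio_fault_page tracking the given page, if any.
 * You must be holding kmmio_lock.
 */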
static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long page)
{
	struct list_head *head, *tmp;

	page &= PAGE_MASK;
	head = &kmmio_page_table[hash_long(page, KMMIO_PAGE_HASH_BITS)];
	list_for_each(tmp, head) {
		struct kmmio_fault_page *p
			= list_entry(tmp, struct kmmio_fault_page, list);
		if (p->page == page)
			return p;
	}

	return NULL;
}

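/*
 * Clear the present bit for the 4k pte (or 2M pmd) mapping this page so that
 * the next access faults into kmmio_handler(). Optionally reports the page
 * table level that was modified via *page_level.
 */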
static void arm_kmmio_fault_page(unsigned long page, int *page_level)
{
	unsigned long address = page & PAGE_MASK;
	int level;
	pte_t *pte = lookup_address(address, &level);

	if (!pte) {
		printk(KERN_ERR "Error in %s: no pte for page 0x%08lx\n",
						__FUNCTION__, page);
		return;
	}

	if (level == PG_LEVEL_2M) {
		pmd_t *pmd = (pmd_t *)pte;
		set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_PRESENT));
	} else {
		/* PG_LEVEL_4K */
		set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
	}

	if (page_level)
		*page_level = level;

	__flush_tlb_one(page);
}

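/*
 * Restore the present bit so the faulting access can be single-stepped.
 * Counterpart of arm_kmmio_fault_page().
 */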
static void disarm_kmmio_fault_page(unsigned long page, int *page_level)
{
	unsigned long address = page & PAGE_MASK;
	int level;
	pte_t *pte = lookup_address(address, &level);

	if (!pte) {
		printk(KERN_ERR "Error in %s: no pte for page 0x%08lx\n",
						__FUNCTION__, page);
		return;
	}

	if (level == PG_LEVEL_2M) {
		pmd_t *pmd = (pmd_t *)pte;
		set_pmd(pmd, __pmd(pmd_val(*pmd) | _PAGE_PRESENT));
	} else {
		/* PG_LEVEL_4K */
		set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
	}

	if (page_level)
		*page_level = level;

	__flush_tlb_one(page);
}

/*
 * Interrupts are disabled on entry as trap3 is an interrupt gate
 * and they remain disabled throughout this function.
 */
static int kmmio_handler(struct pt_regs *regs, unsigned long addr)
{
	struct kmmio_context *ctx;
	int cpu;

	/*
	 * Preemption is now disabled to prevent process switch during
	 * single stepping. We can only handle one active kmmio trace
	 * per cpu, so ensure that we finish it before something else
	 * gets to run.
	 *
	 * XXX what if an interrupt occurs between returning from
	 * do_page_fault() and entering the single-step exception handler?
	 * And that interrupt triggers a kmmio trap?
	 */
	preempt_disable();
	cpu = smp_processor_id();
	ctx = &kmmio_ctx[cpu];

	/* Interrupts disabled and CPU-local data => atomicity guaranteed. */
	if (ctx->active) {
		/*
		 * This avoids a deadlock with kmmio_lock.
		 * If this page fault really was due to a kmmio trap,
		 * all hell breaks loose.
		 */
		printk(KERN_EMERG "mmiotrace: recursive probe hit on CPU %d, "
					"for address %lu. Ignoring.\n",
					cpu, addr);
		goto no_kmmio;
	}
	ctx->active++;

	/*
	 * Acquire the kmmio lock to prevent changes affecting
	 * get_kmmio_fault_page() and get_kmmio_probe(), since we save their
	 * returned pointers.
	 * The lock is released in post_kmmio_handler().
	 * XXX: could/should get_kmmio_*() be using RCU instead of a spinlock?
	 */
	spin_lock(&kmmio_lock);

	ctx->fpage = get_kmmio_fault_page(addr);
	if (!ctx->fpage) {
		/* This page fault is not caused by kmmio. */
		goto no_kmmio_locked;
	}

	ctx->probe = get_kmmio_probe(addr);
	ctx->saved_flags = (regs->flags & (TF_MASK|IF_MASK));

	if (ctx->probe && ctx->probe->pre_handler)
		ctx->probe->pre_handler(ctx->probe, regs, addr);

	regs->flags |= TF_MASK;
	regs->flags &= ~IF_MASK;

	/* We hold the lock; now set the present bit in the PTE and single step. */
	disarm_kmmio_fault_page(ctx->fpage->page, NULL);

	return 1;

no_kmmio_locked:
	spin_unlock(&kmmio_lock);
	ctx->active--;
no_kmmio:
	preempt_enable_no_resched();
	/* Page fault not handled by kmmio. */
	return 0;
}

/*
 * Interrupts are disabled on entry as trap1 is an interrupt gate
 * and they remain disabled throughout this function.
 * We also hold the kmmio lock.
 */
static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	struct kmmio_context *ctx = &kmmio_ctx[cpu];

	if (!ctx->active)
		return 0;

	if (ctx->probe && ctx->probe->post_handler)
		ctx->probe->post_handler(ctx->probe, condition, regs);

	arm_kmmio_fault_page(ctx->fpage->page, NULL);

	regs->flags &= ~TF_MASK;
	regs->flags |= ctx->saved_flags;

	/* These were acquired in kmmio_handler(). */
	ctx->active--;
	spin_unlock(&kmmio_lock);
	preempt_enable_no_resched();

	/*
	 * If somebody else is single-stepping across a probe point, flags
	 * will have TF set, in which case continue the remaining processing
	 * of do_debug, as if this were not a probe hit.
	 */
	if (regs->flags & TF_MASK)
		return 0;

	return 1;
}

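/*
 * Track (and arm) one page. If it is already tracked, only bump its
 * reference count. Returns 0 on success, -1 on allocation failure.
 * Caller must hold kmmio_lock.
 */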
static int add_kmmio_fault_page(unsigned long page)
{
	struct kmmio_fault_page *f;

	page &= PAGE_MASK;
	f = get_kmmio_fault_page(page);
	if (f) {
		f->count++;
		return 0;
	}

	f = kmalloc(sizeof(*f), GFP_ATOMIC);
	if (!f)
		return -1;

	f->count = 1;
	f->page = page;
	list_add(&f->list,
		&kmmio_page_table[hash_long(f->page, KMMIO_PAGE_HASH_BITS)]);

	arm_kmmio_fault_page(f->page, NULL);

	return 0;
}

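/*
 * Drop one reference on a tracked page; when the last reference goes away,
 * disarm the page and unhash it. Caller must hold kmmio_lock.
 */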
static void release_kmmio_fault_page(unsigned long page)
{
	struct kmmio_fault_page *f;

	page &= PAGE_MASK;
	f = get_kmmio_fault_page(page);
	if (!f)
		return;

	f->count--;
	if (!f->count) {
		disarm_kmmio_fault_page(f->page, NULL);
		list_del(&f->list);
	}
}

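/*
 * Register a probe: every page in [p->addr, p->addr + p->len) gets armed,
 * and the page fault hook is installed on first use. A minimal usage
 * sketch follows; it assumes the struct kmmio_probe layout and handler
 * prototypes declared in kmmio.h (inferred here from how pre_handler and
 * post_handler are invoked above), and my_pre(), my_post() and my_mmio_addr
 * are hypothetical names, not part of this file:
 *
 *	static void my_pre(struct kmmio_probe *p, struct pt_regs *regs,
 *				unsigned long addr)
 *	{
 *		// runs from the fault, before the access is single-stepped
 *	}
 *
 *	static void my_post(struct kmmio_probe *p, unsigned long condition,
 *				struct pt_regs *regs)
 *	{
 *		// runs from the debug trap, after the access has completed
 *	}
 *
 *	static struct kmmio_probe my_probe = {
 *		.addr = my_mmio_addr,	// start of the ioremapped area
 *		.len = PAGE_SIZE,	// must cover the traced range
 *		.pre_handler = my_pre,
 *		.post_handler = my_post,
 *	};
 *
 *	register_kmmio_probe(&my_probe);
 */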
int register_kmmio_probe(struct kmmio_probe *p)
{
	int ret = 0;
	unsigned long size = 0;

	spin_lock_irq(&kmmio_lock);
	kmmio_count++;
	if (get_kmmio_probe(p->addr)) {
		ret = -EEXIST;
		goto out;
	}
	list_add(&p->list, &kmmio_probes);
	/*printk("adding fault pages...\n");*/
	while (size < p->len) {
		if (add_kmmio_fault_page(p->addr + size))
			printk(KERN_ERR "mmio: Unable to set page fault.\n");
		size += PAGE_SIZE;
	}

	if (!handler_registered) {
		register_page_fault_handler(&kmmio_pf_hook);
		handler_registered++;
	}

out:
	spin_unlock_irq(&kmmio_lock);
	/*
	 * XXX: What should be done here?
	 * There used to be a call to global_flush_tlb() here, but it does
	 * not exist anymore.
	 */
	return ret;
}

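/*
 * Remove a previously registered probe and release the fault pages covering
 * its range. Counterpart of register_kmmio_probe().
 */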
void unregister_kmmio_probe(struct kmmio_probe *p)
{
	unsigned long size = 0;

	spin_lock_irq(&kmmio_lock);
	while (size < p->len) {
		release_kmmio_fault_page(p->addr + size);
		size += PAGE_SIZE;
	}
	list_del(&p->list);
	kmmio_count--;
	spin_unlock_irq(&kmmio_lock);
}

/*
 * According to 2.6.20, mainly for the x86_64 arch:
 * This is being called from do_page_fault(), via the page fault notifier
 * chain. The chain is called for both user space faults and kernel space
 * faults (address >= TASK_SIZE64), except not on faults serviced by
 * vmalloc_fault().
 *
 * We may be in an interrupt or a critical section. Also prefetching may
 * trigger a page fault. We may be in the middle of a process switch.
 * The page fault hook functionality has put us inside an RCU read lock.
 *
 * Local interrupts are disabled, so preemption cannot happen.
 * Do not enable interrupts, do not sleep, and watch out for other CPUs.
 */
static int kmmio_page_fault(struct pt_regs *regs, unsigned long error_code,
						unsigned long address)
{
	if (is_kmmio_active())
		if (kmmio_handler(regs, address) == 1)
			return -1;
	return 0;
}

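/*
 * The die notifier delivers the debug trap (DIE_DEBUG) taken after the
 * single step, so the disarmed page can be re-armed in post_kmmio_handler().
 */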
static int kmmio_die_notifier(struct notifier_block *nb, unsigned long val,
						void *args)
{
	struct die_args *arg = args;

	if (val == DIE_DEBUG)
		if (post_kmmio_handler(arg->err, arg->regs) == 1)
			return NOTIFY_STOP;

	return NOTIFY_DONE;
}