blob: f68eaaaa8062ca880187de882eb81cf3162cf436 [file] [log] [blame]
Michael Grundy4ba069b2006-09-20 15:58:39 +02001/*
2 * Kernel Probes (KProbes)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright (C) IBM Corporation, 2002, 2006
19 *
20 * s390 port, used ppc64 as template. Mike Grundy <grundym@us.ibm.com>
21 */
22
Michael Grundy4ba069b2006-09-20 15:58:39 +020023#include <linux/kprobes.h>
24#include <linux/ptrace.h>
25#include <linux/preempt.h>
26#include <linux/stop_machine.h>
Christoph Hellwig1eeb66a2007-05-08 00:27:03 -070027#include <linux/kdebug.h>
Heiko Carstensa2b53672009-06-12 10:26:43 +020028#include <linux/uaccess.h>
Michael Grundy4ba069b2006-09-20 15:58:39 +020029#include <asm/cacheflush.h>
Michael Grundy4ba069b2006-09-20 15:58:39 +020030#include <asm/sections.h>
Michael Grundy4ba069b2006-09-20 15:58:39 +020031#include <linux/module.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090032#include <linux/slab.h>
Martin Schwidefskyadb45832010-11-10 10:05:57 +010033#include <linux/hardirq.h>
Michael Grundy4ba069b2006-09-20 15:58:39 +020034
/* Per-cpu pointer to the kprobe currently being processed on this cpu */
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
/* Per-cpu control block holding probe state across handler stages */
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

/* No symbols are blacklisted for kretprobes on s390 (terminator only) */
struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};
39
Martin Schwidefskyba640a52011-01-05 12:47:19 +010040static int __kprobes is_prohibited_opcode(kprobe_opcode_t *insn)
Michael Grundy4ba069b2006-09-20 15:58:39 +020041{
Martin Schwidefskyba640a52011-01-05 12:47:19 +010042 switch (insn[0] >> 8) {
Michael Grundy4ba069b2006-09-20 15:58:39 +020043 case 0x0c: /* bassm */
44 case 0x0b: /* bsm */
45 case 0x83: /* diag */
46 case 0x44: /* ex */
Heiko Carstensbac9f152010-05-26 23:26:20 +020047 case 0xac: /* stnsm */
48 case 0xad: /* stosm */
Michael Grundy4ba069b2006-09-20 15:58:39 +020049 return -EINVAL;
50 }
Martin Schwidefskyba640a52011-01-05 12:47:19 +010051 switch (insn[0]) {
Michael Grundy4ba069b2006-09-20 15:58:39 +020052 case 0x0101: /* pr */
53 case 0xb25a: /* bsa */
54 case 0xb240: /* bakr */
55 case 0xb258: /* bsg */
56 case 0xb218: /* pc */
57 case 0xb228: /* pt */
Heiko Carstensbac9f152010-05-26 23:26:20 +020058 case 0xb98d: /* epsw */
Michael Grundy4ba069b2006-09-20 15:58:39 +020059 return -EINVAL;
60 }
61 return 0;
62}
63
Martin Schwidefskyba640a52011-01-05 12:47:19 +010064static int __kprobes get_fixup_type(kprobe_opcode_t *insn)
Michael Grundy4ba069b2006-09-20 15:58:39 +020065{
66 /* default fixup method */
Martin Schwidefskyba640a52011-01-05 12:47:19 +010067 int fixup = FIXUP_PSW_NORMAL;
Michael Grundy4ba069b2006-09-20 15:58:39 +020068
Martin Schwidefskyba640a52011-01-05 12:47:19 +010069 switch (insn[0] >> 8) {
Michael Grundy4ba069b2006-09-20 15:58:39 +020070 case 0x05: /* balr */
71 case 0x0d: /* basr */
Martin Schwidefskyba640a52011-01-05 12:47:19 +010072 fixup = FIXUP_RETURN_REGISTER;
Michael Grundy4ba069b2006-09-20 15:58:39 +020073 /* if r2 = 0, no branch will be taken */
Martin Schwidefskyba640a52011-01-05 12:47:19 +010074 if ((insn[0] & 0x0f) == 0)
75 fixup |= FIXUP_BRANCH_NOT_TAKEN;
Michael Grundy4ba069b2006-09-20 15:58:39 +020076 break;
77 case 0x06: /* bctr */
78 case 0x07: /* bcr */
Martin Schwidefskyba640a52011-01-05 12:47:19 +010079 fixup = FIXUP_BRANCH_NOT_TAKEN;
Michael Grundy4ba069b2006-09-20 15:58:39 +020080 break;
81 case 0x45: /* bal */
82 case 0x4d: /* bas */
Martin Schwidefskyba640a52011-01-05 12:47:19 +010083 fixup = FIXUP_RETURN_REGISTER;
Michael Grundy4ba069b2006-09-20 15:58:39 +020084 break;
85 case 0x47: /* bc */
86 case 0x46: /* bct */
87 case 0x86: /* bxh */
88 case 0x87: /* bxle */
Martin Schwidefskyba640a52011-01-05 12:47:19 +010089 fixup = FIXUP_BRANCH_NOT_TAKEN;
Michael Grundy4ba069b2006-09-20 15:58:39 +020090 break;
91 case 0x82: /* lpsw */
Martin Schwidefskyba640a52011-01-05 12:47:19 +010092 fixup = FIXUP_NOT_REQUIRED;
Michael Grundy4ba069b2006-09-20 15:58:39 +020093 break;
94 case 0xb2: /* lpswe */
Martin Schwidefskyba640a52011-01-05 12:47:19 +010095 if ((insn[0] & 0xff) == 0xb2)
96 fixup = FIXUP_NOT_REQUIRED;
Michael Grundy4ba069b2006-09-20 15:58:39 +020097 break;
98 case 0xa7: /* bras */
Martin Schwidefskyba640a52011-01-05 12:47:19 +010099 if ((insn[0] & 0x0f) == 0x05)
100 fixup |= FIXUP_RETURN_REGISTER;
Michael Grundy4ba069b2006-09-20 15:58:39 +0200101 break;
102 case 0xc0:
Martin Schwidefskyba640a52011-01-05 12:47:19 +0100103 if ((insn[0] & 0x0f) == 0x00 || /* larl */
104 (insn[0] & 0x0f) == 0x05) /* brasl */
105 fixup |= FIXUP_RETURN_REGISTER;
Michael Grundy4ba069b2006-09-20 15:58:39 +0200106 break;
107 case 0xeb:
Martin Schwidefskyba640a52011-01-05 12:47:19 +0100108 if ((insn[2] & 0xff) == 0x44 || /* bxhg */
109 (insn[2] & 0xff) == 0x45) /* bxleg */
110 fixup = FIXUP_BRANCH_NOT_TAKEN;
Michael Grundy4ba069b2006-09-20 15:58:39 +0200111 break;
112 case 0xe3: /* bctg */
Martin Schwidefskyba640a52011-01-05 12:47:19 +0100113 if ((insn[2] & 0xff) == 0x46)
114 fixup = FIXUP_BRANCH_NOT_TAKEN;
Michael Grundy4ba069b2006-09-20 15:58:39 +0200115 break;
116 }
Martin Schwidefskyba640a52011-01-05 12:47:19 +0100117 return fixup;
118}
119
120int __kprobes arch_prepare_kprobe(struct kprobe *p)
121{
122 if ((unsigned long) p->addr & 0x01)
123 return -EINVAL;
124
125 /* Make sure the probe isn't going on a difficult instruction */
126 if (is_prohibited_opcode((kprobe_opcode_t *) p->addr))
127 return -EINVAL;
128
129 /* Use the get_insn_slot() facility for correctness */
130 if (!(p->ainsn.insn = get_insn_slot()))
131 return -ENOMEM;
132
133 p->opcode = *p->addr;
134 memcpy(p->ainsn.insn, p->addr, ((p->opcode >> 14) + 3) & -2);
135
136 return 0;
Michael Grundy4ba069b2006-09-20 15:58:39 +0200137}
138
/* Argument bundle passed to swap_instruction() via stop_machine() */
struct ins_replace_args {
	kprobe_opcode_t *ptr;	/* address of the instruction to patch */
	kprobe_opcode_t opcode;	/* halfword to write at that address */
};
143
Michael Grundy4ba069b2006-09-20 15:58:39 +0200144static int __kprobes swap_instruction(void *aref)
145{
Heiko Carstensacf01802009-06-22 12:08:23 +0200146 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
147 unsigned long status = kcb->kprobe_status;
Michael Grundy4ba069b2006-09-20 15:58:39 +0200148 struct ins_replace_args *args = aref;
Michael Grundy4ba069b2006-09-20 15:58:39 +0200149
Heiko Carstensacf01802009-06-22 12:08:23 +0200150 kcb->kprobe_status = KPROBE_SWAP_INST;
Martin Schwidefsky5a8b5892011-01-05 12:47:18 +0100151 probe_kernel_write(args->ptr, &args->opcode, sizeof(args->opcode));
Heiko Carstensacf01802009-06-22 12:08:23 +0200152 kcb->kprobe_status = status;
Martin Schwidefsky5a8b5892011-01-05 12:47:18 +0100153 return 0;
Michael Grundy4ba069b2006-09-20 15:58:39 +0200154}
155
156void __kprobes arch_arm_kprobe(struct kprobe *p)
157{
Michael Grundy4ba069b2006-09-20 15:58:39 +0200158 struct ins_replace_args args;
159
160 args.ptr = p->addr;
Martin Schwidefsky5a8b5892011-01-05 12:47:18 +0100161 args.opcode = BREAKPOINT_INSTRUCTION;
Rusty Russell9b1a4d32008-07-28 12:16:30 -0500162 stop_machine(swap_instruction, &args, NULL);
Michael Grundy4ba069b2006-09-20 15:58:39 +0200163}
164
165void __kprobes arch_disarm_kprobe(struct kprobe *p)
166{
Michael Grundy4ba069b2006-09-20 15:58:39 +0200167 struct ins_replace_args args;
168
169 args.ptr = p->addr;
Martin Schwidefsky5a8b5892011-01-05 12:47:18 +0100170 args.opcode = p->opcode;
Rusty Russell9b1a4d32008-07-28 12:16:30 -0500171 stop_machine(swap_instruction, &args, NULL);
Michael Grundy4ba069b2006-09-20 15:58:39 +0200172}
173
174void __kprobes arch_remove_kprobe(struct kprobe *p)
175{
Masami Hiramatsu12941562009-01-06 14:41:50 -0800176 if (p->ainsn.insn) {
177 free_insn_slot(p->ainsn.insn, 0);
178 p->ainsn.insn = NULL;
179 }
Michael Grundy4ba069b2006-09-20 15:58:39 +0200180}
181
/*
 * Arrange for a PER instruction-fetch single-step of the one
 * instruction at @ip. Saves control registers 9-11 and the relevant
 * psw mask bits in @kcb so disable_singlestep() can restore them,
 * then points the PSW at @ip with PER enabled and interrupts masked.
 */
static void __kprobes enable_singlestep(struct kprobe_ctlblk *kcb,
					struct pt_regs *regs,
					unsigned long ip)
{
	per_cr_bits kprobe_per_regs[1];

	/* Set up the per control reg info, will pass to lctl */
	memset(kprobe_per_regs, 0, sizeof(per_cr_bits));
	kprobe_per_regs[0].em_instruction_fetch = 1;
	/* start == end: the PER range covers exactly one address */
	kprobe_per_regs[0].starting_addr = ip;
	kprobe_per_regs[0].ending_addr = ip;

	/* Save control regs and psw mask */
	__ctl_store(kcb->kprobe_saved_ctl, 9, 11);
	kcb->kprobe_saved_imask = regs->psw.mask &
		(PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT);

	/* Set PER control regs, turns on single step for the given address */
	__ctl_load(kprobe_per_regs, 9, 11);
	regs->psw.mask |= PSW_MASK_PER;
	/* keep I/O and external interrupts off while stepping */
	regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
	regs->psw.addr = ip | PSW_ADDR_AMODE;
}
205
/*
 * Undo enable_singlestep(): restore control registers 9-11 and the
 * saved psw mask bits, and continue execution at @ip.
 */
static void __kprobes disable_singlestep(struct kprobe_ctlblk *kcb,
					 struct pt_regs *regs,
					 unsigned long ip)
{
	/* Restore control regs and psw mask, set new psw address */
	__ctl_load(kcb->kprobe_saved_ctl, 9, 11);
	regs->psw.mask &= ~PSW_MASK_PER;
	regs->psw.mask |= kcb->kprobe_saved_imask;
	regs->psw.addr = ip | PSW_ADDR_AMODE;
}
216
Martin Schwidefskyb9599792011-01-05 12:47:20 +0100217/*
218 * Activate a kprobe by storing its pointer to current_kprobe. The
219 * previous kprobe is stored in kcb->prev_kprobe. A stack of up to
220 * two kprobes can be active, see KPROBE_REENTER.
221 */
222static void __kprobes push_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *p)
Michael Grundy4ba069b2006-09-20 15:58:39 +0200223{
Martin Schwidefskyb9599792011-01-05 12:47:20 +0100224 kcb->prev_kprobe.kp = __get_cpu_var(current_kprobe);
Michael Grundy4ba069b2006-09-20 15:58:39 +0200225 kcb->prev_kprobe.status = kcb->kprobe_status;
Martin Schwidefskyb9599792011-01-05 12:47:20 +0100226 __get_cpu_var(current_kprobe) = p;
Michael Grundy4ba069b2006-09-20 15:58:39 +0200227}
228
Martin Schwidefskyb9599792011-01-05 12:47:20 +0100229/*
230 * Deactivate a kprobe by backing up to the previous state. If the
231 * current state is KPROBE_REENTER prev_kprobe.kp will be non-NULL,
232 * for any other state prev_kprobe.kp will be NULL.
233 */
234static void __kprobes pop_kprobe(struct kprobe_ctlblk *kcb)
Michael Grundy4ba069b2006-09-20 15:58:39 +0200235{
236 __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
237 kcb->kprobe_status = kcb->prev_kprobe.status;
Michael Grundy4ba069b2006-09-20 15:58:39 +0200238}
239
Christoph Hellwig4c4308c2007-05-08 00:34:14 -0700240void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
Michael Grundy4ba069b2006-09-20 15:58:39 +0200241 struct pt_regs *regs)
242{
Christoph Hellwig4c4308c2007-05-08 00:34:14 -0700243 ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14];
Michael Grundy4ba069b2006-09-20 15:58:39 +0200244
Christoph Hellwig4c4308c2007-05-08 00:34:14 -0700245 /* Replace the return addr with trampoline addr */
246 regs->gprs[14] = (unsigned long)&kretprobe_trampoline;
Michael Grundy4ba069b2006-09-20 15:58:39 +0200247}
248
/*
 * Breakpoint (DIE_BPT) entry point. The psw already points past the
 * 2-byte breakpoint instruction, so the probe address is psw.addr - 2.
 * Returns 1 if the trap was consumed by kprobes, 0 otherwise.
 */
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	/* breakpoint is 2 bytes; psw.addr points just past it */
	unsigned long *addr = (unsigned long *)
		((regs->psw.addr & PSW_ADDR_INSN) - 2);
	struct kprobe_ctlblk *kcb;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	/* Check we're not actually recursing */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			/*
			 * We have hit a kprobe while another is still
			 * active. This can happen in the pre and post
			 * handler. Single step the instruction of the
			 * new probe but do not call any handler function
			 * of this secondary kprobe.
			 * push_kprobe and pop_kprobe saves and restores
			 * the currently active kprobe.
			 */
			push_kprobe(kcb, p);
			kprobes_inc_nmissed_count(p);
			enable_singlestep(kcb, regs,
					  (unsigned long) p->ainsn.insn);
			kcb->kprobe_status = KPROBE_REENTER;
			return 1;
		} else {
			/*
			 * No registered probe at this address while one is
			 * active: give the active probe's break_handler
			 * (jprobes) a chance to claim the trap.
			 */
			p = __get_cpu_var(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs)) {
				goto ss_probe;
			}
		}
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p)
		/*
		 * No kprobe at this address. The fault has not been
		 * caused by a kprobe breakpoint. The race of breakpoint
		 * vs. kprobe remove does not exist because on s390 we
		 * use stop_machine to arm/disarm the breakpoints.
		 */
		goto no_kprobe;

	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
	push_kprobe(kcb, p);
	if (p->pre_handler && p->pre_handler(p, regs))
		/* handler has already set things up, so skip ss setup */
		return 1;

ss_probe:
	/* single-step the copied instruction, post_kprobe_handler follows */
	enable_singlestep(kcb, regs, (unsigned long) p->ainsn.insn);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	/* not ours: re-enable preemption and let the trap proceed */
	preempt_enable_no_resched();
	return ret;
}
317
/*
 * Function return probe trampoline:
 * - init_kprobes() establishes a probepoint here
 * - When the probed function returns, this probe
 * causes the handlers to fire
 */
static void __used kretprobe_trampoline_holder(void)
{
	/* bcr 0,0 is a no-op; the label only exists to host a kprobe */
	asm volatile(".global kretprobe_trampoline\n"
		     "kretprobe_trampoline: bcr 0,0\n");
}
329
/*
 * Called when the probe at kretprobe trampoline is hit.
 * Walks this task's kretprobe instances twice: the first pass locates
 * the real return address, the second pass runs the user handlers
 * (with ret_addr corrected) and recycles the instances. Finally the
 * psw is pointed at the real return address.
 */
static int __kprobes trampoline_probe_handler(struct kprobe *p,
					      struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
	kprobe_opcode_t *correct_ret_addr = NULL;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because an multiple functions in the call path
	 * have a return probe installed on them, and/or more than one return
	 * return probe was registered for a target function.
	 *
	 * We can handle this because:
	 * - instances are always inserted at the head of the list
	 * - when multiple return probes are registered for the same
	 * function, the first instance's ret_addr will point to the
	 * real return address, and all the rest will point to
	 * kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	/* must have found a non-trampoline return address by now */
	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	correct_ret_addr = ri->ret_addr;
	/* second pass: run handlers and recycle instances */
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;

		if (ri->rp && ri->rp->handler) {
			/* show the handler the corrected return address */
			ri->ret_addr = correct_ret_addr;
			ri->rp->handler(ri, regs);
		}

		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address) {
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
		}
	}

	/* resume at the real return address instead of the trampoline */
	regs->psw.addr = orig_ret_address | PSW_ADDR_AMODE;

	pop_kprobe(get_kprobe_ctlblk());
	kretprobe_hash_unlock(current, &flags);
	preempt_enable_no_resched();

	/* free recycled instances outside the hash lock */
	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption)
	 */
	return 1;
}
419
420/*
421 * Called after single-stepping. p->addr is the address of the
422 * instruction whose first byte has been replaced by the "breakpoint"
423 * instruction. To avoid the SMP problems that can occur when we
424 * temporarily put back the original opcode to single-step, we
425 * single-stepped a copy of the instruction. The address of this
426 * copy is p->ainsn.insn.
427 */
428static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
429{
430 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
Martin Schwidefskyfc0a1fe2011-01-05 12:47:17 +0100431 unsigned long ip = regs->psw.addr & PSW_ADDR_INSN;
Martin Schwidefskyba640a52011-01-05 12:47:19 +0100432 int fixup = get_fixup_type(p->ainsn.insn);
Michael Grundy4ba069b2006-09-20 15:58:39 +0200433
Martin Schwidefskyba640a52011-01-05 12:47:19 +0100434 if (fixup & FIXUP_PSW_NORMAL)
Martin Schwidefskyfc0a1fe2011-01-05 12:47:17 +0100435 ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn;
Michael Grundy4ba069b2006-09-20 15:58:39 +0200436
Martin Schwidefskyba640a52011-01-05 12:47:19 +0100437 if (fixup & FIXUP_BRANCH_NOT_TAKEN) {
438 int ilen = ((p->ainsn.insn[0] >> 14) + 3) & -2;
439 if (ip - (unsigned long) p->ainsn.insn == ilen)
440 ip = (unsigned long) p->addr + ilen;
441 }
Michael Grundy4ba069b2006-09-20 15:58:39 +0200442
Martin Schwidefskyba640a52011-01-05 12:47:19 +0100443 if (fixup & FIXUP_RETURN_REGISTER) {
444 int reg = (p->ainsn.insn[0] & 0xf0) >> 4;
445 regs->gprs[reg] += (unsigned long) p->addr -
446 (unsigned long) p->ainsn.insn;
447 }
Michael Grundy4ba069b2006-09-20 15:58:39 +0200448
Martin Schwidefskyfc0a1fe2011-01-05 12:47:17 +0100449 disable_singlestep(kcb, regs, ip);
Michael Grundy4ba069b2006-09-20 15:58:39 +0200450}
451
452static int __kprobes post_kprobe_handler(struct pt_regs *regs)
453{
454 struct kprobe *cur = kprobe_running();
455 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
456
457 if (!cur)
458 return 0;
459
460 if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
461 kcb->kprobe_status = KPROBE_HIT_SSDONE;
462 cur->post_handler(cur, regs, 0);
463 }
464
465 resume_execution(cur, regs);
Martin Schwidefskyb9599792011-01-05 12:47:20 +0100466 pop_kprobe(kcb);
Michael Grundy4ba069b2006-09-20 15:58:39 +0200467 preempt_enable_no_resched();
468
469 /*
470 * if somebody else is singlestepping across a probe point, psw mask
471 * will have PER set, in which case, continue the remaining processing
472 * of do_single_step, as if this is not a probe hit.
473 */
474 if (regs->psw.mask & PSW_MASK_PER) {
475 return 0;
476 }
477
478 return 1;
479}
480
/*
 * Handle a fault (page fault or trap) that occurred while kprobes
 * machinery was active on this cpu. Returns 1 if the fault was fixed
 * up here, 0 to let the normal fault handling proceed.
 */
static int __kprobes kprobe_trap_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	const struct exception_table_entry *entry;

	switch(kcb->kprobe_status) {
	case KPROBE_SWAP_INST:
		/* We are here because the instruction replacement failed */
		return 0;
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe and the nip points back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		disable_singlestep(kcb, regs, (unsigned long) cur->addr);
		pop_kprobe(kcb);
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting,
		 * we can also use npre/npostfault count for accouting
		 * these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page_fault, this could happen
		 * if handler tries to access user space by
		 * copy_from_user(), get_user() etc. Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		entry = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
		if (entry) {
			regs->psw.addr = entry->fixup | PSW_ADDR_AMODE;
			return 1;
		}

		/*
		 * fixup_exception() could not handle it,
		 * Let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}
	return 0;
}
543
/*
 * Fault entry point called from the page fault code. Interrupts are
 * disabled around kprobe_trap_handler() when they were enabled in the
 * faulting context. Note that psw.mask is deliberately re-read after
 * the call: the trap handler may have modified it (e.g. via
 * disable_singlestep()).
 */
int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	int ret;

	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_disable();
	ret = kprobe_trap_handler(regs, trapnr);
	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		/* keep PER off in the restored interrupt mask */
		local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);
	return ret;
}
555
/*
 * Wrapper routine to for handling exceptions.
 * Dispatches die-notifier events to the kprobe handlers; interrupts
 * are disabled while a handler runs if they were enabled in the
 * interrupted context. psw.mask is re-read afterwards because the
 * handlers may change it.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	struct pt_regs *regs = args->regs;
	int ret = NOTIFY_DONE;

	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_disable();

	switch (val) {
	case DIE_BPT:
		/* breakpoint instruction hit */
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_SSTEP:
		/* PER single-step event after the out-of-line insn */
		if (post_kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_TRAP:
		/* only handle traps raised while a kprobe is active */
		if (!preemptible() && kprobe_running() &&
		    kprobe_trap_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
		break;
	default:
		break;
	}

	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		/* keep PER off in the restored interrupt mask */
		local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);

	return ret;
}
592
/*
 * jprobe pre-handler: save the complete register set and the top of
 * the stack, then redirect execution to the jprobe entry function.
 * longjmp_break_handler() undoes all of this.
 */
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	unsigned long addr;
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));

	/* setup return addr to the jprobe handler routine */
	regs->psw.addr = (unsigned long)(jp->entry) | PSW_ADDR_AMODE;
	regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);

	/* r14 is the function return address */
	kcb->jprobe_saved_r14 = (unsigned long)regs->gprs[14];
	/* r15 is the stack pointer */
	kcb->jprobe_saved_r15 = (unsigned long)regs->gprs[15];
	addr = (unsigned long)kcb->jprobe_saved_r15;

	/* save the stack frame the jprobe handler will clobber */
	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *) addr,
	       MIN_STACK_SIZE(addr));
	return 1;
}
615
/* End of a jprobe handler: trigger an illegal-op trap (0x0002) that
 * leads back into longjmp_break_handler() via the break_handler path. */
void __kprobes jprobe_return(void)
{
	asm volatile(".word 0x0002");
}
620
/* Marker function directly after jprobe_return(); the bcr 0,0 is a no-op. */
void __kprobes jprobe_return_end(void)
{
	asm volatile("bcr 0,0");
}
625
/*
 * Counterpart of setjmp_pre_handler(): invoked when jprobe_return()
 * traps. Restores the saved register set and the saved stack contents
 * so the probed function continues as if the jprobe never ran.
 */
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long stack_addr = (unsigned long)(kcb->jprobe_saved_r15);

	/* Put the regs back */
	memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
	/* put the stack back */
	memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack,
	       MIN_STACK_SIZE(stack_addr));
	preempt_enable_no_resched();
	return 1;
}
639
/* Probe placed on the kretprobe trampoline itself */
static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *) & kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};
644
/* Arch-specific kprobes init: register the kretprobe trampoline probe. */
int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline_p);
}
Ananth N Mavinakayanahallibf8f6e52007-05-08 00:34:16 -0700649
650int __kprobes arch_trampoline_kprobe(struct kprobe *p)
651{
652 if (p->addr == (kprobe_opcode_t *) & kretprobe_trampoline)
653 return 1;
654 return 0;
655}