blob: 4efd5dbfd72d651b3dbb025af928596915e304ca [file] [log] [blame]
/*
 * Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2006
 *
 * s390 port, used ppc64 as template. Mike Grundy <grundym@us.ibm.com>
 */
22
Michael Grundy4ba069b2006-09-20 15:58:39 +020023#include <linux/kprobes.h>
24#include <linux/ptrace.h>
25#include <linux/preempt.h>
26#include <linux/stop_machine.h>
Christoph Hellwig1eeb66a2007-05-08 00:27:03 -070027#include <linux/kdebug.h>
Heiko Carstensa2b53672009-06-12 10:26:43 +020028#include <linux/uaccess.h>
Michael Grundy4ba069b2006-09-20 15:58:39 +020029#include <asm/cacheflush.h>
Michael Grundy4ba069b2006-09-20 15:58:39 +020030#include <asm/sections.h>
Michael Grundy4ba069b2006-09-20 15:58:39 +020031#include <linux/module.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090032#include <linux/slab.h>
Martin Schwidefskyadb45832010-11-10 10:05:57 +010033#include <linux/hardirq.h>
Michael Grundy4ba069b2006-09-20 15:58:39 +020034
/* Kprobe currently being handled on this cpu, NULL if none. */
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
/* Per-cpu kprobe state, saved/restored across nested probe hits. */
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

/* NULL-terminated list: no functions are blacklisted for kretprobes. */
struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};
39
Martin Schwidefskyba640a52011-01-05 12:47:19 +010040static int __kprobes is_prohibited_opcode(kprobe_opcode_t *insn)
Michael Grundy4ba069b2006-09-20 15:58:39 +020041{
Martin Schwidefskyba640a52011-01-05 12:47:19 +010042 switch (insn[0] >> 8) {
Michael Grundy4ba069b2006-09-20 15:58:39 +020043 case 0x0c: /* bassm */
44 case 0x0b: /* bsm */
45 case 0x83: /* diag */
46 case 0x44: /* ex */
Heiko Carstensbac9f152010-05-26 23:26:20 +020047 case 0xac: /* stnsm */
48 case 0xad: /* stosm */
Michael Grundy4ba069b2006-09-20 15:58:39 +020049 return -EINVAL;
50 }
Martin Schwidefskyba640a52011-01-05 12:47:19 +010051 switch (insn[0]) {
Michael Grundy4ba069b2006-09-20 15:58:39 +020052 case 0x0101: /* pr */
53 case 0xb25a: /* bsa */
54 case 0xb240: /* bakr */
55 case 0xb258: /* bsg */
56 case 0xb218: /* pc */
57 case 0xb228: /* pt */
Heiko Carstensbac9f152010-05-26 23:26:20 +020058 case 0xb98d: /* epsw */
Michael Grundy4ba069b2006-09-20 15:58:39 +020059 return -EINVAL;
60 }
61 return 0;
62}
63
Martin Schwidefskyba640a52011-01-05 12:47:19 +010064static int __kprobes get_fixup_type(kprobe_opcode_t *insn)
Michael Grundy4ba069b2006-09-20 15:58:39 +020065{
66 /* default fixup method */
Martin Schwidefskyba640a52011-01-05 12:47:19 +010067 int fixup = FIXUP_PSW_NORMAL;
Michael Grundy4ba069b2006-09-20 15:58:39 +020068
Martin Schwidefskyba640a52011-01-05 12:47:19 +010069 switch (insn[0] >> 8) {
Michael Grundy4ba069b2006-09-20 15:58:39 +020070 case 0x05: /* balr */
71 case 0x0d: /* basr */
Martin Schwidefskyba640a52011-01-05 12:47:19 +010072 fixup = FIXUP_RETURN_REGISTER;
Michael Grundy4ba069b2006-09-20 15:58:39 +020073 /* if r2 = 0, no branch will be taken */
Martin Schwidefskyba640a52011-01-05 12:47:19 +010074 if ((insn[0] & 0x0f) == 0)
75 fixup |= FIXUP_BRANCH_NOT_TAKEN;
Michael Grundy4ba069b2006-09-20 15:58:39 +020076 break;
77 case 0x06: /* bctr */
78 case 0x07: /* bcr */
Martin Schwidefskyba640a52011-01-05 12:47:19 +010079 fixup = FIXUP_BRANCH_NOT_TAKEN;
Michael Grundy4ba069b2006-09-20 15:58:39 +020080 break;
81 case 0x45: /* bal */
82 case 0x4d: /* bas */
Martin Schwidefskyba640a52011-01-05 12:47:19 +010083 fixup = FIXUP_RETURN_REGISTER;
Michael Grundy4ba069b2006-09-20 15:58:39 +020084 break;
85 case 0x47: /* bc */
86 case 0x46: /* bct */
87 case 0x86: /* bxh */
88 case 0x87: /* bxle */
Martin Schwidefskyba640a52011-01-05 12:47:19 +010089 fixup = FIXUP_BRANCH_NOT_TAKEN;
Michael Grundy4ba069b2006-09-20 15:58:39 +020090 break;
91 case 0x82: /* lpsw */
Martin Schwidefskyba640a52011-01-05 12:47:19 +010092 fixup = FIXUP_NOT_REQUIRED;
Michael Grundy4ba069b2006-09-20 15:58:39 +020093 break;
94 case 0xb2: /* lpswe */
Martin Schwidefskyba640a52011-01-05 12:47:19 +010095 if ((insn[0] & 0xff) == 0xb2)
96 fixup = FIXUP_NOT_REQUIRED;
Michael Grundy4ba069b2006-09-20 15:58:39 +020097 break;
98 case 0xa7: /* bras */
Martin Schwidefskyba640a52011-01-05 12:47:19 +010099 if ((insn[0] & 0x0f) == 0x05)
100 fixup |= FIXUP_RETURN_REGISTER;
Michael Grundy4ba069b2006-09-20 15:58:39 +0200101 break;
102 case 0xc0:
Martin Schwidefskyba640a52011-01-05 12:47:19 +0100103 if ((insn[0] & 0x0f) == 0x00 || /* larl */
104 (insn[0] & 0x0f) == 0x05) /* brasl */
105 fixup |= FIXUP_RETURN_REGISTER;
Michael Grundy4ba069b2006-09-20 15:58:39 +0200106 break;
107 case 0xeb:
Martin Schwidefskyba640a52011-01-05 12:47:19 +0100108 if ((insn[2] & 0xff) == 0x44 || /* bxhg */
109 (insn[2] & 0xff) == 0x45) /* bxleg */
110 fixup = FIXUP_BRANCH_NOT_TAKEN;
Michael Grundy4ba069b2006-09-20 15:58:39 +0200111 break;
112 case 0xe3: /* bctg */
Martin Schwidefskyba640a52011-01-05 12:47:19 +0100113 if ((insn[2] & 0xff) == 0x46)
114 fixup = FIXUP_BRANCH_NOT_TAKEN;
Michael Grundy4ba069b2006-09-20 15:58:39 +0200115 break;
116 }
Martin Schwidefskyba640a52011-01-05 12:47:19 +0100117 return fixup;
118}
119
120int __kprobes arch_prepare_kprobe(struct kprobe *p)
121{
122 if ((unsigned long) p->addr & 0x01)
123 return -EINVAL;
124
125 /* Make sure the probe isn't going on a difficult instruction */
126 if (is_prohibited_opcode((kprobe_opcode_t *) p->addr))
127 return -EINVAL;
128
Martin Schwidefskyba640a52011-01-05 12:47:19 +0100129 p->opcode = *p->addr;
130 memcpy(p->ainsn.insn, p->addr, ((p->opcode >> 14) + 3) & -2);
131
132 return 0;
Michael Grundy4ba069b2006-09-20 15:58:39 +0200133}
134
/* Arguments handed to swap_instruction() through stop_machine(). */
struct ins_replace_args {
	kprobe_opcode_t *ptr;		/* address to patch */
	kprobe_opcode_t opcode;		/* opcode to write there */
};
139
/*
 * Write args->opcode to args->ptr. Runs under stop_machine() so no
 * other cpu executes the patched location concurrently. kprobe_status
 * is set to KPROBE_SWAP_INST around the write so that a fault raised
 * by the replacement is recognized in kprobe_trap_handler().
 */
static int __kprobes swap_instruction(void *aref)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long status = kcb->kprobe_status;
	struct ins_replace_args *args = aref;

	kcb->kprobe_status = KPROBE_SWAP_INST;
	probe_kernel_write(args->ptr, &args->opcode, sizeof(args->opcode));
	kcb->kprobe_status = status;
	return 0;
}
151
152void __kprobes arch_arm_kprobe(struct kprobe *p)
153{
Michael Grundy4ba069b2006-09-20 15:58:39 +0200154 struct ins_replace_args args;
155
156 args.ptr = p->addr;
Martin Schwidefsky5a8b5892011-01-05 12:47:18 +0100157 args.opcode = BREAKPOINT_INSTRUCTION;
Rusty Russell9b1a4d32008-07-28 12:16:30 -0500158 stop_machine(swap_instruction, &args, NULL);
Michael Grundy4ba069b2006-09-20 15:58:39 +0200159}
160
161void __kprobes arch_disarm_kprobe(struct kprobe *p)
162{
Michael Grundy4ba069b2006-09-20 15:58:39 +0200163 struct ins_replace_args args;
164
165 args.ptr = p->addr;
Martin Schwidefsky5a8b5892011-01-05 12:47:18 +0100166 args.opcode = p->opcode;
Rusty Russell9b1a4d32008-07-28 12:16:30 -0500167 stop_machine(swap_instruction, &args, NULL);
Michael Grundy4ba069b2006-09-20 15:58:39 +0200168}
169
/*
 * Nothing to release: arch_prepare_kprobe() copied the instruction
 * into the kprobe itself and allocated no separate resources.
 */
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
}
173
/*
 * Arm hardware single-stepping: program a PER instruction-fetch event
 * covering only @ip, save the control registers and psw mask bits that
 * get modified, and point the PSW at @ip with PER enabled and I/O and
 * external interrupts disabled.
 */
static void __kprobes enable_singlestep(struct kprobe_ctlblk *kcb,
					struct pt_regs *regs,
					unsigned long ip)
{
	per_cr_bits kprobe_per_regs[1];

	/* Set up the per control reg info, will pass to lctl */
	memset(kprobe_per_regs, 0, sizeof(per_cr_bits));
	kprobe_per_regs[0].em_instruction_fetch = 1;
	kprobe_per_regs[0].starting_addr = ip;
	kprobe_per_regs[0].ending_addr = ip;

	/* Save control regs and psw mask */
	__ctl_store(kcb->kprobe_saved_ctl, 9, 11);
	kcb->kprobe_saved_imask = regs->psw.mask &
		(PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT);

	/* Set PER control regs, turns on single step for the given address */
	__ctl_load(kprobe_per_regs, 9, 11);
	regs->psw.mask |= PSW_MASK_PER;
	/* Keep interrupts off while the instruction is stepped. */
	regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
	regs->psw.addr = ip | PSW_ADDR_AMODE;
}
197
/*
 * Undo enable_singlestep(): restore the saved control registers and
 * psw mask bits, clear PER and set the PSW address to @ip.
 */
static void __kprobes disable_singlestep(struct kprobe_ctlblk *kcb,
					 struct pt_regs *regs,
					 unsigned long ip)
{
	/* Restore control regs and psw mask, set new psw address */
	__ctl_load(kcb->kprobe_saved_ctl, 9, 11);
	regs->psw.mask &= ~PSW_MASK_PER;
	regs->psw.mask |= kcb->kprobe_saved_imask;
	regs->psw.addr = ip | PSW_ADDR_AMODE;
}
208
/*
 * Activate a kprobe by storing its pointer to current_kprobe. The
 * previous kprobe is stored in kcb->prev_kprobe. A stack of up to
 * two kprobes can be active, see KPROBE_REENTER.
 */
static void __kprobes push_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *p)
{
	/* Save the currently active kprobe and its status ... */
	kcb->prev_kprobe.kp = __get_cpu_var(current_kprobe);
	kcb->prev_kprobe.status = kcb->kprobe_status;
	/* ... and make @p the active one. */
	__get_cpu_var(current_kprobe) = p;
}
220
/*
 * Deactivate a kprobe by backing up to the previous state. If the
 * current state is KPROBE_REENTER prev_kprobe.kp will be non-NULL,
 * for any other state prev_kprobe.kp will be NULL.
 */
static void __kprobes pop_kprobe(struct kprobe_ctlblk *kcb)
{
	/* Re-activate the kprobe (possibly NULL) saved by push_kprobe(). */
	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
	kcb->kprobe_status = kcb->prev_kprobe.status;
}
231
/*
 * Save the real return address of the probed function (register 14 on
 * s390) and divert the return to the kretprobe trampoline.
 */
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14];

	/* Replace the return addr with trampoline addr */
	regs->gprs[14] = (unsigned long)&kretprobe_trampoline;
}
240
/*
 * Breakpoint hit (DIE_BPT): look up the kprobe for the trapping
 * address, run its pre handler and arm single-stepping of the saved
 * instruction copy. Returns 1 if the event was consumed, 0 otherwise.
 */
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	/*
	 * The PSW points past the breakpoint; step back 2 bytes to get
	 * the address the breakpoint instruction is located at.
	 */
	unsigned long *addr = (unsigned long *)
		((regs->psw.addr & PSW_ADDR_INSN) - 2);
	struct kprobe_ctlblk *kcb;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	/* Check we're not actually recursing */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			/*
			 * We have hit a kprobe while another is still
			 * active. This can happen in the pre and post
			 * handler. Single step the instruction of the
			 * new probe but do not call any handler function
			 * of this secondary kprobe.
			 * push_kprobe and pop_kprobe saves and restores
			 * the currently active kprobe.
			 */
			push_kprobe(kcb, p);
			kprobes_inc_nmissed_count(p);
			enable_singlestep(kcb, regs,
					  (unsigned long) p->ainsn.insn);
			kcb->kprobe_status = KPROBE_REENTER;
			return 1;
		} else {
			/*
			 * No probe registered for this address; give the
			 * active probe's break_handler (e.g. a jprobe's)
			 * a chance to claim the breakpoint.
			 */
			p = __get_cpu_var(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs)) {
				goto ss_probe;
			}
		}
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p)
		/*
		 * No kprobe at this address. The fault has not been
		 * caused by a kprobe breakpoint. The race of breakpoint
		 * vs. kprobe remove does not exist because on s390 we
		 * use stop_machine to arm/disarm the breakpoints.
		 */
		goto no_kprobe;

	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
	push_kprobe(kcb, p);
	if (p->pre_handler && p->pre_handler(p, regs))
		/* handler has already set things up, so skip ss setup */
		return 1;

ss_probe:
	enable_singlestep(kcb, regs, (unsigned long) p->ainsn.insn);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}
309
/*
 * Function return probe trampoline:
 *	- init_kprobes() establishes a probepoint here
 *	- When the probed function returns, this probe
 *		causes the handlers to fire
 */
static void __used kretprobe_trampoline_holder(void)
{
	/* Defines the global kretprobe_trampoline symbol (a bcr 0,0). */
	asm volatile(".global kretprobe_trampoline\n"
		     "kretprobe_trampoline: bcr 0,0\n");
}
321
/*
 * Called when the probe at kretprobe trampoline is hit
 */
static int __kprobes trampoline_probe_handler(struct kprobe *p,
					      struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
	kprobe_opcode_t *correct_ret_addr = NULL;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path
	 * have a return probe installed on them, and/or more than one
	 * return probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *	 function, the first instance's ret_addr will point to the
	 *	 real return address, and all the rest will point to
	 *	 kretprobe_trampoline
	 */
	/* First pass: locate the real return address for this task. */
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	/* Second pass: run the handlers and recycle the instances. */
	correct_ret_addr = ri->ret_addr;
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;

		if (ri->rp && ri->rp->handler) {
			/* Hand the handler the real return address. */
			ri->ret_addr = correct_ret_addr;
			ri->rp->handler(ri, regs);
		}

		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address) {
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
		}
	}

	/* Resume execution at the real return address. */
	regs->psw.addr = orig_ret_address | PSW_ADDR_AMODE;

	pop_kprobe(get_kprobe_ctlblk());
	kretprobe_hash_unlock(current, &flags);
	preempt_enable_no_resched();

	/* Free the recycled instances outside of the hash lock. */
	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption)
	 */
	return 1;
}
411
/*
 * Called after single-stepping. p->addr is the address of the
 * instruction whose first byte has been replaced by the "breakpoint"
 * instruction. To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction. The address of this
 * copy is p->ainsn.insn.
 */
static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long ip = regs->psw.addr & PSW_ADDR_INSN;
	int fixup = get_fixup_type(p->ainsn.insn);

	/* Translate the PSW address from the copy back to the original. */
	if (fixup & FIXUP_PSW_NORMAL)
		ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn;

	if (fixup & FIXUP_BRANCH_NOT_TAKEN) {
		/* ilen: length in bytes of the stepped instruction. */
		int ilen = ((p->ainsn.insn[0] >> 14) + 3) & -2;
		/* Branch not taken: continue at the instruction after it. */
		if (ip - (unsigned long) p->ainsn.insn == ilen)
			ip = (unsigned long) p->addr + ilen;
	}

	if (fixup & FIXUP_RETURN_REGISTER) {
		/* Rebase the return address stored in the target register. */
		int reg = (p->ainsn.insn[0] & 0xf0) >> 4;
		regs->gprs[reg] += (unsigned long) p->addr -
				   (unsigned long) p->ainsn.insn;
	}

	disable_singlestep(kcb, regs, ip);
}
443
/*
 * Called via DIE_SSTEP after the out-of-line single-step completed:
 * run the post handler, then fix up the PSW and disable stepping.
 * Returns 1 if the event was consumed, 0 otherwise.
 */
static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	/* Do not call the post handler for a reentered (nested) probe. */
	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs);
	pop_kprobe(kcb);
	preempt_enable_no_resched();

	/*
	 * if somebody else is singlestepping across a probe point, psw mask
	 * will have PER set, in which case, continue the remaining processing
	 * of do_single_step, as if this is not a probe hit.
	 */
	if (regs->psw.mask & PSW_MASK_PER) {
		return 0;
	}

	return 1;
}
472
/*
 * Handle a fault that occurred while a kprobe was active, based on the
 * current kprobe state. Returns 1 if the fault was dealt with here,
 * 0 to let normal fault handling continue.
 */
static int __kprobes kprobe_trap_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	const struct exception_table_entry *entry;

	switch(kcb->kprobe_status) {
	case KPROBE_SWAP_INST:
		/* We are here because the instruction replacement failed */
		return 0;
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe and the nip points back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		disable_singlestep(kcb, regs, (unsigned long) cur->addr);
		pop_kprobe(kcb);
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting,
		 * we can also use npre/npostfault count for accounting
		 * these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page_fault, this could happen
		 * if handler tries to access user space by
		 * copy_from_user(), get_user() etc. Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		entry = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
		if (entry) {
			regs->psw.addr = entry->fixup | PSW_ADDR_AMODE;
			return 1;
		}

		/*
		 * fixup_exception() could not handle it,
		 * Let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}
	return 0;
}
535
/*
 * Entry point for faults raised while a kprobe is active. Runs
 * kprobe_trap_handler() with I/O and external interrupts disabled
 * when the interrupted context had them enabled.
 */
int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	int ret;

	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_disable();
	ret = kprobe_trap_handler(regs, trapnr);
	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);
	return ret;
}
547
/*
 * Wrapper routine for handling exceptions: dispatch die notifications
 * (breakpoint, single-step, trap) to the kprobe handlers, with I/O and
 * external interrupts disabled while a handler runs.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	struct pt_regs *regs = args->regs;
	int ret = NOTIFY_DONE;

	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_disable();

	switch (val) {
	case DIE_BPT:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_SSTEP:
		if (post_kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_TRAP:
		if (!preemptible() && kprobe_running() &&
		    kprobe_trap_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
		break;
	default:
		break;
	}

	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);

	return ret;
}
584
/*
 * Jprobe entry: save the registers and the probed function's stack
 * frame, then redirect execution to the jprobe's entry function.
 * Both are restored later in longjmp_break_handler().
 */
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long stack;

	memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));

	/* setup return addr to the jprobe handler routine */
	regs->psw.addr = (unsigned long)(jp->entry) | PSW_ADDR_AMODE;
	regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);

	/* r15 is the stack pointer */
	stack = (unsigned long) regs->gprs[15];

	memcpy(kcb->jprobes_stack, (void *) stack, MIN_STACK_SIZE(stack));
	return 1;
}
603
/*
 * End a jprobe handler: trap back into the kprobes break handler
 * (0x0002 — presumably the s390 breakpoint opcode; control resumes
 * via longjmp_break_handler()).
 */
void __kprobes jprobe_return(void)
{
	asm volatile(".word 0x0002");
}
608
/* Marker following jprobe_return(); the bcr 0,0 is a no-op branch. */
void __kprobes jprobe_return_end(void)
{
	asm volatile("bcr 0,0");
}
613
/*
 * Jprobe exit, reached via the breakpoint in jprobe_return(): restore
 * the registers and stack frame saved by setjmp_pre_handler() so
 * execution resumes at the original probe point.
 */
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long stack;

	/* Stack pointer as it was when the jprobe fired. */
	stack = (unsigned long) kcb->jprobe_saved_regs.gprs[15];

	/* Put the regs back */
	memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
	/* put the stack back */
	memcpy((void *) stack, kcb->jprobes_stack, MIN_STACK_SIZE(stack));
	preempt_enable_no_resched();
	return 1;
}
628
/* Kprobe placed on the kretprobe trampoline to catch function returns. */
static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *) & kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};
633
/* Register the kprobe on the kretprobe trampoline at boot time. */
int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline_p);
}
Ananth N Mavinakayanahallibf8f6e52007-05-08 00:34:16 -0700638
639int __kprobes arch_trampoline_kprobe(struct kprobe *p)
640{
641 if (p->addr == (kprobe_opcode_t *) & kretprobe_trampoline)
642 return 1;
643 return 0;
644}