/*
 * Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2006
 *
 * s390 port, used ppc64 as template. Mike Grundy <grundym@us.ibm.com>
 */

#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/stop_machine.h>
#include <linux/module.h>
#include <asm/cacheflush.h>
#include <asm/kdebug.h>
#include <asm/sections.h>
#include <asm/uaccess.h>

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
        /* Make sure the probe isn't going on a prohibited instruction */
        if (is_prohibited_opcode((kprobe_opcode_t *) p->addr))
                return -EINVAL;

        if ((unsigned long)p->addr & 0x01) {
                printk(KERN_WARNING
                       "Attempt to register kprobe at an unaligned address\n");
                return -EINVAL;
        }

        /* Use the get_insn_slot() facility for correctness */
        if (!(p->ainsn.insn = get_insn_slot()))
                return -ENOMEM;

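        /*
         * Copy the instruction to its out-of-line slot; single-stepping
         * is done on this copy, not on the original location.
         */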
        memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));

        get_instruction_type(&p->ainsn);
        p->opcode = *p->addr;
        return 0;
}

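/*
 * Reject instructions that cannot safely be single-stepped out of line:
 * bsm/bassm change the addressing mode, pr/pc/pt/bakr/bsa/bsg alter
 * address-space or linkage-stack state, diag invokes machine or
 * hypervisor services, and ex executes another instruction.
 */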
int __kprobes is_prohibited_opcode(kprobe_opcode_t *instruction)
{
        switch (*(__u8 *) instruction) {
        case 0x0c:      /* bassm */
        case 0x0b:      /* bsm */
        case 0x83:      /* diag */
        case 0x44:      /* ex */
                return -EINVAL;
        }
        switch (*(__u16 *) instruction) {
        case 0x0101:    /* pr */
        case 0xb25a:    /* bsa */
        case 0xb240:    /* bakr */
        case 0xb258:    /* bsg */
        case 0xb218:    /* pc */
        case 0xb228:    /* pt */
                return -EINVAL;
        }
        return 0;
}

void __kprobes get_instruction_type(struct arch_specific_insn *ainsn)
{
        /* default fixup method */
        ainsn->fixup = FIXUP_PSW_NORMAL;

        /* save r1 operand */
        ainsn->reg = (*ainsn->insn & 0xf0) >> 4;

        /*
         * Save the instruction length in bytes: bits 0-1 of the opcode
         * encode it (see Principles of Operation, 5-5).
         */
        switch (*(__u8 *) (ainsn->insn) >> 6) {
        case 0:
                ainsn->ilen = 2;
                break;
        case 1:
        case 2:
                ainsn->ilen = 4;
                break;
        case 3:
                ainsn->ilen = 6;
                break;
        }

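        /*
         * Branch and linkage instructions need extra fixup after their
         * out-of-line copy has been single-stepped: saved return
         * registers must be relocated, and a branch that was not taken
         * must resume after the original instruction.
         */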
        switch (*(__u8 *) ainsn->insn) {
        case 0x05:      /* balr */
        case 0x0d:      /* basr */
                ainsn->fixup = FIXUP_RETURN_REGISTER;
                /* if r2 = 0, no branch will be taken */
                if ((*ainsn->insn & 0x0f) == 0)
                        ainsn->fixup |= FIXUP_BRANCH_NOT_TAKEN;
                break;
        case 0x06:      /* bctr */
        case 0x07:      /* bcr */
                ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
                break;
        case 0x45:      /* bal */
        case 0x4d:      /* bas */
                ainsn->fixup = FIXUP_RETURN_REGISTER;
                break;
        case 0x47:      /* bc */
        case 0x46:      /* bct */
        case 0x86:      /* bxh */
        case 0x87:      /* bxle */
                ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
                break;
        case 0x82:      /* lpsw */
                ainsn->fixup = FIXUP_NOT_REQUIRED;
                break;
        case 0xb2:      /* lpswe */
                if (*(((__u8 *) ainsn->insn) + 1) == 0xb2)
                        ainsn->fixup = FIXUP_NOT_REQUIRED;
                break;
        case 0xa7:      /* bras */
                if ((*ainsn->insn & 0x0f) == 0x05)
                        ainsn->fixup |= FIXUP_RETURN_REGISTER;
                break;
        case 0xc0:
                if ((*ainsn->insn & 0x0f) == 0x00 ||    /* larl */
                    (*ainsn->insn & 0x0f) == 0x05)      /* brasl */
                        ainsn->fixup |= FIXUP_RETURN_REGISTER;
                break;
        case 0xeb:
                if (*(((__u8 *) ainsn->insn) + 5) == 0x44 ||    /* bxhg */
                    *(((__u8 *) ainsn->insn) + 5) == 0x45)      /* bxleg */
                        ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
                break;
        case 0xe3:      /* bctg */
                if (*(((__u8 *) ainsn->insn) + 5) == 0x46)
                        ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
                break;
        }
}

static int __kprobes swap_instruction(void *aref)
{
        struct ins_replace_args *args = aref;
        u32 *addr;
        u32 instr;
        int err = -EFAULT;

        /*
         * The text segment is read-only, hence we use stura to bypass
         * dynamic address translation when exchanging the instruction.
         * Since stura always operates on four bytes but we only want to
         * exchange two bytes, do some calculations to get things right.
         * In addition we must not cross any page boundaries (vmalloc
         * area!) when writing the new instruction, so round the address
         * down to the containing word.
         */
        addr = (u32 *)((unsigned long)args->ptr & ~3UL);
        if ((unsigned long)args->ptr & 2)
                instr = ((*addr) & 0xffff0000) | args->new;
        else
                instr = ((*addr) & 0x0000ffff) | args->new << 16;

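        /*
         * lra converts the virtual address to a real address; stura then
         * stores the word using real addressing, bypassing the write
         * protection of the text segment. On success "la %0,0" clears
         * err; a fault at the stura leaves err at -EFAULT via the
         * exception table entry.
         */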
        asm volatile(
                "       lra     %1,0(%1)\n"
                "0:     stura   %2,%1\n"
                "1:     la      %0,0\n"
                "2:\n"
                EX_TABLE(0b,2b)
                : "+d" (err)
                : "a" (addr), "d" (instr)
                : "memory", "cc");

        return err;
}

void __kprobes arch_arm_kprobe(struct kprobe *p)
{
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
        unsigned long status = kcb->kprobe_status;
        struct ins_replace_args args;

        args.ptr = p->addr;
        args.old = p->opcode;
        args.new = BREAKPOINT_INSTRUCTION;

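        /*
         * stop_machine_run() keeps all other CPUs quiescent while the
         * breakpoint is written, so no CPU can see a half-updated
         * instruction.
         */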
        kcb->kprobe_status = KPROBE_SWAP_INST;
        stop_machine_run(swap_instruction, &args, NR_CPUS);
        kcb->kprobe_status = status;
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
        unsigned long status = kcb->kprobe_status;
        struct ins_replace_args args;

        args.ptr = p->addr;
        args.old = BREAKPOINT_INSTRUCTION;
        args.new = p->opcode;

        kcb->kprobe_status = KPROBE_SWAP_INST;
        stop_machine_run(swap_instruction, &args, NR_CPUS);
        kcb->kprobe_status = status;
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
        mutex_lock(&kprobe_mutex);
        free_insn_slot(p->ainsn.insn, 0);
        mutex_unlock(&kprobe_mutex);
}

static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
        per_cr_bits kprobe_per_regs[1];

        memset(kprobe_per_regs, 0, sizeof(per_cr_bits));
        regs->psw.addr = (unsigned long)p->ainsn.insn | PSW_ADDR_AMODE;

        /* Set up the PER control reg info, will pass to lctl */
        kprobe_per_regs[0].em_instruction_fetch = 1;
        kprobe_per_regs[0].starting_addr = (unsigned long)p->ainsn.insn;
        kprobe_per_regs[0].ending_addr = (unsigned long)p->ainsn.insn + 1;
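        /* The range covers just the out-of-line instruction itself. */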

        /* Set the PER control regs, turns on single step for this address */
        __ctl_load(kprobe_per_regs, 9, 11);
        regs->psw.mask |= PSW_MASK_PER;
        regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK);
}

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
        kcb->prev_kprobe.kp = kprobe_running();
        kcb->prev_kprobe.status = kcb->kprobe_status;
        kcb->prev_kprobe.kprobe_saved_imask = kcb->kprobe_saved_imask;
        memcpy(kcb->prev_kprobe.kprobe_saved_ctl, kcb->kprobe_saved_ctl,
               sizeof(kcb->kprobe_saved_ctl));
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
        __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
        kcb->kprobe_status = kcb->prev_kprobe.status;
        kcb->kprobe_saved_imask = kcb->prev_kprobe.kprobe_saved_imask;
        memcpy(kcb->kprobe_saved_ctl, kcb->prev_kprobe.kprobe_saved_ctl,
               sizeof(kcb->kprobe_saved_ctl));
}

static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
                                         struct kprobe_ctlblk *kcb)
{
        __get_cpu_var(current_kprobe) = p;
        /* Save the interrupt and PER flags */
        kcb->kprobe_saved_imask = regs->psw.mask &
                (PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK);
        /* Save the control regs that govern PER */
        __ctl_store(kcb->kprobe_saved_ctl, 9, 11);
}

/* Called with kretprobe_lock held */
void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
                                      struct pt_regs *regs)
{
        struct kretprobe_instance *ri;

        if ((ri = get_free_rp_inst(rp)) != NULL) {
                ri->rp = rp;
                ri->task = current;
                ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14];

                /* Replace the return addr with trampoline addr */
                regs->gprs[14] = (unsigned long)&kretprobe_trampoline;

                add_rp_inst(ri);
        } else {
                rp->nmissed++;
        }
}

static int __kprobes kprobe_handler(struct pt_regs *regs)
{
        struct kprobe *p;
        int ret = 0;
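        /* psw.addr points past the 2-byte breakpoint; back up to its start. */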
        unsigned long *addr = (unsigned long *)
                ((regs->psw.addr & PSW_ADDR_INSN) - 2);
        struct kprobe_ctlblk *kcb;

        /*
         * We don't want to be preempted for the entire
         * duration of kprobe processing
         */
        preempt_disable();
        kcb = get_kprobe_ctlblk();

        /* Check that we're not actually recursing */
        if (kprobe_running()) {
                p = get_kprobe(addr);
                if (p) {
                        if (kcb->kprobe_status == KPROBE_HIT_SS &&
                            *p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
                                regs->psw.mask &= ~PSW_MASK_PER;
                                regs->psw.mask |= kcb->kprobe_saved_imask;
                                goto no_kprobe;
                        }
                        /*
                         * We have reentered the kprobe_handler(), since
                         * another probe was hit while within the handler.
                         * We here save the original kprobes variables and
                         * just single step on the instruction of the new
                         * probe without calling any user handlers.
                         */
                        save_previous_kprobe(kcb);
                        set_current_kprobe(p, regs, kcb);
                        kprobes_inc_nmissed_count(p);
                        prepare_singlestep(p, regs);
                        kcb->kprobe_status = KPROBE_REENTER;
                        return 1;
                } else {
                        p = __get_cpu_var(current_kprobe);
                        if (p->break_handler && p->break_handler(p, regs))
                                goto ss_probe;
                }
                goto no_kprobe;
        }

        p = get_kprobe(addr);
        if (!p) {
                if (*addr != BREAKPOINT_INSTRUCTION) {
                        /*
                         * The breakpoint instruction was removed right
                         * after we hit it. Another cpu has removed
                         * either a probepoint or a debugger breakpoint
                         * at this address. In either case, no further
                         * handling of this interrupt is appropriate.
                         */
                        ret = 1;
                }
                /* Not one of ours: let the kernel handle it */
                goto no_kprobe;
        }

        kcb->kprobe_status = KPROBE_HIT_ACTIVE;
        set_current_kprobe(p, regs, kcb);
        if (p->pre_handler && p->pre_handler(p, regs))
                /* handler has already set things up, so skip ss setup */
                return 1;

ss_probe:
        prepare_singlestep(p, regs);
        kcb->kprobe_status = KPROBE_HIT_SS;
        return 1;

no_kprobe:
        preempt_enable_no_resched();
        return ret;
}

/*
 * Function return probe trampoline:
 *      - init_kprobes() establishes a probepoint here
 *      - When the probed function returns, this probe
 *        causes the handlers to fire
 */
void kretprobe_trampoline_holder(void)
{
        asm volatile(".global kretprobe_trampoline\n"
                     "kretprobe_trampoline: bcr 0,0\n");
}

/*
 * Called when the probe at kretprobe trampoline is hit
 */
static int __kprobes trampoline_probe_handler(struct kprobe *p,
                                              struct pt_regs *regs)
{
        struct kretprobe_instance *ri = NULL;
        struct hlist_head *head, empty_rp;
        struct hlist_node *node, *tmp;
        unsigned long flags, orig_ret_address = 0;
        unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

        INIT_HLIST_HEAD(&empty_rp);
        spin_lock_irqsave(&kretprobe_lock, flags);
        head = kretprobe_inst_table_head(current);

        /*
         * It is possible to have multiple instances associated with a given
         * task either because multiple functions in the call path have
         * a return probe installed on them, and/or more than one return
         * probe was registered for a target function.
         *
         * We can handle this because:
         *      - instances are always inserted at the head of the list
         *      - when multiple return probes are registered for the same
         *        function, the first instance's ret_addr will point to the
         *        real return address, and all the rest will point to
         *        kretprobe_trampoline
         */
        hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
                if (ri->task != current)
                        /* another task is sharing our hash bucket */
                        continue;

                if (ri->rp && ri->rp->handler)
                        ri->rp->handler(ri, regs);

                orig_ret_address = (unsigned long)ri->ret_addr;
                recycle_rp_inst(ri, &empty_rp);

                if (orig_ret_address != trampoline_address) {
                        /*
                         * This is the real return address. Any other
                         * instances associated with this task are for
                         * other calls deeper on the call stack.
                         */
                        break;
                }
        }
        BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address));
        regs->psw.addr = orig_ret_address | PSW_ADDR_AMODE;

        reset_current_kprobe();
        spin_unlock_irqrestore(&kretprobe_lock, flags);
        preempt_enable_no_resched();

        hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
                kfree(ri);
        }
        /*
         * By returning a non-zero value, we tell kprobe_handler() that
         * we don't want the post_handler to run (and have re-enabled
         * preemption).
         */
        return 1;
}

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "breakpoint"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 */
static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        regs->psw.addr &= PSW_ADDR_INSN;

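        /*
         * Translate psw.addr (and, for branch-and-save instructions, the
         * saved return register) from the out-of-line copy back to the
         * original instruction's address range.
         */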
        if (p->ainsn.fixup & FIXUP_PSW_NORMAL)
                regs->psw.addr = (unsigned long)p->addr +
                        ((unsigned long)regs->psw.addr -
                         (unsigned long)p->ainsn.insn);

        if (p->ainsn.fixup & FIXUP_BRANCH_NOT_TAKEN)
                if ((unsigned long)regs->psw.addr -
                    (unsigned long)p->ainsn.insn == p->ainsn.ilen)
                        regs->psw.addr = (unsigned long)p->addr + p->ainsn.ilen;

        if (p->ainsn.fixup & FIXUP_RETURN_REGISTER)
                regs->gprs[p->ainsn.reg] = ((unsigned long)p->addr +
                                            (regs->gprs[p->ainsn.reg] -
                                             (unsigned long)p->ainsn.insn))
                                           | PSW_ADDR_AMODE;

        regs->psw.addr |= PSW_ADDR_AMODE;
        /* turn off PER mode */
        regs->psw.mask &= ~PSW_MASK_PER;
        /* Restore the original PER control regs */
        __ctl_load(kcb->kprobe_saved_ctl, 9, 11);
        regs->psw.mask |= kcb->kprobe_saved_imask;
}

static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
        struct kprobe *cur = kprobe_running();
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        if (!cur)
                return 0;

        if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
                kcb->kprobe_status = KPROBE_HIT_SSDONE;
                cur->post_handler(cur, regs, 0);
        }

        resume_execution(cur, regs);

        /* Restore the original saved kprobes variables and continue. */
        if (kcb->kprobe_status == KPROBE_REENTER) {
                restore_previous_kprobe(kcb);
                goto out;
        }
        reset_current_kprobe();
out:
        preempt_enable_no_resched();

        /*
         * If somebody else is single-stepping across a probe point, the
         * psw mask will have PER set, in which case we continue the
         * remaining processing of do_single_step, as if this is not a
         * probe hit.
         */
        if (regs->psw.mask & PSW_MASK_PER)
                return 0;

        return 1;
}

static int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
        struct kprobe *cur = kprobe_running();
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
        const struct exception_table_entry *entry;

        switch (kcb->kprobe_status) {
        case KPROBE_SWAP_INST:
                /* We are here because the instruction replacement failed */
                return 0;
        case KPROBE_HIT_SS:
        case KPROBE_REENTER:
                /*
                 * We are here because the instruction being single
                 * stepped caused a page fault. We reset the current
                 * kprobe, point the psw back to the probe address
                 * and allow the page fault handler to continue as a
                 * normal page fault.
                 */
                regs->psw.addr = (unsigned long)cur->addr | PSW_ADDR_AMODE;
                regs->psw.mask &= ~PSW_MASK_PER;
                regs->psw.mask |= kcb->kprobe_saved_imask;
                if (kcb->kprobe_status == KPROBE_REENTER)
                        restore_previous_kprobe(kcb);
                else
                        reset_current_kprobe();
                preempt_enable_no_resched();
                break;
        case KPROBE_HIT_ACTIVE:
        case KPROBE_HIT_SSDONE:
                /*
                 * We increment the nmissed count for accounting; the
                 * npre/npostfault counts could also be used to account
                 * for these specific fault cases.
                 */
                kprobes_inc_nmissed_count(cur);

                /*
                 * We come here because instructions in the pre/post
                 * handler caused the page fault. This could happen
                 * if the handler tries to access user space, e.g. by
                 * copy_from_user() or get_user(). Let the
                 * user-specified handler try to fix it first.
                 */
                if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
                        return 1;

                /*
                 * In case the user-specified fault handler returned
                 * zero, try to fix up.
                 */
                entry = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
                if (entry) {
                        regs->psw.addr = entry->fixup | PSW_ADDR_AMODE;
                        return 1;
                }

                /*
                 * The exception table fixup could not handle it either;
                 * let do_page_fault() fix it.
                 */
                break;
        default:
                break;
        }
        return 0;
}

/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
                                       unsigned long val, void *data)
{
        struct die_args *args = (struct die_args *)data;
        int ret = NOTIFY_DONE;

        switch (val) {
        case DIE_BPT:
                if (kprobe_handler(args->regs))
                        ret = NOTIFY_STOP;
                break;
        case DIE_SSTEP:
                if (post_kprobe_handler(args->regs))
                        ret = NOTIFY_STOP;
                break;
        case DIE_TRAP:
        case DIE_PAGE_FAULT:
                /* kprobe_running() needs smp_processor_id() */
                preempt_disable();
                if (kprobe_running() &&
                    kprobe_fault_handler(args->regs, args->trapnr))
                        ret = NOTIFY_STOP;
                preempt_enable();
                break;
        default:
                break;
        }
        return ret;
}

int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct jprobe *jp = container_of(p, struct jprobe, kp);
        unsigned long addr;
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));

        /* setup return addr to the jprobe handler routine */
        regs->psw.addr = (unsigned long)(jp->entry) | PSW_ADDR_AMODE;

        /* r14 is the function return address */
        kcb->jprobe_saved_r14 = (unsigned long)regs->gprs[14];
        /* r15 is the stack pointer */
        kcb->jprobe_saved_r15 = (unsigned long)regs->gprs[15];
        addr = (unsigned long)kcb->jprobe_saved_r15;

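        /*
         * Save the part of the stack the jprobe handler may clobber, so
         * longjmp_break_handler() can restore it afterwards.
         */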
        memcpy(kcb->jprobes_stack, (kprobe_opcode_t *) addr,
               MIN_STACK_SIZE(addr));
        return 1;
}

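/*
 * jprobe_return() executes the kprobe breakpoint instruction (0x0002),
 * which re-enters kprobe_handler() and, via the registered
 * break_handler, ends up in longjmp_break_handler().
 */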
void __kprobes jprobe_return(void)
{
        asm volatile(".word 0x0002");
}

void __kprobes jprobe_return_end(void)
{
        asm volatile("bcr 0,0");
}

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
        unsigned long stack_addr = (unsigned long)(kcb->jprobe_saved_r15);

        /* Put the regs back */
        memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
        /* Put the stack back */
        memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack,
               MIN_STACK_SIZE(stack_addr));
        preempt_enable_no_resched();
        return 1;
}

static struct kprobe trampoline_p = {
        .addr = (kprobe_opcode_t *) &kretprobe_trampoline,
        .pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
        return register_kprobe(&trampoline_p);
}