/*
 *  Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Oct	Jim Keniston <kenistoj@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> adapted for x86_64
 * 2005-Mar	Roland McGrath <roland@redhat.com>
 *		Fixed to handle %rip-relative addressing mode correctly.
 * 2005-May	Rusty Lynch <rusty.lynch@intel.com>
 *		Added function return probes functionality
 * 2007-Dec	Masami Hiramatsu <mhiramat@redhat.com> added kprobe-booster
 *		and kretprobe-booster for x86-64
 */

#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/preempt.h>
#include <linux/module.h>
#include <linux/kdebug.h>

#include <asm/cacheflush.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/alternative.h>

void jprobe_return_end(void);

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

#define stack_addr(regs) ((unsigned long *)regs->sp)

#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
	 << (row % 32))
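/*
 * W(row, b0..bf) packs sixteen one-bit flags for opcodes row+0x0 .. row+0xf
 * into bits (row % 32) .. (row % 32 + 15) of a 32-bit word.  Adjacent rows
 * are OR'ed together to form each u32 entry, so the tables below are plain
 * 256-bit bitmaps that are queried with test_bit(opcode, table).
 */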
/*
 * Undefined/reserved opcodes, conditional jumps, Opcode Extension
 * Groups, and some special opcodes cannot be boosted.
 */
static const u32 twobyte_is_boostable[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
	/*      -----------------------------------------------         */
	W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */
	W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 10 */
	W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 20 */
	W(0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 50 */
	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1) | /* 60 */
	W(0x70, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */
	W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
	W(0xd0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) , /* d0 */
	W(0xe0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* e0 */
	W(0xf0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0)   /* f0 */
	/*      -----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
};
static const u32 onebyte_has_modrm[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
	/*      -----------------------------------------------         */
	W(0x00, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* 00 */
	W(0x10, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) , /* 10 */
	W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* 20 */
	W(0x30, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) , /* 30 */
	W(0x40, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 40 */
	W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 50 */
	W(0x60, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0) | /* 60 */
	W(0x70, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 70 */
	W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
	W(0x90, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 90 */
	W(0xa0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* a0 */
	W(0xb0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* b0 */
	W(0xc0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0) | /* c0 */
	W(0xd0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
	W(0xe0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* e0 */
	W(0xf0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1)   /* f0 */
	/*      -----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
};
static const u32 twobyte_has_modrm[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
	/*      -----------------------------------------------         */
	W(0x00, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1) | /* 0f */
	W(0x10, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0) , /* 1f */
	W(0x20, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* 2f */
	W(0x30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 3f */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 4f */
	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 5f */
	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 6f */
	W(0x70, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1) , /* 7f */
	W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 8f */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 9f */
	W(0xa0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1) | /* af */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1) , /* bf */
	W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0) | /* cf */
	W(0xd0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* df */
	W(0xe0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* ef */
	W(0xf0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0)   /* ff */
	/*      -----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
};
#undef W

struct kretprobe_blackpoint kretprobe_blacklist[] = {
	{"__switch_to", }, /* This function switches the current task but
			      does not switch the kernel stack. */
	{NULL, NULL}	/* Terminator */
};
const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);

/* Insert a jump instruction at address 'from', which jumps to address 'to'. */
static __always_inline void set_jmp_op(void *from, void *to)
{
	struct __arch_jmp_op {
		char op;
		s32 raddr;
	} __attribute__((packed)) * jop;
	jop = (struct __arch_jmp_op *)from;
	jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
	jop->op = RELATIVEJUMP_INSTRUCTION;
}
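/*
 * Note: the structure above encodes a 5-byte near jump: a one-byte opcode
 * (RELATIVEJUMP_INSTRUCTION, the 0xe9 "jmp rel32" in the x86 kprobes
 * headers) followed by a 32-bit displacement that is relative to the end
 * of the jump instruction -- hence the "from + 5" in the raddr calculation.
 */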

/*
 * Returns non-zero if the opcode is boostable.
 * RIP-relative instructions are adjusted at copying time.
 */
static __always_inline int can_boost(kprobe_opcode_t *opcodes)
{
	kprobe_opcode_t opcode;
	kprobe_opcode_t *orig_opcodes = opcodes;

retry:
	if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
		return 0;
	opcode = *(opcodes++);

	/* 2nd-byte opcode */
	if (opcode == 0x0f) {
		if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
			return 0;
		return test_bit(*opcodes,
				(unsigned long *)twobyte_is_boostable);
	}

	switch (opcode & 0xf0) {
	case 0x40:
		goto retry; /* REX prefix is boostable */
	case 0x60:
		if (0x63 < opcode && opcode < 0x67)
			goto retry; /* prefixes */
		/* can't boost Address-size override and bound */
		return (opcode != 0x62 && opcode != 0x67);
	case 0x70:
		return 0; /* can't boost conditional jump */
	case 0xc0:
		/* can't boost software interrupts */
		return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf;
	case 0xd0:
		/* can boost AA* and XLAT */
		return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
	case 0xe0:
		/* can boost in/out and absolute jmps */
		return ((opcode & 0x04) || opcode == 0xea);
	case 0xf0:
		if ((opcode & 0x0c) == 0 && opcode != 0xf1)
			goto retry; /* lock/rep(ne) prefix */
		/* clear and set flags are boostable */
		return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
	default:
		/* segment override prefixes are boostable */
		if (opcode == 0x26 || opcode == 0x36 || opcode == 0x3e)
			goto retry; /* prefixes */
		/* CS override prefix and call are not boostable */
		return (opcode != 0x2e && opcode != 0x9a);
	}
}

/*
 * Returns non-zero if the opcode modifies the interrupt flag.
 */
static int __kprobes is_IF_modifier(kprobe_opcode_t *insn)
{
	switch (*insn) {
	case 0xfa:		/* cli */
	case 0xfb:		/* sti */
	case 0xcf:		/* iret/iretd */
	case 0x9d:		/* popf/popfd */
		return 1;
	}

	/*
	 * On 64-bit x86, 0x40-0x4f are REX prefixes, so we need to look at
	 * the next byte instead; the recursion stops at the first
	 * non-prefix byte.
	 */
	if (*insn >= 0x40 && *insn <= 0x4f)
		return is_IF_modifier(++insn);
	return 0;
}

/*
 * Adjust the displacement if the instruction uses the %rip-relative
 * addressing mode.  The 32-bit displacement of the copied instruction is
 * rewritten so that it still refers to the same target address when the
 * copy is executed from the out-of-line slot.
 */
static void __kprobes fix_riprel(struct kprobe *p)
{
	u8 *insn = p->ainsn.insn;
	s64 disp;
	int need_modrm;

	/* Skip legacy instruction prefixes.  */
	while (1) {
		switch (*insn) {
		case 0x66:
		case 0x67:
		case 0x2e:
		case 0x3e:
		case 0x26:
		case 0x64:
		case 0x65:
		case 0x36:
		case 0xf0:
		case 0xf3:
		case 0xf2:
			++insn;
			continue;
		}
		break;
	}

	/* Skip REX instruction prefix.  */
	if ((*insn & 0xf0) == 0x40)
		++insn;

	if (*insn == 0x0f) {	/* Two-byte opcode.  */
		++insn;
		need_modrm = test_bit(*insn,
				      (unsigned long *)twobyte_has_modrm);
	} else			/* One-byte opcode.  */
		need_modrm = test_bit(*insn,
				      (unsigned long *)onebyte_has_modrm);

	if (need_modrm) {
		u8 modrm = *++insn;
		if ((modrm & 0xc7) == 0x05) { /* %rip+disp32 addressing mode */
			/* Displacement follows ModRM byte.  */
			++insn;
			/*
			 * The copied instruction uses the %rip-relative
			 * addressing mode.  Adjust the displacement for the
			 * difference between the original location of this
			 * instruction and the location of the copy that will
			 * actually be run.  The tricky bit here is making sure
			 * that the sign extension happens correctly in this
			 * calculation, since we need a signed 32-bit result to
			 * be sign-extended to 64 bits when it's added to the
			 * %rip value and yield the same 64-bit result that the
			 * sign-extension of the original signed 32-bit
			 * displacement would have given.
			 */
			disp = (u8 *) p->addr + *((s32 *) insn) -
			       (u8 *) p->ainsn.insn;
			BUG_ON((s64) (s32) disp != disp); /* Sanity check.  */
			*(s32 *)insn = (s32) disp;
		}
	}
}

static void __kprobes arch_copy_kprobe(struct kprobe *p)
{
	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
	fix_riprel(p);
	if (can_boost(p->addr))
		p->ainsn.boostable = 0;
	else
		p->ainsn.boostable = -1;

	p->opcode = *p->addr;
}

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	/* insn: must be on special executable page on x86. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;
	arch_copy_kprobe(p);
	return 0;
}
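/*
 * Note: get_insn_slot() hands out a slot inside a page that the generic
 * kprobes code keeps mapped executable; the copy made by arch_copy_kprobe()
 * is what later gets single-stepped, or jumped to directly when the probe
 * is boosted.
 */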

void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1);
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	text_poke(p->addr, &p->opcode, 1);
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	mutex_lock(&kprobe_mutex);
	free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1));
	mutex_unlock(&kprobe_mutex);
}

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.old_flags = kcb->kprobe_old_flags;
	kcb->prev_kprobe.saved_flags = kcb->kprobe_saved_flags;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_old_flags = kcb->prev_kprobe.old_flags;
	kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags;
}

static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
				struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = p;
	kcb->kprobe_saved_flags = kcb->kprobe_old_flags
		= (regs->flags & (TF_MASK | IF_MASK));
	if (is_IF_modifier(p->ainsn.insn))
		kcb->kprobe_saved_flags &= ~IF_MASK;
}

static __always_inline void clear_btf(void)
{
	if (test_thread_flag(TIF_DEBUGCTLMSR))
		wrmsrl(MSR_IA32_DEBUGCTLMSR, 0);
}

static __always_inline void restore_btf(void)
{
	if (test_thread_flag(TIF_DEBUGCTLMSR))
		wrmsrl(MSR_IA32_DEBUGCTLMSR, current->thread.debugctlmsr);
}

static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	clear_btf();
	regs->flags |= TF_MASK;
	regs->flags &= ~IF_MASK;
	/* single step inline if the instruction is an int3 */
	if (p->opcode == BREAKPOINT_INSTRUCTION)
		regs->ip = (unsigned long)p->addr;
	else
		regs->ip = (unsigned long)p->ainsn.insn;
}
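/*
 * Setting TF arranges for a debug trap right after the one copied
 * instruction has executed, and clearing IF keeps interrupts from arriving
 * while ip points into the out-of-line buffer.  clear_btf() above drops the
 * block-step (BTF) setting so that TF really traps after a single
 * instruction rather than after the next branch.
 */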

/* Called with kretprobe_lock held */
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	unsigned long *sara = stack_addr(regs);

	ri->ret_addr = (kprobe_opcode_t *) *sara;

	/* Replace the return addr with trampoline addr */
	*sara = (unsigned long) &kretprobe_trampoline;
}
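/*
 * The word at stack_addr(regs) is the return address pushed by the "call"
 * into the probed function, so after the replacement above the function
 * returns into kretprobe_trampoline below instead of into its caller; the
 * real return address is preserved in ri->ret_addr.
 */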

/*
 * Interrupts are disabled on entry as trap3 is an interrupt gate and they
 * remain disabled throughout this function.
 */
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	kprobe_opcode_t *addr;
	struct kprobe_ctlblk *kcb;

	addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing.
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	/* Check we're not actually recursing */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			if (kcb->kprobe_status == KPROBE_HIT_SS &&
			    *p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
				regs->flags &= ~TF_MASK;
				regs->flags |= kcb->kprobe_saved_flags;
				goto no_kprobe;
			} else if (kcb->kprobe_status == KPROBE_HIT_SSDONE) {
				/* TODO: Provide re-entrancy from
				 * post_kprobes_handler() and avoid exception
				 * stack corruption while single-stepping on
				 * the instruction of the new probe.
				 */
				arch_disarm_kprobe(p);
				regs->ip = (unsigned long)p->addr;
				reset_current_kprobe();
				return 1;
			}
			/* We have reentered the kprobe_handler(), since
			 * another probe was hit while within the handler.
			 * Here we save the original kprobes variables and
			 * just single step on the instruction of the new
			 * probe without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, regs, kcb);
			kprobes_inc_nmissed_count(p);
			prepare_singlestep(p, regs);
			kcb->kprobe_status = KPROBE_REENTER;
			return 1;
		} else {
			if (*addr != BREAKPOINT_INSTRUCTION) {
				/* The breakpoint instruction was removed by
				 * another cpu right after we hit it; no
				 * further handling of this interrupt is
				 * appropriate.
				 */
				regs->ip = (unsigned long)addr;
				ret = 1;
				goto no_kprobe;
			}
			p = __get_cpu_var(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs))
				goto ss_probe;
		}
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p) {
		if (*addr != BREAKPOINT_INSTRUCTION) {
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it.  Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address.  In either case, no further
			 * handling of this interrupt is appropriate.
			 * Back up over the (now missing) int3 and run
			 * the original instruction.
			 */
			regs->ip = (unsigned long)addr;
			ret = 1;
		}
		/* Not one of ours: let kernel handle it */
		goto no_kprobe;
	}

	set_current_kprobe(p, regs, kcb);
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	if (p->pre_handler && p->pre_handler(p, regs))
		/* handler has already set things up, so skip ss setup */
		return 1;

ss_probe:
#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PM)
	if (p->ainsn.boostable == 1 && !p->post_handler) {
		/* Boost up -- we can execute copied instructions directly */
		reset_current_kprobe();
		regs->ip = (unsigned long)p->ainsn.insn;
		preempt_enable_no_resched();
		return 1;
	}
#endif
	prepare_singlestep(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}

/*
 * When a retprobed function returns, this code saves registers and
 * calls trampoline_handler(), which in turn calls the kretprobe's handler.
 */
void __kprobes kretprobe_trampoline_holder(void)
{
	asm volatile (	".global kretprobe_trampoline\n"
			"kretprobe_trampoline: \n"
			/* We don't bother saving the ss register */
			"	pushq %rsp\n"
			"	pushfq\n"
			/*
			 * Skip cs, ip, orig_ax.
			 * trampoline_handler() will plug in these values
			 */
			"	subq $24, %rsp\n"
			"	pushq %rdi\n"
			"	pushq %rsi\n"
			"	pushq %rdx\n"
			"	pushq %rcx\n"
			"	pushq %rax\n"
			"	pushq %r8\n"
			"	pushq %r9\n"
			"	pushq %r10\n"
			"	pushq %r11\n"
			"	pushq %rbx\n"
			"	pushq %rbp\n"
			"	pushq %r12\n"
			"	pushq %r13\n"
			"	pushq %r14\n"
			"	pushq %r15\n"
			"	movq %rsp, %rdi\n"
			"	call trampoline_handler\n"
			/* Replace saved sp with true return address. */
			"	movq %rax, 152(%rsp)\n"
			"	popq %r15\n"
			"	popq %r14\n"
			"	popq %r13\n"
			"	popq %r12\n"
			"	popq %rbp\n"
			"	popq %rbx\n"
			"	popq %r11\n"
			"	popq %r10\n"
			"	popq %r9\n"
			"	popq %r8\n"
			"	popq %rax\n"
			"	popq %rcx\n"
			"	popq %rdx\n"
			"	popq %rsi\n"
			"	popq %rdi\n"
			/* Skip orig_ax, ip, cs */
			"	addq $24, %rsp\n"
			"	popfq\n"
			"	ret\n");
}
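/*
 * Stack layout note for the trampoline above: after the fifteen pushq's,
 * the three skipped slots and the pushfq, the word pushed by "pushq %rsp"
 * sits 19 slots (152 bytes) above the current %rsp.  That same slot is
 * what the final "ret" pops once everything else has been popped or
 * skipped, so storing trampoline_handler()'s return value there makes the
 * trampoline return to the real return address.
 */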

/*
 * Called from kretprobe_trampoline
 */
fastcall void * __kprobes trampoline_handler(struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

	INIT_HLIST_HEAD(&empty_rp);
	spin_lock_irqsave(&kretprobe_lock, flags);
	head = kretprobe_inst_table_head(current);
	/* fixup registers */
	regs->cs = __KERNEL_CS;
	regs->ip = trampoline_address;
	regs->orig_ax = ~0UL;

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * return probes installed on them, and/or more than one
	 * return probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always pushed into the head of the list
	 *     - when multiple return probes are registered for the same
	 *	 function, the (chronologically) first instance's ret_addr
	 *	 will be the real return address, and all the rest will
	 *	 point to kretprobe_trampoline.
	 */
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler) {
			__get_cpu_var(current_kprobe) = &ri->rp->kp;
			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
			ri->rp->handler(ri, regs);
			__get_cpu_var(current_kprobe) = NULL;
		}

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	spin_unlock_irqrestore(&kretprobe_lock, flags);

	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	return (void *)orig_ret_address;
}

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "int 3"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 *
 * This function prepares to return from the post-single-step
 * interrupt.  We have to fix up the stack as follows:
 *
 * 0) Except in the case of absolute or indirect jump or call instructions,
 * the new ip is relative to the copied instruction.  We need to make
 * it relative to the original instruction.
 *
 * 1) If the single-stepped instruction was pushf, then the TF and IF
 * flags are set in the just-pushed flags, and may need to be cleared.
 *
 * 2) If the single-stepped instruction was a call, the return address
 * that is atop the stack is the address following the copied instruction.
 * We need to make it the address following the original instruction.
 *
 * If this is the first time we've single-stepped the instruction at
 * this probepoint, and the instruction is boostable, boost it: add a
 * jump instruction after the copied instruction, that jumps to the next
 * instruction after the probepoint.
 */
static void __kprobes resume_execution(struct kprobe *p,
		struct pt_regs *regs, struct kprobe_ctlblk *kcb)
{
	unsigned long *tos = stack_addr(regs);
	unsigned long copy_ip = (unsigned long)p->ainsn.insn;
	unsigned long orig_ip = (unsigned long)p->addr;
	kprobe_opcode_t *insn = p->ainsn.insn;

	/* skip the REX prefix */
	if (*insn >= 0x40 && *insn <= 0x4f)
		insn++;

	regs->flags &= ~TF_MASK;
	switch (*insn) {
	case 0x9c:	/* pushf */
		*tos &= ~(TF_MASK | IF_MASK);
		*tos |= kcb->kprobe_old_flags;
		break;
	case 0xc2:	/* iret/ret/lret */
	case 0xc3:
	case 0xca:
	case 0xcb:
	case 0xcf:
	case 0xea:	/* jmp absolute -- ip is correct */
		/* ip is already adjusted, no more changes required */
		p->ainsn.boostable = 1;
		goto no_change;
	case 0xe8:	/* call relative - Fix return addr */
		*tos = orig_ip + (*tos - copy_ip);
		break;
	case 0xff:
		if ((insn[1] & 0x30) == 0x10) {
			/*
			 * call absolute, indirect
			 * Fix return addr; ip is correct.
			 * But this is not boostable
			 */
			*tos = orig_ip + (*tos - copy_ip);
			goto no_change;
		} else if (((insn[1] & 0x31) == 0x20) ||
			   ((insn[1] & 0x31) == 0x21)) {
			/*
			 * jmp near and far, absolute indirect
			 * ip is correct. And this is boostable
			 */
			p->ainsn.boostable = 1;
			goto no_change;
		}
	default:
		break;
	}

	if (p->ainsn.boostable == 0) {
		if ((regs->ip > copy_ip) &&
		    (regs->ip - copy_ip) + 5 < MAX_INSN_SIZE) {
			/*
			 * These instructions can be executed directly if they
			 * jump back to the correct address.
			 */
			set_jmp_op((void *)regs->ip,
				   (void *)orig_ip + (regs->ip - copy_ip));
			p->ainsn.boostable = 1;
		} else {
			p->ainsn.boostable = -1;
		}
	}

	regs->ip += orig_ip - copy_ip;

no_change:
	restore_btf();

	return;
}

/*
 * Interrupts are disabled on entry as trap1 is an interrupt gate and they
 * remain disabled throughout this function.
 */
static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs, kcb);
	regs->flags |= kcb->kprobe_saved_flags;
	trace_hardirqs_fixup_flags(regs->flags);

	/* Restore back the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	/*
	 * If somebody else is single-stepping across a probe point, flags
	 * will have TF set, in which case, continue the remaining processing
	 * of do_debug, as if this is not a probe hit.
	 */
	if (regs->flags & TF_MASK)
		return 0;

	return 1;
}

int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	const struct exception_table_entry *fixup;

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault.  We reset the current
		 * kprobe, point the ip back at the probe address, and
		 * allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->ip = (unsigned long)cur->addr;
		regs->flags |= kcb->kprobe_old_flags;
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting;
		 * the npre/npostfault counts could also be used to
		 * account for these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page fault.  This could happen
		 * if the handler tries to access user space via
		 * copy_from_user(), get_user() etc.  Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		fixup = search_exception_tables(regs->ip);
		if (fixup) {
			regs->ip = fixup->fixup;
			return 1;
		}

		/*
		 * The fixup routine could not handle it;
		 * let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}
	return 0;
}

/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	if (args->regs && user_mode_vm(args->regs))
		return ret;

	switch (val) {
	case DIE_INT3:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_DEBUG:
		if (post_kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_GPF:
		/* kprobe_running() needs smp_processor_id() */
		preempt_disable();
		if (kprobe_running() &&
		    kprobe_fault_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
		preempt_enable();
		break;
	default:
		break;
	}
	return ret;
}

int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	unsigned long addr;
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	kcb->jprobe_saved_regs = *regs;
	kcb->jprobe_saved_sp = stack_addr(regs);
	addr = (unsigned long)(kcb->jprobe_saved_sp);

	/*
	 * As Linus pointed out, gcc assumes that the callee
	 * owns the argument space and could overwrite it, e.g.
	 * tailcall optimization. So, to be absolutely safe
	 * we also save and restore enough stack bytes to cover
	 * the argument area.
	 */
	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
	       MIN_STACK_SIZE(addr));
	regs->flags &= ~IF_MASK;
	trace_hardirqs_off();
	regs->ip = (unsigned long)(jp->entry);
	return 1;
}
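/*
 * Note: MIN_STACK_SIZE(addr), from the x86 kprobes header, evaluates to the
 * smaller of MAX_STACK_SIZE and the number of bytes between addr and the top
 * of the current kernel stack, so the memcpy above never runs off the stack;
 * longjmp_break_handler() copies the same bytes back when the jprobe handler
 * is done.
 */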

void __kprobes jprobe_return(void)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	asm volatile ("       xchg  %%rbx,%%rsp	\n"
		      "       int3		\n"
		      "       .globl jprobe_return_end\n"
		      "       jprobe_return_end:	\n"
		      "       nop		\n"::"b"
		      (kcb->jprobe_saved_sp):"memory");
}
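/*
 * jprobe_return() swaps %rsp back to the stack pointer saved at probe entry
 * and then executes an int3.  That breakpoint re-enters kprobe_handler(),
 * whose break_handler (longjmp_break_handler below) recognizes the address
 * range and restores the saved registers and stack bytes.
 */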

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	u8 *addr = (u8 *) (regs->ip - 1);
	struct jprobe *jp = container_of(p, struct jprobe, kp);

	if ((addr > (u8 *) jprobe_return) && (addr < (u8 *) jprobe_return_end)) {
		if (stack_addr(regs) != kcb->jprobe_saved_sp) {
			struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
			printk("current sp %p does not match saved sp %p\n",
			       stack_addr(regs), kcb->jprobe_saved_sp);
			printk("Saved registers for jprobe %p\n", jp);
			show_registers(saved_regs);
			printk("Current registers\n");
			show_registers(regs);
			BUG();
		}
		*regs = kcb->jprobe_saved_regs;
		memcpy((kprobe_opcode_t *)(kcb->jprobe_saved_sp),
		       kcb->jprobes_stack,
		       MIN_STACK_SIZE(kcb->jprobe_saved_sp));
		preempt_enable_no_resched();
		return 1;
	}
	return 0;
}

int __init arch_init_kprobes(void)
{
	return 0;
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}