/*
 *  Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Oct	Jim Keniston <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> adapted for x86_64 from i386.
 * 2005-Mar	Roland McGrath <roland@redhat.com>
 *		Fixed to handle %rip-relative addressing mode correctly.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 * 2005-May	Rusty Lynch <rusty.lynch@intel.com>
 *		Added function return probes functionality
 * 2006-Feb	Masami Hiramatsu <hiramatu@sdl.hitachi.co.jp> added
 *		kprobe-booster and kretprobe-booster for i386.
 * 2007-Dec	Masami Hiramatsu <mhiramat@redhat.com> added kprobe-booster
 *		and kretprobe-booster for x86-64
 * 2007-Dec	Masami Hiramatsu <mhiramat@redhat.com>, Arjan van de Ven
 *		<arjan@infradead.org> and Jim Keniston <jkenisto@us.ibm.com>
 *		unified x86 kprobes code.
 */

#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/preempt.h>
#include <linux/module.h>
#include <linux/kdebug.h>

#include <asm/cacheflush.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/alternative.h>

void jprobe_return_end(void);

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

#ifdef CONFIG_X86_64
#define stack_addr(regs) ((unsigned long *)regs->sp)
#else
/*
 * "&regs->sp" looks wrong, but it's correct for x86_32.  x86_32 CPUs
 * don't save the ss and esp registers if the CPU is already in kernel
 * mode when it traps.  So for kprobes, regs->sp and regs->ss are not
 * the [nonexistent] saved stack pointer and ss register, but rather
 * the top 8 bytes of the pre-int3 stack.  So &regs->sp happens to
 * point to the top of the pre-int3 stack.
 */
#define stack_addr(regs) ((unsigned long *)&regs->sp)
#endif
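
/*
 * Either way, stack_addr() points at the word that was on top of the stack
 * when the int3 trapped; arch_prepare_kretprobe() and resume_execution()
 * below use it to read and patch the saved return address.
 */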

#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
	 << (row % 32))
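/*
 * Each W() row packs 16 one-bit flags, one per low opcode nibble.  Because
 * the result is shifted by (row % 32), two consecutive rows share a single
 * 32-bit array element, which is why the even rows in the tables below end
 * in '|' and the odd rows in ','.  A lookup is then just a bit test, e.g.
 * as done in can_boost():
 *
 *	test_bit(opcode, (unsigned long *)twobyte_is_boostable);
 */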
/*
 * Undefined/reserved opcodes, conditional jumps, Opcode Extension
 * Groups, and some special opcodes cannot be boosted.
 */
static const u32 twobyte_is_boostable[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
	/*      ----------------------------------------------          */
	W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */
	W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 10 */
	W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 20 */
	W(0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 50 */
	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1) | /* 60 */
	W(0x70, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */
	W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
	W(0xd0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) , /* d0 */
	W(0xe0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* e0 */
	W(0xf0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0)   /* f0 */
	/*      -----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
};
static const u32 onebyte_has_modrm[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
	/*      -----------------------------------------------         */
	W(0x00, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* 00 */
	W(0x10, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) , /* 10 */
	W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* 20 */
	W(0x30, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) , /* 30 */
	W(0x40, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 40 */
	W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 50 */
	W(0x60, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0) | /* 60 */
	W(0x70, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 70 */
	W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
	W(0x90, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 90 */
	W(0xa0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* a0 */
	W(0xb0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* b0 */
	W(0xc0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0) | /* c0 */
	W(0xd0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
	W(0xe0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* e0 */
	W(0xf0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1)   /* f0 */
	/*      -----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
};
static const u32 twobyte_has_modrm[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
	/*      -----------------------------------------------         */
	W(0x00, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1) | /* 0f */
	W(0x10, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0) , /* 1f */
	W(0x20, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* 2f */
	W(0x30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 3f */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 4f */
	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 5f */
	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 6f */
	W(0x70, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1) , /* 7f */
	W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 8f */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 9f */
	W(0xa0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1) | /* af */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1) , /* bf */
	W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0) | /* cf */
	W(0xd0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* df */
	W(0xe0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* ef */
	W(0xf0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0)   /* ff */
	/*      -----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
};
#undef W
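/*
 * The *_has_modrm tables flag which one-byte and two-byte (0x0f-prefixed)
 * opcodes carry a ModRM byte; fix_riprel() consults them when scanning a
 * copied instruction for a %rip-relative operand.
 */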

struct kretprobe_blackpoint kretprobe_blacklist[] = {
	{"__switch_to", }, /* This function switches only the current task,
			      but it does not switch the kernel stack. */
	{NULL, NULL}	/* Terminator */
};
const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);

/* Insert a jump instruction at address 'from', which jumps to address 'to'. */
static void __kprobes set_jmp_op(void *from, void *to)
{
	struct __arch_jmp_op {
		char op;
		s32 raddr;
	} __attribute__((packed)) * jop;
	jop = (struct __arch_jmp_op *)from;
	jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
	jop->op = RELATIVEJUMP_INSTRUCTION;
}
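/*
 * The jump written above is the 5-byte relative form: a one-byte opcode
 * (RELATIVEJUMP_INSTRUCTION) followed by a 32-bit displacement measured
 * from the end of the jump itself, hence the "+ 5" in the raddr calculation.
 * resume_execution() uses this to "boost" a probe: it appends such a jump
 * after the single-stepped copy so that later hits can execute the copy and
 * jump straight back to the original instruction stream without taking the
 * debug trap again.
 */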

/*
 * Returns non-zero if opcode is boostable.
 * RIP relative instructions are adjusted at copying time in 64-bit mode.
 */
static int __kprobes can_boost(kprobe_opcode_t *opcodes)
{
	kprobe_opcode_t opcode;
	kprobe_opcode_t *orig_opcodes = opcodes;

retry:
	if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
		return 0;
	opcode = *(opcodes++);

	/* 2nd-byte opcode */
	if (opcode == 0x0f) {
		if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
			return 0;
		return test_bit(*opcodes,
				(unsigned long *)twobyte_is_boostable);
	}

	switch (opcode & 0xf0) {
#ifdef CONFIG_X86_64
	case 0x40:
		goto retry; /* REX prefix is boostable */
#endif
	case 0x60:
		if (0x63 < opcode && opcode < 0x67)
			goto retry; /* prefixes */
		/* can't boost Address-size override and bound */
		return (opcode != 0x62 && opcode != 0x67);
	case 0x70:
		return 0; /* can't boost conditional jump */
	case 0xc0:
		/* can't boost software interrupts */
		return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf;
	case 0xd0:
		/* can boost AA* and XLAT */
		return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
	case 0xe0:
		/* can boost in/out and absolute jmps */
		return ((opcode & 0x04) || opcode == 0xea);
	case 0xf0:
		if ((opcode & 0x0c) == 0 && opcode != 0xf1)
			goto retry; /* lock/rep(ne) prefix */
		/* clear and set flags are boostable */
		return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
	default:
		/* segment override prefixes are boostable */
		if (opcode == 0x26 || opcode == 0x36 || opcode == 0x3e)
			goto retry; /* prefixes */
		/* CS override prefix and call are not boostable */
		return (opcode != 0x2e && opcode != 0x9a);
	}
}

/*
 * Returns non-zero if opcode modifies the interrupt flag.
 */
static int __kprobes is_IF_modifier(kprobe_opcode_t *insn)
{
	switch (*insn) {
	case 0xfa:		/* cli */
	case 0xfb:		/* sti */
	case 0xcf:		/* iret/iretd */
	case 0x9d:		/* popf/popfd */
		return 1;
	}
#ifdef CONFIG_X86_64
	/*
	 * On 64-bit x86, 0x40-0x4f are REX prefixes, so we need to look
	 * at the next byte instead (but of course not recurse infinitely).
	 */
	if (*insn >= 0x40 && *insn <= 0x4f)
		return is_IF_modifier(++insn);
#endif
	return 0;
}
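/*
 * set_current_kprobe() uses this check to drop X86_EFLAGS_IF from the saved
 * flags, so that restoring them after single-stepping does not undo the
 * effect of an instruction (cli/sti/popf/iret) that changes IF itself.
 */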

#ifdef CONFIG_X86_64
/*
 * Adjust the displacement if the instruction uses the %rip-relative
 * addressing mode.  The copied instruction is patched in place, so there
 * is nothing to return.
 */
static void __kprobes fix_riprel(struct kprobe *p)
{
	u8 *insn = p->ainsn.insn;
	s64 disp;
	int need_modrm;

	/* Skip legacy instruction prefixes.  */
	while (1) {
		switch (*insn) {
		case 0x66:
		case 0x67:
		case 0x2e:
		case 0x3e:
		case 0x26:
		case 0x64:
		case 0x65:
		case 0x36:
		case 0xf0:
		case 0xf3:
		case 0xf2:
			++insn;
			continue;
		}
		break;
	}

	/* Skip REX instruction prefix.  */
	if ((*insn & 0xf0) == 0x40)
		++insn;

	if (*insn == 0x0f) {
		/* Two-byte opcode.  */
		++insn;
		need_modrm = test_bit(*insn,
				      (unsigned long *)twobyte_has_modrm);
	} else
		/* One-byte opcode.  */
		need_modrm = test_bit(*insn,
				      (unsigned long *)onebyte_has_modrm);

	if (need_modrm) {
		u8 modrm = *++insn;
		if ((modrm & 0xc7) == 0x05) {
			/* %rip+disp32 addressing mode */
			/* Displacement follows ModRM byte.  */
			++insn;
			/*
			 * The copied instruction uses the %rip-relative
			 * addressing mode.  Adjust the displacement for the
			 * difference between the original location of this
			 * instruction and the location of the copy that will
			 * actually be run.  The tricky bit here is making sure
			 * that the sign extension happens correctly in this
			 * calculation, since we need a signed 32-bit result to
			 * be sign-extended to 64 bits when it's added to the
			 * %rip value and yield the same 64-bit result that the
			 * sign-extension of the original signed 32-bit
			 * displacement would have given.
			 */
			disp = (u8 *) p->addr + *((s32 *) insn) -
			       (u8 *) p->ainsn.insn;
			BUG_ON((s64) (s32) disp != disp); /* Sanity check.  */
			*(s32 *)insn = (s32) disp;
		}
	}
}
#endif

static void __kprobes arch_copy_kprobe(struct kprobe *p)
{
	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
#ifdef CONFIG_X86_64
	fix_riprel(p);
#endif
	if (can_boost(p->addr))
		p->ainsn.boostable = 0;
	else
		p->ainsn.boostable = -1;

	p->opcode = *p->addr;
}

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	/* insn: must be on special executable page on x86. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;
	arch_copy_kprobe(p);
	return 0;
}

void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1);
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	text_poke(p->addr, &p->opcode, 1);
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	mutex_lock(&kprobe_mutex);
	free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1));
	mutex_unlock(&kprobe_mutex);
}
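/*
 * Note: the second argument to free_insn_slot() marks the slot as dirty when
 * the probe was boosted; the generic slot allocator is then expected to defer
 * reuse, since some CPU may still be executing out of the copied-instruction
 * buffer.  (That behaviour lives in the generic kprobes code, not here.)
 */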

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.old_flags = kcb->kprobe_old_flags;
	kcb->prev_kprobe.saved_flags = kcb->kprobe_saved_flags;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_old_flags = kcb->prev_kprobe.old_flags;
	kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags;
}

static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
				struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = p;
	kcb->kprobe_saved_flags = kcb->kprobe_old_flags
		= (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
	if (is_IF_modifier(p->ainsn.insn))
		kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF;
}

static void __kprobes clear_btf(void)
{
	if (test_thread_flag(TIF_DEBUGCTLMSR))
		wrmsr(MSR_IA32_DEBUGCTLMSR, 0, 0);
}

static void __kprobes restore_btf(void)
{
	if (test_thread_flag(TIF_DEBUGCTLMSR))
		wrmsr(MSR_IA32_DEBUGCTLMSR, current->thread.debugctlmsr, 0);
}

static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	clear_btf();
	regs->flags |= X86_EFLAGS_TF;
	regs->flags &= ~X86_EFLAGS_IF;
	/* single step inline if the instruction is an int3 */
	if (p->opcode == BREAKPOINT_INSTRUCTION)
		regs->ip = (unsigned long)p->addr;
	else
		regs->ip = (unsigned long)p->ainsn.insn;
}

/* Called with kretprobe_lock held */
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	unsigned long *sara = stack_addr(regs);

	ri->ret_addr = (kprobe_opcode_t *) *sara;

	/* Replace the return addr with trampoline addr */
	*sara = (unsigned long) &kretprobe_trampoline;
}
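/*
 * From this point on, the probed function will "return" into
 * kretprobe_trampoline instead of its caller; the real return address is
 * kept in ri->ret_addr and is restored by trampoline_handler() below.
 */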
/*
 * We have reentered the kprobe_handler(), since another probe was hit while
 * within the handler. We save the original kprobes variables and just single
 * step on the instruction of the new probe without calling any user handlers.
 */
static void __kprobes reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
				     struct kprobe_ctlblk *kcb)
{
	save_previous_kprobe(kcb);
	set_current_kprobe(p, regs, kcb);
	kprobes_inc_nmissed_count(p);
	prepare_singlestep(p, regs);
	kcb->kprobe_status = KPROBE_REENTER;
}

/*
 * Interrupts are disabled on entry as trap3 is an interrupt gate and they
 * remain disabled throughout this function.
 */
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	kprobe_opcode_t *addr;
	struct kprobe_ctlblk *kcb;

	addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	/* Check we're not actually recursing */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			if (kcb->kprobe_status == KPROBE_HIT_SS &&
			    *p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
				regs->flags &= ~X86_EFLAGS_TF;
				regs->flags |= kcb->kprobe_saved_flags;
				goto no_kprobe;
#ifdef CONFIG_X86_64
			} else if (kcb->kprobe_status == KPROBE_HIT_SSDONE) {
				/* TODO: Provide re-entrancy from
				 * post_kprobes_handler() and avoid exception
				 * stack corruption while single-stepping on
				 * the instruction of the new probe.
				 */
				arch_disarm_kprobe(p);
				regs->ip = (unsigned long)p->addr;
				reset_current_kprobe();
				ret = 1;
				goto no_kprobe;
#endif
			}
			reenter_kprobe(p, regs, kcb);
			return 1;
		} else {
			if (*addr != BREAKPOINT_INSTRUCTION) {
				/* The breakpoint instruction was removed by
				 * another cpu right after we hit it; no further
				 * handling of this interrupt is appropriate
				 */
				regs->ip = (unsigned long)addr;
				ret = 1;
				goto no_kprobe;
			}
			p = __get_cpu_var(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs))
				goto ss_probe;
		}
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p) {
		if (*addr != BREAKPOINT_INSTRUCTION) {
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it.  Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address.  In either case, no further
			 * handling of this interrupt is appropriate.
			 * Back up over the (now missing) int3 and run
			 * the original instruction.
			 */
			regs->ip = (unsigned long)addr;
			ret = 1;
		}
		/* Not one of ours: let kernel handle it */
		goto no_kprobe;
	}

	set_current_kprobe(p, regs, kcb);
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	if (p->pre_handler && p->pre_handler(p, regs))
		/* handler has already set things up, so skip ss setup */
		return 1;

ss_probe:
#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PM)
	if (p->ainsn.boostable == 1 && !p->post_handler) {
		/* Boost up -- we can execute copied instructions directly */
		reset_current_kprobe();
		regs->ip = (unsigned long)p->ainsn.insn;
		preempt_enable_no_resched();
		return 1;
	}
#endif
	prepare_singlestep(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}


/*
 * When a retprobed function returns, this code saves registers and
 * calls trampoline_handler(), which in turn calls the kretprobe's handler.
 */
void __kprobes kretprobe_trampoline_holder(void)
{
	asm volatile (
			".global kretprobe_trampoline\n"
			"kretprobe_trampoline: \n"
#ifdef CONFIG_X86_64
			/* We don't bother saving the ss register */
			"	pushq %rsp\n"
			"	pushfq\n"
			/*
			 * Skip cs, ip, orig_ax.
			 * trampoline_handler() will plug in these values
			 */
			"	subq $24, %rsp\n"
			"	pushq %rdi\n"
			"	pushq %rsi\n"
			"	pushq %rdx\n"
			"	pushq %rcx\n"
			"	pushq %rax\n"
			"	pushq %r8\n"
			"	pushq %r9\n"
			"	pushq %r10\n"
			"	pushq %r11\n"
			"	pushq %rbx\n"
			"	pushq %rbp\n"
			"	pushq %r12\n"
			"	pushq %r13\n"
			"	pushq %r14\n"
			"	pushq %r15\n"
			"	movq %rsp, %rdi\n"
			"	call trampoline_handler\n"
			/* Replace saved sp with true return address. */
			"	movq %rax, 152(%rsp)\n"
			"	popq %r15\n"
			"	popq %r14\n"
			"	popq %r13\n"
			"	popq %r12\n"
			"	popq %rbp\n"
			"	popq %rbx\n"
			"	popq %r11\n"
			"	popq %r10\n"
			"	popq %r9\n"
			"	popq %r8\n"
			"	popq %rax\n"
			"	popq %rcx\n"
			"	popq %rdx\n"
			"	popq %rsi\n"
			"	popq %rdi\n"
			/* Skip orig_ax, ip, cs */
			"	addq $24, %rsp\n"
			"	popfq\n"
#else
			"	pushf\n"
			/*
			 * Skip cs, ip, orig_ax.
			 * trampoline_handler() will plug in these values
			 */
			"	subl $12, %esp\n"
			"	pushl %fs\n"
			"	pushl %ds\n"
			"	pushl %es\n"
			"	pushl %eax\n"
			"	pushl %ebp\n"
			"	pushl %edi\n"
			"	pushl %esi\n"
			"	pushl %edx\n"
			"	pushl %ecx\n"
			"	pushl %ebx\n"
			"	movl %esp, %eax\n"
			"	call trampoline_handler\n"
			/* Move flags to cs */
			"	movl 52(%esp), %edx\n"
			"	movl %edx, 48(%esp)\n"
			/* Replace saved flags with true return address. */
			"	movl %eax, 52(%esp)\n"
			"	popl %ebx\n"
			"	popl %ecx\n"
			"	popl %edx\n"
			"	popl %esi\n"
			"	popl %edi\n"
			"	popl %ebp\n"
			"	popl %eax\n"
			/* Skip ip, orig_ax, es, ds, fs */
			"	addl $20, %esp\n"
			"	popf\n"
#endif
			"	ret\n");
}
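/*
 * The pushes above are laid out to match struct pt_regs (minus the fields
 * that are deliberately skipped), so the stack pointer handed to
 * trampoline_handler() can be treated as a struct pt_regs *.  The handler's
 * return value, the real return address, is written back into the saved
 * slot and becomes the target of the final "ret".
 */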

/*
 * Called from kretprobe_trampoline
 */
void * __kprobes trampoline_handler(struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

	INIT_HLIST_HEAD(&empty_rp);
	spin_lock_irqsave(&kretprobe_lock, flags);
	head = kretprobe_inst_table_head(current);
	/* fixup registers */
#ifdef CONFIG_X86_64
	regs->cs = __KERNEL_CS;
#else
	regs->cs = __KERNEL_CS | get_kernel_rpl();
#endif
	regs->ip = trampoline_address;
	regs->orig_ax = ~0UL;

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * return probes installed on them, and/or more than one
	 * return probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always pushed into the head of the list
	 *     - when multiple return probes are registered for the same
	 *	 function, the (chronologically) first instance's ret_addr
	 *	 will be the real return address, and all the rest will
	 *	 point to kretprobe_trampoline.
	 */
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler) {
			__get_cpu_var(current_kprobe) = &ri->rp->kp;
			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
			ri->rp->handler(ri, regs);
			__get_cpu_var(current_kprobe) = NULL;
		}

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	spin_unlock_irqrestore(&kretprobe_lock, flags);

	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	return (void *)orig_ret_address;
}


/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "int 3"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 *
 * This function prepares to return from the post-single-step
 * interrupt.  We have to fix up the stack as follows:
 *
 * 0) Except in the case of absolute or indirect jump or call instructions,
 * the new ip is relative to the copied instruction.  We need to make
 * it relative to the original instruction.
 *
 * 1) If the single-stepped instruction was pushfl, then the TF and IF
 * flags are set in the just-pushed flags, and may need to be cleared.
 *
 * 2) If the single-stepped instruction was a call, the return address
 * that is atop the stack is the address following the copied instruction.
 * We need to make it the address following the original instruction.
 *
 * If this is the first time we've single-stepped the instruction at
 * this probepoint, and the instruction is boostable, boost it: add a
 * jump instruction after the copied instruction, that jumps to the next
 * instruction after the probepoint.
 */
static void __kprobes resume_execution(struct kprobe *p,
		struct pt_regs *regs, struct kprobe_ctlblk *kcb)
{
	unsigned long *tos = stack_addr(regs);
	unsigned long copy_ip = (unsigned long)p->ainsn.insn;
	unsigned long orig_ip = (unsigned long)p->addr;
	kprobe_opcode_t *insn = p->ainsn.insn;

#ifdef CONFIG_X86_64
	/* skip the REX prefix */
	if (*insn >= 0x40 && *insn <= 0x4f)
		insn++;
#endif

	regs->flags &= ~X86_EFLAGS_TF;
	switch (*insn) {
	case 0x9c:	/* pushfl */
		*tos &= ~(X86_EFLAGS_TF | X86_EFLAGS_IF);
		*tos |= kcb->kprobe_old_flags;
		break;
	case 0xc2:	/* iret/ret/lret */
	case 0xc3:
	case 0xca:
	case 0xcb:
	case 0xcf:
	case 0xea:	/* jmp absolute -- ip is correct */
		/* ip is already adjusted, no more changes required */
		p->ainsn.boostable = 1;
		goto no_change;
	case 0xe8:	/* call relative - Fix return addr */
		*tos = orig_ip + (*tos - copy_ip);
		break;
#ifdef CONFIG_X86_32
	case 0x9a:	/* call absolute -- same as call absolute, indirect */
		*tos = orig_ip + (*tos - copy_ip);
		goto no_change;
#endif
	case 0xff:
		if ((insn[1] & 0x30) == 0x10) {
			/*
			 * call absolute, indirect
			 * Fix return addr; ip is correct.
			 * But this is not boostable
			 */
			*tos = orig_ip + (*tos - copy_ip);
			goto no_change;
		} else if (((insn[1] & 0x31) == 0x20) ||
			   ((insn[1] & 0x31) == 0x21)) {
			/*
			 * jmp near and far, absolute indirect
			 * ip is correct. And this is boostable
			 */
			p->ainsn.boostable = 1;
			goto no_change;
		}
	default:
		break;
	}

	if (p->ainsn.boostable == 0) {
		if ((regs->ip > copy_ip) &&
		    (regs->ip - copy_ip) + 5 < MAX_INSN_SIZE) {
			/*
			 * This instruction can be executed directly if it
			 * jumps back to the correct address.
			 */
			set_jmp_op((void *)regs->ip,
				   (void *)orig_ip + (regs->ip - copy_ip));
			p->ainsn.boostable = 1;
		} else {
			p->ainsn.boostable = -1;
		}
	}

	regs->ip += orig_ip - copy_ip;

no_change:
	restore_btf();
}

/*
 * Interrupts are disabled on entry as trap1 is an interrupt gate and they
 * remain disabled throughout this function.
 */
static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs, kcb);
	regs->flags |= kcb->kprobe_saved_flags;
	trace_hardirqs_fixup_flags(regs->flags);

	/* Restore back the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	/*
	 * if somebody else is singlestepping across a probe point, flags
	 * will have TF set, in which case, continue the remaining processing
	 * of do_debug, as if this is not a probe hit.
	 */
	if (regs->flags & X86_EFLAGS_TF)
		return 0;

	return 1;
}
| 863 | |
Prasanna S Panchamukhi | 0f2fbdc | 2005-09-06 15:19:28 -0700 | [diff] [blame] | 864 | int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 865 | { |
Ananth N Mavinakayanahalli | e7a510f | 2005-11-07 01:00:12 -0800 | [diff] [blame] | 866 | struct kprobe *cur = kprobe_running(); |
| 867 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); |
| 868 | |
Masami Hiramatsu | d6be29b | 2008-01-30 13:31:21 +0100 | [diff] [blame] | 869 | switch (kcb->kprobe_status) { |
Prasanna S Panchamukhi | c28f896 | 2006-03-26 01:38:23 -0800 | [diff] [blame] | 870 | case KPROBE_HIT_SS: |
| 871 | case KPROBE_REENTER: |
| 872 | /* |
|  873 | 		 * We are here because the instruction being single- |
|  874 | 		 * stepped caused a page fault. We reset the current |
H. Peter Anvin	| 65ea5b0	| 2008-01-30 13:30:56 +0100 | [diff] [blame]	|  875 | 		 * kprobe so that ip points back to the probe address, |
Prasanna S Panchamukhi	| c28f896	| 2006-03-26 01:38:23 -0800 | [diff] [blame]	|  876 | 		 * and allow the page fault handler to continue as a |
|  877 | 		 * normal page fault. |
| 878 | */ |
H. Peter Anvin | 65ea5b0 | 2008-01-30 13:30:56 +0100 | [diff] [blame] | 879 | regs->ip = (unsigned long)cur->addr; |
Masami Hiramatsu | 8533bbe | 2008-01-30 13:31:21 +0100 | [diff] [blame] | 880 | regs->flags |= kcb->kprobe_old_flags; |
Prasanna S Panchamukhi | c28f896 | 2006-03-26 01:38:23 -0800 | [diff] [blame] | 881 | if (kcb->kprobe_status == KPROBE_REENTER) |
| 882 | restore_previous_kprobe(kcb); |
| 883 | else |
| 884 | reset_current_kprobe(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 885 | preempt_enable_no_resched(); |
Prasanna S Panchamukhi | c28f896 | 2006-03-26 01:38:23 -0800 | [diff] [blame] | 886 | break; |
| 887 | case KPROBE_HIT_ACTIVE: |
| 888 | case KPROBE_HIT_SSDONE: |
| 889 | /* |
|  890 | 		 * We increment the nmissed count for accounting; |
Masami Hiramatsu	| 8533bbe	| 2008-01-30 13:31:21 +0100 | [diff] [blame]	|  891 | 		 * the npre/npostfault counts could also be used to |
Prasanna S Panchamukhi	| c28f896	| 2006-03-26 01:38:23 -0800 | [diff] [blame]	|  892 | 		 * account for these specific fault cases. |
| 893 | */ |
| 894 | kprobes_inc_nmissed_count(cur); |
| 895 | |
| 896 | /* |
|  897 | 		 * We come here because an instruction in the pre/post |
|  898 | 		 * handler caused the page fault; this can happen if |
|  899 | 		 * the handler tries to access user space via |
|  900 | 		 * copy_from_user(), get_user(), etc. Let the |
|  901 | 		 * user-specified fault handler try to fix it first. |
| 902 | */ |
| 903 | if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) |
| 904 | return 1; |
| 905 | |
| 906 | /* |
|  907 | 		 * If the user-specified fault handler returned zero, |
|  908 | 		 * try the exception-table fixup. |
| 909 | */ |
Masami Hiramatsu | d6be29b | 2008-01-30 13:31:21 +0100 | [diff] [blame] | 910 | if (fixup_exception(regs)) |
| 911 | return 1; |
Harvey Harrison | 6d48583 | 2008-01-30 13:31:41 +0100 | [diff] [blame] | 912 | |
Prasanna S Panchamukhi | c28f896 | 2006-03-26 01:38:23 -0800 | [diff] [blame] | 913 | /* |
Masami Hiramatsu	| 8533bbe	| 2008-01-30 13:31:21 +0100 | [diff] [blame]	|  914 | 		 * The fixup routine could not handle it; |
Prasanna S Panchamukhi	| c28f896	| 2006-03-26 01:38:23 -0800 | [diff] [blame]	|  915 | 		 * let do_page_fault() fix it. |
| 916 | */ |
| 917 | break; |
| 918 | default: |
| 919 | break; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 920 | } |
| 921 | return 0; |
| 922 | } |
| 923 | |
| 924 | /* |
| 925 | * Wrapper routine for handling exceptions. |
| 926 | */ |
Prasanna S Panchamukhi | 0f2fbdc | 2005-09-06 15:19:28 -0700 | [diff] [blame] | 927 | int __kprobes kprobe_exceptions_notify(struct notifier_block *self, |
| 928 | unsigned long val, void *data) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 929 | { |
| 930 | struct die_args *args = (struct die_args *)data; |
Ananth N Mavinakayanahalli | 66ff2d0 | 2005-11-07 01:00:07 -0800 | [diff] [blame] | 931 | int ret = NOTIFY_DONE; |
| 932 | |
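| |	/* Kprobes only instrument kernel text; user-mode traps are not ours. */ |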
Masami Hiramatsu | 8533bbe | 2008-01-30 13:31:21 +0100 | [diff] [blame] | 933 | if (args->regs && user_mode_vm(args->regs)) |
bibo,mao | 2326c77 | 2006-03-26 01:38:21 -0800 | [diff] [blame] | 934 | return ret; |
| 935 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 936 | switch (val) { |
| 937 | case DIE_INT3: |
| 938 | if (kprobe_handler(args->regs)) |
Ananth N Mavinakayanahalli | 66ff2d0 | 2005-11-07 01:00:07 -0800 | [diff] [blame] | 939 | ret = NOTIFY_STOP; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 940 | break; |
| 941 | case DIE_DEBUG: |
| 942 | if (post_kprobe_handler(args->regs)) |
Ananth N Mavinakayanahalli | 66ff2d0 | 2005-11-07 01:00:07 -0800 | [diff] [blame] | 943 | ret = NOTIFY_STOP; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 944 | break; |
| 945 | case DIE_GPF: |
Ananth N Mavinakayanahalli | d217d54 | 2005-11-07 01:00:14 -0800 | [diff] [blame] | 946 | /* kprobe_running() needs smp_processor_id() */ |
| 947 | preempt_disable(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 948 | if (kprobe_running() && |
| 949 | kprobe_fault_handler(args->regs, args->trapnr)) |
Ananth N Mavinakayanahalli | 66ff2d0 | 2005-11-07 01:00:07 -0800 | [diff] [blame] | 950 | ret = NOTIFY_STOP; |
Ananth N Mavinakayanahalli | d217d54 | 2005-11-07 01:00:14 -0800 | [diff] [blame] | 951 | preempt_enable(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 952 | break; |
| 953 | default: |
| 954 | break; |
| 955 | } |
Ananth N Mavinakayanahalli | 66ff2d0 | 2005-11-07 01:00:07 -0800 | [diff] [blame] | 956 | return ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 957 | } |
| 958 | |
Prasanna S Panchamukhi | 0f2fbdc | 2005-09-06 15:19:28 -0700 | [diff] [blame] | 959 | int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 960 | { |
| 961 | struct jprobe *jp = container_of(p, struct jprobe, kp); |
| 962 | unsigned long addr; |
Ananth N Mavinakayanahalli | e7a510f | 2005-11-07 01:00:12 -0800 | [diff] [blame] | 963 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 964 | |
Ananth N Mavinakayanahalli | e7a510f | 2005-11-07 01:00:12 -0800 | [diff] [blame] | 965 | kcb->jprobe_saved_regs = *regs; |
Masami Hiramatsu | 8533bbe | 2008-01-30 13:31:21 +0100 | [diff] [blame] | 966 | kcb->jprobe_saved_sp = stack_addr(regs); |
| 967 | addr = (unsigned long)(kcb->jprobe_saved_sp); |
| 968 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 969 | /* |
| 970 | * As Linus pointed out, gcc assumes that the callee |
| 971 | * owns the argument space and could overwrite it, e.g. |
| 972 | * tailcall optimization. So, to be absolutely safe |
| 973 | * we also save and restore enough stack bytes to cover |
| 974 | * the argument area. |
| 975 | */ |
Ananth N Mavinakayanahalli | e7a510f | 2005-11-07 01:00:12 -0800 | [diff] [blame] | 976 | memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr, |
Masami Hiramatsu | d6be29b | 2008-01-30 13:31:21 +0100 | [diff] [blame] | 977 | MIN_STACK_SIZE(addr)); |
Glauber de Oliveira Costa | 053de04 | 2008-01-30 13:31:27 +0100 | [diff] [blame] | 978 | regs->flags &= ~X86_EFLAGS_IF; |
Peter Zijlstra | 58dfe88 | 2007-10-11 22:25:25 +0200 | [diff] [blame] | 979 | trace_hardirqs_off(); |
H. Peter Anvin | 65ea5b0 | 2008-01-30 13:30:56 +0100 | [diff] [blame] | 980 | regs->ip = (unsigned long)(jp->entry); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 981 | return 1; |
| 982 | } |
| 983 | |
Prasanna S Panchamukhi | 0f2fbdc | 2005-09-06 15:19:28 -0700 | [diff] [blame] | 984 | void __kprobes jprobe_return(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 985 | { |
Ananth N Mavinakayanahalli | e7a510f | 2005-11-07 01:00:12 -0800 | [diff] [blame] | 986 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); |
| 987 | |
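| |	/* |
| |	 * Switch to the stack pointer saved by setjmp_pre_handler() (passed |
| |	 * in via the "b" constraint) and execute int3: the breakpoint trap |
| |	 * re-enters the kprobe code, which hands control to |
| |	 * longjmp_break_handler() below. |
| |	 */ |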
Masami Hiramatsu | d6be29b | 2008-01-30 13:31:21 +0100 | [diff] [blame] | 988 | asm volatile ( |
| 989 | #ifdef CONFIG_X86_64 |
| 990 | " xchg %%rbx,%%rsp \n" |
| 991 | #else |
| 992 | " xchgl %%ebx,%%esp \n" |
| 993 | #endif |
| 994 | " int3 \n" |
| 995 | " .globl jprobe_return_end\n" |
| 996 | " jprobe_return_end: \n" |
| 997 | " nop \n"::"b" |
| 998 | (kcb->jprobe_saved_sp):"memory"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 999 | } |
| 1000 | |
Prasanna S Panchamukhi | 0f2fbdc | 2005-09-06 15:19:28 -0700 | [diff] [blame] | 1001 | int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1002 | { |
Ananth N Mavinakayanahalli | e7a510f | 2005-11-07 01:00:12 -0800 | [diff] [blame] | 1003 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); |
H. Peter Anvin | 65ea5b0 | 2008-01-30 13:30:56 +0100 | [diff] [blame] | 1004 | u8 *addr = (u8 *) (regs->ip - 1); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1005 | struct jprobe *jp = container_of(p, struct jprobe, kp); |
| 1006 | |
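| |	/* |
| |	 * addr (ip - 1) is the int3 itself; if it lies inside jprobe_return(), |
| |	 * this trap marks the end of the jprobe handler, so restore the |
| |	 * registers and stack saved by setjmp_pre_handler(). |
| |	 */ |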
Masami Hiramatsu | d6be29b | 2008-01-30 13:31:21 +0100 | [diff] [blame] | 1007 | if ((addr > (u8 *) jprobe_return) && |
| 1008 | (addr < (u8 *) jprobe_return_end)) { |
Masami Hiramatsu | 8533bbe | 2008-01-30 13:31:21 +0100 | [diff] [blame] | 1009 | if (stack_addr(regs) != kcb->jprobe_saved_sp) { |
Masami Hiramatsu | 29b6cd7 | 2007-12-18 18:05:58 +0100 | [diff] [blame] | 1010 | struct pt_regs *saved_regs = &kcb->jprobe_saved_regs; |
Masami Hiramatsu | d6be29b | 2008-01-30 13:31:21 +0100 | [diff] [blame] | 1011 | printk(KERN_ERR |
| 1012 | "current sp %p does not match saved sp %p\n", |
Masami Hiramatsu | 8533bbe | 2008-01-30 13:31:21 +0100 | [diff] [blame] | 1013 | stack_addr(regs), kcb->jprobe_saved_sp); |
Masami Hiramatsu | d6be29b | 2008-01-30 13:31:21 +0100 | [diff] [blame] | 1014 | printk(KERN_ERR "Saved registers for jprobe %p\n", jp); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1015 | show_registers(saved_regs); |
Masami Hiramatsu | d6be29b | 2008-01-30 13:31:21 +0100 | [diff] [blame] | 1016 | printk(KERN_ERR "Current registers\n"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1017 | show_registers(regs); |
| 1018 | BUG(); |
| 1019 | } |
Ananth N Mavinakayanahalli | e7a510f | 2005-11-07 01:00:12 -0800 | [diff] [blame] | 1020 | *regs = kcb->jprobe_saved_regs; |
Masami Hiramatsu | 8533bbe | 2008-01-30 13:31:21 +0100 | [diff] [blame] | 1021 | memcpy((kprobe_opcode_t *)(kcb->jprobe_saved_sp), |
| 1022 | kcb->jprobes_stack, |
| 1023 | MIN_STACK_SIZE(kcb->jprobe_saved_sp)); |
Ananth N Mavinakayanahalli | d217d54 | 2005-11-07 01:00:14 -0800 | [diff] [blame] | 1024 | preempt_enable_no_resched(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1025 | return 1; |
| 1026 | } |
| 1027 | return 0; |
| 1028 | } |
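| | |
| | /* |
| |  * Editor's sketch (illustrative only, not part of this file): the three |
| |  * routines above are the arch-specific half of the jprobe machinery. |
| |  * A client of the generic <linux/kprobes.h> interface would look roughly |
| |  * like the following; my_target() and jmy_target() are hypothetical |
| |  * names, the handler must mirror the probed function's signature, and |
| |  * the exact .entry initializer syntax should follow the kprobes |
| |  * documentation for the kernel version in use: |
| |  * |
| |  *	static long jmy_target(int arg) |
| |  *	{ |
| |  *		printk(KERN_INFO "my_target(%d) entered\n", arg); |
| |  *		jprobe_return();	// always exits via int3 |
| |  *		return 0;		// never reached |
| |  *	} |
| |  * |
| |  *	static struct jprobe my_jp = { |
| |  *		.entry		= jmy_target, |
| |  *		.kp.symbol_name	= "my_target", |
| |  *	}; |
| |  * |
| |  *	register_jprobe(&my_jp);	// setjmp_pre_handler() runs on entry |
| |  *	... |
| |  *	unregister_jprobe(&my_jp); |
| |  */ |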
Rusty Lynch | ba8af12 | 2005-06-27 15:17:10 -0700 | [diff] [blame] | 1029 | |
Rusty Lynch | 6772926 | 2005-07-05 18:54:50 -0700 | [diff] [blame] | 1030 | int __init arch_init_kprobes(void) |
Rusty Lynch | ba8af12 | 2005-06-27 15:17:10 -0700 | [diff] [blame] | 1031 | { |
Masami Hiramatsu | da07ab0 | 2008-01-30 13:31:21 +0100 | [diff] [blame] | 1032 | return 0; |
Rusty Lynch | ba8af12 | 2005-06-27 15:17:10 -0700 | [diff] [blame] | 1033 | } |
Ananth N Mavinakayanahalli | bf8f6e5 | 2007-05-08 00:34:16 -0700 | [diff] [blame] | 1034 | |
| 1035 | int __kprobes arch_trampoline_kprobe(struct kprobe *p) |
| 1036 | { |
Ananth N Mavinakayanahalli | bf8f6e5 | 2007-05-08 00:34:16 -0700 | [diff] [blame] | 1037 | return 0; |
| 1038 | } |