/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/sysctl.h>
#include <linux/kdebug.h>
#include <linux/memory.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/jump_label.h>

#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/uaccess.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)


/*
 * Some oddball architectures like 64bit powerpc have function descriptors
 * so this must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
	addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif

static int kprobes_initialized;
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_all_disarmed;

/* This protects kprobe_table and optimizing_list */
static DEFINE_MUTEX(kprobe_mutex);
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
static struct {
	spinlock_t lock ____cacheline_aligned_in_smp;
} kretprobe_table_locks[KPROBE_TABLE_SIZE];

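/* Return the spinlock that protects the kretprobe hash bucket 'hash' */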
static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
{
	return &(kretprobe_table_locks[hash].lock);
}

/*
 * Normally, functions that we'd want to prohibit kprobes in are marked
 * __kprobes. But there are cases where such functions already belong to
 * a different section (__sched for preempt_schedule).
 *
 * For such cases, we now have a blacklist.
 */
static struct kprobe_blackpoint kprobe_blacklist[] = {
	{"preempt_schedule",},
	{"native_get_debugreg",},
	{"irq_entries_start",},
	{"common_interrupt",},
	{"mcount",},	/* mcount can be called from everywhere */
	{NULL}    /* Terminator */
};

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster
 */
struct kprobe_insn_page {
	struct list_head list;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	int nused;
	int ngarbage;
	char slot_used[];
};

#define KPROBE_INSN_PAGE_SIZE(slots)			\
	(offsetof(struct kprobe_insn_page, slot_used) +	\
	 (sizeof(char) * (slots)))

struct kprobe_insn_cache {
	struct list_head pages;	/* list of kprobe_insn_page */
	size_t insn_size;	/* size of instruction slot */
	int nr_garbage;
};

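/* Number of instruction slots that fit on one page of cache 'c' */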
static int slots_per_page(struct kprobe_insn_cache *c)
{
	return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
}

enum kprobe_slot_state {
	SLOT_CLEAN = 0,
	SLOT_DIRTY = 1,
	SLOT_USED = 2,
};

static DEFINE_MUTEX(kprobe_insn_mutex);	/* Protects kprobe_insn_slots */
static struct kprobe_insn_cache kprobe_insn_slots = {
	.pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
	.insn_size = MAX_INSN_SIZE,
	.nr_garbage = 0,
};
static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c);

/**
 * __get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
{
	struct kprobe_insn_page *kip;

 retry:
	list_for_each_entry(kip, &c->pages, list) {
		if (kip->nused < slots_per_page(c)) {
			int i;
			for (i = 0; i < slots_per_page(c); i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					return kip->insns + (i * c->insn_size);
				}
			}
			/* kip->nused is broken. Fix it. */
			kip->nused = slots_per_page(c);
			WARN_ON(1);
		}
	}

	/* If there are any garbage slots, collect them and try again. */
	if (c->nr_garbage && collect_garbage_slots(c) == 0)
		goto retry;

	/* All out of space.  Need to allocate a new page. */
	kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
	if (!kip)
		return NULL;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = module_alloc(PAGE_SIZE);
	if (!kip->insns) {
		kfree(kip);
		return NULL;
	}
	INIT_LIST_HEAD(&kip->list);
	memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
	kip->slot_used[0] = SLOT_USED;
	kip->nused = 1;
	kip->ngarbage = 0;
	list_add(&kip->list, &c->pages);
	return kip->insns;
}


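/* Get a free instruction slot from the default kprobe insn cache */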
kprobe_opcode_t __kprobes *get_insn_slot(void)
{
	kprobe_opcode_t *ret = NULL;

	mutex_lock(&kprobe_insn_mutex);
	ret = __get_insn_slot(&kprobe_insn_slots);
	mutex_unlock(&kprobe_insn_mutex);

	return ret;
}

/* Return 1 if all garbage slots are collected, otherwise 0. */
static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use.  Free it unless
		 * it's the last one.  We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		if (!list_is_singular(&kip->list)) {
			list_del(&kip->list);
			module_free(NULL, kip->insns);
			kfree(kip);
		}
		return 1;
	}
	return 0;
}

static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c)
{
	struct kprobe_insn_page *kip, *next;

	/* Ensure no one is still interrupted on the garbage slots */
	synchronize_sched();

	list_for_each_entry_safe(kip, next, &c->pages, list) {
		int i;
		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbage slots */
		for (i = 0; i < slots_per_page(c); i++) {
			if (kip->slot_used[i] == SLOT_DIRTY &&
			    collect_one_slot(kip, i))
				break;
		}
	}
	c->nr_garbage = 0;
	return 0;
}

static void __kprobes __free_insn_slot(struct kprobe_insn_cache *c,
				       kprobe_opcode_t *slot, int dirty)
{
	struct kprobe_insn_page *kip;

	list_for_each_entry(kip, &c->pages, list) {
		long idx = ((long)slot - (long)kip->insns) /
				(c->insn_size * sizeof(kprobe_opcode_t));
		if (idx >= 0 && idx < slots_per_page(c)) {
			WARN_ON(kip->slot_used[idx] != SLOT_USED);
			if (dirty) {
				kip->slot_used[idx] = SLOT_DIRTY;
				kip->ngarbage++;
				if (++c->nr_garbage > slots_per_page(c))
					collect_garbage_slots(c);
			} else
				collect_one_slot(kip, idx);
			return;
		}
	}
	/* Could not free this slot. */
	WARN_ON(1);
}

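/*
 * Return a slot to the default kprobe insn cache. A dirty slot is only
 * reclaimed later by the garbage collector, after synchronize_sched().
 */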
void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty)
{
	mutex_lock(&kprobe_insn_mutex);
	__free_insn_slot(&kprobe_insn_slots, slot, dirty);
	mutex_unlock(&kprobe_insn_mutex);
}
#ifdef CONFIG_OPTPROBES
/* For optimized_kprobe buffer */
static DEFINE_MUTEX(kprobe_optinsn_mutex); /* Protects kprobe_optinsn_slots */
static struct kprobe_insn_cache kprobe_optinsn_slots = {
	.pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
	/* .insn_size is initialized later */
	.nr_garbage = 0,
};
/* Get a slot for optimized_kprobe buffer */
kprobe_opcode_t __kprobes *get_optinsn_slot(void)
{
	kprobe_opcode_t *ret = NULL;

	mutex_lock(&kprobe_optinsn_mutex);
	ret = __get_insn_slot(&kprobe_optinsn_slots);
	mutex_unlock(&kprobe_optinsn_mutex);

	return ret;
}

void __kprobes free_optinsn_slot(kprobe_opcode_t * slot, int dirty)
{
	mutex_lock(&kprobe_optinsn_mutex);
	__free_insn_slot(&kprobe_optinsn_slots, slot, dirty);
	mutex_unlock(&kprobe_optinsn_mutex);
}
#endif
#endif

/* We have preemption disabled, so it is safe to use __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__get_cpu_var(kprobe_instance) = kp;
}

static inline void reset_kprobe_instance(void)
{
	__get_cpu_var(kprobe_instance) = NULL;
}

/*
 * This routine is called either:
 *	- under the kprobe_mutex - during kprobe_[un]register()
 *				OR
 *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		if (p->addr == addr)
			return p;
	}

	return NULL;
}

static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);

/* Return true if the kprobe is an aggregator */
static inline int kprobe_aggrprobe(struct kprobe *p)
{
	return p->pre_handler == aggr_pre_handler;
}

/* Return true(!0) if the kprobe is unused */
static inline int kprobe_unused(struct kprobe *p)
{
	return kprobe_aggrprobe(p) && kprobe_disabled(p) &&
	       list_empty(&p->list);
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p)
{
	memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &ap->ainsn, sizeof(struct arch_specific_insn));
}

#ifdef CONFIG_OPTPROBES
/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_allow_optimization;

/*
 * Call all pre_handlers on the list, but ignore their return values.
 * This must be called from the arch-dependent optimized caller.
 */
void __kprobes opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->pre_handler(kp, regs);
		}
		reset_kprobe_instance();
	}
}

/* Free optimized instructions and optimized_kprobe */
static __kprobes void free_aggr_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	arch_remove_optimized_kprobe(op);
	arch_remove_kprobe(p);
	kfree(op);
}

/* Return true(!0) if the kprobe is ready for optimization. */
static inline int kprobe_optready(struct kprobe *p)
{
	struct optimized_kprobe *op;

	if (kprobe_aggrprobe(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		return arch_prepared_optinsn(&op->optinsn);
	}

	return 0;
}

/* Return true(!0) if the kprobe is disarmed. Note: p must be on hash list */
static inline int kprobe_disarmed(struct kprobe *p)
{
	struct optimized_kprobe *op;

	/* If kprobe is not aggr/opt probe, just return kprobe is disabled */
	if (!kprobe_aggrprobe(p))
		return kprobe_disabled(p);

	op = container_of(p, struct optimized_kprobe, kp);

	return kprobe_disabled(p) && list_empty(&op->list);
}

/* Return true(!0) if the probe is queued on (un)optimizing lists */
static int __kprobes kprobe_queued(struct kprobe *p)
{
	struct optimized_kprobe *op;

	if (kprobe_aggrprobe(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		if (!list_empty(&op->list))
			return 1;
	}
	return 0;
}

/*
 * Return an optimized kprobe whose optimizing code replaces
 * instructions including addr (excluding the breakpoint address itself).
 */
static struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr)
{
	int i;
	struct kprobe *p = NULL;
	struct optimized_kprobe *op;

	/* Don't check i == 0, since that is a breakpoint case. */
	for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++)
		p = get_kprobe((void *)(addr - i));

	if (p && kprobe_optready(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		if (arch_within_optimized_kprobe(op, addr))
			return p;
	}

	return NULL;
}

/* Optimization staging list, protected by kprobe_mutex */
static LIST_HEAD(optimizing_list);
static LIST_HEAD(unoptimizing_list);

static void kprobe_optimizer(struct work_struct *work);
static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
static DECLARE_COMPLETION(optimizer_comp);
#define OPTIMIZE_DELAY 5

/*
 * Optimize (replace a breakpoint with a jump) kprobes listed on
 * optimizing_list.
 */
static __kprobes void do_optimize_kprobes(void)
{
	struct optimized_kprobe *op, *tmp;

	/* Optimization is never done while kprobes are all disarmed */
	if (kprobes_all_disarmed || !kprobes_allow_optimization ||
	    list_empty(&optimizing_list))
		return;

	/*
	 * Optimization/unoptimization uses stop_machine(), which refers to
	 * online_cpus, while cpu-hotplug modifies online_cpus. text_mutex
	 * is also taken both here and during cpu-hotplug. This combination
	 * can deadlock (cpu-hotplug tries to lock text_mutex while
	 * stop_machine() cannot proceed because online_cpus has changed).
	 * To avoid the deadlock, call get_online_cpus() to keep cpu-hotplug
	 * away while text_mutex is held.
	 */
	get_online_cpus();
	mutex_lock(&text_mutex);
	list_for_each_entry_safe(op, tmp, &optimizing_list, list) {
		WARN_ON(kprobe_disabled(&op->kp));
		if (arch_optimize_kprobe(op) < 0)
			op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
		list_del_init(&op->list);
	}
	mutex_unlock(&text_mutex);
	put_online_cpus();
}

/*
 * Unoptimize (replace a jump with a breakpoint and remove the breakpoint
 * if needed) kprobes listed on unoptimizing_list.
 */
static __kprobes void do_unoptimize_kprobes(struct list_head *free_list)
{
	struct optimized_kprobe *op, *tmp;

	/* Unoptimization must be done anytime, even while disarmed */
	if (list_empty(&unoptimizing_list))
		return;

	/* Ditto to do_optimize_kprobes */
	get_online_cpus();
	mutex_lock(&text_mutex);
	list_for_each_entry_safe(op, tmp, &unoptimizing_list, list) {
		/* Unoptimize kprobes */
		arch_unoptimize_kprobe(op);
		/* Disarm probes if marked disabled */
		if (kprobe_disabled(&op->kp))
			arch_disarm_kprobe(&op->kp);
		if (kprobe_unused(&op->kp)) {
			/*
			 * Remove unused probes from the hash list. After
			 * waiting for synchronization, these probes are
			 * reclaimed (by do_free_cleaned_kprobes).
			 */
			hlist_del_rcu(&op->kp.hlist);
			/* Move only unused probes onto free_list */
			list_move(&op->list, free_list);
		} else
			list_del_init(&op->list);
	}
	mutex_unlock(&text_mutex);
	put_online_cpus();
}

/* Reclaim all kprobes on the free_list */
static __kprobes void do_free_cleaned_kprobes(struct list_head *free_list)
{
	struct optimized_kprobe *op, *tmp;

	list_for_each_entry_safe(op, tmp, free_list, list) {
		BUG_ON(!kprobe_unused(&op->kp));
		list_del_init(&op->list);
		free_aggr_kprobe(&op->kp);
	}
}

/* Start the optimizer after OPTIMIZE_DELAY has passed */
static __kprobes void kick_kprobe_optimizer(void)
{
	if (!delayed_work_pending(&optimizing_work))
		schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
}

/* Kprobe jump optimizer */
static __kprobes void kprobe_optimizer(struct work_struct *work)
{
	LIST_HEAD(free_list);

	/* Lock modules while optimizing kprobes */
	mutex_lock(&module_mutex);
	mutex_lock(&kprobe_mutex);

	/*
	 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
	 * kprobes before waiting for the quiescence period.
	 */
	do_unoptimize_kprobes(&free_list);

	/*
	 * Step 2: Wait for a quiescence period to ensure all running
	 * interrupts are done. Because an optprobe may modify multiple
	 * instructions, there is a chance that the Nth instruction is
	 * interrupted. In that case, the running interrupt can return
	 * into the 2nd-Nth byte of the jump instruction. This wait
	 * avoids that.
	 */
	synchronize_sched();

	/* Step 3: Optimize kprobes after the quiescence period */
	do_optimize_kprobes();

	/* Step 4: Free cleaned kprobes after the quiescence period */
	do_free_cleaned_kprobes(&free_list);

	mutex_unlock(&kprobe_mutex);
	mutex_unlock(&module_mutex);

	/* Wake up all waiters */
	complete_all(&optimizer_comp);
}

/* Wait for optimization and unoptimization to complete */
static __kprobes void wait_for_kprobe_optimizer(void)
{
	if (delayed_work_pending(&optimizing_work))
		wait_for_completion(&optimizer_comp);
}

/* Optimize kprobe if p is ready to be optimized */
static __kprobes void optimize_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	/* Check if the kprobe is disabled or not ready for optimization. */
	if (!kprobe_optready(p) || !kprobes_allow_optimization ||
	    (kprobe_disabled(p) || kprobes_all_disarmed))
		return;

	/* Neither break_handler nor post_handler is supported. */
	if (p->break_handler || p->post_handler)
		return;

	op = container_of(p, struct optimized_kprobe, kp);

	/* Check that there are no other kprobes at the optimized instructions */
	if (arch_check_optimized_kprobe(op) < 0)
		return;

	/* Check if it is already optimized. */
	if (op->kp.flags & KPROBE_FLAG_OPTIMIZED)
		return;
	op->kp.flags |= KPROBE_FLAG_OPTIMIZED;

	if (!list_empty(&op->list))
		/* This is being unoptimized. Just dequeue the probe */
		list_del_init(&op->list);
	else {
		list_add(&op->list, &optimizing_list);
		kick_kprobe_optimizer();
	}
}

/* Short cut to direct unoptimizing */
static __kprobes void force_unoptimize_kprobe(struct optimized_kprobe *op)
{
	get_online_cpus();
	arch_unoptimize_kprobe(op);
	put_online_cpus();
	if (kprobe_disabled(&op->kp))
		arch_disarm_kprobe(&op->kp);
}

/* Unoptimize a kprobe if p is optimized */
static __kprobes void unoptimize_kprobe(struct kprobe *p, bool force)
{
	struct optimized_kprobe *op;

	if (!kprobe_aggrprobe(p) || kprobe_disarmed(p))
		return; /* This is not an optprobe nor optimized */

	op = container_of(p, struct optimized_kprobe, kp);
	if (!kprobe_optimized(p)) {
		/* Unoptimized or unoptimizing case */
		if (force && !list_empty(&op->list)) {
			/*
			 * If this kprobe is queued for unoptimization and
			 * force is set, forcibly unoptimize it right away.
			 * (There is no need to unoptimize an already
			 * unoptimized kprobe again.)
			 */
			list_del_init(&op->list);
			force_unoptimize_kprobe(op);
		}
		return;
	}

	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
	if (!list_empty(&op->list)) {
		/* Dequeue from the optimization queue */
		list_del_init(&op->list);
		return;
	}
	/* Optimized kprobe case */
	if (force)
		/* Forcibly update the code: this is a special case */
		force_unoptimize_kprobe(op);
	else {
		list_add(&op->list, &unoptimizing_list);
		kick_kprobe_optimizer();
	}
}

/* Remove optimized instructions */
static void __kprobes kill_optimized_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	if (!list_empty(&op->list))
		/* Dequeue from the (un)optimization queue */
		list_del_init(&op->list);

	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
	/* Don't touch the code, because it is already freed. */
	arch_remove_optimized_kprobe(op);
}

/* Try to prepare optimized instructions */
static __kprobes void prepare_optimized_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	arch_prepare_optimized_kprobe(op);
}

/* Allocate new optimized_kprobe and try to prepare optimized instructions */
static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL);
	if (!op)
		return NULL;

	INIT_LIST_HEAD(&op->list);
	op->kp.addr = p->addr;
	arch_prepare_optimized_kprobe(op);

	return &op->kp;
}

static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);

/*
 * Prepare an optimized_kprobe and optimize it
 * NOTE: p must be a normal registered kprobe
 */
static __kprobes void try_to_optimize_kprobe(struct kprobe *p)
{
	struct kprobe *ap;
	struct optimized_kprobe *op;

	ap = alloc_aggr_kprobe(p);
	if (!ap)
		return;

	op = container_of(ap, struct optimized_kprobe, kp);
	if (!arch_prepared_optinsn(&op->optinsn)) {
		/* If setting up the optimization failed, fall back to kprobe */
		arch_remove_optimized_kprobe(op);
		kfree(op);
		return;
	}

	init_aggr_kprobe(ap, p);
	optimize_kprobe(ap);
}

#ifdef CONFIG_SYSCTL
/* This should be called with kprobe_mutex locked */
static void __kprobes optimize_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	/* If optimization is already allowed, just return */
	if (kprobes_allow_optimization)
		return;

	kprobes_allow_optimization = true;
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			if (!kprobe_disabled(p))
				optimize_kprobe(p);
	}
	printk(KERN_INFO "Kprobes globally optimized\n");
}

/* This should be called with kprobe_mutex locked */
static void __kprobes unoptimize_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	/* If optimization is already prohibited, just return */
	if (!kprobes_allow_optimization)
		return;

	kprobes_allow_optimization = false;
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist) {
			if (!kprobe_disabled(p))
				unoptimize_kprobe(p, false);
		}
	}
	/* Wait for unoptimizing completion */
	wait_for_kprobe_optimizer();
	printk(KERN_INFO "Kprobes globally unoptimized\n");
}

int sysctl_kprobes_optimization;
int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
				      void __user *buffer, size_t *length,
				      loff_t *ppos)
{
	int ret;

	mutex_lock(&kprobe_mutex);
	sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);

	if (sysctl_kprobes_optimization)
		optimize_all_kprobes();
	else
		unoptimize_all_kprobes();
	mutex_unlock(&kprobe_mutex);

	return ret;
}
#endif /* CONFIG_SYSCTL */

/* Put a breakpoint for a probe. Must be called with text_mutex locked */
static void __kprobes __arm_kprobe(struct kprobe *p)
{
	struct kprobe *_p;

	/* Check collision with other optimized kprobes */
	_p = get_optimized_kprobe((unsigned long)p->addr);
	if (unlikely(_p))
		/* Fallback to unoptimized kprobe */
		unoptimize_kprobe(_p, true);

	arch_arm_kprobe(p);
	optimize_kprobe(p);	/* Try to optimize (add kprobe to a list) */
}

/* Remove the breakpoint of a probe. Must be called with text_mutex locked */
static void __kprobes __disarm_kprobe(struct kprobe *p, bool reopt)
{
	struct kprobe *_p;

	unoptimize_kprobe(p, false);	/* Try to unoptimize */

	if (!kprobe_queued(p)) {
		arch_disarm_kprobe(p);
		/* If another kprobe was blocked, optimize it. */
		_p = get_optimized_kprobe((unsigned long)p->addr);
		if (unlikely(_p) && reopt)
			optimize_kprobe(_p);
	}
	/* TODO: reoptimize others after this probe has been unoptimized */
}

#else /* !CONFIG_OPTPROBES */

#define optimize_kprobe(p)			do {} while (0)
#define unoptimize_kprobe(p, f)			do {} while (0)
#define kill_optimized_kprobe(p)		do {} while (0)
#define prepare_optimized_kprobe(p)		do {} while (0)
#define try_to_optimize_kprobe(p)		do {} while (0)
#define __arm_kprobe(p)				arch_arm_kprobe(p)
#define __disarm_kprobe(p, o)			arch_disarm_kprobe(p)
#define kprobe_disarmed(p)			kprobe_disabled(p)
#define wait_for_kprobe_optimizer()		do {} while (0)

static __kprobes void free_aggr_kprobe(struct kprobe *p)
{
	arch_remove_kprobe(p);
	kfree(p);
}

static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
{
	return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
}
#endif /* CONFIG_OPTPROBES */

/* Arm a kprobe with text_mutex */
static void __kprobes arm_kprobe(struct kprobe *kp)
{
	/*
	 * Here, since __arm_kprobe() doesn't use stop_machine(),
	 * this doesn't cause deadlock on text_mutex. So, we don't
	 * need get_online_cpus().
	 */
	mutex_lock(&text_mutex);
	__arm_kprobe(kp);
	mutex_unlock(&text_mutex);
}

/* Disarm a kprobe with text_mutex */
static void __kprobes disarm_kprobe(struct kprobe *kp)
{
	/* Ditto */
	mutex_lock(&text_mutex);
	__disarm_kprobe(kp, true);
	mutex_unlock(&text_mutex);
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}

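/* Call the post_handler of each enabled kprobe on the aggregator's list */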
static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
					unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
}

static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
					int trapnr)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);

	/*
	 * If we faulted "during" the execution of a user-specified
	 * probe handler, invoke just that probe's fault handler
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}

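/* Invoke the break_handler of the kprobe currently executing on this CPU */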
static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}

/* Walks the list and increments nmissed count for multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;
	if (!kprobe_aggrprobe(p)) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
	return;
}

void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
			       struct hlist_head *head)
{
	struct kretprobe *rp = ri->rp;

	/* remove rp inst off the rprobe_inst_table */
	hlist_del(&ri->hlist);
	INIT_HLIST_NODE(&ri->hlist);
	if (likely(rp)) {
		spin_lock(&rp->lock);
		hlist_add_head(&ri->hlist, &rp->free_instances);
		spin_unlock(&rp->lock);
	} else
		/* Unregistering */
		hlist_add_head(&ri->hlist, head);
}

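/* Lock the kretprobe hash bucket for 'tsk' and return its head in *head */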
void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
			 struct hlist_head **head, unsigned long *flags)
__acquires(hlist_lock)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	spinlock_t *hlist_lock;

	*head = &kretprobe_inst_table[hash];
	hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_lock_irqsave(hlist_lock, *flags);
}

static void __kprobes kretprobe_table_lock(unsigned long hash,
	unsigned long *flags)
__acquires(hlist_lock)
{
	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_lock_irqsave(hlist_lock, *flags);
}

void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
	unsigned long *flags)
__releases(hlist_lock)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	spinlock_t *hlist_lock;

	hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_unlock_irqrestore(hlist_lock, *flags);
}

static void __kprobes kretprobe_table_unlock(unsigned long hash,
       unsigned long *flags)
__releases(hlist_lock)
{
	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_unlock_irqrestore(hlist_lock, *flags);
}

/*
 * This function is called from finish_task_switch() when task tk becomes
 * dead, so that we can recycle any function-return probe instances
 * associated with this task. These left-over instances represent probed
 * functions that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long hash, flags = 0;

	if (unlikely(!kprobes_initialized))
		/* Early boot.  kretprobe_table_locks not yet initialized. */
		return;

	INIT_HLIST_HEAD(&empty_rp);
	hash = hash_ptr(tk, KPROBE_HASH_BITS);
	head = &kretprobe_inst_table[hash];
	kretprobe_table_lock(hash, &flags);
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri, &empty_rp);
	}
	kretprobe_table_unlock(hash, &flags);
	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

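/* Free all kretprobe_instances left on rp's free_instances list */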
static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;

	hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

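/* Detach all in-flight instances from 'rp' and free its spare instances */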
static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
{
	unsigned long flags, hash;
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;
	struct hlist_head *head;

	/* No race here */
	for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
		kretprobe_table_lock(hash, &flags);
		head = &kretprobe_inst_table[hash];
		hlist_for_each_entry_safe(ri, pos, next, head, hlist) {
			if (ri->rp == rp)
				ri->rp = NULL;
		}
		kretprobe_table_unlock(hash, &flags);
	}
	free_rp_inst(rp);
}

/*
 * Add the new probe to ap->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
{
	BUG_ON(kprobe_gone(ap) || kprobe_gone(p));

	if (p->break_handler || p->post_handler)
		unoptimize_kprobe(ap, true);	/* Fall back to normal kprobe */

	if (p->break_handler) {
		if (ap->break_handler)
			return -EEXIST;
		list_add_tail_rcu(&p->list, &ap->list);
		ap->break_handler = aggr_break_handler;
	} else
		list_add_rcu(&p->list, &ap->list);
	if (p->post_handler && !ap->post_handler)
		ap->post_handler = aggr_post_handler;

	if (kprobe_disabled(ap) && !kprobe_disabled(p)) {
		ap->flags &= ~KPROBE_FLAG_DISABLED;
		if (!kprobes_all_disarmed)
			/* Arm the breakpoint again. */
			__arm_kprobe(ap);
	}
	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	/* Copy p's insn slot to ap */
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	/* We don't care about a kprobe which has gone away. */
	if (p->post_handler && !kprobe_gone(p))
		ap->post_handler = aggr_post_handler;
	if (p->break_handler && !kprobe_gone(p))
		ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	INIT_HLIST_NODE(&ap->hlist);

	list_add_rcu(&p->list, &ap->list);
	hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
static int __kprobes register_aggr_kprobe(struct kprobe *orig_p,
					  struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap = orig_p;

	if (!kprobe_aggrprobe(orig_p)) {
		/* If orig_p is not an aggr_kprobe, create new aggr_kprobe. */
		ap = alloc_aggr_kprobe(orig_p);
		if (!ap)
			return -ENOMEM;
		init_aggr_kprobe(ap, orig_p);
	} else if (kprobe_unused(ap))
		/* The aggregator is busy dying; don't reuse it */
		return -EBUSY;

	if (kprobe_gone(ap)) {
		/*
		 * Attempting to insert a new probe at the same location
		 * that had a probe in a module vaddr area which has
		 * already been freed. So, the instruction slot has
		 * already been released. We need a new slot for the
		 * new probe.
		 */
		ret = arch_prepare_kprobe(ap);
		if (ret)
			/*
			 * Even if allocating a new slot fails, we don't
			 * need to free the aggr_kprobe. It will be used
			 * next time, or freed by unregister_kprobe().
			 */
			return ret;

		/* Prepare optimized instructions if possible. */
		prepare_optimized_kprobe(ap);

		/*
		 * Clear the gone flag to prevent allocating a new slot
		 * again, and set the disabled flag because it is not
		 * armed yet.
		 */
		ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
			    | KPROBE_FLAG_DISABLED;
	}

	/* Copy ap's insn slot to p */
	copy_kprobe(ap, p);
	return add_new_kprobe(ap, p);
}

static int __kprobes in_kprobes_functions(unsigned long addr)
{
	struct kprobe_blackpoint *kb;

	if (addr >= (unsigned long)__kprobes_text_start &&
	    addr < (unsigned long)__kprobes_text_end)
		return -EINVAL;
	/*
	 * If there exists a kprobe_blacklist, verify and
	 * fail any probe registration in the prohibited area
	 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		if (kb->start_addr) {
			if (addr >= kb->start_addr &&
			    addr < (kb->start_addr + kb->range))
				return -EINVAL;
		}
	}
	return 0;
}

/*
 * If we have a symbol_name argument, look it up and add the offset field
 * to it. This way, we can specify a relative address to a symbol.
 */
static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p)
{
	kprobe_opcode_t *addr = p->addr;
	if (p->symbol_name) {
		if (addr)
			return NULL;
		kprobe_lookup_name(p->symbol_name, addr);
	}

	if (!addr)
		return NULL;
	return (kprobe_opcode_t *)(((char *)addr) + p->offset);
}

/* Check that the passed kprobe is valid and return the kprobe in kprobe_table. */
static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p)
{
	struct kprobe *ap, *list_p;

	ap = get_kprobe(p->addr);
	if (unlikely(!ap))
		return NULL;

	if (p != ap) {
		list_for_each_entry_rcu(list_p, &ap->list, list)
			if (list_p == p)
				/* kprobe p is a valid probe */
				goto valid;
		return NULL;
	}
valid:
	return ap;
}

1270/* Return error if the kprobe is being re-registered */
1271static inline int check_kprobe_rereg(struct kprobe *p)
1272{
1273 int ret = 0;
Ananth N Mavinakayanahalli1f0ab402009-09-15 10:43:07 +05301274
1275 mutex_lock(&kprobe_mutex);
Masami Hiramatsu6d8e40a2010-12-03 18:53:50 +09001276 if (__get_valid_kprobe(p))
Ananth N Mavinakayanahalli1f0ab402009-09-15 10:43:07 +05301277 ret = -EINVAL;
1278 mutex_unlock(&kprobe_mutex);
Masami Hiramatsu6d8e40a2010-12-03 18:53:50 +09001279
Ananth N Mavinakayanahalli1f0ab402009-09-15 10:43:07 +05301280 return ret;
1281}
1282
Masami Hiramatsu49ad2fd2009-01-06 14:41:53 -08001283int __kprobes register_kprobe(struct kprobe *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001284{
1285 int ret = 0;
Ananth N Mavinakayanahalli64f562c2005-05-05 16:15:42 -07001286 struct kprobe *old_p;
Keshavamurthy Anil Sdf019b12006-01-11 12:17:41 -08001287 struct module *probed_mod;
Masami Hiramatsub2a5cd62008-03-04 14:29:44 -08001288 kprobe_opcode_t *addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001289
Masami Hiramatsub2a5cd62008-03-04 14:29:44 -08001290 addr = kprobe_addr(p);
1291 if (!addr)
Ananth N Mavinakayanahalli3a872d82006-10-02 02:17:30 -07001292 return -EINVAL;
Masami Hiramatsub2a5cd62008-03-04 14:29:44 -08001293 p->addr = addr;
Ananth N Mavinakayanahalli3a872d82006-10-02 02:17:30 -07001294
Ananth N Mavinakayanahalli1f0ab402009-09-15 10:43:07 +05301295 ret = check_kprobe_rereg(p);
1296 if (ret)
1297 return ret;
1298
Jason Baron91bad2f82010-10-01 17:23:48 -04001299 jump_label_lock();
Steven Rostedtde31c3c2010-10-18 10:38:58 -04001300 preempt_disable();
Masami Hiramatsuec30c5f2009-07-28 19:47:23 -04001301 if (!kernel_text_address((unsigned long) p->addr) ||
Masami Hiramatsu4554dbc2010-02-02 16:49:18 -05001302 in_kprobes_functions((unsigned long) p->addr) ||
Jason Baron4c3ef6d2010-09-17 11:09:08 -04001303 ftrace_text_reserved(p->addr, p->addr) ||
Steven Rostedtde31c3c2010-10-18 10:38:58 -04001304 jump_label_text_reserved(p->addr, p->addr))
1305 goto fail_with_jump_label;
Mao, Bibob3e55c72005-12-12 00:37:00 -08001306
Masami Hiramatsude5bd882009-04-06 19:01:02 -07001307 /* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
1308 p->flags &= KPROBE_FLAG_DISABLED;
1309
Christoph Hellwig6f716ac2007-05-08 00:34:13 -07001310 /*
1311	 * Check whether we are probing a module.
1312 */
Masami Hiramatsua189d032008-11-12 13:26:51 -08001313 probed_mod = __module_text_address((unsigned long) p->addr);
Christoph Hellwig6f716ac2007-05-08 00:34:13 -07001314 if (probed_mod) {
Christoph Hellwig6f716ac2007-05-08 00:34:13 -07001315 /*
Masami Hiramatsue8386a02009-01-06 14:41:52 -08001316 * We must hold a refcount of the probed module while updating
1317		 * its code, to prevent it from being unloaded unexpectedly.
Keshavamurthy Anil Sdf019b12006-01-11 12:17:41 -08001318 */
Steven Rostedtde31c3c2010-10-18 10:38:58 -04001319 if (unlikely(!try_module_get(probed_mod)))
1320 goto fail_with_jump_label;
1321
Masami Hiramatsuf24659d2009-01-06 14:41:55 -08001322 /*
1323		 * If the module has already freed .init.text, we can't
1324		 * insert kprobes there.
1325 */
1326 if (within_module_init((unsigned long)p->addr, probed_mod) &&
1327 probed_mod->state != MODULE_STATE_COMING) {
1328 module_put(probed_mod);
Steven Rostedtde31c3c2010-10-18 10:38:58 -04001329 goto fail_with_jump_label;
Masami Hiramatsuf24659d2009-01-06 14:41:55 -08001330 }
Keshavamurthy Anil Sdf019b12006-01-11 12:17:41 -08001331 }
Masami Hiramatsua189d032008-11-12 13:26:51 -08001332 preempt_enable();
Steven Rostedtde31c3c2010-10-18 10:38:58 -04001333 jump_label_unlock();
Mao, Bibob3e55c72005-12-12 00:37:00 -08001334
Ananth N Mavinakayanahalli3516a462005-11-07 01:00:13 -08001335 p->nmissed = 0;
Masami Hiramatsu98616682008-04-28 02:14:28 -07001336 INIT_LIST_HEAD(&p->list);
Ingo Molnar7a7d1cf2006-03-23 03:00:35 -08001337 mutex_lock(&kprobe_mutex);
Masami Hiramatsuafd66252010-02-25 08:34:07 -05001338
Jason Baron91bad2f82010-10-01 17:23:48 -04001339 jump_label_lock(); /* needed to call jump_label_text_reserved() */
1340
Masami Hiramatsuafd66252010-02-25 08:34:07 -05001341 get_online_cpus(); /* For avoiding text_mutex deadlock. */
1342 mutex_lock(&text_mutex);
1343
Ananth N Mavinakayanahalli64f562c2005-05-05 16:15:42 -07001344 old_p = get_kprobe(p->addr);
1345 if (old_p) {
Masami Hiramatsuafd66252010-02-25 08:34:07 -05001346		/* This may unoptimize old_p, hence we hold text_mutex. */
Ananth N Mavinakayanahalli64f562c2005-05-05 16:15:42 -07001347 ret = register_aggr_kprobe(old_p, p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001348 goto out;
1349 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001350
Christoph Hellwig6f716ac2007-05-08 00:34:13 -07001351 ret = arch_prepare_kprobe(p);
1352 if (ret)
Masami Hiramatsuafd66252010-02-25 08:34:07 -05001353 goto out;
Anil S Keshavamurthy49a2a1b2006-01-09 20:52:43 -08001354
Ananth N Mavinakayanahalli64f562c2005-05-05 16:15:42 -07001355 INIT_HLIST_NODE(&p->hlist);
Ananth N Mavinakayanahalli3516a462005-11-07 01:00:13 -08001356 hlist_add_head_rcu(&p->hlist,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001357 &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
1358
Masami Hiramatsude5bd882009-04-06 19:01:02 -07001359 if (!kprobes_all_disarmed && !kprobe_disabled(p))
Masami Hiramatsuafd66252010-02-25 08:34:07 -05001360 __arm_kprobe(p);
Christoph Hellwig74a0b572007-10-16 01:24:07 -07001361
Masami Hiramatsuafd66252010-02-25 08:34:07 -05001362 /* Try to optimize kprobe */
1363 try_to_optimize_kprobe(p);
1364
Linus Torvalds1da177e2005-04-16 15:20:36 -07001365out:
Masami Hiramatsuafd66252010-02-25 08:34:07 -05001366 mutex_unlock(&text_mutex);
1367 put_online_cpus();
Jason Baron91bad2f82010-10-01 17:23:48 -04001368 jump_label_unlock();
Ingo Molnar7a7d1cf2006-03-23 03:00:35 -08001369 mutex_unlock(&kprobe_mutex);
Anil S Keshavamurthy49a2a1b2006-01-09 20:52:43 -08001370
Masami Hiramatsue8386a02009-01-06 14:41:52 -08001371 if (probed_mod)
Keshavamurthy Anil Sdf019b12006-01-11 12:17:41 -08001372 module_put(probed_mod);
Masami Hiramatsue8386a02009-01-06 14:41:52 -08001373
Linus Torvalds1da177e2005-04-16 15:20:36 -07001374 return ret;
Steven Rostedtde31c3c2010-10-18 10:38:58 -04001375
1376fail_with_jump_label:
1377 preempt_enable();
1378 jump_label_unlock();
1379 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001380}
Masami Hiramatsu99081ab2009-04-06 19:00:59 -07001381EXPORT_SYMBOL_GPL(register_kprobe);
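
/*
 * Illustrative usage (a minimal sketch, not part of this file): a separate
 * module registering one probe with register_kprobe().  The probed symbol
 * ("do_fork") and the handler bodies are assumptions chosen for the
 * example; see samples/kprobes/ for the maintained sample modules.
 */
#include <linux/module.h>
#include <linux/kprobes.h>

static struct kprobe sample_kp = {
	.symbol_name = "do_fork",	/* assumed probe point */
};

/* Runs just before the probed instruction is single-stepped. */
static int sample_kp_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("kprobe pre: hit at %p\n", p->addr);
	return 0;	/* 0: continue with normal single-stepping */
}

/* Runs after the probed instruction has been single-stepped. */
static void sample_kp_post(struct kprobe *p, struct pt_regs *regs,
			   unsigned long flags)
{
	pr_info("kprobe post: done at %p\n", p->addr);
}

static int __init sample_kp_init(void)
{
	sample_kp.pre_handler = sample_kp_pre;
	sample_kp.post_handler = sample_kp_post;
	return register_kprobe(&sample_kp);	/* 0 on success */
}

static void __exit sample_kp_exit(void)
{
	unregister_kprobe(&sample_kp);
}

module_init(sample_kp_init);
module_exit(sample_kp_exit);
MODULE_LICENSE("GPL");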
Linus Torvalds1da177e2005-04-16 15:20:36 -07001382
Masami Hiramatsu6f0f1dd2010-12-03 18:53:57 +09001383/* Check if all probes on the aggrprobe are disabled */
1384static int __kprobes aggr_kprobe_disabled(struct kprobe *ap)
1385{
1386 struct kprobe *kp;
1387
1388 list_for_each_entry_rcu(kp, &ap->list, list)
1389 if (!kprobe_disabled(kp))
1390 /*
1391 * There is an active probe on the list.
1392 * We can't disable this ap.
1393 */
1394 return 0;
1395
1396 return 1;
1397}
1398
1399/* Disable one kprobe: Must be called with kprobe_mutex held */
1400static struct kprobe *__kprobes __disable_kprobe(struct kprobe *p)
1401{
1402 struct kprobe *orig_p;
1403
1404 /* Get an original kprobe for return */
1405 orig_p = __get_valid_kprobe(p);
1406 if (unlikely(orig_p == NULL))
1407 return NULL;
1408
1409 if (!kprobe_disabled(p)) {
1410 /* Disable probe if it is a child probe */
1411 if (p != orig_p)
1412 p->flags |= KPROBE_FLAG_DISABLED;
1413
1414 /* Try to disarm and disable this/parent probe */
1415 if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
1416 disarm_kprobe(orig_p);
1417 orig_p->flags |= KPROBE_FLAG_DISABLED;
1418 }
1419 }
1420
1421 return orig_p;
1422}
1423
Masami Hiramatsu98616682008-04-28 02:14:28 -07001424/*
1425 * Unregister a kprobe without a scheduler synchronization.
1426 */
1427static int __kprobes __unregister_kprobe_top(struct kprobe *p)
Keshavamurthy Anil Sdf019b12006-01-11 12:17:41 -08001428{
Masami Hiramatsu6d8e40a2010-12-03 18:53:50 +09001429 struct kprobe *ap, *list_p;
Ananth N Mavinakayanahalli64f562c2005-05-05 16:15:42 -07001430
Masami Hiramatsu6f0f1dd2010-12-03 18:53:57 +09001431 /* Disable kprobe. This will disarm it if needed. */
1432 ap = __disable_kprobe(p);
Masami Hiramatsu6d8e40a2010-12-03 18:53:50 +09001433 if (ap == NULL)
Masami Hiramatsu98616682008-04-28 02:14:28 -07001434 return -EINVAL;
1435
Masami Hiramatsu6f0f1dd2010-12-03 18:53:57 +09001436 if (ap == p)
Ananth N Mavinakayanahallibf8f6e52007-05-08 00:34:16 -07001437 /*
Masami Hiramatsu6f0f1dd2010-12-03 18:53:57 +09001438		 * This probe is an independent (and non-optimized) kprobe
1439		 * (not an aggrprobe). Remove it from the hash list.
Ananth N Mavinakayanahallibf8f6e52007-05-08 00:34:16 -07001440 */
Masami Hiramatsu6f0f1dd2010-12-03 18:53:57 +09001441 goto disarmed;
1442
1443	/* The code below expects this probe to be an aggrprobe */
1444 WARN_ON(!kprobe_aggrprobe(ap));
1445
Masami Hiramatsu6274de42010-12-03 18:54:09 +09001446 if (list_is_singular(&ap->list) && kprobe_disarmed(ap))
1447 /*
1448		 * !disarmed can happen if the probe is still under delayed
1449		 * unoptimization.
1450 */
Masami Hiramatsu6f0f1dd2010-12-03 18:53:57 +09001451 goto disarmed;
1452 else {
1453		/* If the probe being disabled has special handlers, update the aggrprobe */
Masami Hiramatsue8386a02009-01-06 14:41:52 -08001454 if (p->break_handler && !kprobe_gone(p))
Masami Hiramatsu6d8e40a2010-12-03 18:53:50 +09001455 ap->break_handler = NULL;
Masami Hiramatsue8386a02009-01-06 14:41:52 -08001456 if (p->post_handler && !kprobe_gone(p)) {
Masami Hiramatsu6d8e40a2010-12-03 18:53:50 +09001457 list_for_each_entry_rcu(list_p, &ap->list, list) {
Masami Hiramatsu98616682008-04-28 02:14:28 -07001458 if ((list_p != p) && (list_p->post_handler))
1459 goto noclean;
1460 }
Masami Hiramatsu6d8e40a2010-12-03 18:53:50 +09001461 ap->post_handler = NULL;
Masami Hiramatsu98616682008-04-28 02:14:28 -07001462 }
1463noclean:
Masami Hiramatsu6f0f1dd2010-12-03 18:53:57 +09001464 /*
1465 * Remove from the aggrprobe: this path will do nothing in
1466 * __unregister_kprobe_bottom().
1467 */
Anil S Keshavamurthy49a2a1b2006-01-09 20:52:43 -08001468 list_del_rcu(&p->list);
Masami Hiramatsu6f0f1dd2010-12-03 18:53:57 +09001469 if (!kprobe_disabled(ap) && !kprobes_all_disarmed)
1470 /*
1471 * Try to optimize this probe again, because post
1472 * handler may have been changed.
1473 */
1474 optimize_kprobe(ap);
Anil S Keshavamurthy49a2a1b2006-01-09 20:52:43 -08001475 }
Masami Hiramatsu98616682008-04-28 02:14:28 -07001476 return 0;
Masami Hiramatsu6f0f1dd2010-12-03 18:53:57 +09001477
1478disarmed:
Masami Hiramatsu6274de42010-12-03 18:54:09 +09001479 BUG_ON(!kprobe_disarmed(ap));
Masami Hiramatsu6f0f1dd2010-12-03 18:53:57 +09001480 hlist_del_rcu(&ap->hlist);
1481 return 0;
Masami Hiramatsu98616682008-04-28 02:14:28 -07001482}
Mao, Bibob3e55c72005-12-12 00:37:00 -08001483
Masami Hiramatsu98616682008-04-28 02:14:28 -07001484static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
1485{
Masami Hiramatsu6d8e40a2010-12-03 18:53:50 +09001486 struct kprobe *ap;
Mao, Bibob3e55c72005-12-12 00:37:00 -08001487
Masami Hiramatsue8386a02009-01-06 14:41:52 -08001488 if (list_empty(&p->list))
Masami Hiramatsu6274de42010-12-03 18:54:09 +09001489 /* This is an independent kprobe */
Ananth N Mavinakayanahalli0498b632006-01-09 20:52:46 -08001490 arch_remove_kprobe(p);
Masami Hiramatsue8386a02009-01-06 14:41:52 -08001491 else if (list_is_singular(&p->list)) {
Masami Hiramatsu6274de42010-12-03 18:54:09 +09001492 /* This is the last child of an aggrprobe */
Masami Hiramatsu6d8e40a2010-12-03 18:53:50 +09001493 ap = list_entry(p->list.next, struct kprobe, list);
Masami Hiramatsue8386a02009-01-06 14:41:52 -08001494 list_del(&p->list);
Masami Hiramatsu6d8e40a2010-12-03 18:53:50 +09001495 free_aggr_kprobe(ap);
Anil S Keshavamurthy49a2a1b2006-01-09 20:52:43 -08001496 }
Masami Hiramatsu6274de42010-12-03 18:54:09 +09001497 /* Otherwise, do nothing. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001498}
1499
Masami Hiramatsu49ad2fd2009-01-06 14:41:53 -08001500int __kprobes register_kprobes(struct kprobe **kps, int num)
Masami Hiramatsu98616682008-04-28 02:14:28 -07001501{
1502 int i, ret = 0;
1503
1504 if (num <= 0)
1505 return -EINVAL;
1506 for (i = 0; i < num; i++) {
Masami Hiramatsu49ad2fd2009-01-06 14:41:53 -08001507 ret = register_kprobe(kps[i]);
Masami Hiramatsu67dddaa2008-06-12 15:21:35 -07001508 if (ret < 0) {
1509 if (i > 0)
1510 unregister_kprobes(kps, i);
Masami Hiramatsu98616682008-04-28 02:14:28 -07001511 break;
1512 }
1513 }
1514 return ret;
1515}
Masami Hiramatsu99081ab2009-04-06 19:00:59 -07001516EXPORT_SYMBOL_GPL(register_kprobes);
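
/*
 * Illustrative sketch (not part of this file): batch registration with
 * register_kprobes().  If any probe in the array fails to register, the
 * probes registered so far are unregistered again and the error is
 * returned.  The probed symbols and the handler are assumptions.
 */
#include <linux/module.h>
#include <linux/kprobes.h>

static int batch_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("kprobe hit at %pS\n", p->addr);
	return 0;
}

static struct kprobe batch_kp1 = { .symbol_name = "do_fork", .pre_handler = batch_pre };
static struct kprobe batch_kp2 = { .symbol_name = "do_exit", .pre_handler = batch_pre };
static struct kprobe *batch_kps[] = { &batch_kp1, &batch_kp2 };

static int __init batch_init(void)
{
	return register_kprobes(batch_kps, ARRAY_SIZE(batch_kps));
}

static void __exit batch_exit(void)
{
	unregister_kprobes(batch_kps, ARRAY_SIZE(batch_kps));
}

module_init(batch_init);
module_exit(batch_exit);
MODULE_LICENSE("GPL");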
Masami Hiramatsu98616682008-04-28 02:14:28 -07001517
Masami Hiramatsu98616682008-04-28 02:14:28 -07001518void __kprobes unregister_kprobe(struct kprobe *p)
1519{
1520 unregister_kprobes(&p, 1);
1521}
Masami Hiramatsu99081ab2009-04-06 19:00:59 -07001522EXPORT_SYMBOL_GPL(unregister_kprobe);
Masami Hiramatsu98616682008-04-28 02:14:28 -07001523
Masami Hiramatsu98616682008-04-28 02:14:28 -07001524void __kprobes unregister_kprobes(struct kprobe **kps, int num)
1525{
1526 int i;
1527
1528 if (num <= 0)
1529 return;
1530 mutex_lock(&kprobe_mutex);
1531 for (i = 0; i < num; i++)
1532 if (__unregister_kprobe_top(kps[i]) < 0)
1533 kps[i]->addr = NULL;
1534 mutex_unlock(&kprobe_mutex);
1535
1536 synchronize_sched();
1537 for (i = 0; i < num; i++)
1538 if (kps[i]->addr)
1539 __unregister_kprobe_bottom(kps[i]);
1540}
Masami Hiramatsu99081ab2009-04-06 19:00:59 -07001541EXPORT_SYMBOL_GPL(unregister_kprobes);
Masami Hiramatsu98616682008-04-28 02:14:28 -07001542
Linus Torvalds1da177e2005-04-16 15:20:36 -07001543static struct notifier_block kprobe_exceptions_nb = {
1544 .notifier_call = kprobe_exceptions_notify,
Anil S Keshavamurthy3d5631e2006-06-26 00:25:28 -07001545 .priority = 0x7fffffff /* we need to be notified first */
1546};
1547
Michael Ellerman3d7e3382007-07-19 01:48:11 -07001548unsigned long __weak arch_deref_entry_point(void *entry)
1549{
1550 return (unsigned long)entry;
1551}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001552
Masami Hiramatsu49ad2fd2009-01-06 14:41:53 -08001553int __kprobes register_jprobes(struct jprobe **jps, int num)
Masami Hiramatsu26b31c12008-04-28 02:14:29 -07001554{
1555 struct jprobe *jp;
1556 int ret = 0, i;
1557
1558 if (num <= 0)
1559 return -EINVAL;
1560 for (i = 0; i < num; i++) {
Namhyung Kim05662bd2010-09-15 10:04:27 +09001561 unsigned long addr, offset;
Masami Hiramatsu26b31c12008-04-28 02:14:29 -07001562 jp = jps[i];
1563 addr = arch_deref_entry_point(jp->entry);
1564
Namhyung Kim05662bd2010-09-15 10:04:27 +09001565 /* Verify probepoint is a function entry point */
1566 if (kallsyms_lookup_size_offset(addr, NULL, &offset) &&
1567 offset == 0) {
1568 jp->kp.pre_handler = setjmp_pre_handler;
1569 jp->kp.break_handler = longjmp_break_handler;
1570 ret = register_kprobe(&jp->kp);
1571 } else
1572 ret = -EINVAL;
Namhyung Kimedbaadb2010-09-15 10:04:26 +09001573
Masami Hiramatsu67dddaa2008-06-12 15:21:35 -07001574 if (ret < 0) {
1575 if (i > 0)
1576 unregister_jprobes(jps, i);
Masami Hiramatsu26b31c12008-04-28 02:14:29 -07001577 break;
1578 }
1579 }
1580 return ret;
1581}
Masami Hiramatsu99081ab2009-04-06 19:00:59 -07001582EXPORT_SYMBOL_GPL(register_jprobes);
Masami Hiramatsu26b31c12008-04-28 02:14:29 -07001583
Prasanna S Panchamukhid0aaff92005-09-06 15:19:26 -07001584int __kprobes register_jprobe(struct jprobe *jp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001585{
Masami Hiramatsu49ad2fd2009-01-06 14:41:53 -08001586 return register_jprobes(&jp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001587}
Masami Hiramatsu99081ab2009-04-06 19:00:59 -07001588EXPORT_SYMBOL_GPL(register_jprobe);
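
/*
 * Illustrative usage (a minimal sketch, not part of this file): a separate
 * module registering a jprobe with register_jprobe().  The handler must
 * mirror the probed function's prototype exactly and must end with
 * jprobe_return().  The probed symbol ("do_fork") and its prototype are
 * assumptions matching kernels of this vintage; see samples/kprobes/ for
 * the maintained sample.
 */
#include <linux/module.h>
#include <linux/kprobes.h>

/* Mirror of do_fork(): called with the same argument values. */
static long jdo_fork(unsigned long clone_flags, unsigned long stack_start,
		     struct pt_regs *regs, unsigned long stack_size,
		     int __user *parent_tidptr, int __user *child_tidptr)
{
	pr_info("jprobe: do_fork(clone_flags=0x%lx)\n", clone_flags);
	jprobe_return();	/* mandatory: returns control to the probed code */
	return 0;		/* never reached */
}

static struct jprobe sample_jp = {
	.entry	= jdo_fork,
	.kp	= { .symbol_name = "do_fork" },
};

static int __init sample_jp_init(void)
{
	return register_jprobe(&sample_jp);
}

static void __exit sample_jp_exit(void)
{
	unregister_jprobe(&sample_jp);
}

module_init(sample_jp_init);
module_exit(sample_jp_exit);
MODULE_LICENSE("GPL");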
Linus Torvalds1da177e2005-04-16 15:20:36 -07001589
Prasanna S Panchamukhid0aaff92005-09-06 15:19:26 -07001590void __kprobes unregister_jprobe(struct jprobe *jp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001591{
Masami Hiramatsu26b31c12008-04-28 02:14:29 -07001592 unregister_jprobes(&jp, 1);
1593}
Masami Hiramatsu99081ab2009-04-06 19:00:59 -07001594EXPORT_SYMBOL_GPL(unregister_jprobe);
Masami Hiramatsu26b31c12008-04-28 02:14:29 -07001595
Masami Hiramatsu26b31c12008-04-28 02:14:29 -07001596void __kprobes unregister_jprobes(struct jprobe **jps, int num)
1597{
1598 int i;
1599
1600 if (num <= 0)
1601 return;
1602 mutex_lock(&kprobe_mutex);
1603 for (i = 0; i < num; i++)
1604 if (__unregister_kprobe_top(&jps[i]->kp) < 0)
1605 jps[i]->kp.addr = NULL;
1606 mutex_unlock(&kprobe_mutex);
1607
1608 synchronize_sched();
1609 for (i = 0; i < num; i++) {
1610 if (jps[i]->kp.addr)
1611 __unregister_kprobe_bottom(&jps[i]->kp);
1612 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001613}
Masami Hiramatsu99081ab2009-04-06 19:00:59 -07001614EXPORT_SYMBOL_GPL(unregister_jprobes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001615
Ananth N Mavinakayanahalli9edddaa2008-03-04 14:28:37 -08001616#ifdef CONFIG_KRETPROBES
Adrian Bunke65cefe2006-02-03 03:03:42 -08001617/*
1618 * This kprobe pre_handler is registered with every kretprobe. When the
1619 * probe hits, it sets up the return probe.
1620 */
1621static int __kprobes pre_handler_kretprobe(struct kprobe *p,
1622 struct pt_regs *regs)
1623{
1624 struct kretprobe *rp = container_of(p, struct kretprobe, kp);
Srinivasa D Sef53d9c2008-07-25 01:46:04 -07001625 unsigned long hash, flags = 0;
1626 struct kretprobe_instance *ri;
Adrian Bunke65cefe2006-02-03 03:03:42 -08001627
1628	/* TODO: consider swapping the RA only after the last pre_handler has fired */
Srinivasa D Sef53d9c2008-07-25 01:46:04 -07001629 hash = hash_ptr(current, KPROBE_HASH_BITS);
1630 spin_lock_irqsave(&rp->lock, flags);
Christoph Hellwig4c4308c2007-05-08 00:34:14 -07001631 if (!hlist_empty(&rp->free_instances)) {
Christoph Hellwig4c4308c2007-05-08 00:34:14 -07001632 ri = hlist_entry(rp->free_instances.first,
Srinivasa D Sef53d9c2008-07-25 01:46:04 -07001633 struct kretprobe_instance, hlist);
1634 hlist_del(&ri->hlist);
1635 spin_unlock_irqrestore(&rp->lock, flags);
1636
Christoph Hellwig4c4308c2007-05-08 00:34:14 -07001637 ri->rp = rp;
1638 ri->task = current;
Abhishek Sagarf47cd9b2008-02-06 01:38:22 -08001639
Ananth N Mavinakayanahallif02b8622009-03-18 17:06:21 +05301640 if (rp->entry_handler && rp->entry_handler(ri, regs))
Abhishek Sagarf47cd9b2008-02-06 01:38:22 -08001641 return 0;
Abhishek Sagarf47cd9b2008-02-06 01:38:22 -08001642
Christoph Hellwig4c4308c2007-05-08 00:34:14 -07001643 arch_prepare_kretprobe(ri, regs);
1644
1645 /* XXX(hch): why is there no hlist_move_head? */
Srinivasa D Sef53d9c2008-07-25 01:46:04 -07001646 INIT_HLIST_NODE(&ri->hlist);
1647 kretprobe_table_lock(hash, &flags);
1648 hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
1649 kretprobe_table_unlock(hash, &flags);
1650 } else {
Christoph Hellwig4c4308c2007-05-08 00:34:14 -07001651 rp->nmissed++;
Srinivasa D Sef53d9c2008-07-25 01:46:04 -07001652 spin_unlock_irqrestore(&rp->lock, flags);
1653 }
Adrian Bunke65cefe2006-02-03 03:03:42 -08001654 return 0;
1655}
1656
Masami Hiramatsu49ad2fd2009-01-06 14:41:53 -08001657int __kprobes register_kretprobe(struct kretprobe *rp)
Hien Nguyenb94cce92005-06-23 00:09:19 -07001658{
1659 int ret = 0;
1660 struct kretprobe_instance *inst;
1661 int i;
Masami Hiramatsub2a5cd62008-03-04 14:29:44 -08001662 void *addr;
Masami Hiramatsuf438d912007-10-16 01:27:49 -07001663
1664 if (kretprobe_blacklist_size) {
Masami Hiramatsub2a5cd62008-03-04 14:29:44 -08001665 addr = kprobe_addr(&rp->kp);
1666 if (!addr)
1667 return -EINVAL;
Masami Hiramatsuf438d912007-10-16 01:27:49 -07001668
1669 for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
1670 if (kretprobe_blacklist[i].addr == addr)
1671 return -EINVAL;
1672 }
1673 }
Hien Nguyenb94cce92005-06-23 00:09:19 -07001674
1675 rp->kp.pre_handler = pre_handler_kretprobe;
Ananth N Mavinakayanahalli7522a842006-04-20 02:43:11 -07001676 rp->kp.post_handler = NULL;
1677 rp->kp.fault_handler = NULL;
1678 rp->kp.break_handler = NULL;
Hien Nguyenb94cce92005-06-23 00:09:19 -07001679
1680 /* Pre-allocate memory for max kretprobe instances */
1681 if (rp->maxactive <= 0) {
1682#ifdef CONFIG_PREEMPT
Heiko Carstensc2ef6662009-12-21 13:02:24 +01001683 rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
Hien Nguyenb94cce92005-06-23 00:09:19 -07001684#else
Ananth N Mavinakayanahalli4dae5602009-10-30 19:23:10 +05301685 rp->maxactive = num_possible_cpus();
Hien Nguyenb94cce92005-06-23 00:09:19 -07001686#endif
1687 }
Srinivasa D Sef53d9c2008-07-25 01:46:04 -07001688 spin_lock_init(&rp->lock);
Hien Nguyenb94cce92005-06-23 00:09:19 -07001689 INIT_HLIST_HEAD(&rp->free_instances);
1690 for (i = 0; i < rp->maxactive; i++) {
Abhishek Sagarf47cd9b2008-02-06 01:38:22 -08001691 inst = kmalloc(sizeof(struct kretprobe_instance) +
1692 rp->data_size, GFP_KERNEL);
Hien Nguyenb94cce92005-06-23 00:09:19 -07001693 if (inst == NULL) {
1694 free_rp_inst(rp);
1695 return -ENOMEM;
1696 }
Srinivasa D Sef53d9c2008-07-25 01:46:04 -07001697 INIT_HLIST_NODE(&inst->hlist);
1698 hlist_add_head(&inst->hlist, &rp->free_instances);
Hien Nguyenb94cce92005-06-23 00:09:19 -07001699 }
1700
1701 rp->nmissed = 0;
1702 /* Establish function entry probe point */
Masami Hiramatsu49ad2fd2009-01-06 14:41:53 -08001703 ret = register_kprobe(&rp->kp);
Masami Hiramatsu4a296e02008-04-28 02:14:29 -07001704 if (ret != 0)
Hien Nguyenb94cce92005-06-23 00:09:19 -07001705 free_rp_inst(rp);
1706 return ret;
1707}
Masami Hiramatsu99081ab2009-04-06 19:00:59 -07001708EXPORT_SYMBOL_GPL(register_kretprobe);
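
/*
 * Illustrative usage (a minimal sketch, not part of this file): a separate
 * module timing a function with register_kretprobe().  entry_handler runs
 * at function entry and may stash per-instance data in ri->data (sized by
 * .data_size); handler runs at function return.  The probed symbol
 * ("do_fork") and the struct/handler names are assumptions for the
 * example; see samples/kprobes/ for the maintained sample.
 */
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/ktime.h>
#include <linux/sched.h>

struct rp_data {
	ktime_t entry_stamp;
};

static int rp_entry(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct rp_data *data = (struct rp_data *)ri->data;

	if (!current->mm)
		return 1;	/* skip kernel threads */
	data->entry_stamp = ktime_get();
	return 0;
}

static int rp_ret(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct rp_data *data = (struct rp_data *)ri->data;
	s64 delta = ktime_to_ns(ktime_sub(ktime_get(), data->entry_stamp));

	pr_info("do_fork returned %d; took %lld ns\n",
		(int)regs_return_value(regs), (long long)delta);
	return 0;
}

static struct kretprobe sample_rp = {
	.kp.symbol_name	= "do_fork",	/* assumed probe point */
	.entry_handler	= rp_entry,
	.handler	= rp_ret,
	.data_size	= sizeof(struct rp_data),
	.maxactive	= 20,		/* instances to pre-allocate */
};

static int __init sample_rp_init(void)
{
	return register_kretprobe(&sample_rp);
}

static void __exit sample_rp_exit(void)
{
	unregister_kretprobe(&sample_rp);
	pr_info("missed %d probe instances\n", sample_rp.nmissed);
}

module_init(sample_rp_init);
module_exit(sample_rp_exit);
MODULE_LICENSE("GPL");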
Hien Nguyenb94cce92005-06-23 00:09:19 -07001709
Masami Hiramatsu49ad2fd2009-01-06 14:41:53 -08001710int __kprobes register_kretprobes(struct kretprobe **rps, int num)
Masami Hiramatsu4a296e02008-04-28 02:14:29 -07001711{
1712 int ret = 0, i;
1713
1714 if (num <= 0)
1715 return -EINVAL;
1716 for (i = 0; i < num; i++) {
Masami Hiramatsu49ad2fd2009-01-06 14:41:53 -08001717 ret = register_kretprobe(rps[i]);
Masami Hiramatsu67dddaa2008-06-12 15:21:35 -07001718 if (ret < 0) {
1719 if (i > 0)
1720 unregister_kretprobes(rps, i);
Masami Hiramatsu4a296e02008-04-28 02:14:29 -07001721 break;
1722 }
1723 }
1724 return ret;
1725}
Masami Hiramatsu99081ab2009-04-06 19:00:59 -07001726EXPORT_SYMBOL_GPL(register_kretprobes);
Masami Hiramatsu4a296e02008-04-28 02:14:29 -07001727
Masami Hiramatsu4a296e02008-04-28 02:14:29 -07001728void __kprobes unregister_kretprobe(struct kretprobe *rp)
1729{
1730 unregister_kretprobes(&rp, 1);
1731}
Masami Hiramatsu99081ab2009-04-06 19:00:59 -07001732EXPORT_SYMBOL_GPL(unregister_kretprobe);
Masami Hiramatsu4a296e02008-04-28 02:14:29 -07001733
Masami Hiramatsu4a296e02008-04-28 02:14:29 -07001734void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
1735{
1736 int i;
1737
1738 if (num <= 0)
1739 return;
1740 mutex_lock(&kprobe_mutex);
1741 for (i = 0; i < num; i++)
1742 if (__unregister_kprobe_top(&rps[i]->kp) < 0)
1743 rps[i]->kp.addr = NULL;
1744 mutex_unlock(&kprobe_mutex);
1745
1746 synchronize_sched();
1747 for (i = 0; i < num; i++) {
1748 if (rps[i]->kp.addr) {
1749 __unregister_kprobe_bottom(&rps[i]->kp);
1750 cleanup_rp_inst(rps[i]);
1751 }
1752 }
1753}
Masami Hiramatsu99081ab2009-04-06 19:00:59 -07001754EXPORT_SYMBOL_GPL(unregister_kretprobes);
Masami Hiramatsu4a296e02008-04-28 02:14:29 -07001755
Ananth N Mavinakayanahalli9edddaa2008-03-04 14:28:37 -08001756#else /* CONFIG_KRETPROBES */
Prasanna S Panchamukhid0aaff92005-09-06 15:19:26 -07001757int __kprobes register_kretprobe(struct kretprobe *rp)
Hien Nguyenb94cce92005-06-23 00:09:19 -07001758{
1759 return -ENOSYS;
1760}
Masami Hiramatsu99081ab2009-04-06 19:00:59 -07001761EXPORT_SYMBOL_GPL(register_kretprobe);
Hien Nguyenb94cce92005-06-23 00:09:19 -07001762
Masami Hiramatsu4a296e02008-04-28 02:14:29 -07001763int __kprobes register_kretprobes(struct kretprobe **rps, int num)
1764{
1765 return -ENOSYS;
1766}
Masami Hiramatsu99081ab2009-04-06 19:00:59 -07001767EXPORT_SYMBOL_GPL(register_kretprobes);
1768
Masami Hiramatsu4a296e02008-04-28 02:14:29 -07001769void __kprobes unregister_kretprobe(struct kretprobe *rp)
1770{
1771}
Masami Hiramatsu99081ab2009-04-06 19:00:59 -07001772EXPORT_SYMBOL_GPL(unregister_kretprobe);
Masami Hiramatsu4a296e02008-04-28 02:14:29 -07001773
1774void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
1775{
1776}
Masami Hiramatsu99081ab2009-04-06 19:00:59 -07001777EXPORT_SYMBOL_GPL(unregister_kretprobes);
Masami Hiramatsu4a296e02008-04-28 02:14:29 -07001778
Srinivasa Ds346fd592007-02-20 13:57:54 -08001779static int __kprobes pre_handler_kretprobe(struct kprobe *p,
1780 struct pt_regs *regs)
1781{
1782 return 0;
1783}
Masami Hiramatsu4a296e02008-04-28 02:14:29 -07001784
Ananth N Mavinakayanahalli9edddaa2008-03-04 14:28:37 -08001785#endif /* CONFIG_KRETPROBES */
Hien Nguyenb94cce92005-06-23 00:09:19 -07001786
Masami Hiramatsue8386a02009-01-06 14:41:52 -08001787/* Set the kprobe gone and remove its instruction buffer. */
1788static void __kprobes kill_kprobe(struct kprobe *p)
1789{
1790 struct kprobe *kp;
Masami Hiramatsude5bd882009-04-06 19:01:02 -07001791
Masami Hiramatsue8386a02009-01-06 14:41:52 -08001792 p->flags |= KPROBE_FLAG_GONE;
Masami Hiramatsuafd66252010-02-25 08:34:07 -05001793 if (kprobe_aggrprobe(p)) {
Masami Hiramatsue8386a02009-01-06 14:41:52 -08001794 /*
1795		 * If this is an aggr_kprobe, we have to walk the list of
1796		 * chained probes and mark them all GONE.
1797 */
1798 list_for_each_entry_rcu(kp, &p->list, list)
1799 kp->flags |= KPROBE_FLAG_GONE;
1800 p->post_handler = NULL;
1801 p->break_handler = NULL;
Masami Hiramatsuafd66252010-02-25 08:34:07 -05001802 kill_optimized_kprobe(p);
Masami Hiramatsue8386a02009-01-06 14:41:52 -08001803 }
1804 /*
1805 * Here, we can remove insn_slot safely, because no thread calls
1806 * the original probed function (which will be freed soon) any more.
1807 */
1808 arch_remove_kprobe(p);
1809}
1810
Masami Hiramatsuc0614822010-04-27 18:33:12 -04001811/* Disable one kprobe */
1812int __kprobes disable_kprobe(struct kprobe *kp)
1813{
1814 int ret = 0;
Masami Hiramatsuc0614822010-04-27 18:33:12 -04001815
1816 mutex_lock(&kprobe_mutex);
1817
Masami Hiramatsu6f0f1dd2010-12-03 18:53:57 +09001818 /* Disable this kprobe */
1819 if (__disable_kprobe(kp) == NULL)
Masami Hiramatsuc0614822010-04-27 18:33:12 -04001820 ret = -EINVAL;
Masami Hiramatsuc0614822010-04-27 18:33:12 -04001821
Masami Hiramatsuc0614822010-04-27 18:33:12 -04001822 mutex_unlock(&kprobe_mutex);
1823 return ret;
1824}
1825EXPORT_SYMBOL_GPL(disable_kprobe);
1826
1827/* Enable one kprobe */
1828int __kprobes enable_kprobe(struct kprobe *kp)
1829{
1830 int ret = 0;
1831 struct kprobe *p;
1832
1833 mutex_lock(&kprobe_mutex);
1834
1835 /* Check whether specified probe is valid. */
1836 p = __get_valid_kprobe(kp);
1837 if (unlikely(p == NULL)) {
1838 ret = -EINVAL;
1839 goto out;
1840 }
1841
1842 if (kprobe_gone(kp)) {
1843		/* This kprobe has gone; we can't enable it. */
1844 ret = -EINVAL;
1845 goto out;
1846 }
1847
1848 if (p != kp)
1849 kp->flags &= ~KPROBE_FLAG_DISABLED;
1850
1851 if (!kprobes_all_disarmed && kprobe_disabled(p)) {
1852 p->flags &= ~KPROBE_FLAG_DISABLED;
1853 arm_kprobe(p);
1854 }
1855out:
1856 mutex_unlock(&kprobe_mutex);
1857 return ret;
1858}
1859EXPORT_SYMBOL_GPL(enable_kprobe);
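
/*
 * Illustrative sketch (not part of this file): temporarily muting a probe
 * with disable_kprobe()/enable_kprobe() instead of unregistering it.  The
 * helper name is an assumption, and 'kp' is assumed to be a kprobe that
 * was registered earlier (e.g. as in the register_kprobe() sketch above).
 */
#include <linux/kprobes.h>

static int sample_pause_probe(struct kprobe *kp)
{
	int ret;

	ret = disable_kprobe(kp);	/* disarmed if no other user still needs it */
	if (ret)
		return ret;

	/* ... run code without the probe firing ... */

	return enable_kprobe(kp);	/* re-arm; fails if the probe has gone */
}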
1860
Frederic Weisbecker24851d22009-08-26 23:38:30 +02001861void __kprobes dump_kprobe(struct kprobe *kp)
1862{
1863 printk(KERN_WARNING "Dumping kprobe:\n");
1864 printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n",
1865 kp->symbol_name, kp->addr, kp->offset);
1866}
1867
Masami Hiramatsue8386a02009-01-06 14:41:52 -08001868/* Module notifier call back, checking kprobes on the module */
1869static int __kprobes kprobes_module_callback(struct notifier_block *nb,
1870 unsigned long val, void *data)
1871{
1872 struct module *mod = data;
1873 struct hlist_head *head;
1874 struct hlist_node *node;
1875 struct kprobe *p;
1876 unsigned int i;
Masami Hiramatsuf24659d2009-01-06 14:41:55 -08001877 int checkcore = (val == MODULE_STATE_GOING);
Masami Hiramatsue8386a02009-01-06 14:41:52 -08001878
Masami Hiramatsuf24659d2009-01-06 14:41:55 -08001879 if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
Masami Hiramatsue8386a02009-01-06 14:41:52 -08001880 return NOTIFY_DONE;
1881
1882 /*
Masami Hiramatsuf24659d2009-01-06 14:41:55 -08001883	 * When MODULE_STATE_GOING is notified, both the module's .text and
1884	 * .init.text sections will be freed. When MODULE_STATE_LIVE is
1885	 * notified, only the .init.text section will be freed. We need to
1886	 * disable the kprobes that have been inserted in those sections.
Masami Hiramatsue8386a02009-01-06 14:41:52 -08001887 */
1888 mutex_lock(&kprobe_mutex);
1889 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
1890 head = &kprobe_table[i];
1891 hlist_for_each_entry_rcu(p, node, head, hlist)
Masami Hiramatsuf24659d2009-01-06 14:41:55 -08001892 if (within_module_init((unsigned long)p->addr, mod) ||
1893 (checkcore &&
1894 within_module_core((unsigned long)p->addr, mod))) {
Masami Hiramatsue8386a02009-01-06 14:41:52 -08001895 /*
1896				 * The vaddr this probe is installed at will soon
1897				 * be vfreed, but not synced to disk. Hence,
1898				 * disarming the breakpoint isn't needed.
1899 */
1900 kill_kprobe(p);
1901 }
1902 }
1903 mutex_unlock(&kprobe_mutex);
1904 return NOTIFY_DONE;
1905}
1906
1907static struct notifier_block kprobe_module_nb = {
1908 .notifier_call = kprobes_module_callback,
1909 .priority = 0
1910};
1911
Linus Torvalds1da177e2005-04-16 15:20:36 -07001912static int __init init_kprobes(void)
1913{
1914 int i, err = 0;
Srinivasa Ds3d8d9962008-04-28 02:14:26 -07001915 unsigned long offset = 0, size = 0;
1916 char *modname, namebuf[128];
1917 const char *symbol_name;
1918 void *addr;
1919 struct kprobe_blackpoint *kb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001920
1921 /* FIXME allocate the probe table, currently defined statically */
1922 /* initialize all list heads */
Hien Nguyenb94cce92005-06-23 00:09:19 -07001923 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001924 INIT_HLIST_HEAD(&kprobe_table[i]);
Hien Nguyenb94cce92005-06-23 00:09:19 -07001925 INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
Srinivasa D Sef53d9c2008-07-25 01:46:04 -07001926 spin_lock_init(&(kretprobe_table_locks[i].lock));
Hien Nguyenb94cce92005-06-23 00:09:19 -07001927 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001928
Srinivasa Ds3d8d9962008-04-28 02:14:26 -07001929 /*
1930 * Lookup and populate the kprobe_blacklist.
1931 *
1932 * Unlike the kretprobe blacklist, we'll need to determine
1933 * the range of addresses that belong to the said functions,
1934 * since a kprobe need not necessarily be at the beginning
1935 * of a function.
1936 */
1937 for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
1938 kprobe_lookup_name(kb->name, addr);
1939 if (!addr)
1940 continue;
1941
1942 kb->start_addr = (unsigned long)addr;
1943 symbol_name = kallsyms_lookup(kb->start_addr,
1944 &size, &offset, &modname, namebuf);
1945 if (!symbol_name)
1946 kb->range = 0;
1947 else
1948 kb->range = size;
1949 }
1950
Masami Hiramatsuf438d912007-10-16 01:27:49 -07001951 if (kretprobe_blacklist_size) {
1952 /* lookup the function address from its name */
1953 for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
1954 kprobe_lookup_name(kretprobe_blacklist[i].name,
1955 kretprobe_blacklist[i].addr);
1956 if (!kretprobe_blacklist[i].addr)
1957 printk("kretprobe: lookup failed: %s\n",
1958 kretprobe_blacklist[i].name);
1959 }
1960 }
1961
Masami Hiramatsub2be84d2010-02-25 08:34:15 -05001962#if defined(CONFIG_OPTPROBES)
1963#if defined(__ARCH_WANT_KPROBES_INSN_SLOT)
Masami Hiramatsuafd66252010-02-25 08:34:07 -05001964 /* Init kprobe_optinsn_slots */
1965 kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
1966#endif
Masami Hiramatsub2be84d2010-02-25 08:34:15 -05001967 /* By default, kprobes can be optimized */
1968 kprobes_allow_optimization = true;
1969#endif
Masami Hiramatsuafd66252010-02-25 08:34:07 -05001970
Masami Hiramatsue579abe2009-04-06 19:01:01 -07001971 /* By default, kprobes are armed */
1972 kprobes_all_disarmed = false;
Ananth N Mavinakayanahallibf8f6e52007-05-08 00:34:16 -07001973
Rusty Lynch67729262005-07-05 18:54:50 -07001974 err = arch_init_kprobes();
Rusty Lynch802eae72005-06-27 15:17:08 -07001975 if (!err)
1976 err = register_die_notifier(&kprobe_exceptions_nb);
Masami Hiramatsue8386a02009-01-06 14:41:52 -08001977 if (!err)
1978 err = register_module_notifier(&kprobe_module_nb);
1979
Srinivasa D Sef53d9c2008-07-25 01:46:04 -07001980 kprobes_initialized = (err == 0);
Rusty Lynch802eae72005-06-27 15:17:08 -07001981
Ananth N Mavinakayanahalli8c1c9352008-01-30 13:32:53 +01001982 if (!err)
1983 init_test_probes();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001984 return err;
1985}
1986
Srinivasa Ds346fd592007-02-20 13:57:54 -08001987#ifdef CONFIG_DEBUG_FS
1988static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
Masami Hiramatsuafd66252010-02-25 08:34:07 -05001989 const char *sym, int offset, char *modname, struct kprobe *pp)
Srinivasa Ds346fd592007-02-20 13:57:54 -08001990{
1991 char *kprobe_type;
1992
1993 if (p->pre_handler == pre_handler_kretprobe)
1994 kprobe_type = "r";
1995 else if (p->pre_handler == setjmp_pre_handler)
1996 kprobe_type = "j";
1997 else
1998 kprobe_type = "k";
Masami Hiramatsuafd66252010-02-25 08:34:07 -05001999
Srinivasa Ds346fd592007-02-20 13:57:54 -08002000 if (sym)
Masami Hiramatsuafd66252010-02-25 08:34:07 -05002001 seq_printf(pi, "%p %s %s+0x%x %s ",
Masami Hiramatsude5bd882009-04-06 19:01:02 -07002002 p->addr, kprobe_type, sym, offset,
Masami Hiramatsuafd66252010-02-25 08:34:07 -05002003 (modname ? modname : " "));
Srinivasa Ds346fd592007-02-20 13:57:54 -08002004 else
Masami Hiramatsuafd66252010-02-25 08:34:07 -05002005 seq_printf(pi, "%p %s %p ",
2006 p->addr, kprobe_type, p->addr);
2007
2008 if (!pp)
2009 pp = p;
2010 seq_printf(pi, "%s%s%s\n",
2011 (kprobe_gone(p) ? "[GONE]" : ""),
2012 ((kprobe_disabled(p) && !kprobe_gone(p)) ? "[DISABLED]" : ""),
2013 (kprobe_optimized(pp) ? "[OPTIMIZED]" : ""));
Srinivasa Ds346fd592007-02-20 13:57:54 -08002014}
2015
2016static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
2017{
2018 return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
2019}
2020
2021static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
2022{
2023 (*pos)++;
2024 if (*pos >= KPROBE_TABLE_SIZE)
2025 return NULL;
2026 return pos;
2027}
2028
2029static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
2030{
2031 /* Nothing to do */
2032}
2033
2034static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
2035{
2036 struct hlist_head *head;
2037 struct hlist_node *node;
2038 struct kprobe *p, *kp;
2039 const char *sym = NULL;
2040 unsigned int i = *(loff_t *) v;
Alexey Dobriyanffb45122007-05-08 00:28:41 -07002041 unsigned long offset = 0;
Srinivasa Ds346fd592007-02-20 13:57:54 -08002042 char *modname, namebuf[128];
2043
2044 head = &kprobe_table[i];
2045 preempt_disable();
2046 hlist_for_each_entry_rcu(p, node, head, hlist) {
Alexey Dobriyanffb45122007-05-08 00:28:41 -07002047 sym = kallsyms_lookup((unsigned long)p->addr, NULL,
Srinivasa Ds346fd592007-02-20 13:57:54 -08002048 &offset, &modname, namebuf);
Masami Hiramatsuafd66252010-02-25 08:34:07 -05002049 if (kprobe_aggrprobe(p)) {
Srinivasa Ds346fd592007-02-20 13:57:54 -08002050 list_for_each_entry_rcu(kp, &p->list, list)
Masami Hiramatsuafd66252010-02-25 08:34:07 -05002051 report_probe(pi, kp, sym, offset, modname, p);
Srinivasa Ds346fd592007-02-20 13:57:54 -08002052 } else
Masami Hiramatsuafd66252010-02-25 08:34:07 -05002053 report_probe(pi, p, sym, offset, modname, NULL);
Srinivasa Ds346fd592007-02-20 13:57:54 -08002054 }
2055 preempt_enable();
2056 return 0;
2057}
2058
James Morris88e9d342009-09-22 16:43:43 -07002059static const struct seq_operations kprobes_seq_ops = {
Srinivasa Ds346fd592007-02-20 13:57:54 -08002060 .start = kprobe_seq_start,
2061 .next = kprobe_seq_next,
2062 .stop = kprobe_seq_stop,
2063 .show = show_kprobe_addr
2064};
2065
2066static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
2067{
2068 return seq_open(filp, &kprobes_seq_ops);
2069}
2070
Alexey Dobriyan828c0952009-10-01 15:43:56 -07002071static const struct file_operations debugfs_kprobes_operations = {
Srinivasa Ds346fd592007-02-20 13:57:54 -08002072 .open = kprobes_open,
2073 .read = seq_read,
2074 .llseek = seq_lseek,
2075 .release = seq_release,
2076};
2077
Masami Hiramatsue579abe2009-04-06 19:01:01 -07002078static void __kprobes arm_all_kprobes(void)
Ananth N Mavinakayanahallibf8f6e52007-05-08 00:34:16 -07002079{
2080 struct hlist_head *head;
2081 struct hlist_node *node;
2082 struct kprobe *p;
2083 unsigned int i;
2084
2085 mutex_lock(&kprobe_mutex);
2086
Masami Hiramatsue579abe2009-04-06 19:01:01 -07002087 /* If kprobes are armed, just return */
2088 if (!kprobes_all_disarmed)
Ananth N Mavinakayanahallibf8f6e52007-05-08 00:34:16 -07002089 goto already_enabled;
2090
Masami Hiramatsuafd66252010-02-25 08:34:07 -05002091 /* Arming kprobes doesn't optimize kprobe itself */
Mathieu Desnoyers4460fda2009-03-06 10:36:38 -05002092 mutex_lock(&text_mutex);
Ananth N Mavinakayanahallibf8f6e52007-05-08 00:34:16 -07002093 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2094 head = &kprobe_table[i];
2095 hlist_for_each_entry_rcu(p, node, head, hlist)
Masami Hiramatsude5bd882009-04-06 19:01:02 -07002096 if (!kprobe_disabled(p))
Masami Hiramatsuafd66252010-02-25 08:34:07 -05002097 __arm_kprobe(p);
Ananth N Mavinakayanahallibf8f6e52007-05-08 00:34:16 -07002098 }
Mathieu Desnoyers4460fda2009-03-06 10:36:38 -05002099 mutex_unlock(&text_mutex);
Ananth N Mavinakayanahallibf8f6e52007-05-08 00:34:16 -07002100
Masami Hiramatsue579abe2009-04-06 19:01:01 -07002101 kprobes_all_disarmed = false;
Ananth N Mavinakayanahallibf8f6e52007-05-08 00:34:16 -07002102 printk(KERN_INFO "Kprobes globally enabled\n");
2103
2104already_enabled:
2105 mutex_unlock(&kprobe_mutex);
2106 return;
2107}
2108
Masami Hiramatsue579abe2009-04-06 19:01:01 -07002109static void __kprobes disarm_all_kprobes(void)
Ananth N Mavinakayanahallibf8f6e52007-05-08 00:34:16 -07002110{
2111 struct hlist_head *head;
2112 struct hlist_node *node;
2113 struct kprobe *p;
2114 unsigned int i;
2115
2116 mutex_lock(&kprobe_mutex);
2117
Masami Hiramatsue579abe2009-04-06 19:01:01 -07002118 /* If kprobes are already disarmed, just return */
Masami Hiramatsu6274de42010-12-03 18:54:09 +09002119 if (kprobes_all_disarmed) {
2120 mutex_unlock(&kprobe_mutex);
2121 return;
2122 }
Ananth N Mavinakayanahallibf8f6e52007-05-08 00:34:16 -07002123
Masami Hiramatsue579abe2009-04-06 19:01:01 -07002124 kprobes_all_disarmed = true;
Ananth N Mavinakayanahallibf8f6e52007-05-08 00:34:16 -07002125 printk(KERN_INFO "Kprobes globally disabled\n");
Masami Hiramatsuafd66252010-02-25 08:34:07 -05002126
Mathieu Desnoyers4460fda2009-03-06 10:36:38 -05002127 mutex_lock(&text_mutex);
Ananth N Mavinakayanahallibf8f6e52007-05-08 00:34:16 -07002128 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2129 head = &kprobe_table[i];
2130 hlist_for_each_entry_rcu(p, node, head, hlist) {
Masami Hiramatsude5bd882009-04-06 19:01:02 -07002131 if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
Masami Hiramatsu6274de42010-12-03 18:54:09 +09002132 __disarm_kprobe(p, false);
Ananth N Mavinakayanahallibf8f6e52007-05-08 00:34:16 -07002133 }
2134 }
Mathieu Desnoyers4460fda2009-03-06 10:36:38 -05002135 mutex_unlock(&text_mutex);
Ananth N Mavinakayanahallibf8f6e52007-05-08 00:34:16 -07002136 mutex_unlock(&kprobe_mutex);
Ananth N Mavinakayanahallibf8f6e52007-05-08 00:34:16 -07002137
Masami Hiramatsu6274de42010-12-03 18:54:09 +09002138	/* Wait for the optimizer to finish disarming all kprobes */
2139 wait_for_kprobe_optimizer();
Ananth N Mavinakayanahallibf8f6e52007-05-08 00:34:16 -07002140}
2141
2142/*
2143 * XXX: The debugfs bool file interface doesn't allow for callbacks
2144 * when the bool state is switched. We can reuse that facility when
2145 * available.
2146 */
2147static ssize_t read_enabled_file_bool(struct file *file,
2148 char __user *user_buf, size_t count, loff_t *ppos)
2149{
2150 char buf[3];
2151
Masami Hiramatsue579abe2009-04-06 19:01:01 -07002152 if (!kprobes_all_disarmed)
Ananth N Mavinakayanahallibf8f6e52007-05-08 00:34:16 -07002153 buf[0] = '1';
2154 else
2155 buf[0] = '0';
2156 buf[1] = '\n';
2157 buf[2] = 0x00;
2158 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
2159}
2160
2161static ssize_t write_enabled_file_bool(struct file *file,
2162 const char __user *user_buf, size_t count, loff_t *ppos)
2163{
2164 char buf[32];
2165 int buf_size;
2166
2167 buf_size = min(count, (sizeof(buf)-1));
2168 if (copy_from_user(buf, user_buf, buf_size))
2169 return -EFAULT;
2170
2171 switch (buf[0]) {
2172 case 'y':
2173 case 'Y':
2174 case '1':
Masami Hiramatsue579abe2009-04-06 19:01:01 -07002175 arm_all_kprobes();
Ananth N Mavinakayanahallibf8f6e52007-05-08 00:34:16 -07002176 break;
2177 case 'n':
2178 case 'N':
2179 case '0':
Masami Hiramatsue579abe2009-04-06 19:01:01 -07002180 disarm_all_kprobes();
Ananth N Mavinakayanahallibf8f6e52007-05-08 00:34:16 -07002181 break;
2182 }
2183
2184 return count;
2185}
2186
Alexey Dobriyan828c0952009-10-01 15:43:56 -07002187static const struct file_operations fops_kp = {
Ananth N Mavinakayanahallibf8f6e52007-05-08 00:34:16 -07002188 .read = read_enabled_file_bool,
2189 .write = write_enabled_file_bool,
Arnd Bergmann6038f372010-08-15 18:52:59 +02002190 .llseek = default_llseek,
Ananth N Mavinakayanahallibf8f6e52007-05-08 00:34:16 -07002191};
2192
Srinivasa Ds346fd592007-02-20 13:57:54 -08002193static int __kprobes debugfs_kprobe_init(void)
2194{
2195 struct dentry *dir, *file;
Ananth N Mavinakayanahallibf8f6e52007-05-08 00:34:16 -07002196 unsigned int value = 1;
Srinivasa Ds346fd592007-02-20 13:57:54 -08002197
2198 dir = debugfs_create_dir("kprobes", NULL);
2199 if (!dir)
2200 return -ENOMEM;
2201
Randy Dunlape3869792007-05-08 00:27:01 -07002202 file = debugfs_create_file("list", 0444, dir, NULL,
Srinivasa Ds346fd592007-02-20 13:57:54 -08002203 &debugfs_kprobes_operations);
2204 if (!file) {
2205 debugfs_remove(dir);
2206 return -ENOMEM;
2207 }
2208
Ananth N Mavinakayanahallibf8f6e52007-05-08 00:34:16 -07002209 file = debugfs_create_file("enabled", 0600, dir,
2210 &value, &fops_kp);
2211 if (!file) {
2212 debugfs_remove(dir);
2213 return -ENOMEM;
2214 }
2215
Srinivasa Ds346fd592007-02-20 13:57:54 -08002216 return 0;
2217}
2218
2219late_initcall(debugfs_kprobe_init);
2220#endif /* CONFIG_DEBUG_FS */
2221
2222module_init(init_kprobes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002223
Masami Hiramatsu99081ab2009-04-06 19:00:59 -07002224/* defined in arch/.../kernel/kprobes.c */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002225EXPORT_SYMBOL_GPL(jprobe_return);