/*
 * Copyright (C) 2009 Matt Fleming
 *
 * Based, in part, on kernel/time/clocksource.c.
 *
 * This file provides arbitration code for stack unwinders.
 *
 * Multiple stack unwinders can be available on a system, usually with
 * the most accurate unwinder being the currently active one.
 */
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <asm/unwinder.h>
#include <asm/atomic.h>

/*
 * This is the most basic stack unwinder an architecture can
 * provide. For architectures without reliable frame pointers, e.g.
 * RISC CPUs, it can be implemented by looking through the stack for
 * addresses that lie within the kernel text section.
 *
 * Other CPUs, e.g. x86, can use their frame pointer register to
 * construct more accurate stack traces.
 */
static struct list_head unwinder_list;
static struct unwinder stack_reader = {
	.name = "stack-reader",
	.dump = stack_reader_dump,
	.rating = 50,
	.list = {
		.next = &unwinder_list,
		.prev = &unwinder_list,
	},
};
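
/*
 * Illustrative sketch only: a dump callback for a bare-bones unwinder
 * such as "stack-reader" could walk the stack word by word and report
 * anything that looks like a kernel text address. The real
 * stack_reader_dump() lives in the arch dumpstack code; the version
 * below is hypothetical and assumes the stacktrace_ops ->address()
 * callback takes (data, address, reliable).
 *
 *	static void example_stack_reader_dump(struct task_struct *task,
 *					      struct pt_regs *regs,
 *					      unsigned long *sp,
 *					      const struct stacktrace_ops *ops,
 *					      void *data)
 *	{
 *		unsigned long addr;
 *
 *		while (!kstack_end(sp)) {
 *			addr = *sp++;
 *			if (__kernel_text_address(addr))
 *				ops->address(data, addr, 0);
 *		}
 *	}
 */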

/*
 * "curr_unwinder" points to the stack unwinder currently in use. This
 * is the unwinder with the highest rating.
 *
 * "unwinder_list" is a linked-list of all available unwinders, sorted
 * by rating.
 *
 * All modifications of "curr_unwinder" and "unwinder_list" must be
 * performed whilst holding "unwinder_lock".
 */
static struct unwinder *curr_unwinder = &stack_reader;

static struct list_head unwinder_list = {
	.next = &stack_reader.list,
	.prev = &stack_reader.list,
};

static DEFINE_SPINLOCK(unwinder_lock);

/**
 * select_unwinder - Select the best registered stack unwinder.
 *
 * Private function. Must hold unwinder_lock when called.
 *
 * Select the stack unwinder with the highest rating. This is useful
 * for setting up curr_unwinder. Returns NULL if no unwinders are
 * registered or if the highest-rated unwinder is already in use.
 */
static struct unwinder *select_unwinder(void)
{
	struct unwinder *best;

	if (list_empty(&unwinder_list))
		return NULL;

	best = list_entry(unwinder_list.next, struct unwinder, list);
	if (best == curr_unwinder)
		return NULL;

	return best;
}

/*
 * Enqueue the stack unwinder sorted by rating.
 */
static int unwinder_enqueue(struct unwinder *ops)
{
	struct list_head *tmp, *entry = &unwinder_list;

	list_for_each(tmp, &unwinder_list) {
		struct unwinder *o;

		o = list_entry(tmp, struct unwinder, list);
		if (o == ops)
			return -EBUSY;
		/* Keep track of where to insert */
		if (o->rating >= ops->rating)
			entry = tmp;
	}
	list_add(&ops->list, entry);

	return 0;
}
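
/*
 * For example, with unwinders rated 150 and 50 already queued (in that
 * order), enqueueing one rated 100 lands between them. The list is
 * therefore kept in descending rating order, and unwinder_list.next is
 * always the highest-rated unwinder.
 */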

/**
 * unwinder_register - Install a new stack unwinder
 * @u: unwinder to be registered
 *
 * Install the new stack unwinder on the unwinder list, which is sorted
 * by rating.
 *
 * Returns -EBUSY if the unwinder is already registered, zero otherwise.
 */
int unwinder_register(struct unwinder *u)
{
	struct unwinder *best;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&unwinder_lock, flags);
	ret = unwinder_enqueue(u);
	if (!ret) {
		/*
		 * select_unwinder() returns NULL when the current
		 * unwinder already has the highest rating, in which
		 * case there is nothing to update.
		 */
		best = select_unwinder();
		if (best)
			curr_unwinder = best;
	}
	spin_unlock_irqrestore(&unwinder_lock, flags);

	return ret;
}
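
/*
 * An architecture would typically register a more capable unwinder
 * from an initcall. The sketch below is hypothetical (the names and
 * rating are illustrative, not taken from this file) and shows the
 * intended usage: once registered, curr_unwinder switches to the new
 * unwinder because its rating beats "stack-reader".
 *
 *	static struct unwinder example_dwarf_unwinder = {
 *		.name	= "example-dwarf-unwinder",
 *		.dump	= example_dwarf_unwinder_dump,
 *		.rating	= 150,
 *	};
 *
 *	static int __init example_unwinder_init(void)
 *	{
 *		return unwinder_register(&example_dwarf_unwinder);
 *	}
 *	early_initcall(example_unwinder_init);
 */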

int unwinder_faulted = 0;

/*
 * Unwind the call stack and pass information to the stacktrace_ops
 * functions. Also handle the case where we need to switch to a new
 * stack dumper because the current one faulted unexpectedly.
 */
void unwind_stack(struct task_struct *task, struct pt_regs *regs,
		  unsigned long *sp, const struct stacktrace_ops *ops,
		  void *data)
{
	unsigned long flags;

	/*
	 * The problem with unwinders with high ratings is that they are
	 * inherently more complicated than the simple ones with lower
	 * ratings. We are therefore more likely to fault in the
	 * complicated ones, e.g. hitting BUG()s. If we fault in the
	 * code for the current stack unwinder we try to downgrade to
	 * one with a lower rating.
	 *
	 * Hopefully this will give us a semi-reliable stacktrace so we
	 * can diagnose why curr_unwinder->dump() faulted.
	 */
	if (unwinder_faulted) {
		spin_lock_irqsave(&unwinder_lock, flags);

		/* Make sure no one beat us to changing the unwinder */
		if (unwinder_faulted && !list_is_singular(&unwinder_list)) {
			list_del(&curr_unwinder->list);
			curr_unwinder = select_unwinder();

			unwinder_faulted = 0;
		}

		spin_unlock_irqrestore(&unwinder_lock, flags);
	}

	curr_unwinder->dump(task, regs, sp, ops, data);
}
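
/*
 * A typical caller drives unwind_stack() with a set of stacktrace_ops
 * callbacks, roughly as in the sketch below. Everything here is
 * illustrative: the function and callback names are made up, and it is
 * assumed that stacktrace_ops provides an ->address() callback taking
 * (data, address, reliable) and that any other callbacks the active
 * unwinder uses are filled in as needed.
 *
 *	static void example_trace_address(void *data, unsigned long addr,
 *					  int reliable)
 *	{
 *		printk("%s[<%08lx>] %pS\n",
 *		       reliable ? "" : "? ", addr, (void *)addr);
 *	}
 *
 *	static const struct stacktrace_ops example_trace_ops = {
 *		.address = example_trace_address,
 *	};
 *
 *	void example_show_trace(struct task_struct *task,
 *				unsigned long *sp, struct pt_regs *regs)
 *	{
 *		unwind_stack(task, regs, sp, &example_trace_ops, NULL);
 *	}
 */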

/*
 * Trap handler for UNWINDER_BUG() statements. We must switch to the
 * unwinder with the next highest rating.
 */
BUILD_TRAP_HANDLER(unwinder)
{
	insn_size_t insn;
	TRAP_HANDLER_DECL;

	/* Rewind */
	regs->pc -= instruction_size(ctrl_inw(regs->pc - 4));
	insn = *(insn_size_t *)instruction_pointer(regs);

	/* Switch unwinders when unwind_stack() is called */
	unwinder_faulted = 1;

#ifdef CONFIG_BUG
	handle_BUG(regs);
#endif
}
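
/*
 * Unwinders signal an unrecoverable internal error with UNWINDER_BUG()
 * (or an UNWINDER_BUG_ON()-style wrapper, where provided), which ends
 * up in the trap handler above. A dump implementation might use it
 * along these lines; the sketch is hypothetical, only the sanity
 * checks are shown, and the actual unwinding work is omitted. The trap
 * handler sets unwinder_faulted, so the next unwind_stack() call falls
 * back to a lower-rated unwinder.
 *
 *	static void example_unwinder_dump(struct task_struct *task,
 *					  struct pt_regs *regs,
 *					  unsigned long *sp,
 *					  const struct stacktrace_ops *ops,
 *					  void *data)
 *	{
 *		UNWINDER_BUG_ON(!sp);
 *		UNWINDER_BUG_ON(!ops);
 *	}
 */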