/*
 * Copyright (C) 2009 Matt Fleming <matt@console-pimps.org>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * This is an implementation of a DWARF unwinder. Its main purpose is
 * to generate stacktrace information. Based on the DWARF 3
 * specification from http://www.dwarfstd.org.
 *
 * TODO:
 *	- DWARF64 doesn't work.
 *	- Registers with DWARF_VAL_OFFSET rules aren't handled properly.
 */

/* #define DEBUG */
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/mm.h>
#include <linux/elf.h>
#include <linux/ftrace.h>
#include <asm/dwarf.h>
#include <asm/unwinder.h>
#include <asm/sections.h>
#include <asm/unaligned.h>
#include <asm/stacktrace.h>

/* Reserve enough memory for two stack frames */
#define DWARF_FRAME_MIN_REQ	2
/* ... with 4 registers per frame. */
#define DWARF_REG_MIN_REQ	(DWARF_FRAME_MIN_REQ * 4)

static struct kmem_cache *dwarf_frame_cachep;
static mempool_t *dwarf_frame_pool;

static struct kmem_cache *dwarf_reg_cachep;
static mempool_t *dwarf_reg_pool;

static LIST_HEAD(dwarf_cie_list);
static DEFINE_SPINLOCK(dwarf_cie_lock);

static LIST_HEAD(dwarf_fde_list);
static DEFINE_SPINLOCK(dwarf_fde_lock);

static struct dwarf_cie *cached_cie;

/**
 * dwarf_frame_alloc_reg - allocate memory for a DWARF register
 * @frame: the DWARF frame whose list of registers we insert on
 * @reg_num: the register number
 *
 * Allocate space for, and initialise, a dwarf reg from
 * dwarf_reg_pool and insert it onto the (unsorted) linked-list of
 * dwarf registers for @frame.
 *
 * Return the initialised DWARF reg.
 */
static struct dwarf_reg *dwarf_frame_alloc_reg(struct dwarf_frame *frame,
					       unsigned int reg_num)
{
	struct dwarf_reg *reg;

	reg = mempool_alloc(dwarf_reg_pool, GFP_ATOMIC);
	if (!reg) {
		printk(KERN_WARNING "Unable to allocate a DWARF register\n");
		/*
		 * Let's just bomb hard here, we have no way to
		 * gracefully recover.
		 */
		UNWINDER_BUG();
	}

	reg->number = reg_num;
	reg->addr = 0;
	reg->flags = 0;

	list_add(&reg->link, &frame->reg_list);

	return reg;
}

static void dwarf_frame_free_regs(struct dwarf_frame *frame)
{
	struct dwarf_reg *reg, *n;

	list_for_each_entry_safe(reg, n, &frame->reg_list, link) {
		list_del(&reg->link);
		mempool_free(reg, dwarf_reg_pool);
	}
}

/**
 * dwarf_frame_reg - return a DWARF register
 * @frame: the DWARF frame to search in for @reg_num
 * @reg_num: the register number to search for
 *
 * Lookup and return the dwarf reg @reg_num for this frame. Return
 * NULL if @reg_num is an invalid register number.
 */
static struct dwarf_reg *dwarf_frame_reg(struct dwarf_frame *frame,
					 unsigned int reg_num)
{
	struct dwarf_reg *reg;

	list_for_each_entry(reg, &frame->reg_list, link) {
		if (reg->number == reg_num)
			return reg;
	}

	return NULL;
}

/**
 * dwarf_read_addr - read dwarf data
 * @src: source address of data
 * @dst: destination address to store the data to
 *
 * Read 'n' bytes from @src, where 'n' is the size of an address on
 * the native machine. We have to be careful when reading from @src
 * and writing to @dst because they can be arbitrarily aligned.
 * Return 'n', the number of bytes read.
 */
static inline int dwarf_read_addr(unsigned long *src, unsigned long *dst)
{
	u32 val = get_unaligned(src);
	put_unaligned(val, dst);
	return sizeof(unsigned long *);
}

/**
 * dwarf_read_uleb128 - read unsigned LEB128 data
 * @addr: the address where the ULEB128 data is stored
 * @ret: address to store the result
 *
 * Decode an unsigned LEB128 encoded datum. The algorithm is taken
 * from Appendix C of the DWARF 3 spec. For information on the
 * encodings refer to section "7.6 - Variable Length Data". Return
 * the number of bytes read.
 */
static inline unsigned long dwarf_read_uleb128(char *addr, unsigned int *ret)
{
	unsigned int result;
	unsigned char byte;
	int shift, count;

	result = 0;
	shift = 0;
	count = 0;

	while (1) {
		byte = __raw_readb(addr);
		addr++;
		count++;

		result |= (byte & 0x7f) << shift;
		shift += 7;

		if (!(byte & 0x80))
			break;
	}

	*ret = result;

	return count;
}

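/*
 * A worked example of the decode loop above, using the byte values
 * from section 7.6 of the DWARF 3 spec: the sequence
 *
 *	0xe5 0x8e 0x26
 *
 * decodes as 0x65 | (0x0e << 7) | (0x26 << 14) = 624485, with the
 * high bit of each byte acting as a continuation flag. count is 3.
 */
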
/**
 * dwarf_read_leb128 - read signed LEB128 data
 * @addr: the address of the LEB128 encoded data
 * @ret: address to store the result
 *
 * Decode signed LEB128 data. The algorithm is taken from Appendix
 * C of the DWARF 3 spec. Return the number of bytes read.
 */
static inline unsigned long dwarf_read_leb128(char *addr, int *ret)
{
	unsigned char byte;
	int result, shift;
	int num_bits;
	int count;

	result = 0;
	shift = 0;
	count = 0;

	while (1) {
		byte = __raw_readb(addr);
		addr++;
		result |= (byte & 0x7f) << shift;
		shift += 7;
		count++;

		if (!(byte & 0x80))
			break;
	}

	/* The number of bits in a signed integer. */
	num_bits = 8 * sizeof(result);

	if ((shift < num_bits) && (byte & 0x40))
		result |= (-1 << shift);

	*ret = result;

	return count;
}

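/*
 * A worked example for the signed variant (byte values again from
 * the DWARF 3 spec): the sequence
 *
 *	0x9b 0xf1 0x59
 *
 * accumulates to 0x1b | (0x71 << 7) | (0x59 << 14) = 1472667 with
 * shift == 21. The final byte has bit 6 (0x40) set, so the value is
 * sign-extended to 1472667 | (-1 << 21) = -624485.
 */
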
/**
 * dwarf_read_encoded_value - return the decoded value at @addr
 * @addr: the address of the encoded value
 * @val: where to write the decoded value
 * @encoding: the encoding with which we can decode @addr
 *
 * GCC emits encoded addresses in the .eh_frame FDE entries. Decode
 * the value at @addr using @encoding. The decoded value is written
 * to @val and the number of bytes read is returned.
 */
static int dwarf_read_encoded_value(char *addr, unsigned long *val,
				    char encoding)
{
	unsigned long decoded_addr = 0;
	int count = 0;

	switch (encoding & 0x70) {
	case DW_EH_PE_absptr:
		break;
	case DW_EH_PE_pcrel:
		decoded_addr = (unsigned long)addr;
		break;
	default:
		pr_debug("encoding=0x%x\n", (encoding & 0x70));
		UNWINDER_BUG();
	}

	if ((encoding & 0x07) == 0x00)
		encoding |= DW_EH_PE_udata4;

	switch (encoding & 0x0f) {
	case DW_EH_PE_sdata4:
	case DW_EH_PE_udata4:
		count += 4;
		decoded_addr += get_unaligned((u32 *)addr);
		__raw_writel(decoded_addr, val);
		break;
	default:
		pr_debug("encoding=0x%x\n", encoding);
		UNWINDER_BUG();
	}

	return count;
}

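/*
 * For illustration, the encoding byte splits into an application
 * nibble (masked with 0x70 above) and a format nibble (0x0f). With
 * encoding == DW_EH_PE_pcrel | DW_EH_PE_sdata4 (0x1b), the 4-byte
 * datum at @addr is added to the address of @addr itself to produce
 * an absolute address. GCC typically emits this encoding for the
 * initial_location field of .eh_frame FDEs in PIC builds.
 */
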
/**
 * dwarf_entry_len - return the length of an FDE or CIE
 * @addr: the address of the entry
 * @len: the length of the entry
 *
 * Read the initial_length field of the entry and store the size of
 * the entry in @len. We return the number of bytes read. Return a
 * count of 0 on error.
 */
static inline int dwarf_entry_len(char *addr, unsigned long *len)
{
	u32 initial_len;
	int count;

	initial_len = get_unaligned((u32 *)addr);
	count = 4;

	/*
	 * An initial length field value in the range DW_EXT_LO -
	 * DW_EXT_HI indicates an extension, and should not be
	 * interpreted as a length. The only extension that we currently
	 * understand is the use of DWARF64 addresses.
	 */
	if (initial_len >= DW_EXT_LO && initial_len <= DW_EXT_HI) {
		/*
		 * The 64-bit length field immediately follows the
		 * compulsory 32-bit length field.
		 */
		if (initial_len == DW_EXT_DWARF64) {
			*len = get_unaligned((u64 *)(addr + 4));
			count = 12;
		} else {
			printk(KERN_WARNING "Unknown DWARF extension\n");
			count = 0;
		}
	} else
		*len = initial_len;

	return count;
}

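/*
 * For example, a 32-bit entry beginning with the bytes 0x14 0x00
 * 0x00 0x00 has a length of 0x14 and dwarf_entry_len() returns 4.
 * An initial length of DW_EXT_DWARF64 (0xffffffff in the DWARF 3
 * spec) would instead mean the real length lives in the following 8
 * bytes and 12 is returned, although per the TODO at the top of
 * this file DWARF64 isn't expected to work here.
 */
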
/**
 * dwarf_lookup_cie - locate the CIE
 * @cie_ptr: the address of the CIE to find
 */
static struct dwarf_cie *dwarf_lookup_cie(unsigned long cie_ptr)
{
	struct dwarf_cie *cie;
	unsigned long flags;

	spin_lock_irqsave(&dwarf_cie_lock, flags);

	/*
	 * We've cached the last CIE we looked up because chances are
	 * that the FDE wants this CIE.
	 */
	if (cached_cie && cached_cie->cie_pointer == cie_ptr) {
		cie = cached_cie;
		goto out;
	}

	list_for_each_entry(cie, &dwarf_cie_list, link) {
		if (cie->cie_pointer == cie_ptr) {
			cached_cie = cie;
			break;
		}
	}

	/* Couldn't find the entry in the list. */
	if (&cie->link == &dwarf_cie_list)
		cie = NULL;
out:
	spin_unlock_irqrestore(&dwarf_cie_lock, flags);
	return cie;
}

/**
 * dwarf_lookup_fde - locate the FDE that covers pc
 * @pc: the program counter
 */
struct dwarf_fde *dwarf_lookup_fde(unsigned long pc)
{
	struct dwarf_fde *fde;
	unsigned long flags;

	spin_lock_irqsave(&dwarf_fde_lock, flags);

	list_for_each_entry(fde, &dwarf_fde_list, link) {
		unsigned long start, end;

		start = fde->initial_location;
		end = fde->initial_location + fde->address_range;

		if (pc >= start && pc < end)
			break;
	}

	/* Couldn't find the entry in the list. */
	if (&fde->link == &dwarf_fde_list)
		fde = NULL;

	spin_unlock_irqrestore(&dwarf_fde_lock, flags);

	return fde;
}

/**
 * dwarf_cfa_execute_insns - execute instructions to calculate a CFA
 * @insn_start: address of the first instruction
 * @insn_end: address of the last instruction
 * @cie: the CIE for this function
 * @fde: the FDE for this function
 * @frame: the instructions calculate the CFA for this frame
 * @pc: the program counter of the address we're interested in
 *
 * Execute the Call Frame instruction sequence starting at
 * @insn_start and ending at @insn_end. The instructions describe
 * how to calculate the Canonical Frame Address of a stackframe.
 * Store the results in @frame.
 */
static int dwarf_cfa_execute_insns(unsigned char *insn_start,
				   unsigned char *insn_end,
				   struct dwarf_cie *cie,
				   struct dwarf_fde *fde,
				   struct dwarf_frame *frame,
				   unsigned long pc)
{
	unsigned char insn;
	unsigned char *current_insn;
	unsigned int count, delta, reg, expr_len, offset;
	struct dwarf_reg *regp;

	current_insn = insn_start;

	while (current_insn < insn_end && frame->pc <= pc) {
		insn = __raw_readb(current_insn++);

		/*
		 * Firstly, handle the opcodes that embed their operands
		 * in the instructions.
		 */
		switch (DW_CFA_opcode(insn)) {
		case DW_CFA_advance_loc:
			delta = DW_CFA_operand(insn);
			delta *= cie->code_alignment_factor;
			frame->pc += delta;
			continue;
			/* NOTREACHED */
		case DW_CFA_offset:
			reg = DW_CFA_operand(insn);
			count = dwarf_read_uleb128(current_insn, &offset);
			current_insn += count;
			offset *= cie->data_alignment_factor;
			regp = dwarf_frame_alloc_reg(frame, reg);
			regp->addr = offset;
			regp->flags |= DWARF_REG_OFFSET;
			continue;
			/* NOTREACHED */
		case DW_CFA_restore:
			reg = DW_CFA_operand(insn);
			continue;
			/* NOTREACHED */
		}

		/*
		 * Secondly, handle the opcodes that don't embed their
		 * operands in the instruction.
		 */
		switch (insn) {
		case DW_CFA_nop:
			continue;
		case DW_CFA_advance_loc1:
			delta = *current_insn++;
			frame->pc += delta * cie->code_alignment_factor;
			break;
		case DW_CFA_advance_loc2:
			delta = get_unaligned((u16 *)current_insn);
			current_insn += 2;
			frame->pc += delta * cie->code_alignment_factor;
			break;
		case DW_CFA_advance_loc4:
			delta = get_unaligned((u32 *)current_insn);
			current_insn += 4;
			frame->pc += delta * cie->code_alignment_factor;
			break;
		case DW_CFA_offset_extended:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			count = dwarf_read_uleb128(current_insn, &offset);
			current_insn += count;
			offset *= cie->data_alignment_factor;
			break;
		case DW_CFA_restore_extended:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			break;
		case DW_CFA_undefined:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			regp = dwarf_frame_alloc_reg(frame, reg);
			regp->flags |= DWARF_UNDEFINED;
			break;
		case DW_CFA_def_cfa:
			count = dwarf_read_uleb128(current_insn,
						   &frame->cfa_register);
			current_insn += count;
			count = dwarf_read_uleb128(current_insn,
						   &frame->cfa_offset);
			current_insn += count;

			frame->flags |= DWARF_FRAME_CFA_REG_OFFSET;
			break;
		case DW_CFA_def_cfa_register:
			count = dwarf_read_uleb128(current_insn,
						   &frame->cfa_register);
			current_insn += count;
			frame->flags |= DWARF_FRAME_CFA_REG_OFFSET;
			break;
		case DW_CFA_def_cfa_offset:
			count = dwarf_read_uleb128(current_insn, &offset);
			current_insn += count;
			frame->cfa_offset = offset;
			break;
		case DW_CFA_def_cfa_expression:
			count = dwarf_read_uleb128(current_insn, &expr_len);
			current_insn += count;

			frame->cfa_expr = current_insn;
			frame->cfa_expr_len = expr_len;
			current_insn += expr_len;

			frame->flags |= DWARF_FRAME_CFA_REG_EXP;
			break;
		case DW_CFA_offset_extended_sf:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			count = dwarf_read_leb128(current_insn, &offset);
			current_insn += count;
			offset *= cie->data_alignment_factor;
			regp = dwarf_frame_alloc_reg(frame, reg);
			regp->flags |= DWARF_REG_OFFSET;
			regp->addr = offset;
			break;
		case DW_CFA_val_offset:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			count = dwarf_read_leb128(current_insn, &offset);
			current_insn += count;
			offset *= cie->data_alignment_factor;
			regp = dwarf_frame_alloc_reg(frame, reg);
			regp->flags |= DWARF_VAL_OFFSET;
			regp->addr = offset;
			break;
		case DW_CFA_GNU_args_size:
			count = dwarf_read_uleb128(current_insn, &offset);
			current_insn += count;
			break;
		case DW_CFA_GNU_negative_offset_extended:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			count = dwarf_read_uleb128(current_insn, &offset);
			current_insn += count;
			offset *= cie->data_alignment_factor;

			regp = dwarf_frame_alloc_reg(frame, reg);
			regp->flags |= DWARF_REG_OFFSET;
			regp->addr = -offset;
			break;
		default:
			pr_debug("unhandled DWARF instruction 0x%x\n", insn);
			UNWINDER_BUG();
			break;
		}
	}

	return 0;
}

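/*
 * A sketch of the sort of (hypothetical) instruction stream the
 * interpreter above executes, assuming code_alignment_factor == 2
 * and data_alignment_factor == -4:
 *
 *	DW_CFA_def_cfa: r15 ofs 4	-> cfa = r15 + 4
 *	DW_CFA_advance_loc: 1		-> frame->pc += 1 * 2
 *	DW_CFA_offset: r14, ofs 1	-> r14 saved at cfa + (1 * -4)
 *	DW_CFA_def_cfa_register: r14	-> cfa now derived from r14
 *
 * The while loop stops as soon as frame->pc advances past @pc, so
 * the register rules describe the state at that exact address.
 */
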
/**
 * dwarf_free_frame - free the memory allocated for @frame
 * @frame: the frame to free
 */
void dwarf_free_frame(struct dwarf_frame *frame)
{
	dwarf_frame_free_regs(frame);
	mempool_free(frame, dwarf_frame_pool);
}

extern void ret_from_irq(void);

Matt Fleminged4fe7f2009-10-10 16:03:11 +0100545/**
Matt Flemingc2d474d2009-10-10 16:17:06 +0100546 * dwarf_unwind_stack - unwind the stack
547 *
Matt Flemingbd353862009-08-14 01:58:43 +0900548 * @pc: address of the function to unwind
549 * @prev: struct dwarf_frame of the previous stackframe on the callstack
550 *
551 * Return a struct dwarf_frame representing the most recent frame
552 * on the callstack. Each of the lower (older) stack frames are
553 * linked via the "prev" member.
554 */
Matt Flemingb344e24a2009-08-16 21:54:48 +0100555struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
556 struct dwarf_frame *prev)
Matt Flemingbd353862009-08-14 01:58:43 +0900557{
558 struct dwarf_frame *frame;
559 struct dwarf_cie *cie;
560 struct dwarf_fde *fde;
Matt Flemingfb3f3e72009-08-16 15:44:08 +0100561 struct dwarf_reg *reg;
Matt Flemingbd353862009-08-14 01:58:43 +0900562 unsigned long addr;
Matt Flemingbd353862009-08-14 01:58:43 +0900563
564 /*
Matt Flemingc2d474d2009-10-10 16:17:06 +0100565 * If we're starting at the top of the stack we need get the
566 * contents of a physical register to get the CFA in order to
567 * begin the virtual unwinding of the stack.
Matt Flemingbd353862009-08-14 01:58:43 +0900568 *
Matt Flemingf8264662009-08-13 20:41:31 +0100569 * NOTE: the return address is guaranteed to be setup by the
570 * time this function makes its first function call.
Matt Flemingbd353862009-08-14 01:58:43 +0900571 */
Paul Mundt421b5412009-11-06 17:23:33 +0900572 if (!pc || !prev)
Matt Flemingb9558732009-08-15 23:10:57 +0100573 pc = (unsigned long)current_text_addr();
Matt Flemingbd353862009-08-14 01:58:43 +0900574
Matt Fleming60339fa2009-10-24 18:56:57 +0000575#ifdef CONFIG_FUNCTION_GRAPH_TRACER
576 /*
577 * If our stack has been patched by the function graph tracer
578 * then we might see the address of return_to_handler() where we
579 * expected to find the real return address.
580 */
581 if (pc == (unsigned long)&return_to_handler) {
582 int index = current->curr_ret_stack;
583
584 /*
585 * We currently have no way of tracking how many
586 * return_to_handler()'s we've seen. If there is more
587 * than one patched return address on our stack,
588 * complain loudly.
589 */
590 WARN_ON(index > 0);
591
592 pc = current->ret_stack[index].ret;
593 }
594#endif
595
Matt Flemingfb3f3e72009-08-16 15:44:08 +0100596 frame = mempool_alloc(dwarf_frame_pool, GFP_ATOMIC);
597 if (!frame) {
598 printk(KERN_ERR "Unable to allocate a dwarf frame\n");
Matt Flemingb344e24a2009-08-16 21:54:48 +0100599 UNWINDER_BUG();
Matt Flemingfb3f3e72009-08-16 15:44:08 +0100600 }
Matt Flemingbd353862009-08-14 01:58:43 +0900601
Matt Flemingfb3f3e72009-08-16 15:44:08 +0100602 INIT_LIST_HEAD(&frame->reg_list);
603 frame->flags = 0;
Matt Flemingbd353862009-08-14 01:58:43 +0900604 frame->prev = prev;
Matt Flemingfb3f3e72009-08-16 15:44:08 +0100605 frame->return_addr = 0;
Matt Flemingbd353862009-08-14 01:58:43 +0900606
607 fde = dwarf_lookup_fde(pc);
608 if (!fde) {
609 /*
Matt Flemingc2d474d2009-10-10 16:17:06 +0100610 * This is our normal exit path. There are two reasons
611 * why we might exit here,
Matt Flemingbd353862009-08-14 01:58:43 +0900612 *
613 * a) pc has no asscociated DWARF frame info and so
614 * we don't know how to unwind this frame. This is
615 * usually the case when we're trying to unwind a
616 * frame that was called from some assembly code
617 * that has no DWARF info, e.g. syscalls.
618 *
619 * b) the DEBUG info for pc is bogus. There's
620 * really no way to distinguish this case from the
621 * case above, which sucks because we could print a
622 * warning here.
623 */
Matt Flemingfb3f3e72009-08-16 15:44:08 +0100624 goto bail;
Matt Flemingbd353862009-08-14 01:58:43 +0900625 }
626
627 cie = dwarf_lookup_cie(fde->cie_pointer);
628
629 frame->pc = fde->initial_location;
630
631 /* CIE initial instructions */
632 dwarf_cfa_execute_insns(cie->initial_instructions,
Matt Flemingf8264662009-08-13 20:41:31 +0100633 cie->instructions_end, cie, fde,
Matt Flemingb9558732009-08-15 23:10:57 +0100634 frame, pc);
Matt Flemingbd353862009-08-14 01:58:43 +0900635
636 /* FDE instructions */
637 dwarf_cfa_execute_insns(fde->instructions, fde->end, cie,
Matt Flemingb9558732009-08-15 23:10:57 +0100638 fde, frame, pc);
Matt Flemingbd353862009-08-14 01:58:43 +0900639
640 /* Calculate the CFA */
641 switch (frame->flags) {
642 case DWARF_FRAME_CFA_REG_OFFSET:
643 if (prev) {
Matt Flemingfb3f3e72009-08-16 15:44:08 +0100644 reg = dwarf_frame_reg(prev, frame->cfa_register);
Matt Flemingb344e24a2009-08-16 21:54:48 +0100645 UNWINDER_BUG_ON(!reg);
646 UNWINDER_BUG_ON(reg->flags != DWARF_REG_OFFSET);
Matt Flemingbd353862009-08-14 01:58:43 +0900647
Matt Flemingfb3f3e72009-08-16 15:44:08 +0100648 addr = prev->cfa + reg->addr;
Matt Flemingbd353862009-08-14 01:58:43 +0900649 frame->cfa = __raw_readl(addr);
650
651 } else {
652 /*
Matt Flemingc2d474d2009-10-10 16:17:06 +0100653 * Again, we're starting from the top of the
654 * stack. We need to physically read
655 * the contents of a register in order to get
656 * the Canonical Frame Address for this
Matt Flemingbd353862009-08-14 01:58:43 +0900657 * function.
658 */
659 frame->cfa = dwarf_read_arch_reg(frame->cfa_register);
660 }
661
662 frame->cfa += frame->cfa_offset;
663 break;
664 default:
Matt Flemingb344e24a2009-08-16 21:54:48 +0100665 UNWINDER_BUG();
Matt Flemingbd353862009-08-14 01:58:43 +0900666 }
667
Matt Flemingfb3f3e72009-08-16 15:44:08 +0100668 reg = dwarf_frame_reg(frame, DWARF_ARCH_RA_REG);
Matt Fleming5580e902009-08-20 19:53:49 +0100669
670 /*
671 * If we haven't seen the return address register or the return
672 * address column is undefined then we must assume that this is
673 * the end of the callstack.
674 */
675 if (!reg || reg->flags == DWARF_UNDEFINED)
676 goto bail;
677
Matt Flemingb344e24a2009-08-16 21:54:48 +0100678 UNWINDER_BUG_ON(reg->flags != DWARF_REG_OFFSET);
Matt Flemingbd353862009-08-14 01:58:43 +0900679
Matt Flemingfb3f3e72009-08-16 15:44:08 +0100680 addr = frame->cfa + reg->addr;
Matt Flemingbd353862009-08-14 01:58:43 +0900681 frame->return_addr = __raw_readl(addr);
682
Matt Fleming944a3432010-01-30 17:36:20 +0000683 /*
684 * Ah, the joys of unwinding through interrupts.
685 *
686 * Interrupts are tricky - the DWARF info needs to be _really_
687 * accurate and unfortunately I'm seeing a lot of bogus DWARF
688 * info. For example, I've seen interrupts occur in epilogues
689 * just after the frame pointer (r14) had been restored. The
690 * problem was that the DWARF info claimed that the CFA could be
691 * reached by using the value of the frame pointer before it was
692 * restored.
693 *
694 * So until the compiler can be trusted to produce reliable
695 * DWARF info when it really matters, let's stop unwinding once
696 * we've calculated the function that was interrupted.
697 */
698 if (prev && prev->pc == (unsigned long)ret_from_irq)
699 frame->return_addr = 0;
700
Matt Flemingbd353862009-08-14 01:58:43 +0900701 return frame;
Matt Flemingfb3f3e72009-08-16 15:44:08 +0100702
703bail:
Matt Fleminged4fe7f2009-10-10 16:03:11 +0100704 dwarf_free_frame(frame);
Matt Flemingfb3f3e72009-08-16 15:44:08 +0100705 return NULL;
Matt Flemingbd353862009-08-14 01:58:43 +0900706}
707
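/*
 * For reference, what dwarf_parse_cie() below walks through: a CIE
 * in .eh_frame (per the DWARF 3 spec and the LSB .eh_frame format)
 * consists of
 *
 *	length			(4 bytes, consumed by dwarf_parse_section())
 *	CIE id			(4 bytes, 0 in .eh_frame, also consumed)
 *	version			(1 byte, must be 1 here)
 *	augmentation		(NUL-terminated string, e.g. "zR")
 *	code alignment factor	(ULEB128)
 *	data alignment factor	(SLEB128)
 *	return address column	(1 byte for version 1)
 *	augmentation data	(only if the string starts with 'z')
 *	initial instructions	(the rest of the entry)
 */
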
static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
			   unsigned char *end, struct module *mod)
{
	struct dwarf_cie *cie;
	unsigned long flags;
	int count;

	cie = kzalloc(sizeof(*cie), GFP_KERNEL);
	if (!cie)
		return -ENOMEM;

	cie->length = len;

	/*
	 * Record the address of this CIE within the .eh_frame
	 * section. It allows this CIE to be quickly and easily
	 * looked up from the corresponding FDE.
	 */
	cie->cie_pointer = (unsigned long)entry;

	cie->version = *(char *)p++;
	UNWINDER_BUG_ON(cie->version != 1);

	cie->augmentation = p;
	p += strlen(cie->augmentation) + 1;

	count = dwarf_read_uleb128(p, &cie->code_alignment_factor);
	p += count;

	count = dwarf_read_leb128(p, &cie->data_alignment_factor);
	p += count;

	/*
	 * Which column in the rule table contains the
	 * return address?
	 */
	if (cie->version == 1) {
		cie->return_address_reg = __raw_readb(p);
		p++;
	} else {
		count = dwarf_read_uleb128(p, &cie->return_address_reg);
		p += count;
	}

	if (cie->augmentation[0] == 'z') {
		unsigned int length, count;
		cie->flags |= DWARF_CIE_Z_AUGMENTATION;

		count = dwarf_read_uleb128(p, &length);
		p += count;

		UNWINDER_BUG_ON((unsigned char *)p > end);

		cie->initial_instructions = p + length;
		cie->augmentation++;
	}

	while (*cie->augmentation) {
		/*
		 * "L" indicates a byte showing how the
		 * LSDA pointer is encoded. Skip it.
		 */
		if (*cie->augmentation == 'L') {
			p++;
			cie->augmentation++;
		} else if (*cie->augmentation == 'R') {
			/*
			 * "R" indicates a byte showing
			 * how FDE addresses are
			 * encoded.
			 */
			cie->encoding = *(char *)p++;
			cie->augmentation++;
		} else if (*cie->augmentation == 'P') {
			/*
			 * "P" indicates a personality
			 * routine in the CIE
			 * augmentation.
			 */
			UNWINDER_BUG();
		} else if (*cie->augmentation == 'S') {
			UNWINDER_BUG();
		} else {
			/*
			 * Unknown augmentation. Assume
			 * 'z' augmentation.
			 */
			p = cie->initial_instructions;
			UNWINDER_BUG_ON(!p);
			break;
		}
	}

	cie->initial_instructions = p;
	cie->instructions_end = end;

	cie->mod = mod;

	/* Add to list */
	spin_lock_irqsave(&dwarf_cie_lock, flags);
	list_add_tail(&cie->link, &dwarf_cie_list);
	spin_unlock_irqrestore(&dwarf_cie_lock, flags);

	return 0;
}

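/*
 * Likewise, the FDE layout that dwarf_parse_fde() consumes:
 *
 *	length			(4 bytes, consumed by dwarf_parse_section())
 *	CIE pointer		(4 bytes, a delta back to the owning CIE,
 *				 consumed by our caller as entry_type)
 *	initial location	(encoded as per the CIE's 'R' byte)
 *	address range		(same encoding)
 *	augmentation data	(length + bytes, only for 'z' CIEs)
 *	call frame instructions
 */
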
static int dwarf_parse_fde(void *entry, u32 entry_type,
			   void *start, unsigned long len,
			   unsigned char *end, struct module *mod)
{
	struct dwarf_fde *fde;
	struct dwarf_cie *cie;
	unsigned long flags;
	int count;
	void *p = start;

	fde = kzalloc(sizeof(*fde), GFP_KERNEL);
	if (!fde)
		return -ENOMEM;

	fde->length = len;

	/*
	 * In a .eh_frame section the CIE pointer is the delta
	 * between the address of the CIE pointer field within the
	 * FDE and the start of the CIE, so convert it back into
	 * the address of the CIE.
	 */
	fde->cie_pointer = (unsigned long)(p - entry_type - 4);

	cie = dwarf_lookup_cie(fde->cie_pointer);
	fde->cie = cie;

	if (cie->encoding)
		count = dwarf_read_encoded_value(p, &fde->initial_location,
						 cie->encoding);
	else
		count = dwarf_read_addr(p, &fde->initial_location);

	p += count;

	if (cie->encoding)
		count = dwarf_read_encoded_value(p, &fde->address_range,
						 cie->encoding & 0x0f);
	else
		count = dwarf_read_addr(p, &fde->address_range);

	p += count;

	if (fde->cie->flags & DWARF_CIE_Z_AUGMENTATION) {
		unsigned int length;
		count = dwarf_read_uleb128(p, &length);
		p += count + length;
	}

	/* Call frame instructions. */
	fde->instructions = p;
	fde->end = end;

	fde->mod = mod;

	/* Add to list. */
	spin_lock_irqsave(&dwarf_fde_lock, flags);
	list_add_tail(&fde->link, &dwarf_fde_list);
	spin_unlock_irqrestore(&dwarf_fde_lock, flags);

	return 0;
}

static void dwarf_unwinder_dump(struct task_struct *task,
				struct pt_regs *regs,
				unsigned long *sp,
				const struct stacktrace_ops *ops,
				void *data)
{
	struct dwarf_frame *frame, *_frame;
	unsigned long return_addr;

	_frame = NULL;
	return_addr = 0;

	while (1) {
		frame = dwarf_unwind_stack(return_addr, _frame);

		if (_frame)
			dwarf_free_frame(_frame);

		_frame = frame;

		if (!frame || !frame->return_addr)
			break;

		return_addr = frame->return_addr;
		ops->address(data, return_addr, 1);
	}

	if (frame)
		dwarf_free_frame(frame);
}

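/*
 * The unwinder core selects between registered unwinders by rating,
 * much like clocksources; the 150 below is intended to outrank the
 * basic stack unwinder so that the DWARF unwinder is preferred
 * whenever it has successfully initialised.
 */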
static struct unwinder dwarf_unwinder = {
	.name = "dwarf-unwinder",
	.dump = dwarf_unwinder_dump,
	.rating = 150,
};

static void dwarf_unwinder_cleanup(void)
{
	struct dwarf_cie *cie, *cie_tmp;
	struct dwarf_fde *fde, *fde_tmp;

	/*
	 * Deallocate all the memory allocated for the DWARF unwinder.
	 * Traverse all the FDE/CIE lists and remove and free all the
	 * memory associated with those data structures.
	 */
	list_for_each_entry_safe(cie, cie_tmp, &dwarf_cie_list, link)
		kfree(cie);

	list_for_each_entry_safe(fde, fde_tmp, &dwarf_fde_list, link)
		kfree(fde);

	kmem_cache_destroy(dwarf_reg_cachep);
	kmem_cache_destroy(dwarf_frame_cachep);
}

/**
 * dwarf_parse_section - parse DWARF section
 * @eh_frame_start: start address of the .eh_frame section
 * @eh_frame_end: end address of the .eh_frame section
 * @mod: the kernel module containing the .eh_frame section
 *
 * Parse the information in a .eh_frame section.
 */
static int dwarf_parse_section(char *eh_frame_start, char *eh_frame_end,
			       struct module *mod)
{
	u32 entry_type;
	void *p, *entry;
	int count, err = 0;
	unsigned long len = 0;
	unsigned int c_entries, f_entries;
	unsigned char *end;

	c_entries = 0;
	f_entries = 0;
	entry = eh_frame_start;

	while ((char *)entry < eh_frame_end) {
		p = entry;

		count = dwarf_entry_len(p, &len);
		if (count == 0) {
			/*
			 * We read a bogus length field value. There is
			 * nothing we can do here apart from disabling
			 * the DWARF unwinder. We can't even skip this
			 * entry and move to the next one because 'len'
			 * tells us where our next entry is.
			 */
			err = -EINVAL;
			goto out;
		} else
			p += count;

		/* initial length does not include itself */
		end = p + len;

		entry_type = get_unaligned((u32 *)p);
		p += 4;

		if (entry_type == DW_EH_FRAME_CIE) {
			err = dwarf_parse_cie(entry, p, len, end, mod);
			if (err < 0)
				goto out;
			else
				c_entries++;
		} else {
			err = dwarf_parse_fde(entry, entry_type, p, len,
					      end, mod);
			if (err < 0)
				goto out;
			else
				f_entries++;
		}

		entry = (char *)entry + len + 4;
	}

	printk(KERN_INFO "DWARF unwinder initialised: read %u CIEs, %u FDEs\n",
	       c_entries, f_entries);

	return 0;

out:
	return err;
}

#ifdef CONFIG_MODULES
int module_dwarf_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
			  struct module *me)
{
	unsigned int i;
	int err;
	unsigned long start, end;
	char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	start = end = 0;

	for (i = 1; i < hdr->e_shnum; i++) {
		/* Alloc bit cleared means "ignore it." */
		if ((sechdrs[i].sh_flags & SHF_ALLOC)
		    && !strcmp(secstrings + sechdrs[i].sh_name, ".eh_frame")) {
			start = sechdrs[i].sh_addr;
			end = start + sechdrs[i].sh_size;
			break;
		}
	}

	/* Did we find the .eh_frame section? */
	if (i != hdr->e_shnum) {
		err = dwarf_parse_section((char *)start, (char *)end, me);
		if (err) {
			printk(KERN_WARNING "%s: failed to parse DWARF info\n",
			       me->name);
			return err;
		}
	}

	return 0;
}

/**
 * module_dwarf_cleanup - remove FDE/CIEs associated with @mod
 * @mod: the module that is being unloaded
 *
 * Remove any FDEs and CIEs from the global lists that came from
 * @mod's .eh_frame section because @mod is being unloaded.
 */
void module_dwarf_cleanup(struct module *mod)
{
	struct dwarf_fde *fde;
	struct dwarf_cie *cie;
	unsigned long flags;

	spin_lock_irqsave(&dwarf_cie_lock, flags);

again_cie:
	list_for_each_entry(cie, &dwarf_cie_list, link) {
		if (cie->mod == mod)
			break;
	}

	if (&cie->link != &dwarf_cie_list) {
		list_del(&cie->link);
		kfree(cie);
		goto again_cie;
	}

	spin_unlock_irqrestore(&dwarf_cie_lock, flags);

	spin_lock_irqsave(&dwarf_fde_lock, flags);

again_fde:
	list_for_each_entry(fde, &dwarf_fde_list, link) {
		if (fde->mod == mod)
			break;
	}

	if (&fde->link != &dwarf_fde_list) {
		list_del(&fde->link);
		kfree(fde);
		goto again_fde;
	}

	spin_unlock_irqrestore(&dwarf_fde_lock, flags);
}
#endif /* CONFIG_MODULES */

/**
 * dwarf_unwinder_init - initialise the dwarf unwinder
 *
 * Build the data structures describing the .eh_frame section to
 * make it easier to look up CIE and FDE entries. Because the
 * .eh_frame section is packed as tightly as possible it is not
 * easy to look up the FDE for a given PC, so we build a list of FDE
 * and CIE entries that make it easier.
 */
static int __init dwarf_unwinder_init(void)
{
	int err;

	INIT_LIST_HEAD(&dwarf_cie_list);
	INIT_LIST_HEAD(&dwarf_fde_list);

	dwarf_frame_cachep = kmem_cache_create("dwarf_frames",
			sizeof(struct dwarf_frame), 0,
			SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);

	dwarf_reg_cachep = kmem_cache_create("dwarf_regs",
			sizeof(struct dwarf_reg), 0,
			SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);

	dwarf_frame_pool = mempool_create(DWARF_FRAME_MIN_REQ,
					  mempool_alloc_slab,
					  mempool_free_slab,
					  dwarf_frame_cachep);

	dwarf_reg_pool = mempool_create(DWARF_REG_MIN_REQ,
					mempool_alloc_slab,
					mempool_free_slab,
					dwarf_reg_cachep);

	err = dwarf_parse_section(__start_eh_frame, __stop_eh_frame, NULL);
	if (err)
		goto out;

	err = unwinder_register(&dwarf_unwinder);
	if (err)
		goto out;

	return 0;

out:
	printk(KERN_ERR "Failed to initialise DWARF unwinder: %d\n", err);
	dwarf_unwinder_cleanup();
	return -EINVAL;
}
early_initcall(dwarf_unwinder_init);