/*
 * Copyright (C) 2012 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <errno.h>
#include <string.h>
#include <sys/mman.h>

#include "linker_phdr.h"

/**
  TECHNICAL NOTE ON ELF LOADING.

  An ELF file's program header table contains one or more PT_LOAD
  segments, which correspond to portions of the file that need to
  be mapped into the process' address space.

  Each loadable segment has the following important properties:

    p_offset  -> segment file offset
    p_filesz  -> segment file size
    p_memsz   -> segment memory size (always >= p_filesz)
    p_vaddr   -> segment's virtual address
    p_flags   -> segment flags (e.g. readable, writable, executable)

  We will ignore the p_paddr and p_align fields of Elf32_Phdr for now.

  The loadable segments can be seen as a list of [p_vaddr ... p_vaddr+p_memsz)
  ranges of virtual addresses. A few rules apply:

  - the virtual address ranges should not overlap.

  - if a segment's p_filesz is smaller than its p_memsz, the extra bytes
    between them should always be initialized to 0.

  - ranges do not necessarily start or end at page boundaries. Two distinct
    segments can have their start and end on the same page. In this case, the
    page inherits the mapping flags of the latter segment.

  Finally, the real load address of each segment is not p_vaddr. Instead,
  the loader decides where to load the first segment, then loads all others
  relative to the first one to respect the initial range layout.

  For example, consider the following list:

    [ offset:0,      filesz:0x4000, memsz:0x4000, vaddr:0x30000 ],
    [ offset:0x4000, filesz:0x2000, memsz:0x8000, vaddr:0x40000 ],

  This corresponds to two segments that cover these virtual address ranges:

       0x30000...0x34000
       0x40000...0x48000

  If the loader decides to load the first segment at address 0xa0000000
  then the segments' load address ranges will be:

       0xa0030000...0xa0034000
       0xa0040000...0xa0048000

  In other words, all segments must be loaded at an address that has the same
  constant offset from their p_vaddr value. This offset is computed as the
  difference between the first segment's load address and its p_vaddr value.

  However, in practice, segments do _not_ start at page boundaries. Since we
  can only memory-map at page boundaries, this means that the bias is
  computed as:

       load_bias = phdr0_load_address - PAGE_START(phdr0->p_vaddr)

  (NOTE: The value must be used as a 32-bit unsigned integer, to deal with
  possible wraparound at UINT32_MAX for large p_vaddr values.)

  Note that phdr0_load_address must itself start at a page boundary, with
  the segment's real content starting at:

       phdr0_load_address + PAGE_OFFSET(phdr0->p_vaddr)

  Note that ELF requires the following condition to make the mmap()-ing work:

      PAGE_OFFSET(phdr0->p_vaddr) == PAGE_OFFSET(phdr0->p_offset)

  The load_bias must be added to any p_vaddr value read from the ELF file to
  determine the corresponding memory address.

 **/
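
/* Worked example of the formulas above (an illustrative sketch, not part of
 * the original note): suppose phdr0 has p_vaddr == 0x30100 and the loader
 * picks page 0xa0030000 as phdr0_load_address. With 4096-byte pages:
 *
 *     PAGE_START(0x30100)  == 0x30000
 *     PAGE_OFFSET(0x30100) == 0x100
 *     load_bias            == 0xa0030000 - 0x30000 == 0xa0000000
 *
 * The segment's content then starts at 0xa0030000 + 0x100 == 0xa0030100,
 * which is exactly load_bias + p_vaddr.
 */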

#define MAYBE_MAP_FLAG(x,from,to)    (((x) & (from)) ? (to) : 0)
#define PFLAGS_TO_PROT(x)            (MAYBE_MAP_FLAG((x), PF_X, PROT_EXEC) | \
                                      MAYBE_MAP_FLAG((x), PF_R, PROT_READ) | \
                                      MAYBE_MAP_FLAG((x), PF_W, PROT_WRITE))
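
/* For example (illustrative): PFLAGS_TO_PROT(PF_R | PF_X) evaluates to
 * PROT_READ | PROT_EXEC, the typical protection for a text segment. */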

/* Load the program header table from an ELF file into a read-only private
 * anonymous mmap-ed block.
 *
 * Input:
 *   fd          -> file descriptor
 *   phdr_offset -> file offset of phdr table
 *   phdr_num    -> number of entries in the table.
 *
 * Output:
 *   phdr_mmap   -> address of mmap block in memory.
 *   phdr_size   -> size of mmap block in memory.
 *   phdr_table  -> address of first entry in memory.
 *
 * Return:
 *   -1 on error, or 0 on success.
 */
int phdr_table_load(int                fd,
                    Elf32_Addr         phdr_offset,
                    Elf32_Half         phdr_num,
                    void**             phdr_mmap,
                    Elf32_Addr*        phdr_size,
                    const Elf32_Phdr** phdr_table)
{
    Elf32_Addr page_min, page_max, page_offset;
    void* mmap_result;

    /* Just like the kernel, we only accept program header tables that
     * are smaller than 64KB. */
    if (phdr_num < 1 || phdr_num > 65536/sizeof(Elf32_Phdr)) {
        errno = EINVAL;
        return -1;
    }

    page_min = PAGE_START(phdr_offset);
    page_max = PAGE_END(phdr_offset + phdr_num*sizeof(Elf32_Phdr));
    page_offset = PAGE_OFFSET(phdr_offset);

    mmap_result = mmap(NULL,
                       page_max - page_min,
                       PROT_READ,
                       MAP_PRIVATE,
                       fd,
                       page_min);

    if (mmap_result == MAP_FAILED) {
        return -1;
    }

    *phdr_mmap = mmap_result;
    *phdr_size = page_max - page_min;
    *phdr_table = (Elf32_Phdr*)((char*)mmap_result + page_offset);

    return 0;
}

void phdr_table_unload(void* phdr_mmap, Elf32_Addr phdr_memsize)
{
    munmap(phdr_mmap, phdr_memsize);
}
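
/* Illustrative usage of the pair above (a sketch; assumes 'fd' is an open
 * ELF file whose Elf32_Ehdr has been read into 'ehdr'):
 *
 *     void* phdr_mmap;
 *     Elf32_Addr phdr_size;
 *     const Elf32_Phdr* phdr_table;
 *
 *     if (phdr_table_load(fd, ehdr.e_phoff, ehdr.e_phnum,
 *                         &phdr_mmap, &phdr_size, &phdr_table) < 0) {
 *         // handle error; errno is set
 *     }
 *     // ... inspect phdr_table[0 .. ehdr.e_phnum-1] ...
 *     phdr_table_unload(phdr_mmap, phdr_size);
 */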


/* Compute the extent of all loadable segments in an ELF program header
 * table. This corresponds to the page-aligned size in bytes that needs to be
 * reserved in the process' address space.
 *
 * This returns 0 if there are no loadable segments.
 */
Elf32_Addr phdr_table_get_load_size(const Elf32_Phdr* phdr_table,
                                    size_t phdr_count)
{
    Elf32_Addr min_vaddr = 0xFFFFFFFFU;
    Elf32_Addr max_vaddr = 0x00000000U;

    for (size_t i = 0; i < phdr_count; ++i) {
        const Elf32_Phdr* phdr = &phdr_table[i];

        if (phdr->p_type != PT_LOAD) {
            continue;
        }

        if (phdr->p_vaddr < min_vaddr) {
            min_vaddr = phdr->p_vaddr;
        }

        if (phdr->p_vaddr + phdr->p_memsz > max_vaddr) {
            max_vaddr = phdr->p_vaddr + phdr->p_memsz;
        }
    }

    if (min_vaddr > max_vaddr) {
        return 0;
    }

    min_vaddr = PAGE_START(min_vaddr);
    max_vaddr = PAGE_END(max_vaddr);

    return max_vaddr - min_vaddr;
}
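
/* Illustrative example, reusing the two segments from the technical note
 * (vaddr 0x30000/memsz 0x4000 and vaddr 0x40000/memsz 0x8000): min_vaddr is
 * 0x30000 and max_vaddr is 0x48000, both already page-aligned, so the
 * function returns 0x48000 - 0x30000 == 0x18000 bytes.
 */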

/* Reserve a virtual address range big enough to hold all loadable
 * segments of a program header table. This is done by creating a
 * private anonymous mmap() with PROT_NONE.
 *
 * Input:
 *   phdr_table    -> program header table
 *   phdr_count    -> number of entries in the table
 *   required_base -> for prelinked libraries, mandatory load address
 *                    of the first loadable segment. 0 otherwise.
 * Output:
 *   load_start    -> first page of reserved address space range
 *   load_size     -> size in bytes of reserved address space range
 *   load_bias     -> load bias, as described in technical note above.
 *
 * Return:
 *   0 on success, -1 otherwise. Error code in errno.
 */
int
phdr_table_reserve_memory(const Elf32_Phdr* phdr_table,
                          size_t phdr_count,
                          Elf32_Addr required_base,
                          void** load_start,
                          Elf32_Addr* load_size,
                          Elf32_Addr* load_bias)
{
    Elf32_Addr size = phdr_table_get_load_size(phdr_table, phdr_count);

    if (size == 0) {
        errno = EINVAL;
        return -1;
    }

    int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
    if (required_base != 0) {
        mmap_flags |= MAP_FIXED;
    }
    void* start = mmap((void*)required_base, size, PROT_NONE, mmap_flags, -1, 0);
    if (start == MAP_FAILED) {
        return -1;
    }

    *load_start = start;
    *load_size  = size;
    *load_bias  = 0;

    for (size_t i = 0; i < phdr_count; ++i) {
        const Elf32_Phdr* phdr = &phdr_table[i];
        if (phdr->p_type == PT_LOAD) {
            *load_bias = (Elf32_Addr)start - PAGE_START(phdr->p_vaddr);
            break;
        }
    }
    return 0;
}
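
/* Illustrative call sequence (a sketch; error handling elided):
 *
 *     void* load_start;
 *     Elf32_Addr load_size, load_bias;
 *
 *     phdr_table_reserve_memory(phdr_table, phdr_count, 0,
 *                               &load_start, &load_size, &load_bias);
 *     phdr_table_load_segments(phdr_table, phdr_count, load_bias, fd);
 */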

/* Map all loadable segments in process' address space.
 * This assumes you already called phdr_table_reserve_memory to
 * reserve the address space range for the library.
 *
 * Input:
 *   phdr_table -> program header table
 *   phdr_count -> number of entries in the table
 *   load_bias  -> load offset.
 *   fd         -> input file descriptor.
 *
 * Return:
 *   0 on success, -1 otherwise. Error code in errno.
 */
int
phdr_table_load_segments(const Elf32_Phdr* phdr_table,
                         int               phdr_count,
                         Elf32_Addr        load_bias,
                         int               fd)
{
    int nn;

    for (nn = 0; nn < phdr_count; nn++) {
        const Elf32_Phdr* phdr = &phdr_table[nn];
        void* seg_addr;

        if (phdr->p_type != PT_LOAD)
            continue;

        /* Segment addresses in memory */
        Elf32_Addr seg_start = phdr->p_vaddr + load_bias;
        Elf32_Addr seg_end   = seg_start + phdr->p_memsz;

        Elf32_Addr seg_page_start = PAGE_START(seg_start);
        Elf32_Addr seg_page_end   = PAGE_END(seg_end);

        Elf32_Addr seg_file_end   = seg_start + phdr->p_filesz;

        /* File offsets */
        Elf32_Addr file_start = phdr->p_offset;
        Elf32_Addr file_end   = file_start + phdr->p_filesz;

        Elf32_Addr file_page_start = PAGE_START(file_start);
        Elf32_Addr file_page_end   = PAGE_END(file_end);

        seg_addr = mmap((void*)seg_page_start,
                        file_end - file_page_start,
                        PFLAGS_TO_PROT(phdr->p_flags),
                        MAP_FIXED|MAP_PRIVATE,
                        fd,
                        file_page_start);

        if (seg_addr == MAP_FAILED) {
            return -1;
        }

        /* If the segment is writable, and does not end on a page boundary,
         * zero-fill it until the page limit. */
        if ((phdr->p_flags & PF_W) != 0 && PAGE_OFFSET(seg_file_end) > 0) {
            memset((void*)seg_file_end, 0, PAGE_SIZE - PAGE_OFFSET(seg_file_end));
        }

        seg_file_end = PAGE_END(seg_file_end);

        /* seg_file_end is now the first page address after the file
         * content. If seg_end is larger, we need to zero anything
         * between them. This is done by using a private anonymous
         * map for all extra pages.
         */
        if (seg_page_end > seg_file_end) {
            void* zeromap = mmap((void*)seg_file_end,
                                 seg_page_end - seg_file_end,
                                 PFLAGS_TO_PROT(phdr->p_flags),
                                 MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE,
                                 -1,
                                 0);
            if (zeromap == MAP_FAILED) {
                return -1;
            }
        }
    }
    return 0;
}
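
/* Illustrative layout of one writable segment with p_memsz > p_filesz, as
 * produced by the code above (a sketch; addresses grow to the right):
 *
 *   seg_page_start        seg_file_end   PAGE_END(seg_file_end)   seg_page_end
 *        |  file-backed pages  |  memset() zeroes  |  anonymous zero pages  |
 *
 * The memset() clears the tail of the last file-backed page, and the
 * anonymous mapping supplies any remaining whole pages of the BSS.
 */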

/* Used internally by phdr_table_protect_segments and
 * phdr_table_unprotect_segments to set the protection bits of all loaded
 * segments, with optional extra flags (in practice, PROT_WRITE).
 */
static int
_phdr_table_set_load_prot(const Elf32_Phdr* phdr_table,
                          int               phdr_count,
                          Elf32_Addr        load_bias,
                          int               extra_prot_flags)
{
    const Elf32_Phdr* phdr = phdr_table;
    const Elf32_Phdr* phdr_limit = phdr + phdr_count;

    for (; phdr < phdr_limit; phdr++) {
        /* Skip non-loadable segments, and segments that are already
         * writable; the latter never need their protection changed here. */
        if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0)
            continue;

        Elf32_Addr seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
        Elf32_Addr seg_page_end   = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

        int ret = mprotect((void*)seg_page_start,
                           seg_page_end - seg_page_start,
                           PFLAGS_TO_PROT(phdr->p_flags) | extra_prot_flags);
        if (ret < 0) {
            return -1;
        }
    }
    return 0;
}

/* Restore the original protection modes for all loadable segments.
 * You should only call this after phdr_table_unprotect_segments and
 * applying all relocations.
 *
 * Input:
 *   phdr_table -> program header table
 *   phdr_count -> number of entries in the table
 *   load_bias  -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int
phdr_table_protect_segments(const Elf32_Phdr* phdr_table,
                            int               phdr_count,
                            Elf32_Addr        load_bias)
{
    return _phdr_table_set_load_prot(phdr_table, phdr_count,
                                     load_bias, 0);
}

/* Change the protection of all loaded segments in memory to writable.
 * This is useful before performing relocations. Once completed, you
 * will have to call phdr_table_protect_segments to restore the original
 * protection flags on all segments.
 *
 * Note that some writable segments can also have their content turned
 * to read-only by calling phdr_table_protect_gnu_relro. That is not
 * performed here.
 *
 * Input:
 *   phdr_table -> program header table
 *   phdr_count -> number of entries in the table
 *   load_bias  -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int
phdr_table_unprotect_segments(const Elf32_Phdr* phdr_table,
                              int               phdr_count,
                              Elf32_Addr        load_bias)
{
    return _phdr_table_set_load_prot(phdr_table, phdr_count,
                                     load_bias, PROT_WRITE);
}
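
/* Illustrative relocation sequence (a sketch; 'relocate_library' stands in
 * for the linker's actual relocation pass and is hypothetical):
 *
 *     phdr_table_unprotect_segments(phdr_table, phdr_count, load_bias);
 *     relocate_library(...);  // hypothetical placeholder
 *     phdr_table_protect_segments(phdr_table, phdr_count, load_bias);
 *     phdr_table_protect_gnu_relro(phdr_table, phdr_count, load_bias);
 */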

/* Used internally by phdr_table_protect_gnu_relro.
 */
static int
_phdr_table_set_gnu_relro_prot(const Elf32_Phdr* phdr_table,
                               int               phdr_count,
                               Elf32_Addr        load_bias,
                               int               prot_flags)
{
    const Elf32_Phdr* phdr = phdr_table;
    const Elf32_Phdr* phdr_limit = phdr + phdr_count;

    for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
        if (phdr->p_type != PT_GNU_RELRO)
            continue;

        /* Tricky: what happens when the relro segment does not start
         * or end at page boundaries? We're going to be over-protective
         * here and mark every page touched by the segment as read-only.
         *
         * This seems to match Ian Lance Taylor's description of the
         * feature at http://www.airs.com/blog/archives/189.
         *
         * Extract:
         *    Note that the current dynamic linker code will only work
         *    correctly if the PT_GNU_RELRO segment starts on a page
         *    boundary. This is because the dynamic linker rounds the
         *    p_vaddr field down to the previous page boundary. If
         *    there is anything on the page which should not be read-only,
         *    the program is likely to fail at runtime. So in effect the
         *    linker must only emit a PT_GNU_RELRO segment if it ensures
         *    that it starts on a page boundary.
         */
        Elf32_Addr seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
        Elf32_Addr seg_page_end   = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

        int ret = mprotect((void*)seg_page_start,
                           seg_page_end - seg_page_start,
                           prot_flags);
        if (ret < 0) {
            return -1;
        }
    }
    return 0;
}

/* Apply GNU relro protection if specified by the program header. This will
 * turn some of the pages of a writable PT_LOAD segment to read-only, as
 * specified by one or more PT_GNU_RELRO segments. This must always be
 * performed after relocations.
 *
 * The areas typically covered are .got and .data.rel.ro; these are
 * read-only from the program's POV, but contain absolute addresses
 * that need to be relocated before use.
 *
 * Input:
 *   phdr_table -> program header table
 *   phdr_count -> number of entries in the table
 *   load_bias  -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int
phdr_table_protect_gnu_relro(const Elf32_Phdr* phdr_table,
                             int               phdr_count,
                             Elf32_Addr        load_bias)
{
    return _phdr_table_set_gnu_relro_prot(phdr_table,
                                          phdr_count,
                                          load_bias,
                                          PROT_READ);
}

#ifdef ANDROID_ARM_LINKER

#  ifndef PT_ARM_EXIDX
#  define PT_ARM_EXIDX 0x70000001      /* .ARM.exidx segment */
#  endif

/* Return the address and size of the .ARM.exidx section in memory,
 * if present.
 *
 * Input:
 *   phdr_table      -> program header table
 *   phdr_count      -> number of entries in the table
 *   load_bias       -> load bias
 * Output:
 *   arm_exidx       -> address of table in memory (NULL on failure).
 *   arm_exidx_count -> number of items in table (0 on failure).
 * Return:
 *   0 on success, -1 on failure (_no_ error code in errno)
 */
int
phdr_table_get_arm_exidx(const Elf32_Phdr* phdr_table,
                         int               phdr_count,
                         Elf32_Addr        load_bias,
                         Elf32_Addr**      arm_exidx,
                         unsigned*         arm_exidx_count)
{
    const Elf32_Phdr* phdr = phdr_table;
    const Elf32_Phdr* phdr_limit = phdr + phdr_count;

    for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
        if (phdr->p_type != PT_ARM_EXIDX)
            continue;

        /* Each .ARM.exidx entry is a pair of 32-bit words, 8 bytes total. */
        *arm_exidx = (Elf32_Addr*)(load_bias + phdr->p_vaddr);
        *arm_exidx_count = (unsigned)(phdr->p_memsz / 8);
        return 0;
    }
    *arm_exidx = NULL;
    *arm_exidx_count = 0;
    return -1;
}
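
/* Illustrative usage (a sketch):
 *
 *     Elf32_Addr* exidx;
 *     unsigned exidx_count;
 *
 *     if (phdr_table_get_arm_exidx(phdr_table, phdr_count, load_bias,
 *                                  &exidx, &exidx_count) == 0) {
 *         // exidx points at exidx_count two-word unwind index entries
 *     }
 */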
#endif /* ANDROID_ARM_LINKER */

/* Return the address and size of the ELF file's .dynamic section in memory,
 * or set *dynamic to NULL if missing.
 *
 * Input:
 *   phdr_table    -> program header table
 *   phdr_count    -> number of entries in the table
 *   load_bias     -> load bias
 * Output:
 *   dynamic       -> address of table in memory (NULL on failure).
 *   dynamic_count -> number of items in table (0 on failure).
 * Return:
 *   void
 */
void
phdr_table_get_dynamic_section(const Elf32_Phdr* phdr_table,
                               int               phdr_count,
                               Elf32_Addr        load_bias,
                               Elf32_Addr**      dynamic,
                               size_t*           dynamic_count)
{
    const Elf32_Phdr* phdr = phdr_table;
    const Elf32_Phdr* phdr_limit = phdr + phdr_count;

    for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
        if (phdr->p_type != PT_DYNAMIC) {
            continue;
        }

        *dynamic = (Elf32_Addr*)(load_bias + phdr->p_vaddr);
        if (dynamic_count) {
            /* Each Elf32_Dyn entry is 8 bytes: a 32-bit tag and a 32-bit value. */
            *dynamic_count = (unsigned)(phdr->p_memsz / 8);
        }
        return;
    }
    *dynamic = NULL;
    if (dynamic_count) {
        *dynamic_count = 0;
    }
}
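
/* Illustrative iteration over the dynamic section (a sketch; Elf32_Dyn
 * entries are standard ELF and the list ends with a DT_NULL tag):
 *
 *     Elf32_Addr* dynamic;
 *     phdr_table_get_dynamic_section(phdr_table, phdr_count, load_bias,
 *                                    &dynamic, NULL);
 *     if (dynamic != NULL) {
 *         const Elf32_Dyn* dyn;
 *         for (dyn = (const Elf32_Dyn*)dynamic; dyn->d_tag != DT_NULL; ++dyn) {
 *             // inspect dyn->d_tag and dyn->d_un
 *         }
 *     }
 */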

/* Return the address of the program header table as it appears in the loaded
 * segments in memory. This is in contrast with the input 'phdr_table' which
 * is temporary and will be released before the library is relocated.
 *
 * Input:
 *   phdr_table -> program header table
 *   phdr_count -> number of entries in the table
 *   load_bias  -> load bias
 * Return:
 *   Address of the loaded program header table on success (it has
 *   'phdr_count' entries), or NULL on failure (no error code).
 */
const Elf32_Phdr*
phdr_table_get_loaded_phdr(const Elf32_Phdr* phdr_table,
                           int               phdr_count,
                           Elf32_Addr        load_bias)
{
    const Elf32_Phdr* phdr = phdr_table;
    const Elf32_Phdr* phdr_limit = phdr + phdr_count;
    Elf32_Addr loaded = 0;
    Elf32_Addr loaded_end;

    /* If there is a PT_PHDR, use it directly */
    for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
        if (phdr->p_type == PT_PHDR) {
            loaded = load_bias + phdr->p_vaddr;
            goto CHECK;
        }
    }

    /* Otherwise, check the first loadable segment. If its file offset
     * is 0, it starts with the ELF header, and we can trivially find the
     * loaded program header from it. */
    for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
        if (phdr->p_type == PT_LOAD) {
            if (phdr->p_offset == 0) {
                Elf32_Addr  elf_addr = load_bias + phdr->p_vaddr;
                const Elf32_Ehdr* ehdr = (const Elf32_Ehdr*)(void*)elf_addr;
                Elf32_Addr  offset = ehdr->e_phoff;
                loaded = (Elf32_Addr)ehdr + offset;
                goto CHECK;
            }
            break;
        }
    }

    /* We didn't find it; let the client know. The caller may be able to
     * keep a copy of the input phdr_table instead. */
    return NULL;

CHECK:
    /* Ensure that our program header is actually within a loadable
     * segment. This should help catch badly-formed ELF files that
     * would cause the linker to crash later when trying to access it.
     */
    loaded_end = loaded + phdr_count*sizeof(Elf32_Phdr);

    for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
        if (phdr->p_type != PT_LOAD)
            continue;
        Elf32_Addr seg_start = phdr->p_vaddr + load_bias;
        Elf32_Addr seg_end   = phdr->p_filesz + seg_start;

        if (seg_start <= loaded && loaded_end <= seg_end) {
            return (const Elf32_Phdr*)loaded;
        }
    }
    return NULL;
}
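
/* Illustrative usage (a sketch): after loading, prefer the in-memory copy of
 * the program header table over the temporary one:
 *
 *     const Elf32_Phdr* loaded_phdr =
 *         phdr_table_get_loaded_phdr(phdr_table, phdr_count, load_bias);
 *     if (loaded_phdr == NULL) {
 *         // fall back to keeping a copy of the temporary phdr_table
 *     }
 */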