/*
 * Copyright (C) 2012 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "linker_phdr.h"

#include <errno.h>
#include <machine/exec.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>

#include "linker.h"
#include "linker_debug.h"

/**
  TECHNICAL NOTE ON ELF LOADING.

  An ELF file's program header table contains one or more PT_LOAD
  segments, which correspond to portions of the file that need to
  be mapped into the process' address space.

  Each loadable segment has the following important properties:

    p_offset  -> segment file offset
    p_filesz  -> segment file size
    p_memsz   -> segment memory size (always >= p_filesz)
    p_vaddr   -> segment's virtual address
    p_flags   -> segment flags (e.g. readable, writable, executable)

  We will ignore the p_paddr and p_align fields of ElfW(Phdr) for now.

  The loadable segments can be seen as a list of [p_vaddr ... p_vaddr+p_memsz)
  ranges of virtual addresses. A few rules apply:

  - the virtual address ranges should not overlap.

  - if a segment's p_filesz is smaller than its p_memsz, the extra bytes
    between them should always be initialized to 0.

  - ranges do not necessarily start or end at page boundaries. Two distinct
    segments can have their start and end on the same page. In this case, the
    page inherits the mapping flags of the latter segment.

  Finally, the real load address of each segment is not p_vaddr. Instead,
  the loader decides where to load the first segment, then loads all others
  relative to the first one to preserve the initial range layout.

  For example, consider the following list:

    [ offset:0,      filesz:0x4000, memsz:0x4000, vaddr:0x30000 ],
    [ offset:0x4000, filesz:0x2000, memsz:0x8000, vaddr:0x40000 ],

  This corresponds to two segments that cover these virtual address ranges:

       0x30000...0x34000
       0x40000...0x48000

  If the loader decides to load the first segment at address 0xa0000000
  then the segments' load address ranges will be:

       0xa0030000...0xa0034000
       0xa0040000...0xa0048000

  In other words, all segments must be loaded at an address that has the same
  constant offset from their p_vaddr value. This offset is computed as the
  difference between the first segment's load address and its p_vaddr value.

  However, in practice, segments do _not_ start at page boundaries. Since we
  can only memory-map at page boundaries, this means that the bias is
  computed as:

       load_bias = phdr0_load_address - PAGE_START(phdr0->p_vaddr)

  (NOTE: The value must be used as a 32-bit unsigned integer, to deal with
  possible wrap-around at UINT32_MAX for large p_vaddr values.)

  Also, phdr0_load_address must start at a page boundary, with
  the segment's real content starting at:

       phdr0_load_address + PAGE_OFFSET(phdr0->p_vaddr)

  Note that ELF requires the following condition to make the mmap()-ing work:

      PAGE_OFFSET(phdr0->p_vaddr) == PAGE_OFFSET(phdr0->p_offset)

  The load_bias must be added to any p_vaddr value read from the ELF file to
  determine the corresponding memory address.

 **/

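// A worked instance of the bias arithmetic above (a sketch, not part of the
// loading logic itself; assumes 4096-byte pages and the PAGE_START/PAGE_OFFSET
// helpers from linker.h):
//
//   phdr0->p_vaddr       = 0x30100        (not page-aligned)
//   PAGE_START(0x30100)  = 0x30000
//   PAGE_OFFSET(0x30100) = 0x100
//
// If the loader reserves memory at phdr0_load_address = 0xa0030000, then
//
//   load_bias = 0xa0030000 - 0x30000 = 0xa0000000
//
// and the first byte of segment content lands at 0xa0030000 + 0x100, which
// is exactly load_bias + phdr0->p_vaddr.
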
#define MAYBE_MAP_FLAG(x, from, to)  (((x) & (from)) ? (to) : 0)
#define PFLAGS_TO_PROT(x) (MAYBE_MAP_FLAG((x), PF_X, PROT_EXEC) | \
                           MAYBE_MAP_FLAG((x), PF_R, PROT_READ) | \
                           MAYBE_MAP_FLAG((x), PF_W, PROT_WRITE))

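// For example, PFLAGS_TO_PROT(PF_R | PF_X) evaluates to
// PROT_READ | PROT_EXEC, so a typical text segment is mapped r-x.
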
ElfReader::ElfReader(const char* name, int fd, off64_t file_offset)
    : name_(name), fd_(fd), file_offset_(file_offset),
      phdr_num_(0), phdr_mmap_(nullptr), phdr_table_(nullptr), phdr_size_(0),
#ifdef ENABLE_PRELINK_SUPPORT
      load_start_(nullptr), load_size_(0), load_bias_(0), required_base_(0),
#else
      load_start_(nullptr), load_size_(0), load_bias_(0),
#endif
      loaded_phdr_(nullptr) {
}

ElfReader::~ElfReader() {
  if (phdr_mmap_ != nullptr) {
    munmap(phdr_mmap_, phdr_size_);
  }
}

bool ElfReader::Load(const android_dlextinfo* extinfo) {
  return ReadElfHeader() &&
         VerifyElfHeader() &&
         ReadProgramHeader() &&
         ReserveAddressSpace(extinfo) &&
         LoadSegments() &&
         FindPhdr();
}

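// Caller-side sketch (illustrative; the soinfo bookkeeping lives in
// linker.cpp and the accessors below are declared in linker_phdr.h):
//
//   ElfReader elf_reader(name, fd, file_offset);
//   if (!elf_reader.Load(extinfo)) {
//     return nullptr;
//   }
//   si->base = elf_reader.load_start();
//   si->size = elf_reader.load_size();
//   si->load_bias = elf_reader.load_bias();
//   si->phnum = elf_reader.phdr_count();
//   si->phdr = elf_reader.loaded_phdr();
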
148bool ElfReader::ReadElfHeader() {
Dmitriy Ivanov702ab5b2014-10-21 12:09:18 -0700149 ssize_t rc = TEMP_FAILURE_RETRY(pread64(fd_, &header_, sizeof(header_), file_offset_));
Elliott Hughes650be4e2013-03-05 18:47:58 -0800150 if (rc < 0) {
151 DL_ERR("can't read file \"%s\": %s", name_, strerror(errno));
152 return false;
153 }
Dmitriy Ivanov702ab5b2014-10-21 12:09:18 -0700154
Elliott Hughes650be4e2013-03-05 18:47:58 -0800155 if (rc != sizeof(header_)) {
Elliott Hughesc6200592013-09-30 18:43:46 -0700156 DL_ERR("\"%s\" is too small to be an ELF executable: only found %zd bytes", name_,
157 static_cast<size_t>(rc));
Elliott Hughes650be4e2013-03-05 18:47:58 -0800158 return false;
159 }
160 return true;
161}
162
bool ElfReader::VerifyElfHeader() {
  if (memcmp(header_.e_ident, ELFMAG, SELFMAG) != 0) {
    DL_ERR("\"%s\" has bad ELF magic", name_);
    return false;
  }

  // Try to give a clear diagnostic for ELF class mismatches, since they're
  // an easy mistake to make during the 32-bit/64-bit transition period.
  int elf_class = header_.e_ident[EI_CLASS];
#if defined(__LP64__)
  if (elf_class != ELFCLASS64) {
    if (elf_class == ELFCLASS32) {
      DL_ERR("\"%s\" is 32-bit instead of 64-bit", name_);
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_, elf_class);
    }
    return false;
  }
#else
  if (elf_class != ELFCLASS32) {
    if (elf_class == ELFCLASS64) {
      DL_ERR("\"%s\" is 64-bit instead of 32-bit", name_);
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_, elf_class);
    }
    return false;
  }
#endif

  if (header_.e_ident[EI_DATA] != ELFDATA2LSB) {
    DL_ERR("\"%s\" not little-endian: %d", name_, header_.e_ident[EI_DATA]);
    return false;
  }

  if (header_.e_type != ET_DYN) {
    DL_ERR("\"%s\" has unexpected e_type: %d", name_, header_.e_type);
    return false;
  }

  if (header_.e_version != EV_CURRENT) {
    DL_ERR("\"%s\" has unexpected e_version: %d", name_, header_.e_version);
    return false;
  }

  if (header_.e_machine != ELF_TARG_MACH) {
    DL_ERR("\"%s\" has unexpected e_machine: %d", name_, header_.e_machine);
    return false;
  }

  return true;
}

// Loads the program header table from an ELF file into a read-only private
// anonymous mmap-ed block.
bool ElfReader::ReadProgramHeader() {
  phdr_num_ = header_.e_phnum;

  // Like the kernel, we only accept program header tables that
  // are smaller than 64KiB.
  if (phdr_num_ < 1 || phdr_num_ > 65536/sizeof(ElfW(Phdr))) {
    DL_ERR("\"%s\" has invalid e_phnum: %zd", name_, phdr_num_);
    return false;
  }

  ElfW(Addr) page_min = PAGE_START(header_.e_phoff);
  ElfW(Addr) page_max = PAGE_END(header_.e_phoff + (phdr_num_ * sizeof(ElfW(Phdr))));
  ElfW(Addr) page_offset = PAGE_OFFSET(header_.e_phoff);

  phdr_size_ = page_max - page_min;

  void* mmap_result = mmap64(nullptr, phdr_size_, PROT_READ, MAP_PRIVATE, fd_, file_offset_ + page_min);
  if (mmap_result == MAP_FAILED) {
    DL_ERR("\"%s\" phdr mmap failed: %s", name_, strerror(errno));
    return false;
  }

  phdr_mmap_ = mmap_result;
  phdr_table_ = reinterpret_cast<ElfW(Phdr)*>(reinterpret_cast<char*>(mmap_result) + page_offset);
  return true;
}

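// For example (a sketch: 4096-byte pages, a typical 32-bit ELF file with
// e_phoff == 52 and e_phnum == 8, where sizeof(Elf32_Phdr) == 32):
// page_min == 0, page_max == PAGE_END(52 + 8*32) == 4096, page_offset == 52,
// so a single page is mapped and phdr_table_ points 52 bytes into it.
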
/* Returns the size of the extent of all the possibly non-contiguous
 * loadable segments in an ELF program header table. This corresponds
 * to the page-aligned size in bytes that needs to be reserved in the
 * process' address space. If there are no loadable segments, 0 is
 * returned.
 *
 * If out_min_vaddr or out_max_vaddr are not null, they will be
 * set to the minimum and maximum addresses of pages to be reserved,
 * or 0 if there is nothing to load.
 */
size_t phdr_table_get_load_size(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                ElfW(Addr)* out_min_vaddr,
                                ElfW(Addr)* out_max_vaddr) {
  ElfW(Addr) min_vaddr = UINTPTR_MAX;
  ElfW(Addr) max_vaddr = 0;

  bool found_pt_load = false;
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }
    found_pt_load = true;

    if (phdr->p_vaddr < min_vaddr) {
      min_vaddr = phdr->p_vaddr;
    }

    if (phdr->p_vaddr + phdr->p_memsz > max_vaddr) {
      max_vaddr = phdr->p_vaddr + phdr->p_memsz;
    }
  }
  if (!found_pt_load) {
    min_vaddr = 0;
  }

  min_vaddr = PAGE_START(min_vaddr);
  max_vaddr = PAGE_END(max_vaddr);

  if (out_min_vaddr != nullptr) {
    *out_min_vaddr = min_vaddr;
  }
  if (out_max_vaddr != nullptr) {
    *out_max_vaddr = max_vaddr;
  }
  return max_vaddr - min_vaddr;
}

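// Applied to the two-segment example from the technical note above (a
// sketch): min_vaddr == PAGE_START(0x30000) == 0x30000 and max_vaddr ==
// PAGE_END(0x48000) == 0x48000, so the function returns 0x18000 bytes.
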
#ifdef ENABLE_PRELINK_SUPPORT
typedef struct {
  long mmap_addr;
  char tag[4]; /* 'P', 'R', 'E', ' ' */
} prelink_info_t;

/* Returns the requested base address if the library is prelinked,
 * and 0 otherwise. */
static Elf32_Addr is_prelinked(int fd, const char* name) {
  off_t sz = lseek(fd, -sizeof(prelink_info_t), SEEK_END);
  if (sz < 0) {
    DL_ERR("lseek() failed!");
    return 0;
  }

  prelink_info_t info;
  int rc = TEMP_FAILURE_RETRY(read(fd, &info, sizeof(info)));
  if (rc != sizeof(info)) {
    DL_ERR("Could not read prelink_info_t structure for `%s`\n", name);
    return 0;
  }

  if (memcmp(info.tag, "PRE ", 4) != 0) {
    DL_ERR("`%s` is not a prelinked library\n", name);
    return 0;
  }

  return (unsigned long)info.mmap_addr;
}
#endif

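// Layout of the trailer read above (a sketch; assumes a 32-bit target,
// where long is 4 bytes): the last 8 bytes of a prelinked library are
//
//   [file_size - 8, file_size - 4)   mmap_addr  (requested load address)
//   [file_size - 4, file_size)       "PRE "     (magic tag)
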
// Reserve a virtual address range big enough to hold all loadable
// segments of a program header table. This is done by creating a
// private anonymous mmap() with PROT_NONE.
bool ElfReader::ReserveAddressSpace(const android_dlextinfo* extinfo) {
  ElfW(Addr) min_vaddr;
  load_size_ = phdr_table_get_load_size(phdr_table_, phdr_num_, &min_vaddr);
  if (load_size_ == 0) {
    DL_ERR("\"%s\" has no loadable segments", name_);
    return false;
  }

#ifdef ENABLE_PRELINK_SUPPORT
  required_base_ = is_prelinked(fd_, name_);
#endif

  uint8_t* addr = reinterpret_cast<uint8_t*>(min_vaddr);
  void* start;
  size_t reserved_size = 0;
  bool reserved_hint = true;

  if (extinfo != nullptr) {
    if (extinfo->flags & ANDROID_DLEXT_RESERVED_ADDRESS) {
      reserved_size = extinfo->reserved_size;
      reserved_hint = false;
    } else if (extinfo->flags & ANDROID_DLEXT_RESERVED_ADDRESS_HINT) {
      reserved_size = extinfo->reserved_size;
    }
  }

  if (load_size_ > reserved_size) {
    if (!reserved_hint) {
      DL_ERR("reserved address space %zd smaller than %zd bytes needed for \"%s\"",
             reserved_size, load_size_, name_);
      return false;
    }
    int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
#ifdef ENABLE_PRELINK_SUPPORT
    if (required_base_ != 0) {
      mmap_flags |= MAP_FIXED;
      addr = (uint8_t*) required_base_;
    }
#endif
    start = mmap(addr, load_size_, PROT_NONE, mmap_flags, -1, 0);
    if (start == MAP_FAILED) {
      DL_ERR("couldn't reserve %zd bytes of address space for \"%s\"", load_size_, name_);
      return false;
    }
  } else {
    start = extinfo->reserved_addr;
  }

  load_start_ = start;
#ifdef ENABLE_PRELINK_SUPPORT
  load_bias_ = reinterpret_cast<uint8_t*>(start) - reinterpret_cast<uint8_t*>(min_vaddr);
#else
  load_bias_ = reinterpret_cast<uint8_t*>(start) - addr;
#endif
  return true;
}

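// Caller-side sketch of the dlext reservation path handled above
// (illustrative values; android_dlextinfo and android_dlopen_ext are
// declared in <android/dlext.h>):
//
//   android_dlextinfo extinfo = {};
//   extinfo.flags = ANDROID_DLEXT_RESERVED_ADDRESS;
//   extinfo.reserved_addr = addr;   // page-aligned, from a prior mmap
//   extinfo.reserved_size = size;   // must be >= the library's load_size
//   void* handle = android_dlopen_ext("libfoo.so", RTLD_NOW, &extinfo);
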
bool ElfReader::LoadSegments() {
  for (size_t i = 0; i < phdr_num_; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table_[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }

    // Segment addresses in memory.
    ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
    ElfW(Addr) seg_end = seg_start + phdr->p_memsz;

    ElfW(Addr) seg_page_start = PAGE_START(seg_start);
    ElfW(Addr) seg_page_end = PAGE_END(seg_end);

    ElfW(Addr) seg_file_end = seg_start + phdr->p_filesz;

    // File offsets.
    ElfW(Addr) file_start = phdr->p_offset;
    ElfW(Addr) file_end = file_start + phdr->p_filesz;

    ElfW(Addr) file_page_start = PAGE_START(file_start);
    ElfW(Addr) file_length = file_end - file_page_start;

    if (file_length != 0) {
      void* seg_addr = mmap64(reinterpret_cast<void*>(seg_page_start),
                              file_length,
                              PFLAGS_TO_PROT(phdr->p_flags),
                              MAP_FIXED|MAP_PRIVATE,
                              fd_,
                              file_offset_ + file_page_start);
      if (seg_addr == MAP_FAILED) {
        DL_ERR("couldn't map \"%s\" segment %zd: %s", name_, i, strerror(errno));
        return false;
      }
    }

    // If the segment is writable, and does not end on a page boundary,
    // zero-fill it until the page limit.
    if ((phdr->p_flags & PF_W) != 0 && PAGE_OFFSET(seg_file_end) > 0) {
      memset(reinterpret_cast<void*>(seg_file_end), 0, PAGE_SIZE - PAGE_OFFSET(seg_file_end));
    }

    seg_file_end = PAGE_END(seg_file_end);

    // seg_file_end is now the first page address after the file
    // content. If seg_end is larger, we need to zero anything
    // between them. This is done by using a private anonymous
    // map for all extra pages.
    if (seg_page_end > seg_file_end) {
      void* zeromap = mmap(reinterpret_cast<void*>(seg_file_end),
                           seg_page_end - seg_file_end,
                           PFLAGS_TO_PROT(phdr->p_flags),
                           MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE,
                           -1,
                           0);
      if (zeromap == MAP_FAILED) {
        DL_ERR("couldn't zero fill \"%s\" gap: %s", name_, strerror(errno));
        return false;
      }
    }
  }
  return true;
}

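// Worked example for a single segment (a sketch, 4096-byte pages, using the
// second segment from the technical note with load_bias == 0xa0000000,
// p_vaddr == 0x40000, p_filesz == 0x2000 and p_memsz == 0x8000):
//
//   seg_start      = 0xa0040000    seg_file_end = 0xa0042000
//   seg_page_start = 0xa0040000    seg_page_end = 0xa0048000
//
// The file mapping covers [0xa0040000, 0xa0042000) and the anonymous zero
// map covers the remaining [0xa0042000, 0xa0048000).
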
/* Used internally to set the protection bits of all loaded segments
 * with optional extra flags (i.e. really PROT_WRITE). Used by
 * phdr_table_protect_segments and phdr_table_unprotect_segments.
 */
static int _phdr_table_set_load_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                     ElfW(Addr) load_bias, int extra_prot_flags) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0) {
      continue;
    }

    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
                       seg_page_end - seg_page_start,
                       PFLAGS_TO_PROT(phdr->p_flags) | extra_prot_flags);
    if (ret < 0) {
      return -1;
    }
  }
  return 0;
}

/* Restore the original protection modes for all loadable segments.
 * You should only call this after phdr_table_unprotect_segments and
 * applying all relocations.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, 0);
}

/* Change the protection of all loaded segments in memory to writable.
 * This is useful before performing relocations. Once completed, you
 * will have to call phdr_table_protect_segments to restore the original
 * protection flags on all segments.
 *
 * Note that some writable segments can also have their content turned
 * to read-only by calling phdr_table_protect_gnu_relro. This is not
 * performed here.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE);
}

/* Used internally by phdr_table_protect_gnu_relro and
 * phdr_table_unprotect_gnu_relro.
 */
static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                          ElfW(Addr) load_bias, int prot_flags) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    // Tricky: what happens when the relro segment does not start
    // or end at page boundaries? We're going to be over-protective
    // here and put every page touched by the segment as read-only.

    // This seems to match Ian Lance Taylor's description of the
    // feature at http://www.airs.com/blog/archives/189.

    // Extract:
    //    Note that the current dynamic linker code will only work
    //    correctly if the PT_GNU_RELRO segment starts on a page
    //    boundary. This is because the dynamic linker rounds the
    //    p_vaddr field down to the previous page boundary. If
    //    there is anything on the page which should not be read-only,
    //    the program is likely to fail at runtime. So in effect the
    //    linker must only emit a PT_GNU_RELRO segment if it ensures
    //    that it starts on a page boundary.
    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
                       seg_page_end - seg_page_start,
                       prot_flags);
    if (ret < 0) {
      return -1;
    }
  }
  return 0;
}

/* Apply GNU relro protection if specified by the program header. This will
 * turn some of the pages of a writable PT_LOAD segment to read-only, as
 * specified by one or more PT_GNU_RELRO segments. This must always be
 * performed after relocations.
 *
 * The areas typically covered are .got and .data.rel.ro; these are
 * read-only from the program's POV, but contain absolute addresses
 * that need to be relocated before use.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ);
}

/* Serialize the GNU relro segments to the given file descriptor. This can be
 * performed after relocations to allow another process to later share the
 * relocated segment, if it was loaded at the same address.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 *   fd          -> writable file descriptor to use
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_serialize_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count, ElfW(Addr) load_bias,
                                   int fd) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
  ssize_t file_offset = 0;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
    ssize_t size = seg_page_end - seg_page_start;

    ssize_t written = TEMP_FAILURE_RETRY(write(fd, reinterpret_cast<void*>(seg_page_start), size));
    if (written != size) {
      return -1;
    }
    void* map = mmap(reinterpret_cast<void*>(seg_page_start), size, PROT_READ,
                     MAP_PRIVATE|MAP_FIXED, fd, file_offset);
    if (map == MAP_FAILED) {
      return -1;
    }
    file_offset += size;
  }
  return 0;
}

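// The two relro-sharing halves pair up roughly like this (a sketch; the fd
// and the cross-process plumbing are up to the caller):
//
//   // Process A, after relocating a library loaded at a known address:
//   phdr_table_serialize_gnu_relro(phdr, phnum, load_bias, relro_fd);
//
//   // Process B, same library at the same address, after its relocations:
//   phdr_table_map_gnu_relro(phdr, phnum, load_bias, relro_fd);
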
/* Where possible, replace the GNU relro segments with mappings of the given
 * file descriptor. This can be performed after relocations to allow a file
 * previously created by phdr_table_serialize_gnu_relro in another process to
 * replace the dirty relocated pages, saving memory, if it was loaded at the
 * same address. We have to compare the data before we map over it, since some
 * parts of the relro segment may not be identical due to other libraries in
 * the process being loaded at different addresses.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 *   fd          -> readable file descriptor to use
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_map_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count, ElfW(Addr) load_bias,
                             int fd) {
  // Map the file at a temporary location so we can compare its contents.
  struct stat file_stat;
  if (TEMP_FAILURE_RETRY(fstat(fd, &file_stat)) != 0) {
    return -1;
  }
  off_t file_size = file_stat.st_size;
  void* temp_mapping = nullptr;
  if (file_size > 0) {
    temp_mapping = mmap(nullptr, file_size, PROT_READ, MAP_PRIVATE, fd, 0);
    if (temp_mapping == MAP_FAILED) {
      return -1;
    }
  }
  size_t file_offset = 0;

  // Iterate over the relro segments and compare/remap the pages.
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    char* file_base = static_cast<char*>(temp_mapping) + file_offset;
    char* mem_base = reinterpret_cast<char*>(seg_page_start);
    size_t match_offset = 0;
    size_t size = seg_page_end - seg_page_start;

    if (file_size - file_offset < size) {
      // File is too short to compare to this segment. The contents are likely
      // different as well (it's probably for a different library version) so
      // just don't bother checking.
      break;
    }

    while (match_offset < size) {
      // Skip over dissimilar pages.
      while (match_offset < size &&
             memcmp(mem_base + match_offset, file_base + match_offset, PAGE_SIZE) != 0) {
        match_offset += PAGE_SIZE;
      }

      // Count similar pages.
      size_t mismatch_offset = match_offset;
      while (mismatch_offset < size &&
             memcmp(mem_base + mismatch_offset, file_base + mismatch_offset, PAGE_SIZE) == 0) {
        mismatch_offset += PAGE_SIZE;
      }

      // Map over similar pages. (Note: the file offset must include this
      // segment's base offset within the file, in case there are multiple
      // relro segments.)
      if (mismatch_offset > match_offset) {
        void* map = mmap(mem_base + match_offset, mismatch_offset - match_offset,
                         PROT_READ, MAP_PRIVATE|MAP_FIXED, fd, file_offset + match_offset);
        if (map == MAP_FAILED) {
          munmap(temp_mapping, file_size);
          return -1;
        }
      }

      match_offset = mismatch_offset;
    }

    // Add to the base file offset in case there are multiple relro segments.
    file_offset += size;
  }
  munmap(temp_mapping, file_size);
  return 0;
}


#if defined(__arm__)

# ifndef PT_ARM_EXIDX
#  define PT_ARM_EXIDX 0x70000001      /* .ARM.exidx segment */
# endif

/* Return the address and size of the .ARM.exidx section in memory,
 * if present.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Output:
 *   arm_exidx       -> address of table in memory (null on failure).
 *   arm_exidx_count -> number of items in table (0 on failure).
 * Return:
 *   0 on success, -1 on failure (_no_ error code in errno)
 */
int phdr_table_get_arm_exidx(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                             ElfW(Addr) load_bias,
                             ElfW(Addr)** arm_exidx, unsigned* arm_exidx_count) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_ARM_EXIDX) {
      continue;
    }

    *arm_exidx = reinterpret_cast<ElfW(Addr)*>(load_bias + phdr->p_vaddr);
    *arm_exidx_count = (unsigned)(phdr->p_memsz / 8);
    return 0;
  }
  *arm_exidx = nullptr;
  *arm_exidx_count = 0;
  return -1;
}
#endif

/* Return the address and size of the ELF file's .dynamic section in memory,
 * or null if missing.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Output:
 *   dynamic       -> address of table in memory (null on failure).
 *   dynamic_flags -> protection flags for section (unset on failure)
 * Return:
 *   void
 */
void phdr_table_get_dynamic_section(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                    ElfW(Addr) load_bias, ElfW(Dyn)** dynamic,
                                    ElfW(Word)* dynamic_flags) {
  *dynamic = nullptr;
  for (const ElfW(Phdr)* phdr = phdr_table, *phdr_limit = phdr + phdr_count; phdr < phdr_limit; phdr++) {
    if (phdr->p_type == PT_DYNAMIC) {
      *dynamic = reinterpret_cast<ElfW(Dyn)*>(load_bias + phdr->p_vaddr);
      if (dynamic_flags) {
        *dynamic_flags = phdr->p_flags;
      }
      return;
    }
  }
}

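// Caller-side sketch (illustrative; walking the returned table until
// DT_NULL is the standard way to consume it):
//
//   ElfW(Dyn)* dynamic;
//   phdr_table_get_dynamic_section(phdr, phnum, load_bias, &dynamic, nullptr);
//   if (dynamic != nullptr) {
//     for (ElfW(Dyn)* d = dynamic; d->d_tag != DT_NULL; ++d) {
//       // inspect d->d_tag / d->d_un here
//     }
//   }
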
// Finds the address of the program header table as it appears in the loaded
// segments in memory (and caches it via CheckPhdr). This is in contrast with
// 'phdr_table_' which is temporary and will be released before the library
// is relocated.
bool ElfReader::FindPhdr() {
  const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;

  // If there is a PT_PHDR, use it directly.
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_PHDR) {
      return CheckPhdr(load_bias_ + phdr->p_vaddr);
    }
  }

  // Otherwise, check the first loadable segment. If its file offset
  // is 0, it starts with the ELF header, and we can trivially find the
  // loaded program header from it.
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_LOAD) {
      if (phdr->p_offset == 0) {
        ElfW(Addr) elf_addr = load_bias_ + phdr->p_vaddr;
        const ElfW(Ehdr)* ehdr = reinterpret_cast<const ElfW(Ehdr)*>(elf_addr);
        ElfW(Addr) offset = ehdr->e_phoff;
        return CheckPhdr(reinterpret_cast<ElfW(Addr)>(ehdr) + offset);
      }
      break;
    }
  }

  DL_ERR("can't find loaded phdr for \"%s\"", name_);
  return false;
}

// Ensures that our program header is actually within a loadable
// segment. This should help catch badly-formed ELF files that
// would cause the linker to crash later when trying to access it.
bool ElfReader::CheckPhdr(ElfW(Addr) loaded) {
  const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;
  ElfW(Addr) loaded_end = loaded + (phdr_num_ * sizeof(ElfW(Phdr)));
  for (ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type != PT_LOAD) {
      continue;
    }
    ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
    ElfW(Addr) seg_end = phdr->p_filesz + seg_start;
    if (seg_start <= loaded && loaded_end <= seg_end) {
      loaded_phdr_ = reinterpret_cast<const ElfW(Phdr)*>(loaded);
      return true;
    }
  }
  DL_ERR("\"%s\" loaded phdr %p not in loadable segment", name_, reinterpret_cast<void*>(loaded));
  return false;
}