blob: 5ef30fcc02b6316f23c11c12a7a422c0bb538336 [file] [log] [blame]
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001/*
2 * Copyright (C) 2012 The Android Open Source Project
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the
13 * distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
Elliott Hughes650be4e2013-03-05 18:47:58 -080029#include "linker_phdr.h"
30
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +020031#include <errno.h>
Elliott Hughes05fc1d72015-01-28 18:02:33 -080032#include <string.h>
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +020033#include <sys/mman.h>
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +000034#include <sys/types.h>
35#include <sys/stat.h>
36#include <unistd.h>
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +020037
Elliott Hughes650be4e2013-03-05 18:47:58 -080038#include "linker.h"
39#include "linker_debug.h"
Dmitriy Ivanov3c524812015-11-20 17:28:12 -080040#include "linker_utils.h"
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +020041
Dimitry Ivanov1b84afc2016-01-27 18:12:03 -080042#include "private/bionic_prctl.h"
43
// Returns the ELF e_machine value for the architecture this linker was built
// for. Used by VerifyElfHeader() to reject libraries built for a different ABI.
//
// BUG FIX: the original #if/#elif chain had no fallback, so on an unsupported
// architecture the function compiled but fell off the end of a non-void
// function (undefined behavior). Fail at compile time instead.
static int GetTargetElfMachine() {
#if defined(__arm__)
  return EM_ARM;
#elif defined(__aarch64__)
  return EM_AARCH64;
#elif defined(__i386__)
  return EM_386;
#elif defined(__mips__)
  return EM_MIPS;
#elif defined(__x86_64__)
  return EM_X86_64;
#else
#error "unsupported target architecture: GetTargetElfMachine() has no e_machine value"
#endif
}
57
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +020058/**
59 TECHNICAL NOTE ON ELF LOADING.
60
61 An ELF file's program header table contains one or more PT_LOAD
  segments, which correspond to portions of the file that need to
63 be mapped into the process' address space.
64
65 Each loadable segment has the following important properties:
66
67 p_offset -> segment file offset
68 p_filesz -> segment file size
69 p_memsz -> segment memory size (always >= p_filesz)
70 p_vaddr -> segment's virtual address
71 p_flags -> segment flags (e.g. readable, writable, executable)
72
Elliott Hughes0266ae52014-02-10 17:46:57 -080073 We will ignore the p_paddr and p_align fields of ElfW(Phdr) for now.
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +020074
75 The loadable segments can be seen as a list of [p_vaddr ... p_vaddr+p_memsz)
76 ranges of virtual addresses. A few rules apply:
77
78 - the virtual address ranges should not overlap.
79
80 - if a segment's p_filesz is smaller than its p_memsz, the extra bytes
81 between them should always be initialized to 0.
82
83 - ranges do not necessarily start or end at page boundaries. Two distinct
84 segments can have their start and end on the same page. In this case, the
85 page inherits the mapping flags of the latter segment.
86
  Finally, the real load address of each segment is not p_vaddr. Instead the
88 loader decides where to load the first segment, then will load all others
89 relative to the first one to respect the initial range layout.
90
91 For example, consider the following list:
92
93 [ offset:0, filesz:0x4000, memsz:0x4000, vaddr:0x30000 ],
94 [ offset:0x4000, filesz:0x2000, memsz:0x8000, vaddr:0x40000 ],
95
96 This corresponds to two segments that cover these virtual address ranges:
97
98 0x30000...0x34000
99 0x40000...0x48000
100
101 If the loader decides to load the first segment at address 0xa0000000
102 then the segments' load address ranges will be:
103
104 0xa0030000...0xa0034000
105 0xa0040000...0xa0048000
106
107 In other words, all segments must be loaded at an address that has the same
108 constant offset from their p_vaddr value. This offset is computed as the
109 difference between the first segment's load address, and its p_vaddr value.
110
111 However, in practice, segments do _not_ start at page boundaries. Since we
112 can only memory-map at page boundaries, this means that the bias is
113 computed as:
114
115 load_bias = phdr0_load_address - PAGE_START(phdr0->p_vaddr)
116
117 (NOTE: The value must be used as a 32-bit unsigned integer, to deal with
118 possible wrap around UINT32_MAX for possible large p_vaddr values).
119
120 And that the phdr0_load_address must start at a page boundary, with
121 the segment's real content starting at:
122
123 phdr0_load_address + PAGE_OFFSET(phdr0->p_vaddr)
124
125 Note that ELF requires the following condition to make the mmap()-ing work:
126
127 PAGE_OFFSET(phdr0->p_vaddr) == PAGE_OFFSET(phdr0->p_offset)
128
129 The load_bias must be added to any p_vaddr value read from the ELF file to
130 determine the corresponding memory address.
131
132 **/
133
Elliott Hughesfaf05ba2014-02-11 16:59:37 -0800134#define MAYBE_MAP_FLAG(x, from, to) (((x) & (from)) ? (to) : 0)
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200135#define PFLAGS_TO_PROT(x) (MAYBE_MAP_FLAG((x), PF_X, PROT_EXEC) | \
136 MAYBE_MAP_FLAG((x), PF_R, PROT_READ) | \
137 MAYBE_MAP_FLAG((x), PF_W, PROT_WRITE))
138
// Default-constructs an ElfReader in the "nothing read, nothing loaded" state:
// both phase flags are false, all table pointers are null, all sizes/offsets
// are zero, and fd_ is -1 (no file attached). Read() and Load() each latch
// their flag on success and CHECK that they run exactly once, in order.
ElfReader::ElfReader()
    : did_read_(false), did_load_(false), fd_(-1), file_offset_(0), file_size_(0), phdr_num_(0),
      phdr_table_(nullptr), shdr_table_(nullptr), shdr_num_(0), dynamic_(nullptr), strtab_(nullptr),
#ifdef ENABLE_PRELINK_SUPPORT
      // Prelink builds also track required_base_, the fixed load address a
      // prelinked library asks for (0 means "not prelinked / no requirement").
      strtab_size_(0), load_start_(nullptr), load_size_(0), load_bias_(0), required_base_(0), loaded_phdr_(nullptr),
#else
      strtab_size_(0), load_start_(nullptr), load_size_(0), load_bias_(0), loaded_phdr_(nullptr),
#endif
      mapped_by_caller_(false) {
}
149
150bool ElfReader::Read(const char* name, int fd, off64_t file_offset, off64_t file_size) {
151 CHECK(!did_read_);
152 CHECK(!did_load_);
153 name_ = name;
154 fd_ = fd;
155 file_offset_ = file_offset;
156 file_size_ = file_size;
157
158 if (ReadElfHeader() &&
159 VerifyElfHeader() &&
160 ReadProgramHeaders() &&
161 ReadSectionHeaders() &&
162 ReadDynamicSection()) {
163 did_read_ = true;
164 }
165
166 return did_read_;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200167}
168
Torne (Richard Coles)12bbb912014-02-06 14:34:21 +0000169bool ElfReader::Load(const android_dlextinfo* extinfo) {
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700170 CHECK(did_read_);
171 CHECK(!did_load_);
172 if (ReserveAddressSpace(extinfo) &&
173 LoadSegments() &&
174 FindPhdr()) {
175 did_load_ = true;
176 }
177
178 return did_load_;
179}
180
181const char* ElfReader::get_string(ElfW(Word) index) const {
182 CHECK(strtab_ != nullptr);
183 CHECK(index < strtab_size_);
184
185 return strtab_ + index;
Elliott Hughes650be4e2013-03-05 18:47:58 -0800186}
187
188bool ElfReader::ReadElfHeader() {
Dmitriy Ivanova6c12792014-10-21 12:09:18 -0700189 ssize_t rc = TEMP_FAILURE_RETRY(pread64(fd_, &header_, sizeof(header_), file_offset_));
Elliott Hughes650be4e2013-03-05 18:47:58 -0800190 if (rc < 0) {
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700191 DL_ERR("can't read file \"%s\": %s", name_.c_str(), strerror(errno));
Elliott Hughes650be4e2013-03-05 18:47:58 -0800192 return false;
193 }
Dmitriy Ivanova6c12792014-10-21 12:09:18 -0700194
Elliott Hughes650be4e2013-03-05 18:47:58 -0800195 if (rc != sizeof(header_)) {
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700196 DL_ERR("\"%s\" is too small to be an ELF executable: only found %zd bytes", name_.c_str(),
Elliott Hughesc6200592013-09-30 18:43:46 -0700197 static_cast<size_t>(rc));
Elliott Hughes650be4e2013-03-05 18:47:58 -0800198 return false;
199 }
200 return true;
201}
202
// Validates the header read by ReadElfHeader(): magic bytes, ELF class,
// endianness, object type, ELF version and machine type must all match what
// this linker can load. Each rejection sets a specific dlerror message.
bool ElfReader::VerifyElfHeader() {
  // The first SELFMAG bytes must be the "\177ELF" magic.
  if (memcmp(header_.e_ident, ELFMAG, SELFMAG) != 0) {
    DL_ERR("\"%s\" has bad ELF magic", name_.c_str());
    return false;
  }

  // Try to give a clear diagnostic for ELF class mismatches, since they're
  // an easy mistake to make during the 32-bit/64-bit transition period.
  int elf_class = header_.e_ident[EI_CLASS];
#if defined(__LP64__)
  if (elf_class != ELFCLASS64) {
    if (elf_class == ELFCLASS32) {
      DL_ERR("\"%s\" is 32-bit instead of 64-bit", name_.c_str());
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_.c_str(), elf_class);
    }
    return false;
  }
#else
  if (elf_class != ELFCLASS32) {
    if (elf_class == ELFCLASS64) {
      DL_ERR("\"%s\" is 64-bit instead of 32-bit", name_.c_str());
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_.c_str(), elf_class);
    }
    return false;
  }
#endif

  // Only little-endian objects are supported.
  if (header_.e_ident[EI_DATA] != ELFDATA2LSB) {
    DL_ERR("\"%s\" not little-endian: %d", name_.c_str(), header_.e_ident[EI_DATA]);
    return false;
  }

  // The dynamic linker only loads shared objects / PIE binaries (ET_DYN).
  if (header_.e_type != ET_DYN) {
    DL_ERR("\"%s\" has unexpected e_type: %d", name_.c_str(), header_.e_type);
    return false;
  }

  if (header_.e_version != EV_CURRENT) {
    DL_ERR("\"%s\" has unexpected e_version: %d", name_.c_str(), header_.e_version);
    return false;
  }

  // Reject libraries built for a different architecture than this linker.
  if (header_.e_machine != GetTargetElfMachine()) {
    DL_ERR("\"%s\" has unexpected e_machine: %d", name_.c_str(), header_.e_machine);
    return false;
  }

  return true;
}
254
Dimitry Ivanov7e2d49a2016-08-08 17:12:18 -0700255bool ElfReader::CheckFileRange(ElfW(Addr) offset, size_t size, size_t alignment) {
Dmitriy Ivanov3c524812015-11-20 17:28:12 -0800256 off64_t range_start;
257 off64_t range_end;
258
259 return safe_add(&range_start, file_offset_, offset) &&
260 safe_add(&range_end, range_start, size) &&
Dimitry Ivanov7e2d49a2016-08-08 17:12:18 -0700261 (range_start < file_size_) &&
262 (range_end <= file_size_) &&
263 ((offset % alignment) == 0);
Dmitriy Ivanov3c524812015-11-20 17:28:12 -0800264}
265
Elliott Hughes650be4e2013-03-05 18:47:58 -0800266// Loads the program header table from an ELF file into a read-only private
267// anonymous mmap-ed block.
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700268bool ElfReader::ReadProgramHeaders() {
Elliott Hughes650be4e2013-03-05 18:47:58 -0800269 phdr_num_ = header_.e_phnum;
270
271 // Like the kernel, we only accept program header tables that
272 // are smaller than 64KiB.
Elliott Hughes0266ae52014-02-10 17:46:57 -0800273 if (phdr_num_ < 1 || phdr_num_ > 65536/sizeof(ElfW(Phdr))) {
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700274 DL_ERR("\"%s\" has invalid e_phnum: %zd", name_.c_str(), phdr_num_);
Elliott Hughes650be4e2013-03-05 18:47:58 -0800275 return false;
276 }
277
Dmitriy Ivanov3c524812015-11-20 17:28:12 -0800278 // Boundary checks
279 size_t size = phdr_num_ * sizeof(ElfW(Phdr));
Dimitry Ivanov7e2d49a2016-08-08 17:12:18 -0700280 if (!CheckFileRange(header_.e_phoff, size, alignof(ElfW(Phdr)))) {
281 DL_ERR_AND_LOG("\"%s\" has invalid phdr offset/size: %zu/%zu",
282 name_.c_str(),
283 static_cast<size_t>(header_.e_phoff),
284 size);
Dmitriy Ivanov3c524812015-11-20 17:28:12 -0800285 return false;
286 }
287
288 if (!phdr_fragment_.Map(fd_, file_offset_, header_.e_phoff, size)) {
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700289 DL_ERR("\"%s\" phdr mmap failed: %s", name_.c_str(), strerror(errno));
Elliott Hughes650be4e2013-03-05 18:47:58 -0800290 return false;
291 }
292
Dmitriy Ivanovcf1cbbe2015-10-19 16:57:46 -0700293 phdr_table_ = static_cast<ElfW(Phdr)*>(phdr_fragment_.data());
Elliott Hughes650be4e2013-03-05 18:47:58 -0800294 return true;
295}
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200296
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700297bool ElfReader::ReadSectionHeaders() {
298 shdr_num_ = header_.e_shnum;
299
Dmitriy Ivanovb76123f2015-11-20 10:42:02 -0800300 if (shdr_num_ == 0) {
Dimitry Ivanov7e2d49a2016-08-08 17:12:18 -0700301 DL_ERR_AND_LOG("\"%s\" has no section headers", name_.c_str());
Dmitriy Ivanovb76123f2015-11-20 10:42:02 -0800302 return false;
303 }
304
Dmitriy Ivanov3c524812015-11-20 17:28:12 -0800305 size_t size = shdr_num_ * sizeof(ElfW(Shdr));
Dimitry Ivanov7e2d49a2016-08-08 17:12:18 -0700306 if (!CheckFileRange(header_.e_shoff, size, alignof(const ElfW(Shdr)))) {
307 DL_ERR_AND_LOG("\"%s\" has invalid shdr offset/size: %zu/%zu",
308 name_.c_str(),
309 static_cast<size_t>(header_.e_shoff),
310 size);
Dmitriy Ivanov3c524812015-11-20 17:28:12 -0800311 return false;
312 }
313
314 if (!shdr_fragment_.Map(fd_, file_offset_, header_.e_shoff, size)) {
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700315 DL_ERR("\"%s\" shdr mmap failed: %s", name_.c_str(), strerror(errno));
316 return false;
317 }
318
319 shdr_table_ = static_cast<const ElfW(Shdr)*>(shdr_fragment_.data());
320 return true;
321}
322
323bool ElfReader::ReadDynamicSection() {
324 // 1. Find .dynamic section (in section headers)
325 const ElfW(Shdr)* dynamic_shdr = nullptr;
326 for (size_t i = 0; i < shdr_num_; ++i) {
327 if (shdr_table_[i].sh_type == SHT_DYNAMIC) {
328 dynamic_shdr = &shdr_table_ [i];
329 break;
330 }
331 }
332
333 if (dynamic_shdr == nullptr) {
Dimitry Ivanov7e2d49a2016-08-08 17:12:18 -0700334 DL_ERR_AND_LOG("\"%s\" .dynamic section header was not found", name_.c_str());
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700335 return false;
336 }
337
338 if (dynamic_shdr->sh_link >= shdr_num_) {
Dimitry Ivanov7e2d49a2016-08-08 17:12:18 -0700339 DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid sh_link: %d",
340 name_.c_str(),
341 dynamic_shdr->sh_link);
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700342 return false;
343 }
344
345 const ElfW(Shdr)* strtab_shdr = &shdr_table_[dynamic_shdr->sh_link];
346
347 if (strtab_shdr->sh_type != SHT_STRTAB) {
Dimitry Ivanov7e2d49a2016-08-08 17:12:18 -0700348 DL_ERR_AND_LOG("\"%s\" .dynamic section has invalid link(%d) sh_type: %d (expected SHT_STRTAB)",
349 name_.c_str(), dynamic_shdr->sh_link, strtab_shdr->sh_type);
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700350 return false;
351 }
352
Dimitry Ivanov7e2d49a2016-08-08 17:12:18 -0700353 if (!CheckFileRange(dynamic_shdr->sh_offset, dynamic_shdr->sh_size, alignof(const ElfW(Dyn)))) {
354 DL_ERR_AND_LOG("\"%s\" has invalid offset/size of .dynamic section", name_.c_str());
Dmitriy Ivanov3c524812015-11-20 17:28:12 -0800355 return false;
356 }
357
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700358 if (!dynamic_fragment_.Map(fd_, file_offset_, dynamic_shdr->sh_offset, dynamic_shdr->sh_size)) {
359 DL_ERR("\"%s\" dynamic section mmap failed: %s", name_.c_str(), strerror(errno));
360 return false;
361 }
362
363 dynamic_ = static_cast<const ElfW(Dyn)*>(dynamic_fragment_.data());
364
Dimitry Ivanov7e2d49a2016-08-08 17:12:18 -0700365 if (!CheckFileRange(strtab_shdr->sh_offset, strtab_shdr->sh_size, alignof(const char))) {
366 DL_ERR_AND_LOG("\"%s\" has invalid offset/size of the .strtab section linked from .dynamic section",
367 name_.c_str());
Dmitriy Ivanov3c524812015-11-20 17:28:12 -0800368 return false;
369 }
370
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -0700371 if (!strtab_fragment_.Map(fd_, file_offset_, strtab_shdr->sh_offset, strtab_shdr->sh_size)) {
372 DL_ERR("\"%s\" strtab section mmap failed: %s", name_.c_str(), strerror(errno));
373 return false;
374 }
375
376 strtab_ = static_cast<const char*>(strtab_fragment_.data());
377 strtab_size_ = strtab_fragment_.size();
378 return true;
379}
380
Brian Carlstrome7dffe12013-01-10 16:39:58 -0800381/* Returns the size of the extent of all the possibly non-contiguous
382 * loadable segments in an ELF program header table. This corresponds
383 * to the page-aligned size in bytes that needs to be reserved in the
384 * process' address space. If there are no loadable segments, 0 is
385 * returned.
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200386 *
Dmitriy Ivanov851135b2014-08-29 12:02:36 -0700387 * If out_min_vaddr or out_max_vaddr are not null, they will be
Brian Carlstrome7dffe12013-01-10 16:39:58 -0800388 * set to the minimum and maximum addresses of pages to be reserved,
389 * or 0 if there is nothing to load.
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200390 */
Elliott Hughes0266ae52014-02-10 17:46:57 -0800391size_t phdr_table_get_load_size(const ElfW(Phdr)* phdr_table, size_t phdr_count,
392 ElfW(Addr)* out_min_vaddr,
393 ElfW(Addr)* out_max_vaddr) {
394 ElfW(Addr) min_vaddr = UINTPTR_MAX;
395 ElfW(Addr) max_vaddr = 0;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200396
Elliott Hughes0266ae52014-02-10 17:46:57 -0800397 bool found_pt_load = false;
398 for (size_t i = 0; i < phdr_count; ++i) {
399 const ElfW(Phdr)* phdr = &phdr_table[i];
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200400
Elliott Hughes0266ae52014-02-10 17:46:57 -0800401 if (phdr->p_type != PT_LOAD) {
402 continue;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200403 }
Elliott Hughes0266ae52014-02-10 17:46:57 -0800404 found_pt_load = true;
405
406 if (phdr->p_vaddr < min_vaddr) {
407 min_vaddr = phdr->p_vaddr;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200408 }
409
Elliott Hughes0266ae52014-02-10 17:46:57 -0800410 if (phdr->p_vaddr + phdr->p_memsz > max_vaddr) {
411 max_vaddr = phdr->p_vaddr + phdr->p_memsz;
412 }
413 }
414 if (!found_pt_load) {
415 min_vaddr = 0;
416 }
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200417
Elliott Hughes0266ae52014-02-10 17:46:57 -0800418 min_vaddr = PAGE_START(min_vaddr);
419 max_vaddr = PAGE_END(max_vaddr);
420
Dmitriy Ivanov851135b2014-08-29 12:02:36 -0700421 if (out_min_vaddr != nullptr) {
Elliott Hughes0266ae52014-02-10 17:46:57 -0800422 *out_min_vaddr = min_vaddr;
423 }
Dmitriy Ivanov851135b2014-08-29 12:02:36 -0700424 if (out_max_vaddr != nullptr) {
Elliott Hughes0266ae52014-02-10 17:46:57 -0800425 *out_max_vaddr = max_vaddr;
426 }
427 return max_vaddr - min_vaddr;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200428}
429
#ifdef ENABLE_PRELINK_SUPPORT
// Trailer record appended to a prelinked library: the requested mmap base
// followed by a 4-byte tag.
typedef struct {
  long mmap_addr;
  char tag[4]; /* 'P', 'R', 'E', ' ' */
} prelink_info_t;

/* Returns the requested base address if the library is prelinked,
 * and 0 otherwise. */
static ElfW(Addr) is_prelinked(int fd, const char *name)
{
  // The prelink record sits at the very end of the file; seek to it.
  off_t sz = lseek(fd, -sizeof(prelink_info_t), SEEK_END);
  if (sz < 0) {
    DL_ERR("lseek() failed!");
    return 0;
  }

  prelink_info_t info;
  int rc = TEMP_FAILURE_RETRY(read(fd, &info, sizeof(info)));
  if (rc != sizeof(info)) {
    // Short read or I/O error: treat the library as not prelinked.
    DL_ERR("Could not read prelink_info_t structure for `%s`\n", name);
    return 0;
  }

  // Only a trailer tagged "PRE " marks a prelinked library; anything else
  // (including ordinary libraries) returns 0, meaning "no required base".
  if (memcmp(info.tag, "PRE ", 4)) {
    DL_ERR("`%s` is not a prelinked library\n", name);
    return 0;
  }

  return (unsigned long)info.mmap_addr;
}
#endif
461
// Reserve a virtual address range big enough to hold all loadable
// segments of a program header table. This is done by creating a
// private anonymous mmap() with PROT_NONE.
bool ElfReader::ReserveAddressSpace(const android_dlextinfo* extinfo) {
  ElfW(Addr) min_vaddr;
  load_size_ = phdr_table_get_load_size(phdr_table_, phdr_num_, &min_vaddr);
  if (load_size_ == 0) {
    DL_ERR("\"%s\" has no loadable segments", name_.c_str());
    return false;
  }

  // addr is the page-aligned start of the lowest PT_LOAD segment's p_vaddr.
  uint8_t* addr = reinterpret_cast<uint8_t*>(min_vaddr);
  void* start;
  size_t reserved_size = 0;
  bool reserved_hint = true;  // true => caller's region is only a hint, not mandatory.
  bool strict_hint = false;   // true => mmap must land exactly on mmap_hint.
  // Assume position independent executable by default.
  void* mmap_hint = nullptr;

  if (extinfo != nullptr) {
    if (extinfo->flags & ANDROID_DLEXT_RESERVED_ADDRESS) {
      // Caller insists its reserved region be used (no fallback allowed).
      reserved_size = extinfo->reserved_size;
      reserved_hint = false;
    } else if (extinfo->flags & ANDROID_DLEXT_RESERVED_ADDRESS_HINT) {
      reserved_size = extinfo->reserved_size;
    }

    if (addr != nullptr && (extinfo->flags & ANDROID_DLEXT_FORCE_FIXED_VADDR) != 0) {
      // Non-PIE-style library with a nonzero first vaddr: hint at that address.
      mmap_hint = addr;
    } else if ((extinfo->flags & ANDROID_DLEXT_LOAD_AT_FIXED_ADDRESS) != 0) {
      mmap_hint = extinfo->reserved_addr;
      strict_hint = true;
    }
  }

  if (load_size_ > reserved_size) {
    // The caller's region (if any) is too small; fail unless it was a hint.
    if (!reserved_hint) {
      // NOTE(review): reserved_size - load_size_ is negative here, so the
      // first %zd prints a bogus value; message arguments look swapped —
      // confirm against upstream before changing.
      DL_ERR("reserved address space %zd smaller than %zd bytes needed for \"%s\"",
             reserved_size - load_size_, load_size_, name_.c_str());
      return false;
    }
#ifdef ENABLE_PRELINK_SUPPORT
    // A prelinked library demands a specific base; 0 means no requirement.
    required_base_ = is_prelinked(fd_, name_.c_str());
#endif
    int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
#ifdef ENABLE_PRELINK_SUPPORT
    if (required_base_ != 0) {
      // Force the reservation to the prelinked address (overrides any hint).
      mmap_flags |= MAP_FIXED;
      mmap_hint = (uint8_t*) required_base_;
    }
#endif
    // PROT_NONE reservation; LoadSegments() maps the real contents over it.
    start = mmap(mmap_hint, load_size_, PROT_NONE, mmap_flags, -1, 0);
    if (start == MAP_FAILED) {
      DL_ERR("couldn't reserve %zd bytes of address space for \"%s\"", load_size_, name_.c_str());
      return false;
    }
    if (strict_hint && (start != mmap_hint)) {
      // LOAD_AT_FIXED_ADDRESS requires an exact placement; undo and fail.
      munmap(start, load_size_);
      DL_ERR("couldn't reserve %zd bytes of address space at %p for \"%s\"",
             load_size_, mmap_hint, name_.c_str());
      return false;
    }
  } else {
    // Caller-supplied region is large enough; load into it directly. Only
    // reachable when extinfo != nullptr (reserved_size defaults to 0).
    start = extinfo->reserved_addr;
    mapped_by_caller_ = true;
  }

  load_start_ = start;
#ifdef ENABLE_PRELINK_SUPPORT
  load_bias_ = reinterpret_cast<uint8_t*>(start) - reinterpret_cast<uint8_t*>(min_vaddr);
#else
  // load_bias_ is added to every p_vaddr to get the real in-memory address.
  load_bias_ = reinterpret_cast<uint8_t*>(start) - addr;
#endif
  return true;
}
537
// Maps every PT_LOAD segment into the range reserved by ReserveAddressSpace():
// file-backed pages first, then zero-fill of the writable tail of the last
// file page, then an anonymous mapping for any remaining p_memsz > p_filesz
// pages (.bss). Returns false (with dlerror set) on any validation or mmap
// failure.
bool ElfReader::LoadSegments() {
  for (size_t i = 0; i < phdr_num_; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table_[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }

    // Segment addresses in memory.
    ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
    ElfW(Addr) seg_end = seg_start + phdr->p_memsz;

    ElfW(Addr) seg_page_start = PAGE_START(seg_start);
    ElfW(Addr) seg_page_end = PAGE_END(seg_end);

    // End of the file-backed portion (p_filesz bytes) in memory.
    ElfW(Addr) seg_file_end = seg_start + phdr->p_filesz;

    // File offsets.
    ElfW(Addr) file_start = phdr->p_offset;
    ElfW(Addr) file_end = file_start + phdr->p_filesz;

    // The mapping must start on a page boundary in the file as well; ELF
    // guarantees p_vaddr and p_offset share the same page offset.
    ElfW(Addr) file_page_start = PAGE_START(file_start);
    ElfW(Addr) file_length = file_end - file_page_start;

    if (file_size_ <= 0) {
      DL_ERR("\"%s\" invalid file size: %" PRId64, name_.c_str(), file_size_);
      return false;
    }

    // Reject segments whose file extent runs past the end of the file.
    if (file_end > static_cast<size_t>(file_size_)) {
      DL_ERR("invalid ELF file \"%s\" load segment[%zd]:"
             " p_offset (%p) + p_filesz (%p) ( = %p) past end of file (0x%" PRIx64 ")",
             name_.c_str(), i, reinterpret_cast<void*>(phdr->p_offset),
             reinterpret_cast<void*>(phdr->p_filesz),
             reinterpret_cast<void*>(file_end), file_size_);
      return false;
    }

    if (file_length != 0) {
      int prot = PFLAGS_TO_PROT(phdr->p_flags);
      // W + E PT_LOAD segments are not allowed.
      if ((prot & (PROT_EXEC | PROT_WRITE)) == (PROT_EXEC | PROT_WRITE)) {
        DL_WARN("\"%s\": has W+E (writable and executable) load segments. "
                "This is a security risk shared libraries with W+E load segments "
                "will not be supported in a future Android release. "
                "Please fix the library.", name_.c_str());
      }

      // MAP_FIXED is safe here: we are mapping inside our own reservation.
      void* seg_addr = mmap64(reinterpret_cast<void*>(seg_page_start),
                              file_length,
                              prot,
                              MAP_FIXED|MAP_PRIVATE,
                              fd_,
                              file_offset_ + file_page_start);
      if (seg_addr == MAP_FAILED) {
        DL_ERR("couldn't map \"%s\" segment %zd: %s", name_.c_str(), i, strerror(errno));
        return false;
      }
    }

    // if the segment is writable, and does not end on a page boundary,
    // zero-fill it until the page limit.
    if ((phdr->p_flags & PF_W) != 0 && PAGE_OFFSET(seg_file_end) > 0) {
      memset(reinterpret_cast<void*>(seg_file_end), 0, PAGE_SIZE - PAGE_OFFSET(seg_file_end));
    }

    seg_file_end = PAGE_END(seg_file_end);

    // seg_file_end is now the first page address after the file
    // content. If seg_end is larger, we need to zero anything
    // between them. This is done by using a private anonymous
    // map for all extra pages.
    if (seg_page_end > seg_file_end) {
      size_t zeromap_size = seg_page_end - seg_file_end;
      void* zeromap = mmap(reinterpret_cast<void*>(seg_file_end),
                           zeromap_size,
                           PFLAGS_TO_PROT(phdr->p_flags),
                           MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE,
                           -1,
                           0);
      if (zeromap == MAP_FAILED) {
        DL_ERR("couldn't zero fill \"%s\" gap: %s", name_.c_str(), strerror(errno));
        return false;
      }

      // Name the anonymous pages so they show up as ".bss" in /proc/<pid>/maps.
      prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, zeromap, zeromap_size, ".bss");
    }
  }
  return true;
}
628
Dimitry Ivanov56be6ed2015-04-01 21:18:48 +0000629/* Used internally. Used to set the protection bits of all loaded segments
630 * with optional extra flags (i.e. really PROT_WRITE). Used by
631 * phdr_table_protect_segments and phdr_table_unprotect_segments.
632 */
633static int _phdr_table_set_load_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
634 ElfW(Addr) load_bias, int extra_prot_flags) {
635 const ElfW(Phdr)* phdr = phdr_table;
636 const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
637
638 for (; phdr < phdr_limit; phdr++) {
639 if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0) {
640 continue;
641 }
642
643 ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
644 ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
645
Nick Kralevich8fdb3412015-04-01 16:57:50 -0700646 int prot = PFLAGS_TO_PROT(phdr->p_flags);
647 if ((extra_prot_flags & PROT_WRITE) != 0) {
648 // make sure we're never simultaneously writable / executable
649 prot &= ~PROT_EXEC;
650 }
651
Dimitry Ivanov56be6ed2015-04-01 21:18:48 +0000652 int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
653 seg_page_end - seg_page_start,
Nick Kralevich8fdb3412015-04-01 16:57:50 -0700654 prot | extra_prot_flags);
Dimitry Ivanov56be6ed2015-04-01 21:18:48 +0000655 if (ret < 0) {
656 return -1;
657 }
658 }
659 return 0;
660}
661
/* Restore the original protection modes for all loadable segments.
 * You should only call this after phdr_table_unprotect_segments and
 * applying all relocations.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table,
                                size_t phdr_count, ElfW(Addr) load_bias) {
  // Passing 0 extra flags re-applies exactly the protections encoded in
  // each segment's p_flags.
  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, 0);
}
677
/* Change the protection of all loaded segments in memory to writable.
 * This is useful before performing relocations. Once completed, you
 * will have to call phdr_table_protect_segments to restore the original
 * protection flags on all segments.
 *
 * Note that some writable segments can also have their content turned
 * to read-only by calling phdr_table_protect_gnu_relro. This is not
 * performed here.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table,
                                  size_t phdr_count, ElfW(Addr) load_bias) {
  // PROT_WRITE is added to every read-only PT_LOAD segment; the helper
  // also drops PROT_EXEC so a segment is never writable + executable.
  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE);
}
698
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200699/* Used internally by phdr_table_protect_gnu_relro and
700 * phdr_table_unprotect_gnu_relro.
701 */
Elliott Hughes0266ae52014-02-10 17:46:57 -0800702static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
703 ElfW(Addr) load_bias, int prot_flags) {
704 const ElfW(Phdr)* phdr = phdr_table;
705 const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200706
Elliott Hughes0266ae52014-02-10 17:46:57 -0800707 for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
708 if (phdr->p_type != PT_GNU_RELRO) {
709 continue;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200710 }
Elliott Hughes0266ae52014-02-10 17:46:57 -0800711
712 // Tricky: what happens when the relro segment does not start
713 // or end at page boundaries? We're going to be over-protective
714 // here and put every page touched by the segment as read-only.
715
716 // This seems to match Ian Lance Taylor's description of the
717 // feature at http://www.airs.com/blog/archives/189.
718
719 // Extract:
720 // Note that the current dynamic linker code will only work
721 // correctly if the PT_GNU_RELRO segment starts on a page
722 // boundary. This is because the dynamic linker rounds the
723 // p_vaddr field down to the previous page boundary. If
724 // there is anything on the page which should not be read-only,
725 // the program is likely to fail at runtime. So in effect the
726 // linker must only emit a PT_GNU_RELRO segment if it ensures
727 // that it starts on a page boundary.
728 ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
729 ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
730
Elliott Hughesfaf05ba2014-02-11 16:59:37 -0800731 int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
Elliott Hughes0266ae52014-02-10 17:46:57 -0800732 seg_page_end - seg_page_start,
733 prot_flags);
734 if (ret < 0) {
735 return -1;
736 }
737 }
738 return 0;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200739}
740
/* Apply GNU relro protection if specified by the program header. This will
 * turn some of the pages of a writable PT_LOAD segment to read-only, as
 * specified by one or more PT_GNU_RELRO segments. This must be always
 * performed after relocations.
 *
 * The areas typically covered are .got and .data.rel.ro, these are
 * read-only from the program's POV, but contain absolute addresses
 * that need to be relocated before use.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table,
                                 size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ);
}
761
/* Serialize the GNU relro segments to the given file descriptor. This can be
 * performed after relocations to allow another process to later share the
 * relocated segment, if it was loaded at the same address.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 *   fd          -> writable file descriptor to use
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_serialize_gnu_relro(const ElfW(Phdr)* phdr_table,
                                   size_t phdr_count,
                                   ElfW(Addr) load_bias,
                                   int fd) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
  // Running offset into fd; advances by one page-rounded segment per
  // PT_GNU_RELRO entry so multiple relro segments are laid out back to back.
  ssize_t file_offset = 0;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    // Page-round the segment exactly like _phdr_table_set_gnu_relro_prot
    // does, so what we serialize is what was protected.
    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
    ssize_t size = seg_page_end - seg_page_start;

    // Copy the relocated relro contents into the file. Any short write is
    // treated as failure.
    ssize_t written = TEMP_FAILURE_RETRY(write(fd, reinterpret_cast<void*>(seg_page_start), size));
    if (written != size) {
      return -1;
    }
    // Remap the same address range from the file just written, so these
    // pages become clean file-backed pages instead of dirty anonymous ones.
    void* map = mmap(reinterpret_cast<void*>(seg_page_start), size, PROT_READ,
                     MAP_PRIVATE|MAP_FIXED, fd, file_offset);
    if (map == MAP_FAILED) {
      return -1;
    }
    file_offset += size;
  }
  return 0;
}
804
805/* Where possible, replace the GNU relro segments with mappings of the given
806 * file descriptor. This can be performed after relocations to allow a file
807 * previously created by phdr_table_serialize_gnu_relro in another process to
808 * replace the dirty relocated pages, saving memory, if it was loaded at the
809 * same address. We have to compare the data before we map over it, since some
810 * parts of the relro segment may not be identical due to other libraries in
811 * the process being loaded at different addresses.
812 *
813 * Input:
814 * phdr_table -> program header table
815 * phdr_count -> number of entries in tables
816 * load_bias -> load bias
817 * fd -> readable file descriptor to use
818 * Return:
819 * 0 on error, -1 on failure (error code in errno).
820 */
Dmitriy Ivanov20d89cb2015-03-30 18:43:38 -0700821int phdr_table_map_gnu_relro(const ElfW(Phdr)* phdr_table,
822 size_t phdr_count,
823 ElfW(Addr) load_bias,
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +0000824 int fd) {
825 // Map the file at a temporary location so we can compare its contents.
826 struct stat file_stat;
827 if (TEMP_FAILURE_RETRY(fstat(fd, &file_stat)) != 0) {
828 return -1;
829 }
830 off_t file_size = file_stat.st_size;
Dmitriy Ivanov851135b2014-08-29 12:02:36 -0700831 void* temp_mapping = nullptr;
Torne (Richard Coles)26ec9672014-04-30 15:48:40 +0100832 if (file_size > 0) {
Dmitriy Ivanov851135b2014-08-29 12:02:36 -0700833 temp_mapping = mmap(nullptr, file_size, PROT_READ, MAP_PRIVATE, fd, 0);
Torne (Richard Coles)26ec9672014-04-30 15:48:40 +0100834 if (temp_mapping == MAP_FAILED) {
835 return -1;
836 }
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +0000837 }
838 size_t file_offset = 0;
839
840 // Iterate over the relro segments and compare/remap the pages.
841 const ElfW(Phdr)* phdr = phdr_table;
842 const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
843
844 for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
845 if (phdr->p_type != PT_GNU_RELRO) {
846 continue;
847 }
848
849 ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
850 ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
851
852 char* file_base = static_cast<char*>(temp_mapping) + file_offset;
853 char* mem_base = reinterpret_cast<char*>(seg_page_start);
854 size_t match_offset = 0;
855 size_t size = seg_page_end - seg_page_start;
856
Torne (Richard Coles)26ec9672014-04-30 15:48:40 +0100857 if (file_size - file_offset < size) {
858 // File is too short to compare to this segment. The contents are likely
859 // different as well (it's probably for a different library version) so
860 // just don't bother checking.
861 break;
862 }
863
Torne (Richard Coles)183ad9d2014-02-27 13:18:00 +0000864 while (match_offset < size) {
865 // Skip over dissimilar pages.
866 while (match_offset < size &&
867 memcmp(mem_base + match_offset, file_base + match_offset, PAGE_SIZE) != 0) {
868 match_offset += PAGE_SIZE;
869 }
870
871 // Count similar pages.
872 size_t mismatch_offset = match_offset;
873 while (mismatch_offset < size &&
874 memcmp(mem_base + mismatch_offset, file_base + mismatch_offset, PAGE_SIZE) == 0) {
875 mismatch_offset += PAGE_SIZE;
876 }
877
878 // Map over similar pages.
879 if (mismatch_offset > match_offset) {
880 void* map = mmap(mem_base + match_offset, mismatch_offset - match_offset,
881 PROT_READ, MAP_PRIVATE|MAP_FIXED, fd, match_offset);
882 if (map == MAP_FAILED) {
883 munmap(temp_mapping, file_size);
884 return -1;
885 }
886 }
887
888 match_offset = mismatch_offset;
889 }
890
891 // Add to the base file offset in case there are multiple relro segments.
892 file_offset += size;
893 }
894 munmap(temp_mapping, file_size);
895 return 0;
896}
897
898
#if defined(__arm__)

#  ifndef PT_ARM_EXIDX
#    define PT_ARM_EXIDX 0x70000001      /* .ARM.exidx segment */
#  endif

/* Return the address and size of the .ARM.exidx section in memory,
 * if present.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Output:
 *   arm_exidx       -> address of table in memory (null on failure).
 *   arm_exidx_count -> number of items in table (0 on failure).
 * Return:
 *   0 on success, -1 on failure (_no_ error code in errno)
 */
int phdr_table_get_arm_exidx(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                             ElfW(Addr) load_bias,
                             ElfW(Addr)** arm_exidx, size_t* arm_exidx_count) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  // There is at most one PT_ARM_EXIDX segment; return on first match.
  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_ARM_EXIDX) {
      continue;
    }

    *arm_exidx = reinterpret_cast<ElfW(Addr)*>(load_bias + phdr->p_vaddr);
    // Count is byte size divided by 8: each exception-index table entry
    // occupies 8 bytes.
    *arm_exidx_count = phdr->p_memsz / 8;
    return 0;
  }
  *arm_exidx = nullptr;
  *arm_exidx_count = 0;
  return -1;
}
#endif
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200938
/* Locate the ELF file's .dynamic section in memory.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Output:
 *   dynamic       -> address of table in memory (null if no PT_DYNAMIC
 *                    entry is found).
 *   dynamic_flags -> the segment's p_flags (left untouched if not found
 *                    or if the caller passed null).
 * Return:
 *   void
 */
void phdr_table_get_dynamic_section(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                    ElfW(Addr) load_bias, ElfW(Dyn)** dynamic,
                                    ElfW(Word)* dynamic_flags) {
  *dynamic = nullptr;
  const ElfW(Phdr)* limit = phdr_table + phdr_count;
  for (const ElfW(Phdr)* entry = phdr_table; entry != limit; ++entry) {
    if (entry->p_type != PT_DYNAMIC) {
      continue;
    }
    *dynamic = reinterpret_cast<ElfW(Dyn)*>(load_bias + entry->p_vaddr);
    if (dynamic_flags != nullptr) {
      *dynamic_flags = entry->p_flags;
    }
    return;
  }
}
967
/* Return the program interpreter string, or nullptr if missing.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Return:
 *   pointer to the interpreter path of the first PT_INTERP segment,
 *   relocated by load_bias; nullptr when no PT_INTERP entry exists.
 */
const char* phdr_table_get_interpreter_name(const ElfW(Phdr) * phdr_table, size_t phdr_count,
                                            ElfW(Addr) load_bias) {
  const ElfW(Phdr)* limit = phdr_table + phdr_count;
  for (const ElfW(Phdr)* entry = phdr_table; entry != limit; ++entry) {
    if (entry->p_type == PT_INTERP) {
      return reinterpret_cast<const char*>(load_bias + entry->p_vaddr);
    }
  }
  return nullptr;
}
987
Robert Grosse4544d9f2014-10-15 14:32:19 -0700988// Sets loaded_phdr_ to the address of the program header table as it appears
989// in the loaded segments in memory. This is in contrast with phdr_table_,
990// which is temporary and will be released before the library is relocated.
Elliott Hughes650be4e2013-03-05 18:47:58 -0800991bool ElfReader::FindPhdr() {
Elliott Hughes0266ae52014-02-10 17:46:57 -0800992 const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200993
Elliott Hughes650be4e2013-03-05 18:47:58 -0800994 // If there is a PT_PHDR, use it directly.
Elliott Hughes0266ae52014-02-10 17:46:57 -0800995 for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
Elliott Hughes650be4e2013-03-05 18:47:58 -0800996 if (phdr->p_type == PT_PHDR) {
997 return CheckPhdr(load_bias_ + phdr->p_vaddr);
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200998 }
Elliott Hughes650be4e2013-03-05 18:47:58 -0800999 }
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001000
Elliott Hughes650be4e2013-03-05 18:47:58 -08001001 // Otherwise, check the first loadable segment. If its file offset
1002 // is 0, it starts with the ELF header, and we can trivially find the
1003 // loaded program header from it.
Elliott Hughes0266ae52014-02-10 17:46:57 -08001004 for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
Elliott Hughes650be4e2013-03-05 18:47:58 -08001005 if (phdr->p_type == PT_LOAD) {
1006 if (phdr->p_offset == 0) {
Elliott Hughes0266ae52014-02-10 17:46:57 -08001007 ElfW(Addr) elf_addr = load_bias_ + phdr->p_vaddr;
Elliott Hughesfaf05ba2014-02-11 16:59:37 -08001008 const ElfW(Ehdr)* ehdr = reinterpret_cast<const ElfW(Ehdr)*>(elf_addr);
Elliott Hughes0266ae52014-02-10 17:46:57 -08001009 ElfW(Addr) offset = ehdr->e_phoff;
Dmitriy Ivanov1649e7e2015-01-22 16:04:25 -08001010 return CheckPhdr(reinterpret_cast<ElfW(Addr)>(ehdr) + offset);
Elliott Hughes650be4e2013-03-05 18:47:58 -08001011 }
1012 break;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001013 }
Elliott Hughes650be4e2013-03-05 18:47:58 -08001014 }
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001015
Dmitriy Ivanov4f7a7ad2015-10-15 12:07:25 -07001016 DL_ERR("can't find loaded phdr for \"%s\"", name_.c_str());
Elliott Hughes650be4e2013-03-05 18:47:58 -08001017 return false;
1018}
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001019
// Ensures that our program header is actually within a loadable
// segment. This should help catch badly-formed ELF files that
// would cause the linker to crash later when trying to access it.
bool ElfReader::CheckPhdr(ElfW(Addr) loaded) {
  const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;
  // End of the candidate phdr table in memory.
  // NOTE(review): this addition is unchecked; a hostile phdr_num_/loaded
  // pair could wrap ElfW(Addr) — presumably bounded earlier, verify.
  ElfW(Addr) loaded_end = loaded + (phdr_num_ * sizeof(ElfW(Phdr)));
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type != PT_LOAD) {
      continue;
    }
    ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
    // p_filesz (not p_memsz) is used deliberately: the table must lie in
    // the file-backed part of the segment, not in zero-filled .bss pages.
    ElfW(Addr) seg_end = phdr->p_filesz + seg_start;
    // Accept only if [loaded, loaded_end) is fully inside this segment.
    if (seg_start <= loaded && loaded_end <= seg_end) {
      loaded_phdr_ = reinterpret_cast<const ElfW(Phdr)*>(loaded);
      return true;
    }
  }
  DL_ERR("\"%s\" loaded phdr %p not in loadable segment",
         name_.c_str(), reinterpret_cast<void*>(loaded));
  return false;
}