blob: 34ed80eff13284d42d8d7fb2341c13a1d6bc9f3e [file] [log] [blame]
The Android Open Source Projecta27d2ba2008-10-21 07:00:00 -07001#include <linux/auxvec.h>
2
3#include <stdio.h>
4#include <stdlib.h>
5#include <string.h>
6#include <unistd.h>
7#include <fcntl.h>
8#include <errno.h>
9#include <dlfcn.h>
The Android Open Source Project4e468ed2008-12-17 18:03:48 -080010#include <sys/stat.h>
The Android Open Source Projecta27d2ba2008-10-21 07:00:00 -070011
12//#include <pthread.h>
13
14#include <sys/mman.h>
15
16#include <sys/atomics.h>
17#include <sys/tls.h>
18
19#include "linker.h"
20#include "linker_debug.h"
21
22#define SO_MAX 64
23
24/* >>> IMPORTANT NOTE - READ ME BEFORE MODIFYING <<<
25 *
26 * Do NOT use malloc() and friends or pthread_*() code here.
27 * Don't use printf() either; it's caused mysterious memory
28 * corruption in the past.
29 * The linker runs before we bring up libc and it's easiest
30 * to make sure it does not depend on any complex libc features
31 *
32 * open issues / todo:
33 *
34 * - should we do anything special for STB_WEAK symbols?
35 * - are we doing everything we should for ARM_COPY relocations?
36 * - cleaner error reporting
37 * - configuration for paths (LD_LIBRARY_PATH?)
38 * - after linking, set as much stuff as possible to READONLY
39 * and NOEXEC
40 * - linker hardcodes PAGE_SIZE and PAGE_MASK because the kernel
41 * headers provide versions that are negative...
42 * - allocate space for soinfo structs dynamically instead of
43 * having a hard limit (64)
44 *
45 * features to add someday:
46 *
47 * - dlopen() and friends
48 *
49*/
50
51
/* Forward declaration: performs relocation/linking of a freshly loaded image. */
static int link_image(soinfo *si, unsigned wr_offset);

/* Static pool of soinfo slots (see the SO_MAX note at the top of this file). */
static int socount = 0;               /* pool slots handed out so far */
static soinfo sopool[SO_MAX];         /* backing storage for all soinfo structs */
static soinfo *freelist = NULL;       /* recycled slots; only free_info() adds here */
static soinfo *solist = &libdl_info;  /* head of loaded-object list (libdl is first) */
static soinfo *sonext = &libdl_info;  /* tail of loaded-object list */

int debug_verbosity;   /* verbosity knob consumed by linker_debug.h macros */
static int pid;        /* used as the numeric prefix in log messages */

#if STATS
struct _link_stats linker_stats;
#endif

#if COUNT_PAGES
unsigned bitmask[4096];
#endif

#ifndef PT_ARM_EXIDX
#define PT_ARM_EXIDX 0x70000001      /* .ARM.exidx segment */
#endif

/*
 * This function is an empty stub where GDB locates a breakpoint to get notified
 * about linker activity.
 */
extern void __attribute__((noinline)) rtld_db_dlactivity(void);

extern void sched_yield(void);

/* GDB rendezvous structure: the debugger reads _r_debug to discover loaded
 * objects, and breaks on rtld_db_dlactivity() to observe state changes. */
static struct r_debug _r_debug = {1, NULL, &rtld_db_dlactivity, RT_CONSISTENT, 0};
static struct link_map *r_debug_tail = 0;

//static pthread_mutex_t _r_debug_lock = PTHREAD_MUTEX_INITIALIZER;

/* Hand-rolled spin lock (via __atomic_swap) guarding the gdb link map;
 * see notify_gdb_of_load() for why pthread mutexes are avoided here. */
static volatile int loader_lock = 0;
89
90static void insert_soinfo_into_debug_map(soinfo * info)
91{
92 struct link_map * map;
93
94 /* Copy the necessary fields into the debug structure.
95 */
96 map = &(info->linkmap);
97 map->l_addr = info->base;
98 map->l_name = (char*) info->name;
99
100 /* Stick the new library at the end of the list.
101 * gdb tends to care more about libc than it does
102 * about leaf libraries, and ordering it this way
103 * reduces the back-and-forth over the wire.
104 */
105 if (r_debug_tail) {
106 r_debug_tail->l_next = map;
107 map->l_prev = r_debug_tail;
108 map->l_next = 0;
109 } else {
110 _r_debug.r_map = map;
111 map->l_prev = 0;
112 map->l_next = 0;
113 }
114 r_debug_tail = map;
115}
116
/* Tell gdb (via the _r_debug rendezvous protocol) that 'info' was loaded:
 * announce RT_ADD, insert the entry, then announce RT_CONSISTENT, hitting
 * the rtld_db_dlactivity() breakpoint stub after each state change. */
void notify_gdb_of_load(soinfo * info)
{
    if (info->flags & FLAG_EXE) {
        // GDB already knows about the main executable
        return;
    }

    /* yes, this is a little gross, but it does avoid
    ** pulling in pthread_*() and at the moment we don't
    ** dlopen() anything anyway
    */
    while(__atomic_swap(1, &loader_lock) != 0) {
        sched_yield();
        usleep(5000);
    }

    _r_debug.r_state = RT_ADD;
    rtld_db_dlactivity();

    insert_soinfo_into_debug_map(info);

    _r_debug.r_state = RT_CONSISTENT;
    rtld_db_dlactivity();

    /* Release the spin lock taken above. */
    __atomic_swap(0, &loader_lock);
}
143
/* Signal gdb that the set of loaded libraries changed without inserting a
 * specific entry: toggle the state and hit the breakpoint stub twice. */
void notify_gdb_of_libraries()
{
    _r_debug.r_state = RT_ADD;
    rtld_db_dlactivity();
    _r_debug.r_state = RT_CONSISTENT;
    rtld_db_dlactivity();
}
151
152static soinfo *alloc_info(const char *name)
153{
154 soinfo *si;
155
156 if(strlen(name) >= SOINFO_NAME_LEN) {
157 ERROR("%5d library name %s too long\n", pid, name);
158 return 0;
159 }
160
161 /* The freelist is populated when we call free_info(), which in turn is
162 done only by dlclose(), which is not likely to be used.
163 */
164 if (!freelist) {
165 if(socount == SO_MAX) {
166 ERROR("%5d too many libraries when loading %s\n", pid, name);
167 return NULL;
168 }
169 freelist = sopool + socount++;
170 freelist->next = NULL;
171 }
172
173 si = freelist;
174 freelist = freelist->next;
175
176 /* Make sure we get a clean block of soinfo */
177 memset(si, 0, sizeof(soinfo));
178 strcpy((char*) si->name, name);
179 sonext->next = si;
180 si->next = NULL;
181 si->refcount = 0;
182 sonext = si;
183
184 TRACE("%5d name %s: allocated soinfo @ %p\n", pid, name, si);
185 return si;
186}
187
/* Return 'si' to the freelist, unlinking it from solist first.
 * Reached only via the dlclose() path (see unload_library()). */
static void free_info(soinfo *si)
{
    soinfo *prev = NULL, *trav;

    TRACE("%5d name %s: freeing soinfo @ %p\n", pid, si->name, si);

    /* Find si in solist, remembering the node before it. */
    for(trav = solist; trav != NULL; trav = trav->next){
        if (trav == si)
            break;
        prev = trav;
    }
    if (trav == NULL) {
        /* si was not in solist */
        ERROR("%5d name %s is not in solist!\n", pid, si->name);
        return;
    }

    /* prev will never be NULL, because the first entry in solist is
       always the static libdl_info.
    */
    prev->next = si->next;
    if (si == sonext) sonext = prev;
    si->next = freelist;
    freelist = si;
}
213
214#ifndef LINKER_TEXT_BASE
215#error "linker's makefile must define LINKER_TEXT_BASE"
216#endif
217#ifndef LINKER_AREA_SIZE
218#error "linker's makefile must define LINKER_AREA_SIZE"
219#endif
220#define LINKER_BASE ((LINKER_TEXT_BASE) & 0xfff00000)
221#define LINKER_TOP (LINKER_BASE + (LINKER_AREA_SIZE))
222
223const char *addr_to_name(unsigned addr)
224{
225 soinfo *si;
226
227 for(si = solist; si != 0; si = si->next){
228 if((addr >= si->base) && (addr < (si->base + si->size))) {
229 return si->name;
230 }
231 }
232
233 if((addr >= LINKER_BASE) && (addr < LINKER_TOP)){
234 return "linker";
235 }
236
237 return "";
238}
239
240/* For a given PC, find the .so that it belongs to.
241 * Returns the base address of the .ARM.exidx section
242 * for that .so, and the number of 8-byte entries
243 * in that section (via *pcount).
244 *
245 * Intended to be called by libc's __gnu_Unwind_Find_exidx().
246 *
247 * This function is exposed via dlfcn.c and libdl.so.
248 */
249#ifdef ANDROID_ARM_LINKER
/* For a given PC, return the base of the owning .so's .ARM.exidx section
 * and store the number of 8-byte entries in *pcount (0 if not found). */
_Unwind_Ptr dl_unwind_find_exidx(_Unwind_Ptr pc, int *pcount)
{
    soinfo *si;
    unsigned addr = (unsigned)pc;

    /* Addresses inside the linker's own range have no soinfo entry. */
    if ((addr < LINKER_BASE) || (addr >= LINKER_TOP)) {
        for (si = solist; si != 0; si = si->next){
            if ((addr >= si->base) && (addr < (si->base + si->size))) {
                *pcount = si->ARM_exidx_count;
                /* ARM_exidx was stored as the unrelocated p_vaddr in
                 * load_segments(); add the load base here. */
                return (_Unwind_Ptr)(si->base + (unsigned long)si->ARM_exidx);
            }
        }
    }
    *pcount = 0;
    return NULL;
}
266#elif defined(ANDROID_X86_LINKER)
267/* Here, we only have to provide a callback to iterate across all the
268 * loaded libraries. gcc_eh does the rest. */
269int
270dl_iterate_phdr(int (*cb)(struct dl_phdr_info *info, size_t size, void *data),
271 void *data)
272{
273 soinfo *si;
274 struct dl_phdr_info dl_info;
275 int rv = 0;
276
277 for (si = solist; si != NULL; si = si->next) {
278 dl_info.dlpi_addr = si->linkmap.l_addr;
279 dl_info.dlpi_name = si->linkmap.l_name;
280 dl_info.dlpi_phdr = si->phdr;
281 dl_info.dlpi_phnum = si->phnum;
282 rv = cb(&dl_info, sizeof (struct dl_phdr_info), data);
283 if (rv != 0)
284 break;
285 }
286 return rv;
287}
288#endif
289
290static Elf32_Sym *_elf_lookup(soinfo *si, unsigned hash, const char *name)
291{
292 Elf32_Sym *s;
293 Elf32_Sym *symtab = si->symtab;
294 const char *strtab = si->strtab;
295 unsigned n;
296
297 TRACE_TYPE(LOOKUP, "%5d SEARCH %s in %s@0x%08x %08x %d\n", pid,
298 name, si->name, si->base, hash, hash % si->nbucket);
299 n = hash % si->nbucket;
300
301 for(n = si->bucket[hash % si->nbucket]; n != 0; n = si->chain[n]){
302 s = symtab + n;
303 if(strcmp(strtab + s->st_name, name)) continue;
304
305 /* only concern ourselves with global symbols */
306 switch(ELF32_ST_BIND(s->st_info)){
307 case STB_GLOBAL:
308 /* no section == undefined */
309 if(s->st_shndx == 0) continue;
310
311 case STB_WEAK:
312 TRACE_TYPE(LOOKUP, "%5d FOUND %s in %s (%08x) %d\n", pid,
313 name, si->name, s->st_value, s->st_size);
314 return s;
315 }
316 }
317
318 return 0;
319}
320
/* Standard System V ELF symbol hash over the NUL-terminated name. */
static unsigned elfhash(const char *_name)
{
    const unsigned char *p = (const unsigned char *) _name;
    unsigned h = 0;

    for (; *p != '\0'; p++) {
        unsigned top;
        h = (h << 4) + *p;
        top = h & 0xf0000000;
        /* Clear the top nibble and fold it back into the low bits. */
        h ^= top;
        h ^= top >> 24;
    }
    return h;
}
334
/* Look up 'name' in a single object.  *elf_hash caches the ELF hash
 * across calls (0 means "not yet computed"), so a lookup that scans many
 * objects hashes the name only once. */
static Elf32_Sym *
_do_lookup_in_so(soinfo *si, const char *name, unsigned *elf_hash)
{
    if (*elf_hash == 0)
        *elf_hash = elfhash(name);
    return _elf_lookup (si, *elf_hash, name);
}
342
/* This is used by dl_sym() */
/* Look up 'name' in exactly one library 'si' (no global scope search). */
Elf32_Sym *lookup_in_library(soinfo *si, const char *name)
{
    /* 0 makes _do_lookup_in_so compute the hash fresh. */
    unsigned unused = 0;
    return _do_lookup_in_so(si, name, &unused);
}
349
350static Elf32_Sym *
351_do_lookup(soinfo *user_si, const char *name, unsigned *base)
352{
353 unsigned elf_hash = 0;
354 Elf32_Sym *s = NULL;
355 soinfo *si;
356
357 /* Look for symbols in the local scope first (the object who is
358 * searching). This happens with C++ templates on i386 for some
359 * reason. */
360 if (user_si) {
361 s = _do_lookup_in_so(user_si, name, &elf_hash);
362 if (s != NULL)
363 *base = user_si->base;
364 }
365
366 for(si = solist; (s == NULL) && (si != NULL); si = si->next)
367 {
368 if((si->flags & FLAG_ERROR) || (si == user_si))
369 continue;
370 s = _do_lookup_in_so(si, name, &elf_hash);
371 if (s != NULL) {
372 *base = si->base;
373 break;
374 }
375 }
376
377 if (s != NULL) {
378 TRACE_TYPE(LOOKUP, "%5d %s s->st_value = 0x%08x, "
379 "si->base = 0x%08x\n", pid, name, s->st_value, si->base);
380 return s;
381 }
382
383 return 0;
384}
385
/* This is used by dl_sym() */
/* Global-scope lookup: no requesting object, search all of solist. */
Elf32_Sym *lookup(const char *name, unsigned *base)
{
    return _do_lookup(NULL, name, base);
}
391
392#if 0
393static void dump(soinfo *si)
394{
395 Elf32_Sym *s = si->symtab;
396 unsigned n;
397
398 for(n = 0; n < si->nchain; n++) {
399 TRACE("%5d %04d> %08x: %02x %04x %08x %08x %s\n", pid, n, s,
400 s->st_info, s->st_shndx, s->st_value, s->st_size,
401 si->strtab + s->st_name);
402 s++;
403 }
404}
405#endif
406
/* Built-in library search directories for open_library(); NULL-terminated. */
static const char *sopaths[] = {
    "/system/lib",
    "/lib",
    0
};
412
/* _open_lib
 *
 * Open 'name' read-only if (and only if) it is a regular file.
 *
 * Returns an open fd on success, -1 otherwise.
 */
static int _open_lib(const char *name)
{
    int fd;
    struct stat filestat;

    /* Pre-check with stat() so we never open() special files (opening a
     * FIFO, for instance, could block indefinitely). */
    if ((stat(name, &filestat) < 0) || !S_ISREG(filestat.st_mode))
        return -1;

    if ((fd = open(name, O_RDONLY)) < 0)
        return -1;

    /* Re-verify on the open fd: the path could have been swapped between
     * stat() and open() (TOCTOU). */
    if ((fstat(fd, &filestat) >= 0) && S_ISREG(filestat.st_mode))
        return fd;

    close(fd);
    return -1;
}
425
426/* TODO: Need to add support for initializing the so search path with
427 * LD_LIBRARY_PATH env variable for non-setuid programs. */
The Android Open Source Projecta27d2ba2008-10-21 07:00:00 -0700428static int open_library(const char *name)
429{
430 int fd;
431 char buf[512];
432 const char **path;
433
434 TRACE("[ %5d opening %s ]\n", pid, name);
435
The Android Open Source Projecta27d2ba2008-10-21 07:00:00 -0700436 if(name == 0) return -1;
The Android Open Source Project4e468ed2008-12-17 18:03:48 -0800437 if(strlen(name) > 256) return -1;
The Android Open Source Projecta27d2ba2008-10-21 07:00:00 -0700438
The Android Open Source Project4e468ed2008-12-17 18:03:48 -0800439 if ((name[0] == '/') && ((fd = _open_lib(name)) >= 0))
440 return fd;
The Android Open Source Projecta27d2ba2008-10-21 07:00:00 -0700441
The Android Open Source Project4e468ed2008-12-17 18:03:48 -0800442 for (path = sopaths; *path; path++) {
443 snprintf(buf, sizeof(buf), "%s/%s", *path, name);
444 if ((fd = _open_lib(buf)) >= 0)
445 return fd;
The Android Open Source Projecta27d2ba2008-10-21 07:00:00 -0700446 }
447
448 return -1;
449}
450
/* Next candidate base address for non-prelinked libraries; advanced in
 * LIBINC steps (see alloc_mem_region() and init_library()). */
static unsigned libbase = LIBBASE;

/* temporary space for holding the first page of the shared lib
 * which contains the elf header (with the pht). */
static unsigned char __header[PAGE_SIZE];
456
/* Trailer record found in the last bytes of a prelinked library. */
typedef struct {
    long mmap_addr;
    char tag[4]; /* 'P', 'R', 'E', ' ' */
} prelink_info_t;

/* Returns the requested base address if the library is prelinked,
 * and 0 otherwise. */
static unsigned long
is_prelinked(int fd, const char *name)
{
    prelink_info_t info;
    off_t pos;

    /* The prelink record occupies the final sizeof(info) bytes of the file. */
    pos = lseek(fd, -sizeof(prelink_info_t), SEEK_END);
    if (pos < 0) {
        ERROR("lseek() failed!\n");
        return 0;
    }

    if (read(fd, &info, sizeof(info)) != sizeof(info)) {
        WARN("Could not read prelink_info_t structure for `%s`\n", name);
        return 0;
    }

    if (strncmp(info.tag, "PRE ", 4) != 0) {
        WARN("`%s` is not a prelinked library\n", name);
        return 0;
    }

    return (unsigned long)info.mmap_addr;
}
488
/* verify_elf_object
 * Verifies if the object @ base is a valid ELF object
 *
 * Args:
 *   base: start of the candidate ELF header
 *   name: library name, for diagnostics (currently unused)
 *
 * Returns:
 *        0 on success
 *       -1 if no valid ELF object is found @ base.
 */
static int
verify_elf_object(void *base, const char *name)
{
    Elf32_Ehdr *hdr = (Elf32_Ehdr *) base;

    /* Compare all four magic bytes (0x7f 'E' 'L' 'F') in one shot. */
    if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0)
        return -1;

    /* TODO: Should we verify anything else in the header? */

    return 0;
}
512
513
514/* get_lib_extents
515 * Retrieves the base (*base) address where the ELF object should be
516 * mapped and its overall memory size (*total_sz).
517 *
518 * Args:
519 * fd: Opened file descriptor for the library
520 * name: The name of the library
521 * _hdr: Pointer to the header page of the library
522 * total_sz: Total size of the memory that should be allocated for
523 * this library
524 *
525 * Returns:
526 * -1 if there was an error while trying to get the lib extents.
527 * The possible reasons are:
528 * - Could not determine if the library was prelinked.
529 * - The library provided is not a valid ELF object
530 * 0 if the library did not request a specific base offset (normal
531 * for non-prelinked libs)
532 * > 0 if the library requests a specific address to be mapped to.
533 * This indicates a pre-linked library.
534 */
static unsigned
get_lib_extents(int fd, const char *name, void *__hdr, unsigned *total_sz)
{
    unsigned req_base;
    unsigned min_vaddr = 0xffffffff;  /* lowest PT_LOAD p_vaddr seen */
    unsigned max_vaddr = 0;           /* highest PT_LOAD end address seen */
    unsigned char *_hdr = (unsigned char *)__hdr;
    Elf32_Ehdr *ehdr = (Elf32_Ehdr *)_hdr;
    Elf32_Phdr *phdr;
    int cnt;

    TRACE("[ %5d Computing extents for '%s'. ]\n", pid, name);
    if (verify_elf_object(_hdr, name) < 0) {
        ERROR("%5d - %s is not a valid ELF object\n", pid, name);
        return (unsigned)-1;
    }

    req_base = (unsigned) is_prelinked(fd, name);
    if (req_base == (unsigned)-1)
        return -1;
    else if (req_base != 0) {
        TRACE("[ %5d - Prelinked library '%s' requesting base @ 0x%08x ]\n",
              pid, name, req_base);
    } else {
        TRACE("[ %5d - Non-prelinked library '%s' found. ]\n", pid, name);
    }

    /* The program header table is assumed to live within the header page
     * that the caller read into __hdr. */
    phdr = (Elf32_Phdr *)(_hdr + ehdr->e_phoff);

    /* find the min/max p_vaddrs from all the PT_LOAD segments so we can
     * get the range. */
    for (cnt = 0; cnt < ehdr->e_phnum; ++cnt, ++phdr) {
        if (phdr->p_type == PT_LOAD) {
            if ((phdr->p_vaddr + phdr->p_memsz) > max_vaddr)
                max_vaddr = phdr->p_vaddr + phdr->p_memsz;
            if (phdr->p_vaddr < min_vaddr)
                min_vaddr = phdr->p_vaddr;
        }
    }

    if ((min_vaddr == 0xffffffff) && (max_vaddr == 0)) {
        ERROR("%5d - No loadable segments found in %s.\n", pid, name);
        return (unsigned)-1;
    }

    /* truncate min_vaddr down to page boundary
     * (NOTE: in this file PAGE_MASK is the low-bits mask, PAGE_SIZE - 1;
     * see the header comment about hardcoded page constants.) */
    min_vaddr &= ~PAGE_MASK;

    /* round max_vaddr up to the next page */
    max_vaddr = (max_vaddr + PAGE_SIZE - 1) & ~PAGE_MASK;

    *total_sz = (max_vaddr - min_vaddr);
    return (unsigned)req_base;
}
589
590/* alloc_mem_region
591 *
592 * This function reserves a chunk of memory to be used for mapping in
593 * the shared library. We reserve the entire memory region here, and
594 * then the rest of the linker will relocate the individual loadable
595 * segments into the correct locations within this memory range.
596 *
597 * Args:
598 * req_base: The requested base of the allocation. If 0, a sane one will be
599 * chosen in the range LIBBASE <= base < LIBLAST.
600 * sz: The size of the allocation.
601 *
602 * Returns:
603 * NULL on failure, and non-NULL pointer to memory region on success.
604 */
605static void *
606alloc_mem_region(const char *name, unsigned req_base, unsigned sz)
607{
608 void *base;
609
610 if (req_base) {
611 /* we should probably map it as PROT_NONE, but the init code needs
612 * to read the phdr, so mark everything as readable. */
613 base = mmap((void *)req_base, sz, PROT_READ | PROT_EXEC,
614 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
615 if (base == MAP_FAILED) {
616 WARN("%5d can NOT map (prelinked) library '%s' at 0x%08x "
617 "as requested, will try general pool: %d (%s)\n",
618 pid, name, req_base, errno, strerror(errno));
619 } else if (base != (void *)req_base) {
620 ERROR("OOPS: %5d prelinked library '%s' mapped at 0x%08x, "
621 "not at 0x%08x\n", pid, name, (unsigned)base, req_base);
622 munmap(base, sz);
623 return NULL;
624 }
625
626 /* Here we know that we got a valid allocation. Hooray! */
627 return base;
628 }
629
630 /* We either did not request a specific base address to map at
631 * (i.e. not-prelinked) OR we could not map at the requested address.
632 * Try to find a memory range in our "reserved" area that can be mapped.
633 */
634 while(libbase < LIBLAST) {
635 base = mmap((void*) libbase, sz, PROT_READ | PROT_EXEC,
636 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
637
638 if(((unsigned)base) == libbase) {
639 /* success -- got the address we wanted */
640 return base;
641 }
642
643 /* If we got a different address than requested (rather than
644 * just a failure), we need to unmap the mismapped library
645 * before trying again
646 */
647 if(base != MAP_FAILED)
648 munmap(base, sz);
649
650 libbase += LIBINC;
651 }
652
653 ERROR("OOPS: %5d cannot map library '%s'. no vspace available.\n",
654 pid, name);
655 return NULL;
656}
657
658#define MAYBE_MAP_FLAG(x,from,to) (((x) & (from)) ? (to) : 0)
659#define PFLAGS_TO_PROT(x) (MAYBE_MAP_FLAG((x), PF_X, PROT_EXEC) | \
660 MAYBE_MAP_FLAG((x), PF_R, PROT_READ) | \
661 MAYBE_MAP_FLAG((x), PF_W, PROT_WRITE))
662/* load_segments
663 *
664 * This function loads all the loadable (PT_LOAD) segments into memory
665 * at their appropriate memory offsets off the base address.
666 *
667 * Args:
668 * fd: Open file descriptor to the library to load.
669 * header: Pointer to a header page that contains the ELF header.
670 * This is needed since we haven't mapped in the real file yet.
671 * si: ptr to soinfo struct describing the shared object.
672 *
673 * Returns:
674 * 0 on success, -1 on failure.
675 */
/* Implementation of load_segments(); the contract is documented in the
 * comment block immediately above. */
static int
load_segments(int fd, void *header, soinfo *si)
{
    Elf32_Ehdr *ehdr = (Elf32_Ehdr *)header;
    Elf32_Phdr *phdr = (Elf32_Phdr *)((unsigned char *)header + ehdr->e_phoff);
    unsigned char *base = (unsigned char *)si->base;
    int cnt;
    unsigned len;
    unsigned char *tmp;
    unsigned char *pbase;
    unsigned char *extra_base;
    unsigned extra_len;
    unsigned total_sz = 0;

    /* Track the extent of the non-writable segments so they can be
     * write-protected again after relocation. */
    si->wrprotect_start = 0xffffffff;
    si->wrprotect_end = 0;

    TRACE("[ %5d - Begin loading segments for '%s' @ 0x%08x ]\n",
          pid, si->name, (unsigned)si->base);
    /* Now go through all the PT_LOAD segments and map them into memory
     * at the appropriate locations. */
    for (cnt = 0; cnt < ehdr->e_phnum; ++cnt, ++phdr) {
        if (phdr->p_type == PT_LOAD) {
            DEBUG_DUMP_PHDR(phdr, "PT_LOAD", pid);
            /* we want to map in the segment on a page boundary */
            tmp = base + (phdr->p_vaddr & (~PAGE_MASK));
            /* add the # of bytes we masked off above to the total length. */
            len = phdr->p_filesz + (phdr->p_vaddr & PAGE_MASK);

            TRACE("[ %d - Trying to load segment from '%s' @ 0x%08x "
                  "(0x%08x). p_vaddr=0x%08x p_offset=0x%08x ]\n", pid, si->name,
                  (unsigned)tmp, len, phdr->p_vaddr, phdr->p_offset);
            pbase = mmap(tmp, len, PFLAGS_TO_PROT(phdr->p_flags),
                         MAP_PRIVATE | MAP_FIXED, fd,
                         phdr->p_offset & (~PAGE_MASK));
            if (pbase == MAP_FAILED) {
                ERROR("%d failed to map segment from '%s' @ 0x%08x (0x%08x). "
                      "p_vaddr=0x%08x p_offset=0x%08x\n", pid, si->name,
                      (unsigned)tmp, len, phdr->p_vaddr, phdr->p_offset);
                goto fail;
            }

            /* If 'len' didn't end on page boundary, and it's a writable
             * segment, zero-fill the rest. */
            if ((len & PAGE_MASK) && (phdr->p_flags & PF_W))
                memset((void *)(pbase + len), 0, PAGE_SIZE - (len & PAGE_MASK));

            /* Check to see if we need to extend the map for this segment to
             * cover the diff between filesz and memsz (i.e. for bss).
             *
             *  base           _+---------------------+  page boundary
             *                  .                     .
             *                  |                     |
             *                  .                     .
             *  pbase          _+---------------------+  page boundary
             *                  |                     |
             *                  .                     .
             *  base + p_vaddr _|                     |
             *                  . \          \        .
             *                  . | filesz   |        .
             *  pbase + len    _| /          |        |
             *     <0 pad>      .            .        .
             *  extra_base     _+------------|--------+  page boundary
             *               /  .            .        .
             *               |  .            .        .
             *               |  +------------|--------+  page boundary
             *  extra_len->  |  |            |        |
             *               |  .            | memsz  .
             *               |  .            |        .
             *               \ _|            /        |
             *                  .                     .
             *                  |                     |
             *                 _+---------------------+  page boundary
             */
            tmp = (unsigned char *)(((unsigned)pbase + len + PAGE_SIZE - 1) &
                                    (~PAGE_MASK));
            if (tmp < (base + phdr->p_vaddr + phdr->p_memsz)) {
                extra_len = base + phdr->p_vaddr + phdr->p_memsz - tmp;
                TRACE("[ %5d - Need to extend segment from '%s' @ 0x%08x "
                      "(0x%08x) ]\n", pid, si->name, (unsigned)tmp, extra_len);
                /* map in the extra page(s) as anonymous into the range.
                 * This is probably not necessary as we already mapped in
                 * the entire region previously, but we just want to be
                 * sure. This will also set the right flags on the region
                 * (though we can probably accomplish the same thing with
                 * mprotect).
                 */
                extra_base = mmap((void *)tmp, extra_len,
                                  PFLAGS_TO_PROT(phdr->p_flags),
                                  MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS,
                                  -1, 0);
                if (extra_base == MAP_FAILED) {
                    ERROR("[ %5d - failed to extend segment from '%s' @ 0x%08x "
                          "(0x%08x) ]\n", pid, si->name, (unsigned)tmp,
                          extra_len);
                    goto fail;
                }
                /* TODO: Check if we need to memset-0 this region.
                 * Anonymous mappings are zero-filled copy-on-writes, so we
                 * shouldn't need to. */
                TRACE("[ %5d - Segment from '%s' extended @ 0x%08x "
                      "(0x%08x)\n", pid, si->name, (unsigned)extra_base,
                      extra_len);
            }
            /* set the len here to show the full extent of the segment we
             * just loaded, mostly for debugging */
            len = (((unsigned)base + phdr->p_vaddr + phdr->p_memsz +
                    PAGE_SIZE - 1) & (~PAGE_MASK)) - (unsigned)pbase;
            TRACE("[ %5d - Successfully loaded segment from '%s' @ 0x%08x "
                  "(0x%08x). p_vaddr=0x%08x p_offset=0x%08x\n", pid, si->name,
                  (unsigned)pbase, len, phdr->p_vaddr, phdr->p_offset);
            total_sz += len;
            /* Make the section writable just in case we'll have to write to
             * it during relocation (i.e. text segment). However, we will
             * remember what range of addresses should be write protected.
             *
             */
            if (!(phdr->p_flags & PF_W)) {
                if ((unsigned)pbase < si->wrprotect_start)
                    si->wrprotect_start = (unsigned)pbase;
                if (((unsigned)pbase + len) > si->wrprotect_end)
                    si->wrprotect_end = (unsigned)pbase + len;
                mprotect(pbase, len,
                         PFLAGS_TO_PROT(phdr->p_flags) | PROT_WRITE);
            }
        } else if (phdr->p_type == PT_DYNAMIC) {
            DEBUG_DUMP_PHDR(phdr, "PT_DYNAMIC", pid);
            /* this segment contains the dynamic linking information */
            si->dynamic = (unsigned *)(base + phdr->p_vaddr);
        } else {
#ifdef ANDROID_ARM_LINKER
            if (phdr->p_type == PT_ARM_EXIDX) {
                DEBUG_DUMP_PHDR(phdr, "PT_ARM_EXIDX", pid);
                /* exidx entries (used for stack unwinding) are 8 bytes each.
                 */
                /* NOTE: stored unrelocated; dl_unwind_find_exidx() adds
                 * si->base when it hands this out. */
                si->ARM_exidx = (unsigned *)phdr->p_vaddr;
                si->ARM_exidx_count = phdr->p_memsz / 8;
            }
#endif
        }

    }

    /* Sanity check */
    if (total_sz > si->size) {
        ERROR("%5d - Total length (0x%08x) of mapped segments from '%s' is "
              "greater than what was allocated (0x%08x). THIS IS BAD!\n",
              pid, total_sz, si->name, si->size);
        goto fail;
    }

    TRACE("[ %5d - Finish loading segments for '%s' @ 0x%08x. "
          "Total memory footprint: 0x%08x bytes ]\n", pid, si->name,
          (unsigned)si->base, si->size);
    return 0;

fail:
    /* We can just blindly unmap the entire region even though some things
     * were mapped in originally with anonymous and others could have been
     * been mapped in from the file before we failed. The kernel will unmap
     * all the pages in the range, irrespective of how they got there.
     */
    munmap((void *)si->base, si->size);
    si->flags |= FLAG_ERROR;
    return -1;
}
842
843/* TODO: Implement this to take care of the fact that Android ARM
844 * ELF objects shove everything into a single loadable segment that has the
845 * write bit set. wr_offset is then used to set non-(data|bss) pages to be
846 * non-writable.
847 */
848#if 0
849static unsigned
850get_wr_offset(int fd, const char *name, Elf32_Ehdr *ehdr)
851{
852 Elf32_Shdr *shdr_start;
853 Elf32_Shdr *shdr;
854 int shdr_sz = ehdr->e_shnum * sizeof(Elf32_Shdr);
855 int cnt;
856 unsigned wr_offset = 0xffffffff;
857
858 shdr_start = mmap(0, shdr_sz, PROT_READ, MAP_PRIVATE, fd,
859 ehdr->e_shoff & (~PAGE_MASK));
860 if (shdr_start == MAP_FAILED) {
861 WARN("%5d - Could not read section header info from '%s'. Will not "
862 "not be able to determine write-protect offset.\n", pid, name);
863 return (unsigned)-1;
864 }
865
866 for(cnt = 0, shdr = shdr_start; cnt < ehdr->e_shnum; ++cnt, ++shdr) {
867 if ((shdr->sh_type != SHT_NULL) && (shdr->sh_flags & SHF_WRITE) &&
868 (shdr->sh_addr < wr_offset)) {
869 wr_offset = shdr->sh_addr;
870 }
871 }
872
873 munmap(shdr_start, shdr_sz);
874 return wr_offset;
875}
876#endif
877
878static soinfo *
879load_library(const char *name)
880{
881 int fd = open_library(name);
882 int cnt;
883 unsigned ext_sz;
884 unsigned req_base;
885 void *base;
886 soinfo *si;
887 Elf32_Ehdr *hdr;
888
889 if(fd == -1)
890 return NULL;
891
892 /* We have to read the ELF header to figure out what to do with this image
893 */
894 if (lseek(fd, 0, SEEK_SET) < 0) {
895 ERROR("lseek() failed!\n");
896 goto fail;
897 }
898
899 if ((cnt = read(fd, &__header[0], PAGE_SIZE)) < 0) {
900 ERROR("read() failed!\n");
901 goto fail;
902 }
903
904 /* Parse the ELF header and get the size of the memory footprint for
905 * the library */
906 req_base = get_lib_extents(fd, name, &__header[0], &ext_sz);
907 if (req_base == (unsigned)-1)
908 goto fail;
909 TRACE("[ %5d - '%s' (%s) wants base=0x%08x sz=0x%08x ]\n", pid, name,
910 (req_base ? "prelinked" : "not pre-linked"), req_base, ext_sz);
911
912 /* Carve out a chunk of memory where we will map in the individual
913 * segments */
914 base = alloc_mem_region(name, req_base, ext_sz);
915 if (base == NULL)
916 goto fail;
917 TRACE("[ %5d allocated memory for %s @ %p (0x%08x) ]\n",
918 pid, name, base, (unsigned) ext_sz);
919
920 /* Now configure the soinfo struct where we'll store all of our data
921 * for the ELF object. If the loading fails, we waste the entry, but
922 * same thing would happen if we failed during linking. Configuring the
923 * soinfo struct here is a lot more convenient.
924 */
925 si = alloc_info(name);
926 if (si == NULL)
927 goto fail;
928
929 si->base = (unsigned)base;
930 si->size = ext_sz;
931 si->flags = 0;
932 si->entry = 0;
933 si->dynamic = (unsigned *)-1;
934
935 /* Now actually load the library's segments into right places in memory */
936 if (load_segments(fd, &__header[0], si) < 0)
937 goto fail;
938
939 /* this might not be right. Technically, we don't even need this info
940 * once we go through 'load_segments'. */
941 hdr = (Elf32_Ehdr *)base;
942 si->phdr = (Elf32_Phdr *)((unsigned char *)si->base + hdr->e_phoff);
943 si->phnum = hdr->e_phnum;
944 /**/
945
946 close(fd);
947 return si;
948
949fail:
950 close(fd);
951 return NULL;
952}
953
/* Link a freshly mapped library and account for the address space it
 * consumed from the general pool; on link failure, unmap it and (when
 * possible) roll libbase back.  Returns si, or NULL on failure. */
static soinfo *
init_library(soinfo *si)
{
    unsigned wr_offset = 0xffffffff;
    unsigned libbase_before = 0;
    unsigned libbase_after = 0;

    /* At this point we know that whatever is loaded @ base is a valid ELF
     * shared library whose segments are properly mapped in. */
    TRACE("[ %5d init_library base=0x%08x sz=0x%08x name='%s') ]\n",
          pid, si->base, si->size, si->name);

    /* Anything mapped outside the general pool must have been prelinked. */
    if (si->base < LIBBASE || si->base >= LIBLAST)
        si->flags |= FLAG_PRELINKED;

    /* Adjust libbase for the size of this library, rounded up to
    ** LIBINC alignment. Make note of the previous and current
    ** value of libbase to allow us to roll back in the event of
    ** a link failure.
    */
    if (!(si->flags & FLAG_PRELINKED)) {
        libbase_before = libbase;
        libbase += (si->size + (LIBINC - 1)) & (~(LIBINC - 1));
        libbase_after = libbase;
    }

    if(link_image(si, wr_offset)) {
        /* We failed to link. However, we can only restore libbase
        ** if no additional libraries have moved it since we updated it.
        */
        if(!(si->flags & FLAG_PRELINKED) && (libbase == libbase_after)) {
            libbase = libbase_before;
        }
        munmap((void *)si->base, si->size);
        return NULL;
    }

    return si;
}
993
994soinfo *find_library(const char *name)
995{
996 soinfo *si;
997
998 for(si = solist; si != 0; si = si->next){
999 if(!strcmp(name, si->name)) {
1000 if(si->flags & FLAG_ERROR) return 0;
1001 if(si->flags & FLAG_LINKED) return si;
1002 ERROR("OOPS: %5d recursive link to '%s'\n", pid, si->name);
1003 return 0;
1004 }
1005 }
1006
1007 TRACE("[ %5d '%s' has not been loaded yet. Locating...]\n", pid, name);
1008 si = load_library(name);
1009 if(si == NULL)
1010 return NULL;
1011 return init_library(si);
1012}
1013
1014/* TODO:
1015 * notify gdb of unload
1016 * for non-prelinked libraries, find a way to decrement libbase
1017 */
/* Forward declaration; defined later in this file. */
static void call_destructors(soinfo *si);
/* Drop one reference to 'si'.  When the last reference goes away, run its
 * destructors, recursively unload its DT_NEEDED dependencies, unmap its
 * segments and return the soinfo slot to the pool.
 * Returns the remaining reference count. */
unsigned unload_library(soinfo *si)
{
    unsigned *d;
    if (si->refcount == 1) {
        TRACE("%5d unloading '%s'\n", pid, si->name);
        call_destructors(si);

        /* Walk the dynamic section (tag/value pairs, terminated by a
         * zero tag) and unload every DT_NEEDED dependency. */
        for(d = si->dynamic; *d; d += 2) {
            if(d[0] == DT_NEEDED){
                TRACE("%5d %s needs to unload %s\n", pid,
                      si->name, si->strtab + d[1]);
                soinfo *lsi = find_library(si->strtab + d[1]);
                if(lsi)
                    unload_library(lsi);
                else
                    ERROR("%5d could not unload '%s'\n",
                          pid, si->strtab + d[1]);
            }
        }

        munmap((char *)si->base, si->size);
        free_info(si);
        si->refcount = 0;
    }
    else {
        si->refcount--;
        PRINT("%5d not unloading '%s', decrementing refcount to %d\n",
              pid, si->name, si->refcount);
    }
    return si->refcount;
}
1050
1051/* TODO: don't use unsigned for addrs below. It works, but is not
1052 * ideal. They should probably be either uint32_t, Elf32_Addr, or unsigned
1053 * long.
1054 */
1055static int reloc_library(soinfo *si, Elf32_Rel *rel, unsigned count)
1056{
1057 Elf32_Sym *symtab = si->symtab;
1058 const char *strtab = si->strtab;
1059 Elf32_Sym *s;
1060 unsigned base;
1061 Elf32_Rel *start = rel;
1062 unsigned idx;
1063
1064 for (idx = 0; idx < count; ++idx) {
1065 unsigned type = ELF32_R_TYPE(rel->r_info);
1066 unsigned sym = ELF32_R_SYM(rel->r_info);
1067 unsigned reloc = (unsigned)(rel->r_offset + si->base);
1068 unsigned sym_addr = 0;
1069 char *sym_name = NULL;
1070
1071 DEBUG("%5d Processing '%s' relocation at index %d\n", pid,
1072 si->name, idx);
1073 if(sym != 0) {
1074 s = _do_lookup(si, strtab + symtab[sym].st_name, &base);
1075 if(s == 0) {
1076 ERROR("%5d cannot locate '%s'...\n", pid, sym_name);
1077 return -1;
1078 }
1079#if 0
1080 if((base == 0) && (si->base != 0)){
1081 /* linking from libraries to main image is bad */
1082 ERROR("%5d cannot locate '%s'...\n",
1083 pid, strtab + symtab[sym].st_name);
1084 return -1;
1085 }
1086#endif
1087 if ((s->st_shndx == SHN_UNDEF) && (s->st_value != 0)) {
1088 ERROR("%5d In '%s', shndx=%d && value=0x%08x. We do not "
1089 "handle this yet\n", pid, si->name, s->st_shndx,
1090 s->st_value);
1091 return -1;
1092 }
1093 sym_addr = (unsigned)(s->st_value + base);
1094 sym_name = (char *)(strtab + symtab[sym].st_name);
1095 COUNT_RELOC(RELOC_SYMBOL);
1096 } else {
1097 s = 0;
1098 }
1099
1100/* TODO: This is ugly. Split up the relocations by arch into
1101 * different files.
1102 */
1103 switch(type){
1104#if defined(ANDROID_ARM_LINKER)
1105 case R_ARM_JUMP_SLOT:
1106 case R_ARM_GLOB_DAT:
1107 case R_ARM_ABS32:
1108 COUNT_RELOC(RELOC_ABSOLUTE);
1109 MARK(rel->r_offset);
1110 TRACE_TYPE(RELO, "%5d RELO ABS %08x <- %08x %s\n", pid,
1111 reloc, sym_addr, sym_name);
1112 *((unsigned*)reloc) = sym_addr;
1113 break;
1114#elif defined(ANDROID_X86_LINKER)
1115 case R_386_JUMP_SLOT:
1116 COUNT_RELOC(RELOC_ABSOLUTE);
1117 MARK(rel->r_offset);
1118 TRACE_TYPE(RELO, "%5d RELO JMP_SLOT %08x <- %08x %s\n", pid,
1119 reloc, sym_addr, sym_name);
1120 *((unsigned*)reloc) = sym_addr;
1121 break;
1122 case R_386_GLOB_DAT:
1123 COUNT_RELOC(RELOC_ABSOLUTE);
1124 MARK(rel->r_offset);
1125 TRACE_TYPE(RELO, "%5d RELO GLOB_DAT %08x <- %08x %s\n", pid,
1126 reloc, sym_addr, sym_name);
1127 *((unsigned*)reloc) = sym_addr;
1128 break;
1129#endif /* ANDROID_*_LINKER */
1130
1131#if defined(ANDROID_ARM_LINKER)
1132 case R_ARM_RELATIVE:
1133#elif defined(ANDROID_X86_LINKER)
1134 case R_386_RELATIVE:
1135#endif /* ANDROID_*_LINKER */
1136 COUNT_RELOC(RELOC_RELATIVE);
1137 MARK(rel->r_offset);
1138 if(sym){
1139 ERROR("%5d odd RELATIVE form...\n", pid);
1140 return -1;
1141 }
1142 TRACE_TYPE(RELO, "%5d RELO RELATIVE %08x <- +%08x\n", pid,
1143 reloc, si->base);
1144 *((unsigned*)reloc) += si->base;
1145 break;
1146
1147#if defined(ANDROID_X86_LINKER)
1148 case R_386_32:
1149 COUNT_RELOC(RELOC_RELATIVE);
1150 MARK(rel->r_offset);
1151
1152 TRACE_TYPE(RELO, "%5d RELO R_386_32 %08x <- +%08x %s\n", pid,
1153 reloc, sym_addr, sym_name);
1154 *((unsigned *)reloc) += (unsigned)sym_addr;
1155 break;
1156
1157 case R_386_PC32:
1158 COUNT_RELOC(RELOC_RELATIVE);
1159 MARK(rel->r_offset);
1160 TRACE_TYPE(RELO, "%5d RELO R_386_PC32 %08x <- "
1161 "+%08x (%08x - %08x) %s\n", pid, reloc,
1162 (sym_addr - reloc), sym_addr, reloc, sym_name);
1163 *((unsigned *)reloc) += (unsigned)(sym_addr - reloc);
1164 break;
1165#endif /* ANDROID_X86_LINKER */
1166
1167#ifdef ANDROID_ARM_LINKER
1168 case R_ARM_COPY:
1169 COUNT_RELOC(RELOC_COPY);
1170 MARK(rel->r_offset);
1171 TRACE_TYPE(RELO, "%5d RELO %08x <- %d @ %08x %s\n", pid,
1172 reloc, s->st_size, sym_addr, sym_name);
1173 memcpy((void*)reloc, (void*)sym_addr, s->st_size);
1174 break;
1175#endif /* ANDROID_ARM_LINKER */
1176
1177 default:
1178 ERROR("%5d unknown reloc type %d @ %p (%d)\n",
1179 pid, type, rel, (int) (rel - start));
1180 return -1;
1181 }
1182 rel++;
1183 }
1184 return 0;
1185}
1186
1187static void call_array(unsigned *ctor, int count)
1188{
1189 int n;
1190 for(n = count; n > 0; n--){
1191 TRACE("[ %5d Looking at ctor *0x%08x == 0x%08x ]\n", pid,
1192 (unsigned)ctor, (unsigned)*ctor);
1193 void (*func)() = (void (*)()) *ctor++;
1194 if(((int) func == 0) || ((int) func == -1)) continue;
1195 TRACE("[ %5d Calling func @ 0x%08x ]\n", pid, (unsigned)func);
1196 func();
1197 }
1198}
1199
/* Run 'si's initialization hooks: preinit_array (executable only), then
 * the legacy DT_INIT function, then init_array.  Called from link_image()
 * once relocation has finished. */
static void call_constructors(soinfo *si)
{
    /* TODO: THE ORIGINAL CODE SEEMED TO CALL THE INIT FUNCS IN THE WRONG ORDER.
     * Old order: init, init_array, preinit_array..
     * Correct order: preinit_array, init, init_array.
     * Verify WHY.
     */

    if (si->flags & FLAG_EXE) {
        /* preinit_array is only meaningful (and only legal, per the ELF
         * gABI) for the main executable. */
        TRACE("[ %5d Calling preinit_array @ 0x%08x [%d] for '%s' ]\n",
              pid, (unsigned)si->preinit_array, si->preinit_array_count,
              si->name);
        call_array(si->preinit_array, si->preinit_array_count);
        TRACE("[ %5d Done calling preinit_array for '%s' ]\n", pid, si->name);
    } else {
        if (si->preinit_array) {
            /* Complain, but deliberately do not run it. */
            ERROR("%5d Shared library '%s' has a preinit_array table @ 0x%08x."
                  " This is INVALID.\n", pid, si->name,
                  (unsigned)si->preinit_array);
        }
    }

    // If we have an init section, then we should call it now, to make sure
    // that all the funcs in the .ctors section get run.
    // Note: For ARM, we shouldn't have a .ctor section (should be empty)
    // when we have an (pre)init_array section, but let's be compatible with
    // old (non-eabi) binaries and try the _init (DT_INIT) anyway.
    if (si->init_func) {
        TRACE("[ %5d Calling init_func @ 0x%08x for '%s' ]\n", pid,
              (unsigned)si->init_func, si->name);
        si->init_func();
        TRACE("[ %5d Done calling init_func for '%s' ]\n", pid, si->name);
    }

    if (si->init_array) {
        TRACE("[ %5d Calling init_array @ 0x%08x [%d] for '%s' ]\n", pid,
              (unsigned)si->init_array, si->init_array_count, si->name);
        call_array(si->init_array, si->init_array_count);
        TRACE("[ %5d Done calling init_array for '%s' ]\n", pid, si->name);
    }
}
1241
/* Run 'si's termination hooks in reverse of the init convention:
 * fini_array first, then the legacy DT_FINI function.  Called from
 * unload_library() just before the image is unmapped. */
static void call_destructors(soinfo *si)
{
    if (si->fini_array) {
        TRACE("[ %5d Calling fini_array @ 0x%08x [%d] for '%s' ]\n", pid,
              (unsigned)si->fini_array, si->fini_array_count, si->name);
        call_array(si->fini_array, si->fini_array_count);
        TRACE("[ %5d Done calling fini_array for '%s' ]\n", pid, si->name);
    }

    // If we have an fini section, then we should call it now, to make sure
    // that all the funcs in the .dtors section get run.
    // Note: For ARM, we shouldn't have a .dtor section (should be empty)
    // when we have an fini_array section, but let's be compatible with
    // old (non-eabi) binaries and try the _fini (DT_FINI) anyway.
    if (si->fini_func) {
        TRACE("[ %5d Calling fini_func @ 0x%08x for '%s' ]\n", pid,
              (unsigned)si->fini_func, si->name);
        si->fini_func();
        TRACE("[ %5d Done calling fini_func for '%s' ]\n", pid, si->name);
    }
}
1263
1264/* Force any of the closed stdin, stdout and stderr to be associated with
1265 /dev/null. */
1266static int nullify_closed_stdio (void)
1267{
1268 int dev_null, i, status;
1269 int return_value = 0;
1270
1271 dev_null = open("/dev/null", O_RDWR);
1272 if (dev_null < 0) {
1273 ERROR("Cannot open /dev/null.\n");
1274 return -1;
1275 }
1276 TRACE("[ %5d Opened /dev/null file-descriptor=%d]\n", pid, dev_null);
1277
1278 /* If any of the stdio file descriptors is valid and not associated
1279 with /dev/null, dup /dev/null to it. */
1280 for (i = 0; i < 3; i++) {
1281 /* If it is /dev/null already, we are done. */
1282 if (i == dev_null)
1283 continue;
1284
1285 TRACE("[ %5d Nullifying stdio file descriptor %d]\n", pid, i);
1286 /* The man page of fcntl does not say that fcntl(..,F_GETFL)
1287 can be interrupted but we do this just to be safe. */
1288 do {
1289 status = fcntl(i, F_GETFL);
1290 } while (status < 0 && errno == EINTR);
1291
1292 /* If file is openned, we are good. */
1293 if (status >= 0)
1294 continue;
1295
1296 /* The only error we allow is that the file descriptor does not
1297 exist, in which case we dup /dev/null to it. */
1298 if (errno != EBADF) {
1299 ERROR("nullify_stdio: unhandled error %s\n", strerror(errno));
1300 return_value = -1;
1301 continue;
1302 }
1303
1304 /* Try dupping /dev/null to this stdio file descriptor and
1305 repeat if there is a signal. Note that any errors in closing
1306 the stdio descriptor are lost. */
1307 do {
1308 status = dup2(dev_null, i);
1309 } while (status < 0 && errno == EINTR);
1310
1311 if (status < 0) {
1312 ERROR("nullify_stdio: dup2 error %s\n", strerror(errno));
1313 return_value = -1;
1314 continue;
1315 }
1316 }
1317
1318 /* If /dev/null is not one of the stdio file descriptors, close it. */
1319 if (dev_null > 2) {
1320 TRACE("[ %5d Closing /dev/null file-descriptor=%d]\n", pid, dev_null);
1321 do {
1322 status = close(dev_null);
1323 } while (status < 0 && errno == EINTR);
1324
1325 if (status < 0) {
1326 ERROR("nullify_stdio: close error %s\n", strerror(errno));
1327 return_value = -1;
1328 }
1329 }
1330
1331 return return_value;
1332}
1333
/* Perform the final link step on 'si':
 *   1. For the main executable only, scan its program headers for the
 *      PT_DYNAMIC (and on ARM, PT_ARM_EXIDX) segments and the address
 *      range that should end up write-protected.
 *   2. Parse the dynamic section into the soinfo fields.
 *   3. Load every DT_NEEDED dependency via find_library().
 *   4. Apply PLT and data relocations.
 *   5. Write-protect the read-only range, then run constructors and
 *      tell gdb about the new image.
 * On any failure the soinfo is marked FLAG_ERROR and -1 is returned;
 * returns 0 on success. */
static int link_image(soinfo *si, unsigned wr_offset)
{
    unsigned *d;
    Elf32_Phdr *phdr = si->phdr;
    int phnum = si->phnum;

    INFO("[ %5d linking %s ]\n", pid, si->name);
    DEBUG("%5d si->base = 0x%08x si->flags = 0x%08x\n", pid,
          si->base, si->flags);

    if (si->flags & FLAG_EXE) {
        /* Locate the needed program segments (DYNAMIC/ARM_EXIDX) for
         * linkage info if this is the executable. If this was a
         * dynamic lib, that would have been done at load time.
         *
         * TODO: It's unfortunate that small pieces of this are
         * repeated from the load_library routine. Refactor this just
         * slightly to reuse these bits.
         */
        si->size = 0;
        for(; phnum > 0; --phnum, ++phdr) {
#ifdef ANDROID_ARM_LINKER
            if(phdr->p_type == PT_ARM_EXIDX) {
                /* exidx entries (used for stack unwinding) are 8 bytes each.
                 */
                si->ARM_exidx = (unsigned *)phdr->p_vaddr;
                si->ARM_exidx_count = phdr->p_memsz / 8;
            }
#endif
            if (phdr->p_type == PT_LOAD) {
                /* For the executable, we use the si->size field only in
                   dl_unwind_find_exidx(), so the meaning of si->size
                   is not the size of the executable; it is the last
                   virtual address of the loadable part of the executable;
                   since si->base == 0 for an executable, we use the
                   range [0, si->size) to determine whether a PC value
                   falls within the executable section. Of course, if
                   a value is below phdr->p_vaddr, it's not in the
                   executable section, but a) we shouldn't be asking for
                   such a value anyway, and b) if we have to provide
                   an EXIDX for such a value, then the executable's
                   EXIDX is probably the better choice.
                */
                DEBUG_DUMP_PHDR(phdr, "PT_LOAD", pid);
                if (phdr->p_vaddr + phdr->p_memsz > si->size)
                    si->size = phdr->p_vaddr + phdr->p_memsz;
                /* try to remember what range of addresses should be write
                 * protected */
                if (!(phdr->p_flags & PF_W)) {
                    unsigned _end;

                    if (phdr->p_vaddr < si->wrprotect_start)
                        si->wrprotect_start = phdr->p_vaddr;
                    /* round the segment end up to the next page boundary */
                    _end = (((phdr->p_vaddr + phdr->p_memsz + PAGE_SIZE - 1) &
                             (~PAGE_MASK)));
                    if (_end > si->wrprotect_end)
                        si->wrprotect_end = _end;
                }
            } else if (phdr->p_type == PT_DYNAMIC) {
                /* si->dynamic is initialized to (unsigned *)-1 by the
                 * caller; any other value means we already saw one. */
                if (si->dynamic != (unsigned *)-1) {
                    ERROR("%5d multiple PT_DYNAMIC segments found in '%s'. "
                          "Segment at 0x%08x, previously one found at 0x%08x\n",
                          pid, si->name, si->base + phdr->p_vaddr,
                          (unsigned)si->dynamic);
                    goto fail;
                }
                DEBUG_DUMP_PHDR(phdr, "PT_DYNAMIC", pid);
                si->dynamic = (unsigned *) (si->base + phdr->p_vaddr);
            }
        }
    }

    if (si->dynamic == (unsigned *)-1) {
        ERROR("%5d missing PT_DYNAMIC?!\n", pid);
        goto fail;
    }

    DEBUG("%5d dynamic = %p\n", pid, si->dynamic);

    /* extract useful information from dynamic section */
    /* The dynamic section is a sequence of (tag, value) word pairs; the
     * loop increment plus the switch's *d++ advances two words per entry. */
    for(d = si->dynamic; *d; d++){
        DEBUG("%5d d = %p, d[0] = 0x%08x d[1] = 0x%08x\n", pid, d, d[0], d[1]);
        switch(*d++){
        case DT_HASH:
            /* Hash table layout: nbucket, nchain, buckets[], chains[]. */
            si->nbucket = ((unsigned *) (si->base + *d))[0];
            si->nchain = ((unsigned *) (si->base + *d))[1];
            si->bucket = (unsigned *) (si->base + *d + 8);
            si->chain = (unsigned *) (si->base + *d + 8 + si->nbucket * 4);
            break;
        case DT_STRTAB:
            si->strtab = (const char *) (si->base + *d);
            break;
        case DT_SYMTAB:
            si->symtab = (Elf32_Sym *) (si->base + *d);
            break;
        case DT_PLTREL:
            /* Only Elf32_Rel (implicit-addend) relocations are supported. */
            if(*d != DT_REL) {
                ERROR("DT_RELA not supported\n");
                goto fail;
            }
            break;
        case DT_JMPREL:
            si->plt_rel = (Elf32_Rel*) (si->base + *d);
            break;
        case DT_PLTRELSZ:
            /* size in bytes -> entry count (sizeof(Elf32_Rel) == 8) */
            si->plt_rel_count = *d / 8;
            break;
        case DT_REL:
            si->rel = (Elf32_Rel*) (si->base + *d);
            break;
        case DT_RELSZ:
            /* size in bytes -> entry count (sizeof(Elf32_Rel) == 8) */
            si->rel_count = *d / 8;
            break;
        case DT_PLTGOT:
            /* Save this in case we decide to do lazy binding. We don't yet. */
            si->plt_got = (unsigned *)(si->base + *d);
            break;
        case DT_DEBUG:
            // Set the DT_DEBUG entry to the address of _r_debug for GDB
            *d = (int) &_r_debug;
            break;
        case DT_RELA:
            ERROR("%5d DT_RELA not supported\n", pid);
            goto fail;
        case DT_INIT:
            si->init_func = (void (*)(void))(si->base + *d);
            DEBUG("%5d %s constructors (init func) found at %p\n",
                  pid, si->name, si->init_func);
            break;
        case DT_FINI:
            si->fini_func = (void (*)(void))(si->base + *d);
            DEBUG("%5d %s destructors (fini func) found at %p\n",
                  pid, si->name, si->fini_func);
            break;
        case DT_INIT_ARRAY:
            si->init_array = (unsigned *)(si->base + *d);
            DEBUG("%5d %s constructors (init_array) found at %p\n",
                  pid, si->name, si->init_array);
            break;
        case DT_INIT_ARRAYSZ:
            si->init_array_count = ((unsigned)*d) / sizeof(Elf32_Addr);
            break;
        case DT_FINI_ARRAY:
            si->fini_array = (unsigned *)(si->base + *d);
            DEBUG("%5d %s destructors (fini_array) found at %p\n",
                  pid, si->name, si->fini_array);
            break;
        case DT_FINI_ARRAYSZ:
            si->fini_array_count = ((unsigned)*d) / sizeof(Elf32_Addr);
            break;
        case DT_PREINIT_ARRAY:
            si->preinit_array = (unsigned *)(si->base + *d);
            DEBUG("%5d %s constructors (preinit_array) found at %p\n",
                  pid, si->name, si->preinit_array);
            break;
        case DT_PREINIT_ARRAYSZ:
            si->preinit_array_count = ((unsigned)*d) / sizeof(Elf32_Addr);
            break;
        case DT_TEXTREL:
            /* TODO: make use of this. */
            /* this means that we might have to write into where the text
             * segment was loaded during relocation... Do something with
             * it.
             */
            DEBUG("%5d Text segment should be writable during relocation.\n",
                  pid);
            break;
        }
    }

    DEBUG("%5d si->base = 0x%08x, si->strtab = %p, si->symtab = %p\n",
           pid, si->base, si->strtab, si->symtab);

    if((si->strtab == 0) || (si->symtab == 0)) {
        ERROR("%5d missing essential tables\n", pid);
        goto fail;
    }

    /* Load all dependencies before relocating, since relocations may
     * resolve to symbols defined in them. */
    for(d = si->dynamic; *d; d += 2) {
        if(d[0] == DT_NEEDED){
            DEBUG("%5d %s needs %s\n", pid, si->name, si->strtab + d[1]);
            soinfo *lsi = find_library(si->strtab + d[1]);
            if(lsi == 0) {
                ERROR("%5d could not load '%s'\n", pid, si->strtab + d[1]);
                goto fail;
            }
            lsi->refcount++;
        }
    }

    if(si->plt_rel) {
        DEBUG("[ %5d relocating %s plt ]\n", pid, si->name );
        if(reloc_library(si, si->plt_rel, si->plt_rel_count))
            goto fail;
    }
    if(si->rel) {
        DEBUG("[ %5d relocating %s ]\n", pid, si->name );
        if(reloc_library(si, si->rel, si->rel_count))
            goto fail;
    }

    si->flags |= FLAG_LINKED;
    DEBUG("[ %5d finished linking %s ]\n", pid, si->name);

#if 0
    /* This is the way that the old dynamic linker did protection of
     * non-writable areas. It would scan section headers and find where
     * .text ended (rather where .data/.bss began) and assume that this is
     * the upper range of the non-writable area. This is too coarse,
     * and is kept here for reference until we fully move away from single
     * segment elf objects. See the code in get_wr_offset (also #if'd 0)
     * that made this possible.
     */
    if(wr_offset < 0xffffffff){
        mprotect((void*) si->base, wr_offset, PROT_READ | PROT_EXEC);
    }
#else
    /* TODO: Verify that this does the right thing in all cases, as it
     * presently probably does not. It is possible that an ELF image will
     * come with multiple read-only segments. What we ought to do is scan
     * the program headers again and mprotect all the read-only segments.
     * To prevent re-scanning the program header, we would have to build a
     * list of loadable segments in si, and then scan that instead. */
    if (si->wrprotect_start != 0xffffffff && si->wrprotect_end != 0) {
        mprotect((void *)si->wrprotect_start,
                 si->wrprotect_end - si->wrprotect_start,
                 PROT_READ | PROT_EXEC);
    }
#endif

    /* If this is a SETUID programme, dup /dev/null to openned stdin,
       stdout and stderr to close a security hole described in:

    ftp://ftp.freebsd.org/pub/FreeBSD/CERT/advisories/FreeBSD-SA-02:23.stdio.asc

     */
    if (getuid() != geteuid())
        nullify_closed_stdio ();
    call_constructors(si);
    notify_gdb_of_load(si);
    return 0;

fail:
    ERROR("failed to link %s\n", si->name);
    si->flags |= FLAG_ERROR;
    return -1;
}
1581
/* Placeholder entry point: the linker itself is entered through
 * __linker_init() below, not main(); this stub presumably exists only to
 * satisfy the toolchain's link step -- TODO confirm against the linker's
 * startup assembly. */
int main(int argc, char **argv)
{
    return 0;
}
1586
/* Number of TLS slots; mirrors bionic's BIONIC_TLS_SLOTS. */
#define ANDROID_TLS_SLOTS BIONIC_TLS_SLOTS

/* Static TLS area for the linker process itself, installed via
 * __set_tls() in __linker_init() before any libc machinery exists. */
static void * __tls_area[ANDROID_TLS_SLOTS];
1590
/* Real entry point of the dynamic linker (called from the startup
 * assembly).  'elfdata' points at the initial stack as set up by the
 * kernel: argc, argv[], NULL, envp[], NULL, auxv pairs.  Sets up TLS and
 * debugging, bootstraps the soinfo/link_map for the main executable from
 * the aux vector, links it via link_image(), and returns the executable's
 * entry point address for the caller to jump to. */
unsigned __linker_init(unsigned **elfdata)
{
    /* static so its link_map stays valid after this function returns */
    static soinfo linker_soinfo;

    int argc = (int) *elfdata;
    char **argv = (char**) (elfdata + 1);
    unsigned *vecs = (unsigned*) (argv + argc + 1);
    soinfo *si;
    struct link_map * map;

    pid = getpid();

#if TIMING
    struct timeval t0, t1;
    gettimeofday(&t0, 0);
#endif

    /* Install the linker's own TLS area before anything else runs. */
    __set_tls(__tls_area);
    ((unsigned *)__get_tls())[TLS_SLOT_THREAD_ID] = gettid();

    debugger_init();

    /* skip past the environment, picking up DEBUG=<n> on the way to
     * control the linker's log verbosity */
    while(vecs[0] != 0) {
        if(!strncmp((char*) vecs[0], "DEBUG=", 6)) {
            debug_verbosity = atoi(((char*) vecs[0]) + 6);
        }
        vecs++;
    }
    vecs++;  /* step over the NULL terminating the environment */

    INFO("[ android linker & debugger ]\n");
    DEBUG("%5d elfdata @ 0x%08x\n", pid, (unsigned)elfdata);

    si = alloc_info(argv[0]);
    if(si == 0) {
        exit(-1);
    }

    /* bootstrap the link map, the main exe always needs to be first */
    si->flags |= FLAG_EXE;
    map = &(si->linkmap);

    map->l_addr = 0;
    map->l_name = argv[0];
    map->l_prev = NULL;
    map->l_next = NULL;

    _r_debug.r_map = map;
    r_debug_tail = map;

    /* gdb expects the linker to be in the debug shared object list,
     * and we need to make sure that the reported load address is zero.
     * Without this, gdb gets the wrong idea of where rtld_db_dlactivity()
     * is. Don't use alloc_info(), because the linker shouldn't
     * be on the soinfo list.
     */
    strcpy((char*) linker_soinfo.name, "/system/bin/linker");
    linker_soinfo.flags = 0;
    linker_soinfo.base = 0; // This is the important part; must be zero.
    insert_soinfo_into_debug_map(&linker_soinfo);

    /* extract information passed from the kernel via the aux vector
     * ((type, value) pairs terminated by AT_NULL) */
    while(vecs[0] != 0){
        switch(vecs[0]){
        case AT_PHDR:
            si->phdr = (Elf32_Phdr*) vecs[1];
            break;
        case AT_PHNUM:
            si->phnum = (int) vecs[1];
            break;
        case AT_ENTRY:
            si->entry = vecs[1];
            break;
        }
        vecs += 2;
    }

    /* Sentinel values consumed by link_image(): dynamic == -1 means "not
     * found yet"; the wrprotect range starts inverted so min/max updates
     * work. */
    si->base = 0;
    si->dynamic = (unsigned *)-1;
    si->wrprotect_start = 0xffffffff;
    si->wrprotect_end = 0;

    if(link_image(si, 0)){
        ERROR("CANNOT LINK EXECUTABLE '%s'\n", argv[0]);
        exit(-1);
    }

#if TIMING
    gettimeofday(&t1,NULL);
    PRINT("LINKER TIME: %s: %d microseconds\n", argv[0], (int) (
           (((long long)t1.tv_sec * 1000000LL) + (long long)t1.tv_usec) -
           (((long long)t0.tv_sec * 1000000LL) + (long long)t0.tv_usec)
           ));
#endif
#if STATS
    PRINT("RELO STATS: %s: %d abs, %d rel, %d copy, %d symbol\n", argv[0],
           linker_stats.reloc[RELOC_ABSOLUTE],
           linker_stats.reloc[RELOC_RELATIVE],
           linker_stats.reloc[RELOC_COPY],
           linker_stats.reloc[RELOC_SYMBOL]);
#endif
#if COUNT_PAGES
    {
        /* Count bits set in the page-touch bitmask.
         * NOTE(review): only the low 8 bits of each bitmask word are
         * scanned here -- looks like it should be 32 per word; confirm
         * against how MARK() populates bitmask[]. */
        unsigned n;
        unsigned i;
        unsigned count = 0;
        for(n = 0; n < 4096; n++){
            if(bitmask[n]){
                unsigned x = bitmask[n];
                for(i = 0; i < 8; i++){
                    if(x & 1) count++;
                    x >>= 1;
                }
            }
        }
        PRINT("PAGES MODIFIED: %s: %d (%dKB)\n", argv[0], count, count * 4);
    }
#endif

#if TIMING || STATS || COUNT_PAGES
    fflush(stdout);
#endif

    TRACE("[ %5d Ready to execute '%s' @ 0x%08x ]\n", pid, si->name,
          si->entry);
    return si->entry;
}