1 /*
2 * Copyright (C) 2008, 2009 The Android Open Source Project
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the
13 * distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
29 #include <dlfcn.h>
30 #include <errno.h>
31 #include <fcntl.h>
32 #include <inttypes.h>
33 #include <pthread.h>
34 #include <stdio.h>
35 #include <stdlib.h>
36 #include <string.h>
37 #include <sys/atomics.h>
38 #include <sys/mman.h>
39 #include <sys/stat.h>
40 #include <unistd.h>
42 // Private C library headers.
43 #include "private/bionic_tls.h"
44 #include "private/KernelArgumentBlock.h"
45 #include "private/ScopedPthreadMutexLocker.h"
47 #include "linker.h"
48 #include "linker_debug.h"
49 #include "linker_environ.h"
50 #include "linker_phdr.h"
52 /* >>> IMPORTANT NOTE - READ ME BEFORE MODIFYING <<<
53 *
54 * Do NOT use malloc() and friends or pthread_*() code here.
55 * Don't use printf() either; it's caused mysterious memory
56 * corruption in the past.
57 * The linker runs before we bring up libc and it's easiest
58 * to make sure it does not depend on any complex libc features
59 *
60 * open issues / todo:
61 *
62 * - are we doing everything we should for ARM_COPY relocations?
63 * - cleaner error reporting
64 * - after linking, set as much stuff as possible to READONLY
65 * and NOEXEC
66 */
68 static bool soinfo_link_image(soinfo* si);
69 static ElfW(Addr) get_elf_exec_load_bias(const ElfW(Ehdr)* elf);
71 // We can't use malloc(3) in the dynamic linker. We use a linked list of anonymous
72 // maps, each a single page in size. The pages are broken up into as many struct soinfo
73 // objects as will fit, and they're all threaded together on a free list.
74 #define SOINFO_PER_POOL ((PAGE_SIZE - sizeof(soinfo_pool_t*)) / sizeof(soinfo))
75 struct soinfo_pool_t {
76 soinfo_pool_t* next;
77 soinfo info[SOINFO_PER_POOL];
78 };
79 static struct soinfo_pool_t* gSoInfoPools = NULL;
80 static soinfo* gSoInfoFreeList = NULL;
82 static soinfo* solist = &libdl_info;
83 static soinfo* sonext = &libdl_info;
84 static soinfo* somain; /* main process, always the one after libdl_info */
86 static const char* const gDefaultLdPaths[] = {
87 #if defined(__LP64__)
88 "/vendor/lib64",
89 "/system/lib64",
90 #else
91 "/vendor/lib",
92 "/system/lib",
93 #endif
94 NULL
95 };
97 #define LDPATH_BUFSIZE (LDPATH_MAX*64)
98 #define LDPATH_MAX 8
100 #define LDPRELOAD_BUFSIZE (LDPRELOAD_MAX*64)
101 #define LDPRELOAD_MAX 8
103 static char gLdPathsBuffer[LDPATH_BUFSIZE];
104 static const char* gLdPaths[LDPATH_MAX + 1];
106 static char gLdPreloadsBuffer[LDPRELOAD_BUFSIZE];
107 static const char* gLdPreloadNames[LDPRELOAD_MAX + 1];
109 static soinfo* gLdPreloads[LDPRELOAD_MAX + 1];
111 __LIBC_HIDDEN__ int gLdDebugVerbosity;
113 __LIBC_HIDDEN__ abort_msg_t* gAbortMessage = NULL; // For debuggerd.
115 enum RelocationKind {
116 kRelocAbsolute = 0,
117 kRelocRelative,
118 kRelocCopy,
119 kRelocSymbol,
120 kRelocMax
121 };
#if STATS
// Per-kind relocation counters, only compiled in for STATS builds.
struct linker_stats_t {
  int count[kRelocMax];
};

static linker_stats_t linker_stats;

// Bump the counter for one processed relocation of the given kind.
static void count_relocation(RelocationKind kind) {
  ++linker_stats.count[kind];
}
#else
// No-op in normal builds; the parameter is intentionally unnamed.
static void count_relocation(RelocationKind) {
}
#endif
138 #if COUNT_PAGES
139 static unsigned bitmask[4096];
140 #if defined(__LP64__)
141 #define MARK(offset) \
142 do { \
143 if ((((offset) >> 12) >> 5) < 4096) \
144 bitmask[((offset) >> 12) >> 5] |= (1 << (((offset) >> 12) & 31)); \
145 } while (0)
146 #else
147 #define MARK(offset) \
148 do { \
149 bitmask[((offset) >> 12) >> 3] |= (1 << (((offset) >> 12) & 7)); \
150 } while (0)
151 #endif
152 #else
153 #define MARK(x) do {} while (0)
154 #endif
156 // You shouldn't try to call memory-allocating functions in the dynamic linker.
157 // Guard against the most obvious ones.
158 #define DISALLOW_ALLOCATION(return_type, name, ...) \
159 return_type name __VA_ARGS__ \
160 { \
161 const char* msg = "ERROR: " #name " called from the dynamic linker!\n"; \
162 __libc_format_log(ANDROID_LOG_FATAL, "linker", "%s", msg); \
163 write(2, msg, strlen(msg)); \
164 abort(); \
165 }
166 #define UNUSED __attribute__((unused))
167 DISALLOW_ALLOCATION(void*, malloc, (size_t u UNUSED));
168 DISALLOW_ALLOCATION(void, free, (void* u UNUSED));
169 DISALLOW_ALLOCATION(void*, realloc, (void* u1 UNUSED, size_t u2 UNUSED));
170 DISALLOW_ALLOCATION(void*, calloc, (size_t u1 UNUSED, size_t u2 UNUSED));
172 static char tmp_err_buf[768];
173 static char __linker_dl_err_buf[768];
175 char* linker_get_error_buffer() {
176 return &__linker_dl_err_buf[0];
177 }
179 size_t linker_get_error_buffer_size() {
180 return sizeof(__linker_dl_err_buf);
181 }
183 /*
184 * This function is an empty stub where GDB locates a breakpoint to get notified
185 * about linker activity.
186 */
187 extern "C" void __attribute__((noinline)) __attribute__((visibility("default"))) rtld_db_dlactivity();
189 static r_debug _r_debug = {1, NULL, reinterpret_cast<uintptr_t>(&rtld_db_dlactivity), r_debug::RT_CONSISTENT, 0};
190 static link_map* r_debug_tail = 0;
192 static pthread_mutex_t gDebugMutex = PTHREAD_MUTEX_INITIALIZER;
194 static void insert_soinfo_into_debug_map(soinfo* info) {
195 // Copy the necessary fields into the debug structure.
196 link_map* map = &(info->link_map_head);
197 map->l_addr = info->load_bias;
198 map->l_name = reinterpret_cast<char*>(info->name);
199 map->l_ld = info->dynamic;
201 /* Stick the new library at the end of the list.
202 * gdb tends to care more about libc than it does
203 * about leaf libraries, and ordering it this way
204 * reduces the back-and-forth over the wire.
205 */
206 if (r_debug_tail) {
207 r_debug_tail->l_next = map;
208 map->l_prev = r_debug_tail;
209 map->l_next = 0;
210 } else {
211 _r_debug.r_map = map;
212 map->l_prev = 0;
213 map->l_next = 0;
214 }
215 r_debug_tail = map;
216 }
218 static void remove_soinfo_from_debug_map(soinfo* info) {
219 link_map* map = &(info->link_map_head);
221 if (r_debug_tail == map) {
222 r_debug_tail = map->l_prev;
223 }
225 if (map->l_prev) {
226 map->l_prev->l_next = map->l_next;
227 }
228 if (map->l_next) {
229 map->l_next->l_prev = map->l_prev;
230 }
231 }
233 static void notify_gdb_of_load(soinfo* info) {
234 if (info->flags & FLAG_EXE) {
235 // GDB already knows about the main executable
236 return;
237 }
239 ScopedPthreadMutexLocker locker(&gDebugMutex);
241 _r_debug.r_state = r_debug::RT_ADD;
242 rtld_db_dlactivity();
244 insert_soinfo_into_debug_map(info);
246 _r_debug.r_state = r_debug::RT_CONSISTENT;
247 rtld_db_dlactivity();
248 }
250 static void notify_gdb_of_unload(soinfo* info) {
251 if (info->flags & FLAG_EXE) {
252 // GDB already knows about the main executable
253 return;
254 }
256 ScopedPthreadMutexLocker locker(&gDebugMutex);
258 _r_debug.r_state = r_debug::RT_DELETE;
259 rtld_db_dlactivity();
261 remove_soinfo_from_debug_map(info);
263 _r_debug.r_state = r_debug::RT_CONSISTENT;
264 rtld_db_dlactivity();
265 }
// Force the debugger to re-read the whole library list by cycling r_state
// through RT_ADD back to RT_CONSISTENT without changing the map itself.
// NOTE(review): unlike notify_gdb_of_load/unload this does not take
// gDebugMutex — confirm callers can't race with a concurrent load/unload.
void notify_gdb_of_libraries() {
  _r_debug.r_state = r_debug::RT_ADD;
  rtld_db_dlactivity();
  _r_debug.r_state = r_debug::RT_CONSISTENT;
  rtld_db_dlactivity();
}
274 static bool ensure_free_list_non_empty() {
275 if (gSoInfoFreeList != NULL) {
276 return true;
277 }
279 // Allocate a new pool.
280 soinfo_pool_t* pool = reinterpret_cast<soinfo_pool_t*>(mmap(NULL, sizeof(*pool),
281 PROT_READ|PROT_WRITE,
282 MAP_PRIVATE|MAP_ANONYMOUS, 0, 0));
283 if (pool == MAP_FAILED) {
284 return false;
285 }
287 // Add the pool to our list of pools.
288 pool->next = gSoInfoPools;
289 gSoInfoPools = pool;
291 // Chain the entries in the new pool onto the free list.
292 gSoInfoFreeList = &pool->info[0];
293 soinfo* next = NULL;
294 for (int i = SOINFO_PER_POOL - 1; i >= 0; --i) {
295 pool->info[i].next = next;
296 next = &pool->info[i];
297 }
299 return true;
300 }
302 static void set_soinfo_pool_protection(int protection) {
303 for (soinfo_pool_t* p = gSoInfoPools; p != NULL; p = p->next) {
304 if (mprotect(p, sizeof(*p), protection) == -1) {
305 abort(); // Can't happen.
306 }
307 }
308 }
310 static soinfo* soinfo_alloc(const char* name) {
311 if (strlen(name) >= SOINFO_NAME_LEN) {
312 DL_ERR("library name \"%s\" too long", name);
313 return NULL;
314 }
316 if (!ensure_free_list_non_empty()) {
317 DL_ERR("out of memory when loading \"%s\"", name);
318 return NULL;
319 }
321 // Take the head element off the free list.
322 soinfo* si = gSoInfoFreeList;
323 gSoInfoFreeList = gSoInfoFreeList->next;
325 // Initialize the new element.
326 memset(si, 0, sizeof(soinfo));
327 strlcpy(si->name, name, sizeof(si->name));
328 sonext->next = si;
329 sonext = si;
331 TRACE("name %s: allocated soinfo @ %p", name, si);
332 return si;
333 }
// Return a soinfo to the free list: unlink it from the global solist and
// push it onto gSoInfoFreeList for reuse by soinfo_alloc(). The backing
// pages are never unmapped.
static void soinfo_free(soinfo* si) {
  if (si == NULL) {
    return;
  }

  soinfo *prev = NULL, *trav;

  TRACE("name %s: freeing soinfo @ %p", si->name, si);

  // Find 'si' in solist, remembering the node immediately before it.
  for (trav = solist; trav != NULL; trav = trav->next) {
    if (trav == si)
      break;
    prev = trav;
  }
  if (trav == NULL) {
    /* si was not in solist */
    DL_ERR("name \"%s\" is not in solist!", si->name);
    return;
  }

  /* prev will never be NULL, because the first entry in solist is
     always the static libdl_info.
  */
  prev->next = si->next;
  // Keep the tail pointer valid when removing the last element.
  if (si == sonext) {
    sonext = prev;
  }
  // Push onto the free list.
  si->next = gSoInfoFreeList;
  gSoInfoFreeList = si;
}
// Split 'path' on any character in 'delimiters' into 'array' (NULL-
// terminated, at most max_count entries), using 'buf' as backing storage
// for the component strings. Empty components are skipped.
static void parse_path(const char* path, const char* delimiters,
                       const char** array, char* buf, size_t buf_size, size_t max_count) {
  if (path == NULL) {
    return;
  }

  // strlcpy returns the full length of 'path'; len >= buf_size means the
  // copy was truncated.
  size_t len = strlcpy(buf, path, buf_size);

  size_t i = 0;
  char* buf_p = buf;
  // strsep NUL-terminates each component in place; keep only non-empty ones.
  while (i < max_count && (array[i] = strsep(&buf_p, delimiters))) {
    if (*array[i] != '\0') {
      ++i;
    }
  }

  // Forget the last path if we had to truncate; this occurs if the 2nd to
  // last char isn't '\0' (i.e. wasn't originally a delimiter).
  if (i > 0 && len >= buf_size && buf[buf_size - 2] != '\0') {
    array[i - 1] = NULL;
  } else {
    array[i] = NULL;
  }
}
392 static void parse_LD_LIBRARY_PATH(const char* path) {
393 parse_path(path, ":", gLdPaths,
394 gLdPathsBuffer, sizeof(gLdPathsBuffer), LDPATH_MAX);
395 }
397 static void parse_LD_PRELOAD(const char* path) {
398 // We have historically supported ':' as well as ' ' in LD_PRELOAD.
399 parse_path(path, " :", gLdPreloadNames,
400 gLdPreloadsBuffer, sizeof(gLdPreloadsBuffer), LDPRELOAD_MAX);
401 }
403 #if defined(__arm__)
405 /* For a given PC, find the .so that it belongs to.
406 * Returns the base address of the .ARM.exidx section
407 * for that .so, and the number of 8-byte entries
408 * in that section (via *pcount).
409 *
410 * Intended to be called by libc's __gnu_Unwind_Find_exidx().
411 *
412 * This function is exposed via dlfcn.cpp and libdl.so.
413 */
414 _Unwind_Ptr dl_unwind_find_exidx(_Unwind_Ptr pc, int* pcount) {
415 unsigned addr = (unsigned)pc;
417 for (soinfo* si = solist; si != 0; si = si->next) {
418 if ((addr >= si->base) && (addr < (si->base + si->size))) {
419 *pcount = si->ARM_exidx_count;
420 return (_Unwind_Ptr)si->ARM_exidx;
421 }
422 }
423 *pcount = 0;
424 return NULL;
425 }
427 #endif
429 /* Here, we only have to provide a callback to iterate across all the
430 * loaded libraries. gcc_eh does the rest. */
431 int dl_iterate_phdr(int (*cb)(dl_phdr_info* info, size_t size, void* data), void* data) {
432 int rv = 0;
433 for (soinfo* si = solist; si != NULL; si = si->next) {
434 dl_phdr_info dl_info;
435 dl_info.dlpi_addr = si->link_map_head.l_addr;
436 dl_info.dlpi_name = si->link_map_head.l_name;
437 dl_info.dlpi_phdr = si->phdr;
438 dl_info.dlpi_phnum = si->phnum;
439 rv = cb(&dl_info, sizeof(dl_phdr_info), data);
440 if (rv != 0) {
441 break;
442 }
443 }
444 return rv;
445 }
// Look 'name' up in one library's SysV ELF hash table. Returns the
// matching STB_GLOBAL/STB_WEAK *definition*, or NULL if the library only
// references (SHN_UNDEF) or doesn't contain the symbol.
static ElfW(Sym)* soinfo_elf_lookup(soinfo* si, unsigned hash, const char* name) {
  ElfW(Sym)* symtab = si->symtab;
  const char* strtab = si->strtab;

  TRACE_TYPE(LOOKUP, "SEARCH %s in %s@%p %x %zd",
             name, si->name, reinterpret_cast<void*>(si->base), hash, hash % si->nbucket);

  // Walk the hash chain for this bucket; chain index 0 terminates.
  for (unsigned n = si->bucket[hash % si->nbucket]; n != 0; n = si->chain[n]) {
    ElfW(Sym)* s = symtab + n;
    if (strcmp(strtab + s->st_name, name)) continue;

    /* only concern ourselves with global and weak symbol definitions */
    switch (ELF_ST_BIND(s->st_info)) {
      case STB_GLOBAL:
      case STB_WEAK:
        // SHN_UNDEF: this library merely imports the symbol; keep walking
        // the chain in case a real definition follows.
        if (s->st_shndx == SHN_UNDEF) {
          continue;
        }

        TRACE_TYPE(LOOKUP, "FOUND %s in %s (%p) %zd",
                   name, si->name, reinterpret_cast<void*>(s->st_value),
                   static_cast<size_t>(s->st_size));
        return s;
    }
  }

  return NULL;
}
// Standard System V ELF hash function, matching the DT_HASH tables that
// soinfo_elf_lookup() walks.
static unsigned elfhash(const char* _name) {
  const unsigned char* p = (const unsigned char*) _name;
  unsigned h = 0;

  while (*p != '\0') {
    h = (h << 4) + *p++;
    unsigned g = h & 0xf0000000;
    // 'h ^= g' clears the top nibble; the second xor folds it into bits 4..7.
    h ^= g;
    h ^= g >> 24;
  }
  return h;
}
// Resolve 'name' for a relocation in 'si', following this lookup order:
// main executable and/or local scope (order controlled by DT_SYMBOLIC),
// then LD_PRELOADed libraries, then the DT_NEEDED list. On success, *lsi
// is set to the library that supplied the definition.
static ElfW(Sym)* soinfo_do_lookup(soinfo* si, const char* name, soinfo** lsi, soinfo* needed[]) {
  unsigned elf_hash = elfhash(name);
  ElfW(Sym)* s = NULL;

  if (si != NULL && somain != NULL) {
    /*
     * Local scope is executable scope. Just start looking into it right away
     * for the shortcut.
     */

    if (si == somain) {
      s = soinfo_elf_lookup(si, elf_hash, name);
      if (s != NULL) {
        *lsi = si;
        goto done;
      }
    } else {
      /* Order of symbol lookup is controlled by DT_SYMBOLIC flag */

      /*
       * If this object was built with symbolic relocations disabled, the
       * first place to look to resolve external references is the main
       * executable.
       */

      if (!si->has_DT_SYMBOLIC) {
        DEBUG("%s: looking up %s in executable %s",
              si->name, name, somain->name);
        s = soinfo_elf_lookup(somain, elf_hash, name);
        if (s != NULL) {
          *lsi = somain;
          goto done;
        }
      }

      /* Look for symbols in the local scope (the object who is
       * searching). This happens with C++ templates on x86 for some
       * reason.
       *
       * Notes on weak symbols:
       * The ELF specs are ambiguous about treatment of weak definitions in
       * dynamic linking. Some systems return the first definition found
       * and some the first non-weak definition. This is system dependent.
       * Here we return the first definition found for simplicity. */

      s = soinfo_elf_lookup(si, elf_hash, name);
      if (s != NULL) {
        *lsi = si;
        goto done;
      }

      /*
       * If this object was built with -Bsymbolic and symbol is not found
       * in the local scope, try to find the symbol in the main executable.
       */

      if (si->has_DT_SYMBOLIC) {
        DEBUG("%s: looking up %s in executable %s after local scope",
              si->name, name, somain->name);
        s = soinfo_elf_lookup(somain, elf_hash, name);
        if (s != NULL) {
          *lsi = somain;
          goto done;
        }
      }
    }
  }

  /* Next, look for it in the preloads list */
  for (int i = 0; gLdPreloads[i] != NULL; i++) {
    s = soinfo_elf_lookup(gLdPreloads[i], elf_hash, name);
    if (s != NULL) {
      *lsi = gLdPreloads[i];
      goto done;
    }
  }

  // NOTE(review): 'si' may be NULL here (COPY-relocation handling calls
  // with si == NULL), in which case this DEBUG dereferences it. Harmless
  // when DEBUG compiles away, but worth confirming for debug builds.
  for (int i = 0; needed[i] != NULL; i++) {
    DEBUG("%s: looking up %s in %s",
          si->name, name, needed[i]->name);
    s = soinfo_elf_lookup(needed[i], elf_hash, name);
    if (s != NULL) {
      *lsi = needed[i];
      goto done;
    }
  }

done:
  if (s != NULL) {
    TRACE_TYPE(LOOKUP, "si %s sym %s s->st_value = %p, "
               "found in %s, base = %p, load bias = %p",
               si->name, name, reinterpret_cast<void*>(s->st_value),
               (*lsi)->name, reinterpret_cast<void*>((*lsi)->base),
               reinterpret_cast<void*>((*lsi)->load_bias));
    return s;
  }

  return NULL;
}
589 /* This is used by dlsym(3). It performs symbol lookup only within the
590 specified soinfo object and not in any of its dependencies.
592 TODO: Only looking in the specified soinfo seems wrong. dlsym(3) says
593 that it should do a breadth first search through the dependency
594 tree. This agrees with the ELF spec (aka System V Application
   Binary Interface) where in Chapter 5 it discusses resolving "Shared
596 Object Dependencies" in breadth first search order.
597 */
598 ElfW(Sym)* dlsym_handle_lookup(soinfo* si, const char* name) {
599 return soinfo_elf_lookup(si, elfhash(name), name);
600 }
/* This is used by dlsym(3) to perform a global symbol lookup. If the
603 start value is null (for RTLD_DEFAULT), the search starts at the
604 beginning of the global solist. Otherwise the search starts at the
605 specified soinfo (for RTLD_NEXT).
606 */
607 ElfW(Sym)* dlsym_linear_lookup(const char* name, soinfo** found, soinfo* start) {
608 unsigned elf_hash = elfhash(name);
610 if (start == NULL) {
611 start = solist;
612 }
614 ElfW(Sym)* s = NULL;
615 for (soinfo* si = start; (s == NULL) && (si != NULL); si = si->next) {
616 s = soinfo_elf_lookup(si, elf_hash, name);
617 if (s != NULL) {
618 *found = si;
619 break;
620 }
621 }
623 if (s != NULL) {
624 TRACE_TYPE(LOOKUP, "%s s->st_value = %p, found->base = %p",
625 name, reinterpret_cast<void*>(s->st_value), reinterpret_cast<void*>((*found)->base));
626 }
628 return s;
629 }
631 soinfo* find_containing_library(const void* p) {
632 ElfW(Addr) address = reinterpret_cast<ElfW(Addr)>(p);
633 for (soinfo* si = solist; si != NULL; si = si->next) {
634 if (address >= si->base && address - si->base < si->size) {
635 return si;
636 }
637 }
638 return NULL;
639 }
641 ElfW(Sym)* dladdr_find_symbol(soinfo* si, const void* addr) {
642 ElfW(Addr) soaddr = reinterpret_cast<ElfW(Addr)>(addr) - si->base;
644 // Search the library's symbol table for any defined symbol which
645 // contains this address.
646 for (size_t i = 0; i < si->nchain; ++i) {
647 ElfW(Sym)* sym = &si->symtab[i];
648 if (sym->st_shndx != SHN_UNDEF &&
649 soaddr >= sym->st_value &&
650 soaddr < sym->st_value + sym->st_size) {
651 return sym;
652 }
653 }
655 return NULL;
656 }
658 static int open_library_on_path(const char* name, const char* const paths[]) {
659 char buf[512];
660 for (size_t i = 0; paths[i] != NULL; ++i) {
661 int n = __libc_format_buffer(buf, sizeof(buf), "%s/%s", paths[i], name);
662 if (n < 0 || n >= static_cast<int>(sizeof(buf))) {
663 PRINT("Warning: ignoring very long library path: %s/%s", paths[i], name);
664 continue;
665 }
666 int fd = TEMP_FAILURE_RETRY(open(buf, O_RDONLY | O_CLOEXEC));
667 if (fd != -1) {
668 return fd;
669 }
670 }
671 return -1;
672 }
674 static int open_library(const char* name) {
675 TRACE("[ opening %s ]", name);
677 // If the name contains a slash, we should attempt to open it directly and not search the paths.
678 if (strchr(name, '/') != NULL) {
679 int fd = TEMP_FAILURE_RETRY(open(name, O_RDONLY | O_CLOEXEC));
680 if (fd != -1) {
681 return fd;
682 }
683 // ...but nvidia binary blobs (at least) rely on this behavior, so fall through for now.
684 }
686 // Otherwise we try LD_LIBRARY_PATH first, and fall back to the built-in well known paths.
687 int fd = open_library_on_path(name, gLdPaths);
688 if (fd == -1) {
689 fd = open_library_on_path(name, gDefaultLdPaths);
690 }
691 return fd;
692 }
// Map a library's segments into memory and allocate a bookkeeping soinfo
// for it (registered under its basename). Linking/relocation is the
// caller's job (soinfo_link_image). Returns NULL on failure, with the
// error already recorded via DL_ERR or by ElfReader.
static soinfo* load_library(const char* name) {
  // Open the file.
  int fd = open_library(name);
  if (fd == -1) {
    DL_ERR("library \"%s\" not found", name);
    return NULL;
  }

  // Read the ELF header and load the segments.
  // NOTE(review): fd ownership appears to transfer to ElfReader here —
  // confirm its destructor closes it, otherwise the early returns below
  // (and the success path) leak the descriptor.
  ElfReader elf_reader(name, fd);
  if (!elf_reader.Load()) {
    return NULL;
  }

  // Register under the basename only; see find_loaded_library.
  const char* bname = strrchr(name, '/');
  soinfo* si = soinfo_alloc(bname ? bname + 1 : name);
  if (si == NULL) {
    return NULL;
  }
  // Record where the segments ended up; the rest is filled in by linking.
  si->base = elf_reader.load_start();
  si->size = elf_reader.load_size();
  si->load_bias = elf_reader.load_bias();
  si->flags = 0;
  si->entry = 0;
  si->dynamic = NULL;
  si->phnum = elf_reader.phdr_count();
  si->phdr = elf_reader.loaded_phdr();
  return si;
}
724 static soinfo *find_loaded_library(const char* name) {
725 // TODO: don't use basename only for determining libraries
726 // http://code.google.com/p/android/issues/detail?id=6670
728 const char* bname = strrchr(name, '/');
729 bname = bname ? bname + 1 : name;
731 for (soinfo* si = solist; si != NULL; si = si->next) {
732 if (!strcmp(bname, si->name)) {
733 return si;
734 }
735 }
736 return NULL;
737 }
739 static soinfo* find_library_internal(const char* name) {
740 if (name == NULL) {
741 return somain;
742 }
744 soinfo* si = find_loaded_library(name);
745 if (si != NULL) {
746 if (si->flags & FLAG_LINKED) {
747 return si;
748 }
749 DL_ERR("OOPS: recursive link to \"%s\"", si->name);
750 return NULL;
751 }
753 TRACE("[ '%s' has not been loaded yet. Locating...]", name);
754 si = load_library(name);
755 if (si == NULL) {
756 return NULL;
757 }
759 // At this point we know that whatever is loaded @ base is a valid ELF
760 // shared library whose segments are properly mapped in.
761 TRACE("[ find_library_internal base=%p size=%zu name='%s' ]",
762 reinterpret_cast<void*>(si->base), si->size, si->name);
764 if (!soinfo_link_image(si)) {
765 munmap(reinterpret_cast<void*>(si->base), si->size);
766 soinfo_free(si);
767 return NULL;
768 }
770 return si;
771 }
773 static soinfo* find_library(const char* name) {
774 soinfo* si = find_library_internal(name);
775 if (si != NULL) {
776 si->ref_count++;
777 }
778 return si;
779 }
781 static int soinfo_unload(soinfo* si) {
782 if (si->ref_count == 1) {
783 TRACE("unloading '%s'", si->name);
784 si->CallDestructors();
786 for (ElfW(Dyn)* d = si->dynamic; d->d_tag != DT_NULL; ++d) {
787 if (d->d_tag == DT_NEEDED) {
788 const char* library_name = si->strtab + d->d_un.d_val;
789 TRACE("%s needs to unload %s", si->name, library_name);
790 soinfo_unload(find_loaded_library(library_name));
791 }
792 }
794 munmap(reinterpret_cast<void*>(si->base), si->size);
795 notify_gdb_of_unload(si);
796 soinfo_free(si);
797 si->ref_count = 0;
798 } else {
799 si->ref_count--;
800 TRACE("not unloading '%s', decrementing ref_count to %zd", si->name, si->ref_count);
801 }
802 return 0;
803 }
// Write the built-in default search path ("vendor:system") into 'buffer'.
// Relies on gDefaultLdPaths having at least two entries, which the table
// above provides for both 32- and 64-bit builds.
void do_android_get_LD_LIBRARY_PATH(char* buffer, size_t buffer_size) {
  snprintf(buffer, buffer_size, "%s:%s", gDefaultLdPaths[0], gDefaultLdPaths[1]);
}
809 void do_android_update_LD_LIBRARY_PATH(const char* ld_library_path) {
810 if (!get_AT_SECURE()) {
811 parse_LD_LIBRARY_PATH(ld_library_path);
812 }
813 }
815 soinfo* do_dlopen(const char* name, int flags) {
816 if ((flags & ~(RTLD_NOW|RTLD_LAZY|RTLD_LOCAL|RTLD_GLOBAL)) != 0) {
817 DL_ERR("invalid flags to dlopen: %x", flags);
818 return NULL;
819 }
820 set_soinfo_pool_protection(PROT_READ | PROT_WRITE);
821 soinfo* si = find_library(name);
822 if (si != NULL) {
823 si->CallConstructors();
824 }
825 set_soinfo_pool_protection(PROT_READ);
826 return si;
827 }
829 int do_dlclose(soinfo* si) {
830 set_soinfo_pool_protection(PROT_READ | PROT_WRITE);
831 int result = soinfo_unload(si);
832 set_soinfo_pool_protection(PROT_READ);
833 return result;
834 }
836 #if defined(USE_RELA)
837 static int soinfo_relocate(soinfo* si, ElfW(Rela)* rela, unsigned count, soinfo* needed[]) {
838 ElfW(Sym)* s;
839 soinfo* lsi;
841 for (size_t idx = 0; idx < count; ++idx, ++rela) {
842 unsigned type = ELFW(R_TYPE)(rela->r_info);
843 unsigned sym = ELFW(R_SYM)(rela->r_info);
844 ElfW(Addr) reloc = static_cast<ElfW(Addr)>(rela->r_offset + si->load_bias);
845 ElfW(Addr) sym_addr = 0;
846 const char* sym_name = NULL;
848 DEBUG("Processing '%s' relocation at index %zd", si->name, idx);
849 if (type == 0) { // R_*_NONE
850 continue;
851 }
852 if (sym != 0) {
853 sym_name = reinterpret_cast<const char*>(si->strtab + si->symtab[sym].st_name);
854 s = soinfo_do_lookup(si, sym_name, &lsi, needed);
855 if (s == NULL) {
856 // We only allow an undefined symbol if this is a weak reference...
857 s = &si->symtab[sym];
858 if (ELF_ST_BIND(s->st_info) != STB_WEAK) {
859 DL_ERR("cannot locate symbol \"%s\" referenced by \"%s\"...", sym_name, si->name);
860 return -1;
861 }
863 /* IHI0044C AAELF 4.5.1.1:
865 Libraries are not searched to resolve weak references.
866 It is not an error for a weak reference to remain unsatisfied.
868 During linking, the value of an undefined weak reference is:
869 - Zero if the relocation type is absolute
870 - The address of the place if the relocation is pc-relative
871 - The address of nominal base address if the relocation
872 type is base-relative.
873 */
875 switch (type) {
876 #if defined(__aarch64__)
877 case R_AARCH64_JUMP_SLOT:
878 case R_AARCH64_GLOB_DAT:
879 case R_AARCH64_ABS64:
880 case R_AARCH64_ABS32:
881 case R_AARCH64_ABS16:
882 case R_AARCH64_RELATIVE:
883 /*
884 * The sym_addr was initialized to be zero above, or the relocation
885 * code below does not care about value of sym_addr.
886 * No need to do anything.
887 */
888 break;
889 #elif defined(__x86_64__)
890 case R_X86_64_JUMP_SLOT:
891 case R_X86_64_GLOB_DAT:
892 case R_X86_64_32:
893 case R_X86_64_RELATIVE:
894 // No need to do anything.
895 break;
896 case R_X86_64_PC32:
897 sym_addr = reloc;
898 break;
899 #endif
900 default:
901 DL_ERR("unknown weak reloc type %d @ %p (%zu)", type, rela, idx);
902 return -1;
903 }
904 } else {
905 // We got a definition.
906 sym_addr = static_cast<ElfW(Addr)>(s->st_value + lsi->load_bias);
907 }
908 count_relocation(kRelocSymbol);
909 } else {
910 s = NULL;
911 }
913 switch (type) {
914 #if defined(__aarch64__)
915 case R_AARCH64_JUMP_SLOT:
916 count_relocation(kRelocAbsolute);
917 MARK(rela->r_offset);
918 TRACE_TYPE(RELO, "RELO JMP_SLOT %16llx <- %16llx %s\n",
919 reloc, (sym_addr + rela->r_addend), sym_name);
920 *reinterpret_cast<ElfW(Addr)*>(reloc) = (sym_addr + rela->r_addend);
921 break;
922 case R_AARCH64_GLOB_DAT:
923 count_relocation(kRelocAbsolute);
924 MARK(rela->r_offset);
925 TRACE_TYPE(RELO, "RELO GLOB_DAT %16llx <- %16llx %s\n",
926 reloc, (sym_addr + rela->r_addend), sym_name);
927 *reinterpret_cast<ElfW(Addr)*>(reloc) = (sym_addr + rela->r_addend);
928 break;
929 case R_AARCH64_ABS64:
930 count_relocation(kRelocAbsolute);
931 MARK(rela->r_offset);
932 TRACE_TYPE(RELO, "RELO ABS64 %16llx <- %16llx %s\n",
933 reloc, (sym_addr + rela->r_addend), sym_name);
934 *reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr + rela->r_addend);
935 break;
936 case R_AARCH64_ABS32:
937 count_relocation(kRelocAbsolute);
938 MARK(rela->r_offset);
939 TRACE_TYPE(RELO, "RELO ABS32 %16llx <- %16llx %s\n",
940 reloc, (sym_addr + rela->r_addend), sym_name);
941 if ((static_cast<ElfW(Addr)>(INT32_MIN) <= (*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend))) &&
942 ((*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend)) <= static_cast<ElfW(Addr)>(UINT32_MAX))) {
943 *reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr + rela->r_addend);
944 } else {
945 DL_ERR("0x%016llx out of range 0x%016llx to 0x%016llx",
946 (*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend)),
947 static_cast<ElfW(Addr)>(INT32_MIN),
948 static_cast<ElfW(Addr)>(UINT32_MAX));
949 return -1;
950 }
951 break;
952 case R_AARCH64_ABS16:
953 count_relocation(kRelocAbsolute);
954 MARK(rela->r_offset);
955 TRACE_TYPE(RELO, "RELO ABS16 %16llx <- %16llx %s\n",
956 reloc, (sym_addr + rela->r_addend), sym_name);
957 if ((static_cast<ElfW(Addr)>(INT16_MIN) <= (*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend))) &&
958 ((*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend)) <= static_cast<ElfW(Addr)>(UINT16_MAX))) {
959 *reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr + rela->r_addend);
960 } else {
961 DL_ERR("0x%016llx out of range 0x%016llx to 0x%016llx",
962 (*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend)),
963 static_cast<ElfW(Addr)>(INT16_MIN),
964 static_cast<ElfW(Addr)>(UINT16_MAX));
965 return -1;
966 }
967 break;
968 case R_AARCH64_PREL64:
969 count_relocation(kRelocRelative);
970 MARK(rela->r_offset);
971 TRACE_TYPE(RELO, "RELO REL64 %16llx <- %16llx - %16llx %s\n",
972 reloc, (sym_addr + rela->r_addend), rela->r_offset, sym_name);
973 *reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr + rela->r_addend) - rela->r_offset;
974 break;
975 case R_AARCH64_PREL32:
976 count_relocation(kRelocRelative);
977 MARK(rela->r_offset);
978 TRACE_TYPE(RELO, "RELO REL32 %16llx <- %16llx - %16llx %s\n",
979 reloc, (sym_addr + rela->r_addend), rela->r_offset, sym_name);
980 if ((static_cast<ElfW(Addr)>(INT32_MIN) <= (*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset))) &&
981 ((*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)) <= static_cast<ElfW(Addr)>(UINT32_MAX))) {
982 *reinterpret_cast<ElfW(Addr)*>(reloc) += ((sym_addr + rela->r_addend) - rela->r_offset);
983 } else {
984 DL_ERR("0x%016llx out of range 0x%016llx to 0x%016llx",
985 (*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)),
986 static_cast<ElfW(Addr)>(INT32_MIN),
987 static_cast<ElfW(Addr)>(UINT32_MAX));
988 return -1;
989 }
990 break;
991 case R_AARCH64_PREL16:
992 count_relocation(kRelocRelative);
993 MARK(rela->r_offset);
994 TRACE_TYPE(RELO, "RELO REL16 %16llx <- %16llx - %16llx %s\n",
995 reloc, (sym_addr + rela->r_addend), rela->r_offset, sym_name);
996 if ((static_cast<ElfW(Addr)>(INT16_MIN) <= (*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset))) &&
997 ((*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)) <= static_cast<ElfW(Addr)>(UINT16_MAX))) {
998 *reinterpret_cast<ElfW(Addr)*>(reloc) += ((sym_addr + rela->r_addend) - rela->r_offset);
999 } else {
1000 DL_ERR("0x%016llx out of range 0x%016llx to 0x%016llx",
1001 (*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)),
1002 static_cast<ElfW(Addr)>(INT16_MIN),
1003 static_cast<ElfW(Addr)>(UINT16_MAX));
1004 return -1;
1005 }
1006 break;
1008 case R_AARCH64_RELATIVE:
1009 count_relocation(kRelocRelative);
1010 MARK(rela->r_offset);
1011 if (sym) {
1012 DL_ERR("odd RELATIVE form...");
1013 return -1;
1014 }
1015 TRACE_TYPE(RELO, "RELO RELATIVE %16llx <- %16llx\n",
1016 reloc, (si->base + rela->r_addend));
1017 *reinterpret_cast<ElfW(Addr)*>(reloc) = (si->base + rela->r_addend);
1018 break;
1020 case R_AARCH64_COPY:
1021 if ((si->flags & FLAG_EXE) == 0) {
1022 /*
1023 * http://infocenter.arm.com/help/topic/com.arm.doc.ihi0044d/IHI0044D_aaelf.pdf
1024 *
1025 * Section 4.7.1.10 "Dynamic relocations"
1026 * R_AARCH64_COPY may only appear in executable objects where e_type is
1027 * set to ET_EXEC.
1028 *
1029 * FLAG_EXE is set for both ET_DYN and ET_EXEC executables.
1030 * We should explicitly disallow ET_DYN executables from having
1031 * R_AARCH64_COPY relocations.
1032 */
1033 DL_ERR("%s R_AARCH64_COPY relocations only supported for ET_EXEC", si->name);
1034 return -1;
1035 }
1036 count_relocation(kRelocCopy);
1037 MARK(rela->r_offset);
1038 TRACE_TYPE(RELO, "RELO COPY %16llx <- %lld @ %16llx %s\n",
1039 reloc,
1040 s->st_size,
1041 (sym_addr + rela->r_addend),
1042 sym_name);
1043 if (reloc == (sym_addr + rela->r_addend)) {
1044 ElfW(Sym)* src = soinfo_do_lookup(NULL, sym_name, &lsi, needed);
1046 if (src == NULL) {
1047 DL_ERR("%s R_AARCH64_COPY relocation source cannot be resolved", si->name);
1048 return -1;
1049 }
1050 if (lsi->has_DT_SYMBOLIC) {
1051 DL_ERR("%s invalid R_AARCH64_COPY relocation against DT_SYMBOLIC shared "
1052 "library %s (built with -Bsymbolic?)", si->name, lsi->name);
1053 return -1;
1054 }
1055 if (s->st_size < src->st_size) {
1056 DL_ERR("%s R_AARCH64_COPY relocation size mismatch (%lld < %lld)",
1057 si->name, s->st_size, src->st_size);
1058 return -1;
1059 }
1060 memcpy(reinterpret_cast<void*>(reloc),
1061 reinterpret_cast<void*>(src->st_value + lsi->load_bias), src->st_size);
1062 } else {
1063 DL_ERR("%s R_AARCH64_COPY relocation target cannot be resolved", si->name);
1064 return -1;
1065 }
1066 break;
1067 case R_AARCH64_TLS_TPREL64:
1068 TRACE_TYPE(RELO, "RELO TLS_TPREL64 *** %16llx <- %16llx - %16llx\n",
1069 reloc, (sym_addr + rela->r_addend), rela->r_offset);
1070 break;
1071 case R_AARCH64_TLS_DTPREL32:
1072 TRACE_TYPE(RELO, "RELO TLS_DTPREL32 *** %16llx <- %16llx - %16llx\n",
1073 reloc, (sym_addr + rela->r_addend), rela->r_offset);
1074 break;
1075 #elif defined(__x86_64__)
1076 case R_X86_64_JUMP_SLOT:
1077 count_relocation(kRelocAbsolute);
1078 MARK(rela->r_offset);
1079 TRACE_TYPE(RELO, "RELO JMP_SLOT %08zx <- %08zx %s", static_cast<size_t>(reloc),
1080 static_cast<size_t>(sym_addr + rela->r_addend), sym_name);
1081 *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend;
1082 break;
1083 case R_X86_64_GLOB_DAT:
1084 count_relocation(kRelocAbsolute);
1085 MARK(rela->r_offset);
1086 TRACE_TYPE(RELO, "RELO GLOB_DAT %08zx <- %08zx %s", static_cast<size_t>(reloc),
1087 static_cast<size_t>(sym_addr + rela->r_addend), sym_name);
1088 *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend;
1089 break;
1090 case R_X86_64_RELATIVE:
1091 count_relocation(kRelocRelative);
1092 MARK(rela->r_offset);
1093 if (sym) {
1094 DL_ERR("odd RELATIVE form...");
1095 return -1;
1096 }
1097 TRACE_TYPE(RELO, "RELO RELATIVE %08zx <- +%08zx", static_cast<size_t>(reloc),
1098 static_cast<size_t>(si->base));
1099 *reinterpret_cast<ElfW(Addr)*>(reloc) = si->base + rela->r_addend;
1100 break;
1101 case R_X86_64_32:
1102 count_relocation(kRelocRelative);
1103 MARK(rela->r_offset);
1104 TRACE_TYPE(RELO, "RELO R_X86_64_32 %08zx <- +%08zx %s", static_cast<size_t>(reloc),
1105 static_cast<size_t>(sym_addr), sym_name);
1106 *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend;
1107 break;
1108 case R_X86_64_64:
1109 count_relocation(kRelocRelative);
1110 MARK(rela->r_offset);
1111 TRACE_TYPE(RELO, "RELO R_X86_64_64 %08zx <- +%08zx %s", static_cast<size_t>(reloc),
1112 static_cast<size_t>(sym_addr), sym_name);
1113 *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend;
1114 break;
1115 case R_X86_64_PC32:
1116 count_relocation(kRelocRelative);
1117 MARK(rela->r_offset);
1118 TRACE_TYPE(RELO, "RELO R_X86_64_PC32 %08zx <- +%08zx (%08zx - %08zx) %s",
1119 static_cast<size_t>(reloc), static_cast<size_t>(sym_addr - reloc),
1120 static_cast<size_t>(sym_addr), static_cast<size_t>(reloc), sym_name);
1121 *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend - reloc;
1122 break;
1123 #endif
1125 default:
1126 DL_ERR("unknown reloc type %d @ %p (%zu)", type, rela, idx);
1127 return -1;
1128 }
1129 }
1130 return 0;
1131 }
1133 #else // REL, not RELA.
// Applies the REL-format relocation entries in 'rel' (count entries) to 'si'.
// Symbols are resolved via soinfo_do_lookup() against 'si', its 'needed'
// dependencies and the preloads/global list. Returns 0 on success, -1 on
// failure (unresolvable non-weak symbol, disallowed or unknown reloc type).
static int soinfo_relocate(soinfo* si, ElfW(Rel)* rel, unsigned count, soinfo* needed[]) {
  ElfW(Sym)* s;
  soinfo* lsi;

  for (size_t idx = 0; idx < count; ++idx, ++rel) {
    unsigned type = ELFW(R_TYPE)(rel->r_info);
    // TODO: don't use unsigned for 'sym'. Use uint32_t or ElfW(Addr) instead.
    unsigned sym = ELFW(R_SYM)(rel->r_info);
    // Address of the place being relocated, after applying the load bias.
    ElfW(Addr) reloc = static_cast<ElfW(Addr)>(rel->r_offset + si->load_bias);
    ElfW(Addr) sym_addr = 0;
    const char* sym_name = NULL;

    DEBUG("Processing '%s' relocation at index %zd", si->name, idx);
    if (type == 0) { // R_*_NONE
      continue;
    }
    if (sym != 0) {
      sym_name = reinterpret_cast<const char*>(si->strtab + si->symtab[sym].st_name);
      s = soinfo_do_lookup(si, sym_name, &lsi, needed);
      if (s == NULL) {
        // We only allow an undefined symbol if this is a weak reference...
        s = &si->symtab[sym];
        if (ELF_ST_BIND(s->st_info) != STB_WEAK) {
          DL_ERR("cannot locate symbol \"%s\" referenced by \"%s\"...", sym_name, si->name);
          return -1;
        }

        /* IHI0044C AAELF 4.5.1.1:

           Libraries are not searched to resolve weak references.
           It is not an error for a weak reference to remain
           unsatisfied.

           During linking, the value of an undefined weak reference is:
           - Zero if the relocation type is absolute
           - The address of the place if the relocation is pc-relative
           - The address of nominal base address if the relocation
             type is base-relative.
         */

        switch (type) {
#if defined(__arm__)
          case R_ARM_JUMP_SLOT:
          case R_ARM_GLOB_DAT:
          case R_ARM_ABS32:
          case R_ARM_RELATIVE:    /* Don't care. */
            // sym_addr was initialized to be zero above or relocation
            // code below does not care about value of sym_addr.
            // No need to do anything.
            break;
#elif defined(__i386__)
          case R_386_JMP_SLOT:
          case R_386_GLOB_DAT:
          case R_386_32:
          case R_386_RELATIVE:    /* Don't care. */
            // sym_addr was initialized to be zero above or relocation
            // code below does not care about value of sym_addr.
            // No need to do anything.
            break;
          case R_386_PC32:
            // pc-relative: an undefined weak resolves to the place itself,
            // so the applied delta (sym_addr - reloc) is zero.
            sym_addr = reloc;
            break;
#endif

#if defined(__arm__)
          case R_ARM_COPY:
            // Fall through. Can't really copy if weak symbol is not found at run-time.
#endif
          default:
            DL_ERR("unknown weak reloc type %d @ %p (%zu)", type, rel, idx);
            return -1;
        }
      } else {
        // We got a definition.
        sym_addr = static_cast<ElfW(Addr)>(s->st_value + lsi->load_bias);
      }
      count_relocation(kRelocSymbol);
    } else {
      s = NULL;
    }

    switch (type) {
#if defined(__arm__)
      case R_ARM_JUMP_SLOT:
        count_relocation(kRelocAbsolute);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO JMP_SLOT %08x <- %08x %s", reloc, sym_addr, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr;
        break;
      case R_ARM_GLOB_DAT:
        count_relocation(kRelocAbsolute);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO GLOB_DAT %08x <- %08x %s", reloc, sym_addr, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr;
        break;
      case R_ARM_ABS32:
        count_relocation(kRelocAbsolute);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO ABS %08x <- %08x %s", reloc, sym_addr, sym_name);
        // Note: REL format has no explicit addend; the addend is the value
        // already stored at the relocated location, hence '+='.
        *reinterpret_cast<ElfW(Addr)*>(reloc) += sym_addr;
        break;
      case R_ARM_REL32:
        count_relocation(kRelocRelative);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO REL32 %08x <- %08x - %08x %s",
                   reloc, sym_addr, rel->r_offset, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) += sym_addr - rel->r_offset;
        break;
      case R_ARM_COPY:
        if ((si->flags & FLAG_EXE) == 0) {
          /*
           * http://infocenter.arm.com/help/topic/com.arm.doc.ihi0044d/IHI0044D_aaelf.pdf
           *
           * Section 4.7.1.10 "Dynamic relocations"
           * R_ARM_COPY may only appear in executable objects where e_type is
           * set to ET_EXEC.
           *
           * TODO: FLAG_EXE is set for both ET_DYN and ET_EXEC executables.
           * We should explicitly disallow ET_DYN executables from having
           * R_ARM_COPY relocations.
           */
          DL_ERR("%s R_ARM_COPY relocations only supported for ET_EXEC", si->name);
          return -1;
        }
        count_relocation(kRelocCopy);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO %08x <- %d @ %08x %s", reloc, s->st_size, sym_addr, sym_name);
        if (reloc == sym_addr) {
          // Re-resolve the symbol skipping the executable itself (NULL caller)
          // so we copy from the shared library's definition, not our own.
          ElfW(Sym)* src = soinfo_do_lookup(NULL, sym_name, &lsi, needed);

          if (src == NULL) {
            DL_ERR("%s R_ARM_COPY relocation source cannot be resolved", si->name);
            return -1;
          }
          if (lsi->has_DT_SYMBOLIC) {
            DL_ERR("%s invalid R_ARM_COPY relocation against DT_SYMBOLIC shared "
                   "library %s (built with -Bsymbolic?)", si->name, lsi->name);
            return -1;
          }
          if (s->st_size < src->st_size) {
            DL_ERR("%s R_ARM_COPY relocation size mismatch (%d < %d)",
                   si->name, s->st_size, src->st_size);
            return -1;
          }
          memcpy(reinterpret_cast<void*>(reloc),
                 reinterpret_cast<void*>(src->st_value + lsi->load_bias), src->st_size);
        } else {
          DL_ERR("%s R_ARM_COPY relocation target cannot be resolved", si->name);
          return -1;
        }
        break;
#elif defined(__i386__)
      case R_386_JMP_SLOT:
        count_relocation(kRelocAbsolute);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO JMP_SLOT %08x <- %08x %s", reloc, sym_addr, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr;
        break;
      case R_386_GLOB_DAT:
        count_relocation(kRelocAbsolute);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO GLOB_DAT %08x <- %08x %s", reloc, sym_addr, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr;
        break;
      case R_386_32:
        count_relocation(kRelocRelative);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO R_386_32 %08x <- +%08x %s", reloc, sym_addr, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) += sym_addr;
        break;
      case R_386_PC32:
        count_relocation(kRelocRelative);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO R_386_PC32 %08x <- +%08x (%08x - %08x) %s",
                   reloc, (sym_addr - reloc), sym_addr, reloc, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr - reloc);
        break;
#elif defined(__mips__)
      case R_MIPS_REL32:
#if defined(__LP64__)
        // MIPS Elf64_Rel entries contain compound relocations
        // We only handle the R_MIPS_NONE|R_MIPS_64|R_MIPS_REL32 case
        if (ELF64_R_TYPE2(rel->r_info) != R_MIPS_64 ||
            ELF64_R_TYPE3(rel->r_info) != R_MIPS_NONE) {
          DL_ERR("Unexpected compound relocation type:%d type2:%d type3:%d @ %p (%zu)",
                 type, (unsigned)ELF64_R_TYPE2(rel->r_info),
                 (unsigned)ELF64_R_TYPE3(rel->r_info), rel, idx);
          return -1;
        }
#endif
        count_relocation(kRelocAbsolute);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO REL32 %08zx <- %08zx %s", static_cast<size_t>(reloc),
                   static_cast<size_t>(sym_addr), sym_name ? sym_name : "*SECTIONHDR*");
        if (s) {
          *reinterpret_cast<ElfW(Addr)*>(reloc) += sym_addr;
        } else {
          // No symbol: treat as base-relative.
          *reinterpret_cast<ElfW(Addr)*>(reloc) += si->base;
        }
        break;
#endif

#if defined(__arm__)
      case R_ARM_RELATIVE:
#elif defined(__i386__)
      case R_386_RELATIVE:
#endif
        count_relocation(kRelocRelative);
        MARK(rel->r_offset);
        if (sym) {
          // RELATIVE relocations must not reference a symbol.
          DL_ERR("odd RELATIVE form...");
          return -1;
        }
        TRACE_TYPE(RELO, "RELO RELATIVE %p <- +%p",
                   reinterpret_cast<void*>(reloc), reinterpret_cast<void*>(si->base));
        *reinterpret_cast<ElfW(Addr)*>(reloc) += si->base;
        break;

      default:
        DL_ERR("unknown reloc type %d @ %p (%zu)", type, rel, idx);
        return -1;
    }
  }
  return 0;
}
1360 #endif
1362 #if defined(__mips__)
// Fixes up the MIPS Global Offset Table for 'si': relocates the local GOT
// entries by the load bias and resolves each global GOT entry via symbol
// lookup (against 'si', its 'needed' libraries and the global list).
// Returns false only when a non-weak global symbol cannot be resolved.
static bool mips_relocate_got(soinfo* si, soinfo* needed[]) {
  ElfW(Addr)** got = si->plt_got;
  if (got == NULL) {
    // No GOT (DT_PLTGOT absent): nothing to do.
    return true;
  }
  unsigned local_gotno = si->mips_local_gotno;
  unsigned gotsym = si->mips_gotsym;
  unsigned symtabno = si->mips_symtabno;
  ElfW(Sym)* symtab = si->symtab;

  // got[0] is the address of the lazy resolver function.
  // got[1] may be used for a GNU extension.
  // Set it to a recognizable address in case someone calls it (should be _rtld_bind_start).
  // FIXME: maybe this should be in a separate routine?
  if ((si->flags & FLAG_LINKER) == 0) {
    size_t g = 0;
    got[g++] = reinterpret_cast<ElfW(Addr)*>(0xdeadbeef);
    // A negative-looking got[1] marks the optional GNU extension slot;
    // poison it too so accidental calls are recognizable.
    if (reinterpret_cast<intptr_t>(got[g]) < 0) {
      got[g++] = reinterpret_cast<ElfW(Addr)*>(0xdeadfeed);
    }
    // Relocate the local GOT entries.
    for (; g < local_gotno; g++) {
      got[g] = reinterpret_cast<ElfW(Addr)*>(reinterpret_cast<uintptr_t>(got[g]) + si->load_bias);
    }
  }

  // Now for the global GOT entries...
  // Global GOT entries run parallel to symtab entries [gotsym, symtabno).
  ElfW(Sym)* sym = symtab + gotsym;
  got = si->plt_got + local_gotno;
  for (size_t g = gotsym; g < symtabno; g++, sym++, got++) {
    // This is an undefined reference... try to locate it.
    const char* sym_name = si->strtab + sym->st_name;
    soinfo* lsi;
    ElfW(Sym)* s = soinfo_do_lookup(si, sym_name, &lsi, needed);
    if (s == NULL) {
      // We only allow an undefined symbol if this is a weak reference.
      s = &symtab[g];
      if (ELF_ST_BIND(s->st_info) != STB_WEAK) {
        DL_ERR("cannot locate \"%s\"...", sym_name);
        return false;
      }
      // Undefined weak reference: resolves to zero.
      *got = 0;
    } else {
      // FIXME: is this sufficient?
      // For reference see NetBSD link loader
      // http://cvsweb.netbsd.org/bsdweb.cgi/src/libexec/ld.elf_so/arch/mips/mips_reloc.c?rev=1.53&content-type=text/x-cvsweb-markup
      *got = reinterpret_cast<ElfW(Addr)*>(lsi->load_bias + s->st_value);
    }
  }
  return true;
}
1414 #endif
1416 void soinfo::CallArray(const char* array_name UNUSED, linker_function_t* functions, size_t count, bool reverse) {
1417 if (functions == NULL) {
1418 return;
1419 }
1421 TRACE("[ Calling %s (size %zd) @ %p for '%s' ]", array_name, count, functions, name);
1423 int begin = reverse ? (count - 1) : 0;
1424 int end = reverse ? -1 : count;
1425 int step = reverse ? -1 : 1;
1427 for (int i = begin; i != end; i += step) {
1428 TRACE("[ %s[%d] == %p ]", array_name, i, functions[i]);
1429 CallFunction("function", functions[i]);
1430 }
1432 TRACE("[ Done calling %s for '%s' ]", array_name, name);
1433 }
1435 void soinfo::CallFunction(const char* function_name UNUSED, linker_function_t function) {
1436 if (function == NULL || reinterpret_cast<uintptr_t>(function) == static_cast<uintptr_t>(-1)) {
1437 return;
1438 }
1440 TRACE("[ Calling %s @ %p for '%s' ]", function_name, function, name);
1441 function();
1442 TRACE("[ Done calling %s @ %p for '%s' ]", function_name, function, name);
1444 // The function may have called dlopen(3) or dlclose(3), so we need to ensure our data structures
1445 // are still writable. This happens with our debug malloc (see http://b/7941716).
1446 set_soinfo_pool_protection(PROT_READ | PROT_WRITE);
1447 }
// Runs this image's DT_PREINIT_ARRAY functions, in forward order.
void soinfo::CallPreInitConstructors() {
  // DT_PREINIT_ARRAY functions are called before any other constructors for executables,
  // but ignored in a shared library.
  CallArray("DT_PREINIT_ARRAY", preinit_array, preinit_array_count, false);
}
1455 void soinfo::CallConstructors() {
1456 if (constructors_called) {
1457 return;
1458 }
1460 // We set constructors_called before actually calling the constructors, otherwise it doesn't
1461 // protect against recursive constructor calls. One simple example of constructor recursion
1462 // is the libc debug malloc, which is implemented in libc_malloc_debug_leak.so:
1463 // 1. The program depends on libc, so libc's constructor is called here.
1464 // 2. The libc constructor calls dlopen() to load libc_malloc_debug_leak.so.
1465 // 3. dlopen() calls the constructors on the newly created
1466 // soinfo for libc_malloc_debug_leak.so.
1467 // 4. The debug .so depends on libc, so CallConstructors is
1468 // called again with the libc soinfo. If it doesn't trigger the early-
1469 // out above, the libc constructor will be called again (recursively!).
1470 constructors_called = true;
1472 if ((flags & FLAG_EXE) == 0 && preinit_array != NULL) {
1473 // The GNU dynamic linker silently ignores these, but we warn the developer.
1474 PRINT("\"%s\": ignoring %zd-entry DT_PREINIT_ARRAY in shared library!",
1475 name, preinit_array_count);
1476 }
1478 if (dynamic != NULL) {
1479 for (ElfW(Dyn)* d = dynamic; d->d_tag != DT_NULL; ++d) {
1480 if (d->d_tag == DT_NEEDED) {
1481 const char* library_name = strtab + d->d_un.d_val;
1482 TRACE("\"%s\": calling constructors in DT_NEEDED \"%s\"", name, library_name);
1483 find_loaded_library(library_name)->CallConstructors();
1484 }
1485 }
1486 }
1488 TRACE("\"%s\": calling constructors", name);
1490 // DT_INIT should be called before DT_INIT_ARRAY if both are present.
1491 CallFunction("DT_INIT", init_func);
1492 CallArray("DT_INIT_ARRAY", init_array, init_array_count, false);
1493 }
// Runs this image's destructors: DT_FINI_ARRAY in reverse order, then DT_FINI.
void soinfo::CallDestructors() {
  TRACE("\"%s\": calling destructors", name);

  // DT_FINI_ARRAY must be parsed in reverse order.
  CallArray("DT_FINI_ARRAY", fini_array, fini_array_count, true);

  // DT_FINI should be called after DT_FINI_ARRAY if both are present.
  CallFunction("DT_FINI", fini_func);
}
1505 /* Force any of the closed stdin, stdout and stderr to be associated with
1506 /dev/null. */
1507 static int nullify_closed_stdio() {
1508 int dev_null, i, status;
1509 int return_value = 0;
1511 dev_null = TEMP_FAILURE_RETRY(open("/dev/null", O_RDWR));
1512 if (dev_null < 0) {
1513 DL_ERR("cannot open /dev/null: %s", strerror(errno));
1514 return -1;
1515 }
1516 TRACE("[ Opened /dev/null file-descriptor=%d]", dev_null);
1518 /* If any of the stdio file descriptors is valid and not associated
1519 with /dev/null, dup /dev/null to it. */
1520 for (i = 0; i < 3; i++) {
1521 /* If it is /dev/null already, we are done. */
1522 if (i == dev_null) {
1523 continue;
1524 }
1526 TRACE("[ Nullifying stdio file descriptor %d]", i);
1527 status = TEMP_FAILURE_RETRY(fcntl(i, F_GETFL));
1529 /* If file is opened, we are good. */
1530 if (status != -1) {
1531 continue;
1532 }
1534 /* The only error we allow is that the file descriptor does not
1535 exist, in which case we dup /dev/null to it. */
1536 if (errno != EBADF) {
1537 DL_ERR("fcntl failed: %s", strerror(errno));
1538 return_value = -1;
1539 continue;
1540 }
1542 /* Try dupping /dev/null to this stdio file descriptor and
1543 repeat if there is a signal. Note that any errors in closing
1544 the stdio descriptor are lost. */
1545 status = TEMP_FAILURE_RETRY(dup2(dev_null, i));
1546 if (status < 0) {
1547 DL_ERR("dup2 failed: %s", strerror(errno));
1548 return_value = -1;
1549 continue;
1550 }
1551 }
1553 /* If /dev/null is not one of the stdio file descriptors, close it. */
1554 if (dev_null > 2) {
1555 TRACE("[ Closing /dev/null file-descriptor=%d]", dev_null);
1556 status = TEMP_FAILURE_RETRY(close(dev_null));
1557 if (status == -1) {
1558 DL_ERR("close failed: %s", strerror(errno));
1559 return_value = -1;
1560 }
1561 }
1563 return return_value;
1564 }
1566 static bool soinfo_link_image(soinfo* si) {
1567 /* "base" might wrap around UINT32_MAX. */
1568 ElfW(Addr) base = si->load_bias;
1569 const ElfW(Phdr)* phdr = si->phdr;
1570 int phnum = si->phnum;
1571 bool relocating_linker = (si->flags & FLAG_LINKER) != 0;
1573 /* We can't debug anything until the linker is relocated */
1574 if (!relocating_linker) {
1575 INFO("[ linking %s ]", si->name);
1576 DEBUG("si->base = %p si->flags = 0x%08x", reinterpret_cast<void*>(si->base), si->flags);
1577 }
1579 /* Extract dynamic section */
1580 size_t dynamic_count;
1581 ElfW(Word) dynamic_flags;
1582 phdr_table_get_dynamic_section(phdr, phnum, base, &si->dynamic,
1583 &dynamic_count, &dynamic_flags);
1584 if (si->dynamic == NULL) {
1585 if (!relocating_linker) {
1586 DL_ERR("missing PT_DYNAMIC in \"%s\"", si->name);
1587 }
1588 return false;
1589 } else {
1590 if (!relocating_linker) {
1591 DEBUG("dynamic = %p", si->dynamic);
1592 }
1593 }
1595 #if defined(__arm__)
1596 (void) phdr_table_get_arm_exidx(phdr, phnum, base,
1597 &si->ARM_exidx, &si->ARM_exidx_count);
1598 #endif
1600 // Extract useful information from dynamic section.
1601 uint32_t needed_count = 0;
1602 for (ElfW(Dyn)* d = si->dynamic; d->d_tag != DT_NULL; ++d) {
1603 DEBUG("d = %p, d[0](tag) = %p d[1](val) = %p",
1604 d, reinterpret_cast<void*>(d->d_tag), reinterpret_cast<void*>(d->d_un.d_val));
1605 switch (d->d_tag) {
1606 case DT_HASH:
1607 si->nbucket = reinterpret_cast<uint32_t*>(base + d->d_un.d_ptr)[0];
1608 si->nchain = reinterpret_cast<uint32_t*>(base + d->d_un.d_ptr)[1];
1609 si->bucket = reinterpret_cast<uint32_t*>(base + d->d_un.d_ptr + 8);
1610 si->chain = reinterpret_cast<uint32_t*>(base + d->d_un.d_ptr + 8 + si->nbucket * 4);
1611 break;
1612 case DT_STRTAB:
1613 si->strtab = reinterpret_cast<const char*>(base + d->d_un.d_ptr);
1614 break;
1615 case DT_SYMTAB:
1616 si->symtab = reinterpret_cast<ElfW(Sym)*>(base + d->d_un.d_ptr);
1617 break;
1618 #if !defined(__LP64__)
1619 case DT_PLTREL:
1620 if (d->d_un.d_val != DT_REL) {
1621 DL_ERR("unsupported DT_RELA in \"%s\"", si->name);
1622 return false;
1623 }
1624 break;
1625 #endif
1626 case DT_JMPREL:
1627 #if defined(USE_RELA)
1628 si->plt_rela = reinterpret_cast<ElfW(Rela)*>(base + d->d_un.d_ptr);
1629 #else
1630 si->plt_rel = reinterpret_cast<ElfW(Rel)*>(base + d->d_un.d_ptr);
1631 #endif
1632 break;
1633 case DT_PLTRELSZ:
1634 #if defined(USE_RELA)
1635 si->plt_rela_count = d->d_un.d_val / sizeof(ElfW(Rela));
1636 #else
1637 si->plt_rel_count = d->d_un.d_val / sizeof(ElfW(Rel));
1638 #endif
1639 break;
1640 #if defined(__mips__)
1641 case DT_PLTGOT:
1642 // Used by mips and mips64.
1643 si->plt_got = reinterpret_cast<ElfW(Addr)**>(base + d->d_un.d_ptr);
1644 break;
1645 #endif
1646 case DT_DEBUG:
1647 // Set the DT_DEBUG entry to the address of _r_debug for GDB
1648 // if the dynamic table is writable
1649 // FIXME: not working currently for N64
1650 // The flags for the LOAD and DYNAMIC program headers do not agree.
1651 // The LOAD section containng the dynamic table has been mapped as
1652 // read-only, but the DYNAMIC header claims it is writable.
1653 #if !(defined(__mips__) && defined(__LP64__))
1654 if ((dynamic_flags & PF_W) != 0) {
1655 d->d_un.d_val = reinterpret_cast<uintptr_t>(&_r_debug);
1656 }
1657 break;
1658 #endif
1659 #if defined(USE_RELA)
1660 case DT_RELA:
1661 si->rela = reinterpret_cast<ElfW(Rela)*>(base + d->d_un.d_ptr);
1662 break;
1663 case DT_RELASZ:
1664 si->rela_count = d->d_un.d_val / sizeof(ElfW(Rela));
1665 break;
1666 case DT_REL:
1667 DL_ERR("unsupported DT_REL in \"%s\"", si->name);
1668 return false;
1669 case DT_RELSZ:
1670 DL_ERR("unsupported DT_RELSZ in \"%s\"", si->name);
1671 return false;
1672 #else
1673 case DT_REL:
1674 si->rel = reinterpret_cast<ElfW(Rel)*>(base + d->d_un.d_ptr);
1675 break;
1676 case DT_RELSZ:
1677 si->rel_count = d->d_un.d_val / sizeof(ElfW(Rel));
1678 break;
1679 case DT_RELA:
1680 DL_ERR("unsupported DT_RELA in \"%s\"", si->name);
1681 return false;
1682 #endif
1683 case DT_INIT:
1684 si->init_func = reinterpret_cast<linker_function_t>(base + d->d_un.d_ptr);
1685 DEBUG("%s constructors (DT_INIT) found at %p", si->name, si->init_func);
1686 break;
1687 case DT_FINI:
1688 si->fini_func = reinterpret_cast<linker_function_t>(base + d->d_un.d_ptr);
1689 DEBUG("%s destructors (DT_FINI) found at %p", si->name, si->fini_func);
1690 break;
1691 case DT_INIT_ARRAY:
1692 si->init_array = reinterpret_cast<linker_function_t*>(base + d->d_un.d_ptr);
1693 DEBUG("%s constructors (DT_INIT_ARRAY) found at %p", si->name, si->init_array);
1694 break;
1695 case DT_INIT_ARRAYSZ:
1696 si->init_array_count = ((unsigned)d->d_un.d_val) / sizeof(ElfW(Addr));
1697 break;
1698 case DT_FINI_ARRAY:
1699 si->fini_array = reinterpret_cast<linker_function_t*>(base + d->d_un.d_ptr);
1700 DEBUG("%s destructors (DT_FINI_ARRAY) found at %p", si->name, si->fini_array);
1701 break;
1702 case DT_FINI_ARRAYSZ:
1703 si->fini_array_count = ((unsigned)d->d_un.d_val) / sizeof(ElfW(Addr));
1704 break;
1705 case DT_PREINIT_ARRAY:
1706 si->preinit_array = reinterpret_cast<linker_function_t*>(base + d->d_un.d_ptr);
1707 DEBUG("%s constructors (DT_PREINIT_ARRAY) found at %p", si->name, si->preinit_array);
1708 break;
1709 case DT_PREINIT_ARRAYSZ:
1710 si->preinit_array_count = ((unsigned)d->d_un.d_val) / sizeof(ElfW(Addr));
1711 break;
1712 case DT_TEXTREL:
1713 #if defined(__LP64__)
1714 DL_ERR("text relocations (DT_TEXTREL) found in 64-bit ELF file \"%s\"", si->name);
1715 return false;
1716 #else
1717 si->has_text_relocations = true;
1718 break;
1719 #endif
1720 case DT_SYMBOLIC:
1721 si->has_DT_SYMBOLIC = true;
1722 break;
1723 case DT_NEEDED:
1724 ++needed_count;
1725 break;
1726 case DT_FLAGS:
1727 if (d->d_un.d_val & DF_TEXTREL) {
1728 #if defined(__LP64__)
1729 DL_ERR("text relocations (DF_TEXTREL) found in 64-bit ELF file \"%s\"", si->name);
1730 return false;
1731 #else
1732 si->has_text_relocations = true;
1733 #endif
1734 }
1735 if (d->d_un.d_val & DF_SYMBOLIC) {
1736 si->has_DT_SYMBOLIC = true;
1737 }
1738 break;
1739 #if defined(__mips__)
1740 case DT_STRSZ:
1741 case DT_SYMENT:
1742 case DT_RELENT:
1743 break;
1744 case DT_MIPS_RLD_MAP:
1745 // Set the DT_MIPS_RLD_MAP entry to the address of _r_debug for GDB.
1746 {
1747 r_debug** dp = reinterpret_cast<r_debug**>(base + d->d_un.d_ptr);
1748 *dp = &_r_debug;
1749 }
1750 break;
1751 case DT_MIPS_RLD_VERSION:
1752 case DT_MIPS_FLAGS:
1753 case DT_MIPS_BASE_ADDRESS:
1754 case DT_MIPS_UNREFEXTNO:
1755 break;
1757 case DT_MIPS_SYMTABNO:
1758 si->mips_symtabno = d->d_un.d_val;
1759 break;
1761 case DT_MIPS_LOCAL_GOTNO:
1762 si->mips_local_gotno = d->d_un.d_val;
1763 break;
1765 case DT_MIPS_GOTSYM:
1766 si->mips_gotsym = d->d_un.d_val;
1767 break;
1768 #endif
1770 default:
1771 DEBUG("Unused DT entry: type %p arg %p",
1772 reinterpret_cast<void*>(d->d_tag), reinterpret_cast<void*>(d->d_un.d_val));
1773 break;
1774 }
1775 }
1777 DEBUG("si->base = %p, si->strtab = %p, si->symtab = %p",
1778 reinterpret_cast<void*>(si->base), si->strtab, si->symtab);
1780 // Sanity checks.
1781 if (relocating_linker && needed_count != 0) {
1782 DL_ERR("linker cannot have DT_NEEDED dependencies on other libraries");
1783 return false;
1784 }
1785 if (si->nbucket == 0) {
1786 DL_ERR("empty/missing DT_HASH in \"%s\" (built with --hash-style=gnu?)", si->name);
1787 return false;
1788 }
1789 if (si->strtab == 0) {
1790 DL_ERR("empty/missing DT_STRTAB in \"%s\"", si->name);
1791 return false;
1792 }
1793 if (si->symtab == 0) {
1794 DL_ERR("empty/missing DT_SYMTAB in \"%s\"", si->name);
1795 return false;
1796 }
1798 // If this is the main executable, then load all of the libraries from LD_PRELOAD now.
1799 if (si->flags & FLAG_EXE) {
1800 memset(gLdPreloads, 0, sizeof(gLdPreloads));
1801 size_t preload_count = 0;
1802 for (size_t i = 0; gLdPreloadNames[i] != NULL; i++) {
1803 soinfo* lsi = find_library(gLdPreloadNames[i]);
1804 if (lsi != NULL) {
1805 gLdPreloads[preload_count++] = lsi;
1806 } else {
1807 // As with glibc, failure to load an LD_PRELOAD library is just a warning.
1808 DL_WARN("could not load library \"%s\" from LD_PRELOAD for \"%s\"; caused by %s",
1809 gLdPreloadNames[i], si->name, linker_get_error_buffer());
1810 }
1811 }
1812 }
1814 soinfo** needed = reinterpret_cast<soinfo**>(alloca((1 + needed_count) * sizeof(soinfo*)));
1815 soinfo** pneeded = needed;
1817 for (ElfW(Dyn)* d = si->dynamic; d->d_tag != DT_NULL; ++d) {
1818 if (d->d_tag == DT_NEEDED) {
1819 const char* library_name = si->strtab + d->d_un.d_val;
1820 DEBUG("%s needs %s", si->name, library_name);
1821 soinfo* lsi = find_library(library_name);
1822 if (lsi == NULL) {
1823 strlcpy(tmp_err_buf, linker_get_error_buffer(), sizeof(tmp_err_buf));
1824 DL_ERR("could not load library \"%s\" needed by \"%s\"; caused by %s",
1825 library_name, si->name, tmp_err_buf);
1826 return false;
1827 }
1828 *pneeded++ = lsi;
1829 }
1830 }
1831 *pneeded = NULL;
1833 #if !defined(__LP64__)
1834 if (si->has_text_relocations) {
1835 // Make segments writable to allow text relocations to work properly. We will later call
1836 // phdr_table_protect_segments() after all of them are applied and all constructors are run.
1837 DL_WARN("%s has text relocations. This is wasting memory and prevents "
1838 "security hardening. Please fix.", si->name);
1839 if (phdr_table_unprotect_segments(si->phdr, si->phnum, si->load_bias) < 0) {
1840 DL_ERR("can't unprotect loadable segments for \"%s\": %s",
1841 si->name, strerror(errno));
1842 return false;
1843 }
1844 }
1845 #endif
1847 #if defined(USE_RELA)
1848 if (si->plt_rela != NULL) {
1849 DEBUG("[ relocating %s plt ]\n", si->name);
1850 if (soinfo_relocate(si, si->plt_rela, si->plt_rela_count, needed)) {
1851 return false;
1852 }
1853 }
1854 if (si->rela != NULL) {
1855 DEBUG("[ relocating %s ]\n", si->name);
1856 if (soinfo_relocate(si, si->rela, si->rela_count, needed)) {
1857 return false;
1858 }
1859 }
1860 #else
1861 if (si->plt_rel != NULL) {
1862 DEBUG("[ relocating %s plt ]", si->name);
1863 if (soinfo_relocate(si, si->plt_rel, si->plt_rel_count, needed)) {
1864 return false;
1865 }
1866 }
1867 if (si->rel != NULL) {
1868 DEBUG("[ relocating %s ]", si->name);
1869 if (soinfo_relocate(si, si->rel, si->rel_count, needed)) {
1870 return false;
1871 }
1872 }
1873 #endif
1875 #if defined(__mips__)
1876 if (!mips_relocate_got(si, needed)) {
1877 return false;
1878 }
1879 #endif
1881 si->flags |= FLAG_LINKED;
1882 DEBUG("[ finished linking %s ]", si->name);
1884 #if !defined(__LP64__)
1885 if (si->has_text_relocations) {
1886 // All relocations are done, we can protect our segments back to read-only.
1887 if (phdr_table_protect_segments(si->phdr, si->phnum, si->load_bias) < 0) {
1888 DL_ERR("can't protect segments for \"%s\": %s",
1889 si->name, strerror(errno));
1890 return false;
1891 }
1892 }
1893 #endif
1895 /* We can also turn on GNU RELRO protection */
1896 if (phdr_table_protect_gnu_relro(si->phdr, si->phnum, si->load_bias) < 0) {
1897 DL_ERR("can't enable GNU RELRO protection for \"%s\": %s",
1898 si->name, strerror(errno));
1899 return false;
1900 }
1902 notify_gdb_of_load(si);
1903 return true;
1904 }
1906 /*
1907 * This function add vdso to internal dso list.
1908 * It helps to stack unwinding through signal handlers.
1909 * Also, it makes bionic more like glibc.
1910 */
1911 static void add_vdso(KernelArgumentBlock& args UNUSED) {
1912 #if defined(AT_SYSINFO_EHDR)
1913 ElfW(Ehdr)* ehdr_vdso = reinterpret_cast<ElfW(Ehdr)*>(args.getauxval(AT_SYSINFO_EHDR));
1914 if (ehdr_vdso == NULL) {
1915 return;
1916 }
1918 soinfo* si = soinfo_alloc("[vdso]");
1920 si->phdr = reinterpret_cast<ElfW(Phdr)*>(reinterpret_cast<char*>(ehdr_vdso) + ehdr_vdso->e_phoff);
1921 si->phnum = ehdr_vdso->e_phnum;
1922 si->base = reinterpret_cast<ElfW(Addr)>(ehdr_vdso);
1923 si->size = phdr_table_get_load_size(si->phdr, si->phnum);
1924 si->flags = 0;
1925 si->load_bias = get_elf_exec_load_bias(ehdr_vdso);
1927 soinfo_link_image(si);
1928 #endif
1929 }
/*
 * This code is called after the linker has linked itself and
 * fixed its own GOT. It is safe to make references to externs
 * and other non-local data at this point.
 *
 * Loads and links the main executable (argv[0]) plus LD_PRELOADs,
 * runs all constructors, and returns the executable's entry point
 * for the assembly stub to jump to.
 */
static ElfW(Addr) __linker_init_post_relocation(KernelArgumentBlock& args, ElfW(Addr) linker_base) {
  /* NOTE: we store the args pointer on a special location
   * of the temporary TLS area in order to pass it to
   * the C Library's runtime initializer.
   *
   * The initializer must clear the slot and reset the TLS
   * to point to a different location to ensure that no other
   * shared library constructor can access it.
   */
  __libc_init_tls(args);

#if TIMING
  struct timeval t0, t1;
  gettimeofday(&t0, 0);
#endif

  // Initialize environment functions, and get to the ELF aux vectors table.
  linker_env_init(args);

  // If this is a setuid/setgid program, close the security hole described in
  // ftp://ftp.freebsd.org/pub/FreeBSD/CERT/advisories/FreeBSD-SA-02:23.stdio.asc
  if (get_AT_SECURE()) {
    nullify_closed_stdio();
  }

  debuggerd_init();

  // Get a few environment variables.
  const char* LD_DEBUG = linker_env_get("LD_DEBUG");
  if (LD_DEBUG != NULL) {
    gLdDebugVerbosity = atoi(LD_DEBUG);
  }

  // Normally, these are cleaned by linker_env_init, but the test
  // doesn't cost us anything. Only honor LD_LIBRARY_PATH/LD_PRELOAD
  // for non-AT_SECURE (not setuid/setgid) programs.
  const char* ldpath_env = NULL;
  const char* ldpreload_env = NULL;
  if (!get_AT_SECURE()) {
    ldpath_env = linker_env_get("LD_LIBRARY_PATH");
    ldpreload_env = linker_env_get("LD_PRELOAD");
  }

  INFO("[ android linker & debugger ]");

  // Allocate the soinfo that will describe the main executable itself.
  soinfo* si = soinfo_alloc(args.argv[0]);
  if (si == NULL) {
    exit(EXIT_FAILURE);
  }

  /* bootstrap the link map, the main exe always needs to be first */
  si->flags |= FLAG_EXE;
  link_map* map = &(si->link_map_head);

  map->l_addr = 0;
  map->l_name = args.argv[0];
  map->l_prev = NULL;
  map->l_next = NULL;

  // Publish the bootstrap entry as head (and, so far, tail) of the
  // r_debug list that gdb walks.
  _r_debug.r_map = map;
  r_debug_tail = map;

  /* gdb expects the linker to be in the debug shared object list.
   * Without this, gdb has trouble locating the linker's ".text"
   * and ".plt" sections. Gdb could also potentially use this to
   * relocate the offset of our exported 'rtld_db_dlactivity' symbol.
   * Don't use soinfo_alloc(), because the linker shouldn't
   * be on the soinfo list.
   */
  {
    static soinfo linker_soinfo;
#if defined(__LP64__)
    strlcpy(linker_soinfo.name, "/system/bin/linker64", sizeof(linker_soinfo.name));
#else
    strlcpy(linker_soinfo.name, "/system/bin/linker", sizeof(linker_soinfo.name));
#endif
    linker_soinfo.flags = 0;
    linker_soinfo.base = linker_base;

    /*
     * Set the dynamic field in the link map otherwise gdb will complain with
     * the following:
     *   warning: .dynamic section for "/system/bin/linker" is not at the
     *   expected address (wrong library or version mismatch?)
     */
    ElfW(Ehdr)* elf_hdr = reinterpret_cast<ElfW(Ehdr)*>(linker_base);
    ElfW(Phdr)* phdr = reinterpret_cast<ElfW(Phdr)*>(linker_base + elf_hdr->e_phoff);
    phdr_table_get_dynamic_section(phdr, elf_hdr->e_phnum, linker_base,
                                   &linker_soinfo.dynamic, NULL, NULL);
    insert_soinfo_into_debug_map(&linker_soinfo);
  }

  // Extract information passed from the kernel.
  si->phdr = reinterpret_cast<ElfW(Phdr)*>(args.getauxval(AT_PHDR));
  si->phnum = args.getauxval(AT_PHNUM);
  si->entry = args.getauxval(AT_ENTRY);

  /* Compute the value of si->base. We can't rely on the fact that
   * the first entry is the PHDR because this will not be true
   * for certain executables (e.g. some in the NDK unit test suite)
   */
  si->base = 0;
  si->size = phdr_table_get_load_size(si->phdr, si->phnum);
  si->load_bias = 0;
  for (size_t i = 0; i < si->phnum; ++i) {
    if (si->phdr[i].p_type == PT_PHDR) {
      // AT_PHDR gives the in-memory address of the phdr table; subtracting
      // the segment's file-relative vaddr/offset recovers bias and base.
      si->load_bias = reinterpret_cast<ElfW(Addr)>(si->phdr) - si->phdr[i].p_vaddr;
      si->base = reinterpret_cast<ElfW(Addr)>(si->phdr) - si->phdr[i].p_offset;
      break;
    }
  }
  si->dynamic = NULL;
  si->ref_count = 1;

  // Use LD_LIBRARY_PATH and LD_PRELOAD (but only if we aren't setuid/setgid).
  parse_LD_LIBRARY_PATH(ldpath_env);
  parse_LD_PRELOAD(ldpreload_env);

  somain = si;

  if (!soinfo_link_image(si)) {
    __libc_format_fd(2, "CANNOT LINK EXECUTABLE: %s\n", linker_get_error_buffer());
    exit(EXIT_FAILURE);
  }

  add_vdso(args);

  // Constructor order: preinit of the executable, then constructors of the
  // LD_PRELOAD libraries, then (after fixing l_addr below) the executable's
  // own constructors.
  si->CallPreInitConstructors();

  for (size_t i = 0; gLdPreloads[i] != NULL; ++i) {
    gLdPreloads[i]->CallConstructors();
  }

  /* After the link_image, the si->load_bias is initialized.
   * For so lib, the map->l_addr will be updated in notify_gdb_of_load.
   * We need to update this value for so exe here. So Unwind_Backtrace
   * for some arch like x86 could work correctly within so exe.
   */
  map->l_addr = si->load_bias;
  si->CallConstructors();

#if TIMING
  gettimeofday(&t1, NULL);
  PRINT("LINKER TIME: %s: %d microseconds", args.argv[0], (int) (
        (((long long)t1.tv_sec * 1000000LL) + (long long)t1.tv_usec) -
        (((long long)t0.tv_sec * 1000000LL) + (long long)t0.tv_usec)));
#endif
#if STATS
  PRINT("RELO STATS: %s: %d abs, %d rel, %d copy, %d symbol", args.argv[0],
        linker_stats.count[kRelocAbsolute],
        linker_stats.count[kRelocRelative],
        linker_stats.count[kRelocCopy],
        linker_stats.count[kRelocSymbol]);
#endif
#if COUNT_PAGES
  {
    unsigned n;
    unsigned i;
    unsigned count = 0;
    // NOTE(review): the per-entry bit width differs (32 bits on LP64,
    // 8 bits otherwise) — presumably matching how bitmask is declared
    // elsewhere in this file; confirm against that declaration.
    for (n = 0; n < 4096; n++) {
      if (bitmask[n]) {
        unsigned x = bitmask[n];
#if defined(__LP64__)
        for (i = 0; i < 32; i++) {
#else
        for (i = 0; i < 8; i++) {
#endif
          if (x & 1) {
            count++;
          }
          x >>= 1;
        }
      }
    }
    PRINT("PAGES MODIFIED: %s: %d (%dKB)", args.argv[0], count, count * 4);
  }
#endif

#if TIMING || STATS || COUNT_PAGES
  fflush(stdout);
#endif

  TRACE("[ Ready to execute '%s' @ %p ]", si->name, reinterpret_cast<void*>(si->entry));
  return si->entry;
}
/* Compute the load-bias of an existing executable. This shall only
 * be used to compute the load bias of an executable or shared library
 * that was loaded by the kernel itself.
 *
 * Input:
 *    elf -> address of ELF header, assumed to be at the start of the file.
 * Return:
 *    load bias, i.e. add the value of any p_vaddr in the file to get
 *    the corresponding address in memory. Returns 0 if no PT_LOAD
 *    segment is present.
 */
static ElfW(Addr) get_elf_exec_load_bias(const ElfW(Ehdr)* elf) {
  // The program header table sits e_phoff bytes past the ELF header.
  const ElfW(Phdr)* phdr =
      reinterpret_cast<const ElfW(Phdr)*>(reinterpret_cast<uintptr_t>(elf) + elf->e_phoff);

  for (size_t i = 0; i < elf->e_phnum; ++i, ++phdr) {
    if (phdr->p_type != PT_LOAD) {
      continue;
    }
    // First PT_LOAD segment: in-memory address minus link-time vaddr.
    return reinterpret_cast<ElfW(Addr)>(elf) + phdr->p_offset - phdr->p_vaddr;
  }
  return 0;
}
/*
 * This is the entry point for the linker, called from begin.S. This
 * method is responsible for fixing the linker's own relocations, and
 * then calling __linker_init_post_relocation().
 *
 * Because this method is called before the linker has fixed its own
 * relocations, any attempt to reference an extern variable, extern
 * function, or other GOT reference will generate a segfault.
 */
extern "C" ElfW(Addr) __linker_init(void* raw_args) {
  KernelArgumentBlock args(raw_args);

  // AT_BASE is the address at which the kernel mapped the linker itself.
  ElfW(Addr) linker_addr = args.getauxval(AT_BASE);
  ElfW(Ehdr)* elf_hdr = reinterpret_cast<ElfW(Ehdr)*>(linker_addr);
  ElfW(Phdr)* phdr = reinterpret_cast<ElfW(Phdr)*>(linker_addr + elf_hdr->e_phoff);

  // Build a stack-local soinfo describing the linker image so that
  // soinfo_link_image() can relocate the linker against itself.
  soinfo linker_so;
  memset(&linker_so, 0, sizeof(soinfo));

  // NOTE(review): strcpy assumes soinfo.name is large enough to hold
  // "[dynamic linker]" -- confirm against the soinfo name buffer size.
  strcpy(linker_so.name, "[dynamic linker]");
  linker_so.base = linker_addr;
  linker_so.size = phdr_table_get_load_size(phdr, elf_hdr->e_phnum);
  linker_so.load_bias = get_elf_exec_load_bias(elf_hdr);
  linker_so.dynamic = NULL;
  linker_so.phdr = phdr;
  linker_so.phnum = elf_hdr->e_phnum;
  linker_so.flags |= FLAG_LINKER;

  if (!soinfo_link_image(&linker_so)) {
    // It would be nice to print an error message, but if the linker
    // can't link itself, there's no guarantee that we'll be able to
    // call write() (because it involves a GOT reference). We may as
    // well try though...
    const char* msg = "CANNOT LINK EXECUTABLE: ";
    write(2, msg, strlen(msg));
    write(2, __linker_dl_err_buf, strlen(__linker_dl_err_buf));
    write(2, "\n", 1);
    _exit(EXIT_FAILURE);
  }

  // We have successfully fixed our own relocations. It's safe to run
  // the main part of the linker now.
  args.abort_message_ptr = &gAbortMessage;
  ElfW(Addr) start_address = __linker_init_post_relocation(args, linker_addr);

  // Make the soinfo pool read-only now that loading/linking is finished.
  set_soinfo_pool_protection(PROT_READ);

  // Return the address that the calling assembly stub should jump to.
  return start_address;
}