1 /*
2 * Copyright (C) 2008, 2009 The Android Open Source Project
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the
13 * distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
29 #include <dlfcn.h>
30 #include <errno.h>
31 #include <fcntl.h>
32 #include <inttypes.h>
33 #include <pthread.h>
34 #include <stdio.h>
35 #include <stdlib.h>
36 #include <string.h>
37 #include <sys/atomics.h>
38 #include <sys/mman.h>
39 #include <sys/stat.h>
40 #include <unistd.h>
42 // Private C library headers.
43 #include "private/bionic_tls.h"
44 #include "private/KernelArgumentBlock.h"
45 #include "private/ScopedPthreadMutexLocker.h"
47 #include "linker.h"
48 #include "linker_debug.h"
49 #include "linker_environ.h"
50 #include "linker_phdr.h"
51 #include "linker_allocator.h"
53 /* >>> IMPORTANT NOTE - READ ME BEFORE MODIFYING <<<
54 *
55 * Do NOT use malloc() and friends or pthread_*() code here.
56 * Don't use printf() either; it's caused mysterious memory
57 * corruption in the past.
58 * The linker runs before we bring up libc and it's easiest
59 * to make sure it does not depend on any complex libc features
60 *
61 * open issues / todo:
62 *
63 * - cleaner error reporting
64 * - after linking, set as much stuff as possible to READONLY
65 * and NOEXEC
66 */
68 static bool soinfo_link_image(soinfo* si, const android_dlextinfo* extinfo);
69 static ElfW(Addr) get_elf_exec_load_bias(const ElfW(Ehdr)* elf);
71 static LinkerAllocator<soinfo> g_soinfo_allocator;
72 static LinkerAllocator<LinkedListEntry<soinfo>> g_soinfo_links_allocator;
74 static soinfo* solist;
75 static soinfo* sonext;
76 static soinfo* somain; /* main process, always the one after libdl_info */
78 static const char* const kDefaultLdPaths[] = {
79 #if defined(__LP64__)
80 "/vendor/lib64",
81 "/system/lib64",
82 #else
83 "/vendor/lib",
84 "/system/lib",
85 #endif
86 NULL
87 };
89 #define LDPATH_BUFSIZE (LDPATH_MAX*64)
90 #define LDPATH_MAX 8
92 #define LDPRELOAD_BUFSIZE (LDPRELOAD_MAX*64)
93 #define LDPRELOAD_MAX 8
95 static char g_ld_library_paths_buffer[LDPATH_BUFSIZE];
96 static const char* g_ld_library_paths[LDPATH_MAX + 1];
98 static char g_ld_preloads_buffer[LDPRELOAD_BUFSIZE];
99 static const char* g_ld_preload_names[LDPRELOAD_MAX + 1];
101 static soinfo* g_ld_preloads[LDPRELOAD_MAX + 1];
103 __LIBC_HIDDEN__ int g_ld_debug_verbosity;
105 __LIBC_HIDDEN__ abort_msg_t* g_abort_message = NULL; // For debuggerd.
107 enum RelocationKind {
108 kRelocAbsolute = 0,
109 kRelocRelative,
110 kRelocCopy,
111 kRelocSymbol,
112 kRelocMax
113 };
#if STATS
// Per-kind counters for relocation statistics (indexed by RelocationKind).
struct linker_stats_t {
  int count[kRelocMax];
};

static linker_stats_t linker_stats;

// Bumps the counter for |kind|.
static void count_relocation(RelocationKind kind) {
  ++linker_stats.count[kind];
}
#else
// STATS disabled: counting compiles away to a no-op.
static void count_relocation(RelocationKind) {
}
#endif
130 #if COUNT_PAGES
131 static unsigned bitmask[4096];
132 #if defined(__LP64__)
133 #define MARK(offset) \
134 do { \
135 if ((((offset) >> 12) >> 5) < 4096) \
136 bitmask[((offset) >> 12) >> 5] |= (1 << (((offset) >> 12) & 31)); \
137 } while (0)
138 #else
139 #define MARK(offset) \
140 do { \
141 bitmask[((offset) >> 12) >> 3] |= (1 << (((offset) >> 12) & 7)); \
142 } while (0)
143 #endif
144 #else
145 #define MARK(x) do {} while (0)
146 #endif
148 // You shouldn't try to call memory-allocating functions in the dynamic linker.
149 // Guard against the most obvious ones.
150 #define DISALLOW_ALLOCATION(return_type, name, ...) \
151 return_type name __VA_ARGS__ \
152 { \
153 const char* msg = "ERROR: " #name " called from the dynamic linker!\n"; \
154 __libc_format_log(ANDROID_LOG_FATAL, "linker", "%s", msg); \
155 write(2, msg, strlen(msg)); \
156 abort(); \
157 }
158 DISALLOW_ALLOCATION(void*, malloc, (size_t u __unused));
159 DISALLOW_ALLOCATION(void, free, (void* u __unused));
160 DISALLOW_ALLOCATION(void*, realloc, (void* u1 __unused, size_t u2 __unused));
161 DISALLOW_ALLOCATION(void*, calloc, (size_t u1 __unused, size_t u2 __unused));
163 static char tmp_err_buf[768];
164 static char __linker_dl_err_buf[768];
166 char* linker_get_error_buffer() {
167 return &__linker_dl_err_buf[0];
168 }
170 size_t linker_get_error_buffer_size() {
171 return sizeof(__linker_dl_err_buf);
172 }
174 /*
175 * This function is an empty stub where GDB locates a breakpoint to get notified
176 * about linker activity.
177 */
178 extern "C" void __attribute__((noinline)) __attribute__((visibility("default"))) rtld_db_dlactivity();
180 static pthread_mutex_t g__r_debug_mutex = PTHREAD_MUTEX_INITIALIZER;
181 static r_debug _r_debug = {1, NULL, reinterpret_cast<uintptr_t>(&rtld_db_dlactivity), r_debug::RT_CONSISTENT, 0};
182 static link_map* r_debug_tail = 0;
184 static void insert_soinfo_into_debug_map(soinfo* info) {
185 // Copy the necessary fields into the debug structure.
186 link_map* map = &(info->link_map_head);
187 map->l_addr = info->load_bias;
188 map->l_name = reinterpret_cast<char*>(info->name);
189 map->l_ld = info->dynamic;
191 /* Stick the new library at the end of the list.
192 * gdb tends to care more about libc than it does
193 * about leaf libraries, and ordering it this way
194 * reduces the back-and-forth over the wire.
195 */
196 if (r_debug_tail) {
197 r_debug_tail->l_next = map;
198 map->l_prev = r_debug_tail;
199 map->l_next = 0;
200 } else {
201 _r_debug.r_map = map;
202 map->l_prev = 0;
203 map->l_next = 0;
204 }
205 r_debug_tail = map;
206 }
208 static void remove_soinfo_from_debug_map(soinfo* info) {
209 link_map* map = &(info->link_map_head);
211 if (r_debug_tail == map) {
212 r_debug_tail = map->l_prev;
213 }
215 if (map->l_prev) {
216 map->l_prev->l_next = map->l_next;
217 }
218 if (map->l_next) {
219 map->l_next->l_prev = map->l_prev;
220 }
221 }
223 static void notify_gdb_of_load(soinfo* info) {
224 if (info->flags & FLAG_EXE) {
225 // GDB already knows about the main executable
226 return;
227 }
229 ScopedPthreadMutexLocker locker(&g__r_debug_mutex);
231 _r_debug.r_state = r_debug::RT_ADD;
232 rtld_db_dlactivity();
234 insert_soinfo_into_debug_map(info);
236 _r_debug.r_state = r_debug::RT_CONSISTENT;
237 rtld_db_dlactivity();
238 }
240 static void notify_gdb_of_unload(soinfo* info) {
241 if (info->flags & FLAG_EXE) {
242 // GDB already knows about the main executable
243 return;
244 }
246 ScopedPthreadMutexLocker locker(&g__r_debug_mutex);
248 _r_debug.r_state = r_debug::RT_DELETE;
249 rtld_db_dlactivity();
251 remove_soinfo_from_debug_map(info);
253 _r_debug.r_state = r_debug::RT_CONSISTENT;
254 rtld_db_dlactivity();
255 }
// Pulses the RT_ADD -> RT_CONSISTENT transition (with the gdb
// breakpoint stub fired on each state change) so an attached debugger
// re-reads the library list.
void notify_gdb_of_libraries() {
  _r_debug.r_state = r_debug::RT_ADD;
  rtld_db_dlactivity();
  _r_debug.r_state = r_debug::RT_CONSISTENT;
  rtld_db_dlactivity();
}
// Allocates a soinfo list node from the linker's private arena
// (malloc is forbidden inside the dynamic linker).
LinkedListEntry<soinfo>* SoinfoListAllocator::alloc() {
  return g_soinfo_links_allocator.alloc();
}
// Returns a soinfo list node to the linker's private arena.
void SoinfoListAllocator::free(LinkedListEntry<soinfo>* entry) {
  g_soinfo_links_allocator.free(entry);
}
272 static void protect_data(int protection) {
273 g_soinfo_allocator.protect_all(protection);
274 g_soinfo_links_allocator.protect_all(protection);
275 }
// Allocates and zero-initializes a new soinfo for |name| from the
// linker's private arena and appends it to the global solist.
// Returns NULL (with the dl error set) if |name| is too long.
static soinfo* soinfo_alloc(const char* name, struct stat* file_stat) {
  if (strlen(name) >= SOINFO_NAME_LEN) {
    DL_ERR("library name \"%s\" too long", name);
    return NULL;
  }

  soinfo* si = g_soinfo_allocator.alloc();

  // Initialize the new element.
  memset(si, 0, sizeof(soinfo));
  strlcpy(si->name, name, sizeof(si->name));
  si->flags = FLAG_NEW_SOINFO;

  // Remember the identity of the backing file (if any) so a later load
  // of the same file under a different path can be detected.
  if (file_stat != NULL) {
    si->set_st_dev(file_stat->st_dev);
    si->set_st_ino(file_stat->st_ino);
  }

  // Append to the global list; sonext always points at the tail.
  sonext->next = si;
  sonext = si;

  TRACE("name %s: allocated soinfo @ %p", name, si);
  return si;
}
// Unmaps |si|'s loaded segments, unlinks it from solist and returns
// the soinfo to the allocator. A NULL |si| is a no-op.
static void soinfo_free(soinfo* si) {
  if (si == NULL) {
    return;
  }

  if (si->base != 0 && si->size != 0) {
    munmap(reinterpret_cast<void*>(si->base), si->size);
  }

  soinfo *prev = NULL, *trav;

  TRACE("name %s: freeing soinfo @ %p", si->name, si);

  // Find si's predecessor in the singly-linked solist.
  for (trav = solist; trav != NULL; trav = trav->next) {
    if (trav == si)
      break;
    prev = trav;
  }
  if (trav == NULL) {
    /* si was not in solist */
    DL_ERR("name \"%s\" is not in solist!", si->name);
    return;
  }

  // Clear child/parent links to/from si.
  si->remove_all_links();

  /* prev will never be NULL, because the first entry in solist is
     always the static libdl_info.
  */
  prev->next = si->next;
  if (si == sonext) {
    sonext = prev;
  }

  g_soinfo_allocator.free(si);
}
// Splits |path| on any of |delimiters| into the NULL-terminated
// |array| (at most |max_count| entries), using |buf|/|buf_size| as
// backing storage for the copied string. Empty components are skipped.
static void parse_path(const char* path, const char* delimiters,
                       const char** array, char* buf, size_t buf_size, size_t max_count) {
  if (path == NULL) {
    return;
  }

  // strlcpy returns the length of the source, letting us detect truncation.
  size_t len = strlcpy(buf, path, buf_size);

  size_t i = 0;
  char* buf_p = buf;
  while (i < max_count && (array[i] = strsep(&buf_p, delimiters))) {
    if (*array[i] != '\0') {
      ++i;  // Keep only non-empty components.
    }
  }

  // Forget the last path if we had to truncate; this occurs if the 2nd to
  // last char isn't '\0' (i.e. wasn't originally a delimiter).
  if (i > 0 && len >= buf_size && buf[buf_size - 2] != '\0') {
    array[i - 1] = NULL;
  } else {
    array[i] = NULL;
  }
}
// Splits a colon-separated LD_LIBRARY_PATH value into g_ld_library_paths.
static void parse_LD_LIBRARY_PATH(const char* path) {
  parse_path(path, ":", g_ld_library_paths,
             g_ld_library_paths_buffer, sizeof(g_ld_library_paths_buffer), LDPATH_MAX);
}
// Splits an LD_PRELOAD value into g_ld_preload_names.
static void parse_LD_PRELOAD(const char* path) {
  // We have historically supported ':' as well as ' ' in LD_PRELOAD.
  parse_path(path, " :", g_ld_preload_names,
             g_ld_preloads_buffer, sizeof(g_ld_preloads_buffer), LDPRELOAD_MAX);
}
377 #if defined(__arm__)
379 /* For a given PC, find the .so that it belongs to.
380 * Returns the base address of the .ARM.exidx section
381 * for that .so, and the number of 8-byte entries
382 * in that section (via *pcount).
383 *
384 * Intended to be called by libc's __gnu_Unwind_Find_exidx().
385 *
386 * This function is exposed via dlfcn.cpp and libdl.so.
387 */
_Unwind_Ptr dl_unwind_find_exidx(_Unwind_Ptr pc, int* pcount) {
  unsigned addr = (unsigned)pc;

  // Find the library whose mapped range [base, base+size) contains pc.
  for (soinfo* si = solist; si != 0; si = si->next) {
    if ((addr >= si->base) && (addr < (si->base + si->size))) {
      *pcount = si->ARM_exidx_count;
      return (_Unwind_Ptr)si->ARM_exidx;
    }
  }
  // Not found: report zero exception-index entries.
  *pcount = 0;
  return NULL;
}
401 #endif
403 /* Here, we only have to provide a callback to iterate across all the
404 * loaded libraries. gcc_eh does the rest. */
405 int dl_iterate_phdr(int (*cb)(dl_phdr_info* info, size_t size, void* data), void* data) {
406 int rv = 0;
407 for (soinfo* si = solist; si != NULL; si = si->next) {
408 dl_phdr_info dl_info;
409 dl_info.dlpi_addr = si->link_map_head.l_addr;
410 dl_info.dlpi_name = si->link_map_head.l_name;
411 dl_info.dlpi_phdr = si->phdr;
412 dl_info.dlpi_phnum = si->phnum;
413 rv = cb(&dl_info, sizeof(dl_phdr_info), data);
414 if (rv != 0) {
415 break;
416 }
417 }
418 return rv;
419 }
// Looks up |name| (with its precomputed ELF |hash|) in |si|'s hash
// table. Only global and weak *definitions* are returned; undefined
// entries and locally-bound symbols are skipped. Returns NULL when
// the symbol is not found in this library.
static ElfW(Sym)* soinfo_elf_lookup(soinfo* si, unsigned hash, const char* name) {
  ElfW(Sym)* symtab = si->symtab;
  const char* strtab = si->strtab;

  TRACE_TYPE(LOOKUP, "SEARCH %s in %s@%p %x %zd",
             name, si->name, reinterpret_cast<void*>(si->base), hash, hash % si->nbucket);

  // Walk the hash chain starting at the bucket selected by the hash.
  for (unsigned n = si->bucket[hash % si->nbucket]; n != 0; n = si->chain[n]) {
    ElfW(Sym)* s = symtab + n;
    if (strcmp(strtab + s->st_name, name)) continue;

    /* only concern ourselves with global and weak symbol definitions */
    switch (ELF_ST_BIND(s->st_info)) {
      case STB_GLOBAL:
      case STB_WEAK:
        if (s->st_shndx == SHN_UNDEF) {
          continue;  // A reference, not a definition.
        }

        TRACE_TYPE(LOOKUP, "FOUND %s in %s (%p) %zd",
                   name, si->name, reinterpret_cast<void*>(s->st_value),
                   static_cast<size_t>(s->st_size));
        return s;
    }
  }

  return NULL;
}
// Computes the classic SysV ELF hash of |_name|, used to index the
// DT_HASH bucket array. (h ^= g clears the top nibble, equivalent to
// the spec's h &= ~g, and folding g >> 24 matches the spec because the
// two operations touch disjoint bits.)
static unsigned elfhash(const char* _name) {
  unsigned h = 0;
  const unsigned char* p = (const unsigned char*)(_name);
  while (*p != '\0') {
    unsigned g;
    h = (h << 4) + *p++;
    g = h & 0xf0000000;
    h ^= g;
    h ^= g >> 24;
  }
  return h;
}
// Resolves |name| for relocations in |si|, following the lookup order:
// executable and/or local scope (order controlled by DT_SYMBOLIC),
// then LD_PRELOADed libraries, then the |needed| (DT_NEEDED)
// dependencies. On success *lsi is set to the library that supplied
// the definition; returns NULL when the symbol is nowhere defined.
// NOTE(review): |si| is dereferenced in the needed[] loop's DEBUG
// trace; callers appear to always pass non-NULL -- verify.
static ElfW(Sym)* soinfo_do_lookup(soinfo* si, const char* name, soinfo** lsi, soinfo* needed[]) {
  unsigned elf_hash = elfhash(name);
  ElfW(Sym)* s = NULL;

  if (si != NULL && somain != NULL) {
    /*
     * Local scope is executable scope. Just start looking into it right away
     * for the shortcut.
     */

    if (si == somain) {
      s = soinfo_elf_lookup(si, elf_hash, name);
      if (s != NULL) {
        *lsi = si;
        goto done;
      }
    } else {
      /* Order of symbol lookup is controlled by DT_SYMBOLIC flag */

      /*
       * If this object was built with symbolic relocations disabled, the
       * first place to look to resolve external references is the main
       * executable.
       */

      if (!si->has_DT_SYMBOLIC) {
        DEBUG("%s: looking up %s in executable %s",
              si->name, name, somain->name);
        s = soinfo_elf_lookup(somain, elf_hash, name);
        if (s != NULL) {
          *lsi = somain;
          goto done;
        }
      }

      /* Look for symbols in the local scope (the object who is
       * searching). This happens with C++ templates on x86 for some
       * reason.
       *
       * Notes on weak symbols:
       * The ELF specs are ambiguous about treatment of weak definitions in
       * dynamic linking. Some systems return the first definition found
       * and some the first non-weak definition. This is system dependent.
       * Here we return the first definition found for simplicity. */

      s = soinfo_elf_lookup(si, elf_hash, name);
      if (s != NULL) {
        *lsi = si;
        goto done;
      }

      /*
       * If this object was built with -Bsymbolic and symbol is not found
       * in the local scope, try to find the symbol in the main executable.
       */

      if (si->has_DT_SYMBOLIC) {
        DEBUG("%s: looking up %s in executable %s after local scope",
              si->name, name, somain->name);
        s = soinfo_elf_lookup(somain, elf_hash, name);
        if (s != NULL) {
          *lsi = somain;
          goto done;
        }
      }
    }
  }

  /* Next, look for it in the preloads list */
  for (int i = 0; g_ld_preloads[i] != NULL; i++) {
    s = soinfo_elf_lookup(g_ld_preloads[i], elf_hash, name);
    if (s != NULL) {
      *lsi = g_ld_preloads[i];
      goto done;
    }
  }

  // Finally, search the direct dependencies (DT_NEEDED order).
  for (int i = 0; needed[i] != NULL; i++) {
    DEBUG("%s: looking up %s in %s",
          si->name, name, needed[i]->name);
    s = soinfo_elf_lookup(needed[i], elf_hash, name);
    if (s != NULL) {
      *lsi = needed[i];
      goto done;
    }
  }

done:
  if (s != NULL) {
    TRACE_TYPE(LOOKUP, "si %s sym %s s->st_value = %p, "
               "found in %s, base = %p, load bias = %p",
               si->name, name, reinterpret_cast<void*>(s->st_value),
               (*lsi)->name, reinterpret_cast<void*>((*lsi)->base),
               reinterpret_cast<void*>((*lsi)->load_bias));
    return s;
  }

  return NULL;
}
563 /* This is used by dlsym(3). It performs symbol lookup only within the
564 specified soinfo object and not in any of its dependencies.
566 TODO: Only looking in the specified soinfo seems wrong. dlsym(3) says
567 that it should do a breadth first search through the dependency
568 tree. This agrees with the ELF spec (aka System V Application
569 Binary Interface) where in Chapter 5 it discuss resolving "Shared
570 Object Dependencies" in breadth first search order.
571 */
// dlsym(3) with an explicit handle: hashes |name| once and searches
// only |si|'s own symbol table (not its dependencies -- see the TODO
// above about breadth-first search).
ElfW(Sym)* dlsym_handle_lookup(soinfo* si, const char* name) {
  return soinfo_elf_lookup(si, elfhash(name), name);
}
576 /* This is used by dlsym(3) to performs a global symbol lookup. If the
577 start value is null (for RTLD_DEFAULT), the search starts at the
578 beginning of the global solist. Otherwise the search starts at the
579 specified soinfo (for RTLD_NEXT).
580 */
581 ElfW(Sym)* dlsym_linear_lookup(const char* name, soinfo** found, soinfo* start) {
582 unsigned elf_hash = elfhash(name);
584 if (start == NULL) {
585 start = solist;
586 }
588 ElfW(Sym)* s = NULL;
589 for (soinfo* si = start; (s == NULL) && (si != NULL); si = si->next) {
590 s = soinfo_elf_lookup(si, elf_hash, name);
591 if (s != NULL) {
592 *found = si;
593 break;
594 }
595 }
597 if (s != NULL) {
598 TRACE_TYPE(LOOKUP, "%s s->st_value = %p, found->base = %p",
599 name, reinterpret_cast<void*>(s->st_value), reinterpret_cast<void*>((*found)->base));
600 }
602 return s;
603 }
605 soinfo* find_containing_library(const void* p) {
606 ElfW(Addr) address = reinterpret_cast<ElfW(Addr)>(p);
607 for (soinfo* si = solist; si != NULL; si = si->next) {
608 if (address >= si->base && address - si->base < si->size) {
609 return si;
610 }
611 }
612 return NULL;
613 }
615 ElfW(Sym)* dladdr_find_symbol(soinfo* si, const void* addr) {
616 ElfW(Addr) soaddr = reinterpret_cast<ElfW(Addr)>(addr) - si->base;
618 // Search the library's symbol table for any defined symbol which
619 // contains this address.
620 for (size_t i = 0; i < si->nchain; ++i) {
621 ElfW(Sym)* sym = &si->symtab[i];
622 if (sym->st_shndx != SHN_UNDEF &&
623 soaddr >= sym->st_value &&
624 soaddr < sym->st_value + sym->st_size) {
625 return sym;
626 }
627 }
629 return NULL;
630 }
632 static int open_library_on_path(const char* name, const char* const paths[]) {
633 char buf[512];
634 for (size_t i = 0; paths[i] != NULL; ++i) {
635 int n = __libc_format_buffer(buf, sizeof(buf), "%s/%s", paths[i], name);
636 if (n < 0 || n >= static_cast<int>(sizeof(buf))) {
637 PRINT("Warning: ignoring very long library path: %s/%s", paths[i], name);
638 continue;
639 }
640 int fd = TEMP_FAILURE_RETRY(open(buf, O_RDONLY | O_CLOEXEC));
641 if (fd != -1) {
642 return fd;
643 }
644 }
645 return -1;
646 }
// Opens the library |name| and returns a file descriptor, or -1.
// A name containing '/' is opened directly; on LP64 that is the only
// attempt, while on 32-bit a failed direct open falls through to the
// path search (legacy behavior some prebuilts rely on).
static int open_library(const char* name) {
  TRACE("[ opening %s ]", name);

  // If the name contains a slash, we should attempt to open it directly and not search the paths.
  if (strchr(name, '/') != NULL) {
    int fd = TEMP_FAILURE_RETRY(open(name, O_RDONLY | O_CLOEXEC));
    if (fd != -1) {
      return fd;
    }
    // ...but nvidia binary blobs (at least) rely on this behavior, so fall through for now.
#if defined(__LP64__)
    return -1;
#endif
  }

  // Otherwise we try LD_LIBRARY_PATH first, and fall back to the built-in well known paths.
  int fd = open_library_on_path(name, g_ld_library_paths);
  if (fd == -1) {
    fd = open_library_on_path(name, kDefaultLdPaths);
  }
  return fd;
}
// Loads the library |name| from disk: opens it, detects aliases of an
// already-loaded file (same st_dev/st_ino), maps its segments via
// ElfReader, allocates a soinfo and links the image. Returns the new
// (or existing) soinfo, or NULL with the dl error set.
// NOTE(review): |fd| is not explicitly closed on the early-return
// paths below; presumably ElfReader takes ownership of it -- verify.
static soinfo* load_library(const char* name, const android_dlextinfo* extinfo) {
  // Open the file.
  int fd = open_library(name);
  if (fd == -1) {
    DL_ERR("library \"%s\" not found", name);
    return NULL;
  }

  ElfReader elf_reader(name, fd);

  struct stat file_stat;
  if (TEMP_FAILURE_RETRY(fstat(fd, &file_stat)) != 0) {
    DL_ERR("unable to stat file for the library %s: %s", name, strerror(errno));
    return NULL;
  }

  // Check for symlink and other situations where
  // file can have different names.
  for (soinfo* si = solist; si != NULL; si = si->next) {
    if (si->get_st_dev() != 0 &&
        si->get_st_ino() != 0 &&
        si->get_st_dev() == file_stat.st_dev &&
        si->get_st_ino() == file_stat.st_ino) {
      TRACE("library \"%s\" is already loaded under different name/path \"%s\" - will return existing soinfo", name, si->name);
      return si;
    }
  }

  // Read the ELF header and load the segments.
  if (!elf_reader.Load(extinfo)) {
    return NULL;
  }

  // Register under the basename only (see find_loaded_library's TODO).
  const char* bname = strrchr(name, '/');
  soinfo* si = soinfo_alloc(bname ? bname + 1 : name, &file_stat);
  if (si == NULL) {
    return NULL;
  }
  si->base = elf_reader.load_start();
  si->size = elf_reader.load_size();
  si->load_bias = elf_reader.load_bias();
  si->phnum = elf_reader.phdr_count();
  si->phdr = elf_reader.loaded_phdr();

  // At this point we know that whatever is loaded @ base is a valid ELF
  // shared library whose segments are properly mapped in.
  TRACE("[ find_library_internal base=%p size=%zu name='%s' ]",
        reinterpret_cast<void*>(si->base), si->size, si->name);

  if (!soinfo_link_image(si, extinfo)) {
    soinfo_free(si);
    return NULL;
  }

  return si;
}
728 static soinfo *find_loaded_library(const char* name) {
729 // TODO: don't use basename only for determining libraries
730 // http://code.google.com/p/android/issues/detail?id=6670
732 const char* bname = strrchr(name, '/');
733 bname = bname ? bname + 1 : name;
735 for (soinfo* si = solist; si != NULL; si = si->next) {
736 if (!strcmp(bname, si->name)) {
737 return si;
738 }
739 }
740 return NULL;
741 }
743 static soinfo* find_library_internal(const char* name, const android_dlextinfo* extinfo) {
744 if (name == NULL) {
745 return somain;
746 }
748 soinfo* si = find_loaded_library(name);
749 if (si != NULL) {
750 if (si->flags & FLAG_LINKED) {
751 return si;
752 }
753 DL_ERR("OOPS: recursive link to \"%s\"", si->name);
754 return NULL;
755 }
757 TRACE("[ '%s' has not been loaded yet. Locating...]", name);
758 return load_library(name, extinfo);
759 }
761 static soinfo* find_library(const char* name, const android_dlextinfo* extinfo) {
762 soinfo* si = find_library_internal(name, extinfo);
763 if (si != NULL) {
764 si->ref_count++;
765 }
766 return si;
767 }
769 static int soinfo_unload(soinfo* si) {
770 if (si->ref_count == 1) {
771 TRACE("unloading '%s'", si->name);
772 si->CallDestructors();
774 if ((si->flags | FLAG_NEW_SOINFO) != 0) {
775 si->get_children().for_each([&] (soinfo* child) {
776 TRACE("%s needs to unload %s", si->name, child->name);
777 soinfo_unload(child);
778 });
779 } else {
780 for (ElfW(Dyn)* d = si->dynamic; d->d_tag != DT_NULL; ++d) {
781 if (d->d_tag == DT_NEEDED) {
782 const char* library_name = si->strtab + d->d_un.d_val;
783 TRACE("%s needs to unload %s", si->name, library_name);
784 soinfo_unload(find_loaded_library(library_name));
785 }
786 }
787 }
789 notify_gdb_of_unload(si);
790 si->ref_count = 0;
791 soinfo_free(si);
792 } else {
793 si->ref_count--;
794 TRACE("not unloading '%s', decrementing ref_count to %zd", si->name, si->ref_count);
795 }
796 return 0;
797 }
// Writes the built-in default search path ("vendor:system") into
// |buffer|. Note this reports the compile-time defaults, not any
// LD_LIBRARY_PATH override currently in effect.
void do_android_get_LD_LIBRARY_PATH(char* buffer, size_t buffer_size) {
  snprintf(buffer, buffer_size, "%s:%s", kDefaultLdPaths[0], kDefaultLdPaths[1]);
}
803 void do_android_update_LD_LIBRARY_PATH(const char* ld_library_path) {
804 if (!get_AT_SECURE()) {
805 parse_LD_LIBRARY_PATH(ld_library_path);
806 }
807 }
// Implements dlopen(3)/android_dlopen_ext(3): validates |flags| and
// |extinfo|, finds or loads the library, runs its constructors, and
// records it as a child of the main executable. Returns NULL (with
// the dl error set) on failure.
soinfo* do_dlopen(const char* name, int flags, const android_dlextinfo* extinfo) {
  if ((flags & ~(RTLD_NOW|RTLD_LAZY|RTLD_LOCAL|RTLD_GLOBAL)) != 0) {
    DL_ERR("invalid flags to dlopen: %x", flags);
    return NULL;
  }
  if (extinfo != NULL && ((extinfo->flags & ~(ANDROID_DLEXT_VALID_FLAG_BITS)) != 0)) {
    DL_ERR("invalid extended flags to android_dlopen_ext: %x", extinfo->flags);
    return NULL;
  }
  // The linker's soinfo arenas are kept read-only between operations.
  protect_data(PROT_READ | PROT_WRITE);
  soinfo* si = find_library(name, extinfo);
  if (si != NULL) {
    si->CallConstructors();
    somain->add_child(si);
  }
  protect_data(PROT_READ);
  return si;
}
828 int do_dlclose(soinfo* si) {
829 protect_data(PROT_READ | PROT_WRITE);
830 int result = soinfo_unload(si);
831 protect_data(PROT_READ);
832 return result;
833 }
835 #if defined(USE_RELA)
836 static int soinfo_relocate(soinfo* si, ElfW(Rela)* rela, unsigned count, soinfo* needed[]) {
837 ElfW(Sym)* s;
838 soinfo* lsi;
840 for (size_t idx = 0; idx < count; ++idx, ++rela) {
841 unsigned type = ELFW(R_TYPE)(rela->r_info);
842 unsigned sym = ELFW(R_SYM)(rela->r_info);
843 ElfW(Addr) reloc = static_cast<ElfW(Addr)>(rela->r_offset + si->load_bias);
844 ElfW(Addr) sym_addr = 0;
845 const char* sym_name = NULL;
847 DEBUG("Processing '%s' relocation at index %zd", si->name, idx);
848 if (type == 0) { // R_*_NONE
849 continue;
850 }
851 if (sym != 0) {
852 sym_name = reinterpret_cast<const char*>(si->strtab + si->symtab[sym].st_name);
853 s = soinfo_do_lookup(si, sym_name, &lsi, needed);
854 if (s == NULL) {
855 // We only allow an undefined symbol if this is a weak reference...
856 s = &si->symtab[sym];
857 if (ELF_ST_BIND(s->st_info) != STB_WEAK) {
858 DL_ERR("cannot locate symbol \"%s\" referenced by \"%s\"...", sym_name, si->name);
859 return -1;
860 }
862 /* IHI0044C AAELF 4.5.1.1:
864 Libraries are not searched to resolve weak references.
865 It is not an error for a weak reference to remain unsatisfied.
867 During linking, the value of an undefined weak reference is:
868 - Zero if the relocation type is absolute
869 - The address of the place if the relocation is pc-relative
870 - The address of nominal base address if the relocation
871 type is base-relative.
872 */
874 switch (type) {
875 #if defined(__aarch64__)
876 case R_AARCH64_JUMP_SLOT:
877 case R_AARCH64_GLOB_DAT:
878 case R_AARCH64_ABS64:
879 case R_AARCH64_ABS32:
880 case R_AARCH64_ABS16:
881 case R_AARCH64_RELATIVE:
882 /*
883 * The sym_addr was initialized to be zero above, or the relocation
884 * code below does not care about value of sym_addr.
885 * No need to do anything.
886 */
887 break;
888 #elif defined(__x86_64__)
889 case R_X86_64_JUMP_SLOT:
890 case R_X86_64_GLOB_DAT:
891 case R_X86_64_32:
892 case R_X86_64_64:
893 case R_X86_64_RELATIVE:
894 // No need to do anything.
895 break;
896 case R_X86_64_PC32:
897 sym_addr = reloc;
898 break;
899 #endif
900 default:
901 DL_ERR("unknown weak reloc type %d @ %p (%zu)", type, rela, idx);
902 return -1;
903 }
904 } else {
905 // We got a definition.
906 sym_addr = static_cast<ElfW(Addr)>(s->st_value + lsi->load_bias);
907 }
908 count_relocation(kRelocSymbol);
909 } else {
910 s = NULL;
911 }
913 switch (type) {
914 #if defined(__aarch64__)
915 case R_AARCH64_JUMP_SLOT:
916 count_relocation(kRelocAbsolute);
917 MARK(rela->r_offset);
918 TRACE_TYPE(RELO, "RELO JMP_SLOT %16llx <- %16llx %s\n",
919 reloc, (sym_addr + rela->r_addend), sym_name);
920 *reinterpret_cast<ElfW(Addr)*>(reloc) = (sym_addr + rela->r_addend);
921 break;
922 case R_AARCH64_GLOB_DAT:
923 count_relocation(kRelocAbsolute);
924 MARK(rela->r_offset);
925 TRACE_TYPE(RELO, "RELO GLOB_DAT %16llx <- %16llx %s\n",
926 reloc, (sym_addr + rela->r_addend), sym_name);
927 *reinterpret_cast<ElfW(Addr)*>(reloc) = (sym_addr + rela->r_addend);
928 break;
929 case R_AARCH64_ABS64:
930 count_relocation(kRelocAbsolute);
931 MARK(rela->r_offset);
932 TRACE_TYPE(RELO, "RELO ABS64 %16llx <- %16llx %s\n",
933 reloc, (sym_addr + rela->r_addend), sym_name);
934 *reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr + rela->r_addend);
935 break;
936 case R_AARCH64_ABS32:
937 count_relocation(kRelocAbsolute);
938 MARK(rela->r_offset);
939 TRACE_TYPE(RELO, "RELO ABS32 %16llx <- %16llx %s\n",
940 reloc, (sym_addr + rela->r_addend), sym_name);
941 if ((static_cast<ElfW(Addr)>(INT32_MIN) <= (*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend))) &&
942 ((*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend)) <= static_cast<ElfW(Addr)>(UINT32_MAX))) {
943 *reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr + rela->r_addend);
944 } else {
945 DL_ERR("0x%016llx out of range 0x%016llx to 0x%016llx",
946 (*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend)),
947 static_cast<ElfW(Addr)>(INT32_MIN),
948 static_cast<ElfW(Addr)>(UINT32_MAX));
949 return -1;
950 }
951 break;
952 case R_AARCH64_ABS16:
953 count_relocation(kRelocAbsolute);
954 MARK(rela->r_offset);
955 TRACE_TYPE(RELO, "RELO ABS16 %16llx <- %16llx %s\n",
956 reloc, (sym_addr + rela->r_addend), sym_name);
957 if ((static_cast<ElfW(Addr)>(INT16_MIN) <= (*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend))) &&
958 ((*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend)) <= static_cast<ElfW(Addr)>(UINT16_MAX))) {
959 *reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr + rela->r_addend);
960 } else {
961 DL_ERR("0x%016llx out of range 0x%016llx to 0x%016llx",
962 (*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend)),
963 static_cast<ElfW(Addr)>(INT16_MIN),
964 static_cast<ElfW(Addr)>(UINT16_MAX));
965 return -1;
966 }
967 break;
968 case R_AARCH64_PREL64:
969 count_relocation(kRelocRelative);
970 MARK(rela->r_offset);
971 TRACE_TYPE(RELO, "RELO REL64 %16llx <- %16llx - %16llx %s\n",
972 reloc, (sym_addr + rela->r_addend), rela->r_offset, sym_name);
973 *reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr + rela->r_addend) - rela->r_offset;
974 break;
975 case R_AARCH64_PREL32:
976 count_relocation(kRelocRelative);
977 MARK(rela->r_offset);
978 TRACE_TYPE(RELO, "RELO REL32 %16llx <- %16llx - %16llx %s\n",
979 reloc, (sym_addr + rela->r_addend), rela->r_offset, sym_name);
980 if ((static_cast<ElfW(Addr)>(INT32_MIN) <= (*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset))) &&
981 ((*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)) <= static_cast<ElfW(Addr)>(UINT32_MAX))) {
982 *reinterpret_cast<ElfW(Addr)*>(reloc) += ((sym_addr + rela->r_addend) - rela->r_offset);
983 } else {
984 DL_ERR("0x%016llx out of range 0x%016llx to 0x%016llx",
985 (*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)),
986 static_cast<ElfW(Addr)>(INT32_MIN),
987 static_cast<ElfW(Addr)>(UINT32_MAX));
988 return -1;
989 }
990 break;
991 case R_AARCH64_PREL16:
992 count_relocation(kRelocRelative);
993 MARK(rela->r_offset);
994 TRACE_TYPE(RELO, "RELO REL16 %16llx <- %16llx - %16llx %s\n",
995 reloc, (sym_addr + rela->r_addend), rela->r_offset, sym_name);
996 if ((static_cast<ElfW(Addr)>(INT16_MIN) <= (*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset))) &&
997 ((*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)) <= static_cast<ElfW(Addr)>(UINT16_MAX))) {
998 *reinterpret_cast<ElfW(Addr)*>(reloc) += ((sym_addr + rela->r_addend) - rela->r_offset);
999 } else {
1000 DL_ERR("0x%016llx out of range 0x%016llx to 0x%016llx",
1001 (*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)),
1002 static_cast<ElfW(Addr)>(INT16_MIN),
1003 static_cast<ElfW(Addr)>(UINT16_MAX));
1004 return -1;
1005 }
1006 break;
1008 case R_AARCH64_RELATIVE:
1009 count_relocation(kRelocRelative);
1010 MARK(rela->r_offset);
1011 if (sym) {
1012 DL_ERR("odd RELATIVE form...");
1013 return -1;
1014 }
1015 TRACE_TYPE(RELO, "RELO RELATIVE %16llx <- %16llx\n",
1016 reloc, (si->base + rela->r_addend));
1017 *reinterpret_cast<ElfW(Addr)*>(reloc) = (si->base + rela->r_addend);
1018 break;
1020 case R_AARCH64_COPY:
1021 /*
1022 * ET_EXEC is not supported so this should not happen.
1023 *
1024 * http://infocenter.arm.com/help/topic/com.arm.doc.ihi0044d/IHI0044D_aaelf.pdf
1025 *
1026 * Section 4.7.1.10 "Dynamic relocations"
1027 * R_AARCH64_COPY may only appear in executable objects where e_type is
1028 * set to ET_EXEC.
1029 */
1030 DL_ERR("%s R_AARCH64_COPY relocations are not supported", si->name);
1031 return -1;
1032 case R_AARCH64_TLS_TPREL64:
1033 TRACE_TYPE(RELO, "RELO TLS_TPREL64 *** %16llx <- %16llx - %16llx\n",
1034 reloc, (sym_addr + rela->r_addend), rela->r_offset);
1035 break;
1036 case R_AARCH64_TLS_DTPREL32:
1037 TRACE_TYPE(RELO, "RELO TLS_DTPREL32 *** %16llx <- %16llx - %16llx\n",
1038 reloc, (sym_addr + rela->r_addend), rela->r_offset);
1039 break;
1040 #elif defined(__x86_64__)
1041 case R_X86_64_JUMP_SLOT:
1042 count_relocation(kRelocAbsolute);
1043 MARK(rela->r_offset);
1044 TRACE_TYPE(RELO, "RELO JMP_SLOT %08zx <- %08zx %s", static_cast<size_t>(reloc),
1045 static_cast<size_t>(sym_addr + rela->r_addend), sym_name);
1046 *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend;
1047 break;
1048 case R_X86_64_GLOB_DAT:
1049 count_relocation(kRelocAbsolute);
1050 MARK(rela->r_offset);
1051 TRACE_TYPE(RELO, "RELO GLOB_DAT %08zx <- %08zx %s", static_cast<size_t>(reloc),
1052 static_cast<size_t>(sym_addr + rela->r_addend), sym_name);
1053 *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend;
1054 break;
1055 case R_X86_64_RELATIVE:
1056 count_relocation(kRelocRelative);
1057 MARK(rela->r_offset);
1058 if (sym) {
1059 DL_ERR("odd RELATIVE form...");
1060 return -1;
1061 }
1062 TRACE_TYPE(RELO, "RELO RELATIVE %08zx <- +%08zx", static_cast<size_t>(reloc),
1063 static_cast<size_t>(si->base));
1064 *reinterpret_cast<ElfW(Addr)*>(reloc) = si->base + rela->r_addend;
1065 break;
1066 case R_X86_64_32:
1067 count_relocation(kRelocRelative);
1068 MARK(rela->r_offset);
1069 TRACE_TYPE(RELO, "RELO R_X86_64_32 %08zx <- +%08zx %s", static_cast<size_t>(reloc),
1070 static_cast<size_t>(sym_addr), sym_name);
1071 *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend;
1072 break;
1073 case R_X86_64_64:
1074 count_relocation(kRelocRelative);
1075 MARK(rela->r_offset);
1076 TRACE_TYPE(RELO, "RELO R_X86_64_64 %08zx <- +%08zx %s", static_cast<size_t>(reloc),
1077 static_cast<size_t>(sym_addr), sym_name);
1078 *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend;
1079 break;
1080 case R_X86_64_PC32:
1081 count_relocation(kRelocRelative);
1082 MARK(rela->r_offset);
1083 TRACE_TYPE(RELO, "RELO R_X86_64_PC32 %08zx <- +%08zx (%08zx - %08zx) %s",
1084 static_cast<size_t>(reloc), static_cast<size_t>(sym_addr - reloc),
1085 static_cast<size_t>(sym_addr), static_cast<size_t>(reloc), sym_name);
1086 *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend - reloc;
1087 break;
1088 #endif
1090 default:
1091 DL_ERR("unknown reloc type %d @ %p (%zu)", type, rela, idx);
1092 return -1;
1093 }
1094 }
1095 return 0;
1096 }
1098 #else // REL, not RELA.
// Applies REL-format relocations (arm, x86, mips) for 'si'. Symbols are
// resolved against 'si' itself and the libraries listed in 'needed'.
// Returns 0 on success, -1 on any unresolved symbol or unknown relocation.
static int soinfo_relocate(soinfo* si, ElfW(Rel)* rel, unsigned count, soinfo* needed[]) {
  ElfW(Sym)* s;
  soinfo* lsi;

  for (size_t idx = 0; idx < count; ++idx, ++rel) {
    unsigned type = ELFW(R_TYPE)(rel->r_info);
    // TODO: don't use unsigned for 'sym'. Use uint32_t or ElfW(Addr) instead.
    unsigned sym = ELFW(R_SYM)(rel->r_info);
    // 'reloc' is the run-time address of the word being patched.
    ElfW(Addr) reloc = static_cast<ElfW(Addr)>(rel->r_offset + si->load_bias);
    ElfW(Addr) sym_addr = 0;
    const char* sym_name = NULL;

    DEBUG("Processing '%s' relocation at index %zd", si->name, idx);
    if (type == 0) { // R_*_NONE
      continue;
    }
    if (sym != 0) {
      sym_name = reinterpret_cast<const char*>(si->strtab + si->symtab[sym].st_name);
      s = soinfo_do_lookup(si, sym_name, &lsi, needed);
      if (s == NULL) {
        // We only allow an undefined symbol if this is a weak reference...
        s = &si->symtab[sym];
        if (ELF_ST_BIND(s->st_info) != STB_WEAK) {
          DL_ERR("cannot locate symbol \"%s\" referenced by \"%s\"...", sym_name, si->name);
          return -1;
        }

        /* IHI0044C AAELF 4.5.1.1:

           Libraries are not searched to resolve weak references.
           It is not an error for a weak reference to remain
           unsatisfied.

           During linking, the value of an undefined weak reference is:
           - Zero if the relocation type is absolute
           - The address of the place if the relocation is pc-relative
           - The address of nominal base address if the relocation
             type is base-relative.
         */

        // Decide what value an unresolved weak symbol gets, per relocation type.
        switch (type) {
#if defined(__arm__)
          case R_ARM_JUMP_SLOT:
          case R_ARM_GLOB_DAT:
          case R_ARM_ABS32:
          case R_ARM_RELATIVE:    /* Don't care. */
            // sym_addr was initialized to be zero above or relocation
            // code below does not care about value of sym_addr.
            // No need to do anything.
            break;
#elif defined(__i386__)
          case R_386_JMP_SLOT:
          case R_386_GLOB_DAT:
          case R_386_32:
          case R_386_RELATIVE:    /* Don't care. */
            // sym_addr was initialized to be zero above or relocation
            // code below does not care about value of sym_addr.
            // No need to do anything.
            break;
          case R_386_PC32:
            // PC-relative: the value of an undefined weak reference is the
            // address of the place itself.
            sym_addr = reloc;
            break;
#endif

#if defined(__arm__)
          case R_ARM_COPY:
            // Fall through. Can't really copy if weak symbol is not found at run-time.
#endif
          default:
            DL_ERR("unknown weak reloc type %d @ %p (%zu)", type, rel, idx);
            return -1;
        }
      } else {
        // We got a definition.
        sym_addr = static_cast<ElfW(Addr)>(s->st_value + lsi->load_bias);
      }
      count_relocation(kRelocSymbol);
    } else {
      s = NULL;
    }

    // Apply the relocation itself. REL entries have no explicit addend; the
    // addend (where one exists) is the value already stored at 'reloc'.
    switch (type) {
#if defined(__arm__)
      case R_ARM_JUMP_SLOT:
        count_relocation(kRelocAbsolute);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO JMP_SLOT %08x <- %08x %s", reloc, sym_addr, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr;
        break;
      case R_ARM_GLOB_DAT:
        count_relocation(kRelocAbsolute);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO GLOB_DAT %08x <- %08x %s", reloc, sym_addr, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr;
        break;
      case R_ARM_ABS32:
        count_relocation(kRelocAbsolute);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO ABS %08x <- %08x %s", reloc, sym_addr, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) += sym_addr;
        break;
      case R_ARM_REL32:
        count_relocation(kRelocRelative);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO REL32 %08x <- %08x - %08x %s",
                   reloc, sym_addr, rel->r_offset, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) += sym_addr - rel->r_offset;
        break;
      case R_ARM_COPY:
        /*
         * ET_EXEC is not supported so this should not happen.
         *
         * http://infocenter.arm.com/help/topic/com.arm.doc.ihi0044d/IHI0044D_aaelf.pdf
         *
         * Section 4.7.1.10 "Dynamic relocations"
         * R_ARM_COPY may only appear in executable objects where e_type is
         * set to ET_EXEC.
         */
        DL_ERR("%s R_ARM_COPY relocations are not supported", si->name);
        return -1;
#elif defined(__i386__)
      case R_386_JMP_SLOT:
        count_relocation(kRelocAbsolute);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO JMP_SLOT %08x <- %08x %s", reloc, sym_addr, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr;
        break;
      case R_386_GLOB_DAT:
        count_relocation(kRelocAbsolute);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO GLOB_DAT %08x <- %08x %s", reloc, sym_addr, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr;
        break;
      case R_386_32:
        count_relocation(kRelocRelative);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO R_386_32 %08x <- +%08x %s", reloc, sym_addr, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) += sym_addr;
        break;
      case R_386_PC32:
        count_relocation(kRelocRelative);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO R_386_PC32 %08x <- +%08x (%08x - %08x) %s",
                   reloc, (sym_addr - reloc), sym_addr, reloc, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr - reloc);
        break;
#elif defined(__mips__)
      case R_MIPS_REL32:
#if defined(__LP64__)
        // MIPS Elf64_Rel entries contain compound relocations
        // We only handle the R_MIPS_NONE|R_MIPS_64|R_MIPS_REL32 case
        if (ELF64_R_TYPE2(rel->r_info) != R_MIPS_64 ||
            ELF64_R_TYPE3(rel->r_info) != R_MIPS_NONE) {
          DL_ERR("Unexpected compound relocation type:%d type2:%d type3:%d @ %p (%zu)",
                 type, (unsigned)ELF64_R_TYPE2(rel->r_info),
                 (unsigned)ELF64_R_TYPE3(rel->r_info), rel, idx);
          return -1;
        }
#endif
        count_relocation(kRelocAbsolute);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO REL32 %08zx <- %08zx %s", static_cast<size_t>(reloc),
                   static_cast<size_t>(sym_addr), sym_name ? sym_name : "*SECTIONHDR*");
        if (s) {
          *reinterpret_cast<ElfW(Addr)*>(reloc) += sym_addr;
        } else {
          // No symbol: rebase by the load address instead.
          *reinterpret_cast<ElfW(Addr)*>(reloc) += si->base;
        }
        break;
#endif

// NOTE(review): on mips builds neither of these case labels is emitted, so the
// statements below become dead code after the preceding break -- confirm intended.
#if defined(__arm__)
      case R_ARM_RELATIVE:
#elif defined(__i386__)
      case R_386_RELATIVE:
#endif
        count_relocation(kRelocRelative);
        MARK(rel->r_offset);
        if (sym) {
          DL_ERR("odd RELATIVE form...");
          return -1;
        }
        TRACE_TYPE(RELO, "RELO RELATIVE %p <- +%p",
                   reinterpret_cast<void*>(reloc), reinterpret_cast<void*>(si->base));
        *reinterpret_cast<ElfW(Addr)*>(reloc) += si->base;
        break;

      default:
        DL_ERR("unknown reloc type %d @ %p (%zu)", type, rel, idx);
        return -1;
    }
  }
  return 0;
}
1294 #endif
#if defined(__mips__)
// Relocates the MIPS GOT: rebases local entries by the load bias and resolves
// global entries (symbols mips_gotsym..mips_symtabno-1) via symbol lookup.
// Returns false if a non-weak global symbol cannot be located.
static bool mips_relocate_got(soinfo* si, soinfo* needed[]) {
  ElfW(Addr)** got = si->plt_got;
  if (got == NULL) {
    return true;
  }
  unsigned local_gotno = si->mips_local_gotno;
  unsigned gotsym = si->mips_gotsym;
  unsigned symtabno = si->mips_symtabno;
  ElfW(Sym)* symtab = si->symtab;

  // got[0] is the address of the lazy resolver function.
  // got[1] may be used for a GNU extension.
  // Set it to a recognizable address in case someone calls it (should be _rtld_bind_start).
  // FIXME: maybe this should be in a separate routine?
  if ((si->flags & FLAG_LINKER) == 0) {
    size_t g = 0;
    got[g++] = reinterpret_cast<ElfW(Addr)*>(0xdeadbeef);
    // A negative value in got[1] marks the GNU extension slot -- presumably
    // per the MIPS psABI convention; verify against the ABI document.
    if (reinterpret_cast<intptr_t>(got[g]) < 0) {
      got[g++] = reinterpret_cast<ElfW(Addr)*>(0xdeadfeed);
    }
    // Relocate the local GOT entries.
    for (; g < local_gotno; g++) {
      got[g] = reinterpret_cast<ElfW(Addr)*>(reinterpret_cast<uintptr_t>(got[g]) + si->load_bias);
    }
  }

  // Now for the global GOT entries...
  ElfW(Sym)* sym = symtab + gotsym;
  got = si->plt_got + local_gotno;
  for (size_t g = gotsym; g < symtabno; g++, sym++, got++) {
    // This is an undefined reference... try to locate it.
    const char* sym_name = si->strtab + sym->st_name;
    soinfo* lsi;
    ElfW(Sym)* s = soinfo_do_lookup(si, sym_name, &lsi, needed);
    if (s == NULL) {
      // We only allow an undefined symbol if this is a weak reference.
      s = &symtab[g];
      if (ELF_ST_BIND(s->st_info) != STB_WEAK) {
        DL_ERR("cannot locate \"%s\"...", sym_name);
        return false;
      }
      // Undefined weak references resolve to zero.
      *got = 0;
    } else {
      // FIXME: is this sufficient?
      // For reference see NetBSD link loader
      // http://cvsweb.netbsd.org/bsdweb.cgi/src/libexec/ld.elf_so/arch/mips/mips_reloc.c?rev=1.53&content-type=text/x-cvsweb-markup
      *got = reinterpret_cast<ElfW(Addr)*>(lsi->load_bias + s->st_value);
    }
  }
  return true;
}
#endif
1350 void soinfo::CallArray(const char* array_name __unused, linker_function_t* functions, size_t count, bool reverse) {
1351 if (functions == NULL) {
1352 return;
1353 }
1355 TRACE("[ Calling %s (size %zd) @ %p for '%s' ]", array_name, count, functions, name);
1357 int begin = reverse ? (count - 1) : 0;
1358 int end = reverse ? -1 : count;
1359 int step = reverse ? -1 : 1;
1361 for (int i = begin; i != end; i += step) {
1362 TRACE("[ %s[%d] == %p ]", array_name, i, functions[i]);
1363 CallFunction("function", functions[i]);
1364 }
1366 TRACE("[ Done calling %s for '%s' ]", array_name, name);
1367 }
1369 void soinfo::CallFunction(const char* function_name __unused, linker_function_t function) {
1370 if (function == NULL || reinterpret_cast<uintptr_t>(function) == static_cast<uintptr_t>(-1)) {
1371 return;
1372 }
1374 TRACE("[ Calling %s @ %p for '%s' ]", function_name, function, name);
1375 function();
1376 TRACE("[ Done calling %s @ %p for '%s' ]", function_name, function, name);
1378 // The function may have called dlopen(3) or dlclose(3), so we need to ensure our data structures
1379 // are still writable. This happens with our debug malloc (see http://b/7941716).
1380 protect_data(PROT_READ | PROT_WRITE);
1381 }
1383 void soinfo::CallPreInitConstructors() {
1384 // DT_PREINIT_ARRAY functions are called before any other constructors for executables,
1385 // but ignored in a shared library.
1386 CallArray("DT_PREINIT_ARRAY", preinit_array, preinit_array_count, false);
1387 }
1389 void soinfo::CallConstructors() {
1390 if (constructors_called) {
1391 return;
1392 }
1394 // We set constructors_called before actually calling the constructors, otherwise it doesn't
1395 // protect against recursive constructor calls. One simple example of constructor recursion
1396 // is the libc debug malloc, which is implemented in libc_malloc_debug_leak.so:
1397 // 1. The program depends on libc, so libc's constructor is called here.
1398 // 2. The libc constructor calls dlopen() to load libc_malloc_debug_leak.so.
1399 // 3. dlopen() calls the constructors on the newly created
1400 // soinfo for libc_malloc_debug_leak.so.
1401 // 4. The debug .so depends on libc, so CallConstructors is
1402 // called again with the libc soinfo. If it doesn't trigger the early-
1403 // out above, the libc constructor will be called again (recursively!).
1404 constructors_called = true;
1406 if ((flags & FLAG_EXE) == 0 && preinit_array != NULL) {
1407 // The GNU dynamic linker silently ignores these, but we warn the developer.
1408 PRINT("\"%s\": ignoring %zd-entry DT_PREINIT_ARRAY in shared library!",
1409 name, preinit_array_count);
1410 }
1412 get_children().for_each([] (soinfo* si) {
1413 si->CallConstructors();
1414 });
1416 TRACE("\"%s\": calling constructors", name);
1418 // DT_INIT should be called before DT_INIT_ARRAY if both are present.
1419 CallFunction("DT_INIT", init_func);
1420 CallArray("DT_INIT_ARRAY", init_array, init_array_count, false);
1421 }
1423 void soinfo::CallDestructors() {
1424 TRACE("\"%s\": calling destructors", name);
1426 // DT_FINI_ARRAY must be parsed in reverse order.
1427 CallArray("DT_FINI_ARRAY", fini_array, fini_array_count, true);
1429 // DT_FINI should be called after DT_FINI_ARRAY if both are present.
1430 CallFunction("DT_FINI", fini_func);
1431 }
1433 void soinfo::add_child(soinfo* child) {
1434 if ((this->flags & FLAG_NEW_SOINFO) == 0) {
1435 return;
1436 }
1438 this->children.push_front(child);
1439 child->parents.push_front(this);
1440 }
1442 void soinfo::remove_all_links() {
1443 if ((this->flags & FLAG_NEW_SOINFO) == 0) {
1444 return;
1445 }
1447 // 1. Untie connected soinfos from 'this'.
1448 children.for_each([&] (soinfo* child) {
1449 child->parents.remove_if([&] (const soinfo* parent) {
1450 return parent == this;
1451 });
1452 });
1454 parents.for_each([&] (soinfo* parent) {
1455 parent->children.for_each([&] (const soinfo* child) {
1456 return child == this;
1457 });
1458 });
1460 // 2. Once everything untied - clear local lists.
1461 parents.clear();
1462 children.clear();
1463 }
1465 void soinfo::set_st_dev(dev_t dev) {
1466 if ((this->flags & FLAG_NEW_SOINFO) == 0) {
1467 return;
1468 }
1470 st_dev = dev;
1471 }
1473 void soinfo::set_st_ino(ino_t ino) {
1474 if ((this->flags & FLAG_NEW_SOINFO) == 0) {
1475 return;
1476 }
1478 st_ino = ino;
1479 }
1481 dev_t soinfo::get_st_dev() {
1482 if ((this->flags & FLAG_NEW_SOINFO) == 0) {
1483 return 0;
1484 }
1486 return st_dev;
1487 };
1489 ino_t soinfo::get_st_ino() {
1490 if ((this->flags & FLAG_NEW_SOINFO) == 0) {
1491 return 0;
1492 }
1494 return st_ino;
1495 }
// This is returned by get_children() in case
// 'this->flags' does not have FLAG_NEW_SOINFO set.
1499 static soinfo::soinfo_list_t g_empty_list;
1501 soinfo::soinfo_list_t& soinfo::get_children() {
1502 if ((this->flags & FLAG_NEW_SOINFO) == 0) {
1503 return g_empty_list;
1504 }
1506 return this->children;
1507 }
1509 /* Force any of the closed stdin, stdout and stderr to be associated with
1510 /dev/null. */
1511 static int nullify_closed_stdio() {
1512 int dev_null, i, status;
1513 int return_value = 0;
1515 dev_null = TEMP_FAILURE_RETRY(open("/dev/null", O_RDWR));
1516 if (dev_null < 0) {
1517 DL_ERR("cannot open /dev/null: %s", strerror(errno));
1518 return -1;
1519 }
1520 TRACE("[ Opened /dev/null file-descriptor=%d]", dev_null);
1522 /* If any of the stdio file descriptors is valid and not associated
1523 with /dev/null, dup /dev/null to it. */
1524 for (i = 0; i < 3; i++) {
1525 /* If it is /dev/null already, we are done. */
1526 if (i == dev_null) {
1527 continue;
1528 }
1530 TRACE("[ Nullifying stdio file descriptor %d]", i);
1531 status = TEMP_FAILURE_RETRY(fcntl(i, F_GETFL));
1533 /* If file is opened, we are good. */
1534 if (status != -1) {
1535 continue;
1536 }
1538 /* The only error we allow is that the file descriptor does not
1539 exist, in which case we dup /dev/null to it. */
1540 if (errno != EBADF) {
1541 DL_ERR("fcntl failed: %s", strerror(errno));
1542 return_value = -1;
1543 continue;
1544 }
1546 /* Try dupping /dev/null to this stdio file descriptor and
1547 repeat if there is a signal. Note that any errors in closing
1548 the stdio descriptor are lost. */
1549 status = TEMP_FAILURE_RETRY(dup2(dev_null, i));
1550 if (status < 0) {
1551 DL_ERR("dup2 failed: %s", strerror(errno));
1552 return_value = -1;
1553 continue;
1554 }
1555 }
1557 /* If /dev/null is not one of the stdio file descriptors, close it. */
1558 if (dev_null > 2) {
1559 TRACE("[ Closing /dev/null file-descriptor=%d]", dev_null);
1560 status = TEMP_FAILURE_RETRY(close(dev_null));
1561 if (status == -1) {
1562 DL_ERR("close failed: %s", strerror(errno));
1563 return_value = -1;
1564 }
1565 }
1567 return return_value;
1568 }
// Performs the link step for an already-loaded image 'si': parses its dynamic
// section, loads LD_PRELOAD (executables only) and DT_NEEDED dependencies,
// applies relocations, and restores segment protections (including GNU RELRO
// serialization/sharing driven by 'extinfo'). Returns true on success.
static bool soinfo_link_image(soinfo* si, const android_dlextinfo* extinfo) {
  /* "base" might wrap around UINT32_MAX. */
  ElfW(Addr) base = si->load_bias;
  const ElfW(Phdr)* phdr = si->phdr;
  int phnum = si->phnum;
  bool relocating_linker = (si->flags & FLAG_LINKER) != 0;

  /* We can't debug anything until the linker is relocated */
  if (!relocating_linker) {
    INFO("[ linking %s ]", si->name);
    DEBUG("si->base = %p si->flags = 0x%08x", reinterpret_cast<void*>(si->base), si->flags);
  }

  /* Extract dynamic section */
  size_t dynamic_count;
  ElfW(Word) dynamic_flags;
  phdr_table_get_dynamic_section(phdr, phnum, base, &si->dynamic,
                                 &dynamic_count, &dynamic_flags);
  if (si->dynamic == NULL) {
    if (!relocating_linker) {
      DL_ERR("missing PT_DYNAMIC in \"%s\"", si->name);
    }
    return false;
  } else {
    if (!relocating_linker) {
      DEBUG("dynamic = %p", si->dynamic);
    }
  }

#if defined(__arm__)
  // Locate the exception-handling index table for stack unwinding.
  (void) phdr_table_get_arm_exidx(phdr, phnum, base,
                                  &si->ARM_exidx, &si->ARM_exidx_count);
#endif

  // Extract useful information from dynamic section.
  uint32_t needed_count = 0;
  for (ElfW(Dyn)* d = si->dynamic; d->d_tag != DT_NULL; ++d) {
    DEBUG("d = %p, d[0](tag) = %p d[1](val) = %p",
          d, reinterpret_cast<void*>(d->d_tag), reinterpret_cast<void*>(d->d_un.d_val));
    switch (d->d_tag) {
      case DT_HASH:
        // SysV hash section layout: nbucket, nchain, buckets[], chains[].
        si->nbucket = reinterpret_cast<uint32_t*>(base + d->d_un.d_ptr)[0];
        si->nchain = reinterpret_cast<uint32_t*>(base + d->d_un.d_ptr)[1];
        si->bucket = reinterpret_cast<uint32_t*>(base + d->d_un.d_ptr + 8);
        si->chain = reinterpret_cast<uint32_t*>(base + d->d_un.d_ptr + 8 + si->nbucket * 4);
        break;
      case DT_STRTAB:
        si->strtab = reinterpret_cast<const char*>(base + d->d_un.d_ptr);
        break;
      case DT_SYMTAB:
        si->symtab = reinterpret_cast<ElfW(Sym)*>(base + d->d_un.d_ptr);
        break;
#if !defined(__LP64__)
      case DT_PLTREL:
        // 32-bit ABIs here only support REL-format PLT relocations.
        if (d->d_un.d_val != DT_REL) {
          DL_ERR("unsupported DT_RELA in \"%s\"", si->name);
          return false;
        }
        break;
#endif
      case DT_JMPREL:
#if defined(USE_RELA)
        si->plt_rela = reinterpret_cast<ElfW(Rela)*>(base + d->d_un.d_ptr);
#else
        si->plt_rel = reinterpret_cast<ElfW(Rel)*>(base + d->d_un.d_ptr);
#endif
        break;
      case DT_PLTRELSZ:
#if defined(USE_RELA)
        si->plt_rela_count = d->d_un.d_val / sizeof(ElfW(Rela));
#else
        si->plt_rel_count = d->d_un.d_val / sizeof(ElfW(Rel));
#endif
        break;
#if defined(__mips__)
      case DT_PLTGOT:
        // Used by mips and mips64.
        si->plt_got = reinterpret_cast<ElfW(Addr)**>(base + d->d_un.d_ptr);
        break;
#endif
      case DT_DEBUG:
        // Set the DT_DEBUG entry to the address of _r_debug for GDB
        // if the dynamic table is writable
        // FIXME: not working currently for N64
        // The flags for the LOAD and DYNAMIC program headers do not agree.
        // The LOAD section containing the dynamic table has been mapped as
        // read-only, but the DYNAMIC header claims it is writable.
        // NOTE(review): the 'break' below sits *inside* the #if, so on
        // mips64 this case appears to fall through into the next case --
        // confirm this is intended.
#if !(defined(__mips__) && defined(__LP64__))
        if ((dynamic_flags & PF_W) != 0) {
          d->d_un.d_val = reinterpret_cast<uintptr_t>(&_r_debug);
        }
        break;
#endif
#if defined(USE_RELA)
      case DT_RELA:
        si->rela = reinterpret_cast<ElfW(Rela)*>(base + d->d_un.d_ptr);
        break;
      case DT_RELASZ:
        si->rela_count = d->d_un.d_val / sizeof(ElfW(Rela));
        break;
      case DT_REL:
        DL_ERR("unsupported DT_REL in \"%s\"", si->name);
        return false;
      case DT_RELSZ:
        DL_ERR("unsupported DT_RELSZ in \"%s\"", si->name);
        return false;
#else
      case DT_REL:
        si->rel = reinterpret_cast<ElfW(Rel)*>(base + d->d_un.d_ptr);
        break;
      case DT_RELSZ:
        si->rel_count = d->d_un.d_val / sizeof(ElfW(Rel));
        break;
      case DT_RELA:
        DL_ERR("unsupported DT_RELA in \"%s\"", si->name);
        return false;
#endif
      case DT_INIT:
        si->init_func = reinterpret_cast<linker_function_t>(base + d->d_un.d_ptr);
        DEBUG("%s constructors (DT_INIT) found at %p", si->name, si->init_func);
        break;
      case DT_FINI:
        si->fini_func = reinterpret_cast<linker_function_t>(base + d->d_un.d_ptr);
        DEBUG("%s destructors (DT_FINI) found at %p", si->name, si->fini_func);
        break;
      case DT_INIT_ARRAY:
        si->init_array = reinterpret_cast<linker_function_t*>(base + d->d_un.d_ptr);
        DEBUG("%s constructors (DT_INIT_ARRAY) found at %p", si->name, si->init_array);
        break;
      case DT_INIT_ARRAYSZ:
        si->init_array_count = ((unsigned)d->d_un.d_val) / sizeof(ElfW(Addr));
        break;
      case DT_FINI_ARRAY:
        si->fini_array = reinterpret_cast<linker_function_t*>(base + d->d_un.d_ptr);
        DEBUG("%s destructors (DT_FINI_ARRAY) found at %p", si->name, si->fini_array);
        break;
      case DT_FINI_ARRAYSZ:
        si->fini_array_count = ((unsigned)d->d_un.d_val) / sizeof(ElfW(Addr));
        break;
      case DT_PREINIT_ARRAY:
        si->preinit_array = reinterpret_cast<linker_function_t*>(base + d->d_un.d_ptr);
        DEBUG("%s constructors (DT_PREINIT_ARRAY) found at %p", si->name, si->preinit_array);
        break;
      case DT_PREINIT_ARRAYSZ:
        si->preinit_array_count = ((unsigned)d->d_un.d_val) / sizeof(ElfW(Addr));
        break;
      case DT_TEXTREL:
#if defined(__LP64__)
        // Text relocations are rejected outright on 64-bit.
        DL_ERR("text relocations (DT_TEXTREL) found in 64-bit ELF file \"%s\"", si->name);
        return false;
#else
        si->has_text_relocations = true;
        break;
#endif
      case DT_SYMBOLIC:
        si->has_DT_SYMBOLIC = true;
        break;
      case DT_NEEDED:
        // Just count here; the actual loading happens in a second pass below.
        ++needed_count;
        break;
      case DT_FLAGS:
        if (d->d_un.d_val & DF_TEXTREL) {
#if defined(__LP64__)
          DL_ERR("text relocations (DF_TEXTREL) found in 64-bit ELF file \"%s\"", si->name);
          return false;
#else
          si->has_text_relocations = true;
#endif
        }
        if (d->d_un.d_val & DF_SYMBOLIC) {
          si->has_DT_SYMBOLIC = true;
        }
        break;
#if defined(__mips__)
      case DT_STRSZ:
      case DT_SYMENT:
      case DT_RELENT:
        break;
      case DT_MIPS_RLD_MAP:
        // Set the DT_MIPS_RLD_MAP entry to the address of _r_debug for GDB.
        {
          r_debug** dp = reinterpret_cast<r_debug**>(base + d->d_un.d_ptr);
          *dp = &_r_debug;
        }
        break;
      case DT_MIPS_RLD_VERSION:
      case DT_MIPS_FLAGS:
      case DT_MIPS_BASE_ADDRESS:
      case DT_MIPS_UNREFEXTNO:
        break;

      case DT_MIPS_SYMTABNO:
        si->mips_symtabno = d->d_un.d_val;
        break;

      case DT_MIPS_LOCAL_GOTNO:
        si->mips_local_gotno = d->d_un.d_val;
        break;

      case DT_MIPS_GOTSYM:
        si->mips_gotsym = d->d_un.d_val;
        break;
#endif

      default:
        DEBUG("Unused DT entry: type %p arg %p",
              reinterpret_cast<void*>(d->d_tag), reinterpret_cast<void*>(d->d_un.d_val));
        break;
    }
  }

  DEBUG("si->base = %p, si->strtab = %p, si->symtab = %p",
        reinterpret_cast<void*>(si->base), si->strtab, si->symtab);

  // Sanity checks.
  if (relocating_linker && needed_count != 0) {
    DL_ERR("linker cannot have DT_NEEDED dependencies on other libraries");
    return false;
  }
  if (si->nbucket == 0) {
    DL_ERR("empty/missing DT_HASH in \"%s\" (built with --hash-style=gnu?)", si->name);
    return false;
  }
  if (si->strtab == 0) {
    DL_ERR("empty/missing DT_STRTAB in \"%s\"", si->name);
    return false;
  }
  if (si->symtab == 0) {
    DL_ERR("empty/missing DT_SYMTAB in \"%s\"", si->name);
    return false;
  }

  // If this is the main executable, then load all of the libraries from LD_PRELOAD now.
  if (si->flags & FLAG_EXE) {
    memset(g_ld_preloads, 0, sizeof(g_ld_preloads));
    size_t preload_count = 0;
    for (size_t i = 0; g_ld_preload_names[i] != NULL; i++) {
      soinfo* lsi = find_library(g_ld_preload_names[i], NULL);
      if (lsi != NULL) {
        g_ld_preloads[preload_count++] = lsi;
      } else {
        // As with glibc, failure to load an LD_PRELOAD library is just a warning.
        DL_WARN("could not load library \"%s\" from LD_PRELOAD for \"%s\"; caused by %s",
                g_ld_preload_names[i], si->name, linker_get_error_buffer());
      }
    }
  }

  // Second pass over the dynamic section: load DT_NEEDED dependencies into a
  // NULL-terminated, stack-allocated array for the relocation passes below.
  soinfo** needed = reinterpret_cast<soinfo**>(alloca((1 + needed_count) * sizeof(soinfo*)));
  soinfo** pneeded = needed;

  for (ElfW(Dyn)* d = si->dynamic; d->d_tag != DT_NULL; ++d) {
    if (d->d_tag == DT_NEEDED) {
      const char* library_name = si->strtab + d->d_un.d_val;
      DEBUG("%s needs %s", si->name, library_name);
      soinfo* lsi = find_library(library_name, NULL);
      if (lsi == NULL) {
        strlcpy(tmp_err_buf, linker_get_error_buffer(), sizeof(tmp_err_buf));
        DL_ERR("could not load library \"%s\" needed by \"%s\"; caused by %s",
               library_name, si->name, tmp_err_buf);
        return false;
      }

      si->add_child(lsi);
      *pneeded++ = lsi;
    }
  }
  *pneeded = NULL;

#if !defined(__LP64__)
  if (si->has_text_relocations) {
    // Make segments writable to allow text relocations to work properly. We will later call
    // phdr_table_protect_segments() after all of them are applied and all constructors are run.
#if !defined(__i386__) // The platform itself has too many text relocations on x86.
    DL_WARN("%s has text relocations. This is wasting memory and prevents "
            "security hardening. Please fix.", si->name);
#endif
    if (phdr_table_unprotect_segments(si->phdr, si->phnum, si->load_bias) < 0) {
      DL_ERR("can't unprotect loadable segments for \"%s\": %s",
             si->name, strerror(errno));
      return false;
    }
  }
#endif

  // Apply PLT relocations first, then the general relocation section.
#if defined(USE_RELA)
  if (si->plt_rela != NULL) {
    DEBUG("[ relocating %s plt ]\n", si->name);
    if (soinfo_relocate(si, si->plt_rela, si->plt_rela_count, needed)) {
      return false;
    }
  }
  if (si->rela != NULL) {
    DEBUG("[ relocating %s ]\n", si->name);
    if (soinfo_relocate(si, si->rela, si->rela_count, needed)) {
      return false;
    }
  }
#else
  if (si->plt_rel != NULL) {
    DEBUG("[ relocating %s plt ]", si->name);
    if (soinfo_relocate(si, si->plt_rel, si->plt_rel_count, needed)) {
      return false;
    }
  }
  if (si->rel != NULL) {
    DEBUG("[ relocating %s ]", si->name);
    if (soinfo_relocate(si, si->rel, si->rel_count, needed)) {
      return false;
    }
  }
#endif

#if defined(__mips__)
  if (!mips_relocate_got(si, needed)) {
    return false;
  }
#endif

  si->flags |= FLAG_LINKED;
  DEBUG("[ finished linking %s ]", si->name);

#if !defined(__LP64__)
  if (si->has_text_relocations) {
    // All relocations are done, we can protect our segments back to read-only.
    if (phdr_table_protect_segments(si->phdr, si->phnum, si->load_bias) < 0) {
      DL_ERR("can't protect segments for \"%s\": %s",
             si->name, strerror(errno));
      return false;
    }
  }
#endif

  /* We can also turn on GNU RELRO protection */
  if (phdr_table_protect_gnu_relro(si->phdr, si->phnum, si->load_bias) < 0) {
    DL_ERR("can't enable GNU RELRO protection for \"%s\": %s",
           si->name, strerror(errno));
    return false;
  }

  /* Handle serializing/sharing the RELRO segment */
  if (extinfo && (extinfo->flags & ANDROID_DLEXT_WRITE_RELRO)) {
    if (phdr_table_serialize_gnu_relro(si->phdr, si->phnum, si->load_bias,
                                       extinfo->relro_fd) < 0) {
      DL_ERR("failed serializing GNU RELRO section for \"%s\": %s",
             si->name, strerror(errno));
      return false;
    }
  } else if (extinfo && (extinfo->flags & ANDROID_DLEXT_USE_RELRO)) {
    if (phdr_table_map_gnu_relro(si->phdr, si->phnum, si->load_bias,
                                 extinfo->relro_fd) < 0) {
      DL_ERR("failed mapping GNU RELRO section for \"%s\": %s",
             si->name, strerror(errno));
      return false;
    }
  }

  notify_gdb_of_load(si);
  return true;
}
1931 /*
1932 * This function add vdso to internal dso list.
1933 * It helps to stack unwinding through signal handlers.
1934 * Also, it makes bionic more like glibc.
1935 */
1936 static void add_vdso(KernelArgumentBlock& args __unused) {
1937 #if defined(AT_SYSINFO_EHDR)
1938 ElfW(Ehdr)* ehdr_vdso = reinterpret_cast<ElfW(Ehdr)*>(args.getauxval(AT_SYSINFO_EHDR));
1939 if (ehdr_vdso == NULL) {
1940 return;
1941 }
1943 soinfo* si = soinfo_alloc("[vdso]", NULL);
1945 si->phdr = reinterpret_cast<ElfW(Phdr)*>(reinterpret_cast<char*>(ehdr_vdso) + ehdr_vdso->e_phoff);
1946 si->phnum = ehdr_vdso->e_phnum;
1947 si->base = reinterpret_cast<ElfW(Addr)>(ehdr_vdso);
1948 si->size = phdr_table_get_load_size(si->phdr, si->phnum);
1949 si->load_bias = get_elf_exec_load_bias(ehdr_vdso);
1951 soinfo_link_image(si, NULL);
1952 #endif
1953 }
1955 /*
1956 * This is linker soinfo for GDB. See details below.
1957 */
1958 static soinfo linker_soinfo_for_gdb;
1960 /* gdb expects the linker to be in the debug shared object list.
1961 * Without this, gdb has trouble locating the linker's ".text"
1962 * and ".plt" sections. Gdb could also potentially use this to
1963 * relocate the offset of our exported 'rtld_db_dlactivity' symbol.
1964 * Don't use soinfo_alloc(), because the linker shouldn't
1965 * be on the soinfo list.
1966 */
1967 static void init_linker_info_for_gdb(ElfW(Addr) linker_base) {
1968 #if defined(__LP64__)
1969 strlcpy(linker_soinfo_for_gdb.name, "/system/bin/linker64", sizeof(linker_soinfo_for_gdb.name));
1970 #else
1971 strlcpy(linker_soinfo_for_gdb.name, "/system/bin/linker", sizeof(linker_soinfo_for_gdb.name));
1972 #endif
1973 linker_soinfo_for_gdb.flags = FLAG_NEW_SOINFO;
1974 linker_soinfo_for_gdb.base = linker_base;
1976 /*
1977 * Set the dynamic field in the link map otherwise gdb will complain with
1978 * the following:
1979 * warning: .dynamic section for "/system/bin/linker" is not at the
1980 * expected address (wrong library or version mismatch?)
1981 */
1982 ElfW(Ehdr)* elf_hdr = reinterpret_cast<ElfW(Ehdr)*>(linker_base);
1983 ElfW(Phdr)* phdr = reinterpret_cast<ElfW(Phdr)*>(linker_base + elf_hdr->e_phoff);
1984 phdr_table_get_dynamic_section(phdr, elf_hdr->e_phnum, linker_base,
1985 &linker_soinfo_for_gdb.dynamic, NULL, NULL);
1986 insert_soinfo_into_debug_map(&linker_soinfo_for_gdb);
1987 }
/*
 * This code is called after the linker has linked itself and
 * fixed its own GOT. It is safe to make references to externs
 * and other non-local data at this point. It loads and links the main
 * executable (and, transitively, its dependencies), runs constructors,
 * and returns the executable's entry point.
 */
static ElfW(Addr) __linker_init_post_relocation(KernelArgumentBlock& args, ElfW(Addr) linker_base) {
  /* NOTE: we store the args pointer on a special location
   * of the temporary TLS area in order to pass it to
   * the C Library's runtime initializer.
   *
   * The initializer must clear the slot and reset the TLS
   * to point to a different location to ensure that no other
   * shared library constructor can access it.
   */
  __libc_init_tls(args);

#if TIMING
  struct timeval t0, t1;
  gettimeofday(&t0, 0);
#endif

  // Initialize environment functions, and get to the ELF aux vectors table.
  linker_env_init(args);

  // If this is a setuid/setgid program, close the security hole described in
  // ftp://ftp.freebsd.org/pub/FreeBSD/CERT/advisories/FreeBSD-SA-02:23.stdio.asc
  if (get_AT_SECURE()) {
    nullify_closed_stdio();
  }

  debuggerd_init();

  // Get a few environment variables.
  const char* LD_DEBUG = linker_env_get("LD_DEBUG");
  if (LD_DEBUG != NULL) {
    g_ld_debug_verbosity = atoi(LD_DEBUG);
  }

  // Normally, these are cleaned by linker_env_init, but the test
  // doesn't cost us anything.
  const char* ldpath_env = NULL;
  const char* ldpreload_env = NULL;
  if (!get_AT_SECURE()) {
    // LD_LIBRARY_PATH / LD_PRELOAD are honored only for non-setuid programs.
    ldpath_env = linker_env_get("LD_LIBRARY_PATH");
    ldpreload_env = linker_env_get("LD_PRELOAD");
  }

  // Linker does not call constructors for its own
  // global variables so we need to initialize
  // the allocators explicitly.
  g_soinfo_allocator.init();
  g_soinfo_links_allocator.init();

  INFO("[ android linker & debugger ]");

  soinfo* si = soinfo_alloc(args.argv[0], NULL);
  if (si == NULL) {
    exit(EXIT_FAILURE);
  }

  /* bootstrap the link map, the main exe always needs to be first */
  si->flags |= FLAG_EXE;
  link_map* map = &(si->link_map_head);

  map->l_addr = 0;
  map->l_name = args.argv[0];
  map->l_prev = NULL;
  map->l_next = NULL;

  // Make the executable's map the head of the debugger's (_r_debug) list.
  _r_debug.r_map = map;
  r_debug_tail = map;

  init_linker_info_for_gdb(linker_base);

  // Extract information passed from the kernel.
  si->phdr = reinterpret_cast<ElfW(Phdr)*>(args.getauxval(AT_PHDR));
  si->phnum = args.getauxval(AT_PHNUM);
  si->entry = args.getauxval(AT_ENTRY);

  /* Compute the value of si->base. We can't rely on the fact that
   * the first entry is the PHDR because this will not be true
   * for certain executables (e.g. some in the NDK unit test suite)
   */
  si->base = 0;
  si->size = phdr_table_get_load_size(si->phdr, si->phnum);
  si->load_bias = 0;
  for (size_t i = 0; i < si->phnum; ++i) {
    if (si->phdr[i].p_type == PT_PHDR) {
      // PT_PHDR gives both the file offset and virtual address of the
      // program header table, which lets us recover bias and base.
      si->load_bias = reinterpret_cast<ElfW(Addr)>(si->phdr) - si->phdr[i].p_vaddr;
      si->base = reinterpret_cast<ElfW(Addr)>(si->phdr) - si->phdr[i].p_offset;
      break;
    }
  }
  si->dynamic = NULL;
  si->ref_count = 1;

  // Only position-independent executables (ET_DYN) are supported.
  ElfW(Ehdr)* elf_hdr = reinterpret_cast<ElfW(Ehdr)*>(si->base);
  if (elf_hdr->e_type != ET_DYN) {
    __libc_format_fd(2, "error: only position independent executables (PIE) are supported.\n");
    exit(EXIT_FAILURE);
  }

  // Use LD_LIBRARY_PATH and LD_PRELOAD (but only if we aren't setuid/setgid).
  parse_LD_LIBRARY_PATH(ldpath_env);
  parse_LD_PRELOAD(ldpreload_env);

  somain = si;

  if (!soinfo_link_image(si, NULL)) {
    __libc_format_fd(2, "CANNOT LINK EXECUTABLE: %s\n", linker_get_error_buffer());
    exit(EXIT_FAILURE);
  }

  add_vdso(args);

  // Pre-init constructors run before LD_PRELOAD and regular constructors.
  si->CallPreInitConstructors();

  for (size_t i = 0; g_ld_preloads[i] != NULL; ++i) {
    g_ld_preloads[i]->CallConstructors();
  }

  /* After the link_image, the si->load_bias is initialized.
   * For so lib, the map->l_addr will be updated in notify_gdb_of_load.
   * We need to update this value for so exe here. So Unwind_Backtrace
   * for some arch like x86 could work correctly within so exe.
   */
  map->l_addr = si->load_bias;
  si->CallConstructors();

#if TIMING
  gettimeofday(&t1, NULL);
  PRINT("LINKER TIME: %s: %d microseconds", args.argv[0], (int) (
           (((long long)t1.tv_sec * 1000000LL) + (long long)t1.tv_usec) -
           (((long long)t0.tv_sec * 1000000LL) + (long long)t0.tv_usec)));
#endif
#if STATS
  PRINT("RELO STATS: %s: %d abs, %d rel, %d copy, %d symbol", args.argv[0],
         linker_stats.count[kRelocAbsolute],
         linker_stats.count[kRelocRelative],
         linker_stats.count[kRelocCopy],
         linker_stats.count[kRelocSymbol]);
#endif
#if COUNT_PAGES
  {
    unsigned n;
    unsigned i;
    unsigned count = 0;
    for (n = 0; n < 4096; n++) {
      if (bitmask[n]) {
        unsigned x = bitmask[n];
#if defined(__LP64__)
        for (i = 0; i < 32; i++) {
#else
        for (i = 0; i < 8; i++) {
#endif
          if (x & 1) {
            count++;
          }
          x >>= 1;
        }
      }
    }
    PRINT("PAGES MODIFIED: %s: %d (%dKB)", args.argv[0], count, count * 4);
  }
#endif

#if TIMING || STATS || COUNT_PAGES
  fflush(stdout);
#endif

  TRACE("[ Ready to execute '%s' @ %p ]", si->name, reinterpret_cast<void*>(si->entry));
  return si->entry;
}
/* Compute the load bias of an existing executable. This shall only
 * be used to compute the load bias of an executable or shared library
 * that was loaded by the kernel itself.
 *
 * Input:
 *   elf -> address of the ELF header, assumed to be at the start of the file.
 * Return:
 *   load bias: add this value to any p_vaddr in the file to obtain the
 *   corresponding address in memory. Returns 0 when the image has no
 *   PT_LOAD segment.
 */
static ElfW(Addr) get_elf_exec_load_bias(const ElfW(Ehdr)* elf) {
  const ElfW(Addr) ehdr_addr = reinterpret_cast<ElfW(Addr)>(elf);
  const ElfW(Phdr)* phdr_table =
      reinterpret_cast<const ElfW(Phdr)*>(reinterpret_cast<uintptr_t>(elf) + elf->e_phoff);

  // The first PT_LOAD segment determines the bias: the kernel mapped the
  // file starting at the ELF header, so memory address = header + offset.
  for (size_t i = 0; i < elf->e_phnum; ++i) {
    if (phdr_table[i].p_type == PT_LOAD) {
      return ehdr_addr + phdr_table[i].p_offset - phdr_table[i].p_vaddr;
    }
  }
  return 0;
}
/*
 * This is the entry point for the linker, called from begin.S. This
 * method is responsible for fixing the linker's own relocations, and
 * then calling __linker_init_post_relocation().
 *
 * Because this method is called before the linker has fixed its own
 * relocations, any attempt to reference an extern variable, extern
 * function, or other GOT reference will generate a segfault.
 */
extern "C" ElfW(Addr) __linker_init(void* raw_args) {
  // Initialize static variables explicitly: the linker runs no
  // constructors/initializers for its own globals at this point.
  solist = get_libdl_info();
  sonext = get_libdl_info();

  KernelArgumentBlock args(raw_args);

  // AT_BASE is the address at which the kernel mapped the linker itself.
  ElfW(Addr) linker_addr = args.getauxval(AT_BASE);
  ElfW(Ehdr)* elf_hdr = reinterpret_cast<ElfW(Ehdr)*>(linker_addr);
  ElfW(Phdr)* phdr = reinterpret_cast<ElfW(Phdr)*>(linker_addr + elf_hdr->e_phoff);

  // Build a temporary, stack-allocated soinfo describing the linker so
  // soinfo_link_image() can process the linker's own relocations.
  soinfo linker_so;
  memset(&linker_so, 0, sizeof(soinfo));

  // NOTE(review): assumes soinfo::name is large enough to hold this
  // literal -- confirm against the soinfo declaration.
  strcpy(linker_so.name, "[dynamic linker]");
  linker_so.base = linker_addr;
  linker_so.size = phdr_table_get_load_size(phdr, elf_hdr->e_phnum);
  linker_so.load_bias = get_elf_exec_load_bias(elf_hdr);
  linker_so.dynamic = NULL;
  linker_so.phdr = phdr;
  linker_so.phnum = elf_hdr->e_phnum;
  linker_so.flags |= FLAG_LINKER;

  if (!soinfo_link_image(&linker_so, NULL)) {
    // It would be nice to print an error message, but if the linker
    // can't link itself, there's no guarantee that we'll be able to
    // call write() (because it involves a GOT reference). We may as
    // well try though...
    const char* msg = "CANNOT LINK EXECUTABLE: ";
    write(2, msg, strlen(msg));
    write(2, __linker_dl_err_buf, strlen(__linker_dl_err_buf));
    write(2, "\n", 1);
    _exit(EXIT_FAILURE);
  }

  // We have successfully fixed our own relocations. It's safe to run
  // the main part of the linker now.
  args.abort_message_ptr = &g_abort_message;
  ElfW(Addr) start_address = __linker_init_post_relocation(args, linker_addr);

  // Re-protect the linker's data pages now that initialization is done.
  protect_data(PROT_READ);

  // Return the address that the calling assembly stub should jump to.
  return start_address;
}