1 /*
2 * Copyright (C) 2008, 2009 The Android Open Source Project
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the
13 * distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
29 #include <dlfcn.h>
30 #include <errno.h>
31 #include <fcntl.h>
32 #include <inttypes.h>
33 #include <pthread.h>
34 #include <stdio.h>
35 #include <stdlib.h>
36 #include <string.h>
37 #include <sys/mman.h>
38 #include <unistd.h>
40 #include <new>
42 // Private C library headers.
43 #include "private/bionic_tls.h"
44 #include "private/KernelArgumentBlock.h"
45 #include "private/ScopedPthreadMutexLocker.h"
46 #include "private/ScopedFd.h"
47 #include "private/ScopeGuard.h"
48 #include "private/UniquePtr.h"
50 #include "linker.h"
51 #include "linker_debug.h"
52 #include "linker_environ.h"
53 #include "linker_phdr.h"
54 #include "linker_allocator.h"
56 /* >>> IMPORTANT NOTE - READ ME BEFORE MODIFYING <<<
57 *
58 * Do NOT use malloc() and friends or pthread_*() code here.
59 * Don't use printf() either; it's caused mysterious memory
60 * corruption in the past.
61 * The linker runs before we bring up libc and it's easiest
62 * to make sure it does not depend on any complex libc features
63 *
64 * open issues / todo:
65 *
66 * - cleaner error reporting
67 * - after linking, set as much stuff as possible to READONLY
68 * and NOEXEC
69 */
71 #if defined(__LP64__)
72 #define SEARCH_NAME(x) x
73 #else
74 // Nvidia drivers are relying on the bug:
75 // http://code.google.com/p/android/issues/detail?id=6670
76 // so we continue to use base-name lookup for lp32
// Return the basename component of |name| — the part after the last '/' —
// or |name| unchanged when it contains no slash.
static const char* get_base_name(const char* name) {
  const char* slash = strrchr(name, '/');
  if (slash == nullptr) {
    return name;
  }
  return slash + 1;
}
81 #define SEARCH_NAME(x) get_base_name(x)
82 #endif
84 static ElfW(Addr) get_elf_exec_load_bias(const ElfW(Ehdr)* elf);
86 static LinkerAllocator<soinfo> g_soinfo_allocator;
87 static LinkerAllocator<LinkedListEntry<soinfo>> g_soinfo_links_allocator;
89 static soinfo* solist;
90 static soinfo* sonext;
91 static soinfo* somain; // main process, always the one after libdl_info
93 static const char* const kDefaultLdPaths[] = {
94 #if defined(__LP64__)
95 "/vendor/lib64",
96 "/system/lib64",
97 #else
98 "/vendor/lib",
99 "/system/lib",
100 #endif
101 nullptr
102 };
104 #define LDPATH_BUFSIZE (LDPATH_MAX*64)
105 #define LDPATH_MAX 8
107 #define LDPRELOAD_BUFSIZE (LDPRELOAD_MAX*64)
108 #define LDPRELOAD_MAX 8
110 static char g_ld_library_paths_buffer[LDPATH_BUFSIZE];
111 static const char* g_ld_library_paths[LDPATH_MAX + 1];
113 static char g_ld_preloads_buffer[LDPRELOAD_BUFSIZE];
114 static const char* g_ld_preload_names[LDPRELOAD_MAX + 1];
116 static soinfo* g_ld_preloads[LDPRELOAD_MAX + 1];
118 __LIBC_HIDDEN__ int g_ld_debug_verbosity;
120 __LIBC_HIDDEN__ abort_msg_t* g_abort_message = nullptr; // For debuggerd.
// Categories of relocations, used only by the STATS build's counters.
enum RelocationKind {
  kRelocAbsolute = 0,
  kRelocRelative,
  kRelocCopy,
  kRelocSymbol,
  kRelocMax  // count of kinds, not a real kind
};
#if STATS
// Per-kind relocation counters for the STATS instrumentation build.
struct linker_stats_t {
  int count[kRelocMax];
};

static linker_stats_t linker_stats;

// Record that one relocation of the given kind was processed.
static void count_relocation(RelocationKind kind) {
  ++linker_stats.count[kind];
}
#else
// No-op stub so call sites need no #ifdefs in normal builds.
static void count_relocation(RelocationKind) {
}
#endif
145 #if COUNT_PAGES
146 static unsigned bitmask[4096];
147 #if defined(__LP64__)
148 #define MARK(offset) \
149 do { \
150 if ((((offset) >> 12) >> 5) < 4096) \
151 bitmask[((offset) >> 12) >> 5] |= (1 << (((offset) >> 12) & 31)); \
152 } while (0)
153 #else
154 #define MARK(offset) \
155 do { \
156 bitmask[((offset) >> 12) >> 3] |= (1 << (((offset) >> 12) & 7)); \
157 } while (0)
158 #endif
159 #else
160 #define MARK(x) do {} while (0)
161 #endif
163 // You shouldn't try to call memory-allocating functions in the dynamic linker.
164 // Guard against the most obvious ones.
165 #define DISALLOW_ALLOCATION(return_type, name, ...) \
166 return_type name __VA_ARGS__ \
167 { \
168 __libc_fatal("ERROR: " #name " called from the dynamic linker!\n"); \
169 }
170 DISALLOW_ALLOCATION(void*, malloc, (size_t u __unused));
171 DISALLOW_ALLOCATION(void, free, (void* u __unused));
172 DISALLOW_ALLOCATION(void*, realloc, (void* u1 __unused, size_t u2 __unused));
173 DISALLOW_ALLOCATION(void*, calloc, (size_t u1 __unused, size_t u2 __unused));
// Backing storage for dlerror() messages formatted inside the linker.
static char __linker_dl_err_buf[768];

// Expose the shared error buffer to the DL_ERR/dlerror machinery.
char* linker_get_error_buffer() {
  return __linker_dl_err_buf;
}

// Capacity of the shared error buffer, for bounded formatting.
size_t linker_get_error_buffer_size() {
  return sizeof(__linker_dl_err_buf);
}
185 // This function is an empty stub where GDB locates a breakpoint to get notified
186 // about linker activity.
187 extern "C" void __attribute__((noinline)) __attribute__((visibility("default"))) rtld_db_dlactivity();
189 static pthread_mutex_t g__r_debug_mutex = PTHREAD_MUTEX_INITIALIZER;
190 static r_debug _r_debug = {1, nullptr, reinterpret_cast<uintptr_t>(&rtld_db_dlactivity), r_debug::RT_CONSISTENT, 0};
191 static link_map* r_debug_tail = 0;
193 static void insert_soinfo_into_debug_map(soinfo* info) {
194 // Copy the necessary fields into the debug structure.
195 link_map* map = &(info->link_map_head);
196 map->l_addr = info->load_bias;
197 map->l_name = reinterpret_cast<char*>(info->name);
198 map->l_ld = info->dynamic;
200 // Stick the new library at the end of the list.
201 // gdb tends to care more about libc than it does
202 // about leaf libraries, and ordering it this way
203 // reduces the back-and-forth over the wire.
204 if (r_debug_tail) {
205 r_debug_tail->l_next = map;
206 map->l_prev = r_debug_tail;
207 map->l_next = 0;
208 } else {
209 _r_debug.r_map = map;
210 map->l_prev = 0;
211 map->l_next = 0;
212 }
213 r_debug_tail = map;
214 }
216 static void remove_soinfo_from_debug_map(soinfo* info) {
217 link_map* map = &(info->link_map_head);
219 if (r_debug_tail == map) {
220 r_debug_tail = map->l_prev;
221 }
223 if (map->l_prev) {
224 map->l_prev->l_next = map->l_next;
225 }
226 if (map->l_next) {
227 map->l_next->l_prev = map->l_prev;
228 }
229 }
// Tell an attached debugger (via the r_debug rendezvous protocol) that a
// library was loaded. rtld_db_dlactivity() is the breakpoint hook gdb sets.
static void notify_gdb_of_load(soinfo* info) {
  if (info->flags & FLAG_EXE) {
    // GDB already knows about the main executable
    return;
  }

  ScopedPthreadMutexLocker locker(&g__r_debug_mutex);

  // Announce that the link map is about to change...
  _r_debug.r_state = r_debug::RT_ADD;
  rtld_db_dlactivity();

  insert_soinfo_into_debug_map(info);

  // ...then that it is consistent again.
  _r_debug.r_state = r_debug::RT_CONSISTENT;
  rtld_db_dlactivity();
}
// Tell an attached debugger that a library is being removed from the
// link map (mirror image of notify_gdb_of_load).
static void notify_gdb_of_unload(soinfo* info) {
  if (info->flags & FLAG_EXE) {
    // GDB already knows about the main executable
    return;
  }

  ScopedPthreadMutexLocker locker(&g__r_debug_mutex);

  // Announce the change...
  _r_debug.r_state = r_debug::RT_DELETE;
  rtld_db_dlactivity();

  remove_soinfo_from_debug_map(info);

  // ...then that the map is consistent again.
  _r_debug.r_state = r_debug::RT_CONSISTENT;
  rtld_db_dlactivity();
}
// Pulse RT_ADD/RT_CONSISTENT so an attached debugger re-reads the link map.
// NOTE(review): unlike notify_gdb_of_load/unload this takes no
// g__r_debug_mutex — confirm callers cannot race with those functions.
void notify_gdb_of_libraries() {
  _r_debug.r_state = r_debug::RT_ADD;
  rtld_db_dlactivity();
  _r_debug.r_state = r_debug::RT_CONSISTENT;
  rtld_db_dlactivity();
}
// Allocate a node for soinfo parent/child lists from the linker's own
// block allocator (malloc is forbidden inside the dynamic linker).
LinkedListEntry<soinfo>* SoinfoListAllocator::alloc() {
  return g_soinfo_links_allocator.alloc();
}

// Return a list node to the block allocator.
void SoinfoListAllocator::free(LinkedListEntry<soinfo>* entry) {
  g_soinfo_links_allocator.free(entry);
}

// Flip the pages backing soinfo objects and their list nodes between
// read-only and read-write; the linker keeps its bookkeeping read-only
// except while it is actively mutating it (see do_dlopen/do_dlclose).
static void protect_data(int protection) {
  g_soinfo_allocator.protect_all(protection);
  g_soinfo_links_allocator.protect_all(protection);
}
// Allocate a new soinfo for |name| and append it to the global solist.
// Returns nullptr (with dlerror set) when the name exceeds SOINFO_NAME_LEN.
static soinfo* soinfo_alloc(const char* name, struct stat* file_stat, int rtld_flags) {
  if (strlen(name) >= SOINFO_NAME_LEN) {
    DL_ERR("library name \"%s\" too long", name);
    return nullptr;
  }

  // Placement new into the linker's page allocator — no malloc here.
  soinfo* si = new (g_soinfo_allocator.alloc()) soinfo(name, file_stat, rtld_flags);

  // sonext is never null: solist always starts with the static libdl_info.
  sonext->next = si;
  sonext = si;

  TRACE("name %s: allocated soinfo @ %p", name, si);
  return si;
}
// Unmap a library's segments and remove its soinfo from the global solist.
static void soinfo_free(soinfo* si) {
  if (si == nullptr) {
    return;
  }

  // base/size cover the whole reserved load region for this library.
  if (si->base != 0 && si->size != 0) {
    munmap(reinterpret_cast<void*>(si->base), si->size);
  }

  soinfo *prev = nullptr, *trav;

  TRACE("name %s: freeing soinfo @ %p", si->name, si);

  // Locate si in the singly-linked solist, remembering its predecessor.
  for (trav = solist; trav != nullptr; trav = trav->next) {
    if (trav == si) {
      break;
    }
    prev = trav;
  }
  if (trav == nullptr) {
    // si was not in solist
    DL_ERR("name \"%s\" is not in solist!", si->name);
    return;
  }

  // clear links to/from si
  si->remove_all_links();

  // prev will never be null, because the first entry in solist is
  // always the static libdl_info.
  prev->next = si->next;
  if (si == sonext) {
    sonext = prev;
  }

  g_soinfo_allocator.free(si);
}
// Split |path| on any character in |delimiters| into at most |max_count|
// entries of |array|, using |buf| (of |buf_size| bytes) as backing storage
// for the copied string. Empty components are skipped and the resulting
// array is nullptr-terminated. A nullptr |path| leaves |array| untouched.
static void parse_path(const char* path, const char* delimiters,
                       const char** array, char* buf, size_t buf_size, size_t max_count) {
  if (path == nullptr) {
    return;
  }

  // strlcpy returns the length it *tried* to copy, so len >= buf_size
  // means the input was truncated to fit.
  size_t len = strlcpy(buf, path, buf_size);

  // strsep NUL-terminates each component in place and advances buf_p.
  size_t i = 0;
  char* buf_p = buf;
  while (i < max_count && (array[i] = strsep(&buf_p, delimiters))) {
    if (*array[i] != '\0') {
      ++i;
    }
  }

  // Forget the last path if we had to truncate; this occurs if the 2nd to
  // last char isn't '\0' (i.e. wasn't originally a delimiter).
  if (i > 0 && len >= buf_size && buf[buf_size - 2] != '\0') {
    array[i - 1] = nullptr;
  } else {
    array[i] = nullptr;
  }
}
// Populate g_ld_library_paths from a ':'-separated LD_LIBRARY_PATH value.
static void parse_LD_LIBRARY_PATH(const char* path) {
  parse_path(path, ":", g_ld_library_paths,
             g_ld_library_paths_buffer, sizeof(g_ld_library_paths_buffer), LDPATH_MAX);
}

// Populate g_ld_preload_names from an LD_PRELOAD value.
static void parse_LD_PRELOAD(const char* path) {
  // We have historically supported ':' as well as ' ' in LD_PRELOAD.
  parse_path(path, " :", g_ld_preload_names,
             g_ld_preloads_buffer, sizeof(g_ld_preloads_buffer), LDPRELOAD_MAX);
}
375 #if defined(__arm__)
// For a given PC, find the .so that it belongs to.
// Returns the base address of the .ARM.exidx section
// for that .so, and the number of 8-byte entries
// in that section (via *pcount).
//
// Intended to be called by libc's __gnu_Unwind_Find_exidx().
//
// This function is exposed via dlfcn.cpp and libdl.so.
_Unwind_Ptr dl_unwind_find_exidx(_Unwind_Ptr pc, int* pcount) {
  unsigned addr = (unsigned)pc;

  // Walk every loaded library and test whether pc lies inside its mapping.
  for (soinfo* si = solist; si != 0; si = si->next) {
    if ((addr >= si->base) && (addr < (si->base + si->size))) {
      *pcount = si->ARM_exidx_count;
      return (_Unwind_Ptr)si->ARM_exidx;
    }
  }
  // No owning library: report an empty exidx table.
  // NOTE(review): _Unwind_Ptr is an integer type on ARM; confirm that
  // returning nullptr here converts as intended on all toolchains.
  *pcount = 0;
  return nullptr;
}
398 #endif
400 // Here, we only have to provide a callback to iterate across all the
401 // loaded libraries. gcc_eh does the rest.
402 int dl_iterate_phdr(int (*cb)(dl_phdr_info* info, size_t size, void* data), void* data) {
403 int rv = 0;
404 for (soinfo* si = solist; si != nullptr; si = si->next) {
405 dl_phdr_info dl_info;
406 dl_info.dlpi_addr = si->link_map_head.l_addr;
407 dl_info.dlpi_name = si->link_map_head.l_name;
408 dl_info.dlpi_phdr = si->phdr;
409 dl_info.dlpi_phnum = si->phnum;
410 rv = cb(&dl_info, sizeof(dl_phdr_info), data);
411 if (rv != 0) {
412 break;
413 }
414 }
415 return rv;
416 }
// Look up |name| in one library's SysV ELF hash table. |hash| must be
// elfhash(name). Returns the symtab entry for a *defined* global or weak
// symbol, or nullptr if this library does not provide the symbol.
static ElfW(Sym)* soinfo_elf_lookup(soinfo* si, unsigned hash, const char* name) {
  ElfW(Sym)* symtab = si->symtab;
  const char* strtab = si->strtab;

  TRACE_TYPE(LOOKUP, "SEARCH %s in %s@%p %x %zd",
             name, si->name, reinterpret_cast<void*>(si->base), hash, hash % si->nbucket);

  // bucket[hash % nbucket] heads a chain of symbol indices linked via
  // chain[]; index 0 (STN_UNDEF) terminates the chain.
  for (unsigned n = si->bucket[hash % si->nbucket]; n != 0; n = si->chain[n]) {
    ElfW(Sym)* s = symtab + n;
    if (strcmp(strtab + s->st_name, name)) continue;

    // only concern ourselves with global and weak symbol definitions
    switch (ELF_ST_BIND(s->st_info)) {
      case STB_GLOBAL:
      case STB_WEAK:
        if (s->st_shndx == SHN_UNDEF) {
          // This library references the symbol but doesn't define it.
          continue;
        }

        TRACE_TYPE(LOOKUP, "FOUND %s in %s (%p) %zd",
                   name, si->name, reinterpret_cast<void*>(s->st_value),
                   static_cast<size_t>(s->st_size));
        return s;
      case STB_LOCAL:
        continue;
      default:
        __libc_fatal("ERROR: Unexpected ST_BIND value: %d for '%s' in '%s'",
                     ELF_ST_BIND(s->st_info), name, si->name);
    }
  }

  TRACE_TYPE(LOOKUP, "NOT FOUND %s in %s@%p %x %zd",
             name, si->name, reinterpret_cast<void*>(si->base), hash, hash % si->nbucket);

  return nullptr;
}
soinfo::soinfo(const char* name, const struct stat* file_stat, int rtld_flags) {
  // Zero the whole object first; soinfo is treated as plain old data.
  memset(this, 0, sizeof(*this));

  strlcpy(this->name, name, sizeof(this->name));
  flags = FLAG_NEW_SOINFO;
  version = SOINFO_VERSION;

  // Remember the backing file's identity so the same library opened via
  // a symlink or different path is recognized as already loaded.
  if (file_stat != nullptr) {
    set_st_dev(file_stat->st_dev);
    set_st_ino(file_stat->st_ino);
  }

  this->rtld_flags = rtld_flags;
}
// Classic SysV ELF hash over a NUL-terminated symbol name. The xor pair
// below is the standard "fold the top nibble back in and clear it" step.
static unsigned elfhash(const char* _name) {
  unsigned h = 0;
  for (const unsigned char* p = reinterpret_cast<const unsigned char*>(_name);
       *p != '\0'; ++p) {
    h = (h << 4) + *p;
    unsigned g = h & 0xf0000000;
    h ^= g;        // clear the top nibble (g is exactly h's top nibble)
    h ^= g >> 24;  // and mix it into the low bits
  }
  return h;
}
// Resolve |name| for a relocation in |si|, using this lookup order:
//   1. the main executable (somain),
//   2. the LD_PRELOADed libraries,
//   3. |si| itself,
//   4. si's direct DT_NEEDED children.
// On success, *lsi is set to the library that provides the symbol.
static ElfW(Sym)* soinfo_do_lookup(soinfo* si, const char* name, soinfo** lsi) {
  unsigned elf_hash = elfhash(name);
  ElfW(Sym)* s = nullptr;

  // somain is null only while the linker bootstraps itself.
  if (somain != nullptr) {
    DEBUG("%s: looking up %s in executable %s",
          si->name, name, somain->name);

    // 1. Look for it in the main executable
    s = soinfo_elf_lookup(somain, elf_hash, name);
    if (s != nullptr) {
      *lsi = somain;
    }

    // 2. Look for it in the ld_preloads
    if (s == nullptr) {
      for (int i = 0; g_ld_preloads[i] != NULL; i++) {
        s = soinfo_elf_lookup(g_ld_preloads[i], elf_hash, name);
        if (s != nullptr) {
          *lsi = g_ld_preloads[i];
          break;
        }
      }
    }

    /* Look for symbols in the local scope (the object who is
     * searching). This happens with C++ templates on x86 for some
     * reason.
     *
     * Notes on weak symbols:
     * The ELF specs are ambiguous about treatment of weak definitions in
     * dynamic linking. Some systems return the first definition found
     * and some the first non-weak definition. This is system dependent.
     * Here we return the first definition found for simplicity. */

    if (s == nullptr) {
      s = soinfo_elf_lookup(si, elf_hash, name);
      if (s != nullptr) {
        *lsi = si;
      }
    }
  }

  // 4. Fall back to the direct DT_NEEDED dependencies; visit() stops as
  // soon as the lambda returns false (i.e. on the first hit).
  if (s == nullptr) {
    si->get_children().visit([&](soinfo* child) {
      DEBUG("%s: looking up %s in %s", si->name, name, child->name);
      s = soinfo_elf_lookup(child, elf_hash, name);
      if (s != nullptr) {
        *lsi = child;
        return false;  // found: stop visiting
      }
      return true;  // keep visiting
    });
  }

  if (s != nullptr) {
    TRACE_TYPE(LOOKUP, "si %s sym %s s->st_value = %p, "
               "found in %s, base = %p, load bias = %p",
               si->name, name, reinterpret_cast<void*>(s->st_value),
               (*lsi)->name, reinterpret_cast<void*>((*lsi)->base),
               reinterpret_cast<void*>((*lsi)->load_bias));
  }

  return s;
}
// Each distinct block size gets its own LinkerBlockAllocator (one static
// instance per template instantiation).
template<size_t size>
class SizeBasedAllocator {
 public:
  static void* alloc() {
    return allocator_.alloc();
  }

  static void free(void* ptr) {
    allocator_.free(ptr);
  }

 private:
  static LinkerBlockAllocator allocator_;
};

// Definition of the per-size allocator instance.
template<size_t size>
LinkerBlockAllocator SizeBasedAllocator<size>::allocator_(size);
// Type-safe facade over SizeBasedAllocator: hands out raw blocks of
// sizeof(T). Note it does NOT run T's constructor/destructor — callers
// use placement new (see LoadTask::create).
template<typename T>
class TypeBasedAllocator {
 public:
  static T* alloc() {
    return reinterpret_cast<T*>(SizeBasedAllocator<sizeof(T)>::alloc());
  }

  static void free(T* ptr) {
    SizeBasedAllocator<sizeof(T)>::free(ptr);
  }
};
// A pending request to load one library, queued while find_libraries()
// walks the DT_NEEDED graph breadth-first. Instances come from the
// linker's block allocator, hence create()/deleter instead of new/delete.
class LoadTask {
 public:
  struct deleter_t {
    void operator()(LoadTask* t) {
      TypeBasedAllocator<LoadTask>::free(t);
    }
  };

  typedef UniquePtr<LoadTask, deleter_t> unique_ptr;

  static deleter_t deleter;

  // Allocate from the block allocator and construct in place.
  static LoadTask* create(const char* name, soinfo* needed_by) {
    LoadTask* ptr = TypeBasedAllocator<LoadTask>::alloc();
    return new (ptr) LoadTask(name, needed_by);
  }

  // Name of the library to be loaded.
  const char* get_name() const {
    return name_;
  }

  // The library whose DT_NEEDED entry requested this load
  // (nullptr for the root libraries passed to find_libraries()).
  soinfo* get_needed_by() const {
    return needed_by_;
  }
 private:
  LoadTask(const char* name, soinfo* needed_by)
      : name_(name), needed_by_(needed_by) {}

  const char* name_;
  soinfo* needed_by_;

  DISALLOW_IMPLICIT_CONSTRUCTORS(LoadTask);
};

// Definition of the shared stateless deleter instance.
LoadTask::deleter_t LoadTask::deleter;
617 template <typename T>
618 using linked_list_t = LinkedList<T, TypeBasedAllocator<LinkedListEntry<T>>>;
620 typedef linked_list_t<soinfo> SoinfoLinkedList;
621 typedef linked_list_t<const char> StringLinkedList;
622 typedef linked_list_t<LoadTask> LoadTaskList;
625 // This is used by dlsym(3). It performs symbol lookup only within the
626 // specified soinfo object and its dependencies in breadth first order.
627 ElfW(Sym)* dlsym_handle_lookup(soinfo* si, soinfo** found, const char* name) {
628 SoinfoLinkedList visit_list;
629 SoinfoLinkedList visited;
631 visit_list.push_back(si);
632 soinfo* current_soinfo;
633 while ((current_soinfo = visit_list.pop_front()) != nullptr) {
634 if (visited.contains(current_soinfo)) {
635 continue;
636 }
638 ElfW(Sym)* result = soinfo_elf_lookup(current_soinfo, elfhash(name), name);
640 if (result != nullptr) {
641 *found = current_soinfo;
642 return result;
643 }
644 visited.push_back(current_soinfo);
646 current_soinfo->get_children().for_each([&](soinfo* child) {
647 visit_list.push_back(child);
648 });
649 }
651 return nullptr;
652 }
654 /* This is used by dlsym(3) to performs a global symbol lookup. If the
655 start value is null (for RTLD_DEFAULT), the search starts at the
656 beginning of the global solist. Otherwise the search starts at the
657 specified soinfo (for RTLD_NEXT).
658 */
659 ElfW(Sym)* dlsym_linear_lookup(const char* name, soinfo** found, soinfo* start) {
660 unsigned elf_hash = elfhash(name);
662 if (start == nullptr) {
663 start = solist;
664 }
666 ElfW(Sym)* s = nullptr;
667 for (soinfo* si = start; (s == nullptr) && (si != nullptr); si = si->next) {
668 if ((si->get_rtld_flags() & RTLD_GLOBAL) == 0) {
669 continue;
670 }
672 s = soinfo_elf_lookup(si, elf_hash, name);
673 if (s != nullptr) {
674 *found = si;
675 break;
676 }
677 }
679 if (s != nullptr) {
680 TRACE_TYPE(LOOKUP, "%s s->st_value = %p, found->base = %p",
681 name, reinterpret_cast<void*>(s->st_value), reinterpret_cast<void*>((*found)->base));
682 }
684 return s;
685 }
687 soinfo* find_containing_library(const void* p) {
688 ElfW(Addr) address = reinterpret_cast<ElfW(Addr)>(p);
689 for (soinfo* si = solist; si != nullptr; si = si->next) {
690 if (address >= si->base && address - si->base < si->size) {
691 return si;
692 }
693 }
694 return nullptr;
695 }
697 ElfW(Sym)* dladdr_find_symbol(soinfo* si, const void* addr) {
698 ElfW(Addr) soaddr = reinterpret_cast<ElfW(Addr)>(addr) - si->base;
700 // Search the library's symbol table for any defined symbol which
701 // contains this address.
702 for (size_t i = 0; i < si->nchain; ++i) {
703 ElfW(Sym)* sym = &si->symtab[i];
704 if (sym->st_shndx != SHN_UNDEF &&
705 soaddr >= sym->st_value &&
706 soaddr < sym->st_value + sym->st_size) {
707 return sym;
708 }
709 }
711 return nullptr;
712 }
// Try to open |name| beneath each directory in the nullptr-terminated
// |paths| array. Returns an open O_RDONLY|O_CLOEXEC fd, or -1 if the
// library was not found in any directory.
static int open_library_on_path(const char* name, const char* const paths[]) {
  char buf[512];
  for (size_t i = 0; paths[i] != nullptr; ++i) {
    // __libc_format_buffer returns the would-be length, like snprintf.
    int n = __libc_format_buffer(buf, sizeof(buf), "%s/%s", paths[i], name);
    if (n < 0 || n >= static_cast<int>(sizeof(buf))) {
      PRINT("Warning: ignoring very long library path: %s/%s", paths[i], name);
      continue;
    }
    int fd = TEMP_FAILURE_RETRY(open(buf, O_RDONLY | O_CLOEXEC));
    if (fd != -1) {
      return fd;
    }
  }
  return -1;
}
// Map a library name to an open fd. Names containing '/' are opened
// directly; bare names are searched first in LD_LIBRARY_PATH and then in
// the built-in default directories. Returns -1 if nothing could be opened.
static int open_library(const char* name) {
  TRACE("[ opening %s ]", name);

  // If the name contains a slash, we should attempt to open it directly and not search the paths.
  if (strchr(name, '/') != nullptr) {
    int fd = TEMP_FAILURE_RETRY(open(name, O_RDONLY | O_CLOEXEC));
    if (fd != -1) {
      return fd;
    }
    // ...but nvidia binary blobs (at least) rely on this behavior, so fall through for now.
#if defined(__LP64__)
    return -1;
#endif
  }

  // Otherwise we try LD_LIBRARY_PATH first, and fall back to the built-in well known paths.
  int fd = open_library_on_path(name, g_ld_library_paths);
  if (fd == -1) {
    fd = open_library_on_path(name, kDefaultLdPaths);
  }
  return fd;
}
// Invoke |action| with the library name of every DT_NEEDED entry in
// |si|'s dynamic section.
template<typename F>
static void for_each_dt_needed(const soinfo* si, F action) {
  for (ElfW(Dyn)* d = si->dynamic; d->d_tag != DT_NULL; ++d) {
    if (d->d_tag == DT_NEEDED) {
      // d_val is an offset into this library's string table.
      action(si->strtab + d->d_un.d_val);
    }
  }
}
// Open and mmap |name|, create its soinfo, and queue its DT_NEEDED
// entries onto |load_tasks| for the caller's breadth-first walk.
// Returns the existing soinfo when the same file (matched by
// st_dev/st_ino) is already loaded under another name; returns nullptr
// (with dlerror set) on any failure.
static soinfo* load_library(LoadTaskList& load_tasks, const char* name, int rtld_flags, const android_dlextinfo* extinfo) {
  int fd = -1;
  // The guard owns only fds we open ourselves — never the caller's
  // ANDROID_DLEXT_USE_LIBRARY_FD fd, which remains the caller's to close.
  ScopedFd file_guard(-1);

  if (extinfo != nullptr && (extinfo->flags & ANDROID_DLEXT_USE_LIBRARY_FD) != 0) {
    fd = extinfo->library_fd;
  } else {
    // Open the file.
    fd = open_library(name);
    if (fd == -1) {
      DL_ERR("library \"%s\" not found", name);
      return nullptr;
    }

    file_guard.reset(fd);
  }

  struct stat file_stat;
  if (TEMP_FAILURE_RETRY(fstat(fd, &file_stat)) != 0) {
    DL_ERR("unable to stat file for the library %s: %s", name, strerror(errno));
    return nullptr;
  }

  // Check for symlink and other situations where
  // file can have different names.
  for (soinfo* si = solist; si != nullptr; si = si->next) {
    if (si->get_st_dev() != 0 &&
        si->get_st_ino() != 0 &&
        si->get_st_dev() == file_stat.st_dev &&
        si->get_st_ino() == file_stat.st_ino) {
      TRACE("library \"%s\" is already loaded under different name/path \"%s\" - will return existing soinfo", name, si->name);
      return si;
    }
  }

  // RTLD_NOLOAD means "give me the handle only if already loaded".
  if ((rtld_flags & RTLD_NOLOAD) != 0) {
    DL_ERR("library \"%s\" wasn't loaded and RTLD_NOLOAD prevented it", name);
    return nullptr;
  }

  // Read the ELF header and load the segments.
  ElfReader elf_reader(name, fd);
  if (!elf_reader.Load(extinfo)) {
    return nullptr;
  }

  soinfo* si = soinfo_alloc(SEARCH_NAME(name), &file_stat, rtld_flags);
  if (si == nullptr) {
    return nullptr;
  }
  si->base = elf_reader.load_start();
  si->size = elf_reader.load_size();
  si->load_bias = elf_reader.load_bias();
  si->phnum = elf_reader.phdr_count();
  si->phdr = elf_reader.loaded_phdr();

  // Parse the dynamic section before the caller links relocations.
  if (!si->PrelinkImage()) {
    soinfo_free(si);
    return nullptr;
  }

  // Queue dependencies; find_libraries() drains load_tasks in BFS order.
  for_each_dt_needed(si, [&] (const char* name) {
    load_tasks.push_back(LoadTask::create(name, si));
  });

  return si;
}
830 static soinfo *find_loaded_library_by_name(const char* name) {
831 const char* search_name = SEARCH_NAME(name);
832 for (soinfo* si = solist; si != nullptr; si = si->next) {
833 if (!strcmp(search_name, si->name)) {
834 return si;
835 }
836 }
837 return nullptr;
838 }
// Return the soinfo for |name|, loading it (and queueing its DT_NEEDED
// dependencies onto |load_tasks|) when it is not already loaded by name.
static soinfo* find_library_internal(LoadTaskList& load_tasks, const char* name, int rtld_flags, const android_dlextinfo* extinfo) {

  soinfo* si = find_loaded_library_by_name(name);

  // Library might still be loaded, the accurate detection
  // of this fact is done by load_library.
  if (si == nullptr) {
    TRACE("[ '%s' has not been found by name.  Trying harder...]", name);
    si = load_library(load_tasks, name, rtld_flags, extinfo);
  }

  return si;
}
854 static void soinfo_unload(soinfo* si);
// Returns true (and sets dlerror) if making |parent| depend on |si|
// would create a dependency cycle, i.e. |si| is |parent| itself or any
// transitive ancestor of it.
static bool is_recursive(soinfo* si, soinfo* parent) {
  if (parent == nullptr) {
    return false;
  }

  if (si == parent) {
    DL_ERR("recursive link to \"%s\"", si->name);
    return true;
  }

  // visit() returns false if the lambda cut the walk short, which here
  // means some ancestor matched — hence the double negation.
  return !parent->get_parents().visit([&](soinfo* grandparent) {
    return !is_recursive(si, grandparent);
  });
}
// Load |library_names| plus all transitive DT_NEEDED dependencies
// (breadth first), then link them bottom-up. On any failure the scope
// guard unloads everything loaded so far and false is returned.
static bool find_libraries(const char* const library_names[], size_t library_names_size, soinfo* soinfos[],
                           soinfo* ld_preloads[], size_t ld_preloads_size, int rtld_flags, const android_dlextinfo* extinfo) {
  // Step 0: prepare.
  LoadTaskList load_tasks;
  for (size_t i = 0; i < library_names_size; ++i) {
    const char* name = library_names[i];
    load_tasks.push_back(LoadTask::create(name, nullptr));
  }

  // Libraries added to this list in reverse order so that we can
  // start linking from bottom-up - see step 2.
  SoinfoLinkedList found_libs;
  size_t soinfos_size = 0;

  // Runs on every early return below; disabled only on success.
  auto failure_guard = make_scope_guard([&]() {
    // Housekeeping
    load_tasks.for_each([] (LoadTask* t) {
      LoadTask::deleter(t);
    });

    for (size_t i = 0; i<soinfos_size; ++i) {
      soinfo_unload(soinfos[i]);
    }
  });

  // Step 1: load and pre-link all DT_NEEDED libraries in breadth first order.
  for (LoadTask::unique_ptr task(load_tasks.pop_front()); task.get() != nullptr; task.reset(load_tasks.pop_front())) {
    soinfo* si = find_library_internal(load_tasks, task->get_name(), rtld_flags, extinfo);
    if (si == nullptr) {
      return false;
    }

    soinfo* needed_by = task->get_needed_by();

    if (is_recursive(si, needed_by)) {
      return false;
    }

    si->ref_count++;
    if (needed_by != nullptr) {
      needed_by->add_child(si);
    }
    found_libs.push_front(si);

    // When ld_preloads is not null first
    // ld_preloads_size libs are in fact ld_preloads.
    if (ld_preloads != nullptr && soinfos_size < ld_preloads_size) {
      ld_preloads[soinfos_size] = si;
    }

    // Only the explicitly requested roots are reported back to the caller.
    if (soinfos_size<library_names_size) {
      soinfos[soinfos_size++] = si;
    }
  }

  // Step 2: link libraries.
  soinfo* si;
  while ((si = found_libs.pop_front()) != nullptr) {
    if ((si->flags & FLAG_LINKED) == 0) {
      if (!si->LinkImage(extinfo)) {
        return false;
      }
      si->flags |= FLAG_LINKED;
    }
  }

  // All is well - found_libs and load_tasks are empty at this point
  // and all libs are successfully linked.
  failure_guard.disable();
  return true;
}
// dlopen-style lookup for one library; a nullptr name means
// "the main executable" (matching dlopen(nullptr, ...)).
static soinfo* find_library(const char* name, int rtld_flags, const android_dlextinfo* extinfo) {
  if (name == nullptr) {
    somain->ref_count++;
    return somain;
  }

  soinfo* si;

  // find_libraries also loads and links all transitive dependencies.
  if (!find_libraries(&name, 1, &si, nullptr, 0, rtld_flags, extinfo)) {
    return nullptr;
  }

  return si;
}
// Drop one reference to |si|. When the last reference goes away: run
// destructors, recursively unload dependencies, notify the debugger,
// and free the soinfo.
static void soinfo_unload(soinfo* si) {
  if (si->ref_count == 1) {
    TRACE("unloading '%s'", si->name);
    si->CallDestructors();

    if (si->has_min_version(0)) {
      // New soinfo format: dependencies are tracked as child links.
      soinfo* child = nullptr;
      while ((child = si->get_children().pop_front()) != nullptr) {
        TRACE("%s needs to unload %s", si->name, child->name);
        soinfo_unload(child);
      }
    } else {
      // Old format: re-resolve each DT_NEEDED name with RTLD_NOLOAD.
      for_each_dt_needed(si, [&] (const char* library_name) {
        TRACE("deprecated (old format of soinfo): %s needs to unload %s", si->name, library_name);
        soinfo* needed = find_library(library_name, RTLD_NOLOAD, nullptr);
        if (needed != nullptr) {
          soinfo_unload(needed);
        } else {
          // Not found: for example if symlink was deleted between dlopen and dlclose
          // Since we cannot really handle errors at this point - print and continue.
          PRINT("warning: couldn't find %s needed by %s on unload.", library_name, si->name);
        }
      });
    }

    notify_gdb_of_unload(si);
    si->ref_count = 0;
    soinfo_free(si);
  } else {
    si->ref_count--;
    TRACE("not unloading '%s', decrementing ref_count to %zd", si->name, si->ref_count);
  }
}
// Write the two default search directories, ':'-joined, into |buffer|.
// Aborts the process if the buffer is too small.
void do_android_get_LD_LIBRARY_PATH(char* buffer, size_t buffer_size) {
  // Use basic string manipulation calls to avoid snprintf.
  // snprintf indirectly calls pthread_getspecific to get the size of a buffer.
  // When debug malloc is enabled, this call returns 0. This in turn causes
  // snprintf to do nothing, which causes libraries to fail to load.
  // See b/17302493 for further details.
  // Once the above bug is fixed, this code can be modified to use
  // snprintf again.
  // +2 accounts for the ':' separator and the trailing '\0'.
  size_t required_len = strlen(kDefaultLdPaths[0]) + strlen(kDefaultLdPaths[1]) + 2;
  if (buffer_size < required_len) {
    __libc_fatal("android_get_LD_LIBRARY_PATH failed, buffer too small: buffer len %zu, required len %zu",
                 buffer_size, required_len);
  }
  char* end = stpcpy(buffer, kDefaultLdPaths[0]);
  *end = ':';
  strcpy(end + 1, kDefaultLdPaths[1]);
}
// Update the runtime library search path — but never for AT_SECURE
// (setuid-style) processes, where honoring LD_LIBRARY_PATH is unsafe.
void do_android_update_LD_LIBRARY_PATH(const char* ld_library_path) {
  if (!get_AT_SECURE()) {
    parse_LD_LIBRARY_PATH(ld_library_path);
  }
}
// Implementation behind dlopen()/android_dlopen_ext(): validate flags,
// load and link the library (plus dependencies), run its constructors.
soinfo* do_dlopen(const char* name, int flags, const android_dlextinfo* extinfo) {
  if ((flags & ~(RTLD_NOW|RTLD_LAZY|RTLD_LOCAL|RTLD_GLOBAL|RTLD_NOLOAD)) != 0) {
    DL_ERR("invalid flags to dlopen: %x", flags);
    return nullptr;
  }
  if (extinfo != nullptr && ((extinfo->flags & ~(ANDROID_DLEXT_VALID_FLAG_BITS)) != 0)) {
    DL_ERR("invalid extended flags to android_dlopen_ext: %" PRIx64, extinfo->flags);
    return nullptr;
  }
  // Linker bookkeeping pages stay read-only except while we mutate them.
  protect_data(PROT_READ | PROT_WRITE);
  soinfo* si = find_library(name, flags, extinfo);
  if (si != nullptr) {
    si->CallConstructors();
  }
  protect_data(PROT_READ);
  return si;
}
// Implementation behind dlclose(): drop a reference (possibly unloading),
// with the linker's bookkeeping made writable only for the duration.
void do_dlclose(soinfo* si) {
  protect_data(PROT_READ | PROT_WRITE);
  soinfo_unload(si);
  protect_data(PROT_READ);
}
// Invoke an STT_GNU_IFUNC resolver function and return the address it
// selects as the actual implementation for the symbol.
static ElfW(Addr) call_ifunc_resolver(ElfW(Addr) resolver_addr) {
  typedef ElfW(Addr) (*ifunc_resolver_t)(void);
  ifunc_resolver_t ifunc_resolver = reinterpret_cast<ifunc_resolver_t>(resolver_addr);
  ElfW(Addr) ifunc_addr = ifunc_resolver();
  TRACE_TYPE(RELO, "Called ifunc_resolver@%p. The result is %p", ifunc_resolver, reinterpret_cast<void*>(ifunc_addr));

  return ifunc_addr;
}
1049 #if defined(USE_RELA)
// Applies 'count' DT_RELA-style relocation entries (explicit addends)
// starting at 'rela' to this library's image.
// Returns 0 on success, -1 on any unresolved non-weak symbol or
// unsupported relocation type (an error is raised via DL_ERR first).
int soinfo::Relocate(ElfW(Rela)* rela, unsigned count) {
  for (size_t idx = 0; idx < count; ++idx, ++rela) {
    unsigned type = ELFW(R_TYPE)(rela->r_info);
    unsigned sym = ELFW(R_SYM)(rela->r_info);
    // 'reloc' is the run-time address of the place being patched.
    ElfW(Addr) reloc = static_cast<ElfW(Addr)>(rela->r_offset + load_bias);
    ElfW(Addr) sym_addr = 0;
    const char* sym_name = nullptr;

    DEBUG("Processing '%s' relocation at index %zd", name, idx);
    if (type == 0) { // R_*_NONE
      continue;
    }

    ElfW(Sym)* s = nullptr;
    soinfo* lsi = nullptr;

    if (sym != 0) {
      sym_name = reinterpret_cast<const char*>(strtab + symtab[sym].st_name);
      s = soinfo_do_lookup(this, sym_name, &lsi);
      if (s == nullptr) {
        // We only allow an undefined symbol if this is a weak reference...
        s = &symtab[sym];
        if (ELF_ST_BIND(s->st_info) != STB_WEAK) {
          DL_ERR("cannot locate symbol \"%s\" referenced by \"%s\"...", sym_name, name);
          return -1;
        }

        /* IHI0044C AAELF 4.5.1.1:

           Libraries are not searched to resolve weak references.
           It is not an error for a weak reference to remain unsatisfied.

           During linking, the value of an undefined weak reference is:
           - Zero if the relocation type is absolute
           - The address of the place if the relocation is pc-relative
           - The address of nominal base address if the relocation
             type is base-relative.
         */

        switch (type) {
#if defined(__aarch64__)
          case R_AARCH64_JUMP_SLOT:
          case R_AARCH64_GLOB_DAT:
          case R_AARCH64_ABS64:
          case R_AARCH64_ABS32:
          case R_AARCH64_ABS16:
          case R_AARCH64_RELATIVE:
          case R_AARCH64_IRELATIVE:
            /*
             * The sym_addr was initialized to be zero above, or the relocation
             * code below does not care about value of sym_addr.
             * No need to do anything.
             */
            break;
#elif defined(__x86_64__)
          case R_X86_64_JUMP_SLOT:
          case R_X86_64_GLOB_DAT:
          case R_X86_64_32:
          case R_X86_64_64:
          case R_X86_64_RELATIVE:
          case R_X86_64_IRELATIVE:
            // No need to do anything.
            break;
          case R_X86_64_PC32:
            // PC-relative weak reference resolves to the address of the place,
            // so the computed delta below becomes zero.
            sym_addr = reloc;
            break;
#endif
          default:
            DL_ERR("unknown weak reloc type %d @ %p (%zu)", type, rela, idx);
            return -1;
        }
      } else {
        // We got a definition.
        sym_addr = lsi->resolve_symbol_address(s);
      }
      count_relocation(kRelocSymbol);
    }

    // Apply the relocation to the place.
    switch (type) {
#if defined(__aarch64__)
      case R_AARCH64_JUMP_SLOT:
        count_relocation(kRelocAbsolute);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO JMP_SLOT %16llx <- %16llx %s\n",
                   reloc, (sym_addr + rela->r_addend), sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = (sym_addr + rela->r_addend);
        break;
      case R_AARCH64_GLOB_DAT:
        count_relocation(kRelocAbsolute);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO GLOB_DAT %16llx <- %16llx %s\n",
                   reloc, (sym_addr + rela->r_addend), sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = (sym_addr + rela->r_addend);
        break;
      case R_AARCH64_ABS64:
        count_relocation(kRelocAbsolute);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO ABS64 %16llx <- %16llx %s\n",
                   reloc, (sym_addr + rela->r_addend), sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr + rela->r_addend);
        break;
      case R_AARCH64_ABS32:
        count_relocation(kRelocAbsolute);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO ABS32 %16llx <- %16llx %s\n",
                   reloc, (sym_addr + rela->r_addend), sym_name);
        // Narrowing relocation: the final value must fit in 32 bits.
        if ((static_cast<ElfW(Addr)>(INT32_MIN) <= (*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend))) &&
            ((*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend)) <= static_cast<ElfW(Addr)>(UINT32_MAX))) {
          *reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr + rela->r_addend);
        } else {
          DL_ERR("0x%016llx out of range 0x%016llx to 0x%016llx",
                 (*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend)),
                 static_cast<ElfW(Addr)>(INT32_MIN),
                 static_cast<ElfW(Addr)>(UINT32_MAX));
          return -1;
        }
        break;
      case R_AARCH64_ABS16:
        count_relocation(kRelocAbsolute);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO ABS16 %16llx <- %16llx %s\n",
                   reloc, (sym_addr + rela->r_addend), sym_name);
        // Narrowing relocation: the final value must fit in 16 bits.
        if ((static_cast<ElfW(Addr)>(INT16_MIN) <= (*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend))) &&
            ((*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend)) <= static_cast<ElfW(Addr)>(UINT16_MAX))) {
          *reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr + rela->r_addend);
        } else {
          DL_ERR("0x%016llx out of range 0x%016llx to 0x%016llx",
                 (*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend)),
                 static_cast<ElfW(Addr)>(INT16_MIN),
                 static_cast<ElfW(Addr)>(UINT16_MAX));
          return -1;
        }
        break;
      case R_AARCH64_PREL64:
        count_relocation(kRelocRelative);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO REL64 %16llx <- %16llx - %16llx %s\n",
                   reloc, (sym_addr + rela->r_addend), rela->r_offset, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr + rela->r_addend) - rela->r_offset;
        break;
      case R_AARCH64_PREL32:
        count_relocation(kRelocRelative);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO REL32 %16llx <- %16llx - %16llx %s\n",
                   reloc, (sym_addr + rela->r_addend), rela->r_offset, sym_name);
        // PC-relative narrowing relocation: result must fit in 32 bits.
        if ((static_cast<ElfW(Addr)>(INT32_MIN) <= (*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset))) &&
            ((*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)) <= static_cast<ElfW(Addr)>(UINT32_MAX))) {
          *reinterpret_cast<ElfW(Addr)*>(reloc) += ((sym_addr + rela->r_addend) - rela->r_offset);
        } else {
          DL_ERR("0x%016llx out of range 0x%016llx to 0x%016llx",
                 (*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)),
                 static_cast<ElfW(Addr)>(INT32_MIN),
                 static_cast<ElfW(Addr)>(UINT32_MAX));
          return -1;
        }
        break;
      case R_AARCH64_PREL16:
        count_relocation(kRelocRelative);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO REL16 %16llx <- %16llx - %16llx %s\n",
                   reloc, (sym_addr + rela->r_addend), rela->r_offset, sym_name);
        // PC-relative narrowing relocation: result must fit in 16 bits.
        if ((static_cast<ElfW(Addr)>(INT16_MIN) <= (*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset))) &&
            ((*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)) <= static_cast<ElfW(Addr)>(UINT16_MAX))) {
          *reinterpret_cast<ElfW(Addr)*>(reloc) += ((sym_addr + rela->r_addend) - rela->r_offset);
        } else {
          DL_ERR("0x%016llx out of range 0x%016llx to 0x%016llx",
                 (*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)),
                 static_cast<ElfW(Addr)>(INT16_MIN),
                 static_cast<ElfW(Addr)>(UINT16_MAX));
          return -1;
        }
        break;

      case R_AARCH64_RELATIVE:
        count_relocation(kRelocRelative);
        MARK(rela->r_offset);
        // RELATIVE relocations must not reference a symbol.
        if (sym) {
          DL_ERR("odd RELATIVE form...");
          return -1;
        }
        TRACE_TYPE(RELO, "RELO RELATIVE %16llx <- %16llx\n",
                   reloc, (base + rela->r_addend));
        *reinterpret_cast<ElfW(Addr)*>(reloc) = (base + rela->r_addend);
        break;

      case R_AARCH64_IRELATIVE:
        count_relocation(kRelocRelative);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO IRELATIVE %16llx <- %16llx\n", reloc, (base + rela->r_addend));
        // The addend points at an ifunc resolver; store what it returns.
        *reinterpret_cast<ElfW(Addr)*>(reloc) = call_ifunc_resolver(base + rela->r_addend);
        break;

      case R_AARCH64_COPY:
        /*
         * ET_EXEC is not supported so this should not happen.
         *
         * http://infocenter.arm.com/help/topic/com.arm.doc.ihi0044d/IHI0044D_aaelf.pdf
         *
         * Section 4.7.1.10 "Dynamic relocations"
         * R_AARCH64_COPY may only appear in executable objects where e_type is
         * set to ET_EXEC.
         */
        DL_ERR("%s R_AARCH64_COPY relocations are not supported", name);
        return -1;
      case R_AARCH64_TLS_TPREL64:
        // TLS relocations are traced but not applied here.
        TRACE_TYPE(RELO, "RELO TLS_TPREL64 *** %16llx <- %16llx - %16llx\n",
                   reloc, (sym_addr + rela->r_addend), rela->r_offset);
        break;
      case R_AARCH64_TLS_DTPREL32:
        TRACE_TYPE(RELO, "RELO TLS_DTPREL32 *** %16llx <- %16llx - %16llx\n",
                   reloc, (sym_addr + rela->r_addend), rela->r_offset);
        break;
#elif defined(__x86_64__)
      case R_X86_64_JUMP_SLOT:
        count_relocation(kRelocAbsolute);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO JMP_SLOT %08zx <- %08zx %s", static_cast<size_t>(reloc),
                   static_cast<size_t>(sym_addr + rela->r_addend), sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend;
        break;
      case R_X86_64_GLOB_DAT:
        count_relocation(kRelocAbsolute);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO GLOB_DAT %08zx <- %08zx %s", static_cast<size_t>(reloc),
                   static_cast<size_t>(sym_addr + rela->r_addend), sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend;
        break;
      case R_X86_64_RELATIVE:
        count_relocation(kRelocRelative);
        MARK(rela->r_offset);
        // RELATIVE relocations must not reference a symbol.
        if (sym) {
          DL_ERR("odd RELATIVE form...");
          return -1;
        }
        TRACE_TYPE(RELO, "RELO RELATIVE %08zx <- +%08zx", static_cast<size_t>(reloc),
                   static_cast<size_t>(base));
        *reinterpret_cast<ElfW(Addr)*>(reloc) = base + rela->r_addend;
        break;
      case R_X86_64_IRELATIVE:
        count_relocation(kRelocRelative);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO IRELATIVE %16llx <- %16llx\n", reloc, (base + rela->r_addend));
        // The addend points at an ifunc resolver; store what it returns.
        *reinterpret_cast<ElfW(Addr)*>(reloc) = call_ifunc_resolver(base + rela->r_addend);
        break;
      case R_X86_64_32:
        count_relocation(kRelocRelative);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO R_X86_64_32 %08zx <- +%08zx %s", static_cast<size_t>(reloc),
                   static_cast<size_t>(sym_addr), sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend;
        break;
      case R_X86_64_64:
        count_relocation(kRelocRelative);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO R_X86_64_64 %08zx <- +%08zx %s", static_cast<size_t>(reloc),
                   static_cast<size_t>(sym_addr), sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend;
        break;
      case R_X86_64_PC32:
        count_relocation(kRelocRelative);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO R_X86_64_PC32 %08zx <- +%08zx (%08zx - %08zx) %s",
                   static_cast<size_t>(reloc), static_cast<size_t>(sym_addr - reloc),
                   static_cast<size_t>(sym_addr), static_cast<size_t>(reloc), sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend - reloc;
        break;
#endif

      default:
        DL_ERR("unknown reloc type %d @ %p (%zu)", type, rela, idx);
        return -1;
    }
  }
  return 0;
}
1326 #else // REL, not RELA.
// Applies 'count' DT_REL-style relocation entries (implicit addends read
// from the place being patched) starting at 'rel'.
// Returns 0 on success, -1 on any unresolved non-weak symbol or
// unsupported relocation type (an error is raised via DL_ERR first).
int soinfo::Relocate(ElfW(Rel)* rel, unsigned count) {
  for (size_t idx = 0; idx < count; ++idx, ++rel) {
    unsigned type = ELFW(R_TYPE)(rel->r_info);
    // TODO: don't use unsigned for 'sym'. Use uint32_t or ElfW(Addr) instead.
    unsigned sym = ELFW(R_SYM)(rel->r_info);
    // 'reloc' is the run-time address of the place being patched.
    ElfW(Addr) reloc = static_cast<ElfW(Addr)>(rel->r_offset + load_bias);
    ElfW(Addr) sym_addr = 0;
    const char* sym_name = nullptr;

    DEBUG("Processing '%s' relocation at index %zd", name, idx);
    if (type == 0) { // R_*_NONE
      continue;
    }

    ElfW(Sym)* s = nullptr;
    soinfo* lsi = nullptr;

    if (sym != 0) {
      sym_name = reinterpret_cast<const char*>(strtab + symtab[sym].st_name);
      s = soinfo_do_lookup(this, sym_name, &lsi);
      if (s == nullptr) {
        // We only allow an undefined symbol if this is a weak reference...
        s = &symtab[sym];
        if (ELF_ST_BIND(s->st_info) != STB_WEAK) {
          DL_ERR("cannot locate symbol \"%s\" referenced by \"%s\"...", sym_name, name);
          return -1;
        }

        /* IHI0044C AAELF 4.5.1.1:

           Libraries are not searched to resolve weak references.
           It is not an error for a weak reference to remain
           unsatisfied.

           During linking, the value of an undefined weak reference is:
           - Zero if the relocation type is absolute
           - The address of the place if the relocation is pc-relative
           - The address of nominal base address if the relocation
             type is base-relative.
         */

        switch (type) {
#if defined(__arm__)
          case R_ARM_JUMP_SLOT:
          case R_ARM_GLOB_DAT:
          case R_ARM_ABS32:
          case R_ARM_RELATIVE: /* Don't care. */
            // sym_addr was initialized to be zero above or relocation
            // code below does not care about value of sym_addr.
            // No need to do anything.
            break;
#elif defined(__i386__)
          case R_386_JMP_SLOT:
          case R_386_GLOB_DAT:
          case R_386_32:
          case R_386_RELATIVE: /* Don't care. */
          case R_386_IRELATIVE:
            // sym_addr was initialized to be zero above or relocation
            // code below does not care about value of sym_addr.
            // No need to do anything.
            break;
          case R_386_PC32:
            // PC-relative weak reference resolves to the address of the
            // place, so the computed delta below becomes zero.
            sym_addr = reloc;
            break;
#endif

#if defined(__arm__)
          case R_ARM_COPY:
            // Fall through. Can't really copy if weak symbol is not found at run-time.
#endif
          default:
            DL_ERR("unknown weak reloc type %d @ %p (%zu)", type, rel, idx);
            return -1;
        }
      } else {
        // We got a definition.
        sym_addr = lsi->resolve_symbol_address(s);
      }
      count_relocation(kRelocSymbol);
    }

    // Apply the relocation to the place.
    switch (type) {
#if defined(__arm__)
      case R_ARM_JUMP_SLOT:
        count_relocation(kRelocAbsolute);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO JMP_SLOT %08x <- %08x %s", reloc, sym_addr, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr;
        break;
      case R_ARM_GLOB_DAT:
        count_relocation(kRelocAbsolute);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO GLOB_DAT %08x <- %08x %s", reloc, sym_addr, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr;
        break;
      case R_ARM_ABS32:
        count_relocation(kRelocAbsolute);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO ABS %08x <- %08x %s", reloc, sym_addr, sym_name);
        // Implicit addend: add to the value already stored at the place.
        *reinterpret_cast<ElfW(Addr)*>(reloc) += sym_addr;
        break;
      case R_ARM_REL32:
        count_relocation(kRelocRelative);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO REL32 %08x <- %08x - %08x %s",
                   reloc, sym_addr, rel->r_offset, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) += sym_addr - rel->r_offset;
        break;
      case R_ARM_COPY:
        /*
         * ET_EXEC is not supported so this should not happen.
         *
         * http://infocenter.arm.com/help/topic/com.arm.doc.ihi0044d/IHI0044D_aaelf.pdf
         *
         * Section 4.7.1.10 "Dynamic relocations"
         * R_ARM_COPY may only appear in executable objects where e_type is
         * set to ET_EXEC.
         */
        DL_ERR("%s R_ARM_COPY relocations are not supported", name);
        return -1;
#elif defined(__i386__)
      case R_386_JMP_SLOT:
        count_relocation(kRelocAbsolute);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO JMP_SLOT %08x <- %08x %s", reloc, sym_addr, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr;
        break;
      case R_386_GLOB_DAT:
        count_relocation(kRelocAbsolute);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO GLOB_DAT %08x <- %08x %s", reloc, sym_addr, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr;
        break;
      case R_386_32:
        count_relocation(kRelocRelative);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO R_386_32 %08x <- +%08x %s", reloc, sym_addr, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) += sym_addr;
        break;
      case R_386_PC32:
        count_relocation(kRelocRelative);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO R_386_PC32 %08x <- +%08x (%08x - %08x) %s",
                   reloc, (sym_addr - reloc), sym_addr, reloc, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr - reloc);
        break;
#elif defined(__mips__)
      case R_MIPS_REL32:
#if defined(__LP64__)
        // MIPS Elf64_Rel entries contain compound relocations
        // We only handle the R_MIPS_NONE|R_MIPS_64|R_MIPS_REL32 case
        if (ELF64_R_TYPE2(rel->r_info) != R_MIPS_64 ||
            ELF64_R_TYPE3(rel->r_info) != R_MIPS_NONE) {
          DL_ERR("Unexpected compound relocation type:%d type2:%d type3:%d @ %p (%zu)",
                 type, (unsigned)ELF64_R_TYPE2(rel->r_info),
                 (unsigned)ELF64_R_TYPE3(rel->r_info), rel, idx);
          return -1;
        }
#endif
        count_relocation(kRelocAbsolute);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO REL32 %08zx <- %08zx %s", static_cast<size_t>(reloc),
                   static_cast<size_t>(sym_addr), sym_name ? sym_name : "*SECTIONHDR*");
        // With a resolved symbol add its address; otherwise rebase by 'base'.
        if (s) {
          *reinterpret_cast<ElfW(Addr)*>(reloc) += sym_addr;
        } else {
          *reinterpret_cast<ElfW(Addr)*>(reloc) += base;
        }
        break;
#endif

#if defined(__arm__)
      case R_ARM_RELATIVE:
#elif defined(__i386__)
      case R_386_RELATIVE:
#endif
        // NOTE(review): on mips neither case label above is compiled in, so
        // this body is unreachable dead code there — intentional sharing of
        // the arm/x86 RELATIVE handling.
        count_relocation(kRelocRelative);
        MARK(rel->r_offset);
        // RELATIVE relocations must not reference a symbol.
        if (sym) {
          DL_ERR("odd RELATIVE form...");
          return -1;
        }
        TRACE_TYPE(RELO, "RELO RELATIVE %p <- +%p",
                   reinterpret_cast<void*>(reloc), reinterpret_cast<void*>(base));
        *reinterpret_cast<ElfW(Addr)*>(reloc) += base;
        break;
#if defined(__i386__)
      case R_386_IRELATIVE:
        count_relocation(kRelocRelative);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO IRELATIVE %p <- %p", reinterpret_cast<void*>(reloc), reinterpret_cast<void*>(base));
        // Implicit addend: the place holds the resolver's unrebased address.
        *reinterpret_cast<ElfW(Addr)*>(reloc) = call_ifunc_resolver(base + *reinterpret_cast<ElfW(Addr)*>(reloc));
        break;
#endif

      default:
        DL_ERR("unknown reloc type %d @ %p (%zu)", type, rel, idx);
        return -1;
    }
  }
  return 0;
}
1529 #endif
1531 #if defined(__mips__)
// Relocates the MIPS Global Offset Table of 'si': rebases the local GOT
// entries by the load bias and resolves the global entries via symbol
// lookup. Returns false if a non-weak global symbol cannot be located.
static bool mips_relocate_got(soinfo* si) {
  ElfW(Addr)** got = si->plt_got;
  if (got == nullptr) {
    return true;
  }
  unsigned local_gotno = si->mips_local_gotno;
  unsigned gotsym = si->mips_gotsym;
  unsigned symtabno = si->mips_symtabno;
  ElfW(Sym)* symtab = si->symtab;

  // got[0] is the address of the lazy resolver function.
  // got[1] may be used for a GNU extension.
  // Set it to a recognizable address in case someone calls it (should be _rtld_bind_start).
  // FIXME: maybe this should be in a separate routine?
  if ((si->flags & FLAG_LINKER) == 0) {
    size_t g = 0;
    got[g++] = reinterpret_cast<ElfW(Addr)*>(0xdeadbeef);
    // A negative-looking got[1] marks the GNU extension slot.
    if (reinterpret_cast<intptr_t>(got[g]) < 0) {
      got[g++] = reinterpret_cast<ElfW(Addr)*>(0xdeadfeed);
    }
    // Relocate the local GOT entries.
    for (; g < local_gotno; g++) {
      got[g] = reinterpret_cast<ElfW(Addr)*>(reinterpret_cast<uintptr_t>(got[g]) + si->load_bias);
    }
  }

  // Now for the global GOT entries...
  ElfW(Sym)* sym = symtab + gotsym;
  got = si->plt_got + local_gotno;
  for (size_t g = gotsym; g < symtabno; g++, sym++, got++) {
    // This is an undefined reference... try to locate it.
    const char* sym_name = si->strtab + sym->st_name;
    soinfo* lsi = nullptr;
    ElfW(Sym)* s = soinfo_do_lookup(si, sym_name, &lsi);
    if (s == nullptr) {
      // We only allow an undefined symbol if this is a weak reference.
      s = &symtab[g];
      if (ELF_ST_BIND(s->st_info) != STB_WEAK) {
        DL_ERR("cannot locate \"%s\"...", sym_name);
        return false;
      }
      // Unresolved weak reference: the GOT entry becomes null.
      *got = 0;
    } else {
      // FIXME: is this sufficient?
      // For reference see NetBSD link loader
      // http://cvsweb.netbsd.org/bsdweb.cgi/src/libexec/ld.elf_so/arch/mips/mips_reloc.c?rev=1.53&content-type=text/x-cvsweb-markup
      *got = reinterpret_cast<ElfW(Addr)*>(lsi->resolve_symbol_address(s));
    }
  }
  return true;
}
1583 #endif
1585 void soinfo::CallArray(const char* array_name __unused, linker_function_t* functions, size_t count, bool reverse) {
1586 if (functions == nullptr) {
1587 return;
1588 }
1590 TRACE("[ Calling %s (size %zd) @ %p for '%s' ]", array_name, count, functions, name);
1592 int begin = reverse ? (count - 1) : 0;
1593 int end = reverse ? -1 : count;
1594 int step = reverse ? -1 : 1;
1596 for (int i = begin; i != end; i += step) {
1597 TRACE("[ %s[%d] == %p ]", array_name, i, functions[i]);
1598 CallFunction("function", functions[i]);
1599 }
1601 TRACE("[ Done calling %s for '%s' ]", array_name, name);
1602 }
1604 void soinfo::CallFunction(const char* function_name __unused, linker_function_t function) {
1605 if (function == nullptr || reinterpret_cast<uintptr_t>(function) == static_cast<uintptr_t>(-1)) {
1606 return;
1607 }
1609 TRACE("[ Calling %s @ %p for '%s' ]", function_name, function, name);
1610 function();
1611 TRACE("[ Done calling %s @ %p for '%s' ]", function_name, function, name);
1613 // The function may have called dlopen(3) or dlclose(3), so we need to ensure our data structures
1614 // are still writable. This happens with our debug malloc (see http://b/7941716).
1615 protect_data(PROT_READ | PROT_WRITE);
1616 }
// Runs the DT_PREINIT_ARRAY functions. These are called before any other
// constructors for executables, but ignored in a shared library.
void soinfo::CallPreInitConstructors() {
  CallArray("DT_PREINIT_ARRAY", preinit_array, preinit_array_count, false);
}
// Runs this library's constructors (DT_INIT, then DT_INIT_ARRAY) after
// first running the constructors of all its dependencies. Idempotent and
// recursion-safe via the 'constructors_called' flag.
void soinfo::CallConstructors() {
  if (constructors_called) {
    return;
  }

  // We set constructors_called before actually calling the constructors, otherwise it doesn't
  // protect against recursive constructor calls. One simple example of constructor recursion
  // is the libc debug malloc, which is implemented in libc_malloc_debug_leak.so:
  // 1. The program depends on libc, so libc's constructor is called here.
  // 2. The libc constructor calls dlopen() to load libc_malloc_debug_leak.so.
  // 3. dlopen() calls the constructors on the newly created
  //    soinfo for libc_malloc_debug_leak.so.
  // 4. The debug .so depends on libc, so CallConstructors is
  //    called again with the libc soinfo. If it doesn't trigger the early-
  //    out above, the libc constructor will be called again (recursively!).
  constructors_called = true;

  if ((flags & FLAG_EXE) == 0 && preinit_array != nullptr) {
    // The GNU dynamic linker silently ignores these, but we warn the developer.
    PRINT("\"%s\": ignoring %zd-entry DT_PREINIT_ARRAY in shared library!",
          name, preinit_array_count);
  }

  // Dependencies are initialized first (depth-first over the child list).
  get_children().for_each([] (soinfo* si) {
    si->CallConstructors();
  });

  TRACE("\"%s\": calling constructors", name);

  // DT_INIT should be called before DT_INIT_ARRAY if both are present.
  CallFunction("DT_INIT", init_func);
  CallArray("DT_INIT_ARRAY", init_array, init_array_count, false);
}
// Runs this library's destructors: DT_FINI_ARRAY (in reverse order), then
// DT_FINI. No-op unless constructors were previously run.
void soinfo::CallDestructors() {
  if (!constructors_called) {
    return;
  }
  TRACE("\"%s\": calling destructors", name);

  // DT_FINI_ARRAY must be parsed in reverse order.
  CallArray("DT_FINI_ARRAY", fini_array, fini_array_count, true);

  // DT_FINI should be called after DT_FINI_ARRAY if both are present.
  CallFunction("DT_FINI", fini_func);

  // This is needed on second call to dlopen
  // after library has been unloaded with RTLD_NODELETE
  constructors_called = false;
}
1675 void soinfo::add_child(soinfo* child) {
1676 if (has_min_version(0)) {
1677 child->parents.push_back(this);
1678 this->children.push_back(child);
1679 }
1680 }
1682 void soinfo::remove_all_links() {
1683 if (!has_min_version(0)) {
1684 return;
1685 }
1687 // 1. Untie connected soinfos from 'this'.
1688 children.for_each([&] (soinfo* child) {
1689 child->parents.remove_if([&] (const soinfo* parent) {
1690 return parent == this;
1691 });
1692 });
1694 parents.for_each([&] (soinfo* parent) {
1695 parent->children.remove_if([&] (const soinfo* child) {
1696 return child == this;
1697 });
1698 });
1700 // 2. Once everything untied - clear local lists.
1701 parents.clear();
1702 children.clear();
1703 }
1705 void soinfo::set_st_dev(dev_t dev) {
1706 if (has_min_version(0)) {
1707 st_dev = dev;
1708 }
1709 }
1711 void soinfo::set_st_ino(ino_t ino) {
1712 if (has_min_version(0)) {
1713 st_ino = ino;
1714 }
1715 }
1717 dev_t soinfo::get_st_dev() {
1718 if (has_min_version(0)) {
1719 return st_dev;
1720 }
1722 return 0;
1723 };
1725 ino_t soinfo::get_st_ino() {
1726 if (has_min_version(0)) {
1727 return st_ino;
1728 }
1730 return 0;
1731 }
1733 int soinfo::get_rtld_flags() {
1734 if (has_min_version(1)) {
1735 return rtld_flags;
1736 }
1738 return 0;
1739 }
// Shared empty list returned by get_children()/get_parents() when
// 'this->flags' does not have FLAG_NEW_SOINFO set (old-format soinfo).
static soinfo::soinfo_list_t g_empty_list;
1745 soinfo::soinfo_list_t& soinfo::get_children() {
1746 if (has_min_version(0)) {
1747 return this->children;
1748 }
1750 return g_empty_list;
1751 }
1753 soinfo::soinfo_list_t& soinfo::get_parents() {
1754 if ((this->flags & FLAG_NEW_SOINFO) == 0) {
1755 return g_empty_list;
1756 }
1758 return this->parents;
1759 }
1761 ElfW(Addr) soinfo::resolve_symbol_address(ElfW(Sym)* s) {
1762 if (ELF_ST_TYPE(s->st_info) == STT_GNU_IFUNC) {
1763 return call_ifunc_resolver(s->st_value + load_bias);
1764 }
1766 return static_cast<ElfW(Addr)>(s->st_value + load_bias);
1767 }
1769 /* Force any of the closed stdin, stdout and stderr to be associated with
1770 /dev/null. */
1771 static int nullify_closed_stdio() {
1772 int dev_null, i, status;
1773 int return_value = 0;
1775 dev_null = TEMP_FAILURE_RETRY(open("/dev/null", O_RDWR));
1776 if (dev_null < 0) {
1777 DL_ERR("cannot open /dev/null: %s", strerror(errno));
1778 return -1;
1779 }
1780 TRACE("[ Opened /dev/null file-descriptor=%d]", dev_null);
1782 /* If any of the stdio file descriptors is valid and not associated
1783 with /dev/null, dup /dev/null to it. */
1784 for (i = 0; i < 3; i++) {
1785 /* If it is /dev/null already, we are done. */
1786 if (i == dev_null) {
1787 continue;
1788 }
1790 TRACE("[ Nullifying stdio file descriptor %d]", i);
1791 status = TEMP_FAILURE_RETRY(fcntl(i, F_GETFL));
1793 /* If file is opened, we are good. */
1794 if (status != -1) {
1795 continue;
1796 }
1798 /* The only error we allow is that the file descriptor does not
1799 exist, in which case we dup /dev/null to it. */
1800 if (errno != EBADF) {
1801 DL_ERR("fcntl failed: %s", strerror(errno));
1802 return_value = -1;
1803 continue;
1804 }
1806 /* Try dupping /dev/null to this stdio file descriptor and
1807 repeat if there is a signal. Note that any errors in closing
1808 the stdio descriptor are lost. */
1809 status = TEMP_FAILURE_RETRY(dup2(dev_null, i));
1810 if (status < 0) {
1811 DL_ERR("dup2 failed: %s", strerror(errno));
1812 return_value = -1;
1813 continue;
1814 }
1815 }
1817 /* If /dev/null is not one of the stdio file descriptors, close it. */
1818 if (dev_null > 2) {
1819 TRACE("[ Closing /dev/null file-descriptor=%d]", dev_null);
1820 status = TEMP_FAILURE_RETRY(close(dev_null));
1821 if (status == -1) {
1822 DL_ERR("close failed: %s", strerror(errno));
1823 return_value = -1;
1824 }
1825 }
1827 return return_value;
1828 }
1830 bool soinfo::PrelinkImage() {
1831 /* Extract dynamic section */
1832 ElfW(Word) dynamic_flags = 0;
1833 phdr_table_get_dynamic_section(phdr, phnum, load_bias, &dynamic, &dynamic_flags);
1835 /* We can't log anything until the linker is relocated */
1836 bool relocating_linker = (flags & FLAG_LINKER) != 0;
1837 if (!relocating_linker) {
1838 INFO("[ linking %s ]", name);
1839 DEBUG("si->base = %p si->flags = 0x%08x", reinterpret_cast<void*>(base), flags);
1840 }
1842 if (dynamic == nullptr) {
1843 if (!relocating_linker) {
1844 DL_ERR("missing PT_DYNAMIC in \"%s\"", name);
1845 }
1846 return false;
1847 } else {
1848 if (!relocating_linker) {
1849 DEBUG("dynamic = %p", dynamic);
1850 }
1851 }
1853 #if defined(__arm__)
1854 (void) phdr_table_get_arm_exidx(phdr, phnum, load_bias,
1855 &ARM_exidx, &ARM_exidx_count);
1856 #endif
1858 // Extract useful information from dynamic section.
1859 uint32_t needed_count = 0;
1860 for (ElfW(Dyn)* d = dynamic; d->d_tag != DT_NULL; ++d) {
1861 DEBUG("d = %p, d[0](tag) = %p d[1](val) = %p",
1862 d, reinterpret_cast<void*>(d->d_tag), reinterpret_cast<void*>(d->d_un.d_val));
1863 switch (d->d_tag) {
1864 case DT_SONAME:
1865 // TODO: glibc dynamic linker uses this name for
1866 // initial library lookup; consider doing the same here.
1867 break;
1868 case DT_HASH:
1869 nbucket = reinterpret_cast<uint32_t*>(load_bias + d->d_un.d_ptr)[0];
1870 nchain = reinterpret_cast<uint32_t*>(load_bias + d->d_un.d_ptr)[1];
1871 bucket = reinterpret_cast<uint32_t*>(load_bias + d->d_un.d_ptr + 8);
1872 chain = reinterpret_cast<uint32_t*>(load_bias + d->d_un.d_ptr + 8 + nbucket * 4);
1873 break;
1874 case DT_STRTAB:
1875 strtab = reinterpret_cast<const char*>(load_bias + d->d_un.d_ptr);
1876 break;
1877 case DT_SYMTAB:
1878 symtab = reinterpret_cast<ElfW(Sym)*>(load_bias + d->d_un.d_ptr);
1879 break;
1880 case DT_SYMENT:
1881 if (d->d_un.d_val != sizeof(ElfW(Sym))) {
1882 DL_ERR("invalid DT_SYMENT: %zd", static_cast<size_t>(d->d_un.d_val));
1883 return false;
1884 }
1885 break;
1886 #if !defined(__LP64__)
1887 case DT_PLTREL:
1888 if (d->d_un.d_val != DT_REL) {
1889 DL_ERR("unsupported DT_RELA in \"%s\"", name);
1890 return false;
1891 }
1892 break;
1893 #endif
1894 case DT_JMPREL:
1895 #if defined(USE_RELA)
1896 plt_rela = reinterpret_cast<ElfW(Rela)*>(load_bias + d->d_un.d_ptr);
1897 #else
1898 plt_rel = reinterpret_cast<ElfW(Rel)*>(load_bias + d->d_un.d_ptr);
1899 #endif
1900 break;
1901 case DT_PLTRELSZ:
1902 #if defined(USE_RELA)
1903 plt_rela_count = d->d_un.d_val / sizeof(ElfW(Rela));
1904 #else
1905 plt_rel_count = d->d_un.d_val / sizeof(ElfW(Rel));
1906 #endif
1907 break;
1908 case DT_PLTGOT:
1909 #if defined(__mips__)
1910 // Used by mips and mips64.
1911 plt_got = reinterpret_cast<ElfW(Addr)**>(load_bias + d->d_un.d_ptr);
1912 #endif
1913 // Ignore for other platforms... (because RTLD_LAZY is not supported)
1914 break;
1915 case DT_DEBUG:
1916 // Set the DT_DEBUG entry to the address of _r_debug for GDB
1917 // if the dynamic table is writable
1918 // FIXME: not working currently for N64
1919 // The flags for the LOAD and DYNAMIC program headers do not agree.
1920 // The LOAD section containing the dynamic table has been mapped as
1921 // read-only, but the DYNAMIC header claims it is writable.
1922 #if !(defined(__mips__) && defined(__LP64__))
1923 if ((dynamic_flags & PF_W) != 0) {
1924 d->d_un.d_val = reinterpret_cast<uintptr_t>(&_r_debug);
1925 }
1926 break;
1927 #endif
1928 #if defined(USE_RELA)
1929 case DT_RELA:
1930 rela = reinterpret_cast<ElfW(Rela)*>(load_bias + d->d_un.d_ptr);
1931 break;
1932 case DT_RELASZ:
1933 rela_count = d->d_un.d_val / sizeof(ElfW(Rela));
1934 break;
1935 case DT_RELAENT:
1936 if (d->d_un.d_val != sizeof(ElfW(Rela))) {
1937 DL_ERR("invalid DT_RELAENT: %zd", static_cast<size_t>(d->d_un.d_val));
1938 return false;
1939 }
1940 break;
1941 case DT_RELACOUNT:
1942 // ignored (see DT_RELCOUNT comments for details)
1943 break;
1944 case DT_REL:
1945 DL_ERR("unsupported DT_REL in \"%s\"", name);
1946 return false;
1947 case DT_RELSZ:
1948 DL_ERR("unsupported DT_RELSZ in \"%s\"", name);
1949 return false;
1950 #else
1951 case DT_REL:
1952 rel = reinterpret_cast<ElfW(Rel)*>(load_bias + d->d_un.d_ptr);
1953 break;
1954 case DT_RELSZ:
1955 rel_count = d->d_un.d_val / sizeof(ElfW(Rel));
1956 break;
1957 case DT_RELENT:
1958 if (d->d_un.d_val != sizeof(ElfW(Rel))) {
1959 DL_ERR("invalid DT_RELENT: %zd", static_cast<size_t>(d->d_un.d_val));
1960 return false;
1961 }
1962 break;
1963 case DT_RELCOUNT:
1964 // "Indicates that all RELATIVE relocations have been concatenated together,
1965 // and specifies the RELATIVE relocation count."
1966 //
1967 // TODO: Spec also mentions that this can be used to optimize relocation process;
1968 // Not currently used by bionic linker - ignored.
1969 break;
1970 case DT_RELA:
1971 DL_ERR("unsupported DT_RELA in \"%s\"", name);
1972 return false;
1973 #endif
1974 case DT_INIT:
1975 init_func = reinterpret_cast<linker_function_t>(load_bias + d->d_un.d_ptr);
1976 DEBUG("%s constructors (DT_INIT) found at %p", name, init_func);
1977 break;
1978 case DT_FINI:
1979 fini_func = reinterpret_cast<linker_function_t>(load_bias + d->d_un.d_ptr);
1980 DEBUG("%s destructors (DT_FINI) found at %p", name, fini_func);
1981 break;
1982 case DT_INIT_ARRAY:
1983 init_array = reinterpret_cast<linker_function_t*>(load_bias + d->d_un.d_ptr);
1984 DEBUG("%s constructors (DT_INIT_ARRAY) found at %p", name, init_array);
1985 break;
1986 case DT_INIT_ARRAYSZ:
1987 init_array_count = ((unsigned)d->d_un.d_val) / sizeof(ElfW(Addr));
1988 break;
1989 case DT_FINI_ARRAY:
1990 fini_array = reinterpret_cast<linker_function_t*>(load_bias + d->d_un.d_ptr);
1991 DEBUG("%s destructors (DT_FINI_ARRAY) found at %p", name, fini_array);
1992 break;
1993 case DT_FINI_ARRAYSZ:
1994 fini_array_count = ((unsigned)d->d_un.d_val) / sizeof(ElfW(Addr));
1995 break;
1996 case DT_PREINIT_ARRAY:
1997 preinit_array = reinterpret_cast<linker_function_t*>(load_bias + d->d_un.d_ptr);
1998 DEBUG("%s constructors (DT_PREINIT_ARRAY) found at %p", name, preinit_array);
1999 break;
2000 case DT_PREINIT_ARRAYSZ:
2001 preinit_array_count = ((unsigned)d->d_un.d_val) / sizeof(ElfW(Addr));
2002 break;
2003 case DT_TEXTREL:
2004 #if defined(__LP64__)
2005 DL_ERR("text relocations (DT_TEXTREL) found in 64-bit ELF file \"%s\"", name);
2006 return false;
2007 #else
2008 has_text_relocations = true;
2009 break;
2010 #endif
2011 case DT_SYMBOLIC:
2012 // ignored
2013 break;
2014 case DT_NEEDED:
2015 ++needed_count;
2016 break;
2017 case DT_FLAGS:
2018 if (d->d_un.d_val & DF_TEXTREL) {
2019 #if defined(__LP64__)
2020 DL_ERR("text relocations (DF_TEXTREL) found in 64-bit ELF file \"%s\"", name);
2021 return false;
2022 #else
2023 has_text_relocations = true;
2024 #endif
2025 }
2026 break;
2027 #if defined(__mips__)
2028 case DT_STRSZ:
2029 break;
2030 case DT_MIPS_RLD_MAP:
2031 // Set the DT_MIPS_RLD_MAP entry to the address of _r_debug for GDB.
2032 {
2033 r_debug** dp = reinterpret_cast<r_debug**>(load_bias + d->d_un.d_ptr);
2034 *dp = &_r_debug;
2035 }
2036 break;
2037 case DT_MIPS_RLD_VERSION:
2038 case DT_MIPS_FLAGS:
2039 case DT_MIPS_BASE_ADDRESS:
2040 case DT_MIPS_UNREFEXTNO:
2041 break;
2043 case DT_MIPS_SYMTABNO:
2044 mips_symtabno = d->d_un.d_val;
2045 break;
2047 case DT_MIPS_LOCAL_GOTNO:
2048 mips_local_gotno = d->d_un.d_val;
2049 break;
2051 case DT_MIPS_GOTSYM:
2052 mips_gotsym = d->d_un.d_val;
2053 break;
2054 #endif
2056 default:
2057 if (!relocating_linker) {
2058 DEBUG("%s: unused DT entry: type %p arg %p", name,
2059 reinterpret_cast<void*>(d->d_tag), reinterpret_cast<void*>(d->d_un.d_val));
2060 }
2061 break;
2062 }
2063 }
2065 DEBUG("si->base = %p, si->strtab = %p, si->symtab = %p",
2066 reinterpret_cast<void*>(base), strtab, symtab);
2068 // Sanity checks.
2069 if (relocating_linker && needed_count != 0) {
2070 DL_ERR("linker cannot have DT_NEEDED dependencies on other libraries");
2071 return false;
2072 }
2073 if (nbucket == 0) {
2074 DL_ERR("empty/missing DT_HASH in \"%s\" (built with --hash-style=gnu?)", name);
2075 return false;
2076 }
2077 if (strtab == 0) {
2078 DL_ERR("empty/missing DT_STRTAB in \"%s\"", name);
2079 return false;
2080 }
2081 if (symtab == 0) {
2082 DL_ERR("empty/missing DT_SYMTAB in \"%s\"", name);
2083 return false;
2084 }
2085 return true;
2086 }
// Applies all relocations recorded by PrelinkImage() and re-establishes
// memory protections (text segments, GNU RELRO), honoring any
// android_dlext RELRO serialization/sharing requests.
//
// extinfo: optional dlopen-extension info; may be null. Only the
//          ANDROID_DLEXT_WRITE_RELRO / ANDROID_DLEXT_USE_RELRO flags
//          (and their relro_fd) are consulted here.
// Returns true on success; on failure DL_ERR() has recorded the reason
// and false is returned.
bool soinfo::LinkImage(const android_dlextinfo* extinfo) {

#if !defined(__LP64__)
  // has_text_relocations is only ever set on 32-bit (PrelinkImage rejects
  // DT_TEXTREL/DF_TEXTREL outright for 64-bit ELF files).
  if (has_text_relocations) {
    // Make segments writable to allow text relocations to work properly. We will later call
    // phdr_table_protect_segments() after all of them are applied and all constructors are run.
    DL_WARN("%s has text relocations. This is wasting memory and prevents "
            "security hardening. Please fix.", name);
    if (phdr_table_unprotect_segments(phdr, phnum, load_bias) < 0) {
      DL_ERR("can't unprotect loadable segments for \"%s\": %s",
             name, strerror(errno));
      return false;
    }
  }
#endif

#if defined(USE_RELA)
  // NOTE(review): Relocate() is treated here as returning truthy on
  // FAILURE (old soinfo_relocate() int convention) — its definition is
  // not visible in this chunk; confirm before changing these tests.
  if (rela != nullptr) {
    DEBUG("[ relocating %s ]", name);
    if (Relocate(rela, rela_count)) {
      return false;
    }
  }
  if (plt_rela != nullptr) {
    DEBUG("[ relocating %s plt ]", name);
    if (Relocate(plt_rela, plt_rela_count)) {
      return false;
    }
  }
#else
  if (rel != nullptr) {
    DEBUG("[ relocating %s ]", name);
    if (Relocate(rel, rel_count)) {
      return false;
    }
  }
  if (plt_rel != nullptr) {
    DEBUG("[ relocating %s plt ]", name);
    if (Relocate(plt_rel, plt_rel_count)) {
      return false;
    }
  }
#endif

#if defined(__mips__)
  // MIPS uses GOT-based relocation instead of the generic REL/RELA path.
  if (!mips_relocate_got(this)) {
    return false;
  }
#endif

  DEBUG("[ finished linking %s ]", name);

#if !defined(__LP64__)
  if (has_text_relocations) {
    // All relocations are done, we can protect our segments back to read-only.
    if (phdr_table_protect_segments(phdr, phnum, load_bias) < 0) {
      DL_ERR("can't protect segments for \"%s\": %s",
             name, strerror(errno));
      return false;
    }
  }
#endif

  /* We can also turn on GNU RELRO protection */
  if (phdr_table_protect_gnu_relro(phdr, phnum, load_bias) < 0) {
    DL_ERR("can't enable GNU RELRO protection for \"%s\": %s",
           name, strerror(errno));
    return false;
  }

  /* Handle serializing/sharing the RELRO segment */
  if (extinfo && (extinfo->flags & ANDROID_DLEXT_WRITE_RELRO)) {
    // Caller asked us to dump the post-relocation RELRO pages to relro_fd
    // so another process can map them (memory sharing optimization).
    if (phdr_table_serialize_gnu_relro(phdr, phnum, load_bias,
                                       extinfo->relro_fd) < 0) {
      DL_ERR("failed serializing GNU RELRO section for \"%s\": %s",
             name, strerror(errno));
      return false;
    }
  } else if (extinfo && (extinfo->flags & ANDROID_DLEXT_USE_RELRO)) {
    // Replace our RELRO pages with identical previously-serialized ones.
    if (phdr_table_map_gnu_relro(phdr, phnum, load_bias,
                                 extinfo->relro_fd) < 0) {
      DL_ERR("failed mapping GNU RELRO section for \"%s\": %s",
             name, strerror(errno));
      return false;
    }
  }

  notify_gdb_of_load(this);
  return true;
}
2179 /*
2180 * This function add vdso to internal dso list.
2181 * It helps to stack unwinding through signal handlers.
2182 * Also, it makes bionic more like glibc.
2183 */
2184 static void add_vdso(KernelArgumentBlock& args __unused) {
2185 #if defined(AT_SYSINFO_EHDR)
2186 ElfW(Ehdr)* ehdr_vdso = reinterpret_cast<ElfW(Ehdr)*>(args.getauxval(AT_SYSINFO_EHDR));
2187 if (ehdr_vdso == nullptr) {
2188 return;
2189 }
2191 soinfo* si = soinfo_alloc("[vdso]", nullptr, 0);
2193 si->phdr = reinterpret_cast<ElfW(Phdr)*>(reinterpret_cast<char*>(ehdr_vdso) + ehdr_vdso->e_phoff);
2194 si->phnum = ehdr_vdso->e_phnum;
2195 si->base = reinterpret_cast<ElfW(Addr)>(ehdr_vdso);
2196 si->size = phdr_table_get_load_size(si->phdr, si->phnum);
2197 si->load_bias = get_elf_exec_load_bias(ehdr_vdso);
2199 si->PrelinkImage();
2200 si->LinkImage(nullptr);
2201 #endif
2202 }
/*
 * This is the linker's own soinfo, maintained solely for GDB's benefit.
 * See init_linker_info_for_gdb() below for details.
 */
#if defined(__LP64__)
#define LINKER_PATH "/system/bin/linker64"
#else
#define LINKER_PATH "/system/bin/linker"
#endif
// Static (not soinfo_alloc'd) on purpose: the linker must not appear on
// the regular soinfo list. Filled in by init_linker_info_for_gdb() and
// inserted directly into the r_debug map.
static soinfo linker_soinfo_for_gdb(LINKER_PATH, nullptr, 0);
2214 /* gdb expects the linker to be in the debug shared object list.
2215 * Without this, gdb has trouble locating the linker's ".text"
2216 * and ".plt" sections. Gdb could also potentially use this to
2217 * relocate the offset of our exported 'rtld_db_dlactivity' symbol.
2218 * Don't use soinfo_alloc(), because the linker shouldn't
2219 * be on the soinfo list.
2220 */
2221 static void init_linker_info_for_gdb(ElfW(Addr) linker_base) {
2222 linker_soinfo_for_gdb.base = linker_base;
2224 /*
2225 * Set the dynamic field in the link map otherwise gdb will complain with
2226 * the following:
2227 * warning: .dynamic section for "/system/bin/linker" is not at the
2228 * expected address (wrong library or version mismatch?)
2229 */
2230 ElfW(Ehdr)* elf_hdr = reinterpret_cast<ElfW(Ehdr)*>(linker_base);
2231 ElfW(Phdr)* phdr = reinterpret_cast<ElfW(Phdr)*>(linker_base + elf_hdr->e_phoff);
2232 phdr_table_get_dynamic_section(phdr, elf_hdr->e_phnum, linker_base,
2233 &linker_soinfo_for_gdb.dynamic, nullptr);
2234 insert_soinfo_into_debug_map(&linker_soinfo_for_gdb);
2235 }
/*
 * This code is called after the linker has linked itself and
 * fixed its own GOT. It is safe to make references to externs
 * and other non-local data at this point.
 *
 * Bootstraps the main executable: builds its soinfo from the kernel's
 * aux-vector values, loads LD_PRELOADs and DT_NEEDED dependencies,
 * links everything, runs constructors, and returns the address the
 * assembly stub should jump to (the executable's entry point).
 */
static ElfW(Addr) __linker_init_post_relocation(KernelArgumentBlock& args, ElfW(Addr) linker_base) {
#if TIMING
  struct timeval t0, t1;
  gettimeofday(&t0, 0);
#endif

  // Initialize environment functions, and get to the ELF aux vectors table.
  linker_env_init(args);

  // If this is a setuid/setgid program, close the security hole described in
  // ftp://ftp.freebsd.org/pub/FreeBSD/CERT/advisories/FreeBSD-SA-02:23.stdio.asc
  if (get_AT_SECURE()) {
    nullify_closed_stdio();
  }

  debuggerd_init();

  // Get a few environment variables.
  const char* LD_DEBUG = linker_env_get("LD_DEBUG");
  if (LD_DEBUG != nullptr) {
    g_ld_debug_verbosity = atoi(LD_DEBUG);
  }

  // Normally, these are cleaned by linker_env_init, but the test
  // doesn't cost us anything. Both stay null for setuid/setgid programs.
  const char* ldpath_env = nullptr;
  const char* ldpreload_env = nullptr;
  if (!get_AT_SECURE()) {
    ldpath_env = linker_env_get("LD_LIBRARY_PATH");
    ldpreload_env = linker_env_get("LD_PRELOAD");
  }

  INFO("[ android linker & debugger ]");

  soinfo* si = soinfo_alloc(args.argv[0], nullptr, RTLD_GLOBAL);
  if (si == nullptr) {
    exit(EXIT_FAILURE);
  }

  /* bootstrap the link map, the main exe always needs to be first */
  si->flags |= FLAG_EXE;
  link_map* map = &(si->link_map_head);

  map->l_addr = 0;
  map->l_name = args.argv[0];
  map->l_prev = nullptr;
  map->l_next = nullptr;

  _r_debug.r_map = map;
  r_debug_tail = map;

  init_linker_info_for_gdb(linker_base);

  // Extract information passed from the kernel.
  si->phdr = reinterpret_cast<ElfW(Phdr)*>(args.getauxval(AT_PHDR));
  si->phnum = args.getauxval(AT_PHNUM);
  si->entry = args.getauxval(AT_ENTRY);

  /* Compute the value of si->base. We can't rely on the fact that
   * the first entry is the PHDR because this will not be true
   * for certain executables (e.g. some in the NDK unit test suite)
   */
  si->base = 0;
  si->size = phdr_table_get_load_size(si->phdr, si->phnum);
  si->load_bias = 0;
  for (size_t i = 0; i < si->phnum; ++i) {
    if (si->phdr[i].p_type == PT_PHDR) {
      // PT_PHDR's p_vaddr/p_offset let us recover both the load bias and
      // the mapped base of the executable from AT_PHDR alone.
      si->load_bias = reinterpret_cast<ElfW(Addr)>(si->phdr) - si->phdr[i].p_vaddr;
      si->base = reinterpret_cast<ElfW(Addr)>(si->phdr) - si->phdr[i].p_offset;
      break;
    }
  }
  si->dynamic = nullptr;
  si->ref_count = 1;

  ElfW(Ehdr)* elf_hdr = reinterpret_cast<ElfW(Ehdr)*>(si->base);
  if (elf_hdr->e_type != ET_DYN) {
    __libc_format_fd(2, "error: only position independent executables (PIE) are supported.\n");
    exit(EXIT_FAILURE);
  }

  // Use LD_LIBRARY_PATH and LD_PRELOAD (but only if we aren't setuid/setgid).
  parse_LD_LIBRARY_PATH(ldpath_env);
  parse_LD_PRELOAD(ldpreload_env);

  somain = si;

  // NOTE(review): the boolean result is ignored here (it is checked when
  // the linker links itself in __linker_init); errors surface later via
  // LinkImage below. Confirm this is intentional.
  si->PrelinkImage();

  // Load ld_preloads and dependencies.
  // LD_PRELOAD names come first so they are searched before DT_NEEDED libs.
  StringLinkedList needed_library_name_list;
  size_t needed_libraries_count = 0;
  size_t ld_preloads_count = 0;
  while (g_ld_preload_names[ld_preloads_count] != nullptr) {
    needed_library_name_list.push_back(g_ld_preload_names[ld_preloads_count++]);
    ++needed_libraries_count;
  }

  for_each_dt_needed(si, [&](const char* name) {
    needed_library_name_list.push_back(name);
    ++needed_libraries_count;
  });

  // VLAs sized by the run-time count; a zero-length VLA is technically UB,
  // but the find_libraries call below is guarded by count > 0.
  const char* needed_library_names[needed_libraries_count];
  soinfo* needed_library_si[needed_libraries_count];

  memset(needed_library_names, 0, sizeof(needed_library_names));
  needed_library_name_list.copy_to_array(needed_library_names, needed_libraries_count);

  if (needed_libraries_count > 0 && !find_libraries(needed_library_names, needed_libraries_count, needed_library_si, g_ld_preloads, ld_preloads_count, RTLD_GLOBAL, nullptr)) {
    __libc_format_fd(2, "CANNOT LINK EXECUTABLE DEPENDENCIES: %s\n", linker_get_error_buffer());
    exit(EXIT_FAILURE);
  }

  for (size_t i = 0; i<needed_libraries_count; ++i) {
    si->add_child(needed_library_si[i]);
  }

  if (!si->LinkImage(nullptr)) {
    __libc_format_fd(2, "CANNOT LINK EXECUTABLE: %s\n", linker_get_error_buffer());
    exit(EXIT_FAILURE);
  }

  add_vdso(args);

  si->CallPreInitConstructors();

  /* After the PrelinkImage, the si->load_bias is initialized.
   * For so lib, the map->l_addr will be updated in notify_gdb_of_load.
   * We need to update this value for so exe here. So Unwind_Backtrace
   * for some arch like x86 could work correctly within so exe.
   */
  map->l_addr = si->load_bias;
  si->CallConstructors();

#if TIMING
  gettimeofday(&t1, nullptr);
  PRINT("LINKER TIME: %s: %d microseconds", args.argv[0], (int) (
           (((long long)t1.tv_sec * 1000000LL) + (long long)t1.tv_usec) -
           (((long long)t0.tv_sec * 1000000LL) + (long long)t0.tv_usec)));
#endif
#if STATS
  PRINT("RELO STATS: %s: %d abs, %d rel, %d copy, %d symbol", args.argv[0],
         linker_stats.count[kRelocAbsolute],
         linker_stats.count[kRelocRelative],
         linker_stats.count[kRelocCopy],
         linker_stats.count[kRelocSymbol]);
#endif
#if COUNT_PAGES
  {
    // Popcount of the page-modification bitmask maintained during
    // relocation (word width differs between 32- and 64-bit builds).
    unsigned n;
    unsigned i;
    unsigned count = 0;
    for (n = 0; n < 4096; n++) {
      if (bitmask[n]) {
        unsigned x = bitmask[n];
#if defined(__LP64__)
        for (i = 0; i < 32; i++) {
#else
        for (i = 0; i < 8; i++) {
#endif
          if (x & 1) {
            count++;
          }
          x >>= 1;
        }
      }
    }
    PRINT("PAGES MODIFIED: %s: %d (%dKB)", args.argv[0], count, count * 4);
  }
#endif

#if TIMING || STATS || COUNT_PAGES
  fflush(stdout);
#endif

  TRACE("[ Ready to execute '%s' @ %p ]", si->name, reinterpret_cast<void*>(si->entry));
  return si->entry;
}
/* Compute the load-bias of an existing executable. This shall only
 * be used to compute the load bias of an executable or shared library
 * that was loaded by the kernel itself.
 *
 * Input:
 *   elf -> address of ELF header, assumed to be at the start of the file.
 * Return:
 *   load bias, i.e. add the value of any p_vaddr in the file to get
 *   the corresponding address in memory. Returns 0 if there is no
 *   PT_LOAD program header.
 */
static ElfW(Addr) get_elf_exec_load_bias(const ElfW(Ehdr)* elf) {
  // The program header table lives e_phoff bytes past the ELF header.
  const ElfW(Phdr)* phdr_table =
      reinterpret_cast<const ElfW(Phdr)*>(reinterpret_cast<uintptr_t>(elf) + elf->e_phoff);

  // The bias is derived from the first PT_LOAD entry: the difference
  // between where the segment actually sits in memory and where its
  // p_vaddr says it should be.
  for (size_t i = 0; i < elf->e_phnum; ++i) {
    const ElfW(Phdr)& ph = phdr_table[i];
    if (ph.p_type == PT_LOAD) {
      return reinterpret_cast<ElfW(Addr)>(elf) + ph.p_offset - ph.p_vaddr;
    }
  }
  return 0;
}
2445 extern "C" void _start();
/*
 * This is the entry point for the linker, called from begin.S. This
 * method is responsible for fixing the linker's own relocations, and
 * then calling __linker_init_post_relocation().
 *
 * Because this method is called before the linker has fixed its own
 * relocations, any attempt to reference an extern variable, extern
 * function, or other GOT reference will generate a segfault.
 */
extern "C" ElfW(Addr) __linker_init(void* raw_args) {
  KernelArgumentBlock args(raw_args);

  // AT_BASE is where the kernel mapped the linker itself.
  ElfW(Addr) linker_addr = args.getauxval(AT_BASE);
  ElfW(Addr) entry_point = args.getauxval(AT_ENTRY);
  ElfW(Ehdr)* elf_hdr = reinterpret_cast<ElfW(Ehdr)*>(linker_addr);
  ElfW(Phdr)* phdr = reinterpret_cast<ElfW(Phdr)*>(linker_addr + elf_hdr->e_phoff);

  // Stack-allocated on purpose: soinfo_alloc() is unusable before
  // self-relocation, and the linker must stay off the soinfo list anyway.
  soinfo linker_so("[dynamic linker]", nullptr, 0);

  // If the linker is not acting as PT_INTERP entry_point is equal to
  // _start. Which means that the linker is running as an executable and
  // already linked by PT_INTERP.
  //
  // This happens when user tries to run 'adb shell /system/bin/linker'
  // see also https://code.google.com/p/android/issues/detail?id=63174
  if (reinterpret_cast<ElfW(Addr)>(&_start) == entry_point) {
    __libc_fatal("This is %s, the helper program for shared library executables.\n", args.argv[0]);
  }

  linker_so.base = linker_addr;
  linker_so.size = phdr_table_get_load_size(phdr, elf_hdr->e_phnum);
  linker_so.load_bias = get_elf_exec_load_bias(elf_hdr);
  linker_so.dynamic = nullptr;
  linker_so.phdr = phdr;
  linker_so.phnum = elf_hdr->e_phnum;
  linker_so.flags |= FLAG_LINKER;

  if (!(linker_so.PrelinkImage() && linker_so.LinkImage(nullptr))) {
    // It would be nice to print an error message, but if the linker
    // can't link itself, there's no guarantee that we'll be able to
    // call write() (because it involves a GOT reference). We may as
    // well try though...
    const char* msg = "CANNOT LINK EXECUTABLE: ";
    write(2, msg, strlen(msg));
    write(2, __linker_dl_err_buf, strlen(__linker_dl_err_buf));
    write(2, "\n", 1);
    _exit(EXIT_FAILURE);
  }

  __libc_init_tls(args);

  // Initialize the linker's own global variables
  linker_so.CallConstructors();

  // Initialize static variables. Note that in order to
  // get correct libdl_info we need to call constructors
  // before get_libdl_info().
  solist = get_libdl_info();
  sonext = get_libdl_info();

  // We have successfully fixed our own relocations. It's safe to run
  // the main part of the linker now.
  args.abort_message_ptr = &g_abort_message;
  ElfW(Addr) start_address = __linker_init_post_relocation(args, linker_addr);

  // Lock down the linker's writable data now that initialization is done.
  protect_data(PROT_READ);

  // Return the address that the calling assembly stub should jump to.
  return start_address;
}