1 /*
2 * Copyright (C) 2008, 2009 The Android Open Source Project
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the
13 * distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
29 #include <dlfcn.h>
30 #include <errno.h>
31 #include <fcntl.h>
32 #include <inttypes.h>
33 #include <pthread.h>
34 #include <stdio.h>
35 #include <stdlib.h>
36 #include <string.h>
37 #include <sys/mman.h>
38 #include <sys/param.h>
39 #include <unistd.h>
41 #include <new>
43 // Private C library headers.
44 #include "private/bionic_tls.h"
45 #include "private/KernelArgumentBlock.h"
46 #include "private/ScopedPthreadMutexLocker.h"
47 #include "private/ScopedFd.h"
48 #include "private/ScopeGuard.h"
49 #include "private/UniquePtr.h"
51 #include "linker.h"
52 #include "linker_debug.h"
53 #include "linker_environ.h"
54 #include "linker_phdr.h"
55 #include "linker_allocator.h"
57 /* >>> IMPORTANT NOTE - READ ME BEFORE MODIFYING <<<
58 *
59 * Do NOT use malloc() and friends or pthread_*() code here.
60 * Don't use printf() either; it's caused mysterious memory
61 * corruption in the past.
 * The linker runs before we bring up libc, and it's easiest
 * to make sure it does not depend on any complex libc features.
64 *
65 * open issues / todo:
66 *
67 * - cleaner error reporting
68 * - after linking, set as much stuff as possible to READONLY
69 * and NOEXEC
70 */
72 #if defined(__LP64__)
73 #define SEARCH_NAME(x) x
74 #else
75 // Nvidia drivers are relying on the bug:
76 // http://code.google.com/p/android/issues/detail?id=6670
77 // so we continue to use base-name lookup for lp32
// Return the filename component of |name| -- the part after the final '/' --
// or |name| itself when it contains no '/'.
static const char* get_base_name(const char* name) {
  const char* slash = strrchr(name, '/');
  if (slash == NULL) {
    return name;
  }
  return slash + 1;
}
82 #define SEARCH_NAME(x) get_base_name(x)
83 #endif
static ElfW(Addr) get_elf_exec_load_bias(const ElfW(Ehdr)* elf);

// Arena allocators for soinfo objects and their list links. Both live in
// linker-private pages so they can be toggled read-only (see protect_data()).
static LinkerAllocator<soinfo> g_soinfo_allocator;
static LinkerAllocator<LinkedListEntry<soinfo>> g_soinfo_links_allocator;

// Global list of loaded libraries: solist is the head (always the static
// libdl_info entry), sonext the tail used for O(1) append.
static soinfo* solist;
static soinfo* sonext;
static soinfo* somain; // main process, always the one after libdl_info

// Fallback search directories used when LD_LIBRARY_PATH yields no hit.
static const char* const kDefaultLdPaths[] = {
#if defined(__LP64__)
  "/vendor/lib64",
  "/system/lib64",
#else
  "/vendor/lib",
  "/system/lib",
#endif
  nullptr
};

// NOTE: LDPATH_BUFSIZE references LDPATH_MAX before it is defined; this is
// fine because macro expansion happens at use, not at definition.
#define LDPATH_BUFSIZE (LDPATH_MAX*64)
#define LDPATH_MAX 8

#define LDPRELOAD_BUFSIZE (LDPRELOAD_MAX*64)
#define LDPRELOAD_MAX 8

// Backing storage and parsed, null-terminated entry arrays for
// LD_LIBRARY_PATH / LD_PRELOAD (hence the "+ 1" slot for the terminator).
static char g_ld_library_paths_buffer[LDPATH_BUFSIZE];
static const char* g_ld_library_paths[LDPATH_MAX + 1];

static char g_ld_preloads_buffer[LDPRELOAD_BUFSIZE];
static const char* g_ld_preload_names[LDPRELOAD_MAX + 1];

// soinfos for the libraries named in g_ld_preload_names, once loaded.
static soinfo* g_ld_preloads[LDPRELOAD_MAX + 1];

__LIBC_HIDDEN__ int g_ld_debug_verbosity;

__LIBC_HIDDEN__ abort_msg_t* g_abort_message = nullptr; // For debuggerd.
// Buckets for relocation statistics (see count_relocation / STATS below).
enum RelocationKind {
  kRelocAbsolute = 0,
  kRelocRelative,
  kRelocCopy,
  kRelocSymbol,
  kRelocMax
};
#if STATS
// Per-kind relocation counters, available in statistics builds only.
struct linker_stats_t {
  int count[kRelocMax];
};

static linker_stats_t linker_stats;

static void count_relocation(RelocationKind kind) {
  ++linker_stats.count[kind];
}
#else
// No-op when STATS is disabled; keeps call sites unconditional.
static void count_relocation(RelocationKind) {
}
#endif

#if COUNT_PAGES
// Bitmap of 4KiB pages touched by relocations; MARK(offset) records the
// page containing |offset|. The LP64 variant bounds-checks the word index.
static unsigned bitmask[4096];
#if defined(__LP64__)
#define MARK(offset) \
    do { \
      if ((((offset) >> 12) >> 5) < 4096) \
          bitmask[((offset) >> 12) >> 5] |= (1 << (((offset) >> 12) & 31)); \
    } while (0)
#else
#define MARK(offset) \
    do { \
      bitmask[((offset) >> 12) >> 3] |= (1 << (((offset) >> 12) & 7)); \
    } while (0)
#endif
#else
#define MARK(x) do {} while (0)
#endif
// You shouldn't try to call memory-allocating functions in the dynamic linker.
// Guard against the most obvious ones.
// Each use expands to a definition that aborts the process when called.
#define DISALLOW_ALLOCATION(return_type, name, ...) \
    return_type name __VA_ARGS__ \
    { \
      __libc_fatal("ERROR: " #name " called from the dynamic linker!\n"); \
    }
DISALLOW_ALLOCATION(void*, malloc, (size_t u __unused));
DISALLOW_ALLOCATION(void, free, (void* u __unused));
DISALLOW_ALLOCATION(void*, realloc, (void* u1 __unused, size_t u2 __unused));
DISALLOW_ALLOCATION(void*, calloc, (size_t u1 __unused, size_t u2 __unused));
// Storage backing the dlerror() messages produced inside the linker.
static char __linker_dl_err_buf[768];

// Expose the buffer and its capacity to the error-formatting machinery.
char* linker_get_error_buffer() {
  return __linker_dl_err_buf;
}

size_t linker_get_error_buffer_size() {
  return sizeof(__linker_dl_err_buf);
}
// This function is an empty stub where GDB locates a breakpoint to get notified
// about linker activity.
extern "C" void __attribute__((noinline)) __attribute__((visibility("default"))) rtld_db_dlactivity();

// Serializes all updates to _r_debug / r_debug_tail below.
static pthread_mutex_t g__r_debug_mutex = PTHREAD_MUTEX_INITIALIZER;

// The r_debug structure a debugger inspects: version 1, empty map,
// breakpoint address, initially in the consistent state.
static r_debug _r_debug = {1, nullptr, reinterpret_cast<uintptr_t>(&rtld_db_dlactivity), r_debug::RT_CONSISTENT, 0};

// Tail of the link_map list hanging off _r_debug.r_map (for O(1) append).
static link_map* r_debug_tail = 0;
194 static void insert_soinfo_into_debug_map(soinfo* info) {
195 // Copy the necessary fields into the debug structure.
196 link_map* map = &(info->link_map_head);
197 map->l_addr = info->load_bias;
198 map->l_name = info->name;
199 map->l_ld = info->dynamic;
201 // Stick the new library at the end of the list.
202 // gdb tends to care more about libc than it does
203 // about leaf libraries, and ordering it this way
204 // reduces the back-and-forth over the wire.
205 if (r_debug_tail) {
206 r_debug_tail->l_next = map;
207 map->l_prev = r_debug_tail;
208 map->l_next = 0;
209 } else {
210 _r_debug.r_map = map;
211 map->l_prev = 0;
212 map->l_next = 0;
213 }
214 r_debug_tail = map;
215 }
217 static void remove_soinfo_from_debug_map(soinfo* info) {
218 link_map* map = &(info->link_map_head);
220 if (r_debug_tail == map) {
221 r_debug_tail = map->l_prev;
222 }
224 if (map->l_prev) {
225 map->l_prev->l_next = map->l_next;
226 }
227 if (map->l_next) {
228 map->l_next->l_prev = map->l_prev;
229 }
230 }
232 static void notify_gdb_of_load(soinfo* info) {
233 if (info->flags & FLAG_EXE) {
234 // GDB already knows about the main executable
235 return;
236 }
238 ScopedPthreadMutexLocker locker(&g__r_debug_mutex);
240 _r_debug.r_state = r_debug::RT_ADD;
241 rtld_db_dlactivity();
243 insert_soinfo_into_debug_map(info);
245 _r_debug.r_state = r_debug::RT_CONSISTENT;
246 rtld_db_dlactivity();
247 }
249 static void notify_gdb_of_unload(soinfo* info) {
250 if (info->flags & FLAG_EXE) {
251 // GDB already knows about the main executable
252 return;
253 }
255 ScopedPthreadMutexLocker locker(&g__r_debug_mutex);
257 _r_debug.r_state = r_debug::RT_DELETE;
258 rtld_db_dlactivity();
260 remove_soinfo_from_debug_map(info);
262 _r_debug.r_state = r_debug::RT_CONSISTENT;
263 rtld_db_dlactivity();
264 }
// Pulse the RT_ADD -> RT_CONSISTENT transition so an attached debugger
// re-reads the whole library list.
void notify_gdb_of_libraries() {
  _r_debug.r_state = r_debug::RT_ADD;
  rtld_db_dlactivity();
  _r_debug.r_state = r_debug::RT_CONSISTENT;
  rtld_db_dlactivity();
}
// Allocation hooks used by soinfo's linked lists; backed by the
// linker-private arena allocators above.
LinkedListEntry<soinfo>* SoinfoListAllocator::alloc() {
  return g_soinfo_links_allocator.alloc();
}

void SoinfoListAllocator::free(LinkedListEntry<soinfo>* entry) {
  g_soinfo_links_allocator.free(entry);
}

// Toggle PROT_* protection on all linker bookkeeping pages (e.g. make them
// read-only between dlopen/dlclose operations).
static void protect_data(int protection) {
  g_soinfo_allocator.protect_all(protection);
  g_soinfo_links_allocator.protect_all(protection);
}
286 static soinfo* soinfo_alloc(const char* name, struct stat* file_stat, off64_t file_offset, uint32_t rtld_flags) {
287 if (strlen(name) >= SOINFO_NAME_LEN) {
288 DL_ERR("library name \"%s\" too long", name);
289 return nullptr;
290 }
292 soinfo* si = new (g_soinfo_allocator.alloc()) soinfo(name, file_stat, file_offset, rtld_flags);
294 sonext->next = si;
295 sonext = si;
297 TRACE("name %s: allocated soinfo @ %p", name, si);
298 return si;
299 }
301 static void soinfo_free(soinfo* si) {
302 if (si == nullptr) {
303 return;
304 }
306 if (si->base != 0 && si->size != 0) {
307 munmap(reinterpret_cast<void*>(si->base), si->size);
308 }
310 soinfo *prev = nullptr, *trav;
312 TRACE("name %s: freeing soinfo @ %p", si->name, si);
314 for (trav = solist; trav != nullptr; trav = trav->next) {
315 if (trav == si) {
316 break;
317 }
318 prev = trav;
319 }
321 if (trav == nullptr) {
322 // si was not in solist
323 DL_ERR("name \"%s\" is not in solist!", si->name);
324 return;
325 }
327 // clear links to/from si
328 si->remove_all_links();
330 // prev will never be null, because the first entry in solist is
331 // always the static libdl_info.
332 prev->next = si->next;
333 if (si == sonext) {
334 sonext = prev;
335 }
337 g_soinfo_allocator.free(si);
338 }
340 static void parse_path(const char* path, const char* delimiters,
341 const char** array, char* buf, size_t buf_size, size_t max_count) {
342 if (path == nullptr) {
343 return;
344 }
346 size_t len = strlcpy(buf, path, buf_size);
348 size_t i = 0;
349 char* buf_p = buf;
350 while (i < max_count && (array[i] = strsep(&buf_p, delimiters))) {
351 if (*array[i] != '\0') {
352 ++i;
353 }
354 }
356 // Forget the last path if we had to truncate; this occurs if the 2nd to
357 // last char isn't '\0' (i.e. wasn't originally a delimiter).
358 if (i > 0 && len >= buf_size && buf[buf_size - 2] != '\0') {
359 array[i - 1] = nullptr;
360 } else {
361 array[i] = nullptr;
362 }
363 }
// Populate g_ld_library_paths from a ':'-separated LD_LIBRARY_PATH value.
static void parse_LD_LIBRARY_PATH(const char* path) {
  parse_path(path, ":", g_ld_library_paths,
             g_ld_library_paths_buffer, sizeof(g_ld_library_paths_buffer), LDPATH_MAX);
}

// Populate g_ld_preload_names from an LD_PRELOAD value.
static void parse_LD_PRELOAD(const char* path) {
  // We have historically supported ':' as well as ' ' in LD_PRELOAD.
  parse_path(path, " :", g_ld_preload_names,
             g_ld_preloads_buffer, sizeof(g_ld_preloads_buffer), LDPRELOAD_MAX);
}
#if defined(__arm__)

// For a given PC, find the .so that it belongs to.
// Returns the base address of the .ARM.exidx section
// for that .so, and the number of 8-byte entries
// in that section (via *pcount).
//
// Intended to be called by libc's __gnu_Unwind_Find_exidx().
//
// This function is exposed via dlfcn.cpp and libdl.so.
_Unwind_Ptr dl_unwind_find_exidx(_Unwind_Ptr pc, int* pcount) {
  unsigned addr = (unsigned)pc;

  // Linear scan of all loaded libraries for the one whose mapping covers pc.
  for (soinfo* si = solist; si != 0; si = si->next) {
    if ((addr >= si->base) && (addr < (si->base + si->size))) {
      *pcount = si->ARM_exidx_count;
      return (_Unwind_Ptr)si->ARM_exidx;
    }
  }
  // NOTE(review): _Unwind_Ptr is typically an integer type; `return nullptr`
  // relies on the toolchain accepting that conversion -- confirm this still
  // builds on arm, otherwise it should read `return 0;`.
  *pcount = 0;
  return nullptr;
}

#endif
401 // Here, we only have to provide a callback to iterate across all the
402 // loaded libraries. gcc_eh does the rest.
403 int dl_iterate_phdr(int (*cb)(dl_phdr_info* info, size_t size, void* data), void* data) {
404 int rv = 0;
405 for (soinfo* si = solist; si != nullptr; si = si->next) {
406 dl_phdr_info dl_info;
407 dl_info.dlpi_addr = si->link_map_head.l_addr;
408 dl_info.dlpi_name = si->link_map_head.l_name;
409 dl_info.dlpi_phdr = si->phdr;
410 dl_info.dlpi_phnum = si->phnum;
411 rv = cb(&dl_info, sizeof(dl_phdr_info), data);
412 if (rv != 0) {
413 break;
414 }
415 }
416 return rv;
417 }
419 ElfW(Sym)* soinfo::find_symbol_by_name(SymbolName& symbol_name) {
420 return is_gnu_hash() ? gnu_lookup(symbol_name) : elf_lookup(symbol_name);
421 }
423 static bool is_symbol_global_and_defined(const soinfo* si, const ElfW(Sym)* s) {
424 if (ELF_ST_BIND(s->st_info) == STB_GLOBAL ||
425 ELF_ST_BIND(s->st_info) == STB_WEAK) {
426 return s->st_shndx != SHN_UNDEF;
427 } else if (ELF_ST_BIND(s->st_info) != STB_LOCAL) {
428 DL_WARN("unexpected ST_BIND value: %d for '%s' in '%s'",
429 ELF_ST_BIND(s->st_info), si->get_string(s->st_name), si->name);
430 }
432 return false;
433 }
// Look up |symbol_name| via the GNU hash scheme: a Bloom filter rejects most
// misses cheaply, then the bucket/chain arrays are walked.
ElfW(Sym)* soinfo::gnu_lookup(SymbolName& symbol_name) {
  uint32_t hash = symbol_name.gnu_hash();
  // Second Bloom-filter hash, derived by shifting per DT_GNU_HASH's shift2.
  uint32_t h2 = hash >> gnu_shift2_;

  uint32_t bloom_mask_bits = sizeof(ElfW(Addr))*8;
  uint32_t word_num = (hash / bloom_mask_bits) & gnu_maskwords_;
  ElfW(Addr) bloom_word = gnu_bloom_filter_[word_num];

  // test against bloom filter
  if ((1 & (bloom_word >> (hash % bloom_mask_bits)) & (bloom_word >> (h2 % bloom_mask_bits))) == 0) {
    return nullptr;
  }

  // bloom test says "probably yes"...
  uint32_t n = bucket_[hash % nbucket_];

  if (n == 0) {
    return nullptr;
  }

  do {
    ElfW(Sym)* s = symtab_ + n;
    // chain_ entries store the symbol's hash with the low bit repurposed as
    // an end-of-chain flag, hence the ">> 1" comparison and the "& 1" test.
    if (((chain_[n] ^ hash) >> 1) == 0 &&
        strcmp(get_string(s->st_name), symbol_name.get_name()) == 0 &&
        is_symbol_global_and_defined(this, s)) {
      return s;
    }
  } while ((chain_[n++] & 1) == 0);

  return nullptr;
}
467 ElfW(Sym)* soinfo::elf_lookup(SymbolName& symbol_name) {
468 uint32_t hash = symbol_name.elf_hash();
470 TRACE_TYPE(LOOKUP, "SEARCH %s in %s@%p h=%x(elf) %zd",
471 symbol_name.get_name(), name, reinterpret_cast<void*>(base), hash, hash % nbucket_);
473 for (uint32_t n = bucket_[hash % nbucket_]; n != 0; n = chain_[n]) {
474 ElfW(Sym)* s = symtab_ + n;
475 if (strcmp(get_string(s->st_name), symbol_name.get_name()) == 0 && is_symbol_global_and_defined(this, s)) {
476 TRACE_TYPE(LOOKUP, "FOUND %s in %s (%p) %zd",
477 symbol_name.get_name(), name, reinterpret_cast<void*>(s->st_value),
478 static_cast<size_t>(s->st_size));
479 return s;
480 }
481 }
483 TRACE_TYPE(LOOKUP, "NOT FOUND %s in %s@%p %x %zd",
484 symbol_name.get_name(), name, reinterpret_cast<void*>(base), hash, hash % nbucket_);
486 return nullptr;
487 }
// Construct a soinfo in storage handed out by g_soinfo_allocator.
// NOTE(review): the memset over *this assumes soinfo contains only
// trivially-initializable members -- verify when adding fields.
soinfo::soinfo(const char* name, const struct stat* file_stat, off64_t file_offset, int rtld_flags) {
  memset(this, 0, sizeof(*this));

  strlcpy(this->name, name, sizeof(this->name));
  flags = FLAG_NEW_SOINFO;
  version_ = SOINFO_VERSION;

  if (file_stat != nullptr) {
    // Remember the backing file's identity so later loads of the same file
    // under a different path (symlinks etc.) can be detected.
    this->st_dev_ = file_stat->st_dev;
    this->st_ino_ = file_stat->st_ino;
    this->file_offset_ = file_offset;
  }

  this->rtld_flags_ = rtld_flags;
}
506 uint32_t SymbolName::elf_hash() {
507 if (!has_elf_hash_) {
508 const unsigned char* name = reinterpret_cast<const unsigned char*>(name_);
509 uint32_t h = 0, g;
511 while (*name) {
512 h = (h << 4) + *name++;
513 g = h & 0xf0000000;
514 h ^= g;
515 h ^= g >> 24;
516 }
518 elf_hash_ = h;
519 has_elf_hash_ = true;
520 }
522 return elf_hash_;
523 }
525 uint32_t SymbolName::gnu_hash() {
526 if (!has_gnu_hash_) {
527 uint32_t h = 5381;
528 const unsigned char* name = reinterpret_cast<const unsigned char*>(name_);
529 while (*name != 0) {
530 h += (h << 5) + *name++; // h*33 + c = h + h * 32 + c = h + h << 5 + c
531 }
533 gnu_hash_ = h;
534 has_gnu_hash_ = true;
535 }
537 return gnu_hash_;
538 }
// Resolve |name| for a relocation inside |si_from|: first the defining
// library itself when DT_SYMBOLIC is set, then the global group, then the
// local group. On success, *si_found_in names the providing library.
static ElfW(Sym)* soinfo_do_lookup(soinfo* si_from, const char* name, soinfo** si_found_in,
    const soinfo::soinfo_list_t& global_group, const soinfo::soinfo_list_t& local_group) {
  SymbolName symbol_name(name);
  ElfW(Sym)* s = nullptr;

  /* "This element's presence in a shared object library alters the dynamic linker's
   * symbol resolution algorithm for references within the library. Instead of starting
   * a symbol search with the executable file, the dynamic linker starts from the shared
   * object itself. If the shared object fails to supply the referenced symbol, the
   * dynamic linker then searches the executable file and other shared objects as usual."
   *
   * http://www.sco.com/developers/gabi/2012-12-31/ch5.dynamic.html
   *
   * Note that this is unlikely since static linker avoids generating
   * relocations for -Bsymbolic linked dynamic executables.
   */
  if (si_from->has_DT_SYMBOLIC) {
    DEBUG("%s: looking up %s in local scope (DT_SYMBOLIC)", si_from->name, name);
    s = si_from->find_symbol_by_name(symbol_name);
    if (s != nullptr) {
      *si_found_in = si_from;
    }
  }

  // 1. Look for it in global_group
  if (s == nullptr) {
    global_group.visit([&](soinfo* global_si) {
      DEBUG("%s: looking up %s in %s (from global group)", si_from->name, name, global_si->name);
      s = global_si->find_symbol_by_name(symbol_name);
      if (s != nullptr) {
        *si_found_in = global_si;
        return false;  // found: stop visiting
      }

      return true;
    });
  }

  // 2. Look for it in the local group
  if (s == nullptr) {
    local_group.visit([&](soinfo* local_si) {
      if (local_si == si_from && si_from->has_DT_SYMBOLIC) {
        // we already did this - skip
        return true;
      }

      DEBUG("%s: looking up %s in %s (from local group)", si_from->name, name, local_si->name);
      s = local_si->find_symbol_by_name(symbol_name);
      if (s != nullptr) {
        *si_found_in = local_si;
        return false;  // found: stop visiting
      }

      return true;
    });
  }

  if (s != nullptr) {
    TRACE_TYPE(LOOKUP, "si %s sym %s s->st_value = %p, "
               "found in %s, base = %p, load bias = %p",
               si_from->name, name, reinterpret_cast<void*>(s->st_value),
               (*si_found_in)->name, reinterpret_cast<void*>((*si_found_in)->base),
               reinterpret_cast<void*>((*si_found_in)->load_bias));
  }

  return s;
}
// Each size has its own allocator.
template<size_t size>
class SizeBasedAllocator {
 public:
  static void* alloc() {
    return allocator_.alloc();
  }

  static void free(void* ptr) {
    allocator_.free(ptr);
  }

 private:
  static LinkerBlockAllocator allocator_;
};

template<size_t size>
LinkerBlockAllocator SizeBasedAllocator<size>::allocator_(size);

// Typed facade over SizeBasedAllocator, keyed by sizeof(T).
template<typename T>
class TypeBasedAllocator {
 public:
  static T* alloc() {
    return reinterpret_cast<T*>(SizeBasedAllocator<sizeof(T)>::alloc());
  }

  static void free(T* ptr) {
    SizeBasedAllocator<sizeof(T)>::free(ptr);
  }
};
// A pending request to load library |name_| because |needed_by_| listed it
// in DT_NEEDED (or it was requested directly). Allocated from the linker's
// block allocator, never the heap.
class LoadTask {
 public:
  struct deleter_t {
    void operator()(LoadTask* t) {
      TypeBasedAllocator<LoadTask>::free(t);
    }
  };

  typedef UniquePtr<LoadTask, deleter_t> unique_ptr;

  static deleter_t deleter;

  static LoadTask* create(const char* name, soinfo* needed_by) {
    LoadTask* ptr = TypeBasedAllocator<LoadTask>::alloc();
    return new (ptr) LoadTask(name, needed_by);
  }

  const char* get_name() const {
    return name_;
  }

  soinfo* get_needed_by() const {
    return needed_by_;
  }
 private:
  LoadTask(const char* name, soinfo* needed_by)
    : name_(name), needed_by_(needed_by) {}

  const char* name_;
  soinfo* needed_by_;

  DISALLOW_IMPLICIT_CONSTRUCTORS(LoadTask);
};

LoadTask::deleter_t LoadTask::deleter;

// Linked-list types backed by the linker's block allocators.
template <typename T>
using linked_list_t = LinkedList<T, TypeBasedAllocator<LinkedListEntry<T>>>;

typedef linked_list_t<soinfo> SoinfoLinkedList;
typedef linked_list_t<const char> StringLinkedList;
typedef linked_list_t<LoadTask> LoadTaskList;
683 // This function walks down the tree of soinfo dependencies
684 // in breadth-first order and
685 // * calls action(soinfo* si) for each node, and
686 // * terminates walk if action returns false.
687 //
688 // walk_dependencies_tree returns false if walk was terminated
689 // by the action and true otherwise.
690 template<typename F>
691 static bool walk_dependencies_tree(soinfo* root_soinfos[], size_t root_soinfos_size, F action) {
692 SoinfoLinkedList visit_list;
693 SoinfoLinkedList visited;
695 for (size_t i = 0; i < root_soinfos_size; ++i) {
696 visit_list.push_back(root_soinfos[i]);
697 }
699 soinfo* si;
700 while ((si = visit_list.pop_front()) != nullptr) {
701 if (visited.contains(si)) {
702 continue;
703 }
705 if (!action(si)) {
706 return false;
707 }
709 visited.push_back(si);
711 si->get_children().for_each([&](soinfo* child) {
712 visit_list.push_back(child);
713 });
714 }
716 return true;
717 }
720 // This is used by dlsym(3). It performs symbol lookup only within the
721 // specified soinfo object and its dependencies in breadth first order.
722 ElfW(Sym)* dlsym_handle_lookup(soinfo* si, soinfo** found, const char* name) {
723 ElfW(Sym)* result = nullptr;
724 SymbolName symbol_name(name);
727 walk_dependencies_tree(&si, 1, [&](soinfo* current_soinfo) {
728 result = current_soinfo->find_symbol_by_name(symbol_name);
729 if (result != nullptr) {
730 *found = current_soinfo;
731 return false;
732 }
734 return true;
735 });
737 return result;
738 }
740 /* This is used by dlsym(3) to performs a global symbol lookup. If the
741 start value is null (for RTLD_DEFAULT), the search starts at the
742 beginning of the global solist. Otherwise the search starts at the
743 specified soinfo (for RTLD_NEXT).
744 */
745 ElfW(Sym)* dlsym_linear_lookup(const char* name, soinfo** found, soinfo* start) {
746 SymbolName symbol_name(name);
748 if (start == nullptr) {
749 start = solist;
750 }
752 ElfW(Sym)* s = nullptr;
753 for (soinfo* si = start; (s == nullptr) && (si != nullptr); si = si->next) {
754 if ((si->get_rtld_flags() & RTLD_GLOBAL) == 0) {
755 continue;
756 }
758 s = si->find_symbol_by_name(symbol_name);
759 if (s != nullptr) {
760 *found = si;
761 break;
762 }
763 }
765 if (s != nullptr) {
766 TRACE_TYPE(LOOKUP, "%s s->st_value = %p, found->base = %p",
767 name, reinterpret_cast<void*>(s->st_value), reinterpret_cast<void*>((*found)->base));
768 }
770 return s;
771 }
773 soinfo* find_containing_library(const void* p) {
774 ElfW(Addr) address = reinterpret_cast<ElfW(Addr)>(p);
775 for (soinfo* si = solist; si != nullptr; si = si->next) {
776 if (address >= si->base && address - si->base < si->size) {
777 return si;
778 }
779 }
780 return nullptr;
781 }
783 ElfW(Sym)* soinfo::find_symbol_by_address(const void* addr) {
784 return is_gnu_hash() ? gnu_addr_lookup(addr) : elf_addr_lookup(addr);
785 }
787 static bool symbol_matches_soaddr(const ElfW(Sym)* sym, ElfW(Addr) soaddr) {
788 return sym->st_shndx != SHN_UNDEF &&
789 soaddr >= sym->st_value &&
790 soaddr < sym->st_value + sym->st_size;
791 }
// Find a defined symbol whose range covers |addr|, walking every GNU hash
// chain (there is no faster way to do an address lookup in this format).
ElfW(Sym)* soinfo::gnu_addr_lookup(const void* addr) {
  // Convert to an address relative to this library's mapping base.
  ElfW(Addr) soaddr = reinterpret_cast<ElfW(Addr)>(addr) - base;

  for (size_t i = 0; i < nbucket_; ++i) {
    uint32_t n = bucket_[i];

    if (n == 0) {
      continue;
    }

    do {
      ElfW(Sym)* sym = symtab_ + n;
      if (symbol_matches_soaddr(sym, soaddr)) {
        return sym;
      }
      // The low bit of a chain entry marks the end of that chain.
    } while ((chain_[n++] & 1) == 0);
  }

  return nullptr;
}
814 ElfW(Sym)* soinfo::elf_addr_lookup(const void* addr) {
815 ElfW(Addr) soaddr = reinterpret_cast<ElfW(Addr)>(addr) - base;
817 // Search the library's symbol table for any defined symbol which
818 // contains this address.
819 for (size_t i = 0; i < nchain_; ++i) {
820 ElfW(Sym)* sym = symtab_ + i;
821 if (symbol_matches_soaddr(sym, soaddr)) {
822 return sym;
823 }
824 }
826 return nullptr;
827 }
829 static int open_library_on_path(const char* name, const char* const paths[]) {
830 char buf[512];
831 for (size_t i = 0; paths[i] != nullptr; ++i) {
832 int n = __libc_format_buffer(buf, sizeof(buf), "%s/%s", paths[i], name);
833 if (n < 0 || n >= static_cast<int>(sizeof(buf))) {
834 PRINT("Warning: ignoring very long library path: %s/%s", paths[i], name);
835 continue;
836 }
837 int fd = TEMP_FAILURE_RETRY(open(buf, O_RDONLY | O_CLOEXEC));
838 if (fd != -1) {
839 return fd;
840 }
841 }
842 return -1;
843 }
845 static int open_library(const char* name) {
846 TRACE("[ opening %s ]", name);
848 // If the name contains a slash, we should attempt to open it directly and not search the paths.
849 if (strchr(name, '/') != nullptr) {
850 int fd = TEMP_FAILURE_RETRY(open(name, O_RDONLY | O_CLOEXEC));
851 if (fd != -1) {
852 return fd;
853 }
854 // ...but nvidia binary blobs (at least) rely on this behavior, so fall through for now.
855 #if defined(__LP64__)
856 return -1;
857 #endif
858 }
860 // Otherwise we try LD_LIBRARY_PATH first, and fall back to the built-in well known paths.
861 int fd = open_library_on_path(name, g_ld_library_paths);
862 if (fd == -1) {
863 fd = open_library_on_path(name, kDefaultLdPaths);
864 }
865 return fd;
866 }
868 template<typename F>
869 static void for_each_dt_needed(const soinfo* si, F action) {
870 for (ElfW(Dyn)* d = si->dynamic; d->d_tag != DT_NULL; ++d) {
871 if (d->d_tag == DT_NEEDED) {
872 action(si->get_string(d->d_un.d_val));
873 }
874 }
875 }
// Load the ELF file |name| (or the fd supplied via |extinfo|), create its
// soinfo, and queue LoadTasks for its DT_NEEDED entries. Returns the
// existing soinfo when the same file (dev/inode/offset) is already mapped.
static soinfo* load_library(LoadTaskList& load_tasks, const char* name, int rtld_flags, const android_dlextinfo* extinfo) {
  int fd = -1;
  off64_t file_offset = 0;
  // Closes fd on exit -- but only when we opened it ourselves below; a
  // caller-provided ANDROID_DLEXT_USE_LIBRARY_FD stays open.
  ScopedFd file_guard(-1);

  if (extinfo != nullptr && (extinfo->flags & ANDROID_DLEXT_USE_LIBRARY_FD) != 0) {
    fd = extinfo->library_fd;
    if ((extinfo->flags & ANDROID_DLEXT_USE_LIBRARY_FD_OFFSET) != 0) {
      file_offset = extinfo->library_fd_offset;
    }
  } else {
    // Open the file.
    fd = open_library(name);
    if (fd == -1) {
      DL_ERR("library \"%s\" not found", name);
      return nullptr;
    }

    file_guard.reset(fd);
  }

  // mmap needs a page-aligned, non-negative offset.
  if ((file_offset % PAGE_SIZE) != 0) {
    DL_ERR("file offset for the library \"%s\" is not page-aligned: %" PRId64, name, file_offset);
    return nullptr;
  }
  if (file_offset < 0) {
    DL_ERR("file offset for the library \"%s\" is negative: %" PRId64, name, file_offset);
    return nullptr;
  }

  struct stat file_stat;
  if (TEMP_FAILURE_RETRY(fstat(fd, &file_stat)) != 0) {
    DL_ERR("unable to stat file for the library \"%s\": %s", name, strerror(errno));
    return nullptr;
  }
  if (file_offset >= file_stat.st_size) {
    DL_ERR("file offset for the library \"%s\" >= file size: %" PRId64 " >= %" PRId64, name, file_offset, file_stat.st_size);
    return nullptr;
  }

  // Check for symlink and other situations where
  // file can have different names.
  for (soinfo* si = solist; si != nullptr; si = si->next) {
    if (si->get_st_dev() != 0 &&
        si->get_st_ino() != 0 &&
        si->get_st_dev() == file_stat.st_dev &&
        si->get_st_ino() == file_stat.st_ino &&
        si->get_file_offset() == file_offset) {
      TRACE("library \"%s\" is already loaded under different name/path \"%s\" - will return existing soinfo", name, si->name);
      return si;
    }
  }

  if ((rtld_flags & RTLD_NOLOAD) != 0) {
    DL_ERR("library \"%s\" wasn't loaded and RTLD_NOLOAD prevented it", name);
    return nullptr;
  }

  // Read the ELF header and load the segments.
  ElfReader elf_reader(name, fd, file_offset);
  if (!elf_reader.Load(extinfo)) {
    return nullptr;
  }

  soinfo* si = soinfo_alloc(SEARCH_NAME(name), &file_stat, file_offset, rtld_flags);
  if (si == nullptr) {
    return nullptr;
  }
  // Record the mapping ElfReader established.
  si->base = elf_reader.load_start();
  si->size = elf_reader.load_size();
  si->load_bias = elf_reader.load_bias();
  si->phnum = elf_reader.phdr_count();
  si->phdr = elf_reader.loaded_phdr();

  if (!si->prelink_image()) {
    soinfo_free(si);
    return nullptr;
  }

  // Queue this library's dependencies for the breadth-first load driven
  // by find_libraries.
  for_each_dt_needed(si, [&] (const char* name) {
    load_tasks.push_back(LoadTask::create(name, si));
  });

  return si;
}
963 static soinfo *find_loaded_library_by_name(const char* name) {
964 const char* search_name = SEARCH_NAME(name);
965 for (soinfo* si = solist; si != nullptr; si = si->next) {
966 if (!strcmp(search_name, si->name)) {
967 return si;
968 }
969 }
970 return nullptr;
971 }
973 static soinfo* find_library_internal(LoadTaskList& load_tasks, const char* name, int rtld_flags, const android_dlextinfo* extinfo) {
975 soinfo* si = find_loaded_library_by_name(name);
977 // Library might still be loaded, the accurate detection
978 // of this fact is done by load_library.
979 if (si == nullptr) {
980 TRACE("[ '%s' has not been found by name. Trying harder...]", name);
981 si = load_library(load_tasks, name, rtld_flags, extinfo);
982 }
984 return si;
985 }
987 static void soinfo_unload(soinfo* si);
989 static bool is_recursive(soinfo* si, soinfo* parent) {
990 if (parent == nullptr) {
991 return false;
992 }
994 if (si == parent) {
995 DL_ERR("recursive link to \"%s\"", si->name);
996 return true;
997 }
999 return !parent->get_parents().visit([&](soinfo* grandparent) {
1000 return !is_recursive(si, grandparent);
1001 });
1002 }
1004 // TODO: this is slightly unusual way to construct
1005 // the global group for relocation. Not every RTLD_GLOBAL
1006 // library is included in this group for backwards-compatibility
1007 // reasons.
1008 //
1009 // This group consists of the main executable, LD_PRELOADs
1010 // and libraries with the DF_1_GLOBAL flag set.
1011 static soinfo::soinfo_list_t make_global_group() {
1012 soinfo::soinfo_list_t global_group;
1013 for (soinfo* si = somain; si != nullptr; si = si->next) {
1014 if ((si->get_dt_flags_1() & DF_1_GLOBAL) != 0) {
1015 global_group.push_back(si);
1016 }
1017 }
1019 return global_group;
1020 }
// Load |library_names| and, transitively, everything they DT_NEED, in
// breadth-first order, then link the whole local group. On any failure,
// every library loaded by this call is unloaded again (failure_guard).
static bool find_libraries(soinfo* start_with, const char* const library_names[], size_t library_names_count, soinfo* soinfos[],
    soinfo* ld_preloads[], size_t ld_preloads_count, int rtld_flags, const android_dlextinfo* extinfo) {
  // Step 0: prepare.
  LoadTaskList load_tasks;
  for (size_t i = 0; i < library_names_count; ++i) {
    const char* name = library_names[i];
    load_tasks.push_back(LoadTask::create(name, start_with));
  }

  // Construct global_group.
  soinfo::soinfo_list_t global_group = make_global_group();

  // If soinfos array is null allocate one on stack.
  // The array is needed in case of failure; for example
  // when library_names[] = {libone.so, libtwo.so} and libone.so
  // is loaded correctly but libtwo.so failed for some reason.
  // In this case libone.so should be unloaded on return.
  // See also implementation of failure_guard below.

  if (soinfos == nullptr) {
    size_t soinfos_size = sizeof(soinfo*)*library_names_count;
    soinfos = reinterpret_cast<soinfo**>(alloca(soinfos_size));
    memset(soinfos, 0, soinfos_size);
  }

  // list of libraries to link - see step 2.
  size_t soinfos_count = 0;

  // Runs on every early return below; disabled only after linking succeeds.
  auto failure_guard = make_scope_guard([&]() {
    // Housekeeping
    load_tasks.for_each([] (LoadTask* t) {
      LoadTask::deleter(t);
    });

    for (size_t i = 0; i<soinfos_count; ++i) {
      soinfo_unload(soinfos[i]);
    }
  });

  // Step 1: load and pre-link all DT_NEEDED libraries in breadth first order.
  // (load_library appends each library's own DT_NEEDED entries to load_tasks.)
  for (LoadTask::unique_ptr task(load_tasks.pop_front()); task.get() != nullptr; task.reset(load_tasks.pop_front())) {
    soinfo* si = find_library_internal(load_tasks, task->get_name(), rtld_flags, extinfo);
    if (si == nullptr) {
      return false;
    }

    soinfo* needed_by = task->get_needed_by();

    if (is_recursive(si, needed_by)) {
      return false;
    }

    si->ref_count++;
    if (needed_by != nullptr) {
      needed_by->add_child(si);
    }

    // When ld_preloads is not null, the first
    // ld_preloads_count libs are in fact ld_preloads.
    if (ld_preloads != nullptr && soinfos_count < ld_preloads_count) {
      // Add LD_PRELOADed libraries to the global group for future runs.
      // There is no need to explicitly add them to the global group
      // for this run because they are going to appear in the local
      // group in the correct order.
      si->set_dt_flags_1(si->get_dt_flags_1() | DF_1_GLOBAL);
      ld_preloads[soinfos_count] = si;
    }

    if (soinfos_count < library_names_count) {
      soinfos[soinfos_count++] = si;
    }
  }

  // Step 2: link libraries.
  soinfo::soinfo_list_t local_group;
  walk_dependencies_tree(
      start_with == nullptr ? soinfos : &start_with,
      start_with == nullptr ? soinfos_count : 1,
      [&] (soinfo* si) {
        local_group.push_back(si);
        return true;
      });

  bool linked = local_group.visit([&](soinfo* si) {
    if ((si->flags & FLAG_LINKED) == 0) {
      if (!si->link_image(global_group, local_group, extinfo)) {
        return false;
      }
      si->flags |= FLAG_LINKED;
    }

    return true;
  });

  if (linked) {
    // Success: keep everything loaded.
    failure_guard.disable();
  }

  return linked;
}
1123 static soinfo* find_library(const char* name, int rtld_flags, const android_dlextinfo* extinfo) {
1124 if (name == nullptr) {
1125 somain->ref_count++;
1126 return somain;
1127 }
1129 soinfo* si;
1131 if (!find_libraries(nullptr, &name, 1, &si, nullptr, 0, rtld_flags, extinfo)) {
1132 return nullptr;
1133 }
1135 return si;
1136 }
// Drop one reference to 'si'. When the last reference goes away, run its
// destructors, recursively unload its DT_NEEDED dependencies, notify gdb,
// and free the soinfo. Libraries whose rtld flags include RTLD_NODELETE or
// RTLD_GLOBAL are never unloaded (can_unload() returns false for them).
static void soinfo_unload(soinfo* si) {
  if (!si->can_unload()) {
    TRACE("not unloading '%s' - the binary is flagged with NODELETE", si->name);
    return;
  }

  if (si->ref_count == 1) {
    TRACE("unloading '%s'", si->name);
    si->call_destructors();

    if (si->has_min_version(0)) {
      // New-format soinfo: the dependency edges are tracked explicitly, so
      // detach each child from the list and unload it in turn.
      soinfo* child = nullptr;
      while ((child = si->get_children().pop_front()) != nullptr) {
        TRACE("%s needs to unload %s", si->name, child->name);
        soinfo_unload(child);
      }
    } else {
      // Old-format soinfo: no child list. Re-resolve every DT_NEEDED entry
      // with RTLD_NOLOAD (so nothing new is mapped) and unload what we find.
      for_each_dt_needed(si, [&] (const char* library_name) {
        TRACE("deprecated (old format of soinfo): %s needs to unload %s", si->name, library_name);
        soinfo* needed = find_library(library_name, RTLD_NOLOAD, nullptr);
        if (needed != nullptr) {
          soinfo_unload(needed);
        } else {
          // Not found: for example if symlink was deleted between dlopen and dlclose
          // Since we cannot really handle errors at this point - print and continue.
          PRINT("warning: couldn't find %s needed by %s on unload.", library_name, si->name);
        }
      });
    }

    notify_gdb_of_unload(si);
    si->ref_count = 0;
    soinfo_free(si);
  } else {
    // Other references remain; just decrement.
    si->ref_count--;
    TRACE("not unloading '%s', decrementing ref_count to %zd", si->name, si->ref_count);
  }
}
1177 void do_android_get_LD_LIBRARY_PATH(char* buffer, size_t buffer_size) {
1178 // Use basic string manipulation calls to avoid snprintf.
1179 // snprintf indirectly calls pthread_getspecific to get the size of a buffer.
1180 // When debug malloc is enabled, this call returns 0. This in turn causes
1181 // snprintf to do nothing, which causes libraries to fail to load.
1182 // See b/17302493 for further details.
1183 // Once the above bug is fixed, this code can be modified to use
1184 // snprintf again.
1185 size_t required_len = strlen(kDefaultLdPaths[0]) + strlen(kDefaultLdPaths[1]) + 2;
1186 if (buffer_size < required_len) {
1187 __libc_fatal("android_get_LD_LIBRARY_PATH failed, buffer too small: buffer len %zu, required len %zu",
1188 buffer_size, required_len);
1189 }
1190 char* end = stpcpy(buffer, kDefaultLdPaths[0]);
1191 *end = ':';
1192 strcpy(end + 1, kDefaultLdPaths[1]);
1193 }
1195 void do_android_update_LD_LIBRARY_PATH(const char* ld_library_path) {
1196 if (!get_AT_SECURE()) {
1197 parse_LD_LIBRARY_PATH(ld_library_path);
1198 }
1199 }
1201 soinfo* do_dlopen(const char* name, int flags, const android_dlextinfo* extinfo) {
1202 if ((flags & ~(RTLD_NOW|RTLD_LAZY|RTLD_LOCAL|RTLD_GLOBAL|RTLD_NODELETE|RTLD_NOLOAD)) != 0) {
1203 DL_ERR("invalid flags to dlopen: %x", flags);
1204 return nullptr;
1205 }
1206 if (extinfo != nullptr) {
1207 if ((extinfo->flags & ~(ANDROID_DLEXT_VALID_FLAG_BITS)) != 0) {
1208 DL_ERR("invalid extended flags to android_dlopen_ext: 0x%" PRIx64, extinfo->flags);
1209 return nullptr;
1210 }
1211 if ((extinfo->flags & ANDROID_DLEXT_USE_LIBRARY_FD) == 0 &&
1212 (extinfo->flags & ANDROID_DLEXT_USE_LIBRARY_FD_OFFSET) != 0) {
1213 DL_ERR("invalid extended flag combination (ANDROID_DLEXT_USE_LIBRARY_FD_OFFSET without ANDROID_DLEXT_USE_LIBRARY_FD): 0x%" PRIx64, extinfo->flags);
1214 return nullptr;
1215 }
1216 }
1217 protect_data(PROT_READ | PROT_WRITE);
1218 soinfo* si = find_library(name, flags, extinfo);
1219 if (si != nullptr) {
1220 si->call_constructors();
1221 }
1222 protect_data(PROT_READ);
1223 return si;
1224 }
1226 void do_dlclose(soinfo* si) {
1227 protect_data(PROT_READ | PROT_WRITE);
1228 soinfo_unload(si);
1229 protect_data(PROT_READ);
1230 }
1232 static ElfW(Addr) call_ifunc_resolver(ElfW(Addr) resolver_addr) {
1233 typedef ElfW(Addr) (*ifunc_resolver_t)(void);
1234 ifunc_resolver_t ifunc_resolver = reinterpret_cast<ifunc_resolver_t>(resolver_addr);
1235 ElfW(Addr) ifunc_addr = ifunc_resolver();
1236 TRACE_TYPE(RELO, "Called ifunc_resolver@%p. The result is %p", ifunc_resolver, reinterpret_cast<void*>(ifunc_addr));
1238 return ifunc_addr;
1239 }
1241 #if defined(USE_RELA)
// Apply 'count' DT_RELA-style relocation entries (explicit addend) starting
// at 'rela' to this library's loaded image. Symbols are resolved through
// soinfo_do_lookup() against 'global_group' then 'local_group'.
// Returns 0 on success, -1 (with DL_ERR set) on any failure.
int soinfo::relocate(ElfW(Rela)* rela, unsigned count, const soinfo_list_t& global_group, const soinfo_list_t& local_group) {
  for (size_t idx = 0; idx < count; ++idx, ++rela) {
    unsigned type = ELFW(R_TYPE)(rela->r_info);
    unsigned sym = ELFW(R_SYM)(rela->r_info);
    // Run-time address of the word being patched.
    ElfW(Addr) reloc = static_cast<ElfW(Addr)>(rela->r_offset + load_bias);
    ElfW(Addr) sym_addr = 0;
    const char* sym_name = nullptr;

    DEBUG("Processing '%s' relocation at index %zd", name, idx);
    if (type == 0) { // R_*_NONE
      continue;
    }

    ElfW(Sym)* s = nullptr;
    soinfo* lsi = nullptr;  // set by soinfo_do_lookup to the defining library

    if (sym != 0) {
      sym_name = get_string(symtab_[sym].st_name);
      s = soinfo_do_lookup(this, sym_name, &lsi, global_group,local_group);
      if (s == nullptr) {
        // We only allow an undefined symbol if this is a weak reference...
        s = &symtab_[sym];
        if (ELF_ST_BIND(s->st_info) != STB_WEAK) {
          DL_ERR("cannot locate symbol \"%s\" referenced by \"%s\"...", sym_name, name);
          return -1;
        }

        /* IHI0044C AAELF 4.5.1.1:

           Libraries are not searched to resolve weak references.
           It is not an error for a weak reference to remain unsatisfied.

           During linking, the value of an undefined weak reference is:
           - Zero if the relocation type is absolute
           - The address of the place if the relocation is pc-relative
           - The address of nominal base address if the relocation
             type is base-relative.
        */

        switch (type) {
#if defined(__aarch64__)
          case R_AARCH64_JUMP_SLOT:
          case R_AARCH64_GLOB_DAT:
          case R_AARCH64_ABS64:
          case R_AARCH64_ABS32:
          case R_AARCH64_ABS16:
          case R_AARCH64_RELATIVE:
          case R_AARCH64_IRELATIVE:
            /*
             * The sym_addr was initialized to be zero above, or the relocation
             * code below does not care about value of sym_addr.
             * No need to do anything.
             */
            break;
#elif defined(__x86_64__)
          case R_X86_64_JUMP_SLOT:
          case R_X86_64_GLOB_DAT:
          case R_X86_64_32:
          case R_X86_64_64:
          case R_X86_64_RELATIVE:
          case R_X86_64_IRELATIVE:
            // No need to do anything.
            break;
          case R_X86_64_PC32:
            // pc-relative: the value of an undefined weak ref is the address
            // of the place itself, making the relocated value zero.
            sym_addr = reloc;
            break;
#endif
          default:
            DL_ERR("unknown weak reloc type %d @ %p (%zu)", type, rela, idx);
            return -1;
        }
      } else {
        // We got a definition.
        sym_addr = lsi->resolve_symbol_address(s);
      }
      count_relocation(kRelocSymbol);
    }

    switch (type) {
#if defined(__aarch64__)
      // S = sym_addr, A = rela->r_addend, P = rela->r_offset in the comments
      // below (AAELF notation).
      case R_AARCH64_JUMP_SLOT:
        // PLT slot: *reloc = S + A.
        count_relocation(kRelocAbsolute);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO JMP_SLOT %16llx <- %16llx %s\n",
                   reloc, (sym_addr + rela->r_addend), sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = (sym_addr + rela->r_addend);
        break;
      case R_AARCH64_GLOB_DAT:
        // GOT entry: *reloc = S + A.
        count_relocation(kRelocAbsolute);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO GLOB_DAT %16llx <- %16llx %s\n",
                   reloc, (sym_addr + rela->r_addend), sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = (sym_addr + rela->r_addend);
        break;
      case R_AARCH64_ABS64:
        // 64-bit absolute: *reloc += S + A.
        count_relocation(kRelocAbsolute);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO ABS64 %16llx <- %16llx %s\n",
                   reloc, (sym_addr + rela->r_addend), sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr + rela->r_addend);
        break;
      case R_AARCH64_ABS32:
        // 32-bit absolute with overflow check: result must fit in
        // [INT32_MIN, UINT32_MAX].
        count_relocation(kRelocAbsolute);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO ABS32 %16llx <- %16llx %s\n",
                   reloc, (sym_addr + rela->r_addend), sym_name);
        if ((static_cast<ElfW(Addr)>(INT32_MIN) <= (*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend))) &&
            ((*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend)) <= static_cast<ElfW(Addr)>(UINT32_MAX))) {
          *reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr + rela->r_addend);
        } else {
          DL_ERR("0x%016llx out of range 0x%016llx to 0x%016llx",
                 (*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend)),
                 static_cast<ElfW(Addr)>(INT32_MIN),
                 static_cast<ElfW(Addr)>(UINT32_MAX));
          return -1;
        }
        break;
      case R_AARCH64_ABS16:
        // 16-bit absolute with overflow check.
        count_relocation(kRelocAbsolute);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO ABS16 %16llx <- %16llx %s\n",
                   reloc, (sym_addr + rela->r_addend), sym_name);
        if ((static_cast<ElfW(Addr)>(INT16_MIN) <= (*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend))) &&
            ((*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend)) <= static_cast<ElfW(Addr)>(UINT16_MAX))) {
          *reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr + rela->r_addend);
        } else {
          DL_ERR("0x%016llx out of range 0x%016llx to 0x%016llx",
                 (*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend)),
                 static_cast<ElfW(Addr)>(INT16_MIN),
                 static_cast<ElfW(Addr)>(UINT16_MAX));
          return -1;
        }
        break;
      case R_AARCH64_PREL64:
        // 64-bit pc-relative: *reloc += S + A - P.
        count_relocation(kRelocRelative);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO REL64 %16llx <- %16llx - %16llx %s\n",
                   reloc, (sym_addr + rela->r_addend), rela->r_offset, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr + rela->r_addend) - rela->r_offset;
        break;
      case R_AARCH64_PREL32:
        // 32-bit pc-relative with overflow check.
        count_relocation(kRelocRelative);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO REL32 %16llx <- %16llx - %16llx %s\n",
                   reloc, (sym_addr + rela->r_addend), rela->r_offset, sym_name);
        if ((static_cast<ElfW(Addr)>(INT32_MIN) <= (*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset))) &&
            ((*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)) <= static_cast<ElfW(Addr)>(UINT32_MAX))) {
          *reinterpret_cast<ElfW(Addr)*>(reloc) += ((sym_addr + rela->r_addend) - rela->r_offset);
        } else {
          DL_ERR("0x%016llx out of range 0x%016llx to 0x%016llx",
                 (*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)),
                 static_cast<ElfW(Addr)>(INT32_MIN),
                 static_cast<ElfW(Addr)>(UINT32_MAX));
          return -1;
        }
        break;
      case R_AARCH64_PREL16:
        // 16-bit pc-relative with overflow check.
        count_relocation(kRelocRelative);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO REL16 %16llx <- %16llx - %16llx %s\n",
                   reloc, (sym_addr + rela->r_addend), rela->r_offset, sym_name);
        if ((static_cast<ElfW(Addr)>(INT16_MIN) <= (*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset))) &&
            ((*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)) <= static_cast<ElfW(Addr)>(UINT16_MAX))) {
          *reinterpret_cast<ElfW(Addr)*>(reloc) += ((sym_addr + rela->r_addend) - rela->r_offset);
        } else {
          DL_ERR("0x%016llx out of range 0x%016llx to 0x%016llx",
                 (*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)),
                 static_cast<ElfW(Addr)>(INT16_MIN),
                 static_cast<ElfW(Addr)>(UINT16_MAX));
          return -1;
        }
        break;

      case R_AARCH64_RELATIVE:
        // Base-relative: *reloc = B + A. Must not reference a symbol.
        count_relocation(kRelocRelative);
        MARK(rela->r_offset);
        if (sym) {
          DL_ERR("odd RELATIVE form...");
          return -1;
        }
        TRACE_TYPE(RELO, "RELO RELATIVE %16llx <- %16llx\n",
                   reloc, (base + rela->r_addend));
        *reinterpret_cast<ElfW(Addr)*>(reloc) = (base + rela->r_addend);
        break;

      case R_AARCH64_IRELATIVE:
        // The addend points at an ifunc resolver; store what it returns.
        count_relocation(kRelocRelative);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO IRELATIVE %16llx <- %16llx\n", reloc, (base + rela->r_addend));
        *reinterpret_cast<ElfW(Addr)*>(reloc) = call_ifunc_resolver(base + rela->r_addend);
        break;

      case R_AARCH64_COPY:
        /*
         * ET_EXEC is not supported so this should not happen.
         *
         * http://infocenter.arm.com/help/topic/com.arm.doc.ihi0044d/IHI0044D_aaelf.pdf
         *
         * Section 4.7.1.10 "Dynamic relocations"
         * R_AARCH64_COPY may only appear in executable objects where e_type is
         * set to ET_EXEC.
         */
        DL_ERR("%s R_AARCH64_COPY relocations are not supported", name);
        return -1;
      case R_AARCH64_TLS_TPREL64:
        // TLS relocations are traced but not applied here.
        TRACE_TYPE(RELO, "RELO TLS_TPREL64 *** %16llx <- %16llx - %16llx\n",
                   reloc, (sym_addr + rela->r_addend), rela->r_offset);
        break;
      case R_AARCH64_TLS_DTPREL32:
        TRACE_TYPE(RELO, "RELO TLS_DTPREL32 *** %16llx <- %16llx - %16llx\n",
                   reloc, (sym_addr + rela->r_addend), rela->r_offset);
        break;
#elif defined(__x86_64__)
      case R_X86_64_JUMP_SLOT:
        // PLT slot: *reloc = S + A.
        count_relocation(kRelocAbsolute);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO JMP_SLOT %08zx <- %08zx %s", static_cast<size_t>(reloc),
                   static_cast<size_t>(sym_addr + rela->r_addend), sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend;
        break;
      case R_X86_64_GLOB_DAT:
        // GOT entry: *reloc = S + A.
        count_relocation(kRelocAbsolute);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO GLOB_DAT %08zx <- %08zx %s", static_cast<size_t>(reloc),
                   static_cast<size_t>(sym_addr + rela->r_addend), sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend;
        break;
      case R_X86_64_RELATIVE:
        // Base-relative: *reloc = B + A. Must not reference a symbol.
        count_relocation(kRelocRelative);
        MARK(rela->r_offset);
        if (sym) {
          DL_ERR("odd RELATIVE form...");
          return -1;
        }
        TRACE_TYPE(RELO, "RELO RELATIVE %08zx <- +%08zx", static_cast<size_t>(reloc),
                   static_cast<size_t>(base));
        *reinterpret_cast<ElfW(Addr)*>(reloc) = base + rela->r_addend;
        break;
      case R_X86_64_IRELATIVE:
        // The addend points at an ifunc resolver; store what it returns.
        count_relocation(kRelocRelative);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO IRELATIVE %16llx <- %16llx\n", reloc, (base + rela->r_addend));
        *reinterpret_cast<ElfW(Addr)*>(reloc) = call_ifunc_resolver(base + rela->r_addend);
        break;
      case R_X86_64_32:
        count_relocation(kRelocRelative);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO R_X86_64_32 %08zx <- +%08zx %s", static_cast<size_t>(reloc),
                   static_cast<size_t>(sym_addr), sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend;
        break;
      case R_X86_64_64:
        count_relocation(kRelocRelative);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO R_X86_64_64 %08zx <- +%08zx %s", static_cast<size_t>(reloc),
                   static_cast<size_t>(sym_addr), sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend;
        break;
      case R_X86_64_PC32:
        // pc-relative: *reloc = S + A - P.
        count_relocation(kRelocRelative);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO R_X86_64_PC32 %08zx <- +%08zx (%08zx - %08zx) %s",
                   static_cast<size_t>(reloc), static_cast<size_t>(sym_addr - reloc),
                   static_cast<size_t>(sym_addr), static_cast<size_t>(reloc), sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend - reloc;
        break;
#endif

      default:
        DL_ERR("unknown reloc type %d @ %p (%zu)", type, rela, idx);
        return -1;
    }
  }
  return 0;
}
1518 #else // REL, not RELA.
// Apply 'count' DT_REL-style relocation entries (implicit addend stored at
// the target word) starting at 'rel' to this library's loaded image.
// Symbols are resolved through soinfo_do_lookup() against 'global_group'
// then 'local_group'. Returns 0 on success, -1 (with DL_ERR set) on failure.
int soinfo::relocate(ElfW(Rel)* rel, unsigned count, const soinfo_list_t& global_group, const soinfo_list_t& local_group) {
  for (size_t idx = 0; idx < count; ++idx, ++rel) {
    unsigned type = ELFW(R_TYPE)(rel->r_info);
    // TODO: don't use unsigned for 'sym'. Use uint32_t or ElfW(Addr) instead.
    unsigned sym = ELFW(R_SYM)(rel->r_info);
    // Run-time address of the word being patched.
    ElfW(Addr) reloc = static_cast<ElfW(Addr)>(rel->r_offset + load_bias);
    ElfW(Addr) sym_addr = 0;
    const char* sym_name = nullptr;

    DEBUG("Processing '%s' relocation at index %zd", name, idx);
    if (type == 0) { // R_*_NONE
      continue;
    }

    ElfW(Sym)* s = nullptr;
    soinfo* lsi = nullptr;  // set by soinfo_do_lookup to the defining library

    if (sym != 0) {
      sym_name = get_string(symtab_[sym].st_name);
      s = soinfo_do_lookup(this, sym_name, &lsi, global_group, local_group);
      if (s == nullptr) {
        // We only allow an undefined symbol if this is a weak reference...
        s = &symtab_[sym];
        if (ELF_ST_BIND(s->st_info) != STB_WEAK) {
          DL_ERR("cannot locate symbol \"%s\" referenced by \"%s\"...", sym_name, name);
          return -1;
        }

        /* IHI0044C AAELF 4.5.1.1:

           Libraries are not searched to resolve weak references.
           It is not an error for a weak reference to remain
           unsatisfied.

           During linking, the value of an undefined weak reference is:
           - Zero if the relocation type is absolute
           - The address of the place if the relocation is pc-relative
           - The address of nominal base address if the relocation
             type is base-relative.
        */

        switch (type) {
#if defined(__arm__)
          case R_ARM_JUMP_SLOT:
          case R_ARM_GLOB_DAT:
          case R_ARM_ABS32:
          case R_ARM_RELATIVE:    /* Don't care. */
            // sym_addr was initialized to be zero above or relocation
            // code below does not care about value of sym_addr.
            // No need to do anything.
            break;
#elif defined(__i386__)
          case R_386_JMP_SLOT:
          case R_386_GLOB_DAT:
          case R_386_32:
          case R_386_RELATIVE:    /* Don't care. */
          case R_386_IRELATIVE:
            // sym_addr was initialized to be zero above or relocation
            // code below does not care about value of sym_addr.
            // No need to do anything.
            break;
          case R_386_PC32:
            // pc-relative: the value of an undefined weak ref is the address
            // of the place itself, making the relocated value zero.
            sym_addr = reloc;
            break;
#endif

#if defined(__arm__)
          case R_ARM_COPY:
            // Fall through. Can't really copy if weak symbol is not found at run-time.
#endif
          default:
            DL_ERR("unknown weak reloc type %d @ %p (%zu)", type, rel, idx);
            return -1;
        }
      } else {
        // We got a definition.
        sym_addr = lsi->resolve_symbol_address(s);
      }
      count_relocation(kRelocSymbol);
    }

    switch (type) {
#if defined(__arm__)
      case R_ARM_JUMP_SLOT:
        // PLT slot: *reloc = S.
        count_relocation(kRelocAbsolute);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO JMP_SLOT %08x <- %08x %s", reloc, sym_addr, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr;
        break;
      case R_ARM_GLOB_DAT:
        // GOT entry: *reloc = S.
        count_relocation(kRelocAbsolute);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO GLOB_DAT %08x <- %08x %s", reloc, sym_addr, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr;
        break;
      case R_ARM_ABS32:
        // Absolute with implicit addend: *reloc += S.
        count_relocation(kRelocAbsolute);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO ABS %08x <- %08x %s", reloc, sym_addr, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) += sym_addr;
        break;
      case R_ARM_REL32:
        // pc-relative: *reloc += S - P.
        count_relocation(kRelocRelative);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO REL32 %08x <- %08x - %08x %s",
                   reloc, sym_addr, rel->r_offset, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) += sym_addr - rel->r_offset;
        break;
      case R_ARM_COPY:
        /*
         * ET_EXEC is not supported so this should not happen.
         *
         * http://infocenter.arm.com/help/topic/com.arm.doc.ihi0044d/IHI0044D_aaelf.pdf
         *
         * Section 4.7.1.10 "Dynamic relocations"
         * R_ARM_COPY may only appear in executable objects where e_type is
         * set to ET_EXEC.
         */
        DL_ERR("%s R_ARM_COPY relocations are not supported", name);
        return -1;
#elif defined(__i386__)
      case R_386_JMP_SLOT:
        count_relocation(kRelocAbsolute);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO JMP_SLOT %08x <- %08x %s", reloc, sym_addr, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr;
        break;
      case R_386_GLOB_DAT:
        count_relocation(kRelocAbsolute);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO GLOB_DAT %08x <- %08x %s", reloc, sym_addr, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr;
        break;
      case R_386_32:
        count_relocation(kRelocRelative);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO R_386_32 %08x <- +%08x %s", reloc, sym_addr, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) += sym_addr;
        break;
      case R_386_PC32:
        // pc-relative: *reloc += S - P.
        count_relocation(kRelocRelative);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO R_386_PC32 %08x <- +%08x (%08x - %08x) %s",
                   reloc, (sym_addr - reloc), sym_addr, reloc, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr - reloc);
        break;
#elif defined(__mips__)
      case R_MIPS_REL32:
#if defined(__LP64__)
        // MIPS Elf64_Rel entries contain compound relocations
        // We only handle the R_MIPS_NONE|R_MIPS_64|R_MIPS_REL32 case
        if (ELF64_R_TYPE2(rel->r_info) != R_MIPS_64 ||
            ELF64_R_TYPE3(rel->r_info) != R_MIPS_NONE) {
          DL_ERR("Unexpected compound relocation type:%d type2:%d type3:%d @ %p (%zu)",
                 type, (unsigned)ELF64_R_TYPE2(rel->r_info),
                 (unsigned)ELF64_R_TYPE3(rel->r_info), rel, idx);
          return -1;
        }
#endif
        count_relocation(kRelocAbsolute);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO REL32 %08zx <- %08zx %s", static_cast<size_t>(reloc),
                   static_cast<size_t>(sym_addr), sym_name ? sym_name : "*SECTIONHDR*");
        if (s) {
          // Defined symbol: add its address.
          *reinterpret_cast<ElfW(Addr)*>(reloc) += sym_addr;
        } else {
          // Section-relative: rebase by the load address.
          *reinterpret_cast<ElfW(Addr)*>(reloc) += base;
        }
        break;
#endif

#if defined(__arm__)
      case R_ARM_RELATIVE:
#elif defined(__i386__)
      case R_386_RELATIVE:
#endif
        // Base-relative: *reloc += B. Must not reference a symbol.
        // NOTE(review): on targets where neither case label above is
        // compiled in (e.g. mips), this code has no label and is unreachable.
        count_relocation(kRelocRelative);
        MARK(rel->r_offset);
        if (sym) {
          DL_ERR("odd RELATIVE form...");
          return -1;
        }
        TRACE_TYPE(RELO, "RELO RELATIVE %p <- +%p",
                   reinterpret_cast<void*>(reloc), reinterpret_cast<void*>(base));
        *reinterpret_cast<ElfW(Addr)*>(reloc) += base;
        break;
#if defined(__i386__)
      case R_386_IRELATIVE:
        // The target word holds the resolver's unrelocated address;
        // rebase it, call it, and store the result.
        count_relocation(kRelocRelative);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO IRELATIVE %p <- %p", reinterpret_cast<void*>(reloc), reinterpret_cast<void*>(base));
        *reinterpret_cast<ElfW(Addr)*>(reloc) = call_ifunc_resolver(base + *reinterpret_cast<ElfW(Addr)*>(reloc));
        break;
#endif

      default:
        DL_ERR("unknown reloc type %d @ %p (%zu)", type, rel, idx);
        return -1;
    }
  }
  return 0;
}
1721 #endif
1723 #if defined(__mips__)
// MIPS libraries are relocated through the GOT rather than individual
// JUMP_SLOT/GLOB_DAT entries: rebase the local GOT entries by load_bias and
// resolve the global entries (symbols mips_gotsym_..mips_symtabno_) via
// soinfo_do_lookup. Returns false if a required (non-weak) symbol cannot
// be located.
bool soinfo::mips_relocate_got(const soinfo_list_t& global_group, const soinfo_list_t& local_group) {
  ElfW(Addr)** got = plt_got_;
  if (got == nullptr) {
    return true;
  }

  // got[0] is the address of the lazy resolver function.
  // got[1] may be used for a GNU extension.
  // Set it to a recognizable address in case someone calls it (should be _rtld_bind_start).
  // FIXME: maybe this should be in a separate routine?
  if ((flags & FLAG_LINKER) == 0) {
    size_t g = 0;
    got[g++] = reinterpret_cast<ElfW(Addr)*>(0xdeadbeef);
    // A negative (high-bit set) got[1] marks the GNU extension slot.
    if (reinterpret_cast<intptr_t>(got[g]) < 0) {
      got[g++] = reinterpret_cast<ElfW(Addr)*>(0xdeadfeed);
    }
    // Relocate the local GOT entries.
    for (; g < mips_local_gotno_; g++) {
      got[g] = reinterpret_cast<ElfW(Addr)*>(reinterpret_cast<uintptr_t>(got[g]) + load_bias);
    }
  }

  // Now for the global GOT entries...
  ElfW(Sym)* sym = symtab_ + mips_gotsym_;
  got = plt_got_ + mips_local_gotno_;
  for (size_t g = mips_gotsym_; g < mips_symtabno_; g++, sym++, got++) {
    // This is an undefined reference... try to locate it.
    const char* sym_name = get_string(sym->st_name);
    soinfo* lsi = nullptr;
    ElfW(Sym)* s = soinfo_do_lookup(this, sym_name, &lsi, global_group, local_group);
    if (s == nullptr) {
      // We only allow an undefined symbol if this is a weak reference.
      s = &symtab_[g];
      if (ELF_ST_BIND(s->st_info) != STB_WEAK) {
        DL_ERR("cannot locate \"%s\"...", sym_name);
        return false;
      }
      // Undefined weak reference resolves to zero.
      *got = 0;
    } else {
      // FIXME: is this sufficient?
      // For reference see NetBSD link loader
      // http://cvsweb.netbsd.org/bsdweb.cgi/src/libexec/ld.elf_so/arch/mips/mips_reloc.c?rev=1.53&content-type=text/x-cvsweb-markup
      *got = reinterpret_cast<ElfW(Addr)*>(lsi->resolve_symbol_address(s));
    }
  }
  return true;
}
1771 #endif
1773 void soinfo::call_array(const char* array_name __unused, linker_function_t* functions, size_t count, bool reverse) {
1774 if (functions == nullptr) {
1775 return;
1776 }
1778 TRACE("[ Calling %s (size %zd) @ %p for '%s' ]", array_name, count, functions, name);
1780 int begin = reverse ? (count - 1) : 0;
1781 int end = reverse ? -1 : count;
1782 int step = reverse ? -1 : 1;
1784 for (int i = begin; i != end; i += step) {
1785 TRACE("[ %s[%d] == %p ]", array_name, i, functions[i]);
1786 call_function("function", functions[i]);
1787 }
1789 TRACE("[ Done calling %s for '%s' ]", array_name, name);
1790 }
1792 void soinfo::call_function(const char* function_name __unused, linker_function_t function) {
1793 if (function == nullptr || reinterpret_cast<uintptr_t>(function) == static_cast<uintptr_t>(-1)) {
1794 return;
1795 }
1797 TRACE("[ Calling %s @ %p for '%s' ]", function_name, function, name);
1798 function();
1799 TRACE("[ Done calling %s @ %p for '%s' ]", function_name, function, name);
1801 // The function may have called dlopen(3) or dlclose(3), so we need to ensure our data structures
1802 // are still writable. This happens with our debug malloc (see http://b/7941716).
1803 protect_data(PROT_READ | PROT_WRITE);
1804 }
1806 void soinfo::call_pre_init_constructors() {
1807 // DT_PREINIT_ARRAY functions are called before any other constructors for executables,
1808 // but ignored in a shared library.
1809 call_array("DT_PREINIT_ARRAY", preinit_array_, preinit_array_count_, false);
1810 }
1812 void soinfo::call_constructors() {
1813 if (constructors_called) {
1814 return;
1815 }
1817 // We set constructors_called before actually calling the constructors, otherwise it doesn't
1818 // protect against recursive constructor calls. One simple example of constructor recursion
1819 // is the libc debug malloc, which is implemented in libc_malloc_debug_leak.so:
1820 // 1. The program depends on libc, so libc's constructor is called here.
1821 // 2. The libc constructor calls dlopen() to load libc_malloc_debug_leak.so.
1822 // 3. dlopen() calls the constructors on the newly created
1823 // soinfo for libc_malloc_debug_leak.so.
1824 // 4. The debug .so depends on libc, so CallConstructors is
1825 // called again with the libc soinfo. If it doesn't trigger the early-
1826 // out above, the libc constructor will be called again (recursively!).
1827 constructors_called = true;
1829 if ((flags & FLAG_EXE) == 0 && preinit_array_ != nullptr) {
1830 // The GNU dynamic linker silently ignores these, but we warn the developer.
1831 PRINT("\"%s\": ignoring %zd-entry DT_PREINIT_ARRAY in shared library!",
1832 name, preinit_array_count_);
1833 }
1835 get_children().for_each([] (soinfo* si) {
1836 si->call_constructors();
1837 });
1839 TRACE("\"%s\": calling constructors", name);
1841 // DT_INIT should be called before DT_INIT_ARRAY if both are present.
1842 call_function("DT_INIT", init_func_);
1843 call_array("DT_INIT_ARRAY", init_array_, init_array_count_, false);
1844 }
1846 void soinfo::call_destructors() {
1847 if (!constructors_called) {
1848 return;
1849 }
1850 TRACE("\"%s\": calling destructors", name);
1852 // DT_FINI_ARRAY must be parsed in reverse order.
1853 call_array("DT_FINI_ARRAY", fini_array_, fini_array_count_, true);
1855 // DT_FINI should be called after DT_FINI_ARRAY if both are present.
1856 call_function("DT_FINI", fini_func_);
1858 // This is needed on second call to dlopen
1859 // after library has been unloaded with RTLD_NODELETE
1860 constructors_called = false;
1861 }
1863 void soinfo::add_child(soinfo* child) {
1864 if (has_min_version(0)) {
1865 child->parents_.push_back(this);
1866 this->children_.push_back(child);
1867 }
1868 }
1870 void soinfo::remove_all_links() {
1871 if (!has_min_version(0)) {
1872 return;
1873 }
1875 // 1. Untie connected soinfos from 'this'.
1876 children_.for_each([&] (soinfo* child) {
1877 child->parents_.remove_if([&] (const soinfo* parent) {
1878 return parent == this;
1879 });
1880 });
1882 parents_.for_each([&] (soinfo* parent) {
1883 parent->children_.remove_if([&] (const soinfo* child) {
1884 return child == this;
1885 });
1886 });
1888 // 2. Once everything untied - clear local lists.
1889 parents_.clear();
1890 children_.clear();
1891 }
1893 dev_t soinfo::get_st_dev() const {
1894 if (has_min_version(0)) {
1895 return st_dev_;
1896 }
1898 return 0;
1899 };
1901 ino_t soinfo::get_st_ino() const {
1902 if (has_min_version(0)) {
1903 return st_ino_;
1904 }
1906 return 0;
1907 }
1909 off64_t soinfo::get_file_offset() const {
1910 if (has_min_version(1)) {
1911 return file_offset_;
1912 }
1914 return 0;
1915 }
1917 uint32_t soinfo::get_rtld_flags() const {
1918 if (has_min_version(1)) {
1919 return rtld_flags_;
1920 }
1922 return 0;
1923 }
1925 uint32_t soinfo::get_dt_flags_1() const {
1926 if (has_min_version(1)) {
1927 return dt_flags_1_;
1928 }
1930 return 0;
1931 }
1932 void soinfo::set_dt_flags_1(uint32_t dt_flags_1) {
1933 if (has_min_version(1)) {
1934 if ((dt_flags_1 & DF_1_GLOBAL) != 0) {
1935 rtld_flags_ |= RTLD_GLOBAL;
1936 }
1938 if ((dt_flags_1 & DF_1_NODELETE) != 0) {
1939 rtld_flags_ |= RTLD_NODELETE;
1940 }
1942 dt_flags_1_ = dt_flags_1;
1943 }
1944 }
// This is the list returned by get_children()/get_parents() when
// 'this->flags' does not have FLAG_NEW_SOINFO set.
1948 static soinfo::soinfo_list_t g_empty_list;
1950 soinfo::soinfo_list_t& soinfo::get_children() {
1951 if (has_min_version(0)) {
1952 return children_;
1953 }
1955 return g_empty_list;
1956 }
1958 soinfo::soinfo_list_t& soinfo::get_parents() {
1959 if (has_min_version(0)) {
1960 return parents_;
1961 }
1963 return g_empty_list;
1964 }
1966 ElfW(Addr) soinfo::resolve_symbol_address(ElfW(Sym)* s) {
1967 if (ELF_ST_TYPE(s->st_info) == STT_GNU_IFUNC) {
1968 return call_ifunc_resolver(s->st_value + load_bias);
1969 }
1971 return static_cast<ElfW(Addr)>(s->st_value + load_bias);
1972 }
1974 const char* soinfo::get_string(ElfW(Word) index) const {
1975 if (has_min_version(1) && (index >= strtab_size_)) {
1976 __libc_fatal("%s: strtab out of bounds error; STRSZ=%zd, name=%d", name, strtab_size_, index);
1977 }
1979 return strtab_ + index;
1980 }
1982 bool soinfo::is_gnu_hash() const {
1983 return (flags & FLAG_GNU_HASH) != 0;
1984 }
1986 bool soinfo::can_unload() const {
1987 return (get_rtld_flags() & (RTLD_NODELETE | RTLD_GLOBAL)) == 0;
1988 }
1990 /* Force any of the closed stdin, stdout and stderr to be associated with
1991 /dev/null. */
1992 static int nullify_closed_stdio() {
1993 int dev_null, i, status;
1994 int return_value = 0;
1996 dev_null = TEMP_FAILURE_RETRY(open("/dev/null", O_RDWR));
1997 if (dev_null < 0) {
1998 DL_ERR("cannot open /dev/null: %s", strerror(errno));
1999 return -1;
2000 }
2001 TRACE("[ Opened /dev/null file-descriptor=%d]", dev_null);
2003 /* If any of the stdio file descriptors is valid and not associated
2004 with /dev/null, dup /dev/null to it. */
2005 for (i = 0; i < 3; i++) {
2006 /* If it is /dev/null already, we are done. */
2007 if (i == dev_null) {
2008 continue;
2009 }
2011 TRACE("[ Nullifying stdio file descriptor %d]", i);
2012 status = TEMP_FAILURE_RETRY(fcntl(i, F_GETFL));
2014 /* If file is opened, we are good. */
2015 if (status != -1) {
2016 continue;
2017 }
2019 /* The only error we allow is that the file descriptor does not
2020 exist, in which case we dup /dev/null to it. */
2021 if (errno != EBADF) {
2022 DL_ERR("fcntl failed: %s", strerror(errno));
2023 return_value = -1;
2024 continue;
2025 }
2027 /* Try dupping /dev/null to this stdio file descriptor and
2028 repeat if there is a signal. Note that any errors in closing
2029 the stdio descriptor are lost. */
2030 status = TEMP_FAILURE_RETRY(dup2(dev_null, i));
2031 if (status < 0) {
2032 DL_ERR("dup2 failed: %s", strerror(errno));
2033 return_value = -1;
2034 continue;
2035 }
2036 }
2038 /* If /dev/null is not one of the stdio file descriptors, close it. */
2039 if (dev_null > 2) {
2040 TRACE("[ Closing /dev/null file-descriptor=%d]", dev_null);
2041 status = TEMP_FAILURE_RETRY(close(dev_null));
2042 if (status == -1) {
2043 DL_ERR("close failed: %s", strerror(errno));
2044 return_value = -1;
2045 }
2046 }
2048 return return_value;
2049 }
// Parses this image's PT_DYNAMIC segment and caches everything needed for
// symbol lookup and relocation: the hash table (DT_HASH or DT_GNU_HASH),
// string/symbol tables, relocation tables, init/fini entries and flags.
// Dependencies are only counted here (DT_NEEDED); they are loaded later.
// Returns false on a missing, malformed, or unsupported dynamic section.
bool soinfo::prelink_image() {
  /* Extract dynamic section */
  ElfW(Word) dynamic_flags = 0;
  phdr_table_get_dynamic_section(phdr, phnum, load_bias, &dynamic, &dynamic_flags);

  /* We can't log anything until the linker is relocated */
  bool relocating_linker = (flags & FLAG_LINKER) != 0;
  if (!relocating_linker) {
    INFO("[ linking %s ]", name);
    DEBUG("si->base = %p si->flags = 0x%08x", reinterpret_cast<void*>(base), flags);
  }

  if (dynamic == nullptr) {
    if (!relocating_linker) {
      DL_ERR("missing PT_DYNAMIC in \"%s\"", name);
    }
    return false;
  } else {
    if (!relocating_linker) {
      DEBUG("dynamic = %p", dynamic);
    }
  }

#if defined(__arm__)
  // Cache the exception-index table used for ARM EH unwinding.
  (void) phdr_table_get_arm_exidx(phdr, phnum, load_bias,
                                  &ARM_exidx, &ARM_exidx_count);
#endif

  // Extract useful information from dynamic section.
  uint32_t needed_count = 0;
  for (ElfW(Dyn)* d = dynamic; d->d_tag != DT_NULL; ++d) {
    DEBUG("d = %p, d[0](tag) = %p d[1](val) = %p",
          d, reinterpret_cast<void*>(d->d_tag), reinterpret_cast<void*>(d->d_un.d_val));
    switch (d->d_tag) {
      case DT_SONAME:
        // TODO: glibc dynamic linker uses this name for
        // initial library lookup; consider doing the same here.
        break;

      case DT_HASH:
        if (nbucket_ != 0) {
          // in case of --hash-style=both, we prefer gnu
          break;
        }

        // SysV hash layout: [nbucket][nchain][buckets...][chains...],
        // all 32-bit words.
        nbucket_ = reinterpret_cast<uint32_t*>(load_bias + d->d_un.d_ptr)[0];
        nchain_ = reinterpret_cast<uint32_t*>(load_bias + d->d_un.d_ptr)[1];
        bucket_ = reinterpret_cast<uint32_t*>(load_bias + d->d_un.d_ptr + 8);
        chain_ = reinterpret_cast<uint32_t*>(load_bias + d->d_un.d_ptr + 8 + nbucket_ * 4);
        break;

      case DT_GNU_HASH:
        if (nbucket_ != 0) {
          // in case of --hash-style=both, we prefer gnu
          nchain_ = 0;
        }

        // GNU hash header: [nbucket][symndx][maskwords][shift2], followed
        // by the bloom filter (ElfW(Addr)-sized words), then buckets.
        nbucket_ = reinterpret_cast<uint32_t*>(load_bias + d->d_un.d_ptr)[0];
        // skip symndx
        gnu_maskwords_ = reinterpret_cast<uint32_t*>(load_bias + d->d_un.d_ptr)[2];
        gnu_shift2_ = reinterpret_cast<uint32_t*>(load_bias + d->d_un.d_ptr)[3];

        gnu_bloom_filter_ = reinterpret_cast<ElfW(Addr)*>(load_bias + d->d_un.d_ptr + 16);
        bucket_ = reinterpret_cast<uint32_t*>(gnu_bloom_filter_ + gnu_maskwords_);
        // amend chain for symndx = header[1]
        chain_ = bucket_ + nbucket_ - reinterpret_cast<uint32_t*>(load_bias + d->d_un.d_ptr)[1];

        if (!powerof2(gnu_maskwords_)) {
          DL_ERR("invalid maskwords for gnu_hash = 0x%x, in \"%s\" expecting power to two", gnu_maskwords_, name);
          return false;
        }
        // Keep maskwords as count-1 so it can be used as a cheap bitmask.
        --gnu_maskwords_;

        flags |= FLAG_GNU_HASH;
        break;

      case DT_STRTAB:
        strtab_ = reinterpret_cast<const char*>(load_bias + d->d_un.d_ptr);
        break;

      case DT_STRSZ:
        // Used by get_string() for bounds checking.
        strtab_size_ = d->d_un.d_val;
        break;

      case DT_SYMTAB:
        symtab_ = reinterpret_cast<ElfW(Sym)*>(load_bias + d->d_un.d_ptr);
        break;

      case DT_SYMENT:
        // Symbol entry size must match what this linker was compiled for.
        if (d->d_un.d_val != sizeof(ElfW(Sym))) {
          DL_ERR("invalid DT_SYMENT: %zd in \"%s\"", static_cast<size_t>(d->d_un.d_val), name);
          return false;
        }
        break;

      case DT_PLTREL:
        // PLT relocation format must match the per-arch USE_RELA choice.
#if defined(USE_RELA)
        if (d->d_un.d_val != DT_RELA) {
          DL_ERR("unsupported DT_PLTREL in \"%s\"; expected DT_RELA", name);
          return false;
        }
#else
        if (d->d_un.d_val != DT_REL) {
          DL_ERR("unsupported DT_PLTREL in \"%s\"; expected DT_REL", name);
          return false;
        }
#endif
        break;

      case DT_JMPREL:
#if defined(USE_RELA)
        plt_rela_ = reinterpret_cast<ElfW(Rela)*>(load_bias + d->d_un.d_ptr);
#else
        plt_rel_ = reinterpret_cast<ElfW(Rel)*>(load_bias + d->d_un.d_ptr);
#endif
        break;

      case DT_PLTRELSZ:
        // Byte size converted to an entry count.
#if defined(USE_RELA)
        plt_rela_count_ = d->d_un.d_val / sizeof(ElfW(Rela));
#else
        plt_rel_count_ = d->d_un.d_val / sizeof(ElfW(Rel));
#endif
        break;

      case DT_PLTGOT:
#if defined(__mips__)
        // Used by mips and mips64.
        plt_got_ = reinterpret_cast<ElfW(Addr)**>(load_bias + d->d_un.d_ptr);
#endif
        // Ignore for other platforms... (because RTLD_LAZY is not supported)
        break;

      case DT_DEBUG:
        // Set the DT_DEBUG entry to the address of _r_debug for GDB
        // if the dynamic table is writable
        // FIXME: not working currently for N64
        // The flags for the LOAD and DYNAMIC program headers do not agree.
        // The LOAD section containing the dynamic table has been mapped as
        // read-only, but the DYNAMIC header claims it is writable.
#if !(defined(__mips__) && defined(__LP64__))
        if ((dynamic_flags & PF_W) != 0) {
          d->d_un.d_val = reinterpret_cast<uintptr_t>(&_r_debug);
        }
        break;
#endif
        // NOTE(review): on mips64 the 'break' above is compiled out, so
        // DT_DEBUG falls through into the next case; this matches the
        // FIXME but is worth confirming as intentional.
#if defined(USE_RELA)
      case DT_RELA:
        rela_ = reinterpret_cast<ElfW(Rela)*>(load_bias + d->d_un.d_ptr);
        break;

      case DT_RELASZ:
        rela_count_ = d->d_un.d_val / sizeof(ElfW(Rela));
        break;

      case DT_RELAENT:
        if (d->d_un.d_val != sizeof(ElfW(Rela))) {
          DL_ERR("invalid DT_RELAENT: %zd", static_cast<size_t>(d->d_un.d_val));
          return false;
        }
        break;

      // ignored (see DT_RELCOUNT comments for details)
      case DT_RELACOUNT:
        break;

      // A RELA-arch binary must not carry REL tables, and vice versa below.
      case DT_REL:
        DL_ERR("unsupported DT_REL in \"%s\"", name);
        return false;

      case DT_RELSZ:
        DL_ERR("unsupported DT_RELSZ in \"%s\"", name);
        return false;
#else
      case DT_REL:
        rel_ = reinterpret_cast<ElfW(Rel)*>(load_bias + d->d_un.d_ptr);
        break;

      case DT_RELSZ:
        rel_count_ = d->d_un.d_val / sizeof(ElfW(Rel));
        break;

      case DT_RELENT:
        if (d->d_un.d_val != sizeof(ElfW(Rel))) {
          DL_ERR("invalid DT_RELENT: %zd", static_cast<size_t>(d->d_un.d_val));
          return false;
        }
        break;

      // "Indicates that all RELATIVE relocations have been concatenated together,
      // and specifies the RELATIVE relocation count."
      //
      // TODO: Spec also mentions that this can be used to optimize relocation process;
      // Not currently used by bionic linker - ignored.
      case DT_RELCOUNT:
        break;
      case DT_RELA:
        DL_ERR("unsupported DT_RELA in \"%s\"", name);
        return false;
#endif
      case DT_INIT:
        init_func_ = reinterpret_cast<linker_function_t>(load_bias + d->d_un.d_ptr);
        DEBUG("%s constructors (DT_INIT) found at %p", name, init_func_);
        break;

      case DT_FINI:
        fini_func_ = reinterpret_cast<linker_function_t>(load_bias + d->d_un.d_ptr);
        DEBUG("%s destructors (DT_FINI) found at %p", name, fini_func_);
        break;

      case DT_INIT_ARRAY:
        init_array_ = reinterpret_cast<linker_function_t*>(load_bias + d->d_un.d_ptr);
        DEBUG("%s constructors (DT_INIT_ARRAY) found at %p", name, init_array_);
        break;

      case DT_INIT_ARRAYSZ:
        init_array_count_ = ((unsigned)d->d_un.d_val) / sizeof(ElfW(Addr));
        break;

      case DT_FINI_ARRAY:
        fini_array_ = reinterpret_cast<linker_function_t*>(load_bias + d->d_un.d_ptr);
        DEBUG("%s destructors (DT_FINI_ARRAY) found at %p", name, fini_array_);
        break;

      case DT_FINI_ARRAYSZ:
        fini_array_count_ = ((unsigned)d->d_un.d_val) / sizeof(ElfW(Addr));
        break;

      case DT_PREINIT_ARRAY:
        preinit_array_ = reinterpret_cast<linker_function_t*>(load_bias + d->d_un.d_ptr);
        DEBUG("%s constructors (DT_PREINIT_ARRAY) found at %p", name, preinit_array_);
        break;

      case DT_PREINIT_ARRAYSZ:
        preinit_array_count_ = ((unsigned)d->d_un.d_val) / sizeof(ElfW(Addr));
        break;

      case DT_TEXTREL:
        // Text relocations are tolerated on 32-bit only (see link_image()).
#if defined(__LP64__)
        DL_ERR("text relocations (DT_TEXTREL) found in 64-bit ELF file \"%s\"", name);
        return false;
#else
        has_text_relocations = true;
        break;
#endif

      case DT_SYMBOLIC:
        has_DT_SYMBOLIC = true;
        break;

      case DT_NEEDED:
        // Only counted here; actual loading is done by the caller.
        ++needed_count;
        break;

      case DT_FLAGS:
        if (d->d_un.d_val & DF_TEXTREL) {
#if defined(__LP64__)
          DL_ERR("text relocations (DF_TEXTREL) found in 64-bit ELF file \"%s\"", name);
          return false;
#else
          has_text_relocations = true;
#endif
        }
        if (d->d_un.d_val & DF_SYMBOLIC) {
          has_DT_SYMBOLIC = true;
        }
        break;

      case DT_FLAGS_1:
        set_dt_flags_1(d->d_un.d_val);

        if ((d->d_un.d_val & ~SUPPORTED_DT_FLAGS_1) != 0) {
          DL_WARN("Unsupported flags DT_FLAGS_1=%p", reinterpret_cast<void*>(d->d_un.d_val));
        }
        break;
#if defined(__mips__)
      case DT_MIPS_RLD_MAP:
        // Set the DT_MIPS_RLD_MAP entry to the address of _r_debug for GDB.
        {
          r_debug** dp = reinterpret_cast<r_debug**>(load_bias + d->d_un.d_ptr);
          *dp = &_r_debug;
        }
        break;

      case DT_MIPS_RLD_VERSION:
      case DT_MIPS_FLAGS:
      case DT_MIPS_BASE_ADDRESS:
      case DT_MIPS_UNREFEXTNO:
        break;

      case DT_MIPS_SYMTABNO:
        mips_symtabno_ = d->d_un.d_val;
        break;

      case DT_MIPS_LOCAL_GOTNO:
        mips_local_gotno_ = d->d_un.d_val;
        break;

      case DT_MIPS_GOTSYM:
        mips_gotsym_ = d->d_un.d_val;
        break;
#endif
      // Ignored: "Its use has been superseded by the DF_BIND_NOW flag"
      case DT_BIND_NOW:
        break;

      // Ignore: bionic does not support symbol versioning...
      case DT_VERSYM:
      case DT_VERDEF:
      case DT_VERDEFNUM:
        break;

      default:
        if (!relocating_linker) {
          DL_WARN("%s: unused DT entry: type %p arg %p", name,
                  reinterpret_cast<void*>(d->d_tag), reinterpret_cast<void*>(d->d_un.d_val));
        }
        break;
    }
  }

  DEBUG("si->base = %p, si->strtab = %p, si->symtab = %p",
        reinterpret_cast<void*>(base), strtab_, symtab_);

  // Sanity checks.
  if (relocating_linker && needed_count != 0) {
    DL_ERR("linker cannot have DT_NEEDED dependencies on other libraries");
    return false;
  }
  if (nbucket_ == 0) {
    DL_ERR("empty/missing DT_HASH/DT_GNU_HASH in \"%s\" (new hash type from the future?)", name);
    return false;
  }
  if (strtab_ == 0) {
    DL_ERR("empty/missing DT_STRTAB in \"%s\"", name);
    return false;
  }
  if (symtab_ == 0) {
    DL_ERR("empty/missing DT_SYMTAB in \"%s\"", name);
    return false;
  }
  return true;
}
// Applies all relocations cached by prelink_image() and establishes final
// page protections: temporarily unprotects segments for text relocations
// (32-bit only), relocates rel[a]/plt tables, handles the MIPS GOT,
// re-protects segments, enables GNU RELRO, and optionally writes or maps
// a shared RELRO region per 'extinfo'. Notifies gdb on success.
bool soinfo::link_image(const soinfo_list_t& global_group, const soinfo_list_t& local_group, const android_dlextinfo* extinfo) {

#if !defined(__LP64__)
  if (has_text_relocations) {
    // Make segments writable to allow text relocations to work properly. We will later call
    // phdr_table_protect_segments() after all of them are applied and all constructors are run.
    DL_WARN("%s has text relocations. This is wasting memory and prevents "
            "security hardening. Please fix.", name);
    if (phdr_table_unprotect_segments(phdr, phnum, load_bias) < 0) {
      DL_ERR("can't unprotect loadable segments for \"%s\": %s",
             name, strerror(errno));
      return false;
    }
  }
#endif

  // NOTE(review): a true/non-zero result from relocate() is treated as
  // failure here -- confirm against relocate()'s return convention.
#if defined(USE_RELA)
  if (rela_ != nullptr) {
    DEBUG("[ relocating %s ]", name);
    if (relocate(rela_, rela_count_, global_group, local_group)) {
      return false;
    }
  }
  if (plt_rela_ != nullptr) {
    DEBUG("[ relocating %s plt ]", name);
    if (relocate(plt_rela_, plt_rela_count_, global_group, local_group)) {
      return false;
    }
  }
#else
  if (rel_ != nullptr) {
    DEBUG("[ relocating %s ]", name);
    if (relocate(rel_, rel_count_, global_group, local_group)) {
      return false;
    }
  }
  if (plt_rel_ != nullptr) {
    DEBUG("[ relocating %s plt ]", name);
    if (relocate(plt_rel_, plt_rel_count_, global_group, local_group)) {
      return false;
    }
  }
#endif

#if defined(__mips__)
  if (!mips_relocate_got(global_group, local_group)) {
    return false;
  }
#endif

  DEBUG("[ finished linking %s ]", name);

#if !defined(__LP64__)
  if (has_text_relocations) {
    // All relocations are done, we can protect our segments back to read-only.
    if (phdr_table_protect_segments(phdr, phnum, load_bias) < 0) {
      DL_ERR("can't protect segments for \"%s\": %s",
             name, strerror(errno));
      return false;
    }
  }
#endif

  /* We can also turn on GNU RELRO protection */
  if (phdr_table_protect_gnu_relro(phdr, phnum, load_bias) < 0) {
    DL_ERR("can't enable GNU RELRO protection for \"%s\": %s",
           name, strerror(errno));
    return false;
  }

  /* Handle serializing/sharing the RELRO segment */
  if (extinfo && (extinfo->flags & ANDROID_DLEXT_WRITE_RELRO)) {
    // Dump the post-relocation RELRO pages to extinfo->relro_fd so other
    // processes can share them.
    if (phdr_table_serialize_gnu_relro(phdr, phnum, load_bias,
                                       extinfo->relro_fd) < 0) {
      DL_ERR("failed serializing GNU RELRO section for \"%s\": %s",
             name, strerror(errno));
      return false;
    }
  } else if (extinfo && (extinfo->flags & ANDROID_DLEXT_USE_RELRO)) {
    // Replace identical RELRO pages with shared mappings from relro_fd.
    if (phdr_table_map_gnu_relro(phdr, phnum, load_bias,
                                 extinfo->relro_fd) < 0) {
      DL_ERR("failed mapping GNU RELRO section for \"%s\": %s",
             name, strerror(errno));
      return false;
    }
  }

  notify_gdb_of_load(this);
  return true;
}
/*
 * This function adds the vdso to the internal dso list.
 * It helps with stack unwinding through signal handlers.
 * Also, it makes bionic more like glibc.
 */
static void add_vdso(KernelArgumentBlock& args __unused) {
#if defined(AT_SYSINFO_EHDR)
  // The kernel advertises the vdso's ELF header via the aux vector; absent
  // on kernels/architectures without a vdso.
  ElfW(Ehdr)* ehdr_vdso = reinterpret_cast<ElfW(Ehdr)*>(args.getauxval(AT_SYSINFO_EHDR));
  if (ehdr_vdso == nullptr) {
    return;
  }

  // NOTE(review): soinfo_alloc() is not null-checked and the
  // prelink_image()/link_image() results are ignored -- vdso registration
  // appears to be best-effort; confirm soinfo_alloc() cannot return
  // nullptr here.
  soinfo* si = soinfo_alloc("[vdso]", nullptr, 0, 0);

  si->phdr = reinterpret_cast<ElfW(Phdr)*>(reinterpret_cast<char*>(ehdr_vdso) + ehdr_vdso->e_phoff);
  si->phnum = ehdr_vdso->e_phnum;
  si->base = reinterpret_cast<ElfW(Addr)>(ehdr_vdso);
  si->size = phdr_table_get_load_size(si->phdr, si->phnum);
  si->load_bias = get_elf_exec_load_bias(ehdr_vdso);

  si->prelink_image();
  si->link_image(g_empty_list, soinfo::soinfo_list_t::make_list(si), nullptr);
#endif
}
/*
 * This is linker soinfo for GDB. See details below.
 */
#if defined(__LP64__)
#define LINKER_PATH "/system/bin/linker64"
#else
#define LINKER_PATH "/system/bin/linker"
#endif
// Static soinfo describing the linker itself; registered into gdb's
// link map by init_linker_info_for_gdb() rather than via soinfo_alloc(),
// because the linker must not appear on the regular soinfo list.
static soinfo linker_soinfo_for_gdb(LINKER_PATH, nullptr, 0, 0);
2521 /* gdb expects the linker to be in the debug shared object list.
2522 * Without this, gdb has trouble locating the linker's ".text"
2523 * and ".plt" sections. Gdb could also potentially use this to
2524 * relocate the offset of our exported 'rtld_db_dlactivity' symbol.
2525 * Don't use soinfo_alloc(), because the linker shouldn't
2526 * be on the soinfo list.
2527 */
2528 static void init_linker_info_for_gdb(ElfW(Addr) linker_base) {
2529 linker_soinfo_for_gdb.base = linker_base;
2531 /*
2532 * Set the dynamic field in the link map otherwise gdb will complain with
2533 * the following:
2534 * warning: .dynamic section for "/system/bin/linker" is not at the
2535 * expected address (wrong library or version mismatch?)
2536 */
2537 ElfW(Ehdr)* elf_hdr = reinterpret_cast<ElfW(Ehdr)*>(linker_base);
2538 ElfW(Phdr)* phdr = reinterpret_cast<ElfW(Phdr)*>(linker_base + elf_hdr->e_phoff);
2539 phdr_table_get_dynamic_section(phdr, elf_hdr->e_phnum, linker_base,
2540 &linker_soinfo_for_gdb.dynamic, nullptr);
2541 insert_soinfo_into_debug_map(&linker_soinfo_for_gdb);
2542 }
2544 /*
2545 * This code is called after the linker has linked itself and
2546 * fixed it's own GOT. It is safe to make references to externs
2547 * and other non-local data at this point.
2548 */
// Main body of the linker, run once the linker itself is relocated:
// sets up the environment and debug map, builds the soinfo for the main
// executable from the kernel aux vector, loads LD_PRELOADs and DT_NEEDED
// dependencies, runs constructors, and returns the executable's entry
// point for the assembly stub to jump to.
static ElfW(Addr) __linker_init_post_relocation(KernelArgumentBlock& args, ElfW(Addr) linker_base) {
#if TIMING
  struct timeval t0, t1;
  gettimeofday(&t0, 0);
#endif

  // Initialize environment functions, and get to the ELF aux vectors table.
  linker_env_init(args);

  // If this is a setuid/setgid program, close the security hole described in
  // ftp://ftp.freebsd.org/pub/FreeBSD/CERT/advisories/FreeBSD-SA-02:23.stdio.asc
  if (get_AT_SECURE()) {
    nullify_closed_stdio();
  }

  debuggerd_init();

  // Get a few environment variables.
  const char* LD_DEBUG = linker_env_get("LD_DEBUG");
  if (LD_DEBUG != nullptr) {
    g_ld_debug_verbosity = atoi(LD_DEBUG);
  }

  // Normally, these are cleaned by linker_env_init, but the test
  // doesn't cost us anything.
  const char* ldpath_env = nullptr;
  const char* ldpreload_env = nullptr;
  if (!get_AT_SECURE()) {
    ldpath_env = linker_env_get("LD_LIBRARY_PATH");
    ldpreload_env = linker_env_get("LD_PRELOAD");
  }

  INFO("[ android linker & debugger ]");

  // soinfo for the main executable; RTLD_GLOBAL so its symbols are
  // visible to everything loaded later.
  soinfo* si = soinfo_alloc(args.argv[0], nullptr, 0, RTLD_GLOBAL);
  if (si == nullptr) {
    exit(EXIT_FAILURE);
  }

  /* bootstrap the link map, the main exe always needs to be first */
  si->flags |= FLAG_EXE;
  link_map* map = &(si->link_map_head);

  map->l_addr = 0;
  map->l_name = args.argv[0];
  map->l_prev = nullptr;
  map->l_next = nullptr;

  _r_debug.r_map = map;
  r_debug_tail = map;

  init_linker_info_for_gdb(linker_base);

  // Extract information passed from the kernel.
  si->phdr = reinterpret_cast<ElfW(Phdr)*>(args.getauxval(AT_PHDR));
  si->phnum = args.getauxval(AT_PHNUM);
  si->entry = args.getauxval(AT_ENTRY);

  /* Compute the value of si->base. We can't rely on the fact that
   * the first entry is the PHDR because this will not be true
   * for certain executables (e.g. some in the NDK unit test suite)
   */
  si->base = 0;
  si->size = phdr_table_get_load_size(si->phdr, si->phnum);
  si->load_bias = 0;
  for (size_t i = 0; i < si->phnum; ++i) {
    if (si->phdr[i].p_type == PT_PHDR) {
      // PT_PHDR gives us the file offset and vaddr of the program header
      // table itself, from which base and load_bias follow.
      si->load_bias = reinterpret_cast<ElfW(Addr)>(si->phdr) - si->phdr[i].p_vaddr;
      si->base = reinterpret_cast<ElfW(Addr)>(si->phdr) - si->phdr[i].p_offset;
      break;
    }
  }
  si->dynamic = nullptr;
  si->ref_count = 1;

  ElfW(Ehdr)* elf_hdr = reinterpret_cast<ElfW(Ehdr)*>(si->base);
  if (elf_hdr->e_type != ET_DYN) {
    __libc_format_fd(2, "error: only position independent executables (PIE) are supported.\n");
    exit(EXIT_FAILURE);
  }

  // Use LD_LIBRARY_PATH and LD_PRELOAD (but only if we aren't setuid/setgid).
  parse_LD_LIBRARY_PATH(ldpath_env);
  parse_LD_PRELOAD(ldpreload_env);

  somain = si;

  // NOTE(review): prelink_image()'s result is ignored here; a malformed
  // main executable surfaces later -- confirm this is intentional.
  si->prelink_image();

  // add somain to global group
  si->set_dt_flags_1(si->get_dt_flags_1() | DF_1_GLOBAL);

  // Load ld_preloads and dependencies.
  // LD_PRELOAD names come first so they take lookup precedence.
  StringLinkedList needed_library_name_list;
  size_t needed_libraries_count = 0;
  size_t ld_preloads_count = 0;
  while (g_ld_preload_names[ld_preloads_count] != nullptr) {
    needed_library_name_list.push_back(g_ld_preload_names[ld_preloads_count++]);
    ++needed_libraries_count;
  }

  for_each_dt_needed(si, [&](const char* name) {
    needed_library_name_list.push_back(name);
    ++needed_libraries_count;
  });

  const char* needed_library_names[needed_libraries_count];

  memset(needed_library_names, 0, sizeof(needed_library_names));
  needed_library_name_list.copy_to_array(needed_library_names, needed_libraries_count);

  if (needed_libraries_count > 0 && !find_libraries(si, needed_library_names, needed_libraries_count, nullptr, g_ld_preloads, ld_preloads_count, RTLD_GLOBAL, nullptr)) {
    __libc_format_fd(2, "CANNOT LINK EXECUTABLE: %s\n", linker_get_error_buffer());
    exit(EXIT_FAILURE);
  }

  add_vdso(args);

  // DT_PREINIT_ARRAY runs before any dependency constructors.
  si->call_pre_init_constructors();

  /* After the prelink_image, the si->load_bias is initialized.
   * For so lib, the map->l_addr will be updated in notify_gdb_of_load.
   * We need to update this value for so exe here. So Unwind_Backtrace
   * for some arch like x86 could work correctly within so exe.
   */
  map->l_addr = si->load_bias;
  si->call_constructors();

#if TIMING
  gettimeofday(&t1, nullptr);
  PRINT("LINKER TIME: %s: %d microseconds", args.argv[0], (int) (
         (((long long)t1.tv_sec * 1000000LL) + (long long)t1.tv_usec) -
         (((long long)t0.tv_sec * 1000000LL) + (long long)t0.tv_usec)));
#endif
#if STATS
  PRINT("RELO STATS: %s: %d abs, %d rel, %d copy, %d symbol", args.argv[0],
         linker_stats.count[kRelocAbsolute],
         linker_stats.count[kRelocRelative],
         linker_stats.count[kRelocCopy],
         linker_stats.count[kRelocSymbol]);
#endif
#if COUNT_PAGES
  {
    unsigned n;
    unsigned i;
    unsigned count = 0;
    for (n = 0; n < 4096; n++) {
      if (bitmask[n]) {
        unsigned x = bitmask[n];
#if defined(__LP64__)
        for (i = 0; i < 32; i++) {
#else
        for (i = 0; i < 8; i++) {
#endif
          if (x & 1) {
            count++;
          }
          x >>= 1;
        }
      }
    }
    PRINT("PAGES MODIFIED: %s: %d (%dKB)", args.argv[0], count, count * 4);
  }
#endif

#if TIMING || STATS || COUNT_PAGES
  fflush(stdout);
#endif

  TRACE("[ Ready to execute '%s' @ %p ]", si->name, reinterpret_cast<void*>(si->entry));
  return si->entry;
}
2722 /* Compute the load-bias of an existing executable. This shall only
2723 * be used to compute the load bias of an executable or shared library
2724 * that was loaded by the kernel itself.
2725 *
2726 * Input:
2727 * elf -> address of ELF header, assumed to be at the start of the file.
2728 * Return:
2729 * load bias, i.e. add the value of any p_vaddr in the file to get
2730 * the corresponding address in memory.
2731 */
static ElfW(Addr) get_elf_exec_load_bias(const ElfW(Ehdr)* elf) {
  // The program header table lives at e_phoff bytes past the ELF header.
  const ElfW(Phdr)* phdr_table =
      reinterpret_cast<const ElfW(Phdr)*>(reinterpret_cast<uintptr_t>(elf) + elf->e_phoff);

  // The first PT_LOAD segment determines the bias: where it actually is
  // in memory minus where the file said it should be.
  for (size_t i = 0; i < elf->e_phnum; ++i) {
    const ElfW(Phdr)& phdr = phdr_table[i];
    if (phdr.p_type == PT_LOAD) {
      return reinterpret_cast<ElfW(Addr)>(elf) + phdr.p_offset - phdr.p_vaddr;
    }
  }

  // No loadable segment found; treat as unbiased.
  return 0;
}
2745 extern "C" void _start();
2747 /*
2748 * This is the entry point for the linker, called from begin.S. This
2749 * method is responsible for fixing the linker's own relocations, and
2750 * then calling __linker_init_post_relocation().
2751 *
2752 * Because this method is called before the linker has fixed it's own
2753 * relocations, any attempt to reference an extern variable, extern
2754 * function, or other GOT reference will generate a segfault.
2755 */
// Entry point from begin.S. Everything before link_image() succeeds must
// avoid GOT references (externs, global data): the linker's own
// relocations have not been applied yet.
extern "C" ElfW(Addr) __linker_init(void* raw_args) {
  KernelArgumentBlock args(raw_args);

  // AT_BASE is where the kernel mapped the linker; AT_ENTRY is the
  // target executable's entry point.
  ElfW(Addr) linker_addr = args.getauxval(AT_BASE);
  ElfW(Addr) entry_point = args.getauxval(AT_ENTRY);
  ElfW(Ehdr)* elf_hdr = reinterpret_cast<ElfW(Ehdr)*>(linker_addr);
  ElfW(Phdr)* phdr = reinterpret_cast<ElfW(Phdr)*>(linker_addr + elf_hdr->e_phoff);

  soinfo linker_so("[dynamic linker]", nullptr, 0, 0);

  // If the linker is not acting as PT_INTERP entry_point is equal to
  // _start. Which means that the linker is running as an executable and
  // already linked by PT_INTERP.
  //
  // This happens when user tries to run 'adb shell /system/bin/linker'
  // see also https://code.google.com/p/android/issues/detail?id=63174
  if (reinterpret_cast<ElfW(Addr)>(&_start) == entry_point) {
    __libc_fatal("This is %s, the helper program for shared library executables.\n", args.argv[0]);
  }

  linker_so.base = linker_addr;
  linker_so.size = phdr_table_get_load_size(phdr, elf_hdr->e_phnum);
  linker_so.load_bias = get_elf_exec_load_bias(elf_hdr);
  linker_so.dynamic = nullptr;
  linker_so.phdr = phdr;
  linker_so.phnum = elf_hdr->e_phnum;
  linker_so.flags |= FLAG_LINKER;

  // This might not be obvious... The reasons why we pass g_empty_list
  // in place of local_group here are (1) we do not really need it, because
  // linker is built with DT_SYMBOLIC and therefore relocates its symbols against
  // itself without having to look into local_group and (2) allocators
  // are not yet initialized, and therefore we cannot use linked_list.push_*
  // functions at this point.
  if (!(linker_so.prelink_image() && linker_so.link_image(g_empty_list, g_empty_list, nullptr))) {
    // It would be nice to print an error message, but if the linker
    // can't link itself, there's no guarantee that we'll be able to
    // call write() (because it involves a GOT reference). We may as
    // well try though...
    const char* msg = "CANNOT LINK EXECUTABLE: ";
    write(2, msg, strlen(msg));
    write(2, __linker_dl_err_buf, strlen(__linker_dl_err_buf));
    write(2, "\n", 1);
    _exit(EXIT_FAILURE);
  }

  __libc_init_tls(args);

  // Initialize the linker's own global variables
  linker_so.call_constructors();

  // Initialize static variables. Note that in order to
  // get correct libdl_info we need to call constructors
  // before get_libdl_info().
  solist = get_libdl_info();
  sonext = get_libdl_info();

  // We have successfully fixed our own relocations. It's safe to run
  // the main part of the linker now.
  args.abort_message_ptr = &g_abort_message;
  ElfW(Addr) start_address = __linker_init_post_relocation(args, linker_addr);

  // Lock down the linker's writable data now that initialization is done.
  protect_data(PROT_READ);

  // Return the address that the calling assembly stub should jump to.
  return start_address;
}