1 /*
2 * Copyright (C) 2008, 2009 The Android Open Source Project
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the
13 * distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
29 #include <dlfcn.h>
30 #include <errno.h>
31 #include <fcntl.h>
32 #include <inttypes.h>
33 #include <pthread.h>
34 #include <stdio.h>
35 #include <stdlib.h>
36 #include <string.h>
37 #include <sys/mman.h>
38 #include <sys/param.h>
39 #include <unistd.h>
41 #include <new>
43 // Private C library headers.
44 #include "private/bionic_tls.h"
45 #include "private/KernelArgumentBlock.h"
46 #include "private/ScopedPthreadMutexLocker.h"
47 #include "private/ScopedFd.h"
48 #include "private/ScopeGuard.h"
49 #include "private/UniquePtr.h"
51 #include "linker.h"
52 #include "linker_debug.h"
53 #include "linker_environ.h"
54 #include "linker_phdr.h"
55 #include "linker_allocator.h"
57 /* >>> IMPORTANT NOTE - READ ME BEFORE MODIFYING <<<
58 *
59 * Do NOT use malloc() and friends or pthread_*() code here.
60 * Don't use printf() either; it's caused mysterious memory
61 * corruption in the past.
62 * The linker runs before we bring up libc and it's easiest
63 * to make sure it does not depend on any complex libc features
64 *
65 * open issues / todo:
66 *
67 * - cleaner error reporting
68 * - after linking, set as much stuff as possible to READONLY
69 * and NOEXEC
70 */
72 #if defined(__LP64__)
73 #define SEARCH_NAME(x) x
74 #else
75 // Nvidia drivers are relying on the bug:
76 // http://code.google.com/p/android/issues/detail?id=6670
77 // so we continue to use base-name lookup for lp32
// Return the filename component of a path: everything after the final '/',
// or the whole string when no '/' is present.
static const char* get_base_name(const char* path) {
  const char* last_slash = strrchr(path, '/');
  if (last_slash == NULL) {
    return path;
  }
  return last_slash + 1;
}
82 #define SEARCH_NAME(x) get_base_name(x)
83 #endif
85 static ElfW(Addr) get_elf_exec_load_bias(const ElfW(Ehdr)* elf);
87 static LinkerAllocator<soinfo> g_soinfo_allocator;
88 static LinkerAllocator<LinkedListEntry<soinfo>> g_soinfo_links_allocator;
90 static soinfo* solist;
91 static soinfo* sonext;
92 static soinfo* somain; // main process, always the one after libdl_info
94 static const char* const kDefaultLdPaths[] = {
95 #if defined(__LP64__)
96 "/vendor/lib64",
97 "/system/lib64",
98 #else
99 "/vendor/lib",
100 "/system/lib",
101 #endif
102 nullptr
103 };
105 #define LDPATH_BUFSIZE (LDPATH_MAX*64)
106 #define LDPATH_MAX 8
108 #define LDPRELOAD_BUFSIZE (LDPRELOAD_MAX*64)
109 #define LDPRELOAD_MAX 8
111 static char g_ld_library_paths_buffer[LDPATH_BUFSIZE];
112 static const char* g_ld_library_paths[LDPATH_MAX + 1];
114 static char g_ld_preloads_buffer[LDPRELOAD_BUFSIZE];
115 static const char* g_ld_preload_names[LDPRELOAD_MAX + 1];
117 static soinfo* g_ld_preloads[LDPRELOAD_MAX + 1];
119 __LIBC_HIDDEN__ int g_ld_debug_verbosity;
121 __LIBC_HIDDEN__ abort_msg_t* g_abort_message = nullptr; // For debuggerd.
123 enum RelocationKind {
124 kRelocAbsolute = 0,
125 kRelocRelative,
126 kRelocCopy,
127 kRelocSymbol,
128 kRelocMax
129 };
#if STATS
// Per-kind relocation counters, collected only in STATS builds.
struct linker_stats_t {
  int count[kRelocMax];
};

static linker_stats_t linker_stats;

// Bump the counter for one processed relocation of the given kind.
static void count_relocation(RelocationKind kind) {
  ++linker_stats.count[kind];
}
#else
// No-op stub so call sites need no #ifdefs in non-STATS builds.
static void count_relocation(RelocationKind) {
}
#endif
#if COUNT_PAGES
// Bitmap of 4096-byte pages touched during relocation (COUNT_PAGES builds
// only). MARK(offset) sets the bit for the page containing 'offset'.
static unsigned bitmask[4096];
#if defined(__LP64__)
// 64-bit: 32 pages per word, with an explicit bounds check on the index.
#define MARK(offset) \
    do { \
      if ((((offset) >> 12) >> 5) < 4096) \
        bitmask[((offset) >> 12) >> 5] |= (1 << (((offset) >> 12) & 31)); \
    } while (0)
#else
// 32-bit: packs only 8 pages per (unsigned) word and has no bounds check —
// NOTE(review): looks like it assumes a byte-sized element and a small
// address range; verify before relying on COUNT_PAGES output on LP32.
#define MARK(offset) \
    do { \
      bitmask[((offset) >> 12) >> 3] |= (1 << (((offset) >> 12) & 7)); \
    } while (0)
#endif
#else
// Page counting disabled: MARK compiles away.
#define MARK(x) do {} while (0)
#endif
// You shouldn't try to call memory-allocating functions in the dynamic linker.
// Guard against the most obvious ones.
// Each expansion defines a replacement for the named libc allocator that
// aborts the process immediately if it is ever reached inside the linker.
#define DISALLOW_ALLOCATION(return_type, name, ...) \
    return_type name __VA_ARGS__ \
    { \
        __libc_fatal("ERROR: " #name " called from the dynamic linker!\n"); \
    }
DISALLOW_ALLOCATION(void*, malloc, (size_t u __unused));
DISALLOW_ALLOCATION(void, free, (void* u __unused));
DISALLOW_ALLOCATION(void*, realloc, (void* u1 __unused, size_t u2 __unused));
DISALLOW_ALLOCATION(void*, calloc, (size_t u1 __unused, size_t u2 __unused));
176 static char __linker_dl_err_buf[768];
178 char* linker_get_error_buffer() {
179 return &__linker_dl_err_buf[0];
180 }
// Capacity in bytes of the buffer returned by linker_get_error_buffer().
size_t linker_get_error_buffer_size() {
  return sizeof(__linker_dl_err_buf);
}
186 // This function is an empty stub where GDB locates a breakpoint to get notified
187 // about linker activity.
188 extern "C" void __attribute__((noinline)) __attribute__((visibility("default"))) rtld_db_dlactivity();
190 static pthread_mutex_t g__r_debug_mutex = PTHREAD_MUTEX_INITIALIZER;
191 static r_debug _r_debug = {1, nullptr, reinterpret_cast<uintptr_t>(&rtld_db_dlactivity), r_debug::RT_CONSISTENT, 0};
192 static link_map* r_debug_tail = 0;
// Publish 'info' to the debugger by appending its link_map entry to the
// _r_debug list. Callers are expected to hold g__r_debug_mutex.
static void insert_soinfo_into_debug_map(soinfo* info) {
  // Copy the necessary fields into the debug structure.
  link_map* map = &(info->link_map_head);
  map->l_addr = info->load_bias;
  map->l_name = info->name;
  map->l_ld = info->dynamic;

  // Stick the new library at the end of the list.
  // gdb tends to care more about libc than it does
  // about leaf libraries, and ordering it this way
  // reduces the back-and-forth over the wire.
  if (r_debug_tail) {
    r_debug_tail->l_next = map;
    map->l_prev = r_debug_tail;
    map->l_next = 0;
  } else {
    // First entry: becomes both the head and the tail of the list.
    _r_debug.r_map = map;
    map->l_prev = 0;
    map->l_next = 0;
  }
  r_debug_tail = map;
}
217 static void remove_soinfo_from_debug_map(soinfo* info) {
218 link_map* map = &(info->link_map_head);
220 if (r_debug_tail == map) {
221 r_debug_tail = map->l_prev;
222 }
224 if (map->l_prev) {
225 map->l_prev->l_next = map->l_next;
226 }
227 if (map->l_next) {
228 map->l_next->l_prev = map->l_prev;
229 }
230 }
// Tell GDB (via the r_debug protocol) that 'info' has just been loaded.
// The state changes bracket the list update, with the rtld_db_dlactivity
// breakpoint fired after each so the debugger can observe both states.
static void notify_gdb_of_load(soinfo* info) {
  if (info->flags & FLAG_EXE) {
    // GDB already knows about the main executable
    return;
  }

  ScopedPthreadMutexLocker locker(&g__r_debug_mutex);

  _r_debug.r_state = r_debug::RT_ADD;
  rtld_db_dlactivity();

  insert_soinfo_into_debug_map(info);

  _r_debug.r_state = r_debug::RT_CONSISTENT;
  rtld_db_dlactivity();
}
// Tell GDB (via the r_debug protocol) that 'info' is being unloaded.
// Mirrors notify_gdb_of_load with RT_DELETE instead of RT_ADD.
static void notify_gdb_of_unload(soinfo* info) {
  if (info->flags & FLAG_EXE) {
    // GDB already knows about the main executable
    return;
  }

  ScopedPthreadMutexLocker locker(&g__r_debug_mutex);

  _r_debug.r_state = r_debug::RT_DELETE;
  rtld_db_dlactivity();

  remove_soinfo_from_debug_map(info);

  _r_debug.r_state = r_debug::RT_CONSISTENT;
  rtld_db_dlactivity();
}
// Signal a generic "library set changed" event to GDB without touching the
// debug map itself.
// NOTE(review): unlike the load/unload paths this does not take
// g__r_debug_mutex — presumably callers tolerate that; verify.
void notify_gdb_of_libraries() {
  _r_debug.r_state = r_debug::RT_ADD;
  rtld_db_dlactivity();
  _r_debug.r_state = r_debug::RT_CONSISTENT;
  rtld_db_dlactivity();
}
// Allocate a node for soinfo linked lists from the dedicated linker pool
// (the linker must not use malloc).
LinkedListEntry<soinfo>* SoinfoListAllocator::alloc() {
  return g_soinfo_links_allocator.alloc();
}
// Return a soinfo list node to its pool.
void SoinfoListAllocator::free(LinkedListEntry<soinfo>* entry) {
  g_soinfo_links_allocator.free(entry);
}
// Apply 'protection' (PROT_* flags) to all pages of the soinfo and
// list-node pools; used to keep linker bookkeeping read-only except while
// it is being modified.
static void protect_data(int protection) {
  g_soinfo_allocator.protect_all(protection);
  g_soinfo_links_allocator.protect_all(protection);
}
// Allocate and construct a new soinfo for 'name' and append it to the
// global solist. Returns nullptr (with dlerror set) when the name does not
// fit in the fixed-size soinfo::name field.
static soinfo* soinfo_alloc(const char* name, struct stat* file_stat, off64_t file_offset, uint32_t rtld_flags) {
  if (strlen(name) >= SOINFO_NAME_LEN) {
    DL_ERR("library name \"%s\" too long", name);
    return nullptr;
  }

  // Placement-new into pool memory. NOTE(review): the alloc() result is
  // not null-checked — presumably the pool aborts on exhaustion; verify.
  soinfo* si = new (g_soinfo_allocator.alloc()) soinfo(name, file_stat, file_offset, rtld_flags);

  // Append to solist; sonext always points at the current tail.
  sonext->next = si;
  sonext = si;

  TRACE("name %s: allocated soinfo @ %p", name, si);
  return si;
}
// Unmap 'si''s segments, remove it from solist and return it to the pool.
// Safe to call with nullptr.
static void soinfo_free(soinfo* si) {
  if (si == nullptr) {
    return;
  }

  if (si->base != 0 && si->size != 0) {
    munmap(reinterpret_cast<void*>(si->base), si->size);
  }

  soinfo *prev = nullptr, *trav;

  TRACE("name %s: freeing soinfo @ %p", si->name, si);

  // solist is singly linked, so locate the predecessor by scanning.
  for (trav = solist; trav != nullptr; trav = trav->next) {
    if (trav == si) {
      break;
    }
    prev = trav;
  }

  if (trav == nullptr) {
    // si was not in solist
    DL_ERR("name \"%s\" is not in solist!", si->name);
    return;
  }

  // clear links to/from si
  si->remove_all_links();

  // prev will never be null, because the first entry in solist is
  // always the static libdl_info.
  prev->next = si->next;
  if (si == sonext) {
    sonext = prev;
  }

  g_soinfo_allocator.free(si);
}
// Split 'path' on any character in 'delimiters' into 'array' (at most
// max_count entries, nullptr-terminated), using 'buf' as backing storage
// for the copied string. Empty components are skipped.
static void parse_path(const char* path, const char* delimiters,
                       const char** array, char* buf, size_t buf_size, size_t max_count) {
  if (path == nullptr) {
    return;
  }

  // strlcpy returns the length of the source, so len >= buf_size means
  // the copy was truncated.
  size_t len = strlcpy(buf, path, buf_size);

  size_t i = 0;
  char* buf_p = buf;
  while (i < max_count && (array[i] = strsep(&buf_p, delimiters))) {
    // Only keep non-empty components.
    if (*array[i] != '\0') {
      ++i;
    }
  }

  // Forget the last path if we had to truncate; this occurs if the 2nd to
  // last char isn't '\0' (i.e. wasn't originally a delimiter).
  if (i > 0 && len >= buf_size && buf[buf_size - 2] != '\0') {
    array[i - 1] = nullptr;
  } else {
    array[i] = nullptr;
  }
}
// Populate g_ld_library_paths from a colon-separated LD_LIBRARY_PATH value.
static void parse_LD_LIBRARY_PATH(const char* path) {
  parse_path(path, ":", g_ld_library_paths,
             g_ld_library_paths_buffer, sizeof(g_ld_library_paths_buffer), LDPATH_MAX);
}
// Populate g_ld_preload_names from an LD_PRELOAD value.
static void parse_LD_PRELOAD(const char* path) {
  // We have historically supported ':' as well as ' ' in LD_PRELOAD.
  parse_path(path, " :", g_ld_preload_names,
             g_ld_preloads_buffer, sizeof(g_ld_preloads_buffer), LDPRELOAD_MAX);
}
376 #if defined(__arm__)
378 // For a given PC, find the .so that it belongs to.
379 // Returns the base address of the .ARM.exidx section
380 // for that .so, and the number of 8-byte entries
381 // in that section (via *pcount).
382 //
383 // Intended to be called by libc's __gnu_Unwind_Find_exidx().
384 //
385 // This function is exposed via dlfcn.cpp and libdl.so.
// Given a PC, return the .ARM.exidx table address of the .so that contains
// it and store the entry count in *pcount; when no loaded object covers
// the address, *pcount is 0 and a null value is returned.
_Unwind_Ptr dl_unwind_find_exidx(_Unwind_Ptr pc, int* pcount) {
  unsigned addr = (unsigned)pc;

  for (soinfo* si = solist; si != 0; si = si->next) {
    // Containment test against the library's mapped range [base, base+size).
    if ((addr >= si->base) && (addr < (si->base + si->size))) {
      *pcount = si->ARM_exidx_count;
      return (_Unwind_Ptr)si->ARM_exidx;
    }
  }
  *pcount = 0;
  return nullptr;
}
399 #endif
// Here, we only have to provide a callback to iterate across all the
// loaded libraries. gcc_eh does the rest.
// Standard dl_iterate_phdr(3) contract: a non-zero callback result stops
// the iteration and is returned to the caller; otherwise 0 is returned.
int dl_iterate_phdr(int (*cb)(dl_phdr_info* info, size_t size, void* data), void* data) {
  int rv = 0;
  for (soinfo* si = solist; si != nullptr; si = si->next) {
    // Build the per-library view the callback expects.
    dl_phdr_info dl_info;
    dl_info.dlpi_addr = si->link_map_head.l_addr;
    dl_info.dlpi_name = si->link_map_head.l_name;
    dl_info.dlpi_phdr = si->phdr;
    dl_info.dlpi_phnum = si->phnum;
    rv = cb(&dl_info, sizeof(dl_phdr_info), data);
    if (rv != 0) {
      break;
    }
  }
  return rv;
}
// Dispatch a by-name lookup to whichever hash table (.gnu.hash or .hash)
// this library was built with.
ElfW(Sym)* soinfo::find_symbol_by_name(SymbolName& symbol_name) {
  return is_gnu_hash() ? gnu_lookup(symbol_name) : elf_lookup(symbol_name);
}
423 static bool is_symbol_global_and_defined(const soinfo* si, const ElfW(Sym)* s) {
424 if (ELF_ST_BIND(s->st_info) == STB_GLOBAL ||
425 ELF_ST_BIND(s->st_info) == STB_WEAK) {
426 return s->st_shndx != SHN_UNDEF;
427 } else if (ELF_ST_BIND(s->st_info) != STB_LOCAL) {
428 DL_WARN("unexpected ST_BIND value: %d for '%s' in '%s'",
429 ELF_ST_BIND(s->st_info), si->get_string(s->st_name), si->name);
430 }
432 return false;
433 }
// Look up a symbol by name using the DT_GNU_HASH tables: a bloom-filter
// pre-check, then the bucket/chain walk. Returns nullptr if not found.
ElfW(Sym)* soinfo::gnu_lookup(SymbolName& symbol_name) {
  uint32_t hash = symbol_name.gnu_hash();
  uint32_t h2 = hash >> gnu_shift2_;  // second bloom-filter hash

  uint32_t bloom_mask_bits = sizeof(ElfW(Addr))*8;
  // gnu_maskwords_ acts as a power-of-two-minus-one index mask here.
  uint32_t word_num = (hash / bloom_mask_bits) & gnu_maskwords_;
  ElfW(Addr) bloom_word = gnu_bloom_filter_[word_num];

  // test against bloom filter: both derived bits must be set for the
  // symbol to possibly be present.
  if ((1 & (bloom_word >> (hash % bloom_mask_bits)) & (bloom_word >> (h2 % bloom_mask_bits))) == 0) {
    return nullptr;
  }

  // bloom test says "probably yes"...
  uint32_t n = bucket_[hash % nbucket_];

  if (n == 0) {
    return nullptr;
  }

  do {
    ElfW(Sym)* s = symtab_ + n;
    // chain_ entries store the hash with the low bit repurposed as an
    // end-of-chain flag, hence the ">> 1" comparison.
    if (((chain_[n] ^ hash) >> 1) == 0 &&
        strcmp(get_string(s->st_name), symbol_name.get_name()) == 0 &&
        is_symbol_global_and_defined(this, s)) {
      return s;
    }
  } while ((chain_[n++] & 1) == 0);  // low bit set terminates the chain

  return nullptr;
}
// Look up a symbol by name using the classic SysV ELF .hash table.
ElfW(Sym)* soinfo::elf_lookup(SymbolName& symbol_name) {
  uint32_t hash = symbol_name.elf_hash();

  TRACE_TYPE(LOOKUP, "SEARCH %s in %s@%p h=%x(elf) %zd",
             symbol_name.get_name(), name, reinterpret_cast<void*>(base), hash, hash % nbucket_);

  // chain_[n] links together all symbols that hash to the same bucket;
  // index 0 terminates the chain.
  for (uint32_t n = bucket_[hash % nbucket_]; n != 0; n = chain_[n]) {
    ElfW(Sym)* s = symtab_ + n;
    if (strcmp(get_string(s->st_name), symbol_name.get_name()) == 0 && is_symbol_global_and_defined(this, s)) {
      TRACE_TYPE(LOOKUP, "FOUND %s in %s (%p) %zd",
                 symbol_name.get_name(), name, reinterpret_cast<void*>(s->st_value),
                 static_cast<size_t>(s->st_size));
      return s;
    }
  }

  TRACE_TYPE(LOOKUP, "NOT FOUND %s in %s@%p %x %zd",
             symbol_name.get_name(), name, reinterpret_cast<void*>(base), hash, hash % nbucket_);

  return nullptr;
}
// Construct a fresh soinfo. The whole object is zeroed first (this type
// lives in the linker's own pool; presumably all members are memset-safe —
// TODO(review): confirm), then identity fields are filled in. file_stat may
// be null for objects not backed by a file.
soinfo::soinfo(const char* name, const struct stat* file_stat, off64_t file_offset, int rtld_flags) {
  memset(this, 0, sizeof(*this));

  strlcpy(this->name, name, sizeof(this->name));
  flags = FLAG_NEW_SOINFO;
  version_ = SOINFO_VERSION;

  if (file_stat != nullptr) {
    // Record the backing file's identity so a later load of the same file
    // under a different path can be detected (see load_library).
    this->st_dev_ = file_stat->st_dev;
    this->st_ino_ = file_stat->st_ino;
    this->file_offset_ = file_offset;
  }

  this->rtld_flags_ = rtld_flags;
}
// Compute (and cache) the classic SysV ELF hash of the symbol name.
uint32_t SymbolName::elf_hash() {
  if (!has_elf_hash_) {
    const unsigned char* name = reinterpret_cast<const unsigned char*>(name_);
    uint32_t h = 0, g;

    while (*name) {
      h = (h << 4) + *name++;
      g = h & 0xf0000000;
      // Equivalent to the canonical "if (g) { h ^= g >> 24; h &= ~g; }":
      // xoring with g clears exactly the top nibble g captured.
      h ^= g;
      h ^= g >> 24;
    }

    elf_hash_ = h;
    has_elf_hash_ = true;
  }

  return elf_hash_;
}
// Compute (and cache) the GNU hash (djb2 variant, seed 5381) of the name.
uint32_t SymbolName::gnu_hash() {
  if (!has_gnu_hash_) {
    uint32_t h = 5381;
    const unsigned char* name = reinterpret_cast<const unsigned char*>(name_);
    while (*name != 0) {
      h += (h << 5) + *name++; // h*33 + c = h + h * 32 + c = h + h << 5 + c
    }

    gnu_hash_ = h;
    has_gnu_hash_ = true;
  }

  return gnu_hash_;
}
// Resolve symbol 'name' for a relocation in 'si_from'. Search order:
// si_from itself when it has DT_SYMBOLIC, then the global group, then the
// local (dlopen) group. On success *si_found_in names the provider.
static ElfW(Sym)* soinfo_do_lookup(soinfo* si_from, const char* name, soinfo** si_found_in,
    const soinfo::soinfo_list_t& global_group, const soinfo::soinfo_list_t& local_group) {
  SymbolName symbol_name(name);
  ElfW(Sym)* s = nullptr;

  /* "This element's presence in a shared object library alters the dynamic linker's
   * symbol resolution algorithm for references within the library. Instead of starting
   * a symbol search with the executable file, the dynamic linker starts from the shared
   * object itself. If the shared object fails to supply the referenced symbol, the
   * dynamic linker then searches the executable file and other shared objects as usual."
   *
   * http://www.sco.com/developers/gabi/2012-12-31/ch5.dynamic.html
   *
   * Note that this is unlikely since static linker avoids generating
   * relocations for -Bsymbolic linked dynamic executables.
   */
  if (si_from->has_DT_SYMBOLIC) {
    DEBUG("%s: looking up %s in local scope (DT_SYMBOLIC)", si_from->name, name);
    s = si_from->find_symbol_by_name(symbol_name);
    if (s != nullptr) {
      *si_found_in = si_from;
    }
  }

  // 1. Look for it in global_group
  if (s == nullptr) {
    global_group.visit([&](soinfo* global_si) {
      DEBUG("%s: looking up %s in %s (from global group)", si_from->name, name, global_si->name);
      s = global_si->find_symbol_by_name(symbol_name);
      if (s != nullptr) {
        *si_found_in = global_si;
        return false;  // returning false stops the visit
      }

      return true;
    });
  }

  // 2. Look for it in the local group
  if (s == nullptr) {
    local_group.visit([&](soinfo* local_si) {
      if (local_si == si_from && si_from->has_DT_SYMBOLIC) {
        // we already did this - skip
        return true;
      }

      DEBUG("%s: looking up %s in %s (from local group)", si_from->name, name, local_si->name);
      s = local_si->find_symbol_by_name(symbol_name);
      if (s != nullptr) {
        *si_found_in = local_si;
        return false;  // returning false stops the visit
      }

      return true;
    });
  }

  if (s != nullptr) {
    TRACE_TYPE(LOOKUP, "si %s sym %s s->st_value = %p, "
               "found in %s, base = %p, load bias = %p",
               si_from->name, name, reinterpret_cast<void*>(s->st_value),
               (*si_found_in)->name, reinterpret_cast<void*>((*si_found_in)->base),
               reinterpret_cast<void*>((*si_found_in)->load_bias));
  }

  return s;
}
// Each block size has its own allocator.
template<size_t size>
class SizeBasedAllocator {
 public:
  static void* alloc() {
    return allocator_.alloc();
  }

  static void free(void* ptr) {
    allocator_.free(ptr);
  }

 private:
  static LinkerBlockAllocator allocator_;
};

// One static pool per distinct 'size' instantiation.
template<size_t size>
LinkerBlockAllocator SizeBasedAllocator<size>::allocator_(size);
// Type-safe facade over SizeBasedAllocator<sizeof(T)>.
template<typename T>
class TypeBasedAllocator {
 public:
  static T* alloc() {
    return reinterpret_cast<T*>(SizeBasedAllocator<sizeof(T)>::alloc());
  }

  static void free(T* ptr) {
    SizeBasedAllocator<sizeof(T)>::free(ptr);
  }
};
// A pending "load this library" request queued while walking DT_NEEDED
// entries; allocated from the linker's block allocator, never from malloc.
class LoadTask {
 public:
  struct deleter_t {
    void operator()(LoadTask* t) {
      TypeBasedAllocator<LoadTask>::free(t);
    }
  };

  typedef UniquePtr<LoadTask, deleter_t> unique_ptr;

  static deleter_t deleter;

  static LoadTask* create(const char* name, soinfo* needed_by) {
    LoadTask* ptr = TypeBasedAllocator<LoadTask>::alloc();
    return new (ptr) LoadTask(name, needed_by);
  }

  // Name of the library to load.
  const char* get_name() const {
    return name_;
  }

  // The library whose DT_NEEDED entry produced this task
  // (nullptr/start_with for root requests).
  soinfo* get_needed_by() const {
    return needed_by_;
  }
 private:
  LoadTask(const char* name, soinfo* needed_by)
      : name_(name), needed_by_(needed_by) {}

  const char* name_;
  soinfo* needed_by_;

  DISALLOW_IMPLICIT_CONSTRUCTORS(LoadTask);
};

LoadTask::deleter_t LoadTask::deleter;
675 template <typename T>
676 using linked_list_t = LinkedList<T, TypeBasedAllocator<LinkedListEntry<T>>>;
678 typedef linked_list_t<soinfo> SoinfoLinkedList;
679 typedef linked_list_t<const char> StringLinkedList;
680 typedef linked_list_t<LoadTask> LoadTaskList;
683 // This function walks down the tree of soinfo dependencies
684 // in breadth-first order and
685 // * calls action(soinfo* si) for each node, and
686 // * terminates walk if action returns false.
687 //
688 // walk_dependencies_tree returns false if walk was terminated
689 // by the action and true otherwise.
// Breadth-first walk over the dependency graph rooted at root_soinfos;
// calls action(si) once per reachable node and stops early (returning
// false) if the action returns false.
template<typename F>
static bool walk_dependencies_tree(soinfo* root_soinfos[], size_t root_soinfos_size, F action) {
  SoinfoLinkedList visit_list;
  SoinfoLinkedList visited;

  for (size_t i = 0; i < root_soinfos_size; ++i) {
    visit_list.push_back(root_soinfos[i]);
  }

  soinfo* si;
  while ((si = visit_list.pop_front()) != nullptr) {
    // A node can be queued multiple times (diamond dependencies);
    // process it only once.
    if (visited.contains(si)) {
      continue;
    }

    if (!action(si)) {
      return false;
    }

    visited.push_back(si);

    si->get_children().for_each([&](soinfo* child) {
      visit_list.push_back(child);
    });
  }

  return true;
}
// This is used by dlsym(3). It performs symbol lookup only within the
// specified soinfo object and its dependencies in breadth first order.
ElfW(Sym)* dlsym_handle_lookup(soinfo* si, soinfo** found, const char* name) {
  ElfW(Sym)* result = nullptr;
  SymbolName symbol_name(name);

  // Returning false from the lambda stops the walk at the first match.
  walk_dependencies_tree(&si, 1, [&](soinfo* current_soinfo) {
    result = current_soinfo->find_symbol_by_name(symbol_name);
    if (result != nullptr) {
      *found = current_soinfo;
      return false;
    }

    return true;
  });

  return result;
}
740 /* This is used by dlsym(3) to performs a global symbol lookup. If the
741 start value is null (for RTLD_DEFAULT), the search starts at the
742 beginning of the global solist. Otherwise the search starts at the
743 specified soinfo (for RTLD_NEXT).
744 */
745 ElfW(Sym)* dlsym_linear_lookup(const char* name, soinfo** found, soinfo* start) {
746 SymbolName symbol_name(name);
748 if (start == nullptr) {
749 start = solist;
750 }
752 ElfW(Sym)* s = nullptr;
753 for (soinfo* si = start; (s == nullptr) && (si != nullptr); si = si->next) {
754 if ((si->get_rtld_flags() & RTLD_GLOBAL) == 0) {
755 continue;
756 }
758 s = si->find_symbol_by_name(symbol_name);
759 if (s != nullptr) {
760 *found = si;
761 break;
762 }
763 }
765 if (s != nullptr) {
766 TRACE_TYPE(LOOKUP, "%s s->st_value = %p, found->base = %p",
767 name, reinterpret_cast<void*>(s->st_value), reinterpret_cast<void*>((*found)->base));
768 }
770 return s;
771 }
773 soinfo* find_containing_library(const void* p) {
774 ElfW(Addr) address = reinterpret_cast<ElfW(Addr)>(p);
775 for (soinfo* si = solist; si != nullptr; si = si->next) {
776 if (address >= si->base && address - si->base < si->size) {
777 return si;
778 }
779 }
780 return nullptr;
781 }
// Reverse lookup (address -> symbol), dispatched on the hash-table flavor.
ElfW(Sym)* soinfo::find_symbol_by_address(const void* addr) {
  return is_gnu_hash() ? gnu_addr_lookup(addr) : elf_addr_lookup(addr);
}
// True when 'sym' is defined and its [st_value, st_value + st_size) range
// contains 'soaddr' (an address already made relative to the library).
static bool symbol_matches_soaddr(const ElfW(Sym)* sym, ElfW(Addr) soaddr) {
  return sym->st_shndx != SHN_UNDEF &&
         soaddr >= sym->st_value &&
         soaddr < sym->st_value + sym->st_size;
}
// Reverse lookup for DT_GNU_HASH libraries: the table only covers hashed
// symbols, so every bucket's chain is walked.
ElfW(Sym)* soinfo::gnu_addr_lookup(const void* addr) {
  ElfW(Addr) soaddr = reinterpret_cast<ElfW(Addr)>(addr) - base;

  for (size_t i = 0; i < nbucket_; ++i) {
    uint32_t n = bucket_[i];

    if (n == 0) {
      continue;
    }

    do {
      ElfW(Sym)* sym = symtab_ + n;
      if (symbol_matches_soaddr(sym, soaddr)) {
        return sym;
      }
    } while ((chain_[n++] & 1) == 0);  // low bit set terminates the chain
  }

  return nullptr;
}
// Reverse lookup for SysV-hash libraries: the chain array has one entry
// per symbol, so nchain_ equals the symbol count and a linear scan works.
ElfW(Sym)* soinfo::elf_addr_lookup(const void* addr) {
  ElfW(Addr) soaddr = reinterpret_cast<ElfW(Addr)>(addr) - base;

  // Search the library's symbol table for any defined symbol which
  // contains this address.
  for (size_t i = 0; i < nchain_; ++i) {
    ElfW(Sym)* sym = symtab_ + i;
    if (symbol_matches_soaddr(sym, soaddr)) {
      return sym;
    }
  }

  return nullptr;
}
// Try to open "<dir>/<name>" for each entry of the null-terminated 'paths'
// array. Returns an open fd, or -1 when the library was found nowhere.
static int open_library_on_path(const char* name, const char* const paths[]) {
  char buf[512];
  for (size_t i = 0; paths[i] != nullptr; ++i) {
    int n = __libc_format_buffer(buf, sizeof(buf), "%s/%s", paths[i], name);
    // Skip (rather than truncate) candidate paths that don't fit.
    if (n < 0 || n >= static_cast<int>(sizeof(buf))) {
      PRINT("Warning: ignoring very long library path: %s/%s", paths[i], name);
      continue;
    }
    int fd = TEMP_FAILURE_RETRY(open(buf, O_RDONLY | O_CLOEXEC));
    if (fd != -1) {
      return fd;
    }
  }
  return -1;
}
// Resolve 'name' to an open fd. Names containing '/' are opened directly;
// bare names are searched in LD_LIBRARY_PATH, then the default system
// paths. Returns -1 when nothing could be opened.
static int open_library(const char* name) {
  TRACE("[ opening %s ]", name);

  // If the name contains a slash, we should attempt to open it directly and not search the paths.
  if (strchr(name, '/') != nullptr) {
    int fd = TEMP_FAILURE_RETRY(open(name, O_RDONLY | O_CLOEXEC));
    if (fd != -1) {
      return fd;
    }
    // ...but nvidia binary blobs (at least) rely on this behavior, so fall through for now.
#if defined(__LP64__)
    return -1;
#endif
  }

  // Otherwise we try LD_LIBRARY_PATH first, and fall back to the built-in well known paths.
  int fd = open_library_on_path(name, g_ld_library_paths);
  if (fd == -1) {
    fd = open_library_on_path(name, kDefaultLdPaths);
  }
  return fd;
}
// Invoke action(library_name) for every DT_NEEDED entry in si's dynamic
// section, in table order.
template<typename F>
static void for_each_dt_needed(const soinfo* si, F action) {
  for (ElfW(Dyn)* d = si->dynamic; d->d_tag != DT_NULL; ++d) {
    if (d->d_tag == DT_NEEDED) {
      action(si->get_string(d->d_un.d_val));
    }
  }
}
// Load the shared object 'name' (or the fd supplied via extinfo), create
// its soinfo, parse its dynamic section (prelink) and queue its DT_NEEDED
// entries on 'load_tasks'. If the same file (dev/inode/offset) is already
// loaded, the existing soinfo is returned instead. Returns nullptr with
// dlerror set on failure.
static soinfo* load_library(LoadTaskList& load_tasks, const char* name, int rtld_flags, const android_dlextinfo* extinfo) {
  int fd = -1;
  off64_t file_offset = 0;
  // Guards only fds we open ourselves; caller-supplied fds stay open.
  ScopedFd file_guard(-1);

  if (extinfo != nullptr && (extinfo->flags & ANDROID_DLEXT_USE_LIBRARY_FD) != 0) {
    fd = extinfo->library_fd;
    if ((extinfo->flags & ANDROID_DLEXT_USE_LIBRARY_FD_OFFSET) != 0) {
      file_offset = extinfo->library_fd_offset;
    }
  } else {
    // Open the file.
    fd = open_library(name);
    if (fd == -1) {
      DL_ERR("library \"%s\" not found", name);
      return nullptr;
    }

    file_guard.reset(fd);
  }

  // The segments are mmapped from this offset, so it must be page-aligned,
  // non-negative and inside the file.
  if ((file_offset % PAGE_SIZE) != 0) {
    DL_ERR("file offset for the library \"%s\" is not page-aligned: %" PRId64, name, file_offset);
    return nullptr;
  }
  if (file_offset < 0) {
    DL_ERR("file offset for the library \"%s\" is negative: %" PRId64, name, file_offset);
    return nullptr;
  }

  struct stat file_stat;
  if (TEMP_FAILURE_RETRY(fstat(fd, &file_stat)) != 0) {
    DL_ERR("unable to stat file for the library \"%s\": %s", name, strerror(errno));
    return nullptr;
  }
  if (file_offset >= file_stat.st_size) {
    DL_ERR("file offset for the library \"%s\" >= file size: %" PRId64 " >= %" PRId64, name, file_offset, file_stat.st_size);
    return nullptr;
  }

  // Check for symlink and other situations where
  // file can have different names.
  for (soinfo* si = solist; si != nullptr; si = si->next) {
    if (si->get_st_dev() != 0 &&
        si->get_st_ino() != 0 &&
        si->get_st_dev() == file_stat.st_dev &&
        si->get_st_ino() == file_stat.st_ino &&
        si->get_file_offset() == file_offset) {
      TRACE("library \"%s\" is already loaded under different name/path \"%s\" - will return existing soinfo", name, si->name);
      return si;
    }
  }

  if ((rtld_flags & RTLD_NOLOAD) != 0) {
    DL_ERR("library \"%s\" wasn't loaded and RTLD_NOLOAD prevented it", name);
    return nullptr;
  }

  // Read the ELF header and load the segments.
  ElfReader elf_reader(name, fd, file_offset);
  if (!elf_reader.Load(extinfo)) {
    return nullptr;
  }

  soinfo* si = soinfo_alloc(SEARCH_NAME(name), &file_stat, file_offset, rtld_flags);
  if (si == nullptr) {
    return nullptr;
  }
  si->base = elf_reader.load_start();
  si->size = elf_reader.load_size();
  si->load_bias = elf_reader.load_bias();
  si->phnum = elf_reader.phdr_count();
  si->phdr = elf_reader.loaded_phdr();

  // Parse the dynamic section now so DT_NEEDED below is available; full
  // relocation happens later in link_image.
  if (!si->prelink_image()) {
    soinfo_free(si);
    return nullptr;
  }

  // Breadth-first loading: queue dependencies instead of recursing.
  for_each_dt_needed(si, [&] (const char* name) {
    load_tasks.push_back(LoadTask::create(name, si));
  });

  return si;
}
963 static soinfo *find_loaded_library_by_name(const char* name) {
964 const char* search_name = SEARCH_NAME(name);
965 for (soinfo* si = solist; si != nullptr; si = si->next) {
966 if (!strcmp(search_name, si->name)) {
967 return si;
968 }
969 }
970 return nullptr;
971 }
// Resolve 'name' to a soinfo: reuse the loaded instance with that name,
// otherwise load it freshly via load_library.
static soinfo* find_library_internal(LoadTaskList& load_tasks, const char* name, int rtld_flags, const android_dlextinfo* extinfo) {
  soinfo* si = find_loaded_library_by_name(name);

  // Library might still be loaded, the accurate detection
  // of this fact is done by load_library.
  if (si == nullptr) {
    TRACE("[ '%s' has not been found by name. Trying harder...]", name);
    si = load_library(load_tasks, name, rtld_flags, extinfo);
  }

  return si;
}
987 static void soinfo_unload(soinfo* si);
// Returns true (and sets dlerror) if adding 'si' as a dependency of
// 'parent' would create a cycle; recurses through all of parent's
// ancestors via get_parents().
static bool is_recursive(soinfo* si, soinfo* parent) {
  if (parent == nullptr) {
    return false;
  }

  if (si == parent) {
    DL_ERR("recursive link to \"%s\"", si->name);
    return true;
  }

  // visit() aborts (returns false) as soon as a cycle is found; invert the
  // result to report "cycle detected".
  return !parent->get_parents().visit([&](soinfo* grandparent) {
    return !is_recursive(si, grandparent);
  });
}
1004 // TODO: this is slightly unusual way to construct
1005 // the global group for relocation. Not every RTLD_GLOBAL
1006 // library is included in this group for backwards-compatibility
1007 // reasons.
1008 //
1009 // This group consists of the main executable, LD_PRELOADs
1010 // and libraries with the DF_1_GLOBAL flag set.
// Collect the global group (see the comment above): every library from
// the main executable onward that carries the DF_1_GLOBAL flag.
static soinfo::soinfo_list_t make_global_group() {
  soinfo::soinfo_list_t global_group;
  // Scan starts at somain, so bootstrap entries before the main
  // executable in solist are deliberately excluded.
  for (soinfo* si = somain; si != nullptr; si = si->next) {
    if ((si->get_dt_flags_1() & DF_1_GLOBAL) != 0) {
      global_group.push_back(si);
    }
  }

  return global_group;
}
// Load and link 'library_names' plus, transitively, everything they need.
// On success the loaded roots are stored into soinfos[]; on any failure
// every library loaded by this call is unloaded again (failure_guard).
static bool find_libraries(soinfo* start_with, const char* const library_names[], size_t library_names_count, soinfo* soinfos[],
    soinfo* ld_preloads[], size_t ld_preloads_count, int rtld_flags, const android_dlextinfo* extinfo) {
  // Step 0: prepare.
  LoadTaskList load_tasks;
  for (size_t i = 0; i < library_names_count; ++i) {
    const char* name = library_names[i];
    load_tasks.push_back(LoadTask::create(name, start_with));
  }

  // Construct global_group.
  soinfo::soinfo_list_t global_group = make_global_group();

  // If soinfos array is null allocate one on stack.
  // The array is needed in case of failure; for example
  // when library_names[] = {libone.so, libtwo.so} and libone.so
  // is loaded correctly but libtwo.so failed for some reason.
  // In this case libone.so should be unloaded on return.
  // See also implementation of failure_guard below.

  if (soinfos == nullptr) {
    size_t soinfos_size = sizeof(soinfo*)*library_names_count;
    soinfos = reinterpret_cast<soinfo**>(alloca(soinfos_size));
    memset(soinfos, 0, soinfos_size);
  }

  // list of libraries to link - see step 2.
  size_t soinfos_count = 0;

  // Runs on every early return below; disabled only after a full success.
  auto failure_guard = make_scope_guard([&]() {
    // Housekeeping
    load_tasks.for_each([] (LoadTask* t) {
      LoadTask::deleter(t);
    });

    for (size_t i = 0; i<soinfos_count; ++i) {
      soinfo_unload(soinfos[i]);
    }
  });

  // Step 1: load and pre-link all DT_NEEDED libraries in breadth first order.
  for (LoadTask::unique_ptr task(load_tasks.pop_front()); task.get() != nullptr; task.reset(load_tasks.pop_front())) {
    soinfo* si = find_library_internal(load_tasks, task->get_name(), rtld_flags, extinfo);
    if (si == nullptr) {
      return false;
    }

    soinfo* needed_by = task->get_needed_by();

    if (is_recursive(si, needed_by)) {
      return false;
    }

    si->ref_count++;
    if (needed_by != nullptr) {
      needed_by->add_child(si);
    }

    // When ld_preloads is not null, the first
    // ld_preloads_count libs are in fact ld_preloads.
    if (ld_preloads != nullptr && soinfos_count < ld_preloads_count) {
      // Add LD_PRELOADed libraries to the global group for future runs.
      // There is no need to explicitly add them to the global group
      // for this run because they are going to appear in the local
      // group in the correct order.
      si->set_dt_flags_1(si->get_dt_flags_1() | DF_1_GLOBAL);

      ld_preloads[soinfos_count] = si;
    }

    if (soinfos_count < library_names_count) {
      soinfos[soinfos_count++] = si;
    }
  }

  // Step 2: link libraries.
  // Collect the local group: everything reachable from the roots, in BFS
  // order.
  soinfo::soinfo_list_t local_group;
  walk_dependencies_tree(
      start_with == nullptr ? soinfos : &start_with,
      start_with == nullptr ? soinfos_count : 1,
      [&] (soinfo* si) {
        local_group.push_back(si);
        return true;
      });

  // Relocate each member once; already-linked libraries are skipped.
  bool linked = local_group.visit([&](soinfo* si) {
    if ((si->flags & FLAG_LINKED) == 0) {
      if (!si->link_image(global_group, local_group, extinfo)) {
        return false;
      }
      si->flags |= FLAG_LINKED;
    }

    return true;
  });

  if (linked) {
    // Full success: keep everything loaded.
    failure_guard.disable();
  }

  return linked;
}
1123 static soinfo* find_library(const char* name, int rtld_flags, const android_dlextinfo* extinfo) {
1124 if (name == nullptr) {
1125 somain->ref_count++;
1126 return somain;
1127 }
1129 soinfo* si;
1131 if (!find_libraries(nullptr, &name, 1, &si, nullptr, 0, rtld_flags, extinfo)) {
1132 return nullptr;
1133 }
1135 return si;
1136 }
1138 static void soinfo_unload_schedule(soinfo::soinfo_list_t& unload_list, soinfo* si) {
1139 if (!si->can_unload()) {
1140 TRACE("not unloading '%s' - the binary is flagged with NODELETE", si->name);
1141 return;
1142 }
1144 if (si->ref_count == 1) {
1145 unload_list.push_back(si);
1147 if (si->has_min_version(0)) {
1148 soinfo* child = nullptr;
1149 while ((child = si->get_children().pop_front()) != nullptr) {
1150 TRACE("%s needs to unload %s", si->name, child->name);
1151 soinfo_unload_schedule(unload_list, child);
1152 }
1153 } else {
1154 for_each_dt_needed(si, [&] (const char* library_name) {
1155 TRACE("deprecated (old format of soinfo): %s needs to unload %s", si->name, library_name);
1156 soinfo* needed = find_library(library_name, RTLD_NOLOAD, nullptr);
1157 if (needed != nullptr) {
1158 soinfo_unload_schedule(unload_list, needed);
1159 } else {
1160 // Not found: for example if symlink was deleted between dlopen and dlclose
1161 // Since we cannot really handle errors at this point - print and continue.
1162 PRINT("warning: couldn't find %s needed by %s on unload.", library_name, si->name);
1163 }
1164 });
1165 }
1167 si->ref_count = 0;
1168 } else {
1169 si->ref_count--;
1170 TRACE("not unloading '%s', decrementing ref_count to %zd", si->name, si->ref_count);
1171 }
1172 }
1174 static void soinfo_unload(soinfo* root) {
1175 soinfo::soinfo_list_t unload_list;
1176 soinfo_unload_schedule(unload_list, root);
1177 unload_list.for_each([](soinfo* si) {
1178 si->call_destructors();
1179 });
1181 soinfo* si = nullptr;
1182 while ((si = unload_list.pop_front()) != nullptr) {
1183 TRACE("unloading '%s'", si->name);
1184 notify_gdb_of_unload(si);
1185 soinfo_free(si);
1186 }
1187 }
1189 void do_android_get_LD_LIBRARY_PATH(char* buffer, size_t buffer_size) {
1190 // Use basic string manipulation calls to avoid snprintf.
1191 // snprintf indirectly calls pthread_getspecific to get the size of a buffer.
1192 // When debug malloc is enabled, this call returns 0. This in turn causes
1193 // snprintf to do nothing, which causes libraries to fail to load.
1194 // See b/17302493 for further details.
1195 // Once the above bug is fixed, this code can be modified to use
1196 // snprintf again.
1197 size_t required_len = strlen(kDefaultLdPaths[0]) + strlen(kDefaultLdPaths[1]) + 2;
1198 if (buffer_size < required_len) {
1199 __libc_fatal("android_get_LD_LIBRARY_PATH failed, buffer too small: buffer len %zu, required len %zu",
1200 buffer_size, required_len);
1201 }
1202 char* end = stpcpy(buffer, kDefaultLdPaths[0]);
1203 *end = ':';
1204 strcpy(end + 1, kDefaultLdPaths[1]);
1205 }
1207 void do_android_update_LD_LIBRARY_PATH(const char* ld_library_path) {
1208 if (!get_AT_SECURE()) {
1209 parse_LD_LIBRARY_PATH(ld_library_path);
1210 }
1211 }
1213 soinfo* do_dlopen(const char* name, int flags, const android_dlextinfo* extinfo) {
1214 if ((flags & ~(RTLD_NOW|RTLD_LAZY|RTLD_LOCAL|RTLD_GLOBAL|RTLD_NODELETE|RTLD_NOLOAD)) != 0) {
1215 DL_ERR("invalid flags to dlopen: %x", flags);
1216 return nullptr;
1217 }
1218 if (extinfo != nullptr) {
1219 if ((extinfo->flags & ~(ANDROID_DLEXT_VALID_FLAG_BITS)) != 0) {
1220 DL_ERR("invalid extended flags to android_dlopen_ext: 0x%" PRIx64, extinfo->flags);
1221 return nullptr;
1222 }
1223 if ((extinfo->flags & ANDROID_DLEXT_USE_LIBRARY_FD) == 0 &&
1224 (extinfo->flags & ANDROID_DLEXT_USE_LIBRARY_FD_OFFSET) != 0) {
1225 DL_ERR("invalid extended flag combination (ANDROID_DLEXT_USE_LIBRARY_FD_OFFSET without ANDROID_DLEXT_USE_LIBRARY_FD): 0x%" PRIx64, extinfo->flags);
1226 return nullptr;
1227 }
1228 }
1229 protect_data(PROT_READ | PROT_WRITE);
1230 soinfo* si = find_library(name, flags, extinfo);
1231 if (si != nullptr) {
1232 si->call_constructors();
1233 }
1234 protect_data(PROT_READ);
1235 return si;
1236 }
1238 void do_dlclose(soinfo* si) {
1239 protect_data(PROT_READ | PROT_WRITE);
1240 soinfo_unload(si);
1241 protect_data(PROT_READ);
1242 }
1244 static ElfW(Addr) call_ifunc_resolver(ElfW(Addr) resolver_addr) {
1245 typedef ElfW(Addr) (*ifunc_resolver_t)(void);
1246 ifunc_resolver_t ifunc_resolver = reinterpret_cast<ifunc_resolver_t>(resolver_addr);
1247 ElfW(Addr) ifunc_addr = ifunc_resolver();
1248 TRACE_TYPE(RELO, "Called ifunc_resolver@%p. The result is %p", ifunc_resolver, reinterpret_cast<void*>(ifunc_addr));
1250 return ifunc_addr;
1251 }
1253 #if defined(USE_RELA)
// Applies 'count' RELA-format relocations starting at 'rela' to this
// library. Symbols are resolved via soinfo_do_lookup() against the
// global group, then the local group. Returns 0 on success, -1 on any
// error (lookup failure for a non-weak symbol, range overflow, or an
// unsupported relocation type).
int soinfo::relocate(ElfW(Rela)* rela, unsigned count, const soinfo_list_t& global_group, const soinfo_list_t& local_group) {
  for (size_t idx = 0; idx < count; ++idx, ++rela) {
    unsigned type = ELFW(R_TYPE)(rela->r_info);
    unsigned sym = ELFW(R_SYM)(rela->r_info);
    // Address of the location being patched (load-bias adjusted).
    ElfW(Addr) reloc = static_cast<ElfW(Addr)>(rela->r_offset + load_bias);
    ElfW(Addr) sym_addr = 0;
    const char* sym_name = nullptr;

    DEBUG("Processing '%s' relocation at index %zd", name, idx);
    if (type == 0) { // R_*_NONE
      continue;
    }

    ElfW(Sym)* s = nullptr;
    soinfo* lsi = nullptr;

    if (sym != 0) {
      sym_name = get_string(symtab_[sym].st_name);
      s = soinfo_do_lookup(this, sym_name, &lsi, global_group,local_group);
      if (s == nullptr) {
        // We only allow an undefined symbol if this is a weak reference...
        s = &symtab_[sym];
        if (ELF_ST_BIND(s->st_info) != STB_WEAK) {
          DL_ERR("cannot locate symbol \"%s\" referenced by \"%s\"...", sym_name, name);
          return -1;
        }

        /* IHI0044C AAELF 4.5.1.1:

           Libraries are not searched to resolve weak references.
           It is not an error for a weak reference to remain unsatisfied.

           During linking, the value of an undefined weak reference is:
           - Zero if the relocation type is absolute
           - The address of the place if the relocation is pc-relative
           - The address of nominal base address if the relocation
             type is base-relative.
        */

        // Unresolved weak reference: pick the value mandated above for
        // the relocation types we know; reject anything else.
        switch (type) {
#if defined(__aarch64__)
          case R_AARCH64_JUMP_SLOT:
          case R_AARCH64_GLOB_DAT:
          case R_AARCH64_ABS64:
          case R_AARCH64_ABS32:
          case R_AARCH64_ABS16:
          case R_AARCH64_RELATIVE:
          case R_AARCH64_IRELATIVE:
            /*
             * The sym_addr was initialized to be zero above, or the relocation
             * code below does not care about value of sym_addr.
             * No need to do anything.
             */
            break;
#elif defined(__x86_64__)
          case R_X86_64_JUMP_SLOT:
          case R_X86_64_GLOB_DAT:
          case R_X86_64_32:
          case R_X86_64_64:
          case R_X86_64_RELATIVE:
          case R_X86_64_IRELATIVE:
            // No need to do anything.
            break;
          case R_X86_64_PC32:
            // pc-relative: the value is the address of the place.
            sym_addr = reloc;
            break;
#endif
          default:
            DL_ERR("unknown weak reloc type %d @ %p (%zu)", type, rela, idx);
            return -1;
        }
      } else {
        // We got a definition.
        sym_addr = lsi->resolve_symbol_address(s);
      }
      count_relocation(kRelocSymbol);
    }

    // Apply the relocation. Note which forms write ('=') versus
    // accumulate ('+=') into the target word.
    switch (type) {
#if defined(__aarch64__)
      case R_AARCH64_JUMP_SLOT:
        count_relocation(kRelocAbsolute);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO JMP_SLOT %16llx <- %16llx %s\n",
                   reloc, (sym_addr + rela->r_addend), sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = (sym_addr + rela->r_addend);
        break;
      case R_AARCH64_GLOB_DAT:
        count_relocation(kRelocAbsolute);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO GLOB_DAT %16llx <- %16llx %s\n",
                   reloc, (sym_addr + rela->r_addend), sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = (sym_addr + rela->r_addend);
        break;
      case R_AARCH64_ABS64:
        count_relocation(kRelocAbsolute);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO ABS64 %16llx <- %16llx %s\n",
                   reloc, (sym_addr + rela->r_addend), sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr + rela->r_addend);
        break;
      case R_AARCH64_ABS32:
        count_relocation(kRelocAbsolute);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO ABS32 %16llx <- %16llx %s\n",
                   reloc, (sym_addr + rela->r_addend), sym_name);
        // Result must fit in 32 bits (signed or unsigned interpretation).
        if ((static_cast<ElfW(Addr)>(INT32_MIN) <= (*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend))) &&
            ((*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend)) <= static_cast<ElfW(Addr)>(UINT32_MAX))) {
          *reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr + rela->r_addend);
        } else {
          DL_ERR("0x%016llx out of range 0x%016llx to 0x%016llx",
                 (*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend)),
                 static_cast<ElfW(Addr)>(INT32_MIN),
                 static_cast<ElfW(Addr)>(UINT32_MAX));
          return -1;
        }
        break;
      case R_AARCH64_ABS16:
        count_relocation(kRelocAbsolute);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO ABS16 %16llx <- %16llx %s\n",
                   reloc, (sym_addr + rela->r_addend), sym_name);
        // Result must fit in 16 bits.
        if ((static_cast<ElfW(Addr)>(INT16_MIN) <= (*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend))) &&
            ((*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend)) <= static_cast<ElfW(Addr)>(UINT16_MAX))) {
          *reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr + rela->r_addend);
        } else {
          DL_ERR("0x%016llx out of range 0x%016llx to 0x%016llx",
                 (*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend)),
                 static_cast<ElfW(Addr)>(INT16_MIN),
                 static_cast<ElfW(Addr)>(UINT16_MAX));
          return -1;
        }
        break;
      case R_AARCH64_PREL64:
        count_relocation(kRelocRelative);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO REL64 %16llx <- %16llx - %16llx %s\n",
                   reloc, (sym_addr + rela->r_addend), rela->r_offset, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr + rela->r_addend) - rela->r_offset;
        break;
      case R_AARCH64_PREL32:
        count_relocation(kRelocRelative);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO REL32 %16llx <- %16llx - %16llx %s\n",
                   reloc, (sym_addr + rela->r_addend), rela->r_offset, sym_name);
        // pc-relative; result must fit in 32 bits.
        if ((static_cast<ElfW(Addr)>(INT32_MIN) <= (*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset))) &&
            ((*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)) <= static_cast<ElfW(Addr)>(UINT32_MAX))) {
          *reinterpret_cast<ElfW(Addr)*>(reloc) += ((sym_addr + rela->r_addend) - rela->r_offset);
        } else {
          DL_ERR("0x%016llx out of range 0x%016llx to 0x%016llx",
                 (*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)),
                 static_cast<ElfW(Addr)>(INT32_MIN),
                 static_cast<ElfW(Addr)>(UINT32_MAX));
          return -1;
        }
        break;
      case R_AARCH64_PREL16:
        count_relocation(kRelocRelative);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO REL16 %16llx <- %16llx - %16llx %s\n",
                   reloc, (sym_addr + rela->r_addend), rela->r_offset, sym_name);
        // pc-relative; result must fit in 16 bits.
        if ((static_cast<ElfW(Addr)>(INT16_MIN) <= (*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset))) &&
            ((*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)) <= static_cast<ElfW(Addr)>(UINT16_MAX))) {
          *reinterpret_cast<ElfW(Addr)*>(reloc) += ((sym_addr + rela->r_addend) - rela->r_offset);
        } else {
          DL_ERR("0x%016llx out of range 0x%016llx to 0x%016llx",
                 (*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)),
                 static_cast<ElfW(Addr)>(INT16_MIN),
                 static_cast<ElfW(Addr)>(UINT16_MAX));
          return -1;
        }
        break;

      case R_AARCH64_RELATIVE:
        count_relocation(kRelocRelative);
        MARK(rela->r_offset);
        // RELATIVE relocations must not reference a symbol.
        if (sym) {
          DL_ERR("odd RELATIVE form...");
          return -1;
        }
        TRACE_TYPE(RELO, "RELO RELATIVE %16llx <- %16llx\n",
                   reloc, (base + rela->r_addend));
        *reinterpret_cast<ElfW(Addr)*>(reloc) = (base + rela->r_addend);
        break;

      case R_AARCH64_IRELATIVE:
        count_relocation(kRelocRelative);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO IRELATIVE %16llx <- %16llx\n", reloc, (base + rela->r_addend));
        // The addend points at an ifunc resolver; store what it returns.
        *reinterpret_cast<ElfW(Addr)*>(reloc) = call_ifunc_resolver(base + rela->r_addend);
        break;

      case R_AARCH64_COPY:
        /*
         * ET_EXEC is not supported so this should not happen.
         *
         * http://infocenter.arm.com/help/topic/com.arm.doc.ihi0044d/IHI0044D_aaelf.pdf
         *
         * Section 4.7.1.10 "Dynamic relocations"
         * R_AARCH64_COPY may only appear in executable objects where e_type is
         * set to ET_EXEC.
         */
        DL_ERR("%s R_AARCH64_COPY relocations are not supported", name);
        return -1;
      case R_AARCH64_TLS_TPREL64:
        // TLS relocations are traced but not applied here.
        TRACE_TYPE(RELO, "RELO TLS_TPREL64 *** %16llx <- %16llx - %16llx\n",
                   reloc, (sym_addr + rela->r_addend), rela->r_offset);
        break;
      case R_AARCH64_TLS_DTPREL32:
        TRACE_TYPE(RELO, "RELO TLS_DTPREL32 *** %16llx <- %16llx - %16llx\n",
                   reloc, (sym_addr + rela->r_addend), rela->r_offset);
        break;
#elif defined(__x86_64__)
      case R_X86_64_JUMP_SLOT:
        count_relocation(kRelocAbsolute);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO JMP_SLOT %08zx <- %08zx %s", static_cast<size_t>(reloc),
                   static_cast<size_t>(sym_addr + rela->r_addend), sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend;
        break;
      case R_X86_64_GLOB_DAT:
        count_relocation(kRelocAbsolute);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO GLOB_DAT %08zx <- %08zx %s", static_cast<size_t>(reloc),
                   static_cast<size_t>(sym_addr + rela->r_addend), sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend;
        break;
      case R_X86_64_RELATIVE:
        count_relocation(kRelocRelative);
        MARK(rela->r_offset);
        // RELATIVE relocations must not reference a symbol.
        if (sym) {
          DL_ERR("odd RELATIVE form...");
          return -1;
        }
        TRACE_TYPE(RELO, "RELO RELATIVE %08zx <- +%08zx", static_cast<size_t>(reloc),
                   static_cast<size_t>(base));
        *reinterpret_cast<ElfW(Addr)*>(reloc) = base + rela->r_addend;
        break;
      case R_X86_64_IRELATIVE:
        count_relocation(kRelocRelative);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO IRELATIVE %16llx <- %16llx\n", reloc, (base + rela->r_addend));
        *reinterpret_cast<ElfW(Addr)*>(reloc) = call_ifunc_resolver(base + rela->r_addend);
        break;
      case R_X86_64_32:
        count_relocation(kRelocRelative);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO R_X86_64_32 %08zx <- +%08zx %s", static_cast<size_t>(reloc),
                   static_cast<size_t>(sym_addr), sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend;
        break;
      case R_X86_64_64:
        count_relocation(kRelocRelative);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO R_X86_64_64 %08zx <- +%08zx %s", static_cast<size_t>(reloc),
                   static_cast<size_t>(sym_addr), sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend;
        break;
      case R_X86_64_PC32:
        count_relocation(kRelocRelative);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO R_X86_64_PC32 %08zx <- +%08zx (%08zx - %08zx) %s",
                   static_cast<size_t>(reloc), static_cast<size_t>(sym_addr - reloc),
                   static_cast<size_t>(sym_addr), static_cast<size_t>(reloc), sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend - reloc;
        break;
#endif

      default:
        DL_ERR("unknown reloc type %d @ %p (%zu)", type, rela, idx);
        return -1;
    }
  }
  return 0;
}
1530 #else // REL, not RELA.
// REL-format counterpart of the function above (arm/x86/mips): REL
// entries carry no addend, so the implicit addend already stored at the
// target location is used ('+=' forms). Returns 0 on success, -1 on error.
int soinfo::relocate(ElfW(Rel)* rel, unsigned count, const soinfo_list_t& global_group, const soinfo_list_t& local_group) {
  for (size_t idx = 0; idx < count; ++idx, ++rel) {
    unsigned type = ELFW(R_TYPE)(rel->r_info);
    // TODO: don't use unsigned for 'sym'. Use uint32_t or ElfW(Addr) instead.
    unsigned sym = ELFW(R_SYM)(rel->r_info);
    // Address of the location being patched (load-bias adjusted).
    ElfW(Addr) reloc = static_cast<ElfW(Addr)>(rel->r_offset + load_bias);
    ElfW(Addr) sym_addr = 0;
    const char* sym_name = nullptr;

    DEBUG("Processing '%s' relocation at index %zd", name, idx);
    if (type == 0) { // R_*_NONE
      continue;
    }

    ElfW(Sym)* s = nullptr;
    soinfo* lsi = nullptr;

    if (sym != 0) {
      sym_name = get_string(symtab_[sym].st_name);
      s = soinfo_do_lookup(this, sym_name, &lsi, global_group, local_group);
      if (s == nullptr) {
        // We only allow an undefined symbol if this is a weak reference...
        s = &symtab_[sym];
        if (ELF_ST_BIND(s->st_info) != STB_WEAK) {
          DL_ERR("cannot locate symbol \"%s\" referenced by \"%s\"...", sym_name, name);
          return -1;
        }

        /* IHI0044C AAELF 4.5.1.1:

           Libraries are not searched to resolve weak references.
           It is not an error for a weak reference to remain
           unsatisfied.

           During linking, the value of an undefined weak reference is:
           - Zero if the relocation type is absolute
           - The address of the place if the relocation is pc-relative
           - The address of nominal base address if the relocation
             type is base-relative.
        */

        // Unresolved weak reference: pick the value mandated above for
        // the relocation types we know; reject anything else.
        switch (type) {
#if defined(__arm__)
          case R_ARM_JUMP_SLOT:
          case R_ARM_GLOB_DAT:
          case R_ARM_ABS32:
          case R_ARM_RELATIVE:    /* Don't care. */
            // sym_addr was initialized to be zero above or relocation
            // code below does not care about value of sym_addr.
            // No need to do anything.
            break;
#elif defined(__i386__)
          case R_386_JMP_SLOT:
          case R_386_GLOB_DAT:
          case R_386_32:
          case R_386_RELATIVE:    /* Don't care. */
          case R_386_IRELATIVE:
            // sym_addr was initialized to be zero above or relocation
            // code below does not care about value of sym_addr.
            // No need to do anything.
            break;
          case R_386_PC32:
            // pc-relative: the value is the address of the place.
            sym_addr = reloc;
            break;
#endif

#if defined(__arm__)
          case R_ARM_COPY:
            // Fall through. Can't really copy if weak symbol is not found at run-time.
#endif
          default:
            DL_ERR("unknown weak reloc type %d @ %p (%zu)", type, rel, idx);
            return -1;
        }
      } else {
        // We got a definition.
        sym_addr = lsi->resolve_symbol_address(s);
      }
      count_relocation(kRelocSymbol);
    }

    // Apply the relocation. REL has no explicit addend: '+=' forms pick
    // up the implicit addend already stored at the target.
    switch (type) {
#if defined(__arm__)
      case R_ARM_JUMP_SLOT:
        count_relocation(kRelocAbsolute);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO JMP_SLOT %08x <- %08x %s", reloc, sym_addr, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr;
        break;
      case R_ARM_GLOB_DAT:
        count_relocation(kRelocAbsolute);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO GLOB_DAT %08x <- %08x %s", reloc, sym_addr, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr;
        break;
      case R_ARM_ABS32:
        count_relocation(kRelocAbsolute);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO ABS %08x <- %08x %s", reloc, sym_addr, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) += sym_addr;
        break;
      case R_ARM_REL32:
        count_relocation(kRelocRelative);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO REL32 %08x <- %08x - %08x %s",
                   reloc, sym_addr, rel->r_offset, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) += sym_addr - rel->r_offset;
        break;
      case R_ARM_COPY:
        /*
         * ET_EXEC is not supported so this should not happen.
         *
         * http://infocenter.arm.com/help/topic/com.arm.doc.ihi0044d/IHI0044D_aaelf.pdf
         *
         * Section 4.7.1.10 "Dynamic relocations"
         * R_ARM_COPY may only appear in executable objects where e_type is
         * set to ET_EXEC.
         */
        DL_ERR("%s R_ARM_COPY relocations are not supported", name);
        return -1;
#elif defined(__i386__)
      case R_386_JMP_SLOT:
        count_relocation(kRelocAbsolute);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO JMP_SLOT %08x <- %08x %s", reloc, sym_addr, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr;
        break;
      case R_386_GLOB_DAT:
        count_relocation(kRelocAbsolute);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO GLOB_DAT %08x <- %08x %s", reloc, sym_addr, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr;
        break;
      case R_386_32:
        count_relocation(kRelocRelative);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO R_386_32 %08x <- +%08x %s", reloc, sym_addr, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) += sym_addr;
        break;
      case R_386_PC32:
        count_relocation(kRelocRelative);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO R_386_PC32 %08x <- +%08x (%08x - %08x) %s",
                   reloc, (sym_addr - reloc), sym_addr, reloc, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr - reloc);
        break;
#elif defined(__mips__)
      case R_MIPS_REL32:
#if defined(__LP64__)
        // MIPS Elf64_Rel entries contain compound relocations
        // We only handle the R_MIPS_NONE|R_MIPS_64|R_MIPS_REL32 case
        if (ELF64_R_TYPE2(rel->r_info) != R_MIPS_64 ||
            ELF64_R_TYPE3(rel->r_info) != R_MIPS_NONE) {
          DL_ERR("Unexpected compound relocation type:%d type2:%d type3:%d @ %p (%zu)",
                 type, (unsigned)ELF64_R_TYPE2(rel->r_info),
                 (unsigned)ELF64_R_TYPE3(rel->r_info), rel, idx);
          return -1;
        }
#endif
        count_relocation(kRelocAbsolute);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO REL32 %08zx <- %08zx %s", static_cast<size_t>(reloc),
                   static_cast<size_t>(sym_addr), sym_name ? sym_name : "*SECTIONHDR*");
        // Symbol reference adds the symbol address; section reference adds base.
        if (s) {
          *reinterpret_cast<ElfW(Addr)*>(reloc) += sym_addr;
        } else {
          *reinterpret_cast<ElfW(Addr)*>(reloc) += base;
        }
        break;
#endif

#if defined(__arm__)
      case R_ARM_RELATIVE:
#elif defined(__i386__)
      case R_386_RELATIVE:
#endif
        count_relocation(kRelocRelative);
        MARK(rel->r_offset);
        // RELATIVE relocations must not reference a symbol.
        if (sym) {
          DL_ERR("odd RELATIVE form...");
          return -1;
        }
        TRACE_TYPE(RELO, "RELO RELATIVE %p <- +%p",
                   reinterpret_cast<void*>(reloc), reinterpret_cast<void*>(base));
        *reinterpret_cast<ElfW(Addr)*>(reloc) += base;
        break;
#if defined(__i386__)
      case R_386_IRELATIVE:
        count_relocation(kRelocRelative);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO IRELATIVE %p <- %p", reinterpret_cast<void*>(reloc), reinterpret_cast<void*>(base));
        // The implicit addend stored at the target points at the resolver.
        *reinterpret_cast<ElfW(Addr)*>(reloc) = call_ifunc_resolver(base + *reinterpret_cast<ElfW(Addr)*>(reloc));
        break;
#endif

      default:
        DL_ERR("unknown reloc type %d @ %p (%zu)", type, rel, idx);
        return -1;
    }
  }
  return 0;
}
1733 #endif
1735 #if defined(__mips__)
// Relocates the MIPS GOT: rebases the local entries by load_bias, then
// resolves each global entry (symbols mips_gotsym_ .. mips_symtabno_-1)
// against the global/local groups. Returns false if a non-weak global
// symbol cannot be located.
bool soinfo::mips_relocate_got(const soinfo_list_t& global_group, const soinfo_list_t& local_group) {
  ElfW(Addr)** got = plt_got_;
  if (got == nullptr) {
    return true;
  }

  // got[0] is the address of the lazy resolver function.
  // got[1] may be used for a GNU extension.
  // Set it to a recognizable address in case someone calls it (should be _rtld_bind_start).
  // FIXME: maybe this should be in a separate routine?
  if ((flags & FLAG_LINKER) == 0) {
    size_t g = 0;
    got[g++] = reinterpret_cast<ElfW(Addr)*>(0xdeadbeef);
    // NOTE(review): this probes the sign bit of got[1] to detect the GNU
    // GOT extension marker - confirm against the MIPS psABI.
    if (reinterpret_cast<intptr_t>(got[g]) < 0) {
      got[g++] = reinterpret_cast<ElfW(Addr)*>(0xdeadfeed);
    }
    // Relocate the local GOT entries.
    for (; g < mips_local_gotno_; g++) {
      got[g] = reinterpret_cast<ElfW(Addr)*>(reinterpret_cast<uintptr_t>(got[g]) + load_bias);
    }
  }

  // Now for the global GOT entries...
  ElfW(Sym)* sym = symtab_ + mips_gotsym_;
  got = plt_got_ + mips_local_gotno_;
  for (size_t g = mips_gotsym_; g < mips_symtabno_; g++, sym++, got++) {
    // This is an undefined reference... try to locate it.
    const char* sym_name = get_string(sym->st_name);
    soinfo* lsi = nullptr;
    ElfW(Sym)* s = soinfo_do_lookup(this, sym_name, &lsi, global_group, local_group);
    if (s == nullptr) {
      // We only allow an undefined symbol if this is a weak reference.
      s = &symtab_[g];
      if (ELF_ST_BIND(s->st_info) != STB_WEAK) {
        DL_ERR("cannot locate \"%s\"...", sym_name);
        return false;
      }
      // Unresolved weak: the GOT entry is zero.
      *got = 0;
    } else {
      // FIXME: is this sufficient?
      // For reference see NetBSD link loader
      // http://cvsweb.netbsd.org/bsdweb.cgi/src/libexec/ld.elf_so/arch/mips/mips_reloc.c?rev=1.53&content-type=text/x-cvsweb-markup
      *got = reinterpret_cast<ElfW(Addr)*>(lsi->resolve_symbol_address(s));
    }
  }
  return true;
}
1783 #endif
1785 void soinfo::call_array(const char* array_name __unused, linker_function_t* functions, size_t count, bool reverse) {
1786 if (functions == nullptr) {
1787 return;
1788 }
1790 TRACE("[ Calling %s (size %zd) @ %p for '%s' ]", array_name, count, functions, name);
1792 int begin = reverse ? (count - 1) : 0;
1793 int end = reverse ? -1 : count;
1794 int step = reverse ? -1 : 1;
1796 for (int i = begin; i != end; i += step) {
1797 TRACE("[ %s[%d] == %p ]", array_name, i, functions[i]);
1798 call_function("function", functions[i]);
1799 }
1801 TRACE("[ Done calling %s for '%s' ]", array_name, name);
1802 }
1804 void soinfo::call_function(const char* function_name __unused, linker_function_t function) {
1805 if (function == nullptr || reinterpret_cast<uintptr_t>(function) == static_cast<uintptr_t>(-1)) {
1806 return;
1807 }
1809 TRACE("[ Calling %s @ %p for '%s' ]", function_name, function, name);
1810 function();
1811 TRACE("[ Done calling %s @ %p for '%s' ]", function_name, function, name);
1813 // The function may have called dlopen(3) or dlclose(3), so we need to ensure our data structures
1814 // are still writable. This happens with our debug malloc (see http://b/7941716).
1815 protect_data(PROT_READ | PROT_WRITE);
1816 }
1818 void soinfo::call_pre_init_constructors() {
1819 // DT_PREINIT_ARRAY functions are called before any other constructors for executables,
1820 // but ignored in a shared library.
1821 call_array("DT_PREINIT_ARRAY", preinit_array_, preinit_array_count_, false);
1822 }
1824 void soinfo::call_constructors() {
1825 if (constructors_called) {
1826 return;
1827 }
1829 // We set constructors_called before actually calling the constructors, otherwise it doesn't
1830 // protect against recursive constructor calls. One simple example of constructor recursion
1831 // is the libc debug malloc, which is implemented in libc_malloc_debug_leak.so:
1832 // 1. The program depends on libc, so libc's constructor is called here.
1833 // 2. The libc constructor calls dlopen() to load libc_malloc_debug_leak.so.
1834 // 3. dlopen() calls the constructors on the newly created
1835 // soinfo for libc_malloc_debug_leak.so.
1836 // 4. The debug .so depends on libc, so CallConstructors is
1837 // called again with the libc soinfo. If it doesn't trigger the early-
1838 // out above, the libc constructor will be called again (recursively!).
1839 constructors_called = true;
1841 if ((flags & FLAG_EXE) == 0 && preinit_array_ != nullptr) {
1842 // The GNU dynamic linker silently ignores these, but we warn the developer.
1843 PRINT("\"%s\": ignoring %zd-entry DT_PREINIT_ARRAY in shared library!",
1844 name, preinit_array_count_);
1845 }
1847 get_children().for_each([] (soinfo* si) {
1848 si->call_constructors();
1849 });
1851 TRACE("\"%s\": calling constructors", name);
1853 // DT_INIT should be called before DT_INIT_ARRAY if both are present.
1854 call_function("DT_INIT", init_func_);
1855 call_array("DT_INIT_ARRAY", init_array_, init_array_count_, false);
1856 }
1858 void soinfo::call_destructors() {
1859 if (!constructors_called) {
1860 return;
1861 }
1862 TRACE("\"%s\": calling destructors", name);
1864 // DT_FINI_ARRAY must be parsed in reverse order.
1865 call_array("DT_FINI_ARRAY", fini_array_, fini_array_count_, true);
1867 // DT_FINI should be called after DT_FINI_ARRAY if both are present.
1868 call_function("DT_FINI", fini_func_);
1870 // This is needed on second call to dlopen
1871 // after library has been unloaded with RTLD_NODELETE
1872 constructors_called = false;
1873 }
1875 void soinfo::add_child(soinfo* child) {
1876 if (has_min_version(0)) {
1877 child->parents_.push_back(this);
1878 this->children_.push_back(child);
1879 }
1880 }
1882 void soinfo::remove_all_links() {
1883 if (!has_min_version(0)) {
1884 return;
1885 }
1887 // 1. Untie connected soinfos from 'this'.
1888 children_.for_each([&] (soinfo* child) {
1889 child->parents_.remove_if([&] (const soinfo* parent) {
1890 return parent == this;
1891 });
1892 });
1894 parents_.for_each([&] (soinfo* parent) {
1895 parent->children_.remove_if([&] (const soinfo* child) {
1896 return child == this;
1897 });
1898 });
1900 // 2. Once everything untied - clear local lists.
1901 parents_.clear();
1902 children_.clear();
1903 }
1905 dev_t soinfo::get_st_dev() const {
1906 if (has_min_version(0)) {
1907 return st_dev_;
1908 }
1910 return 0;
1911 };
1913 ino_t soinfo::get_st_ino() const {
1914 if (has_min_version(0)) {
1915 return st_ino_;
1916 }
1918 return 0;
1919 }
1921 off64_t soinfo::get_file_offset() const {
1922 if (has_min_version(1)) {
1923 return file_offset_;
1924 }
1926 return 0;
1927 }
1929 uint32_t soinfo::get_rtld_flags() const {
1930 if (has_min_version(1)) {
1931 return rtld_flags_;
1932 }
1934 return 0;
1935 }
1937 uint32_t soinfo::get_dt_flags_1() const {
1938 if (has_min_version(1)) {
1939 return dt_flags_1_;
1940 }
1942 return 0;
1943 }
1944 void soinfo::set_dt_flags_1(uint32_t dt_flags_1) {
1945 if (has_min_version(1)) {
1946 if ((dt_flags_1 & DF_1_GLOBAL) != 0) {
1947 rtld_flags_ |= RTLD_GLOBAL;
1948 }
1950 if ((dt_flags_1 & DF_1_NODELETE) != 0) {
1951 rtld_flags_ |= RTLD_NODELETE;
1952 }
1954 dt_flags_1_ = dt_flags_1;
1955 }
1956 }
// This is the list returned by get_children()/get_parents() when
// 'this->flags' does not have FLAG_NEW_SOINFO set.
1960 static soinfo::soinfo_list_t g_empty_list;
1962 soinfo::soinfo_list_t& soinfo::get_children() {
1963 if (has_min_version(0)) {
1964 return children_;
1965 }
1967 return g_empty_list;
1968 }
1970 soinfo::soinfo_list_t& soinfo::get_parents() {
1971 if (has_min_version(0)) {
1972 return parents_;
1973 }
1975 return g_empty_list;
1976 }
1978 ElfW(Addr) soinfo::resolve_symbol_address(ElfW(Sym)* s) {
1979 if (ELF_ST_TYPE(s->st_info) == STT_GNU_IFUNC) {
1980 return call_ifunc_resolver(s->st_value + load_bias);
1981 }
1983 return static_cast<ElfW(Addr)>(s->st_value + load_bias);
1984 }
1986 const char* soinfo::get_string(ElfW(Word) index) const {
1987 if (has_min_version(1) && (index >= strtab_size_)) {
1988 __libc_fatal("%s: strtab out of bounds error; STRSZ=%zd, name=%d", name, strtab_size_, index);
1989 }
1991 return strtab_ + index;
1992 }
1994 bool soinfo::is_gnu_hash() const {
1995 return (flags & FLAG_GNU_HASH) != 0;
1996 }
1998 bool soinfo::can_unload() const {
1999 return (get_rtld_flags() & (RTLD_NODELETE | RTLD_GLOBAL)) == 0;
2000 }
/* Ensure fds 0, 1 and 2 are all open by pointing any that are closed
 * at /dev/null. Closes the security hole where a setuid program
 * inherits closed stdio descriptors and later opens sensitive files
 * onto them. Returns 0 on success, -1 if any step failed. */
static int nullify_closed_stdio() {
  int return_value = 0;

  int dev_null = TEMP_FAILURE_RETRY(open("/dev/null", O_RDWR));
  if (dev_null < 0) {
    DL_ERR("cannot open /dev/null: %s", strerror(errno));
    return -1;
  }
  TRACE("[ Opened /dev/null file-descriptor=%d]", dev_null);

  for (int fd = 0; fd < 3; fd++) {
    // If /dev/null happened to land on this stdio fd, it is already set up.
    if (fd == dev_null) {
      continue;
    }

    TRACE("[ Nullifying stdio file descriptor %d]", fd);
    int status = TEMP_FAILURE_RETRY(fcntl(fd, F_GETFL));

    // A successful fcntl means the descriptor is open; leave it alone.
    if (status != -1) {
      continue;
    }

    // The only acceptable failure is EBADF ("not open"); anything else
    // is reported but does not stop us from fixing the remaining fds.
    if (errno != EBADF) {
      DL_ERR("fcntl failed: %s", strerror(errno));
      return_value = -1;
      continue;
    }

    // Point the closed descriptor at /dev/null, retrying on EINTR.
    status = TEMP_FAILURE_RETRY(dup2(dev_null, fd));
    if (status < 0) {
      DL_ERR("dup2 failed: %s", strerror(errno));
      return_value = -1;
      continue;
    }
  }

  // If /dev/null did not land on one of fds 0-2 it is a spare; close it.
  if (dev_null > 2) {
    TRACE("[ Closing /dev/null file-descriptor=%d]", dev_null);
    if (TEMP_FAILURE_RETRY(close(dev_null)) == -1) {
      DL_ERR("close failed: %s", strerror(errno));
      return_value = -1;
    }
  }

  return return_value;
}
// First linking phase: walk this image's PT_DYNAMIC section and cache
// every table the linker needs (hash tables, string/symbol tables,
// relocation tables, init/fini arrays, flags). No relocations are
// applied here — that is link_image()'s job. Returns false (with
// DL_ERR set) if the dynamic section is missing or malformed.
bool soinfo::prelink_image() {
  /* Extract dynamic section */
  ElfW(Word) dynamic_flags = 0;
  phdr_table_get_dynamic_section(phdr, phnum, load_bias, &dynamic, &dynamic_flags);

  /* We can't log anything until the linker is relocated */
  bool relocating_linker = (flags & FLAG_LINKER) != 0;
  if (!relocating_linker) {
    INFO("[ linking %s ]", name);
    DEBUG("si->base = %p si->flags = 0x%08x", reinterpret_cast<void*>(base), flags);
  }

  if (dynamic == nullptr) {
    if (!relocating_linker) {
      DL_ERR("missing PT_DYNAMIC in \"%s\"", name);
    }
    return false;
  } else {
    if (!relocating_linker) {
      DEBUG("dynamic = %p", dynamic);
    }
  }

#if defined(__arm__)
  // Cache the ARM EH index table for unwinding; failure is non-fatal.
  (void) phdr_table_get_arm_exidx(phdr, phnum, load_bias,
                                  &ARM_exidx, &ARM_exidx_count);
#endif

  // Extract useful information from dynamic section.
  uint32_t needed_count = 0;
  for (ElfW(Dyn)* d = dynamic; d->d_tag != DT_NULL; ++d) {
    DEBUG("d = %p, d[0](tag) = %p d[1](val) = %p",
          d, reinterpret_cast<void*>(d->d_tag), reinterpret_cast<void*>(d->d_un.d_val));
    switch (d->d_tag) {
      case DT_SONAME:
        // TODO: glibc dynamic linker uses this name for
        // initial library lookup; consider doing the same here.
        break;

      case DT_HASH:
        // SysV hash layout: [nbucket, nchain, bucket[nbucket], chain[nchain]].
        if (nbucket_ != 0) {
          // in case of --hash-style=both, we prefer gnu
          break;
        }

        nbucket_ = reinterpret_cast<uint32_t*>(load_bias + d->d_un.d_ptr)[0];
        nchain_ = reinterpret_cast<uint32_t*>(load_bias + d->d_un.d_ptr)[1];
        bucket_ = reinterpret_cast<uint32_t*>(load_bias + d->d_un.d_ptr + 8);
        chain_ = reinterpret_cast<uint32_t*>(load_bias + d->d_un.d_ptr + 8 + nbucket_ * 4);
        break;

      case DT_GNU_HASH:
        // GNU hash layout: [nbucket, symndx, maskwords, shift2,
        // bloom[maskwords], bucket[nbucket], chain[...]].
        if (nbucket_ != 0) {
          // DT_HASH was seen first (--hash-style=both): gnu wins, so
          // discard the SysV chain count before overwriting the fields.
          nchain_ = 0;
        }

        nbucket_ = reinterpret_cast<uint32_t*>(load_bias + d->d_un.d_ptr)[0];
        // skip symndx
        gnu_maskwords_ = reinterpret_cast<uint32_t*>(load_bias + d->d_un.d_ptr)[2];
        gnu_shift2_ = reinterpret_cast<uint32_t*>(load_bias + d->d_un.d_ptr)[3];

        gnu_bloom_filter_ = reinterpret_cast<ElfW(Addr)*>(load_bias + d->d_un.d_ptr + 16);
        bucket_ = reinterpret_cast<uint32_t*>(gnu_bloom_filter_ + gnu_maskwords_);
        // amend chain for symndx = header[1]
        chain_ = bucket_ + nbucket_ - reinterpret_cast<uint32_t*>(load_bias + d->d_un.d_ptr)[1];

        if (!powerof2(gnu_maskwords_)) {
          DL_ERR("invalid maskwords for gnu_hash = 0x%x, in \"%s\" expecting power to two", gnu_maskwords_, name);
          return false;
        }
        // Turn the power-of-two word count into a bitmask for indexing.
        --gnu_maskwords_;

        flags |= FLAG_GNU_HASH;
        break;

      case DT_STRTAB:
        strtab_ = reinterpret_cast<const char*>(load_bias + d->d_un.d_ptr);
        break;

      case DT_STRSZ:
        strtab_size_ = d->d_un.d_val;
        break;

      case DT_SYMTAB:
        symtab_ = reinterpret_cast<ElfW(Sym)*>(load_bias + d->d_un.d_ptr);
        break;

      case DT_SYMENT:
        // Sanity check only: symbol entry size must match our struct.
        if (d->d_un.d_val != sizeof(ElfW(Sym))) {
          DL_ERR("invalid DT_SYMENT: %zd in \"%s\"", static_cast<size_t>(d->d_un.d_val), name);
          return false;
        }
        break;

      case DT_PLTREL:
        // Each ABI supports exactly one PLT relocation flavor.
#if defined(USE_RELA)
        if (d->d_un.d_val != DT_RELA) {
          DL_ERR("unsupported DT_PLTREL in \"%s\"; expected DT_RELA", name);
          return false;
        }
#else
        if (d->d_un.d_val != DT_REL) {
          DL_ERR("unsupported DT_PLTREL in \"%s\"; expected DT_REL", name);
          return false;
        }
#endif
        break;

      case DT_JMPREL:
#if defined(USE_RELA)
        plt_rela_ = reinterpret_cast<ElfW(Rela)*>(load_bias + d->d_un.d_ptr);
#else
        plt_rel_ = reinterpret_cast<ElfW(Rel)*>(load_bias + d->d_un.d_ptr);
#endif
        break;

      case DT_PLTRELSZ:
#if defined(USE_RELA)
        plt_rela_count_ = d->d_un.d_val / sizeof(ElfW(Rela));
#else
        plt_rel_count_ = d->d_un.d_val / sizeof(ElfW(Rel));
#endif
        break;

      case DT_PLTGOT:
#if defined(__mips__)
        // Used by mips and mips64.
        plt_got_ = reinterpret_cast<ElfW(Addr)**>(load_bias + d->d_un.d_ptr);
#endif
        // Ignore for other platforms... (because RTLD_LAZY is not supported)
        break;

      case DT_DEBUG:
        // Set the DT_DEBUG entry to the address of _r_debug for GDB
        // if the dynamic table is writable
        // FIXME: not working currently for N64
        // The flags for the LOAD and DYNAMIC program headers do not agree.
        // The LOAD section containing the dynamic table has been mapped as
        // read-only, but the DYNAMIC header claims it is writable.
        // NOTE(review): on mips64 the break below is compiled out, so
        // DT_DEBUG falls through into the next visible case label —
        // confirm this is the intended behavior for that target.
#if !(defined(__mips__) && defined(__LP64__))
        if ((dynamic_flags & PF_W) != 0) {
          d->d_un.d_val = reinterpret_cast<uintptr_t>(&_r_debug);
        }
        break;
#endif
#if defined(USE_RELA)
      case DT_RELA:
        rela_ = reinterpret_cast<ElfW(Rela)*>(load_bias + d->d_un.d_ptr);
        break;

      case DT_RELASZ:
        rela_count_ = d->d_un.d_val / sizeof(ElfW(Rela));
        break;

      case DT_RELAENT:
        if (d->d_un.d_val != sizeof(ElfW(Rela))) {
          DL_ERR("invalid DT_RELAENT: %zd", static_cast<size_t>(d->d_un.d_val));
          return false;
        }
        break;

      // ignored (see DT_RELCOUNT comments for details)
      case DT_RELACOUNT:
        break;

      case DT_REL:
        DL_ERR("unsupported DT_REL in \"%s\"", name);
        return false;

      case DT_RELSZ:
        DL_ERR("unsupported DT_RELSZ in \"%s\"", name);
        return false;
#else
      case DT_REL:
        rel_ = reinterpret_cast<ElfW(Rel)*>(load_bias + d->d_un.d_ptr);
        break;

      case DT_RELSZ:
        rel_count_ = d->d_un.d_val / sizeof(ElfW(Rel));
        break;

      case DT_RELENT:
        if (d->d_un.d_val != sizeof(ElfW(Rel))) {
          DL_ERR("invalid DT_RELENT: %zd", static_cast<size_t>(d->d_un.d_val));
          return false;
        }
        break;

      // "Indicates that all RELATIVE relocations have been concatenated together,
      // and specifies the RELATIVE relocation count."
      //
      // TODO: Spec also mentions that this can be used to optimize relocation process;
      // Not currently used by bionic linker - ignored.
      case DT_RELCOUNT:
        break;
      case DT_RELA:
        DL_ERR("unsupported DT_RELA in \"%s\"", name);
        return false;
#endif
      case DT_INIT:
        init_func_ = reinterpret_cast<linker_function_t>(load_bias + d->d_un.d_ptr);
        DEBUG("%s constructors (DT_INIT) found at %p", name, init_func_);
        break;

      case DT_FINI:
        fini_func_ = reinterpret_cast<linker_function_t>(load_bias + d->d_un.d_ptr);
        DEBUG("%s destructors (DT_FINI) found at %p", name, fini_func_);
        break;

      case DT_INIT_ARRAY:
        init_array_ = reinterpret_cast<linker_function_t*>(load_bias + d->d_un.d_ptr);
        DEBUG("%s constructors (DT_INIT_ARRAY) found at %p", name, init_array_);
        break;

      case DT_INIT_ARRAYSZ:
        init_array_count_ = ((unsigned)d->d_un.d_val) / sizeof(ElfW(Addr));
        break;

      case DT_FINI_ARRAY:
        fini_array_ = reinterpret_cast<linker_function_t*>(load_bias + d->d_un.d_ptr);
        DEBUG("%s destructors (DT_FINI_ARRAY) found at %p", name, fini_array_);
        break;

      case DT_FINI_ARRAYSZ:
        fini_array_count_ = ((unsigned)d->d_un.d_val) / sizeof(ElfW(Addr));
        break;

      case DT_PREINIT_ARRAY:
        preinit_array_ = reinterpret_cast<linker_function_t*>(load_bias + d->d_un.d_ptr);
        DEBUG("%s constructors (DT_PREINIT_ARRAY) found at %p", name, preinit_array_);
        break;

      case DT_PREINIT_ARRAYSZ:
        preinit_array_count_ = ((unsigned)d->d_un.d_val) / sizeof(ElfW(Addr));
        break;

      case DT_TEXTREL:
#if defined(__LP64__)
        // Text relocations defeat sharing and W^X; forbidden on 64-bit.
        DL_ERR("text relocations (DT_TEXTREL) found in 64-bit ELF file \"%s\"", name);
        return false;
#else
        has_text_relocations = true;
        break;
#endif

      case DT_SYMBOLIC:
        has_DT_SYMBOLIC = true;
        break;

      case DT_NEEDED:
        // Only counted here; the names are resolved by the caller.
        ++needed_count;
        break;

      case DT_FLAGS:
        if (d->d_un.d_val & DF_TEXTREL) {
#if defined(__LP64__)
          DL_ERR("text relocations (DF_TEXTREL) found in 64-bit ELF file \"%s\"", name);
          return false;
#else
          has_text_relocations = true;
#endif
        }
        if (d->d_un.d_val & DF_SYMBOLIC) {
          has_DT_SYMBOLIC = true;
        }
        break;

      case DT_FLAGS_1:
        set_dt_flags_1(d->d_un.d_val);

        if ((d->d_un.d_val & ~SUPPORTED_DT_FLAGS_1) != 0) {
          DL_WARN("Unsupported flags DT_FLAGS_1=%p", reinterpret_cast<void*>(d->d_un.d_val));
        }
        break;
#if defined(__mips__)
      case DT_MIPS_RLD_MAP:
        // Set the DT_MIPS_RLD_MAP entry to the address of _r_debug for GDB.
        {
          r_debug** dp = reinterpret_cast<r_debug**>(load_bias + d->d_un.d_ptr);
          *dp = &_r_debug;
        }
        break;

      case DT_MIPS_RLD_VERSION:
      case DT_MIPS_FLAGS:
      case DT_MIPS_BASE_ADDRESS:
      case DT_MIPS_UNREFEXTNO:
        break;

      case DT_MIPS_SYMTABNO:
        mips_symtabno_ = d->d_un.d_val;
        break;

      case DT_MIPS_LOCAL_GOTNO:
        mips_local_gotno_ = d->d_un.d_val;
        break;

      case DT_MIPS_GOTSYM:
        mips_gotsym_ = d->d_un.d_val;
        break;
#endif
      // Ignored: "Its use has been superseded by the DF_BIND_NOW flag"
      case DT_BIND_NOW:
        break;

      // Ignore: bionic does not support symbol versioning...
      case DT_VERSYM:
      case DT_VERDEF:
      case DT_VERDEFNUM:
      case DT_VERNEED:
      case DT_VERNEEDNUM:
        break;

      default:
        if (!relocating_linker) {
          DL_WARN("%s: unused DT entry: type %p arg %p", name,
                  reinterpret_cast<void*>(d->d_tag), reinterpret_cast<void*>(d->d_un.d_val));
        }
        break;
    }
  }

  DEBUG("si->base = %p, si->strtab = %p, si->symtab = %p",
        reinterpret_cast<void*>(base), strtab_, symtab_);

  // Sanity checks.
  if (relocating_linker && needed_count != 0) {
    DL_ERR("linker cannot have DT_NEEDED dependencies on other libraries");
    return false;
  }
  if (nbucket_ == 0) {
    DL_ERR("empty/missing DT_HASH/DT_GNU_HASH in \"%s\" (new hash type from the future?)", name);
    return false;
  }
  if (strtab_ == 0) {
    DL_ERR("empty/missing DT_STRTAB in \"%s\"", name);
    return false;
  }
  if (symtab_ == 0) {
    DL_ERR("empty/missing DT_SYMTAB in \"%s\"", name);
    return false;
  }
  return true;
}
// Second linking phase: apply all relocations cached by prelink_image(),
// then re-apply memory protections (text segments, GNU RELRO) and
// register the image with GDB. 'global_group'/'local_group' scope symbol
// lookup during relocation; 'extinfo' optionally carries android_dlext
// RELRO-sharing options. Returns false with DL_ERR set on failure.
bool soinfo::link_image(const soinfo_list_t& global_group, const soinfo_list_t& local_group, const android_dlextinfo* extinfo) {

#if !defined(__LP64__)
  if (has_text_relocations) {
    // Make segments writable to allow text relocations to work properly. We will later call
    // phdr_table_protect_segments() after all of them are applied and all constructors are run.
    DL_WARN("%s has text relocations. This is wasting memory and prevents "
            "security hardening. Please fix.", name);
    if (phdr_table_unprotect_segments(phdr, phnum, load_bias) < 0) {
      DL_ERR("can't unprotect loadable segments for \"%s\": %s",
             name, strerror(errno));
      return false;
    }
  }
#endif

  // NOTE(review): relocate() appears to return non-zero on failure here
  // (its definition is elsewhere in this file) — confirm before changing.
#if defined(USE_RELA)
  if (rela_ != nullptr) {
    DEBUG("[ relocating %s ]", name);
    if (relocate(rela_, rela_count_, global_group, local_group)) {
      return false;
    }
  }
  if (plt_rela_ != nullptr) {
    DEBUG("[ relocating %s plt ]", name);
    if (relocate(plt_rela_, plt_rela_count_, global_group, local_group)) {
      return false;
    }
  }
#else
  if (rel_ != nullptr) {
    DEBUG("[ relocating %s ]", name);
    if (relocate(rel_, rel_count_, global_group, local_group)) {
      return false;
    }
  }
  if (plt_rel_ != nullptr) {
    DEBUG("[ relocating %s plt ]", name);
    if (relocate(plt_rel_, plt_rel_count_, global_group, local_group)) {
      return false;
    }
  }
#endif

#if defined(__mips__)
  // MIPS resolves its GOT lazily via a dedicated pass.
  if (!mips_relocate_got(global_group, local_group)) {
    return false;
  }
#endif

  DEBUG("[ finished linking %s ]", name);

#if !defined(__LP64__)
  if (has_text_relocations) {
    // All relocations are done, we can protect our segments back to read-only.
    if (phdr_table_protect_segments(phdr, phnum, load_bias) < 0) {
      DL_ERR("can't protect segments for \"%s\": %s",
             name, strerror(errno));
      return false;
    }
  }
#endif

  /* We can also turn on GNU RELRO protection */
  if (phdr_table_protect_gnu_relro(phdr, phnum, load_bias) < 0) {
    DL_ERR("can't enable GNU RELRO protection for \"%s\": %s",
           name, strerror(errno));
    return false;
  }

  /* Handle serializing/sharing the RELRO segment */
  if (extinfo && (extinfo->flags & ANDROID_DLEXT_WRITE_RELRO)) {
    // Dump the now-relocated RELRO pages to extinfo->relro_fd so another
    // process can share them.
    if (phdr_table_serialize_gnu_relro(phdr, phnum, load_bias,
                                       extinfo->relro_fd) < 0) {
      DL_ERR("failed serializing GNU RELRO section for \"%s\": %s",
             name, strerror(errno));
      return false;
    }
  } else if (extinfo && (extinfo->flags & ANDROID_DLEXT_USE_RELRO)) {
    // Replace matching RELRO pages with a mapping of a previously
    // serialized copy from extinfo->relro_fd.
    if (phdr_table_map_gnu_relro(phdr, phnum, load_bias,
                                 extinfo->relro_fd) < 0) {
      DL_ERR("failed mapping GNU RELRO section for \"%s\": %s",
             name, strerror(errno));
      return false;
    }
  }

  notify_gdb_of_load(this);
  return true;
}
2500 /*
2501 * This function add vdso to internal dso list.
2502 * It helps to stack unwinding through signal handlers.
2503 * Also, it makes bionic more like glibc.
2504 */
2505 static void add_vdso(KernelArgumentBlock& args __unused) {
2506 #if defined(AT_SYSINFO_EHDR)
2507 ElfW(Ehdr)* ehdr_vdso = reinterpret_cast<ElfW(Ehdr)*>(args.getauxval(AT_SYSINFO_EHDR));
2508 if (ehdr_vdso == nullptr) {
2509 return;
2510 }
2512 soinfo* si = soinfo_alloc("[vdso]", nullptr, 0, 0);
2514 si->phdr = reinterpret_cast<ElfW(Phdr)*>(reinterpret_cast<char*>(ehdr_vdso) + ehdr_vdso->e_phoff);
2515 si->phnum = ehdr_vdso->e_phnum;
2516 si->base = reinterpret_cast<ElfW(Addr)>(ehdr_vdso);
2517 si->size = phdr_table_get_load_size(si->phdr, si->phnum);
2518 si->load_bias = get_elf_exec_load_bias(ehdr_vdso);
2520 si->prelink_image();
2521 si->link_image(g_empty_list, soinfo::soinfo_list_t::make_list(si), nullptr);
2522 #endif
2523 }
/*
 * This is the linker's own soinfo entry for GDB. See the comment on
 * init_linker_info_for_gdb() below for why it exists and how it is
 * filled in.
 */
#if defined(__LP64__)
#define LINKER_PATH "/system/bin/linker64"
#else
#define LINKER_PATH "/system/bin/linker"
#endif
// Statically constructed so it never appears on the allocated soinfo list.
static soinfo linker_soinfo_for_gdb(LINKER_PATH, nullptr, 0, 0);
/* gdb expects the linker itself to appear in the debug shared-object
 * list; without that it has trouble locating the linker's ".text" and
 * ".plt" sections, and it can use our exported 'rtld_db_dlactivity'
 * symbol to track load/unload activity. soinfo_alloc() is deliberately
 * bypassed because the linker must not be on the soinfo list.
 */
static void init_linker_info_for_gdb(ElfW(Addr) linker_base) {
  linker_soinfo_for_gdb.base = linker_base;

  // Fill in the link map's dynamic pointer; otherwise gdb complains:
  //   warning: .dynamic section for "/system/bin/linker" is not at the
  //   expected address (wrong library or version mismatch?)
  ElfW(Ehdr)* ehdr = reinterpret_cast<ElfW(Ehdr)*>(linker_base);
  ElfW(Phdr)* phdr_table = reinterpret_cast<ElfW(Phdr)*>(linker_base + ehdr->e_phoff);
  phdr_table_get_dynamic_section(phdr_table, ehdr->e_phnum, linker_base,
                                 &linker_soinfo_for_gdb.dynamic, nullptr);

  insert_soinfo_into_debug_map(&linker_soinfo_for_gdb);
}
/*
 * This code is called after the linker has linked itself and
 * fixed its own GOT. It is safe to make references to externs
 * and other non-local data at this point. It bootstraps the main
 * executable's soinfo, loads LD_PRELOADs and DT_NEEDED libraries,
 * runs constructors, and returns the executable's entry point.
 */
static ElfW(Addr) __linker_init_post_relocation(KernelArgumentBlock& args, ElfW(Addr) linker_base) {
#if TIMING
  struct timeval t0, t1;
  gettimeofday(&t0, 0);
#endif

  // Initialize environment functions, and get to the ELF aux vectors table.
  linker_env_init(args);

  // If this is a setuid/setgid program, close the security hole described in
  // ftp://ftp.freebsd.org/pub/FreeBSD/CERT/advisories/FreeBSD-SA-02:23.stdio.asc
  if (get_AT_SECURE()) {
    nullify_closed_stdio();
  }

  debuggerd_init();

  // Get a few environment variables.
  const char* LD_DEBUG = linker_env_get("LD_DEBUG");
  if (LD_DEBUG != nullptr) {
    g_ld_debug_verbosity = atoi(LD_DEBUG);
  }

  // Normally, these are cleaned by linker_env_init for AT_SECURE
  // processes, but re-checking here doesn't cost us anything.
  const char* ldpath_env = nullptr;
  const char* ldpreload_env = nullptr;
  if (!get_AT_SECURE()) {
    ldpath_env = linker_env_get("LD_LIBRARY_PATH");
    ldpreload_env = linker_env_get("LD_PRELOAD");
  }

  INFO("[ android linker & debugger ]");

  soinfo* si = soinfo_alloc(args.argv[0], nullptr, 0, RTLD_GLOBAL);
  if (si == nullptr) {
    exit(EXIT_FAILURE);
  }

  /* bootstrap the link map, the main exe always needs to be first */
  si->flags |= FLAG_EXE;
  link_map* map = &(si->link_map_head);

  map->l_addr = 0;
  map->l_name = args.argv[0];
  map->l_prev = nullptr;
  map->l_next = nullptr;

  _r_debug.r_map = map;
  r_debug_tail = map;

  init_linker_info_for_gdb(linker_base);

  // Extract information passed from the kernel.
  si->phdr = reinterpret_cast<ElfW(Phdr)*>(args.getauxval(AT_PHDR));
  si->phnum = args.getauxval(AT_PHNUM);
  si->entry = args.getauxval(AT_ENTRY);

  /* Compute the value of si->base. We can't rely on the fact that
   * the first entry is the PHDR because this will not be true
   * for certain executables (e.g. some in the NDK unit test suite)
   */
  si->base = 0;
  si->size = phdr_table_get_load_size(si->phdr, si->phnum);
  si->load_bias = 0;
  for (size_t i = 0; i < si->phnum; ++i) {
    if (si->phdr[i].p_type == PT_PHDR) {
      // Derive bias/base from where the kernel actually mapped the phdrs.
      si->load_bias = reinterpret_cast<ElfW(Addr)>(si->phdr) - si->phdr[i].p_vaddr;
      si->base = reinterpret_cast<ElfW(Addr)>(si->phdr) - si->phdr[i].p_offset;
      break;
    }
  }
  si->dynamic = nullptr;
  si->ref_count = 1;

  ElfW(Ehdr)* elf_hdr = reinterpret_cast<ElfW(Ehdr)*>(si->base);
  if (elf_hdr->e_type != ET_DYN) {
    __libc_format_fd(2, "error: only position independent executables (PIE) are supported.\n");
    exit(EXIT_FAILURE);
  }

  // Use LD_LIBRARY_PATH and LD_PRELOAD (but only if we aren't setuid/setgid).
  parse_LD_LIBRARY_PATH(ldpath_env);
  parse_LD_PRELOAD(ldpreload_env);

  somain = si;

  si->prelink_image();

  // add somain to global group
  si->set_dt_flags_1(si->get_dt_flags_1() | DF_1_GLOBAL);

  // Load ld_preloads and dependencies: preload names first, then the
  // executable's DT_NEEDED entries, preserving order.
  StringLinkedList needed_library_name_list;
  size_t needed_libraries_count = 0;
  size_t ld_preloads_count = 0;
  while (g_ld_preload_names[ld_preloads_count] != nullptr) {
    needed_library_name_list.push_back(g_ld_preload_names[ld_preloads_count++]);
    ++needed_libraries_count;
  }

  for_each_dt_needed(si, [&](const char* name) {
    needed_library_name_list.push_back(name);
    ++needed_libraries_count;
  });

  // NOTE: variable-length array (GNU extension in C++).
  const char* needed_library_names[needed_libraries_count];

  memset(needed_library_names, 0, sizeof(needed_library_names));
  needed_library_name_list.copy_to_array(needed_library_names, needed_libraries_count);

  if (needed_libraries_count > 0 && !find_libraries(si, needed_library_names, needed_libraries_count, nullptr, g_ld_preloads, ld_preloads_count, RTLD_GLOBAL, nullptr)) {
    __libc_format_fd(2, "CANNOT LINK EXECUTABLE: %s\n", linker_get_error_buffer());
    exit(EXIT_FAILURE);
  }

  add_vdso(args);

  si->call_pre_init_constructors();

  /* After the prelink_image, the si->load_bias is initialized.
   * For so lib, the map->l_addr will be updated in notify_gdb_of_load.
   * We need to update this value for so exe here. So Unwind_Backtrace
   * for some arch like x86 could work correctly within so exe.
   */
  map->l_addr = si->load_bias;
  si->call_constructors();

#if TIMING
  gettimeofday(&t1, nullptr);
  PRINT("LINKER TIME: %s: %d microseconds", args.argv[0], (int) (
      (((long long)t1.tv_sec * 1000000LL) + (long long)t1.tv_usec) -
      (((long long)t0.tv_sec * 1000000LL) + (long long)t0.tv_usec)));
#endif
#if STATS
  PRINT("RELO STATS: %s: %d abs, %d rel, %d copy, %d symbol", args.argv[0],
        linker_stats.count[kRelocAbsolute],
        linker_stats.count[kRelocRelative],
        linker_stats.count[kRelocCopy],
        linker_stats.count[kRelocSymbol]);
#endif
#if COUNT_PAGES
  {
    // Count pages touched during relocation by popcounting the bitmask.
    unsigned n;
    unsigned i;
    unsigned count = 0;
    for (n = 0; n < 4096; n++) {
      if (bitmask[n]) {
        unsigned x = bitmask[n];
#if defined(__LP64__)
        for (i = 0; i < 32; i++) {
#else
        for (i = 0; i < 8; i++) {
#endif
          if (x & 1) {
            count++;
          }
          x >>= 1;
        }
      }
    }
    PRINT("PAGES MODIFIED: %s: %d (%dKB)", args.argv[0], count, count * 4);
  }
#endif

#if TIMING || STATS || COUNT_PAGES
  fflush(stdout);
#endif

  TRACE("[ Ready to execute '%s' @ %p ]", si->name, reinterpret_cast<void*>(si->entry));
  return si->entry;
}
/* Compute the load-bias of an existing executable. This shall only
 * be used to compute the load bias of an executable or shared library
 * that was loaded by the kernel itself.
 *
 * Input:
 *   elf -> address of ELF header, assumed to be at the start of the file.
 * Return:
 *   load bias, i.e. add the value of any p_vaddr in the file to get
 *   the corresponding address in memory. 0 if no PT_LOAD segment exists.
 */
static ElfW(Addr) get_elf_exec_load_bias(const ElfW(Ehdr)* elf) {
  const ElfW(Phdr)* phdr_table =
      reinterpret_cast<const ElfW(Phdr)*>(reinterpret_cast<uintptr_t>(elf) + elf->e_phoff);

  // The first PT_LOAD segment determines the bias for the whole image.
  for (size_t i = 0; i < elf->e_phnum; ++i) {
    const ElfW(Phdr)& ph = phdr_table[i];
    if (ph.p_type == PT_LOAD) {
      return reinterpret_cast<ElfW(Addr)>(elf) + ph.p_offset - ph.p_vaddr;
    }
  }

  return 0;
}
2759 extern "C" void _start();
/*
 * This is the entry point for the linker, called from begin.S. This
 * method is responsible for fixing the linker's own relocations, and
 * then calling __linker_init_post_relocation().
 *
 * Because this method is called before the linker has fixed its own
 * relocations, any attempt to reference an extern variable, extern
 * function, or other GOT reference will generate a segfault.
 */
extern "C" ElfW(Addr) __linker_init(void* raw_args) {
  KernelArgumentBlock args(raw_args);

  ElfW(Addr) linker_addr = args.getauxval(AT_BASE);
  ElfW(Addr) entry_point = args.getauxval(AT_ENTRY);
  ElfW(Ehdr)* elf_hdr = reinterpret_cast<ElfW(Ehdr)*>(linker_addr);
  ElfW(Phdr)* phdr = reinterpret_cast<ElfW(Phdr)*>(linker_addr + elf_hdr->e_phoff);

  // Stack-allocated: the linker must not use heap allocators before
  // it has linked itself.
  soinfo linker_so("[dynamic linker]", nullptr, 0, 0);

  // If the linker is not acting as PT_INTERP entry_point is equal to
  // _start. Which means that the linker is running as an executable and
  // already linked by PT_INTERP.
  //
  // This happens when user tries to run 'adb shell /system/bin/linker'
  // see also https://code.google.com/p/android/issues/detail?id=63174
  if (reinterpret_cast<ElfW(Addr)>(&_start) == entry_point) {
    __libc_fatal("This is %s, the helper program for shared library executables.\n", args.argv[0]);
  }

  linker_so.base = linker_addr;
  linker_so.size = phdr_table_get_load_size(phdr, elf_hdr->e_phnum);
  linker_so.load_bias = get_elf_exec_load_bias(elf_hdr);
  linker_so.dynamic = nullptr;
  linker_so.phdr = phdr;
  linker_so.phnum = elf_hdr->e_phnum;
  linker_so.flags |= FLAG_LINKER;

  // This might not be obvious... The reasons why we pass g_empty_list
  // in place of local_group here are (1) we do not really need it, because
  // linker is built with DT_SYMBOLIC and therefore relocates its symbols against
  // itself without having to look into local_group and (2) allocators
  // are not yet initialized, and therefore we cannot use linked_list.push_*
  // functions at this point.
  if (!(linker_so.prelink_image() && linker_so.link_image(g_empty_list, g_empty_list, nullptr))) {
    // It would be nice to print an error message, but if the linker
    // can't link itself, there's no guarantee that we'll be able to
    // call write() (because it involves a GOT reference). We may as
    // well try though...
    const char* msg = "CANNOT LINK EXECUTABLE: ";
    write(2, msg, strlen(msg));
    write(2, __linker_dl_err_buf, strlen(__linker_dl_err_buf));
    write(2, "\n", 1);
    _exit(EXIT_FAILURE);
  }

  __libc_init_tls(args);

  // Initialize the linker's own global variables
  linker_so.call_constructors();

  // Initialize static variables. Note that in order to
  // get correct libdl_info we need to call constructors
  // before get_libdl_info().
  solist = get_libdl_info();
  sonext = get_libdl_info();

  // We have successfully fixed our own relocations. It's safe to run
  // the main part of the linker now.
  args.abort_message_ptr = &g_abort_message;
  ElfW(Addr) start_address = __linker_init_post_relocation(args, linker_addr);

  protect_data(PROT_READ);

  // Return the address that the calling assembly stub should jump to.
  return start_address;
}