1 /*
2 * Copyright (C) 2008, 2009 The Android Open Source Project
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the
13 * distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
29 #include <dlfcn.h>
30 #include <errno.h>
31 #include <fcntl.h>
32 #include <inttypes.h>
33 #include <pthread.h>
34 #include <stdio.h>
35 #include <stdlib.h>
36 #include <string.h>
37 #include <sys/mman.h>
38 #include <sys/param.h>
39 #include <unistd.h>
41 #include <new>
43 // Private C library headers.
44 #include "private/bionic_tls.h"
45 #include "private/KernelArgumentBlock.h"
46 #include "private/ScopedPthreadMutexLocker.h"
47 #include "private/ScopedFd.h"
48 #include "private/ScopeGuard.h"
49 #include "private/UniquePtr.h"
51 #include "linker.h"
52 #include "linker_debug.h"
53 #include "linker_environ.h"
54 #include "linker_phdr.h"
55 #include "linker_allocator.h"
57 /* >>> IMPORTANT NOTE - READ ME BEFORE MODIFYING <<<
58 *
59 * Do NOT use malloc() and friends or pthread_*() code here.
60 * Don't use printf() either; it's caused mysterious memory
61 * corruption in the past.
62 * The linker runs before we bring up libc and it's easiest
63 * to make sure it does not depend on any complex libc features
64 *
65 * open issues / todo:
66 *
67 * - cleaner error reporting
68 * - after linking, set as much stuff as possible to READONLY
69 * and NOEXEC
70 */
72 #if defined(__LP64__)
73 #define SEARCH_NAME(x) x
74 #else
75 // Nvidia drivers are relying on the bug:
76 // http://code.google.com/p/android/issues/detail?id=6670
77 // so we continue to use base-name lookup for lp32
// Return the filename component of |name| (everything after the final
// '/'), or |name| itself when it contains no '/'.
static const char* get_base_name(const char* name) {
  const char* last_slash = strrchr(name, '/');
  if (last_slash == nullptr) {
    return name;
  }
  return last_slash + 1;
}
82 #define SEARCH_NAME(x) get_base_name(x)
83 #endif
85 static ElfW(Addr) get_elf_exec_load_bias(const ElfW(Ehdr)* elf);
87 static LinkerAllocator<soinfo> g_soinfo_allocator;
88 static LinkerAllocator<LinkedListEntry<soinfo>> g_soinfo_links_allocator;
90 static soinfo* solist;
91 static soinfo* sonext;
92 static soinfo* somain; // main process, always the one after libdl_info
94 static const char* const kDefaultLdPaths[] = {
95 #if defined(__LP64__)
96 "/vendor/lib64",
97 "/system/lib64",
98 #else
99 "/vendor/lib",
100 "/system/lib",
101 #endif
102 nullptr
103 };
105 #define LDPATH_BUFSIZE (LDPATH_MAX*64)
106 #define LDPATH_MAX 8
108 #define LDPRELOAD_BUFSIZE (LDPRELOAD_MAX*64)
109 #define LDPRELOAD_MAX 8
111 static char g_ld_library_paths_buffer[LDPATH_BUFSIZE];
112 static const char* g_ld_library_paths[LDPATH_MAX + 1];
114 static char g_ld_preloads_buffer[LDPRELOAD_BUFSIZE];
115 static const char* g_ld_preload_names[LDPRELOAD_MAX + 1];
117 static soinfo* g_ld_preloads[LDPRELOAD_MAX + 1];
119 __LIBC_HIDDEN__ int g_ld_debug_verbosity;
121 __LIBC_HIDDEN__ abort_msg_t* g_abort_message = nullptr; // For debuggerd.
// Buckets for the optional per-kind relocation counters (see STATS below).
enum RelocationKind {
  kRelocAbsolute = 0,
  kRelocRelative,
  kRelocCopy,
  kRelocSymbol,
  kRelocMax  // number of kinds; used to size the counter array
};
#if STATS
// Per-kind relocation counters, collected only in STATS builds.
struct linker_stats_t {
  int count[kRelocMax];
};

static linker_stats_t linker_stats;

// Bump the counter for |kind|; called once per processed relocation.
static void count_relocation(RelocationKind kind) {
  ++linker_stats.count[kind];
}
#else
// Non-STATS builds: no-op stub so call sites need no #ifdefs.
static void count_relocation(RelocationKind) {
}
#endif
#if COUNT_PAGES
// Bitmap recording which 4KiB pages relocations touched (COUNT_PAGES
// builds only). Each bit represents one page; the index math below
// divides the page number across 32-bit (LP64) or 8-bit (LP32) words.
static unsigned bitmask[4096];
#if defined(__LP64__)
// LP64 variant bounds-checks the word index before touching the bitmap.
#define MARK(offset) \
    do { \
      if ((((offset) >> 12) >> 5) < 4096) \
          bitmask[((offset) >> 12) >> 5] |= (1 << (((offset) >> 12) & 31)); \
    } while (0)
#else
#define MARK(offset) \
    do { \
      bitmask[((offset) >> 12) >> 3] |= (1 << (((offset) >> 12) & 7)); \
    } while (0)
#endif
#else
// COUNT_PAGES disabled: MARK expands to nothing.
#define MARK(x) do {} while (0)
#endif
164 // You shouldn't try to call memory-allocating functions in the dynamic linker.
165 // Guard against the most obvious ones.
// Defines a stub with the given signature that aborts the process if it is
// ever reached. These shadow the real allocator entry points so any
// accidental heap use inside the linker fails loudly instead of corrupting
// state (libc's heap is not set up when the linker runs).
#define DISALLOW_ALLOCATION(return_type, name, ...) \
  return_type name __VA_ARGS__ \
  { \
    __libc_fatal("ERROR: " #name " called from the dynamic linker!\n"); \
  }
DISALLOW_ALLOCATION(void*, malloc, (size_t u __unused));
DISALLOW_ALLOCATION(void, free, (void* u __unused));
DISALLOW_ALLOCATION(void*, realloc, (void* u1 __unused, size_t u2 __unused));
DISALLOW_ALLOCATION(void*, calloc, (size_t u1 __unused, size_t u2 __unused));
176 static char __linker_dl_err_buf[768];
178 char* linker_get_error_buffer() {
179 return &__linker_dl_err_buf[0];
180 }
// Capacity of the buffer returned by linker_get_error_buffer().
size_t linker_get_error_buffer_size() {
  return sizeof(__linker_dl_err_buf);
}
186 // This function is an empty stub where GDB locates a breakpoint to get notified
187 // about linker activity.
188 extern "C" void __attribute__((noinline)) __attribute__((visibility("default"))) rtld_db_dlactivity();
190 static pthread_mutex_t g__r_debug_mutex = PTHREAD_MUTEX_INITIALIZER;
191 static r_debug _r_debug = {1, nullptr, reinterpret_cast<uintptr_t>(&rtld_db_dlactivity), r_debug::RT_CONSISTENT, 0};
192 static link_map* r_debug_tail = 0;
194 static void insert_soinfo_into_debug_map(soinfo* info) {
195 // Copy the necessary fields into the debug structure.
196 link_map* map = &(info->link_map_head);
197 map->l_addr = info->load_bias;
198 map->l_name = info->name;
199 map->l_ld = info->dynamic;
201 // Stick the new library at the end of the list.
202 // gdb tends to care more about libc than it does
203 // about leaf libraries, and ordering it this way
204 // reduces the back-and-forth over the wire.
205 if (r_debug_tail) {
206 r_debug_tail->l_next = map;
207 map->l_prev = r_debug_tail;
208 map->l_next = 0;
209 } else {
210 _r_debug.r_map = map;
211 map->l_prev = 0;
212 map->l_next = 0;
213 }
214 r_debug_tail = map;
215 }
217 static void remove_soinfo_from_debug_map(soinfo* info) {
218 link_map* map = &(info->link_map_head);
220 if (r_debug_tail == map) {
221 r_debug_tail = map->l_prev;
222 }
224 if (map->l_prev) {
225 map->l_prev->l_next = map->l_next;
226 }
227 if (map->l_next) {
228 map->l_next->l_prev = map->l_prev;
229 }
230 }
// Publish |info| to gdb's link-map view. The RT_ADD -> mutate ->
// RT_CONSISTENT sequence, with a hit on the rtld_db_dlactivity breakpoint
// after each state change, is the debugger rendezvous protocol.
static void notify_gdb_of_load(soinfo* info) {
  if (info->is_main_executable()) {
    // GDB already knows about the main executable
    return;
  }

  ScopedPthreadMutexLocker locker(&g__r_debug_mutex);

  _r_debug.r_state = r_debug::RT_ADD;
  rtld_db_dlactivity();

  insert_soinfo_into_debug_map(info);

  _r_debug.r_state = r_debug::RT_CONSISTENT;
  rtld_db_dlactivity();
}
// Remove |info| from gdb's link-map view, using the same RT_DELETE ->
// mutate -> RT_CONSISTENT breakpoint protocol as notify_gdb_of_load().
static void notify_gdb_of_unload(soinfo* info) {
  if (info->is_main_executable()) {
    // GDB already knows about the main executable
    return;
  }

  ScopedPthreadMutexLocker locker(&g__r_debug_mutex);

  _r_debug.r_state = r_debug::RT_DELETE;
  rtld_db_dlactivity();

  remove_soinfo_from_debug_map(info);

  _r_debug.r_state = r_debug::RT_CONSISTENT;
  rtld_db_dlactivity();
}
266 void notify_gdb_of_libraries() {
267 _r_debug.r_state = r_debug::RT_ADD;
268 rtld_db_dlactivity();
269 _r_debug.r_state = r_debug::RT_CONSISTENT;
270 rtld_db_dlactivity();
271 }
// Out-of-line allocator hook for soinfo linked lists; backed by the
// linker-private allocator since regular malloc is unavailable here.
LinkedListEntry<soinfo>* SoinfoListAllocator::alloc() {
  return g_soinfo_links_allocator.alloc();
}
// Return |entry| to the linker-private list-entry allocator.
void SoinfoListAllocator::free(LinkedListEntry<soinfo>* entry) {
  g_soinfo_links_allocator.free(entry);
}
// Apply |protection| (PROT_* bits) to all pages owned by the soinfo and
// list-entry allocators — used to keep linker bookkeeping read-only
// except while it is actively being modified.
static void protect_data(int protection) {
  g_soinfo_allocator.protect_all(protection);
  g_soinfo_links_allocator.protect_all(protection);
}
// Allocate a new soinfo for |name| and append it to the global solist.
// Returns nullptr (with dlerror set) if the name doesn't fit the
// fixed-size soinfo name field.
static soinfo* soinfo_alloc(const char* name, struct stat* file_stat, off64_t file_offset, uint32_t rtld_flags) {
  if (strlen(name) >= SOINFO_NAME_LEN) {
    DL_ERR("library name \"%s\" too long", name);
    return nullptr;
  }

  // Placement-new into linker-private pages; malloc is off-limits here.
  soinfo* si = new (g_soinfo_allocator.alloc()) soinfo(name, file_stat, file_offset, rtld_flags);

  // sonext always points at the list tail; solist starts with libdl_info.
  sonext->next = si;
  sonext = si;

  TRACE("name %s: allocated soinfo @ %p", name, si);
  return si;
}
301 static void soinfo_free(soinfo* si) {
302 if (si == nullptr) {
303 return;
304 }
306 if (si->base != 0 && si->size != 0) {
307 munmap(reinterpret_cast<void*>(si->base), si->size);
308 }
310 soinfo *prev = nullptr, *trav;
312 TRACE("name %s: freeing soinfo @ %p", si->name, si);
314 for (trav = solist; trav != nullptr; trav = trav->next) {
315 if (trav == si) {
316 break;
317 }
318 prev = trav;
319 }
321 if (trav == nullptr) {
322 // si was not in solist
323 DL_ERR("name \"%s\" is not in solist!", si->name);
324 return;
325 }
327 // clear links to/from si
328 si->remove_all_links();
330 // prev will never be null, because the first entry in solist is
331 // always the static libdl_info.
332 prev->next = si->next;
333 if (si == sonext) {
334 sonext = prev;
335 }
337 g_soinfo_allocator.free(si);
338 }
// Split |path| at any of |delimiters| into at most |max_count| entries of
// |array|, copying the string into |buf| (|buf_size| bytes) as backing
// storage. The array is nullptr-terminated and empty components are
// skipped. A nullptr |path| leaves |array| untouched.
static void parse_path(const char* path, const char* delimiters,
                       const char** array, char* buf, size_t buf_size, size_t max_count) {
  if (path == nullptr) {
    return;
  }

  // len is the length strlcpy WANTED to copy; >= buf_size means truncation.
  size_t len = strlcpy(buf, path, buf_size);

  size_t i = 0;
  char* buf_p = buf;
  while (i < max_count && (array[i] = strsep(&buf_p, delimiters))) {
    if (*array[i] != '\0') {
      ++i;
    }
  }

  // Forget the last path if we had to truncate; this occurs if the 2nd to
  // last char isn't '\0' (i.e. wasn't originally a delimiter).
  if (i > 0 && len >= buf_size && buf[buf_size - 2] != '\0') {
    array[i - 1] = nullptr;
  } else {
    array[i] = nullptr;
  }
}
// Split the ':'-separated LD_LIBRARY_PATH value into g_ld_library_paths.
static void parse_LD_LIBRARY_PATH(const char* path) {
  parse_path(path, ":", g_ld_library_paths,
             g_ld_library_paths_buffer, sizeof(g_ld_library_paths_buffer), LDPATH_MAX);
}
// Split the LD_PRELOAD value into g_ld_preload_names.
static void parse_LD_PRELOAD(const char* path) {
  // We have historically supported ':' as well as ' ' in LD_PRELOAD.
  parse_path(path, " :", g_ld_preload_names,
             g_ld_preloads_buffer, sizeof(g_ld_preloads_buffer), LDPRELOAD_MAX);
}
376 #if defined(__arm__)
378 // For a given PC, find the .so that it belongs to.
379 // Returns the base address of the .ARM.exidx section
380 // for that .so, and the number of 8-byte entries
381 // in that section (via *pcount).
382 //
383 // Intended to be called by libc's __gnu_Unwind_Find_exidx().
384 //
385 // This function is exposed via dlfcn.cpp and libdl.so.
_Unwind_Ptr dl_unwind_find_exidx(_Unwind_Ptr pc, int* pcount) {
  unsigned addr = (unsigned)pc;

  // Linear scan of solist for the mapping that covers |addr|.
  for (soinfo* si = solist; si != 0; si = si->next) {
    if ((addr >= si->base) && (addr < (si->base + si->size))) {
      *pcount = si->ARM_exidx_count;
      return (_Unwind_Ptr)si->ARM_exidx;
    }
  }
  *pcount = 0;
  // NOTE(review): returning nullptr here relies on _Unwind_Ptr being a
  // pointer-compatible type in this toolchain — verify against unwind.h.
  return nullptr;
}
399 #endif
401 // Here, we only have to provide a callback to iterate across all the
402 // loaded libraries. gcc_eh does the rest.
403 int dl_iterate_phdr(int (*cb)(dl_phdr_info* info, size_t size, void* data), void* data) {
404 int rv = 0;
405 for (soinfo* si = solist; si != nullptr; si = si->next) {
406 dl_phdr_info dl_info;
407 dl_info.dlpi_addr = si->link_map_head.l_addr;
408 dl_info.dlpi_name = si->link_map_head.l_name;
409 dl_info.dlpi_phdr = si->phdr;
410 dl_info.dlpi_phnum = si->phnum;
411 rv = cb(&dl_info, sizeof(dl_phdr_info), data);
412 if (rv != 0) {
413 break;
414 }
415 }
416 return rv;
417 }
// Dispatch to whichever hash-table flavor this library carries
// (DT_GNU_HASH vs. classic SysV DT_HASH).
ElfW(Sym)* soinfo::find_symbol_by_name(SymbolName& symbol_name) {
  return is_gnu_hash() ? gnu_lookup(symbol_name) : elf_lookup(symbol_name);
}
423 static bool is_symbol_global_and_defined(const soinfo* si, const ElfW(Sym)* s) {
424 if (ELF_ST_BIND(s->st_info) == STB_GLOBAL ||
425 ELF_ST_BIND(s->st_info) == STB_WEAK) {
426 return s->st_shndx != SHN_UNDEF;
427 } else if (ELF_ST_BIND(s->st_info) != STB_LOCAL) {
428 DL_WARN("unexpected ST_BIND value: %d for '%s' in '%s'",
429 ELF_ST_BIND(s->st_info), si->get_string(s->st_name), si->name);
430 }
432 return false;
433 }
// DT_GNU_HASH lookup: a Bloom-filter pre-check followed by a bucket/chain
// walk. Chain entries store the symbol's hash with the low bit repurposed
// as an end-of-chain marker, hence the ">> 1" comparison and the "& 1"
// loop condition below.
ElfW(Sym)* soinfo::gnu_lookup(SymbolName& symbol_name) {
  uint32_t hash = symbol_name.gnu_hash();
  uint32_t h2 = hash >> gnu_shift2_;  // second Bloom hash

  uint32_t bloom_mask_bits = sizeof(ElfW(Addr))*8;
  uint32_t word_num = (hash / bloom_mask_bits) & gnu_maskwords_;
  ElfW(Addr) bloom_word = gnu_bloom_filter_[word_num];

  // test against bloom filter: both bits must be set for a possible match
  if ((1 & (bloom_word >> (hash % bloom_mask_bits)) & (bloom_word >> (h2 % bloom_mask_bits))) == 0) {
    return nullptr;
  }

  // bloom test says "probably yes"...
  uint32_t n = bucket_[hash % nbucket_];

  if (n == 0) {
    // Empty bucket: definitely not here.
    return nullptr;
  }

  do {
    ElfW(Sym)* s = symtab_ + n;
    // Compare hashes (ignoring the end-of-chain bit) before the expensive
    // string compare.
    if (((chain_[n] ^ hash) >> 1) == 0 &&
        strcmp(get_string(s->st_name), symbol_name.get_name()) == 0 &&
        is_symbol_global_and_defined(this, s)) {
      return s;
    }
  } while ((chain_[n++] & 1) == 0);  // low bit set terminates the chain

  return nullptr;
}
// Classic SysV DT_HASH lookup: walk the bucket's chain comparing names.
// chain_[n] gives the next symbol index sharing this bucket; index 0
// (STN_UNDEF) terminates the chain.
ElfW(Sym)* soinfo::elf_lookup(SymbolName& symbol_name) {
  uint32_t hash = symbol_name.elf_hash();

  TRACE_TYPE(LOOKUP, "SEARCH %s in %s@%p h=%x(elf) %zd",
             symbol_name.get_name(), name, reinterpret_cast<void*>(base), hash, hash % nbucket_);

  for (uint32_t n = bucket_[hash % nbucket_]; n != 0; n = chain_[n]) {
    ElfW(Sym)* s = symtab_ + n;
    if (strcmp(get_string(s->st_name), symbol_name.get_name()) == 0 && is_symbol_global_and_defined(this, s)) {
      TRACE_TYPE(LOOKUP, "FOUND %s in %s (%p) %zd",
                 symbol_name.get_name(), name, reinterpret_cast<void*>(s->st_value),
                 static_cast<size_t>(s->st_size));
      return s;
    }
  }

  TRACE_TYPE(LOOKUP, "NOT FOUND %s in %s@%p %x %zd",
             symbol_name.get_name(), name, reinterpret_cast<void*>(base), hash, hash % nbucket_);

  return nullptr;
}
// Construct a fresh soinfo. The allocator hands back uninitialized
// memory and this type is treated as plain old data, so everything is
// zeroed before individual fields are set.
soinfo::soinfo(const char* name, const struct stat* file_stat, off64_t file_offset, int rtld_flags) {
  memset(this, 0, sizeof(*this));

  strlcpy(this->name, name, sizeof(this->name));
  flags_ = FLAG_NEW_SOINFO;
  version_ = SOINFO_VERSION;

  if (file_stat != nullptr) {
    // Record the backing file's identity so a later open of the same file
    // under a different path can be matched to this soinfo (see
    // load_library's dev/ino check).
    this->st_dev_ = file_stat->st_dev;
    this->st_ino_ = file_stat->st_ino;
    this->file_offset_ = file_offset;
  }

  this->rtld_flags_ = rtld_flags;
}
506 uint32_t SymbolName::elf_hash() {
507 if (!has_elf_hash_) {
508 const unsigned char* name = reinterpret_cast<const unsigned char*>(name_);
509 uint32_t h = 0, g;
511 while (*name) {
512 h = (h << 4) + *name++;
513 g = h & 0xf0000000;
514 h ^= g;
515 h ^= g >> 24;
516 }
518 elf_hash_ = h;
519 has_elf_hash_ = true;
520 }
522 return elf_hash_;
523 }
525 uint32_t SymbolName::gnu_hash() {
526 if (!has_gnu_hash_) {
527 uint32_t h = 5381;
528 const unsigned char* name = reinterpret_cast<const unsigned char*>(name_);
529 while (*name != 0) {
530 h += (h << 5) + *name++; // h*33 + c = h + h * 32 + c = h + h << 5 + c
531 }
533 gnu_hash_ = h;
534 has_gnu_hash_ = true;
535 }
537 return gnu_hash_;
538 }
// Resolve |name| for a relocation in |si_from|. Search order: the
// library itself when it was linked -Bsymbolic, then the global group
// (executable, preloads, DF_1_GLOBAL libs), then the local group (the
// dlopen'ed library and its dependency closure). On success *si_found_in
// names the providing library.
static ElfW(Sym)* soinfo_do_lookup(soinfo* si_from, const char* name, soinfo** si_found_in,
    const soinfo::soinfo_list_t& global_group, const soinfo::soinfo_list_t& local_group) {
  SymbolName symbol_name(name);
  ElfW(Sym)* s = nullptr;

  /* "This element's presence in a shared object library alters the dynamic linker's
   * symbol resolution algorithm for references within the library. Instead of starting
   * a symbol search with the executable file, the dynamic linker starts from the shared
   * object itself. If the shared object fails to supply the referenced symbol, the
   * dynamic linker then searches the executable file and other shared objects as usual."
   *
   * http://www.sco.com/developers/gabi/2012-12-31/ch5.dynamic.html
   *
   * Note that this is unlikely since static linker avoids generating
   * relocations for -Bsymbolic linked dynamic executables.
   */
  if (si_from->has_DT_SYMBOLIC) {
    DEBUG("%s: looking up %s in local scope (DT_SYMBOLIC)", si_from->name, name);
    s = si_from->find_symbol_by_name(symbol_name);
    if (s != nullptr) {
      *si_found_in = si_from;
    }
  }

  // 1. Look for it in global_group
  if (s == nullptr) {
    global_group.visit([&](soinfo* global_si) {
      DEBUG("%s: looking up %s in %s (from global group)", si_from->name, name, global_si->name);
      s = global_si->find_symbol_by_name(symbol_name);
      if (s != nullptr) {
        *si_found_in = global_si;
        return false;  // stop visiting
      }

      return true;
    });
  }

  // 2. Look for it in the local group
  if (s == nullptr) {
    local_group.visit([&](soinfo* local_si) {
      if (local_si == si_from && si_from->has_DT_SYMBOLIC) {
        // we already did this - skip
        return true;
      }

      DEBUG("%s: looking up %s in %s (from local group)", si_from->name, name, local_si->name);
      s = local_si->find_symbol_by_name(symbol_name);
      if (s != nullptr) {
        *si_found_in = local_si;
        return false;  // stop visiting
      }

      return true;
    });
  }

  if (s != nullptr) {
    TRACE_TYPE(LOOKUP, "si %s sym %s s->st_value = %p, "
               "found in %s, base = %p, load bias = %p",
               si_from->name, name, reinterpret_cast<void*>(s->st_value),
               (*si_found_in)->name, reinterpret_cast<void*>((*si_found_in)->base),
               reinterpret_cast<void*>((*si_found_in)->load_bias));
  }

  return s;
}
// Each size has its own allocator; all types of one size share it.
template<size_t size>
class SizeBasedAllocator {
 public:
  static void* alloc() {
    return allocator_.alloc();
  }

  static void free(void* ptr) {
    allocator_.free(ptr);
  }

 private:
  // One linker-private block allocator per distinct template size.
  static LinkerBlockAllocator allocator_;
};

template<size_t size>
LinkerBlockAllocator SizeBasedAllocator<size>::allocator_(size);
// Typed facade over SizeBasedAllocator: gives type-safe alloc/free while
// letting same-sized types share one underlying block allocator.
template<typename T>
class TypeBasedAllocator {
 public:
  static T* alloc() {
    return reinterpret_cast<T*>(SizeBasedAllocator<sizeof(T)>::alloc());
  }

  static void free(T* ptr) {
    SizeBasedAllocator<sizeof(T)>::free(ptr);
  }
};
// A pending "load this library" work item: the library name plus the
// soinfo that listed it in DT_NEEDED (nullptr for roots). Allocated from
// the linker-private allocator via create()/deleter, never via new/delete.
class LoadTask {
 public:
  struct deleter_t {
    void operator()(LoadTask* t) {
      TypeBasedAllocator<LoadTask>::free(t);
    }
  };

  typedef UniquePtr<LoadTask, deleter_t> unique_ptr;

  static deleter_t deleter;

  // Factory: placement-new into linker-private storage.
  static LoadTask* create(const char* name, soinfo* needed_by) {
    LoadTask* ptr = TypeBasedAllocator<LoadTask>::alloc();
    return new (ptr) LoadTask(name, needed_by);
  }

  const char* get_name() const {
    return name_;
  }

  // The library whose DT_NEEDED entry produced this task (may be nullptr).
  soinfo* get_needed_by() const {
    return needed_by_;
  }
 private:
  LoadTask(const char* name, soinfo* needed_by)
    : name_(name), needed_by_(needed_by) {}

  const char* name_;
  soinfo* needed_by_;

  DISALLOW_IMPLICIT_CONSTRUCTORS(LoadTask);
};

LoadTask::deleter_t LoadTask::deleter;
675 template <typename T>
676 using linked_list_t = LinkedList<T, TypeBasedAllocator<LinkedListEntry<T>>>;
678 typedef linked_list_t<soinfo> SoinfoLinkedList;
679 typedef linked_list_t<const char> StringLinkedList;
680 typedef linked_list_t<LoadTask> LoadTaskList;
683 // This function walks down the tree of soinfo dependencies
684 // in breadth-first order and
685 // * calls action(soinfo* si) for each node, and
686 // * terminates walk if action returns false.
687 //
688 // walk_dependencies_tree returns false if walk was terminated
689 // by the action and true otherwise.
690 template<typename F>
691 static bool walk_dependencies_tree(soinfo* root_soinfos[], size_t root_soinfos_size, F action) {
692 SoinfoLinkedList visit_list;
693 SoinfoLinkedList visited;
695 for (size_t i = 0; i < root_soinfos_size; ++i) {
696 visit_list.push_back(root_soinfos[i]);
697 }
699 soinfo* si;
700 while ((si = visit_list.pop_front()) != nullptr) {
701 if (visited.contains(si)) {
702 continue;
703 }
705 if (!action(si)) {
706 return false;
707 }
709 visited.push_back(si);
711 si->get_children().for_each([&](soinfo* child) {
712 visit_list.push_back(child);
713 });
714 }
716 return true;
717 }
720 // This is used by dlsym(3). It performs symbol lookup only within the
721 // specified soinfo object and its dependencies in breadth first order.
722 ElfW(Sym)* dlsym_handle_lookup(soinfo* si, soinfo** found, const char* name) {
723 ElfW(Sym)* result = nullptr;
724 SymbolName symbol_name(name);
727 walk_dependencies_tree(&si, 1, [&](soinfo* current_soinfo) {
728 result = current_soinfo->find_symbol_by_name(symbol_name);
729 if (result != nullptr) {
730 *found = current_soinfo;
731 return false;
732 }
734 return true;
735 });
737 return result;
738 }
740 /* This is used by dlsym(3) to performs a global symbol lookup. If the
741 start value is null (for RTLD_DEFAULT), the search starts at the
742 beginning of the global solist. Otherwise the search starts at the
743 specified soinfo (for RTLD_NEXT).
744 */
745 ElfW(Sym)* dlsym_linear_lookup(const char* name, soinfo** found, soinfo* start) {
746 SymbolName symbol_name(name);
748 if (start == nullptr) {
749 start = solist;
750 }
752 ElfW(Sym)* s = nullptr;
753 for (soinfo* si = start; (s == nullptr) && (si != nullptr); si = si->next) {
754 if ((si->get_rtld_flags() & RTLD_GLOBAL) == 0) {
755 continue;
756 }
758 s = si->find_symbol_by_name(symbol_name);
759 if (s != nullptr) {
760 *found = si;
761 break;
762 }
763 }
765 if (s != nullptr) {
766 TRACE_TYPE(LOOKUP, "%s s->st_value = %p, found->base = %p",
767 name, reinterpret_cast<void*>(s->st_value), reinterpret_cast<void*>((*found)->base));
768 }
770 return s;
771 }
773 soinfo* find_containing_library(const void* p) {
774 ElfW(Addr) address = reinterpret_cast<ElfW(Addr)>(p);
775 for (soinfo* si = solist; si != nullptr; si = si->next) {
776 if (address >= si->base && address - si->base < si->size) {
777 return si;
778 }
779 }
780 return nullptr;
781 }
// Reverse lookup (address -> symbol), dispatched on hash-table flavor.
ElfW(Sym)* soinfo::find_symbol_by_address(const void* addr) {
  return is_gnu_hash() ? gnu_addr_lookup(addr) : elf_addr_lookup(addr);
}
// True when |sym| is a defined symbol whose [st_value, st_value+st_size)
// range covers the library-relative address |soaddr|.
static bool symbol_matches_soaddr(const ElfW(Sym)* sym, ElfW(Addr) soaddr) {
  if (sym->st_shndx == SHN_UNDEF) {
    return false;
  }
  return soaddr >= sym->st_value && soaddr < sym->st_value + sym->st_size;
}
// Reverse lookup via the GNU hash table: visit every chain (the GNU
// format has no full symbol count, so we walk all buckets) looking for a
// symbol whose range covers |addr|.
ElfW(Sym)* soinfo::gnu_addr_lookup(const void* addr) {
  ElfW(Addr) soaddr = reinterpret_cast<ElfW(Addr)>(addr) - base;

  for (size_t i = 0; i < nbucket_; ++i) {
    uint32_t n = bucket_[i];

    if (n == 0) {
      // Empty bucket.
      continue;
    }

    do {
      ElfW(Sym)* sym = symtab_ + n;
      if (symbol_matches_soaddr(sym, soaddr)) {
        return sym;
      }
    } while ((chain_[n++] & 1) == 0);  // low bit set ends the chain
  }

  return nullptr;
}
814 ElfW(Sym)* soinfo::elf_addr_lookup(const void* addr) {
815 ElfW(Addr) soaddr = reinterpret_cast<ElfW(Addr)>(addr) - base;
817 // Search the library's symbol table for any defined symbol which
818 // contains this address.
819 for (size_t i = 0; i < nchain_; ++i) {
820 ElfW(Sym)* sym = symtab_ + i;
821 if (symbol_matches_soaddr(sym, soaddr)) {
822 return sym;
823 }
824 }
826 return nullptr;
827 }
// Try to open |name| under each directory of the nullptr-terminated
// |paths| array. Returns an open fd on success, -1 if the library exists
// in none of them.
static int open_library_on_path(const char* name, const char* const paths[]) {
  char buf[512];
  for (size_t i = 0; paths[i] != nullptr; ++i) {
    int n = __libc_format_buffer(buf, sizeof(buf), "%s/%s", paths[i], name);
    if (n < 0 || n >= static_cast<int>(sizeof(buf))) {
      // Combined path doesn't fit; skip rather than open a truncated name.
      PRINT("Warning: ignoring very long library path: %s/%s", paths[i], name);
      continue;
    }
    int fd = TEMP_FAILURE_RETRY(open(buf, O_RDONLY | O_CLOEXEC));
    if (fd != -1) {
      return fd;
    }
  }
  return -1;
}
// Open the library |name| for reading: absolute/relative paths are opened
// directly; bare names are searched in LD_LIBRARY_PATH then the default
// system paths. Returns an fd or -1.
static int open_library(const char* name) {
  TRACE("[ opening %s ]", name);

  // If the name contains a slash, we should attempt to open it directly and not search the paths.
  if (strchr(name, '/') != nullptr) {
    int fd = TEMP_FAILURE_RETRY(open(name, O_RDONLY | O_CLOEXEC));
    if (fd != -1) {
      return fd;
    }
    // ...but nvidia binary blobs (at least) rely on this behavior, so fall through for now.
#if defined(__LP64__)
    return -1;
#endif
  }

  // Otherwise we try LD_LIBRARY_PATH first, and fall back to the built-in well known paths.
  int fd = open_library_on_path(name, g_ld_library_paths);
  if (fd == -1) {
    fd = open_library_on_path(name, kDefaultLdPaths);
  }
  return fd;
}
// Invoke action(library_name) for every DT_NEEDED entry in |si|'s
// dynamic section.
template<typename F>
static void for_each_dt_needed(const soinfo* si, F action) {
  for (ElfW(Dyn)* d = si->dynamic; d->d_tag != DT_NULL; ++d) {
    if (d->d_tag == DT_NEEDED) {
      action(si->get_string(d->d_un.d_val));
    }
  }
}
// Load a single library: resolve its fd (from extinfo or by path search),
// validate the file offset, detect aliases of already-loaded files by
// dev/ino, map the ELF segments, allocate an soinfo, and queue the
// library's DT_NEEDED entries onto |load_tasks|. Returns the (possibly
// pre-existing) soinfo, or nullptr with dlerror set.
static soinfo* load_library(LoadTaskList& load_tasks, const char* name, int rtld_flags, const android_dlextinfo* extinfo) {
  int fd = -1;
  off64_t file_offset = 0;
  ScopedFd file_guard(-1);

  if (extinfo != nullptr && (extinfo->flags & ANDROID_DLEXT_USE_LIBRARY_FD) != 0) {
    // Caller supplied the fd; it stays owned by the caller (no guard).
    fd = extinfo->library_fd;
    if ((extinfo->flags & ANDROID_DLEXT_USE_LIBRARY_FD_OFFSET) != 0) {
      file_offset = extinfo->library_fd_offset;
    }
  } else {
    // Open the file.
    fd = open_library(name);
    if (fd == -1) {
      DL_ERR("library \"%s\" not found", name);
      return nullptr;
    }

    // Close our fd automatically on every exit path below.
    file_guard.reset(fd);
  }

  // The offset must be usable directly with mmap.
  if ((file_offset % PAGE_SIZE) != 0) {
    DL_ERR("file offset for the library \"%s\" is not page-aligned: %" PRId64, name, file_offset);
    return nullptr;
  }
  if (file_offset < 0) {
    DL_ERR("file offset for the library \"%s\" is negative: %" PRId64, name, file_offset);
    return nullptr;
  }

  struct stat file_stat;
  if (TEMP_FAILURE_RETRY(fstat(fd, &file_stat)) != 0) {
    DL_ERR("unable to stat file for the library \"%s\": %s", name, strerror(errno));
    return nullptr;
  }
  if (file_offset >= file_stat.st_size) {
    DL_ERR("file offset for the library \"%s\" >= file size: %" PRId64 " >= %" PRId64, name, file_offset, file_stat.st_size);
    return nullptr;
  }

  // Check for symlink and other situations where
  // file can have different names.
  for (soinfo* si = solist; si != nullptr; si = si->next) {
    if (si->get_st_dev() != 0 &&
        si->get_st_ino() != 0 &&
        si->get_st_dev() == file_stat.st_dev &&
        si->get_st_ino() == file_stat.st_ino &&
        si->get_file_offset() == file_offset) {
      TRACE("library \"%s\" is already loaded under different name/path \"%s\" - will return existing soinfo", name, si->name);
      return si;
    }
  }

  // RTLD_NOLOAD means "only succeed if already loaded" - and it isn't.
  if ((rtld_flags & RTLD_NOLOAD) != 0) {
    DL_ERR("library \"%s\" wasn't loaded and RTLD_NOLOAD prevented it", name);
    return nullptr;
  }

  // Read the ELF header and load the segments.
  ElfReader elf_reader(name, fd, file_offset);
  if (!elf_reader.Load(extinfo)) {
    return nullptr;
  }

  soinfo* si = soinfo_alloc(SEARCH_NAME(name), &file_stat, file_offset, rtld_flags);
  if (si == nullptr) {
    return nullptr;
  }
  // Record where the segments ended up.
  si->base = elf_reader.load_start();
  si->size = elf_reader.load_size();
  si->load_bias = elf_reader.load_bias();
  si->phnum = elf_reader.phdr_count();
  si->phdr = elf_reader.loaded_phdr();

  if (!si->prelink_image()) {
    soinfo_free(si);
    return nullptr;
  }

  // Queue this library's own dependencies for the breadth-first loader.
  for_each_dt_needed(si, [&] (const char* name) {
    load_tasks.push_back(LoadTask::create(name, si));
  });

  return si;
}
963 static soinfo *find_loaded_library_by_name(const char* name) {
964 const char* search_name = SEARCH_NAME(name);
965 for (soinfo* si = solist; si != nullptr; si = si->next) {
966 if (!strcmp(search_name, si->name)) {
967 return si;
968 }
969 }
970 return nullptr;
971 }
973 static soinfo* find_library_internal(LoadTaskList& load_tasks, const char* name, int rtld_flags, const android_dlextinfo* extinfo) {
975 soinfo* si = find_loaded_library_by_name(name);
977 // Library might still be loaded, the accurate detection
978 // of this fact is done by load_library.
979 if (si == nullptr) {
980 TRACE("[ '%s' has not been found by name. Trying harder...]", name);
981 si = load_library(load_tasks, name, rtld_flags, extinfo);
982 }
984 return si;
985 }
987 static void soinfo_unload(soinfo* si);
989 // TODO: this is slightly unusual way to construct
990 // the global group for relocation. Not every RTLD_GLOBAL
991 // library is included in this group for backwards-compatibility
992 // reasons.
993 //
994 // This group consists of the main executable, LD_PRELOADs
995 // and libraries with the DF_1_GLOBAL flag set.
// Build the global group by scanning from the main executable onward and
// collecting every soinfo with DF_1_GLOBAL set (the executable itself and
// LD_PRELOADs have this flag set during startup).
static soinfo::soinfo_list_t make_global_group() {
  soinfo::soinfo_list_t global_group;
  for (soinfo* si = somain; si != nullptr; si = si->next) {
    if ((si->get_dt_flags_1() & DF_1_GLOBAL) != 0) {
      global_group.push_back(si);
    }
  }

  return global_group;
}
// Load |library_names| and their entire DT_NEEDED closure (breadth-first),
// then link everything that isn't linked yet. On failure all newly
// created soinfos are unloaded via the scope guard. |soinfos| (if
// non-null) receives the root soinfos; |ld_preloads| (if non-null)
// receives the first |ld_preloads_count| of them. Returns true on
// success.
static bool find_libraries(soinfo* start_with, const char* const library_names[], size_t library_names_count, soinfo* soinfos[],
    soinfo* ld_preloads[], size_t ld_preloads_count, int rtld_flags, const android_dlextinfo* extinfo) {
  // Step 0: prepare.
  LoadTaskList load_tasks;
  for (size_t i = 0; i < library_names_count; ++i) {
    const char* name = library_names[i];
    load_tasks.push_back(LoadTask::create(name, start_with));
  }

  // Construct global_group.
  soinfo::soinfo_list_t global_group = make_global_group();

  // If soinfos array is null allocate one on stack.
  // The array is needed in case of failure; for example
  // when library_names[] = {libone.so, libtwo.so} and libone.so
  // is loaded correctly but libtwo.so failed for some reason.
  // In this case libone.so should be unloaded on return.
  // See also implementation of failure_guard below.

  if (soinfos == nullptr) {
    size_t soinfos_size = sizeof(soinfo*)*library_names_count;
    soinfos = reinterpret_cast<soinfo**>(alloca(soinfos_size));
    memset(soinfos, 0, soinfos_size);
  }

  // list of libraries to link - see step 2.
  size_t soinfos_count = 0;

  // Runs on every early return: free pending tasks and unload anything
  // we partially loaded. Disabled below once linking succeeds.
  auto failure_guard = make_scope_guard([&]() {
    // Housekeeping
    load_tasks.for_each([] (LoadTask* t) {
      LoadTask::deleter(t);
    });

    for (size_t i = 0; i<soinfos_count; ++i) {
      soinfo_unload(soinfos[i]);
    }
  });

  // Step 1: load and pre-link all DT_NEEDED libraries in breadth first order.
  for (LoadTask::unique_ptr task(load_tasks.pop_front()); task.get() != nullptr; task.reset(load_tasks.pop_front())) {
    soinfo* si = find_library_internal(load_tasks, task->get_name(), rtld_flags, extinfo);
    if (si == nullptr) {
      return false;
    }

    soinfo* needed_by = task->get_needed_by();

    if (needed_by != nullptr) {
      needed_by->add_child(si);
    }

    // Libraries that were already linked just gain a reference.
    if (si->is_linked()) {
      si->increment_ref_count();
    }

    // When ld_preloads is not null, the first
    // ld_preloads_count libs are in fact ld_preloads.
    if (ld_preloads != nullptr && soinfos_count < ld_preloads_count) {
      // Add LD_PRELOADed libraries to the global group for future runs.
      // There is no need to explicitly add them to the global group
      // for this run because they are going to appear in the local
      // group in the correct order.
      si->set_dt_flags_1(si->get_dt_flags_1() | DF_1_GLOBAL);
      ld_preloads[soinfos_count] = si;
    }

    if (soinfos_count < library_names_count) {
      soinfos[soinfos_count++] = si;
    }
  }

  // Step 2: link libraries.
  soinfo::soinfo_list_t local_group;
  walk_dependencies_tree(
    start_with == nullptr ? soinfos : &start_with,
    start_with == nullptr ? soinfos_count : 1,
    [&] (soinfo* si) {
      local_group.push_back(si);
      return true;
    });

  // We need to increment ref_count in case
  // the root of the local group was not linked.
  bool was_local_group_root_linked = local_group.front()->is_linked();

  bool linked = local_group.visit([&](soinfo* si) {
    if (!si->is_linked()) {
      if (!si->link_image(global_group, local_group, extinfo)) {
        return false;
      }
      si->set_linked();
    }

    return true;
  });

  if (linked) {
    failure_guard.disable();
  }

  if (!was_local_group_root_linked) {
    local_group.front()->increment_ref_count();
  }

  return linked;
}
1115 static soinfo* find_library(const char* name, int rtld_flags, const android_dlextinfo* extinfo) {
1116 soinfo* si;
1118 if (name == nullptr) {
1119 si = somain;
1120 } else if (!find_libraries(nullptr, &name, 1, &si, nullptr, 0, rtld_flags, extinfo)) {
1121 return nullptr;
1122 }
1124 return si;
1125 }
1127 static void soinfo_unload(soinfo* root) {
1128 // Note that the library can be loaded but not linked;
1129 // in which case there is no root but we still need
1130 // to walk the tree and unload soinfos involved.
1131 //
1132 // This happens on unsuccessful dlopen, when one of
1133 // the DT_NEEDED libraries could not be linked/found.
1134 if (root->is_linked()) {
1135 root = root->get_local_group_root();
1136 }
1138 if (!root->can_unload()) {
1139 TRACE("not unloading '%s' - the binary is flagged with NODELETE", root->name);
1140 return;
1141 }
1143 size_t ref_count = root->is_linked() ? root->decrement_ref_count() : 0;
1145 if (ref_count == 0) {
1146 soinfo::soinfo_list_t local_unload_list;
1147 soinfo::soinfo_list_t external_unload_list;
1148 soinfo::soinfo_list_t depth_first_list;
1149 depth_first_list.push_back(root);
1150 soinfo* si = nullptr;
1152 while ((si = depth_first_list.pop_front()) != nullptr) {
1153 local_unload_list.push_back(si);
1154 if (si->has_min_version(0)) {
1155 soinfo* child = nullptr;
1156 while ((child = si->get_children().pop_front()) != nullptr) {
1157 TRACE("%s needs to unload %s", si->name, child->name);
1158 if (local_unload_list.contains(child)) {
1159 continue;
1160 } else if (child->get_local_group_root() != root) {
1161 external_unload_list.push_back(child);
1162 } else {
1163 depth_first_list.push_front(child);
1164 }
1165 }
1166 } else {
1167 for_each_dt_needed(si, [&] (const char* library_name) {
1168 TRACE("deprecated (old format of soinfo): %s needs to unload %s", si->name, library_name);
1169 soinfo* needed = find_library(library_name, RTLD_NOLOAD, nullptr);
1170 if (needed != nullptr) {
1171 // Not found: for example if symlink was deleted between dlopen and dlclose
1172 // Since we cannot really handle errors at this point - print and continue.
1173 PRINT("warning: couldn't find %s needed by %s on unload.", library_name, si->name);
1174 return;
1175 } else if (local_unload_list.contains(needed)) {
1176 // already visited
1177 return;
1178 } else if (needed->get_local_group_root() != root) {
1179 // external group
1180 external_unload_list.push_back(needed);
1181 } else {
1182 // local group
1183 depth_first_list.push_front(needed);
1184 }
1185 });
1186 }
1187 }
1189 local_unload_list.for_each([](soinfo* si) {
1190 si->call_destructors();
1191 });
1193 while ((si = local_unload_list.pop_front()) != nullptr) {
1194 notify_gdb_of_unload(si);
1195 soinfo_free(si);
1196 }
1198 while ((si = external_unload_list.pop_front()) != nullptr) {
1199 soinfo_unload(si);
1200 }
1201 } else {
1202 TRACE("not unloading '%s' group, decrementing ref_count to %zd", root->name, ref_count);
1203 }
1204 }
1206 void do_android_get_LD_LIBRARY_PATH(char* buffer, size_t buffer_size) {
1207 // Use basic string manipulation calls to avoid snprintf.
1208 // snprintf indirectly calls pthread_getspecific to get the size of a buffer.
1209 // When debug malloc is enabled, this call returns 0. This in turn causes
1210 // snprintf to do nothing, which causes libraries to fail to load.
1211 // See b/17302493 for further details.
1212 // Once the above bug is fixed, this code can be modified to use
1213 // snprintf again.
1214 size_t required_len = strlen(kDefaultLdPaths[0]) + strlen(kDefaultLdPaths[1]) + 2;
1215 if (buffer_size < required_len) {
1216 __libc_fatal("android_get_LD_LIBRARY_PATH failed, buffer too small: buffer len %zu, required len %zu",
1217 buffer_size, required_len);
1218 }
1219 char* end = stpcpy(buffer, kDefaultLdPaths[0]);
1220 *end = ':';
1221 strcpy(end + 1, kDefaultLdPaths[1]);
1222 }
1224 void do_android_update_LD_LIBRARY_PATH(const char* ld_library_path) {
1225 if (!get_AT_SECURE()) {
1226 parse_LD_LIBRARY_PATH(ld_library_path);
1227 }
1228 }
1230 soinfo* do_dlopen(const char* name, int flags, const android_dlextinfo* extinfo) {
1231 if ((flags & ~(RTLD_NOW|RTLD_LAZY|RTLD_LOCAL|RTLD_GLOBAL|RTLD_NODELETE|RTLD_NOLOAD)) != 0) {
1232 DL_ERR("invalid flags to dlopen: %x", flags);
1233 return nullptr;
1234 }
1235 if (extinfo != nullptr) {
1236 if ((extinfo->flags & ~(ANDROID_DLEXT_VALID_FLAG_BITS)) != 0) {
1237 DL_ERR("invalid extended flags to android_dlopen_ext: 0x%" PRIx64, extinfo->flags);
1238 return nullptr;
1239 }
1240 if ((extinfo->flags & ANDROID_DLEXT_USE_LIBRARY_FD) == 0 &&
1241 (extinfo->flags & ANDROID_DLEXT_USE_LIBRARY_FD_OFFSET) != 0) {
1242 DL_ERR("invalid extended flag combination (ANDROID_DLEXT_USE_LIBRARY_FD_OFFSET without ANDROID_DLEXT_USE_LIBRARY_FD): 0x%" PRIx64, extinfo->flags);
1243 return nullptr;
1244 }
1245 }
1246 protect_data(PROT_READ | PROT_WRITE);
1247 soinfo* si = find_library(name, flags, extinfo);
1248 if (si != nullptr) {
1249 si->call_constructors();
1250 }
1251 protect_data(PROT_READ);
1252 return si;
1253 }
// Implements dlclose(3): unloads 'si' (and possibly its local group).
// The linker's bookkeeping pages are mprotect()ed read-only between entry
// points, so they are opened for writing around the unload.
void do_dlclose(soinfo* si) {
  protect_data(PROT_READ | PROT_WRITE);
  soinfo_unload(si);
  protect_data(PROT_READ);
}
1261 static ElfW(Addr) call_ifunc_resolver(ElfW(Addr) resolver_addr) {
1262 typedef ElfW(Addr) (*ifunc_resolver_t)(void);
1263 ifunc_resolver_t ifunc_resolver = reinterpret_cast<ifunc_resolver_t>(resolver_addr);
1264 ElfW(Addr) ifunc_addr = ifunc_resolver();
1265 TRACE_TYPE(RELO, "Called ifunc_resolver@%p. The result is %p", ifunc_resolver, reinterpret_cast<void*>(ifunc_addr));
1267 return ifunc_addr;
1268 }
1270 #if defined(USE_RELA)
// Applies 'count' RELA relocations starting at 'rela' to this library.
// For each entry the referenced symbol (if any) is resolved against the
// global and local groups, then the word at r_offset + load_bias is
// patched according to the relocation type. Returns 0 on success, -1 on
// error (unresolved non-weak symbol, value out of range, unknown type).
int soinfo::relocate(ElfW(Rela)* rela, unsigned count, const soinfo_list_t& global_group, const soinfo_list_t& local_group) {
  for (size_t idx = 0; idx < count; ++idx, ++rela) {
    unsigned type = ELFW(R_TYPE)(rela->r_info);
    unsigned sym = ELFW(R_SYM)(rela->r_info);
    // Address of the word being patched.
    ElfW(Addr) reloc = static_cast<ElfW(Addr)>(rela->r_offset + load_bias);
    ElfW(Addr) sym_addr = 0;
    const char* sym_name = nullptr;

    DEBUG("Processing '%s' relocation at index %zd", name, idx);
    if (type == 0) { // R_*_NONE
      continue;
    }

    ElfW(Sym)* s = nullptr;
    soinfo* lsi = nullptr;

    if (sym != 0) {
      sym_name = get_string(symtab_[sym].st_name);
      s = soinfo_do_lookup(this, sym_name, &lsi, global_group,local_group);
      if (s == nullptr) {
        // We only allow an undefined symbol if this is a weak reference...
        s = &symtab_[sym];
        if (ELF_ST_BIND(s->st_info) != STB_WEAK) {
          DL_ERR("cannot locate symbol \"%s\" referenced by \"%s\"...", sym_name, name);
          return -1;
        }

        /* IHI0044C AAELF 4.5.1.1:

           Libraries are not searched to resolve weak references.
           It is not an error for a weak reference to remain unsatisfied.

           During linking, the value of an undefined weak reference is:
           - Zero if the relocation type is absolute
           - The address of the place if the relocation is pc-relative
           - The address of nominal base address if the relocation
             type is base-relative.
         */

        switch (type) {
#if defined(__aarch64__)
          case R_AARCH64_JUMP_SLOT:
          case R_AARCH64_GLOB_DAT:
          case R_AARCH64_ABS64:
          case R_AARCH64_ABS32:
          case R_AARCH64_ABS16:
          case R_AARCH64_RELATIVE:
          case R_AARCH64_IRELATIVE:
            /*
             * The sym_addr was initialized to be zero above, or the relocation
             * code below does not care about value of sym_addr.
             * No need to do anything.
             */
            break;
#elif defined(__x86_64__)
          case R_X86_64_JUMP_SLOT:
          case R_X86_64_GLOB_DAT:
          case R_X86_64_32:
          case R_X86_64_64:
          case R_X86_64_RELATIVE:
          case R_X86_64_IRELATIVE:
            // No need to do anything.
            break;
          case R_X86_64_PC32:
            // pc-relative: the value of an undefined weak reference is the
            // address of the place itself.
            sym_addr = reloc;
            break;
#endif
          default:
            DL_ERR("unknown weak reloc type %d @ %p (%zu)", type, rela, idx);
            return -1;
        }
      } else {
        // We got a definition.
        sym_addr = lsi->resolve_symbol_address(s);
      }
      count_relocation(kRelocSymbol);
    }

    switch (type) {
#if defined(__aarch64__)
      case R_AARCH64_JUMP_SLOT:
        count_relocation(kRelocAbsolute);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO JMP_SLOT %16llx <- %16llx %s\n",
                   reloc, (sym_addr + rela->r_addend), sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = (sym_addr + rela->r_addend);
        break;
      case R_AARCH64_GLOB_DAT:
        count_relocation(kRelocAbsolute);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO GLOB_DAT %16llx <- %16llx %s\n",
                   reloc, (sym_addr + rela->r_addend), sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = (sym_addr + rela->r_addend);
        break;
      case R_AARCH64_ABS64:
        count_relocation(kRelocAbsolute);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO ABS64 %16llx <- %16llx %s\n",
                   reloc, (sym_addr + rela->r_addend), sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr + rela->r_addend);
        break;
      case R_AARCH64_ABS32:
        // 32-bit absolute value: the result must fit in [INT32_MIN, UINT32_MAX].
        count_relocation(kRelocAbsolute);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO ABS32 %16llx <- %16llx %s\n",
                   reloc, (sym_addr + rela->r_addend), sym_name);
        if ((static_cast<ElfW(Addr)>(INT32_MIN) <= (*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend))) &&
            ((*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend)) <= static_cast<ElfW(Addr)>(UINT32_MAX))) {
          *reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr + rela->r_addend);
        } else {
          DL_ERR("0x%016llx out of range 0x%016llx to 0x%016llx",
                 (*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend)),
                 static_cast<ElfW(Addr)>(INT32_MIN),
                 static_cast<ElfW(Addr)>(UINT32_MAX));
          return -1;
        }
        break;
      case R_AARCH64_ABS16:
        // 16-bit absolute value: the result must fit in [INT16_MIN, UINT16_MAX].
        count_relocation(kRelocAbsolute);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO ABS16 %16llx <- %16llx %s\n",
                   reloc, (sym_addr + rela->r_addend), sym_name);
        if ((static_cast<ElfW(Addr)>(INT16_MIN) <= (*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend))) &&
            ((*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend)) <= static_cast<ElfW(Addr)>(UINT16_MAX))) {
          *reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr + rela->r_addend);
        } else {
          DL_ERR("0x%016llx out of range 0x%016llx to 0x%016llx",
                 (*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend)),
                 static_cast<ElfW(Addr)>(INT16_MIN),
                 static_cast<ElfW(Addr)>(UINT16_MAX));
          return -1;
        }
        break;
      case R_AARCH64_PREL64:
        count_relocation(kRelocRelative);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO REL64 %16llx <- %16llx - %16llx %s\n",
                   reloc, (sym_addr + rela->r_addend), rela->r_offset, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr + rela->r_addend) - rela->r_offset;
        break;
      case R_AARCH64_PREL32:
        count_relocation(kRelocRelative);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO REL32 %16llx <- %16llx - %16llx %s\n",
                   reloc, (sym_addr + rela->r_addend), rela->r_offset, sym_name);
        if ((static_cast<ElfW(Addr)>(INT32_MIN) <= (*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset))) &&
            ((*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)) <= static_cast<ElfW(Addr)>(UINT32_MAX))) {
          *reinterpret_cast<ElfW(Addr)*>(reloc) += ((sym_addr + rela->r_addend) - rela->r_offset);
        } else {
          DL_ERR("0x%016llx out of range 0x%016llx to 0x%016llx",
                 (*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)),
                 static_cast<ElfW(Addr)>(INT32_MIN),
                 static_cast<ElfW(Addr)>(UINT32_MAX));
          return -1;
        }
        break;
      case R_AARCH64_PREL16:
        count_relocation(kRelocRelative);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO REL16 %16llx <- %16llx - %16llx %s\n",
                   reloc, (sym_addr + rela->r_addend), rela->r_offset, sym_name);
        if ((static_cast<ElfW(Addr)>(INT16_MIN) <= (*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset))) &&
            ((*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)) <= static_cast<ElfW(Addr)>(UINT16_MAX))) {
          *reinterpret_cast<ElfW(Addr)*>(reloc) += ((sym_addr + rela->r_addend) - rela->r_offset);
        } else {
          DL_ERR("0x%016llx out of range 0x%016llx to 0x%016llx",
                 (*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)),
                 static_cast<ElfW(Addr)>(INT16_MIN),
                 static_cast<ElfW(Addr)>(UINT16_MAX));
          return -1;
        }
        break;

      case R_AARCH64_RELATIVE:
        count_relocation(kRelocRelative);
        MARK(rela->r_offset);
        if (sym) {
          DL_ERR("odd RELATIVE form...");
          return -1;
        }
        TRACE_TYPE(RELO, "RELO RELATIVE %16llx <- %16llx\n",
                   reloc, (base + rela->r_addend));
        *reinterpret_cast<ElfW(Addr)*>(reloc) = (base + rela->r_addend);
        break;

      case R_AARCH64_IRELATIVE:
        count_relocation(kRelocRelative);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO IRELATIVE %16llx <- %16llx\n", reloc, (base + rela->r_addend));
        *reinterpret_cast<ElfW(Addr)*>(reloc) = call_ifunc_resolver(base + rela->r_addend);
        break;

      case R_AARCH64_COPY:
        /*
         * ET_EXEC is not supported so this should not happen.
         *
         * http://infocenter.arm.com/help/topic/com.arm.doc.ihi0044d/IHI0044D_aaelf.pdf
         *
         * Section 4.7.1.10 "Dynamic relocations"
         * R_AARCH64_COPY may only appear in executable objects where e_type is
         * set to ET_EXEC.
         */
        DL_ERR("%s R_AARCH64_COPY relocations are not supported", name);
        return -1;
      case R_AARCH64_TLS_TPREL64:
        // TLS relocations are traced but not applied here.
        TRACE_TYPE(RELO, "RELO TLS_TPREL64 *** %16llx <- %16llx - %16llx\n",
                   reloc, (sym_addr + rela->r_addend), rela->r_offset);
        break;
      case R_AARCH64_TLS_DTPREL32:
        TRACE_TYPE(RELO, "RELO TLS_DTPREL32 *** %16llx <- %16llx - %16llx\n",
                   reloc, (sym_addr + rela->r_addend), rela->r_offset);
        break;
#elif defined(__x86_64__)
      case R_X86_64_JUMP_SLOT:
        count_relocation(kRelocAbsolute);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO JMP_SLOT %08zx <- %08zx %s", static_cast<size_t>(reloc),
                   static_cast<size_t>(sym_addr + rela->r_addend), sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend;
        break;
      case R_X86_64_GLOB_DAT:
        count_relocation(kRelocAbsolute);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO GLOB_DAT %08zx <- %08zx %s", static_cast<size_t>(reloc),
                   static_cast<size_t>(sym_addr + rela->r_addend), sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend;
        break;
      case R_X86_64_RELATIVE:
        count_relocation(kRelocRelative);
        MARK(rela->r_offset);
        if (sym) {
          DL_ERR("odd RELATIVE form...");
          return -1;
        }
        TRACE_TYPE(RELO, "RELO RELATIVE %08zx <- +%08zx", static_cast<size_t>(reloc),
                   static_cast<size_t>(base));
        *reinterpret_cast<ElfW(Addr)*>(reloc) = base + rela->r_addend;
        break;
      case R_X86_64_IRELATIVE:
        count_relocation(kRelocRelative);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO IRELATIVE %16llx <- %16llx\n", reloc, (base + rela->r_addend));
        *reinterpret_cast<ElfW(Addr)*>(reloc) = call_ifunc_resolver(base + rela->r_addend);
        break;
      case R_X86_64_32:
        count_relocation(kRelocRelative);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO R_X86_64_32 %08zx <- +%08zx %s", static_cast<size_t>(reloc),
                   static_cast<size_t>(sym_addr), sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend;
        break;
      case R_X86_64_64:
        count_relocation(kRelocRelative);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO R_X86_64_64 %08zx <- +%08zx %s", static_cast<size_t>(reloc),
                   static_cast<size_t>(sym_addr), sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend;
        break;
      case R_X86_64_PC32:
        count_relocation(kRelocRelative);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO R_X86_64_PC32 %08zx <- +%08zx (%08zx - %08zx) %s",
                   static_cast<size_t>(reloc), static_cast<size_t>(sym_addr - reloc),
                   static_cast<size_t>(sym_addr), static_cast<size_t>(reloc), sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend - reloc;
        break;
#endif

      default:
        DL_ERR("unknown reloc type %d @ %p (%zu)", type, rela, idx);
        return -1;
    }
  }
  return 0;
}
1547 #else // REL, not RELA.
// Applies 'count' REL relocations starting at 'rel' to this library.
// Same contract as the RELA variant, but the addend is implicit (stored
// in the word being patched). Returns 0 on success, -1 on error.
int soinfo::relocate(ElfW(Rel)* rel, unsigned count, const soinfo_list_t& global_group, const soinfo_list_t& local_group) {
  for (size_t idx = 0; idx < count; ++idx, ++rel) {
    unsigned type = ELFW(R_TYPE)(rel->r_info);
    // TODO: don't use unsigned for 'sym'. Use uint32_t or ElfW(Addr) instead.
    unsigned sym = ELFW(R_SYM)(rel->r_info);
    // Address of the word being patched.
    ElfW(Addr) reloc = static_cast<ElfW(Addr)>(rel->r_offset + load_bias);
    ElfW(Addr) sym_addr = 0;
    const char* sym_name = nullptr;

    DEBUG("Processing '%s' relocation at index %zd", name, idx);
    if (type == 0) { // R_*_NONE
      continue;
    }

    ElfW(Sym)* s = nullptr;
    soinfo* lsi = nullptr;

    if (sym != 0) {
      sym_name = get_string(symtab_[sym].st_name);
      s = soinfo_do_lookup(this, sym_name, &lsi, global_group, local_group);
      if (s == nullptr) {
        // We only allow an undefined symbol if this is a weak reference...
        s = &symtab_[sym];
        if (ELF_ST_BIND(s->st_info) != STB_WEAK) {
          DL_ERR("cannot locate symbol \"%s\" referenced by \"%s\"...", sym_name, name);
          return -1;
        }

        /* IHI0044C AAELF 4.5.1.1:

           Libraries are not searched to resolve weak references.
           It is not an error for a weak reference to remain
           unsatisfied.

           During linking, the value of an undefined weak reference is:
           - Zero if the relocation type is absolute
           - The address of the place if the relocation is pc-relative
           - The address of nominal base address if the relocation
             type is base-relative.
         */

        switch (type) {
#if defined(__arm__)
          case R_ARM_JUMP_SLOT:
          case R_ARM_GLOB_DAT:
          case R_ARM_ABS32:
          case R_ARM_RELATIVE: /* Don't care. */
            // sym_addr was initialized to be zero above or relocation
            // code below does not care about value of sym_addr.
            // No need to do anything.
            break;
#elif defined(__i386__)
          case R_386_JMP_SLOT:
          case R_386_GLOB_DAT:
          case R_386_32:
          case R_386_RELATIVE: /* Don't care. */
          case R_386_IRELATIVE:
            // sym_addr was initialized to be zero above or relocation
            // code below does not care about value of sym_addr.
            // No need to do anything.
            break;
          case R_386_PC32:
            // pc-relative: the value of an undefined weak reference is the
            // address of the place itself.
            sym_addr = reloc;
            break;
#endif

#if defined(__arm__)
          case R_ARM_COPY:
            // Fall through. Can't really copy if weak symbol is not found at run-time.
#endif
          default:
            DL_ERR("unknown weak reloc type %d @ %p (%zu)", type, rel, idx);
            return -1;
        }
      } else {
        // We got a definition.
        sym_addr = lsi->resolve_symbol_address(s);
      }
      count_relocation(kRelocSymbol);
    }

    switch (type) {
#if defined(__arm__)
      case R_ARM_JUMP_SLOT:
        count_relocation(kRelocAbsolute);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO JMP_SLOT %08x <- %08x %s", reloc, sym_addr, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr;
        break;
      case R_ARM_GLOB_DAT:
        count_relocation(kRelocAbsolute);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO GLOB_DAT %08x <- %08x %s", reloc, sym_addr, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr;
        break;
      case R_ARM_ABS32:
        count_relocation(kRelocAbsolute);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO ABS %08x <- %08x %s", reloc, sym_addr, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) += sym_addr;
        break;
      case R_ARM_REL32:
        count_relocation(kRelocRelative);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO REL32 %08x <- %08x - %08x %s",
                   reloc, sym_addr, rel->r_offset, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) += sym_addr - rel->r_offset;
        break;
      case R_ARM_COPY:
        /*
         * ET_EXEC is not supported so this should not happen.
         *
         * http://infocenter.arm.com/help/topic/com.arm.doc.ihi0044d/IHI0044D_aaelf.pdf
         *
         * Section 4.7.1.10 "Dynamic relocations"
         * R_ARM_COPY may only appear in executable objects where e_type is
         * set to ET_EXEC.
         */
        DL_ERR("%s R_ARM_COPY relocations are not supported", name);
        return -1;
#elif defined(__i386__)
      case R_386_JMP_SLOT:
        count_relocation(kRelocAbsolute);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO JMP_SLOT %08x <- %08x %s", reloc, sym_addr, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr;
        break;
      case R_386_GLOB_DAT:
        count_relocation(kRelocAbsolute);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO GLOB_DAT %08x <- %08x %s", reloc, sym_addr, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr;
        break;
      case R_386_32:
        count_relocation(kRelocRelative);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO R_386_32 %08x <- +%08x %s", reloc, sym_addr, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) += sym_addr;
        break;
      case R_386_PC32:
        count_relocation(kRelocRelative);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO R_386_PC32 %08x <- +%08x (%08x - %08x) %s",
                   reloc, (sym_addr - reloc), sym_addr, reloc, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr - reloc);
        break;
#elif defined(__mips__)
      case R_MIPS_REL32:
#if defined(__LP64__)
        // MIPS Elf64_Rel entries contain compound relocations
        // We only handle the R_MIPS_NONE|R_MIPS_64|R_MIPS_REL32 case
        if (ELF64_R_TYPE2(rel->r_info) != R_MIPS_64 ||
            ELF64_R_TYPE3(rel->r_info) != R_MIPS_NONE) {
          DL_ERR("Unexpected compound relocation type:%d type2:%d type3:%d @ %p (%zu)",
                 type, (unsigned)ELF64_R_TYPE2(rel->r_info),
                 (unsigned)ELF64_R_TYPE3(rel->r_info), rel, idx);
          return -1;
        }
#endif
        count_relocation(kRelocAbsolute);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO REL32 %08zx <- %08zx %s", static_cast<size_t>(reloc),
                   static_cast<size_t>(sym_addr), sym_name ? sym_name : "*SECTIONHDR*");
        // Symbol-less entries rebase against the load address instead.
        if (s) {
          *reinterpret_cast<ElfW(Addr)*>(reloc) += sym_addr;
        } else {
          *reinterpret_cast<ElfW(Addr)*>(reloc) += base;
        }
        break;
#endif

#if defined(__arm__)
      case R_ARM_RELATIVE:
#elif defined(__i386__)
      case R_386_RELATIVE:
#endif
        count_relocation(kRelocRelative);
        MARK(rel->r_offset);
        if (sym) {
          DL_ERR("odd RELATIVE form...");
          return -1;
        }
        TRACE_TYPE(RELO, "RELO RELATIVE %p <- +%p",
                   reinterpret_cast<void*>(reloc), reinterpret_cast<void*>(base));
        *reinterpret_cast<ElfW(Addr)*>(reloc) += base;
        break;
#if defined(__i386__)
      case R_386_IRELATIVE:
        count_relocation(kRelocRelative);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO IRELATIVE %p <- %p", reinterpret_cast<void*>(reloc), reinterpret_cast<void*>(base));
        *reinterpret_cast<ElfW(Addr)*>(reloc) = call_ifunc_resolver(base + *reinterpret_cast<ElfW(Addr)*>(reloc));
        break;
#endif

      default:
        DL_ERR("unknown reloc type %d @ %p (%zu)", type, rel, idx);
        return -1;
    }
  }
  return 0;
}
1750 #endif
1752 #if defined(__mips__)
// MIPS-specific GOT fixup: rebases the local GOT entries by load_bias and
// resolves each global GOT entry against the global/local groups.
// Returns false if a non-weak symbol cannot be located.
bool soinfo::mips_relocate_got(const soinfo_list_t& global_group, const soinfo_list_t& local_group) {
  ElfW(Addr)** got = plt_got_;
  if (got == nullptr) {
    return true;
  }

  // got[0] is the address of the lazy resolver function.
  // got[1] may be used for a GNU extension.
  // Set it to a recognizable address in case someone calls it (should be _rtld_bind_start).
  // FIXME: maybe this should be in a separate routine?
  if ((flags_ & FLAG_LINKER) == 0) {
    size_t g = 0;
    got[g++] = reinterpret_cast<ElfW(Addr)*>(0xdeadbeef);
    // A negative got[1] signals the GNU extension slot is present.
    if (reinterpret_cast<intptr_t>(got[g]) < 0) {
      got[g++] = reinterpret_cast<ElfW(Addr)*>(0xdeadfeed);
    }
    // Relocate the local GOT entries.
    for (; g < mips_local_gotno_; g++) {
      got[g] = reinterpret_cast<ElfW(Addr)*>(reinterpret_cast<uintptr_t>(got[g]) + load_bias);
    }
  }

  // Now for the global GOT entries...
  ElfW(Sym)* sym = symtab_ + mips_gotsym_;
  got = plt_got_ + mips_local_gotno_;
  for (size_t g = mips_gotsym_; g < mips_symtabno_; g++, sym++, got++) {
    // This is an undefined reference... try to locate it.
    const char* sym_name = get_string(sym->st_name);
    soinfo* lsi = nullptr;
    ElfW(Sym)* s = soinfo_do_lookup(this, sym_name, &lsi, global_group, local_group);
    if (s == nullptr) {
      // We only allow an undefined symbol if this is a weak reference.
      s = &symtab_[g];
      if (ELF_ST_BIND(s->st_info) != STB_WEAK) {
        DL_ERR("cannot locate \"%s\"...", sym_name);
        return false;
      }
      // Undefined weak references resolve to zero.
      *got = 0;
    } else {
      // FIXME: is this sufficient?
      // For reference see NetBSD link loader
      // http://cvsweb.netbsd.org/bsdweb.cgi/src/libexec/ld.elf_so/arch/mips/mips_reloc.c?rev=1.53&content-type=text/x-cvsweb-markup
      *got = reinterpret_cast<ElfW(Addr)*>(lsi->resolve_symbol_address(s));
    }
  }
  return true;
}
1800 #endif
1802 void soinfo::call_array(const char* array_name __unused, linker_function_t* functions, size_t count, bool reverse) {
1803 if (functions == nullptr) {
1804 return;
1805 }
1807 TRACE("[ Calling %s (size %zd) @ %p for '%s' ]", array_name, count, functions, name);
1809 int begin = reverse ? (count - 1) : 0;
1810 int end = reverse ? -1 : count;
1811 int step = reverse ? -1 : 1;
1813 for (int i = begin; i != end; i += step) {
1814 TRACE("[ %s[%d] == %p ]", array_name, i, functions[i]);
1815 call_function("function", functions[i]);
1816 }
1818 TRACE("[ Done calling %s for '%s' ]", array_name, name);
1819 }
// Invokes a single DT_INIT/DT_FINI-style function pointer.
// Null and -1 sentinel entries (used by some toolchains) are skipped.
void soinfo::call_function(const char* function_name __unused, linker_function_t function) {
  if (function == nullptr || reinterpret_cast<uintptr_t>(function) == static_cast<uintptr_t>(-1)) {
    return;
  }

  TRACE("[ Calling %s @ %p for '%s' ]", function_name, function, name);
  function();
  TRACE("[ Done calling %s @ %p for '%s' ]", function_name, function, name);

  // The function may have called dlopen(3) or dlclose(3), so we need to ensure our data structures
  // are still writable. This happens with our debug malloc (see http://b/7941716).
  protect_data(PROT_READ | PROT_WRITE);
}
// Runs this executable's DT_PREINIT_ARRAY entries.
// DT_PREINIT_ARRAY functions are called before any other constructors for executables,
// but ignored in a shared library.
void soinfo::call_pre_init_constructors() {
  call_array("DT_PREINIT_ARRAY", preinit_array_, preinit_array_count_, false);
}
// Runs constructors for this library and (recursively, first) for all of
// its children: DT_INIT before DT_INIT_ARRAY. Idempotent per soinfo.
void soinfo::call_constructors() {
  if (constructors_called) {
    return;
  }

  // We set constructors_called before actually calling the constructors, otherwise it doesn't
  // protect against recursive constructor calls. One simple example of constructor recursion
  // is the libc debug malloc, which is implemented in libc_malloc_debug_leak.so:
  // 1. The program depends on libc, so libc's constructor is called here.
  // 2. The libc constructor calls dlopen() to load libc_malloc_debug_leak.so.
  // 3. dlopen() calls the constructors on the newly created
  //    soinfo for libc_malloc_debug_leak.so.
  // 4. The debug .so depends on libc, so CallConstructors is
  //    called again with the libc soinfo. If it doesn't trigger the early-
  //    out above, the libc constructor will be called again (recursively!).
  constructors_called = true;

  if (!is_main_executable() && preinit_array_ != nullptr) {
    // The GNU dynamic linker silently ignores these, but we warn the developer.
    PRINT("\"%s\": ignoring %zd-entry DT_PREINIT_ARRAY in shared library!",
          name, preinit_array_count_);
  }

  // Dependencies are constructed before the library that needs them.
  get_children().for_each([] (soinfo* si) {
    si->call_constructors();
  });

  TRACE("\"%s\": calling constructors", name);

  // DT_INIT should be called before DT_INIT_ARRAY if both are present.
  call_function("DT_INIT", init_func_);
  call_array("DT_INIT_ARRAY", init_array_, init_array_count_, false);
}
// Runs destructors for this library: DT_FINI_ARRAY (in reverse) before
// DT_FINI. No-op if constructors never ran.
void soinfo::call_destructors() {
  if (!constructors_called) {
    return;
  }
  TRACE("\"%s\": calling destructors", name);

  // DT_FINI_ARRAY must be parsed in reverse order.
  call_array("DT_FINI_ARRAY", fini_array_, fini_array_count_, true);

  // DT_FINI should be called after DT_FINI_ARRAY if both are present.
  call_function("DT_FINI", fini_func_);

  // This is needed on second call to dlopen
  // after library has been unloaded with RTLD_NODELETE
  constructors_called = false;
}
1892 void soinfo::add_child(soinfo* child) {
1893 if (has_min_version(0)) {
1894 child->parents_.push_back(this);
1895 this->children_.push_back(child);
1896 }
1897 }
1899 void soinfo::remove_all_links() {
1900 if (!has_min_version(0)) {
1901 return;
1902 }
1904 // 1. Untie connected soinfos from 'this'.
1905 children_.for_each([&] (soinfo* child) {
1906 child->parents_.remove_if([&] (const soinfo* parent) {
1907 return parent == this;
1908 });
1909 });
1911 parents_.for_each([&] (soinfo* parent) {
1912 parent->children_.remove_if([&] (const soinfo* child) {
1913 return child == this;
1914 });
1915 });
1917 // 2. Once everything untied - clear local lists.
1918 parents_.clear();
1919 children_.clear();
1920 }
1922 dev_t soinfo::get_st_dev() const {
1923 if (has_min_version(0)) {
1924 return st_dev_;
1925 }
1927 return 0;
1928 };
1930 ino_t soinfo::get_st_ino() const {
1931 if (has_min_version(0)) {
1932 return st_ino_;
1933 }
1935 return 0;
1936 }
1938 off64_t soinfo::get_file_offset() const {
1939 if (has_min_version(1)) {
1940 return file_offset_;
1941 }
1943 return 0;
1944 }
1946 uint32_t soinfo::get_rtld_flags() const {
1947 if (has_min_version(1)) {
1948 return rtld_flags_;
1949 }
1951 return 0;
1952 }
1954 uint32_t soinfo::get_dt_flags_1() const {
1955 if (has_min_version(1)) {
1956 return dt_flags_1_;
1957 }
1959 return 0;
1960 }
1961 void soinfo::set_dt_flags_1(uint32_t dt_flags_1) {
1962 if (has_min_version(1)) {
1963 if ((dt_flags_1 & DF_1_GLOBAL) != 0) {
1964 rtld_flags_ |= RTLD_GLOBAL;
1965 }
1967 if ((dt_flags_1 & DF_1_NODELETE) != 0) {
1968 rtld_flags_ |= RTLD_NODELETE;
1969 }
1971 dt_flags_1_ = dt_flags_1;
1972 }
1973 }
// This is the object returned by get_children()/get_parents() when
// 'this->flags' does not have FLAG_NEW_SOINFO set.
1977 static soinfo::soinfo_list_t g_empty_list;
1979 soinfo::soinfo_list_t& soinfo::get_children() {
1980 if (has_min_version(0)) {
1981 return children_;
1982 }
1984 return g_empty_list;
1985 }
1987 soinfo::soinfo_list_t& soinfo::get_parents() {
1988 if (has_min_version(0)) {
1989 return parents_;
1990 }
1992 return g_empty_list;
1993 }
1995 ElfW(Addr) soinfo::resolve_symbol_address(ElfW(Sym)* s) {
1996 if (ELF_ST_TYPE(s->st_info) == STT_GNU_IFUNC) {
1997 return call_ifunc_resolver(s->st_value + load_bias);
1998 }
2000 return static_cast<ElfW(Addr)>(s->st_value + load_bias);
2001 }
2003 const char* soinfo::get_string(ElfW(Word) index) const {
2004 if (has_min_version(1) && (index >= strtab_size_)) {
2005 __libc_fatal("%s: strtab out of bounds error; STRSZ=%zd, name=%d", name, strtab_size_, index);
2006 }
2008 return strtab_ + index;
2009 }
2011 bool soinfo::is_gnu_hash() const {
2012 return (flags_ & FLAG_GNU_HASH) != 0;
2013 }
2015 bool soinfo::can_unload() const {
2016 return (get_rtld_flags() & (RTLD_NODELETE | RTLD_GLOBAL)) == 0;
2017 }
2019 bool soinfo::is_linked() const {
2020 return (flags_ & FLAG_LINKED) != 0;
2021 }
2023 bool soinfo::is_main_executable() const {
2024 return (flags_ & FLAG_EXE) != 0;
2025 }
2027 void soinfo::set_linked() {
2028 flags_ |= FLAG_LINKED;
2029 }
2031 void soinfo::set_linker_flag() {
2032 flags_ |= FLAG_LINKER;
2033 }
2035 void soinfo::set_main_executable() {
2036 flags_ |= FLAG_EXE;
2037 }
2039 void soinfo::increment_ref_count() {
2040 local_group_root_->ref_count_++;
2041 }
2043 size_t soinfo::decrement_ref_count() {
2044 return --local_group_root_->ref_count_;
2045 }
2047 soinfo* soinfo::get_local_group_root() const {
2048 return local_group_root_;
2049 }
2051 /* Force any of the closed stdin, stdout and stderr to be associated with
2052 /dev/null. */
2053 static int nullify_closed_stdio() {
2054 int dev_null, i, status;
2055 int return_value = 0;
2057 dev_null = TEMP_FAILURE_RETRY(open("/dev/null", O_RDWR));
2058 if (dev_null < 0) {
2059 DL_ERR("cannot open /dev/null: %s", strerror(errno));
2060 return -1;
2061 }
2062 TRACE("[ Opened /dev/null file-descriptor=%d]", dev_null);
2064 /* If any of the stdio file descriptors is valid and not associated
2065 with /dev/null, dup /dev/null to it. */
2066 for (i = 0; i < 3; i++) {
2067 /* If it is /dev/null already, we are done. */
2068 if (i == dev_null) {
2069 continue;
2070 }
2072 TRACE("[ Nullifying stdio file descriptor %d]", i);
2073 status = TEMP_FAILURE_RETRY(fcntl(i, F_GETFL));
2075 /* If file is opened, we are good. */
2076 if (status != -1) {
2077 continue;
2078 }
2080 /* The only error we allow is that the file descriptor does not
2081 exist, in which case we dup /dev/null to it. */
2082 if (errno != EBADF) {
2083 DL_ERR("fcntl failed: %s", strerror(errno));
2084 return_value = -1;
2085 continue;
2086 }
2088 /* Try dupping /dev/null to this stdio file descriptor and
2089 repeat if there is a signal. Note that any errors in closing
2090 the stdio descriptor are lost. */
2091 status = TEMP_FAILURE_RETRY(dup2(dev_null, i));
2092 if (status < 0) {
2093 DL_ERR("dup2 failed: %s", strerror(errno));
2094 return_value = -1;
2095 continue;
2096 }
2097 }
2099 /* If /dev/null is not one of the stdio file descriptors, close it. */
2100 if (dev_null > 2) {
2101 TRACE("[ Closing /dev/null file-descriptor=%d]", dev_null);
2102 status = TEMP_FAILURE_RETRY(close(dev_null));
2103 if (status == -1) {
2104 DL_ERR("close failed: %s", strerror(errno));
2105 return_value = -1;
2106 }
2107 }
2109 return return_value;
2110 }
// Parse this image's PT_DYNAMIC segment and cache everything needed for
// the later relocation pass: hash tables (SysV or GNU), string/symbol
// tables, relocation tables, init/fini entry points, and the DT_NEEDED
// count. Returns false (with DL_ERR set, unless we are the linker
// relocating itself) on any malformed or unsupported entry.
bool soinfo::prelink_image() {
  /* Extract dynamic section */
  ElfW(Word) dynamic_flags = 0;
  phdr_table_get_dynamic_section(phdr, phnum, load_bias, &dynamic, &dynamic_flags);

  /* We can't log anything until the linker is relocated */
  bool relocating_linker = (flags_ & FLAG_LINKER) != 0;
  if (!relocating_linker) {
    INFO("[ linking %s ]", name);
    DEBUG("si->base = %p si->flags = 0x%08x", reinterpret_cast<void*>(base), flags_);
  }

  if (dynamic == nullptr) {
    if (!relocating_linker) {
      DL_ERR("missing PT_DYNAMIC in \"%s\"", name);
    }
    return false;
  } else {
    if (!relocating_linker) {
      DEBUG("dynamic = %p", dynamic);
    }
  }

#if defined(__arm__)
  (void) phdr_table_get_arm_exidx(phdr, phnum, load_bias,
                                  &ARM_exidx, &ARM_exidx_count);
#endif

  // Extract useful information from dynamic section.
  uint32_t needed_count = 0;
  for (ElfW(Dyn)* d = dynamic; d->d_tag != DT_NULL; ++d) {
    DEBUG("d = %p, d[0](tag) = %p d[1](val) = %p",
          d, reinterpret_cast<void*>(d->d_tag), reinterpret_cast<void*>(d->d_un.d_val));
    switch (d->d_tag) {
      case DT_SONAME:
        // TODO: glibc dynamic linker uses this name for
        // initial library lookup; consider doing the same here.
        break;

      case DT_HASH:
        if (nbucket_ != 0) {
          // in case of --hash-style=both, we prefer gnu
          break;
        }

        // SysV hash layout: [nbucket, nchain, buckets..., chains...]
        nbucket_ = reinterpret_cast<uint32_t*>(load_bias + d->d_un.d_ptr)[0];
        nchain_ = reinterpret_cast<uint32_t*>(load_bias + d->d_un.d_ptr)[1];
        bucket_ = reinterpret_cast<uint32_t*>(load_bias + d->d_un.d_ptr + 8);
        chain_ = reinterpret_cast<uint32_t*>(load_bias + d->d_un.d_ptr + 8 + nbucket_ * 4);
        break;

      case DT_GNU_HASH:
        if (nbucket_ != 0) {
          // in case of --hash-style=both, we prefer gnu
          nchain_ = 0;
        }

        // GNU hash header: [nbucket, symndx, maskwords, shift2], then the
        // bloom filter, then the buckets, then the chain array.
        nbucket_ = reinterpret_cast<uint32_t*>(load_bias + d->d_un.d_ptr)[0];
        // skip symndx
        gnu_maskwords_ = reinterpret_cast<uint32_t*>(load_bias + d->d_un.d_ptr)[2];
        gnu_shift2_ = reinterpret_cast<uint32_t*>(load_bias + d->d_un.d_ptr)[3];

        gnu_bloom_filter_ = reinterpret_cast<ElfW(Addr)*>(load_bias + d->d_un.d_ptr + 16);
        bucket_ = reinterpret_cast<uint32_t*>(gnu_bloom_filter_ + gnu_maskwords_);
        // amend chain for symndx = header[1]
        chain_ = bucket_ + nbucket_ - reinterpret_cast<uint32_t*>(load_bias + d->d_un.d_ptr)[1];

        if (!powerof2(gnu_maskwords_)) {
          DL_ERR("invalid maskwords for gnu_hash = 0x%x, in \"%s\" expecting power to two", gnu_maskwords_, name);
          return false;
        }
        // Turn the word count into a bitmask for fast modulo.
        --gnu_maskwords_;

        flags_ |= FLAG_GNU_HASH;
        break;

      case DT_STRTAB:
        strtab_ = reinterpret_cast<const char*>(load_bias + d->d_un.d_ptr);
        break;

      case DT_STRSZ:
        strtab_size_ = d->d_un.d_val;
        break;

      case DT_SYMTAB:
        symtab_ = reinterpret_cast<ElfW(Sym)*>(load_bias + d->d_un.d_ptr);
        break;

      case DT_SYMENT:
        if (d->d_un.d_val != sizeof(ElfW(Sym))) {
          DL_ERR("invalid DT_SYMENT: %zd in \"%s\"", static_cast<size_t>(d->d_un.d_val), name);
          return false;
        }
        break;

      case DT_PLTREL:
#if defined(USE_RELA)
        if (d->d_un.d_val != DT_RELA) {
          DL_ERR("unsupported DT_PLTREL in \"%s\"; expected DT_RELA", name);
          return false;
        }
#else
        if (d->d_un.d_val != DT_REL) {
          DL_ERR("unsupported DT_PLTREL in \"%s\"; expected DT_REL", name);
          return false;
        }
#endif
        break;

      case DT_JMPREL:
#if defined(USE_RELA)
        plt_rela_ = reinterpret_cast<ElfW(Rela)*>(load_bias + d->d_un.d_ptr);
#else
        plt_rel_ = reinterpret_cast<ElfW(Rel)*>(load_bias + d->d_un.d_ptr);
#endif
        break;

      case DT_PLTRELSZ:
#if defined(USE_RELA)
        plt_rela_count_ = d->d_un.d_val / sizeof(ElfW(Rela));
#else
        plt_rel_count_ = d->d_un.d_val / sizeof(ElfW(Rel));
#endif
        break;

      case DT_PLTGOT:
#if defined(__mips__)
        // Used by mips and mips64.
        plt_got_ = reinterpret_cast<ElfW(Addr)**>(load_bias + d->d_un.d_ptr);
#endif
        // Ignore for other platforms... (because RTLD_LAZY is not supported)
        break;

      case DT_DEBUG:
        // Set the DT_DEBUG entry to the address of _r_debug for GDB
        // if the dynamic table is writable
        // FIXME: not working currently for N64
        // The flags for the LOAD and DYNAMIC program headers do not agree.
        // The LOAD section containing the dynamic table has been mapped as
        // read-only, but the DYNAMIC header claims it is writable.
        // NOTE(review): on mips64 the 'break' below is compiled out with the
        // rest of this #if block, so DT_DEBUG falls through to the next
        // case — presumably intentional given the FIXME, but worth confirming.
#if !(defined(__mips__) && defined(__LP64__))
        if ((dynamic_flags & PF_W) != 0) {
          d->d_un.d_val = reinterpret_cast<uintptr_t>(&_r_debug);
        }
        break;
#endif
#if defined(USE_RELA)
      case DT_RELA:
        rela_ = reinterpret_cast<ElfW(Rela)*>(load_bias + d->d_un.d_ptr);
        break;

      case DT_RELASZ:
        rela_count_ = d->d_un.d_val / sizeof(ElfW(Rela));
        break;

      case DT_RELAENT:
        if (d->d_un.d_val != sizeof(ElfW(Rela))) {
          DL_ERR("invalid DT_RELAENT: %zd", static_cast<size_t>(d->d_un.d_val));
          return false;
        }
        break;

      // ignored (see DT_RELCOUNT comments for details)
      case DT_RELACOUNT:
        break;

      case DT_REL:
        DL_ERR("unsupported DT_REL in \"%s\"", name);
        return false;

      case DT_RELSZ:
        DL_ERR("unsupported DT_RELSZ in \"%s\"", name);
        return false;
#else
      case DT_REL:
        rel_ = reinterpret_cast<ElfW(Rel)*>(load_bias + d->d_un.d_ptr);
        break;

      case DT_RELSZ:
        rel_count_ = d->d_un.d_val / sizeof(ElfW(Rel));
        break;

      case DT_RELENT:
        if (d->d_un.d_val != sizeof(ElfW(Rel))) {
          DL_ERR("invalid DT_RELENT: %zd", static_cast<size_t>(d->d_un.d_val));
          return false;
        }
        break;

      // "Indicates that all RELATIVE relocations have been concatenated together,
      // and specifies the RELATIVE relocation count."
      //
      // TODO: Spec also mentions that this can be used to optimize relocation process;
      // Not currently used by bionic linker - ignored.
      case DT_RELCOUNT:
        break;

      case DT_RELA:
        DL_ERR("unsupported DT_RELA in \"%s\"", name);
        return false;
#endif
      case DT_INIT:
        init_func_ = reinterpret_cast<linker_function_t>(load_bias + d->d_un.d_ptr);
        DEBUG("%s constructors (DT_INIT) found at %p", name, init_func_);
        break;

      case DT_FINI:
        fini_func_ = reinterpret_cast<linker_function_t>(load_bias + d->d_un.d_ptr);
        DEBUG("%s destructors (DT_FINI) found at %p", name, fini_func_);
        break;

      case DT_INIT_ARRAY:
        init_array_ = reinterpret_cast<linker_function_t*>(load_bias + d->d_un.d_ptr);
        DEBUG("%s constructors (DT_INIT_ARRAY) found at %p", name, init_array_);
        break;

      case DT_INIT_ARRAYSZ:
        init_array_count_ = ((unsigned)d->d_un.d_val) / sizeof(ElfW(Addr));
        break;

      case DT_FINI_ARRAY:
        fini_array_ = reinterpret_cast<linker_function_t*>(load_bias + d->d_un.d_ptr);
        DEBUG("%s destructors (DT_FINI_ARRAY) found at %p", name, fini_array_);
        break;

      case DT_FINI_ARRAYSZ:
        fini_array_count_ = ((unsigned)d->d_un.d_val) / sizeof(ElfW(Addr));
        break;

      case DT_PREINIT_ARRAY:
        preinit_array_ = reinterpret_cast<linker_function_t*>(load_bias + d->d_un.d_ptr);
        DEBUG("%s constructors (DT_PREINIT_ARRAY) found at %p", name, preinit_array_);
        break;

      case DT_PREINIT_ARRAYSZ:
        preinit_array_count_ = ((unsigned)d->d_un.d_val) / sizeof(ElfW(Addr));
        break;

      case DT_TEXTREL:
#if defined(__LP64__)
        DL_ERR("text relocations (DT_TEXTREL) found in 64-bit ELF file \"%s\"", name);
        return false;
#else
        has_text_relocations = true;
        break;
#endif

      case DT_SYMBOLIC:
        has_DT_SYMBOLIC = true;
        break;

      case DT_NEEDED:
        ++needed_count;
        break;

      case DT_FLAGS:
        if (d->d_un.d_val & DF_TEXTREL) {
#if defined(__LP64__)
          DL_ERR("text relocations (DF_TEXTREL) found in 64-bit ELF file \"%s\"", name);
          return false;
#else
          has_text_relocations = true;
#endif
        }
        if (d->d_un.d_val & DF_SYMBOLIC) {
          has_DT_SYMBOLIC = true;
        }
        break;

      case DT_FLAGS_1:
        set_dt_flags_1(d->d_un.d_val);

        if ((d->d_un.d_val & ~SUPPORTED_DT_FLAGS_1) != 0) {
          DL_WARN("Unsupported flags DT_FLAGS_1=%p", reinterpret_cast<void*>(d->d_un.d_val));
        }
        break;
#if defined(__mips__)
      case DT_MIPS_RLD_MAP:
        // Set the DT_MIPS_RLD_MAP entry to the address of _r_debug for GDB.
        {
          r_debug** dp = reinterpret_cast<r_debug**>(load_bias + d->d_un.d_ptr);
          *dp = &_r_debug;
        }
        break;

      case DT_MIPS_RLD_VERSION:
      case DT_MIPS_FLAGS:
      case DT_MIPS_BASE_ADDRESS:
      case DT_MIPS_UNREFEXTNO:
        break;

      case DT_MIPS_SYMTABNO:
        mips_symtabno_ = d->d_un.d_val;
        break;

      case DT_MIPS_LOCAL_GOTNO:
        mips_local_gotno_ = d->d_un.d_val;
        break;

      case DT_MIPS_GOTSYM:
        mips_gotsym_ = d->d_un.d_val;
        break;
#endif
      // Ignored: "Its use has been superseded by the DF_BIND_NOW flag"
      case DT_BIND_NOW:
        break;

      // Ignore: bionic does not support symbol versioning...
      case DT_VERSYM:
      case DT_VERDEF:
      case DT_VERDEFNUM:
      case DT_VERNEED:
      case DT_VERNEEDNUM:
        break;

      default:
        if (!relocating_linker) {
          DL_WARN("%s: unused DT entry: type %p arg %p", name,
                  reinterpret_cast<void*>(d->d_tag), reinterpret_cast<void*>(d->d_un.d_val));
        }
        break;
    }
  }

  DEBUG("si->base = %p, si->strtab = %p, si->symtab = %p",
        reinterpret_cast<void*>(base), strtab_, symtab_);

  // Sanity checks.
  if (relocating_linker && needed_count != 0) {
    DL_ERR("linker cannot have DT_NEEDED dependencies on other libraries");
    return false;
  }
  if (nbucket_ == 0) {
    DL_ERR("empty/missing DT_HASH/DT_GNU_HASH in \"%s\" (new hash type from the future?)", name);
    return false;
  }
  if (strtab_ == 0) {
    DL_ERR("empty/missing DT_STRTAB in \"%s\"", name);
    return false;
  }
  if (symtab_ == 0) {
    DL_ERR("empty/missing DT_SYMTAB in \"%s\"", name);
    return false;
  }
  return true;
}
// Apply all relocations recorded by prelink_image() and then re-protect
// segments. Order matters: (optionally) unprotect for text relocations,
// relocate rel/rela and plt tables, re-protect, enable GNU RELRO, then
// serialize/map a shared RELRO segment if extinfo asks for it.
// Returns false on the first failure.
bool soinfo::link_image(const soinfo_list_t& global_group, const soinfo_list_t& local_group, const android_dlextinfo* extinfo) {
  // Every member of a local group shares the group root's ref count;
  // an empty group means we are our own root.
  local_group_root_ = local_group.front();
  if (local_group_root_ == nullptr) {
    local_group_root_ = this;
  }

#if !defined(__LP64__)
  if (has_text_relocations) {
    // Make segments writable to allow text relocations to work properly. We will later call
    // phdr_table_protect_segments() after all of them are applied and all constructors are run.
    DL_WARN("%s has text relocations. This is wasting memory and prevents "
            "security hardening. Please fix.", name);
    if (phdr_table_unprotect_segments(phdr, phnum, load_bias) < 0) {
      DL_ERR("can't unprotect loadable segments for \"%s\": %s",
             name, strerror(errno));
      return false;
    }
  }
#endif

  // Note: relocate() signals failure with a non-zero return here
  // (opposite sense from mips_relocate_got() below).
#if defined(USE_RELA)
  if (rela_ != nullptr) {
    DEBUG("[ relocating %s ]", name);
    if (relocate(rela_, rela_count_, global_group, local_group)) {
      return false;
    }
  }
  if (plt_rela_ != nullptr) {
    DEBUG("[ relocating %s plt ]", name);
    if (relocate(plt_rela_, plt_rela_count_, global_group, local_group)) {
      return false;
    }
  }
#else
  if (rel_ != nullptr) {
    DEBUG("[ relocating %s ]", name);
    if (relocate(rel_, rel_count_, global_group, local_group)) {
      return false;
    }
  }
  if (plt_rel_ != nullptr) {
    DEBUG("[ relocating %s plt ]", name);
    if (relocate(plt_rel_, plt_rel_count_, global_group, local_group)) {
      return false;
    }
  }
#endif

#if defined(__mips__)
  if (!mips_relocate_got(global_group, local_group)) {
    return false;
  }
#endif

  DEBUG("[ finished linking %s ]", name);

#if !defined(__LP64__)
  if (has_text_relocations) {
    // All relocations are done, we can protect our segments back to read-only.
    if (phdr_table_protect_segments(phdr, phnum, load_bias) < 0) {
      DL_ERR("can't protect segments for \"%s\": %s",
             name, strerror(errno));
      return false;
    }
  }
#endif

  /* We can also turn on GNU RELRO protection */
  if (phdr_table_protect_gnu_relro(phdr, phnum, load_bias) < 0) {
    DL_ERR("can't enable GNU RELRO protection for \"%s\": %s",
           name, strerror(errno));
    return false;
  }

  /* Handle serializing/sharing the RELRO segment */
  if (extinfo && (extinfo->flags & ANDROID_DLEXT_WRITE_RELRO)) {
    if (phdr_table_serialize_gnu_relro(phdr, phnum, load_bias,
                                       extinfo->relro_fd) < 0) {
      DL_ERR("failed serializing GNU RELRO section for \"%s\": %s",
             name, strerror(errno));
      return false;
    }
  } else if (extinfo && (extinfo->flags & ANDROID_DLEXT_USE_RELRO)) {
    if (phdr_table_map_gnu_relro(phdr, phnum, load_bias,
                                 extinfo->relro_fd) < 0) {
      DL_ERR("failed mapping GNU RELRO section for \"%s\": %s",
             name, strerror(errno));
      return false;
    }
  }

  notify_gdb_of_load(this);
  return true;
}
/*
 * This function adds the vdso to the internal dso list.
 * It helps with stack unwinding through signal handlers.
 * Also, it makes bionic more like glibc.
 */
static void add_vdso(KernelArgumentBlock& args __unused) {
#if defined(AT_SYSINFO_EHDR)
  ElfW(Ehdr)* ehdr_vdso = reinterpret_cast<ElfW(Ehdr)*>(args.getauxval(AT_SYSINFO_EHDR));
  if (ehdr_vdso == nullptr) {
    // The kernel did not supply a vdso; nothing to register.
    return;
  }

  soinfo* si = soinfo_alloc("[vdso]", nullptr, 0, 0);

  // The vdso is already mapped by the kernel, so we only fill in the
  // soinfo fields from its in-memory ELF header.
  si->phdr = reinterpret_cast<ElfW(Phdr)*>(reinterpret_cast<char*>(ehdr_vdso) + ehdr_vdso->e_phoff);
  si->phnum = ehdr_vdso->e_phnum;
  si->base = reinterpret_cast<ElfW(Addr)>(ehdr_vdso);
  si->size = phdr_table_get_load_size(si->phdr, si->phnum);
  si->load_bias = get_elf_exec_load_bias(ehdr_vdso);

  // NOTE(review): the return values of prelink_image()/link_image() are
  // ignored here, so a malformed vdso would leave a partially-linked
  // entry on the list — confirm this is acceptable.
  si->prelink_image();
  si->link_image(g_empty_list, soinfo::soinfo_list_t::make_list(si), nullptr);
#endif
}
/*
 * This is linker soinfo for GDB. See details below.
 */
#if defined(__LP64__)
#define LINKER_PATH "/system/bin/linker64"
#else
#define LINKER_PATH "/system/bin/linker"
#endif
// Statically constructed so it exists before any allocator is usable;
// its base/dynamic fields are filled in by init_linker_info_for_gdb().
static soinfo linker_soinfo_for_gdb(LINKER_PATH, nullptr, 0, 0);
/* gdb expects the linker to be in the debug shared object list.
 * Without this, gdb has trouble locating the linker's ".text"
 * and ".plt" sections. Gdb could also potentially use this to
 * relocate the offset of our exported 'rtld_db_dlactivity' symbol.
 * Don't use soinfo_alloc(), because the linker shouldn't
 * be on the soinfo list.
 */
static void init_linker_info_for_gdb(ElfW(Addr) linker_base) {
  linker_soinfo_for_gdb.base = linker_base;

  /*
   * Set the dynamic field in the link map otherwise gdb will complain with
   * the following:
   *   warning: .dynamic section for "/system/bin/linker" is not at the
   *   expected address (wrong library or version mismatch?)
   */
  // Locate the linker's own PT_DYNAMIC from its in-memory program headers.
  ElfW(Ehdr)* elf_hdr = reinterpret_cast<ElfW(Ehdr)*>(linker_base);
  ElfW(Phdr)* phdr = reinterpret_cast<ElfW(Phdr)*>(linker_base + elf_hdr->e_phoff);
  phdr_table_get_dynamic_section(phdr, elf_hdr->e_phnum, linker_base,
                                 &linker_soinfo_for_gdb.dynamic, nullptr);
  insert_soinfo_into_debug_map(&linker_soinfo_for_gdb);
}
/*
 * This code is called after the linker has linked itself and
 * fixed its own GOT. It is safe to make references to externs
 * and other non-local data at this point.
 *
 * Bootstraps the main executable: sets up the environment, builds the
 * main soinfo from the kernel's aux vectors, loads LD_PRELOADs and
 * DT_NEEDED dependencies, runs constructors, and returns the program's
 * entry point for the assembly stub to jump to.
 */
static ElfW(Addr) __linker_init_post_relocation(KernelArgumentBlock& args, ElfW(Addr) linker_base) {
#if TIMING
  struct timeval t0, t1;
  gettimeofday(&t0, 0);
#endif

  // Initialize environment functions, and get to the ELF aux vectors table.
  linker_env_init(args);

  // If this is a setuid/setgid program, close the security hole described in
  // ftp://ftp.freebsd.org/pub/FreeBSD/CERT/advisories/FreeBSD-SA-02:23.stdio.asc
  if (get_AT_SECURE()) {
    nullify_closed_stdio();
  }

  debuggerd_init();

  // Get a few environment variables.
  const char* LD_DEBUG = linker_env_get("LD_DEBUG");
  if (LD_DEBUG != nullptr) {
    g_ld_debug_verbosity = atoi(LD_DEBUG);
  }

  // Normally, these are cleaned by linker_env_init, but the test
  // doesn't cost us anything.
  const char* ldpath_env = nullptr;
  const char* ldpreload_env = nullptr;
  if (!get_AT_SECURE()) {
    ldpath_env = linker_env_get("LD_LIBRARY_PATH");
    ldpreload_env = linker_env_get("LD_PRELOAD");
  }

  INFO("[ android linker & debugger ]");

  soinfo* si = soinfo_alloc(args.argv[0], nullptr, 0, RTLD_GLOBAL);
  if (si == nullptr) {
    exit(EXIT_FAILURE);
  }

  /* bootstrap the link map, the main exe always needs to be first */
  si->set_main_executable();
  link_map* map = &(si->link_map_head);

  map->l_addr = 0;
  map->l_name = args.argv[0];
  map->l_prev = nullptr;
  map->l_next = nullptr;

  _r_debug.r_map = map;
  r_debug_tail = map;

  init_linker_info_for_gdb(linker_base);

  // Extract information passed from the kernel.
  si->phdr = reinterpret_cast<ElfW(Phdr)*>(args.getauxval(AT_PHDR));
  si->phnum = args.getauxval(AT_PHNUM);
  si->entry = args.getauxval(AT_ENTRY);

  /* Compute the value of si->base. We can't rely on the fact that
   * the first entry is the PHDR because this will not be true
   * for certain executables (e.g. some in the NDK unit test suite)
   */
  si->base = 0;
  si->size = phdr_table_get_load_size(si->phdr, si->phnum);
  si->load_bias = 0;
  for (size_t i = 0; i < si->phnum; ++i) {
    if (si->phdr[i].p_type == PT_PHDR) {
      si->load_bias = reinterpret_cast<ElfW(Addr)>(si->phdr) - si->phdr[i].p_vaddr;
      si->base = reinterpret_cast<ElfW(Addr)>(si->phdr) - si->phdr[i].p_offset;
      break;
    }
  }
  si->dynamic = nullptr;

  ElfW(Ehdr)* elf_hdr = reinterpret_cast<ElfW(Ehdr)*>(si->base);
  if (elf_hdr->e_type != ET_DYN) {
    __libc_format_fd(2, "error: only position independent executables (PIE) are supported.\n");
    exit(EXIT_FAILURE);
  }

  // Use LD_LIBRARY_PATH and LD_PRELOAD (but only if we aren't setuid/setgid).
  parse_LD_LIBRARY_PATH(ldpath_env);
  parse_LD_PRELOAD(ldpreload_env);

  somain = si;

  si->prelink_image();

  // add somain to global group
  si->set_dt_flags_1(si->get_dt_flags_1() | DF_1_GLOBAL);

  // Load ld_preloads and dependencies: LD_PRELOAD entries first so they
  // take precedence, then the executable's DT_NEEDED entries.
  StringLinkedList needed_library_name_list;
  size_t needed_libraries_count = 0;
  size_t ld_preloads_count = 0;
  while (g_ld_preload_names[ld_preloads_count] != nullptr) {
    needed_library_name_list.push_back(g_ld_preload_names[ld_preloads_count++]);
    ++needed_libraries_count;
  }

  for_each_dt_needed(si, [&](const char* name) {
    needed_library_name_list.push_back(name);
    ++needed_libraries_count;
  });

  const char* needed_library_names[needed_libraries_count];

  memset(needed_library_names, 0, sizeof(needed_library_names));
  needed_library_name_list.copy_to_array(needed_library_names, needed_libraries_count);

  // With dependencies, find_libraries() links the whole group (including
  // si); with none, si must be linked directly.
  if (needed_libraries_count > 0 && !find_libraries(si, needed_library_names, needed_libraries_count, nullptr, g_ld_preloads, ld_preloads_count, RTLD_GLOBAL, nullptr)) {
    __libc_format_fd(2, "CANNOT LINK EXECUTABLE: %s\n", linker_get_error_buffer());
    exit(EXIT_FAILURE);
  } else if (needed_libraries_count == 0) {
    if (!si->link_image(g_empty_list, soinfo::soinfo_list_t::make_list(si), nullptr)) {
      __libc_format_fd(2, "CANNOT LINK EXECUTABLE: %s\n", linker_get_error_buffer());
      exit(EXIT_FAILURE);
    }
    si->increment_ref_count();
  }

  add_vdso(args);

  si->call_pre_init_constructors();

  /* After the prelink_image, the si->load_bias is initialized.
   * For so lib, the map->l_addr will be updated in notify_gdb_of_load.
   * We need to update this value for so exe here. So Unwind_Backtrace
   * for some arch like x86 could work correctly within so exe.
   */
  map->l_addr = si->load_bias;
  si->call_constructors();

#if TIMING
  gettimeofday(&t1, nullptr);
  PRINT("LINKER TIME: %s: %d microseconds", args.argv[0], (int) (
      (((long long)t1.tv_sec * 1000000LL) + (long long)t1.tv_usec) -
      (((long long)t0.tv_sec * 1000000LL) + (long long)t0.tv_usec)));
#endif
#if STATS
  PRINT("RELO STATS: %s: %d abs, %d rel, %d copy, %d symbol", args.argv[0],
        linker_stats.count[kRelocAbsolute],
        linker_stats.count[kRelocRelative],
        linker_stats.count[kRelocCopy],
        linker_stats.count[kRelocSymbol]);
#endif
#if COUNT_PAGES
  {
    unsigned n;
    unsigned i;
    unsigned count = 0;
    for (n = 0; n < 4096; n++) {
      if (bitmask[n]) {
        unsigned x = bitmask[n];
#if defined(__LP64__)
        for (i = 0; i < 32; i++) {
#else
        for (i = 0; i < 8; i++) {
#endif
          if (x & 1) {
            count++;
          }
          x >>= 1;
        }
      }
    }
    PRINT("PAGES MODIFIED: %s: %d (%dKB)", args.argv[0], count, count * 4);
  }
#endif

#if TIMING || STATS || COUNT_PAGES
  fflush(stdout);
#endif

  TRACE("[ Ready to execute '%s' @ %p ]", si->name, reinterpret_cast<void*>(si->entry));
  return si->entry;
}
/* Compute the load-bias of an existing executable. This shall only
 * be used to compute the load bias of an executable or shared library
 * that was loaded by the kernel itself.
 *
 * Input:
 *   elf -> address of ELF header, assumed to be at the start of the file.
 * Return:
 *   load bias, i.e. add the value of any p_vaddr in the file to get
 *   the corresponding address in memory. Derived from the first PT_LOAD
 *   entry; 0 if no PT_LOAD entry exists.
 */
static ElfW(Addr) get_elf_exec_load_bias(const ElfW(Ehdr)* elf) {
  const ElfW(Phdr)* phdr_table =
      reinterpret_cast<const ElfW(Phdr)*>(reinterpret_cast<uintptr_t>(elf) + elf->e_phoff);
  const ElfW(Phdr)* phdr_end = phdr_table + elf->e_phnum;

  for (const ElfW(Phdr)* phdr = phdr_table; phdr < phdr_end; ++phdr) {
    if (phdr->p_type == PT_LOAD) {
      return reinterpret_cast<ElfW(Addr)>(elf) + phdr->p_offset - phdr->p_vaddr;
    }
  }

  return 0;
}
2818 extern "C" void _start();
/*
 * This is the entry point for the linker, called from begin.S. This
 * method is responsible for fixing the linker's own relocations, and
 * then calling __linker_init_post_relocation().
 *
 * Because this method is called before the linker has fixed its own
 * relocations, any attempt to reference an extern variable, extern
 * function, or other GOT reference will generate a segfault.
 */
extern "C" ElfW(Addr) __linker_init(void* raw_args) {
  KernelArgumentBlock args(raw_args);

  ElfW(Addr) linker_addr = args.getauxval(AT_BASE);
  ElfW(Addr) entry_point = args.getauxval(AT_ENTRY);
  ElfW(Ehdr)* elf_hdr = reinterpret_cast<ElfW(Ehdr)*>(linker_addr);
  ElfW(Phdr)* phdr = reinterpret_cast<ElfW(Phdr)*>(linker_addr + elf_hdr->e_phoff);

  // Stack-allocated: the soinfo allocator is not usable yet.
  soinfo linker_so("[dynamic linker]", nullptr, 0, 0);

  // If the linker is not acting as PT_INTERP entry_point is equal to
  // _start. Which means that the linker is running as an executable and
  // already linked by PT_INTERP.
  //
  // This happens when user tries to run 'adb shell /system/bin/linker'
  // see also https://code.google.com/p/android/issues/detail?id=63174
  if (reinterpret_cast<ElfW(Addr)>(&_start) == entry_point) {
    __libc_fatal("This is %s, the helper program for shared library executables.\n", args.argv[0]);
  }

  linker_so.base = linker_addr;
  linker_so.size = phdr_table_get_load_size(phdr, elf_hdr->e_phnum);
  linker_so.load_bias = get_elf_exec_load_bias(elf_hdr);
  linker_so.dynamic = nullptr;
  linker_so.phdr = phdr;
  linker_so.phnum = elf_hdr->e_phnum;
  linker_so.set_linker_flag();

  // This might not be obvious... The reasons why we pass g_empty_list
  // in place of local_group here are (1) we do not really need it, because
  // linker is built with DT_SYMBOLIC and therefore relocates its symbols against
  // itself without having to look into local_group and (2) allocators
  // are not yet initialized, and therefore we cannot use linked_list.push_*
  // functions at this point.
  if (!(linker_so.prelink_image() && linker_so.link_image(g_empty_list, g_empty_list, nullptr))) {
    // It would be nice to print an error message, but if the linker
    // can't link itself, there's no guarantee that we'll be able to
    // call write() (because it involves a GOT reference). We may as
    // well try though...
    const char* msg = "CANNOT LINK EXECUTABLE: ";
    write(2, msg, strlen(msg));
    write(2, __linker_dl_err_buf, strlen(__linker_dl_err_buf));
    write(2, "\n", 1);
    _exit(EXIT_FAILURE);
  }

  __libc_init_tls(args);

  // Initialize the linker's own global variables
  linker_so.call_constructors();

  // Initialize static variables. Note that in order to
  // get correct libdl_info we need to call constructors
  // before get_libdl_info().
  solist = get_libdl_info();
  sonext = get_libdl_info();

  // We have successfully fixed our own relocations. It's safe to run
  // the main part of the linker now.
  args.abort_message_ptr = &g_abort_message;
  ElfW(Addr) start_address = __linker_init_post_relocation(args, linker_addr);

  protect_data(PROT_READ);

  // Return the address that the calling assembly stub should jump to.
  return start_address;
}