/*
 * Copyright (C) 2012 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "linker_phdr.h"

#include <errno.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>

#include "linker.h"
#include "linker_debug.h"

static int GetTargetElfMachine() {
#if defined(__arm__)
  return EM_ARM;
#elif defined(__aarch64__)
  return EM_AARCH64;
#elif defined(__i386__)
  return EM_386;
#elif defined(__mips__)
  return EM_MIPS;
#elif defined(__x86_64__)
  return EM_X86_64;
#else
#error "unsupported target architecture"
#endif
}

/**
  TECHNICAL NOTE ON ELF LOADING.

  An ELF file's program header table contains one or more PT_LOAD
  segments, which correspond to portions of the file that need to
  be mapped into the process' address space.

  Each loadable segment has the following important properties:

    p_offset  -> segment file offset
    p_filesz  -> segment file size
    p_memsz   -> segment memory size (always >= p_filesz)
    p_vaddr   -> segment's virtual address
    p_flags   -> segment flags (e.g. readable, writable, executable)

  We will ignore the p_paddr and p_align fields of ElfW(Phdr) for now.

  The loadable segments can be seen as a list of [p_vaddr ... p_vaddr+p_memsz)
  ranges of virtual addresses. A few rules apply:

  - the virtual address ranges should not overlap.

  - if a segment's p_filesz is smaller than its p_memsz, the extra bytes
    between them should always be initialized to 0.

  - ranges do not necessarily start or end at page boundaries. Two distinct
    segments can have their start and end on the same page. In this case, the
    page inherits the mapping flags of the latter segment.

  Finally, the real load address of each segment is not p_vaddr. Instead,
  the loader decides where to load the first segment, then loads all others
  relative to the first one to respect the initial range layout.

  For example, consider the following list:

    [ offset:0,      filesz:0x4000, memsz:0x4000, vaddr:0x30000 ],
    [ offset:0x4000, filesz:0x2000, memsz:0x8000, vaddr:0x40000 ],

  This corresponds to two segments that cover these virtual address ranges:

       0x30000...0x34000
       0x40000...0x48000

  If the loader decides to load the first segment at address 0xa0000000
  then the segments' load address ranges will be:

       0xa0030000...0xa0034000
       0xa0040000...0xa0048000

  In other words, all segments must be loaded at an address that has the same
  constant offset from their p_vaddr value. This offset is computed as the
  difference between the first segment's load address and its p_vaddr value.

  However, in practice, segments do _not_ start at page boundaries. Since we
  can only memory-map at page boundaries, the bias is computed as:

    load_bias = phdr0_load_address - PAGE_START(phdr0->p_vaddr)

  (NOTE: The value must be used as a 32-bit unsigned integer, to deal with
  possible wrap-around past UINT32_MAX for large p_vaddr values.)

  Note also that phdr0_load_address must start at a page boundary, with
  the segment's real content starting at:

    phdr0_load_address + PAGE_OFFSET(phdr0->p_vaddr)

  Note that ELF requires the following condition to make the mmap()-ing work:

    PAGE_OFFSET(phdr0->p_vaddr) == PAGE_OFFSET(phdr0->p_offset)

  The load_bias must be added to any p_vaddr value read from the ELF file to
  determine the corresponding memory address.

 **/
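
// Illustrative sketch (not part of the linker): computing the load bias for
// the example in the note above, directly from the formula. The names phdr0
// and phdr0_load_address are assumptions for the example, not linker state.
//
//   ElfW(Addr) compute_load_bias(const ElfW(Phdr)* phdr0,
//                                ElfW(Addr) phdr0_load_address) {
//     // e.g. p_vaddr == 0x30000 and phdr0_load_address == 0xa0030000
//     // yield a load_bias of 0xa0000000.
//     return phdr0_load_address - PAGE_START(phdr0->p_vaddr);
//   }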

#define MAYBE_MAP_FLAG(x, from, to)  (((x) & (from)) ? (to) : 0)
#define PFLAGS_TO_PROT(x)            (MAYBE_MAP_FLAG((x), PF_X, PROT_EXEC) | \
                                      MAYBE_MAP_FLAG((x), PF_R, PROT_READ) | \
                                      MAYBE_MAP_FLAG((x), PF_W, PROT_WRITE))
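
// Quick illustration of the macros above: a read-only executable segment
// (p_flags == PF_R | PF_X) should translate to PROT_READ | PROT_EXEC. This
// compile-time check is purely illustrative and assumes the usual <elf.h>
// and <sys/mman.h> constants.
static_assert(PFLAGS_TO_PROT(PF_R | PF_X) == (PROT_READ | PROT_EXEC),
              "PFLAGS_TO_PROT should map PF_R|PF_X to PROT_READ|PROT_EXEC");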

ElfReader::ElfReader(const char* name, int fd, off64_t file_offset)
    : name_(name), fd_(fd), file_offset_(file_offset),
      phdr_num_(0), phdr_mmap_(nullptr), phdr_table_(nullptr), phdr_size_(0),
      load_start_(nullptr), load_size_(0), load_bias_(0),
      loaded_phdr_(nullptr) {
}

ElfReader::~ElfReader() {
  if (phdr_mmap_ != nullptr) {
    munmap(phdr_mmap_, phdr_size_);
  }
}

bool ElfReader::Load(const android_dlextinfo* extinfo) {
  return ReadElfHeader() &&
         VerifyElfHeader() &&
         ReadProgramHeader() &&
         ReserveAddressSpace(extinfo) &&
         LoadSegments() &&
         FindPhdr();
}

bool ElfReader::ReadElfHeader() {
  ssize_t rc = TEMP_FAILURE_RETRY(pread64(fd_, &header_, sizeof(header_), file_offset_));
  if (rc < 0) {
    DL_ERR("can't read file \"%s\": %s", name_, strerror(errno));
    return false;
  }

  if (rc != sizeof(header_)) {
    DL_ERR("\"%s\" is too small to be an ELF executable: only found %zd bytes", name_,
           static_cast<size_t>(rc));
    return false;
  }
  return true;
}

bool ElfReader::VerifyElfHeader() {
  if (memcmp(header_.e_ident, ELFMAG, SELFMAG) != 0) {
    DL_ERR("\"%s\" has bad ELF magic", name_);
    return false;
  }

  // Try to give a clear diagnostic for ELF class mismatches, since they're
  // an easy mistake to make during the 32-bit/64-bit transition period.
  int elf_class = header_.e_ident[EI_CLASS];
#if defined(__LP64__)
  if (elf_class != ELFCLASS64) {
    if (elf_class == ELFCLASS32) {
      DL_ERR("\"%s\" is 32-bit instead of 64-bit", name_);
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_, elf_class);
    }
    return false;
  }
#else
  if (elf_class != ELFCLASS32) {
    if (elf_class == ELFCLASS64) {
      DL_ERR("\"%s\" is 64-bit instead of 32-bit", name_);
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_, elf_class);
    }
    return false;
  }
#endif

  if (header_.e_ident[EI_DATA] != ELFDATA2LSB) {
    DL_ERR("\"%s\" not little-endian: %d", name_, header_.e_ident[EI_DATA]);
    return false;
  }

  if (header_.e_type != ET_DYN) {
    DL_ERR("\"%s\" has unexpected e_type: %d", name_, header_.e_type);
    return false;
  }

  if (header_.e_version != EV_CURRENT) {
    DL_ERR("\"%s\" has unexpected e_version: %d", name_, header_.e_version);
    return false;
  }

  if (header_.e_machine != GetTargetElfMachine()) {
    DL_ERR("\"%s\" has unexpected e_machine: %d", name_, header_.e_machine);
    return false;
  }

  return true;
}

// Loads the program header table from an ELF file into a read-only private
// anonymous mmap-ed block.
bool ElfReader::ReadProgramHeader() {
  phdr_num_ = header_.e_phnum;

  // Like the kernel, we only accept program header tables that
  // are smaller than 64KiB.
  if (phdr_num_ < 1 || phdr_num_ > 65536/sizeof(ElfW(Phdr))) {
    DL_ERR("\"%s\" has invalid e_phnum: %zd", name_, phdr_num_);
    return false;
  }

  ElfW(Addr) page_min = PAGE_START(header_.e_phoff);
  ElfW(Addr) page_max = PAGE_END(header_.e_phoff + (phdr_num_ * sizeof(ElfW(Phdr))));
  ElfW(Addr) page_offset = PAGE_OFFSET(header_.e_phoff);

  phdr_size_ = page_max - page_min;
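
  // For example (illustrative numbers, assuming 4KiB pages): with
  // e_phoff == 64 and 8 program headers of 56 bytes each on a 64-bit
  // target, page_min == 0, page_max == 0x1000 and page_offset == 64, so we
  // map a single page and point phdr_table_ 64 bytes into it.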

  void* mmap_result = mmap64(nullptr, phdr_size_, PROT_READ, MAP_PRIVATE, fd_, file_offset_ + page_min);
  if (mmap_result == MAP_FAILED) {
    DL_ERR("\"%s\" phdr mmap failed: %s", name_, strerror(errno));
    return false;
  }

  phdr_mmap_ = mmap_result;
  phdr_table_ = reinterpret_cast<ElfW(Phdr)*>(reinterpret_cast<char*>(mmap_result) + page_offset);
  return true;
}

/* Returns the size of the extent of all the possibly non-contiguous
 * loadable segments in an ELF program header table. This corresponds
 * to the page-aligned size in bytes that needs to be reserved in the
 * process' address space. If there are no loadable segments, 0 is
 * returned.
 *
 * If out_min_vaddr or out_max_vaddr are not null, they will be
 * set to the minimum and maximum addresses of pages to be reserved,
 * or 0 if there is nothing to load.
 */
size_t phdr_table_get_load_size(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                ElfW(Addr)* out_min_vaddr,
                                ElfW(Addr)* out_max_vaddr) {
  ElfW(Addr) min_vaddr = UINTPTR_MAX;
  ElfW(Addr) max_vaddr = 0;

  bool found_pt_load = false;
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }
    found_pt_load = true;

    if (phdr->p_vaddr < min_vaddr) {
      min_vaddr = phdr->p_vaddr;
    }

    if (phdr->p_vaddr + phdr->p_memsz > max_vaddr) {
      max_vaddr = phdr->p_vaddr + phdr->p_memsz;
    }
  }
  if (!found_pt_load) {
    min_vaddr = 0;
  }

  min_vaddr = PAGE_START(min_vaddr);
  max_vaddr = PAGE_END(max_vaddr);

  if (out_min_vaddr != nullptr) {
    *out_min_vaddr = min_vaddr;
  }
  if (out_max_vaddr != nullptr) {
    *out_max_vaddr = max_vaddr;
  }
  return max_vaddr - min_vaddr;
}
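
// Worked example (illustrative): for the two segments in the technical note
// above (vaddr 0x30000/memsz 0x4000 and vaddr 0x40000/memsz 0x8000),
// min_vaddr rounds down to 0x30000, max_vaddr rounds up to 0x48000, and the
// function returns 0x18000 bytes to reserve.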

// Reserve a virtual address range big enough to hold all loadable
// segments of a program header table. This is done by creating a
// private anonymous mmap() with PROT_NONE.
bool ElfReader::ReserveAddressSpace(const android_dlextinfo* extinfo) {
  ElfW(Addr) min_vaddr;
  load_size_ = phdr_table_get_load_size(phdr_table_, phdr_num_, &min_vaddr);
  if (load_size_ == 0) {
    DL_ERR("\"%s\" has no loadable segments", name_);
    return false;
  }

  uint8_t* addr = reinterpret_cast<uint8_t*>(min_vaddr);
  void* start;
  size_t reserved_size = 0;
  bool reserved_hint = true;

  if (extinfo != nullptr) {
    if (extinfo->flags & ANDROID_DLEXT_RESERVED_ADDRESS) {
      reserved_size = extinfo->reserved_size;
      reserved_hint = false;
    } else if (extinfo->flags & ANDROID_DLEXT_RESERVED_ADDRESS_HINT) {
      reserved_size = extinfo->reserved_size;
    }
  }

  if (load_size_ > reserved_size) {
    if (!reserved_hint) {
      DL_ERR("reserved address space %zd smaller than %zd bytes needed for \"%s\"",
             reserved_size, load_size_, name_);
      return false;
    }
    int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
    start = mmap(addr, load_size_, PROT_NONE, mmap_flags, -1, 0);
    if (start == MAP_FAILED) {
      DL_ERR("couldn't reserve %zd bytes of address space for \"%s\"", load_size_, name_);
      return false;
    }
  } else {
    start = extinfo->reserved_addr;
  }

  load_start_ = start;
  load_bias_ = reinterpret_cast<uint8_t*>(start) - addr;
  return true;
}
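
// Note on the bias above (illustrative numbers): addr is the page-aligned
// minimum p_vaddr, so if min_vaddr is 0x30000 and the reservation lands at
// 0xa0030000, load_bias_ comes out as 0xa0000000, matching the load_bias
// formula in the technical note.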

bool ElfReader::LoadSegments() {
  for (size_t i = 0; i < phdr_num_; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table_[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }

    // Segment addresses in memory.
    ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
    ElfW(Addr) seg_end = seg_start + phdr->p_memsz;

    ElfW(Addr) seg_page_start = PAGE_START(seg_start);
    ElfW(Addr) seg_page_end = PAGE_END(seg_end);

    ElfW(Addr) seg_file_end = seg_start + phdr->p_filesz;

    // File offsets.
    ElfW(Addr) file_start = phdr->p_offset;
    ElfW(Addr) file_end = file_start + phdr->p_filesz;

    ElfW(Addr) file_page_start = PAGE_START(file_start);
    ElfW(Addr) file_length = file_end - file_page_start;

    if (file_length != 0) {
      void* seg_addr = mmap64(reinterpret_cast<void*>(seg_page_start),
                              file_length,
                              PFLAGS_TO_PROT(phdr->p_flags),
                              MAP_FIXED|MAP_PRIVATE,
                              fd_,
                              file_offset_ + file_page_start);
      if (seg_addr == MAP_FAILED) {
        DL_ERR("couldn't map \"%s\" segment %zd: %s", name_, i, strerror(errno));
        return false;
      }
    }

    // If the segment is writable, and does not end on a page boundary,
    // zero-fill it until the page limit.
    if ((phdr->p_flags & PF_W) != 0 && PAGE_OFFSET(seg_file_end) > 0) {
      memset(reinterpret_cast<void*>(seg_file_end), 0, PAGE_SIZE - PAGE_OFFSET(seg_file_end));
    }

    seg_file_end = PAGE_END(seg_file_end);

    // seg_file_end is now the first page address after the file
    // content. If seg_end is larger, we need to zero anything
    // between them. This is done by using a private anonymous
    // map for all extra pages.
    if (seg_page_end > seg_file_end) {
      void* zeromap = mmap(reinterpret_cast<void*>(seg_file_end),
                           seg_page_end - seg_file_end,
                           PFLAGS_TO_PROT(phdr->p_flags),
                           MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE,
                           -1,
                           0);
      if (zeromap == MAP_FAILED) {
        DL_ERR("couldn't zero fill \"%s\" gap: %s", name_, strerror(errno));
        return false;
      }
    }
  }
  return true;
}
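
// Worked example (illustrative, assuming 4KiB pages and a page-aligned
// segment start): for a writable segment with p_filesz == 0x1800 and
// p_memsz == 0x8000, the file-backed mapping covers the first two pages,
// the memset() zeroes bytes 0x1800..0x1fff of the second page, and the
// anonymous mapping supplies pages 0x2000..0x8000, which the kernel
// delivers already zero-filled.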

/* Used internally. Sets the protection bits of all loaded segments
 * with optional extra flags (i.e. really PROT_WRITE). Used by
 * phdr_table_protect_segments and phdr_table_unprotect_segments.
 */
static int _phdr_table_set_load_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                     ElfW(Addr) load_bias, int extra_prot_flags) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0) {
      continue;
    }

    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
                       seg_page_end - seg_page_start,
                       PFLAGS_TO_PROT(phdr->p_flags) | extra_prot_flags);
    if (ret < 0) {
      return -1;
    }
  }
  return 0;
}

/* Restore the original protection modes for all loadable segments.
 * You should only call this after phdr_table_unprotect_segments and
 * applying all relocations.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, 0);
}

/* Change the protection of all loaded segments in memory to writable.
 * This is useful before performing relocations. Once completed, you
 * will have to call phdr_table_protect_segments to restore the original
 * protection flags on all segments.
 *
 * Note that some writable segments can also have their content turned
 * to read-only by calling phdr_table_protect_gnu_relro. That is not
 * performed here.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE);
}
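
// Typical call order (illustrative sketch of how a caller would drive the
// helpers above around relocation; the "si" soinfo fields are assumptions
// for the example):
//
//   phdr_table_unprotect_segments(si->phdr, si->phnum, si->load_bias);
//   // ... apply all relocations ...
//   phdr_table_protect_segments(si->phdr, si->phnum, si->load_bias);
//   // then, once relocation is done, lock down the relro region:
//   phdr_table_protect_gnu_relro(si->phdr, si->phnum, si->load_bias);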

/* Used internally by phdr_table_protect_gnu_relro and
 * phdr_table_unprotect_gnu_relro.
 */
static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                          ElfW(Addr) load_bias, int prot_flags) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    // Tricky: what happens when the relro segment does not start
    // or end at page boundaries? We're going to be over-protective
    // here and mark every page touched by the segment as read-only.

    // This seems to match Ian Lance Taylor's description of the
    // feature at http://www.airs.com/blog/archives/189.

    // Extract:
    //    Note that the current dynamic linker code will only work
    //    correctly if the PT_GNU_RELRO segment starts on a page
    //    boundary. This is because the dynamic linker rounds the
    //    p_vaddr field down to the previous page boundary. If
    //    there is anything on the page which should not be read-only,
    //    the program is likely to fail at runtime. So in effect the
    //    linker must only emit a PT_GNU_RELRO segment if it ensures
    //    that it starts on a page boundary.
    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
                       seg_page_end - seg_page_start,
                       prot_flags);
    if (ret < 0) {
      return -1;
    }
  }
  return 0;
}

/* Apply GNU relro protection if specified by the program header. This will
 * turn some of the pages of a writable PT_LOAD segment read-only, as
 * specified by one or more PT_GNU_RELRO segments. This must always be
 * performed after relocations.
 *
 * The areas typically covered are .got and .data.rel.ro; these are
 * read-only from the program's point of view, but contain absolute
 * addresses that need to be relocated before use.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ);
}

/* Serialize the GNU relro segments to the given file descriptor. This can be
 * performed after relocations to allow another process to later share the
 * relocated segment, if it was loaded at the same address.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 *   fd          -> writable file descriptor to use
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_serialize_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count, ElfW(Addr) load_bias,
                                   int fd) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
  ssize_t file_offset = 0;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
    ssize_t size = seg_page_end - seg_page_start;

    ssize_t written = TEMP_FAILURE_RETRY(write(fd, reinterpret_cast<void*>(seg_page_start), size));
    if (written != size) {
      return -1;
    }
    void* map = mmap(reinterpret_cast<void*>(seg_page_start), size, PROT_READ,
                     MAP_PRIVATE|MAP_FIXED, fd, file_offset);
    if (map == MAP_FAILED) {
      return -1;
    }
    file_offset += size;
  }
  return 0;
}
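
// Usage sketch (illustrative, assuming a relro file shared between two
// processes): process A calls phdr_table_serialize_gnu_relro() after
// relocating, writing its relro pages to the file and remapping them from
// it; a later process B that loads the same library at the same address can
// then call phdr_table_map_gnu_relro() on a read-only descriptor for the
// same file so that identical relro pages are shared instead of staying
// dirty.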

/* Where possible, replace the GNU relro segments with mappings of the given
 * file descriptor. This can be performed after relocations to allow a file
 * previously created by phdr_table_serialize_gnu_relro in another process to
 * replace the dirty relocated pages, saving memory, if it was loaded at the
 * same address. We have to compare the data before we map over it, since some
 * parts of the relro segment may not be identical due to other libraries in
 * the process being loaded at different addresses.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 *   fd          -> readable file descriptor to use
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_map_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count, ElfW(Addr) load_bias,
                             int fd) {
  // Map the file at a temporary location so we can compare its contents.
  struct stat file_stat;
  if (TEMP_FAILURE_RETRY(fstat(fd, &file_stat)) != 0) {
    return -1;
  }
  off_t file_size = file_stat.st_size;
  void* temp_mapping = nullptr;
  if (file_size > 0) {
    temp_mapping = mmap(nullptr, file_size, PROT_READ, MAP_PRIVATE, fd, 0);
    if (temp_mapping == MAP_FAILED) {
      return -1;
    }
  }
  size_t file_offset = 0;

  // Iterate over the relro segments and compare/remap the pages.
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    char* file_base = static_cast<char*>(temp_mapping) + file_offset;
    char* mem_base = reinterpret_cast<char*>(seg_page_start);
    size_t match_offset = 0;
    size_t size = seg_page_end - seg_page_start;

    if (file_size - file_offset < size) {
      // File is too short to compare to this segment. The contents are likely
      // different as well (it's probably for a different library version) so
      // just don't bother checking.
      break;
    }

    while (match_offset < size) {
      // Skip over dissimilar pages.
      while (match_offset < size &&
             memcmp(mem_base + match_offset, file_base + match_offset, PAGE_SIZE) != 0) {
        match_offset += PAGE_SIZE;
      }

      // Count similar pages.
      size_t mismatch_offset = match_offset;
      while (mismatch_offset < size &&
             memcmp(mem_base + mismatch_offset, file_base + mismatch_offset, PAGE_SIZE) == 0) {
        mismatch_offset += PAGE_SIZE;
      }

      // Map over similar pages. Note that the mapped file offset must
      // include file_offset, since file_base already points file_offset
      // bytes into the file.
      if (mismatch_offset > match_offset) {
        void* map = mmap(mem_base + match_offset, mismatch_offset - match_offset,
                         PROT_READ, MAP_PRIVATE|MAP_FIXED, fd, file_offset + match_offset);
        if (map == MAP_FAILED) {
          munmap(temp_mapping, file_size);
          return -1;
        }
      }

      match_offset = mismatch_offset;
    }

    // Add to the base file offset in case there are multiple relro segments.
    file_offset += size;
  }
  munmap(temp_mapping, file_size);
  return 0;
}
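
// Illustrative walk-through of the loop above (assuming 4KiB pages): if a
// relro segment spans pages [P0 P1 P2 P3] and only P1 and P2 still match
// the serialized file byte-for-byte, the first inner loop skips P0, the
// second stops after P2, and the single mmap() replaces exactly P1..P2 with
// clean file-backed pages; P0 and P3 keep their (dirty) anonymous copies.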

#if defined(__arm__)

#  ifndef PT_ARM_EXIDX
#    define PT_ARM_EXIDX 0x70000001      /* .ARM.exidx segment */
#  endif

/* Return the address and size of the .ARM.exidx section in memory,
 * if present.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Output:
 *   arm_exidx       -> address of table in memory (null on failure).
 *   arm_exidx_count -> number of items in table (0 on failure).
 * Return:
 *   0 on success, -1 on failure (_no_ error code in errno)
 */
int phdr_table_get_arm_exidx(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                             ElfW(Addr) load_bias,
                             ElfW(Addr)** arm_exidx, unsigned* arm_exidx_count) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_ARM_EXIDX) {
      continue;
    }

    *arm_exidx = reinterpret_cast<ElfW(Addr)*>(load_bias + phdr->p_vaddr);
    // Each .ARM.exidx entry is a pair of 32-bit words, i.e. 8 bytes.
    *arm_exidx_count = static_cast<unsigned>(phdr->p_memsz / 8);
    return 0;
  }
  *arm_exidx = nullptr;
  *arm_exidx_count = 0;
  return -1;
}
#endif

/* Return the address and size of the ELF file's .dynamic section in memory,
 * or null if missing.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Output:
 *   dynamic       -> address of table in memory (null on failure).
 *   dynamic_flags -> protection flags for section (unset on failure)
 * Return:
 *   void
 */
void phdr_table_get_dynamic_section(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                    ElfW(Addr) load_bias, ElfW(Dyn)** dynamic,
                                    ElfW(Word)* dynamic_flags) {
  *dynamic = nullptr;
  for (const ElfW(Phdr)* phdr = phdr_table, *phdr_limit = phdr + phdr_count; phdr < phdr_limit; phdr++) {
    if (phdr->p_type == PT_DYNAMIC) {
      *dynamic = reinterpret_cast<ElfW(Dyn)*>(load_bias + phdr->p_vaddr);
      if (dynamic_flags) {
        *dynamic_flags = phdr->p_flags;
      }
      return;
    }
  }
}

// Sets loaded_phdr_ to the address of the program header table as it appears
// in the loaded segments in memory. This is in contrast with phdr_table_,
// which is temporary and will be released before the library is relocated.
bool ElfReader::FindPhdr() {
  const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;

  // If there is a PT_PHDR, use it directly.
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_PHDR) {
      return CheckPhdr(load_bias_ + phdr->p_vaddr);
    }
  }

  // Otherwise, check the first loadable segment. If its file offset
  // is 0, it starts with the ELF header, and we can trivially find the
  // loaded program header from it.
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_LOAD) {
      if (phdr->p_offset == 0) {
        ElfW(Addr) elf_addr = load_bias_ + phdr->p_vaddr;
        const ElfW(Ehdr)* ehdr = reinterpret_cast<const ElfW(Ehdr)*>(elf_addr);
        ElfW(Addr) offset = ehdr->e_phoff;
        return CheckPhdr(reinterpret_cast<ElfW(Addr)>(ehdr) + offset);
      }
      break;
    }
  }

  DL_ERR("can't find loaded phdr for \"%s\"", name_);
  return false;
}

// Ensures that our program header is actually within a loadable
// segment. This should help catch badly-formed ELF files that
// would cause the linker to crash later when trying to access it.
bool ElfReader::CheckPhdr(ElfW(Addr) loaded) {
  const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;
  ElfW(Addr) loaded_end = loaded + (phdr_num_ * sizeof(ElfW(Phdr)));
  for (ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type != PT_LOAD) {
      continue;
    }
    ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
    ElfW(Addr) seg_end = phdr->p_filesz + seg_start;
    if (seg_start <= loaded && loaded_end <= seg_end) {
      loaded_phdr_ = reinterpret_cast<const ElfW(Phdr)*>(loaded);
      return true;
    }
  }
  DL_ERR("\"%s\" loaded phdr %p not in loadable segment", name_, reinterpret_cast<void*>(loaded));
  return false;
}