author     The Android Open Source Project <initial-contribution@android.com>
           Wed, 31 Mar 2010 21:24:25 +0000 (14:24 -0700)
committer  The Android Open Source Project <initial-contribution@android.com>
           Wed, 31 Mar 2010 21:24:25 +0000 (14:24 -0700)
Change-Id: I950e9aca87cbb0c35099b1d53cff6378bd0f26f4
115 files changed:
diff --git a/.gitignore b/.gitignore
index 0db5b039383b1fd8b5bbf27fe9b2456a617c0b87..5a5ea87a5080022b26859e8ca3f77bdffd4d7f94 100644 (file)
--- a/.gitignore
+++ b/.gitignore
*.pyc
+*.*~
libc/kernel/original
diff --git a/CleanSpec.mk b/CleanSpec.mk
--- /dev/null
+++ b/CleanSpec.mk
@@ -0,0 +1,49 @@
+# Copyright (C) 2007 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# If you don't need to do a full clean build but would like to touch
+# a file or delete some intermediate files, add a clean step to the end
+# of the list. These steps will only be run once, if they haven't been
+# run before.
+#
+# E.g.:
+# $(call add-clean-step, touch -c external/sqlite/sqlite3.h)
+# $(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/STATIC_LIBRARIES/libz_intermediates)
+#
+# Always use "touch -c" and "rm -f" or "rm -rf" to gracefully deal with
+# files that are missing or have been moved.
+#
+# Use $(PRODUCT_OUT) to get to the "out/target/product/blah/" directory.
+# Use $(OUT_DIR) to refer to the "out" directory.
+#
+# If you need to re-do something that's already mentioned, just copy
+# the command and add it to the bottom of the list. E.g., if a change
+# that you made last week required touching a file and a change you
+# made today requires touching the same file, just copy the old
+# touch step and add it to the end of the list.
+#
+# ************************************************
+# NEWER CLEAN STEPS MUST BE AT THE END OF THE LIST
+# ************************************************
+
+# For example:
+#$(call add-clean-step, rm -rf $(OUT_DIR)/target/common/obj/APPS/AndroidTests_intermediates)
+#$(call add-clean-step, rm -rf $(OUT_DIR)/target/common/obj/JAVA_LIBRARIES/core_intermediates)
+#$(call add-clean-step, find $(OUT_DIR) -type f -name "IGTalkSession*" -print0 | xargs -0 rm -f)
+#$(call add-clean-step, rm -rf $(PRODUCT_OUT)/data/*)
+
+# ************************************************
+# NEWER CLEAN STEPS MUST BE AT THE END OF THE LIST
+# ************************************************
diff --git a/libc/Android.mk b/libc/Android.mk
index f6276407530e7abcf759bee6e9531136f96233dc..d8cc721ca4a90078d31c96c477a0a5896c4687f2 100644 (file)
--- a/libc/Android.mk
+++ b/libc/Android.mk
unistd/initgroups.c \
unistd/isatty.c \
unistd/issetugid.c \
+ unistd/killpg.c \
unistd/lseek64.c \
unistd/mmap.c \
unistd/nice.c \
unistd/sbrk.c \
unistd/send.c \
unistd/setegid.c \
+ unistd/setuid.c \
unistd/seteuid.c \
+ unistd/setreuid.c \
+ unistd/setresuid.c \
unistd/setpgrp.c \
unistd/sigblock.c \
unistd/siginterrupt.c \
bionic/__errno.c \
bionic/__set_errno.c \
bionic/_rand48.c \
+ bionic/cpuacct.c \
bionic/arc4random.c \
bionic/basename.c \
bionic/basename_r.c \
+ bionic/clearenv.c \
bionic/dirname.c \
bionic/dirname_r.c \
bionic/drand48.c \
bionic/erand48.c \
+ bionic/err.c \
+ bionic/fdprintf.c \
bionic/fork.c \
+ bionic/fts.c \
bionic/if_nametoindex.c \
bionic/if_indextoname.c \
bionic/ioctl.c \
netbsd/nameser/ns_ttl.c \
netbsd/nameser/ns_netint.c \
netbsd/nameser/ns_print.c \
- netbsd/nameser/ns_samedomain.c
+ netbsd/nameser/ns_samedomain.c \
+ regex/regcomp.c \
+ regex/regerror.c \
+ regex/regexec.c \
+ regex/regfree.c \
# Architecture specific source files go here
# =========================================================
ifeq ($(TARGET_ARCH),arm)
libc_common_src_files += \
bionic/eabi.c \
+ bionic/bionic_clone.c \
arch-arm/bionic/__get_pc.S \
arch-arm/bionic/__get_sp.S \
arch-arm/bionic/_exit_with_stack_teardown.S \
-DNEED_PSELECT=1 \
-DINET6 \
-I$(LOCAL_PATH)/private \
- -DUSE_DL_PREFIX
+ -DUSE_DL_PREFIX \
+ -DPOSIX_MISTAKE
+
+# These macro definitions are required to implement the
+# 'timezone' and 'daylight' global variables, as well as
+# to properly update the 'tm_gmtoff' field in 'struct tm'.
+#
+libc_common_cflags += \
+ -DTM_GMTOFF=tm_gmtoff \
+ -DUSG_COMPAT=1
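For context, a minimal C sketch of what these defines expose once libc is built
with them; it assumes a <time.h> that declares tm_gmtoff, timezone and daylight
(the program itself is illustrative, not part of this change):

    /* Sketch only: requires a libc built with -DTM_GMTOFF=tm_gmtoff -DUSG_COMPAT=1 */
    #include <stdio.h>
    #include <time.h>

    int main(void) {
        time_t now = time(NULL);
        struct tm local;

        tzset();                     /* populates the 'timezone' and 'daylight' globals */
        localtime_r(&now, &local);   /* fills in local.tm_gmtoff */

        printf("tm_gmtoff=%ld timezone=%ld daylight=%d\n",
               (long) local.tm_gmtoff, (long) timezone, daylight);
        return 0;
    }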
ifeq ($(strip $(DEBUG_BIONIC_LIBC)),true)
libc_common_cflags += -DDEBUG
LOCAL_SRC_FILES := \
$(libc_arch_static_src_files) \
bionic/dlmalloc.c \
+ bionic/malloc_debug_common.c \
bionic/libc_init_static.c
-LOCAL_CFLAGS := $(libc_common_cflags)
-
-ifeq ($(WITH_MALLOC_CHECK_LIBC_A),true)
- LOCAL_CFLAGS += -DMALLOC_LEAK_CHECK
- LOCAL_SRC_FILES += bionic/malloc_leak.c.arm
-endif
-
+LOCAL_CFLAGS := $(libc_common_cflags) \
+ -DLIBC_STATIC
LOCAL_C_INCLUDES := $(libc_common_c_includes)
-
LOCAL_MODULE := libc
LOCAL_WHOLE_STATIC_LIBRARIES := libc_common
LOCAL_SYSTEM_SHARED_LIBRARIES :=
LOCAL_SRC_FILES := \
$(libc_arch_dynamic_src_files) \
bionic/dlmalloc.c \
- bionic/malloc_leak.c.arm \
+ bionic/malloc_debug_common.c \
bionic/libc_init_dynamic.c
LOCAL_MODULE:= libc
include $(BUILD_SHARED_LIBRARY)
+# For all builds except the -user build, we enable memory allocation
+# checking (including memory leaks, buffer overwrites, etc.).
+# Note that all these checks are also controlled by environment settings
+# that can enable or disable specific checks. Note also that some of
+# the checks are available only in the emulator and are implemented in
+# libc_malloc_debug_qemu.so.
+ifneq ($(TARGET_BUILD_VARIANT),user)
+
# ========================================================
-# libc_debug.so
+# libc_malloc_debug_leak.so
# ========================================================
include $(CLEAR_VARS)
LOCAL_C_INCLUDES := $(libc_common_c_includes)
LOCAL_SRC_FILES := \
- $(libc_arch_dynamic_src_files) \
- bionic/dlmalloc.c \
- bionic/malloc_leak.c.arm \
- bionic/libc_init_dynamic.c
+ bionic/malloc_debug_leak.c
-LOCAL_MODULE:= libc_debug
+LOCAL_MODULE:= libc_malloc_debug_leak
-# WARNING: The only library libc.so should depend on is libdl.so! If you add other libraries,
-# make sure to add -Wl,--exclude-libs=libgcc.a to the LOCAL_LDFLAGS for those libraries. This
-# ensures that symbols that are pulled into those new libraries from libgcc.a are not declared
-# external; if that were the case, then libc would not pull those symbols from libgcc.a as it
-# should, instead relying on the external symbols from the dependent libraries. That would
-# create an "cloaked" dependency on libgcc.a in libc though the libraries, which is not what
-# you wanted!
+LOCAL_SHARED_LIBRARIES := libc
+LOCAL_WHOLE_STATIC_LIBRARIES := libc_common
+LOCAL_SYSTEM_SHARED_LIBRARIES :=
+# Don't prelink
+LOCAL_PRELINK_MODULE := false
+# Don't install on release build
+LOCAL_MODULE_TAGS := eng debug
-LOCAL_SHARED_LIBRARIES := libdl
+include $(BUILD_SHARED_LIBRARY)
+
+
+# ========================================================
+# libc_malloc_debug_qemu.so
+# ========================================================
+include $(CLEAR_VARS)
+
+LOCAL_CFLAGS := \
+ $(libc_common_cflags) \
+ -DMALLOC_QEMU_INSTRUMENT
+
+LOCAL_C_INCLUDES := $(libc_common_c_includes)
+
+LOCAL_SRC_FILES := \
+ bionic/malloc_debug_qemu.c
+
+LOCAL_MODULE:= libc_malloc_debug_qemu
+
+LOCAL_SHARED_LIBRARIES := libc
LOCAL_WHOLE_STATIC_LIBRARIES := libc_common
LOCAL_SYSTEM_SHARED_LIBRARIES :=
# Don't prelink
LOCAL_PRELINK_MODULE := false
# Don't install on release build
-LOCAL_MODULE_TAGS := eng
+LOCAL_MODULE_TAGS := eng debug
include $(BUILD_SHARED_LIBRARY)
+endif #!user
+
+
# ========================================================
include $(call all-makefiles-under,$(LOCAL_PATH))
diff --git a/libc/SYSCALLS.TXT b/libc/SYSCALLS.TXT
index 684b43eb50f5f0dd8549478a51857f580f9e12a5..8c664d7594893f767e9fd0ad66e2ea6b37162b92 100644 (file)
--- a/libc/SYSCALLS.TXT
+++ b/libc/SYSCALLS.TXT
pid_t __fork:fork (void) 2
pid_t _waitpid:waitpid (pid_t, int*, int, struct rusage*) -1,7
int waitid(int, pid_t, struct siginfo_t*, int,void*) 280,284
-pid_t __clone:clone(int (*fn)(void*), void *child_stack, int flags, void *arg) 120
+
+# NOTE: this system call is never called directly, but we list it here
+# so that __NR_clone gets properly defined.
+#
+pid_t __sys_clone:clone (int, void*, int*, void*, int*) 120
+
int execve (const char*, char* const*, char* const*) 11
-int setuid:setuid32 (uid_t) 213
+int __setuid:setuid32 (uid_t) 213
uid_t getuid:getuid32 () 199
gid_t getgid:getgid32 () 200
uid_t geteuid:geteuid32 () 201
pid_t setsid() 66
int setgid:setgid32(gid_t) 214
int seteuid:seteuid32(uid_t) stub
-int setreuid:setreuid32(uid_t, uid_t) 203
-int setresuid:setresuid32(uid_t, uid_t, uid_t) 208
+int __setreuid:setreuid32(uid_t, uid_t) 203
+int __setresuid:setresuid32(uid_t, uid_t, uid_t) 208
int setresgid:setresgid32(gid_t, gid_t, gid_t) 210
void* __brk:brk(void*) 45
# see comments in arch-arm/bionic/kill.S to understand why we don't generate an ARM stub for kill/tkill
int prctl(int option, unsigned int arg2, unsigned int arg3, unsigned int arg4, unsigned int arg5) 172
int capget(cap_user_header_t header, cap_user_data_t data) 184
int capset(cap_user_header_t header, const cap_user_data_t data) 185
+int sigaltstack(const stack_t*, stack_t*) 186
int acct(const char* filepath) 51
# file descriptors
int sched_get_priority_min(int policy) 160
int sched_rr_get_interval(pid_t pid, struct timespec *interval) 161
+# io priorities
+int ioprio_set(int which, int who, int ioprio) 314,289
+int ioprio_get(int which, int who) 315,290
+
# other
int uname(struct utsname *) 122
pid_t __wait4:wait4(pid_t pid, int *status, int options, struct rusage *rusage) 114
index d1377c7ecc7737e20e12a202812a8909d4ffe49c..4fc89299c0fb348e32b2962df4375f0086e51747 100644 (file)
* SUCH DAMAGE.
*/
.global __get_pc
+.type __get_pc, %function
__get_pc:
mov r0, pc
index 9acaf3da53e6081b44c0d3ea6bfc4a240dc8e7c8..0a313a3766c1c14ea05f55ad9331d1fa003b19c3 100644 (file)
* SUCH DAMAGE.
*/
.global __get_sp
+.type __get_sp, %function
__get_sp:
mov r0, sp
index 0cd0b92dedbaac14832cc3fa7531891144eff87f..047541fc214f6e828442ce8f4612ded6712f7c53 100644 (file)
#include <sys/linux-syscalls.h>
.global __atomic_cmpxchg
+.type __atomic_cmpxchg, %function
.global __atomic_swap
+.type __atomic_swap, %function
.global __atomic_dec
+.type __atomic_dec, %function
.global __atomic_inc
-.global __futex_wait
-.global __futex_wake
+.type __atomic_inc, %function
#define FUTEX_WAIT 0
#define FUTEX_WAKE 1
bx lr
/* __futex_wait(*ftx, val, *timespec) */
-/* __futex_syscall(*ftx, op, val, *timespec, *addr2, val3) */
+/* __futex_wake(*ftx, counter) */
+/* __futex_syscall3(*ftx, op, val) */
+/* __futex_syscall4(*ftx, op, val, *timespec) */
+
+.global __futex_wait
+.type __futex_wait, %function
+
+.global __futex_wake
+.type __futex_wake, %function
+
+.global __futex_syscall3
+.type __futex_syscall3, %function
+
+.global __futex_syscall4
+.type __futex_syscall4, %function
#if __ARM_EABI__
+__futex_syscall3:
+ .fnstart
+ stmdb sp!, {r4, r7}
+ .save {r4, r7}
+ ldr r7, =__NR_futex
+ swi #0
+ ldmia sp!, {r4, r7}
+ bx lr
+ .fnend
+
__futex_wait:
.fnstart
stmdb sp!, {r4, r7}
#else
+__futex_syscall3:
+ swi #__NR_futex
+ bx lr
+
__futex_wait:
mov r3, r2
mov r2, r1
bx lr
#endif
+
+__futex_syscall4:
+ b __futex_syscall3
index 791c73d28ef24583a7bab0c0c38c55c6fbdcb7d6..9c25053738eb1b157348b171d2c7e6bfdfc72290 100644 (file)
/*
- * Copyright (C) 2008 The Android Open Source Project
+ * Copyright (C) 2008-2010 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
*/
#include <sys/linux-syscalls.h>
- .text
- .type __pthread_clone, #function
- .global __pthread_clone
- .align 4
-
+ .text
+ .type __pthread_clone, #function
+ .global __pthread_clone
+ .align 4
+ .fnstart
+
__pthread_clone:
- @ insert the args onto the new stack
- str r0, [r1, #-4]
- str r3, [r1, #-8]
+ @ insert the args onto the new stack
+ str r0, [r1, #-4]
+ str r3, [r1, #-8]
+
+ @ do the system call
+ @ get flags
- @ do the system call
- @ get flags
-
mov r0, r2
-
+
@ new sp is already in r1
#if __ARM_EABI__
stmfd sp!, {r4, r7}
ldr r7, =__NR_clone
- swi #0
+ swi #0
#else
- swi #__NR_clone
+ swi #__NR_clone
#endif
- movs r0, r0
+ movs r0, r0
#if __ARM_EABI__
ldmnefd sp!, {r4, r7}
#endif
- blt __error
- bxne lr
+ blt __error
+ bxne lr
- @ pick the function arg and call address off the stack and jump
- @ to the C __thread_entry function which does some setup and then
- @ calls the thread's start function
+ @ pick the function arg and call address off the stack and jump
+ @ to the C __thread_entry function which does some setup and then
+ @ calls the thread's start function
- ldr r0, [sp, #-4]
- ldr r1, [sp, #-8]
- mov r2, sp @ __thread_entry needs the TLS pointer
- b __thread_entry
+ ldr r0, [sp, #-4]
+ ldr r1, [sp, #-8]
+ mov r2, sp @ __thread_entry needs the TLS pointer
+ b __thread_entry
__error:
- mov r0, #-1
- bx lr
+ mov r0, #-1
+ bx lr
+ .fnend
+
+
+ #
+ # This function is defined as:
+ #
+ # pid_t __bionic_clone( int flags, void *child_stack,
+ # pid_t *pid, void *tls, pid_t *ctid,
+ # int (*fn)(void *), void* arg );
+ #
+ # NOTE: This does not have the same signature as the glibc
+ #       __clone function! Placing 'fn' and 'arg'
+ # at the end of the parameter list makes the
+ # implementation much simpler.
+ #
+ .type __bionic_clone, #function
+ .globl __bionic_clone
+ .align 4
+ .fnstart
+
+__bionic_clone:
+ mov ip, sp
+ .save {r4, r5, r6, r7}
+
+ # save registers to parent stack
+ stmfd sp!, {r4, r5, r6, r7}
+
+ # load extra parameters
+ ldmfd ip, {r4, r5, r6}
+
+ # store 'fn' and 'arg' to the child stack
+ str r5, [r1, #-4]
+ str r6, [r1, #-8]
+
+ # system call
+ ldr r7, =__NR_clone
+ swi #0
+ movs r0, r0
+ beq 1f
+
+ # in parent, reload saved registers
+ # then either exit or error
+ #
+ ldmfd sp!, {r4, r5, r6, r7}
+ bxne lr
+ b __set_syscall_errno
+
+1: # in the child - pick arguments
+ ldr r0, [sp, #-4]
+ ldr r1, [sp, #-8]
+ b __bionic_clone_entry
+
+ .fnend
index 024d8853b2153d80e9eede2549981bd01d02b8a6..ba55996ec9786b45f9d8d70af60509b540ca3877 100644 (file)
#include <machine/cpu-features.h>
-#if __ARM_ARCH__ == 7 || defined(__ARM_NEON__)
+#if defined(__ARM_NEON__)
.text
.fpu neon
index 706cb0c9b967c91cba6e0ff0c01abcaa85cf9fd3..4a8caaca2790adffaa302b00b37092011f48ce68 100644 (file)
syscall_src += arch-arm/syscalls/_exit_thread.S
syscall_src += arch-arm/syscalls/__fork.S
syscall_src += arch-arm/syscalls/waitid.S
-syscall_src += arch-arm/syscalls/__clone.S
+syscall_src += arch-arm/syscalls/__sys_clone.S
syscall_src += arch-arm/syscalls/execve.S
-syscall_src += arch-arm/syscalls/setuid.S
+syscall_src += arch-arm/syscalls/__setuid.S
syscall_src += arch-arm/syscalls/getuid.S
syscall_src += arch-arm/syscalls/getgid.S
syscall_src += arch-arm/syscalls/geteuid.S
syscall_src += arch-arm/syscalls/getppid.S
syscall_src += arch-arm/syscalls/setsid.S
syscall_src += arch-arm/syscalls/setgid.S
-syscall_src += arch-arm/syscalls/setreuid.S
-syscall_src += arch-arm/syscalls/setresuid.S
+syscall_src += arch-arm/syscalls/__setreuid.S
+syscall_src += arch-arm/syscalls/__setresuid.S
syscall_src += arch-arm/syscalls/setresgid.S
syscall_src += arch-arm/syscalls/__brk.S
syscall_src += arch-arm/syscalls/__ptrace.S
syscall_src += arch-arm/syscalls/prctl.S
syscall_src += arch-arm/syscalls/capget.S
syscall_src += arch-arm/syscalls/capset.S
+syscall_src += arch-arm/syscalls/sigaltstack.S
syscall_src += arch-arm/syscalls/acct.S
syscall_src += arch-arm/syscalls/read.S
syscall_src += arch-arm/syscalls/write.S
syscall_src += arch-arm/syscalls/sched_get_priority_max.S
syscall_src += arch-arm/syscalls/sched_get_priority_min.S
syscall_src += arch-arm/syscalls/sched_rr_get_interval.S
+syscall_src += arch-arm/syscalls/ioprio_set.S
+syscall_src += arch-arm/syscalls/ioprio_get.S
syscall_src += arch-arm/syscalls/uname.S
syscall_src += arch-arm/syscalls/__wait4.S
syscall_src += arch-arm/syscalls/umask.S
similarity index 81%
rename from libc/arch-arm/syscalls/setresuid.S
rename to libc/arch-arm/syscalls/__setresuid.S
index 266c1a151950cccf78cae4f63c016912b63a7090..771077243314b5913c55b6a69733f5eb5ad732cc 100644 (file)
#include <sys/linux-syscalls.h>
.text
- .type setresuid, #function
- .globl setresuid
+ .type __setresuid, #function
+ .globl __setresuid
.align 4
.fnstart
-setresuid:
+__setresuid:
.save {r4, r7}
stmfd sp!, {r4, r7}
ldr r7, =__NR_setresuid32
similarity index 82%
rename from libc/arch-arm/syscalls/setreuid.S
rename to libc/arch-arm/syscalls/__setreuid.S
index 0f94b47dea97674a67365a806566bd9eec27746f..0c68866a7fe3f7239d26814cc3de7501bbab877f 100644 (file)
#include <sys/linux-syscalls.h>
.text
- .type setreuid, #function
- .globl setreuid
+ .type __setreuid, #function
+ .globl __setreuid
.align 4
.fnstart
-setreuid:
+__setreuid:
.save {r4, r7}
stmfd sp!, {r4, r7}
ldr r7, =__NR_setreuid32
similarity index 83%
rename from libc/arch-arm/syscalls/setuid.S
rename to libc/arch-arm/syscalls/__setuid.S
index 31cf446900e0323ffd920d08d0db0b022ee2a205..efc6e56bf227a3c517aff9d79bcf2c5aa3f6550b 100644 (file)
#include <sys/linux-syscalls.h>
.text
- .type setuid, #function
- .globl setuid
+ .type __setuid, #function
+ .globl __setuid
.align 4
.fnstart
-setuid:
+__setuid:
.save {r4, r7}
stmfd sp!, {r4, r7}
ldr r7, =__NR_setuid32
diff --git a/libc/arch-arm/syscalls/__sys_clone.S b/libc/arch-arm/syscalls/__sys_clone.S
--- /dev/null
@@ -0,0 +1,21 @@
+/* autogenerated by gensyscalls.py */
+#include <sys/linux-syscalls.h>
+
+ .text
+ .type __sys_clone, #function
+ .globl __sys_clone
+ .align 4
+ .fnstart
+
+__sys_clone:
+ mov ip, sp
+ .save {r4, r5, r6, r7}
+ stmfd sp!, {r4, r5, r6, r7}
+ ldmfd ip, {r4, r5, r6}
+ ldr r7, =__NR_clone
+ swi #0
+ ldmfd sp!, {r4, r5, r6, r7}
+ movs r0, r0
+ bxpl lr
+ b __set_syscall_errno
+ .fnend
similarity index 73%
rename from libc/arch-arm/syscalls/__clone.S
rename to libc/arch-arm/syscalls/ioprio_get.S
index 650e2c0caff4172ffefc39ff8649a1a9d5d4e2a5..d686e9877bd61a3338c8b400724b69b311cd91cd 100644 (file)
#include <sys/linux-syscalls.h>
.text
- .type __clone, #function
- .globl __clone
+ .type ioprio_get, #function
+ .globl ioprio_get
.align 4
.fnstart
-__clone:
+ioprio_get:
.save {r4, r7}
stmfd sp!, {r4, r7}
- ldr r7, =__NR_clone
+ ldr r7, =__NR_ioprio_get
swi #0
ldmfd sp!, {r4, r7}
movs r0, r0
diff --git a/libc/arch-arm/syscalls/ioprio_set.S b/libc/arch-arm/syscalls/ioprio_set.S
--- /dev/null
@@ -0,0 +1,19 @@
+/* autogenerated by gensyscalls.py */
+#include <sys/linux-syscalls.h>
+
+ .text
+ .type ioprio_set, #function
+ .globl ioprio_set
+ .align 4
+ .fnstart
+
+ioprio_set:
+ .save {r4, r7}
+ stmfd sp!, {r4, r7}
+ ldr r7, =__NR_ioprio_set
+ swi #0
+ ldmfd sp!, {r4, r7}
+ movs r0, r0
+ bxpl lr
+ b __set_syscall_errno
+ .fnend
diff --git a/libc/arch-arm/syscalls/sigaltstack.S b/libc/arch-arm/syscalls/sigaltstack.S
--- /dev/null
@@ -0,0 +1,19 @@
+/* autogenerated by gensyscalls.py */
+#include <sys/linux-syscalls.h>
+
+ .text
+ .type sigaltstack, #function
+ .globl sigaltstack
+ .align 4
+ .fnstart
+
+sigaltstack:
+ .save {r4, r7}
+ stmfd sp!, {r4, r7}
+ ldr r7, =__NR_sigaltstack
+ swi #0
+ ldmfd sp!, {r4, r7}
+ movs r0, r0
+ bxpl lr
+ b __set_syscall_errno
+ .fnend
index 16966f76288e597ee61f032275b439bfbb929137..c7815ff2c2d3c49ad2f345bea8e81e4499db9c62 100644 (file)
{
return futex(ftx, FUTEX_WAKE, count, NULL, NULL, 0);
}
+
+int __futex_syscall3(volatile void *ftx, int op, int val)
+{
+ return futex(ftx, op, val, NULL, NULL, 0);
+}
+
+int __futex_syscall4(volatile void *ftx, int op, int val, const struct timespec *timeout)
+{
+ return futex(ftx, op, val, (void *)timeout, NULL, 0);
+}
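
As a hedged illustration (not part of the change), the new __futex_syscall3()
wrapper can be used directly for a simple wait/wake handshake; the helper names
below are hypothetical and FUTEX_WAIT/FUTEX_WAKE are the raw operation codes
already used in this file:

    #define FUTEX_WAIT 0
    #define FUTEX_WAKE 1

    extern int __futex_syscall3(volatile void *ftx, int op, int val);

    static volatile int ready = 0;

    static void wait_until_ready(void) {
        /* Block while 'ready' still holds 0; loop because wakeups can be spurious. */
        while (ready == 0)
            __futex_syscall3(&ready, FUTEX_WAIT, 0);
    }

    static void signal_ready(void) {
        ready = 1;
        __futex_syscall3(&ready, FUTEX_WAKE, 1);   /* wake at most one waiter */
    }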
index 0bbaecbc9b826d532d13c68a2e9f86fbb904ff2b..9cb19eeb5b71b645c2929c5bebde7600e1001d93 100644 (file)
.align 2
0: .long __NR_clone
1: .long __thread_entry
+
+/* XXX: TODO: Add __bionic_clone here
+ * See bionic/bionic_clone.c and arch-arm/bionic/clone.S
+ * for more details...
+ */
\ No newline at end of file
index cefb2ec46b13ee73f30c524f306494eb02b5a9f9..ab2f3d1b2f11f4bf34d4adfa0f31ec9579c374a6 100644 (file)
--- a/libc/arch-sh/syscalls.mk
+++ b/libc/arch-sh/syscalls.mk
syscall_src += arch-sh/syscalls/__fork.S
syscall_src += arch-sh/syscalls/_waitpid.S
syscall_src += arch-sh/syscalls/waitid.S
-syscall_src += arch-sh/syscalls/__clone.S
+syscall_src += arch-sh/syscalls/__sys_clone.S
syscall_src += arch-sh/syscalls/execve.S
-syscall_src += arch-sh/syscalls/setuid.S
+syscall_src += arch-sh/syscalls/__setuid.S
syscall_src += arch-sh/syscalls/getuid.S
syscall_src += arch-sh/syscalls/getgid.S
syscall_src += arch-sh/syscalls/geteuid.S
syscall_src += arch-sh/syscalls/getppid.S
syscall_src += arch-sh/syscalls/setsid.S
syscall_src += arch-sh/syscalls/setgid.S
-syscall_src += arch-sh/syscalls/setreuid.S
-syscall_src += arch-sh/syscalls/setresuid.S
+syscall_src += arch-sh/syscalls/__setreuid.S
+syscall_src += arch-sh/syscalls/__setresuid.S
syscall_src += arch-sh/syscalls/setresgid.S
syscall_src += arch-sh/syscalls/__brk.S
syscall_src += arch-sh/syscalls/kill.S
syscall_src += arch-sh/syscalls/prctl.S
syscall_src += arch-sh/syscalls/capget.S
syscall_src += arch-sh/syscalls/capset.S
+syscall_src += arch-sh/syscalls/sigaltstack.S
syscall_src += arch-sh/syscalls/acct.S
syscall_src += arch-sh/syscalls/read.S
syscall_src += arch-sh/syscalls/write.S
syscall_src += arch-sh/syscalls/sched_get_priority_max.S
syscall_src += arch-sh/syscalls/sched_get_priority_min.S
syscall_src += arch-sh/syscalls/sched_rr_get_interval.S
+syscall_src += arch-sh/syscalls/ioprio_set.S
+syscall_src += arch-sh/syscalls/ioprio_get.S
syscall_src += arch-sh/syscalls/uname.S
syscall_src += arch-sh/syscalls/__wait4.S
syscall_src += arch-sh/syscalls/umask.S
similarity index 87%
rename from libc/arch-sh/syscalls/setresuid.S
rename to libc/arch-sh/syscalls/__setresuid.S
index 41fe3492036f5ed1e8e92c340127504d5d33eb7a..424100e511a2c77aeae562ea04dcc1feaedd9fc2 100644 (file)
#include <sys/linux-syscalls.h>
.text
- .type setresuid, @function
- .globl setresuid
+ .type __setresuid, @function
+ .globl __setresuid
.align 4
-setresuid:
+__setresuid:
/* invoke trap */
mov.l 0f, r3 /* trap num */
similarity index 88%
rename from libc/arch-sh/syscalls/setreuid.S
rename to libc/arch-sh/syscalls/__setreuid.S
index 025df277e68d0125443bf5a9fb45af4071b6f147..69907482d8621c336108ab35c1089ba5133532f9 100644 (file)
#include <sys/linux-syscalls.h>
.text
- .type setreuid, @function
- .globl setreuid
+ .type __setreuid, @function
+ .globl __setreuid
.align 4
-setreuid:
+__setreuid:
/* invoke trap */
mov.l 0f, r3 /* trap num */
similarity index 89%
rename from libc/arch-sh/syscalls/setuid.S
rename to libc/arch-sh/syscalls/__setuid.S
index 1fb3148d1b879834770a9c67d70a7cb263e75607..f563de75dd4adfed09978dd5eeec5fda9a676623 100644 (file)
#include <sys/linux-syscalls.h>
.text
- .type setuid, @function
- .globl setuid
+ .type __setuid, @function
+ .globl __setuid
.align 4
-setuid:
+__setuid:
/* invoke trap */
mov.l 0f, r3 /* trap num */
similarity index 74%
rename from libc/arch-sh/syscalls/__clone.S
rename to libc/arch-sh/syscalls/__sys_clone.S
index 1df6ca2883f96d975c35ca7117551567a0b883c0..c2e7dd20b92c94a4e2eb92142e997de6a71e4ac1 100644 (file)
#include <sys/linux-syscalls.h>
.text
- .type __clone, @function
- .globl __clone
+ .type __sys_clone, @function
+ .globl __sys_clone
.align 4
-__clone:
+__sys_clone:
+
+	/* get ready for additional arg */
+ mov.l @r15, r0
/* invoke trap */
mov.l 0f, r3 /* trap num */
- trapa #(4 + 0x10)
+ trapa #(5 + 0x10)
/* check return value */
cmp/pz r0
diff --git a/libc/arch-sh/syscalls/ioprio_get.S b/libc/arch-sh/syscalls/ioprio_get.S
--- /dev/null
@@ -0,0 +1,32 @@
+/* autogenerated by gensyscalls.py */
+#include <sys/linux-syscalls.h>
+
+ .text
+ .type ioprio_get, @function
+ .globl ioprio_get
+ .align 4
+
+ioprio_get:
+
+ /* invoke trap */
+ mov.l 0f, r3 /* trap num */
+ trapa #(2 + 0x10)
+
+ /* check return value */
+ cmp/pz r0
+ bt __NR_ioprio_get_end
+
+ /* keep error number */
+ sts.l pr, @-r15
+ mov.l 1f, r1
+ jsr @r1
+ mov r0, r4
+ lds.l @r15+, pr
+
+__NR_ioprio_get_end:
+ rts
+ nop
+
+ .align 2
+0: .long __NR_ioprio_get
+1: .long __set_syscall_errno
diff --git a/libc/arch-sh/syscalls/ioprio_set.S b/libc/arch-sh/syscalls/ioprio_set.S
--- /dev/null
@@ -0,0 +1,32 @@
+/* autogenerated by gensyscalls.py */
+#include <sys/linux-syscalls.h>
+
+ .text
+ .type ioprio_set, @function
+ .globl ioprio_set
+ .align 4
+
+ioprio_set:
+
+ /* invoke trap */
+ mov.l 0f, r3 /* trap num */
+ trapa #(3 + 0x10)
+
+ /* check return value */
+ cmp/pz r0
+ bt __NR_ioprio_set_end
+
+ /* keep error number */
+ sts.l pr, @-r15
+ mov.l 1f, r1
+ jsr @r1
+ mov r0, r4
+ lds.l @r15+, pr
+
+__NR_ioprio_set_end:
+ rts
+ nop
+
+ .align 2
+0: .long __NR_ioprio_set
+1: .long __set_syscall_errno
diff --git a/libc/arch-sh/syscalls/sigaltstack.S b/libc/arch-sh/syscalls/sigaltstack.S
--- /dev/null
@@ -0,0 +1,32 @@
+/* autogenerated by gensyscalls.py */
+#include <sys/linux-syscalls.h>
+
+ .text
+ .type sigaltstack, @function
+ .globl sigaltstack
+ .align 4
+
+sigaltstack:
+
+ /* invoke trap */
+ mov.l 0f, r3 /* trap num */
+ trapa #(2 + 0x10)
+
+ /* check return value */
+ cmp/pz r0
+ bt __NR_sigaltstack_end
+
+ /* keep error number */
+ sts.l pr, @-r15
+ mov.l 1f, r1
+ jsr @r1
+ mov r0, r4
+ lds.l @r15+, pr
+
+__NR_sigaltstack_end:
+ rts
+ nop
+
+ .align 2
+0: .long __NR_sigaltstack
+1: .long __set_syscall_errno
index 2370f23589c935ad0c8317953a10d88e265e45fe..666e1821ce92a7019446021d2b1b0acb077d6fc7 100644 (file)
popl %ebx
ret
+/* int __futex_syscall3(volatile void *ftx, int op, int count) */
+.text
+.globl __futex_syscall3
+.type __futex_syscall3, @function
+.align 4
+__futex_syscall3:
+ pushl %ebx
+ movl 8(%esp), %ebx /* ftx */
+ movl 12(%esp), %ecx /* op */
+ movl 16(%esp), %edx /* value */
+ movl $__NR_futex, %eax
+ int $0x80
+ popl %ebx
+ ret
+
+/* int __futex_syscall4(volatile void *ftx, int op, int val, const struct timespec *timeout) */
+.text
+.globl __futex_syscall4
+.type __futex_syscall4, @function
+.align 4
+__futex_syscall4:
+ pushl %ebx
+ pushl %esi
+ movl 12(%esp), %ebx /* ftx */
+ movl 16(%esp), %ecx /* op */
+ movl 20(%esp), %edx /* val */
+ movl 24(%esp), %esi /* timeout */
+ movl $__NR_futex, %eax
+ int $0x80
+ popl %esi
+ popl %ebx
+ ret
/* int __atomic_cmpxchg(int old, int new, volatile int* addr) */
diff --git a/libc/arch-x86/bionic/atomics_x86.c b/libc/arch-x86/bionic/atomics_x86.c
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
- * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
- * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
- * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
- * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-#include <sys/atomics.h>
-
-#define FUTEX_SYSCALL 240
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-
-int __futex_wait(volatile void *ftx, int val)
-{
- int ret;
- asm volatile (
- "int $0x80;"
- : "=a" (ret)
- : "0" (FUTEX_SYSCALL),
- "b" (ftx),
- "c" (FUTEX_WAIT),
- "d" (val),
- "S" (0)
- );
- return ret;
-}
-
-int __futex_wake(volatile void *ftx, int count)
-{
- int ret;
- asm volatile (
- "int $0x80;"
- : "=a" (ret)
- : "0" (FUTEX_SYSCALL),
- "b" (ftx),
- "c" (FUTEX_WAKE),
- "d" (count)
- );
- return ret;
-}
-
-int __atomic_cmpxchg(int old, int new, volatile int* addr) {
- int xchg;
- asm volatile (
- "lock;"
- "cmpxchg %%ecx, (%%edx);"
- "setne %%al;"
- : "=a" (xchg)
- : "a" (old),
- "c" (new),
- "d" (addr)
- );
- return xchg;
-}
-
-int __atomic_swap(int new, volatile int* addr) {
- int old;
- asm volatile (
- "lock;"
- "xchg %%ecx, (%%edx);"
- : "=c" (old)
- : "c" (new),
- "d" (addr)
- );
- return old;
-}
-
-int __atomic_dec(volatile int* addr) {
- int old;
- do {
- old = *addr;
- } while (atomic_cmpxchg(old, old-1, addr));
- return old;
-}
-
-int __atomic_inc(volatile int* addr) {
- int old;
- do {
- old = *addr;
- } while (atomic_cmpxchg(old, old+1, addr));
- return old;
-}
-
index 361808ded560ade0ad9c4f311b6cfd0e022db1a5..3b50cc3d34a107b01ea60602f3a7faf9fd98e76d 100644 (file)
popl %ecx
popl %ebx
ret
+
+/* XXX: TODO: Add __bionic_clone here
+ * See bionic/bionic_clone.c and arch-arm/bionic/clone.S
+ * for more details...
+ */
\ No newline at end of file
index 86d23080a08e8a71e4304bce812bcfddd0ca2bc2..ab026fe5f74d59c165419b8b71b4b656f39a0d44 100644 (file)
syscall_src += arch-x86/syscalls/__fork.S
syscall_src += arch-x86/syscalls/_waitpid.S
syscall_src += arch-x86/syscalls/waitid.S
-syscall_src += arch-x86/syscalls/__clone.S
+syscall_src += arch-x86/syscalls/__sys_clone.S
syscall_src += arch-x86/syscalls/execve.S
-syscall_src += arch-x86/syscalls/setuid.S
+syscall_src += arch-x86/syscalls/__setuid.S
syscall_src += arch-x86/syscalls/getuid.S
syscall_src += arch-x86/syscalls/getgid.S
syscall_src += arch-x86/syscalls/geteuid.S
syscall_src += arch-x86/syscalls/getppid.S
syscall_src += arch-x86/syscalls/setsid.S
syscall_src += arch-x86/syscalls/setgid.S
-syscall_src += arch-x86/syscalls/setreuid.S
-syscall_src += arch-x86/syscalls/setresuid.S
+syscall_src += arch-x86/syscalls/__setreuid.S
+syscall_src += arch-x86/syscalls/__setresuid.S
syscall_src += arch-x86/syscalls/setresgid.S
syscall_src += arch-x86/syscalls/__brk.S
syscall_src += arch-x86/syscalls/kill.S
syscall_src += arch-x86/syscalls/prctl.S
syscall_src += arch-x86/syscalls/capget.S
syscall_src += arch-x86/syscalls/capset.S
+syscall_src += arch-x86/syscalls/sigaltstack.S
syscall_src += arch-x86/syscalls/acct.S
syscall_src += arch-x86/syscalls/read.S
syscall_src += arch-x86/syscalls/write.S
syscall_src += arch-x86/syscalls/sched_get_priority_max.S
syscall_src += arch-x86/syscalls/sched_get_priority_min.S
syscall_src += arch-x86/syscalls/sched_rr_get_interval.S
+syscall_src += arch-x86/syscalls/ioprio_set.S
+syscall_src += arch-x86/syscalls/ioprio_get.S
syscall_src += arch-x86/syscalls/uname.S
syscall_src += arch-x86/syscalls/__wait4.S
syscall_src += arch-x86/syscalls/umask.S
similarity index 87%
rename from libc/arch-x86/syscalls/setresuid.S
rename to libc/arch-x86/syscalls/__setresuid.S
index f81cb39f98abb063bd859e7e1261cd62cc8702eb..c492dfb5d314d75d290aa588fc2022d244eccc5f 100644 (file)
#include <sys/linux-syscalls.h>
.text
- .type setresuid, @function
- .globl setresuid
+ .type __setresuid, @function
+ .globl __setresuid
.align 4
-setresuid:
+__setresuid:
pushl %ebx
pushl %ecx
pushl %edx
similarity index 86%
rename from libc/arch-x86/syscalls/setreuid.S
rename to libc/arch-x86/syscalls/__setreuid.S
index 99e5d5b91d0fc14dcca0005d09e78c06ce318519..111e999afafdebf90fc57c34486eddaf91055c31 100644 (file)
#include <sys/linux-syscalls.h>
.text
- .type setreuid, @function
- .globl setreuid
+ .type __setreuid, @function
+ .globl __setreuid
.align 4
-setreuid:
+__setreuid:
pushl %ebx
pushl %ecx
mov 12(%esp), %ebx
similarity index 85%
rename from libc/arch-x86/syscalls/setuid.S
rename to libc/arch-x86/syscalls/__setuid.S
index de334c0d3cc58394a349cc176249c5e10328bf4e..1e5f28572c8dfe93e14e13759df40faa19c90599 100644 (file)
#include <sys/linux-syscalls.h>
.text
- .type setuid, @function
- .globl setuid
+ .type __setuid, @function
+ .globl __setuid
.align 4
-setuid:
+__setuid:
pushl %ebx
mov 8(%esp), %ebx
movl $__NR_setuid32, %eax
similarity index 64%
rename from libc/arch-x86/syscalls/__clone.S
rename to libc/arch-x86/syscalls/__sys_clone.S
index 586212974b46041d6621f5a12e1355a193e8c0d5..172d6af8966c74cd3567e892715af5a5b222cfe1 100644 (file)
#include <sys/linux-syscalls.h>
.text
- .type __clone, @function
- .globl __clone
+ .type __sys_clone, @function
+ .globl __sys_clone
.align 4
-__clone:
+__sys_clone:
pushl %ebx
pushl %ecx
pushl %edx
pushl %esi
- mov 20(%esp), %ebx
- mov 24(%esp), %ecx
- mov 28(%esp), %edx
- mov 32(%esp), %esi
+ pushl %edi
+ mov 24(%esp), %ebx
+ mov 28(%esp), %ecx
+ mov 32(%esp), %edx
+ mov 36(%esp), %esi
+ mov 40(%esp), %edi
movl $__NR_clone, %eax
int $0x80
cmpl $-129, %eax
addl $4, %esp
orl $-1, %eax
1:
+ popl %edi
popl %esi
popl %edx
popl %ecx
diff --git a/libc/arch-x86/syscalls/ioprio_get.S b/libc/arch-x86/syscalls/ioprio_get.S
--- /dev/null
@@ -0,0 +1,26 @@
+/* autogenerated by gensyscalls.py */
+#include <sys/linux-syscalls.h>
+
+ .text
+ .type ioprio_get, @function
+ .globl ioprio_get
+ .align 4
+
+ioprio_get:
+ pushl %ebx
+ pushl %ecx
+ mov 12(%esp), %ebx
+ mov 16(%esp), %ecx
+ movl $__NR_ioprio_get, %eax
+ int $0x80
+ cmpl $-129, %eax
+ jb 1f
+ negl %eax
+ pushl %eax
+ call __set_errno
+ addl $4, %esp
+ orl $-1, %eax
+1:
+ popl %ecx
+ popl %ebx
+ ret
diff --git a/libc/arch-x86/syscalls/ioprio_set.S b/libc/arch-x86/syscalls/ioprio_set.S
--- /dev/null
@@ -0,0 +1,29 @@
+/* autogenerated by gensyscalls.py */
+#include <sys/linux-syscalls.h>
+
+ .text
+ .type ioprio_set, @function
+ .globl ioprio_set
+ .align 4
+
+ioprio_set:
+ pushl %ebx
+ pushl %ecx
+ pushl %edx
+ mov 16(%esp), %ebx
+ mov 20(%esp), %ecx
+ mov 24(%esp), %edx
+ movl $__NR_ioprio_set, %eax
+ int $0x80
+ cmpl $-129, %eax
+ jb 1f
+ negl %eax
+ pushl %eax
+ call __set_errno
+ addl $4, %esp
+ orl $-1, %eax
+1:
+ popl %edx
+ popl %ecx
+ popl %ebx
+ ret
diff --git a/libc/arch-x86/syscalls/sigaltstack.S b/libc/arch-x86/syscalls/sigaltstack.S
--- /dev/null
@@ -0,0 +1,26 @@
+/* autogenerated by gensyscalls.py */
+#include <sys/linux-syscalls.h>
+
+ .text
+ .type sigaltstack, @function
+ .globl sigaltstack
+ .align 4
+
+sigaltstack:
+ pushl %ebx
+ pushl %ecx
+ mov 12(%esp), %ebx
+ mov 16(%esp), %ecx
+ movl $__NR_sigaltstack, %eax
+ int $0x80
+ cmpl $-129, %eax
+ jb 1f
+ negl %eax
+ pushl %eax
+ call __set_errno
+ addl $4, %esp
+ orl $-1, %eax
+1:
+ popl %ecx
+ popl %ebx
+ ret
diff --git a/libc/bionic/bionic_clone.c b/libc/bionic/bionic_clone.c
--- /dev/null
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#define __GNU_SOURCE 1
+#include <sched.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <stdio.h>
+
+/* WARNING: AT THE MOMENT, THIS IS ONLY SUPPORTED ON ARM
+ */
+
+extern int __bionic_clone(unsigned long clone_flags,
+ void* newsp,
+ int *parent_tidptr,
+ void *new_tls,
+ int *child_tidptr,
+ int (*fn)(void *),
+ void *arg);
+
+extern void _exit_thread(int retCode);
+
+/* This function is called from the __bionic_clone
+ * assembly fragment; it invokes the child function
+ * and then exits. */
+extern void
+__bionic_clone_entry( int (*fn)(void *), void *arg )
+{
+ int ret = (*fn)(arg);
+ _exit_thread(ret);
+}
+
+int
+clone(int (*fn)(void *), void *child_stack, int flags, void* arg, ...)
+{
+ va_list args;
+ int *parent_tidptr = NULL;
+ void *new_tls = NULL;
+ int *child_tidptr = NULL;
+ int ret;
+
+    /* extract optional parameters - they are cumulative */
+ va_start(args, arg);
+ if (flags & (CLONE_PARENT_SETTID|CLONE_SETTLS|CLONE_CHILD_SETTID)) {
+ parent_tidptr = va_arg(args, int*);
+ }
+ if (flags & (CLONE_SETTLS|CLONE_CHILD_SETTID)) {
+ new_tls = va_arg(args, void*);
+ }
+ if (flags & CLONE_CHILD_SETTID) {
+ child_tidptr = va_arg(args, int*);
+ }
+ va_end(args);
+
+ ret = __bionic_clone(flags, child_stack, parent_tidptr, new_tls, child_tidptr, fn, arg);
+ return ret;
+}
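
A hedged usage sketch for this clone() wrapper (ARM-only at this point, per the
warning above); child_main, run_child and the chosen flags are illustrative:

    /* Sketch only: runs child_main() in a new task sharing the parent's
     * address space. Error handling and stack sizing are simplified. */
    #include <sched.h>
    #include <signal.h>
    #include <stdio.h>
    #include <stdlib.h>

    static int child_main(void *arg) {
        printf("child got %d\n", *(int *) arg);
        return 0;
    }

    int run_child(void) {
        const size_t stack_size = 64 * 1024;
        char *stack = malloc(stack_size);
        static int arg = 42;

        if (stack == NULL)
            return -1;

        /* Stacks grow down, so pass the top of the allocation as child_stack.
         * No CLONE_PARENT_SETTID/CLONE_SETTLS/CLONE_CHILD_SETTID flags are set,
         * so none of the optional trailing arguments are needed. */
        return clone(child_main, stack + stack_size,
                     CLONE_VM | CLONE_FS | CLONE_FILES | SIGCHLD, &arg);
    }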
diff --git a/libc/bionic/clearenv.c b/libc/bionic/clearenv.c
--- /dev/null
+++ b/libc/bionic/clearenv.c
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+extern char** environ;
+
+int clearenv(void)
+{
+    char **P;
+
+    for (P = environ; *P; ++P)
+ *P = 0;
+ return 0;
+}
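
A hedged example of the usual clearenv() pattern, wiping the inherited
environment before handing a minimal, explicit one to a child process
(spawn_with_clean_env and the chosen variables are illustrative):

    #include <stdlib.h>
    #include <unistd.h>

    int spawn_with_clean_env(const char *path) {
        clearenv();                            /* drop everything we inherited */
        setenv("PATH", "/system/bin", 1);      /* re-add only what the child needs */
        execl(path, path, (char *) NULL);
        return -1;                             /* reached only if execl() failed */
    }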
diff --git a/libc/bionic/cpuacct.c b/libc/bionic/cpuacct.c
--- /dev/null
+++ b/libc/bionic/cpuacct.c
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include <unistd.h>
+#include <stdio.h>
+#include <errno.h>
+#include <sys/stat.h>
+//#include <sys/types.h>
+
+int cpuacct_add(uid_t uid)
+{
+ int count;
+ FILE *fp;
+ char buf[80];
+
+ count = snprintf(buf, sizeof(buf), "/acct/uid/%d/tasks", uid);
+ fp = fopen(buf, "w+");
+ if (!fp) {
+ /* Note: sizeof("tasks") returns 6, which includes the NULL char */
+ buf[count - sizeof("tasks")] = 0;
+ if (mkdir(buf, 0775) < 0)
+ return -errno;
+
+ /* Note: sizeof("tasks") returns 6, which includes the NULL char */
+ buf[count - sizeof("tasks")] = '/';
+ fp = fopen(buf, "w+");
+ }
+ if (!fp)
+ return -errno;
+
+ fprintf(fp, "0");
+ if (fclose(fp))
+ return -errno;
+
+ return 0;
+}
diff --git a/libc/bionic/dlmalloc.c b/libc/bionic/dlmalloc.c
index f6f878e8741a54bfdae37f4c265f5ac3c53e6a4f..19fbb752126a80ae79cc92e05e4af6e36f875add 100644 (file)
--- a/libc/bionic/dlmalloc.c
+++ b/libc/bionic/dlmalloc.c
size_t. The value is used only if HAVE_USR_INCLUDE_MALLOC_H is not set
REALLOC_ZERO_BYTES_FREES default: not defined
- This should be set if a call to realloc with zero bytes should
- be the same as a call to free. Some people think it should. Otherwise,
- since this malloc returns a unique pointer for malloc(0), so does
+ This should be set if a call to realloc with zero bytes should
+ be the same as a call to free. Some people think it should. Otherwise,
+ since this malloc returns a unique pointer for malloc(0), so does
realloc(p, 0).
LACKS_UNISTD_H, LACKS_FCNTL_H, LACKS_SYS_PARAM_H, LACKS_SYS_MMAN_H
/* ------------------- Declarations of public routines ------------------- */
/* Check an additional macro for the five primary functions */
-#if !defined(USE_DL_PREFIX) || !defined(MALLOC_LEAK_CHECK)
+#ifndef USE_DL_PREFIX
#define dlcalloc calloc
#define dlfree free
#define dlmalloc malloc
m->seg.sflags = mmap_flag;
m->magic = mparams.magic;
init_bins(m);
- if (is_global(m))
+ if (is_global(m))
init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
else {
/* Offset top by embedded malloc_state */
}
/* Unmap any unused mmapped segments */
- if (HAVE_MMAP)
+ if (HAVE_MMAP)
released += release_unused_segments(m);
/* On failure, disable autotrim to avoid repeated failed future calls */
while (a < alignment) a <<= 1;
alignment = a;
}
-
+
if (bytes >= MAX_REQUEST - alignment) {
if (m != 0) { /* Test isn't needed but avoids compiler warning */
MALLOC_FAILURE_ACTION;
Trial version Fri Aug 28 13:14:29 1992 Doug Lea (dl at g.oswego.edu)
* Based loosely on libg++-1.2X malloc. (It retains some of the overall
structure of old version, but most details differ.)
-
+
*/
diff --git a/libc/bionic/dlmalloc.h b/libc/bionic/dlmalloc.h
index e5f7d4a080c7110a4f033d2b6fb0e5e8abd6eae7..1b642d2896b5e416c1aa06c626ce621cddd40c2f 100644 (file)
--- a/libc/bionic/dlmalloc.h
+++ b/libc/bionic/dlmalloc.h
/*
Default header file for malloc-2.8.x, written by Doug Lea
and released to the public domain, as explained at
- http://creativecommons.org/licenses/publicdomain.
-
+ http://creativecommons.org/licenses/publicdomain.
+
last update: Mon Aug 15 08:55:52 2005 Doug Lea (dl at gee)
This header is for ANSI C/C++ only. You can set any of
the following #defines before including:
- * If USE_DL_PREFIX is defined, it is assumed that malloc.c
+ * If USE_DL_PREFIX is defined, it is assumed that malloc.c
was also compiled with this option, so all routines
have names starting with "dl".
#if !ONLY_MSPACES
/* Check an additional macro for the five primary functions */
-#if !defined(USE_DL_PREFIX) || !defined(MALLOC_LEAK_CHECK)
+#if !defined(USE_DL_PREFIX)
#define dlcalloc calloc
#define dlfree free
#define dlmalloc malloc
diff --git a/libc/bionic/err.c b/libc/bionic/err.c
--- /dev/null
+++ b/libc/bionic/err.c
@@ -0,0 +1,126 @@
+/*-
+ * Copyright (c) 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+#include <err.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <string.h>
+#include <errno.h>
+
+extern char *__progname;
+
+__noreturn void
+err(int eval, const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ verr(eval, fmt, ap);
+ va_end(ap);
+}
+
+__noreturn void
+errx(int eval, const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ verrx(eval, fmt, ap);
+ va_end(ap);
+}
+
+__noreturn void
+verr(int eval, const char *fmt, va_list ap)
+{
+ int sverrno;
+
+ sverrno = errno;
+ (void)fprintf(stderr, "%s: ", __progname);
+ if (fmt != NULL) {
+ (void)vfprintf(stderr, fmt, ap);
+ (void)fprintf(stderr, ": ");
+ }
+ (void)fprintf(stderr, "%s\n", strerror(sverrno));
+ exit(eval);
+}
+
+
+__noreturn void
+verrx(int eval, const char *fmt, va_list ap)
+{
+ (void)fprintf(stderr, "%s: ", __progname);
+ if (fmt != NULL)
+ (void)vfprintf(stderr, fmt, ap);
+ (void)fprintf(stderr, "\n");
+ exit(eval);
+}
+
+void
+warn(const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ vwarn(fmt, ap);
+ va_end(ap);
+}
+
+void
+warnx(const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ vwarnx(fmt, ap);
+ va_end(ap);
+}
+
+void
+vwarn(const char *fmt, va_list ap)
+{
+ int sverrno;
+
+ sverrno = errno;
+ (void)fprintf(stderr, "%s: ", __progname);
+ if (fmt != NULL) {
+ (void)vfprintf(stderr, fmt, ap);
+ (void)fprintf(stderr, ": ");
+ }
+ (void)fprintf(stderr, "%s\n", strerror(sverrno));
+}
+
+void
+vwarnx(const char *fmt, va_list ap)
+{
+ (void)fprintf(stderr, "%s: ", __progname);
+ if (fmt != NULL)
+ (void)vfprintf(stderr, fmt, ap);
+ (void)fprintf(stderr, "\n");
+}
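
A hedged usage sketch of the BSD err()/warn() family added above; check_config
and its messages are illustrative:

    #include <err.h>
    #include <fcntl.h>
    #include <unistd.h>

    void check_config(const char *path) {
        int fd = open(path, O_RDONLY);
        if (fd < 0)
            err(1, "cannot open %s", path);   /* "<progname>: cannot open ...: <errno text>", then exit(1) */
        if (lseek(fd, 0, SEEK_END) == 0)
            warnx("%s is empty", path);       /* warning only: no errno text, keeps running */
        close(fd);
    }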
diff --git a/libc/bionic/fdprintf.c b/libc/bionic/fdprintf.c
--- /dev/null
+++ b/libc/bionic/fdprintf.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+int vfdprintf(int fd, const char * __restrict format, __va_list ap)
+{
+ char *buf=0;
+ int ret;
+ ret = vasprintf(&buf, format, ap);
+ if (ret < 0)
+ goto end;
+
+ ret = write(fd, buf, ret);
+ free(buf);
+end:
+ return ret;
+}
+
+int fdprintf(int fd, const char * __restrict format, ...)
+{
+ __va_list ap;
+ int ret;
+
+ va_start(ap, format);
+ ret = vfdprintf(fd, format, ap);
+ va_end(ap);
+
+ return ret;
+}
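
A hedged example of fdprintf(), which is convenient when only a raw file
descriptor (a pipe, socket or log fd) is available rather than a stdio FILE*;
report_status is illustrative:

    extern int fdprintf(int fd, const char *format, ...);

    void report_status(int fd, const char *name, int code) {
        fdprintf(fd, "%s: exited with status %d\n", name, code);
    }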
diff --git a/libc/bionic/fork.c b/libc/bionic/fork.c
index 1c6a4ba05fa6c9c77f4d75b1e63e29831390b6a1..e20f548d0bc519f54ff4775ad4ea924628b022b5 100644 (file)
--- a/libc/bionic/fork.c
+++ b/libc/bionic/fork.c
ret = __fork();
if (ret != 0) { /* not a child process */
__timer_table_start_stop(0);
+ } else {
+ /*
+         * The newly created process must update CPU accounting.
+         * Call cpuacct_add with our uid; it adds the current
+         * task id to the accounting group for that uid.
+ */
+ cpuacct_add(getuid());
}
return ret;
}
diff --git a/libc/bionic/fts.c b/libc/bionic/fts.c
--- /dev/null
+++ b/libc/bionic/fts.c
@@ -0,0 +1,1041 @@
+/* $OpenBSD: fts.c,v 1.43 2009/08/27 16:19:27 millert Exp $ */
+
+/*-
+ * Copyright (c) 1990, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/stat.h>
+
+#include <dirent.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <fts.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#define MAX(a,b) ((a)>(b)?(a):(b))
+
+static FTSENT *fts_alloc(FTS *, char *, size_t);
+static FTSENT *fts_build(FTS *, int);
+static void fts_lfree(FTSENT *);
+static void fts_load(FTS *, FTSENT *);
+static size_t fts_maxarglen(char * const *);
+static void fts_padjust(FTS *, FTSENT *);
+static int fts_palloc(FTS *, size_t);
+static FTSENT *fts_sort(FTS *, FTSENT *, int);
+static u_short fts_stat(FTS *, FTSENT *, int);
+static int fts_safe_changedir(FTS *, FTSENT *, int, char *);
+
+#define ISDOT(a) (a[0] == '.' && (!a[1] || (a[1] == '.' && !a[2])))
+
+#define CLR(opt) (sp->fts_options &= ~(opt))
+#define ISSET(opt) (sp->fts_options & (opt))
+#define SET(opt) (sp->fts_options |= (opt))
+
+#define FCHDIR(sp, fd) (!ISSET(FTS_NOCHDIR) && fchdir(fd))
+
+/* fts_build flags */
+#define BCHILD 1 /* fts_children */
+#define BNAMES 2 /* fts_children, names only */
+#define BREAD 3 /* fts_read */
+
+FTS *
+fts_open(char * const *argv, int options,
+ int (*compar)(const FTSENT **, const FTSENT **))
+{
+ FTS *sp;
+ FTSENT *p, *root;
+ int nitems;
+ FTSENT *parent, *tmp;
+ size_t len;
+
+ /* Options check. */
+ if (options & ~FTS_OPTIONMASK) {
+ errno = EINVAL;
+ return (NULL);
+ }
+
+ /* Allocate/initialize the stream */
+ if ((sp = calloc(1, sizeof(FTS))) == NULL)
+ return (NULL);
+ sp->fts_compar = compar;
+ sp->fts_options = options;
+
+ /* Logical walks turn on NOCHDIR; symbolic links are too hard. */
+ if (ISSET(FTS_LOGICAL))
+ SET(FTS_NOCHDIR);
+
+ /*
+ * Start out with 1K of path space, and enough, in any case,
+ * to hold the user's paths.
+ */
+ if (fts_palloc(sp, MAX(fts_maxarglen(argv), MAXPATHLEN)))
+ goto mem1;
+
+ /* Allocate/initialize root's parent. */
+ if ((parent = fts_alloc(sp, "", 0)) == NULL)
+ goto mem2;
+ parent->fts_level = FTS_ROOTPARENTLEVEL;
+
+ /* Allocate/initialize root(s). */
+ for (root = NULL, nitems = 0; *argv; ++argv, ++nitems) {
+ /* Don't allow zero-length paths. */
+ if ((len = strlen(*argv)) == 0) {
+ errno = ENOENT;
+ goto mem3;
+ }
+
+ if ((p = fts_alloc(sp, *argv, len)) == NULL)
+ goto mem3;
+ p->fts_level = FTS_ROOTLEVEL;
+ p->fts_parent = parent;
+ p->fts_accpath = p->fts_name;
+ p->fts_info = fts_stat(sp, p, ISSET(FTS_COMFOLLOW));
+
+ /* Command-line "." and ".." are real directories. */
+ if (p->fts_info == FTS_DOT)
+ p->fts_info = FTS_D;
+
+ /*
+ * If comparison routine supplied, traverse in sorted
+ * order; otherwise traverse in the order specified.
+ */
+ if (compar) {
+ p->fts_link = root;
+ root = p;
+ } else {
+ p->fts_link = NULL;
+ if (root == NULL)
+ tmp = root = p;
+ else {
+ tmp->fts_link = p;
+ tmp = p;
+ }
+ }
+ }
+ if (compar && nitems > 1)
+ root = fts_sort(sp, root, nitems);
+
+ /*
+ * Allocate a dummy pointer and make fts_read think that we've just
+ * finished the node before the root(s); set p->fts_info to FTS_INIT
+ * so that everything about the "current" node is ignored.
+ */
+ if ((sp->fts_cur = fts_alloc(sp, "", 0)) == NULL)
+ goto mem3;
+ sp->fts_cur->fts_link = root;
+ sp->fts_cur->fts_info = FTS_INIT;
+
+ /*
+ * If using chdir(2), grab a file descriptor pointing to dot to ensure
+ * that we can get back here; this could be avoided for some paths,
+ * but almost certainly not worth the effort. Slashes, symbolic links,
+ * and ".." are all fairly nasty problems. Note, if we can't get the
+ * descriptor we run anyway, just more slowly.
+ */
+ if (!ISSET(FTS_NOCHDIR) && (sp->fts_rfd = open(".", O_RDONLY, 0)) < 0)
+ SET(FTS_NOCHDIR);
+
+ if (nitems == 0)
+ free(parent);
+
+ return (sp);
+
+mem3: fts_lfree(root);
+ free(parent);
+mem2: free(sp->fts_path);
+mem1: free(sp);
+ return (NULL);
+}
+
+static void
+fts_load(FTS *sp, FTSENT *p)
+{
+ size_t len;
+ char *cp;
+
+ /*
+ * Load the stream structure for the next traversal. Since we don't
+ * actually enter the directory until after the preorder visit, set
+ * the fts_accpath field specially so the chdir gets done to the right
+ * place and the user can access the first node. From fts_open it's
+ * known that the path will fit.
+ */
+ len = p->fts_pathlen = p->fts_namelen;
+ memmove(sp->fts_path, p->fts_name, len + 1);
+ if ((cp = strrchr(p->fts_name, '/')) && (cp != p->fts_name || cp[1])) {
+ len = strlen(++cp);
+ memmove(p->fts_name, cp, len + 1);
+ p->fts_namelen = len;
+ }
+ p->fts_accpath = p->fts_path = sp->fts_path;
+ sp->fts_dev = p->fts_dev;
+}
+
+int
+fts_close(FTS *sp)
+{
+ FTSENT *freep, *p;
+ int rfd, error = 0;
+
+ /*
+ * This still works if we haven't read anything -- the dummy structure
+ * points to the root list, so we step through to the end of the root
+ * list which has a valid parent pointer.
+ */
+ if (sp->fts_cur) {
+ for (p = sp->fts_cur; p->fts_level >= FTS_ROOTLEVEL;) {
+ freep = p;
+ p = p->fts_link ? p->fts_link : p->fts_parent;
+ free(freep);
+ }
+ free(p);
+ }
+
+ /* Stash the original directory fd if needed. */
+ rfd = ISSET(FTS_NOCHDIR) ? -1 : sp->fts_rfd;
+
+ /* Free up child linked list, sort array, path buffer, stream ptr.*/
+ if (sp->fts_child)
+ fts_lfree(sp->fts_child);
+ if (sp->fts_array)
+ free(sp->fts_array);
+ free(sp->fts_path);
+ free(sp);
+
+ /* Return to original directory, checking for error. */
+ if (rfd != -1) {
+ int saved_errno;
+ error = fchdir(rfd);
+ saved_errno = errno;
+ (void)close(rfd);
+ errno = saved_errno;
+ }
+
+ return (error);
+}
+
+/*
+ * Special case of "/" at the end of the path so that slashes aren't
+ * appended which would cause paths to be written as "....//foo".
+ */
+#define NAPPEND(p) \
+ (p->fts_path[p->fts_pathlen - 1] == '/' \
+ ? p->fts_pathlen - 1 : p->fts_pathlen)
+
+FTSENT *
+fts_read(FTS *sp)
+{
+ FTSENT *p, *tmp;
+ int instr;
+ char *t;
+ int saved_errno;
+
+ /* If finished or unrecoverable error, return NULL. */
+ if (sp->fts_cur == NULL || ISSET(FTS_STOP))
+ return (NULL);
+
+ /* Set current node pointer. */
+ p = sp->fts_cur;
+
+ /* Save and zero out user instructions. */
+ instr = p->fts_instr;
+ p->fts_instr = FTS_NOINSTR;
+
+ /* Any type of file may be re-visited; re-stat and re-turn. */
+ if (instr == FTS_AGAIN) {
+ p->fts_info = fts_stat(sp, p, 0);
+ return (p);
+ }
+
+ /*
+ * Following a symlink -- SLNONE test allows application to see
+ * SLNONE and recover. If indirecting through a symlink, we have to
+ * keep a pointer to the current location. If unable to get that
+ * pointer, follow fails.
+ */
+ if (instr == FTS_FOLLOW &&
+ (p->fts_info == FTS_SL || p->fts_info == FTS_SLNONE)) {
+ p->fts_info = fts_stat(sp, p, 1);
+ if (p->fts_info == FTS_D && !ISSET(FTS_NOCHDIR)) {
+ if ((p->fts_symfd = open(".", O_RDONLY, 0)) < 0) {
+ p->fts_errno = errno;
+ p->fts_info = FTS_ERR;
+ } else
+ p->fts_flags |= FTS_SYMFOLLOW;
+ }
+ return (p);
+ }
+
+ /* Directory in pre-order. */
+ if (p->fts_info == FTS_D) {
+ /* If skipped or crossed mount point, do post-order visit. */
+ if (instr == FTS_SKIP ||
+ (ISSET(FTS_XDEV) && p->fts_dev != sp->fts_dev)) {
+ if (p->fts_flags & FTS_SYMFOLLOW)
+ (void)close(p->fts_symfd);
+ if (sp->fts_child) {
+ fts_lfree(sp->fts_child);
+ sp->fts_child = NULL;
+ }
+ p->fts_info = FTS_DP;
+ return (p);
+ }
+
+ /* Rebuild if only read the names and now traversing. */
+ if (sp->fts_child && ISSET(FTS_NAMEONLY)) {
+ CLR(FTS_NAMEONLY);
+ fts_lfree(sp->fts_child);
+ sp->fts_child = NULL;
+ }
+
+ /*
+ * Cd to the subdirectory.
+ *
+ * If we have already read and now fail to chdir, whack the list
+ * to make the names come out right, and set the parent errno
+ * so the application will eventually get an error condition.
+ * Set the FTS_DONTCHDIR flag so that when we logically change
+ * directories back to the parent we don't do a chdir.
+ *
+ * If we haven't read yet, do so now. If the read fails, fts_build sets
+ * FTS_STOP or the fts_info field of the node.
+ */
+ if (sp->fts_child) {
+ if (fts_safe_changedir(sp, p, -1, p->fts_accpath)) {
+ p->fts_errno = errno;
+ p->fts_flags |= FTS_DONTCHDIR;
+ for (p = sp->fts_child; p; p = p->fts_link)
+ p->fts_accpath =
+ p->fts_parent->fts_accpath;
+ }
+ } else if ((sp->fts_child = fts_build(sp, BREAD)) == NULL) {
+ if (ISSET(FTS_STOP))
+ return (NULL);
+ return (p);
+ }
+ p = sp->fts_child;
+ sp->fts_child = NULL;
+ goto name;
+ }
+
+ /* Move to the next node on this level. */
+next: tmp = p;
+ if ((p = p->fts_link)) {
+ free(tmp);
+
+ /*
+ * If reached the top, return to the original directory (or
+ * the root of the tree), and load the paths for the next root.
+ */
+ if (p->fts_level == FTS_ROOTLEVEL) {
+ if (FCHDIR(sp, sp->fts_rfd)) {
+ SET(FTS_STOP);
+ return (NULL);
+ }
+ fts_load(sp, p);
+ return (sp->fts_cur = p);
+ }
+
+ /*
+ * User may have called fts_set on the node. If skipped,
+ * ignore. If followed, get a file descriptor so we can
+ * get back if necessary.
+ */
+ if (p->fts_instr == FTS_SKIP)
+ goto next;
+ if (p->fts_instr == FTS_FOLLOW) {
+ p->fts_info = fts_stat(sp, p, 1);
+ if (p->fts_info == FTS_D && !ISSET(FTS_NOCHDIR)) {
+ if ((p->fts_symfd =
+ open(".", O_RDONLY, 0)) < 0) {
+ p->fts_errno = errno;
+ p->fts_info = FTS_ERR;
+ } else
+ p->fts_flags |= FTS_SYMFOLLOW;
+ }
+ p->fts_instr = FTS_NOINSTR;
+ }
+
+name: t = sp->fts_path + NAPPEND(p->fts_parent);
+ *t++ = '/';
+ memmove(t, p->fts_name, p->fts_namelen + 1);
+ return (sp->fts_cur = p);
+ }
+
+ /* Move up to the parent node. */
+ p = tmp->fts_parent;
+ free(tmp);
+
+ if (p->fts_level == FTS_ROOTPARENTLEVEL) {
+ /*
+ * Done; free everything up and set errno to 0 so the user
+ * can distinguish between error and EOF.
+ */
+ free(p);
+ errno = 0;
+ return (sp->fts_cur = NULL);
+ }
+
+ /* NUL terminate the pathname. */
+ sp->fts_path[p->fts_pathlen] = '\0';
+
+ /*
+ * Return to the parent directory. If at a root node or came through
+ * a symlink, go back through the file descriptor. Otherwise, cd up
+ * one directory.
+ */
+ if (p->fts_level == FTS_ROOTLEVEL) {
+ if (FCHDIR(sp, sp->fts_rfd)) {
+ SET(FTS_STOP);
+ sp->fts_cur = p;
+ return (NULL);
+ }
+ } else if (p->fts_flags & FTS_SYMFOLLOW) {
+ if (FCHDIR(sp, p->fts_symfd)) {
+ saved_errno = errno;
+ (void)close(p->fts_symfd);
+ errno = saved_errno;
+ SET(FTS_STOP);
+ sp->fts_cur = p;
+ return (NULL);
+ }
+ (void)close(p->fts_symfd);
+ } else if (!(p->fts_flags & FTS_DONTCHDIR) &&
+ fts_safe_changedir(sp, p->fts_parent, -1, "..")) {
+ SET(FTS_STOP);
+ sp->fts_cur = p;
+ return (NULL);
+ }
+ p->fts_info = p->fts_errno ? FTS_ERR : FTS_DP;
+ return (sp->fts_cur = p);
+}
+
+/*
+ * Fts_set takes the stream as an argument although it's not used in this
+ * implementation; it would be necessary if anyone wanted to add global
+ * semantics to fts using fts_set. An error return is allowed for similar
+ * reasons.
+ */
+/* ARGSUSED */
+int
+fts_set(FTS *sp, FTSENT *p, int instr)
+{
+ if (instr && instr != FTS_AGAIN && instr != FTS_FOLLOW &&
+ instr != FTS_NOINSTR && instr != FTS_SKIP) {
+ errno = EINVAL;
+ return (1);
+ }
+ p->fts_instr = instr;
+ return (0);
+}
+
+FTSENT *
+fts_children(FTS *sp, int instr)
+{
+ FTSENT *p;
+ int fd;
+
+ if (instr && instr != FTS_NAMEONLY) {
+ errno = EINVAL;
+ return (NULL);
+ }
+
+ /* Set current node pointer. */
+ p = sp->fts_cur;
+
+ /*
+ * Errno set to 0 so user can distinguish empty directory from
+ * an error.
+ */
+ errno = 0;
+
+ /* Fatal errors stop here. */
+ if (ISSET(FTS_STOP))
+ return (NULL);
+
+ /* Return logical hierarchy of user's arguments. */
+ if (p->fts_info == FTS_INIT)
+ return (p->fts_link);
+
+ /*
+ * If not a directory being visited in pre-order, stop here. Could
+ * allow FTS_DNR, assuming the user has fixed the problem, but the
+ * same effect is available with FTS_AGAIN.
+ */
+ if (p->fts_info != FTS_D /* && p->fts_info != FTS_DNR */)
+ return (NULL);
+
+ /* Free up any previous child list. */
+ if (sp->fts_child)
+ fts_lfree(sp->fts_child);
+
+ if (instr == FTS_NAMEONLY) {
+ SET(FTS_NAMEONLY);
+ instr = BNAMES;
+ } else
+ instr = BCHILD;
+
+ /*
+ * If using chdir on a relative path and called BEFORE fts_read does
+ * its chdir to the root of a traversal, we can lose -- we need to
+ * chdir into the subdirectory, and we don't know where the current
+ * directory is, so we can't get back so that the upcoming chdir by
+ * fts_read will work.
+ */
+ if (p->fts_level != FTS_ROOTLEVEL || p->fts_accpath[0] == '/' ||
+ ISSET(FTS_NOCHDIR))
+ return (sp->fts_child = fts_build(sp, instr));
+
+ if ((fd = open(".", O_RDONLY, 0)) < 0)
+ return (NULL);
+ sp->fts_child = fts_build(sp, instr);
+ if (fchdir(fd)) {
+ (void)close(fd);
+ return (NULL);
+ }
+ (void)close(fd);
+ return (sp->fts_child);
+}
+
+/*
+ * This is the tricky part -- do not casually change *anything* in here. The
+ * idea is to build the linked list of entries that are used by fts_children
+ * and fts_read. There are lots of special cases.
+ *
+ * The real slowdown in walking the tree is the stat calls. If FTS_NOSTAT is
+ * set and it's a physical walk (so that symbolic links can't be directories),
+ * we can do things quickly. First, if it's a 4.4BSD file system, the type
+ * of the file is in the directory entry. Otherwise, we assume that the number
+ * of subdirectories in a node is equal to the number of links to the parent.
+ * The former skips all stat calls. The latter skips stat calls in any leaf
+ * directories and for any files after the subdirectories in the directory have
+ * been found, cutting the stat calls by about 2/3.
+ */
+static FTSENT *
+fts_build(FTS *sp, int type)
+{
+ struct dirent *dp;
+ FTSENT *p, *head;
+ FTSENT *cur, *tail;
+ DIR *dirp;
+ void *oldaddr;
+ size_t len, maxlen;
+ int nitems, cderrno, descend, level, nlinks, nostat, doadjust;
+ int saved_errno;
+ char *cp;
+
+ /* Set current node pointer. */
+ cur = sp->fts_cur;
+
+ /*
+ * Open the directory for reading. If this fails, we're done.
+ * If being called from fts_read, set the fts_info field.
+ */
+ if ((dirp = opendir(cur->fts_accpath)) == NULL) {
+ if (type == BREAD) {
+ cur->fts_info = FTS_DNR;
+ cur->fts_errno = errno;
+ }
+ return (NULL);
+ }
+
+ /*
+ * Nlinks is the number of possible entries of type directory in the
+ * directory if we're cheating on stat calls, 0 if we're not doing
+ * any stat calls at all, -1 if we're doing stats on everything.
+ */
+ if (type == BNAMES)
+ nlinks = 0;
+ else if (ISSET(FTS_NOSTAT) && ISSET(FTS_PHYSICAL)) {
+ nlinks = cur->fts_nlink - (ISSET(FTS_SEEDOT) ? 0 : 2);
+ nostat = 1;
+ } else {
+ nlinks = -1;
+ nostat = 0;
+ }
+
+#ifdef notdef
+ (void)printf("nlinks == %d (cur: %u)\n", nlinks, cur->fts_nlink);
+ (void)printf("NOSTAT %d PHYSICAL %d SEEDOT %d\n",
+ ISSET(FTS_NOSTAT), ISSET(FTS_PHYSICAL), ISSET(FTS_SEEDOT));
+#endif
+ /*
+ * If we're going to need to stat anything or we want to descend
+ * and stay in the directory, chdir. If this fails we keep going,
+ * but set a flag so we don't chdir after the post-order visit.
+ * We won't be able to stat anything, but we can still return the
+ * names themselves. Note, that since fts_read won't be able to
+ * chdir into the directory, it will have to return different path
+ * names than before, i.e. "a/b" instead of "b". Since the node
+ * has already been visited in pre-order, have to wait until the
+ * post-order visit to return the error. There is a special case
+ * here, if there was nothing to stat then it's not an error to
+ * not be able to stat. This is all fairly nasty. If a program
+ * needed sorted entries or stat information, they had better be
+ * checking FTS_NS on the returned nodes.
+ */
+ cderrno = 0;
+ if (nlinks || type == BREAD) {
+ if (fts_safe_changedir(sp, cur, dirfd(dirp), NULL)) {
+ if (nlinks && type == BREAD)
+ cur->fts_errno = errno;
+ cur->fts_flags |= FTS_DONTCHDIR;
+ descend = 0;
+ cderrno = errno;
+ (void)closedir(dirp);
+ dirp = NULL;
+ } else
+ descend = 1;
+ } else
+ descend = 0;
+
+ /*
+ * Figure out the max file name length that can be stored in the
+ * current path -- the inner loop allocates more path as necessary.
+ * We really wouldn't have to do the maxlen calculations here, we
+ * could do them in fts_read before returning the path, but it's a
+ * lot easier here since the length is part of the dirent structure.
+ *
+ * If not changing directories, set a pointer so that we can just append
+ * each new name into the path.
+ */
+ len = NAPPEND(cur);
+ if (ISSET(FTS_NOCHDIR)) {
+ cp = sp->fts_path + len;
+ *cp++ = '/';
+ }
+ len++;
+ maxlen = sp->fts_pathlen - len;
+
+ /*
+ * fts_level is a short so we must prevent it from wrapping
+ * around to FTS_ROOTLEVEL and FTS_ROOTPARENTLEVEL.
+ */
+ level = cur->fts_level;
+ if (level < FTS_MAXLEVEL)
+ level++;
+
+ /* Read the directory, attaching each entry to the `link' pointer. */
+ doadjust = 0;
+ for (head = tail = NULL, nitems = 0; dirp && (dp = readdir(dirp));) {
+ if (!ISSET(FTS_SEEDOT) && ISDOT(dp->d_name))
+ continue;
+
+ if (!(p = fts_alloc(sp, dp->d_name, strlen(dp->d_name))))
+ goto mem1;
+ if (strlen(dp->d_name) >= maxlen) { /* include space for NUL */
+ oldaddr = sp->fts_path;
+ if (fts_palloc(sp, strlen(dp->d_name) + len + 1)) {
+ /*
+ * No more memory for path or structures. Save
+ * errno, free up the current structure and the
+ * structures already allocated.
+ */
+mem1: saved_errno = errno;
+ if (p)
+ free(p);
+ fts_lfree(head);
+ (void)closedir(dirp);
+ cur->fts_info = FTS_ERR;
+ SET(FTS_STOP);
+ errno = saved_errno;
+ return (NULL);
+ }
+ /* Did realloc() change the pointer? */
+ if (oldaddr != sp->fts_path) {
+ doadjust = 1;
+ if (ISSET(FTS_NOCHDIR))
+ cp = sp->fts_path + len;
+ }
+ maxlen = sp->fts_pathlen - len;
+ }
+
+ p->fts_level = level;
+ p->fts_parent = sp->fts_cur;
+ p->fts_pathlen = len + strlen(dp->d_name);
+ if (p->fts_pathlen < len) {
+ /*
+ * If we wrap, free up the current structure and
+ * the structures already allocated, then error
+ * out with ENAMETOOLONG.
+ */
+ free(p);
+ fts_lfree(head);
+ (void)closedir(dirp);
+ cur->fts_info = FTS_ERR;
+ SET(FTS_STOP);
+ errno = ENAMETOOLONG;
+ return (NULL);
+ }
+
+ if (cderrno) {
+ if (nlinks) {
+ p->fts_info = FTS_NS;
+ p->fts_errno = cderrno;
+ } else
+ p->fts_info = FTS_NSOK;
+ p->fts_accpath = cur->fts_accpath;
+ } else if (nlinks == 0
+#ifdef DT_DIR
+ || (nostat &&
+ dp->d_type != DT_DIR && dp->d_type != DT_UNKNOWN)
+#endif
+ ) {
+ p->fts_accpath =
+ ISSET(FTS_NOCHDIR) ? p->fts_path : p->fts_name;
+ p->fts_info = FTS_NSOK;
+ } else {
+ /* Build a file name for fts_stat to stat. */
+ if (ISSET(FTS_NOCHDIR)) {
+ p->fts_accpath = p->fts_path;
+ memmove(cp, p->fts_name, p->fts_namelen + 1);
+ } else
+ p->fts_accpath = p->fts_name;
+ /* Stat it. */
+ p->fts_info = fts_stat(sp, p, 0);
+
+ /* Decrement link count if applicable. */
+ if (nlinks > 0 && (p->fts_info == FTS_D ||
+ p->fts_info == FTS_DC || p->fts_info == FTS_DOT))
+ --nlinks;
+ }
+
+ /* We walk in directory order so "ls -f" doesn't get upset. */
+ p->fts_link = NULL;
+ if (head == NULL)
+ head = tail = p;
+ else {
+ tail->fts_link = p;
+ tail = p;
+ }
+ ++nitems;
+ }
+ if (dirp)
+ (void)closedir(dirp);
+
+ /*
+ * If realloc() changed the address of the path, adjust the
+ * addresses for the rest of the tree and the dir list.
+ */
+ if (doadjust)
+ fts_padjust(sp, head);
+
+ /*
+ * If not changing directories, reset the path back to original
+ * state.
+ */
+ if (ISSET(FTS_NOCHDIR)) {
+ if (len == sp->fts_pathlen || nitems == 0)
+ --cp;
+ *cp = '\0';
+ }
+
+ /*
+ * If descended after called from fts_children or after called from
+ * fts_read and nothing found, get back. At the root level we use
+ * the saved fd; if one of fts_open()'s arguments is a relative path
+ * to an empty directory, we wind up here with no other way back. If
+ * can't get back, we're done.
+ */
+ if (descend && (type == BCHILD || !nitems) &&
+ (cur->fts_level == FTS_ROOTLEVEL ? FCHDIR(sp, sp->fts_rfd) :
+ fts_safe_changedir(sp, cur->fts_parent, -1, ".."))) {
+ cur->fts_info = FTS_ERR;
+ SET(FTS_STOP);
+ return (NULL);
+ }
+
+ /* If didn't find anything, return NULL. */
+ if (!nitems) {
+ if (type == BREAD)
+ cur->fts_info = FTS_DP;
+ return (NULL);
+ }
+
+ /* Sort the entries. */
+ if (sp->fts_compar && nitems > 1)
+ head = fts_sort(sp, head, nitems);
+ return (head);
+}
+
+static u_short
+fts_stat(FTS *sp, FTSENT *p, int follow)
+{
+ FTSENT *t;
+ dev_t dev;
+ ino_t ino;
+ struct stat *sbp, sb;
+ int saved_errno;
+
+ /* If user needs stat info, stat buffer already allocated. */
+ sbp = ISSET(FTS_NOSTAT) ? &sb : p->fts_statp;
+
+ /*
+ * If doing a logical walk, or application requested FTS_FOLLOW, do
+ * a stat(2). If that fails, check for a non-existent symlink. If
+ * fail, set the errno from the stat call.
+ */
+ if (ISSET(FTS_LOGICAL) || follow) {
+ if (stat(p->fts_accpath, sbp)) {
+ saved_errno = errno;
+ if (!lstat(p->fts_accpath, sbp)) {
+ errno = 0;
+ return (FTS_SLNONE);
+ }
+ p->fts_errno = saved_errno;
+ goto err;
+ }
+ } else if (lstat(p->fts_accpath, sbp)) {
+ p->fts_errno = errno;
+err: memset(sbp, 0, sizeof(struct stat));
+ return (FTS_NS);
+ }
+
+ if (S_ISDIR(sbp->st_mode)) {
+ /*
+ * Set the device/inode. Used to find cycles and check for
+ * crossing mount points. Also remember the link count, used
+ * in fts_build to limit the number of stat calls. It is
+ * understood that these fields are only referenced if fts_info
+ * is set to FTS_D.
+ */
+ dev = p->fts_dev = sbp->st_dev;
+ ino = p->fts_ino = sbp->st_ino;
+ p->fts_nlink = sbp->st_nlink;
+
+ if (ISDOT(p->fts_name))
+ return (FTS_DOT);
+
+ /*
+ * Cycle detection is done by brute force when the directory
+ * is first encountered. If the tree gets deep enough or the
+ * number of symbolic links to directories is high enough,
+ * something faster might be worthwhile.
+ */
+ for (t = p->fts_parent;
+ t->fts_level >= FTS_ROOTLEVEL; t = t->fts_parent)
+ if (ino == t->fts_ino && dev == t->fts_dev) {
+ p->fts_cycle = t;
+ return (FTS_DC);
+ }
+ return (FTS_D);
+ }
+ if (S_ISLNK(sbp->st_mode))
+ return (FTS_SL);
+ if (S_ISREG(sbp->st_mode))
+ return (FTS_F);
+ return (FTS_DEFAULT);
+}
+
+static FTSENT *
+fts_sort(FTS *sp, FTSENT *head, int nitems)
+{
+ FTSENT **ap, *p;
+
+ /*
+ * Construct an array of pointers to the structures and call qsort(3).
+ * Reassemble the array in the order returned by qsort. If unable to
+ * sort for memory reasons, return the directory entries in their
+ * current order. Allocate enough space for the current needs plus
+ * 40 so we don't realloc one entry at a time.
+ */
+ if (nitems > sp->fts_nitems) {
+ struct _ftsent **a;
+
+ sp->fts_nitems = nitems + 40;
+ if ((a = realloc(sp->fts_array,
+ sp->fts_nitems * sizeof(FTSENT *))) == NULL) {
+ if (sp->fts_array)
+ free(sp->fts_array);
+ sp->fts_array = NULL;
+ sp->fts_nitems = 0;
+ return (head);
+ }
+ sp->fts_array = a;
+ }
+ for (ap = sp->fts_array, p = head; p; p = p->fts_link)
+ *ap++ = p;
+ qsort((void *)sp->fts_array, nitems, sizeof(FTSENT *), sp->fts_compar);
+ for (head = *(ap = sp->fts_array); --nitems; ++ap)
+ ap[0]->fts_link = ap[1];
+ ap[0]->fts_link = NULL;
+ return (head);
+}
+
+static FTSENT *
+fts_alloc(FTS *sp, char *name, size_t namelen)
+{
+ FTSENT *p;
+ size_t len;
+
+ /*
+ * The file name is a variable length array and no stat structure is
+ * necessary if the user has set the nostat bit. Allocate the FTSENT
+ * structure, the file name and the stat structure in one chunk, but
+ * be careful that the stat structure is reasonably aligned. Since the
+ * fts_name field is declared to be of size 1, the fts_name pointer is
+ * namelen + 2 before the first possible address of the stat structure.
+ */
+ len = sizeof(FTSENT) + namelen;
+ if (!ISSET(FTS_NOSTAT))
+ len += sizeof(struct stat) + ALIGNBYTES;
+ if ((p = malloc(len)) == NULL)
+ return (NULL);
+
+ memset(p, 0, len);
+ p->fts_path = sp->fts_path;
+ p->fts_namelen = namelen;
+ p->fts_instr = FTS_NOINSTR;
+ if (!ISSET(FTS_NOSTAT))
+ p->fts_statp = (struct stat *)ALIGN(p->fts_name + namelen + 2);
+ memcpy(p->fts_name, name, namelen);
+
+ return (p);
+}
+
+static void
+fts_lfree(FTSENT *head)
+{
+ FTSENT *p;
+
+ /* Free a linked list of structures. */
+ while ((p = head)) {
+ head = head->fts_link;
+ free(p);
+ }
+}
+
+/*
+ * Allow essentially unlimited paths; find, rm, ls should all work on any tree.
+ * Most systems will allow creation of paths much longer than MAXPATHLEN, even
+ * though the kernel won't resolve them. Add the size (not just what's needed)
+ * plus 256 bytes so we don't realloc the path 2 bytes at a time.
+ */
+static int
+fts_palloc(FTS *sp, size_t more)
+{
+ char *p;
+
+ /*
+ * Check for possible wraparound.
+ */
+ more += 256;
+ if (sp->fts_pathlen + more < sp->fts_pathlen) {
+ if (sp->fts_path)
+ free(sp->fts_path);
+ sp->fts_path = NULL;
+ errno = ENAMETOOLONG;
+ return (1);
+ }
+ sp->fts_pathlen += more;
+ p = realloc(sp->fts_path, sp->fts_pathlen);
+ if (p == NULL) {
+ if (sp->fts_path)
+ free(sp->fts_path);
+ sp->fts_path = NULL;
+ return (1);
+ }
+ sp->fts_path = p;
+ return (0);
+}
+
+/*
+ * When the path is realloc'd, have to fix all of the pointers in structures
+ * already returned.
+ */
+static void
+fts_padjust(FTS *sp, FTSENT *head)
+{
+ FTSENT *p;
+ char *addr = sp->fts_path;
+
+#define ADJUST(p) { \
+ if ((p)->fts_accpath != (p)->fts_name) { \
+ (p)->fts_accpath = \
+ (char *)addr + ((p)->fts_accpath - (p)->fts_path); \
+ } \
+ (p)->fts_path = addr; \
+}
+ /* Adjust the current set of children. */
+ for (p = sp->fts_child; p; p = p->fts_link)
+ ADJUST(p);
+
+ /* Adjust the rest of the tree, including the current level. */
+ for (p = head; p->fts_level >= FTS_ROOTLEVEL;) {
+ ADJUST(p);
+ p = p->fts_link ? p->fts_link : p->fts_parent;
+ }
+}
+
+static size_t
+fts_maxarglen(char * const *argv)
+{
+ size_t len, max;
+
+ for (max = 0; *argv; ++argv)
+ if ((len = strlen(*argv)) > max)
+ max = len;
+ return (max + 1);
+}
+
+/*
+ * Change to dir specified by fd or p->fts_accpath without getting
+ * tricked by someone changing the world out from underneath us.
+ * Assumes p->fts_dev and p->fts_ino are filled in.
+ */
+static int
+fts_safe_changedir(FTS *sp, FTSENT *p, int fd, char *path)
+{
+ int ret, oerrno, newfd;
+ struct stat sb;
+
+ newfd = fd;
+ if (ISSET(FTS_NOCHDIR))
+ return (0);
+ if (fd < 0 && (newfd = open(path, O_RDONLY, 0)) < 0)
+ return (-1);
+ if (fstat(newfd, &sb)) {
+ ret = -1;
+ goto bail;
+ }
+ if (p->fts_dev != sb.st_dev || p->fts_ino != sb.st_ino) {
+ errno = ENOENT; /* disinformation */
+ ret = -1;
+ goto bail;
+ }
+ ret = fchdir(newfd);
+bail:
+ oerrno = errno;
+ if (fd < 0)
+ (void)close(newfd);
+ errno = oerrno;
+ return (ret);
+}
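The file above gives bionic a complete BSD fts(3) traversal engine. As a hedged illustration only (walk_tree and its body are assumptions, not part of this change), a typical caller drives the fts_open / fts_read / fts_close API roughly like this:

#include <fts.h>
#include <stdio.h>

static int walk_tree(char *root)
{
    char *argv[] = { root, NULL };
    FTS *ftsp;
    FTSENT *ent;

    /* FTS_PHYSICAL: do not follow symlinks; pass a comparison function
     * instead of NULL to get a sorted traversal. */
    if ((ftsp = fts_open(argv, FTS_PHYSICAL, NULL)) == NULL)
        return (-1);
    while ((ent = fts_read(ftsp)) != NULL) {
        switch (ent->fts_info) {
        case FTS_F:                     /* regular file */
            printf("%s\n", ent->fts_path);
            break;
        case FTS_DNR:                   /* unreadable directory */
        case FTS_NS:                    /* stat(2) failed */
            fprintf(stderr, "%s: error %d\n", ent->fts_path, ent->fts_errno);
            break;
        default:                        /* FTS_D / FTS_DP / symlinks / ... */
            break;
        }
    }
    return (fts_close(ftsp));
}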
index b479b272f63af5d6f86cef5f856981c4e9fa6de4..682ebcfd391fdfb8fc2d0ae2dd364bcaddda9ea6 100644 (file)
#include "libc_init_common.h"
#include <bionic_tls.h>
-extern void malloc_debug_init();
-
/* We flag the __libc_preinit function as a constructor to ensure
* that its address is listed in libc.so's .init_array section.
* This ensures that the function is called by the dynamic linker
__libc_init_common(elfdata);
-#ifdef MALLOC_LEAK_CHECK
- /* setup malloc leak checker, requires system properties */
+ /* Set up the malloc routines according to the environment.
+ * Requires system properties.
+ */
extern void malloc_debug_init(void);
malloc_debug_init();
-#endif
-
}
__noreturn void __libc_init(uintptr_t *elfdata,
index e6264bbc58ed46d6416e7f07c84e6fde36da7f4d..d097b6baf29e71a4e739387f25c9b0d5cae6b18c 100644 (file)
/* Initialize the C runtime environment */
__libc_init_common(elfdata);
-#ifdef MALLOC_LEAK_CHECK
- /* setup malloc leak checker, requires system properties */
- extern void malloc_debug_init(void);
- malloc_debug_init();
-#endif
-
/* Several Linux ABIs don't pass the onexit pointer, and the ones that
* do never use it. Therefore, we ignore it.
*/
index 333642875a734a506d365718d169fc3f7c1024fd..2c5bf42f34f18ac8d199a21617d4162c5d21db47 100644 (file)
--- a/libc/bionic/logd_write.c
+++ b/libc/bionic/logd_write.c
static pthread_mutex_t log_init_lock = PTHREAD_MUTEX_INITIALIZER;
-log_channel_t log_channels[LOG_ID_MAX] = {
+static log_channel_t log_channels[LOG_ID_MAX] = {
{ __write_to_log_null, -1, NULL },
{ __write_to_log_init, -1, "/dev/"LOGGER_LOG_MAIN },
{ __write_to_log_init, -1, "/dev/"LOGGER_LOG_RADIO }
log_channels[log_id].logger =
(fd < 0) ? __write_to_log_null : __write_to_log_kernel;
+ log_channels[log_id].fd = fd;
pthread_mutex_unlock(&log_init_lock);
diff --git a/libc/bionic/malloc_debug_common.c b/libc/bionic/malloc_debug_common.c
--- /dev/null
@@ -0,0 +1,488 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Contains the definitions of structures and global variables, and the
+ * implementations of routines, used by the malloc leak detection code and by
+ * other components in the system. The trick is that some components expect
+ * this data and these routines to be defined / implemented in libc.so,
+ * regardless of whether or not the MALLOC_LEAK_CHECK macro is defined. To
+ * make things even trickier, the malloc leak detection code, implemented in
+ * libc_malloc_debug.so, also requires access to these variables and routines
+ * (to fill the allocation entry hash table, for example). So, all relevant
+ * variables and routines are defined / implemented here and exported to both
+ * the leak detection code and the other components via dynamic (libc.so) or
+ * static (libc.a) linking.
+ */
+
+#include <stdlib.h>
+#include <pthread.h>
+#include <unistd.h>
+#include "dlmalloc.h"
+#include "malloc_debug_common.h"
+
+/*
+ * In a VM process, this is set to 1 after fork()ing out of zygote.
+ */
+int gMallocLeakZygoteChild = 0;
+
+pthread_mutex_t gAllocationsMutex = PTHREAD_MUTEX_INITIALIZER;
+HashTable gHashTable;
+
+// =============================================================================
+// output functions
+// =============================================================================
+
+static int hash_entry_compare(const void* arg1, const void* arg2)
+{
+ HashEntry* e1 = *(HashEntry**)arg1;
+ HashEntry* e2 = *(HashEntry**)arg2;
+
+ size_t nbAlloc1 = e1->allocations;
+ size_t nbAlloc2 = e2->allocations;
+ size_t size1 = e1->size & ~SIZE_FLAG_MASK;
+ size_t size2 = e2->size & ~SIZE_FLAG_MASK;
+ size_t alloc1 = nbAlloc1 * size1;
+ size_t alloc2 = nbAlloc2 * size2;
+
+ // sort in descending order by:
+ // 1) total size
+ // 2) number of allocations
+ //
+ // This is used for sorting, not determination of equality, so we don't
+ // need to compare the bit flags.
+ int result;
+ if (alloc1 > alloc2) {
+ result = -1;
+ } else if (alloc1 < alloc2) {
+ result = 1;
+ } else {
+ if (nbAlloc1 > nbAlloc2) {
+ result = -1;
+ } else if (nbAlloc1 < nbAlloc2) {
+ result = 1;
+ } else {
+ result = 0;
+ }
+ }
+ return result;
+}
+
+/*
+ * Retrieve native heap information.
+ *
+ * "*info" is set to a buffer we allocate
+ * "*overallSize" is set to the size of the "info" buffer
+ * "*infoSize" is set to the size of a single entry
+ * "*totalMemory" is set to the sum of all allocations we're tracking; does
+ * not include heap overhead
+ * "*backtraceSize" is set to the maximum number of entries in the back trace
+ */
+void get_malloc_leak_info(uint8_t** info, size_t* overallSize,
+ size_t* infoSize, size_t* totalMemory, size_t* backtraceSize)
+{
+ // don't do anything if we have invalid arguments
+ if (info == NULL || overallSize == NULL || infoSize == NULL ||
+ totalMemory == NULL || backtraceSize == NULL) {
+ return;
+ }
+
+ pthread_mutex_lock(&gAllocationsMutex);
+
+ if (gHashTable.count == 0) {
+ *info = NULL;
+ *overallSize = 0;
+ *infoSize = 0;
+ *totalMemory = 0;
+ *backtraceSize = 0;
+ goto done;
+ }
+
+ void** list = (void**)dlmalloc(sizeof(void*) * gHashTable.count);
+
+ // get the entries into an array to be sorted
+ int index = 0;
+ int i;
+ for (i = 0 ; i < HASHTABLE_SIZE ; i++) {
+ HashEntry* entry = gHashTable.slots[i];
+ while (entry != NULL) {
+ list[index] = entry;
+ *totalMemory = *totalMemory +
+ ((entry->size & ~SIZE_FLAG_MASK) * entry->allocations);
+ index++;
+ entry = entry->next;
+ }
+ }
+
+ // XXX: the protocol doesn't allow variable size for the stack trace (yet)
+ *infoSize = (sizeof(size_t) * 2) + (sizeof(intptr_t) * BACKTRACE_SIZE);
+ *overallSize = *infoSize * gHashTable.count;
+ *backtraceSize = BACKTRACE_SIZE;
+
+ // now get a byte array big enough for this
+ *info = (uint8_t*)dlmalloc(*overallSize);
+
+ if (*info == NULL) {
+ *overallSize = 0;
+ goto done;
+ }
+
+ qsort((void*)list, gHashTable.count, sizeof(void*), hash_entry_compare);
+
+ uint8_t* head = *info;
+ const int count = gHashTable.count;
+ for (i = 0 ; i < count ; i++) {
+ HashEntry* entry = list[i];
+ size_t entrySize = (sizeof(size_t) * 2) + (sizeof(intptr_t) * entry->numEntries);
+ if (entrySize < *infoSize) {
+ /* we're writing less than a full entry, clear out the rest */
+ /* TODO: only clear out the part we're not overwriting? */
+ memset(head, 0, *infoSize);
+ } else {
+ /* make sure the amount we're copying doesn't exceed the limit */
+ entrySize = *infoSize;
+ }
+ memcpy(head, &(entry->size), entrySize);
+ head += *infoSize;
+ }
+
+ dlfree(list);
+
+done:
+ pthread_mutex_unlock(&gAllocationsMutex);
+}
+
+void free_malloc_leak_info(uint8_t* info)
+{
+ dlfree(info);
+}
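A sketch only (dump_native_heap is a hypothetical caller; the per-entry layout of size, then allocation count, then BACKTRACE_SIZE frame slots is read off the packing code above) of how these two routines might be consumed:

#include <stdint.h>
#include "malloc_debug_common.h"   /* for SIZE_FLAG_MASK */

extern void get_malloc_leak_info(uint8_t** info, size_t* overallSize,
        size_t* infoSize, size_t* totalMemory, size_t* backtraceSize);
extern void free_malloc_leak_info(uint8_t* info);

static void dump_native_heap(void)
{
    uint8_t *info, *p;
    size_t overall, entry_size, total, frames;

    get_malloc_leak_info(&info, &overall, &entry_size, &total, &frames);
    if (info == NULL)
        return;
    /* entries are sorted by total size, descending */
    for (p = info; p < info + overall; p += entry_size) {
        size_t size        = ((size_t*)p)[0] & ~SIZE_FLAG_MASK;  /* bytes per allocation */
        size_t allocations = ((size_t*)p)[1];                    /* number of allocations */
        /* backtrace frames follow at (intptr_t*)(p + 2 * sizeof(size_t)) */
        (void)size;
        (void)allocations;
    }
    free_malloc_leak_info(info);
}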
+
+struct mallinfo mallinfo()
+{
+ return dlmallinfo();
+}
+
+void* valloc(size_t bytes) {
+ /* page-align the allocation using the system page size */
+ return memalign( getpagesize(), bytes );
+}
+
+/* Support for malloc debugging.
+ * Note that if USE_DL_PREFIX is not defined, it's assumed that memory
+ * allocation routines are implemented somewhere else, so none of our custom
+ * malloc routines should be compiled at all.
+ */
+#ifdef USE_DL_PREFIX
+
+/* Table for dispatching malloc calls, initialized with default dispatchers. */
+const MallocDebug __libc_malloc_default_dispatch __attribute__((aligned(32))) =
+{
+ dlmalloc, dlfree, dlcalloc, dlrealloc, dlmemalign
+};
+
+/* Selector of dispatch table to use for dispatching malloc calls. */
+const MallocDebug* __libc_malloc_dispatch = &__libc_malloc_default_dispatch;
+
+void* malloc(size_t bytes) {
+ return __libc_malloc_dispatch->malloc(bytes);
+}
+void free(void* mem) {
+ __libc_malloc_dispatch->free(mem);
+}
+void* calloc(size_t n_elements, size_t elem_size) {
+ return __libc_malloc_dispatch->calloc(n_elements, elem_size);
+}
+void* realloc(void* oldMem, size_t bytes) {
+ return __libc_malloc_dispatch->realloc(oldMem, bytes);
+}
+void* memalign(size_t alignment, size_t bytes) {
+ return __libc_malloc_dispatch->memalign(alignment, bytes);
+}
+
+/* We implement malloc debugging only in libc.so, so the code below
+ * must be excluded if we compile this file for the static libc.a
+ */
+#ifndef LIBC_STATIC
+#include <sys/system_properties.h>
+#include <dlfcn.h>
+#include "logd.h"
+
+// =============================================================================
+// log functions
+// =============================================================================
+
+#define debug_log(format, ...) \
+ __libc_android_log_print(ANDROID_LOG_DEBUG, "libc", (format), ##__VA_ARGS__ )
+#define error_log(format, ...) \
+ __libc_android_log_print(ANDROID_LOG_ERROR, "libc", (format), ##__VA_ARGS__ )
+#define info_log(format, ...) \
+ __libc_android_log_print(ANDROID_LOG_INFO, "libc", (format), ##__VA_ARGS__ )
+
+/* Table for dispatching malloc calls, depending on environment. */
+static MallocDebug gMallocUse __attribute__((aligned(32))) = {
+ dlmalloc, dlfree, dlcalloc, dlrealloc, dlmemalign
+};
+
+extern char* __progname;
+
+/* Handle to the shared library where the actual memory allocation is
+ * implemented. This library is loaded, and memory allocation calls are
+ * redirected there, when the libc.debug.malloc system property contains a
+ * value other than zero:
+ * 1 - For memory leak detection.
+ * 5 - For filling allocated / freed memory with patterns defined by the
+ * CHK_SENTINEL_VALUE and CHK_FILL_FREE macros.
+ * 10 - For adding pre- and post-allocation stubs in order to detect
+ * buffer overruns.
+ * Note that the emulator's memory allocation instrumentation is not controlled
+ * by the libc.debug.malloc value but by starting the emulator with the
+ * -memcheck option. If the emulator has been started with -memcheck, its
+ * instrumented memory allocation takes precedence and the libc.debug.malloc
+ * value is ignored.
+ * The actual functionality for debug levels 1-10 is implemented in
+ * libc_malloc_debug_leak.so, while the functionality for the emulator's
+ * instrumented allocations is implemented in libc_malloc_debug_qemu.so and can
+ * only run inside the emulator.
+ */
+static void* libc_malloc_impl_handle = NULL;
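Usage note (a hedged assumption, not part of this change): because malloc_init_impl() reads libc.debug.malloc through __system_property_get() at process initialization, the property has to be set before the target process starts; on a device that typically means something like adb shell setprop libc.debug.malloc 1 (or 5, or 10) followed by restarting the process, after which the matching debug .so listed above is loaded with dlopen().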
+
+/* Make sure MALLOC_ALIGNMENT matches the value used in dlmalloc. The
+ * emulator's memchecker needs this value to properly align its guarding
+ * zones.
+ */
+#ifndef MALLOC_ALIGNMENT
+#define MALLOC_ALIGNMENT ((size_t)8U)
+#endif /* MALLOC_ALIGNMENT */
+
+/* Initializes memory allocation framework once per process. */
+static void malloc_init_impl(void)
+{
+ const char* so_name = NULL;
+ MallocDebugInit malloc_debug_initialize = NULL;
+ unsigned int qemu_running = 0;
+ unsigned int debug_level = 0;
+ unsigned int memcheck_enabled = 0;
+ char env[PROP_VALUE_MAX];
+ char memcheck_tracing[PROP_VALUE_MAX];
+
+ /* Get the custom malloc debug level. Note that an emulator started with
+ * the memory checking option takes priority over the debug level set in
+ * the libc.debug.malloc system property. */
+ if (__system_property_get("ro.kernel.qemu", env) && atoi(env)) {
+ qemu_running = 1;
+ if (__system_property_get("ro.kernel.memcheck", memcheck_tracing)) {
+ if (memcheck_tracing[0] != '0') {
+ // Emulator has started with memory tracing enabled. Enforce it.
+ debug_level = 20;
+ memcheck_enabled = 1;
+ }
+ }
+ }
+
+ /* If the debug level has not been set by the emulator's memcheck option,
+ * read it from the libc.debug.malloc system property. */
+ if (!debug_level && __system_property_get("libc.debug.malloc", env)) {
+ debug_level = atoi(env);
+ }
+
+ /* Debug level 0 means that we should use dlxxx allocation
+ * routines (default). */
+ if (!debug_level) {
+ return;
+ }
+
+ // Let's see which .so must be loaded for the requested debug level.
+ switch (debug_level) {
+ case 1:
+ case 5:
+ case 10:
+ so_name = "/system/lib/libc_malloc_debug_leak.so";
+ break;
+ case 20:
+ // Quick check: debug level 20 can only be handled in emulator.
+ if (!qemu_running) {
+ error_log("%s: Debug level %d can only be set in emulator\n",
+ __progname, debug_level);
+ return;
+ }
+ // Make sure that memory checking has been enabled in emulator.
+ if (!memcheck_enabled) {
+ error_log("%s: Memory checking is not enabled in the emulator\n",
+ __progname);
+ return;
+ }
+ so_name = "/system/lib/libc_malloc_debug_qemu.so";
+ break;
+ default:
+ error_log("%s: Debug level %d is unknown\n",
+ __progname, debug_level);
+ return;
+ }
+
+ // Load .so that implements the required malloc debugging functionality.
+ libc_malloc_impl_handle = dlopen(so_name, RTLD_LAZY);
+ if (libc_malloc_impl_handle == NULL) {
+ error_log("%s: Missing module %s required for malloc debug level %d\n",
+ __progname, so_name, debug_level);
+ return;
+ }
+
+ // Initialize malloc debugging in the loaded module.
+ malloc_debug_initialize =
+ dlsym(libc_malloc_impl_handle, "malloc_debug_initialize");
+ if (malloc_debug_initialize == NULL) {
+ error_log("%s: Initialization routine is not found in %s\n",
+ __progname, so_name);
+ dlclose(libc_malloc_impl_handle);
+ return;
+ }
+ if (malloc_debug_initialize()) {
+ dlclose(libc_malloc_impl_handle);
+ return;
+ }
+
+ if (debug_level == 20) {
+ // For memory checker we need to do extra initialization.
+ int (*memcheck_initialize)(int, const char*) =
+ dlsym(libc_malloc_impl_handle, "memcheck_initialize");
+ if (memcheck_initialize == NULL) {
+ error_log("%s: memcheck_initialize routine is not found in %s\n",
+ __progname, so_name);
+ dlclose(libc_malloc_impl_handle);
+ return;
+ }
+ if (memcheck_initialize(MALLOC_ALIGNMENT, memcheck_tracing)) {
+ dlclose(libc_malloc_impl_handle);
+ return;
+ }
+ }
+
+ // Initialize malloc dispatch table with appropriate routines.
+ switch (debug_level) {
+ case 1:
+ __libc_android_log_print(ANDROID_LOG_INFO, "libc",
+ "%s using MALLOC_DEBUG = %d (leak checker)\n",
+ __progname, debug_level);
+ gMallocUse.malloc =
+ dlsym(libc_malloc_impl_handle, "leak_malloc");
+ gMallocUse.free =
+ dlsym(libc_malloc_impl_handle, "leak_free");
+ gMallocUse.calloc =
+ dlsym(libc_malloc_impl_handle, "leak_calloc");
+ gMallocUse.realloc =
+ dlsym(libc_malloc_impl_handle, "leak_realloc");
+ gMallocUse.memalign =
+ dlsym(libc_malloc_impl_handle, "leak_memalign");
+ break;
+ case 5:
+ __libc_android_log_print(ANDROID_LOG_INFO, "libc",
+ "%s using MALLOC_DEBUG = %d (fill)\n",
+ __progname, debug_level);
+ gMallocUse.malloc =
+ dlsym(libc_malloc_impl_handle, "fill_malloc");
+ gMallocUse.free =
+ dlsym(libc_malloc_impl_handle, "fill_free");
+ gMallocUse.calloc = dlcalloc;
+ gMallocUse.realloc =
+ dlsym(libc_malloc_impl_handle, "fill_realloc");
+ gMallocUse.memalign =
+ dlsym(libc_malloc_impl_handle, "fill_memalign");
+ break;
+ case 10:
+ __libc_android_log_print(ANDROID_LOG_INFO, "libc",
+ "%s using MALLOC_DEBUG = %d (sentinels, fill)\n",
+ __progname, debug_level);
+ gMallocUse.malloc =
+ dlsym(libc_malloc_impl_handle, "chk_malloc");
+ gMallocUse.free =
+ dlsym(libc_malloc_impl_handle, "chk_free");
+ gMallocUse.calloc =
+ dlsym(libc_malloc_impl_handle, "chk_calloc");
+ gMallocUse.realloc =
+ dlsym(libc_malloc_impl_handle, "chk_realloc");
+ gMallocUse.memalign =
+ dlsym(libc_malloc_impl_handle, "chk_memalign");
+ break;
+ case 20:
+ __libc_android_log_print(ANDROID_LOG_INFO, "libc",
+ "%s[%u] using MALLOC_DEBUG = %d (instrumented for emulator)\n",
+ __progname, getpid(), debug_level);
+ gMallocUse.malloc =
+ dlsym(libc_malloc_impl_handle, "qemu_instrumented_malloc");
+ gMallocUse.free =
+ dlsym(libc_malloc_impl_handle, "qemu_instrumented_free");
+ gMallocUse.calloc =
+ dlsym(libc_malloc_impl_handle, "qemu_instrumented_calloc");
+ gMallocUse.realloc =
+ dlsym(libc_malloc_impl_handle, "qemu_instrumented_realloc");
+ gMallocUse.memalign =
+ dlsym(libc_malloc_impl_handle, "qemu_instrumented_memalign");
+ break;
+ default:
+ break;
+ }
+
+ // Make sure dispatch table is initialized
+ if ((gMallocUse.malloc == NULL) ||
+ (gMallocUse.free == NULL) ||
+ (gMallocUse.calloc == NULL) ||
+ (gMallocUse.realloc == NULL) ||
+ (gMallocUse.memalign == NULL)) {
+ error_log("%s: Cannot initialize malloc dispatch table for debug level"
+ " %d: %p, %p, %p, %p, %p\n",
+ __progname, debug_level,
+ gMallocUse.malloc, gMallocUse.free,
+ gMallocUse.calloc, gMallocUse.realloc,
+ gMallocUse.memalign);
+ dlclose(libc_malloc_impl_handle);
+ libc_malloc_impl_handle = NULL;
+ } else {
+ __libc_malloc_dispatch = &gMallocUse;
+ }
+}
+
+static pthread_once_t malloc_init_once_ctl = PTHREAD_ONCE_INIT;
+
+#endif // !LIBC_STATIC
+#endif // USE_DL_PREFIX
+
+/* Initializes memory allocation framework.
+ * This routine is called from __libc_init routines implemented
+ * in libc_init_static.c and libc_init_dynamic.c files.
+ */
+void malloc_debug_init(void)
+{
+ /* We only need to initialize malloc if we implement our custom
+ * malloc routines here (i.e. USE_DL_PREFIX is defined) for libc.so. */
+#if defined(USE_DL_PREFIX) && !defined(LIBC_STATIC)
+ if (pthread_once(&malloc_init_once_ctl, malloc_init_impl)) {
+ error_log("Unable to initialize malloc_debug component.");
+ }
+#endif // USE_DL_PREFIX && !LIBC_STATIC
+}
diff --git a/libc/bionic/malloc_debug_common.h b/libc/bionic/malloc_debug_common.h
--- /dev/null
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Contains declarations of types and constants used by malloc leak
+ * detection code in both the libc and libc_malloc_debug libraries.
+ */
+#ifndef MALLOC_DEBUG_COMMON_H
+#define MALLOC_DEBUG_COMMON_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define HASHTABLE_SIZE 1543
+#define BACKTRACE_SIZE 32
+/* flag definitions, currently sharing storage with "size" */
+#define SIZE_FLAG_ZYGOTE_CHILD (1<<31)
+#define SIZE_FLAG_MASK (SIZE_FLAG_ZYGOTE_CHILD)
+
+#define MAX_SIZE_T (~(size_t)0)
+
+// =============================================================================
+// Structures
+// =============================================================================
+
+typedef struct HashEntry HashEntry;
+struct HashEntry {
+ size_t slot;
+ HashEntry* prev;
+ HashEntry* next;
+ size_t numEntries;
+ // fields above "size" are NOT sent to the host
+ size_t size;
+ size_t allocations;
+ intptr_t backtrace[0];
+};
+
+typedef struct HashTable HashTable;
+struct HashTable {
+ size_t count;
+ HashEntry* slots[HASHTABLE_SIZE];
+};
+
+/* Entry in malloc dispatch table. */
+typedef struct MallocDebug MallocDebug;
+struct MallocDebug {
+ /* Address of the actual malloc routine. */
+ void* (*malloc)(size_t bytes);
+ /* Address of the actual free routine. */
+ void (*free)(void* mem);
+ /* Address of the actual calloc routine. */
+ void* (*calloc)(size_t n_elements, size_t elem_size);
+ /* Address of the actual realloc routine. */
+ void* (*realloc)(void* oldMem, size_t bytes);
+ /* Address of the actual memalign routine. */
+ void* (*memalign)(size_t alignment, size_t bytes);
+};
+
+/* Malloc debugging initialization routine.
+ * This routine must be implemented in .so modules that implement malloc
+ * debugging. It is called once per process from the malloc_init_impl routine
+ * in bionic/libc/bionic/malloc_debug_common.c when malloc debugging is
+ * initialized for the process.
+ * Return:
+ * 0 on success, -1 on failure.
+ */
+typedef int (*MallocDebugInit)(void);
+
+#ifdef __cplusplus
+}; /* end of extern "C" */
+#endif
+
+#endif // MALLOC_DEBUG_COMMON_H
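To illustrate the contract this header spells out, here is a minimal sketch of a debug module; the leak_* names match the dlsym() lookups in malloc_init_impl(), but the pass-through bodies are placeholders and assumptions, not the real libc_malloc_debug_leak.so implementation:

#include <stddef.h>
#include "dlmalloc.h"
#include "malloc_debug_common.h"

/* Called once per process through the MallocDebugInit pointer resolved by
 * dlsym(); returning 0 reports success. */
int malloc_debug_initialize(void)
{
    return 0;
}

/* Debug level 1 entry points. A real module would record a backtrace into
 * gHashTable here; these bodies simply forward to the dl* allocator. */
void* leak_malloc(size_t bytes)                 { return dlmalloc(bytes); }
void  leak_free(void* mem)                      { dlfree(mem); }
void* leak_calloc(size_t n, size_t elem_size)   { return dlcalloc(n, elem_size); }
void* leak_realloc(void* oldMem, size_t bytes)  { return dlrealloc(oldMem, bytes); }
void* leak_memalign(size_t align, size_t bytes) { return dlmemalign(align, bytes); }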
similarity index 63%
rename from libc/bionic/malloc_leak.c
rename to libc/bionic/malloc_debug_leak.c
index ad1d2b4203e7d64bac00bd61a03e21bd0b4d1183..2ff8cee9ee0cf904582381032c8c76329d079631 100644 (file)
#include <stdarg.h>
#include <fcntl.h>
#include <unwind.h>
+#include <dlfcn.h>
#include <sys/socket.h>
#include <sys/un.h>
#include "dlmalloc.h"
#include "logd.h"
+#include "malloc_debug_common.h"
-// =============================================================================
-// Utilities directly used by Dalvik
-// =============================================================================
-
-#define HASHTABLE_SIZE 1543
-#define BACKTRACE_SIZE 32
-/* flag definitions, currently sharing storage with "size" */
-#define SIZE_FLAG_ZYGOTE_CHILD (1<<31)
-#define SIZE_FLAG_MASK (SIZE_FLAG_ZYGOTE_CHILD)
-
-#define MAX_SIZE_T (~(size_t)0)
-
-/*
- * In a VM process, this is set to 1 after fork()ing out of zygote.
- */
-int gMallocLeakZygoteChild = 0;
-
-// =============================================================================
-// Structures
-// =============================================================================
-
-typedef struct HashEntry HashEntry;
-struct HashEntry {
- size_t slot;
- HashEntry* prev;
- HashEntry* next;
- size_t numEntries;
- // fields above "size" are NOT sent to the host
- size_t size;
- size_t allocations;
- intptr_t backtrace[0];
-};
-
-typedef struct HashTable HashTable;
-struct HashTable {
- size_t count;
- HashEntry* slots[HASHTABLE_SIZE];
-};
+// This file should be included in the build only when the
+// MALLOC_LEAK_CHECK or MALLOC_QEMU_INSTRUMENT macro (or both)
+// is defined.
+#ifndef MALLOC_LEAK_CHECK
+#error MALLOC_LEAK_CHECK is not defined.
+#endif // !MALLOC_LEAK_CHECK
-static pthread_mutex_t gAllocationsMutex = PTHREAD_MUTEX_INITIALIZER;
-static HashTable gHashTable;
+// Global variables defined in malloc_debug_common.c
+extern int gMallocLeakZygoteChild;
+extern pthread_mutex_t gAllocationsMutex;
+extern HashTable gHashTable;
+extern const MallocDebug __libc_malloc_default_dispatch;
+extern const MallocDebug* __libc_malloc_dispatch;
// =============================================================================
// log functions
// =============================================================================
#define debug_log(format, ...) \
- __libc_android_log_print(ANDROID_LOG_DEBUG, "malloc_leak", (format), ##__VA_ARGS__ )
-
-// =============================================================================
-// output functions
-// =============================================================================
-
-static int hash_entry_compare(const void* arg1, const void* arg2)
-{
- HashEntry* e1 = *(HashEntry**)arg1;
- HashEntry* e2 = *(HashEntry**)arg2;
-
- size_t nbAlloc1 = e1->allocations;
- size_t nbAlloc2 = e2->allocations;
- size_t size1 = e1->size & ~SIZE_FLAG_MASK;
- size_t size2 = e2->size & ~SIZE_FLAG_MASK;
- size_t alloc1 = nbAlloc1 * size1;
- size_t alloc2 = nbAlloc2 * size2;
-
- // sort in descending order by:
- // 1) total size
- // 2) number of allocations
- //
- // This is used for sorting, not determination of equality, so we don't
- // need to compare the bit flags.
- int result;
- if (alloc1 > alloc2) {
- result = -1;
- } else if (alloc1 < alloc2) {
- result = 1;
- } else {
- if (nbAlloc1 > nbAlloc2) {
- result = -1;
- } else if (nbAlloc1 < nbAlloc2) {
- result = 1;
- } else {
- result = 0;
- }
- }
- return result;
-}
-
-/*
- * Retrieve native heap information.
- *
- * "*info" is set to a buffer we allocate
- * "*overallSize" is set to the size of the "info" buffer
- * "*infoSize" is set to the size of a single entry
- * "*totalMemory" is set to the sum of all allocations we're tracking; does
- * not include heap overhead
- * "*backtraceSize" is set to the maximum number of entries in the back trace
- */
-void get_malloc_leak_info(uint8_t** info, size_t* overallSize,
- size_t* infoSize, size_t* totalMemory, size_t* backtraceSize)
-{
- // don't do anything if we have invalid arguments
- if (info == NULL || overallSize == NULL || infoSize == NULL ||
- totalMemory == NULL || backtraceSize == NULL) {
- return;
- }
-
- pthread_mutex_lock(&gAllocationsMutex);
-
- if (gHashTable.count == 0) {
- *info = NULL;
- *overallSize = 0;
- *infoSize = 0;
- *totalMemory = 0;
- *backtraceSize = 0;
- goto done;
- }
-
- void** list = (void**)dlmalloc(sizeof(void*) * gHashTable.count);
-
- // debug_log("*****\ngHashTable.count = %d\n", gHashTable.count);
- // debug_log("list = %p\n", list);
-
- // get the entries into an array to be sorted
- int index = 0;
- int i;
- for (i = 0 ; i < HASHTABLE_SIZE ; i++) {
- HashEntry* entry = gHashTable.slots[i];
- while (entry != NULL) {
- list[index] = entry;
- *totalMemory = *totalMemory +
- ((entry->size & ~SIZE_FLAG_MASK) * entry->allocations);
- index++;
- entry = entry->next;
- }
- }
-
- // debug_log("sorted list!\n");
- // XXX: the protocol doesn't allow variable size for the stack trace (yet)
- *infoSize = (sizeof(size_t) * 2) + (sizeof(intptr_t) * BACKTRACE_SIZE);
- *overallSize = *infoSize * gHashTable.count;
- *backtraceSize = BACKTRACE_SIZE;
-
- // debug_log("infoSize = 0x%x overall = 0x%x\n", *infoSize, *overallSize);
- // now get A byte array big enough for this
- *info = (uint8_t*)dlmalloc(*overallSize);
-
- // debug_log("info = %p\n", info);
- if (*info == NULL) {
- *overallSize = 0;
- goto done;
- }
-
- // debug_log("sorting list...\n");
- qsort((void*)list, gHashTable.count, sizeof(void*), hash_entry_compare);
-
- uint8_t* head = *info;
- const int count = gHashTable.count;
- for (i = 0 ; i < count ; i++) {
- HashEntry* entry = list[i];
- size_t entrySize = (sizeof(size_t) * 2) + (sizeof(intptr_t) * entry->numEntries);
- if (entrySize < *infoSize) {
- /* we're writing less than a full entry, clear out the rest */
- /* TODO: only clear out the part we're not overwriting? */
- memset(head, 0, *infoSize);
- } else {
- /* make sure the amount we're copying doesn't exceed the limit */
- entrySize = *infoSize;
- }
- memcpy(head, &(entry->size), entrySize);
- head += *infoSize;
- }
-
- dlfree(list);
-
-done:
- // debug_log("+++++ done!\n");
- pthread_mutex_unlock(&gAllocationsMutex);
-}
-
-void free_malloc_leak_info(uint8_t* info)
-{
- dlfree(info);
-}
-
-struct mallinfo mallinfo()
-{
- return dlmallinfo();
-}
-
-void* valloc(size_t bytes) {
- /* assume page size of 4096 bytes */
- return memalign( getpagesize(), bytes );
-}
+ __libc_android_log_print(ANDROID_LOG_DEBUG, "malloc_leak_check", (format), ##__VA_ARGS__ )
+#define error_log(format, ...) \
+ __libc_android_log_print(ANDROID_LOG_ERROR, "malloc_leak_check", (format), ##__VA_ARGS__ )
+#define info_log(format, ...) \
+ __libc_android_log_print(ANDROID_LOG_INFO, "malloc_leak_check", (format), ##__VA_ARGS__ )
+static int gTrapOnError = 1;
-/*
- * Code guarded by MALLOC_LEAK_CHECK is only needed when malloc check is
- * enabled. Currently we exclude them in libc.so, and only include them in
- * libc_debug.so.
- */
-#ifdef MALLOC_LEAK_CHECK
#define MALLOC_ALIGNMENT 8
#define GUARD 0x48151642
-
#define DEBUG 0
// =============================================================================
@@ -407,13 +233,13 @@ static _Unwind_Reason_Code trace_function(__unwind_context *context, void *arg)
if (state->count) {
intptr_t ip = (intptr_t)_Unwind_GetIP(context);
if (ip) {
- state->addrs[0] = ip;
+ state->addrs[0] = ip;
state->addrs++;
state->count--;
return _URC_NO_REASON;
}
}
- /*
+ /*
* If we run out of space to record the address or 0 has been seen, stop
* unwinding the stack.
*/
return max_entries - state.count;
}
-// =============================================================================
-// malloc leak function dispatcher
-// =============================================================================
-
-static void* leak_malloc(size_t bytes);
-static void leak_free(void* mem);
-static void* leak_calloc(size_t n_elements, size_t elem_size);
-static void* leak_realloc(void* oldMem, size_t bytes);
-static void* leak_memalign(size_t alignment, size_t bytes);
-
-static void* fill_malloc(size_t bytes);
-static void fill_free(void* mem);
-static void* fill_realloc(void* oldMem, size_t bytes);
-static void* fill_memalign(size_t alignment, size_t bytes);
-
-static void* chk_malloc(size_t bytes);
-static void chk_free(void* mem);
-static void* chk_calloc(size_t n_elements, size_t elem_size);
-static void* chk_realloc(void* oldMem, size_t bytes);
-static void* chk_memalign(size_t alignment, size_t bytes);
-
-typedef struct {
- void* (*malloc)(size_t bytes);
- void (*free)(void* mem);
- void* (*calloc)(size_t n_elements, size_t elem_size);
- void* (*realloc)(void* oldMem, size_t bytes);
- void* (*memalign)(size_t alignment, size_t bytes);
-} MallocDebug;
-
-static const MallocDebug gMallocEngineTable[] __attribute__((aligned(32))) =
-{
- { dlmalloc, dlfree, dlcalloc, dlrealloc, dlmemalign },
- { leak_malloc, leak_free, leak_calloc, leak_realloc, leak_memalign },
- { fill_malloc, fill_free, dlcalloc, fill_realloc, fill_memalign },
- { chk_malloc, chk_free, chk_calloc, chk_realloc, chk_memalign }
-};
-
-enum {
- INDEX_NORMAL = 0,
- INDEX_LEAK_CHECK,
- INDEX_MALLOC_FILL,
- INDEX_MALLOC_CHECK,
-};
-
-static MallocDebug const * gMallocDispatch = &gMallocEngineTable[INDEX_NORMAL];
-static int gMallocDebugLevel;
-static int gTrapOnError = 1;
-
-void* malloc(size_t bytes) {
- return gMallocDispatch->malloc(bytes);
-}
-void free(void* mem) {
- gMallocDispatch->free(mem);
-}
-void* calloc(size_t n_elements, size_t elem_size) {
- return gMallocDispatch->calloc(n_elements, elem_size);
-}
-void* realloc(void* oldMem, size_t bytes) {
- return gMallocDispatch->realloc(oldMem, bytes);
-}
-void* memalign(size_t alignment, size_t bytes) {
- return gMallocDispatch->memalign(alignment, bytes);
-}
-
// =============================================================================
// malloc check functions
// =============================================================================
va_list args;
pthread_mutex_lock(&gAllocationsMutex);
- gMallocDispatch = &gMallocEngineTable[INDEX_NORMAL];
+ {
+ const MallocDebug* current_dispatch = __libc_malloc_dispatch;
+ __libc_malloc_dispatch = &__libc_malloc_default_dispatch;
va_start(args, format);
__libc_android_log_vprint(ANDROID_LOG_ERROR, "libc",
format, args);
if (gTrapOnError) {
__builtin_trap();
}
- gMallocDispatch = &gMallocEngineTable[INDEX_MALLOC_CHECK];
+ __libc_malloc_dispatch = current_dispatch;
+ }
pthread_mutex_unlock(&gAllocationsMutex);
}
buf = (char*)mem - CHK_SENTINEL_HEAD_SIZE;
for (i=0 ; i<CHK_SENTINEL_HEAD_SIZE ; i++) {
if (buf[i] != CHK_SENTINEL_VALUE) {
- assert_log_message(
+ assert_log_message(
"*** %s CHECK: buffer %p "
"corrupted %d bytes before allocation",
func, mem, CHK_SENTINEL_HEAD_SIZE-i);
buf = (char*)mem + bytes;
for (i=CHK_SENTINEL_TAIL_SIZE-1 ; i>=0 ; i--) {
if (buf[i] != CHK_SENTINEL_VALUE) {
- assert_log_message(
+ assert_log_message(
"*** %s CHECK: buffer %p, size=%lu, "
"corrupted %d bytes after allocation",
func, buffer, bytes, i+1);
intptr_t backtrace[BACKTRACE_SIZE];
size_t numEntries = get_backtrace(backtrace, BACKTRACE_SIZE);
-
+
AllocationEntry* header = (AllocationEntry*)base;
header->entry = record_backtrace(backtrace, numEntries, bytes);
header->guard = GUARD;
-
+
// now increment base to point to after our header.
// this should just work since our header is 8 bytes.
base = (AllocationEntry*)base + 1;
// check the guard to make sure it is valid
AllocationEntry* header = (AllocationEntry*)mem - 1;
-
+
if (header->guard != GUARD) {
// could be a memaligned block
if (((void**)mem)[-1] == MEMALIGN_GUARD) {
header = (AllocationEntry*)mem - 1;
}
}
-
+
if (header->guard == GUARD || is_valid_entry(header->entry)) {
// decrement the allocations
HashEntry* entry = header->entry;
// need to make sure it's a power of two
if (alignment & (alignment-1))
alignment = 1L << (31 - __builtin_clz(alignment));
-
+
     // here, alignment is at least MALLOC_ALIGNMENT<<1 bytes
// we will align by at least MALLOC_ALIGNMENT bytes
// and at most alignment-MALLOC_ALIGNMENT bytes
// align the pointer
ptr += ((-ptr) % alignment);
-
+
// there is always enough space for the base pointer and the guard
((void**)ptr)[-1] = MEMALIGN_GUARD;
((void**)ptr)[-2] = base;
}
return base;
}
-#endif /* MALLOC_LEAK_CHECK */
-
-// called from libc_init()
-extern char* __progname;
-void malloc_debug_init()
+/* Initializes malloc debugging framework.
+ * See comments on MallocDebugInit in malloc_debug_common.h
+ */
+int malloc_debug_initialize(void)
{
- unsigned int level = 0;
-#ifdef MALLOC_LEAK_CHECK
- // if MALLOC_LEAK_CHECK is enabled, use level=1 by default
- level = 1;
-#endif
- char env[PROP_VALUE_MAX];
- int len = __system_property_get("libc.debug.malloc", env);
-
- if (len) {
- level = atoi(env);
-#ifndef MALLOC_LEAK_CHECK
- /* Alert the user that libc_debug.so needs to be installed as libc.so
- * when performing malloc checks.
- */
- if (level != 0) {
- __libc_android_log_print(ANDROID_LOG_INFO, "libc",
- "Malloc checks need libc_debug.so pushed to the device!\n");
-
- }
-#endif
- }
-
-#ifdef MALLOC_LEAK_CHECK
- gMallocDebugLevel = level;
- switch (level) {
- default:
- case 0:
- gMallocDispatch = &gMallocEngineTable[INDEX_NORMAL];
- break;
- case 1:
- __libc_android_log_print(ANDROID_LOG_INFO, "libc",
- "%s using MALLOC_DEBUG = %d (leak checker)\n",
- __progname, level);
- gMallocDispatch = &gMallocEngineTable[INDEX_LEAK_CHECK];
- break;
- case 5:
- __libc_android_log_print(ANDROID_LOG_INFO, "libc",
- "%s using MALLOC_DEBUG = %d (fill)\n",
- __progname, level);
- gMallocDispatch = &gMallocEngineTable[INDEX_MALLOC_FILL];
- break;
- case 10:
- __libc_android_log_print(ANDROID_LOG_INFO, "libc",
- "%s using MALLOC_DEBUG = %d (sentinels, fill)\n",
- __progname, level);
- gMallocDispatch = &gMallocEngineTable[INDEX_MALLOC_CHECK];
- break;
- }
-#endif
+ // We don't really have anything that requires initialization here.
+ return 0;
}
diff --git a/libc/bionic/malloc_debug_qemu.c b/libc/bionic/malloc_debug_qemu.c
--- /dev/null
+++ b/libc/bionic/malloc_debug_qemu.c
@@ -0,0 +1,1014 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Contains the implementation of memory allocation routines instrumented for
+ * use in the emulator to detect memory allocation violations, such as
+ * memory leaks, buffer overruns, etc.
+ * The code implemented here is intended to run in the emulated environment
+ * only, and serves simply as hooks into the memory allocation routines. The
+ * main job of this code is to notify the emulator about memory being
+ * allocated/deallocated, providing information about each allocation. The idea
+ * is that the emulator will keep a list of currently allocated blocks and,
+ * knowing the boundaries of each block, will be able to verify that ld/st
+ * accesses to these blocks don't step over the boundaries set for the user. To
+ * enforce that, each memory block allocated by this code is guarded with
+ * "prefix" and "suffix" areas, so every time the emulator detects an access to
+ * any of these guarding areas, it can be treated as an access violation.
+ */
+
+#include <stdlib.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <pthread.h>
+#include <unistd.h>
+#include <errno.h>
+#include "dlmalloc.h"
+#include "logd.h"
+#include "malloc_debug_common.h"
+
+/* This file should be included into the build only when
+ * MALLOC_QEMU_INSTRUMENT macro is defined. */
+#ifndef MALLOC_QEMU_INSTRUMENT
+#error MALLOC_QEMU_INSTRUMENT is not defined.
+#endif // !MALLOC_QEMU_INSTRUMENT
+
+/* Controls access violation test performed to make sure that we catch AVs
+ * all the time they occur. See test_access_violation for more info. This macro
+ * is used for internal testing purposes and should always be set to zero for
+ * the production builds. */
+#define TEST_ACCESS_VIOLATIONS 0
+
+// =============================================================================
+// Communication structures
+// =============================================================================
+
+/* Describes memory block allocated from the heap. This structure is passed
+ * along with TRACE_DEV_REG_MALLOC event. This descriptor is used to inform
+ * the emulator about a new memory block being allocated from the heap. The
+ * entire structure is initialized by the guest system before the event is
+ * fired. It is important to remember that the same structure (an exact copy,
+ * except for replacing pointers with target_ulong) is also declared in the
+ * emulator's sources (file memcheck/memcheck_common.h). So, every time a
+ * change is made to either of these two declarations, the other one must be
+ * updated accordingly.
+ */
+typedef struct MallocDesc {
+ /* Pointer to the memory block actually allocated from the heap. Note that
+ * this is not the pointer that is returned to the malloc's caller. Pointer
+ * returned to the caller is calculated by adding value stored in this field
+ * to the value stored in prefix_size field of this structure.
+ */
+ void* ptr;
+
+ /* Number of bytes requested by the malloc's caller. */
+ uint32_t requested_bytes;
+
+ /* Byte size of the prefix data. Actual pointer returned to the malloc's
+     * caller is calculated by adding the value stored in this field to the
+     * value stored in the ptr field of this structure.
+ */
+ uint32_t prefix_size;
+
+ /* Byte size of the suffix data. */
+ uint32_t suffix_size;
+
+ /* Id of the process that initialized libc instance, in which allocation
+ * has occurred. This field is used by the emulator to report errors in
+ * the course of TRACE_DEV_REG_MALLOC event handling. In case of an error,
+ * emulator sets this field to zero (invalid value for a process ID).
+ */
+ uint32_t libc_pid;
+
+ /* Id of the process in context of which allocation has occurred.
+ * Value in this field may differ from libc_pid value, if process that
+ * is doing allocation has been forked from the process that initialized
+ * libc instance.
+ */
+ uint32_t allocator_pid;
+
+ /* Number of access violations detected on this allocation. */
+ uint32_t av_count;
+} MallocDesc;
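As an illustrative sketch (not part of the change itself), the fields above describe a block laid out as a guard prefix, the caller-visible bytes, and a guard suffix; the arithmetic below matches the mallocdesc_user_ptr() and mallocdesc_alloc_size() helpers defined further down in this file:

    /*
     *  desc->ptr                                                   block end
     *  |<-- prefix_size -->|<-- requested_bytes -->|<-- suffix_size -->|
     *                      ^
     *                      pointer returned to the malloc caller
     *                      (= (char*)desc->ptr + desc->prefix_size)
     *
     * Bytes actually taken from the heap:
     *   prefix_size + requested_bytes + suffix_size
     */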
+
+/* Describes memory block info queried from emulator. This structure is passed
+ * along with TRACE_DEV_REG_QUERY_MALLOC event. When handling free and realloc
+ * calls, it is required that we have information about memory blocks that were
+ * actually allocated in previous calls to malloc, calloc, memalign, or realloc.
+ * Since we don't keep this information directly in the allocated block, but
+ * rather we keep it in the emulator, we need to query emulator for that
+ * information with TRACE_DEV_REG_QUERY_MALLOC query. The entire structure is
+ * initialized by the guest system before the event is fired. It is important
+ * to remember that the same structure (an exact copy, except for replacing
+ * pointers with target_ulong) is also declared in the emulator's sources (file
+ * memcheck/memcheck_common.h). So, every time a change is made to either of
+ * these two declarations, the other one must be updated accordingly.
+ */
+typedef struct MallocDescQuery {
+ /* Pointer, for which information is queried. Note that this pointer doesn't
+ * have to be exact pointer returned to malloc's caller, but can point
+ * anywhere inside an allocated block, including guarding areas. Emulator
+ * will respond with information about allocated block that contains this
+ * pointer.
+ */
+ void* ptr;
+
+ /* Id of the process that initialized libc instance, in which this query
+ * is called. This field is used by the emulator to report errors in
+ * the course of TRACE_DEV_REG_QUERY_MALLOC event handling. In case of an
+ * error, emulator sets this field to zero (invalid value for a process ID).
+ */
+ uint32_t libc_pid;
+
+ /* Process ID in context of which query is made. */
+ uint32_t query_pid;
+
+ /* Code of the allocation routine, in context of which query has been made:
+ * 1 - free
+ * 2 - realloc
+ */
+ uint32_t routine;
+
+ /* Address of memory allocation descriptor for the queried pointer.
+ * Descriptor, addressed by this field is initialized by the emulator in
+ * response to the query.
+ */
+ MallocDesc* desc;
+} MallocDescQuery;
+
+/* Describes memory block that is being freed back to the heap. This structure
+ * is passed along with TRACE_DEV_REG_FREE_PTR event. The entire structure is
+ * initialized by the guest system before the event is fired. It is important
+ * to remember that the same structure (an exact copy, except for replacing
+ * pointers with target_ulong) is also declared in the emulator's sources (file
+ * memcheck/memcheck_common.h). So, every time a change is made to either of
+ * these two declarations, the other one must be updated accordingly.
+ */
+typedef struct MallocFree {
+ /* Pointer to be freed. */
+ void* ptr;
+
+ /* Id of the process that initialized libc instance, in which this free
+ * is called. This field is used by the emulator to report errors in
+ * the course of TRACE_DEV_REG_FREE_PTR event handling. In case of an
+ * error, emulator sets this field to zero (invalid value for a process ID).
+ */
+ uint32_t libc_pid;
+
+ /* Process ID in context of which memory is being freed. */
+ uint32_t free_pid;
+} MallocFree;
+
+// =============================================================================
+// Communication events
+// =============================================================================
+
+/* Notifies the emulator that libc has been initialized for a process.
+ * Event's value parameter is PID for the process in context of which libc has
+ * been initialized.
+ */
+#define TRACE_DEV_REG_LIBC_INIT 1536
+
+/* Notifies the emulator about a new memory block being allocated.
+ * Event's value parameter points to MallocDesc instance that contains
+ * allocated block information. Note that 'libc_pid' field of the descriptor
+ * is used by emulator to report failure in handling this event. In case
+ * of a failure emulator will zero that field before completing this event.
+ */
+#define TRACE_DEV_REG_MALLOC 1537
+
+/* Notifies the emulator about memory block being freed.
+ * Event's value parameter points to MallocFree descriptor that contains
+ * information about block that's being freed. Note that 'libc_pid' field
+ * of the descriptor is used by emulator to report failure in handling this
+ * event. In case of a failure emulator will zero that field before completing
+ * this event.
+ */
+#define TRACE_DEV_REG_FREE_PTR 1538
+
+/* Queries the emulator about allocated memory block information.
+ * Event's value parameter points to MallocDescQuery descriptor that contains
+ * query parameters. Note that 'libc_pid' field of the descriptor is used by
+ * emulator to report failure in handling this event. In case of a failure
+ * emulator will zero that field before completing this event.
+ */
+#define TRACE_DEV_REG_QUERY_MALLOC 1539
+
+/* Queries the emulator to print a string to its stdout.
+ * Event's value parameter points to a zero-terminated string to be printed.
+ */
+#define TRACE_DEV_REG_PRINT_USER_STR 1540
+
+static void notify_qemu_string(const char* str);
+static void qemu_log(int prio, const char* fmt, ...);
+static void dump_malloc_descriptor(char* str,
+ size_t str_buf_size,
+ const MallocDesc* desc);
+
+// =============================================================================
+// Macros
+// =============================================================================
+
+/* Defines default size of allocation prefix.
+ * Note that we make prefix area quite large in order to increase chances of
+ * catching buffer overflow. */
+#define DEFAULT_PREFIX_SIZE (malloc_alignment * 4)
+
+/* Defines default size of allocation suffix.
+ * Note that we make suffix area quite large in order to increase chances of
+ * catching buffer overflow. */
+#define DEFAULT_SUFFIX_SIZE (malloc_alignment * 4)
+
+/* Debug tracing has been enabled by the emulator. */
+#define DEBUG_TRACING_ENABLED 0x00000001
+/* Error tracing has been enabled by the emulator. */
+#define ERROR_TRACING_ENABLED 0x00000002
+/* Info tracing has been enabled by the emulator. */
+#define INFO_TRACING_ENABLED 0x00000004
+/* All tracing flags combined. */
+#define ALL_TRACING_ENABLED (DEBUG_TRACING_ENABLED | \
+ ERROR_TRACING_ENABLED | \
+ INFO_TRACING_ENABLED)
+
+/* Prints a string to the emulator's stdout.
+ * In the early stages of system loading, logging messages via the
+ * __libc_android_log_print API is not available, because the ADB API has not
+ * been hooked up yet. So, in order to see such messages we need to print them
+ * to the emulator's stdout.
+ * Parameters passed to this macro are the same as parameters for printf
+ * routine.
+ */
+#define TR(...) \
+ do { \
+ char tr_str[4096]; \
+ snprintf(tr_str, sizeof(tr_str), __VA_ARGS__ ); \
+ tr_str[sizeof(tr_str) - 1] = '\0'; \
+ notify_qemu_string(&tr_str[0]); \
+ } while (0)
+
+// =============================================================================
+// Logging macros. Note that we simultaneously log messages to ADB and emulator.
+// =============================================================================
+
+/*
+ * Helper macros for checking if particular trace level is enabled.
+ */
+#define debug_LOG_ENABLED ((tracing_flags & DEBUG_TRACING_ENABLED) != 0)
+#define error_LOG_ENABLED ((tracing_flags & ERROR_TRACING_ENABLED) != 0)
+#define info_LOG_ENABLED ((tracing_flags & INFO_TRACING_ENABLED) != 0)
+#define tracing_enabled(type) (type##_LOG_ENABLED)
+
+/*
+ * Logging helper macros.
+ */
+#define debug_log(format, ...) \
+ do { \
+ __libc_android_log_print(ANDROID_LOG_DEBUG, "memcheck", \
+ (format), ##__VA_ARGS__ ); \
+ if (tracing_flags & DEBUG_TRACING_ENABLED) { \
+ qemu_log(ANDROID_LOG_DEBUG, (format), ##__VA_ARGS__ ); \
+ } \
+ } while (0)
+
+#define error_log(format, ...) \
+ do { \
+ __libc_android_log_print(ANDROID_LOG_ERROR, "memcheck", \
+ (format), ##__VA_ARGS__ ); \
+ if (tracing_flags & ERROR_TRACING_ENABLED) { \
+ qemu_log(ANDROID_LOG_ERROR, (format), ##__VA_ARGS__ ); \
+ } \
+ } while (0)
+
+#define info_log(format, ...) \
+ do { \
+ __libc_android_log_print(ANDROID_LOG_INFO, "memcheck", \
+ (format), ##__VA_ARGS__ ); \
+ if (tracing_flags & INFO_TRACING_ENABLED) { \
+ qemu_log(ANDROID_LOG_INFO, (format), ##__VA_ARGS__ ); \
+ } \
+ } while (0)
+
+/* Logs message dumping MallocDesc instance at the end of the message.
+ * Param:
+ * type - Message type: debug, error, or info
+ * desc - MallocDesc instance to dump.
+ * frmt + rest - Formats message preceding dumped descriptor.
+*/
+#define log_mdesc(type, desc, frmt, ...) \
+ do { \
+ if (tracing_enabled(type)) { \
+ char log_str[4096]; \
+ size_t str_len; \
+ snprintf(log_str, sizeof(log_str), frmt, ##__VA_ARGS__); \
+ log_str[sizeof(log_str) - 1] = '\0'; \
+ str_len = strlen(log_str); \
+ dump_malloc_descriptor(log_str + str_len, \
+ sizeof(log_str) - str_len, \
+ (desc)); \
+ type##_log(log_str); \
+ } \
+ } while (0)
+
+// =============================================================================
+// Static data
+// =============================================================================
+
+/* Emulator's magic page address.
+ * This page (mapped on /dev/qemu_trace device) is used to fire up events
+ * in the emulator. */
+static volatile void* qtrace = NULL;
+
+/* Cached PID of the process in context of which this libc instance
+ * has been initialized. */
+static uint32_t malloc_pid = 0;
+
+/* Memory allocation alignment that is used in dlmalloc.
+ * This variable is updated by memcheck_initialize routine. */
+static uint32_t malloc_alignment = 8;
+
+/* Tracing flags. These flags control which types of logging messages are
+ * enabled by the emulator. See XXX_TRACING_ENABLED for the values of flags
+ * stored in this variable. This variable is updated by memcheck_initialize
+ * routine. */
+static uint32_t tracing_flags = 0;
+
+// =============================================================================
+// Static routines
+// =============================================================================
+
+/* Gets the pointer returned to the malloc caller for the given allocation
+ * descriptor.
+ * Param:
+ * desc - Allocation descriptor.
+ * Return:
+ * Pointer to the allocated memory returned to the malloc caller.
+ */
+static inline void*
+mallocdesc_user_ptr(const MallocDesc* desc)
+{
+ return (char*)desc->ptr + desc->prefix_size;
+}
+
+/* Gets size of memory block actually allocated from the heap for the given
+ * allocation descriptor.
+ * Param:
+ * desc - Allocation descriptor.
+ * Return:
+ * Size of memory block actually allocated from the heap.
+ */
+static inline uint32_t
+mallocdesc_alloc_size(const MallocDesc* desc)
+{
+ return desc->prefix_size + desc->requested_bytes + desc->suffix_size;
+}
+
+/* Gets pointer to the end of the allocated block for the given descriptor.
+ * Param:
+ * desc - Descriptor for the memory block, allocated in malloc handler.
+ * Return:
+ * Pointer to the end of (one byte past) the allocated block.
+ */
+static inline void*
+mallocdesc_alloc_end(const MallocDesc* desc)
+{
+ return (char*)desc->ptr + mallocdesc_alloc_size(desc);
+}
+
+/* Fires up an event in the emulator.
+ * Param:
+ * code - Event code (one of the TRACE_DEV_XXX).
+ * val - Event's value parameter.
+ */
+static inline void
+notify_qemu(uint32_t code, uint32_t val)
+{
+ if (NULL != qtrace) {
+ *(volatile uint32_t*)((uint32_t)qtrace + ((code - 1024) << 2)) = val;
+ }
+}
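As a worked example (illustrative only, with desc standing for a hypothetical populated MallocDesc), TRACE_DEV_REG_MALLOC is 1537, so firing that event stores the value at byte offset (1537 - 1024) << 2 = 2052 within the mapped trace page:

    /* Equivalent expansion of notify_qemu(TRACE_DEV_REG_MALLOC, (uint32_t)&desc): */
    *(volatile uint32_t*)((uint32_t)qtrace + 2052) = (uint32_t)&desc;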
+
+/* Prints a zero-terminated string to the emulator's stdout (fires up
+ * TRACE_DEV_REG_PRINT_USER_STR event in the emulator).
+ * Param:
+ * str - Zero-terminated string to print.
+ */
+static void
+notify_qemu_string(const char* str)
+{
+ if (str != NULL) {
+ notify_qemu(TRACE_DEV_REG_PRINT_USER_STR, (uint32_t)str);
+ }
+}
+
+/* Fires up TRACE_DEV_REG_LIBC_INIT event in the emulator.
+ * Param:
+ * pid - ID of the process that initialized libc.
+ */
+static void
+notify_qemu_libc_initialized(uint32_t pid)
+{
+ notify_qemu(TRACE_DEV_REG_LIBC_INIT, pid);
+}
+
+/* Fires up TRACE_DEV_REG_MALLOC event in the emulator.
+ * Param:
+ * desc - Pointer to MallocDesc instance containing allocated block
+ * information.
+ * Return:
+ * Zero on success, or -1 on failure. Note that on failure libc_pid field of
+ * the desc parameter passed to this routine has been zeroed out by the
+ * emulator.
+ */
+static inline int
+notify_qemu_malloc(volatile MallocDesc* desc)
+{
+ desc->libc_pid = malloc_pid;
+ desc->allocator_pid = getpid();
+ desc->av_count = 0;
+ notify_qemu(TRACE_DEV_REG_MALLOC, (uint32_t)desc);
+
+ /* Emulator reports failure by zeroing libc_pid field of the
+ * descriptor. */
+ return desc->libc_pid != 0 ? 0 : -1;
+}
+
+/* Fires up TRACE_DEV_REG_FREE_PTR event in the emulator.
+ * Param:
+ * ptr - Pointer to the memory block that's being freed.
+ * Return:
+ * Zero on success, or -1 on failure.
+ */
+static inline int
+notify_qemu_free(void* ptr_to_free)
+{
+ volatile MallocFree free_desc;
+
+ free_desc.ptr = ptr_to_free;
+ free_desc.libc_pid = malloc_pid;
+ free_desc.free_pid = getpid();
+ notify_qemu(TRACE_DEV_REG_FREE_PTR, (uint32_t)&free_desc);
+
+ /* Emulator reports failure by zeroing libc_pid field of the
+ * descriptor. */
+ return free_desc.libc_pid != 0 ? 0 : -1;
+}
+
+/* Fires up TRACE_DEV_REG_QUERY_MALLOC event in the emulator.
+ * Param:
+ * ptr - Pointer to request allocation information for.
+ * desc - Pointer to MallocDesc instance that will receive allocation
+ * information.
+ * routine - Code of the allocation routine, in context of which query is made:
+ * 1 - free
+ * 2 - realloc
+ * Return:
+ * Zero on success, or -1 on failure.
+ */
+static inline int
+query_qemu_malloc_info(void* ptr, MallocDesc* desc, uint32_t routine)
+{
+ volatile MallocDescQuery query;
+
+ query.ptr = ptr;
+ query.libc_pid = malloc_pid;
+ query.query_pid = getpid();
+ query.routine = routine;
+ query.desc = desc;
+ notify_qemu(TRACE_DEV_REG_QUERY_MALLOC, (uint32_t)&query);
+
+ /* Emulator reports failure by zeroing libc_pid field of the
+ * descriptor. */
+ return query.libc_pid != 0 ? 0 : -1;
+}
+
+/* Logs a message to emulator's stdout.
+ * Param:
+ * prio - Message priority (debug, info, or error)
+ * fmt + rest - Message format and parameters.
+ */
+static void
+qemu_log(int prio, const char* fmt, ...)
+{
+ va_list ap;
+ char buf[4096];
+ const char* prefix;
+
+ /* Choose message prefix depending on the priority value. */
+ switch (prio) {
+ case ANDROID_LOG_ERROR:
+ if (!tracing_enabled(error)) {
+ return;
+ }
+ prefix = "E";
+ break;
+ case ANDROID_LOG_INFO:
+ if (!tracing_enabled(info)) {
+ return;
+ }
+ prefix = "I";
+ break;
+ case ANDROID_LOG_DEBUG:
+ default:
+ if (!tracing_enabled(debug)) {
+ return;
+ }
+ prefix = "D";
+ break;
+ }
+
+ va_start(ap, fmt);
+ vsnprintf(buf, sizeof(buf), fmt, ap);
+ va_end(ap);
+ buf[sizeof(buf) - 1] = '\0';
+
+ TR("%s/memcheck: %s\n", prefix, buf);
+}
+
+/* Dumps content of memory allocation descriptor to a string.
+ * Param:
+ * str - String to dump descriptor to.
+ * str_buf_size - Size of string's buffer.
+ * desc - Descriptor to dump.
+ */
+static void
+dump_malloc_descriptor(char* str, size_t str_buf_size, const MallocDesc* desc)
+{
+ if (str_buf_size) {
+ snprintf(str, str_buf_size,
+ "MDesc: %p: %X <-> %X [%u + %u + %u] by pid=%03u in libc_pid=%03u",
+ mallocdesc_user_ptr(desc), (uint32_t)desc->ptr,
+ (uint32_t)mallocdesc_alloc_end(desc), desc->prefix_size,
+ desc->requested_bytes, desc->suffix_size, desc->allocator_pid,
+ desc->libc_pid);
+ str[str_buf_size - 1] = '\0';
+ }
+}
+
+#if TEST_ACCESS_VIOLATIONS
+/* Causes an access violation on allocation descriptor, and verifies that
+ * violation has been detected by memory checker in the emulator.
+ */
+static void
+test_access_violation(const MallocDesc* desc)
+{
+ MallocDesc desc_chk;
+ char ch;
+ volatile char* prefix = (volatile char*)desc->ptr;
+ volatile char* suffix = (volatile char*)mallocdesc_user_ptr(desc) +
+ desc->requested_bytes;
+ /* We're causing AV by reading from the prefix and suffix areas of the
+ * allocated block. This should produce two access violations, so when we
+ * get allocation descriptor from QEMU, av_counter should be bigger than
+ * av_counter of the original descriptor by 2. */
+ ch = *prefix;
+ ch = *suffix;
+ if (!query_qemu_malloc_info(mallocdesc_user_ptr(desc), &desc_chk, 2) &&
+ desc_chk.av_count != (desc->av_count + 2)) {
+ log_mdesc(error, &desc_chk,
+ "<libc_pid=%03u, pid=%03u>: malloc: Access violation test failed:\n"
+ "Expected violations count %u is not equal to the actually reported %u",
+ malloc_pid, getpid(), desc->av_count + 2,
+ desc_chk.av_count);
+ }
+}
+#endif // TEST_ACCESS_VIOLATIONS
+
+// =============================================================================
+// API routines
+// =============================================================================
+
+void* qemu_instrumented_malloc(size_t bytes);
+void qemu_instrumented_free(void* mem);
+void* qemu_instrumented_calloc(size_t n_elements, size_t elem_size);
+void* qemu_instrumented_realloc(void* mem, size_t bytes);
+void* qemu_instrumented_memalign(size_t alignment, size_t bytes);
+
+/* Initializes malloc debugging instrumentation for the emulator.
+ * This routine is called from malloc_init_impl routine implemented in
+ * bionic/libc/bionic/malloc_debug_common.c when malloc debugging gets
+ * initialized for a process. The way malloc debugging is implemented
+ * guarantees that this routine will be called just once per
+ * process.
+ * Return:
+ * 0 on success, or -1 on failure.
+*/
+int
+malloc_debug_initialize(void)
+{
+    /* We will be using the emulator's magic page to report memory allocation
+     * activities. In essence, what the magic page does is translate writes to
+     * the memory-mapped space into writes to an I/O port that the emulator
+     * "listens to" on the other end. Note that until we open and map that
+     * device, logging to the emulator's stdout will not be available. */
+ int fd = open("/dev/qemu_trace", O_RDWR);
+ if (fd < 0) {
+ error_log("Unable to open /dev/qemu_trace");
+ return -1;
+ } else {
+ qtrace = mmap(0, PAGESIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ close(fd);
+
+ if (qtrace == MAP_FAILED) {
+ qtrace = NULL;
+ error_log("Unable to mmap /dev/qemu_trace");
+ return -1;
+ }
+ }
+
+ /* Cache pid of the process this library has been initialized for. */
+ malloc_pid = getpid();
+
+ return 0;
+}
+
+/* Completes malloc debugging instrumentation for the emulator.
+ * Note that this routine is called after successful return from
+ * malloc_debug_initialize, which means that connection to the emulator via
+ * "magic page" has been established.
+ * Param:
+ *  alignment - Alignment requirement set for memory allocations.
+ *  memcheck_param - Emulator's -memcheck option parameters. This string
+ *      contains abbreviations for the guest events that are enabled for tracing.
+ * Return:
+ * 0 on success, or -1 on failure.
+*/
+int
+memcheck_initialize(int alignment, const char* memcheck_param)
+{
+ malloc_alignment = alignment;
+
+ /* Parse -memcheck parameter for the guest tracing flags. */
+ while (*memcheck_param != '\0') {
+ switch (*memcheck_param) {
+ case 'a':
+ // Enable all messages from the guest.
+ tracing_flags |= ALL_TRACING_ENABLED;
+ break;
+ case 'd':
+ // Enable debug messages from the guest.
+ tracing_flags |= DEBUG_TRACING_ENABLED;
+ break;
+ case 'e':
+ // Enable error messages from the guest.
+ tracing_flags |= ERROR_TRACING_ENABLED;
+ break;
+ case 'i':
+ // Enable info messages from the guest.
+ tracing_flags |= INFO_TRACING_ENABLED;
+ break;
+ default:
+ break;
+ }
+ if (tracing_flags == ALL_TRACING_ENABLED) {
+ break;
+ }
+ memcheck_param++;
+ }
+
+ notify_qemu_libc_initialized(malloc_pid);
+
+ debug_log("Instrumented for pid=%03u: malloc=%p, free=%p, calloc=%p, realloc=%p, memalign=%p",
+ malloc_pid, qemu_instrumented_malloc, qemu_instrumented_free,
+ qemu_instrumented_calloc, qemu_instrumented_realloc,
+ qemu_instrumented_memalign);
+
+ return 0;
+}
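For illustration (hypothetical values, not taken from this change), the emulator-side startup would hand this routine the dlmalloc alignment plus the abbreviations of the event classes to be traced:

    /* Enable debug ('d') and error ('e') tracing for a guest using the
     * default 8-byte malloc alignment; info messages stay ADB-only. */
    memcheck_initialize(8, "de");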
+
+/* This routine serves as entry point for 'malloc'.
+ * Primary responsibility of this routine is to allocate requested number of
+ * bytes (plus prefix, and suffix guards), and report allocation to the
+ * emulator.
+ */
+void*
+qemu_instrumented_malloc(size_t bytes)
+{
+ MallocDesc desc;
+
+ /* Initialize block descriptor and allocate memory. Note that dlmalloc
+     * returns a valid pointer on zero allocation. Let's mimic this behavior. */
+ desc.prefix_size = DEFAULT_PREFIX_SIZE;
+ desc.requested_bytes = bytes;
+ desc.suffix_size = DEFAULT_SUFFIX_SIZE;
+ desc.ptr = dlmalloc(mallocdesc_alloc_size(&desc));
+ if (desc.ptr == NULL) {
+ error_log("<libc_pid=%03u, pid=%03u> malloc(%u): dlmalloc(%u) failed.",
+ malloc_pid, getpid(), bytes, mallocdesc_alloc_size(&desc));
+ return NULL;
+ }
+
+ // Fire up event in the emulator.
+ if (notify_qemu_malloc(&desc)) {
+ log_mdesc(error, &desc, "<libc_pid=%03u, pid=%03u>: malloc: notify_malloc failed for ",
+ malloc_pid, getpid());
+ dlfree(desc.ptr);
+ return NULL;
+ } else {
+#if TEST_ACCESS_VIOLATIONS
+ test_access_violation(&desc);
+#endif // TEST_ACCESS_VIOLATIONS
+ log_mdesc(info, &desc, "+++ <libc_pid=%03u, pid=%03u> malloc(%u) -> ",
+ malloc_pid, getpid(), bytes);
+ return mallocdesc_user_ptr(&desc);
+ }
+}
+
+/* This routine serves as entry point for 'free'.
+ * Primary responsibility of this routine is to free the requested memory, and
+ * to report the freed block to the emulator.
+ */
+void
+qemu_instrumented_free(void* mem)
+{
+ MallocDesc desc;
+
+ if (mem == NULL) {
+        // Just let a NULL free go through.
+ dlfree(mem);
+ return;
+ }
+
+ // Query emulator for the freeing block information.
+ if (query_qemu_malloc_info(mem, &desc, 1)) {
+ error_log("<libc_pid=%03u, pid=%03u>: free(%p) query_info failed.",
+ malloc_pid, getpid(), mem);
+ return;
+ }
+
+#if TEST_ACCESS_VIOLATIONS
+ test_access_violation(&desc);
+#endif // TEST_ACCESS_VIOLATIONS
+
+ /* Make sure that pointer that's being freed matches what we expect
+ * for this memory block. Note that this violation should be already
+ * caught in the emulator. */
+ if (mem != mallocdesc_user_ptr(&desc)) {
+ log_mdesc(error, &desc, "<libc_pid=%03u, pid=%03u>: free(%p) is invalid for ",
+ malloc_pid, getpid(), mem);
+ return;
+ }
+
+ // Fire up event in the emulator and free block that was actually allocated.
+ if (notify_qemu_free(mem)) {
+ log_mdesc(error, &desc, "<libc_pid=%03u, pid=%03u>: free(%p) notify_free failed for ",
+ malloc_pid, getpid(), mem);
+ } else {
+ log_mdesc(info, &desc, "--- <libc_pid=%03u, pid=%03u> free(%p) -> ",
+ malloc_pid, getpid(), mem);
+ dlfree(desc.ptr);
+ }
+}
+
+/* This routine serves as entry point for 'calloc'.
+ * This routine behaves similarly to qemu_instrumented_malloc.
+ */
+void*
+qemu_instrumented_calloc(size_t n_elements, size_t elem_size)
+{
+ MallocDesc desc;
+ void* ret;
+ size_t total_size;
+ size_t total_elements;
+
+ if (n_elements == 0 || elem_size == 0) {
+        // Just let a zero-byte allocation go through.
+ info_log("::: <libc_pid=%03u, pid=%03u>: Zero calloc redir to malloc",
+ malloc_pid, getpid());
+ return qemu_instrumented_malloc(0);
+ }
+
+ /* Fail on overflow - just to be safe even though this code runs only
+ * within the debugging C library, not the production one */
+ if (n_elements && MAX_SIZE_T / n_elements < elem_size) {
+ return NULL;
+ }
+
+ /* Calculating prefix size. The trick here is to make sure that
+ * first element (returned to the caller) is properly aligned. */
+ if (DEFAULT_PREFIX_SIZE >= elem_size) {
+ /* If default alignment is bigger than element size, we will
+ * set our prefix size to the default alignment size. */
+ desc.prefix_size = DEFAULT_PREFIX_SIZE;
+ /* For the suffix we will use whatever bytes remain from the prefix
+ * allocation size, aligned to the size of an element, plus the usual
+ * default suffix size. */
+ desc.suffix_size = (DEFAULT_PREFIX_SIZE % elem_size) +
+ DEFAULT_SUFFIX_SIZE;
+ } else {
+        /* Make sure that the prefix and suffix sizes are at least elem_size,
+         * and that the first element returned to the caller is properly aligned. */
+ desc.prefix_size = elem_size + DEFAULT_PREFIX_SIZE - 1;
+ desc.prefix_size &= ~(malloc_alignment - 1);
+ desc.suffix_size = DEFAULT_SUFFIX_SIZE;
+ }
+ desc.requested_bytes = n_elements * elem_size;
+ total_size = desc.requested_bytes + desc.prefix_size + desc.suffix_size;
+ total_elements = total_size / elem_size;
+ total_size %= elem_size;
+ if (total_size != 0) {
+ // Add extra to the suffix area.
+ total_elements++;
+ desc.suffix_size += (elem_size - total_size);
+ }
+ desc.ptr = dlcalloc(total_elements, elem_size);
+ if (desc.ptr == NULL) {
+ error_log("<libc_pid=%03u, pid=%03u> calloc: dlcalloc(%u(%u), %u) (prx=%u, sfx=%u) failed.",
+ malloc_pid, getpid(), n_elements, total_elements, elem_size,
+ desc.prefix_size, desc.suffix_size);
+ return NULL;
+ }
+
+ if (notify_qemu_malloc(&desc)) {
+ log_mdesc(error, &desc, "<libc_pid=%03u, pid=%03u>: calloc(%u(%u), %u): notify_malloc failed for ",
+ malloc_pid, getpid(), n_elements, total_elements, elem_size);
+ dlfree(desc.ptr);
+ return NULL;
+ } else {
+#if TEST_ACCESS_VIOLATIONS
+ test_access_violation(&desc);
+#endif // TEST_ACCESS_VIOLATIONS
+ log_mdesc(info, &desc, "### <libc_pid=%03u, pid=%03u> calloc(%u(%u), %u) -> ",
+ malloc_pid, getpid(), n_elements, total_elements, elem_size);
+ return mallocdesc_user_ptr(&desc);
+ }
+}
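To make the sizing above concrete, here is a worked example (illustrative, assuming the default malloc_alignment of 8, so DEFAULT_PREFIX_SIZE and DEFAULT_SUFFIX_SIZE are both 32 bytes):

    /* calloc(10, 12): elem_size (12) <= DEFAULT_PREFIX_SIZE (32), so
     *   prefix_size     = 32
     *   suffix_size     = (32 % 12) + 32 = 40
     *   requested_bytes = 10 * 12       = 120
     *   total_size      = 120 + 32 + 40 = 192 = 16 * 12 (no remainder)
     * The block is therefore obtained with dlcalloc(16, 12), and the pointer
     * handed back to the caller is desc.ptr + 32. */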
+
+/* This routine serves as entry point for 'realloc'.
+ * This routine behaves similarly to qemu_instrumented_free +
+ * qemu_instrumented_malloc. Note that this modifies behavior of "shrinking" an
+ * allocation, but overall it doesn't seem to matter, as caller of realloc
+ * should not expect that pointer returned after shrinking will remain the same.
+ */
+void*
+qemu_instrumented_realloc(void* mem, size_t bytes)
+{
+ MallocDesc new_desc;
+ MallocDesc cur_desc;
+ size_t to_copy;
+ void* ret;
+
+ if (mem == NULL) {
+        // Nothing to realloc; just do a regular malloc.
+ info_log("::: <libc_pid=%03u, pid=%03u>: realloc(%p, %u) redir to malloc",
+ malloc_pid, getpid(), mem, bytes);
+ return qemu_instrumented_malloc(bytes);
+ }
+
+ if (bytes == 0) {
+ // This is a "free" condition.
+ info_log("::: <libc_pid=%03u, pid=%03u>: realloc(%p, %u) redir to free and malloc",
+ malloc_pid, getpid(), mem, bytes);
+ qemu_instrumented_free(mem);
+
+ // This is what dlrealloc does for a "free" realloc.
+ return NULL;
+ }
+
+ // Query emulator for the reallocating block information.
+ if (query_qemu_malloc_info(mem, &cur_desc, 2)) {
+ // Note that this violation should be already caught in the emulator.
+ error_log("<libc_pid=%03u, pid=%03u>: realloc(%p, %u) query_info failed.",
+ malloc_pid, getpid(), mem, bytes);
+ return NULL;
+ }
+
+#if TEST_ACCESS_VIOLATIONS
+ test_access_violation(&cur_desc);
+#endif // TEST_ACCESS_VIOLATIONS
+
+ /* Make sure that reallocating pointer value is what we would expect
+ * for this memory block. Note that this violation should be already caught
+ * in the emulator.*/
+ if (mem != mallocdesc_user_ptr(&cur_desc)) {
+ log_mdesc(error, &cur_desc, "<libc_pid=%03u, pid=%03u>: realloc(%p, %u) is invalid for ",
+ malloc_pid, getpid(), mem, bytes);
+ return NULL;
+ }
+
+ /* TODO: We're a bit inefficient here, always allocating new block from
+ * the heap. If this realloc shrinks current buffer, we can just do the
+ * shrinking "in place", adjusting suffix_size in the allocation descriptor
+ * for this block that is stored in the emulator. */
+
+ // Initialize descriptor for the new block.
+ new_desc.prefix_size = DEFAULT_PREFIX_SIZE;
+ new_desc.requested_bytes = bytes;
+ new_desc.suffix_size = DEFAULT_SUFFIX_SIZE;
+ new_desc.ptr = dlmalloc(mallocdesc_alloc_size(&new_desc));
+ if (new_desc.ptr == NULL) {
+ log_mdesc(error, &cur_desc, "<libc_pid=%03u, pid=%03u>: realloc(%p, %u): dlmalloc(%u) failed on ",
+ malloc_pid, getpid(), mem, bytes,
+ mallocdesc_alloc_size(&new_desc));
+ return NULL;
+ }
+ ret = mallocdesc_user_ptr(&new_desc);
+
+ // Copy user data from old block to the new one.
+ to_copy = bytes < cur_desc.requested_bytes ? bytes :
+ cur_desc.requested_bytes;
+ if (to_copy != 0) {
+ memcpy(ret, mallocdesc_user_ptr(&cur_desc), to_copy);
+ }
+
+ // Register new block with emulator.
+ if(notify_qemu_malloc(&new_desc)) {
+ log_mdesc(error, &new_desc, "<libc_pid=%03u, pid=%03u>: realloc(%p, %u) notify_malloc failed -> ",
+ malloc_pid, getpid(), mem, bytes);
+ log_mdesc(error, &cur_desc, " <- ");
+ dlfree(new_desc.ptr);
+ return NULL;
+ }
+
+#if TEST_ACCESS_VIOLATIONS
+ test_access_violation(&new_desc);
+#endif // TEST_ACCESS_VIOLATIONS
+
+ // Free old block.
+ if (notify_qemu_free(mem)) {
+ log_mdesc(error, &cur_desc, "<libc_pid=%03u, pid=%03u>: realloc(%p, %u): notify_free failed for ",
+ malloc_pid, getpid(), mem, bytes);
+        /* Since we registered the new descriptor with the emulator, we need
+ * to unregister it before freeing newly allocated block. */
+ notify_qemu_free(mallocdesc_user_ptr(&new_desc));
+ dlfree(new_desc.ptr);
+ return NULL;
+ }
+ dlfree(cur_desc.ptr);
+
+ log_mdesc(info, &new_desc, "=== <libc_pid=%03u, pid=%03u>: realloc(%p, %u) -> ",
+ malloc_pid, getpid(), mem, bytes);
+ log_mdesc(info, &cur_desc, " <- ");
+
+ return ret;
+}
+
+/* This routine serves as entry point for 'memalign'.
+ * This routine behaves similarly to qemu_instrumented_malloc.
+ */
+void*
+qemu_instrumented_memalign(size_t alignment, size_t bytes)
+{
+ MallocDesc desc;
+
+ if (bytes == 0) {
+        // Just let a zero-byte allocation go through.
+ info_log("::: <libc_pid=%03u, pid=%03u>: memalign(%X, %u) redir to malloc",
+ malloc_pid, getpid(), alignment, bytes);
+ return qemu_instrumented_malloc(0);
+ }
+
+ /* Prefix size for aligned allocation must be equal to the alignment used
+ * for allocation in order to ensure proper alignment of the returned
+ * pointer, in case that alignment requirement is greater than prefix
+ * size. */
+ desc.prefix_size = alignment > DEFAULT_PREFIX_SIZE ? alignment :
+ DEFAULT_PREFIX_SIZE;
+ desc.requested_bytes = bytes;
+ desc.suffix_size = DEFAULT_SUFFIX_SIZE;
+ desc.ptr = dlmemalign(desc.prefix_size, mallocdesc_alloc_size(&desc));
+ if (desc.ptr == NULL) {
+ error_log("<libc_pid=%03u, pid=%03u> memalign(%X, %u): dlmalloc(%u) failed.",
+ malloc_pid, getpid(), alignment, bytes,
+ mallocdesc_alloc_size(&desc));
+ return NULL;
+ }
+ if (notify_qemu_malloc(&desc)) {
+ log_mdesc(error, &desc, "<libc_pid=%03u, pid=%03u>: memalign(%X, %u): notify_malloc failed for ",
+ malloc_pid, getpid(), alignment, bytes);
+ dlfree(desc.ptr);
+ return NULL;
+ }
+
+#if TEST_ACCESS_VIOLATIONS
+ test_access_violation(&desc);
+#endif // TEST_ACCESS_VIOLATIONS
+
+ log_mdesc(info, &desc, "@@@ <libc_pid=%03u, pid=%03u> memalign(%X, %u) -> ",
+ malloc_pid, getpid(), alignment, bytes);
+ return mallocdesc_user_ptr(&desc);
+}
diff --git a/libc/bionic/pthread.c b/libc/bionic/pthread.c
index 7d4056d2556bcb0cb9591699f288999086b58b50..ae44b067a97245575d004c49d6d72e45311dad9c 100644 (file)
--- a/libc/bionic/pthread.c
+++ b/libc/bionic/pthread.c
#include <memory.h>
#include <assert.h>
#include <malloc.h>
+#include <linux/futex.h>
extern int __pthread_clone(int (*fn)(void*), void *child_stack, int flags, void *arg);
extern void _exit_with_stack_teardown(void * stackBase, int stackSize, int retCode);
extern void _exit_thread(int retCode);
extern int __set_errno(int);
+#define __likely(cond) __builtin_expect(!!(cond), 1)
+#define __unlikely(cond) __builtin_expect(!!(cond), 0)
+
void _thread_created_hook(pid_t thread_id) __attribute__((noinline));
#define PTHREAD_ATTR_FLAG_DETACHED 0x00000001
int __futex_wait(volatile void *ftx, int val, const struct timespec *timeout);
int __futex_wake(volatile void *ftx, int count);
+int __futex_syscall3(volatile void *ftx, int op, int val);
+int __futex_syscall4(volatile void *ftx, int op, int val, const struct timespec *timeout);
+
+#ifndef FUTEX_PRIVATE_FLAG
+#define FUTEX_PRIVATE_FLAG 128
+#endif
+
+#ifndef FUTEX_WAIT_PRIVATE
+#define FUTEX_WAIT_PRIVATE (FUTEX_WAIT|FUTEX_PRIVATE_FLAG)
+#endif
+
+#ifndef FUTEX_WAKE_PRIVATE
+#define FUTEX_WAKE_PRIVATE (FUTEX_WAKE|FUTEX_PRIVATE_FLAG)
+#endif
+
// mutex lock states
//
// 0: unlocked
* bits: name description
* 31-16 tid owner thread's kernel id (recursive and errorcheck only)
* 15-14 type mutex type
- * 13-2 counter counter of recursive mutexes
+ * 13 shared process-shared flag
+ * 12-2 counter counter of recursive mutexes
* 1-0 state lock state (0, 1 or 2)
*/
#define MUTEX_TYPE_ERRORCHECK 0x8000
#define MUTEX_COUNTER_SHIFT 2
-#define MUTEX_COUNTER_MASK 0x3ffc
-
+#define MUTEX_COUNTER_MASK 0x1ffc
+#define MUTEX_SHARED_MASK 0x2000
+/* a mutex attribute holds the following fields
+ *
+ * bits: name description
+ * 0-3 type type of mutex
+ * 4 shared process-shared flag
+ */
+#define MUTEXATTR_TYPE_MASK 0x000f
+#define MUTEXATTR_SHARED_MASK 0x0010
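As an illustrative sketch (not part of the change), the masks above pack a mutex value as follows, for a process-shared recursive mutex owned by kernel thread id 0x1234 with a recursion counter of 1, in the locked-uncontended state:

    int value = (0x1234 << 16)               /* owner tid,         bits 31-16 */
              | MUTEX_TYPE_RECURSIVE         /* mutex type,        bits 15-14 */
              | MUTEX_SHARED_MASK            /* process-shared,    bit  13    */
              | (1 << MUTEX_COUNTER_SHIFT)   /* recursion counter, bits 12-2  */
              | 1;                           /* lock state,        bits 1-0   */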
int pthread_mutexattr_init(pthread_mutexattr_t *attr)
int pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *type)
{
- if (attr && *attr >= PTHREAD_MUTEX_NORMAL &&
- *attr <= PTHREAD_MUTEX_ERRORCHECK ) {
- *type = *attr;
- return 0;
+ if (attr) {
+ int atype = (*attr & MUTEXATTR_TYPE_MASK);
+
+ if (atype >= PTHREAD_MUTEX_NORMAL &&
+ atype <= PTHREAD_MUTEX_ERRORCHECK) {
+ *type = atype;
+ return 0;
+ }
}
return EINVAL;
}
{
if (attr && type >= PTHREAD_MUTEX_NORMAL &&
type <= PTHREAD_MUTEX_ERRORCHECK ) {
- *attr = type;
+ *attr = (*attr & ~MUTEXATTR_TYPE_MASK) | type;
return 0;
}
return EINVAL;
switch (pshared) {
case PTHREAD_PROCESS_PRIVATE:
+ *attr &= ~MUTEXATTR_SHARED_MASK;
+ return 0;
+
case PTHREAD_PROCESS_SHARED:
/* our current implementation of pthread actually supports shared
* mutexes but won't cleanup if a process dies with the mutex held.
* Nevertheless, it's better than nothing. Shared mutexes are used
* by surfaceflinger and audioflinger.
*/
+ *attr |= MUTEXATTR_SHARED_MASK;
return 0;
}
-
- return ENOTSUP;
+ return EINVAL;
}
int pthread_mutexattr_getpshared(pthread_mutexattr_t *attr, int *pshared)
{
- if (!attr)
+ if (!attr || !pshared)
return EINVAL;
- *pshared = PTHREAD_PROCESS_PRIVATE;
+ *pshared = (*attr & MUTEXATTR_SHARED_MASK) ? PTHREAD_PROCESS_SHARED
+ : PTHREAD_PROCESS_PRIVATE;
return 0;
}
int pthread_mutex_init(pthread_mutex_t *mutex,
const pthread_mutexattr_t *attr)
{
- if ( mutex ) {
- if (attr == NULL) {
- mutex->value = MUTEX_TYPE_NORMAL;
- return 0;
- }
- switch ( *attr ) {
- case PTHREAD_MUTEX_NORMAL:
- mutex->value = MUTEX_TYPE_NORMAL;
- return 0;
+ int value = 0;
- case PTHREAD_MUTEX_RECURSIVE:
- mutex->value = MUTEX_TYPE_RECURSIVE;
- return 0;
+ if (mutex == NULL)
+ return EINVAL;
- case PTHREAD_MUTEX_ERRORCHECK:
- mutex->value = MUTEX_TYPE_ERRORCHECK;
- return 0;
- }
+ if (__likely(attr == NULL)) {
+ mutex->value = MUTEX_TYPE_NORMAL;
+ return 0;
}
- return EINVAL;
+
+ if ((*attr & MUTEXATTR_SHARED_MASK) != 0)
+ value |= MUTEX_SHARED_MASK;
+
+ switch (*attr & MUTEXATTR_TYPE_MASK) {
+ case PTHREAD_MUTEX_NORMAL:
+ value |= MUTEX_TYPE_NORMAL;
+ break;
+ case PTHREAD_MUTEX_RECURSIVE:
+ value |= MUTEX_TYPE_RECURSIVE;
+ break;
+ case PTHREAD_MUTEX_ERRORCHECK:
+ value |= MUTEX_TYPE_ERRORCHECK;
+ break;
+ default:
+ return EINVAL;
+ }
+
+ mutex->value = value;
+ return 0;
}
int pthread_mutex_destroy(pthread_mutex_t *mutex)
{
+ if (__unlikely(mutex == NULL))
+ return EINVAL;
+
mutex->value = 0xdead10cc;
return 0;
}
static __inline__ void
_normal_lock(pthread_mutex_t* mutex)
{
+ /* We need to preserve the shared flag during operations */
+ int shared = mutex->value & MUTEX_SHARED_MASK;
/*
* The common case is an unlocked mutex, so we begin by trying to
* change the lock's state from 0 to 1. __atomic_cmpxchg() returns 0
* if it made the swap successfully. If the result is nonzero, this
* lock is already held by another thread.
*/
- if (__atomic_cmpxchg(0, 1, &mutex->value ) != 0) {
+ if (__atomic_cmpxchg(shared|0, shared|1, &mutex->value ) != 0) {
/*
* We want to go to sleep until the mutex is available, which
* requires promoting it to state 2. We need to swap in the new
* that the mutex is in state 2 when we go to sleep on it, which
* guarantees a wake-up call.
*/
- while (__atomic_swap(2, &mutex->value ) != 0)
- __futex_wait(&mutex->value, 2, 0);
+ int wait_op = shared ? FUTEX_WAIT : FUTEX_WAIT_PRIVATE;
+
+ while (__atomic_swap(shared|2, &mutex->value ) != (shared|0))
+ __futex_syscall4(&mutex->value, wait_op, shared|2, 0);
}
}
static __inline__ void
_normal_unlock(pthread_mutex_t* mutex)
{
+ /* We need to preserve the shared flag during operations */
+ int shared = mutex->value & MUTEX_SHARED_MASK;
+
/*
- * The mutex value will be 1 or (rarely) 2. We use an atomic decrement
+ * The mutex state will be 1 or (rarely) 2. We use an atomic decrement
* to release the lock. __atomic_dec() returns the previous value;
* if it wasn't 1 we have to do some additional work.
*/
- if (__atomic_dec(&mutex->value) != 1) {
+ if (__atomic_dec(&mutex->value) != (shared|1)) {
+ int wake_op = shared ? FUTEX_WAKE : FUTEX_WAKE_PRIVATE;
/*
* Start by releasing the lock. The decrement changed it from
* "contended lock" to "uncontended lock", which means we still
* _normal_lock(), because the __futex_wait() call there will
* return immediately if the mutex value isn't 2.
*/
- mutex->value = 0;
+ mutex->value = shared;
/*
* Wake up one waiting thread. We don't know which thread will be
* Either way we have correct behavior and nobody is orphaned on
* the wait queue.
*/
- __futex_wake(&mutex->value, 1);
+ __futex_syscall3(&mutex->value, wake_op, 1);
}
}
static void
_recursive_lock(void)
{
- _normal_lock( &__recursive_lock);
+ _normal_lock(&__recursive_lock);
}
static void
_recursive_unlock(void)
{
- _normal_unlock( &__recursive_lock );
+ _normal_unlock(&__recursive_lock );
}
-#define __likely(cond) __builtin_expect(!!(cond), 1)
-#define __unlikely(cond) __builtin_expect(!!(cond), 0)
-
int pthread_mutex_lock(pthread_mutex_t *mutex)
{
- if (__likely(mutex != NULL))
- {
- int mtype = (mutex->value & MUTEX_TYPE_MASK);
+ int mtype, tid, new_lock_type, shared, wait_op;
- if ( __likely(mtype == MUTEX_TYPE_NORMAL) ) {
- _normal_lock(mutex);
- }
- else
- {
- int tid = __get_thread()->kernel_id;
+ if (__unlikely(mutex == NULL))
+ return EINVAL;
- if ( tid == MUTEX_OWNER(mutex) )
- {
- int oldv, counter;
+ mtype = (mutex->value & MUTEX_TYPE_MASK);
+ shared = (mutex->value & MUTEX_SHARED_MASK);
- if (mtype == MUTEX_TYPE_ERRORCHECK) {
- /* trying to re-lock a mutex we already acquired */
- return EDEADLK;
- }
- /*
- * We own the mutex, but other threads are able to change
- * the contents (e.g. promoting it to "contended"), so we
- * need to hold the global lock.
- */
- _recursive_lock();
- oldv = mutex->value;
- counter = (oldv + (1 << MUTEX_COUNTER_SHIFT)) & MUTEX_COUNTER_MASK;
- mutex->value = (oldv & ~MUTEX_COUNTER_MASK) | counter;
- _recursive_unlock();
- }
- else
- {
- /*
- * If the new lock is available immediately, we grab it in
- * the "uncontended" state.
- */
- int new_lock_type = 1;
-
- for (;;) {
- int oldv;
-
- _recursive_lock();
- oldv = mutex->value;
- if (oldv == mtype) { /* uncontended released lock => 1 or 2 */
- mutex->value = ((tid << 16) | mtype | new_lock_type);
- } else if ((oldv & 3) == 1) { /* locked state 1 => state 2 */
- oldv ^= 3;
- mutex->value = oldv;
- }
- _recursive_unlock();
-
- if (oldv == mtype)
- break;
-
- /*
- * The lock was held, possibly contended by others. From
- * now on, if we manage to acquire the lock, we have to
- * assume that others are still contending for it so that
- * we'll wake them when we unlock it.
- */
- new_lock_type = 2;
-
- __futex_wait( &mutex->value, oldv, 0 );
- }
- }
+ /* Handle normal case first */
+ if ( __likely(mtype == MUTEX_TYPE_NORMAL) ) {
+ _normal_lock(mutex);
+ return 0;
+ }
+
+ /* Do we already own this recursive or error-check mutex ? */
+ tid = __get_thread()->kernel_id;
+ if ( tid == MUTEX_OWNER(mutex) )
+ {
+ int oldv, counter;
+
+ if (mtype == MUTEX_TYPE_ERRORCHECK) {
+ /* trying to re-lock a mutex we already acquired */
+ return EDEADLK;
}
+ /*
+ * We own the mutex, but other threads are able to change
+ * the contents (e.g. promoting it to "contended"), so we
+ * need to hold the global lock.
+ */
+ _recursive_lock();
+ oldv = mutex->value;
+ counter = (oldv + (1 << MUTEX_COUNTER_SHIFT)) & MUTEX_COUNTER_MASK;
+ mutex->value = (oldv & ~MUTEX_COUNTER_MASK) | counter;
+ _recursive_unlock();
return 0;
}
- return EINVAL;
+
+ /* We don't own the mutex, so try to get it.
+ *
+ * First, we try to change its state from 0 to 1, if this
+ * doesn't work, try to change it to state 2.
+ */
+ new_lock_type = 1;
+
+ /* compute futex wait opcode and restore shared flag in mtype */
+ wait_op = shared ? FUTEX_WAIT : FUTEX_WAIT_PRIVATE;
+ mtype |= shared;
+
+ for (;;) {
+ int oldv;
+
+ _recursive_lock();
+ oldv = mutex->value;
+ if (oldv == mtype) { /* uncontended released lock => 1 or 2 */
+ mutex->value = ((tid << 16) | mtype | new_lock_type);
+ } else if ((oldv & 3) == 1) { /* locked state 1 => state 2 */
+ oldv ^= 3;
+ mutex->value = oldv;
+ }
+ _recursive_unlock();
+
+ if (oldv == mtype)
+ break;
+
+ /*
+ * The lock was held, possibly contended by others. From
+ * now on, if we manage to acquire the lock, we have to
+ * assume that others are still contending for it so that
+ * we'll wake them when we unlock it.
+ */
+ new_lock_type = 2;
+
+ __futex_syscall4(&mutex->value, wait_op, oldv, NULL);
+ }
+ return 0;
}
int pthread_mutex_unlock(pthread_mutex_t *mutex)
{
- if (__likely(mutex != NULL))
- {
- int mtype = (mutex->value & MUTEX_TYPE_MASK);
+ int mtype, tid, oldv, shared;
- if (__likely(mtype == MUTEX_TYPE_NORMAL)) {
- _normal_unlock(mutex);
- }
- else
- {
- int tid = __get_thread()->kernel_id;
+ if (__unlikely(mutex == NULL))
+ return EINVAL;
- if ( tid == MUTEX_OWNER(mutex) )
- {
- int oldv;
-
- _recursive_lock();
- oldv = mutex->value;
- if (oldv & MUTEX_COUNTER_MASK) {
- mutex->value = oldv - (1 << MUTEX_COUNTER_SHIFT);
- oldv = 0;
- } else {
- mutex->value = mtype;
- }
- _recursive_unlock();
+ mtype = (mutex->value & MUTEX_TYPE_MASK);
+ shared = (mutex->value & MUTEX_SHARED_MASK);
- if ((oldv & 3) == 2)
- __futex_wake( &mutex->value, 1 );
- }
- else {
- /* trying to unlock a lock we do not own */
- return EPERM;
- }
- }
+ /* Handle common case first */
+ if (__likely(mtype == MUTEX_TYPE_NORMAL)) {
+ _normal_unlock(mutex);
return 0;
}
- return EINVAL;
+
+ /* Do we already own this recursive or error-check mutex ? */
+ tid = __get_thread()->kernel_id;
+ if ( tid != MUTEX_OWNER(mutex) )
+ return EPERM;
+
+ /* We do, decrement counter or release the mutex if it is 0 */
+ _recursive_lock();
+ oldv = mutex->value;
+ if (oldv & MUTEX_COUNTER_MASK) {
+ mutex->value = oldv - (1 << MUTEX_COUNTER_SHIFT);
+ oldv = 0;
+ } else {
+ mutex->value = shared | mtype;
+ }
+ _recursive_unlock();
+
+ /* Wake one waiting thread, if any */
+ if ((oldv & 3) == 2) {
+ int wake_op = shared ? FUTEX_WAKE : FUTEX_WAKE_PRIVATE;
+ __futex_syscall3(&mutex->value, wake_op, 1);
+ }
+ return 0;
}
int pthread_mutex_trylock(pthread_mutex_t *mutex)
{
- if (__likely(mutex != NULL))
+ int mtype, tid, oldv, shared;
+
+ if (__unlikely(mutex == NULL))
+ return EINVAL;
+
+ mtype = (mutex->value & MUTEX_TYPE_MASK);
+ shared = (mutex->value & MUTEX_SHARED_MASK);
+
+ /* Handle common case first */
+ if ( __likely(mtype == MUTEX_TYPE_NORMAL) )
{
- int mtype = (mutex->value & MUTEX_TYPE_MASK);
+ if (__atomic_cmpxchg(shared|0, shared|1, &mutex->value) == 0)
+ return 0;
- if ( __likely(mtype == MUTEX_TYPE_NORMAL) )
- {
- if (__atomic_cmpxchg(0, 1, &mutex->value) == 0)
- return 0;
+ return EBUSY;
+ }
- return EBUSY;
- }
- else
- {
- int tid = __get_thread()->kernel_id;
- int oldv;
+ /* Do we already own this recursive or error-check mutex ? */
+ tid = __get_thread()->kernel_id;
+ if ( tid == MUTEX_OWNER(mutex) )
+ {
+ int counter;
- if ( tid == MUTEX_OWNER(mutex) )
- {
- int oldv, counter;
+ if (mtype == MUTEX_TYPE_ERRORCHECK) {
+ /* already locked by ourselves */
+ return EDEADLK;
+ }
- if (mtype == MUTEX_TYPE_ERRORCHECK) {
- /* already locked by ourselves */
- return EDEADLK;
- }
+ _recursive_lock();
+ oldv = mutex->value;
+ counter = (oldv + (1 << MUTEX_COUNTER_SHIFT)) & MUTEX_COUNTER_MASK;
+ mutex->value = (oldv & ~MUTEX_COUNTER_MASK) | counter;
+ _recursive_unlock();
+ return 0;
+ }
- _recursive_lock();
- oldv = mutex->value;
- counter = (oldv + (1 << MUTEX_COUNTER_SHIFT)) & MUTEX_COUNTER_MASK;
- mutex->value = (oldv & ~MUTEX_COUNTER_MASK) | counter;
- _recursive_unlock();
- return 0;
- }
+ /* Restore sharing bit in mtype */
+ mtype |= shared;
- /* try to lock it */
- _recursive_lock();
- oldv = mutex->value;
- if (oldv == mtype) /* uncontended released lock => state 1 */
- mutex->value = ((tid << 16) | mtype | 1);
- _recursive_unlock();
+ /* Try to lock it, just once. */
+ _recursive_lock();
+ oldv = mutex->value;
+ if (oldv == mtype) /* uncontended released lock => state 1 */
+ mutex->value = ((tid << 16) | mtype | 1);
+ _recursive_unlock();
- if (oldv != mtype)
- return EBUSY;
+ if (oldv != mtype)
+ return EBUSY;
- return 0;
- }
- }
- return EINVAL;
+ return 0;
}
clockid_t clock = CLOCK_MONOTONIC;
struct timespec abstime;
struct timespec ts;
+ int mtype, tid, oldv, new_lock_type, shared, wait_op;
/* compute absolute expiration time */
__timespec_to_relative_msec(&abstime, msecs, clock);
- if (__likely(mutex != NULL))
- {
- int mtype = (mutex->value & MUTEX_TYPE_MASK);
+ if (__unlikely(mutex == NULL))
+ return EINVAL;
- if ( __likely(mtype == MUTEX_TYPE_NORMAL) )
- {
- /* fast path for unconteded lock */
- if (__atomic_cmpxchg(0, 1, &mutex->value) == 0)
- return 0;
+ mtype = (mutex->value & MUTEX_TYPE_MASK);
+ shared = (mutex->value & MUTEX_SHARED_MASK);
- /* loop while needed */
- while (__atomic_swap(2, &mutex->value) != 0) {
- if (__timespec_to_absolute(&ts, &abstime, clock) < 0)
- return EBUSY;
+ /* Handle common case first */
+ if ( __likely(mtype == MUTEX_TYPE_NORMAL) )
+ {
+ int wait_op = shared ? FUTEX_WAIT : FUTEX_WAIT_PRIVATE;
- __futex_wait(&mutex->value, 2, &ts);
- }
+        /* fast path for uncontended lock */
+ if (__atomic_cmpxchg(shared|0, shared|1, &mutex->value) == 0)
return 0;
+
+ /* loop while needed */
+ while (__atomic_swap(shared|2, &mutex->value) != (shared|0)) {
+ if (__timespec_to_absolute(&ts, &abstime, clock) < 0)
+ return EBUSY;
+
+ __futex_syscall4(&mutex->value, wait_op, shared|2, &ts);
}
- else
- {
- int tid = __get_thread()->kernel_id;
- int oldv;
+ return 0;
+ }
- if ( tid == MUTEX_OWNER(mutex) )
- {
- int oldv, counter;
+ /* Do we already own this recursive or error-check mutex ? */
+ tid = __get_thread()->kernel_id;
+ if ( tid == MUTEX_OWNER(mutex) )
+ {
+ int oldv, counter;
- if (mtype == MUTEX_TYPE_ERRORCHECK) {
- /* already locked by ourselves */
- return EDEADLK;
- }
+ if (mtype == MUTEX_TYPE_ERRORCHECK) {
+ /* already locked by ourselves */
+ return EDEADLK;
+ }
- _recursive_lock();
- oldv = mutex->value;
- counter = (oldv + (1 << MUTEX_COUNTER_SHIFT)) & MUTEX_COUNTER_MASK;
- mutex->value = (oldv & ~MUTEX_COUNTER_MASK) | counter;
- _recursive_unlock();
- return 0;
- }
- else
- {
- /*
- * If the new lock is available immediately, we grab it in
- * the "uncontended" state.
- */
- int new_lock_type = 1;
-
- for (;;) {
- int oldv;
- struct timespec ts;
-
- _recursive_lock();
- oldv = mutex->value;
- if (oldv == mtype) { /* uncontended released lock => 1 or 2 */
- mutex->value = ((tid << 16) | mtype | new_lock_type);
- } else if ((oldv & 3) == 1) { /* locked state 1 => state 2 */
- oldv ^= 3;
- mutex->value = oldv;
- }
- _recursive_unlock();
-
- if (oldv == mtype)
- break;
-
- /*
- * The lock was held, possibly contended by others. From
- * now on, if we manage to acquire the lock, we have to
- * assume that others are still contending for it so that
- * we'll wake them when we unlock it.
- */
- new_lock_type = 2;
-
- if (__timespec_to_absolute(&ts, &abstime, clock) < 0)
- return EBUSY;
-
- __futex_wait( &mutex->value, oldv, &ts );
- }
- return 0;
- }
+ _recursive_lock();
+ oldv = mutex->value;
+ counter = (oldv + (1 << MUTEX_COUNTER_SHIFT)) & MUTEX_COUNTER_MASK;
+ mutex->value = (oldv & ~MUTEX_COUNTER_MASK) | counter;
+ _recursive_unlock();
+ return 0;
+ }
+
+ /* We don't own the mutex, so try to get it.
+ *
+ * First, we try to change its state from 0 to 1, if this
+ * doesn't work, try to change it to state 2.
+ */
+ new_lock_type = 1;
+
+ /* Compute wait op and restore sharing bit in mtype */
+ wait_op = shared ? FUTEX_WAIT : FUTEX_WAIT_PRIVATE;
+ mtype |= shared;
+
+ for (;;) {
+ int oldv;
+ struct timespec ts;
+
+ _recursive_lock();
+ oldv = mutex->value;
+ if (oldv == mtype) { /* uncontended released lock => 1 or 2 */
+ mutex->value = ((tid << 16) | mtype | new_lock_type);
+ } else if ((oldv & 3) == 1) { /* locked state 1 => state 2 */
+ oldv ^= 3;
+ mutex->value = oldv;
}
+ _recursive_unlock();
+
+ if (oldv == mtype)
+ break;
+
+ /*
+ * The lock was held, possibly contended by others. From
+ * now on, if we manage to acquire the lock, we have to
+ * assume that others are still contending for it so that
+ * we'll wake them when we unlock it.
+ */
+ new_lock_type = 2;
+
+ if (__timespec_to_absolute(&ts, &abstime, clock) < 0)
+ return EBUSY;
+
+ __futex_syscall4(&mutex->value, wait_op, oldv, &ts);
}
- return EINVAL;
+ return 0;
}
+int pthread_condattr_init(pthread_condattr_t *attr)
+{
+ if (attr == NULL)
+ return EINVAL;
+
+ *attr = PTHREAD_PROCESS_PRIVATE;
+ return 0;
+}
+
+int pthread_condattr_getpshared(pthread_condattr_t *attr, int *pshared)
+{
+ if (attr == NULL || pshared == NULL)
+ return EINVAL;
+
+ *pshared = *attr;
+ return 0;
+}
+
+int pthread_condattr_setpshared(pthread_condattr_t *attr, int pshared)
+{
+ if (attr == NULL)
+ return EINVAL;
+
+ if (pshared != PTHREAD_PROCESS_SHARED &&
+ pshared != PTHREAD_PROCESS_PRIVATE)
+ return EINVAL;
+
+ *attr = pshared;
+ return 0;
+}
+
+int pthread_condattr_destroy(pthread_condattr_t *attr)
+{
+ if (attr == NULL)
+ return EINVAL;
+
+ *attr = 0xdeada11d;
+ return 0;
+}
+
+/* We use one bit in condition variable values as the 'shared' flag
+ * The rest is a counter.
+ */
+#define COND_SHARED_MASK 0x0001
+#define COND_COUNTER_INCREMENT 0x0002
+#define COND_COUNTER_MASK (~COND_SHARED_MASK)
+
+#define COND_IS_SHARED(c) (((c)->value & COND_SHARED_MASK) != 0)
/* XXX *technically* there is a race condition that could allow
* XXX a signal to be missed. If thread A is preempted in _wait()
* XXX after unlocking the mutex and before waiting, and if other
- * XXX threads call signal or broadcast UINT_MAX times (exactly),
+ * XXX threads call signal or broadcast UINT_MAX/2 times (exactly),
* XXX before thread A is scheduled again and calls futex_wait(),
* XXX then the signal will be lost.
*/
int pthread_cond_init(pthread_cond_t *cond,
const pthread_condattr_t *attr)
{
+ if (cond == NULL)
+ return EINVAL;
+
cond->value = 0;
+
+ if (attr != NULL && *attr == PTHREAD_PROCESS_SHARED)
+ cond->value |= COND_SHARED_MASK;
+
return 0;
}
int pthread_cond_destroy(pthread_cond_t *cond)
{
+ if (cond == NULL)
+ return EINVAL;
+
cond->value = 0xdeadc04d;
return 0;
}
-int pthread_cond_broadcast(pthread_cond_t *cond)
+/* This function is used by pthread_cond_broadcast and
+ * pthread_cond_signal to atomically decrement the counter
+ * then wake-up 'counter' threads.
+ */
+static int
+__pthread_cond_pulse(pthread_cond_t *cond, int counter)
{
- __atomic_dec(&cond->value);
- __futex_wake(&cond->value, INT_MAX);
+ long flags;
+ int wake_op;
+
+ if (__unlikely(cond == NULL))
+ return EINVAL;
+
+ flags = (cond->value & ~COND_COUNTER_MASK);
+ for (;;) {
+ long oldval = cond->value;
+ long newval = ((oldval - COND_COUNTER_INCREMENT) & COND_COUNTER_MASK)
+ | flags;
+ if (__atomic_cmpxchg(oldval, newval, &cond->value) == 0)
+ break;
+ }
+
+ wake_op = COND_IS_SHARED(cond) ? FUTEX_WAKE : FUTEX_WAKE_PRIVATE;
+ __futex_syscall3(&cond->value, wake_op, counter);
return 0;
}
+int pthread_cond_broadcast(pthread_cond_t *cond)
+{
+ return __pthread_cond_pulse(cond, INT_MAX);
+}
+
int pthread_cond_signal(pthread_cond_t *cond)
{
- __atomic_dec(&cond->value);
- __futex_wake(&cond->value, 1);
- return 0;
+ return __pthread_cond_pulse(cond, 1);
}
int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{
int status;
int oldvalue = cond->value;
+ int wait_op = COND_IS_SHARED(cond) ? FUTEX_WAIT : FUTEX_WAIT_PRIVATE;
pthread_mutex_unlock(mutex);
- status = __futex_wait(&cond->value, oldvalue, reltime);
+ status = __futex_syscall4(&cond->value, wait_op, oldvalue, reltime);
pthread_mutex_lock(mutex);
if (status == (-ETIMEDOUT)) return ETIMEDOUT;
int pthread_sigmask(int how, const sigset_t *set, sigset_t *oset)
{
- return __rt_sigprocmask(how, set, oset, _NSIG / 8);
+ /* pthread_sigmask must return the error code, but the syscall
+ * will set errno instead and return 0/-1
+ */
+ int ret, old_errno = errno;
+
+ ret = __rt_sigprocmask(how, set, oset, _NSIG / 8);
+ if (ret < 0)
+ ret = errno;
+
+ errno = old_errno;
+ return ret;
}
index 0c94600b731bc644321cab157687d00f6bd16934..84b93147931288886228a9afef03007ac4ee31cd 100644 (file)
--- a/libc/bionic/semaphore.c
+++ b/libc/bionic/semaphore.c
if (sem == NULL)
return EINVAL;
- if (__atomic_inc((volatile int*)&sem->count) == 0)
+ if (__atomic_inc((volatile int*)&sem->count) >= 0)
__futex_wake(&sem->count, 1);
return 0;
if (__atomic_dec_if_positive(&sem->count) > 0) {
return 0;
} else {
- return EAGAIN;
+ errno = EAGAIN;
+ return -1;
}
}
diff --git a/libc/bionic/stubs.c b/libc/bionic/stubs.c
index 365f21a2ac93866095e4ed8b4b8aea10c5ff59a5..d4956747d08baed27c090438d6dac401a5cf4b10 100644 (file)
--- a/libc/bionic/stubs.c
+++ b/libc/bionic/stubs.c
goto FAIL;
id = strtoul(name+4, &end, 10);
- if (id == 0 || *end != '\0')
+ if (*end != '\0')
goto FAIL;
id += AID_APP;
return NULL;
}
+int ttyname_r(int fd, char *buf, size_t buflen)
+{
+ fprintf(stderr, "FIX ME! implement ttyname_r() %s:%d\n", __FILE__, __LINE__);
+ return -ERANGE;
+}
+
struct netent *getnetbyaddr(uint32_t net, int type)
{
fprintf(stderr, "FIX ME! implement %s() %s:%d\n", __FUNCTION__, __FILE__, __LINE__);
fprintf(stderr, "FIX ME! implement %s() %s:%d\n", __FUNCTION__, __FILE__, __LINE__);
return NULL;
}
+
+char* getusershell(void)
+{
+ fprintf(stderr, "FIX ME! implement %s() %s:%d\n", __FUNCTION__, __FILE__, __LINE__);
+ return NULL;
+}
+
+void setusershell(void)
+{
+ fprintf(stderr, "FIX ME! implement %s() %s:%d\n", __FUNCTION__, __FILE__, __LINE__);
+}
+
+void endusershell(void)
+{
+ fprintf(stderr, "FIX ME! implement %s() %s:%d\n", __FUNCTION__, __FILE__, __LINE__);
+}
+
diff --git a/libc/docs/CHANGES.TXT b/libc/docs/CHANGES.TXT
--- /dev/null
+++ b/libc/docs/CHANGES.TXT
@@ -0,0 +1,174 @@
+Bionic ChangeLog:
+-----------------
+
+Differences between current and Android 2.1:
+
+- Add support for the SH-4 CPU architecture!
+
+- __atomic_swap(): use LDREX/STREX CPU instructions on ARMv6 and higher.
+
+- <arpa/telnet.h>: New header (declarations only, no implementation).
+
+- <err.h>: New header + implementation. GLibc compatibility.
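+
+  As an illustration only, a minimal sketch of the BSD-style helpers now
+  declared in <err.h> (err() appends strerror(errno) to the message,
+  errx() does not):
+
+    #include <err.h>
+    #include <stdio.h>
+
+    int main(int argc, char **argv) {
+        if (argc < 2)
+            errx(1, "usage: %s <file>", argv[0]);  /* exits with status 1 */
+        FILE *f = fopen(argv[1], "r");
+        if (f == NULL)
+            err(1, "fopen %s", argv[1]);           /* also prints strerror(errno) */
+        fclose(f);
+        return 0;
+    }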
+
+- <warn.h>: New header + implementation. GLibc compatibility.
+
+- <fts.h>: New header + implementation.
+
+- <mntent.h>: Add missing <stdio.h> include.
+
+- <regex.h>: New header + implementation.
+
+- <signal.h>: Added killpg()
+
+- <stdint.h>: Allow 64-bit type declarations on C99 builds.
+
+- <stdio.h>: Add fdprintf() and vfdprintf(). Note that GLibc provides
+ the confusing 'dprintf' and 'vdprintf()' functions instead.
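+
+  A minimal usage sketch (this assumes _GNU_SOURCE is defined before
+  including <stdio.h>, which is what the Bionic header requires):
+
+    #define _GNU_SOURCE
+    #include <stdio.h>
+    #include <unistd.h>
+
+    int main(void) {
+        /* like fprintf(), but writes straight to a file descriptor */
+        fdprintf(STDERR_FILENO, "pid %d starting up\n", (int)getpid());
+        return 0;
+    }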
+
+- <stdlib.h>: Fix ptsname_r(): the return type is int instead of char*.
+  The mistake comes from a GLibc man page bug (the man page listed a return
+  type of char* while the implementation used int; this was only fixed in
+  late 2009).
+ The Bionic implementation was incorrect. Technically, this is an ABI
+ breakage, but code that used this function probably never worked or
+ compiled properly anyway.
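+
+  With the corrected prototype, callers should check for a non-zero return
+  value instead of a NULL pointer (a minimal sketch; 'master_fd' is assumed
+  to be an already-opened pty master):
+
+    #include <stdio.h>
+    #include <stdlib.h>
+
+    static void print_slave_name(int master_fd) {
+        char buf[64];
+        /* ptsname_r() now returns an int: 0 on success, non-zero on failure */
+        if (ptsname_r(master_fd, buf, sizeof(buf)) == 0)
+            printf("slave pty: %s\n", buf);
+    }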
+
+- <strings.h>: Add missing <sys/types.h> include.
+
+- <sys/queue.h>: Added new header (no implementation - macro templates).
+
+- <sys/resource.h>: Add proper rlim_t definition.
+
+- <time64.h>: Add missing C++ header inclusion guards.
+
+- <unistd.h>: Add getusershell(), setusershell() and endusershell(), though
+  the implementations are bogus. GLibc compatibility.
+
+- <wchar.h>: Add mbstowcs() and wcstombs()
+
+- add clone() implementation for ARM (x86 and SH-4 not working yet).
+
+- <sys/epoll.h>, <sys/system_properties.h>: Add missing C++ inclusion guards
+
+- fix getpwnam() and getpwgrp() to accept "app_0" as a valid user name.
+
+- fix sem_trywait() to return -1 and set errno to EAGAIN, instead of
+ returning EAGAIN directly.
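+
+  Callers should now check the return value and errno, as with the other
+  POSIX semaphore functions (a minimal sketch):
+
+    #include <semaphore.h>
+    #include <errno.h>
+
+    /* returns 1 if acquired, 0 if the semaphore was busy, -1 on other errors */
+    static int try_acquire(sem_t *sem) {
+        if (sem_trywait(sem) == 0)
+            return 1;
+        return (errno == EAGAIN) ? 0 : -1;
+    }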
+
+- fix sem_post() to wake up multiple threads when called rapidly in
+ succession.
+
+- DNS: partial implementation of RFC3484 (rules 1, 2, 5, 6, 8, 10 and
+ modified rule 9), for better address selection/sorting.
+ In the process, removed code that was previously used for "sortlist"
+ in /etc/resolv.conf. (resolv.conf is already ignored, so the latter
+ is a no-op for actual functionality.)
+
+- fix pthread_sigmask() to properly return an error code without touching
+  errno. The previous implementation returned -1 on error and set errno,
+  which is not POSIX-compliant.
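+
+  A minimal sketch of checking the corrected behaviour:
+
+    #include <pthread.h>
+    #include <signal.h>
+
+    static int block_sigusr1(void) {
+        sigset_t set;
+        sigemptyset(&set);
+        sigaddset(&set, SIGUSR1);
+        /* any error code is returned directly; errno is left untouched */
+        return pthread_sigmask(SIG_BLOCK, &set, NULL);
+    }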
+
+- add sigaltstack() implementation for ARM.
+
+- <time.h>: Properly implement the 'timezone' and 'daylight' global variables
+ (they were not defined previously, though declared in the header).
+
+- <time.h>: Fix timezone management implementation to properly update
+ 'tm_gmtoff' field in 'struct tm' structure.
+
+- DNS: get rid of spurious random DNS queries when trying to resolve
+ an unknown domain name. Due to an initialization bug, a random DNS search
+  list was generated for each thread when net.dns.search was not defined.
+
+- <pthread.h>: Add pthread_condattr_init/destroy/setpshared/getpshared functions
+  to enable proper shared condition variable initialization.
+
+ Modify the pthread_mutex_t and pthread_cond_t implementation to use private
+  futexes for performance reasons. Mutexes and condition variables are no
+  longer shareable between processes by default, unless you use
+  PTHREAD_PROCESS_SHARED with pthread_mutexattr_setpshared() and/or
+  pthread_condattr_setpshared().
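+
+  A minimal sketch of opting back into process-shared behaviour (the
+  condition variable itself must, of course, live in memory that is mapped
+  into every participating process):
+
+    #include <pthread.h>
+
+    static void init_shared_cond(pthread_cond_t *cond) {
+        pthread_condattr_t attr;
+        pthread_condattr_init(&attr);
+        pthread_condattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
+        pthread_cond_init(cond, &attr);   /* uses a shared, not private, futex */
+        pthread_condattr_destroy(&attr);
+    }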
+
+-------------------------------------------------------------------------------
+Differences between Android 2.1 and 2.0.1:
+
+- zoneinfo: updated data tables to version 2009s
+
+
+-------------------------------------------------------------------------------
+Differences between Android 2.0.1 and 2.0:
+
+- abort(): ARM-specific hack to preserve the 'lr' register when abort()
+ is called (GCC does not preserve it by default since it thinks that
+ abort() never returns). This improves stack traces considerably.
+
+
+-------------------------------------------------------------------------------
+Differences between Android 2.0 and 1.6:
+
+- memcmp(), memcpy(): ARMv7 optimized versions.
+
+- pthread_mutexattr_setpshared(): implementation will not return ENOTSUP
+ if PTHREAD_PROCESS_SHARED is used, because our Mutex implementation can
+ work across multiple processes.
+
+  *HOWEVER* it does not use "robust futexes", which means that held mutexes
+  *are not* automatically released by the kernel when the owner process
+  crashes or exits. This is only done to simplify communication between
+  two always-live system processes; DO NOT USE THIS IN APPLICATIONS!
+
+- pthread_mutex_lock_timeout_np(): New Android-specific function to
+  perform a timed lock. If the timeout expires, it returns EBUSY.
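+
+  A minimal sketch (this assumes the second argument is the timeout in
+  milliseconds, as the function's implementation suggests):
+
+    #include <pthread.h>
+    #include <errno.h>
+
+    static int lock_with_timeout(pthread_mutex_t *m) {
+        int rc = pthread_mutex_lock_timeout_np(m, 500 /* ms */);
+        if (rc == EBUSY) {
+            /* the mutex could not be acquired within 500 ms */
+        }
+        return rc;
+    }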
+
+- pthread_cond_timedwait_monotonic_np(): Same as pthread_cond_timedwait()
+  but uses the monotonic clock (CLOCK_MONOTONIC). Android-specific.
+
+- pthread_cond_timedwait_relative_np(): Same as pthread_cond_timedwait()
+ but uses a relative timeout instead. Android-specific.
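+
+  A minimal sketch (this assumes the usual (cond, mutex, reltime) argument
+  order, with the timeout measured from "now"):
+
+    #include <pthread.h>
+    #include <time.h>
+
+    static int wait_up_to_one_second(pthread_cond_t *cond,
+                                     pthread_mutex_t *mutex) {
+        struct timespec reltime = { 1, 0 };   /* one second, zero nanoseconds */
+        /* returns 0 if signalled, ETIMEDOUT if the second elapses first */
+        return pthread_cond_timedwait_relative_np(cond, mutex, &reltime);
+    }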
+
+- <netinet/in.h>: Now includes <netinet/in6.h>.
+
+- <netinet/in6.h>: Added IPV6_JOIN_GROUP, IPV6_LEAVE_GROUP, IN6ADDR_ANY_INIT
+ and ipv6mr_interface definitions.
+
+- <time.h>:
+ * Add missing tzset() declaration.
+ * Add Android-specific strftime_tz().
+
+- getaddrinfo():
+ Only perform IPv6 lookup for AF_UNSPEC if we have IPv6 connectivity.
+ This saves one DNS query per lookup on non-IPv6 systems.
+
+- mktime(): Fix an infinite loop problem that appeared when switching to
+ GCC 4.4.0.
+
+- strftime(): fix incorrect handling of dates > 2038 due to 64-bit issue
+ in original code.
+
+-------------------------------------------------------------------------------
+Differences between Android 1.6 and 1.5:
+
+- C runtime: Fix runtime initialization to be called before any static C++
+  constructors. This allows them to use pthread functions properly.
+
+- __aeabi_atexit(): Fix implementation to properly call C++ static destructors
+ when the program exits (or when a shared library is unloaded).
+
+- <sys/stat.h>: added GLibc compatibility macros definitions:
+
+ #define st_atimensec st_atime_nsec
+ #define st_mtimensec st_mtime_nsec
+ #define st_ctimensec st_ctime_nsec
+
+- getaddrinfo(): implementation will now allow numeric ports if ai_socktype is
+ set to ANY. This is to match the GLibc behaviour.
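+
+  A minimal sketch of a lookup that now succeeds (ai_socktype left at 0,
+  i.e. "ANY", with a numeric service string; host and port are placeholders):
+
+    #include <sys/socket.h>
+    #include <netdb.h>
+    #include <string.h>
+
+    static int resolve_any(const char *host, struct addrinfo **out) {
+        struct addrinfo hints;
+        memset(&hints, 0, sizeof(hints));
+        hints.ai_family   = AF_UNSPEC;
+        hints.ai_socktype = 0;            /* no socket type constraint */
+        return getaddrinfo(host, "8080", &hints, out);
+    }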
+
+- getservent() and getservent_r() incorrectly returned the port in host-endian
+  order in the s_port field. They now return it in big-endian order.
+
+- DNS: Allow underscore in the middle of DNS labels. While not really
+ standard, this extension is needed for some VPN configurations and is
+ supported by other operating systems.
+
+- DNS: Support for DNS domain search lists through the new net.dns.search
+ system property. The corresponding value must be a space-separated list of
+ domain suffixes.
diff --git a/libc/include/arpa/telnet.h b/libc/include/arpa/telnet.h
--- /dev/null
@@ -0,0 +1,316 @@
+/*
+ * Copyright (c) 1983, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)telnet.h 8.2 (Berkeley) 12/15/93
+ */
+
+#ifndef _ARPA_TELNET_H
+#define _ARPA_TELNET_H 1
+
+/*
+ * Definitions for the TELNET protocol.
+ */
+#define IAC 255 /* interpret as command: */
+#define DONT 254 /* you are not to use option */
+#define DO 253 /* please, you use option */
+#define WONT 252 /* I won't use option */
+#define WILL 251 /* I will use option */
+#define SB 250 /* interpret as subnegotiation */
+#define GA 249 /* you may reverse the line */
+#define EL 248 /* erase the current line */
+#define EC 247 /* erase the current character */
+#define AYT 246 /* are you there */
+#define AO 245 /* abort output--but let prog finish */
+#define IP 244 /* interrupt process--permanently */
+#define BREAK 243 /* break */
+#define DM 242 /* data mark--for connect. cleaning */
+#define NOP 241 /* nop */
+#define SE 240 /* end sub negotiation */
+#define EOR 239 /* end of record (transparent mode) */
+#define ABORT 238 /* Abort process */
+#define SUSP 237 /* Suspend process */
+#define xEOF 236 /* End of file: EOF is already used... */
+
+#define SYNCH 242 /* for telfunc calls */
+
+#ifdef TELCMDS
+char *telcmds[] = {
+ "EOF", "SUSP", "ABORT", "EOR",
+ "SE", "NOP", "DMARK", "BRK", "IP", "AO", "AYT", "EC",
+ "EL", "GA", "SB", "WILL", "WONT", "DO", "DONT", "IAC", 0,
+};
+#else
+extern char *telcmds[];
+#endif
+
+#define TELCMD_FIRST xEOF
+#define TELCMD_LAST IAC
+#define TELCMD_OK(x) ((unsigned int)(x) <= TELCMD_LAST && \
+ (unsigned int)(x) >= TELCMD_FIRST)
+#define TELCMD(x) telcmds[(x)-TELCMD_FIRST]
+
+/* telnet options */
+#define TELOPT_BINARY 0 /* 8-bit data path */
+#define TELOPT_ECHO 1 /* echo */
+#define TELOPT_RCP 2 /* prepare to reconnect */
+#define TELOPT_SGA 3 /* suppress go ahead */
+#define TELOPT_NAMS 4 /* approximate message size */
+#define TELOPT_STATUS 5 /* give status */
+#define TELOPT_TM 6 /* timing mark */
+#define TELOPT_RCTE 7 /* remote controlled transmission and echo */
+#define TELOPT_NAOL 8 /* negotiate about output line width */
+#define TELOPT_NAOP 9 /* negotiate about output page size */
+#define TELOPT_NAOCRD 10 /* negotiate about CR disposition */
+#define TELOPT_NAOHTS 11 /* negotiate about horizontal tabstops */
+#define TELOPT_NAOHTD 12 /* negotiate about horizontal tab disposition */
+#define TELOPT_NAOFFD 13 /* negotiate about formfeed disposition */
+#define TELOPT_NAOVTS 14 /* negotiate about vertical tab stops */
+#define TELOPT_NAOVTD 15 /* negotiate about vertical tab disposition */
+#define TELOPT_NAOLFD 16 /* negotiate about output LF disposition */
+#define TELOPT_XASCII 17 /* extended ascii character set */
+#define TELOPT_LOGOUT 18 /* force logout */
+#define TELOPT_BM 19 /* byte macro */
+#define TELOPT_DET 20 /* data entry terminal */
+#define TELOPT_SUPDUP 21 /* supdup protocol */
+#define TELOPT_SUPDUPOUTPUT 22 /* supdup output */
+#define TELOPT_SNDLOC 23 /* send location */
+#define TELOPT_TTYPE 24 /* terminal type */
+#define TELOPT_EOR 25 /* end or record */
+#define TELOPT_TUID 26 /* TACACS user identification */
+#define TELOPT_OUTMRK 27 /* output marking */
+#define TELOPT_TTYLOC 28 /* terminal location number */
+#define TELOPT_3270REGIME 29 /* 3270 regime */
+#define TELOPT_X3PAD 30 /* X.3 PAD */
+#define TELOPT_NAWS 31 /* window size */
+#define TELOPT_TSPEED 32 /* terminal speed */
+#define TELOPT_LFLOW 33 /* remote flow control */
+#define TELOPT_LINEMODE 34 /* Linemode option */
+#define TELOPT_XDISPLOC 35 /* X Display Location */
+#define TELOPT_OLD_ENVIRON 36 /* Old - Environment variables */
+#define TELOPT_AUTHENTICATION 37/* Authenticate */
+#define TELOPT_ENCRYPT 38 /* Encryption option */
+#define TELOPT_NEW_ENVIRON 39 /* New - Environment variables */
+#define TELOPT_EXOPL 255 /* extended-options-list */
+
+
+#define NTELOPTS (1+TELOPT_NEW_ENVIRON)
+#ifdef TELOPTS
+const char *telopts[NTELOPTS+1] = {
+ "BINARY", "ECHO", "RCP", "SUPPRESS GO AHEAD", "NAME",
+ "STATUS", "TIMING MARK", "RCTE", "NAOL", "NAOP",
+ "NAOCRD", "NAOHTS", "NAOHTD", "NAOFFD", "NAOVTS",
+ "NAOVTD", "NAOLFD", "EXTEND ASCII", "LOGOUT", "BYTE MACRO",
+ "DATA ENTRY TERMINAL", "SUPDUP", "SUPDUP OUTPUT",
+ "SEND LOCATION", "TERMINAL TYPE", "END OF RECORD",
+ "TACACS UID", "OUTPUT MARKING", "TTYLOC",
+ "3270 REGIME", "X.3 PAD", "NAWS", "TSPEED", "LFLOW",
+ "LINEMODE", "XDISPLOC", "OLD-ENVIRON", "AUTHENTICATION",
+ "ENCRYPT", "NEW-ENVIRON",
+ 0,
+};
+#define TELOPT_FIRST TELOPT_BINARY
+#define TELOPT_LAST TELOPT_NEW_ENVIRON
+#define TELOPT_OK(x) ((unsigned int)(x) <= TELOPT_LAST)
+#define TELOPT(x) telopts[(x)-TELOPT_FIRST]
+#endif
+
+/* sub-option qualifiers */
+#define TELQUAL_IS 0 /* option is... */
+#define TELQUAL_SEND 1 /* send option */
+#define TELQUAL_INFO 2 /* ENVIRON: informational version of IS */
+#define TELQUAL_REPLY 2 /* AUTHENTICATION: client version of IS */
+#define TELQUAL_NAME 3 /* AUTHENTICATION: client version of IS */
+
+#define LFLOW_OFF 0 /* Disable remote flow control */
+#define LFLOW_ON 1 /* Enable remote flow control */
+#define LFLOW_RESTART_ANY 2 /* Restart output on any char */
+#define LFLOW_RESTART_XON 3 /* Restart output only on XON */
+
+/*
+ * LINEMODE suboptions
+ */
+
+#define LM_MODE 1
+#define LM_FORWARDMASK 2
+#define LM_SLC 3
+
+#define MODE_EDIT 0x01
+#define MODE_TRAPSIG 0x02
+#define MODE_ACK 0x04
+#define MODE_SOFT_TAB 0x08
+#define MODE_LIT_ECHO 0x10
+
+#define MODE_MASK 0x1f
+
+/* Not part of protocol, but needed to simplify things... */
+#define MODE_FLOW 0x0100
+#define MODE_ECHO 0x0200
+#define MODE_INBIN 0x0400
+#define MODE_OUTBIN 0x0800
+#define MODE_FORCE 0x1000
+
+#define SLC_SYNCH 1
+#define SLC_BRK 2
+#define SLC_IP 3
+#define SLC_AO 4
+#define SLC_AYT 5
+#define SLC_EOR 6
+#define SLC_ABORT 7
+#define SLC_EOF 8
+#define SLC_SUSP 9
+#define SLC_EC 10
+#define SLC_EL 11
+#define SLC_EW 12
+#define SLC_RP 13
+#define SLC_LNEXT 14
+#define SLC_XON 15
+#define SLC_XOFF 16
+#define SLC_FORW1 17
+#define SLC_FORW2 18
+
+#define NSLC 18
+
+/*
+ * For backwards compatibility, we define SLC_NAMES to be the
+ * list of names if SLC_NAMES is not defined.
+ */
+#define SLC_NAMELIST "0", "SYNCH", "BRK", "IP", "AO", "AYT", "EOR", \
+ "ABORT", "EOF", "SUSP", "EC", "EL", "EW", "RP", \
+ "LNEXT", "XON", "XOFF", "FORW1", "FORW2", 0,
+#ifdef SLC_NAMES
+const char *slc_names[] = {
+ SLC_NAMELIST
+};
+#else
+extern char *slc_names[];
+#define SLC_NAMES SLC_NAMELIST
+#endif
+
+#define SLC_NAME_OK(x) ((unsigned int)(x) <= NSLC)
+#define SLC_NAME(x) slc_names[x]
+
+#define SLC_NOSUPPORT 0
+#define SLC_CANTCHANGE 1
+#define SLC_VARIABLE 2
+#define SLC_DEFAULT 3
+#define SLC_LEVELBITS 0x03
+
+#define SLC_FUNC 0
+#define SLC_FLAGS 1
+#define SLC_VALUE 2
+
+#define SLC_ACK 0x80
+#define SLC_FLUSHIN 0x40
+#define SLC_FLUSHOUT 0x20
+
+#define OLD_ENV_VAR 1
+#define OLD_ENV_VALUE 0
+#define NEW_ENV_VAR 0
+#define NEW_ENV_VALUE 1
+#define ENV_ESC 2
+#define ENV_USERVAR 3
+
+/*
+ * AUTHENTICATION suboptions
+ */
+
+/*
+ * Who is authenticating who ...
+ */
+#define AUTH_WHO_CLIENT 0 /* Client authenticating server */
+#define AUTH_WHO_SERVER 1 /* Server authenticating client */
+#define AUTH_WHO_MASK 1
+
+/*
+ * amount of authentication done
+ */
+#define AUTH_HOW_ONE_WAY 0
+#define AUTH_HOW_MUTUAL 2
+#define AUTH_HOW_MASK 2
+
+#define AUTHTYPE_NULL 0
+#define AUTHTYPE_KERBEROS_V4 1
+#define AUTHTYPE_KERBEROS_V5 2
+#define AUTHTYPE_SPX 3
+#define AUTHTYPE_MINK 4
+#define AUTHTYPE_CNT 5
+
+#define AUTHTYPE_TEST 99
+
+#ifdef AUTH_NAMES
+const char *authtype_names[] = {
+ "NULL", "KERBEROS_V4", "KERBEROS_V5", "SPX", "MINK", 0,
+};
+#else
+extern char *authtype_names[];
+#endif
+
+#define AUTHTYPE_NAME_OK(x) ((unsigned int)(x) < AUTHTYPE_CNT)
+#define AUTHTYPE_NAME(x) authtype_names[x]
+
+/*
+ * ENCRYPTion suboptions
+ */
+#define ENCRYPT_IS 0 /* I pick encryption type ... */
+#define ENCRYPT_SUPPORT 1 /* I support encryption types ... */
+#define ENCRYPT_REPLY 2 /* Initial setup response */
+#define ENCRYPT_START 3 /* Am starting to send encrypted */
+#define ENCRYPT_END 4 /* Am ending encrypted */
+#define ENCRYPT_REQSTART 5 /* Request you start encrypting */
+#define ENCRYPT_REQEND 6 /* Request you send encrypting */
+#define ENCRYPT_ENC_KEYID 7
+#define ENCRYPT_DEC_KEYID 8
+#define ENCRYPT_CNT 9
+
+#define ENCTYPE_ANY 0
+#define ENCTYPE_DES_CFB64 1
+#define ENCTYPE_DES_OFB64 2
+#define ENCTYPE_CNT 3
+
+#ifdef ENCRYPT_NAMES
+const char *encrypt_names[] = {
+ "IS", "SUPPORT", "REPLY", "START", "END",
+ "REQUEST-START", "REQUEST-END", "ENC-KEYID", "DEC-KEYID",
+ 0,
+};
+const char *enctype_names[] = {
+ "ANY", "DES_CFB64", "DES_OFB64", 0,
+};
+#else
+extern const char *encrypt_names[];
+extern const char *enctype_names[];
+#endif
+
+
+#define ENCRYPT_NAME_OK(x) ((unsigned int)(x) < ENCRYPT_CNT)
+#define ENCRYPT_NAME(x) encrypt_names[x]
+
+#define ENCTYPE_NAME_OK(x) ((unsigned int)(x) < ENCTYPE_CNT)
+#define ENCTYPE_NAME(x) enctype_names[x]
+
+#endif /* arpa/telnet.h */
diff --git a/libc/include/ctype.h b/libc/include/ctype.h
index b5f9ff4f8b7ff90358425c1b1468aa81cf161264..58b76eae68480ed0af6fa9462ab1fcaeb063bdd0 100644 (file)
--- a/libc/include/ctype.h
+++ b/libc/include/ctype.h
/* extern __inline is a GNU C extension */
#ifdef __GNUC__
+# if defined(__GNUC_STDC_INLINE__)
+#define __CTYPE_INLINE extern __inline __attribute__((__gnu_inline__))
+# else
#define __CTYPE_INLINE extern __inline
+# endif
#else
#define __CTYPE_INLINE static __inline
#endif
diff --git a/libc/include/err.h b/libc/include/err.h
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..1636efe92d73ed464c07845ca8f0fc1c79829a79 100644 (file)
--- a/libc/include/err.h
+++ b/libc/include/err.h
+/* $OpenBSD: err.h,v 1.10 2006/01/06 18:53:04 millert Exp $ */
+/* $NetBSD: err.h,v 1.11 1994/10/26 00:55:52 cgd Exp $ */
+
+/*-
+ * Copyright (c) 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)err.h 8.1 (Berkeley) 6/2/93
+ */
+
+#ifndef _ERR_H_
+#define _ERR_H_
+
+/*
+ * Don't use va_list in the err/warn prototypes. Va_list is typedef'd in two
+ * places (<machine/varargs.h> and <machine/stdarg.h>), so if we include one
+ * of them here we may collide with the utility's includes. It's unreasonable
+ * for utilities to have to include one of them to include err.h, so we get
+ * __va_list from <machine/_types.h> and use it.
+ */
+#include <sys/cdefs.h>
+#include <machine/_types.h>
+
+__BEGIN_DECLS
+
+__noreturn void err(int, const char *, ...)
+ __attribute__((__format__ (printf, 2, 3)));
+__noreturn void verr(int, const char *, __va_list)
+ __attribute__((__format__ (printf, 2, 0)));
+__noreturn void errx(int, const char *, ...)
+ __attribute__((__format__ (printf, 2, 3)));
+__noreturn void verrx(int, const char *, __va_list)
+ __attribute__((__format__ (printf, 2, 0)));
+void warn(const char *, ...)
+ __attribute__((__format__ (printf, 1, 2)));
+void vwarn(const char *, __va_list)
+ __attribute__((__format__ (printf, 1, 0)));
+void warnx(const char *, ...)
+ __attribute__((__format__ (printf, 1, 2)));
+void vwarnx(const char *, __va_list)
+ __attribute__((__format__ (printf, 1, 0)));
+
+/*
+ * The _* versions are for use in library functions so user-defined
+ * versions of err*,warn* do not get used.
+ */
+__noreturn void _err(int, const char *, ...)
+ __attribute__((__format__ (printf, 2, 3)));
+__noreturn void _verr(int, const char *, __va_list)
+ __attribute__((__format__ (printf, 2, 0)));
+__noreturn void _errx(int, const char *, ...)
+ __attribute__((__format__ (printf, 2, 3)));
+__noreturn void _verrx(int, const char *, __va_list)
+ __attribute__((__format__ (printf, 2, 0)));
+void _warn(const char *, ...)
+ __attribute__((__format__ (printf, 1, 2)));
+void _vwarn(const char *, __va_list)
+ __attribute__((__format__ (printf, 1, 0)));
+void _warnx(const char *, ...)
+ __attribute__((__format__ (printf, 1, 2)));
+void _vwarnx(const char *, __va_list)
+ __attribute__((__format__ (printf, 1, 0)));
+
+__END_DECLS
+
+#endif /* !_ERR_H_ */
diff --git a/libc/include/fts.h b/libc/include/fts.h
--- /dev/null
+++ b/libc/include/fts.h
@@ -0,0 +1,125 @@
+/* $OpenBSD: fts.h,v 1.12 2009/08/27 16:19:27 millert Exp $ */
+/* $NetBSD: fts.h,v 1.5 1994/12/28 01:41:50 mycroft Exp $ */
+
+/*
+ * Copyright (c) 1989, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)fts.h 8.3 (Berkeley) 8/14/94
+ */
+
+#ifndef _FTS_H_
+#define _FTS_H_
+
+typedef struct {
+ struct _ftsent *fts_cur; /* current node */
+ struct _ftsent *fts_child; /* linked list of children */
+ struct _ftsent **fts_array; /* sort array */
+ dev_t fts_dev; /* starting device # */
+ char *fts_path; /* path for this descent */
+ int fts_rfd; /* fd for root */
+ size_t fts_pathlen; /* sizeof(path) */
+ int fts_nitems; /* elements in the sort array */
+ int (*fts_compar)(); /* compare function */
+
+#define FTS_COMFOLLOW 0x0001 /* follow command line symlinks */
+#define FTS_LOGICAL 0x0002 /* logical walk */
+#define FTS_NOCHDIR 0x0004 /* don't change directories */
+#define FTS_NOSTAT 0x0008 /* don't get stat info */
+#define FTS_PHYSICAL 0x0010 /* physical walk */
+#define FTS_SEEDOT 0x0020 /* return dot and dot-dot */
+#define FTS_XDEV 0x0040 /* don't cross devices */
+#define FTS_OPTIONMASK 0x00ff /* valid user option mask */
+
+#define FTS_NAMEONLY 0x1000 /* (private) child names only */
+#define FTS_STOP 0x2000 /* (private) unrecoverable error */
+ int fts_options; /* fts_open options, global flags */
+} FTS;
+
+typedef struct _ftsent {
+ struct _ftsent *fts_cycle; /* cycle node */
+ struct _ftsent *fts_parent; /* parent directory */
+ struct _ftsent *fts_link; /* next file in directory */
+ long fts_number; /* local numeric value */
+ void *fts_pointer; /* local address value */
+ char *fts_accpath; /* access path */
+ char *fts_path; /* root path */
+ int fts_errno; /* errno for this node */
+ int fts_symfd; /* fd for symlink */
+ size_t fts_pathlen; /* strlen(fts_path) */
+ size_t fts_namelen; /* strlen(fts_name) */
+
+ ino_t fts_ino; /* inode */
+ dev_t fts_dev; /* device */
+ nlink_t fts_nlink; /* link count */
+
+#define FTS_ROOTPARENTLEVEL -1
+#define FTS_ROOTLEVEL 0
+#define FTS_MAXLEVEL 0x7fff
+ short fts_level; /* depth (-1 to N) */
+
+#define FTS_D 1 /* preorder directory */
+#define FTS_DC 2 /* directory that causes cycles */
+#define FTS_DEFAULT 3 /* none of the above */
+#define FTS_DNR 4 /* unreadable directory */
+#define FTS_DOT 5 /* dot or dot-dot */
+#define FTS_DP 6 /* postorder directory */
+#define FTS_ERR 7 /* error; errno is set */
+#define FTS_F 8 /* regular file */
+#define FTS_INIT 9 /* initialized only */
+#define FTS_NS 10 /* stat(2) failed */
+#define FTS_NSOK 11 /* no stat(2) requested */
+#define FTS_SL 12 /* symbolic link */
+#define FTS_SLNONE 13 /* symbolic link without target */
+ unsigned short fts_info; /* user flags for FTSENT structure */
+
+#define FTS_DONTCHDIR 0x01 /* don't chdir .. to the parent */
+#define FTS_SYMFOLLOW 0x02 /* followed a symlink to get here */
+ unsigned short fts_flags; /* private flags for FTSENT structure */
+
+#define FTS_AGAIN 1 /* read node again */
+#define FTS_FOLLOW 2 /* follow symbolic link */
+#define FTS_NOINSTR 3 /* no instructions */
+#define FTS_SKIP 4 /* discard node */
+ unsigned short fts_instr; /* fts_set() instructions */
+
+ struct stat *fts_statp; /* stat(2) information */
+ char fts_name[1]; /* file name */
+} FTSENT;
+
+#include <sys/cdefs.h>
+
+__BEGIN_DECLS
+FTSENT *fts_children(FTS *, int);
+int fts_close(FTS *);
+FTS *fts_open(char * const *, int,
+ int (*)(const FTSENT **, const FTSENT **));
+FTSENT *fts_read(FTS *);
+int fts_set(FTS *, FTSENT *, int);
+__END_DECLS
+
+#endif /* !_FTS_H_ */
diff --git a/libc/include/mntent.h b/libc/include/mntent.h
index 468ff742ece1093548b5d9944ecc4aee9c37183f..b83da1f2f89f67663db8c04ad8de72f6a0941841 100644 (file)
--- a/libc/include/mntent.h
+++ b/libc/include/mntent.h
#ifndef _MNTENT_H_
#define _MNTENT_H_
+#include <stdio.h>
#define MNTTYPE_IGNORE "ignore"
diff --git a/libc/include/netdb.h b/libc/include/netdb.h
index b0c3b72758ff795eaa23a35aee2f705ab18febda..c2e08ea8771bceef323ae9a06d7997e2e15e8020 100644 (file)
--- a/libc/include/netdb.h
+++ b/libc/include/netdb.h
#define MAXHOSTNAMELEN 256
-/* BIONIC-BEGIN */
-#define h_errno (*__get_h_errno())
-extern int* __get_h_errno(void);
-/* BIONIC-END */
/*
* Structures returned by network data base library. All addresses are
#define SCOPE_DELIMITER '%'
__BEGIN_DECLS
+/* BIONIC-BEGIN */
+#define h_errno (*__get_h_errno())
+int* __get_h_errno(void);
+/* BIONIC-END */
void endhostent(void);
void endnetent(void);
void endnetgrent(void);
diff --git a/libc/include/pthread.h b/libc/include/pthread.h
index 6603b3f6cd7e07d1b78c232a9ea04bd55a9fc452..eb2d169973fdf92991e2e1f887ce729e1921450c 100644 (file)
--- a/libc/include/pthread.h
+++ b/libc/include/pthread.h
int pthread_mutex_trylock(pthread_mutex_t *mutex);
int pthread_mutex_timedlock(pthread_mutex_t *mutex, struct timespec* ts);
+int pthread_condattr_init(pthread_condattr_t *attr);
+int pthread_condattr_getpshared(pthread_condattr_t *attr, int *pshared);
+int pthread_condattr_setpshared(pthread_condattr_t* attr, int pshared);
+int pthread_condattr_destroy(pthread_condattr_t *attr);
+
int pthread_cond_init(pthread_cond_t *cond,
const pthread_condattr_t *attr);
int pthread_cond_destroy(pthread_cond_t *cond);
diff --git a/libc/include/regex.h b/libc/include/regex.h
--- /dev/null
+++ b/libc/include/regex.h
@@ -0,0 +1,105 @@
+/* $OpenBSD: regex.h,v 1.6 2003/06/02 19:34:12 millert Exp $ */
+/* $NetBSD: regex.h,v 1.4.6.1 1996/06/10 18:57:07 explorer Exp $ */
+
+/*-
+ * Copyright (c) 1992 Henry Spencer.
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Henry Spencer of the University of Toronto.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)regex.h 8.1 (Berkeley) 6/2/93
+ */
+
+#ifndef _REGEX_H_
+#define _REGEX_H_
+
+#include <sys/cdefs.h>
+#include <sys/types.h>
+
+/* types */
+typedef off_t regoff_t;
+
+typedef struct {
+ int re_magic;
+ size_t re_nsub; /* number of parenthesized subexpressions */
+ const char *re_endp; /* end pointer for REG_PEND */
+ struct re_guts *re_g; /* none of your business :-) */
+} regex_t;
+
+typedef struct {
+ regoff_t rm_so; /* start of match */
+ regoff_t rm_eo; /* end of match */
+} regmatch_t;
+
+/* regcomp() flags */
+#define REG_BASIC 0000
+#define REG_EXTENDED 0001
+#define REG_ICASE 0002
+#define REG_NOSUB 0004
+#define REG_NEWLINE 0010
+#define REG_NOSPEC 0020
+#define REG_PEND 0040
+#define REG_DUMP 0200
+
+/* regerror() flags */
+#define REG_NOMATCH 1
+#define REG_BADPAT 2
+#define REG_ECOLLATE 3
+#define REG_ECTYPE 4
+#define REG_EESCAPE 5
+#define REG_ESUBREG 6
+#define REG_EBRACK 7
+#define REG_EPAREN 8
+#define REG_EBRACE 9
+#define REG_BADBR 10
+#define REG_ERANGE 11
+#define REG_ESPACE 12
+#define REG_BADRPT 13
+#define REG_EMPTY 14
+#define REG_ASSERT 15
+#define REG_INVARG 16
+#define REG_ATOI 255 /* convert name to number (!) */
+#define REG_ITOA 0400 /* convert number to name (!) */
+
+/* regexec() flags */
+#define REG_NOTBOL 00001
+#define REG_NOTEOL 00002
+#define REG_STARTEND 00004
+#define REG_TRACE 00400 /* tracing of execution */
+#define REG_LARGE 01000 /* force large representation */
+#define REG_BACKR 02000 /* force use of backref code */
+
+__BEGIN_DECLS
+int regcomp(regex_t *, const char *, int);
+size_t regerror(int, const regex_t *, char *, size_t);
+int regexec(const regex_t *, const char *, size_t, regmatch_t [], int);
+void regfree(regex_t *);
+__END_DECLS
+
+#endif /* !_REGEX_H_ */
diff --git a/libc/include/sched.h b/libc/include/sched.h
index 6600bae64341219b597e42022422fc8608a837bb..33b9ad684cab918459d39bc2edd645105025e1d9 100644 (file)
--- a/libc/include/sched.h
+++ b/libc/include/sched.h
#define CLONE_CHILD_SETTID 0x01000000
#define CLONE_STOPPED 0x02000000
-extern int clone(int (*fn)(void*), void *child_stack, int flags, void *arg);
-extern pid_t __clone(int, void *);
+#ifdef __GNU_SOURCE
+extern int clone(int (*fn)(void *), void *child_stack, int flags, void* arg, ...);
+#endif
__END_DECLS
diff --git a/libc/include/signal.h b/libc/include/signal.h
index 55408478b22472fa8b1d44412ab5d309f4faa234..9e5ce611614cc126636eeeb232fd40b50242d52d 100644 (file)
--- a/libc/include/signal.h
+++ b/libc/include/signal.h
extern int raise(int);
extern int kill(pid_t, int);
+extern int killpg(int pgrp, int sig);
__END_DECLS
diff --git a/libc/include/stdio.h b/libc/include/stdio.h
index 79e526b7c439a118e1be935af4a95d04ef86e576..f0e103e0f4119dc36bea33ccd05c032bf7bc49d1 100644 (file)
--- a/libc/include/stdio.h
+++ b/libc/include/stdio.h
* that does not match the previous one in _bf. When this happens,
* _ub._base becomes non-nil (i.e., a stream has ungetc() data iff
* _ub._base!=NULL) and _up and _ur save the current values of _p and _r.
+ *
+ * NOTE: if you change this structure, you also need to update the
+ * std() initializer in findfp.c.
*/
typedef struct __sFILE {
unsigned char *_p; /* current position in (some) buffer */
#define getchar_unlocked() getc_unlocked(stdin)
#define putchar_unlocked(c) putc_unlocked(c, stdout)
+#ifdef _GNU_SOURCE
+/*
+ * glibc defines dprintf(int, const char*, ...), which is poorly named
+ * and likely to conflict with locally defined debugging printfs
+ * fdprintf is a better name, and some programs that use fdprintf use a
+ * #define fdprintf dprintf for compatibility
+ */
+int fdprintf(int, const char*, ...);
+int vfdprintf(int, const char*, __va_list);
+#endif /* _GNU_SOURCE */
+
#endif /* _STDIO_H_ */
diff --git a/libc/include/stdlib.h b/libc/include/stdlib.h
index acfe694247afc1406248b6823200feadce2b0c96..41e8d26c44b1bbcdd44e2eaa0f1aa421436eac85 100644 (file)
--- a/libc/include/stdlib.h
+++ b/libc/include/stdlib.h
extern int unlockpt(int);
extern char* ptsname(int);
-extern char* ptsname_r(int, char*, size_t);
+extern int ptsname_r(int, char*, size_t);
extern int getpt(void);
static __inline__ int grantpt(int __fd)
index 1478caa6c6432f8b2eff01c6488cae296a47d0b4..decdb4649ebbc401c0b2cfe2e2ac52bac214c3d4 100644 (file)
--- a/libc/include/sys/epoll.h
+++ b/libc/include/sys/epoll.h
#ifndef _SYS_EPOLL_H_
#define _SYS_EPOLL_H_
+#include <sys/cdefs.h>
+
+__BEGIN_DECLS
+
#define EPOLLIN 0x00000001
#define EPOLLPRI 0x00000002
#define EPOLLOUT 0x00000004
int epoll_ctl(int epfd, int op, int fd, struct epoll_event *event);
int epoll_wait(int epfd, struct epoll_event *events, int max, int timeout);
+__END_DECLS
+
#endif /* _SYS_EPOLL_H_ */
index 5d2b55e9c6eaab9727a0dbec18c5d6f4efcbac3d..30c58fd9fea9299f0ba6a3895b4285d013835bdb 100644 (file)
#define __NR_prctl (__NR_SYSCALL_BASE + 172)
#define __NR_capget (__NR_SYSCALL_BASE + 184)
#define __NR_capset (__NR_SYSCALL_BASE + 185)
+#define __NR_sigaltstack (__NR_SYSCALL_BASE + 186)
#define __NR_acct (__NR_SYSCALL_BASE + 51)
#define __NR_read (__NR_SYSCALL_BASE + 3)
#define __NR_write (__NR_SYSCALL_BASE + 4)
#define __NR_getsockopt (__NR_SYSCALL_BASE + 295)
#define __NR_sendmsg (__NR_SYSCALL_BASE + 296)
#define __NR_recvmsg (__NR_SYSCALL_BASE + 297)
+#define __NR_ioprio_set (__NR_SYSCALL_BASE + 314)
+#define __NR_ioprio_get (__NR_SYSCALL_BASE + 315)
#define __NR_epoll_create (__NR_SYSCALL_BASE + 250)
#define __NR_epoll_ctl (__NR_SYSCALL_BASE + 251)
#define __NR_epoll_wait (__NR_SYSCALL_BASE + 252)
#define __NR_timer_delete (__NR_SYSCALL_BASE + 263)
#define __NR_utimes (__NR_SYSCALL_BASE + 271)
#define __NR_socketcall (__NR_SYSCALL_BASE + 102)
+#define __NR_ioprio_set (__NR_SYSCALL_BASE + 289)
+#define __NR_ioprio_get (__NR_SYSCALL_BASE + 290)
#define __NR_epoll_create (__NR_SYSCALL_BASE + 254)
#define __NR_epoll_ctl (__NR_SYSCALL_BASE + 255)
#define __NR_epoll_wait (__NR_SYSCALL_BASE + 256)
#define __NR_socketcall (__NR_SYSCALL_BASE + 102)
#define __NR_socketcall (__NR_SYSCALL_BASE + 102)
#define __NR___socketcall (__NR_SYSCALL_BASE + 102)
+#define __NR_ioprio_set (__NR_SYSCALL_BASE + 289)
+#define __NR_ioprio_get (__NR_SYSCALL_BASE + 290)
#define __NR_epoll_create (__NR_SYSCALL_BASE + 254)
#define __NR_epoll_ctl (__NR_SYSCALL_BASE + 255)
#define __NR_epoll_wait (__NR_SYSCALL_BASE + 256)
index 12f7704f1f26316de6fb1852995b127787dec56a..116d0476a4614d5dc38b46c6843b473c01b98b4f 100644 (file)
pid_t __fork (void);
pid_t _waitpid (pid_t, int*, int, struct rusage*);
int waitid (int, pid_t, struct siginfo_t*, int,void*);
-pid_t __clone (int (*fn)(void*), void *child_stack, int flags, void *arg);
+pid_t __sys_clone (int, void*, int*, void*, int*);
int execve (const char*, char* const*, char* const*);
-int setuid (uid_t);
+int __setuid (uid_t);
uid_t getuid (void);
gid_t getgid (void);
uid_t geteuid (void);
pid_t setsid (void);
int setgid (gid_t);
int seteuid (uid_t);
-int setreuid (uid_t, uid_t);
-int setresuid (uid_t, uid_t, uid_t);
+int __setreuid (uid_t, uid_t);
+int __setresuid (uid_t, uid_t, uid_t);
int setresgid (gid_t, gid_t, gid_t);
void* __brk (void*);
int kill (pid_t, int);
int prctl (int option, unsigned int arg2, unsigned int arg3, unsigned int arg4, unsigned int arg5);
int capget (cap_user_header_t header, cap_user_data_t data);
int capset (cap_user_header_t header, const cap_user_data_t data);
+int sigaltstack (const stack_t*, stack_t*);
int acct (const char* filepath);
ssize_t read (int, void*, size_t);
ssize_t write (int, const void*, size_t);
int sched_get_priority_max (int policy);
int sched_get_priority_min (int policy);
int sched_rr_get_interval (pid_t pid, struct timespec *interval);
+int ioprio_set (int which, int who, int ioprio);
+int ioprio_get (int which, int who);
int uname (struct utsname *);
pid_t __wait4 (pid_t pid, int *status, int options, struct rusage *rusage);
mode_t umask (mode_t);
diff --git a/libc/include/sys/queue.h b/libc/include/sys/queue.h
--- /dev/null
+++ b/libc/include/sys/queue.h
@@ -0,0 +1,557 @@
+/*
+ * Copyright (c) 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)queue.h 8.5 (Berkeley) 8/20/94
+ */
+
+#ifndef _SYS_QUEUE_H_
+#define _SYS_QUEUE_H_
+
+/*
+ * This file defines five types of data structures: singly-linked lists,
+ * lists, simple queues, tail queues, and circular queues.
+ *
+ * A singly-linked list is headed by a single forward pointer. The
+ * elements are singly linked for minimum space and pointer manipulation
+ * overhead at the expense of O(n) removal for arbitrary elements. New
+ * elements can be added to the list after an existing element or at the
+ * head of the list. Elements being removed from the head of the list
+ * should use the explicit macro for this purpose for optimum
+ * efficiency. A singly-linked list may only be traversed in the forward
+ * direction. Singly-linked lists are ideal for applications with large
+ * datasets and few or no removals or for implementing a LIFO queue.
+ *
+ * A list is headed by a single forward pointer (or an array of forward
+ * pointers for a hash table header). The elements are doubly linked
+ * so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before
+ * or after an existing element or at the head of the list. A list
+ * may only be traversed in the forward direction.
+ *
+ * A simple queue is headed by a pair of pointers, one the head of the
+ * list and the other to the tail of the list. The elements are singly
+ * linked to save space, so elements can only be removed from the
+ * head of the list. New elements can be added to the list after
+ * an existing element, at the head of the list, or at the end of the
+ * list. A simple queue may only be traversed in the forward direction.
+ *
+ * A tail queue is headed by a pair of pointers, one to the head of the
+ * list and the other to the tail of the list. The elements are doubly
+ * linked so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before or
+ * after an existing element, at the head of the list, or at the end of
+ * the list. A tail queue may be traversed in either direction.
+ *
+ * A circle queue is headed by a pair of pointers, one to the head of the
+ * list and the other to the tail of the list. The elements are doubly
+ * linked so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before or after
+ * an existing element, at the head of the list, or at the end of the list.
+ * A circle queue may be traversed in either direction, but has a more
+ * complex end of list detection.
+ *
+ * For details on the use of these macros, see the queue(3) manual page.
+ */
+
+/*
+ * List definitions.
+ */
+#define LIST_HEAD(name, type) \
+struct name { \
+ struct type *lh_first; /* first element */ \
+}
+
+#define LIST_HEAD_INITIALIZER(head) \
+ { NULL }
+
+#define LIST_ENTRY(type) \
+struct { \
+ struct type *le_next; /* next element */ \
+ struct type **le_prev; /* address of previous next element */ \
+}
+
+/*
+ * List functions.
+ */
+#define LIST_INIT(head) do { \
+ (head)->lh_first = NULL; \
+} while (/*CONSTCOND*/0)
+
+#define LIST_INSERT_AFTER(listelm, elm, field) do { \
+ if (((elm)->field.le_next = (listelm)->field.le_next) != NULL) \
+ (listelm)->field.le_next->field.le_prev = \
+ &(elm)->field.le_next; \
+ (listelm)->field.le_next = (elm); \
+ (elm)->field.le_prev = &(listelm)->field.le_next; \
+} while (/*CONSTCOND*/0)
+
+#define LIST_INSERT_BEFORE(listelm, elm, field) do { \
+ (elm)->field.le_prev = (listelm)->field.le_prev; \
+ (elm)->field.le_next = (listelm); \
+ *(listelm)->field.le_prev = (elm); \
+ (listelm)->field.le_prev = &(elm)->field.le_next; \
+} while (/*CONSTCOND*/0)
+
+#define LIST_INSERT_HEAD(head, elm, field) do { \
+ if (((elm)->field.le_next = (head)->lh_first) != NULL) \
+ (head)->lh_first->field.le_prev = &(elm)->field.le_next;\
+ (head)->lh_first = (elm); \
+ (elm)->field.le_prev = &(head)->lh_first; \
+} while (/*CONSTCOND*/0)
+
+#define LIST_REMOVE(elm, field) do { \
+ if ((elm)->field.le_next != NULL) \
+ (elm)->field.le_next->field.le_prev = \
+ (elm)->field.le_prev; \
+ *(elm)->field.le_prev = (elm)->field.le_next; \
+} while (/*CONSTCOND*/0)
+
+#define LIST_FOREACH(var, head, field) \
+ for ((var) = ((head)->lh_first); \
+ (var); \
+ (var) = ((var)->field.le_next))
+
+/*
+ * List access methods.
+ */
+#define LIST_EMPTY(head) ((head)->lh_first == NULL)
+#define LIST_FIRST(head) ((head)->lh_first)
+#define LIST_NEXT(elm, field) ((elm)->field.le_next)
+
+
+/*
+ * Singly-linked List definitions.
+ */
+#define SLIST_HEAD(name, type) \
+struct name { \
+ struct type *slh_first; /* first element */ \
+}
+
+#define SLIST_HEAD_INITIALIZER(head) \
+ { NULL }
+
+#define SLIST_ENTRY(type) \
+struct { \
+ struct type *sle_next; /* next element */ \
+}
+
+/*
+ * Singly-linked List functions.
+ */
+#define SLIST_INIT(head) do { \
+ (head)->slh_first = NULL; \
+} while (/*CONSTCOND*/0)
+
+#define SLIST_INSERT_AFTER(slistelm, elm, field) do { \
+ (elm)->field.sle_next = (slistelm)->field.sle_next; \
+ (slistelm)->field.sle_next = (elm); \
+} while (/*CONSTCOND*/0)
+
+#define SLIST_INSERT_HEAD(head, elm, field) do { \
+ (elm)->field.sle_next = (head)->slh_first; \
+ (head)->slh_first = (elm); \
+} while (/*CONSTCOND*/0)
+
+#define SLIST_REMOVE_HEAD(head, field) do { \
+ (head)->slh_first = (head)->slh_first->field.sle_next; \
+} while (/*CONSTCOND*/0)
+
+#define SLIST_REMOVE(head, elm, type, field) do { \
+ if ((head)->slh_first == (elm)) { \
+ SLIST_REMOVE_HEAD((head), field); \
+ } \
+ else { \
+ struct type *curelm = (head)->slh_first; \
+ while(curelm->field.sle_next != (elm)) \
+ curelm = curelm->field.sle_next; \
+ curelm->field.sle_next = \
+ curelm->fie