tsan: Update to LLVM 19.1.0.

This commit is contained in:
Alex Rønne Petersen 2024-09-24 13:47:29 +02:00 committed by Andrew Kelley
parent 7f6b7c5608
commit a40cdad18c
129 changed files with 4880 additions and 2546 deletions

293
lib/tsan/builtins/assembly.h vendored Normal file
View File

@ -0,0 +1,293 @@
//===-- assembly.h - compiler-rt assembler support macros -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines macros for use in compiler-rt assembler source.
// This file is not part of the interface of this library.
//
//===----------------------------------------------------------------------===//
#ifndef COMPILERRT_ASSEMBLY_H
#define COMPILERRT_ASSEMBLY_H
// On Linux with CET enabled, <cet.h> emits the required .note.gnu.property
// marker into every assembly file that includes this header.
#if defined(__linux__) && defined(__CET__)
#if __has_include(<cet.h>)
#include <cet.h>
#endif
#endif
// Statement separator used inside multi-line assembly macros; Apple's arm64
// assembler uses %% as the separator, everywhere else ';' works.
#if defined(__APPLE__) && defined(__aarch64__)
#define SEPARATOR %%
#else
#define SEPARATOR ;
#endif
// Object-format specific directives: Mach-O, then ELF, with COFF as fallback.
#if defined(__APPLE__)
#define HIDDEN(name) .private_extern name
#define LOCAL_LABEL(name) L_##name
// tell linker it can break up file at label boundaries
#define FILE_LEVEL_DIRECTIVE .subsections_via_symbols
#define SYMBOL_IS_FUNC(name)
#define CONST_SECTION .const
#define NO_EXEC_STACK_DIRECTIVE
#elif defined(__ELF__)
#define HIDDEN(name) .hidden name
#define LOCAL_LABEL(name) .L_##name
#define FILE_LEVEL_DIRECTIVE
// ARM ELF targets spell the symbol type as %function rather than @function.
#if defined(__arm__) || defined(__aarch64__)
#define SYMBOL_IS_FUNC(name) .type name,%function
#else
#define SYMBOL_IS_FUNC(name) .type name,@function
#endif
#define CONST_SECTION .section .rodata
// Mark the stack non-executable on platforms whose linkers honor the note.
#if defined(__GNU__) || defined(__FreeBSD__) || defined(__Fuchsia__) || \
defined(__linux__)
#define NO_EXEC_STACK_DIRECTIVE .section .note.GNU-stack,"",%progbits
#else
#define NO_EXEC_STACK_DIRECTIVE
#endif
#else // !__APPLE__ && !__ELF__
#define HIDDEN(name)
#define LOCAL_LABEL(name) .L ## name
#define FILE_LEVEL_DIRECTIVE
#define SYMBOL_IS_FUNC(name) \
.def name SEPARATOR \
.scl 2 SEPARATOR \
.type 32 SEPARATOR \
.endef
#define CONST_SECTION .section .rdata,"rd"
#define NO_EXEC_STACK_DIRECTIVE
#endif
// 16-byte function alignment, used by the outline-function macro on ARM.
#if defined(__arm__) || defined(__aarch64__)
#define FUNC_ALIGN \
.text SEPARATOR \
.balign 16 SEPARATOR
#else
#define FUNC_ALIGN
#endif
// BTI and PAC gnu property note
#define NT_GNU_PROPERTY_TYPE_0 5
#define GNU_PROPERTY_AARCH64_FEATURE_1_AND 0xc0000000
#define GNU_PROPERTY_AARCH64_FEATURE_1_BTI 1
#define GNU_PROPERTY_AARCH64_FEATURE_1_PAC 2
#if defined(__ARM_FEATURE_BTI_DEFAULT)
#define BTI_FLAG GNU_PROPERTY_AARCH64_FEATURE_1_BTI
#else
#define BTI_FLAG 0
#endif
// The low two bits of __ARM_FEATURE_PAC_DEFAULT select the signing key.
#if __ARM_FEATURE_PAC_DEFAULT & 3
#define PAC_FLAG GNU_PROPERTY_AARCH64_FEATURE_1_PAC
#else
#define PAC_FLAG 0
#endif
// Emits a .note.gnu.property section advertising (type, value) to the linker.
#define GNU_PROPERTY(type, value) \
.pushsection .note.gnu.property, "a" SEPARATOR \
.p2align 3 SEPARATOR \
.word 4 SEPARATOR \
.word 16 SEPARATOR \
.word NT_GNU_PROPERTY_TYPE_0 SEPARATOR \
.asciz "GNU" SEPARATOR \
.word type SEPARATOR \
.word 4 SEPARATOR \
.word value SEPARATOR \
.word 0 SEPARATOR \
.popsection
// hint #34 / hint #36 are the encodings of "bti c" / "bti j"; they execute
// as NOPs on cores without BTI, so spelling them as hints keeps older
// assemblers and CPUs happy.
#if BTI_FLAG != 0
#define BTI_C hint #34
#define BTI_J hint #36
#else
#define BTI_C
#define BTI_J
#endif
#if (BTI_FLAG | PAC_FLAG) != 0
#define GNU_PROPERTY_BTI_PAC \
GNU_PROPERTY(GNU_PROPERTY_AARCH64_FEATURE_1_AND, BTI_FLAG | PAC_FLAG)
#else
#define GNU_PROPERTY_BTI_PAC
#endif
// CFI directives are only emitted when the compiler supports DWARF CFI asm.
#if defined(__clang__) || defined(__GCC_HAVE_DWARF2_CFI_ASM)
#define CFI_START .cfi_startproc
#define CFI_END .cfi_endproc
#else
#define CFI_START
#define CFI_END
#endif
#if defined(__arm__)
// Determine actual [ARM][THUMB[1][2]] ISA using compiler predefined macros:
// - for '-mthumb -march=armv6' compiler defines '__thumb__'
// - for '-mthumb -march=armv7' compiler defines '__thumb__' and '__thumb2__'
#if defined(__thumb2__) || defined(__thumb__)
#define DEFINE_CODE_STATE .thumb SEPARATOR
#define DECLARE_FUNC_ENCODING .thumb_func SEPARATOR
#if defined(__thumb2__)
#define USE_THUMB_2
// Thumb-2 needs IT blocks to predicate the following instruction(s).
#define IT(cond) it cond
#define ITT(cond) itt cond
#define ITE(cond) ite cond
#else
#define USE_THUMB_1
#define IT(cond)
#define ITT(cond)
#define ITE(cond)
#endif // defined(__thumb2__)
#else // !defined(__thumb2__) && !defined(__thumb__)
#define DEFINE_CODE_STATE .arm SEPARATOR
#define DECLARE_FUNC_ENCODING
#define IT(cond)
#define ITT(cond)
#define ITE(cond)
#endif
#if defined(USE_THUMB_1) && defined(USE_THUMB_2)
#error "USE_THUMB_1 and USE_THUMB_2 can't be defined together."
#endif
// bx (branch-and-exchange) exists from ARMv4T onward.
#if defined(__ARM_ARCH_4T__) || __ARM_ARCH >= 5
#define ARM_HAS_BX
#endif
#if !defined(__ARM_FEATURE_CLZ) && !defined(USE_THUMB_1) && \
(__ARM_ARCH >= 6 || (__ARM_ARCH == 5 && !defined(__ARM_ARCH_5__)))
#define __ARM_FEATURE_CLZ
#endif
// Indirect jump that switches ARM/Thumb state where the core supports it.
#ifdef ARM_HAS_BX
#define JMP(r) bx r
#define JMPc(r, c) bx##c r
#else
#define JMP(r) mov pc, r
#define JMPc(r, c) mov##c pc, r
#endif
// pop {pc} can't switch Thumb mode on ARMv4T
#if __ARM_ARCH >= 5
#define POP_PC() pop {pc}
#else
#define POP_PC() \
pop {ip}; \
JMP(ip)
#endif
// .w forces the 32-bit (wide) encoding of an instruction under Thumb-2.
#if defined(USE_THUMB_2)
#define WIDE(op) op.w
#else
#define WIDE(op) op
#endif
#else // !defined(__arm__)
#define DECLARE_FUNC_ENCODING
#define DEFINE_CODE_STATE
#endif
// Token-pasting helpers; the extra level of indirection forces expansion of
// macro arguments before pasting.
#define GLUE2_(a, b) a##b
#define GLUE(a, b) GLUE2_(a, b)
#define GLUE2(a, b) GLUE2_(a, b)
#define GLUE3_(a, b, c) a##b##c
#define GLUE3(a, b, c) GLUE3_(a, b, c)
#define GLUE4_(a, b, c, d) a##b##c##d
#define GLUE4(a, b, c, d) GLUE4_(a, b, c, d)
// Prepends the platform's user-label prefix (e.g. '_' on Mach-O).
#define SYMBOL_NAME(name) GLUE(__USER_LABEL_PREFIX__, name)
#ifdef VISIBILITY_HIDDEN
#define DECLARE_SYMBOL_VISIBILITY(name) \
HIDDEN(SYMBOL_NAME(name)) SEPARATOR
#define DECLARE_SYMBOL_VISIBILITY_UNMANGLED(name) \
HIDDEN(name) SEPARATOR
#else
#define DECLARE_SYMBOL_VISIBILITY(name)
#define DECLARE_SYMBOL_VISIBILITY_UNMANGLED(name)
#endif
// Opens a global compiler-rt function using the mangled symbol name.
#define DEFINE_COMPILERRT_FUNCTION(name) \
DEFINE_CODE_STATE \
FILE_LEVEL_DIRECTIVE SEPARATOR \
.globl SYMBOL_NAME(name) SEPARATOR \
SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \
DECLARE_SYMBOL_VISIBILITY(name) \
DECLARE_FUNC_ENCODING \
SYMBOL_NAME(name):
// Same as above, but the function is always emitted as Thumb code.
#define DEFINE_COMPILERRT_THUMB_FUNCTION(name) \
DEFINE_CODE_STATE \
FILE_LEVEL_DIRECTIVE SEPARATOR \
.globl SYMBOL_NAME(name) SEPARATOR \
SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \
DECLARE_SYMBOL_VISIBILITY(name) SEPARATOR \
.thumb_func SEPARATOR \
SYMBOL_NAME(name):
// Global but hidden: reachable across objects in the library, not exported.
#define DEFINE_COMPILERRT_PRIVATE_FUNCTION(name) \
DEFINE_CODE_STATE \
FILE_LEVEL_DIRECTIVE SEPARATOR \
.globl SYMBOL_NAME(name) SEPARATOR \
SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \
HIDDEN(SYMBOL_NAME(name)) SEPARATOR \
DECLARE_FUNC_ENCODING \
SYMBOL_NAME(name):
// As above, but 'name' is used verbatim (no user-label prefix applied).
#define DEFINE_COMPILERRT_PRIVATE_FUNCTION_UNMANGLED(name) \
DEFINE_CODE_STATE \
.globl name SEPARATOR \
SYMBOL_IS_FUNC(name) SEPARATOR \
HIDDEN(name) SEPARATOR \
DECLARE_FUNC_ENCODING \
name:
// Outline-helper entry point: aligned, opens CFI, and emits a BTI landing
// pad so the function may be reached via indirect branches.
#define DEFINE_COMPILERRT_OUTLINE_FUNCTION_UNMANGLED(name) \
DEFINE_CODE_STATE \
FUNC_ALIGN \
.globl name SEPARATOR \
SYMBOL_IS_FUNC(name) SEPARATOR \
DECLARE_SYMBOL_VISIBILITY_UNMANGLED(name) SEPARATOR \
DECLARE_FUNC_ENCODING \
name: \
SEPARATOR CFI_START \
SEPARATOR BTI_C
// Defines 'name' as an alias for 'target'.
#define DEFINE_COMPILERRT_FUNCTION_ALIAS(name, target) \
.globl SYMBOL_NAME(name) SEPARATOR \
SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \
DECLARE_SYMBOL_VISIBILITY(name) SEPARATOR \
.set SYMBOL_NAME(name), SYMBOL_NAME(target) SEPARATOR
// __aeabi_* aliases only exist on ARM EABI targets.
#if defined(__ARM_EABI__)
#define DEFINE_AEABI_FUNCTION_ALIAS(aeabi_name, name) \
DEFINE_COMPILERRT_FUNCTION_ALIAS(aeabi_name, name)
#else
#define DEFINE_AEABI_FUNCTION_ALIAS(aeabi_name, name)
#endif
// .size is an ELF-only directive; elsewhere the END macros expand to nothing
// (outline functions still close their CFI region).
#ifdef __ELF__
#define END_COMPILERRT_FUNCTION(name) \
.size SYMBOL_NAME(name), . - SYMBOL_NAME(name)
#define END_COMPILERRT_OUTLINE_FUNCTION(name) \
CFI_END SEPARATOR \
.size SYMBOL_NAME(name), . - SYMBOL_NAME(name)
#else
#define END_COMPILERRT_FUNCTION(name)
#define END_COMPILERRT_OUTLINE_FUNCTION(name) \
CFI_END
#endif
#endif // COMPILERRT_ASSEMBLY_H

View File

@ -185,6 +185,11 @@ const interpose_substitution substitution_##func_name[] \
# else
# define __ASM_WEAK_WRAPPER(func) ".weak " #func "\n"
# endif // SANITIZER_FREEBSD || SANITIZER_NETBSD
# if defined(__arm__) || defined(__aarch64__)
# define ASM_TYPE_FUNCTION_STR "%function"
# else
# define ASM_TYPE_FUNCTION_STR "@function"
# endif
// Keep trampoline implementation in sync with sanitizer_common/sanitizer_asm.h
# define DECLARE_WRAPPER(ret_type, func, ...) \
extern "C" ret_type func(__VA_ARGS__); \
@ -196,12 +201,14 @@ const interpose_substitution substitution_##func_name[] \
__ASM_WEAK_WRAPPER(func) \
".set " #func ", " SANITIZER_STRINGIFY(TRAMPOLINE(func)) "\n" \
".globl " SANITIZER_STRINGIFY(TRAMPOLINE(func)) "\n" \
".type " SANITIZER_STRINGIFY(TRAMPOLINE(func)) ", %function\n" \
".type " SANITIZER_STRINGIFY(TRAMPOLINE(func)) ", " \
ASM_TYPE_FUNCTION_STR "\n" \
SANITIZER_STRINGIFY(TRAMPOLINE(func)) ":\n" \
SANITIZER_STRINGIFY(CFI_STARTPROC) "\n" \
SANITIZER_STRINGIFY(ASM_TAIL_CALL) " __interceptor_" \
SANITIZER_STRINGIFY(ASM_PREEMPTIBLE_SYM(func)) "\n" \
SANITIZER_STRINGIFY(CFI_ENDPROC) "\n" \
C_ASM_STARTPROC "\n" \
C_ASM_TAIL_CALL(SANITIZER_STRINGIFY(TRAMPOLINE(func)), \
"__interceptor_" \
SANITIZER_STRINGIFY(ASM_PREEMPTIBLE_SYM(func))) "\n" \
C_ASM_ENDPROC "\n" \
".size " SANITIZER_STRINGIFY(TRAMPOLINE(func)) ", " \
".-" SANITIZER_STRINGIFY(TRAMPOLINE(func)) "\n" \
);
@ -341,6 +348,18 @@ typedef unsigned long long uptr;
#else
typedef unsigned long uptr;
#endif // _WIN64
#if defined(__ELF__) && !SANITIZER_FUCHSIA
// The use of interceptors makes many sanitizers unusable for static linking.
// Define a function that, if called, will cause a linker error (undefined _DYNAMIC).
// However, -static-pie (which is not common) cannot be detected at link time.
extern uptr kDynamic[] asm("_DYNAMIC");
inline void DoesNotSupportStaticLinking() {
[[maybe_unused]] volatile auto x = &kDynamic;
}
#else
inline void DoesNotSupportStaticLinking() {}
#endif
} // namespace __interception
#define INCLUDED_FROM_INTERCEPTION_LIB

View File

@ -28,12 +28,14 @@ bool InterceptFunction(const char *name, const char *ver, uptr *ptr_to_real,
uptr func, uptr trampoline);
} // namespace __interception
#define INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func) \
::__interception::InterceptFunction( \
#func, \
(::__interception::uptr *)&REAL(func), \
(::__interception::uptr)&(func), \
(::__interception::uptr)&TRAMPOLINE(func))
// Cast func to type of REAL(func) before casting to uptr in case it is an
// overloaded function, which is the case for some glibc functions when
// _FORTIFY_SOURCE is used. This disambiguates which overload to use.
#define INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func) \
::__interception::InterceptFunction( \
#func, (::__interception::uptr *)&REAL(func), \
(::__interception::uptr)(decltype(REAL(func)))&(func), \
(::__interception::uptr) &TRAMPOLINE(func))
// dlvsym is a GNU extension supported by some other platforms.
#if SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD
@ -41,7 +43,7 @@ bool InterceptFunction(const char *name, const char *ver, uptr *ptr_to_real,
::__interception::InterceptFunction( \
#func, symver, \
(::__interception::uptr *)&REAL(func), \
(::__interception::uptr)&(func), \
(::__interception::uptr)(decltype(REAL(func)))&(func), \
(::__interception::uptr)&TRAMPOLINE(func))
#else
#define INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) \

View File

@ -1,4 +1,4 @@
//===-- interception_linux.cpp ----------------------------------*- C++ -*-===//
//===-- interception_win.cpp ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@ -339,7 +339,7 @@ struct TrampolineMemoryRegion {
uptr max_size;
};
UNUSED static const uptr kTrampolineScanLimitRange = 1 << 31; // 2 gig
UNUSED static const uptr kTrampolineScanLimitRange = 1ull << 31; // 2 gig
static const int kMaxTrampolineRegion = 1024;
static TrampolineMemoryRegion TrampolineRegions[kMaxTrampolineRegion];
@ -431,7 +431,8 @@ static uptr AllocateMemoryForTrampoline(uptr image_address, size_t size) {
// The following prologues cannot be patched because of the short jump
// jumping to the patching region.
#if SANITIZER_WINDOWS64
// Short jump patterns below are only for x86_64.
# if SANITIZER_WINDOWS_x64
// ntdll!wcslen in Win11
// 488bc1 mov rax,rcx
// 0fb710 movzx edx,word ptr [rax]
@ -457,7 +458,12 @@ static const u8 kPrologueWithShortJump2[] = {
// Returns 0 on error.
static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
#if SANITIZER_WINDOWS64
#if SANITIZER_ARM64
// An ARM64 instruction is 4 bytes long.
return 4;
#endif
# if SANITIZER_WINDOWS_x64
if (memcmp((u8*)address, kPrologueWithShortJump1,
sizeof(kPrologueWithShortJump1)) == 0 ||
memcmp((u8*)address, kPrologueWithShortJump2,
@ -473,6 +479,8 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
switch (*(u8*)address) {
case 0x90: // 90 : nop
case 0xC3: // C3 : ret (for small/empty function interception
case 0xCC: // CC : int 3 i.e. registering weak functions)
return 1;
case 0x50: // push eax / rax
@ -496,7 +504,6 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
// Cannot overwrite control-instruction. Return 0 to indicate failure.
case 0xE9: // E9 XX XX XX XX : jmp <label>
case 0xE8: // E8 XX XX XX XX : call <func>
case 0xC3: // C3 : ret
case 0xEB: // EB XX : jmp XX (short jump)
case 0x70: // 7Y YY : jy XX (short conditional jump)
case 0x71:
@ -539,7 +546,12 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
return 7;
}
#if SANITIZER_WINDOWS64
switch (0x000000FF & *(u32 *)address) {
case 0xc2: // C2 XX XX : ret XX (needed for registering weak functions)
return 3;
}
# if SANITIZER_WINDOWS_x64
switch (*(u8*)address) {
case 0xA1: // A1 XX XX XX XX XX XX XX XX :
// movabs eax, dword ptr ds:[XXXXXXXX]
@ -572,6 +584,7 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
case 0x018a: // mov al, byte ptr [rcx]
return 2;
case 0x058A: // 8A 05 XX XX XX XX : mov al, byte ptr [XX XX XX XX]
case 0x058B: // 8B 05 XX XX XX XX : mov eax, dword ptr [XX XX XX XX]
if (rel_offset)
*rel_offset = 2;
@ -598,6 +611,7 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
case 0xc18b4c: // 4C 8B C1 : mov r8, rcx
case 0xd2b60f: // 0f b6 d2 : movzx edx, dl
case 0xca2b48: // 48 2b ca : sub rcx, rdx
case 0xca3b48: // 48 3b ca : cmp rcx, rdx
case 0x10b70f: // 0f b7 10 : movzx edx, WORD PTR [rax]
case 0xc00b4d: // 3d 0b c0 : or r8, r8
case 0xc08b41: // 41 8b c0 : mov eax, r8d
@ -617,9 +631,11 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
case 0x058b48: // 48 8b 05 XX XX XX XX :
// mov rax, QWORD PTR [rip + XXXXXXXX]
case 0x058d48: // 48 8d 05 XX XX XX XX :
// lea rax, QWORD PTR [rip + XXXXXXXX]
case 0x25ff48: // 48 ff 25 XX XX XX XX :
// rex.W jmp QWORD PTR [rip + XXXXXXXX]
case 0x158D4C: // 4c 8d 15 XX XX XX XX : lea r10, [rip + XX]
// Instructions having offset relative to 'rip' need offset adjustment.
if (rel_offset)
*rel_offset = 3;
@ -721,16 +737,22 @@ static bool CopyInstructions(uptr to, uptr from, size_t size) {
size_t instruction_size = GetInstructionSize(from + cursor, &rel_offset);
if (!instruction_size)
return false;
_memcpy((void*)(to + cursor), (void*)(from + cursor),
_memcpy((void *)(to + cursor), (void *)(from + cursor),
(size_t)instruction_size);
if (rel_offset) {
uptr delta = to - from;
uptr relocated_offset = *(u32*)(to + cursor + rel_offset) - delta;
#if SANITIZER_WINDOWS64
if (relocated_offset + 0x80000000U >= 0xFFFFFFFFU)
# if SANITIZER_WINDOWS64
// we want to make sure that the new relative offset still fits in 32-bits
// this will be untrue if relocated_offset \notin [-2**31, 2**31)
s64 delta = to - from;
s64 relocated_offset = *(s32 *)(to + cursor + rel_offset) - delta;
if (-0x8000'0000ll > relocated_offset || relocated_offset > 0x7FFF'FFFFll)
return false;
#endif
*(u32*)(to + cursor + rel_offset) = relocated_offset;
# else
// on 32-bit, the relative offset will always be correct
s32 delta = to - from;
s32 relocated_offset = *(s32 *)(to + cursor + rel_offset) - delta;
# endif
*(s32 *)(to + cursor + rel_offset) = relocated_offset;
}
cursor += instruction_size;
}
@ -932,19 +954,26 @@ bool OverrideFunction(
static void **InterestingDLLsAvailable() {
static const char *InterestingDLLs[] = {
"kernel32.dll",
"msvcr100.dll", // VS2010
"msvcr110.dll", // VS2012
"msvcr120.dll", // VS2013
"vcruntime140.dll", // VS2015
"ucrtbase.dll", // Universal CRT
#if (defined(__MINGW32__) && defined(__i386__))
"libc++.dll", // libc++
"libunwind.dll", // libunwind
#endif
// NTDLL should go last as it exports some functions that we should
// override in the CRT [presumably only used internally].
"ntdll.dll", NULL};
"kernel32.dll",
"msvcr100d.dll", // VS2010
"msvcr110d.dll", // VS2012
"msvcr120d.dll", // VS2013
"vcruntime140d.dll", // VS2015
"ucrtbased.dll", // Universal CRT
"msvcr100.dll", // VS2010
"msvcr110.dll", // VS2012
"msvcr120.dll", // VS2013
"vcruntime140.dll", // VS2015
"ucrtbase.dll", // Universal CRT
# if (defined(__MINGW32__) && defined(__i386__))
"libc++.dll", // libc++
"libunwind.dll", // libunwind
# endif
// NTDLL should go last as it exports some functions that we should
// override in the CRT [presumably only used internally].
"ntdll.dll",
NULL
};
static void *result[ARRAY_SIZE(InterestingDLLs)] = { 0 };
if (!result[0]) {
for (size_t i = 0, j = 0; InterestingDLLs[i]; ++i) {

View File

@ -25,7 +25,7 @@ namespace __sanitizer {
const char *PrimaryAllocatorName = "SizeClassAllocator";
const char *SecondaryAllocatorName = "LargeMmapAllocator";
static ALIGNED(64) char internal_alloc_placeholder[sizeof(InternalAllocator)];
alignas(64) static char internal_alloc_placeholder[sizeof(InternalAllocator)];
static atomic_uint8_t internal_allocator_initialized;
static StaticSpinMutex internal_alloc_init_mu;
@ -138,14 +138,20 @@ void InternalAllocatorUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
// LowLevelAllocator
constexpr uptr kLowLevelAllocatorDefaultAlignment = 8;
constexpr uptr kMinNumPagesRounded = 16;
constexpr uptr kMinRoundedSize = 65536;
static uptr low_level_alloc_min_alignment = kLowLevelAllocatorDefaultAlignment;
static LowLevelAllocateCallback low_level_alloc_callback;
static LowLevelAllocator Alloc;
LowLevelAllocator &GetGlobalLowLevelAllocator() { return Alloc; }
void *LowLevelAllocator::Allocate(uptr size) {
// Align allocation size.
size = RoundUpTo(size, low_level_alloc_min_alignment);
if (allocated_end_ - allocated_current_ < (sptr)size) {
uptr size_to_allocate = RoundUpTo(size, GetPageSizeCached());
uptr size_to_allocate = RoundUpTo(
size, Min(GetPageSizeCached() * kMinNumPagesRounded, kMinRoundedSize));
allocated_current_ = (char *)MmapOrDie(size_to_allocate, __func__);
allocated_end_ = allocated_current_ + size_to_allocate;
if (low_level_alloc_callback) {

View File

@ -40,6 +40,8 @@ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_malloc_hook(void *ptr, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_free_hook(void *ptr);
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE int
__sanitizer_ignore_free_hook(void *ptr);
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_purge_allocator();

View File

@ -278,7 +278,7 @@ class SizeClassAllocator32 {
static const uptr kRegionSize = 1 << kRegionSizeLog;
static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;
struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) SizeClassInfo {
struct alignas(SANITIZER_CACHE_LINE_SIZE) SizeClassInfo {
StaticSpinMutex mutex;
IntrusiveList<TransferBatch> free_list;
u32 rand_state;

View File

@ -316,13 +316,13 @@ class SizeClassAllocator64 {
Printf(
"%s %02zd (%6zd): mapped: %6zdK allocs: %7zd frees: %7zd inuse: %6zd "
"num_freed_chunks %7zd avail: %6zd rss: %6zdK releases: %6zd "
"last released: %6lldK region: 0x%zx\n",
"last released: %6lldK region: %p\n",
region->exhausted ? "F" : " ", class_id, ClassIdToSize(class_id),
region->mapped_user >> 10, region->stats.n_allocated,
region->stats.n_freed, in_use, region->num_freed_chunks, avail_chunks,
rss >> 10, region->rtoi.num_releases,
region->rtoi.last_released_bytes >> 10,
SpaceBeg() + kRegionSize * class_id);
(void *)(SpaceBeg() + kRegionSize * class_id));
}
void PrintStats() {
@ -636,15 +636,17 @@ class SizeClassAllocator64 {
}
uptr SpaceEnd() const { return SpaceBeg() + kSpaceSize; }
// kRegionSize should be able to satisfy the largest size class.
static_assert(kRegionSize >= SizeClassMap::kMaxSize);
static_assert(kRegionSize >= SizeClassMap::kMaxSize,
"Region size exceed largest size");
// kRegionSize must be <= 2^36, see CompactPtrT.
COMPILER_CHECK((kRegionSize) <= (1ULL << (SANITIZER_WORDSIZE / 2 + 4)));
COMPILER_CHECK((kRegionSize) <=
(1ULL << (sizeof(CompactPtrT) * 8 + kCompactPtrScale)));
// Call mmap for user memory with at least this size.
static const uptr kUserMapSize = 1 << 16;
static const uptr kUserMapSize = 1 << 18;
// Call mmap for metadata memory with at least this size.
static const uptr kMetaMapSize = 1 << 16;
// Call mmap for free array memory with at least this size.
static const uptr kFreeArrayMapSize = 1 << 16;
static const uptr kFreeArrayMapSize = 1 << 18;
atomic_sint32_t release_to_os_interval_ms_;
@ -665,7 +667,7 @@ class SizeClassAllocator64 {
u64 last_released_bytes;
};
struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) RegionInfo {
struct alignas(SANITIZER_CACHE_LINE_SIZE) RegionInfo {
Mutex mutex;
uptr num_freed_chunks; // Number of elements in the freearray.
uptr mapped_free_array; // Bytes mapped for freearray.

View File

@ -42,6 +42,16 @@
# define CFI_RESTORE(reg)
#endif
#if defined(__aarch64__) && defined(__ARM_FEATURE_BTI_DEFAULT)
# define ASM_STARTPROC CFI_STARTPROC; hint #34
# define C_ASM_STARTPROC SANITIZER_STRINGIFY(CFI_STARTPROC) "\nhint #34"
#else
# define ASM_STARTPROC CFI_STARTPROC
# define C_ASM_STARTPROC SANITIZER_STRINGIFY(CFI_STARTPROC)
#endif
#define ASM_ENDPROC CFI_ENDPROC
#define C_ASM_ENDPROC SANITIZER_STRINGIFY(CFI_ENDPROC)
#if defined(__x86_64__) || defined(__i386__) || defined(__sparc__)
# define ASM_TAIL_CALL jmp
#elif defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
@ -53,6 +63,29 @@
# define ASM_TAIL_CALL tail
#endif
// Currently, almost all of the shared libraries rely on the value of
// $t9 to get the address of current function, instead of PCREL, even
// on MIPSr6. To be compatible with them, we have to set $t9 properly.
// MIPS uses GOT to get the address of preemptible functions.
#if defined(__mips64)
# define C_ASM_TAIL_CALL(t_func, i_func) \
"lui $t8, %hi(%neg(%gp_rel(" t_func ")))\n" \
"daddu $t8, $t8, $t9\n" \
"daddiu $t8, $t8, %lo(%neg(%gp_rel(" t_func ")))\n" \
"ld $t9, %got_disp(" i_func ")($t8)\n" \
"jr $t9\n"
#elif defined(__mips__)
# define C_ASM_TAIL_CALL(t_func, i_func) \
".set noreorder\n" \
".cpload $t9\n" \
".set reorder\n" \
"lw $t9, %got(" i_func ")($gp)\n" \
"jr $t9\n"
#elif defined(ASM_TAIL_CALL)
# define C_ASM_TAIL_CALL(t_func, i_func) \
SANITIZER_STRINGIFY(ASM_TAIL_CALL) " " i_func
#endif
#if defined(__ELF__) && defined(__x86_64__) || defined(__i386__) || \
defined(__riscv)
# define ASM_PREEMPTIBLE_SYM(sym) sym@plt
@ -62,7 +95,11 @@
#if !defined(__APPLE__)
# define ASM_HIDDEN(symbol) .hidden symbol
# define ASM_TYPE_FUNCTION(symbol) .type symbol, %function
# if defined(__arm__) || defined(__aarch64__)
# define ASM_TYPE_FUNCTION(symbol) .type symbol, %function
# else
# define ASM_TYPE_FUNCTION(symbol) .type symbol, @function
# endif
# define ASM_SIZE(symbol) .size symbol, .-symbol
# define ASM_SYMBOL(symbol) symbol
# define ASM_SYMBOL_INTERCEPTOR(symbol) symbol
@ -87,9 +124,9 @@
.globl __interceptor_trampoline_##name; \
ASM_TYPE_FUNCTION(__interceptor_trampoline_##name); \
__interceptor_trampoline_##name: \
CFI_STARTPROC; \
ASM_STARTPROC; \
ASM_TAIL_CALL ASM_PREEMPTIBLE_SYM(__interceptor_##name); \
CFI_ENDPROC; \
ASM_ENDPROC; \
ASM_SIZE(__interceptor_trampoline_##name)
# define ASM_INTERCEPTOR_TRAMPOLINE_SUPPORT 1
# endif // Architecture supports interceptor trampoline

View File

@ -18,12 +18,24 @@
namespace __sanitizer {
enum memory_order {
// If the __atomic atomic builtins are supported (Clang/GCC), use the
// compiler provided macro values so that we can map the atomic operations
// to __atomic_* directly.
#ifdef __ATOMIC_SEQ_CST
memory_order_relaxed = __ATOMIC_RELAXED,
memory_order_consume = __ATOMIC_CONSUME,
memory_order_acquire = __ATOMIC_ACQUIRE,
memory_order_release = __ATOMIC_RELEASE,
memory_order_acq_rel = __ATOMIC_ACQ_REL,
memory_order_seq_cst = __ATOMIC_SEQ_CST
#else
memory_order_relaxed = 1 << 0,
memory_order_consume = 1 << 1,
memory_order_acquire = 1 << 2,
memory_order_release = 1 << 3,
memory_order_acq_rel = 1 << 4,
memory_order_seq_cst = 1 << 5
#endif
};
struct atomic_uint8_t {
@ -49,7 +61,7 @@ struct atomic_uint32_t {
struct atomic_uint64_t {
typedef u64 Type;
// On 32-bit platforms u64 is not necessary aligned on 8 bytes.
volatile ALIGNED(8) Type val_dont_use;
alignas(8) volatile Type val_dont_use;
};
struct atomic_uintptr_t {

View File

@ -14,60 +14,63 @@
#ifndef SANITIZER_ATOMIC_CLANG_H
#define SANITIZER_ATOMIC_CLANG_H
#if defined(__i386__) || defined(__x86_64__)
# include "sanitizer_atomic_clang_x86.h"
#else
# include "sanitizer_atomic_clang_other.h"
#endif
namespace __sanitizer {
// We would like to just use compiler builtin atomic operations
// for loads and stores, but they are mostly broken in clang:
// - they lead to vastly inefficient code generation
// (http://llvm.org/bugs/show_bug.cgi?id=17281)
// - 64-bit atomic operations are not implemented on x86_32
// (http://llvm.org/bugs/show_bug.cgi?id=15034)
// - they are not implemented on ARM
// error: undefined reference to '__atomic_load_4'
// We use the compiler builtin atomic operations for loads and stores, which
// generates correct code for all architectures, but may require libatomic
// on platforms where e.g. 64-bit atomics are not supported natively.
// See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
// for mappings of the memory model to different processors.
inline void atomic_signal_fence(memory_order) {
inline void atomic_signal_fence(memory_order mo) { __atomic_signal_fence(mo); }
inline void atomic_thread_fence(memory_order mo) { __atomic_thread_fence(mo); }
inline void proc_yield(int cnt) {
__asm__ __volatile__("" ::: "memory");
#if defined(__i386__) || defined(__x86_64__)
for (int i = 0; i < cnt; i++) __asm__ __volatile__("pause");
__asm__ __volatile__("" ::: "memory");
#endif
}
inline void atomic_thread_fence(memory_order) {
__sync_synchronize();
template <typename T>
inline typename T::Type atomic_load(const volatile T *a, memory_order mo) {
DCHECK(mo == memory_order_relaxed || mo == memory_order_consume ||
mo == memory_order_acquire || mo == memory_order_seq_cst);
DCHECK(!((uptr)a % sizeof(*a)));
return __atomic_load_n(&a->val_dont_use, mo);
}
template<typename T>
inline typename T::Type atomic_fetch_add(volatile T *a,
typename T::Type v, memory_order mo) {
template <typename T>
inline void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
DCHECK(mo == memory_order_relaxed || mo == memory_order_release ||
mo == memory_order_seq_cst);
DCHECK(!((uptr)a % sizeof(*a)));
__atomic_store_n(&a->val_dont_use, v, mo);
}
template <typename T>
inline typename T::Type atomic_fetch_add(volatile T *a, typename T::Type v,
memory_order mo) {
DCHECK(!((uptr)a % sizeof(*a)));
return __atomic_fetch_add(&a->val_dont_use, v, mo);
}
template <typename T>
inline typename T::Type atomic_fetch_sub(volatile T *a, typename T::Type v,
memory_order mo) {
(void)mo;
DCHECK(!((uptr)a % sizeof(*a)));
return __sync_fetch_and_add(&a->val_dont_use, v);
return __atomic_fetch_sub(&a->val_dont_use, v, mo);
}
template<typename T>
inline typename T::Type atomic_fetch_sub(volatile T *a,
typename T::Type v, memory_order mo) {
(void)mo;
template <typename T>
inline typename T::Type atomic_exchange(volatile T *a, typename T::Type v,
memory_order mo) {
DCHECK(!((uptr)a % sizeof(*a)));
return __sync_fetch_and_add(&a->val_dont_use, -v);
}
template<typename T>
inline typename T::Type atomic_exchange(volatile T *a,
typename T::Type v, memory_order mo) {
DCHECK(!((uptr)a % sizeof(*a)));
if (mo & (memory_order_release | memory_order_acq_rel | memory_order_seq_cst))
__sync_synchronize();
v = __sync_lock_test_and_set(&a->val_dont_use, v);
if (mo == memory_order_seq_cst)
__sync_synchronize();
return v;
return __atomic_exchange_n(&a->val_dont_use, v, mo);
}
template <typename T>
@ -82,9 +85,8 @@ inline bool atomic_compare_exchange_strong(volatile T *a, typename T::Type *cmp,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}
template<typename T>
inline bool atomic_compare_exchange_weak(volatile T *a,
typename T::Type *cmp,
template <typename T>
inline bool atomic_compare_exchange_weak(volatile T *a, typename T::Type *cmp,
typename T::Type xchg,
memory_order mo) {
return atomic_compare_exchange_strong(a, cmp, xchg, mo);
@ -92,13 +94,6 @@ inline bool atomic_compare_exchange_weak(volatile T *a,
} // namespace __sanitizer
// This include provides explicit template instantiations for atomic_uint64_t
// on MIPS32, which does not directly support 8 byte atomics. It has to
// proceed the template definitions above.
#if defined(_MIPS_SIM) && defined(_ABIO32) && _MIPS_SIM == _ABIO32
# include "sanitizer_atomic_clang_mips.h"
#endif
#undef ATOMIC_ORDER
#endif // SANITIZER_ATOMIC_CLANG_H

View File

@ -1,117 +0,0 @@
//===-- sanitizer_atomic_clang_mips.h ---------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Not intended for direct inclusion. Include sanitizer_atomic.h.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ATOMIC_CLANG_MIPS_H
#define SANITIZER_ATOMIC_CLANG_MIPS_H
namespace __sanitizer {
// MIPS32 does not support atomics > 4 bytes. To address this lack of
// functionality, the sanitizer library provides helper methods which use an
// internal spin lock mechanism to emulate atomic operations when the size is
// 8 bytes.
static void __spin_lock(volatile int *lock) {
while (__sync_lock_test_and_set(lock, 1))
while (*lock) {
}
}
static void __spin_unlock(volatile int *lock) { __sync_lock_release(lock); }
// Make sure the lock is on its own cache line to prevent false sharing.
// Put it inside a struct that is aligned and padded to the typical MIPS
// cacheline which is 32 bytes.
static struct {
int lock;
char pad[32 - sizeof(int)];
} __attribute__((aligned(32))) lock = {0, {0}};
template <>
inline atomic_uint64_t::Type atomic_fetch_add(volatile atomic_uint64_t *ptr,
atomic_uint64_t::Type val,
memory_order mo) {
DCHECK(mo &
(memory_order_relaxed | memory_order_release | memory_order_seq_cst));
DCHECK(!((uptr)ptr % sizeof(*ptr)));
atomic_uint64_t::Type ret;
__spin_lock(&lock.lock);
ret = *(const_cast<atomic_uint64_t::Type volatile *>(&ptr->val_dont_use));
ptr->val_dont_use = ret + val;
__spin_unlock(&lock.lock);
return ret;
}
template <>
inline atomic_uint64_t::Type atomic_fetch_sub(volatile atomic_uint64_t *ptr,
atomic_uint64_t::Type val,
memory_order mo) {
return atomic_fetch_add(ptr, -val, mo);
}
// 64-bit compare-and-swap emulated under the global spin lock.
// If *ptr equals *cmp, stores xchg and returns true; otherwise writes the
// observed value back through *cmp and returns false, matching the generic
// atomic_compare_exchange_strong contract (sanitizer_atomic_clang.h) so
// that CAS-retry loops can reload a fresh expected value. The original
// specialization omitted the failure write-back, diverging from the
// generic template.
template <>
inline bool atomic_compare_exchange_strong(volatile atomic_uint64_t *ptr,
                                           atomic_uint64_t::Type *cmp,
                                           atomic_uint64_t::Type xchg,
                                           memory_order mo) {
  DCHECK(mo &
         (memory_order_relaxed | memory_order_release | memory_order_seq_cst));
  DCHECK(!((uptr)ptr % sizeof(*ptr)));  // require natural alignment
  typedef atomic_uint64_t::Type Type;
  Type cmpv = *cmp;
  Type prev;
  bool ret = false;
  __spin_lock(&lock.lock);
  prev = *(const_cast<Type volatile *>(&ptr->val_dont_use));
  if (prev == cmpv) {
    ret = true;
    ptr->val_dont_use = xchg;
  }
  __spin_unlock(&lock.lock);
  // On failure, report the value we actually saw (outside the lock: prev is
  // a local copy, so no further synchronization is needed).
  if (!ret)
    *cmp = prev;
  return ret;
}
// 64-bit load under the spin lock, implemented as a fetch-add of zero so
// it serializes with the other lock-based 64-bit operations. The const
// qualifier is cast away only because the helper takes a mutable pointer;
// adding zero leaves the stored value unchanged.
template <>
inline atomic_uint64_t::Type atomic_load(const volatile atomic_uint64_t *ptr,
                                         memory_order mo) {
  DCHECK(mo &
         (memory_order_relaxed | memory_order_release | memory_order_seq_cst));
  DCHECK(!((uptr)ptr % sizeof(*ptr)));  // require natural alignment
  volatile atomic_uint64_t *writable =
      const_cast<volatile atomic_uint64_t *>(ptr);
  return atomic_fetch_add(writable, atomic_uint64_t::Type(0), mo);
}
// Emulated 64-bit atomic store for MIPS32: the write is performed under
// the global spin lock so it cannot tear or interleave with the other
// lock-based 64-bit operations above.
template <>
inline void atomic_store(volatile atomic_uint64_t *ptr, atomic_uint64_t::Type v,
                         memory_order mo) {
  DCHECK(mo &
         (memory_order_relaxed | memory_order_release | memory_order_seq_cst));
  DCHECK(!((uptr)ptr % sizeof(*ptr)));  // require natural alignment
  __spin_lock(&lock.lock);
  ptr->val_dont_use = v;
  __spin_unlock(&lock.lock);
}
} // namespace __sanitizer
#endif // SANITIZER_ATOMIC_CLANG_MIPS_H

View File

@ -1,85 +0,0 @@
//===-- sanitizer_atomic_clang_other.h --------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Not intended for direct inclusion. Include sanitizer_atomic.h.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ATOMIC_CLANG_OTHER_H
#define SANITIZER_ATOMIC_CLANG_OTHER_H
namespace __sanitizer {
// Best-effort processor yield for spin-wait loops. Generic targets have no
// cheap pause/yield instruction, so this emits only a compiler barrier; the
// requested yield count `cnt` is intentionally ignored.
inline void proc_yield(int cnt) {
  __asm__ __volatile__("" ::: "memory");
}
// Atomic load emulation for targets without dedicated per-arch support.
// When the access is natively atomic (smaller than 8 bytes, or any size on
// a 64-bit host) an aligned plain load is used and only the fences around
// it vary with the requested memory order; otherwise (an 8-byte load on a
// 32-bit host) the load is delegated to __atomic_load.
template<typename T>
inline typename T::Type atomic_load(
    const volatile T *a, memory_order mo) {
  // Only load-compatible orderings are legal here.
  DCHECK(mo & (memory_order_relaxed | memory_order_consume
      | memory_order_acquire | memory_order_seq_cst));
  DCHECK(!((uptr)a % sizeof(*a)));  // the location must be naturally aligned
  typename T::Type v;
  if (sizeof(*a) < 8 || sizeof(void*) == 8) {
    // Assume that aligned loads are atomic.
    if (mo == memory_order_relaxed) {
      v = a->val_dont_use;
    } else if (mo == memory_order_consume) {
      // Assume that processor respects data dependencies
      // (and that compiler won't break them).
      __asm__ __volatile__("" ::: "memory");
      v = a->val_dont_use;
      __asm__ __volatile__("" ::: "memory");
    } else if (mo == memory_order_acquire) {
      // Compiler barrier before, hardware fence after: later accesses may
      // not be reordered above this load.
      __asm__ __volatile__("" ::: "memory");
      v = a->val_dont_use;
      __sync_synchronize();
    } else {  // seq_cst
      // E.g. on POWER we need a hw fence even before the store.
      __sync_synchronize();
      v = a->val_dont_use;
      __sync_synchronize();
    }
  } else {
    // 64-bit load on a 32-bit platform: __atomic_load may be resolved from
    // libatomic on targets that cannot inline it.
    __atomic_load(const_cast<typename T::Type volatile *>(&a->val_dont_use), &v,
                  __ATOMIC_SEQ_CST);
  }
  return v;
}
// Atomic store emulation for targets without dedicated per-arch support.
// Mirrors atomic_load above: natively-atomic sizes use an aligned plain
// store with order-dependent fences; an 8-byte store on a 32-bit host is
// delegated to __atomic_store.
template<typename T>
inline void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
  // Only store-compatible orderings are legal here.
  DCHECK(mo & (memory_order_relaxed | memory_order_release
      | memory_order_seq_cst));
  DCHECK(!((uptr)a % sizeof(*a)));  // the location must be naturally aligned
  if (sizeof(*a) < 8 || sizeof(void*) == 8) {
    // Assume that aligned stores are atomic.
    if (mo == memory_order_relaxed) {
      a->val_dont_use = v;
    } else if (mo == memory_order_release) {
      // Hardware fence before, compiler barrier after: earlier accesses may
      // not be reordered below this store.
      __sync_synchronize();
      a->val_dont_use = v;
      __asm__ __volatile__("" ::: "memory");
    } else {  // seq_cst
      __sync_synchronize();
      a->val_dont_use = v;
      __sync_synchronize();
    }
  } else {
    // 64-bit store on a 32-bit platform.
    __atomic_store(&a->val_dont_use, &v, __ATOMIC_SEQ_CST);
  }
}
} // namespace __sanitizer
#endif // #ifndef SANITIZER_ATOMIC_CLANG_OTHER_H

View File

@ -1,113 +0,0 @@
//===-- sanitizer_atomic_clang_x86.h ----------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Not intended for direct inclusion. Include sanitizer_atomic.h.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ATOMIC_CLANG_X86_H
#define SANITIZER_ATOMIC_CLANG_X86_H
namespace __sanitizer {
// Hint the CPU that we are inside a spin-wait loop: issue `cnt` x86 PAUSE
// instructions, bracketed by compiler barriers so the surrounding spinning
// accesses are not cached in registers or reordered across the yield.
inline void proc_yield(int cnt) {
  __asm__ __volatile__("" ::: "memory");
  while (cnt-- > 0)
    __asm__ __volatile__("pause");
  __asm__ __volatile__("" ::: "memory");
}
// x86 atomic load. For natively-atomic sizes an aligned plain MOV is
// atomic and, under the x86 memory model, already carries acquire
// semantics, so only compiler barriers are required around it. A 64-bit
// load on a 32-bit host uses an MMX movq, a single 8-byte memory access.
template<typename T>
inline typename T::Type atomic_load(
    const volatile T *a, memory_order mo) {
  // Only load-compatible orderings are legal here.
  DCHECK(mo & (memory_order_relaxed | memory_order_consume
      | memory_order_acquire | memory_order_seq_cst));
  DCHECK(!((uptr)a % sizeof(*a)));  // the location must be naturally aligned
  typename T::Type v;
  if (sizeof(*a) < 8 || sizeof(void*) == 8) {
    // Assume that aligned loads are atomic.
    if (mo == memory_order_relaxed) {
      v = a->val_dont_use;
    } else if (mo == memory_order_consume) {
      // Assume that processor respects data dependencies
      // (and that compiler won't break them).
      __asm__ __volatile__("" ::: "memory");
      v = a->val_dont_use;
      __asm__ __volatile__("" ::: "memory");
    } else if (mo == memory_order_acquire) {
      __asm__ __volatile__("" ::: "memory");
      v = a->val_dont_use;
      // On x86 loads are implicitly acquire.
      __asm__ __volatile__("" ::: "memory");
    } else {  // seq_cst
      // On x86 a plain MOV load suffices even for seq_cst, because seq_cst
      // stores carry the full fence (see atomic_store below).
      __asm__ __volatile__("" ::: "memory");
      v = a->val_dont_use;
      __asm__ __volatile__("" ::: "memory");
    }
  } else {
    // 64-bit load on 32-bit platform.
    __asm__ __volatile__(
        "movq %1, %%mm0;"  // Use mmx reg for 64-bit atomic moves
        "movq %%mm0, %0;"  // (ptr could be read-only)
        "emms;"            // Empty mmx state/Reset FP regs
        : "=m" (v)
        : "m" (a->val_dont_use)
        : // mark the mmx registers as clobbered
#ifdef __MMX__
          "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7",
#endif  // #ifdef __MMX__
          "memory");
  }
  return v;
}
// x86 atomic store. For natively-atomic sizes an aligned plain MOV is
// atomic and implicitly a release store under the x86 memory model;
// seq_cst additionally needs a trailing full fence. A 64-bit store on a
// 32-bit host uses an MMX movq.
template<typename T>
inline void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
  // Only store-compatible orderings are legal here.
  DCHECK(mo & (memory_order_relaxed | memory_order_release
      | memory_order_seq_cst));
  DCHECK(!((uptr)a % sizeof(*a)));  // the location must be naturally aligned
  if (sizeof(*a) < 8 || sizeof(void*) == 8) {
    // Assume that aligned stores are atomic.
    if (mo == memory_order_relaxed) {
      a->val_dont_use = v;
    } else if (mo == memory_order_release) {
      // On x86 stores are implicitly release.
      __asm__ __volatile__("" ::: "memory");
      a->val_dont_use = v;
      __asm__ __volatile__("" ::: "memory");
    } else {  // seq_cst
      // On x86 stores are implicitly release.
      __asm__ __volatile__("" ::: "memory");
      a->val_dont_use = v;
      __sync_synchronize();  // full fence upgrades the store to seq_cst
    }
  } else {
    // 64-bit store on 32-bit platform.
    __asm__ __volatile__(
        "movq %1, %%mm0;"  // Use mmx reg for 64-bit atomic moves
        "movq %%mm0, %0;"
        "emms;"            // Empty mmx state/Reset FP regs
        : "=m" (a->val_dont_use)
        : "m" (v)
        : // mark the mmx registers as clobbered
#ifdef __MMX__
          "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7",
#endif  // #ifdef __MMX__
          "memory");
    if (mo == memory_order_seq_cst)
      __sync_synchronize();
  }
}
} // namespace __sanitizer
#endif // #ifndef SANITIZER_ATOMIC_CLANG_X86_H

View File

@ -70,8 +70,8 @@ inline void proc_yield(int cnt) {
template<typename T>
inline typename T::Type atomic_load(
const volatile T *a, memory_order mo) {
DCHECK(mo & (memory_order_relaxed | memory_order_consume
| memory_order_acquire | memory_order_seq_cst));
DCHECK(mo == memory_order_relaxed || mo == memory_order_consume ||
mo == memory_order_acquire || mo == memory_order_seq_cst);
DCHECK(!((uptr)a % sizeof(*a)));
typename T::Type v;
// FIXME(dvyukov): 64-bit load is not atomic on 32-bits.
@ -87,8 +87,8 @@ inline typename T::Type atomic_load(
template<typename T>
inline void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
DCHECK(mo & (memory_order_relaxed | memory_order_release
| memory_order_seq_cst));
DCHECK(mo == memory_order_relaxed || mo == memory_order_release ||
mo == memory_order_seq_cst);
DCHECK(!((uptr)a % sizeof(*a)));
// FIXME(dvyukov): 64-bit store is not atomic on 32-bits.
if (mo == memory_order_relaxed) {

View File

@ -321,23 +321,23 @@ class TwoLevelBitVector {
};
private:
void check(uptr idx) const { CHECK_LE(idx, size()); }
void check(uptr idx) const { CHECK_LT(idx, size()); }
uptr idx0(uptr idx) const {
uptr res = idx / (BV::kSize * BV::kSize);
CHECK_LE(res, kLevel1Size);
CHECK_LT(res, kLevel1Size);
return res;
}
uptr idx1(uptr idx) const {
uptr res = (idx / BV::kSize) % BV::kSize;
CHECK_LE(res, BV::kSize);
CHECK_LT(res, BV::kSize);
return res;
}
uptr idx2(uptr idx) const {
uptr res = idx % BV::kSize;
CHECK_LE(res, BV::kSize);
CHECK_LT(res, BV::kSize);
return res;
}

View File

@ -139,9 +139,11 @@ u32 ChainedOriginDepot::Get(u32 id, u32 *other) {
return desc.here_id;
}
void ChainedOriginDepot::LockAll() { depot.LockAll(); }
void ChainedOriginDepot::LockBeforeFork() { depot.LockBeforeFork(); }
void ChainedOriginDepot::UnlockAll() { depot.UnlockAll(); }
void ChainedOriginDepot::UnlockAfterFork(bool fork_child) {
depot.UnlockAfterFork(fork_child);
}
void ChainedOriginDepot::TestOnlyUnmap() { depot.TestOnlyUnmap(); }

View File

@ -32,8 +32,8 @@ class ChainedOriginDepot {
// Retrieves the stored StackDepot ID for the given origin ID.
u32 Get(u32 id, u32 *other);
void LockAll();
void UnlockAll();
void LockBeforeFork();
void UnlockAfterFork(bool fork_child);
void TestOnlyUnmap();
private:

View File

@ -115,8 +115,9 @@ void ReportErrorSummary(const char *error_message, const char *alt_tool_name) {
if (!common_flags()->print_summary)
return;
InternalScopedString buff;
buff.append("SUMMARY: %s: %s",
alt_tool_name ? alt_tool_name : SanitizerToolName, error_message);
buff.AppendF("SUMMARY: %s: %s",
alt_tool_name ? alt_tool_name : SanitizerToolName,
error_message);
__sanitizer_report_error_summary(buff.data());
}
@ -346,7 +347,13 @@ void RunMallocHooks(void *ptr, uptr size) {
}
}
void RunFreeHooks(void *ptr) {
// Returns '1' if the call to free() should be ignored (based on
// __sanitizer_ignore_free_hook), or '0' otherwise.
int RunFreeHooks(void *ptr) {
if (__sanitizer_ignore_free_hook(ptr)) {
return 1;
}
__sanitizer_free_hook(ptr);
for (int i = 0; i < kMaxMallocFreeHooks; i++) {
auto hook = MFHooks[i].free_hook;
@ -354,6 +361,8 @@ void RunFreeHooks(void *ptr) {
break;
hook(ptr);
}
return 0;
}
static int InstallMallocFreeHooks(void (*malloc_hook)(const void *, uptr),
@ -418,4 +427,9 @@ SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_free_hook, void *ptr) {
(void)ptr;
}
SANITIZER_INTERFACE_WEAK_DEF(int, __sanitizer_ignore_free_hook, void *ptr) {
(void)ptr;
return 0;
}
} // extern "C"

View File

@ -32,6 +32,7 @@ struct AddressInfo;
struct BufferedStackTrace;
struct SignalContext;
struct StackTrace;
struct SymbolizedStack;
// Constants.
const uptr kWordSize = SANITIZER_WORDSIZE / 8;
@ -59,14 +60,10 @@ inline int Verbosity() {
return atomic_load(&current_verbosity, memory_order_relaxed);
}
#if SANITIZER_ANDROID
inline uptr GetPageSize() {
// Android post-M sysconf(_SC_PAGESIZE) crashes if called from .preinit_array.
return 4096;
}
inline uptr GetPageSizeCached() {
return 4096;
}
#if SANITIZER_ANDROID && !defined(__aarch64__)
// 32-bit Android only has 4k pages.
inline uptr GetPageSize() { return 4096; }
inline uptr GetPageSizeCached() { return 4096; }
#else
uptr GetPageSize();
extern uptr PageSizeCached;
@ -76,6 +73,7 @@ inline uptr GetPageSizeCached() {
return PageSizeCached;
}
#endif
uptr GetMmapGranularity();
uptr GetMaxVirtualAddress();
uptr GetMaxUserVirtualAddress();
@ -90,10 +88,11 @@ void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
// Memory management
void *MmapOrDie(uptr size, const char *mem_type, bool raw_report = false);
inline void *MmapOrDieQuietly(uptr size, const char *mem_type) {
return MmapOrDie(size, mem_type, /*raw_report*/ true);
}
void UnmapOrDie(void *addr, uptr size);
void UnmapOrDie(void *addr, uptr size, bool raw_report = false);
// Behaves just like MmapOrDie, but tolerates out of memory condition, in that
// case returns nullptr.
void *MmapOrDieOnFatalError(uptr size, const char *mem_type);
@ -138,7 +137,8 @@ void UnmapFromTo(uptr from, uptr to);
// shadow_size_bytes bytes on the right, which on linux is mapped no access.
// The high_mem_end may be updated if the original shadow size doesn't fit.
uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
uptr min_shadow_base_alignment, uptr &high_mem_end);
uptr min_shadow_base_alignment, uptr &high_mem_end,
uptr granularity);
// Let S = max(shadow_size, num_aliases * alias_size, ring_buffer_size).
// Reserves 2*S bytes of address space to the right of the returned address and
@ -177,7 +177,7 @@ bool DontDumpShadowMemory(uptr addr, uptr length);
// Check if the built VMA size matches the runtime one.
void CheckVMASize();
void RunMallocHooks(void *ptr, uptr size);
void RunFreeHooks(void *ptr);
int RunFreeHooks(void *ptr);
class ReservedAddressRange {
public:
@ -208,6 +208,11 @@ void ParseUnixMemoryProfile(fill_profile_f cb, uptr *stats, char *smaps,
// Simple low-level (mmap-based) allocator for internal use. Doesn't have
// constructor, so all instances of LowLevelAllocator should be
// linker initialized.
//
// NOTE: Users should use the singleton provided via
// `GetGlobalLowLevelAllocator()` rather than create a new one. This reduces
// the number of mmap fragments, since allocations share the same contiguous
// mmap provided by the singleton.
class LowLevelAllocator {
public:
// Requires an external lock.
@ -224,6 +229,8 @@ typedef void (*LowLevelAllocateCallback)(uptr ptr, uptr size);
// Passing NULL removes the callback.
void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback);
LowLevelAllocator &GetGlobalLowLevelAllocator();
// IO
void CatastrophicErrorWrite(const char *buffer, uptr length);
void RawWrite(const char *buffer);
@ -386,6 +393,8 @@ void ReportErrorSummary(const char *error_type, const AddressInfo &info,
// Same as above, but obtains AddressInfo by symbolizing top stack trace frame.
void ReportErrorSummary(const char *error_type, const StackTrace *trace,
const char *alt_tool_name = nullptr);
// Skips frames which we consider internal and not useful to the users.
const SymbolizedStack *SkipInternalFrames(const SymbolizedStack *frames);
void ReportMmapWriteExec(int prot, int mflags);
@ -500,7 +509,7 @@ inline int ToLower(int c) {
// A low-level vector based on mmap. May incur a significant memory overhead for
// small vectors.
// WARNING: The current implementation supports only POD types.
template<typename T>
template <typename T, bool raw_report = false>
class InternalMmapVectorNoCtor {
public:
using value_type = T;
@ -510,7 +519,7 @@ class InternalMmapVectorNoCtor {
data_ = 0;
reserve(initial_capacity);
}
void Destroy() { UnmapOrDie(data_, capacity_bytes_); }
void Destroy() { UnmapOrDie(data_, capacity_bytes_, raw_report); }
T &operator[](uptr i) {
CHECK_LT(i, size_);
return data_[i];
@ -586,9 +595,10 @@ class InternalMmapVectorNoCtor {
CHECK_LE(size_, new_capacity);
uptr new_capacity_bytes =
RoundUpTo(new_capacity * sizeof(T), GetPageSizeCached());
T *new_data = (T *)MmapOrDie(new_capacity_bytes, "InternalMmapVector");
T *new_data =
(T *)MmapOrDie(new_capacity_bytes, "InternalMmapVector", raw_report);
internal_memcpy(new_data, data_, size_ * sizeof(T));
UnmapOrDie(data_, capacity_bytes_);
UnmapOrDie(data_, capacity_bytes_, raw_report);
data_ = new_data;
capacity_bytes_ = new_capacity_bytes;
}
@ -636,7 +646,8 @@ class InternalScopedString {
buffer_.resize(1);
buffer_[0] = '\0';
}
void append(const char *format, ...) FORMAT(2, 3);
void Append(const char *str);
void AppendF(const char *format, ...) FORMAT(2, 3);
const char *data() const { return buffer_.data(); }
char *data() { return buffer_.data(); }
@ -1086,7 +1097,7 @@ inline u32 GetNumberOfCPUsCached() {
} // namespace __sanitizer
inline void *operator new(__sanitizer::operator_new_size_type size,
inline void *operator new(__sanitizer::usize size,
__sanitizer::LowLevelAllocator &alloc) {
return alloc.Allocate(size);
}

View File

@ -33,16 +33,17 @@
// COMMON_INTERCEPTOR_STRERROR
//===----------------------------------------------------------------------===//
#include <stdarg.h>
#include "interception/interception.h"
#include "sanitizer_addrhashmap.h"
#include "sanitizer_dl.h"
#include "sanitizer_errno.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_platform_interceptors.h"
#include "sanitizer_symbolizer.h"
#include "sanitizer_tls_get_addr.h"
#include <stdarg.h>
#if SANITIZER_INTERCEPTOR_HOOKS
#define CALL_WEAK_INTERCEPTOR_HOOK(f, ...) f(__VA_ARGS__);
#define DECLARE_WEAK_INTERCEPTOR_HOOK(f, ...) \
@ -445,11 +446,13 @@ INTERCEPTOR(char*, textdomain, const char *domainname) {
#define INIT_TEXTDOMAIN
#endif
#if SANITIZER_INTERCEPT_STRCMP
#if SANITIZER_INTERCEPT_STRCMP || SANITIZER_INTERCEPT_MEMCMP
static inline int CharCmpX(unsigned char c1, unsigned char c2) {
return (c1 == c2) ? 0 : (c1 < c2) ? -1 : 1;
}
#endif
#if SANITIZER_INTERCEPT_STRCMP
DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strcmp, uptr called_pc,
const char *s1, const char *s2, int result)
@ -971,7 +974,7 @@ INTERCEPTOR(SSIZE_T, read, int fd, void *ptr, SIZE_T count) {
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
SSIZE_T res = REAL(read)(fd, ptr, count);
SSIZE_T res = COMMON_INTERCEPTOR_BLOCK_REAL(read)(fd, ptr, count);
if (res > 0) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, res);
if (res >= 0 && fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
return res;
@ -1006,7 +1009,7 @@ INTERCEPTOR(SSIZE_T, pread, int fd, void *ptr, SIZE_T count, OFF_T offset) {
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
SSIZE_T res = REAL(pread)(fd, ptr, count, offset);
SSIZE_T res = COMMON_INTERCEPTOR_BLOCK_REAL(pread)(fd, ptr, count, offset);
if (res > 0) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, res);
if (res >= 0 && fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
return res;
@ -1024,7 +1027,7 @@ INTERCEPTOR(SSIZE_T, pread64, int fd, void *ptr, SIZE_T count, OFF64_T offset) {
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
SSIZE_T res = REAL(pread64)(fd, ptr, count, offset);
SSIZE_T res = COMMON_INTERCEPTOR_BLOCK_REAL(pread64)(fd, ptr, count, offset);
if (res > 0) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, res);
if (res >= 0 && fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
return res;
@ -1040,7 +1043,7 @@ INTERCEPTOR_WITH_SUFFIX(SSIZE_T, readv, int fd, __sanitizer_iovec *iov,
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, readv, fd, iov, iovcnt);
COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
SSIZE_T res = REAL(readv)(fd, iov, iovcnt);
SSIZE_T res = COMMON_INTERCEPTOR_BLOCK_REAL(readv)(fd, iov, iovcnt);
if (res > 0) write_iovec(ctx, iov, iovcnt, res);
if (res >= 0 && fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
return res;
@ -1056,7 +1059,7 @@ INTERCEPTOR(SSIZE_T, preadv, int fd, __sanitizer_iovec *iov, int iovcnt,
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, preadv, fd, iov, iovcnt, offset);
COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
SSIZE_T res = REAL(preadv)(fd, iov, iovcnt, offset);
SSIZE_T res = COMMON_INTERCEPTOR_BLOCK_REAL(preadv)(fd, iov, iovcnt, offset);
if (res > 0) write_iovec(ctx, iov, iovcnt, res);
if (res >= 0 && fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
return res;
@ -1072,7 +1075,8 @@ INTERCEPTOR(SSIZE_T, preadv64, int fd, __sanitizer_iovec *iov, int iovcnt,
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, preadv64, fd, iov, iovcnt, offset);
COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
SSIZE_T res = REAL(preadv64)(fd, iov, iovcnt, offset);
SSIZE_T res =
COMMON_INTERCEPTOR_BLOCK_REAL(preadv64)(fd, iov, iovcnt, offset);
if (res > 0) write_iovec(ctx, iov, iovcnt, res);
if (res >= 0 && fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
return res;
@ -1088,8 +1092,9 @@ INTERCEPTOR(SSIZE_T, write, int fd, void *ptr, SIZE_T count) {
COMMON_INTERCEPTOR_ENTER(ctx, write, fd, ptr, count);
COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
if (fd >= 0) COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);
SSIZE_T res = REAL(write)(fd, ptr, count);
// FIXME: this check should be _before_ the call to REAL(write), not after
SSIZE_T res = COMMON_INTERCEPTOR_BLOCK_REAL(write)(fd, ptr, count);
// FIXME: this check should be _before_ the call to
// COMMON_INTERCEPTOR_BLOCK_REAL(write), not after
if (res > 0) COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, res);
return res;
}
@ -1118,7 +1123,7 @@ INTERCEPTOR(SSIZE_T, pwrite, int fd, void *ptr, SIZE_T count, OFF_T offset) {
COMMON_INTERCEPTOR_ENTER(ctx, pwrite, fd, ptr, count, offset);
COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
if (fd >= 0) COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);
SSIZE_T res = REAL(pwrite)(fd, ptr, count, offset);
SSIZE_T res = COMMON_INTERCEPTOR_BLOCK_REAL(pwrite)(fd, ptr, count, offset);
if (res > 0) COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, res);
return res;
}
@ -1134,7 +1139,7 @@ INTERCEPTOR(SSIZE_T, pwrite64, int fd, void *ptr, OFF64_T count,
COMMON_INTERCEPTOR_ENTER(ctx, pwrite64, fd, ptr, count, offset);
COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
if (fd >= 0) COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);
SSIZE_T res = REAL(pwrite64)(fd, ptr, count, offset);
SSIZE_T res = COMMON_INTERCEPTOR_BLOCK_REAL(pwrite64)(fd, ptr, count, offset);
if (res > 0) COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, res);
return res;
}
@ -1150,7 +1155,7 @@ INTERCEPTOR_WITH_SUFFIX(SSIZE_T, writev, int fd, __sanitizer_iovec *iov,
COMMON_INTERCEPTOR_ENTER(ctx, writev, fd, iov, iovcnt);
COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
if (fd >= 0) COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);
SSIZE_T res = REAL(writev)(fd, iov, iovcnt);
SSIZE_T res = COMMON_INTERCEPTOR_BLOCK_REAL(writev)(fd, iov, iovcnt);
if (res > 0) read_iovec(ctx, iov, iovcnt, res);
return res;
}
@ -1166,7 +1171,7 @@ INTERCEPTOR(SSIZE_T, pwritev, int fd, __sanitizer_iovec *iov, int iovcnt,
COMMON_INTERCEPTOR_ENTER(ctx, pwritev, fd, iov, iovcnt, offset);
COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
if (fd >= 0) COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);
SSIZE_T res = REAL(pwritev)(fd, iov, iovcnt, offset);
SSIZE_T res = COMMON_INTERCEPTOR_BLOCK_REAL(pwritev)(fd, iov, iovcnt, offset);
if (res > 0) read_iovec(ctx, iov, iovcnt, res);
return res;
}
@ -1182,7 +1187,8 @@ INTERCEPTOR(SSIZE_T, pwritev64, int fd, __sanitizer_iovec *iov, int iovcnt,
COMMON_INTERCEPTOR_ENTER(ctx, pwritev64, fd, iov, iovcnt, offset);
COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
if (fd >= 0) COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);
SSIZE_T res = REAL(pwritev64)(fd, iov, iovcnt, offset);
SSIZE_T res =
COMMON_INTERCEPTOR_BLOCK_REAL(pwritev64)(fd, iov, iovcnt, offset);
if (res > 0) read_iovec(ctx, iov, iovcnt, res);
return res;
}
@ -1245,6 +1251,7 @@ INTERCEPTOR(int, prctl, int option, unsigned long arg2, unsigned long arg3,
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, prctl, option, arg2, arg3, arg4, arg5);
static const int PR_SET_NAME = 15;
static const int PR_GET_NAME = 16;
static const int PR_SET_VMA = 0x53564d41;
static const int PR_SCHED_CORE = 62;
static const int PR_SCHED_CORE_GET = 0;
@ -1258,7 +1265,11 @@ INTERCEPTOR(int, prctl, int option, unsigned long arg2, unsigned long arg3,
internal_strncpy(buff, (char *)arg2, 15);
buff[15] = 0;
COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, buff);
} else if (res != -1 && option == PR_SCHED_CORE && arg2 == PR_SCHED_CORE_GET) {
} else if (res == 0 && option == PR_GET_NAME) {
char *name = (char *)arg2;
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, name, internal_strlen(name) + 1);
} else if (res != -1 && option == PR_SCHED_CORE &&
arg2 == PR_SCHED_CORE_GET) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, (u64*)(arg5), sizeof(u64));
}
return res;
@ -2546,7 +2557,7 @@ INTERCEPTOR_WITH_SUFFIX(int, wait, int *status) {
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
int res = REAL(wait)(status);
int res = COMMON_INTERCEPTOR_BLOCK_REAL(wait)(status);
if (res != -1 && status)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, status, sizeof(*status));
return res;
@ -2564,7 +2575,7 @@ INTERCEPTOR_WITH_SUFFIX(int, waitid, int idtype, int id, void *infop,
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
int res = REAL(waitid)(idtype, id, infop, options);
int res = COMMON_INTERCEPTOR_BLOCK_REAL(waitid)(idtype, id, infop, options);
if (res != -1 && infop)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, infop, siginfo_t_sz);
return res;
@ -2575,7 +2586,7 @@ INTERCEPTOR_WITH_SUFFIX(int, waitpid, int pid, int *status, int options) {
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
int res = REAL(waitpid)(pid, status, options);
int res = COMMON_INTERCEPTOR_BLOCK_REAL(waitpid)(pid, status, options);
if (res != -1 && status)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, status, sizeof(*status));
return res;
@ -2586,7 +2597,7 @@ INTERCEPTOR(int, wait3, int *status, int options, void *rusage) {
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
int res = REAL(wait3)(status, options, rusage);
int res = COMMON_INTERCEPTOR_BLOCK_REAL(wait3)(status, options, rusage);
if (res != -1) {
if (status) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, status, sizeof(*status));
if (rusage) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, rusage, struct_rusage_sz);
@ -2600,7 +2611,8 @@ INTERCEPTOR(int, __wait4, int pid, int *status, int options, void *rusage) {
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
int res = REAL(__wait4)(pid, status, options, rusage);
int res =
COMMON_INTERCEPTOR_BLOCK_REAL(__wait4)(pid, status, options, rusage);
if (res != -1) {
if (status) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, status, sizeof(*status));
if (rusage) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, rusage, struct_rusage_sz);
@ -2615,7 +2627,7 @@ INTERCEPTOR(int, wait4, int pid, int *status, int options, void *rusage) {
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
int res = REAL(wait4)(pid, status, options, rusage);
int res = COMMON_INTERCEPTOR_BLOCK_REAL(wait4)(pid, status, options, rusage);
if (res != -1) {
if (status) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, status, sizeof(*status));
if (rusage) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, rusage, struct_rusage_sz);
@ -2993,7 +3005,7 @@ INTERCEPTOR(int, accept, int fd, void *addr, unsigned *addrlen) {
COMMON_INTERCEPTOR_READ_RANGE(ctx, addrlen, sizeof(*addrlen));
addrlen0 = *addrlen;
}
int fd2 = REAL(accept)(fd, addr, addrlen);
int fd2 = COMMON_INTERCEPTOR_BLOCK_REAL(accept)(fd, addr, addrlen);
if (fd2 >= 0) {
if (fd >= 0) COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, fd2);
if (addr && addrlen)
@ -3018,7 +3030,7 @@ INTERCEPTOR(int, accept4, int fd, void *addr, unsigned *addrlen, int f) {
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
int fd2 = REAL(accept4)(fd, addr, addrlen, f);
int fd2 = COMMON_INTERCEPTOR_BLOCK_REAL(accept4)(fd, addr, addrlen, f);
if (fd2 >= 0) {
if (fd >= 0) COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, fd2);
if (addr && addrlen)
@ -3042,7 +3054,7 @@ INTERCEPTOR(int, paccept, int fd, void *addr, unsigned *addrlen,
addrlen0 = *addrlen;
}
if (set) COMMON_INTERCEPTOR_READ_RANGE(ctx, set, sizeof(*set));
int fd2 = REAL(paccept)(fd, addr, addrlen, set, f);
int fd2 = COMMON_INTERCEPTOR_BLOCK_REAL(paccept)(fd, addr, addrlen, set, f);
if (fd2 >= 0) {
if (fd >= 0) COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, fd2);
if (addr && addrlen)
@ -3123,7 +3135,7 @@ INTERCEPTOR(SSIZE_T, recvmsg, int fd, struct __sanitizer_msghdr *msg,
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
SSIZE_T res = REAL(recvmsg)(fd, msg, flags);
SSIZE_T res = COMMON_INTERCEPTOR_BLOCK_REAL(recvmsg)(fd, msg, flags);
if (res >= 0) {
if (fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
if (msg) {
@ -3144,7 +3156,8 @@ INTERCEPTOR(int, recvmmsg, int fd, struct __sanitizer_mmsghdr *msgvec,
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, recvmmsg, fd, msgvec, vlen, flags, timeout);
if (timeout) COMMON_INTERCEPTOR_READ_RANGE(ctx, timeout, struct_timespec_sz);
int res = REAL(recvmmsg)(fd, msgvec, vlen, flags, timeout);
int res =
COMMON_INTERCEPTOR_BLOCK_REAL(recvmmsg)(fd, msgvec, vlen, flags, timeout);
if (res >= 0) {
if (fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
for (int i = 0; i < res; ++i) {
@ -3222,7 +3235,7 @@ INTERCEPTOR(SSIZE_T, sendmsg, int fd, struct __sanitizer_msghdr *msg,
COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);
}
SSIZE_T res = REAL(sendmsg)(fd, msg, flags);
SSIZE_T res = COMMON_INTERCEPTOR_BLOCK_REAL(sendmsg)(fd, msg, flags);
if (common_flags()->intercept_send && res >= 0 && msg)
read_msghdr(ctx, msg, res);
return res;
@ -3241,7 +3254,7 @@ INTERCEPTOR(int, sendmmsg, int fd, struct __sanitizer_mmsghdr *msgvec,
COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);
}
int res = REAL(sendmmsg)(fd, msgvec, vlen, flags);
int res = COMMON_INTERCEPTOR_BLOCK_REAL(sendmmsg)(fd, msgvec, vlen, flags);
if (res >= 0 && msgvec) {
for (int i = 0; i < res; ++i) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, &msgvec[i].msg_len,
@ -3264,7 +3277,7 @@ INTERCEPTOR(int, msgsnd, int msqid, const void *msgp, SIZE_T msgsz,
COMMON_INTERCEPTOR_ENTER(ctx, msgsnd, msqid, msgp, msgsz, msgflg);
if (msgp)
COMMON_INTERCEPTOR_READ_RANGE(ctx, msgp, sizeof(long) + msgsz);
int res = REAL(msgsnd)(msqid, msgp, msgsz, msgflg);
int res = COMMON_INTERCEPTOR_BLOCK_REAL(msgsnd)(msqid, msgp, msgsz, msgflg);
return res;
}
@ -3272,7 +3285,8 @@ INTERCEPTOR(SSIZE_T, msgrcv, int msqid, void *msgp, SIZE_T msgsz,
long msgtyp, int msgflg) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, msgrcv, msqid, msgp, msgsz, msgtyp, msgflg);
SSIZE_T len = REAL(msgrcv)(msqid, msgp, msgsz, msgtyp, msgflg);
SSIZE_T len =
COMMON_INTERCEPTOR_BLOCK_REAL(msgrcv)(msqid, msgp, msgsz, msgtyp, msgflg);
if (len != -1)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, msgp, sizeof(long) + len);
return len;
@ -6116,7 +6130,7 @@ INTERCEPTOR(int, flopen, const char *path, int flags, ...) {
if (path) {
COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
}
return REAL(flopen)(path, flags, mode);
return COMMON_INTERCEPTOR_BLOCK_REAL(flopen)(path, flags, mode);
}
INTERCEPTOR(int, flopenat, int dirfd, const char *path, int flags, ...) {
@ -6129,7 +6143,7 @@ INTERCEPTOR(int, flopenat, int dirfd, const char *path, int flags, ...) {
if (path) {
COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
}
return REAL(flopenat)(dirfd, path, flags, mode);
return COMMON_INTERCEPTOR_BLOCK_REAL(flopenat)(dirfd, path, flags, mode);
}
#define INIT_FLOPEN \
@ -6305,7 +6319,36 @@ INTERCEPTOR(int, fclose, __sanitizer_FILE *fp) {
INTERCEPTOR(void*, dlopen, const char *filename, int flag) {
void *ctx;
COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, dlopen, filename, flag);
if (filename) COMMON_INTERCEPTOR_READ_STRING(ctx, filename, 0);
if (filename) {
COMMON_INTERCEPTOR_READ_STRING(ctx, filename, 0);
# if !SANITIZER_DYNAMIC
// We care about a very specific use-case: dladdr on
// statically-linked ASan may return <main program>
// instead of the library.
// We therefore only take effect if the sanitizer is statically
// linked, and we don't bother canonicalizing paths because
// dladdr should return the same address both times (we assume
// the user did not canonicalize the result from dladdr).
if (common_flags()->test_only_replace_dlopen_main_program) {
VPrintf(1, "dlopen interceptor: filename: %s\n", filename);
const char *SelfFName = DladdrSelfFName();
VPrintf(1, "dlopen interceptor: DladdrSelfFName: %p %s\n",
(const void *)SelfFName, SelfFName);
if (SelfFName && internal_strcmp(SelfFName, filename) == 0) {
// It's possible they copied the string from dladdr, so
// we do a string comparison rather than pointer comparison.
VPrintf(1, "dlopen interceptor: replacing %s because it matches %s\n",
filename, SelfFName);
filename = (char *)0; // RTLD_DEFAULT
}
}
# endif // !SANITIZER_DYNAMIC
}
void *res = COMMON_INTERCEPTOR_DLOPEN(filename, flag);
Symbolizer::GetOrInit()->InvalidateModuleList();
COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, res);
@ -6685,7 +6728,7 @@ INTERCEPTOR(SSIZE_T, recv, int fd, void *buf, SIZE_T len, int flags) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, recv, fd, buf, len, flags);
COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
SSIZE_T res = REAL(recv)(fd, buf, len, flags);
SSIZE_T res = COMMON_INTERCEPTOR_BLOCK_REAL(recv)(fd, buf, len, flags);
if (res > 0) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, Min((SIZE_T)res, len));
}
@ -6702,7 +6745,8 @@ INTERCEPTOR(SSIZE_T, recvfrom, int fd, void *buf, SIZE_T len, int flags,
SIZE_T srcaddr_sz;
if (srcaddr) srcaddr_sz = *addrlen;
(void)srcaddr_sz; // prevent "set but not used" warning
SSIZE_T res = REAL(recvfrom)(fd, buf, len, flags, srcaddr, addrlen);
SSIZE_T res = COMMON_INTERCEPTOR_BLOCK_REAL(recvfrom)(fd, buf, len, flags,
srcaddr, addrlen);
if (res > 0)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, Min((SIZE_T)res, len));
if (res >= 0 && srcaddr)
@ -6725,7 +6769,7 @@ INTERCEPTOR(SSIZE_T, send, int fd, void *buf, SIZE_T len, int flags) {
COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);
}
SSIZE_T res = REAL(send)(fd, buf, len, flags);
SSIZE_T res = COMMON_INTERCEPTOR_BLOCK_REAL(send)(fd, buf, len, flags);
if (common_flags()->intercept_send && res > 0)
COMMON_INTERCEPTOR_READ_RANGE(ctx, buf, Min((SIZE_T)res, len));
return res;
@ -6740,7 +6784,8 @@ INTERCEPTOR(SSIZE_T, sendto, int fd, void *buf, SIZE_T len, int flags,
COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);
}
// Can't check dstaddr as it may have uninitialized padding at the end.
SSIZE_T res = REAL(sendto)(fd, buf, len, flags, dstaddr, addrlen);
SSIZE_T res = COMMON_INTERCEPTOR_BLOCK_REAL(sendto)(fd, buf, len, flags,
dstaddr, addrlen);
if (common_flags()->intercept_send && res > 0)
COMMON_INTERCEPTOR_READ_RANGE(ctx, buf, Min((SIZE_T)res, len));
return res;
@ -6753,25 +6798,25 @@ INTERCEPTOR(SSIZE_T, sendto, int fd, void *buf, SIZE_T len, int flags,
#endif
#if SANITIZER_INTERCEPT_EVENTFD_READ_WRITE
INTERCEPTOR(int, eventfd_read, int fd, u64 *value) {
INTERCEPTOR(int, eventfd_read, int fd, __sanitizer_eventfd_t *value) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, eventfd_read, fd, value);
COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
int res = REAL(eventfd_read)(fd, value);
int res = COMMON_INTERCEPTOR_BLOCK_REAL(eventfd_read)(fd, value);
if (res == 0) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, value, sizeof(*value));
if (fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
}
return res;
}
INTERCEPTOR(int, eventfd_write, int fd, u64 value) {
INTERCEPTOR(int, eventfd_write, int fd, __sanitizer_eventfd_t value) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, eventfd_write, fd, value);
if (fd >= 0) {
COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);
}
int res = REAL(eventfd_write)(fd, value);
int res = COMMON_INTERCEPTOR_BLOCK_REAL(eventfd_write)(fd, value);
return res;
}
#define INIT_EVENTFD_READ_WRITE \
@ -7394,7 +7439,8 @@ INTERCEPTOR(int, open_by_handle_at, int mount_fd, struct file_handle* handle,
COMMON_INTERCEPTOR_READ_RANGE(
ctx, &sanitizer_handle->f_handle, sanitizer_handle->handle_bytes);
return REAL(open_by_handle_at)(mount_fd, handle, flags);
return COMMON_INTERCEPTOR_BLOCK_REAL(open_by_handle_at)(mount_fd, handle,
flags);
}
#define INIT_OPEN_BY_HANDLE_AT COMMON_INTERCEPT_FUNCTION(open_by_handle_at)
@ -7609,9 +7655,9 @@ static void write_protoent(void *ctx, struct __sanitizer_protoent *p) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->p_aliases, pp_size * sizeof(char *));
}
INTERCEPTOR(struct __sanitizer_protoent *, getprotoent) {
INTERCEPTOR(struct __sanitizer_protoent *, getprotoent,) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getprotoent);
COMMON_INTERCEPTOR_ENTER(ctx, getprotoent,);
struct __sanitizer_protoent *p = REAL(getprotoent)();
if (p)
write_protoent(ctx, p);
@ -7698,9 +7744,9 @@ INTERCEPTOR(int, getprotobynumber_r, int num,
#endif
#if SANITIZER_INTERCEPT_NETENT
INTERCEPTOR(struct __sanitizer_netent *, getnetent) {
INTERCEPTOR(struct __sanitizer_netent *, getnetent,) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getnetent);
COMMON_INTERCEPTOR_ENTER(ctx, getnetent,);
struct __sanitizer_netent *n = REAL(getnetent)();
if (n) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n, sizeof(*n));
@ -9862,9 +9908,9 @@ INTERCEPTOR(char *, fdevname_r, int fd, char *buf, SIZE_T len) {
#endif
#if SANITIZER_INTERCEPT_GETUSERSHELL
INTERCEPTOR(char *, getusershell) {
INTERCEPTOR(char *, getusershell,) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getusershell);
COMMON_INTERCEPTOR_ENTER(ctx, getusershell,);
char *res = REAL(getusershell)();
if (res)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, internal_strlen(res) + 1);
@ -9933,7 +9979,13 @@ INTERCEPTOR(void, sl_free, void *sl, int freeall) {
INTERCEPTOR(SSIZE_T, getrandom, void *buf, SIZE_T buflen, unsigned int flags) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getrandom, buf, buflen, flags);
SSIZE_T n = REAL(getrandom)(buf, buflen, flags);
// If GRND_NONBLOCK is set in the flags, it is non blocking.
static const int grnd_nonblock = 1;
SSIZE_T n;
if ((flags & grnd_nonblock))
n = REAL(getrandom)(buf, buflen, flags);
else
n = COMMON_INTERCEPTOR_BLOCK_REAL(getrandom)(buf, buflen, flags);
if (n > 0) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, n);
}
@ -10180,20 +10232,6 @@ INTERCEPTOR(int, __xuname, int size, void *utsname) {
#define INIT___XUNAME
#endif
#if SANITIZER_INTERCEPT_HEXDUMP
INTERCEPTOR(void, hexdump, const void *ptr, int length, const char *header, int flags) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, hexdump, ptr, length, header, flags);
COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, length);
COMMON_INTERCEPTOR_READ_RANGE(ctx, header, internal_strlen(header) + 1);
REAL(hexdump)(ptr, length, header, flags);
}
#define INIT_HEXDUMP COMMON_INTERCEPT_FUNCTION(hexdump);
#else
#define INIT_HEXDUMP
#endif
#if SANITIZER_INTERCEPT_ARGP_PARSE
INTERCEPTOR(int, argp_parse, const struct argp *argp, int argc, char **argv,
unsigned flags, int *arg_index, void *input) {
@ -10226,6 +10264,38 @@ INTERCEPTOR(int, cpuset_getaffinity, int level, int which, __int64_t id, SIZE_T
#define INIT_CPUSET_GETAFFINITY
#endif
#if SANITIZER_INTERCEPT_PREADV2
INTERCEPTOR(SSIZE_T, preadv2, int fd, __sanitizer_iovec *iov, int iovcnt,
OFF_T offset, int flags) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, preadv2, fd, iov, iovcnt, offset, flags);
COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
SSIZE_T res = REAL(preadv2)(fd, iov, iovcnt, offset, flags);
if (res > 0) write_iovec(ctx, iov, iovcnt, res);
if (res >= 0 && fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
return res;
}
#define INIT_PREADV2 COMMON_INTERCEPT_FUNCTION(preadv2)
#else
#define INIT_PREADV2
#endif
#if SANITIZER_INTERCEPT_PWRITEV2
INTERCEPTOR(SSIZE_T, pwritev2, int fd, __sanitizer_iovec *iov, int iovcnt,
OFF_T offset, int flags) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, pwritev2, fd, iov, iovcnt, offset, flags);
COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
if (fd >= 0) COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);
SSIZE_T res = REAL(pwritev2)(fd, iov, iovcnt, offset, flags);
if (res > 0) read_iovec(ctx, iov, iovcnt, res);
return res;
}
#define INIT_PWRITEV2 COMMON_INTERCEPT_FUNCTION(pwritev2)
#else
#define INIT_PWRITEV2
#endif
#include "sanitizer_common_interceptors_netbsd_compat.inc"
namespace __sanitizer {
@ -10543,9 +10613,10 @@ static void InitializeCommonInterceptors() {
INIT_PROCCTL
INIT_UNAME;
INIT___XUNAME;
INIT_HEXDUMP;
INIT_ARGP_PARSE;
INIT_CPUSET_GETAFFINITY;
INIT_PREADV2;
INIT_PWRITEV2;
INIT___PRINTF_CHK;
}

View File

@ -547,24 +547,25 @@ static void printf_common(void *ctx, const char *format, va_list aq) {
continue;
} else if (size == FSS_STRLEN) {
if (void *argp = va_arg(aq, void *)) {
uptr len;
if (dir.starredPrecision) {
// FIXME: properly support starred precision for strings.
size = 0;
len = 0;
} else if (dir.fieldPrecision > 0) {
// Won't read more than "precision" symbols.
size = internal_strnlen((const char *)argp, dir.fieldPrecision);
if (size < dir.fieldPrecision) size++;
len = internal_strnlen((const char *)argp, dir.fieldPrecision);
if (len < (uptr)dir.fieldPrecision)
len++;
} else {
// Whole string will be accessed.
size = internal_strlen((const char *)argp) + 1;
len = internal_strlen((const char *)argp) + 1;
}
COMMON_INTERCEPTOR_READ_RANGE(ctx, argp, size);
COMMON_INTERCEPTOR_READ_RANGE(ctx, argp, len);
}
} else if (size == FSS_WCSLEN) {
if (void *argp = va_arg(aq, void *)) {
// FIXME: Properly support wide-character strings (via wcsrtombs).
size = 0;
COMMON_INTERCEPTOR_READ_RANGE(ctx, argp, size);
COMMON_INTERCEPTOR_READ_RANGE(ctx, argp, 0);
}
} else {
// Skip non-pointer args

View File

@ -46,6 +46,7 @@ INTERFACE_FUNCTION(__sanitizer_purge_allocator)
INTERFACE_FUNCTION(__sanitizer_print_memory_profile)
INTERFACE_WEAK_FUNCTION(__sanitizer_free_hook)
INTERFACE_WEAK_FUNCTION(__sanitizer_malloc_hook)
INTERFACE_WEAK_FUNCTION(__sanitizer_ignore_free_hook)
// Memintrinsic functions.
INTERFACE_FUNCTION(__sanitizer_internal_memcpy)
INTERFACE_FUNCTION(__sanitizer_internal_memmove)

View File

@ -9,6 +9,7 @@
//===----------------------------------------------------------------------===//
INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_code)
INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_data)
INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_frame)
INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_demangle)
INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_flush)
INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_set_demangle)

View File

@ -87,8 +87,8 @@ void MaybeStartBackgroudThread() {
if (!common_flags()->hard_rss_limit_mb &&
!common_flags()->soft_rss_limit_mb &&
!common_flags()->heap_profile) return;
if (!&real_pthread_create) {
VPrintf(1, "%s: real_pthread_create undefined\n", SanitizerToolName);
if (!&internal_pthread_create) {
VPrintf(1, "%s: internal_pthread_create undefined\n", SanitizerToolName);
return; // Can't spawn the thread anyway.
}
@ -119,8 +119,10 @@ void MaybeStartBackgroudThread() {}
#endif
void WriteToSyslog(const char *msg) {
if (!msg)
return;
InternalScopedString msg_copy;
msg_copy.append("%s", msg);
msg_copy.Append(msg);
const char *p = msg_copy.data();
// Print one line at a time.
@ -167,7 +169,7 @@ void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name,
: !MmapFixedNoReserve(beg, size, name)) {
Report(
"ReserveShadowMemoryRange failed while trying to map 0x%zx bytes. "
"Perhaps you're using ulimit -v\n",
"Perhaps you're using ulimit -v or ulimit -d\n",
size);
Abort();
}

View File

@ -38,6 +38,10 @@
// Called before fork syscall.
// COMMON_SYSCALL_POST_FORK(long res)
// Called after fork syscall.
// COMMON_SYSCALL_BLOCKING_START()
// Called before blocking syscall.
// COMMON_SYSCALL_BLOCKING_END()
// Called after blocking syscall.
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
@ -85,6 +89,16 @@
{}
# endif
# ifndef COMMON_SYSCALL_BLOCKING_START
# define COMMON_SYSCALL_BLOCKING_START() \
{}
# endif
# ifndef COMMON_SYSCALL_BLOCKING_END
# define COMMON_SYSCALL_BLOCKING_END() \
{}
# endif
// FIXME: do some kind of PRE_READ for all syscall arguments (int(s) and such).
extern "C" {
@ -2808,6 +2822,15 @@ PRE_SYSCALL(fchownat)
POST_SYSCALL(fchownat)
(long res, long dfd, const void *filename, long user, long group, long flag) {}
PRE_SYSCALL(fchmodat2)(long dfd, const void *filename, long mode, long flag) {
if (filename)
PRE_READ(filename,
__sanitizer::internal_strlen((const char *)filename) + 1);
}
POST_SYSCALL(fchmodat2)
(long res, long dfd, const void *filename, long mode, long flag) {}
PRE_SYSCALL(openat)(long dfd, const void *filename, long flags, long mode) {
if (filename)
PRE_READ(filename,
@ -3167,6 +3190,18 @@ POST_SYSCALL(sigaltstack)(long res, void *ss, void *oss) {
}
}
}
PRE_SYSCALL(futex)
(void *uaddr, long futex_op, long val, void *timeout, void *uaddr2, long val3) {
COMMON_SYSCALL_BLOCKING_START();
}
POST_SYSCALL(futex)
(long res, void *uaddr, long futex_op, long val, void *timeout, void *uaddr2,
long val3) {
COMMON_SYSCALL_BLOCKING_END();
}
} // extern "C"
# undef PRE_SYSCALL

View File

@ -0,0 +1,37 @@
//===-- sanitizer_dl.cpp --------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file has helper functions that depend on libc's dynamic loading
// introspection.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_dl.h"
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_GLIBC
# include <dlfcn.h>
#endif
namespace __sanitizer {
extern const char *SanitizerToolName;
const char *DladdrSelfFName(void) {
#if SANITIZER_GLIBC
Dl_info info;
int ret = dladdr((void *)&SanitizerToolName, &info);
if (ret) {
return info.dli_fname;
}
#endif
return nullptr;
}
} // namespace __sanitizer

View File

@ -0,0 +1,26 @@
//===-- sanitizer_dl.h ----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file has helper functions that depend on libc's dynamic loading
// introspection.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_DL_H
#define SANITIZER_DL_H
namespace __sanitizer {
// Returns the path to the shared object or - in the case of statically linked
// sanitizers
// - the main program itself, that contains the sanitizer.
const char* DladdrSelfFName(void);
} // namespace __sanitizer
#endif // SANITIZER_DL_H

View File

@ -69,7 +69,7 @@ void ReportFile::ReopenIfNecessary() {
WriteToFile(kStderrFd, ErrorMsgPrefix, internal_strlen(ErrorMsgPrefix));
WriteToFile(kStderrFd, full_path, internal_strlen(full_path));
char errmsg[100];
internal_snprintf(errmsg, sizeof(errmsg), " (reason: %d)", err);
internal_snprintf(errmsg, sizeof(errmsg), " (reason: %d)\n", err);
WriteToFile(kStderrFd, errmsg, internal_strlen(errmsg));
Die();
}
@ -88,6 +88,8 @@ static void RecursiveCreateParentDirs(char *path) {
const char *ErrorMsgPrefix = "ERROR: Can't create directory: ";
WriteToFile(kStderrFd, ErrorMsgPrefix, internal_strlen(ErrorMsgPrefix));
WriteToFile(kStderrFd, path, internal_strlen(path));
const char *ErrorMsgSuffix = "\n";
WriteToFile(kStderrFd, ErrorMsgSuffix, internal_strlen(ErrorMsgSuffix));
Die();
}
path[i] = save;

View File

@ -84,7 +84,7 @@ bool IsPathSeparator(const char c);
bool IsAbsolutePath(const char *path);
// Returns true on success, false on failure.
bool CreateDir(const char *pathname);
// Starts a subprocess and returs its pid.
// Starts a subprocess and returns its pid.
// If *_fd parameters are not kInvalidFd their corresponding input/output
// streams will be redirect to the file. The files will always be closed
// in parent process even in case of an error.

View File

@ -19,8 +19,6 @@
namespace __sanitizer {
LowLevelAllocator FlagParser::Alloc;
class UnknownFlags {
static const int kMaxUnknownFlags = 20;
const char *unknown_flags_[kMaxUnknownFlags];
@ -49,7 +47,7 @@ void ReportUnrecognizedFlags() {
char *FlagParser::ll_strndup(const char *s, uptr n) {
uptr len = internal_strnlen(s, n);
char *s2 = (char*)Alloc.Allocate(len + 1);
char *s2 = (char *)GetGlobalLowLevelAllocator().Allocate(len + 1);
internal_memcpy(s2, s, len);
s2[len] = 0;
return s2;
@ -185,7 +183,8 @@ void FlagParser::RegisterHandler(const char *name, FlagHandlerBase *handler,
}
FlagParser::FlagParser() : n_flags_(0), buf_(nullptr), pos_(0) {
flags_ = (Flag *)Alloc.Allocate(sizeof(Flag) * kMaxFlags);
flags_ =
(Flag *)GetGlobalLowLevelAllocator().Allocate(sizeof(Flag) * kMaxFlags);
}
} // namespace __sanitizer

View File

@ -178,8 +178,6 @@ class FlagParser {
bool ParseFile(const char *path, bool ignore_missing);
void PrintFlagDescriptions();
static LowLevelAllocator Alloc;
private:
void fatal_error(const char *err);
bool is_space(char c);
@ -193,7 +191,7 @@ class FlagParser {
template <typename T>
static void RegisterFlag(FlagParser *parser, const char *name, const char *desc,
T *var) {
FlagHandler<T> *fh = new (FlagParser::Alloc) FlagHandler<T>(var);
FlagHandler<T> *fh = new (GetGlobalLowLevelAllocator()) FlagHandler<T>(var);
parser->RegisterHandler(name, fh, desc);
}

View File

@ -108,11 +108,11 @@ class FlagHandlerInclude final : public FlagHandlerBase {
};
void RegisterIncludeFlags(FlagParser *parser, CommonFlags *cf) {
FlagHandlerInclude *fh_include = new (FlagParser::Alloc)
FlagHandlerInclude *fh_include = new (GetGlobalLowLevelAllocator())
FlagHandlerInclude(parser, /*ignore_missing*/ false);
parser->RegisterHandler("include", fh_include,
"read more options from the given file");
FlagHandlerInclude *fh_include_if_exists = new (FlagParser::Alloc)
FlagHandlerInclude *fh_include_if_exists = new (GetGlobalLowLevelAllocator())
FlagHandlerInclude(parser, /*ignore_missing*/ true);
parser->RegisterHandler(
"include_if_exists", fh_include_if_exists,

View File

@ -269,3 +269,16 @@ COMMON_FLAG(bool, detect_write_exec, false,
COMMON_FLAG(bool, test_only_emulate_no_memorymap, false,
"TEST ONLY fail to read memory mappings to emulate sanitized "
"\"init\"")
// With static linking, dladdr((void*)pthread_join) or similar will return the
// path to the main program. This flag will replace dlopen(<main program,...>
// with dlopen(NULL,...), which is the correct way to get a handle to the main
// program.
COMMON_FLAG(bool, test_only_replace_dlopen_main_program, false,
"TEST ONLY replace dlopen(<main program>,...) with dlopen(NULL)")
COMMON_FLAG(bool, enable_symbolizer_markup, SANITIZER_FUCHSIA,
"Use sanitizer symbolizer markup, available on Linux "
"and always set true for Fuchsia.")
COMMON_FLAG(bool, detect_invalid_join, true,
"If set, check invalid joins of threads.")

View File

@ -109,6 +109,10 @@ class TwoLevelMap {
return *AddressSpaceView::LoadWritable(&map2[idx % kSize2]);
}
void Lock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS { mu_.Lock(); }
void Unlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS { mu_.Unlock(); }
private:
constexpr uptr MmapSize() const {
return RoundUpTo(kSize2 * sizeof(T), GetPageSizeCached());

View File

@ -1,137 +0,0 @@
//===-- sanitizer_freebsd.h -------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of Sanitizer runtime. It contains FreeBSD-specific
// definitions.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_FREEBSD_H
#define SANITIZER_FREEBSD_H
#include "sanitizer_internal_defs.h"
// x86-64 FreeBSD 9.2 and older define 'ucontext_t' incorrectly in
// 32-bit mode.
#if SANITIZER_FREEBSD && (SANITIZER_WORDSIZE == 32)
#include <osreldate.h>
#if __FreeBSD_version <= 902001 // v9.2
#include <link.h>
#include <sys/param.h>
#include <ucontext.h>
namespace __sanitizer {
typedef unsigned long long __xuint64_t;
typedef __int32_t __xregister_t;
typedef struct __xmcontext {
__xregister_t mc_onstack;
__xregister_t mc_gs;
__xregister_t mc_fs;
__xregister_t mc_es;
__xregister_t mc_ds;
__xregister_t mc_edi;
__xregister_t mc_esi;
__xregister_t mc_ebp;
__xregister_t mc_isp;
__xregister_t mc_ebx;
__xregister_t mc_edx;
__xregister_t mc_ecx;
__xregister_t mc_eax;
__xregister_t mc_trapno;
__xregister_t mc_err;
__xregister_t mc_eip;
__xregister_t mc_cs;
__xregister_t mc_eflags;
__xregister_t mc_esp;
__xregister_t mc_ss;
int mc_len;
int mc_fpformat;
int mc_ownedfp;
__xregister_t mc_flags;
int mc_fpstate[128] __aligned(16);
__xregister_t mc_fsbase;
__xregister_t mc_gsbase;
__xregister_t mc_xfpustate;
__xregister_t mc_xfpustate_len;
int mc_spare2[4];
} xmcontext_t;
typedef struct __xucontext {
sigset_t uc_sigmask;
xmcontext_t uc_mcontext;
struct __ucontext *uc_link;
stack_t uc_stack;
int uc_flags;
int __spare__[4];
} xucontext_t;
struct xkinfo_vmentry {
int kve_structsize;
int kve_type;
__xuint64_t kve_start;
__xuint64_t kve_end;
__xuint64_t kve_offset;
__xuint64_t kve_vn_fileid;
__uint32_t kve_vn_fsid;
int kve_flags;
int kve_resident;
int kve_private_resident;
int kve_protection;
int kve_ref_count;
int kve_shadow_count;
int kve_vn_type;
__xuint64_t kve_vn_size;
__uint32_t kve_vn_rdev;
__uint16_t kve_vn_mode;
__uint16_t kve_status;
int _kve_ispare[12];
char kve_path[PATH_MAX];
};
typedef struct {
__uint32_t p_type;
__uint32_t p_offset;
__uint32_t p_vaddr;
__uint32_t p_paddr;
__uint32_t p_filesz;
__uint32_t p_memsz;
__uint32_t p_flags;
__uint32_t p_align;
} XElf32_Phdr;
struct xdl_phdr_info {
Elf_Addr dlpi_addr;
const char *dlpi_name;
const XElf32_Phdr *dlpi_phdr;
Elf_Half dlpi_phnum;
unsigned long long int dlpi_adds;
unsigned long long int dlpi_subs;
size_t dlpi_tls_modid;
void *dlpi_tls_data;
};
typedef int (*__xdl_iterate_hdr_callback)(struct xdl_phdr_info *, size_t,
void *);
typedef int xdl_iterate_phdr_t(__xdl_iterate_hdr_callback, void *);
#define xdl_iterate_phdr(callback, param) \
(((xdl_iterate_phdr_t *)dl_iterate_phdr)((callback), (param)))
} // namespace __sanitizer
#endif // __FreeBSD_version <= 902001
#endif // SANITIZER_FREEBSD && (SANITIZER_WORDSIZE == 32)
#endif // SANITIZER_FREEBSD_H

View File

@ -129,6 +129,60 @@ uptr GetMaxVirtualAddress() { return GetMaxUserVirtualAddress(); }
bool ErrorIsOOM(error_t err) { return err == ZX_ERR_NO_MEMORY; }
// For any sanitizer internal that needs to map something which can be unmapped
// later, first attempt to map to a pre-allocated VMAR. This helps reduce
// fragmentation from many small anonymous mmap calls. A good value for this
// VMAR size would be the total size of your typical sanitizer internal objects
// allocated in an "average" process lifetime. Examples of this include:
// FakeStack, LowLevelAllocator mappings, TwoLevelMap, InternalMmapVector,
// StackStore, CreateAsanThread, etc.
//
// This is roughly equal to the total sum of sanitizer internal mappings for a
// large test case.
constexpr size_t kSanitizerHeapVmarSize = 13ULL << 20;
static zx_handle_t gSanitizerHeapVmar = ZX_HANDLE_INVALID;
static zx_status_t GetSanitizerHeapVmar(zx_handle_t *vmar) {
zx_status_t status = ZX_OK;
if (gSanitizerHeapVmar == ZX_HANDLE_INVALID) {
CHECK_EQ(kSanitizerHeapVmarSize % GetPageSizeCached(), 0);
uintptr_t base;
status = _zx_vmar_allocate(
_zx_vmar_root_self(),
ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE | ZX_VM_CAN_MAP_SPECIFIC, 0,
kSanitizerHeapVmarSize, &gSanitizerHeapVmar, &base);
}
*vmar = gSanitizerHeapVmar;
if (status == ZX_OK)
CHECK_NE(gSanitizerHeapVmar, ZX_HANDLE_INVALID);
return status;
}
static zx_status_t TryVmoMapSanitizerVmar(zx_vm_option_t options,
size_t vmar_offset, zx_handle_t vmo,
size_t size, uintptr_t *addr,
zx_handle_t *vmar_used = nullptr) {
zx_handle_t vmar;
zx_status_t status = GetSanitizerHeapVmar(&vmar);
if (status != ZX_OK)
return status;
status = _zx_vmar_map(gSanitizerHeapVmar, options, vmar_offset, vmo,
/*vmo_offset=*/0, size, addr);
if (vmar_used)
*vmar_used = gSanitizerHeapVmar;
if (status == ZX_ERR_NO_RESOURCES || status == ZX_ERR_INVALID_ARGS) {
// This means there's no space in the heap VMAR, so fallback to the root
// VMAR.
status = _zx_vmar_map(_zx_vmar_root_self(), options, vmar_offset, vmo,
/*vmo_offset=*/0, size, addr);
if (vmar_used)
*vmar_used = _zx_vmar_root_self();
}
return status;
}
static void *DoAnonymousMmapOrDie(uptr size, const char *mem_type,
bool raw_report, bool die_for_nomem) {
size = RoundUpTo(size, GetPageSize());
@ -144,11 +198,9 @@ static void *DoAnonymousMmapOrDie(uptr size, const char *mem_type,
_zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,
internal_strlen(mem_type));
// TODO(mcgrathr): Maybe allocate a VMAR for all sanitizer heap and use that?
uintptr_t addr;
status =
_zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, 0,
vmo, 0, size, &addr);
status = TryVmoMapSanitizerVmar(ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
/*vmar_offset=*/0, vmo, size, &addr);
_zx_handle_close(vmo);
if (status != ZX_OK) {
@ -226,27 +278,32 @@ static uptr DoMmapFixedOrDie(zx_handle_t vmar, uptr fixed_addr, uptr map_size,
uptr ReservedAddressRange::Map(uptr fixed_addr, uptr map_size,
const char *name) {
return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_, name_,
false);
return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_,
name ? name : name_, false);
}
uptr ReservedAddressRange::MapOrDie(uptr fixed_addr, uptr map_size,
const char *name) {
return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_, name_, true);
return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_,
name ? name : name_, true);
}
void UnmapOrDieVmar(void *addr, uptr size, zx_handle_t target_vmar) {
void UnmapOrDieVmar(void *addr, uptr size, zx_handle_t target_vmar,
bool raw_report) {
if (!addr || !size)
return;
size = RoundUpTo(size, GetPageSize());
zx_status_t status =
_zx_vmar_unmap(target_vmar, reinterpret_cast<uintptr_t>(addr), size);
if (status != ZX_OK) {
Report("ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p\n",
SanitizerToolName, size, size, addr);
CHECK("unable to unmap" && 0);
if (status == ZX_ERR_INVALID_ARGS && target_vmar == gSanitizerHeapVmar) {
// If there wasn't any space in the heap vmar, the fallback was the root
// vmar.
status = _zx_vmar_unmap(_zx_vmar_root_self(),
reinterpret_cast<uintptr_t>(addr), size);
}
if (status != ZX_OK)
ReportMunmapFailureAndDie(addr, size, status, raw_report);
DecreaseTotalMmap(size);
}
@ -268,7 +325,8 @@ void ReservedAddressRange::Unmap(uptr addr, uptr size) {
}
// Partial unmapping does not affect the fact that the initial range is still
// reserved, and the resulting unmapped memory can't be reused.
UnmapOrDieVmar(reinterpret_cast<void *>(addr), size, vmar);
UnmapOrDieVmar(reinterpret_cast<void *>(addr), size, vmar,
/*raw_report=*/false);
}
// This should never be called.
@ -307,17 +365,16 @@ void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
_zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,
internal_strlen(mem_type));
// TODO(mcgrathr): Maybe allocate a VMAR for all sanitizer heap and use that?
// Map a larger size to get a chunk of address space big enough that
// it surely contains an aligned region of the requested size. Then
// overwrite the aligned middle portion with a mapping from the
// beginning of the VMO, and unmap the excess before and after.
size_t map_size = size + alignment;
uintptr_t addr;
status =
_zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, 0,
vmo, 0, map_size, &addr);
zx_handle_t vmar_used;
status = TryVmoMapSanitizerVmar(ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
/*vmar_offset=*/0, vmo, map_size, &addr,
&vmar_used);
if (status == ZX_OK) {
uintptr_t map_addr = addr;
uintptr_t map_end = map_addr + map_size;
@ -325,12 +382,12 @@ void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
uintptr_t end = addr + size;
if (addr != map_addr) {
zx_info_vmar_t info;
status = _zx_object_get_info(_zx_vmar_root_self(), ZX_INFO_VMAR, &info,
sizeof(info), NULL, NULL);
status = _zx_object_get_info(vmar_used, ZX_INFO_VMAR, &info, sizeof(info),
NULL, NULL);
if (status == ZX_OK) {
uintptr_t new_addr;
status = _zx_vmar_map(
_zx_vmar_root_self(),
vmar_used,
ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC_OVERWRITE,
addr - info.base, vmo, 0, size, &new_addr);
if (status == ZX_OK)
@ -338,9 +395,9 @@ void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
}
}
if (status == ZX_OK && addr != map_addr)
status = _zx_vmar_unmap(_zx_vmar_root_self(), map_addr, addr - map_addr);
status = _zx_vmar_unmap(vmar_used, map_addr, addr - map_addr);
if (status == ZX_OK && end != map_end)
status = _zx_vmar_unmap(_zx_vmar_root_self(), end, map_end - end);
status = _zx_vmar_unmap(vmar_used, end, map_end - end);
}
_zx_handle_close(vmo);
@ -355,8 +412,8 @@ void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
return reinterpret_cast<void *>(addr);
}
void UnmapOrDie(void *addr, uptr size) {
UnmapOrDieVmar(addr, size, _zx_vmar_root_self());
void UnmapOrDie(void *addr, uptr size, bool raw_report) {
UnmapOrDieVmar(addr, size, gSanitizerHeapVmar, raw_report);
}
void ReleaseMemoryPagesToOS(uptr beg, uptr end) {

View File

@ -62,6 +62,6 @@ class MurMur2Hash64Builder {
return x;
}
};
} //namespace __sanitizer
} // namespace __sanitizer
#endif // SANITIZER_HASH_H

View File

@ -15,6 +15,11 @@
#include "sanitizer_platform.h"
#include "sanitizer_redefine_builtins.h"
// GCC does not understand __has_feature.
#if !defined(__has_feature)
#define __has_feature(x) 0
#endif
#ifndef SANITIZER_DEBUG
# define SANITIZER_DEBUG 0
#endif
@ -30,13 +35,20 @@
# define SANITIZER_INTERFACE_ATTRIBUTE __declspec(dllexport)
#endif
# define SANITIZER_WEAK_ATTRIBUTE
# define SANITIZER_WEAK_IMPORT
#elif SANITIZER_GO
# define SANITIZER_INTERFACE_ATTRIBUTE
# define SANITIZER_WEAK_ATTRIBUTE
# define SANITIZER_WEAK_IMPORT
#else
# define SANITIZER_INTERFACE_ATTRIBUTE __attribute__((visibility("default")))
# define SANITIZER_WEAK_ATTRIBUTE __attribute__((weak))
#endif
# if SANITIZER_APPLE
# define SANITIZER_WEAK_IMPORT extern "C" __attribute((weak_import))
# else
# define SANITIZER_WEAK_IMPORT extern "C" SANITIZER_WEAK_ATTRIBUTE
# endif // SANITIZER_APPLE
#endif // SANITIZER_WINDOWS
//--------------------------- WEAK FUNCTIONS ---------------------------------//
// When working with weak functions, to simplify the code and make it more
@ -179,15 +191,19 @@ typedef uptr OFF_T;
#endif
typedef u64 OFF64_T;
#if (SANITIZER_WORDSIZE == 64) || SANITIZER_APPLE
typedef uptr operator_new_size_type;
#ifdef __SIZE_TYPE__
typedef __SIZE_TYPE__ usize;
#else
# if defined(__s390__) && !defined(__s390x__)
// Special case: 31-bit s390 has unsigned long as size_t.
typedef unsigned long operator_new_size_type;
# else
typedef u32 operator_new_size_type;
# endif
// Since we use this for operator new, usize must match the real size_t, but on
// 32-bit Windows the definition of uptr does not actually match uintptr_t or
// size_t because we are working around typedef mismatches for the (S)SIZE_T
// types used in interception.h.
// Until the definition of uptr has been fixed we have to special case Win32.
# if SANITIZER_WINDOWS && SANITIZER_WORDSIZE == 32
typedef unsigned int usize;
# else
typedef uptr usize;
# endif
#endif
typedef u64 tid_t;

View File

@ -199,6 +199,14 @@ char *internal_strncat(char *dst, const char *src, uptr n) {
return dst;
}
wchar_t *internal_wcscpy(wchar_t *dst, const wchar_t *src) {
wchar_t *dst_it = dst;
do {
*dst_it++ = *src++;
} while (*src);
return dst;
}
uptr internal_strlcpy(char *dst, const char *src, uptr maxlen) {
const uptr srclen = internal_strlen(src);
if (srclen < maxlen) {
@ -218,6 +226,14 @@ char *internal_strncpy(char *dst, const char *src, uptr n) {
return dst;
}
wchar_t *internal_wcsncpy(wchar_t *dst, const wchar_t *src, uptr n) {
uptr i;
for (i = 0; i < n && src[i]; ++i)
dst[i] = src[i];
internal_memset(dst + i, 0, (n - i) * sizeof(wchar_t));
return dst;
}
uptr internal_strnlen(const char *s, uptr maxlen) {
uptr i = 0;
while (i < maxlen && s[i]) i++;

View File

@ -71,7 +71,8 @@ int internal_snprintf(char *buffer, uptr length, const char *format, ...)
FORMAT(3, 4);
uptr internal_wcslen(const wchar_t *s);
uptr internal_wcsnlen(const wchar_t *s, uptr maxlen);
wchar_t *internal_wcscpy(wchar_t *dst, const wchar_t *src);
wchar_t *internal_wcsncpy(wchar_t *dst, const wchar_t *src, uptr maxlen);
// Return true if all bytes in [mem, mem+size) are zero.
// Optimized for the case when the result is true.
bool mem_is_zero(const char *mem, uptr size);

View File

@ -105,8 +105,8 @@ void LibIgnore::OnLibraryLoaded(const char *name) {
continue;
if (IsPcInstrumented(range.beg) && IsPcInstrumented(range.end - 1))
continue;
VReport(1, "Adding instrumented range 0x%zx-0x%zx from library '%s'\n",
range.beg, range.end, mod.full_name());
VReport(1, "Adding instrumented range %p-%p from library '%s'\n",
(void *)range.beg, (void *)range.end, mod.full_name());
const uptr idx =
atomic_load(&instrumented_ranges_count_, memory_order_relaxed);
CHECK_LT(idx, ARRAY_SIZE(instrumented_code_ranges_));

File diff suppressed because it is too large Load Diff

View File

@ -13,15 +13,15 @@
#define SANITIZER_LINUX_H
#include "sanitizer_platform.h"
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
SANITIZER_SOLARIS
#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_platform_limits_freebsd.h"
#include "sanitizer_platform_limits_netbsd.h"
#include "sanitizer_platform_limits_posix.h"
#include "sanitizer_platform_limits_solaris.h"
#include "sanitizer_posix.h"
# include "sanitizer_common.h"
# include "sanitizer_internal_defs.h"
# include "sanitizer_platform_limits_freebsd.h"
# include "sanitizer_platform_limits_netbsd.h"
# include "sanitizer_platform_limits_posix.h"
# include "sanitizer_platform_limits_solaris.h"
# include "sanitizer_posix.h"
struct link_map; // Opaque type returned by dlopen().
struct utsname;
@ -46,9 +46,9 @@ void ReadProcMaps(ProcSelfMapsBuff *proc_maps);
// Syscall wrappers.
uptr internal_getdents(fd_t fd, struct linux_dirent *dirp, unsigned int count);
uptr internal_sigaltstack(const void* ss, void* oss);
uptr internal_sigaltstack(const void *ss, void *oss);
uptr internal_sigprocmask(int how, __sanitizer_sigset_t *set,
__sanitizer_sigset_t *oldset);
__sanitizer_sigset_t *oldset);
void SetSigProcMask(__sanitizer_sigset_t *set, __sanitizer_sigset_t *oldset);
void BlockSignals(__sanitizer_sigset_t *oldset = nullptr);
@ -65,10 +65,10 @@ struct ScopedBlockSignals {
# if SANITIZER_GLIBC
uptr internal_clock_gettime(__sanitizer_clockid_t clk_id, void *tp);
#endif
# endif
// Linux-only syscalls.
#if SANITIZER_LINUX
# if SANITIZER_LINUX
uptr internal_prctl(int option, uptr arg2, uptr arg3, uptr arg4, uptr arg5);
# if defined(__x86_64__)
uptr internal_arch_prctl(int option, uptr arg2);
@ -83,15 +83,15 @@ void internal_sigdelset(__sanitizer_sigset_t *set, int signum);
defined(__arm__) || SANITIZER_RISCV64 || SANITIZER_LOONGARCH64
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
int *parent_tidptr, void *newtls, int *child_tidptr);
#endif
# endif
int internal_uname(struct utsname *buf);
#elif SANITIZER_FREEBSD
# elif SANITIZER_FREEBSD
uptr internal_procctl(int type, int id, int cmd, void *data);
void internal_sigdelset(__sanitizer_sigset_t *set, int signum);
#elif SANITIZER_NETBSD
# elif SANITIZER_NETBSD
void internal_sigdelset(__sanitizer_sigset_t *set, int signum);
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg);
#endif // SANITIZER_LINUX
# endif // SANITIZER_LINUX
// This class reads thread IDs from /proc/<pid>/task using only syscalls.
class ThreadLister {
@ -135,36 +135,60 @@ inline void ReleaseMemoryPagesToOSAndZeroFill(uptr beg, uptr end) {
ReleaseMemoryPagesToOS(beg, end);
}
#if SANITIZER_ANDROID
# if SANITIZER_ANDROID
#if defined(__aarch64__)
# define __get_tls() \
({ void** __v; __asm__("mrs %0, tpidr_el0" : "=r"(__v)); __v; })
#elif defined(__arm__)
# define __get_tls() \
({ void** __v; __asm__("mrc p15, 0, %0, c13, c0, 3" : "=r"(__v)); __v; })
#elif defined(__mips__)
# if defined(__aarch64__)
# define __get_tls() \
({ \
void **__v; \
__asm__("mrs %0, tpidr_el0" : "=r"(__v)); \
__v; \
})
# elif defined(__arm__)
# define __get_tls() \
({ \
void **__v; \
__asm__("mrc p15, 0, %0, c13, c0, 3" : "=r"(__v)); \
__v; \
})
# elif defined(__mips__)
// On mips32r1, this goes via a kernel illegal instruction trap that's
// optimized for v1.
# define __get_tls() \
({ register void** __v asm("v1"); \
__asm__(".set push\n" \
".set mips32r2\n" \
"rdhwr %0,$29\n" \
".set pop\n" : "=r"(__v)); \
__v; })
#elif defined (__riscv)
# define __get_tls() \
({ void** __v; __asm__("mv %0, tp" : "=r"(__v)); __v; })
#elif defined(__i386__)
# define __get_tls() \
({ void** __v; __asm__("movl %%gs:0, %0" : "=r"(__v)); __v; })
#elif defined(__x86_64__)
# define __get_tls() \
({ void** __v; __asm__("mov %%fs:0, %0" : "=r"(__v)); __v; })
#else
#error "Unsupported architecture."
#endif
# define __get_tls() \
({ \
register void **__v asm("v1"); \
__asm__( \
".set push\n" \
".set mips32r2\n" \
"rdhwr %0,$29\n" \
".set pop\n" \
: "=r"(__v)); \
__v; \
})
# elif defined(__riscv)
# define __get_tls() \
({ \
void **__v; \
__asm__("mv %0, tp" : "=r"(__v)); \
__v; \
})
# elif defined(__i386__)
# define __get_tls() \
({ \
void **__v; \
__asm__("movl %%gs:0, %0" : "=r"(__v)); \
__v; \
})
# elif defined(__x86_64__)
# define __get_tls() \
({ \
void **__v; \
__asm__("mov %%fs:0, %0" : "=r"(__v)); \
__v; \
})
# else
# error "Unsupported architecture."
# endif
// The Android Bionic team has allocated a TLS slot for sanitizers starting
// with Q, given that Android currently doesn't support ELF TLS. It is used to
@ -175,7 +199,7 @@ ALWAYS_INLINE uptr *get_android_tls_ptr() {
return reinterpret_cast<uptr *>(&__get_tls()[TLS_SLOT_SANITIZER]);
}
#endif // SANITIZER_ANDROID
# endif // SANITIZER_ANDROID
} // namespace __sanitizer

View File

@ -16,89 +16,101 @@
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
SANITIZER_SOLARIS
#include "sanitizer_allocator_internal.h"
#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
#include "sanitizer_file.h"
#include "sanitizer_flags.h"
#include "sanitizer_freebsd.h"
#include "sanitizer_getauxval.h"
#include "sanitizer_glibc_version.h"
#include "sanitizer_linux.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_procmaps.h"
#include "sanitizer_solaris.h"
# include "sanitizer_allocator_internal.h"
# include "sanitizer_atomic.h"
# include "sanitizer_common.h"
# include "sanitizer_file.h"
# include "sanitizer_flags.h"
# include "sanitizer_getauxval.h"
# include "sanitizer_glibc_version.h"
# include "sanitizer_linux.h"
# include "sanitizer_placement_new.h"
# include "sanitizer_procmaps.h"
# include "sanitizer_solaris.h"
#if SANITIZER_NETBSD
#define _RTLD_SOURCE // for __lwp_gettcb_fast() / __lwp_getprivate_fast()
#endif
# if SANITIZER_NETBSD
# define _RTLD_SOURCE // for __lwp_gettcb_fast() / __lwp_getprivate_fast()
# endif
#include <dlfcn.h> // for dlsym()
#include <link.h>
#include <pthread.h>
#include <signal.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <syslog.h>
# include <dlfcn.h> // for dlsym()
# include <link.h>
# include <pthread.h>
# include <signal.h>
# include <sys/mman.h>
# include <sys/resource.h>
# include <syslog.h>
#if !defined(ElfW)
#define ElfW(type) Elf_##type
#endif
# if !defined(ElfW)
# define ElfW(type) Elf_##type
# endif
#if SANITIZER_FREEBSD
#include <pthread_np.h>
#include <osreldate.h>
#include <sys/sysctl.h>
#define pthread_getattr_np pthread_attr_get_np
# if SANITIZER_FREEBSD
# include <pthread_np.h>
# include <sys/auxv.h>
# include <sys/sysctl.h>
# define pthread_getattr_np pthread_attr_get_np
// The MAP_NORESERVE define has been removed in FreeBSD 11.x, and even before
// that, it was never implemented. So just define it to zero.
#undef MAP_NORESERVE
#define MAP_NORESERVE 0
#endif
# undef MAP_NORESERVE
# define MAP_NORESERVE 0
extern const Elf_Auxinfo *__elf_aux_vector;
extern "C" int __sys_sigaction(int signum, const struct sigaction *act,
struct sigaction *oldact);
# endif
#if SANITIZER_NETBSD
#include <sys/sysctl.h>
#include <sys/tls.h>
#include <lwp.h>
#endif
# if SANITIZER_NETBSD
# include <lwp.h>
# include <sys/sysctl.h>
# include <sys/tls.h>
# endif
#if SANITIZER_SOLARIS
#include <stddef.h>
#include <stdlib.h>
#include <thread.h>
#endif
# if SANITIZER_SOLARIS
# include <stddef.h>
# include <stdlib.h>
# include <thread.h>
# endif
#if SANITIZER_ANDROID
#include <android/api-level.h>
#if !defined(CPU_COUNT) && !defined(__aarch64__)
#include <dirent.h>
#include <fcntl.h>
# if SANITIZER_ANDROID
# include <android/api-level.h>
# if !defined(CPU_COUNT) && !defined(__aarch64__)
# include <dirent.h>
# include <fcntl.h>
struct __sanitizer::linux_dirent {
long d_ino;
off_t d_off;
long d_ino;
off_t d_off;
unsigned short d_reclen;
char d_name[];
char d_name[];
};
#endif
#endif
# endif
# endif
#if !SANITIZER_ANDROID
#include <elf.h>
#include <unistd.h>
#endif
# if !SANITIZER_ANDROID
# include <elf.h>
# include <unistd.h>
# endif
namespace __sanitizer {
SANITIZER_WEAK_ATTRIBUTE int
real_sigaction(int signum, const void *act, void *oldact);
SANITIZER_WEAK_ATTRIBUTE int real_sigaction(int signum, const void *act,
void *oldact);
int internal_sigaction(int signum, const void *act, void *oldact) {
#if !SANITIZER_GO
# if SANITIZER_FREEBSD
// On FreeBSD, call the sigaction syscall directly (part of libsys in FreeBSD
// 15) since the libc version goes via a global interposing table. Due to
// library initialization order the table can be relocated after the call to
// InitializeDeadlySignals() which then crashes when dereferencing the
// uninitialized pointer in libc.
return __sys_sigaction(signum, (const struct sigaction *)act,
(struct sigaction *)oldact);
# else
# if !SANITIZER_GO
if (&real_sigaction)
return real_sigaction(signum, act, oldact);
#endif
# endif
return sigaction(signum, (const struct sigaction *)act,
(struct sigaction *)oldact);
# endif
}
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
@ -111,7 +123,7 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
CHECK_EQ(getrlimit(RLIMIT_STACK, &rl), 0);
// Find the mapping that contains a stack variable.
MemoryMappingLayout proc_maps(/*cache_enabled*/true);
MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
if (proc_maps.Error()) {
*stack_top = *stack_bottom = 0;
return;
@ -119,7 +131,8 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
MemoryMappedSegment segment;
uptr prev_end = 0;
while (proc_maps.Next(&segment)) {
if ((uptr)&rl < segment.end) break;
if ((uptr)&rl < segment.end)
break;
prev_end = segment.end;
}
CHECK((uptr)&rl >= segment.start && (uptr)&rl < segment.end);
@ -127,7 +140,8 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
// Get stacksize from rlimit, but clip it so that it does not overlap
// with other mappings.
uptr stacksize = rl.rlim_cur;
if (stacksize > segment.end - prev_end) stacksize = segment.end - prev_end;
if (stacksize > segment.end - prev_end)
stacksize = segment.end - prev_end;
// When running with unlimited stack size, we still want to set some limit.
// The unlimited stack size is caused by 'ulimit -s unlimited'.
// Also, for some reason, GNU make spawns subprocesses with unlimited stack.
@ -135,43 +149,56 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
stacksize = kMaxThreadStackSize;
*stack_top = segment.end;
*stack_bottom = segment.end - stacksize;
uptr maxAddr = GetMaxUserVirtualAddress();
// Edge case: the stack mapping on some systems may be off-by-one e.g.,
// fffffffdf000-1000000000000 rw-p 00000000 00:00 0 [stack]
// instead of:
// fffffffdf000- ffffffffffff
// The out-of-range stack_top can result in an invalid shadow address
// calculation, since those usually assume the parameters are in range.
if (*stack_top == maxAddr + 1)
*stack_top = maxAddr;
else
CHECK_LE(*stack_top, maxAddr);
return;
}
uptr stacksize = 0;
void *stackaddr = nullptr;
#if SANITIZER_SOLARIS
# if SANITIZER_SOLARIS
stack_t ss;
CHECK_EQ(thr_stksegment(&ss), 0);
stacksize = ss.ss_size;
stackaddr = (char *)ss.ss_sp - stacksize;
#else // !SANITIZER_SOLARIS
# else // !SANITIZER_SOLARIS
pthread_attr_t attr;
pthread_attr_init(&attr);
CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
internal_pthread_attr_getstack(&attr, &stackaddr, &stacksize);
pthread_attr_destroy(&attr);
#endif // SANITIZER_SOLARIS
# endif // SANITIZER_SOLARIS
*stack_top = (uptr)stackaddr + stacksize;
*stack_bottom = (uptr)stackaddr;
}
#if !SANITIZER_GO
# if !SANITIZER_GO
bool SetEnv(const char *name, const char *value) {
void *f = dlsym(RTLD_NEXT, "setenv");
if (!f)
return false;
typedef int(*setenv_ft)(const char *name, const char *value, int overwrite);
typedef int (*setenv_ft)(const char *name, const char *value, int overwrite);
setenv_ft setenv_f;
CHECK_EQ(sizeof(setenv_f), sizeof(f));
internal_memcpy(&setenv_f, &f, sizeof(f));
return setenv_f(name, value, 1) == 0;
}
#endif
# endif
__attribute__((unused)) static bool GetLibcVersion(int *major, int *minor,
int *patch) {
#ifdef _CS_GNU_LIBC_VERSION
# ifdef _CS_GNU_LIBC_VERSION
char buf[64];
uptr len = confstr(_CS_GNU_LIBC_VERSION, buf, sizeof(buf));
if (len >= sizeof(buf))
@ -185,9 +212,9 @@ __attribute__((unused)) static bool GetLibcVersion(int *major, int *minor,
*minor = (*p == '.') ? internal_simple_strtoll(p + 1, &p, 10) : 0;
*patch = (*p == '.') ? internal_simple_strtoll(p + 1, &p, 10) : 0;
return true;
#else
# else
return false;
#endif
# endif
}
// True if we can use dlpi_tls_data. glibc before 2.25 may leave NULL (BZ
@ -198,42 +225,42 @@ __attribute__((unused)) static bool GetLibcVersion(int *major, int *minor,
// https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=254774
__attribute__((unused)) static int g_use_dlpi_tls_data;
#if SANITIZER_GLIBC && !SANITIZER_GO
# if SANITIZER_GLIBC && !SANITIZER_GO
__attribute__((unused)) static size_t g_tls_size;
void InitTlsSize() {
int major, minor, patch;
g_use_dlpi_tls_data =
GetLibcVersion(&major, &minor, &patch) && major == 2 && minor >= 25;
#if defined(__aarch64__) || defined(__x86_64__) || defined(__powerpc64__) || \
defined(__loongarch__)
# if defined(__aarch64__) || defined(__x86_64__) || \
defined(__powerpc64__) || defined(__loongarch__)
void *get_tls_static_info = dlsym(RTLD_NEXT, "_dl_get_tls_static_info");
size_t tls_align;
((void (*)(size_t *, size_t *))get_tls_static_info)(&g_tls_size, &tls_align);
#endif
# endif
}
#else
void InitTlsSize() { }
#endif // SANITIZER_GLIBC && !SANITIZER_GO
# else
void InitTlsSize() {}
# endif // SANITIZER_GLIBC && !SANITIZER_GO
// On glibc x86_64, ThreadDescriptorSize() needs to be precise due to the usage
// of g_tls_size. On other targets, ThreadDescriptorSize() is only used by lsan
// to get the pointer to thread-specific data keys in the thread control block.
#if (SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_SOLARIS) && \
!SANITIZER_ANDROID && !SANITIZER_GO
# if (SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_SOLARIS) && \
!SANITIZER_ANDROID && !SANITIZER_GO
// sizeof(struct pthread) from glibc.
static atomic_uintptr_t thread_descriptor_size;
static uptr ThreadDescriptorSizeFallback() {
uptr val = 0;
#if defined(__x86_64__) || defined(__i386__) || defined(__arm__)
# if defined(__x86_64__) || defined(__i386__) || defined(__arm__)
int major;
int minor;
int patch;
if (GetLibcVersion(&major, &minor, &patch) && major == 2) {
/* sizeof(struct pthread) values from various glibc versions. */
if (SANITIZER_X32)
val = 1728; // Assume only one particular version for x32.
val = 1728; // Assume only one particular version for x32.
// For ARM sizeof(struct pthread) changed in Glibc 2.23.
else if (SANITIZER_ARM)
val = minor <= 22 ? 1120 : 1216;
@ -256,19 +283,19 @@ static uptr ThreadDescriptorSizeFallback() {
else // minor == 32
val = FIRST_32_SECOND_64(1344, 2496);
}
#elif defined(__s390__) || defined(__sparc__)
# elif defined(__s390__) || defined(__sparc__)
// The size of a prefix of TCB including pthread::{specific_1stblock,specific}
// suffices. Just return offsetof(struct pthread, specific_used), which hasn't
// changed since 2007-05. Technically this applies to i386/x86_64 as well but
// we call _dl_get_tls_static_info and need the precise size of struct
// pthread.
return FIRST_32_SECOND_64(524, 1552);
#elif defined(__mips__)
# elif defined(__mips__)
// TODO(sagarthakur): add more values as per different glibc versions.
val = FIRST_32_SECOND_64(1152, 1776);
#elif SANITIZER_LOONGARCH64
val = 1856; // from glibc 2.36
#elif SANITIZER_RISCV64
# elif SANITIZER_LOONGARCH64
val = 1856; // from glibc 2.36
# elif SANITIZER_RISCV64
int major;
int minor;
int patch;
@ -283,12 +310,12 @@ static uptr ThreadDescriptorSizeFallback() {
val = 1936; // tested against glibc 2.32
}
#elif defined(__aarch64__)
# elif defined(__aarch64__)
// The sizeof (struct pthread) is the same from GLIBC 2.17 to 2.22.
val = 1776;
#elif defined(__powerpc64__)
val = 1776; // from glibc.ppc64le 2.20-8.fc21
#endif
# elif defined(__powerpc64__)
val = 1776; // from glibc.ppc64le 2.20-8.fc21
# endif
return val;
}
@ -307,26 +334,26 @@ uptr ThreadDescriptorSize() {
return val;
}
#if defined(__mips__) || defined(__powerpc64__) || SANITIZER_RISCV64 || \
SANITIZER_LOONGARCH64
# if defined(__mips__) || defined(__powerpc64__) || SANITIZER_RISCV64 || \
SANITIZER_LOONGARCH64
// TlsPreTcbSize includes size of struct pthread_descr and size of tcb
// head structure. It lies before the static tls blocks.
static uptr TlsPreTcbSize() {
#if defined(__mips__)
const uptr kTcbHead = 16; // sizeof (tcbhead_t)
#elif defined(__powerpc64__)
const uptr kTcbHead = 88; // sizeof (tcbhead_t)
#elif SANITIZER_RISCV64
# if defined(__mips__)
const uptr kTcbHead = 16; // sizeof (tcbhead_t)
#elif SANITIZER_LOONGARCH64
# elif defined(__powerpc64__)
const uptr kTcbHead = 88; // sizeof (tcbhead_t)
# elif SANITIZER_RISCV64
const uptr kTcbHead = 16; // sizeof (tcbhead_t)
#endif
# elif SANITIZER_LOONGARCH64
const uptr kTcbHead = 16; // sizeof (tcbhead_t)
# endif
const uptr kTlsAlign = 16;
const uptr kTlsPreTcbSize =
RoundUpTo(ThreadDescriptorSize() + kTcbHead, kTlsAlign);
return kTlsPreTcbSize;
}
#endif
# endif
namespace {
struct TlsBlock {
@ -336,7 +363,7 @@ struct TlsBlock {
};
} // namespace
#ifdef __s390__
# ifdef __s390__
extern "C" uptr __tls_get_offset(void *arg);
static uptr TlsGetOffset(uptr ti_module, uptr ti_offset) {
@ -354,16 +381,16 @@ static uptr TlsGetOffset(uptr ti_module, uptr ti_offset) {
: "memory", "cc", "0", "1", "3", "4", "5", "14");
return r2;
}
#else
# else
extern "C" void *__tls_get_addr(size_t *);
#endif
# endif
static size_t main_tls_modid;
static int CollectStaticTlsBlocks(struct dl_phdr_info *info, size_t size,
void *data) {
size_t tls_modid;
#if SANITIZER_SOLARIS
# if SANITIZER_SOLARIS
// dlpi_tls_modid is only available since Solaris 11.4 SRU 10. Use
// dlinfo(RTLD_DI_LINKMAP) instead which works on all of Solaris 11.3,
// 11.4, and Illumos. The tlsmodid of the executable was changed to 1 in
@ -376,27 +403,26 @@ static int CollectStaticTlsBlocks(struct dl_phdr_info *info, size_t size,
Rt_map *map;
dlinfo(RTLD_SELF, RTLD_DI_LINKMAP, &map);
tls_modid = map->rt_tlsmodid;
#else
# else
main_tls_modid = 1;
tls_modid = info->dlpi_tls_modid;
#endif
# endif
if (tls_modid < main_tls_modid)
return 0;
uptr begin;
#if !SANITIZER_SOLARIS
# if !SANITIZER_SOLARIS
begin = (uptr)info->dlpi_tls_data;
#endif
# endif
if (!g_use_dlpi_tls_data) {
// Call __tls_get_addr as a fallback. This forces TLS allocation on glibc
// and FreeBSD.
#ifdef __s390__
begin = (uptr)__builtin_thread_pointer() +
TlsGetOffset(tls_modid, 0);
#else
# ifdef __s390__
begin = (uptr)__builtin_thread_pointer() + TlsGetOffset(tls_modid, 0);
# else
size_t mod_and_off[2] = {tls_modid, 0};
begin = (uptr)__tls_get_addr(mod_and_off);
#endif
# endif
}
for (unsigned i = 0; i != info->dlpi_phnum; ++i)
if (info->dlpi_phdr[i].p_type == PT_TLS) {
@ -439,23 +465,21 @@ __attribute__((unused)) static void GetStaticTlsBoundary(uptr *addr, uptr *size,
*addr = ranges[l].begin;
*size = ranges[r - 1].end - ranges[l].begin;
}
#endif // (x86_64 || i386 || mips || ...) && (SANITIZER_FREEBSD ||
// SANITIZER_LINUX) && !SANITIZER_ANDROID && !SANITIZER_GO
# endif // (x86_64 || i386 || mips || ...) && (SANITIZER_FREEBSD ||
// SANITIZER_LINUX) && !SANITIZER_ANDROID && !SANITIZER_GO
#if SANITIZER_NETBSD
static struct tls_tcb * ThreadSelfTlsTcb() {
# if SANITIZER_NETBSD
static struct tls_tcb *ThreadSelfTlsTcb() {
struct tls_tcb *tcb = nullptr;
#ifdef __HAVE___LWP_GETTCB_FAST
# ifdef __HAVE___LWP_GETTCB_FAST
tcb = (struct tls_tcb *)__lwp_gettcb_fast();
#elif defined(__HAVE___LWP_GETPRIVATE_FAST)
# elif defined(__HAVE___LWP_GETPRIVATE_FAST)
tcb = (struct tls_tcb *)__lwp_getprivate_fast();
#endif
# endif
return tcb;
}
uptr ThreadSelf() {
return (uptr)ThreadSelfTlsTcb()->tcb_pthread;
}
uptr ThreadSelf() { return (uptr)ThreadSelfTlsTcb()->tcb_pthread; }
int GetSizeFromHdr(struct dl_phdr_info *info, size_t size, void *data) {
const Elf_Phdr *hdr = info->dlpi_phdr;
@ -463,23 +487,23 @@ int GetSizeFromHdr(struct dl_phdr_info *info, size_t size, void *data) {
for (; hdr != last_hdr; ++hdr) {
if (hdr->p_type == PT_TLS && info->dlpi_tls_modid == 1) {
*(uptr*)data = hdr->p_memsz;
*(uptr *)data = hdr->p_memsz;
break;
}
}
return 0;
}
#endif // SANITIZER_NETBSD
# endif // SANITIZER_NETBSD
#if SANITIZER_ANDROID
# if SANITIZER_ANDROID
// Bionic provides this API since S.
extern "C" SANITIZER_WEAK_ATTRIBUTE void __libc_get_static_tls_bounds(void **,
void **);
#endif
# endif
#if !SANITIZER_GO
# if !SANITIZER_GO
static void GetTls(uptr *addr, uptr *size) {
#if SANITIZER_ANDROID
# if SANITIZER_ANDROID
if (&__libc_get_static_tls_bounds) {
void *start_addr;
void *end_addr;
@ -491,48 +515,48 @@ static void GetTls(uptr *addr, uptr *size) {
*addr = 0;
*size = 0;
}
#elif SANITIZER_GLIBC && defined(__x86_64__)
# elif SANITIZER_GLIBC && defined(__x86_64__)
// For aarch64 and x86-64, use an O(1) approach which requires relatively
// precise ThreadDescriptorSize. g_tls_size was initialized in InitTlsSize.
# if SANITIZER_X32
# if SANITIZER_X32
asm("mov %%fs:8,%0" : "=r"(*addr));
# else
# else
asm("mov %%fs:16,%0" : "=r"(*addr));
# endif
# endif
*size = g_tls_size;
*addr -= *size;
*addr += ThreadDescriptorSize();
#elif SANITIZER_GLIBC && defined(__aarch64__)
# elif SANITIZER_GLIBC && defined(__aarch64__)
*addr = reinterpret_cast<uptr>(__builtin_thread_pointer()) -
ThreadDescriptorSize();
*size = g_tls_size + ThreadDescriptorSize();
#elif SANITIZER_GLIBC && defined(__loongarch__)
# ifdef __clang__
# elif SANITIZER_GLIBC && defined(__loongarch__)
# ifdef __clang__
*addr = reinterpret_cast<uptr>(__builtin_thread_pointer()) -
ThreadDescriptorSize();
# else
# else
asm("or %0,$tp,$zero" : "=r"(*addr));
*addr -= ThreadDescriptorSize();
# endif
# endif
*size = g_tls_size + ThreadDescriptorSize();
#elif SANITIZER_GLIBC && defined(__powerpc64__)
# elif SANITIZER_GLIBC && defined(__powerpc64__)
// Workaround for glibc<2.25(?). 2.27 is known to not need this.
uptr tp;
asm("addi %0,13,-0x7000" : "=r"(tp));
const uptr pre_tcb_size = TlsPreTcbSize();
*addr = tp - pre_tcb_size;
*size = g_tls_size + pre_tcb_size;
#elif SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_SOLARIS
# elif SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_SOLARIS
uptr align;
GetStaticTlsBoundary(addr, size, &align);
#if defined(__x86_64__) || defined(__i386__) || defined(__s390__) || \
defined(__sparc__)
# if defined(__x86_64__) || defined(__i386__) || defined(__s390__) || \
defined(__sparc__)
if (SANITIZER_GLIBC) {
#if defined(__x86_64__) || defined(__i386__)
# if defined(__x86_64__) || defined(__i386__)
align = Max<uptr>(align, 64);
#else
# else
align = Max<uptr>(align, 16);
#endif
# endif
}
const uptr tp = RoundUpTo(*addr + *size, align);
@ -551,26 +575,26 @@ static void GetTls(uptr *addr, uptr *size) {
// because the number of bytes after pthread::specific is larger.
*addr = tp - RoundUpTo(*size, align);
*size = tp - *addr + ThreadDescriptorSize();
#else
# else
if (SANITIZER_GLIBC)
*size += 1664;
else if (SANITIZER_FREEBSD)
*size += 128; // RTLD_STATIC_TLS_EXTRA
#if defined(__mips__) || defined(__powerpc64__) || SANITIZER_RISCV64
# if defined(__mips__) || defined(__powerpc64__) || SANITIZER_RISCV64
const uptr pre_tcb_size = TlsPreTcbSize();
*addr -= pre_tcb_size;
*size += pre_tcb_size;
#else
# else
// arm and aarch64 reserve two words at TP, so this underestimates the range.
// However, this is sufficient for the purpose of finding the pointers to
// thread-specific data keys.
const uptr tcb_size = ThreadDescriptorSize();
*addr -= tcb_size;
*size += tcb_size;
#endif
#endif
#elif SANITIZER_NETBSD
struct tls_tcb * const tcb = ThreadSelfTlsTcb();
# endif
# endif
# elif SANITIZER_NETBSD
struct tls_tcb *const tcb = ThreadSelfTlsTcb();
*addr = 0;
*size = 0;
if (tcb != 0) {
@ -583,31 +607,31 @@ static void GetTls(uptr *addr, uptr *size) {
*addr = (uptr)tcb->tcb_dtv[1];
}
}
#else
#error "Unknown OS"
#endif
# else
# error "Unknown OS"
# endif
}
#endif
# endif
#if !SANITIZER_GO
# if !SANITIZER_GO
uptr GetTlsSize() {
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
SANITIZER_SOLARIS
# if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
SANITIZER_SOLARIS
uptr addr, size;
GetTls(&addr, &size);
return size;
#else
# else
return 0;
#endif
# endif
}
#endif
# endif
void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
uptr *tls_addr, uptr *tls_size) {
#if SANITIZER_GO
# if SANITIZER_GO
// Stub implementation for Go.
*stk_addr = *stk_size = *tls_addr = *tls_size = 0;
#else
# else
GetTls(tls_addr, tls_size);
uptr stack_top, stack_bottom;
@ -623,16 +647,12 @@ void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
*stk_size = *tls_addr - *stk_addr;
}
}
#endif
# endif
}
#if !SANITIZER_FREEBSD
# if !SANITIZER_FREEBSD
typedef ElfW(Phdr) Elf_Phdr;
#elif SANITIZER_WORDSIZE == 32 && __FreeBSD_version <= 902001 // v9.2
#define Elf_Phdr XElf32_Phdr
#define dl_phdr_info xdl_phdr_info
#define dl_iterate_phdr(c, b) xdl_iterate_phdr((c), (b))
#endif // !SANITIZER_FREEBSD
# endif
struct DlIteratePhdrData {
InternalMmapVectorNoCtor<LoadedModule> *modules;
@ -652,8 +672,7 @@ static int AddModuleSegments(const char *module_name, dl_phdr_info *info,
uptr cur_end = cur_beg + phdr->p_memsz;
bool executable = phdr->p_flags & PF_X;
bool writable = phdr->p_flags & PF_W;
cur_module.addAddressRange(cur_beg, cur_end, executable,
writable);
cur_module.addAddressRange(cur_beg, cur_end, executable, writable);
} else if (phdr->p_type == PT_NOTE) {
# ifdef NT_GNU_BUILD_ID
uptr off = 0;
@ -698,33 +717,30 @@ static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) {
return AddModuleSegments(module_name.data(), info, data->modules);
}
if (info->dlpi_name) {
InternalScopedString module_name;
module_name.append("%s", info->dlpi_name);
return AddModuleSegments(module_name.data(), info, data->modules);
}
if (info->dlpi_name)
return AddModuleSegments(info->dlpi_name, info, data->modules);
return 0;
}
#if SANITIZER_ANDROID && __ANDROID_API__ < 21
# if SANITIZER_ANDROID && __ANDROID_API__ < 21
extern "C" __attribute__((weak)) int dl_iterate_phdr(
int (*)(struct dl_phdr_info *, size_t, void *), void *);
#endif
# endif
static bool requiresProcmaps() {
#if SANITIZER_ANDROID && __ANDROID_API__ <= 22
# if SANITIZER_ANDROID && __ANDROID_API__ <= 22
// Fall back to /proc/maps if dl_iterate_phdr is unavailable or broken.
// The runtime check allows the same library to work with
// both K and L (and future) Android releases.
return AndroidGetApiLevel() <= ANDROID_LOLLIPOP_MR1;
#else
# else
return false;
#endif
# endif
}
static void procmapsInit(InternalMmapVectorNoCtor<LoadedModule> *modules) {
MemoryMappingLayout memory_mapping(/*cache_enabled*/true);
MemoryMappingLayout memory_mapping(/*cache_enabled*/ true);
memory_mapping.DumpListOfModules(modules);
}
@ -776,22 +792,19 @@ uptr GetRSS() {
// We need the second number which is RSS in pages.
char *pos = buf;
// Skip the first number.
while (*pos >= '0' && *pos <= '9')
pos++;
while (*pos >= '0' && *pos <= '9') pos++;
// Skip whitespaces.
while (!(*pos >= '0' && *pos <= '9') && *pos != 0)
pos++;
while (!(*pos >= '0' && *pos <= '9') && *pos != 0) pos++;
// Read the number.
uptr rss = 0;
while (*pos >= '0' && *pos <= '9')
rss = rss * 10 + *pos++ - '0';
while (*pos >= '0' && *pos <= '9') rss = rss * 10 + *pos++ - '0';
return rss * GetPageSizeCached();
}
// sysconf(_SC_NPROCESSORS_{CONF,ONLN}) cannot be used on most platforms as
// they allocate memory.
u32 GetNumberOfCPUs() {
#if SANITIZER_FREEBSD || SANITIZER_NETBSD
# if SANITIZER_FREEBSD || SANITIZER_NETBSD
u32 ncpu;
int req[2];
uptr len = sizeof(ncpu);
@ -799,7 +812,7 @@ u32 GetNumberOfCPUs() {
req[1] = HW_NCPU;
CHECK_EQ(internal_sysctl(req, 2, &ncpu, &len, NULL, 0), 0);
return ncpu;
#elif SANITIZER_ANDROID && !defined(CPU_COUNT) && !defined(__aarch64__)
# elif SANITIZER_ANDROID && !defined(CPU_COUNT) && !defined(__aarch64__)
// Fall back to /sys/devices/system/cpu on Android when cpu_set_t doesn't
// exist in sched.h. That is the case for toolchains generated with older
// NDKs.
@ -827,26 +840,26 @@ u32 GetNumberOfCPUs() {
break;
if (entry->d_ino != 0 && *d_type == DT_DIR) {
if (entry->d_name[0] == 'c' && entry->d_name[1] == 'p' &&
entry->d_name[2] == 'u' &&
entry->d_name[3] >= '0' && entry->d_name[3] <= '9')
entry->d_name[2] == 'u' && entry->d_name[3] >= '0' &&
entry->d_name[3] <= '9')
n_cpus++;
}
entry = (struct linux_dirent *)(((u8 *)entry) + entry->d_reclen);
}
internal_close(fd);
return n_cpus;
#elif SANITIZER_SOLARIS
# elif SANITIZER_SOLARIS
return sysconf(_SC_NPROCESSORS_ONLN);
#else
# else
cpu_set_t CPUs;
CHECK_EQ(sched_getaffinity(0, sizeof(cpu_set_t), &CPUs), 0);
return CPU_COUNT(&CPUs);
#endif
# endif
}
#if SANITIZER_LINUX
# if SANITIZER_LINUX
#if SANITIZER_ANDROID
# if SANITIZER_ANDROID
static atomic_uint8_t android_log_initialized;
void AndroidLogInit() {
@ -858,13 +871,15 @@ static bool ShouldLogAfterPrintf() {
return atomic_load(&android_log_initialized, memory_order_acquire);
}
extern "C" SANITIZER_WEAK_ATTRIBUTE
int async_safe_write_log(int pri, const char* tag, const char* msg);
extern "C" SANITIZER_WEAK_ATTRIBUTE
int __android_log_write(int prio, const char* tag, const char* msg);
extern "C" SANITIZER_WEAK_ATTRIBUTE int async_safe_write_log(int pri,
const char *tag,
const char *msg);
extern "C" SANITIZER_WEAK_ATTRIBUTE int __android_log_write(int prio,
const char *tag,
const char *msg);
// ANDROID_LOG_INFO is 4, but can't be resolved at runtime.
#define SANITIZER_ANDROID_LOG_INFO 4
# define SANITIZER_ANDROID_LOG_INFO 4
// async_safe_write_log is a new public version of __libc_write_log that is
// used behind syslog. It is preferable to syslog as it will not do any dynamic
@ -883,14 +898,14 @@ void WriteOneLineToSyslog(const char *s) {
}
}
extern "C" SANITIZER_WEAK_ATTRIBUTE
void android_set_abort_message(const char *);
extern "C" SANITIZER_WEAK_ATTRIBUTE void android_set_abort_message(
const char *);
void SetAbortMessage(const char *str) {
if (&android_set_abort_message)
android_set_abort_message(str);
}
#else
# else
void AndroidLogInit() {}
static bool ShouldLogAfterPrintf() { return true; }
@ -898,16 +913,16 @@ static bool ShouldLogAfterPrintf() { return true; }
void WriteOneLineToSyslog(const char *s) { syslog(LOG_INFO, "%s", s); }
void SetAbortMessage(const char *str) {}
#endif // SANITIZER_ANDROID
# endif // SANITIZER_ANDROID
void LogMessageOnPrintf(const char *str) {
if (common_flags()->log_to_syslog && ShouldLogAfterPrintf())
WriteToSyslog(str);
}
#endif // SANITIZER_LINUX
# endif // SANITIZER_LINUX
#if SANITIZER_GLIBC && !SANITIZER_GO
# if SANITIZER_GLIBC && !SANITIZER_GO
// glibc crashes when using clock_gettime from a preinit_array function as the
// vDSO function pointers haven't been initialized yet. __progname is
// initialized after the vDSO function pointers, so if it exists, is not null
@ -918,8 +933,8 @@ inline bool CanUseVDSO() { return &__progname && __progname && *__progname; }
// MonotonicNanoTime is a timing function that can leverage the vDSO by calling
// clock_gettime. real_clock_gettime only exists if clock_gettime is
// intercepted, so define it weakly and use it if available.
extern "C" SANITIZER_WEAK_ATTRIBUTE
int real_clock_gettime(u32 clk_id, void *tp);
extern "C" SANITIZER_WEAK_ATTRIBUTE int real_clock_gettime(u32 clk_id,
void *tp);
u64 MonotonicNanoTime() {
timespec ts;
if (CanUseVDSO()) {
@ -932,19 +947,26 @@ u64 MonotonicNanoTime() {
}
return (u64)ts.tv_sec * (1000ULL * 1000 * 1000) + ts.tv_nsec;
}
#else
# else
// Non-glibc & Go always use the regular function.
u64 MonotonicNanoTime() {
timespec ts;
clock_gettime(CLOCK_MONOTONIC, &ts);
return (u64)ts.tv_sec * (1000ULL * 1000 * 1000) + ts.tv_nsec;
}
#endif // SANITIZER_GLIBC && !SANITIZER_GO
# endif // SANITIZER_GLIBC && !SANITIZER_GO
void ReExec() {
const char *pathname = "/proc/self/exe";
#if SANITIZER_NETBSD
# if SANITIZER_FREEBSD
for (const auto *aux = __elf_aux_vector; aux->a_type != AT_NULL; aux++) {
if (aux->a_type == AT_EXECPATH) {
pathname = static_cast<const char *>(aux->a_un.a_ptr);
break;
}
}
# elif SANITIZER_NETBSD
static const int name[] = {
CTL_KERN,
KERN_PROC_ARGS,
@ -957,14 +979,14 @@ void ReExec() {
len = sizeof(path);
if (internal_sysctl(name, ARRAY_SIZE(name), path, &len, NULL, 0) != -1)
pathname = path;
#elif SANITIZER_SOLARIS
# elif SANITIZER_SOLARIS
pathname = getexecname();
CHECK_NE(pathname, NULL);
#elif SANITIZER_USE_GETAUXVAL
# elif SANITIZER_USE_GETAUXVAL
// Calling execve with /proc/self/exe sets that as $EXEC_ORIGIN. Binaries that
// rely on that will fail to load shared libraries. Query AT_EXECFN instead.
pathname = reinterpret_cast<const char *>(getauxval(AT_EXECFN));
#endif
# endif
uptr rv = internal_execve(pathname, GetArgv(), GetEnviron());
int rverrno;
@ -986,9 +1008,8 @@ void UnmapFromTo(uptr from, uptr to) {
}
uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
uptr min_shadow_base_alignment,
UNUSED uptr &high_mem_end) {
const uptr granularity = GetMmapGranularity();
uptr min_shadow_base_alignment, UNUSED uptr &high_mem_end,
uptr granularity) {
const uptr alignment =
Max<uptr>(granularity << shadow_scale, 1ULL << min_shadow_base_alignment);
const uptr left_padding =
@ -1016,14 +1037,14 @@ static uptr MmapSharedNoReserve(uptr addr, uptr size) {
static uptr MremapCreateAlias(uptr base_addr, uptr alias_addr,
uptr alias_size) {
#if SANITIZER_LINUX
# if SANITIZER_LINUX
return internal_mremap(reinterpret_cast<void *>(base_addr), 0, alias_size,
MREMAP_MAYMOVE | MREMAP_FIXED,
reinterpret_cast<void *>(alias_addr));
#else
# else
CHECK(false && "mremap is not supported outside of Linux");
return 0;
#endif
# endif
}
static void CreateAliases(uptr start_addr, uptr alias_size, uptr num_aliases) {
@ -1068,12 +1089,12 @@ uptr MapDynamicShadowAndAliases(uptr shadow_size, uptr alias_size,
}
void InitializePlatformCommonFlags(CommonFlags *cf) {
#if SANITIZER_ANDROID
# if SANITIZER_ANDROID
if (&__libc_get_static_tls_bounds == nullptr)
cf->detect_leaks = false;
#endif
# endif
}
} // namespace __sanitizer
} // namespace __sanitizer
#endif

View File

@ -15,14 +15,14 @@
#if SANITIZER_LINUX && SANITIZER_S390
#include <dlfcn.h>
#include <errno.h>
#include <sys/syscall.h>
#include <sys/utsname.h>
#include <unistd.h>
# include <dlfcn.h>
# include <errno.h>
# include <sys/syscall.h>
# include <sys/utsname.h>
# include <unistd.h>
#include "sanitizer_libc.h"
#include "sanitizer_linux.h"
# include "sanitizer_libc.h"
# include "sanitizer_linux.h"
namespace __sanitizer {
@ -37,22 +37,19 @@ uptr internal_mmap(void *addr, uptr length, int prot, int flags, int fd,
unsigned long fd;
unsigned long offset;
} params = {
(unsigned long)addr,
(unsigned long)length,
(unsigned long)prot,
(unsigned long)flags,
(unsigned long)fd,
# ifdef __s390x__
(unsigned long)offset,
# else
(unsigned long)addr, (unsigned long)length, (unsigned long)prot,
(unsigned long)flags, (unsigned long)fd,
# ifdef __s390x__
(unsigned long)offset,
# else
(unsigned long)(offset / 4096),
# endif
# endif
};
# ifdef __s390x__
# ifdef __s390x__
return syscall(__NR_mmap, &params);
# else
# else
return syscall(__NR_mmap2, &params);
# endif
# endif
}
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
@ -63,58 +60,54 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
}
CHECK_EQ(0, (uptr)child_stack % 16);
// Minimum frame size.
#ifdef __s390x__
# ifdef __s390x__
child_stack = (char *)child_stack - 160;
#else
# else
child_stack = (char *)child_stack - 96;
#endif
# endif
// Terminate unwind chain.
((unsigned long *)child_stack)[0] = 0;
// And pass parameters.
((unsigned long *)child_stack)[1] = (uptr)fn;
((unsigned long *)child_stack)[2] = (uptr)arg;
register uptr res __asm__("r2");
register void *__cstack __asm__("r2") = child_stack;
register long __flags __asm__("r3") = flags;
register int * __ptidptr __asm__("r4") = parent_tidptr;
register int * __ctidptr __asm__("r5") = child_tidptr;
register void * __newtls __asm__("r6") = newtls;
register void *__cstack __asm__("r2") = child_stack;
register long __flags __asm__("r3") = flags;
register int *__ptidptr __asm__("r4") = parent_tidptr;
register int *__ctidptr __asm__("r5") = child_tidptr;
register void *__newtls __asm__("r6") = newtls;
__asm__ __volatile__(
/* Clone. */
"svc %1\n"
/* Clone. */
"svc %1\n"
/* if (%r2 != 0)
* return;
*/
#ifdef __s390x__
"cghi %%r2, 0\n"
#else
"chi %%r2, 0\n"
#endif
"jne 1f\n"
/* if (%r2 != 0)
* return;
*/
# ifdef __s390x__
"cghi %%r2, 0\n"
# else
"chi %%r2, 0\n"
# endif
"jne 1f\n"
/* Call "fn(arg)". */
#ifdef __s390x__
"lmg %%r1, %%r2, 8(%%r15)\n"
#else
"lm %%r1, %%r2, 4(%%r15)\n"
#endif
"basr %%r14, %%r1\n"
/* Call "fn(arg)". */
# ifdef __s390x__
"lmg %%r1, %%r2, 8(%%r15)\n"
# else
"lm %%r1, %%r2, 4(%%r15)\n"
# endif
"basr %%r14, %%r1\n"
/* Call _exit(%r2). */
"svc %2\n"
/* Call _exit(%r2). */
"svc %2\n"
/* Return to parent. */
"1:\n"
: "=r" (res)
: "i"(__NR_clone), "i"(__NR_exit),
"r"(__cstack),
"r"(__flags),
"r"(__ptidptr),
"r"(__ctidptr),
"r"(__newtls)
: "memory", "cc");
/* Return to parent. */
"1:\n"
: "=r"(res)
: "i"(__NR_clone), "i"(__NR_exit), "r"(__cstack), "r"(__flags),
"r"(__ptidptr), "r"(__ctidptr), "r"(__newtls)
: "memory", "cc");
if (res >= (uptr)-4095) {
errno = -res;
return -1;
@ -122,7 +115,7 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
return res;
}
#if SANITIZER_S390_64
# if SANITIZER_S390_64
static bool FixedCVE_2016_2143() {
// Try to determine if the running kernel has a fix for CVE-2016-2143,
// return false if in doubt (better safe than sorry). Distros may want to
@ -137,20 +130,20 @@ static bool FixedCVE_2016_2143() {
// At least first 2 should be matched.
if (ptr[0] != '.')
return false;
minor = internal_simple_strtoll(ptr+1, &ptr, 10);
minor = internal_simple_strtoll(ptr + 1, &ptr, 10);
// Third is optional.
if (ptr[0] == '.')
patch = internal_simple_strtoll(ptr+1, &ptr, 10);
patch = internal_simple_strtoll(ptr + 1, &ptr, 10);
if (major < 3) {
if (major == 2 && minor == 6 && patch == 32 && ptr[0] == '-' &&
internal_strstr(ptr, ".el6")) {
// Check RHEL6
int r1 = internal_simple_strtoll(ptr+1, &ptr, 10);
if (r1 >= 657) // 2.6.32-657.el6 or later
int r1 = internal_simple_strtoll(ptr + 1, &ptr, 10);
if (r1 >= 657) // 2.6.32-657.el6 or later
return true;
if (r1 == 642 && ptr[0] == '.') {
int r2 = internal_simple_strtoll(ptr+1, &ptr, 10);
if (r2 >= 9) // 2.6.32-642.9.1.el6 or later
int r2 = internal_simple_strtoll(ptr + 1, &ptr, 10);
if (r2 >= 9) // 2.6.32-642.9.1.el6 or later
return true;
}
}
@ -166,12 +159,12 @@ static bool FixedCVE_2016_2143() {
if (minor == 10 && patch == 0 && ptr[0] == '-' &&
internal_strstr(ptr, ".el7")) {
// Check RHEL7
int r1 = internal_simple_strtoll(ptr+1, &ptr, 10);
if (r1 >= 426) // 3.10.0-426.el7 or later
int r1 = internal_simple_strtoll(ptr + 1, &ptr, 10);
if (r1 >= 426) // 3.10.0-426.el7 or later
return true;
if (r1 == 327 && ptr[0] == '.') {
int r2 = internal_simple_strtoll(ptr+1, &ptr, 10);
if (r2 >= 27) // 3.10.0-327.27.1.el7 or later
int r2 = internal_simple_strtoll(ptr + 1, &ptr, 10);
if (r2 >= 27) // 3.10.0-327.27.1.el7 or later
return true;
}
}
@ -187,8 +180,8 @@ static bool FixedCVE_2016_2143() {
if (minor == 4 && patch == 0 && ptr[0] == '-' &&
internal_strstr(buf.version, "Ubuntu")) {
// Check Ubuntu 16.04
int r1 = internal_simple_strtoll(ptr+1, &ptr, 10);
if (r1 >= 13) // 4.4.0-13 or later
int r1 = internal_simple_strtoll(ptr + 1, &ptr, 10);
if (r1 >= 13) // 4.4.0-13 or later
return true;
}
// Otherwise, OK if 4.5+.
@ -211,18 +204,19 @@ void AvoidCVE_2016_2143() {
if (GetEnv("SANITIZER_IGNORE_CVE_2016_2143"))
return;
Report(
"ERROR: Your kernel seems to be vulnerable to CVE-2016-2143. Using ASan,\n"
"MSan, TSan, DFSan or LSan with such kernel can and will crash your\n"
"machine, or worse.\n"
"\n"
"If you are certain your kernel is not vulnerable (you have compiled it\n"
"yourself, or are using an unrecognized distribution kernel), you can\n"
"override this safety check by exporting SANITIZER_IGNORE_CVE_2016_2143\n"
"with any value.\n");
"ERROR: Your kernel seems to be vulnerable to CVE-2016-2143. Using "
"ASan,\n"
"MSan, TSan, DFSan or LSan with such kernel can and will crash your\n"
"machine, or worse.\n"
"\n"
"If you are certain your kernel is not vulnerable (you have compiled it\n"
"yourself, or are using an unrecognized distribution kernel), you can\n"
"override this safety check by exporting SANITIZER_IGNORE_CVE_2016_2143\n"
"with any value.\n");
Die();
}
#endif
# endif
} // namespace __sanitizer
} // namespace __sanitizer
#endif // SANITIZER_LINUX && SANITIZER_S390
#endif // SANITIZER_LINUX && SANITIZER_S390

View File

@ -1188,8 +1188,8 @@ uptr GetMaxVirtualAddress() {
}
uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
uptr min_shadow_base_alignment, uptr &high_mem_end) {
const uptr granularity = GetMmapGranularity();
uptr min_shadow_base_alignment, uptr &high_mem_end,
uptr granularity) {
const uptr alignment =
Max<uptr>(granularity << shadow_scale, 1ULL << min_shadow_base_alignment);
const uptr left_padding =
@ -1372,8 +1372,8 @@ void DumpProcessMap() {
for (uptr i = 0; i < modules.size(); ++i) {
char uuid_str[128];
FormatUUID(uuid_str, sizeof(uuid_str), modules[i].uuid());
Printf("0x%zx-0x%zx %s (%s) %s\n", modules[i].base_address(),
modules[i].max_address(), modules[i].full_name(),
Printf("%p-%p %s (%s) %s\n", (void *)modules[i].base_address(),
(void *)modules[i].max_address(), modules[i].full_name(),
ModuleArchToString(modules[i].arch()), uuid_str);
}
Printf("End of module map.\n");

View File

@ -31,6 +31,10 @@ struct __sanitizer_struct_mallinfo {
int v[10];
};
struct __sanitizer_struct_mallinfo2 {
uptr v[10];
};
#endif
} // namespace __sanitizer

View File

@ -123,7 +123,7 @@ INTERCEPTOR(void, malloc_set_zone_name, malloc_zone_t *zone, const char *name) {
COMMON_MALLOC_ENTER();
InternalScopedString new_name;
if (name && zone->introspect == sanitizer_zone.introspect) {
new_name.append(COMMON_MALLOC_ZONE_NAME "-%s", name);
new_name.AppendF(COMMON_MALLOC_ZONE_NAME "-%s", name);
name = new_name.data();
}

View File

@ -212,8 +212,10 @@ struct InternalDeadlockDetector {
return initialized > 0;
}
};
static THREADLOCAL InternalDeadlockDetector deadlock_detector;
// This variable is used by the __tls_get_addr interceptor, so cannot use the
// global-dynamic TLS model, as that would result in crashes.
__attribute__((tls_model("initial-exec"))) static THREADLOCAL
InternalDeadlockDetector deadlock_detector;
void CheckedMutex::LockImpl(uptr pc) { deadlock_detector.Lock(type_, pc); }

View File

@ -17,8 +17,6 @@
#include "sanitizer_internal_defs.h"
inline void *operator new(__sanitizer::operator_new_size_type sz, void *p) {
return p;
}
inline void *operator new(__sanitizer::usize sz, void *p) { return p; }
#endif // SANITIZER_PLACEMENT_NEW_H

View File

@ -260,6 +260,17 @@
# define SANITIZER_ARM64 0
#endif
#if SANITIZER_WINDOWS64 && SANITIZER_ARM64
# define SANITIZER_WINDOWS_ARM64 1
# define SANITIZER_WINDOWS_x64 0
#elif SANITIZER_WINDOWS64 && !SANITIZER_ARM64
# define SANITIZER_WINDOWS_ARM64 0
# define SANITIZER_WINDOWS_x64 1
#else
# define SANITIZER_WINDOWS_ARM64 0
# define SANITIZER_WINDOWS_x64 0
#endif
#if SANITIZER_SOLARIS && SANITIZER_WORDSIZE == 32
# define SANITIZER_SOLARIS32 1
#else
@ -284,7 +295,8 @@
// For such platforms build this code with -DSANITIZER_CAN_USE_ALLOCATOR64=0 or
// change the definition of SANITIZER_CAN_USE_ALLOCATOR64 here.
#ifndef SANITIZER_CAN_USE_ALLOCATOR64
# if SANITIZER_RISCV64 || SANITIZER_IOS
# if (SANITIZER_RISCV64 && !SANITIZER_FUCHSIA && !SANITIZER_LINUX) || \
SANITIZER_IOS || SANITIZER_DRIVERKIT
# define SANITIZER_CAN_USE_ALLOCATOR64 0
# elif defined(__mips64) || defined(__hexagon__)
# define SANITIZER_CAN_USE_ALLOCATOR64 0
@ -303,7 +315,15 @@
# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 40)
# endif
#elif SANITIZER_RISCV64
# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 38)
// FIXME: Rather than hardcoding the VMA here, we should rely on
// GetMaxUserVirtualAddress(). This will require some refactoring though since
// many places either hardcode some value or SANITIZER_MMAP_RANGE_SIZE is
// assumed to be some constant integer.
# if SANITIZER_FUCHSIA
# define SANITIZER_MMAP_RANGE_SIZE (1ULL << 38)
# else
# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 56)
# endif
#elif defined(__aarch64__)
# if SANITIZER_APPLE
# if SANITIZER_OSX || SANITIZER_IOSSIM

View File

@ -191,7 +191,8 @@
#define SANITIZER_INTERCEPT_PREADV \
(SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_PWRITEV SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_PWRITEV \
(SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_PREADV64 SI_GLIBC
#define SANITIZER_INTERCEPT_PWRITEV64 SI_GLIBC
@ -301,7 +302,8 @@
#define SANITIZER_INTERCEPT_CANONICALIZE_FILE_NAME (SI_GLIBC || SI_SOLARIS)
#define SANITIZER_INTERCEPT_CONFSTR \
(SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
#define SANITIZER_INTERCEPT_SCHED_GETAFFINITY SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_SCHED_GETAFFINITY \
(SI_LINUX_NOT_ANDROID || SI_FREEBSD)
#define SANITIZER_INTERCEPT_SCHED_GETPARAM SI_LINUX_NOT_ANDROID || SI_SOLARIS
#define SANITIZER_INTERCEPT_STRERROR SI_POSIX
#define SANITIZER_INTERCEPT_STRERROR_R SI_POSIX
@ -462,7 +464,7 @@
(SI_LINUX || SI_MAC || SI_WINDOWS || SI_FREEBSD || SI_NETBSD || SI_SOLARIS)
#define SANITIZER_INTERCEPT_RECV_RECVFROM SI_POSIX
#define SANITIZER_INTERCEPT_SEND_SENDTO SI_POSIX
#define SANITIZER_INTERCEPT_EVENTFD_READ_WRITE SI_LINUX
#define SANITIZER_INTERCEPT_EVENTFD_READ_WRITE (SI_LINUX || SI_FREEBSD)
#define SI_STAT_LINUX (SI_LINUX && __GLIBC_PREREQ(2, 33))
#define SANITIZER_INTERCEPT_STAT \
@ -575,12 +577,12 @@
#define SANITIZER_INTERCEPT_SL_INIT (SI_FREEBSD || SI_NETBSD)
#define SANITIZER_INTERCEPT_GETRANDOM \
((SI_LINUX && __GLIBC_PREREQ(2, 25)) || SI_FREEBSD)
((SI_LINUX && __GLIBC_PREREQ(2, 25)) || SI_FREEBSD || SI_SOLARIS)
#define SANITIZER_INTERCEPT___CXA_ATEXIT SI_NETBSD
#define SANITIZER_INTERCEPT_ATEXIT SI_NETBSD
#define SANITIZER_INTERCEPT_PTHREAD_ATFORK SI_NETBSD
#define SANITIZER_INTERCEPT_GETENTROPY \
((SI_LINUX && __GLIBC_PREREQ(2, 25)) || SI_FREEBSD)
((SI_LINUX && __GLIBC_PREREQ(2, 25)) || SI_FREEBSD || SI_SOLARIS)
#define SANITIZER_INTERCEPT_QSORT \
(SI_POSIX && !SI_IOSSIM && !SI_WATCHOS && !SI_TVOS && !SI_ANDROID)
#define SANITIZER_INTERCEPT_QSORT_R SI_GLIBC
@ -594,9 +596,11 @@
#define SANITIZER_INTERCEPT___XUNAME SI_FREEBSD
#define SANITIZER_INTERCEPT_FLOPEN SI_FREEBSD
#define SANITIZER_INTERCEPT_PROCCTL SI_FREEBSD
#define SANITIZER_INTERCEPT_HEXDUMP SI_FREEBSD
#define SANITIZER_INTERCEPT_ARGP_PARSE SI_GLIBC
#define SANITIZER_INTERCEPT_CPUSET_GETAFFINITY SI_FREEBSD
// FIXME: also available from musl 1.2.5
#define SANITIZER_INTERCEPT_PREADV2 (SI_LINUX && __GLIBC_PREREQ(2, 26))
#define SANITIZER_INTERCEPT_PWRITEV2 (SI_LINUX && __GLIBC_PREREQ(2, 26))
// This macro gives a way for downstream users to override the above
// interceptor macros irrespective of the platform they are on. They have

View File

@ -475,6 +475,8 @@ CHECK_TYPE_SIZE(nfds_t);
CHECK_TYPE_SIZE(sigset_t);
COMPILER_CHECK(sizeof(__sanitizer_sigaction) == sizeof(struct sigaction));
COMPILER_CHECK(sizeof(__sanitizer_siginfo) == sizeof(siginfo_t));
CHECK_SIZE_AND_OFFSET(siginfo_t, si_value);
// Can't write checks for sa_handler and sa_sigaction due to them being
// preprocessor macros.
CHECK_STRUCT_SIZE_AND_OFFSET(sigaction, sa_mask);

View File

@ -301,11 +301,29 @@ struct __sanitizer_sigset_t {
typedef __sanitizer_sigset_t __sanitizer_kernel_sigset_t;
struct __sanitizer_siginfo {
// The size is determined by looking at sizeof of real siginfo_t on linux.
u64 opaque[128 / sizeof(u64)];
union __sanitizer_sigval {
int sival_int;
void *sival_ptr;
};
struct __sanitizer_siginfo {
int si_signo;
int si_errno;
int si_code;
pid_t si_pid;
u32 si_uid;
int si_status;
void *si_addr;
union __sanitizer_sigval si_value;
# if SANITIZER_WORDSIZE == 64
char data[40];
# else
char data[32];
# endif
};
typedef __sanitizer_siginfo __sanitizer_siginfo_t;
using __sanitizer_sighandler_ptr = void (*)(int sig);
using __sanitizer_sigactionhandler_ptr = void (*)(int sig,
__sanitizer_siginfo *siginfo,
@ -726,6 +744,8 @@ struct __sanitizer_cpuset {
typedef struct __sanitizer_cpuset __sanitizer_cpuset_t;
extern unsigned struct_cpuset_sz;
typedef unsigned long long __sanitizer_eventfd_t;
} // namespace __sanitizer
# define CHECK_TYPE_SIZE(TYPE) \

View File

@ -523,6 +523,7 @@ typedef long __sanitizer_clock_t;
#if SANITIZER_LINUX
typedef int __sanitizer_clockid_t;
typedef unsigned long long __sanitizer_eventfd_t;
#endif
#if SANITIZER_LINUX

View File

@ -54,12 +54,12 @@ void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
return (void *)res;
}
void UnmapOrDie(void *addr, uptr size) {
void UnmapOrDie(void *addr, uptr size, bool raw_report) {
if (!addr || !size) return;
uptr res = internal_munmap(addr, size);
int reserrno;
if (UNLIKELY(internal_iserror(res, &reserrno)))
ReportMunmapFailureAndDie(addr, size, reserrno);
ReportMunmapFailureAndDie(addr, size, reserrno, raw_report);
DecreaseTotalMmap(size);
}
@ -85,8 +85,8 @@ void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
CHECK(IsPowerOfTwo(size));
CHECK(IsPowerOfTwo(alignment));
uptr map_size = size + alignment;
// mmap maps entire pages and rounds up map_size needs to be a an integral
// number of pages.
// mmap maps entire pages and rounds up map_size needs to be a an integral
// number of pages.
// We need to be aware of this size for calculating end and for unmapping
// fragments before and after the alignment region.
map_size = RoundUpTo(map_size, GetPageSizeCached());
@ -130,8 +130,8 @@ static void *MmapFixedImpl(uptr fixed_addr, uptr size, bool tolerate_enomem,
if (tolerate_enomem && reserrno == ENOMEM)
return nullptr;
char mem_type[40];
internal_snprintf(mem_type, sizeof(mem_type), "memory at address 0x%zx",
fixed_addr);
internal_snprintf(mem_type, sizeof(mem_type), "memory at address %p",
(void *)fixed_addr);
ReportMmapFailureAndDie(size, mem_type, "allocate", reserrno);
}
IncreaseTotalMmap(size);

View File

@ -74,21 +74,21 @@ int internal_sysctlbyname(const char *sname, void *oldp, uptr *oldlenp,
// These functions call appropriate pthread_ functions directly, bypassing
// the interceptor. They are weak and may not be present in some tools.
SANITIZER_WEAK_ATTRIBUTE
int real_pthread_create(void *th, void *attr, void *(*callback)(void *),
void *param);
int internal_pthread_create(void *th, void *attr, void *(*callback)(void *),
void *param);
SANITIZER_WEAK_ATTRIBUTE
int real_pthread_join(void *th, void **ret);
int internal_pthread_join(void *th, void **ret);
#define DEFINE_REAL_PTHREAD_FUNCTIONS \
namespace __sanitizer { \
int real_pthread_create(void *th, void *attr, void *(*callback)(void *), \
void *param) { \
return REAL(pthread_create)(th, attr, callback, param); \
} \
int real_pthread_join(void *th, void **ret) { \
return REAL(pthread_join(th, ret)); \
} \
} // namespace __sanitizer
# define DEFINE_INTERNAL_PTHREAD_FUNCTIONS \
namespace __sanitizer { \
int internal_pthread_create(void *th, void *attr, \
void *(*callback)(void *), void *param) { \
return REAL(pthread_create)(th, attr, callback, param); \
} \
int internal_pthread_join(void *th, void **ret) { \
return REAL(pthread_join(th, ret)); \
} \
} // namespace __sanitizer
int internal_pthread_attr_getstack(void *attr, void **addr, uptr *size);

View File

@ -91,12 +91,12 @@ static rlim_t getlim(int res) {
static void setlim(int res, rlim_t lim) {
struct rlimit rlim;
if (getrlimit(res, const_cast<struct rlimit *>(&rlim))) {
if (getrlimit(res, &rlim)) {
Report("ERROR: %s getrlimit() failed %d\n", SanitizerToolName, errno);
Die();
}
rlim.rlim_cur = lim;
if (setrlimit(res, const_cast<struct rlimit *>(&rlim))) {
if (setrlimit(res, &rlim)) {
Report("ERROR: %s setrlimit() failed %d\n", SanitizerToolName, errno);
Die();
}
@ -104,7 +104,27 @@ static void setlim(int res, rlim_t lim) {
void DisableCoreDumperIfNecessary() {
if (common_flags()->disable_coredump) {
setlim(RLIMIT_CORE, 0);
rlimit rlim;
CHECK_EQ(0, getrlimit(RLIMIT_CORE, &rlim));
// On Linux, if the kernel.core_pattern sysctl starts with a '|' (i.e. it
// is being piped to a coredump handler such as systemd-coredumpd), the
// kernel ignores RLIMIT_CORE (since we aren't creating a file in the file
// system) except for the magic value of 1, which disables coredumps when
// piping. 1 byte is too small for any kind of valid core dump, so it
// also disables coredumps if kernel.core_pattern creates files directly.
// While most piped coredump handlers do respect the crashing processes'
// RLIMIT_CORE, this is notable not the case for Debian's systemd-coredump
// due to a local patch that changes sysctl.d/50-coredump.conf to ignore
// the specified limit and instead use RLIM_INFINITY.
//
// The alternative to using RLIMIT_CORE=1 would be to use prctl() with the
// PR_SET_DUMPABLE flag, however that also prevents ptrace(), so makes it
// impossible to attach a debugger.
//
// Note: we use rlim_max in the Min() call here since that is the upper
// limit for what can be set without getting an EINVAL error.
rlim.rlim_cur = Min<rlim_t>(SANITIZER_LINUX ? 1 : 0, rlim.rlim_max);
CHECK_EQ(0, setrlimit(RLIMIT_CORE, &rlim));
}
}
@ -307,9 +327,10 @@ static bool MmapFixed(uptr fixed_addr, uptr size, int additional_flags,
MAP_PRIVATE | MAP_FIXED | additional_flags | MAP_ANON, name);
int reserrno;
if (internal_iserror(p, &reserrno)) {
Report("ERROR: %s failed to "
"allocate 0x%zx (%zd) bytes at address %zx (errno: %d)\n",
SanitizerToolName, size, size, fixed_addr, reserrno);
Report(
"ERROR: %s failed to "
"allocate 0x%zx (%zd) bytes at address %p (errno: %d)\n",
SanitizerToolName, size, size, (void *)fixed_addr, reserrno);
return false;
}
IncreaseTotalMmap(size);

View File

@ -54,7 +54,7 @@ static int AppendNumber(char **buff, const char *buff_end, u64 absolute_value,
uptr num_buffer[kMaxLen];
int pos = 0;
do {
RAW_CHECK_MSG((uptr)pos < kMaxLen, "AppendNumber buffer overflow");
RAW_CHECK_MSG((uptr)pos < kMaxLen, "AppendNumber buffer overflow",);
num_buffer[pos++] = absolute_value % base;
absolute_value /= base;
} while (absolute_value > 0);
@ -337,7 +337,14 @@ int internal_snprintf(char *buffer, uptr length, const char *format, ...) {
return needed_length;
}
void InternalScopedString::append(const char *format, ...) {
void InternalScopedString::Append(const char *str) {
uptr prev_len = length();
uptr str_len = internal_strlen(str);
buffer_.resize(prev_len + str_len + 1);
internal_memcpy(buffer_.data() + prev_len, str, str_len + 1);
}
void InternalScopedString::AppendF(const char *format, ...) {
uptr prev_len = length();
while (true) {

View File

@ -13,9 +13,6 @@
#include "sanitizer_platform.h"
#if SANITIZER_FREEBSD || SANITIZER_NETBSD
#include "sanitizer_common.h"
#if SANITIZER_FREEBSD
#include "sanitizer_freebsd.h"
#endif
#include "sanitizer_procmaps.h"
// clang-format off
@ -29,29 +26,35 @@
#include <limits.h>
// Fix 'kinfo_vmentry' definition on FreeBSD prior v9.2 in 32-bit mode.
#if SANITIZER_FREEBSD && (SANITIZER_WORDSIZE == 32)
#include <osreldate.h>
#if __FreeBSD_version <= 902001 // v9.2
#define kinfo_vmentry xkinfo_vmentry
#endif
#endif
namespace __sanitizer {
#if SANITIZER_FREEBSD
void GetMemoryProfile(fill_profile_f cb, uptr *stats) {
const int Mib[] = {
CTL_KERN,
KERN_PROC,
KERN_PROC_PID,
getpid()
};
const int Mib[] = {CTL_KERN, KERN_PROC, KERN_PROC_PID, getpid()};
struct kinfo_proc InfoProc;
uptr Len = sizeof(InfoProc);
CHECK_EQ(internal_sysctl(Mib, ARRAY_SIZE(Mib), nullptr, (uptr *)&InfoProc, &Len, 0), 0);
cb(0, InfoProc.ki_rssize * GetPageSizeCached(), false, stats);
struct kinfo_proc *InfoProc;
uptr Len = sizeof(*InfoProc);
uptr Size = Len;
InfoProc = (struct kinfo_proc *)MmapOrDie(Size, "GetMemoryProfile()");
CHECK_EQ(
internal_sysctl(Mib, ARRAY_SIZE(Mib), nullptr, (uptr *)InfoProc, &Len, 0),
0);
cb(0, InfoProc->ki_rssize * GetPageSizeCached(), false, stats);
UnmapOrDie(InfoProc, Size, true);
}
#elif SANITIZER_NETBSD
void GetMemoryProfile(fill_profile_f cb, uptr *stats) {
struct kinfo_proc2 *InfoProc;
uptr Len = sizeof(*InfoProc);
uptr Size = Len;
const int Mib[] = {CTL_KERN, KERN_PROC2, KERN_PROC_PID,
getpid(), (int)Size, 1};
InfoProc = (struct kinfo_proc2 *)MmapOrDie(Size, "GetMemoryProfile()");
CHECK_EQ(
internal_sysctl(Mib, ARRAY_SIZE(Mib), nullptr, (uptr *)InfoProc, &Len, 0),
0);
cb(0, InfoProc->p_vm_rssize * GetPageSizeCached(), false, stats);
UnmapOrDie(InfoProc, Size, true);
}
#endif

View File

@ -145,7 +145,7 @@ void MemoryMappingLayout::DumpListOfModules(
}
}
#if SANITIZER_LINUX || SANITIZER_ANDROID || SANITIZER_SOLARIS || SANITIZER_NETBSD
#if SANITIZER_LINUX || SANITIZER_ANDROID || SANITIZER_SOLARIS
void GetMemoryProfile(fill_profile_f cb, uptr *stats) {
char *smaps = nullptr;
uptr smaps_cap = 0;

View File

@ -9,31 +9,33 @@
#ifndef SANITIZER_PTRAUTH_H
#define SANITIZER_PTRAUTH_H
#if __has_feature(ptrauth_calls)
#include <ptrauth.h>
#if __has_feature(ptrauth_intrinsics)
# include <ptrauth.h>
#elif defined(__ARM_FEATURE_PAC_DEFAULT) && !defined(__APPLE__)
inline unsigned long ptrauth_strip(void* __value, unsigned int __key) {
// On the stack the link register is protected with Pointer
// Authentication Code when compiled with -mbranch-protection.
// Let's stripping the PAC unconditionally because xpaclri is in
// the NOP space so will do nothing when it is not enabled or not available.
unsigned long ret;
asm volatile(
"mov x30, %1\n\t"
"hint #7\n\t" // xpaclri
"mov %0, x30\n\t"
: "=r"(ret)
: "r"(__value)
: "x30");
return ret;
}
#define ptrauth_auth_data(__value, __old_key, __old_data) __value
#define ptrauth_string_discriminator(__string) ((int)0)
// On the stack the link register is protected with Pointer
// Authentication Code when compiled with -mbranch-protection.
// Let's stripping the PAC unconditionally because xpaclri is in
// the NOP space so will do nothing when it is not enabled or not available.
# define ptrauth_strip(__value, __key) \
({ \
__typeof(__value) ret; \
asm volatile( \
"mov x30, %1\n\t" \
"hint #7\n\t" \
"mov %0, x30\n\t" \
"mov x30, xzr\n\t" \
: "=r"(ret) \
: "r"(__value) \
: "x30"); \
ret; \
})
# define ptrauth_auth_data(__value, __old_key, __old_data) __value
# define ptrauth_string_discriminator(__string) ((int)0)
#else
// Copied from <ptrauth.h>
#define ptrauth_strip(__value, __key) __value
#define ptrauth_auth_data(__value, __old_key, __old_data) __value
#define ptrauth_string_discriminator(__string) ((int)0)
# define ptrauth_strip(__value, __key) __value
# define ptrauth_auth_data(__value, __old_key, __old_data) __value
# define ptrauth_string_discriminator(__string) ((int)0)
#endif
#define STRIP_PAC_PC(pc) ((uptr)ptrauth_strip(pc, 0))

View File

@ -11,16 +11,19 @@
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_COMMON_NO_REDEFINE_BUILTINS
#ifndef SANITIZER_REDEFINE_BUILTINS_H
#define SANITIZER_REDEFINE_BUILTINS_H
# ifndef SANITIZER_REDEFINE_BUILTINS_H
# define SANITIZER_REDEFINE_BUILTINS_H
// The asm hack only works with GCC and Clang.
#if !defined(_WIN32)
# if !defined(_WIN32)
asm("memcpy = __sanitizer_internal_memcpy");
asm("memmove = __sanitizer_internal_memmove");
asm("memset = __sanitizer_internal_memset");
# if defined(__cplusplus) && \
!defined(SANITIZER_COMMON_REDEFINE_BUILTINS_IN_STD)
// The builtins should not be redefined in source files that make use of C++
// standard libraries, in particular where C++STL headers with inline functions
// are used. The redefinition in such cases would lead to ODR violations.
@ -46,7 +49,8 @@ using unordered_set = Define_SANITIZER_COMMON_NO_REDEFINE_BUILTINS_in_cpp_file;
using vector = Define_SANITIZER_COMMON_NO_REDEFINE_BUILTINS_in_cpp_file;
} // namespace std
#endif // !_WIN32
# endif // __cpluplus
# endif // !_WIN32
#endif // SANITIZER_REDEFINE_BUILTINS_H
#endif // SANITIZER_COMMON_NO_REDEFINE_BUILTINS
# endif // SANITIZER_REDEFINE_BUILTINS_H
#endif // SANITIZER_COMMON_NO_REDEFINE_BUILTINS

View File

@ -47,7 +47,9 @@ class RingBuffer {
void push(T t) {
*next_ = t;
next_--;
// The condition below works only if sizeof(T) is divisible by sizeof(T*).
static_assert((sizeof(T) % sizeof(T *)) == 0,
"The condition below works only if sizeof(T) is divisible by "
"sizeof(T*).");
if (next_ <= reinterpret_cast<T*>(&next_))
next_ = last_;
}

View File

@ -44,6 +44,9 @@ StackStore::Id StackStore::Store(const StackTrace &trace, uptr *pack) {
uptr idx = 0;
*pack = 0;
uptr *stack_trace = Alloc(h.size + 1, &idx, pack);
// No more space.
if (stack_trace == nullptr)
return 0;
*stack_trace = h.ToUptr();
internal_memcpy(stack_trace + 1, trace.trace, h.size * sizeof(uptr));
*pack += blocks_[GetBlockIdx(idx)].Stored(h.size + 1);
@ -76,8 +79,10 @@ uptr *StackStore::Alloc(uptr count, uptr *idx, uptr *pack) {
uptr block_idx = GetBlockIdx(start);
uptr last_idx = GetBlockIdx(start + count - 1);
if (LIKELY(block_idx == last_idx)) {
// Fits into the a single block.
CHECK_LT(block_idx, ARRAY_SIZE(blocks_));
// Fits into a single block.
// No more available blocks. Indicate inability to allocate more memory.
if (block_idx >= ARRAY_SIZE(blocks_))
return nullptr;
*idx = start;
return blocks_[block_idx].GetOrCreate(this) + GetInBlockIdx(start);
}

View File

@ -215,16 +215,16 @@ StackTrace StackDepotGet(u32 id) {
return theDepot.Get(id);
}
void StackDepotLockAll() {
theDepot.LockAll();
void StackDepotLockBeforeFork() {
theDepot.LockBeforeFork();
compress_thread.LockAndStop();
stackStore.LockAll();
}
void StackDepotUnlockAll() {
void StackDepotUnlockAfterFork(bool fork_child) {
stackStore.UnlockAll();
compress_thread.Unlock();
theDepot.UnlockAll();
theDepot.UnlockAfterFork(fork_child);
}
void StackDepotPrintAll() {

View File

@ -39,8 +39,8 @@ StackDepotHandle StackDepotPut_WithHandle(StackTrace stack);
// Retrieves a stored stack trace by the id.
StackTrace StackDepotGet(u32 id);
void StackDepotLockAll();
void StackDepotUnlockAll();
void StackDepotLockBeforeFork();
void StackDepotUnlockAfterFork(bool fork_child);
void StackDepotPrintAll();
void StackDepotStopBackgroundThread();

View File

@ -52,8 +52,8 @@ class StackDepotBase {
};
}
void LockAll();
void UnlockAll();
void LockBeforeFork();
void UnlockAfterFork(bool fork_child);
void PrintAll();
void TestOnlyUnmap() {
@ -160,18 +160,33 @@ StackDepotBase<Node, kReservedBits, kTabSizeLog>::Get(u32 id) {
}
template <class Node, int kReservedBits, int kTabSizeLog>
void StackDepotBase<Node, kReservedBits, kTabSizeLog>::LockAll() {
for (int i = 0; i < kTabSize; ++i) {
lock(&tab[i]);
}
void StackDepotBase<Node, kReservedBits, kTabSizeLog>::LockBeforeFork() {
// Do not lock hash table. It's very expensive, but it's not rely needed. The
// parent process will neither lock nor unlock. Child process risks to be
// deadlocked on already locked buckets. To avoid deadlock we will unlock
// every locked buckets in `UnlockAfterFork`. This may affect consistency of
// the hash table, but the only issue is a few items inserted by parent
// process will be not found by child, and the child may insert them again,
// wasting some space in `stackStore`.
// We still need to lock nodes.
nodes.Lock();
}
template <class Node, int kReservedBits, int kTabSizeLog>
void StackDepotBase<Node, kReservedBits, kTabSizeLog>::UnlockAll() {
void StackDepotBase<Node, kReservedBits, kTabSizeLog>::UnlockAfterFork(
bool fork_child) {
nodes.Unlock();
// Only unlock in child process to avoid deadlock. See `LockBeforeFork`.
if (!fork_child)
return;
for (int i = 0; i < kTabSize; ++i) {
atomic_uint32_t *p = &tab[i];
uptr s = atomic_load(p, memory_order_relaxed);
unlock(p, s & kUnlockMask);
if (s & kLockMask)
unlock(p, s & kUnlockMask);
}
}

View File

@ -29,42 +29,43 @@ class StackTraceTextPrinter {
frame_delimiter_(frame_delimiter),
output_(output),
dedup_token_(dedup_token),
symbolize_(RenderNeedsSymbolization(stack_trace_fmt)) {}
symbolize_(StackTracePrinter::GetOrInit()->RenderNeedsSymbolization(
stack_trace_fmt)) {}
bool ProcessAddressFrames(uptr pc) {
SymbolizedStack *frames = symbolize_
? Symbolizer::GetOrInit()->SymbolizePC(pc)
: SymbolizedStack::New(pc);
SymbolizedStackHolder symbolized_stack(
symbolize_ ? Symbolizer::GetOrInit()->SymbolizePC(pc)
: SymbolizedStack::New(pc));
const SymbolizedStack *frames = symbolized_stack.get();
if (!frames)
return false;
for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
for (const SymbolizedStack *cur = frames; cur; cur = cur->next) {
uptr prev_len = output_->length();
RenderFrame(output_, stack_trace_fmt_, frame_num_++, cur->info.address,
symbolize_ ? &cur->info : nullptr,
common_flags()->symbolize_vs_style,
common_flags()->strip_path_prefix);
StackTracePrinter::GetOrInit()->RenderFrame(
output_, stack_trace_fmt_, frame_num_++, cur->info.address,
symbolize_ ? &cur->info : nullptr, common_flags()->symbolize_vs_style,
common_flags()->strip_path_prefix);
if (prev_len != output_->length())
output_->append("%c", frame_delimiter_);
output_->AppendF("%c", frame_delimiter_);
ExtendDedupToken(cur);
}
frames->ClearAll();
return true;
}
private:
// Extend the dedup token by appending a new frame.
void ExtendDedupToken(SymbolizedStack *stack) {
void ExtendDedupToken(const SymbolizedStack *stack) {
if (!dedup_token_)
return;
if (dedup_frames_-- > 0) {
if (dedup_token_->length())
dedup_token_->append("--");
if (stack->info.function != nullptr)
dedup_token_->append("%s", stack->info.function);
dedup_token_->Append("--");
if (stack->info.function)
dedup_token_->Append(stack->info.function);
}
}
@ -98,7 +99,7 @@ void StackTrace::PrintTo(InternalScopedString *output) const {
output, &dedup_token);
if (trace == nullptr || size == 0) {
output->append(" <empty stack>\n\n");
output->Append(" <empty stack>\n\n");
return;
}
@ -110,11 +111,11 @@ void StackTrace::PrintTo(InternalScopedString *output) const {
}
// Always add a trailing empty line after stack trace.
output->append("\n");
output->Append("\n");
// Append deduplication token, if non-empty.
if (dedup_token.length())
output->append("DEDUP_TOKEN: %s\n", dedup_token.data());
output->AppendF("DEDUP_TOKEN: %s\n", dedup_token.data());
}
uptr StackTrace::PrintTo(char *out_buf, uptr out_buf_size) const {
@ -197,7 +198,7 @@ void __sanitizer_symbolize_pc(uptr pc, const char *fmt, char *out_buf,
StackTraceTextPrinter printer(fmt, '\0', &output, nullptr);
if (!printer.ProcessAddressFrames(pc)) {
output.clear();
output.append("<can't symbolize>");
output.Append("<can't symbolize>");
}
CopyStringToBuffer(output, out_buf, out_buf_size);
}
@ -210,7 +211,8 @@ void __sanitizer_symbolize_global(uptr data_addr, const char *fmt,
DataInfo DI;
if (!Symbolizer::GetOrInit()->SymbolizeData(data_addr, &DI)) return;
InternalScopedString data_desc;
RenderData(&data_desc, fmt, &DI, common_flags()->strip_path_prefix);
StackTracePrinter::GetOrInit()->RenderData(&data_desc, fmt, &DI,
common_flags()->strip_path_prefix);
internal_strncpy(out_buf, data_desc.data(), out_buf_size);
out_buf[out_buf_size - 1] = 0;
}

View File

@ -12,13 +12,28 @@
#include "sanitizer_stacktrace_printer.h"
#include "sanitizer_common.h"
#include "sanitizer_file.h"
#include "sanitizer_flags.h"
#include "sanitizer_fuchsia.h"
#include "sanitizer_symbolizer_markup.h"
namespace __sanitizer {
const char *StripFunctionName(const char *function) {
StackTracePrinter *StackTracePrinter::GetOrInit() {
static StackTracePrinter *stacktrace_printer;
static StaticSpinMutex init_mu;
SpinMutexLock l(&init_mu);
if (stacktrace_printer)
return stacktrace_printer;
stacktrace_printer = StackTracePrinter::NewStackTracePrinter();
CHECK(stacktrace_printer);
return stacktrace_printer;
}
const char *StackTracePrinter::StripFunctionName(const char *function) {
if (!common_flags()->demangle)
return function;
if (!function)
@ -47,6 +62,13 @@ const char *StripFunctionName(const char *function) {
// sanitizer_symbolizer_markup.cpp implements these differently.
#if !SANITIZER_SYMBOLIZER_MARKUP
StackTracePrinter *StackTracePrinter::NewStackTracePrinter() {
if (common_flags()->enable_symbolizer_markup)
return new (GetGlobalLowLevelAllocator()) MarkupStackTracePrinter();
return new (GetGlobalLowLevelAllocator()) FormattedStackTracePrinter();
}
static const char *DemangleFunctionName(const char *function) {
if (!common_flags()->demangle)
return function;
@ -130,20 +152,23 @@ static void MaybeBuildIdToBuffer(const AddressInfo &info, bool PrefixSpace,
InternalScopedString *buffer) {
if (info.uuid_size) {
if (PrefixSpace)
buffer->append(" ");
buffer->append("(BuildId: ");
buffer->Append(" ");
buffer->Append("(BuildId: ");
for (uptr i = 0; i < info.uuid_size; ++i) {
buffer->append("%02x", info.uuid[i]);
buffer->AppendF("%02x", info.uuid[i]);
}
buffer->append(")");
buffer->Append(")");
}
}
static const char kDefaultFormat[] = " #%n %p %F %L";
void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
uptr address, const AddressInfo *info, bool vs_style,
const char *strip_path_prefix) {
void FormattedStackTracePrinter::RenderFrame(InternalScopedString *buffer,
const char *format, int frame_no,
uptr address,
const AddressInfo *info,
bool vs_style,
const char *strip_path_prefix) {
// info will be null in the case where symbolization is not needed for the
// given format. This ensures that the code below will get a hard failure
// rather than print incorrect information in case RenderNeedsSymbolization
@ -154,56 +179,56 @@ void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
format = kDefaultFormat;
for (const char *p = format; *p != '\0'; p++) {
if (*p != '%') {
buffer->append("%c", *p);
buffer->AppendF("%c", *p);
continue;
}
p++;
switch (*p) {
case '%':
buffer->append("%%");
buffer->Append("%");
break;
// Frame number and all fields of AddressInfo structure.
case 'n':
buffer->append("%u", frame_no);
buffer->AppendF("%u", frame_no);
break;
case 'p':
buffer->append("0x%zx", address);
buffer->AppendF("%p", (void *)address);
break;
case 'm':
buffer->append("%s", StripPathPrefix(info->module, strip_path_prefix));
buffer->AppendF("%s", StripPathPrefix(info->module, strip_path_prefix));
break;
case 'o':
buffer->append("0x%zx", info->module_offset);
buffer->AppendF("0x%zx", info->module_offset);
break;
case 'b':
MaybeBuildIdToBuffer(*info, /*PrefixSpace=*/false, buffer);
break;
case 'f':
buffer->append("%s",
DemangleFunctionName(StripFunctionName(info->function)));
buffer->AppendF("%s",
DemangleFunctionName(StripFunctionName(info->function)));
break;
case 'q':
buffer->append("0x%zx", info->function_offset != AddressInfo::kUnknown
? info->function_offset
: 0x0);
buffer->AppendF("0x%zx", info->function_offset != AddressInfo::kUnknown
? info->function_offset
: 0x0);
break;
case 's':
buffer->append("%s", StripPathPrefix(info->file, strip_path_prefix));
buffer->AppendF("%s", StripPathPrefix(info->file, strip_path_prefix));
break;
case 'l':
buffer->append("%d", info->line);
buffer->AppendF("%d", info->line);
break;
case 'c':
buffer->append("%d", info->column);
buffer->AppendF("%d", info->column);
break;
// Smarter special cases.
case 'F':
// Function name and offset, if file is unknown.
if (info->function) {
buffer->append("in %s",
DemangleFunctionName(StripFunctionName(info->function)));
buffer->AppendF(
"in %s", DemangleFunctionName(StripFunctionName(info->function)));
if (!info->file && info->function_offset != AddressInfo::kUnknown)
buffer->append("+0x%zx", info->function_offset);
buffer->AppendF("+0x%zx", info->function_offset);
}
break;
case 'S':
@ -224,7 +249,7 @@ void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
MaybeBuildIdToBuffer(*info, /*PrefixSpace=*/true, buffer);
#endif
} else {
buffer->append("(<unknown module>)");
buffer->Append("(<unknown module>)");
}
break;
case 'M':
@ -239,18 +264,18 @@ void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
MaybeBuildIdToBuffer(*info, /*PrefixSpace=*/true, buffer);
#endif
} else {
buffer->append("(%p)", (void *)address);
buffer->AppendF("(%p)", (void *)address);
}
break;
default:
Report("Unsupported specifier in stack frame format: %c (%p)!\n", *p,
(void *)p);
(const void *)p);
Die();
}
}
}
bool RenderNeedsSymbolization(const char *format) {
bool FormattedStackTracePrinter::RenderNeedsSymbolization(const char *format) {
if (0 == internal_strcmp(format, "DEFAULT"))
format = kDefaultFormat;
for (const char *p = format; *p != '\0'; p++) {
@ -273,30 +298,32 @@ bool RenderNeedsSymbolization(const char *format) {
return false;
}
void RenderData(InternalScopedString *buffer, const char *format,
const DataInfo *DI, const char *strip_path_prefix) {
void FormattedStackTracePrinter::RenderData(InternalScopedString *buffer,
const char *format,
const DataInfo *DI,
const char *strip_path_prefix) {
for (const char *p = format; *p != '\0'; p++) {
if (*p != '%') {
buffer->append("%c", *p);
buffer->AppendF("%c", *p);
continue;
}
p++;
switch (*p) {
case '%':
buffer->append("%%");
buffer->Append("%");
break;
case 's':
buffer->append("%s", StripPathPrefix(DI->file, strip_path_prefix));
buffer->AppendF("%s", StripPathPrefix(DI->file, strip_path_prefix));
break;
case 'l':
buffer->append("%zu", DI->line);
buffer->AppendF("%zu", DI->line);
break;
case 'g':
buffer->append("%s", DI->name);
buffer->AppendF("%s", DI->name);
break;
default:
Report("Unsupported specifier in stack frame format: %c (%p)!\n", *p,
(void *)p);
(const void *)p);
Die();
}
}
@ -304,33 +331,35 @@ void RenderData(InternalScopedString *buffer, const char *format,
#endif // !SANITIZER_SYMBOLIZER_MARKUP
void RenderSourceLocation(InternalScopedString *buffer, const char *file,
int line, int column, bool vs_style,
const char *strip_path_prefix) {
void StackTracePrinter::RenderSourceLocation(InternalScopedString *buffer,
const char *file, int line,
int column, bool vs_style,
const char *strip_path_prefix) {
if (vs_style && line > 0) {
buffer->append("%s(%d", StripPathPrefix(file, strip_path_prefix), line);
buffer->AppendF("%s(%d", StripPathPrefix(file, strip_path_prefix), line);
if (column > 0)
buffer->append(",%d", column);
buffer->append(")");
buffer->AppendF(",%d", column);
buffer->Append(")");
return;
}
buffer->append("%s", StripPathPrefix(file, strip_path_prefix));
buffer->AppendF("%s", StripPathPrefix(file, strip_path_prefix));
if (line > 0) {
buffer->append(":%d", line);
buffer->AppendF(":%d", line);
if (column > 0)
buffer->append(":%d", column);
buffer->AppendF(":%d", column);
}
}
void RenderModuleLocation(InternalScopedString *buffer, const char *module,
uptr offset, ModuleArch arch,
const char *strip_path_prefix) {
buffer->append("(%s", StripPathPrefix(module, strip_path_prefix));
void StackTracePrinter::RenderModuleLocation(InternalScopedString *buffer,
const char *module, uptr offset,
ModuleArch arch,
const char *strip_path_prefix) {
buffer->AppendF("(%s", StripPathPrefix(module, strip_path_prefix));
if (arch != kModuleArchUnknown) {
buffer->append(":%s", ModuleArchToString(arch));
buffer->AppendF(":%s", ModuleArchToString(arch));
}
buffer->append("+0x%zx)", offset);
buffer->AppendF("+0x%zx)", offset);
}
} // namespace __sanitizer

View File

@ -13,61 +13,102 @@
#define SANITIZER_STACKTRACE_PRINTER_H
#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_symbolizer.h"
namespace __sanitizer {
// Strip interceptor prefixes from function name.
const char *StripFunctionName(const char *function);
// StacktracePrinter is an interface that is implemented by
// classes that can perform rendering of the different parts
// of a stacktrace.
class StackTracePrinter {
public:
static StackTracePrinter *GetOrInit();
// Render the contents of "info" structure, which represents the contents of
// stack frame "frame_no" and appends it to the "buffer". "format" is a
// string with placeholders, which is copied to the output with
// placeholders substituted with the contents of "info". For example,
// format string
// " frame %n: function %F at %S"
// will be turned into
// " frame 10: function foo::bar() at my/file.cc:10"
// You may additionally pass "strip_path_prefix" to strip prefixes of paths to
// source files and modules.
// Here's the full list of available placeholders:
// %% - represents a '%' character;
// %n - frame number (copy of frame_no);
// %p - PC in hex format;
// %m - path to module (binary or shared object);
// %o - offset in the module in hex format;
// %f - function name;
// %q - offset in the function in hex format (*if available*);
// %s - path to source file;
// %l - line in the source file;
// %c - column in the source file;
// %F - if function is known to be <foo>, prints "in <foo>", possibly
// followed by the offset in this function, but only if source file
// is unknown;
// %S - prints file/line/column information;
// %L - prints location information: file/line/column, if it is known, or
// module+offset if it is known, or (<unknown module>) string.
// %M - prints module basename and offset, if it is known, or PC.
void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
uptr address, const AddressInfo *info, bool vs_style,
const char *strip_path_prefix = "");
// Strip interceptor prefixes from function name.
const char *StripFunctionName(const char *function);
bool RenderNeedsSymbolization(const char *format);
virtual void RenderFrame(InternalScopedString *buffer, const char *format,
int frame_no, uptr address, const AddressInfo *info,
bool vs_style, const char *strip_path_prefix = "") {
// Should be pure virtual, but we can't depend on __cxa_pure_virtual.
UNIMPLEMENTED();
}
void RenderSourceLocation(InternalScopedString *buffer, const char *file,
int line, int column, bool vs_style,
const char *strip_path_prefix);
virtual bool RenderNeedsSymbolization(const char *format) {
// Should be pure virtual, but we can't depend on __cxa_pure_virtual.
UNIMPLEMENTED();
}
void RenderModuleLocation(InternalScopedString *buffer, const char *module,
uptr offset, ModuleArch arch,
const char *strip_path_prefix);
void RenderSourceLocation(InternalScopedString *buffer, const char *file,
int line, int column, bool vs_style,
const char *strip_path_prefix);
// Same as RenderFrame, but for data section (global variables).
// Accepts %s, %l from above.
// Also accepts:
// %g - name of the global variable.
void RenderData(InternalScopedString *buffer, const char *format,
const DataInfo *DI, const char *strip_path_prefix = "");
void RenderModuleLocation(InternalScopedString *buffer, const char *module,
uptr offset, ModuleArch arch,
const char *strip_path_prefix);
virtual void RenderData(InternalScopedString *buffer, const char *format,
const DataInfo *DI,
const char *strip_path_prefix = "") {
// Should be pure virtual, but we can't depend on __cxa_pure_virtual.
UNIMPLEMENTED();
}
private:
// To be called from StackTracePrinter::GetOrInit
static StackTracePrinter *NewStackTracePrinter();
protected:
~StackTracePrinter() {}
};
class FormattedStackTracePrinter : public StackTracePrinter {
public:
// Render the contents of "info" structure, which represents the contents of
// stack frame "frame_no" and appends it to the "buffer". "format" is a
// string with placeholders, which is copied to the output with
// placeholders substituted with the contents of "info". For example,
// format string
// " frame %n: function %F at %S"
// will be turned into
// " frame 10: function foo::bar() at my/file.cc:10"
// You may additionally pass "strip_path_prefix" to strip prefixes of paths to
// source files and modules.
// Here's the full list of available placeholders:
// %% - represents a '%' character;
// %n - frame number (copy of frame_no);
// %p - PC in hex format;
// %m - path to module (binary or shared object);
// %o - offset in the module in hex format;
// %f - function name;
// %q - offset in the function in hex format (*if available*);
// %s - path to source file;
// %l - line in the source file;
// %c - column in the source file;
// %F - if function is known to be <foo>, prints "in <foo>", possibly
// followed by the offset in this function, but only if source file
// is unknown;
// %S - prints file/line/column information;
// %L - prints location information: file/line/column, if it is known, or
// module+offset if it is known, or (<unknown module>) string.
// %M - prints module basename and offset, if it is known, or PC.
void RenderFrame(InternalScopedString *buffer, const char *format,
int frame_no, uptr address, const AddressInfo *info,
bool vs_style, const char *strip_path_prefix = "") override;
bool RenderNeedsSymbolization(const char *format) override;
// Same as RenderFrame, but for data section (global variables).
// Accepts %s, %l from above.
// Also accepts:
// %g - name of the global variable.
void RenderData(InternalScopedString *buffer, const char *format,
const DataInfo *DI,
const char *strip_path_prefix = "") override;
protected:
~FormattedStackTracePrinter() {}
};
} // namespace __sanitizer

View File

@ -58,17 +58,16 @@ void BufferedStackTrace::UnwindFast(uptr pc, uptr bp, uptr stack_top,
// Avoid infinite loop when frame == frame[0] by using frame > prev_frame.
while (IsValidFrame(bp, stack_top, bottom) && IsAligned(bp, sizeof(uhwptr)) &&
size < max_depth) {
uhwptr pc1 = ((uhwptr *)bp)[15];
// %o7 contains the address of the call instruction and not the
// return address, so we need to compensate.
uhwptr pc1 = GetNextInstructionPc(((uhwptr *)bp)[15]);
// Let's assume that any pointer in the 0th page is invalid and
// stop unwinding here. If we're adding support for a platform
// where this isn't true, we need to reconsider this check.
if (pc1 < kPageSize)
break;
if (pc1 != pc) {
// %o7 contains the address of the call instruction and not the
// return address, so we need to compensate.
trace_buffer[size++] = GetNextInstructionPc((uptr)pc1);
}
if (pc1 != pc)
trace_buffer[size++] = pc1;
bottom = bp;
bp = (uptr)((uhwptr *)bp)[14] + STACK_BIAS;
}

View File

@ -257,8 +257,8 @@ static void TracerThreadDieCallback() {
static void TracerThreadSignalHandler(int signum, __sanitizer_siginfo *siginfo,
void *uctx) {
SignalContext ctx(siginfo, uctx);
Printf("Tracer caught signal %d: addr=0x%zx pc=0x%zx sp=0x%zx\n", signum,
ctx.addr, ctx.pc, ctx.sp);
Printf("Tracer caught signal %d: addr=%p pc=%p sp=%p\n", signum,
(void *)ctx.addr, (void *)ctx.pc, (void *)ctx.sp);
ThreadSuspender *inst = thread_suspender_instance;
if (inst) {
if (signum == SIGABRT)
@ -565,7 +565,7 @@ PtraceRegistersStatus SuspendedThreadsListLinux::GetRegistersAndSP(
constexpr uptr uptr_sz = sizeof(uptr);
int pterrno;
#ifdef ARCH_IOVEC_FOR_GETREGSET
auto append = [&](uptr regset) {
auto AppendF = [&](uptr regset) {
uptr size = buffer->size();
// NT_X86_XSTATE requires 64bit alignment.
uptr size_up = RoundUpTo(size, 8 / uptr_sz);
@ -596,11 +596,11 @@ PtraceRegistersStatus SuspendedThreadsListLinux::GetRegistersAndSP(
};
buffer->clear();
bool fail = !append(NT_PRSTATUS);
bool fail = !AppendF(NT_PRSTATUS);
if (!fail) {
// Accept the first available and do not report errors.
for (uptr regs : kExtraRegs)
if (regs && append(regs))
if (regs && AppendF(regs))
break;
}
#else

View File

@ -158,8 +158,8 @@ static void TracerThreadDieCallback() {
static void TracerThreadSignalHandler(int signum, __sanitizer_siginfo *siginfo,
void *uctx) {
SignalContext ctx(siginfo, uctx);
Printf("Tracer caught signal %d: addr=0x%zx pc=0x%zx sp=0x%zx\n", signum,
ctx.addr, ctx.pc, ctx.sp);
Printf("Tracer caught signal %d: addr=%p pc=%p sp=%p\n", signum,
(void *)ctx.addr, (void *)ctx.pc, (void *)ctx.sp);
ThreadSuspender *inst = thread_suspender_instance;
if (inst) {
if (signum == SIGABRT)

View File

@ -86,7 +86,7 @@ void SuppressionContext::ParseFromFile(const char *filename) {
}
Parse(file_contents);
UnmapOrDie(file_contents, contents_size);
UnmapOrDie(file_contents, buffer_size);
}
bool SuppressionContext::Match(const char *str, const char *type,
@ -138,7 +138,10 @@ void SuppressionContext::Parse(const char *str) {
}
}
if (type == suppression_types_num_) {
Printf("%s: failed to parse suppressions\n", SanitizerToolName);
Printf("%s: failed to parse suppressions.\n", SanitizerToolName);
Printf("Supported suppression types are:\n");
for (type = 0; type < suppression_types_num_; type++)
Printf("- %s\n", suppression_types_[type]);
Die();
}
Suppression s;

View File

@ -10,6 +10,8 @@
// run-time libraries.
//===----------------------------------------------------------------------===//
#include <errno.h>
#include "sanitizer_allocator_internal.h"
#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
@ -128,7 +130,7 @@ Symbolizer::Symbolizer(IntrusiveList<SymbolizerTool> tools)
start_hook_(0), end_hook_(0) {}
Symbolizer::SymbolizerScope::SymbolizerScope(const Symbolizer *sym)
: sym_(sym) {
: sym_(sym), errno_(errno) {
if (sym_->start_hook_)
sym_->start_hook_();
}
@ -136,6 +138,7 @@ Symbolizer::SymbolizerScope::SymbolizerScope(const Symbolizer *sym)
Symbolizer::SymbolizerScope::~SymbolizerScope() {
if (sym_->end_hook_)
sym_->end_hook_();
errno = errno_;
}
} // namespace __sanitizer

View File

@ -64,6 +64,26 @@ struct SymbolizedStack {
SymbolizedStack();
};
class SymbolizedStackHolder {
SymbolizedStack *Stack;
void clear() {
if (Stack)
Stack->ClearAll();
}
public:
explicit SymbolizedStackHolder(SymbolizedStack *Stack = nullptr)
: Stack(Stack) {}
~SymbolizedStackHolder() { clear(); }
void reset(SymbolizedStack *S = nullptr) {
if (Stack != S)
clear();
Stack = S;
}
const SymbolizedStack *get() const { return Stack; }
};
// For now, DataInfo is used to describe global variable.
struct DataInfo {
// Owns all the string members. Storage for them is
@ -136,7 +156,7 @@ class Symbolizer final {
// Release internal caches (if any).
void Flush();
// Attempts to demangle the provided C++ mangled name.
// Attempts to demangle the provided C++ mangled name. Never returns nullptr.
const char *Demangle(const char *name);
// Allow user to install hooks that would be called before/after Symbolizer
@ -154,6 +174,8 @@ class Symbolizer final {
void InvalidateModuleList();
const ListOfModules &GetRefreshedListOfModules();
private:
// GetModuleNameAndOffsetForPC has to return a string to the caller.
// Since the corresponding module might get unloaded later, we should create
@ -187,7 +209,7 @@ class Symbolizer final {
// If stale, need to reload the modules before looking up addresses.
bool modules_fresh_;
// Platform-specific default demangler, must not return nullptr.
// Platform-specific default demangler, returns nullptr on failure.
const char *PlatformDemangle(const char *name);
static Symbolizer *symbolizer_;
@ -212,6 +234,7 @@ class Symbolizer final {
~SymbolizerScope();
private:
const Symbolizer *sym_;
int errno_; // Backup errno in case symbolizer change the value.
};
};

View File

@ -160,6 +160,15 @@ void ParseSymbolizePCOutput(const char *str, SymbolizedStack *res);
// Used by LLVMSymbolizer and InternalSymbolizer.
void ParseSymbolizeDataOutput(const char *str, DataInfo *info);
// Parses repeated strings in the following format:
// <function_name>
// <var_name>
// <file_name>:<line_number>[:<column_number>]
// [<frame_offset>|??] [<size>|??] [<tag_offset>|??]
// Used by LLVMSymbolizer and InternalSymbolizer.
void ParseSymbolizeFrameOutput(const char *str,
InternalMmapVector<LocalInfo> *locals);
} // namespace __sanitizer
#endif // SANITIZER_SYMBOLIZER_INTERNAL_H

View File

@ -199,7 +199,7 @@ static char *DemangleAlloc(const char *name, bool always_alloc) {
#endif
if (always_alloc)
return internal_strdup(name);
return 0;
return nullptr;
}
const char *LibbacktraceSymbolizer::Demangle(const char *name) {

View File

@ -117,7 +117,7 @@ bool Symbolizer::SymbolizeData(uptr addr, DataInfo *info) {
return true;
}
}
return true;
return false;
}
bool Symbolizer::SymbolizeFrame(uptr addr, FrameInfo *info) {
@ -133,7 +133,7 @@ bool Symbolizer::SymbolizeFrame(uptr addr, FrameInfo *info) {
return true;
}
}
return true;
return false;
}
bool Symbolizer::GetModuleNameAndOffsetForPC(uptr pc, const char **module_name,
@ -159,13 +159,16 @@ void Symbolizer::Flush() {
}
const char *Symbolizer::Demangle(const char *name) {
CHECK(name);
Lock l(&mu_);
for (auto &tool : tools_) {
SymbolizerScope sym_scope(this);
if (const char *demangled = tool.Demangle(name))
return demangled;
}
return PlatformDemangle(name);
if (const char *demangled = PlatformDemangle(name))
return demangled;
return name;
}
bool Symbolizer::FindModuleNameAndOffsetForAddress(uptr address,
@ -188,6 +191,13 @@ void Symbolizer::RefreshModules() {
modules_fresh_ = true;
}
const ListOfModules &Symbolizer::GetRefreshedListOfModules() {
if (!modules_fresh_)
RefreshModules();
return modules_;
}
static const LoadedModule *SearchForModule(const ListOfModules &modules,
uptr address) {
for (uptr i = 0; i < modules.size(); i++) {
@ -382,8 +392,8 @@ void ParseSymbolizeDataOutput(const char *str, DataInfo *info) {
str = ExtractUptr(str, "\n", &info->line);
}
static void ParseSymbolizeFrameOutput(const char *str,
InternalMmapVector<LocalInfo> *locals) {
void ParseSymbolizeFrameOutput(const char *str,
InternalMmapVector<LocalInfo> *locals) {
if (internal_strncmp(str, "??", 2) == 0)
return;

View File

@ -42,7 +42,8 @@ bool DlAddrSymbolizer::SymbolizePC(uptr addr, SymbolizedStack *stack) {
}
const char *demangled = DemangleSwiftAndCXX(info.dli_sname);
if (!demangled) return false;
if (!demangled)
demangled = info.dli_sname;
stack->info.function = internal_strdup(demangled);
return true;
}
@ -52,6 +53,8 @@ bool DlAddrSymbolizer::SymbolizeData(uptr addr, DataInfo *datainfo) {
int result = dladdr((const void *)addr, &info);
if (!result) return false;
const char *demangled = DemangleSwiftAndCXX(info.dli_sname);
if (!demangled)
demangled = info.dli_sname;
datainfo->name = internal_strdup(demangled);
datainfo->start = (uptr)info.dli_saddr;
return true;

View File

@ -8,143 +8,155 @@
//
// This file is shared between various sanitizers' runtime libraries.
//
// Implementation of offline markup symbolizer.
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
#if SANITIZER_SYMBOLIZER_MARKUP
#if SANITIZER_FUCHSIA
#include "sanitizer_symbolizer_fuchsia.h"
# endif
# include <limits.h>
# include <unwind.h>
# include "sanitizer_stacktrace.h"
# include "sanitizer_symbolizer.h"
namespace __sanitizer {
// This generic support for offline symbolizing is based on the
// Fuchsia port. We don't do any actual symbolization per se.
// Instead, we emit text containing raw addresses and raw linkage
// symbol names, embedded in Fuchsia's symbolization markup format.
// Fuchsia's logging infrastructure emits enough information about
// process memory layout that a post-processing filter can do the
// symbolization and pretty-print the markup. See the spec at:
// https://fuchsia.googlesource.com/zircon/+/master/docs/symbolizer_markup.md
// See the spec at:
// https://llvm.org/docs/SymbolizerMarkupFormat.html
//===----------------------------------------------------------------------===//
// This is used by UBSan for type names, and by ASan for global variable names.
// It's expected to return a static buffer that will be reused on each call.
const char *Symbolizer::Demangle(const char *name) {
static char buffer[kFormatDemangleMax];
internal_snprintf(buffer, sizeof(buffer), kFormatDemangle, name);
return buffer;
#include "sanitizer_symbolizer_markup.h"
#include "sanitizer_common.h"
#include "sanitizer_symbolizer.h"
#include "sanitizer_symbolizer_markup_constants.h"
namespace __sanitizer {
void MarkupStackTracePrinter::RenderData(InternalScopedString *buffer,
const char *format, const DataInfo *DI,
const char *strip_path_prefix) {
RenderContext(buffer);
buffer->AppendF(kFormatData, reinterpret_cast<void *>(DI->start));
}
// This is used mostly for suppression matching. Making it work
// would enable "interceptor_via_lib" suppressions. It's also used
// once in UBSan to say "in module ..." in a message that also
// includes an address in the module, so post-processing can already
// pretty-print that so as to indicate the module.
bool Symbolizer::GetModuleNameAndOffsetForPC(uptr pc, const char **module_name,
uptr *module_address) {
bool MarkupStackTracePrinter::RenderNeedsSymbolization(const char *format) {
return false;
}
// This is mainly used by hwasan for online symbolization. This isn't needed
// since hwasan can always just dump stack frames for offline symbolization.
bool Symbolizer::SymbolizeFrame(uptr addr, FrameInfo *info) { return false; }
// This is used in some places for suppression checking, which we
// don't really support for Fuchsia. It's also used in UBSan to
// identify a PC location to a function name, so we always fill in
// the function member with a string containing markup around the PC
// value.
// TODO(mcgrathr): Under SANITIZER_GO, it's currently used by TSan
// to render stack frames, but that should be changed to use
// RenderStackFrame.
SymbolizedStack *Symbolizer::SymbolizePC(uptr addr) {
SymbolizedStack *s = SymbolizedStack::New(addr);
char buffer[kFormatFunctionMax];
internal_snprintf(buffer, sizeof(buffer), kFormatFunction, addr);
s->info.function = internal_strdup(buffer);
return s;
// We don't support the stack_trace_format flag at all.
void MarkupStackTracePrinter::RenderFrame(InternalScopedString *buffer,
const char *format, int frame_no,
uptr address, const AddressInfo *info,
bool vs_style,
const char *strip_path_prefix) {
CHECK(!RenderNeedsSymbolization(format));
RenderContext(buffer);
buffer->AppendF(kFormatFrame, frame_no, reinterpret_cast<void *>(address));
}
// Always claim we succeeded, so that RenderDataInfo will be called.
bool Symbolizer::SymbolizeData(uptr addr, DataInfo *info) {
bool MarkupSymbolizerTool::SymbolizePC(uptr addr, SymbolizedStack *stack) {
char buffer[kFormatFunctionMax];
internal_snprintf(buffer, sizeof(buffer), kFormatFunction,
reinterpret_cast<void *>(addr));
stack->info.function = internal_strdup(buffer);
return true;
}
bool MarkupSymbolizerTool::SymbolizeData(uptr addr, DataInfo *info) {
info->Clear();
info->start = addr;
return true;
}
// We ignore the format argument to __sanitizer_symbolize_global.
void RenderData(InternalScopedString *buffer, const char *format,
const DataInfo *DI, const char *strip_path_prefix) {
buffer->append(kFormatData, DI->start);
const char *MarkupSymbolizerTool::Demangle(const char *name) {
static char buffer[kFormatDemangleMax];
internal_snprintf(buffer, sizeof(buffer), kFormatDemangle, name);
return buffer;
}
bool RenderNeedsSymbolization(const char *format) { return false; }
// Fuchsia's implementation of symbolizer markup doesn't need to emit contextual
// elements at this point.
// Fuchsia's logging infrastructure emits enough information about
// process memory layout that a post-processing filter can do the
// symbolization and pretty-print the markup.
#if !SANITIZER_FUCHSIA
// We don't support the stack_trace_format flag at all.
void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
uptr address, const AddressInfo *info, bool vs_style,
const char *strip_path_prefix) {
CHECK(!RenderNeedsSymbolization(format));
buffer->append(kFormatFrame, frame_no, address);
// Two modules are considered identical iff their base address, build id
// (uuid) and full path all agree.
static bool ModulesEq(const LoadedModule &module,
                      const RenderedModule &renderedModule) {
  if (module.base_address() != renderedModule.base_address)
    return false;
  if (internal_memcmp(module.uuid(), renderedModule.uuid,
                      module.uuid_size()) != 0)
    return false;
  return internal_strcmp(module.full_name(), renderedModule.full_name) == 0;
}
Symbolizer *Symbolizer::PlatformInit() {
return new (symbolizer_allocator_) Symbolizer({});
static bool ModuleHasBeenRendered(
const LoadedModule &module,
const InternalMmapVectorNoCtor<RenderedModule> &renderedModules) {
for (const auto &renderedModule : renderedModules)
if (ModulesEq(module, renderedModule))
return true;
return false;
}
void Symbolizer::LateInitialize() { Symbolizer::GetOrInit(); }
static void RenderModule(InternalScopedString *buffer,
const LoadedModule &module, uptr moduleId) {
InternalScopedString buildIdBuffer;
for (uptr i = 0; i < module.uuid_size(); i++)
buildIdBuffer.AppendF("%02x", module.uuid()[i]);
// No-op stubs: deadly-signal reports are not rendered in-process here —
// presumably handled by the surrounding logging/post-processing
// infrastructure (TODO confirm against the platform).
void StartReportDeadlySignal() {}
// Intentionally empty, same rationale as StartReportDeadlySignal above.
void ReportDeadlySignal(const SignalContext &sig, u32 tid,
                        UnwindSignalStackCallbackType unwind,
                        const void *unwind_context) {}
#if SANITIZER_CAN_SLOW_UNWIND
// State threaded through the _Unwind_Backtrace callback: the destination
// trace plus the maximum number of frames to record.
struct UnwindTraceArg {
  BufferedStackTrace *stack;
  u32 max_depth;
};
_Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx, void *param) {
UnwindTraceArg *arg = static_cast<UnwindTraceArg *>(param);
CHECK_LT(arg->stack->size, arg->max_depth);
uptr pc = _Unwind_GetIP(ctx);
if (pc < PAGE_SIZE) return _URC_NORMAL_STOP;
arg->stack->trace_buffer[arg->stack->size++] = pc;
return (arg->stack->size == arg->max_depth ? _URC_NORMAL_STOP
: _URC_NO_REASON);
buffer->AppendF(kFormatModule, moduleId, module.full_name(),
buildIdBuffer.data());
buffer->Append("\n");
}
void BufferedStackTrace::UnwindSlow(uptr pc, u32 max_depth) {
CHECK_GE(max_depth, 2);
size = 0;
UnwindTraceArg arg = {this, Min(max_depth + 1, kStackTraceMax)};
_Unwind_Backtrace(Unwind_Trace, &arg);
CHECK_GT(size, 0);
// We need to pop a few frames so that pc is on top.
uptr to_pop = LocatePcInTrace(pc);
// trace_buffer[0] belongs to the current function so we always pop it,
// unless there is only 1 frame in the stack trace (1 frame is always better
// than 0!).
PopStackFrames(Min(to_pop, static_cast<uptr>(1)));
trace_buffer[0] = pc;
// Emits one mmap markup element per address range of `module`, carrying the
// range's access flags and its offset relative to the module's load address,
// so the offline symbolizer can map addresses back into the module.
static void RenderMmaps(InternalScopedString *buffer,
                        const LoadedModule &module, uptr moduleId) {
  InternalScopedString accessBuffer;
  // All module mmaps are readable at least
  for (const auto &range : module.ranges()) {
    // Build the "r"/"rw"/"rx"/"rwx" access string for this range.
    accessBuffer.Append("r");
    if (range.writable)
      accessBuffer.Append("w");
    if (range.executable)
      accessBuffer.Append("x");

    //{{{mmap:%starting_addr:%size_in_hex:load:%moduleId:r%(w|x):%relative_addr}}}
    // module.base_address == dlpi_addr
    // range.beg == dlpi_addr + p_vaddr
    // relative address == p_vaddr == range.beg - module.base_address
    buffer->AppendF(kFormatMmap, reinterpret_cast<void *>(range.beg),
                    range.end - range.beg, static_cast<int>(moduleId),
                    accessBuffer.data(), range.beg - module.base_address());
    buffer->Append("\n");
    // Reset the access string for the next range.
    accessBuffer.clear();
  }
}
void BufferedStackTrace::UnwindSlow(uptr pc, void *context, u32 max_depth) {
CHECK(context);
CHECK_GE(max_depth, 2);
UNREACHABLE("signal context doesn't exist");
// Renders the contextual markup elements (a reset marker plus module and
// mmap records) for every module not yet rendered, and records each such
// module so it is emitted only once per process.
void MarkupStackTracePrinter::RenderContext(InternalScopedString *buffer) {
  // First time through: tell the post-processor to discard any module state
  // it may have accumulated from earlier output.
  if (renderedModules_.size() == 0)
    buffer->Append("{{{reset}}}\n");

  const auto &modules = Symbolizer::GetOrInit()->GetRefreshedListOfModules();

  for (const auto &module : modules) {
    if (ModuleHasBeenRendered(module, renderedModules_))
      continue;

    // symbolizer markup id, used to refer to this modules from other contextual
    // elements
    uptr moduleId = renderedModules_.size();

    RenderModule(buffer, module, moduleId);
    RenderMmaps(buffer, module, moduleId);

    // The uuid field is value-initialized here and filled in below.
    renderedModules_.push_back({
        internal_strdup(module.full_name()),
        module.base_address(),
        {},
    });

    // kModuleUUIDSize is the size of curModule.uuid
    CHECK_GE(kModuleUUIDSize, module.uuid_size());
    internal_memcpy(renderedModules_.back().uuid, module.uuid(),
                    module.uuid_size());
  }
}
#endif // SANITIZER_CAN_SLOW_UNWIND
#endif // !SANITIZER_FUCHSIA
} // namespace __sanitizer
#endif // SANITIZER_SYMBOLIZER_MARKUP

View File

@ -0,0 +1,79 @@
//===-- sanitizer_symbolizer_markup.h -----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between various sanitizers' runtime libraries.
//
// Header for the offline markup symbolizer.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_SYMBOLIZER_MARKUP_H
#define SANITIZER_SYMBOLIZER_MARKUP_H
#include "sanitizer_common.h"
#include "sanitizer_stacktrace_printer.h"
#include "sanitizer_symbolizer.h"
#include "sanitizer_symbolizer_internal.h"
namespace __sanitizer {
// Simpler view of a LoadedModule. It only holds the information necessary to
// identify unique modules.
struct RenderedModule {
  char *full_name;           // owned copy of the module path (internal_strdup'd)
  uptr base_address;         // module load address
  u8 uuid[kModuleUUIDSize];  // BuildId
};
// StackTracePrinter that emits offline symbolizer markup instead of
// symbolized text; a post-processing filter performs the symbolization.
class MarkupStackTracePrinter : public StackTracePrinter {
 public:
  // We don't support the stack_trace_format flag at all.
  void RenderFrame(InternalScopedString *buffer, const char *format,
                   int frame_no, uptr address, const AddressInfo *info,
                   bool vs_style, const char *strip_path_prefix = "") override;

  // Always false: markup frames are resolved offline, never in-process.
  bool RenderNeedsSymbolization(const char *format) override;

  // We ignore the format argument to __sanitizer_symbolize_global.
  void RenderData(InternalScopedString *buffer, const char *format,
                  const DataInfo *DI,
                  const char *strip_path_prefix = "") override;

 private:
  // Keeps track of the modules that have been rendered to avoid re-rendering
  // them
  InternalMmapVector<RenderedModule> renderedModules_;

  // Emits contextual elements (reset/module/mmap records) for new modules.
  void RenderContext(InternalScopedString *buffer);

 protected:
  ~MarkupStackTracePrinter() {}
};
// SymbolizerTool that fills results with markup elements instead of
// performing any in-process symbolization.
class MarkupSymbolizerTool final : public SymbolizerTool {
 public:
  // This is used in some places for suppression checking, which we
  // don't really support for Fuchsia. It's also used in UBSan to
  // identify a PC location to a function name, so we always fill in
  // the function member with a string containing markup around the PC
  // value.
  // TODO(mcgrathr): Under SANITIZER_GO, it's currently used by TSan
  // to render stack frames, but that should be changed to use
  // RenderStackFrame.
  bool SymbolizePC(uptr addr, SymbolizedStack *stack) override;

  // Always claim we succeeded, so that RenderDataInfo will be called.
  bool SymbolizeData(uptr addr, DataInfo *info) override;

  // May return NULL if demangling failed.
  // This is used by UBSan for type names, and by ASan for global variable
  // names. It's expected to return a static buffer that will be reused on each
  // call.
  const char *Demangle(const char *name) override;
};
} // namespace __sanitizer
#endif // SANITIZER_SYMBOLIZER_MARKUP_H

View File

@ -1,4 +1,5 @@
//===-- sanitizer_symbolizer_fuchsia.h -----------------------------------===//
//===-- sanitizer_symbolizer_markup_constants.h
//-----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@ -8,10 +9,10 @@
//
// This file is shared between various sanitizers' runtime libraries.
//
// Define Fuchsia's string formats and limits for the markup symbolizer.
// Define string formats and limits for the markup symbolizer.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_SYMBOLIZER_FUCHSIA_H
#define SANITIZER_SYMBOLIZER_FUCHSIA_H
#ifndef SANITIZER_SYMBOLIZER_MARKUP_CONSTANTS_H
#define SANITIZER_SYMBOLIZER_MARKUP_CONSTANTS_H
#include "sanitizer_internal_defs.h"
@ -32,11 +33,17 @@ constexpr uptr kFormatFunctionMax = 64; // More than big enough for 64-bit hex.
constexpr const char *kFormatData = "{{{data:%p}}}";
// One frame in a backtrace (printed on a line by itself).
constexpr const char *kFormatFrame = "{{{bt:%u:%p}}}";
constexpr const char *kFormatFrame = "{{{bt:%d:%p}}}";
// Module contextual element.
constexpr const char *kFormatModule = "{{{module:%zu:%s:elf:%s}}}";
// mmap for a module segment.
constexpr const char *kFormatMmap = "{{{mmap:%p:0x%zx:load:%d:%s:0x%zx}}}";
// Dump trigger element.
#define FORMAT_DUMPFILE "{{{dumpfile:%s:%s}}}"
} // namespace __sanitizer
#endif // SANITIZER_SYMBOLIZER_FUCHSIA_H
#endif // SANITIZER_SYMBOLIZER_MARKUP_CONSTANTS_H

View File

@ -0,0 +1,85 @@
//===-- sanitizer_symbolizer_markup_fuchsia.cpp ---------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between various sanitizers' runtime libraries.
//
// Fuchsia specific implementation of offline markup symbolizer.
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
#if SANITIZER_SYMBOLIZER_MARKUP
# include "sanitizer_common.h"
# include "sanitizer_stacktrace_printer.h"
# include "sanitizer_symbolizer.h"
# include "sanitizer_symbolizer_markup.h"
# include "sanitizer_symbolizer_markup_constants.h"
namespace __sanitizer {
// This is used by UBSan for type names, and by ASan for global variable names.
// It's expected to return a static buffer that will be reused on each call.
const char *Symbolizer::Demangle(const char *name) {
  // Wrap the mangled name in a markup element instead of demangling
  // in-process. NOTE: static buffer, not thread-safe.
  static char buffer[kFormatDemangleMax];
  internal_snprintf(buffer, sizeof(buffer), kFormatDemangle, name);
  return buffer;
}

// This is used mostly for suppression matching. Making it work
// would enable "interceptor_via_lib" suppressions. It's also used
// once in UBSan to say "in module ..." in a message that also
// includes an address in the module, so post-processing can already
// pretty-print that so as to indicate the module.
bool Symbolizer::GetModuleNameAndOffsetForPC(uptr pc, const char **module_name,
                                             uptr *module_address) {
  return false;  // Unsupported: resolved offline from the markup stream.
}

// This is mainly used by hwasan for online symbolization. This isn't needed
// since hwasan can always just dump stack frames for offline symbolization.
bool Symbolizer::SymbolizeFrame(uptr addr, FrameInfo *info) { return false; }
// This is used in some places for suppression checking, which we
// don't really support for Fuchsia. It's also used in UBSan to
// identify a PC location to a function name, so we always fill in
// the function member with a string containing markup around the PC
// value.
// TODO(mcgrathr): Under SANITIZER_GO, it's currently used by TSan
// to render stack frames, but that should be changed to use
// RenderStackFrame.
SymbolizedStack *Symbolizer::SymbolizePC(uptr addr) {
  SymbolizedStack *s = SymbolizedStack::New(addr);
  // Markup element wrapping the raw PC; resolved by the offline symbolizer.
  char buffer[kFormatFunctionMax];
  internal_snprintf(buffer, sizeof(buffer), kFormatFunction, addr);
  s->info.function = internal_strdup(buffer);
  return s;
}

// Always claim we succeeded, so that RenderDataInfo will be called.
bool Symbolizer::SymbolizeData(uptr addr, DataInfo *info) {
  info->Clear();
  // Only the start address is recorded; the rest is resolved offline.
  info->start = addr;
  return true;
}
// Fuchsia only uses MarkupStackTracePrinter
StackTracePrinter *StackTracePrinter::NewStackTracePrinter() {
  return new (GetGlobalLowLevelAllocator()) MarkupStackTracePrinter();
}

// No contextual elements needed here: Fuchsia's logging infrastructure
// already emits the process memory-layout information.
void MarkupStackTracePrinter::RenderContext(InternalScopedString *) {}

// No platform-specific symbolizer tools; construct with an empty tool list.
Symbolizer *Symbolizer::PlatformInit() {
  return new (symbolizer_allocator_) Symbolizer({});
}

// Force creation of the global symbolizer singleton.
void Symbolizer::LateInitialize() { Symbolizer::GetOrInit(); }
} // namespace __sanitizer
#endif // SANITIZER_SYMBOLIZER_MARKUP

View File

@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
#include "sanitizer_symbolizer_markup.h"
#if SANITIZER_POSIX
# include <dlfcn.h> // for dlsym()
# include <errno.h>
@ -56,7 +57,7 @@ const char *DemangleCXXABI(const char *name) {
__cxxabiv1::__cxa_demangle(name, 0, 0, 0))
return demangled_name;
return name;
return nullptr;
}
// As of now, there are no headers for the Swift runtime. Once they are
@ -324,9 +325,12 @@ __sanitizer_symbolize_code(const char *ModuleName, u64 ModuleOffset,
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE bool
__sanitizer_symbolize_data(const char *ModuleName, u64 ModuleOffset,
char *Buffer, int MaxLength);
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE bool
__sanitizer_symbolize_frame(const char *ModuleName, u64 ModuleOffset,
char *Buffer, int MaxLength);
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_symbolize_flush();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE int
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE bool
__sanitizer_symbolize_demangle(const char *Name, char *Buffer, int MaxLength);
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE bool
__sanitizer_symbolize_set_demangle(bool Demangle);
@ -337,19 +341,19 @@ __sanitizer_symbolize_set_inline_frames(bool InlineFrames);
class InternalSymbolizer final : public SymbolizerTool {
public:
static InternalSymbolizer *get(LowLevelAllocator *alloc) {
if (__sanitizer_symbolize_set_demangle)
CHECK(__sanitizer_symbolize_set_demangle(common_flags()->demangle));
if (__sanitizer_symbolize_set_inline_frames)
CHECK(__sanitizer_symbolize_set_inline_frames(
common_flags()->symbolize_inline_frames));
if (__sanitizer_symbolize_code && __sanitizer_symbolize_data)
return new (*alloc) InternalSymbolizer();
return 0;
// These one is the most used one, so we will use it to detect a presence of
// internal symbolizer.
if (&__sanitizer_symbolize_code == nullptr)
return nullptr;
CHECK(__sanitizer_symbolize_set_demangle(common_flags()->demangle));
CHECK(__sanitizer_symbolize_set_inline_frames(
common_flags()->symbolize_inline_frames));
return new (*alloc) InternalSymbolizer();
}
bool SymbolizePC(uptr addr, SymbolizedStack *stack) override {
bool result = __sanitizer_symbolize_code(
stack->info.module, stack->info.module_offset, buffer_, kBufferSize);
stack->info.module, stack->info.module_offset, buffer_, sizeof(buffer_));
if (result)
ParseSymbolizePCOutput(buffer_, stack);
return result;
@ -357,7 +361,7 @@ class InternalSymbolizer final : public SymbolizerTool {
bool SymbolizeData(uptr addr, DataInfo *info) override {
bool result = __sanitizer_symbolize_data(info->module, info->module_offset,
buffer_, kBufferSize);
buffer_, sizeof(buffer_));
if (result) {
ParseSymbolizeDataOutput(buffer_, info);
info->start += (addr - info->module_offset); // Add the base address.
@ -365,34 +369,29 @@ class InternalSymbolizer final : public SymbolizerTool {
return result;
}
void Flush() override {
if (__sanitizer_symbolize_flush)
__sanitizer_symbolize_flush();
bool SymbolizeFrame(uptr addr, FrameInfo *info) override {
bool result = __sanitizer_symbolize_frame(info->module, info->module_offset,
buffer_, sizeof(buffer_));
if (result)
ParseSymbolizeFrameOutput(buffer_, &info->locals);
return result;
}
void Flush() override { __sanitizer_symbolize_flush(); }
const char *Demangle(const char *name) override {
if (__sanitizer_symbolize_demangle) {
for (uptr res_length = 1024;
res_length <= InternalSizeClassMap::kMaxSize;) {
char *res_buff = static_cast<char *>(InternalAlloc(res_length));
uptr req_length =
__sanitizer_symbolize_demangle(name, res_buff, res_length);
if (req_length > res_length) {
res_length = req_length + 1;
InternalFree(res_buff);
continue;
}
return res_buff;
}
if (__sanitizer_symbolize_demangle(name, buffer_, sizeof(buffer_))) {
char *res_buff = nullptr;
ExtractToken(buffer_, "", &res_buff);
return res_buff;
}
return name;
return nullptr;
}
private:
InternalSymbolizer() {}
static const int kBufferSize = 16 * 1024;
char buffer_[kBufferSize];
char buffer_[16 * 1024];
};
# else // SANITIZER_SUPPORTS_WEAK_HOOKS
@ -470,6 +469,12 @@ static void ChooseSymbolizerTools(IntrusiveList<SymbolizerTool> *list,
VReport(2, "Symbolizer is disabled.\n");
return;
}
if (common_flags()->enable_symbolizer_markup) {
VReport(2, "Using symbolizer markup");
SymbolizerTool *tool = new (*allocator) MarkupSymbolizerTool();
CHECK(tool);
list->push_back(tool);
}
if (IsAllocatorOutOfMemory()) {
VReport(2, "Cannot use internal symbolizer: out of memory\n");
} else if (SymbolizerTool *tool = InternalSymbolizer::get(allocator)) {

View File

@ -28,14 +28,41 @@
namespace __sanitizer {
#if !SANITIZER_GO
// Heuristically decides whether a symbolized frame belongs to sanitizer
// runtime internals (and should therefore be skipped in error summaries).
// A null frame counts as internal.
static bool FrameIsInternal(const SymbolizedStack *frame) {
  if (!frame)
    return true;
  const char *file = frame->info.file;
  if (file) {
    // On Gentoo, the path is g++-*, so there's *not* a missing /.
    if (internal_strstr(file, "/compiler-rt/lib/") ||
        internal_strstr(file, "/include/c++/") ||
        internal_strstr(file, "/include/g++") ||
        internal_strstr(file, "\\compiler-rt\\lib\\"))
      return true;
  }
  const char *module = frame->info.module;
  // "clang_rt." matches both "libclang_rt.*" and "clang_rt.*" module names.
  if (module && internal_strstr(module, "clang_rt."))
    return true;
  return false;
}
// Walks the singly-linked frame list and returns the first frame that is not
// sanitizer-internal, or nullptr if every frame is internal.
const SymbolizedStack *SkipInternalFrames(const SymbolizedStack *frames) {
  const SymbolizedStack *frame = frames;
  while (frame) {
    if (!FrameIsInternal(frame))
      return frame;
    frame = frame->next;
  }
  return nullptr;
}
void ReportErrorSummary(const char *error_type, const AddressInfo &info,
const char *alt_tool_name) {
if (!common_flags()->print_summary) return;
InternalScopedString buff;
buff.append("%s ", error_type);
RenderFrame(&buff, "%L %F", 0, info.address, &info,
common_flags()->symbolize_vs_style,
common_flags()->strip_path_prefix);
buff.AppendF("%s ", error_type);
StackTracePrinter::GetOrInit()->RenderFrame(
&buff, "%L %F", 0, info.address, &info,
common_flags()->symbolize_vs_style, common_flags()->strip_path_prefix);
ReportErrorSummary(buff.data(), alt_tool_name);
}
#endif
@ -75,16 +102,33 @@ void ReportErrorSummary(const char *error_type, const StackTrace *stack,
#if !SANITIZER_GO
if (!common_flags()->print_summary)
return;
if (stack->size == 0) {
ReportErrorSummary(error_type);
return;
// Find first non-internal stack frame.
for (uptr i = 0; i < stack->size; ++i) {
uptr pc = StackTrace::GetPreviousInstructionPc(stack->trace[i]);
SymbolizedStackHolder symbolized_stack(
Symbolizer::GetOrInit()->SymbolizePC(pc));
if (const SymbolizedStack *frame = symbolized_stack.get()) {
if (const SymbolizedStack *summary_frame = SkipInternalFrames(frame)) {
ReportErrorSummary(error_type, summary_frame->info, alt_tool_name);
return;
}
}
}
// Currently, we include the first stack frame into the report summary.
// Maybe sometimes we need to choose another frame (e.g. skip memcpy/etc).
uptr pc = StackTrace::GetPreviousInstructionPc(stack->trace[0]);
SymbolizedStack *frame = Symbolizer::GetOrInit()->SymbolizePC(pc);
ReportErrorSummary(error_type, frame->info, alt_tool_name);
frame->ClearAll();
// Fallback to the top one.
if (stack->size) {
uptr pc = StackTrace::GetPreviousInstructionPc(stack->trace[0]);
SymbolizedStackHolder symbolized_stack(
Symbolizer::GetOrInit()->SymbolizePC(pc));
if (const SymbolizedStack *frame = symbolized_stack.get()) {
ReportErrorSummary(error_type, frame->info, alt_tool_name);
return;
}
}
// Fallback to a summary without location.
ReportErrorSummary(error_type);
#endif
}
@ -148,22 +192,22 @@ static void MaybeReportNonExecRegion(uptr pc) {
static void PrintMemoryByte(InternalScopedString *str, const char *before,
u8 byte) {
SanitizerCommonDecorator d;
str->append("%s%s%x%x%s ", before, d.MemoryByte(), byte >> 4, byte & 15,
d.Default());
str->AppendF("%s%s%x%x%s ", before, d.MemoryByte(), byte >> 4, byte & 15,
d.Default());
}
static void MaybeDumpInstructionBytes(uptr pc) {
if (!common_flags()->dump_instruction_bytes || (pc < GetPageSizeCached()))
return;
InternalScopedString str;
str.append("First 16 instruction bytes at pc: ");
str.AppendF("First 16 instruction bytes at pc: ");
if (IsAccessibleMemoryRange(pc, 16)) {
for (int i = 0; i < 16; ++i) {
PrintMemoryByte(&str, "", ((u8 *)pc)[i]);
}
str.append("\n");
str.AppendF("\n");
} else {
str.append("unaccessible\n");
str.AppendF("unaccessible\n");
}
Report("%s", str.data());
}

View File

@ -0,0 +1,33 @@
//===-- sanitizer_symbolizer_report_fuchsia.cpp
//-----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implementation of the report functions for fuchsia.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
#if SANITIZER_SYMBOLIZER_MARKUP
# include "sanitizer_common.h"
namespace __sanitizer {
// Deadly-signal reporting entry points are intentionally no-ops in
// SANITIZER_SYMBOLIZER_MARKUP builds: nothing is rendered in-process.
void StartReportDeadlySignal() {}

// No-op, same rationale as StartReportDeadlySignal above.
void ReportDeadlySignal(const SignalContext &sig, u32 tid,
                        UnwindSignalStackCallbackType unwind,
                        const void *unwind_context) {}

// No-op, same rationale as StartReportDeadlySignal above.
void HandleDeadlySignal(void *siginfo, void *context, u32 tid,
                        UnwindSignalStackCallbackType unwind,
                        const void *unwind_context) {}
} // namespace __sanitizer
#endif // SANITIZER_SYMBOLIZER_MARKUP

View File

@ -175,9 +175,7 @@ const char *WinSymbolizerTool::Demangle(const char *name) {
return name;
}
const char *Symbolizer::PlatformDemangle(const char *name) {
return name;
}
const char *Symbolizer::PlatformDemangle(const char *name) { return nullptr; }
namespace {
struct ScopedHandle {
@ -233,7 +231,7 @@ bool SymbolizerProcess::StartSymbolizerSubprocess() {
CHECK(!internal_strchr(arg, '"') && "quotes in args unsupported");
CHECK(arglen > 0 && arg[arglen - 1] != '\\' &&
"args ending in backslash and empty args unsupported");
command_line.append("\"%s\" ", arg);
command_line.AppendF("\"%s\" ", arg);
}
VReport(3, "Launching symbolizer command: %s\n", command_line.data());

View File

@ -0,0 +1,131 @@
//===-- sanitizer_syscall_linux_hexagon.inc ---------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implementations of internal_syscall and internal_iserror for Linux/hexagon.
//
//===----------------------------------------------------------------------===//
#define SYSCALL(name) __NR_##name
#define __internal_syscall_LL_E(x) \
((union { \
long long ll; \
long l[2]; \
}){.ll = x}) \
.l[0], \
((union { \
long long ll; \
long l[2]; \
}){.ll = x}) \
.l[1]
#define __internal_syscall_LL_O(x) 0, __SYSCALL_LL_E((x))
#define __asm_syscall(...) \
do { \
__asm__ __volatile__("trap0(#1)" : "=r"(r0) : __VA_ARGS__ : "memory"); \
return r0; \
} while (0)
#define __internal_syscall0(n) (__internal_syscall)(n)

// Each overload loads the syscall number into r6 and the arguments into
// r0..r5, then executes trap0(#1) via __asm_syscall; the kernel's result
// comes back in r0 (which __asm_syscall returns).
static uptr __internal_syscall(long n) {
  register u32 r6 __asm__("r6") = n;
  register u32 r0 __asm__("r0");
  __asm_syscall("r"(r6));
}

#define __internal_syscall1(n, a1) (__internal_syscall)(n, (long)(a1))

static uptr __internal_syscall(long n, long a) {
  register u32 r6 __asm__("r6") = n;
  register u32 r0 __asm__("r0") = a;
  __asm_syscall("r"(r6), "0"(r0));
}

#define __internal_syscall2(n, a1, a2) \
  (__internal_syscall)(n, (long)(a1), (long)(a2))

static uptr __internal_syscall(long n, long a, long b) {
  register u32 r6 __asm__("r6") = n;
  register u32 r0 __asm__("r0") = a;
  register u32 r1 __asm__("r1") = b;
  __asm_syscall("r"(r6), "0"(r0), "r"(r1));
}

#define __internal_syscall3(n, a1, a2, a3) \
  (__internal_syscall)(n, (long)(a1), (long)(a2), (long)(a3))

static uptr __internal_syscall(long n, long a, long b, long c) {
  register u32 r6 __asm__("r6") = n;
  register u32 r0 __asm__("r0") = a;
  register u32 r1 __asm__("r1") = b;
  register u32 r2 __asm__("r2") = c;
  __asm_syscall("r"(r6), "0"(r0), "r"(r1), "r"(r2));
}

#define __internal_syscall4(n, a1, a2, a3, a4) \
  (__internal_syscall)(n, (long)(a1), (long)(a2), (long)(a3), (long)(a4))

static uptr __internal_syscall(long n, long a, long b, long c, long d) {
  register u32 r6 __asm__("r6") = n;
  register u32 r0 __asm__("r0") = a;
  register u32 r1 __asm__("r1") = b;
  register u32 r2 __asm__("r2") = c;
  register u32 r3 __asm__("r3") = d;
  __asm_syscall("r"(r6), "0"(r0), "r"(r1), "r"(r2), "r"(r3));
}

#define __internal_syscall5(n, a1, a2, a3, a4, a5)                        \
  (__internal_syscall)(n, (long)(a1), (long)(a2), (long)(a3), (long)(a4), \
                       (long)(a5))

static uptr __internal_syscall(long n, long a, long b, long c, long d, long e) {
  register u32 r6 __asm__("r6") = n;
  register u32 r0 __asm__("r0") = a;
  register u32 r1 __asm__("r1") = b;
  register u32 r2 __asm__("r2") = c;
  register u32 r3 __asm__("r3") = d;
  register u32 r4 __asm__("r4") = e;
  __asm_syscall("r"(r6), "0"(r0), "r"(r1), "r"(r2), "r"(r3), "r"(r4));
}

#define __internal_syscall6(n, a1, a2, a3, a4, a5, a6)                    \
  (__internal_syscall)(n, (long)(a1), (long)(a2), (long)(a3), (long)(a4), \
                       (long)(a5), (long)(a6))

static uptr __internal_syscall(long n, long a, long b, long c, long d, long e,
                               long f) {
  register u32 r6 __asm__("r6") = n;
  register u32 r0 __asm__("r0") = a;
  register u32 r1 __asm__("r1") = b;
  register u32 r2 __asm__("r2") = c;
  register u32 r3 __asm__("r3") = d;
  register u32 r4 __asm__("r4") = e;
  register u32 r5 __asm__("r5") = f;
  __asm_syscall("r"(r6), "0"(r0), "r"(r1), "r"(r2), "r"(r3), "r"(r4), "r"(r5));
}

// internal_syscall(nr, ...) counts its arguments and dispatches to the
// matching __internal_syscallN macro above.
#define __SYSCALL_NARGS_X(a1, a2, a3, a4, a5, a6, a7, a8, n, ...) n
#define __SYSCALL_NARGS(...) \
  __SYSCALL_NARGS_X(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0, )
#define __SYSCALL_CONCAT_X(a, b) a##b
#define __SYSCALL_CONCAT(a, b) __SYSCALL_CONCAT_X(a, b)
#define __SYSCALL_DISP(b, ...) \
  __SYSCALL_CONCAT(b, __SYSCALL_NARGS(__VA_ARGS__))(__VA_ARGS__)
#define internal_syscall(...) __SYSCALL_DISP(__internal_syscall, __VA_ARGS__)
// Helper function used to avoid clobbering of errno.
// Linux reports errors as -errno in the result register; as an unsigned
// value those land in the top 4095 values of the uptr range.
bool internal_iserror(uptr retval, int *rverrno) {
  if (retval < (uptr)-4095)
    return false;
  if (rverrno)
    *rverrno = -retval;
  return true;
}

View File

@ -0,0 +1,171 @@
//===-- sanitizer_syscall_linux_loongarch64.inc -----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implementations of internal_syscall and internal_iserror for
// Linux/loongarch64.
//
//===----------------------------------------------------------------------===//
// About local register variables:
// https://gcc.gnu.org/onlinedocs/gcc/Local-Register-Variables.html#Local-Register-Variables
//
// Kernel ABI:
// https://lore.kernel.org/loongarch/1f353678-3398-e30b-1c87-6edb278f74db@xen0n.name/T/#m1613bc86c2d7bf5f6da92bd62984302bfd699a2f
// syscall number is placed in a7
// parameters, if present, are placed in a0-a6
// upon return:
// the return value is placed in a0
// t0-t8 should be considered clobbered
// all other registers are preserved
#define SYSCALL(name) __NR_##name

// Per the kernel ABI notes above, t0-t8 are clobbered across a syscall and
// all other registers are preserved.
#define INTERNAL_SYSCALL_CLOBBERS \
  "memory", "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", "$t8"

// Each overload places the syscall number in $a7 and arguments in $a0..$a6;
// `syscall 0` traps into the kernel, which returns the result in $a0.
static uptr __internal_syscall(u64 nr) {
  register u64 a7 asm("$a7") = nr;
  register u64 a0 asm("$a0");
  __asm__ volatile("syscall 0\n\t"
                   : "=r"(a0)
                   : "r"(a7)
                   : INTERNAL_SYSCALL_CLOBBERS);
  return a0;
}
#define __internal_syscall0(n) (__internal_syscall)(n)

static uptr __internal_syscall(u64 nr, u64 arg1) {
  register u64 a7 asm("$a7") = nr;
  register u64 a0 asm("$a0") = arg1;
  __asm__ volatile("syscall 0\n\t"
                   : "+r"(a0)
                   : "r"(a7)
                   : INTERNAL_SYSCALL_CLOBBERS);
  return a0;
}
#define __internal_syscall1(n, a1) (__internal_syscall)(n, (u64)(a1))

static uptr __internal_syscall(u64 nr, u64 arg1, long arg2) {
  register u64 a7 asm("$a7") = nr;
  register u64 a0 asm("$a0") = arg1;
  register u64 a1 asm("$a1") = arg2;
  __asm__ volatile("syscall 0\n\t"
                   : "+r"(a0)
                   : "r"(a7), "r"(a1)
                   : INTERNAL_SYSCALL_CLOBBERS);
  return a0;
}
#define __internal_syscall2(n, a1, a2) \
  (__internal_syscall)(n, (u64)(a1), (long)(a2))

static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3) {
  register u64 a7 asm("$a7") = nr;
  register u64 a0 asm("$a0") = arg1;
  register u64 a1 asm("$a1") = arg2;
  register u64 a2 asm("$a2") = arg3;
  __asm__ volatile("syscall 0\n\t"
                   : "+r"(a0)
                   : "r"(a7), "r"(a1), "r"(a2)
                   : INTERNAL_SYSCALL_CLOBBERS);
  return a0;
}
#define __internal_syscall3(n, a1, a2, a3) \
  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3))

static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3,
                               u64 arg4) {
  register u64 a7 asm("$a7") = nr;
  register u64 a0 asm("$a0") = arg1;
  register u64 a1 asm("$a1") = arg2;
  register u64 a2 asm("$a2") = arg3;
  register u64 a3 asm("$a3") = arg4;
  __asm__ volatile("syscall 0\n\t"
                   : "+r"(a0)
                   : "r"(a7), "r"(a1), "r"(a2), "r"(a3)
                   : INTERNAL_SYSCALL_CLOBBERS);
  return a0;
}
#define __internal_syscall4(n, a1, a2, a3, a4) \
  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4))

static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4,
                               long arg5) {
  register u64 a7 asm("$a7") = nr;
  register u64 a0 asm("$a0") = arg1;
  register u64 a1 asm("$a1") = arg2;
  register u64 a2 asm("$a2") = arg3;
  register u64 a3 asm("$a3") = arg4;
  register u64 a4 asm("$a4") = arg5;
  __asm__ volatile("syscall 0\n\t"
                   : "+r"(a0)
                   : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4)
                   : INTERNAL_SYSCALL_CLOBBERS);
  return a0;
}
#define __internal_syscall5(n, a1, a2, a3, a4, a5)                       \
  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \
                       (u64)(a5))

static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4,
                               long arg5, long arg6) {
  register u64 a7 asm("$a7") = nr;
  register u64 a0 asm("$a0") = arg1;
  register u64 a1 asm("$a1") = arg2;
  register u64 a2 asm("$a2") = arg3;
  register u64 a3 asm("$a3") = arg4;
  register u64 a4 asm("$a4") = arg5;
  register u64 a5 asm("$a5") = arg6;
  __asm__ volatile("syscall 0\n\t"
                   : "+r"(a0)
                   : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4), "r"(a5)
                   : INTERNAL_SYSCALL_CLOBBERS);
  return a0;
}
#define __internal_syscall6(n, a1, a2, a3, a4, a5, a6)                   \
  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \
                       (u64)(a5), (long)(a6))

static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4,
                               long arg5, long arg6, long arg7) {
  register u64 a7 asm("$a7") = nr;
  register u64 a0 asm("$a0") = arg1;
  register u64 a1 asm("$a1") = arg2;
  register u64 a2 asm("$a2") = arg3;
  register u64 a3 asm("$a3") = arg4;
  register u64 a4 asm("$a4") = arg5;
  register u64 a5 asm("$a5") = arg6;
  register u64 a6 asm("$a6") = arg7;
  __asm__ volatile("syscall 0\n\t"
                   : "+r"(a0)
                   : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4), "r"(a5),
                     "r"(a6)
                   : INTERNAL_SYSCALL_CLOBBERS);
  return a0;
}
#define __internal_syscall7(n, a1, a2, a3, a4, a5, a6, a7)               \
  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \
                       (u64)(a5), (long)(a6), (long)(a7))

// internal_syscall(nr, ...) counts its arguments and dispatches to the
// matching __internal_syscallN macro above.
#define __SYSCALL_NARGS_X(a1, a2, a3, a4, a5, a6, a7, a8, n, ...) n
#define __SYSCALL_NARGS(...) \
  __SYSCALL_NARGS_X(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0, )
#define __SYSCALL_CONCAT_X(a, b) a##b
#define __SYSCALL_CONCAT(a, b) __SYSCALL_CONCAT_X(a, b)
#define __SYSCALL_DISP(b, ...) \
  __SYSCALL_CONCAT(b, __SYSCALL_NARGS(__VA_ARGS__))(__VA_ARGS__)
#define internal_syscall(...) __SYSCALL_DISP(__internal_syscall, __VA_ARGS__)

// Helper function used to avoid clobbering of errno.
bool internal_iserror(uptr retval, int *internal_errno) {
  // Error returns are -errno encoded as an unsigned value in the top 4095
  // values of the uptr range.
  if (retval >= (uptr)-4095) {
    if (internal_errno)
      *internal_errno = -retval;
    return true;
  }
  return false;
}

View File

@ -0,0 +1,174 @@
//===-- sanitizer_syscall_linux_riscv64.inc ---------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implementations of internal_syscall and internal_iserror for Linux/riscv64.
//
//===----------------------------------------------------------------------===//
// About local register variables:
// https://gcc.gnu.org/onlinedocs/gcc/Local-Register-Variables.html#Local-Register-Variables
//
// Kernel ABI...
// To my surprise I haven't found much information regarding it.
// Kernel source and internet browsing shows that:
// syscall number is passed in a7
// (http://man7.org/linux/man-pages/man2/syscall.2.html) results are returned
// in a0 and a1 (http://man7.org/linux/man-pages/man2/syscall.2.html) arguments
// are passed in: a0-a7 (see below)
//
// Regarding the arguments. The only "documentation" I could find is
// this comment (!!!) by Bruce Hold on google forums (!!!):
// https://groups.google.com/a/groups.riscv.org/forum/#!topic/sw-dev/exbrzM3GZDQ
// Confirmed by inspecting glibc sources.
// Great way to document things.
// Maps a syscall name to its __NR_* number.
#define SYSCALL(name) __NR_##name
// Clobber list for the inline asm: the kernel may read/write memory.
#define INTERNAL_SYSCALL_CLOBBERS "memory"
// 0-argument syscall: number pinned to a7, result returned in a0. a0 is a
// pure output here ("=r"), so it is deliberately left uninitialized.
static uptr __internal_syscall(u64 nr) {
  register u64 a7 asm("a7") = nr;
  register u64 a0 asm("a0");
  __asm__ volatile("ecall\n\t"
                   : "=r"(a0)
                   : "r"(a7)
                   : INTERNAL_SYSCALL_CLOBBERS);
  return a0;
}
#define __internal_syscall0(n) (__internal_syscall)(n)
// 1-argument syscall: argument in a0, which doubles as the result register
// (read-write "+r" constraint).
static uptr __internal_syscall(u64 nr, u64 arg1) {
  register u64 a7 asm("a7") = nr;
  register u64 a0 asm("a0") = arg1;
  __asm__ volatile("ecall\n\t"
                   : "+r"(a0)
                   : "r"(a7)
                   : INTERNAL_SYSCALL_CLOBBERS);
  return a0;
}
#define __internal_syscall1(n, a1) (__internal_syscall)(n, (u64)(a1))
// 2-argument syscall: arguments pinned to a0-a1, result returned in a0.
static uptr __internal_syscall(u64 nr, u64 arg1, long arg2) {
  register u64 a7 asm("a7") = nr;
  register u64 a0 asm("a0") = arg1;
  register u64 a1 asm("a1") = arg2;
  __asm__ volatile("ecall\n\t"
                   : "+r"(a0)
                   : "r"(a7), "r"(a1)
                   : INTERNAL_SYSCALL_CLOBBERS);
  return a0;
}
#define __internal_syscall2(n, a1, a2) \
  (__internal_syscall)(n, (u64)(a1), (long)(a2))
// 3-argument syscall: arguments pinned to a0-a2, result returned in a0.
static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3) {
  register u64 a7 asm("a7") = nr;
  register u64 a0 asm("a0") = arg1;
  register u64 a1 asm("a1") = arg2;
  register u64 a2 asm("a2") = arg3;
  __asm__ volatile("ecall\n\t"
                   : "+r"(a0)
                   : "r"(a7), "r"(a1), "r"(a2)
                   : INTERNAL_SYSCALL_CLOBBERS);
  return a0;
}
#define __internal_syscall3(n, a1, a2, a3) \
  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3))
// 4-argument syscall: arguments pinned to a0-a3, result returned in a0.
static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3,
                               u64 arg4) {
  register u64 a7 asm("a7") = nr;
  register u64 a0 asm("a0") = arg1;
  register u64 a1 asm("a1") = arg2;
  register u64 a2 asm("a2") = arg3;
  register u64 a3 asm("a3") = arg4;
  __asm__ volatile("ecall\n\t"
                   : "+r"(a0)
                   : "r"(a7), "r"(a1), "r"(a2), "r"(a3)
                   : INTERNAL_SYSCALL_CLOBBERS);
  return a0;
}
#define __internal_syscall4(n, a1, a2, a3, a4) \
  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4))
// 5-argument syscall: arguments pinned to a0-a4, result returned in a0.
static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4,
                               long arg5) {
  register u64 a7 asm("a7") = nr;
  register u64 a0 asm("a0") = arg1;
  register u64 a1 asm("a1") = arg2;
  register u64 a2 asm("a2") = arg3;
  register u64 a3 asm("a3") = arg4;
  register u64 a4 asm("a4") = arg5;
  __asm__ volatile("ecall\n\t"
                   : "+r"(a0)
                   : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4)
                   : INTERNAL_SYSCALL_CLOBBERS);
  return a0;
}
#define __internal_syscall5(n, a1, a2, a3, a4, a5) \
  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \
                       (u64)(a5))
// 6-argument syscall: arguments pinned to a0-a5, result returned in a0.
static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4,
                               long arg5, long arg6) {
  register u64 a7 asm("a7") = nr;
  register u64 a0 asm("a0") = arg1;
  register u64 a1 asm("a1") = arg2;
  register u64 a2 asm("a2") = arg3;
  register u64 a3 asm("a3") = arg4;
  register u64 a4 asm("a4") = arg5;
  register u64 a5 asm("a5") = arg6;
  __asm__ volatile("ecall\n\t"
                   : "+r"(a0)
                   : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4), "r"(a5)
                   : INTERNAL_SYSCALL_CLOBBERS);
  return a0;
}
#define __internal_syscall6(n, a1, a2, a3, a4, a5, a6) \
  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \
                       (u64)(a5), (long)(a6))
// 7-argument syscall: arguments pinned to a0-a6, result returned in a0.
// All argument registers are listed as asm inputs so the compiler keeps the
// values live in their pinned registers across the `ecall` trap.
static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4,
                               long arg5, long arg6, long arg7) {
  register u64 a7 asm("a7") = nr;
  register u64 a0 asm("a0") = arg1;
  register u64 a1 asm("a1") = arg2;
  register u64 a2 asm("a2") = arg3;
  register u64 a3 asm("a3") = arg4;
  register u64 a4 asm("a4") = arg5;
  register u64 a5 asm("a5") = arg6;
  register u64 a6 asm("a6") = arg7;
  __asm__ volatile("ecall\n\t"
                   : "+r"(a0)
                   : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4), "r"(a5),
                     "r"(a6)
                   : INTERNAL_SYSCALL_CLOBBERS);
  return a0;
}
#define __internal_syscall7(n, a1, a2, a3, a4, a5, a6, a7) \
  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \
                       (u64)(a5), (long)(a6), (long)(a7))
// Argument-counting dispatch. __SYSCALL_NARGS expands to the number of
// syscall arguments (0-7, i.e. the argument count minus the syscall number)
// by sliding the variadic list against the reversed count sequence below.
#define __SYSCALL_NARGS_X(a1, a2, a3, a4, a5, a6, a7, a8, n, ...) n
#define __SYSCALL_NARGS(...) \
  __SYSCALL_NARGS_X(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0, )
// Two-level concat so the NARGS expansion happens before token pasting.
#define __SYSCALL_CONCAT_X(a, b) a##b
#define __SYSCALL_CONCAT(a, b) __SYSCALL_CONCAT_X(a, b)
// Pastes the count onto the base name to pick the right wrapper, e.g.
// internal_syscall(nr, x, y) -> __internal_syscall2(nr, x, y).
#define __SYSCALL_DISP(b, ...) \
  __SYSCALL_CONCAT(b, __SYSCALL_NARGS(__VA_ARGS__))(__VA_ARGS__)
#define internal_syscall(...) __SYSCALL_DISP(__internal_syscall, __VA_ARGS__)
// Decodes a raw syscall return value without touching the global errno.
// Linux signals failure by returning a value in [-4095, -1]; when that is
// the case, optionally store the positive errno via *rverrno.
bool internal_iserror(uptr retval, int *rverrno) {
  if (retval < (uptr)-4095)
    return false;  // Success: value is outside the kernel error range.
  if (rverrno)
    *rverrno = -retval;
  return true;
}

View File

@ -23,6 +23,9 @@ void ThreadArgRetval::CreateLocked(uptr thread, bool detached,
Data& t = data_[thread];
t = {};
t.gen = gen_++;
static_assert(sizeof(gen_) == sizeof(u32) && kInvalidGen == UINT32_MAX);
if (gen_ == kInvalidGen)
gen_ = 0;
t.detached = detached;
t.args = args;
}
@ -53,16 +56,28 @@ void ThreadArgRetval::Finish(uptr thread, void* retval) {
u32 ThreadArgRetval::BeforeJoin(uptr thread) const {
__sanitizer::Lock lock(&mtx_);
auto t = data_.find(thread);
CHECK(t);
CHECK(!t->second.detached);
return t->second.gen;
if (t && !t->second.detached) {
return t->second.gen;
}
if (!common_flags()->detect_invalid_join)
return kInvalidGen;
const char* reason = "unknown";
if (!t) {
reason = "already joined";
} else if (t->second.detached) {
reason = "detached";
}
Report("ERROR: %s: Joining %s thread, aborting.\n", SanitizerToolName,
reason);
Die();
}
void ThreadArgRetval::AfterJoin(uptr thread, u32 gen) {
__sanitizer::Lock lock(&mtx_);
auto t = data_.find(thread);
if (!t || gen != t->second.gen) {
// Thread was reused and erased by any other event.
// Thread was reused and erased by any other event, or we had an invalid
// join.
return;
}
CHECK(!t->second.detached);

View File

@ -93,6 +93,7 @@ class SANITIZER_MUTEX ThreadArgRetval {
// will keep pointers alive forever, missing leaks caused by cancelation.
private:
static const u32 kInvalidGen = UINT32_MAX;
struct Data {
Args args;
u32 gen; // Avoid collision if thread id re-used.

View File

@ -121,25 +121,26 @@ DTLS::DTV *DTLS_on_tls_get_addr(void *arg_void, void *res,
uptr tls_size = 0;
uptr tls_beg = reinterpret_cast<uptr>(res) - arg->offset - kDtvOffset;
VReport(2,
"__tls_get_addr: %p {0x%zx,0x%zx} => %p; tls_beg: 0x%zx; sp: %p "
"__tls_get_addr: %p {0x%zx,0x%zx} => %p; tls_beg: %p; sp: %p "
"num_live_dtls %zd\n",
(void *)arg, arg->dso_id, arg->offset, res, tls_beg, (void *)&tls_beg,
(void *)arg, arg->dso_id, arg->offset, res, (void *)tls_beg,
(void *)&tls_beg,
atomic_load(&number_of_live_dtls, memory_order_relaxed));
if (dtls.last_memalign_ptr == tls_beg) {
tls_size = dtls.last_memalign_size;
VReport(2, "__tls_get_addr: glibc <=2.24 suspected; tls={0x%zx,0x%zx}\n",
tls_beg, tls_size);
VReport(2, "__tls_get_addr: glibc <=2.24 suspected; tls={%p,0x%zx}\n",
(void *)tls_beg, tls_size);
} else if (tls_beg >= static_tls_begin && tls_beg < static_tls_end) {
// This is the static TLS block which was initialized / unpoisoned at thread
// creation.
VReport(2, "__tls_get_addr: static tls: 0x%zx\n", tls_beg);
VReport(2, "__tls_get_addr: static tls: %p\n", (void *)tls_beg);
tls_size = 0;
} else if (const void *start =
__sanitizer_get_allocated_begin((void *)tls_beg)) {
tls_beg = (uptr)start;
tls_size = __sanitizer_get_allocated_size(start);
VReport(2, "__tls_get_addr: glibc >=2.25 suspected; tls={0x%zx,0x%zx}\n",
tls_beg, tls_size);
VReport(2, "__tls_get_addr: glibc >=2.25 suspected; tls={%p,0x%zx}\n",
(void *)tls_beg, tls_size);
} else {
VReport(2, "__tls_get_addr: Can't guess glibc version\n");
// This may happen inside the DTOR of main thread, so just ignore it.

View File

@ -0,0 +1,66 @@
//===-- sanitizer_unwind_fuchsia.cpp --------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// Sanitizer unwind Fuchsia specific functions.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
#if SANITIZER_FUCHSIA
# include <limits.h>
# include <unwind.h>
# include "sanitizer_common.h"
# include "sanitizer_stacktrace.h"
namespace __sanitizer {
# if SANITIZER_CAN_SLOW_UNWIND
// State handed to the Unwind_Trace callback through _Unwind_Backtrace's
// opaque parameter.
struct UnwindTraceArg {
  BufferedStackTrace *stack;  // Destination buffer for collected PCs.
  u32 max_depth;              // Maximum number of frames to record.
};
_Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx, void *param) {
UnwindTraceArg *arg = static_cast<UnwindTraceArg *>(param);
CHECK_LT(arg->stack->size, arg->max_depth);
uptr pc = _Unwind_GetIP(ctx);
if (pc < GetPageSizeCached())
return _URC_NORMAL_STOP;
arg->stack->trace_buffer[arg->stack->size++] = pc;
return (arg->stack->size == arg->max_depth ? _URC_NORMAL_STOP
: _URC_NO_REASON);
}
// Slow-path unwinder: walks the stack with _Unwind_Backtrace, then rewrites
// the buffer so that the caller-provided `pc` ends up as the top frame.
void BufferedStackTrace::UnwindSlow(uptr pc, u32 max_depth) {
  CHECK_GE(max_depth, 2);
  size = 0;
  // Request one extra frame (capped at kStackTraceMax) so a frame is still
  // left over after popping the unwinder's own entry below.
  UnwindTraceArg arg = {this, Min(max_depth + 1, kStackTraceMax)};
  _Unwind_Backtrace(Unwind_Trace, &arg);
  CHECK_GT(size, 0);
  // We need to pop a few frames so that pc is on top.
  uptr to_pop = LocatePcInTrace(pc);
  // trace_buffer[0] belongs to the current function so we always pop it,
  // unless there is only 1 frame in the stack trace (1 frame is always better
  // than 0!).
  PopStackFrames(Min(to_pop, static_cast<uptr>(1)));
  trace_buffer[0] = pc;
}
// Context-based unwinding overload: validates its arguments and then aborts
// via UNREACHABLE, since no signal context exists to unwind on this platform.
void BufferedStackTrace::UnwindSlow(uptr pc, void *context, u32 max_depth) {
  CHECK(context);
  CHECK_GE(max_depth, 2);
  UNREACHABLE("signal context doesn't exist");
}
# endif // SANITIZER_CAN_SLOW_UNWIND
} // namespace __sanitizer
#endif // SANITIZER_FUCHSIA

View File

@ -70,10 +70,17 @@ void BufferedStackTrace::UnwindSlow(uptr pc, void *context, u32 max_depth) {
stack_frame.AddrStack.Offset = ctx.Rsp;
# endif
# else
# if SANITIZER_ARM
int machine_type = IMAGE_FILE_MACHINE_ARM;
stack_frame.AddrPC.Offset = ctx.Pc;
stack_frame.AddrFrame.Offset = ctx.R11;
stack_frame.AddrStack.Offset = ctx.Sp;
# else
int machine_type = IMAGE_FILE_MACHINE_I386;
stack_frame.AddrPC.Offset = ctx.Eip;
stack_frame.AddrFrame.Offset = ctx.Ebp;
stack_frame.AddrStack.Offset = ctx.Esp;
# endif
# endif
stack_frame.AddrPC.Mode = AddrModeFlat;
stack_frame.AddrFrame.Mode = AddrModeFlat;

Some files were not shown because too many files have changed in this diff Show More