Imported Upstream version 6.10.0.49

Former-commit-id: 1d6753294b2993e1fbf92de9366bb9544db4189b
This commit is contained in:
Xamarin Public Jenkins (auto-signing)
2020-01-16 16:38:04 +00:00
parent d94e79959b
commit 468663ddbb
48518 changed files with 2789335 additions and 61176 deletions


@@ -0,0 +1 @@
BasedOnStyle: Google


@@ -0,0 +1,16 @@
Checks: '-*,clang-diagnostic-*,llvm-*,misc-*,readability-identifier-naming'
CheckOptions:
- key: readability-identifier-naming.ClassCase
value: CamelCase
- key: readability-identifier-naming.EnumCase
value: CamelCase
- key: readability-identifier-naming.FunctionCase
value: CamelCase
- key: readability-identifier-naming.UnionCase
value: CamelCase
- key: readability-identifier-naming.GlobalConstantCase
value: CamelCase
- key: readability-identifier-naming.GlobalConstantPrefix
value: "k"
- key: readability-identifier-naming.VariableCase
value: lower_case


@@ -0,0 +1,307 @@
# Build system for the common Sanitizer runtime support library components.
# These components are shared between AddressSanitizer and ThreadSanitizer.
set(SANITIZER_SOURCES_NOTERMINATION
sanitizer_allocator.cc
sanitizer_common.cc
sanitizer_deadlock_detector1.cc
sanitizer_deadlock_detector2.cc
sanitizer_errno.cc
sanitizer_file.cc
sanitizer_flags.cc
sanitizer_flag_parser.cc
sanitizer_fuchsia.cc
sanitizer_libc.cc
sanitizer_libignore.cc
sanitizer_linux.cc
sanitizer_linux_s390.cc
sanitizer_mac.cc
sanitizer_persistent_allocator.cc
sanitizer_platform_limits_linux.cc
sanitizer_platform_limits_netbsd.cc
sanitizer_platform_limits_posix.cc
sanitizer_platform_limits_solaris.cc
sanitizer_posix.cc
sanitizer_printf.cc
sanitizer_procmaps_common.cc
sanitizer_procmaps_freebsd.cc
sanitizer_procmaps_linux.cc
sanitizer_procmaps_mac.cc
sanitizer_procmaps_solaris.cc
sanitizer_solaris.cc
sanitizer_stackdepot.cc
sanitizer_stacktrace.cc
sanitizer_stacktrace_printer.cc
sanitizer_stoptheworld_mac.cc
sanitizer_suppressions.cc
sanitizer_symbolizer.cc
sanitizer_symbolizer_fuchsia.cc
sanitizer_symbolizer_libbacktrace.cc
sanitizer_symbolizer_mac.cc
sanitizer_symbolizer_win.cc
sanitizer_tls_get_addr.cc
sanitizer_thread_registry.cc
sanitizer_win.cc)
if(UNIX AND NOT APPLE AND NOT OS_NAME MATCHES "SunOS")
list(APPEND SANITIZER_SOURCES_NOTERMINATION
sanitizer_linux_x86_64.S)
list(APPEND SANITIZER_SOURCES_NOTERMINATION
sanitizer_linux_mips64.S)
endif()
set(SANITIZER_SOURCES
${SANITIZER_SOURCES_NOTERMINATION} sanitizer_termination.cc)
# Libc function stubs. These sources should be linked instead of
# SANITIZER_LIBCDEP_SOURCES when the sanitizer_common library must not depend
# on libc.
set(SANITIZER_NOLIBC_SOURCES
sanitizer_common_nolibc.cc)
set(SANITIZER_LIBCDEP_SOURCES
sanitizer_common_libcdep.cc
sanitizer_allocator_checks.cc
sancov_flags.cc
sanitizer_coverage_fuchsia.cc
sanitizer_coverage_libcdep_new.cc
sanitizer_coverage_win_sections.cc
sanitizer_linux_libcdep.cc
sanitizer_mac_libcdep.cc
sanitizer_posix_libcdep.cc
sanitizer_stacktrace_libcdep.cc
sanitizer_stoptheworld_linux_libcdep.cc
sanitizer_symbolizer_libcdep.cc
sanitizer_symbolizer_posix_libcdep.cc
sanitizer_unwind_linux_libcdep.cc)
# Explicitly list all sanitizer_common headers. Not all of these are
# included in sanitizer_common source files, but we need to depend on
# headers when building our custom unit tests.
set(SANITIZER_HEADERS
sanitizer_addrhashmap.h
sanitizer_allocator.h
sanitizer_allocator_bytemap.h
sanitizer_allocator_combined.h
sanitizer_allocator_interface.h
sanitizer_allocator_internal.h
sanitizer_allocator_local_cache.h
sanitizer_allocator_primary32.h
sanitizer_allocator_primary64.h
sanitizer_allocator_secondary.h
sanitizer_allocator_size_class_map.h
sanitizer_allocator_stats.h
sanitizer_atomic.h
sanitizer_atomic_clang.h
sanitizer_atomic_msvc.h
sanitizer_bitvector.h
sanitizer_bvgraph.h
sanitizer_common.h
sanitizer_common_interceptors.inc
sanitizer_common_interceptors_ioctl.inc
sanitizer_common_interceptors_format.inc
sanitizer_common_syscalls.inc
sanitizer_deadlock_detector.h
sanitizer_deadlock_detector_interface.h
sanitizer_errno.h
sanitizer_errno_codes.h
sanitizer_file.h
sanitizer_flag_parser.h
sanitizer_flags.h
sanitizer_flags.inc
sanitizer_fuchsia.h
sanitizer_interface_internal.h
sanitizer_internal_defs.h
sanitizer_lfstack.h
sanitizer_libc.h
sanitizer_libignore.h
sanitizer_linux.h
sanitizer_list.h
sanitizer_mac.h
sanitizer_mutex.h
sanitizer_persistent_allocator.h
sanitizer_placement_new.h
sanitizer_platform.h
sanitizer_platform_interceptors.h
sanitizer_platform_limits_netbsd.h
sanitizer_platform_limits_posix.h
sanitizer_platform_limits_solaris.h
sanitizer_posix.h
sanitizer_procmaps.h
sanitizer_quarantine.h
sanitizer_report_decorator.h
sanitizer_stackdepot.h
sanitizer_stackdepotbase.h
sanitizer_stacktrace.h
sanitizer_stacktrace_printer.h
sanitizer_stoptheworld.h
sanitizer_suppressions.h
sanitizer_symbolizer.h
sanitizer_symbolizer_internal.h
sanitizer_symbolizer_libbacktrace.h
sanitizer_symbolizer_mac.h
sanitizer_syscall_generic.inc
sanitizer_syscall_linux_x86_64.inc
sanitizer_syscall_linux_aarch64.inc
sanitizer_thread_registry.h
sanitizer_vector.h
sanitizer_win.h)
include_directories(..)
set(SANITIZER_COMMON_DEFINITIONS)
include(CheckIncludeFile)
append_have_file_definition(rpc/xdr.h HAVE_RPC_XDR_H SANITIZER_COMMON_DEFINITIONS)
append_have_file_definition(tirpc/rpc/xdr.h HAVE_TIRPC_RPC_XDR_H SANITIZER_COMMON_DEFINITIONS)
set(SANITIZER_CFLAGS ${SANITIZER_COMMON_CFLAGS})
append_rtti_flag(OFF SANITIZER_CFLAGS)
append_list_if(SANITIZER_LIMIT_FRAME_SIZE -Wframe-larger-than=570
SANITIZER_CFLAGS)
append_list_if(COMPILER_RT_HAS_WGLOBAL_CONSTRUCTORS_FLAG -Wglobal-constructors
SANITIZER_CFLAGS)
if (LLVM_ENABLE_PEDANTIC AND UNIX AND NOT APPLE)
# With -pedantic, our .S files raise warnings about empty macro arguments
# from __USER_LABEL_PREFIX__ being an empty arg to GLUE(). Unfortunately,
# there is no simple way to test for an empty define, nor to disable just
# that warning or to disable -pedantic. There is also no simple way to
# remove -pedantic from just this file (we'd have to remove from
# CMAKE_C*_FLAGS and re-add as a source property to all the non-.S files).
set_source_files_properties(sanitizer_linux_x86_64.S
PROPERTIES COMPILE_FLAGS "-w")
set_source_files_properties(sanitizer_linux_mips64.S
PROPERTIES COMPILE_FLAGS "-w")
endif ()
if(APPLE)
set(OS_OPTION OS ${SANITIZER_COMMON_SUPPORTED_OS})
endif()
add_compiler_rt_object_libraries(RTSanitizerCommon
${OS_OPTION}
ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH}
SOURCES ${SANITIZER_SOURCES}
CFLAGS ${SANITIZER_CFLAGS}
DEFS ${SANITIZER_COMMON_DEFINITIONS})
add_compiler_rt_object_libraries(RTSanitizerCommonNoTermination
${OS_OPTION}
ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH}
SOURCES ${SANITIZER_SOURCES_NOTERMINATION}
CFLAGS ${SANITIZER_CFLAGS}
DEFS ${SANITIZER_COMMON_DEFINITIONS})
add_compiler_rt_object_libraries(RTSanitizerCommonNoLibc
${OS_OPTION}
ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH}
SOURCES ${SANITIZER_NOLIBC_SOURCES}
CFLAGS ${SANITIZER_CFLAGS}
DEFS ${SANITIZER_COMMON_DEFINITIONS})
add_compiler_rt_object_libraries(RTSanitizerCommonLibc
${OS_OPTION}
ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH}
SOURCES ${SANITIZER_LIBCDEP_SOURCES}
CFLAGS ${SANITIZER_CFLAGS}
DEFS ${SANITIZER_COMMON_DEFINITIONS})
set(SANITIZER_NO_WEAK_HOOKS_CFLAGS ${SANITIZER_CFLAGS})
list(APPEND SANITIZER_NO_WEAK_HOOKS_CFLAGS "-DSANITIZER_SUPPORTS_WEAK_HOOKS=0")
add_compiler_rt_object_libraries(RTSanitizerCommonNoHooks
${OS_OPTION}
ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH}
SOURCES ${SANITIZER_SOURCES}
CFLAGS ${SANITIZER_NO_WEAK_HOOKS_CFLAGS}
DEFS ${SANITIZER_COMMON_DEFINITIONS})
add_compiler_rt_object_libraries(RTSanitizerCommonLibcNoHooks
${OS_OPTION}
ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH}
SOURCES ${SANITIZER_LIBCDEP_SOURCES}
CFLAGS ${SANITIZER_NO_WEAK_HOOKS_CFLAGS}
DEFS ${SANITIZER_COMMON_DEFINITIONS})
if(OS_NAME MATCHES "SunOS")
# Solaris ld doesn't support the non-standard GNU ld extension of adding
# __start_SECNAME and __stop_SECNAME labels to sections whose names are
# valid C identifiers. Instead we add our own definitions for the
# __sancov_guards section.
add_compiler_rt_object_libraries(SancovBegin
ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH}
SOURCES sancov_begin.S
CFLAGS ${SANITIZER_CFLAGS}
DEFS ${SANITIZER_COMMON_DEFINITIONS})
add_compiler_rt_runtime(clang_rt.sancov_begin
STATIC
ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH}
OBJECT_LIBS SancovBegin
CFLAGS ${SANITIZER_CFLAGS}
DEFS ${SANITIZER_COMMON_DEFINITIONS})
add_compiler_rt_object_libraries(SancovEnd
ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH}
SOURCES sancov_end.S
CFLAGS ${SANITIZER_CFLAGS}
DEFS ${SANITIZER_COMMON_DEFINITIONS})
add_compiler_rt_runtime(clang_rt.sancov_end
STATIC
ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH}
OBJECT_LIBS SancovEnd
CFLAGS ${SANITIZER_CFLAGS}
DEFS ${SANITIZER_COMMON_DEFINITIONS})
endif()
if(WIN32)
add_compiler_rt_object_libraries(SanitizerCommonWeakInterception
${SANITIZER_COMMON_SUPPORTED_OS}
ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH}
SOURCES sanitizer_win_weak_interception.cc
CFLAGS ${SANITIZER_CFLAGS} -DSANITIZER_DYNAMIC
DEFS ${SANITIZER_COMMON_DEFINITIONS})
add_compiler_rt_object_libraries(SancovWeakInterception
${SANITIZER_COMMON_SUPPORTED_OS}
ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH}
SOURCES sanitizer_coverage_win_weak_interception.cc
CFLAGS ${SANITIZER_CFLAGS} -DSANITIZER_DYNAMIC
DEFS ${SANITIZER_COMMON_DEFINITIONS})
add_compiler_rt_object_libraries(SanitizerCommonDllThunk
${SANITIZER_COMMON_SUPPORTED_OS}
ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH}
SOURCES sanitizer_win_dll_thunk.cc
CFLAGS ${SANITIZER_CFLAGS} -DSANITIZER_DLL_THUNK
DEFS ${SANITIZER_COMMON_DEFINITIONS})
add_compiler_rt_object_libraries(SancovDllThunk
${SANITIZER_COMMON_SUPPORTED_OS}
ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH}
SOURCES sanitizer_coverage_win_dll_thunk.cc
sanitizer_coverage_win_sections.cc
CFLAGS ${SANITIZER_CFLAGS} -DSANITIZER_DLL_THUNK
DEFS ${SANITIZER_COMMON_DEFINITIONS})
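# MSVC's -Zl keeps default library names out of the produced objects; for
# Clang, -nodefaultlibs is passed instead.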
set(DYNAMIC_RUNTIME_THUNK_CFLAGS "-DSANITIZER_DYNAMIC_RUNTIME_THUNK")
if(MSVC)
list(APPEND DYNAMIC_RUNTIME_THUNK_CFLAGS "-Zl")
elseif(CMAKE_C_COMPILER_ID MATCHES Clang)
list(APPEND DYNAMIC_RUNTIME_THUNK_CFLAGS "-nodefaultlibs")
endif()
add_compiler_rt_object_libraries(SanitizerCommonDynamicRuntimeThunk
${SANITIZER_COMMON_SUPPORTED_OS}
ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH}
SOURCES sanitizer_win_dynamic_runtime_thunk.cc
CFLAGS ${SANITIZER_CFLAGS} ${DYNAMIC_RUNTIME_THUNK_CFLAGS}
DEFS ${SANITIZER_COMMON_DEFINITIONS})
add_compiler_rt_object_libraries(SancovDynamicRuntimeThunk
${SANITIZER_COMMON_SUPPORTED_OS}
ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH}
SOURCES sanitizer_coverage_win_dynamic_runtime_thunk.cc
sanitizer_coverage_win_sections.cc
CFLAGS ${SANITIZER_CFLAGS} ${DYNAMIC_RUNTIME_THUNK_CFLAGS}
DEFS ${SANITIZER_COMMON_DEFINITIONS})
endif()
# Unit tests for common sanitizer runtime.
if(COMPILER_RT_INCLUDE_TESTS)
add_subdirectory(tests)
endif()


@@ -0,0 +1,5 @@
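/* Defines the __start___sancov_guards symbol at the start of the
   __sancov_guards section, for linkers (such as Solaris ld) that do not
   synthesize __start_SECNAME/__stop_SECNAME labels themselves. */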
.type __start___sancov_guards,@object
.globl __start___sancov_guards
.section __sancov_guards,"aw",@progbits
.p2align 2
__start___sancov_guards:


@@ -0,0 +1,5 @@
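/* Defines the matching __stop___sancov_guards symbol at the end of the
   __sancov_guards section; see sancov_begin.S. */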
.type __stop___sancov_guards,@object
.globl __stop___sancov_guards
.section __sancov_guards,"aw",@progbits
.p2align 2
__stop___sancov_guards:


@@ -0,0 +1,59 @@
//===-- sancov_flags.cc -----------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Sanitizer Coverage runtime flags.
//
//===----------------------------------------------------------------------===//
#include "sancov_flags.h"
#include "sanitizer_flag_parser.h"
#include "sanitizer_platform.h"
SANITIZER_INTERFACE_WEAK_DEF(const char*, __sancov_default_options, void) {
return "";
}
using namespace __sanitizer;
namespace __sancov {
SancovFlags sancov_flags_dont_use_directly; // use via flags();
void SancovFlags::SetDefaults() {
#define SANCOV_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#include "sancov_flags.inc"
#undef SANCOV_FLAG
}
static void RegisterSancovFlags(FlagParser *parser, SancovFlags *f) {
#define SANCOV_FLAG(Type, Name, DefaultValue, Description) \
RegisterFlag(parser, #Name, Description, &f->Name);
#include "sancov_flags.inc"
#undef SANCOV_FLAG
}
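// __sancov_default_options is declared as a weak interface function; if no
// definition ends up in the link, its address may be null, so guard the call.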
static const char *MaybeCallSancovDefaultOptions() {
return (&__sancov_default_options) ? __sancov_default_options() : "";
}
void InitializeSancovFlags() {
SancovFlags *f = sancov_flags();
f->SetDefaults();
FlagParser parser;
RegisterSancovFlags(&parser, f);
parser.ParseString(MaybeCallSancovDefaultOptions());
parser.ParseString(GetEnv("SANCOV_OPTIONS"));
ReportUnrecognizedFlags();
if (f->help) parser.PrintFlagDescriptions();
}
} // namespace __sancov


@@ -0,0 +1,40 @@
//===-- sancov_flags.h ------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Sanitizer Coverage runtime flags.
//
//===----------------------------------------------------------------------===//
#ifndef SANCOV_FLAGS_H
#define SANCOV_FLAGS_H
#include "sanitizer_flag_parser.h"
#include "sanitizer_internal_defs.h"
namespace __sancov {
struct SancovFlags {
#define SANCOV_FLAG(Type, Name, DefaultValue, Description) Type Name;
#include "sancov_flags.inc"
#undef SANCOV_FLAG
void SetDefaults();
};
extern SancovFlags sancov_flags_dont_use_directly;
inline SancovFlags* sancov_flags() { return &sancov_flags_dont_use_directly; }
void InitializeSancovFlags();
} // namespace __sancov
extern "C" SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE const char*
__sancov_default_options();
#endif


@@ -0,0 +1,21 @@
//===-- sancov_flags.inc ----------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Sanitizer Coverage runtime flags.
//
//===----------------------------------------------------------------------===//
#ifndef SANCOV_FLAG
#error "Defnine SANCOV_FLAG prior to including this file!"
#endif
SANCOV_FLAG(bool, symbolize, true,
"If set, converage information will be symbolized by sancov tool "
"after dumping.")
SANCOV_FLAG(bool, help, false, "Print flags help.")


@@ -0,0 +1,354 @@
//===-- sanitizer_addrhashmap.h ---------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Concurrent uptr->T hashmap.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ADDRHASHMAP_H
#define SANITIZER_ADDRHASHMAP_H
#include "sanitizer_common.h"
#include "sanitizer_mutex.h"
#include "sanitizer_atomic.h"
#include "sanitizer_allocator_internal.h"
namespace __sanitizer {
// Concurrent uptr->T hashmap.
// T must be a POD type; kSize is preferably a prime but can be any number.
// Usage example:
//
// typedef AddrHashMap<uptr, 11> Map;
// Map m;
// {
// Map::Handle h(&m, addr);
// use h.operator->() to access the data
// if h.created() then the element was just created, and the current thread
// has exclusive access to it
// otherwise the current thread has only read access to the data
// }
// {
// Map::Handle h(&m, addr, true);
// this will remove the data from the map in Handle dtor
// the current thread has exclusive access to the data
// if !h.exists() then the element never existed
// }
template<typename T, uptr kSize>
class AddrHashMap {
private:
struct Cell {
atomic_uintptr_t addr;
T val;
};
struct AddBucket {
uptr cap;
uptr size;
Cell cells[1]; // variable len
};
static const uptr kBucketSize = 3;
struct Bucket {
RWMutex mtx;
atomic_uintptr_t add;
Cell cells[kBucketSize];
};
public:
AddrHashMap();
class Handle {
public:
Handle(AddrHashMap<T, kSize> *map, uptr addr);
Handle(AddrHashMap<T, kSize> *map, uptr addr, bool remove);
Handle(AddrHashMap<T, kSize> *map, uptr addr, bool remove, bool create);
~Handle();
T *operator->();
T &operator*();
const T &operator*() const;
bool created() const;
bool exists() const;
private:
friend AddrHashMap<T, kSize>;
AddrHashMap<T, kSize> *map_;
Bucket *bucket_;
Cell *cell_;
uptr addr_;
uptr addidx_;
bool created_;
bool remove_;
bool create_;
};
private:
friend class Handle;
Bucket *table_;
void acquire(Handle *h);
void release(Handle *h);
uptr calcHash(uptr addr);
};
template<typename T, uptr kSize>
AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr) {
map_ = map;
addr_ = addr;
remove_ = false;
create_ = true;
map_->acquire(this);
}
template<typename T, uptr kSize>
AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr,
bool remove) {
map_ = map;
addr_ = addr;
remove_ = remove;
create_ = true;
map_->acquire(this);
}
template<typename T, uptr kSize>
AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr,
bool remove, bool create) {
map_ = map;
addr_ = addr;
remove_ = remove;
create_ = create;
map_->acquire(this);
}
template<typename T, uptr kSize>
AddrHashMap<T, kSize>::Handle::~Handle() {
map_->release(this);
}
template <typename T, uptr kSize>
T *AddrHashMap<T, kSize>::Handle::operator->() {
return &cell_->val;
}
template <typename T, uptr kSize>
const T &AddrHashMap<T, kSize>::Handle::operator*() const {
return cell_->val;
}
template <typename T, uptr kSize>
T &AddrHashMap<T, kSize>::Handle::operator*() {
return cell_->val;
}
template<typename T, uptr kSize>
bool AddrHashMap<T, kSize>::Handle::created() const {
return created_;
}
template<typename T, uptr kSize>
bool AddrHashMap<T, kSize>::Handle::exists() const {
return cell_ != nullptr;
}
template<typename T, uptr kSize>
AddrHashMap<T, kSize>::AddrHashMap() {
table_ = (Bucket*)MmapOrDie(kSize * sizeof(table_[0]), "AddrHashMap");
}
template<typename T, uptr kSize>
void AddrHashMap<T, kSize>::acquire(Handle *h) {
uptr addr = h->addr_;
uptr hash = calcHash(addr);
Bucket *b = &table_[hash];
h->created_ = false;
h->addidx_ = -1U;
h->bucket_ = b;
h->cell_ = nullptr;
// If we want to remove the element, we need exclusive access to the bucket,
// so skip the lock-free phase.
if (h->remove_)
goto locked;
retry:
// First try to find an existing element w/o read mutex.
CHECK(!h->remove_);
// Check the embed cells.
for (uptr i = 0; i < kBucketSize; i++) {
Cell *c = &b->cells[i];
uptr addr1 = atomic_load(&c->addr, memory_order_acquire);
if (addr1 == addr) {
h->cell_ = c;
return;
}
}
// Check the add cells with read lock.
if (atomic_load(&b->add, memory_order_relaxed)) {
b->mtx.ReadLock();
AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
for (uptr i = 0; i < add->size; i++) {
Cell *c = &add->cells[i];
uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
if (addr1 == addr) {
h->addidx_ = i;
h->cell_ = c;
return;
}
}
b->mtx.ReadUnlock();
}
locked:
// Re-check existence under write lock.
// Embed cells.
b->mtx.Lock();
for (uptr i = 0; i < kBucketSize; i++) {
Cell *c = &b->cells[i];
uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
if (addr1 == addr) {
if (h->remove_) {
h->cell_ = c;
return;
}
b->mtx.Unlock();
goto retry;
}
}
// Add cells.
AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
if (add) {
for (uptr i = 0; i < add->size; i++) {
Cell *c = &add->cells[i];
uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
if (addr1 == addr) {
if (h->remove_) {
h->addidx_ = i;
h->cell_ = c;
return;
}
b->mtx.Unlock();
goto retry;
}
}
}
// The element does not exist; no need to create it if we want to remove.
if (h->remove_ || !h->create_) {
b->mtx.Unlock();
return;
}
// Now try to create it under the mutex.
h->created_ = true;
// See if we have a free embed cell.
for (uptr i = 0; i < kBucketSize; i++) {
Cell *c = &b->cells[i];
uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
if (addr1 == 0) {
h->cell_ = c;
return;
}
}
// Store in the add cells.
if (!add) {
// Allocate a new add array.
const uptr kInitSize = 64;
add = (AddBucket*)InternalAlloc(kInitSize);
internal_memset(add, 0, kInitSize);
add->cap = (kInitSize - sizeof(*add)) / sizeof(add->cells[0]) + 1;
add->size = 0;
atomic_store(&b->add, (uptr)add, memory_order_relaxed);
}
if (add->size == add->cap) {
// Grow existing add array.
uptr oldsize = sizeof(*add) + (add->cap - 1) * sizeof(add->cells[0]);
uptr newsize = oldsize * 2;
AddBucket *add1 = (AddBucket*)InternalAlloc(newsize);
internal_memset(add1, 0, newsize);
add1->cap = (newsize - sizeof(*add)) / sizeof(add->cells[0]) + 1;
add1->size = add->size;
internal_memcpy(add1->cells, add->cells, add->size * sizeof(add->cells[0]));
InternalFree(add);
atomic_store(&b->add, (uptr)add1, memory_order_relaxed);
add = add1;
}
// Store.
uptr i = add->size++;
Cell *c = &add->cells[i];
CHECK_EQ(atomic_load(&c->addr, memory_order_relaxed), 0);
h->addidx_ = i;
h->cell_ = c;
}
template<typename T, uptr kSize>
void AddrHashMap<T, kSize>::release(Handle *h) {
if (!h->cell_)
return;
Bucket *b = h->bucket_;
Cell *c = h->cell_;
uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
if (h->created_) {
// Denote completion of insertion.
CHECK_EQ(addr1, 0);
// After the following store, the element becomes available
// for lock-free reads.
atomic_store(&c->addr, h->addr_, memory_order_release);
b->mtx.Unlock();
} else if (h->remove_) {
// Denote that the cell is empty now.
CHECK_EQ(addr1, h->addr_);
atomic_store(&c->addr, 0, memory_order_release);
// See if we need to compact the bucket.
AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
if (h->addidx_ == -1U) {
// Removed from embed array, move an add element into the freed cell.
if (add && add->size != 0) {
uptr last = --add->size;
Cell *c1 = &add->cells[last];
c->val = c1->val;
uptr addr1 = atomic_load(&c1->addr, memory_order_relaxed);
atomic_store(&c->addr, addr1, memory_order_release);
atomic_store(&c1->addr, 0, memory_order_release);
}
} else {
// Removed from add array, compact it.
uptr last = --add->size;
Cell *c1 = &add->cells[last];
if (c != c1) {
*c = *c1;
atomic_store(&c1->addr, 0, memory_order_relaxed);
}
}
if (add && add->size == 0) {
// FIXME(dvyukov): free add?
}
b->mtx.Unlock();
} else {
CHECK_EQ(addr1, h->addr_);
if (h->addidx_ != -1U)
b->mtx.ReadUnlock();
}
}
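// Cheap hash: mix the address with a shift-add and a shift-xor, then reduce
// it modulo the number of buckets.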
template<typename T, uptr kSize>
uptr AddrHashMap<T, kSize>::calcHash(uptr addr) {
addr += addr << 10;
addr ^= addr >> 6;
return addr % kSize;
}
} // namespace __sanitizer
#endif // SANITIZER_ADDRHASHMAP_H


@@ -0,0 +1,260 @@
//===-- sanitizer_allocator.cc --------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
// This allocator is used inside run-times.
//===----------------------------------------------------------------------===//
#include "sanitizer_allocator.h"
#include "sanitizer_allocator_checks.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
namespace __sanitizer {
// ThreadSanitizer for Go uses libc malloc/free.
#if SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
# if SANITIZER_LINUX && !SANITIZER_ANDROID
extern "C" void *__libc_malloc(uptr size);
# if !SANITIZER_GO
extern "C" void *__libc_memalign(uptr alignment, uptr size);
# endif
extern "C" void *__libc_realloc(void *ptr, uptr size);
extern "C" void __libc_free(void *ptr);
# else
# include <stdlib.h>
# define __libc_malloc malloc
# if !SANITIZER_GO
static void *__libc_memalign(uptr alignment, uptr size) {
void *p;
uptr error = posix_memalign(&p, alignment, size);
if (error) return nullptr;
return p;
}
# endif
# define __libc_realloc realloc
# define __libc_free free
# endif
static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
uptr alignment) {
(void)cache;
#if !SANITIZER_GO
if (alignment == 0)
return __libc_malloc(size);
else
return __libc_memalign(alignment, size);
#else
// Windows does not provide __libc_memalign/posix_memalign. It provides
// _aligned_malloc, but the allocated blocks can't be passed to free; they
// need to be passed to _aligned_free. The InternalAlloc interface does not
// account for such a requirement. Alignment does not seem to be used anywhere
// in the runtime, so just call __libc_malloc for now.
DCHECK_EQ(alignment, 0);
return __libc_malloc(size);
#endif
}
static void *RawInternalRealloc(void *ptr, uptr size,
InternalAllocatorCache *cache) {
(void)cache;
return __libc_realloc(ptr, size);
}
static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
(void)cache;
__libc_free(ptr);
}
InternalAllocator *internal_allocator() {
return 0;
}
#else // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
static ALIGNED(64) char internal_alloc_placeholder[sizeof(InternalAllocator)];
static atomic_uint8_t internal_allocator_initialized;
static StaticSpinMutex internal_alloc_init_mu;
static InternalAllocatorCache internal_allocator_cache;
static StaticSpinMutex internal_allocator_cache_mu;
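// Lazily constructs the internal allocator in the static placeholder buffer,
// using double-checked locking so initialization happens exactly once.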
InternalAllocator *internal_allocator() {
InternalAllocator *internal_allocator_instance =
reinterpret_cast<InternalAllocator *>(&internal_alloc_placeholder);
if (atomic_load(&internal_allocator_initialized, memory_order_acquire) == 0) {
SpinMutexLock l(&internal_alloc_init_mu);
if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) ==
0) {
internal_allocator_instance->Init(kReleaseToOSIntervalNever);
atomic_store(&internal_allocator_initialized, 1, memory_order_release);
}
}
return internal_allocator_instance;
}
static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
uptr alignment) {
if (alignment == 0) alignment = 8;
if (cache == 0) {
SpinMutexLock l(&internal_allocator_cache_mu);
return internal_allocator()->Allocate(&internal_allocator_cache, size,
alignment);
}
return internal_allocator()->Allocate(cache, size, alignment);
}
static void *RawInternalRealloc(void *ptr, uptr size,
InternalAllocatorCache *cache) {
uptr alignment = 8;
if (cache == 0) {
SpinMutexLock l(&internal_allocator_cache_mu);
return internal_allocator()->Reallocate(&internal_allocator_cache, ptr,
size, alignment);
}
return internal_allocator()->Reallocate(cache, ptr, size, alignment);
}
static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
if (!cache) {
SpinMutexLock l(&internal_allocator_cache_mu);
return internal_allocator()->Deallocate(&internal_allocator_cache, ptr);
}
internal_allocator()->Deallocate(cache, ptr);
}
#endif // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
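// Every block returned by InternalAlloc is prefixed with a u64 header holding
// kBlockMagic; InternalRealloc and InternalFree CHECK it to catch pointers
// that were not allocated by this allocator.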
const u64 kBlockMagic = 0x6A6CB03ABCEBC041ull;
void *InternalAlloc(uptr size, InternalAllocatorCache *cache, uptr alignment) {
if (size + sizeof(u64) < size)
return nullptr;
void *p = RawInternalAlloc(size + sizeof(u64), cache, alignment);
if (!p)
return nullptr;
((u64*)p)[0] = kBlockMagic;
return (char*)p + sizeof(u64);
}
void *InternalRealloc(void *addr, uptr size, InternalAllocatorCache *cache) {
if (!addr)
return InternalAlloc(size, cache);
if (size + sizeof(u64) < size)
return nullptr;
addr = (char*)addr - sizeof(u64);
size = size + sizeof(u64);
CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
void *p = RawInternalRealloc(addr, size, cache);
if (!p)
return nullptr;
return (char*)p + sizeof(u64);
}
void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) {
if (UNLIKELY(CheckForCallocOverflow(count, size)))
return InternalAllocator::FailureHandler::OnBadRequest();
void *p = InternalAlloc(count * size, cache);
if (p) internal_memset(p, 0, count * size);
return p;
}
void InternalFree(void *addr, InternalAllocatorCache *cache) {
if (!addr)
return;
addr = (char*)addr - sizeof(u64);
CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
((u64*)addr)[0] = 0;
RawInternalFree(addr, cache);
}
// LowLevelAllocator
constexpr uptr kLowLevelAllocatorDefaultAlignment = 8;
static uptr low_level_alloc_min_alignment = kLowLevelAllocatorDefaultAlignment;
static LowLevelAllocateCallback low_level_alloc_callback;
void *LowLevelAllocator::Allocate(uptr size) {
// Align allocation size.
size = RoundUpTo(size, low_level_alloc_min_alignment);
if (allocated_end_ - allocated_current_ < (sptr)size) {
uptr size_to_allocate = Max(size, GetPageSizeCached());
allocated_current_ =
(char*)MmapOrDie(size_to_allocate, __func__);
allocated_end_ = allocated_current_ + size_to_allocate;
if (low_level_alloc_callback) {
low_level_alloc_callback((uptr)allocated_current_,
size_to_allocate);
}
}
CHECK(allocated_end_ - allocated_current_ >= (sptr)size);
void *res = allocated_current_;
allocated_current_ += size;
return res;
}
void SetLowLevelAllocateMinAlignment(uptr alignment) {
CHECK(IsPowerOfTwo(alignment));
low_level_alloc_min_alignment = Max(alignment, low_level_alloc_min_alignment);
}
void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback) {
low_level_alloc_callback = callback;
}
static atomic_uint8_t allocator_out_of_memory = {0};
static atomic_uint8_t allocator_may_return_null = {0};
bool IsAllocatorOutOfMemory() {
return atomic_load_relaxed(&allocator_out_of_memory);
}
// Prints error message and kills the program.
void NORETURN ReportAllocatorCannotReturnNull() {
Report("%s's allocator is terminating the process instead of returning 0\n",
SanitizerToolName);
Report("If you don't like this behavior set allocator_may_return_null=1\n");
CHECK(0);
Die();
}
bool AllocatorMayReturnNull() {
return atomic_load(&allocator_may_return_null, memory_order_relaxed);
}
void SetAllocatorMayReturnNull(bool may_return_null) {
atomic_store(&allocator_may_return_null, may_return_null,
memory_order_relaxed);
}
void *ReturnNullOrDieOnFailure::OnBadRequest() {
if (AllocatorMayReturnNull())
return nullptr;
ReportAllocatorCannotReturnNull();
}
void *ReturnNullOrDieOnFailure::OnOOM() {
atomic_store_relaxed(&allocator_out_of_memory, 1);
if (AllocatorMayReturnNull())
return nullptr;
ReportAllocatorCannotReturnNull();
}
void NORETURN *DieOnFailure::OnBadRequest() {
ReportAllocatorCannotReturnNull();
}
void NORETURN *DieOnFailure::OnOOM() {
atomic_store_relaxed(&allocator_out_of_memory, 1);
ReportAllocatorCannotReturnNull();
}
} // namespace __sanitizer


@@ -0,0 +1,83 @@
//===-- sanitizer_allocator.h -----------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Specialized memory allocator for ThreadSanitizer, MemorySanitizer, etc.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#define SANITIZER_ALLOCATOR_H
#include "sanitizer_internal_defs.h"
#include "sanitizer_common.h"
#include "sanitizer_libc.h"
#include "sanitizer_list.h"
#include "sanitizer_mutex.h"
#include "sanitizer_lfstack.h"
#include "sanitizer_procmaps.h"
namespace __sanitizer {
// Since flags are immutable and allocator behavior can be changed at runtime
// (unit tests or ASan on Android are some examples), allocator_may_return_null
// flag value is cached here and can be altered later.
bool AllocatorMayReturnNull();
void SetAllocatorMayReturnNull(bool may_return_null);
// Allocator failure handling policies:
// Implements AllocatorMayReturnNull policy, returns null when the flag is set,
// dies otherwise.
struct ReturnNullOrDieOnFailure {
static void *OnBadRequest();
static void *OnOOM();
};
// Always dies on the failure.
struct DieOnFailure {
static void NORETURN *OnBadRequest();
static void NORETURN *OnOOM();
};
// Returns true if the allocator has detected an OOM condition. Can be used to
// avoid memory-hungry operations. Set when an OnOOM() failure handler runs.
bool IsAllocatorOutOfMemory();
// Allocators call these callbacks on mmap/munmap.
struct NoOpMapUnmapCallback {
void OnMap(uptr p, uptr size) const { }
void OnUnmap(uptr p, uptr size) const { }
};
// Callback type for iterating over chunks.
typedef void (*ForEachChunkCallback)(uptr chunk, void *arg);
INLINE u32 Rand(u32 *state) { // ANSI C linear congruential PRNG.
return (*state = *state * 1103515245 + 12345) >> 16;
}
INLINE u32 RandN(u32 *state, u32 n) { return Rand(state) % n; } // [0, n)
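// Fisher-Yates shuffle of a[0..n-1], driven by the PRNG above.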
template<typename T>
INLINE void RandomShuffle(T *a, u32 n, u32 *rand_state) {
if (n <= 1) return;
for (u32 i = n - 1; i > 0; i--)
Swap(a[i], a[RandN(rand_state, i + 1)]);
}
#include "sanitizer_allocator_size_class_map.h"
#include "sanitizer_allocator_stats.h"
#include "sanitizer_allocator_primary64.h"
#include "sanitizer_allocator_bytemap.h"
#include "sanitizer_allocator_primary32.h"
#include "sanitizer_allocator_local_cache.h"
#include "sanitizer_allocator_secondary.h"
#include "sanitizer_allocator_combined.h"
} // namespace __sanitizer
#endif // SANITIZER_ALLOCATOR_H


@@ -0,0 +1,103 @@
//===-- sanitizer_allocator_bytemap.h ---------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif
// Maps integers in range [0, kSize) to u8 values.
template<u64 kSize>
class FlatByteMap {
public:
void TestOnlyInit() {
internal_memset(map_, 0, sizeof(map_));
}
void set(uptr idx, u8 val) {
CHECK_LT(idx, kSize);
CHECK_EQ(0U, map_[idx]);
map_[idx] = val;
}
u8 operator[] (uptr idx) {
CHECK_LT(idx, kSize);
// FIXME: CHECK may be too expensive here.
return map_[idx];
}
private:
u8 map_[kSize];
};
// TwoLevelByteMap maps integers in range [0, kSize1*kSize2) to u8 values.
// It is implemented as a two-dimensional array: array of kSize1 pointers
// to kSize2-byte arrays. The secondary arrays are mmaped on demand.
// Each value is initially zero and can be set to something else only once.
// Setting and getting values from multiple threads is safe w/o extra locking.
template <u64 kSize1, u64 kSize2, class MapUnmapCallback = NoOpMapUnmapCallback>
class TwoLevelByteMap {
public:
void TestOnlyInit() {
internal_memset(map1_, 0, sizeof(map1_));
mu_.Init();
}
void TestOnlyUnmap() {
for (uptr i = 0; i < kSize1; i++) {
u8 *p = Get(i);
if (!p) continue;
MapUnmapCallback().OnUnmap(reinterpret_cast<uptr>(p), kSize2);
UnmapOrDie(p, kSize2);
}
}
uptr size() const { return kSize1 * kSize2; }
uptr size1() const { return kSize1; }
uptr size2() const { return kSize2; }
void set(uptr idx, u8 val) {
CHECK_LT(idx, kSize1 * kSize2);
u8 *map2 = GetOrCreate(idx / kSize2);
CHECK_EQ(0U, map2[idx % kSize2]);
map2[idx % kSize2] = val;
}
u8 operator[] (uptr idx) const {
CHECK_LT(idx, kSize1 * kSize2);
u8 *map2 = Get(idx / kSize2);
if (!map2) return 0;
return map2[idx % kSize2];
}
private:
u8 *Get(uptr idx) const {
CHECK_LT(idx, kSize1);
return reinterpret_cast<u8 *>(
atomic_load(&map1_[idx], memory_order_acquire));
}
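// Returns the second-level array for idx, creating it on demand with
// double-checked locking so that concurrent callers map it only once.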
u8 *GetOrCreate(uptr idx) {
u8 *res = Get(idx);
if (!res) {
SpinMutexLock l(&mu_);
if (!(res = Get(idx))) {
res = (u8*)MmapOrDie(kSize2, "TwoLevelByteMap");
MapUnmapCallback().OnMap(reinterpret_cast<uptr>(res), kSize2);
atomic_store(&map1_[idx], reinterpret_cast<uptr>(res),
memory_order_release);
}
}
return res;
}
atomic_uintptr_t map1_[kSize1];
StaticSpinMutex mu_;
};
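// Usage sketch (illustrative only, not part of the original file): record and
// query size classes for region indices.
//   TwoLevelByteMap<1 << 8, 1 << 12> m;
//   m.TestOnlyInit();
//   m.set(42, 3);    // each index may be set to a non-zero value only once
//   u8 cls = m[42];  // lock-free read, returns 3 (or 0 if never set)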


@@ -0,0 +1,23 @@
//===-- sanitizer_allocator_checks.cc ---------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Various checks shared between ThreadSanitizer, MemorySanitizer, etc. memory
// allocators.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_errno.h"
namespace __sanitizer {
void SetErrnoToENOMEM() {
errno = errno_ENOMEM;
}
} // namespace __sanitizer


@@ -0,0 +1,75 @@
//===-- sanitizer_allocator_checks.h ----------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Various checks shared between ThreadSanitizer, MemorySanitizer, etc. memory
// allocators.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_CHECKS_H
#define SANITIZER_ALLOCATOR_CHECKS_H
#include "sanitizer_internal_defs.h"
#include "sanitizer_common.h"
#include "sanitizer_platform.h"
namespace __sanitizer {
// The following is defined in a separate compilation unit to avoid pulling in
// sanitizer_errno.h in this header, which leads to conflicts when other system
// headers include errno.h. Setting errno only happens on an unlikely failure
// path, so we do not care much about having it inlined.
void SetErrnoToENOMEM();
// A common errno setting logic shared by almost all sanitizer allocator APIs.
INLINE void *SetErrnoOnNull(void *ptr) {
if (UNLIKELY(!ptr))
SetErrnoToENOMEM();
return ptr;
}
// In case of the check failure, the caller of the following Check... functions
// should "return POLICY::OnBadRequest();" where POLICY is the current allocator
// failure handling policy.
// Checks aligned_alloc() parameters. The POSIX implementation requires that
// the alignment is a power of two and that the size is a multiple of the
// alignment; non-POSIX implementations only require that the size is a
// multiple of the alignment.
INLINE bool CheckAlignedAllocAlignmentAndSize(uptr alignment, uptr size) {
#if SANITIZER_POSIX
return IsPowerOfTwo(alignment) && (size & (alignment - 1)) == 0;
#else
return size % alignment == 0;
#endif
}
// Checks posix_memalign() parameters, verifies that alignment is a power of two
// and a multiple of sizeof(void *).
INLINE bool CheckPosixMemalignAlignment(uptr alignment) {
return IsPowerOfTwo(alignment) && (alignment % sizeof(void *)) == 0; // NOLINT
}
// Returns true if calloc(size, n) call overflows on size*n calculation.
INLINE bool CheckForCallocOverflow(uptr size, uptr n) {
if (!size)
return false;
uptr max = (uptr)-1L;
return (max / size) < n;
}
// Returns true if the size passed to pvalloc overflows when rounded to the next
// multiple of page_size.
INLINE bool CheckForPvallocOverflow(uptr size, uptr page_size) {
return RoundUpTo(size, page_size) < size;
}
} // namespace __sanitizer
#endif // SANITIZER_ALLOCATOR_CHECKS_H


@@ -0,0 +1,197 @@
//===-- sanitizer_allocator_combined.h --------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif
// This class implements a complete memory allocator by using two
// internal allocators:
// PrimaryAllocator is efficient, but may not allocate some sizes (alignments).
// When allocating 2^x bytes it should return 2^x aligned chunk.
// PrimaryAllocator is used via a local AllocatorCache.
// SecondaryAllocator can allocate anything, but is not efficient.
template <class PrimaryAllocator, class AllocatorCache,
class SecondaryAllocator> // NOLINT
class CombinedAllocator {
public:
typedef typename SecondaryAllocator::FailureHandler FailureHandler;
void InitLinkerInitialized(s32 release_to_os_interval_ms) {
primary_.Init(release_to_os_interval_ms);
secondary_.InitLinkerInitialized();
stats_.InitLinkerInitialized();
}
void Init(s32 release_to_os_interval_ms) {
primary_.Init(release_to_os_interval_ms);
secondary_.Init();
stats_.Init();
}
void *Allocate(AllocatorCache *cache, uptr size, uptr alignment) {
// Returning 0 on malloc(0) may break a lot of code.
if (size == 0)
size = 1;
if (size + alignment < size)
return FailureHandler::OnBadRequest();
uptr original_size = size;
// If alignment requirements are to be fulfilled by the frontend allocator
// rather than by the primary or secondary, passing an alignment lower than
// or equal to 8 will prevent any further rounding up, as well as the later
// alignment check.
if (alignment > 8)
size = RoundUpTo(size, alignment);
// The primary allocator should return a 2^x aligned allocation when
// requested 2^x bytes, hence using the rounded up 'size' when being
// serviced by the primary (this is no longer true when the primary is
// using a non-fixed base address). The secondary takes care of the
// alignment without such requirement, and allocating 'size' would use
// extraneous memory, so we employ 'original_size'.
void *res;
if (primary_.CanAllocate(size, alignment))
res = cache->Allocate(&primary_, primary_.ClassID(size));
else
res = secondary_.Allocate(&stats_, original_size, alignment);
if (!res)
return FailureHandler::OnOOM();
if (alignment > 8)
CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
return res;
}
s32 ReleaseToOSIntervalMs() const {
return primary_.ReleaseToOSIntervalMs();
}
void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
primary_.SetReleaseToOSIntervalMs(release_to_os_interval_ms);
}
void ForceReleaseToOS() {
primary_.ForceReleaseToOS();
}
void Deallocate(AllocatorCache *cache, void *p) {
if (!p) return;
if (primary_.PointerIsMine(p))
cache->Deallocate(&primary_, primary_.GetSizeClass(p), p);
else
secondary_.Deallocate(&stats_, p);
}
void *Reallocate(AllocatorCache *cache, void *p, uptr new_size,
uptr alignment) {
if (!p)
return Allocate(cache, new_size, alignment);
if (!new_size) {
Deallocate(cache, p);
return nullptr;
}
CHECK(PointerIsMine(p));
uptr old_size = GetActuallyAllocatedSize(p);
uptr memcpy_size = Min(new_size, old_size);
void *new_p = Allocate(cache, new_size, alignment);
if (new_p)
internal_memcpy(new_p, p, memcpy_size);
Deallocate(cache, p);
return new_p;
}
bool PointerIsMine(void *p) {
if (primary_.PointerIsMine(p))
return true;
return secondary_.PointerIsMine(p);
}
bool FromPrimary(void *p) {
return primary_.PointerIsMine(p);
}
void *GetMetaData(const void *p) {
if (primary_.PointerIsMine(p))
return primary_.GetMetaData(p);
return secondary_.GetMetaData(p);
}
void *GetBlockBegin(const void *p) {
if (primary_.PointerIsMine(p))
return primary_.GetBlockBegin(p);
return secondary_.GetBlockBegin(p);
}
// This function does the same as GetBlockBegin, but is much faster.
// Must be called with the allocator locked.
void *GetBlockBeginFastLocked(void *p) {
if (primary_.PointerIsMine(p))
return primary_.GetBlockBegin(p);
return secondary_.GetBlockBeginFastLocked(p);
}
uptr GetActuallyAllocatedSize(void *p) {
if (primary_.PointerIsMine(p))
return primary_.GetActuallyAllocatedSize(p);
return secondary_.GetActuallyAllocatedSize(p);
}
uptr TotalMemoryUsed() {
return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed();
}
void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }
void InitCache(AllocatorCache *cache) {
cache->Init(&stats_);
}
void DestroyCache(AllocatorCache *cache) {
cache->Destroy(&primary_, &stats_);
}
void SwallowCache(AllocatorCache *cache) {
cache->Drain(&primary_);
}
void GetStats(AllocatorStatCounters s) const {
stats_.Get(s);
}
void PrintStats() {
primary_.PrintStats();
secondary_.PrintStats();
}
// ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
// introspection API.
void ForceLock() {
primary_.ForceLock();
secondary_.ForceLock();
}
void ForceUnlock() {
secondary_.ForceUnlock();
primary_.ForceUnlock();
}
// Iterate over all existing chunks.
// The allocator must be locked when calling this function.
void ForEachChunk(ForEachChunkCallback callback, void *arg) {
primary_.ForEachChunk(callback, arg);
secondary_.ForEachChunk(callback, arg);
}
private:
PrimaryAllocator primary_;
SecondaryAllocator secondary_;
AllocatorGlobalStats stats_;
};


@@ -0,0 +1,48 @@
//===-- sanitizer_allocator_interface.h -------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Re-declaration of functions from public sanitizer allocator interface.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_INTERFACE_H
#define SANITIZER_ALLOCATOR_INTERFACE_H
#include "sanitizer_internal_defs.h"
using __sanitizer::uptr;
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_estimated_allocated_size(uptr size);
SANITIZER_INTERFACE_ATTRIBUTE int __sanitizer_get_ownership(const void *p);
SANITIZER_INTERFACE_ATTRIBUTE uptr
__sanitizer_get_allocated_size(const void *p);
SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_current_allocated_bytes();
SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_heap_size();
SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_free_bytes();
SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_unmapped_bytes();
SANITIZER_INTERFACE_ATTRIBUTE int __sanitizer_install_malloc_and_free_hooks(
void (*malloc_hook)(const void *, uptr),
void (*free_hook)(const void *));
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_malloc_hook(void *ptr, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_free_hook(void *ptr);
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_purge_allocator();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_print_memory_profile(uptr top_percent, uptr max_number_of_contexts);
} // extern "C"
#endif // SANITIZER_ALLOCATOR_INTERFACE_H


@@ -0,0 +1,73 @@
//===-- sanitizer_allocator_internal.h --------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This allocator is used inside run-times.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_INTERNAL_H
#define SANITIZER_ALLOCATOR_INTERNAL_H
#include "sanitizer_allocator.h"
#include "sanitizer_internal_defs.h"
namespace __sanitizer {
// FIXME: Check if we may use even more compact size class map for internal
// purposes.
typedef CompactSizeClassMap InternalSizeClassMap;
static const uptr kInternalAllocatorRegionSizeLog = 20;
static const uptr kInternalAllocatorNumRegions =
SANITIZER_MMAP_RANGE_SIZE >> kInternalAllocatorRegionSizeLog;
#if SANITIZER_WORDSIZE == 32
typedef FlatByteMap<kInternalAllocatorNumRegions> ByteMap;
#else
typedef TwoLevelByteMap<(kInternalAllocatorNumRegions >> 12), 1 << 12> ByteMap;
#endif
struct AP32 {
static const uptr kSpaceBeg = 0;
static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
static const uptr kMetadataSize = 0;
typedef InternalSizeClassMap SizeClassMap;
static const uptr kRegionSizeLog = kInternalAllocatorRegionSizeLog;
typedef __sanitizer::ByteMap ByteMap;
typedef NoOpMapUnmapCallback MapUnmapCallback;
static const uptr kFlags = 0;
};
typedef SizeClassAllocator32<AP32> PrimaryInternalAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryInternalAllocator>
InternalAllocatorCache;
typedef CombinedAllocator<PrimaryInternalAllocator, InternalAllocatorCache,
LargeMmapAllocator<NoOpMapUnmapCallback, DieOnFailure>
> InternalAllocator;
void *InternalAlloc(uptr size, InternalAllocatorCache *cache = nullptr,
uptr alignment = 0);
void *InternalRealloc(void *p, uptr size,
InternalAllocatorCache *cache = nullptr);
void *InternalCalloc(uptr count, uptr size,
InternalAllocatorCache *cache = nullptr);
void InternalFree(void *p, InternalAllocatorCache *cache = nullptr);
InternalAllocator *internal_allocator();
enum InternalAllocEnum {
INTERNAL_ALLOC
};
} // namespace __sanitizer
inline void *operator new(__sanitizer::operator_new_size_type size,
__sanitizer::InternalAllocEnum) {
return __sanitizer::InternalAlloc(size);
}
#endif // SANITIZER_ALLOCATOR_INTERNAL_H


@@ -0,0 +1,272 @@
//===-- sanitizer_allocator_local_cache.h -----------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif
// Objects of this type should be used as local caches for SizeClassAllocator64
// or SizeClassAllocator32. Since the typical use of this class is to have one
// object per thread in TLS, it has to be POD.
template<class SizeClassAllocator>
struct SizeClassAllocatorLocalCache
: SizeClassAllocator::AllocatorCache {
};
// Cache used by SizeClassAllocator64.
template <class SizeClassAllocator>
struct SizeClassAllocator64LocalCache {
typedef SizeClassAllocator Allocator;
void Init(AllocatorGlobalStats *s) {
stats_.Init();
if (s)
s->Register(&stats_);
}
void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
Drain(allocator);
if (s)
s->Unregister(&stats_);
}
void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
CHECK_NE(class_id, 0UL);
CHECK_LT(class_id, kNumClasses);
PerClass *c = &per_class_[class_id];
if (UNLIKELY(c->count == 0)) {
if (UNLIKELY(!Refill(c, allocator, class_id)))
return nullptr;
}
stats_.Add(AllocatorStatAllocated, c->class_size);
CHECK_GT(c->count, 0);
CompactPtrT chunk = c->chunks[--c->count];
void *res = reinterpret_cast<void *>(allocator->CompactPtrToPointer(
allocator->GetRegionBeginBySizeClass(class_id), chunk));
return res;
}
void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
CHECK_NE(class_id, 0UL);
CHECK_LT(class_id, kNumClasses);
// If the first allocator call on a new thread is a deallocation, then
// max_count will be zero, leading to check failure.
InitCache();
PerClass *c = &per_class_[class_id];
stats_.Sub(AllocatorStatAllocated, c->class_size);
CHECK_NE(c->max_count, 0UL);
if (UNLIKELY(c->count == c->max_count))
Drain(c, allocator, class_id, c->max_count / 2);
CompactPtrT chunk = allocator->PointerToCompactPtr(
allocator->GetRegionBeginBySizeClass(class_id),
reinterpret_cast<uptr>(p));
c->chunks[c->count++] = chunk;
}
void Drain(SizeClassAllocator *allocator) {
for (uptr i = 0; i < kNumClasses; i++) {
PerClass *c = &per_class_[i];
while (c->count > 0)
Drain(c, allocator, i, c->count);
}
}
private:
typedef typename Allocator::SizeClassMapT SizeClassMap;
static const uptr kNumClasses = SizeClassMap::kNumClasses;
typedef typename Allocator::CompactPtrT CompactPtrT;
struct PerClass {
u32 count;
u32 max_count;
uptr class_size;
CompactPtrT chunks[2 * SizeClassMap::kMaxNumCachedHint];
};
PerClass per_class_[kNumClasses];
AllocatorStats stats_;
void InitCache() {
if (LIKELY(per_class_[1].max_count))
return;
for (uptr i = 0; i < kNumClasses; i++) {
PerClass *c = &per_class_[i];
c->max_count = 2 * SizeClassMap::MaxCachedHint(i);
c->class_size = Allocator::ClassIdToSize(i);
}
}
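// Pulls up to max_count/2 chunks for class_id from the shared allocator into
// this cache.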
NOINLINE bool Refill(PerClass *c, SizeClassAllocator *allocator,
uptr class_id) {
InitCache();
uptr num_requested_chunks = c->max_count / 2;
if (UNLIKELY(!allocator->GetFromAllocator(&stats_, class_id, c->chunks,
num_requested_chunks)))
return false;
c->count = num_requested_chunks;
return true;
}
NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator, uptr class_id,
uptr count) {
InitCache();
CHECK_GE(c->count, count);
uptr first_idx_to_drain = c->count - count;
c->count -= count;
allocator->ReturnToAllocator(&stats_, class_id,
&c->chunks[first_idx_to_drain], count);
}
};
// Cache used by SizeClassAllocator32.
template <class SizeClassAllocator>
struct SizeClassAllocator32LocalCache {
typedef SizeClassAllocator Allocator;
typedef typename Allocator::TransferBatch TransferBatch;
void Init(AllocatorGlobalStats *s) {
stats_.Init();
if (s)
s->Register(&stats_);
}
// Returns a TransferBatch suitable for class_id.
TransferBatch *CreateBatch(uptr class_id, SizeClassAllocator *allocator,
TransferBatch *b) {
if (uptr batch_class_id = per_class_[class_id].batch_class_id)
return (TransferBatch*)Allocate(allocator, batch_class_id);
return b;
}
// Destroys TransferBatch b.
void DestroyBatch(uptr class_id, SizeClassAllocator *allocator,
TransferBatch *b) {
if (uptr batch_class_id = per_class_[class_id].batch_class_id)
Deallocate(allocator, batch_class_id, b);
}
void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
Drain(allocator);
if (s)
s->Unregister(&stats_);
}
void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
CHECK_NE(class_id, 0UL);
CHECK_LT(class_id, kNumClasses);
PerClass *c = &per_class_[class_id];
if (UNLIKELY(c->count == 0)) {
if (UNLIKELY(!Refill(allocator, class_id)))
return nullptr;
}
stats_.Add(AllocatorStatAllocated, c->class_size);
void *res = c->batch[--c->count];
PREFETCH(c->batch[c->count - 1]);
return res;
}
void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
CHECK_NE(class_id, 0UL);
CHECK_LT(class_id, kNumClasses);
// If the first allocator call on a new thread is a deallocation, then
// max_count will be zero, leading to check failure.
InitCache();
PerClass *c = &per_class_[class_id];
stats_.Sub(AllocatorStatAllocated, c->class_size);
CHECK_NE(c->max_count, 0UL);
if (UNLIKELY(c->count == c->max_count))
Drain(allocator, class_id);
c->batch[c->count++] = p;
}
void Drain(SizeClassAllocator *allocator) {
for (uptr i = 0; i < kNumClasses; i++) {
PerClass *c = &per_class_[i];
while (c->count > 0)
Drain(allocator, i);
}
}
private:
typedef typename Allocator::SizeClassMapT SizeClassMap;
static const uptr kBatchClassID = SizeClassMap::kBatchClassID;
static const uptr kNumClasses = SizeClassMap::kNumClasses;
// If kUseSeparateSizeClassForBatch is true, all TransferBatch objects are
// allocated from kBatchClassID size class (except for those that are needed
// for kBatchClassID itself). The goal is to have TransferBatches in a totally
// different region of RAM to improve security.
static const bool kUseSeparateSizeClassForBatch =
Allocator::kUseSeparateSizeClassForBatch;
struct PerClass {
uptr count;
uptr max_count;
uptr class_size;
uptr batch_class_id;
void *batch[2 * TransferBatch::kMaxNumCached];
};
PerClass per_class_[kNumClasses];
AllocatorStats stats_;
void InitCache() {
if (LIKELY(per_class_[1].max_count))
return;
const uptr batch_class_id = SizeClassMap::ClassID(sizeof(TransferBatch));
for (uptr i = 0; i < kNumClasses; i++) {
PerClass *c = &per_class_[i];
uptr max_cached = TransferBatch::MaxCached(i);
c->max_count = 2 * max_cached;
c->class_size = Allocator::ClassIdToSize(i);
// Precompute the class id to use to store batches for the current class
// id. 0 means the class size is large enough to store a batch within one
// of the chunks. If using a separate size class, it will always be
// kBatchClassID, except for kBatchClassID itself.
if (kUseSeparateSizeClassForBatch) {
c->batch_class_id = (i == kBatchClassID) ? 0 : kBatchClassID;
} else {
c->batch_class_id = (c->class_size <
TransferBatch::AllocationSizeRequiredForNElements(max_cached)) ?
batch_class_id : 0;
}
}
}
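// Fetches a whole TransferBatch for class_id, copies its chunks into the
// local cache, and then destroys the (possibly separately allocated) batch.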
NOINLINE bool Refill(SizeClassAllocator *allocator, uptr class_id) {
InitCache();
PerClass *c = &per_class_[class_id];
TransferBatch *b = allocator->AllocateBatch(&stats_, this, class_id);
if (UNLIKELY(!b))
return false;
CHECK_GT(b->Count(), 0);
b->CopyToArray(c->batch);
c->count = b->Count();
DestroyBatch(class_id, allocator, b);
return true;
}
NOINLINE void Drain(SizeClassAllocator *allocator, uptr class_id) {
InitCache();
PerClass *c = &per_class_[class_id];
uptr cnt = Min(c->max_count / 2, c->count);
uptr first_idx_to_drain = c->count - cnt;
TransferBatch *b = CreateBatch(
class_id, allocator, (TransferBatch *)c->batch[first_idx_to_drain]);
    // Failure to allocate a batch while releasing memory is non-recoverable.
// TODO(alekseys): Figure out how to do it without allocating a new batch.
if (UNLIKELY(!b))
DieOnFailure::OnOOM();
b->SetFromArray(allocator->GetRegionBeginBySizeClass(class_id),
&c->batch[first_idx_to_drain], cnt);
c->count -= cnt;
allocator->DeallocateBatch(&stats_, class_id, b);
}
};
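// A minimal usage sketch for the per-thread cache above (the identifiers
// `cache`, `allocator`, `global_stats` and `size` are illustrative; in the
// sanitizers the cache is normally driven through the combined allocator
// rather than called directly):
//
//   uptr class_id = allocator.ClassID(size);
//   void *p = cache.Allocate(&allocator, class_id);  // refills from a
//                                                    // TransferBatch if empty
//   ...
//   cache.Deallocate(&allocator, class_id, p);  // drains half of the cached
//                                               // pointers when the array
//                                               // is full
//   // On thread teardown:
//   cache.Destroy(&allocator, &global_stats);   // Drain() + stats Unregister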

View File

@@ -0,0 +1,370 @@
//===-- sanitizer_allocator_primary32.h -------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif
template<class SizeClassAllocator> struct SizeClassAllocator32LocalCache;
// SizeClassAllocator32 -- allocator for 32-bit address space.
// This allocator can theoretically be used on a 64-bit arch, but it is less
// efficient there than SizeClassAllocator64.
//
// [kSpaceBeg, kSpaceBeg + kSpaceSize) is the range of addresses which can
// be returned by MmapOrDie().
//
// Region:
// a result of a single call to MmapAlignedOrDieOnFatalError(kRegionSize,
// kRegionSize).
// Since the regions are aligned to kRegionSize, there are exactly
// kNumPossibleRegions possible regions in the address space, so we keep
// a ByteMap possible_regions to store the size class of each Region.
// A size class of 0 means the region is not used by the allocator.
//
// One Region is used to allocate chunks of a single size class.
// A Region looks like this:
// UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1
//
// In order to avoid false sharing, the objects of this class should be
// cache-line aligned.
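// An illustrative Params configuration (the struct name and the concrete
// values are hypothetical; each tool defines its own, and DefaultSizeClassMap,
// FlatByteMap and NoOpMapUnmapCallback are helper types provided elsewhere in
// sanitizer_common):
//
//   struct ExampleAP32 {
//     static const uptr kSpaceBeg = 0;
//     static const u64 kSpaceSize = 1ULL << 32;   // cover the 32-bit space
//     static const uptr kMetadataSize = 16;
//     typedef DefaultSizeClassMap SizeClassMap;
//     static const uptr kRegionSizeLog = 20;      // 1 MiB regions
//     typedef FlatByteMap<4096> ByteMap;          // kSpaceSize / kRegionSize
//     typedef NoOpMapUnmapCallback MapUnmapCallback;
//     static const uptr kFlags = 0;  // or SizeClassAllocator32FlagMasks bits
//   };
//   typedef SizeClassAllocator32<ExampleAP32> ExamplePrimary;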
struct SizeClassAllocator32FlagMasks { // Bit masks.
enum {
kRandomShuffleChunks = 1,
kUseSeparateSizeClassForBatch = 2,
};
};
template <class Params>
class SizeClassAllocator32 {
public:
static const uptr kSpaceBeg = Params::kSpaceBeg;
static const u64 kSpaceSize = Params::kSpaceSize;
static const uptr kMetadataSize = Params::kMetadataSize;
typedef typename Params::SizeClassMap SizeClassMap;
static const uptr kRegionSizeLog = Params::kRegionSizeLog;
typedef typename Params::ByteMap ByteMap;
typedef typename Params::MapUnmapCallback MapUnmapCallback;
static const bool kRandomShuffleChunks = Params::kFlags &
SizeClassAllocator32FlagMasks::kRandomShuffleChunks;
static const bool kUseSeparateSizeClassForBatch = Params::kFlags &
SizeClassAllocator32FlagMasks::kUseSeparateSizeClassForBatch;
struct TransferBatch {
static const uptr kMaxNumCached = SizeClassMap::kMaxNumCachedHint - 2;
void SetFromArray(uptr region_beg_unused, void *batch[], uptr count) {
count_ = count;
CHECK_LE(count_, kMaxNumCached);
for (uptr i = 0; i < count; i++)
batch_[i] = batch[i];
}
uptr Count() const { return count_; }
void Clear() { count_ = 0; }
void Add(void *ptr) {
batch_[count_++] = ptr;
CHECK_LE(count_, kMaxNumCached);
}
void CopyToArray(void *to_batch[]) {
for (uptr i = 0, n = Count(); i < n; i++)
to_batch[i] = batch_[i];
}
    // Returns how much memory is needed for a batch containing n elements.
static uptr AllocationSizeRequiredForNElements(uptr n) {
return sizeof(uptr) * 2 + sizeof(void *) * n;
}
static uptr MaxCached(uptr class_id) {
return Min(kMaxNumCached, SizeClassMap::MaxCachedHint(class_id));
}
TransferBatch *next;
private:
uptr count_;
void *batch_[kMaxNumCached];
};
static const uptr kBatchSize = sizeof(TransferBatch);
COMPILER_CHECK((kBatchSize & (kBatchSize - 1)) == 0);
COMPILER_CHECK(kBatchSize == SizeClassMap::kMaxNumCachedHint * sizeof(uptr));
static uptr ClassIdToSize(uptr class_id) {
return (class_id == SizeClassMap::kBatchClassID) ?
kBatchSize : SizeClassMap::Size(class_id);
}
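  // Worked example of the size invariants above (assuming 8-byte pointers and
  // a hypothetical SizeClassMap with kMaxNumCachedHint == 64): a TransferBatch
  // then holds `next`, `count_` and 62 cached pointers, i.e. 64 words =
  // 512 bytes, satisfying both COMPILER_CHECKs (512 is a power of two and
  // equals kMaxNumCachedHint * sizeof(uptr)). Consistently,
  // AllocationSizeRequiredForNElements(n) is 2 header words plus n pointers.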
typedef SizeClassAllocator32<Params> ThisT;
typedef SizeClassAllocator32LocalCache<ThisT> AllocatorCache;
void Init(s32 release_to_os_interval_ms) {
possible_regions.TestOnlyInit();
internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
}
s32 ReleaseToOSIntervalMs() const {
return kReleaseToOSIntervalNever;
}
void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
    // No-op here; this is currently only implemented in the 64-bit allocator.
}
void ForceReleaseToOS() {
    // Currently implemented in the 64-bit allocator only.
}
void *MapWithCallback(uptr size) {
void *res = MmapOrDie(size, "SizeClassAllocator32");
MapUnmapCallback().OnMap((uptr)res, size);
return res;
}
void UnmapWithCallback(uptr beg, uptr size) {
MapUnmapCallback().OnUnmap(beg, size);
UnmapOrDie(reinterpret_cast<void *>(beg), size);
}
static bool CanAllocate(uptr size, uptr alignment) {
return size <= SizeClassMap::kMaxSize &&
alignment <= SizeClassMap::kMaxSize;
}
void *GetMetaData(const void *p) {
CHECK(PointerIsMine(p));
uptr mem = reinterpret_cast<uptr>(p);
uptr beg = ComputeRegionBeg(mem);
uptr size = ClassIdToSize(GetSizeClass(p));
u32 offset = mem - beg;
uptr n = offset / (u32)size; // 32-bit division
uptr meta = (beg + kRegionSize) - (n + 1) * kMetadataSize;
return reinterpret_cast<void*>(meta);
}
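  // Worked example for the computation above (illustrative values:
  // kRegionSizeLog == 20, i.e. 1 MiB regions, chunk size 64, kMetadataSize
  // 16): a pointer at offset 640 into its region is chunk n == 10, so its
  // metadata occupies the 16 bytes starting at beg + 0x100000 - 11 * 16.
  // Metadata thus grows backwards from the region end, matching the
  // "UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1" layout above.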
NOINLINE TransferBatch *AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
uptr class_id) {
CHECK_LT(class_id, kNumClasses);
SizeClassInfo *sci = GetSizeClassInfo(class_id);
SpinMutexLock l(&sci->mutex);
if (sci->free_list.empty() &&
UNLIKELY(!PopulateFreeList(stat, c, sci, class_id)))
return nullptr;
CHECK(!sci->free_list.empty());
TransferBatch *b = sci->free_list.front();
sci->free_list.pop_front();
return b;
}
NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id,
TransferBatch *b) {
CHECK_LT(class_id, kNumClasses);
CHECK_GT(b->Count(), 0);
SizeClassInfo *sci = GetSizeClassInfo(class_id);
SpinMutexLock l(&sci->mutex);
sci->free_list.push_front(b);
}
uptr GetRegionBeginBySizeClass(uptr class_id) { return 0; }
bool PointerIsMine(const void *p) {
uptr mem = reinterpret_cast<uptr>(p);
if (mem < kSpaceBeg || mem >= kSpaceBeg + kSpaceSize)
return false;
return GetSizeClass(p) != 0;
}
uptr GetSizeClass(const void *p) {
return possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))];
}
void *GetBlockBegin(const void *p) {
CHECK(PointerIsMine(p));
uptr mem = reinterpret_cast<uptr>(p);
uptr beg = ComputeRegionBeg(mem);
uptr size = ClassIdToSize(GetSizeClass(p));
u32 offset = mem - beg;
u32 n = offset / (u32)size; // 32-bit division
uptr res = beg + (n * (u32)size);
return reinterpret_cast<void*>(res);
}
uptr GetActuallyAllocatedSize(void *p) {
CHECK(PointerIsMine(p));
return ClassIdToSize(GetSizeClass(p));
}
uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }
uptr TotalMemoryUsed() {
// No need to lock here.
uptr res = 0;
for (uptr i = 0; i < kNumPossibleRegions; i++)
if (possible_regions[i])
res += kRegionSize;
return res;
}
void TestOnlyUnmap() {
for (uptr i = 0; i < kNumPossibleRegions; i++)
if (possible_regions[i])
UnmapWithCallback((i * kRegionSize), kRegionSize);
}
// ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
// introspection API.
void ForceLock() {
for (uptr i = 0; i < kNumClasses; i++) {
GetSizeClassInfo(i)->mutex.Lock();
}
}
void ForceUnlock() {
for (int i = kNumClasses - 1; i >= 0; i--) {
GetSizeClassInfo(i)->mutex.Unlock();
}
}
// Iterate over all existing chunks.
// The allocator must be locked when calling this function.
void ForEachChunk(ForEachChunkCallback callback, void *arg) {
for (uptr region = 0; region < kNumPossibleRegions; region++)
if (possible_regions[region]) {
uptr chunk_size = ClassIdToSize(possible_regions[region]);
uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize);
uptr region_beg = region * kRegionSize;
for (uptr chunk = region_beg;
chunk < region_beg + max_chunks_in_region * chunk_size;
chunk += chunk_size) {
// Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
callback(chunk, arg);
}
}
}
void PrintStats() {
}
static uptr AdditionalSize() {
return 0;
}
typedef SizeClassMap SizeClassMapT;
static const uptr kNumClasses = SizeClassMap::kNumClasses;
private:
static const uptr kRegionSize = 1 << kRegionSizeLog;
static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;
struct SizeClassInfo {
SpinMutex mutex;
IntrusiveList<TransferBatch> free_list;
u32 rand_state;
char padding[kCacheLineSize - 2 * sizeof(uptr) -
sizeof(IntrusiveList<TransferBatch>)];
};
COMPILER_CHECK(sizeof(SizeClassInfo) == kCacheLineSize);
uptr ComputeRegionId(uptr mem) {
uptr res = mem >> kRegionSizeLog;
CHECK_LT(res, kNumPossibleRegions);
return res;
}
uptr ComputeRegionBeg(uptr mem) {
return mem & ~(kRegionSize - 1);
}
uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
CHECK_LT(class_id, kNumClasses);
uptr res = reinterpret_cast<uptr>(MmapAlignedOrDieOnFatalError(
kRegionSize, kRegionSize, "SizeClassAllocator32"));
if (UNLIKELY(!res))
return 0;
MapUnmapCallback().OnMap(res, kRegionSize);
stat->Add(AllocatorStatMapped, kRegionSize);
CHECK(IsAligned(res, kRegionSize));
possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
return res;
}
SizeClassInfo *GetSizeClassInfo(uptr class_id) {
CHECK_LT(class_id, kNumClasses);
return &size_class_info_array[class_id];
}
bool PopulateBatches(AllocatorCache *c, SizeClassInfo *sci, uptr class_id,
TransferBatch **current_batch, uptr max_count,
uptr *pointers_array, uptr count) {
    // If TransferBatches use a separate size class, chunks of that class do
    // not need to be shuffled.
if (kRandomShuffleChunks && (!kUseSeparateSizeClassForBatch ||
class_id != SizeClassMap::kBatchClassID))
RandomShuffle(pointers_array, count, &sci->rand_state);
TransferBatch *b = *current_batch;
for (uptr i = 0; i < count; i++) {
if (!b) {
b = c->CreateBatch(class_id, this, (TransferBatch*)pointers_array[i]);
if (UNLIKELY(!b))
return false;
b->Clear();
}
b->Add((void*)pointers_array[i]);
if (b->Count() == max_count) {
sci->free_list.push_back(b);
b = nullptr;
}
}
*current_batch = b;
return true;
}
bool PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
SizeClassInfo *sci, uptr class_id) {
uptr size = ClassIdToSize(class_id);
uptr reg = AllocateRegion(stat, class_id);
if (UNLIKELY(!reg))
return false;
if (kRandomShuffleChunks)
if (UNLIKELY(sci->rand_state == 0))
// The random state is initialized from ASLR (PIE) and time.
sci->rand_state = reinterpret_cast<uptr>(sci) ^ NanoTime();
uptr n_chunks = kRegionSize / (size + kMetadataSize);
uptr max_count = TransferBatch::MaxCached(class_id);
CHECK_GT(max_count, 0);
TransferBatch *b = nullptr;
const uptr kShuffleArraySize = 48;
uptr shuffle_array[kShuffleArraySize];
uptr count = 0;
for (uptr i = reg; i < reg + n_chunks * size; i += size) {
shuffle_array[count++] = i;
if (count == kShuffleArraySize) {
if (UNLIKELY(!PopulateBatches(c, sci, class_id, &b, max_count,
shuffle_array, count)))
return false;
count = 0;
}
}
if (count) {
if (UNLIKELY(!PopulateBatches(c, sci, class_id, &b, max_count,
shuffle_array, count)))
return false;
}
if (b) {
CHECK_GT(b->Count(), 0);
sci->free_list.push_back(b);
}
return true;
}
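  // To make PopulateFreeList's sizing concrete (illustrative values: 1 MiB
  // regions, chunk size 64, kMetadataSize 16, MaxCached(class_id) == 62):
  // the region yields n_chunks == 1048576 / 80 == 13107 chunks, which are
  // shuffled (when kRandomShuffleChunks is set) in groups of
  // kShuffleArraySize == 48 and packed into TransferBatches of at most 62
  // pointers each before being pushed onto sci->free_list.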
ByteMap possible_regions;
SizeClassInfo size_class_info_array[kNumClasses];
};

File diff suppressed because it is too large

Some files were not shown because too many files have changed in this diff