llvm-project/compiler-rt/lib/hwasan/hwasan_thread.cpp
Tomahawkd c97322d048
[compiler-rt][hwasan] Add fiber switch for HwASan (#153822)
Currently HwASan has no fiber-switch interface for coroutines. This PR
adds fiber-switch interfaces similar to ASan's, which allows the sp
check on unwinding to pass correctly.

The only difference is that HwASan does not need a fake stack, since tags
achieve the same thing (e.g., detecting use-after-return). The interfaces
are kept identical to ASan's.

Also adds a unit test, similar to ASan's with minor adjustments:

1. change `__asan_handle_no_return` to `__hwasan_handle_vfork`
2. remove the huge-stack test, since `__hwasan_handle_vfork` has no stack
size limitation.
3. use uninstrumented globals to simulate allocations, since HwASan does
not support tagged pointers when using `longjmp`.

The test case was run on both x86 (with alias mode enabled) and AArch64.
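
For reference, a minimal sketch of how a coroutine library is expected to
drive the interface (not part of this PR; `fiber_fn` and the ucontext
plumbing are illustrative, and the fake-stack argument that ASan would
save/restore is simply passed as `nullptr` since HwASan ignores it):

```c++
#include <sanitizer/common_interface_defs.h>
#include <ucontext.h>

static ucontext_t main_ctx, fiber_ctx;
static char fiber_stack[1 << 16];
static const void *main_bottom;
static size_t main_size;

static void fiber_fn() {
  // Commit the switch and remember the old (main) stack bounds.
  __sanitizer_finish_switch_fiber(nullptr, &main_bottom, &main_size);
  // ... fiber body ...
  // Announce the switch back to the main stack.
  __sanitizer_start_switch_fiber(nullptr, main_bottom, main_size);
  swapcontext(&fiber_ctx, &main_ctx);
}

int main() {
  getcontext(&fiber_ctx);
  fiber_ctx.uc_stack.ss_sp = fiber_stack;
  fiber_ctx.uc_stack.ss_size = sizeof(fiber_stack);
  fiber_ctx.uc_link = &main_ctx;
  makecontext(&fiber_ctx, fiber_fn, 0);

  // Announce the switch onto the fiber's stack before actually switching.
  __sanitizer_start_switch_fiber(nullptr, fiber_stack, sizeof(fiber_stack));
  swapcontext(&main_ctx, &fiber_ctx);
  // Back on the main stack: commit the switch.
  __sanitizer_finish_switch_fiber(nullptr, nullptr, nullptr);
}
```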
2025-08-19 17:48:44 -07:00

304 lines
9.2 KiB
C++

#include "hwasan_thread.h"
#include "hwasan.h"
#include "hwasan_interface_internal.h"
#include "hwasan_mapping.h"
#include "hwasan_poisoning.h"
#include "hwasan_thread_list.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_file.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"

namespace __hwasan {

static u32 RandomSeed() {
  u32 seed;
  do {
    if (UNLIKELY(!GetRandom(reinterpret_cast<void *>(&seed), sizeof(seed),
                            /*blocking=*/false))) {
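      // GetRandom failed (or would have blocked); fall back to a weak seed
      // derived from the clock and the current frame address.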
      seed = static_cast<u32>(
          (NanoTime() >> 12) ^
          (reinterpret_cast<uptr>(__builtin_frame_address(0)) >> 4));
    }
  } while (!seed);
  return seed;
}

void Thread::InitRandomState() {
  random_state_ = flags()->random_tags ? RandomSeed() : unique_id_;
  random_state_inited_ = true;

  // Push a random number of zeros onto the ring buffer so that the first stack
  // tag base will be random.
  for (tag_t i = 0, e = GenerateRandomTag(); i != e; ++i)
    stack_allocations_->push(0);
}

void Thread::Init(uptr stack_buffer_start, uptr stack_buffer_size,
                  const InitState *state) {
  CHECK_EQ(0, unique_id_);  // try to catch bad stack reuse
  CHECK_EQ(0, stack_top_);
  CHECK_EQ(0, stack_bottom_);

  static atomic_uint64_t unique_id;
  unique_id_ = atomic_fetch_add(&unique_id, 1, memory_order_relaxed);
  if (!IsMainThread())
    os_id_ = GetTid();

  if (auto sz = flags()->heap_history_size)
    heap_allocations_ = HeapAllocationsRingBuffer::New(sz);

#if !SANITIZER_FUCHSIA
  // Do not initialize the stack ring buffer just yet on Fuchsia. Threads will
  // be initialized before we enter the thread itself, so we will instead call
  // this later.
  InitStackRingBuffer(stack_buffer_start, stack_buffer_size);
#endif
  InitStackAndTls(state);
  dtls_ = DTLS_Get();
  AllocatorThreadStart(allocator_cache());

  if (flags()->verbose_threads) {
    if (IsMainThread()) {
      Printf("sizeof(Thread): %zd sizeof(HeapRB): %zd sizeof(StackRB): %zd\n",
             sizeof(Thread), heap_allocations_->SizeInBytes(),
             stack_allocations_->size() * sizeof(uptr));
    }
    Print("Creating  : ");
  }
  ClearShadowForThreadStackAndTLS();
}

void Thread::InitStackRingBuffer(uptr stack_buffer_start,
                                 uptr stack_buffer_size) {
  HwasanTSDThreadInit();  // Only needed with interceptors.
  uptr *ThreadLong = GetCurrentThreadLongPtr();
  // The following implicitly sets (this) as the current thread.
  stack_allocations_ = new (ThreadLong)
      StackAllocationsRingBuffer((void *)stack_buffer_start, stack_buffer_size);
  // Check that it worked.
  CHECK_EQ(GetCurrentThread(), this);

  // ScopedTaggingDisabler needs GetCurrentThread to be set up.
  ScopedTaggingDisabler disabler;

  if (stack_bottom_) {
    int local;
    CHECK(AddrIsInStack((uptr)&local));
    CHECK(MemIsApp(stack_bottom_));
    CHECK(MemIsApp(stack_top_ - 1));
  }
}

void Thread::ClearShadowForThreadStackAndTLS() {
  if (stack_top_ != stack_bottom_)
    TagMemory(UntagAddr(stack_bottom_),
              UntagAddr(stack_top_) - UntagAddr(stack_bottom_),
              GetTagFromPointer(stack_top_));
  if (tls_begin_ != tls_end_)
    TagMemory(UntagAddr(tls_begin_),
              UntagAddr(tls_end_) - UntagAddr(tls_begin_),
              GetTagFromPointer(tls_begin_));
}

void Thread::Destroy() {
  if (flags()->verbose_threads)
    Print("Destroying: ");
  AllocatorThreadFinish(allocator_cache());
  ClearShadowForThreadStackAndTLS();
  if (heap_allocations_)
    heap_allocations_->Delete();
  DTLS_Destroy();
  // Unregister this as the current thread.
  // Instrumented code cannot run on this thread from this point onwards, but
  // malloc/free can still be served. Glibc may call free() very late, after
  // all TSD destructors are done.
  CHECK_EQ(GetCurrentThread(), this);
  *GetCurrentThreadLongPtr() = 0;
}
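
// Fiber-switch support, mirroring ASan's: StartSwitchFiber records the target
// fiber's stack bounds, and FinishSwitchFiber (called once on the new stack)
// installs them, optionally handing back the old bounds so the caller can
// restore them on the way back. Unlike ASan there is no fake stack to
// save/restore; tags already catch use-after-return.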
void Thread::StartSwitchFiber(uptr bottom, uptr size) {
  if (atomic_load(&stack_switching_, memory_order_acquire)) {
    Report("ERROR: starting fiber switch while in fiber switch\n");
    Die();
  }

  next_stack_bottom_ = bottom;
  next_stack_top_ = bottom + size;
  atomic_store(&stack_switching_, 1, memory_order_release);
}

void Thread::FinishSwitchFiber(uptr *bottom_old, uptr *size_old) {
  if (!atomic_load(&stack_switching_, memory_order_acquire)) {
    Report("ERROR: finishing a fiber switch that has not started\n");
    Die();
  }

  if (bottom_old)
    *bottom_old = stack_bottom_;
  if (size_old)
    *size_old = stack_top_ - stack_bottom_;
  stack_bottom_ = next_stack_bottom_;
  stack_top_ = next_stack_top_;
  atomic_store(&stack_switching_, 0, memory_order_release);
  next_stack_top_ = 0;
  next_stack_bottom_ = 0;
}

inline Thread::StackBounds Thread::GetStackBounds() const {
  if (!atomic_load(&stack_switching_, memory_order_acquire)) {
    // Make sure the stack bounds are fully initialized.
    if (stack_bottom_ >= stack_top_)
      return {0, 0};
    return {stack_bottom_, stack_top_};
  }
  const uptr cur_stack = (uptr)__builtin_frame_address(0);
  // Note: the next stack must be checked first, because FinishSwitchFiber
  // may be in the process of overwriting stack_top_/stack_bottom_. But in
  // that case we are already on the next stack.
  if (cur_stack >= next_stack_bottom_ && cur_stack < next_stack_top_)
    return {next_stack_bottom_, next_stack_top_};
  return {stack_bottom_, stack_top_};
}

uptr Thread::stack_top() { return GetStackBounds().top; }

uptr Thread::stack_bottom() { return GetStackBounds().bottom; }

uptr Thread::stack_size() {
  const auto bounds = GetStackBounds();
  return bounds.top - bounds.bottom;
}

void Thread::Print(const char *Prefix) {
  Printf("%sT%zd %p stack: [%p,%p) sz: %zd tls: [%p,%p)\n", Prefix, unique_id_,
         (void *)this, stack_bottom(), stack_top(),
         stack_top() - stack_bottom(), tls_begin(), tls_end());
}
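
// xorshift32 PRNG (Marsaglia's 13/17/5 shift triple). A nonzero state never
// maps to zero, so successive states cycle with period 2^32 - 1.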
static u32 xorshift(u32 state) {
  state ^= state << 13;
  state ^= state >> 17;
  state ^= state << 5;
  return state;
}

// Generate a (pseudo-)random non-zero tag.
tag_t Thread::GenerateRandomTag(uptr num_bits) {
  DCHECK_GT(num_bits, 0);
  if (tagging_disabled_)
    return 0;
  tag_t tag;
  const uptr tag_mask = (1ULL << num_bits) - 1;
  do {
    if (flags()->random_tags) {
      if (!random_buffer_) {
        EnsureRandomStateInited();
        random_buffer_ = random_state_ = xorshift(random_state_);
      }
      CHECK(random_buffer_);
      tag = random_buffer_ & tag_mask;
      random_buffer_ >>= num_bits;
    } else {
      EnsureRandomStateInited();
      random_state_ += 1;
      tag = random_state_ & tag_mask;
    }
  } while (!tag);
  return tag;
}
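
// The main thread's os_id may be stale (e.g., the tid changes across fork()
// without exec()), so refresh it here.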
void EnsureMainThreadIDIsCorrect() {
  auto *t = __hwasan::GetCurrentThread();
  if (t && (t->IsMainThread()))
    t->set_os_id(GetTid());
}

}  // namespace __hwasan

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {

static __hwasan::HwasanThreadList *GetHwasanThreadListLocked() {
  auto &tl = __hwasan::hwasanThreadList();
  tl.CheckLocked();
  return &tl;
}

static __hwasan::Thread *GetThreadByOsIDLocked(ThreadID os_id) {
  return GetHwasanThreadListLocked()->FindThreadLocked(
      [os_id](__hwasan::Thread *t) { return t->os_id() == os_id; });
}
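
// Both locks are taken while LSan inspects threads; UnlockThreads releases
// them in the reverse order of acquisition.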
void LockThreads() {
  __hwasan::hwasanThreadList().Lock();
  __hwasan::hwasanThreadArgRetval().Lock();
}

void UnlockThreads() {
  __hwasan::hwasanThreadArgRetval().Unlock();
  __hwasan::hwasanThreadList().Unlock();
}

void EnsureMainThreadIDIsCorrect() { __hwasan::EnsureMainThreadIDIsCorrect(); }

bool GetThreadRangesLocked(ThreadID os_id, uptr *stack_begin, uptr *stack_end,
                           uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
                           uptr *cache_end, DTLS **dtls) {
  auto *t = GetThreadByOsIDLocked(os_id);
  if (!t)
    return false;
  *stack_begin = t->stack_bottom();
  *stack_end = t->stack_top();
  *tls_begin = t->tls_begin();
  *tls_end = t->tls_end();
  // FIXME: is this correct for HWASan?
  *cache_begin = 0;
  *cache_end = 0;
  *dtls = t->dtls();
  return true;
}

void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches) {}

void GetThreadExtraStackRangesLocked(ThreadID os_id,
                                     InternalMmapVector<Range> *ranges) {}
void GetThreadExtraStackRangesLocked(InternalMmapVector<Range> *ranges) {}

void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs) {
  __hwasan::hwasanThreadArgRetval().GetAllPtrsLocked(ptrs);
}

void GetRunningThreadsLocked(InternalMmapVector<ThreadID> *threads) {
  // TODO: implement.
}

void PrintThreads() {
  // TODO: implement.
}

}  // namespace __lsan

// ---------------------- Interface ---------------- {{{1
using namespace __hwasan;

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_start_switch_fiber(void **, const void *bottom, uptr size) {
  if (auto *t = GetCurrentThread())
    t->StartSwitchFiber((uptr)bottom, size);
  else
    VReport(1, "__hwasan_start_switch_fiber called from unknown thread\n");
}

SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_finish_switch_fiber(void *, const void **bottom_old,
                                     uptr *size_old) {
  if (auto *t = GetCurrentThread())
    t->FinishSwitchFiber((uptr *)bottom_old, size_old);
  else
    VReport(1, "__hwasan_finish_switch_fiber called from unknown thread\n");
}
}  // extern "C"