Revert "[asan] Fix misalignment of variables in fake stack frames" (#153139)

Reverts llvm/llvm-project#152819 due to buildbot failures
Thurston Dang 2025-08-11 22:31:10 -07:00 committed by GitHub
parent 927e19f5f3
commit 29ad073c6c
4 changed files with 16 additions and 61 deletions

@@ -54,34 +54,18 @@ FakeStack *FakeStack::Create(uptr stack_size_log) {
     stack_size_log = kMinStackSizeLog;
   if (stack_size_log > kMaxStackSizeLog)
     stack_size_log = kMaxStackSizeLog;
-  CHECK_LE(kMaxStackFrameSizeLog, stack_size_log);
   uptr size = RequiredSize(stack_size_log);
-  uptr padded_size = size + kMaxStackFrameSize;
-  void *true_res = reinterpret_cast<void *>(
-      flags()->uar_noreserve ? MmapNoReserveOrDie(padded_size, "FakeStack")
-                             : MmapOrDie(padded_size, "FakeStack"));
-  // GetFrame() requires the property that
-  // (res + kFlagsOffset + SizeRequiredForFlags(stack_size_log)) is aligned to
-  // kMaxStackFrameSize.
-  // We didn't use MmapAlignedOrDieOnFatalError, because it requires that the
-  // *size* is a power of 2, which is an overly strong condition.
-  static_assert(alignof(FakeStack) <= kMaxStackFrameSize);
   FakeStack *res = reinterpret_cast<FakeStack *>(
-      RoundUpTo(
-          (uptr)true_res + kFlagsOffset + SizeRequiredForFlags(stack_size_log),
-          kMaxStackFrameSize) -
-      kFlagsOffset - SizeRequiredForFlags(stack_size_log));
-  res->true_start = true_res;
+      flags()->uar_noreserve ? MmapNoReserveOrDie(size, "FakeStack")
+                             : MmapOrDie(size, "FakeStack"));
   res->stack_size_log_ = stack_size_log;
   u8 *p = reinterpret_cast<u8 *>(res);
   VReport(1,
           "T%d: FakeStack created: %p -- %p stack_size_log: %zd; "
-          "mmapped %zdK, noreserve=%d, true_start: %p, start of first frame: "
-          "0x%zx\n",
+          "mmapped %zdK, noreserve=%d \n",
           GetCurrentTidOrInvalid(), (void *)p,
           (void *)(p + FakeStack::RequiredSize(stack_size_log)), stack_size_log,
-          size >> 10, flags()->uar_noreserve, res->true_start,
-          res->GetFrame(stack_size_log, /*class_id*/ 0, /*pos*/ 0));
+          size >> 10, flags()->uar_noreserve);
   return res;
 }
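For context, the rounding arithmetic removed in the hunk above can be checked in isolation. The sketch below is a minimal standalone program, not sanitizer code: RoundUpTo, the constants, and the mmap result are local stand-ins mirroring the names in the diff, assuming stack_size_log == 16 as in the regression test further down.

#include <cassert>
#include <cstdint>
#include <cstdio>

using uptr = uintptr_t;

// Stand-in constants mirroring the names used in the diff.
constexpr uptr kMinStackFrameSizeLog = 6;
constexpr uptr kMaxStackFrameSizeLog = 16;
constexpr uptr kMaxStackFrameSize = uptr(1) << kMaxStackFrameSizeLog; // 64K
constexpr uptr kFlagsOffset = 4096;

// Local re-implementation of RoundUpTo, for illustration only.
constexpr uptr RoundUpTo(uptr x, uptr boundary) {
  return (x + boundary - 1) & ~(boundary - 1);
}

constexpr uptr SizeRequiredForFlags(uptr stack_size_log) {
  return uptr(1) << (stack_size_log + 1 - kMinStackFrameSizeLog);
}

int main() {
  const uptr stack_size_log = 16;
  // Pretend mmap returned a page-aligned but not 64K-aligned address.
  const uptr true_res = 0x7f0000003000;
  // The reverted fix shifted the FakeStack start so that
  // (res + kFlagsOffset + SizeRequiredForFlags()) lands on a 64K boundary,
  // which is where the first fake frame (class_id 0, pos 0) begins.
  const uptr res =
      RoundUpTo(true_res + kFlagsOffset + SizeRequiredForFlags(stack_size_log),
                kMaxStackFrameSize) -
      kFlagsOffset - SizeRequiredForFlags(stack_size_log);
  const uptr first_frame =
      res + kFlagsOffset + SizeRequiredForFlags(stack_size_log);
  assert(first_frame % kMaxStackFrameSize == 0); // holds for any true_res
  printf("res=%#zx first_frame=%#zx\n", (size_t)res, (size_t)first_frame);
  return 0;
}

With the revert applied, res is simply the raw mmap result, so the first frame only inherits page alignment rather than kMaxStackFrameSize alignment.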
@@ -95,10 +79,8 @@ void FakeStack::Destroy(int tid) {
     Report("T%d: FakeStack destroyed: %s\n", tid, str.data());
   }
   uptr size = RequiredSize(stack_size_log_);
-  uptr padded_size = size + kMaxStackFrameSize;
-  FlushUnneededASanShadowMemory(reinterpret_cast<uptr>(true_start),
-                                padded_size);
-  UnmapOrDie(true_start, padded_size);
+  FlushUnneededASanShadowMemory(reinterpret_cast<uptr>(this), size);
+  UnmapOrDie(this, size);
 }

 void FakeStack::PoisonAll(u8 magic) {

@@ -32,12 +32,12 @@ struct FakeFrame {
 // is not popped but remains there for quite some time until gets used again.
 // So, we poison the objects on the fake stack when function returns.
 // It helps us find use-after-return bugs.
+//
 // The FakeStack objects is allocated by a single mmap call and has no other
 // pointers. The size of the fake stack depends on the actual thread stack size
 // and thus can not be a constant.
 // stack_size is a power of two greater or equal to the thread's stack size;
 // we store it as its logarithm (stack_size_log).
-// FakeStack is padded such that GetFrame() is aligned to BytesInSizeClass().
 // FakeStack has kNumberOfSizeClasses (11) size classes, each size class
 // is a power of two, starting from 64 bytes. Each size class occupies
 // stack_size bytes and thus can allocate
@@ -56,9 +56,6 @@ struct FakeFrame {
 class FakeStack {
   static const uptr kMinStackFrameSizeLog = 6; // Min frame is 64B.
   static const uptr kMaxStackFrameSizeLog = 16; // Max stack frame is 64K.
-  static_assert(kMaxStackFrameSizeLog >= kMinStackFrameSizeLog);
-  static const u64 kMaxStackFrameSize = 1 << kMaxStackFrameSizeLog;

  public:
   static const uptr kNumberOfSizeClasses =
@@ -69,7 +66,7 @@ class FakeStack {
   void Destroy(int tid);

-  // min_uar_stack_size_log is 16 (stack_size >= 64KB)
+  // stack_size_log is at least 15 (stack_size >= 32K).
   static uptr SizeRequiredForFlags(uptr stack_size_log) {
     return ((uptr)1) << (stack_size_log + 1 - kMinStackFrameSizeLog);
   }
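The two comment variants in this hunk are easier to compare with the numbers written out. A minimal standalone check of the formula (re-declared locally, values only for illustration):

#include <cstdint>
#include <cstdio>

using uptr = uintptr_t;
constexpr uptr kMinStackFrameSizeLog = 6;

// Same formula as in the hunk above, re-declared here for a standalone check.
constexpr uptr SizeRequiredForFlags(uptr stack_size_log) {
  return uptr(1) << (stack_size_log + 1 - kMinStackFrameSizeLog);
}

static_assert(SizeRequiredForFlags(15) == 1024, "stack_size >= 32K case");
static_assert(SizeRequiredForFlags(16) == 2048, "stack_size >= 64K case");

int main() {
  // 2048 is not a multiple of 4096, so kFlagsOffset (4096) plus
  // SizeRequiredForFlags(16) ends up 2K past a page boundary; once the padding
  // from the now-reverted fix is gone, GetFrame() is not 4K-aligned even
  // though mmap results are.
  printf("%zu %zu\n", (size_t)SizeRequiredForFlags(15),
         (size_t)SizeRequiredForFlags(16));
  return 0;
}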
@@ -113,28 +110,6 @@
   }

   // Get frame by class_id and pos.
-  // Return values are guaranteed to be aligned to BytesInSizeClass(class_id),
-  // which is useful in combination with
-  // ASanStackFrameLayout::ComputeASanStackFrameLayout().
-  //
-  // Note that alignment to 1<<kMaxStackFrameSizeLog (aka
-  // BytesInSizeClass(max_class_id)) implies alignment to BytesInSizeClass()
-  // for any class_id, since the class sizes are increasing powers of 2.
-  //
-  // 1) (this + kFlagsOffset + SizeRequiredForFlags())) is aligned to
-  //    1<<kMaxStackFrameSizeLog (see FakeStack::Create)
-  //
-  //    Note that SizeRequiredForFlags(16) == 2048. If FakeStack::Create() had
-  //    merely returned an address from mmap (4K-aligned), the addition would
-  //    not be 4K-aligned.
-  // 2) We know that stack_size_log >= kMaxStackFrameSizeLog (otherwise you
-  //    couldn't store a single frame of that size in the entire stack)
-  //    hence (1<<stack_size_log) is aligned to 1<<kMaxStackFrameSizeLog
-  //    and ((1<<stack_size_log) * class_id) is aligned to
-  //    1<<kMaxStackFrameSizeLog
-  // 3) BytesInSizeClass(class_id) * pos is aligned to
-  //    BytesInSizeClass(class_id)
-  // The sum of these is aligned to BytesInSizeClass(class_id).
   u8 *GetFrame(uptr stack_size_log, uptr class_id, uptr pos) {
     return reinterpret_cast<u8 *>(this) + kFlagsOffset +
            SizeRequiredForFlags(stack_size_log) +
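The deleted comment block above is an argument in three steps, and it can be spot-checked numerically. The following is an illustrative standalone sketch (constants and the fake this pointer are chosen here, not taken from the runtime), assuming premise (1) holds, i.e. the padding from FakeStack::Create() is still in place:

#include <cassert>
#include <cstdint>

using uptr = uintptr_t;

constexpr uptr kMinStackFrameSizeLog = 6;
constexpr uptr kMaxStackFrameSizeLog = 16;
constexpr uptr kNumberOfSizeClasses =
    kMaxStackFrameSizeLog - kMinStackFrameSizeLog + 1; // 11
constexpr uptr kFlagsOffset = 4096;

constexpr uptr BytesInSizeClass(uptr class_id) {
  return uptr(1) << (class_id + kMinStackFrameSizeLog);
}
constexpr uptr SizeRequiredForFlags(uptr stack_size_log) {
  return uptr(1) << (stack_size_log + 1 - kMinStackFrameSizeLog);
}

int main() {
  const uptr stack_size_log = 20; // any value >= kMaxStackFrameSizeLog
  // Choose a hypothetical 'this' so that premise (1) holds:
  // this + kFlagsOffset + SizeRequiredForFlags() is 64K-aligned.
  const uptr fake_this =
      0x7f0000010000 - kFlagsOffset - SizeRequiredForFlags(stack_size_log);
  const uptr positions[] = {0, 1, 7, 15};
  for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) {
    for (uptr pos : positions) {
      // Same address computation as GetFrame() in the hunk above.
      uptr frame = fake_this + kFlagsOffset +
                   SizeRequiredForFlags(stack_size_log) +
                   (uptr(1) << stack_size_log) * class_id +
                   BytesInSizeClass(class_id) * pos;
      // Steps (2) and (3) of the deleted comment: every term is a multiple of
      // BytesInSizeClass(class_id), so the sum is too.
      assert(frame % BytesInSizeClass(class_id) == 0);
    }
  }
  return 0;
}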
@@ -181,18 +156,15 @@
  private:
   FakeStack() { }
-  static const uptr kFlagsOffset = 4096; // This is where the flags begin.
+  static const uptr kFlagsOffset = 4096; // This is were the flags begin.
   // Must match the number of uses of DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID
   COMPILER_CHECK(kNumberOfSizeClasses == 11);
   static const uptr kMaxStackMallocSize = ((uptr)1) << kMaxStackFrameSizeLog;
   uptr hint_position_[kNumberOfSizeClasses];
   uptr stack_size_log_;
   // a bit is set if something was allocated from the corresponding size class.
   bool needs_gc_;
-  // We allocated more memory than needed to ensure the FakeStack (and, by
-  // extension, each of the fake stack frames) is aligned. We keep track of the
-  // true start so that we can unmap it.
-  void *true_start;
 };

 FakeStack *GetTLSFakeStack();

@@ -113,7 +113,6 @@ TEST(FakeStack, Allocate) {
       uptr bytes_in_class = FakeStack::BytesInSizeClass(cid);
       for (uptr j = 0; j < n; j++) {
        FakeFrame *ff = fs->Allocate(stack_size_log, cid, 0);
-        EXPECT_EQ(reinterpret_cast<uptr>(ff) % bytes_in_class, 0U);
        uptr x = reinterpret_cast<uptr>(ff);
        EXPECT_TRUE(s.insert(std::make_pair(ff, cid)).second);
        EXPECT_EQ(x, fs->AddrIsInFakeStack(x));

@@ -1,11 +1,11 @@
 // Regression test 1:
-// When the stack size is 1<<16, SizeRequiredForFlags(16) == 2KB. This forces
-// FakeStack's GetFrame() out of alignment if the FakeStack isn't padded.
+// This deterministically fails: when the stack size is 1<<16, FakeStack's
+// GetFrame() is out of alignment, because SizeRequiredForFlags(16) == 2K.
 // RUN: %clangxx_asan -fsanitize-address-use-after-return=always -O0 -DALIGNMENT=4096 -DTHREAD_COUNT=1 -DTHREAD_STACK_SIZE=65536 %s -o %t && %run %t 2>&1

 // Regression test 2:
-// Check that the FakeStack frame is aligned, beyond the typical 4KB page
-// alignment. Alignment can happen by chance, so try this on many threads.
+// The FakeStack frame is not guaranteed to be aligned, but alignment can
+// happen by chance, so try this on many threads.
 // RUN: %clangxx_asan -fsanitize-address-use-after-return=always -O0 -DALIGNMENT=8192 -DTHREAD_COUNT=32 -DTHREAD_STACK_SIZE=131072 %s -o %t && %run %t 2>&1
 // RUN: %clangxx_asan -fsanitize-address-use-after-return=always -O0 -DALIGNMENT=16384 -DTHREAD_COUNT=32 -DTHREAD_STACK_SIZE=131072 %s -o %t && %run %t 2>&1
@@ -17,6 +17,8 @@
 // RUN: %clangxx_asan -fsanitize-address-use-after-return=always -O0 -DALIGNMENT=8192 -DTHREAD_COUNT=32 -DTHREAD_STACK_SIZE=131072 %s -o %t && %run %t 2>&1
 // RUN: %clangxx_asan -fsanitize-address-use-after-return=always -O0 -DALIGNMENT=16384 -DTHREAD_COUNT=32 -DTHREAD_STACK_SIZE=131072 %s -o %t && %run %t 2>&1
+
+// XFAIL: *
 #include <assert.h>
 #include <pthread.h>
 #include <stdio.h>
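The hunks above only show the test's RUN lines and includes, not its body. Purely as a hedged illustration of what the ALIGNMENT, THREAD_COUNT, and THREAD_STACK_SIZE parameters plug into, a program of the same general shape could look like the sketch below; this is a guess at the structure, not the actual test source.

// Hypothetical sketch only; the real test body is not shown in this diff.
#include <assert.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#ifndef ALIGNMENT
#  define ALIGNMENT 4096
#endif
#ifndef THREAD_COUNT
#  define THREAD_COUNT 1
#endif
#ifndef THREAD_STACK_SIZE
#  define THREAD_STACK_SIZE 65536
#endif

static void *Worker(void *) {
  // With -fsanitize-address-use-after-return=always this local lives in a
  // fake stack frame, so its address depends on FakeStack frame placement.
  alignas(ALIGNMENT) char buf[ALIGNMENT];
  buf[0] = 0;
  assert((uintptr_t)buf % ALIGNMENT == 0);
  return nullptr;
}

int main() {
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setstacksize(&attr, THREAD_STACK_SIZE);
  pthread_t threads[THREAD_COUNT];
  for (int i = 0; i < THREAD_COUNT; i++)
    pthread_create(&threads[i], &attr, Worker, nullptr);
  for (int i = 0; i < THREAD_COUNT; i++)
    pthread_join(threads[i], nullptr);
  pthread_attr_destroy(&attr);
  printf("Done.\n");
  return 0;
}

Per the restored comments above, after this revert the ALIGNMENT=4096 / 64K-stack variant fails deterministically and the larger alignments can only pass by chance, which is why the test is marked XFAIL.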