
The GWP-ASan recoverable mode allows a process to continue to function after a GWP-ASan error is detected. The error is still dumped, but GWP-ASan now exposes APIs that a signal handler (such as the optional example crash handler) can call in order to let the process continue.

When an error occurs on an allocation, the slot used for that allocation is permanently disabled. This means that free() of that pointer is a no-op, and use-after-frees will succeed (reading and writing whatever data is present in the page). For heap-buffer-overflow/underflow, the guard page is marked accessible, so buffer overflows will succeed (reading and writing the data present in the now-accessible guard page). This does affect adjacent allocations: buffer underflows and overflows from adjacent allocations will no longer touch an inaccessible guard page. This could be improved in the future by placing two guard pages between adjacent allocations, but that is out of scope for this patch.

Each allocation only ever has a single error report generated: whichever of invalid-free, double-free, use-after-free, or heap-buffer-overflow came first, but only one.

Reviewed By: eugenis, fmayer

Differential Revision: https://reviews.llvm.org/D140173
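
As a rough illustration of how an embedder's signal handler might drive the APIs defined in the file below, here is a minimal sketch; it is not part of the upstream sources. It assumes the declarations from gwp_asan/crash_handler.h and that the embedder already holds pointers to the allocator's State and Metadata regions (the extern names here are placeholders). Installing the handler with sigaction, chaining to a previously-installed handler, and the allocator-side hook that disarms the faulting slot in recoverable mode are all omitted.

#include "gwp_asan/common.h"
#include "gwp_asan/crash_handler.h"

#include <signal.h>
#include <stdint.h>
#include <stdio.h>

// Assumed to be supplied by the embedder from the running allocator.
extern const gwp_asan::AllocatorState *State;
extern const gwp_asan::AllocationMetadata *Metadata;

static void HandleSegv(int, siginfo_t *Info, void *) {
  uintptr_t FaultAddr = reinterpret_cast<uintptr_t>(Info->si_addr);

  // Internally-detected errors (e.g. double-free) fault on a dedicated page;
  // translate that back to the real failure address when applicable.
  if (uintptr_t Internal =
          __gwp_asan_get_internal_crash_address(State, FaultAddr))
    FaultAddr = Internal;

  if (!__gwp_asan_error_is_mine(State, FaultAddr))
    return; // Not a GWP-ASan fault; a real handler would chain to the
            // previously-installed handler here (omitted).

  // fprintf is used only to keep the sketch short; a production handler would
  // use an async-signal-safe printer.
  gwp_asan::Error E = __gwp_asan_diagnose_error(State, Metadata, FaultAddr);
  if (const gwp_asan::AllocationMetadata *Meta =
          __gwp_asan_get_metadata(State, Metadata, FaultAddr))
    fprintf(stderr,
            "GWP-ASan error %d at 0x%llx, allocation of size %zu at 0x%llx\n",
            static_cast<int>(E), static_cast<unsigned long long>(FaultAddr),
            __gwp_asan_get_allocation_size(Meta),
            static_cast<unsigned long long>(
                __gwp_asan_get_allocation_address(Meta)));

  // In recoverable mode, returning here (after asking the allocator to disarm
  // the faulting slot) lets the process continue running.
}
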
//===-- crash_handler.cpp ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "gwp_asan/common.h"
#include "gwp_asan/stack_trace_compressor.h"

#include <assert.h>
#include <stdint.h>
#include <string.h>

using AllocationMetadata = gwp_asan::AllocationMetadata;
using Error = gwp_asan::Error;

#ifdef __cplusplus
extern "C" {
#endif

bool __gwp_asan_error_is_mine(const gwp_asan::AllocatorState *State,
                              uintptr_t ErrorPtr) {
  assert(State && "State should not be nullptr.");
  if (State->FailureType != Error::UNKNOWN && State->FailureAddress != 0)
    return true;

  return ErrorPtr < State->GuardedPagePoolEnd &&
         State->GuardedPagePool <= ErrorPtr;
}

uintptr_t
__gwp_asan_get_internal_crash_address(const gwp_asan::AllocatorState *State,
                                      uintptr_t ErrorPtr) {
  // There can be a race between internally- and externally-raised faults. The
  // fault address from the signal handler is used to discriminate whether it's
  // internally- or externally-raised, and the pool maintains a special page at
  // the end of the GuardedPagePool specifically for the internally-raised
  // faults.
  if (ErrorPtr != State->internallyDetectedErrorFaultAddress())
    return 0u;
  return State->FailureAddress;
}

static const AllocationMetadata *
addrToMetadata(const gwp_asan::AllocatorState *State,
               const AllocationMetadata *Metadata, uintptr_t Ptr) {
  // Note - Similar implementation in guarded_pool_allocator.cpp.
  return &Metadata[State->getNearestSlot(Ptr)];
}

gwp_asan::Error
__gwp_asan_diagnose_error(const gwp_asan::AllocatorState *State,
                          const gwp_asan::AllocationMetadata *Metadata,
                          uintptr_t ErrorPtr) {
  if (!__gwp_asan_error_is_mine(State, ErrorPtr))
    return Error::UNKNOWN;

  if (State->FailureType != Error::UNKNOWN)
    return State->FailureType;

  // Check for use-after-free.
  if (addrToMetadata(State, Metadata, ErrorPtr)->IsDeallocated)
    return Error::USE_AFTER_FREE;

  // Check for buffer-overflow. Because of allocation alignment or left/right
  // page placement, we can have buffer-overflows that don't touch a guarded
  // page, but these are not possible to detect unless it's also a
  // use-after-free, which is handled above.
  if (State->isGuardPage(ErrorPtr)) {
    size_t Slot = State->getNearestSlot(ErrorPtr);
    const AllocationMetadata *SlotMeta =
        addrToMetadata(State, Metadata, State->slotToAddr(Slot));

    // Ensure that this slot was allocated once upon a time.
    if (!SlotMeta->Addr)
      return Error::UNKNOWN;

    if (SlotMeta->Addr < ErrorPtr)
      return Error::BUFFER_OVERFLOW;
    return Error::BUFFER_UNDERFLOW;
  }

  // If we have reached here, the error is still unknown.
  return Error::UNKNOWN;
}

const gwp_asan::AllocationMetadata *
__gwp_asan_get_metadata(const gwp_asan::AllocatorState *State,
                        const gwp_asan::AllocationMetadata *Metadata,
                        uintptr_t ErrorPtr) {
  if (!__gwp_asan_error_is_mine(State, ErrorPtr))
    return nullptr;

  if (ErrorPtr >= State->GuardedPagePoolEnd ||
      State->GuardedPagePool > ErrorPtr)
    return nullptr;

  const AllocationMetadata *Meta = addrToMetadata(State, Metadata, ErrorPtr);
  if (Meta->Addr == 0)
    return nullptr;

  return Meta;
}

uintptr_t __gwp_asan_get_allocation_address(
    const gwp_asan::AllocationMetadata *AllocationMeta) {
  return AllocationMeta->Addr;
}

size_t __gwp_asan_get_allocation_size(
    const gwp_asan::AllocationMetadata *AllocationMeta) {
  return AllocationMeta->RequestedSize;
}

uint64_t __gwp_asan_get_allocation_thread_id(
    const gwp_asan::AllocationMetadata *AllocationMeta) {
  return AllocationMeta->AllocationTrace.ThreadID;
}

size_t __gwp_asan_get_allocation_trace(
    const gwp_asan::AllocationMetadata *AllocationMeta, uintptr_t *Buffer,
    size_t BufferLen) {
  uintptr_t UncompressedBuffer[AllocationMetadata::kMaxTraceLengthToCollect];
  size_t UnpackedLength = gwp_asan::compression::unpack(
      AllocationMeta->AllocationTrace.CompressedTrace,
      AllocationMeta->AllocationTrace.TraceSize, UncompressedBuffer,
      AllocationMetadata::kMaxTraceLengthToCollect);
  if (UnpackedLength < BufferLen)
    BufferLen = UnpackedLength;
  memcpy(Buffer, UncompressedBuffer, BufferLen * sizeof(*Buffer));
  return UnpackedLength;
}

bool __gwp_asan_is_deallocated(
    const gwp_asan::AllocationMetadata *AllocationMeta) {
  return AllocationMeta->IsDeallocated;
}

uint64_t __gwp_asan_get_deallocation_thread_id(
    const gwp_asan::AllocationMetadata *AllocationMeta) {
  return AllocationMeta->DeallocationTrace.ThreadID;
}

size_t __gwp_asan_get_deallocation_trace(
    const gwp_asan::AllocationMetadata *AllocationMeta, uintptr_t *Buffer,
    size_t BufferLen) {
  uintptr_t UncompressedBuffer[AllocationMetadata::kMaxTraceLengthToCollect];
  size_t UnpackedLength = gwp_asan::compression::unpack(
      AllocationMeta->DeallocationTrace.CompressedTrace,
      AllocationMeta->DeallocationTrace.TraceSize, UncompressedBuffer,
      AllocationMetadata::kMaxTraceLengthToCollect);
  if (UnpackedLength < BufferLen)
    BufferLen = UnpackedLength;
  memcpy(Buffer, UncompressedBuffer, BufferLen * sizeof(*Buffer));
  return UnpackedLength;
}

#ifdef __cplusplus
} // extern "C"
#endif
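
For completeness, a short sketch (again not part of the upstream file) of how a reporter might consume the trace accessors defined above. `Meta` is assumed to come from a successful __gwp_asan_get_metadata() call, and `PrintAllocationTrace` is a hypothetical helper name; the truncation handling mirrors the return-value semantics of __gwp_asan_get_allocation_trace.

#include "gwp_asan/common.h"
#include "gwp_asan/crash_handler.h"

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

void PrintAllocationTrace(const gwp_asan::AllocationMetadata *Meta) {
  // Any reasonable fixed size works; if the stored trace is longer, the
  // return value reports the full unpacked length and the copy is truncated.
  uintptr_t Frames[64];
  const size_t MaxFrames = sizeof(Frames) / sizeof(Frames[0]);
  size_t Returned = __gwp_asan_get_allocation_trace(Meta, Frames, MaxFrames);
  size_t NumFrames = Returned < MaxFrames ? Returned : MaxFrames;

  fprintf(stderr, "allocated by thread %" PRIu64 ":\n",
          __gwp_asan_get_allocation_thread_id(Meta));
  for (size_t I = 0; I < NumFrames; ++I)
    fprintf(stderr, "  #%zu 0x%" PRIxPTR "\n", I, Frames[I]);
}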