author     Android Build Coastguard Worker <android-build-coastguard-worker@google.com>  2023-07-07 05:11:25 +0000
committer  Android Build Coastguard Worker <android-build-coastguard-worker@google.com>  2023-07-07 05:11:25 +0000
commit     9f0257cff92501be038542aa6e462802ec85e234 (patch)
tree       2b94fc60a93247d1e8b0239e1811e739ca0402b1
parent     eb4bcfaa0cbc97cc1aa9a32d706b6150b830ac48 (diff)
parent     e25d098ba5558facba64107f9383466ed0a52848 (diff)
download   gwp_asan-android14-mainline-uwb-release.tar.gz
Change-Id: Ia47441e72a3569d76f8e3eb411411a70f97af141
-rw-r--r--  Android.bp                                                   |  12
-rw-r--r--  gwp_asan/common.cpp                                          |   4
-rw-r--r--  gwp_asan/common.h                                            |  13
-rw-r--r--  gwp_asan/crash_handler.cpp                                   |  26
-rw-r--r--  gwp_asan/crash_handler.h                                     |  16
-rw-r--r--  gwp_asan/guarded_pool_allocator.cpp                          | 144
-rw-r--r--  gwp_asan/guarded_pool_allocator.h                            |  13
-rw-r--r--  gwp_asan/optional/backtrace_sanitizer_common.cpp             |   4
-rw-r--r--  gwp_asan/optional/segv_handler.h                             |   3
-rw-r--r--  gwp_asan/optional/segv_handler_fuchsia.cpp                   |   3
-rw-r--r--  gwp_asan/optional/segv_handler_posix.cpp                     | 106
-rw-r--r--  gwp_asan/options.inc                                         |  10
-rw-r--r--  gwp_asan/platform_specific/guarded_pool_allocator_posix.cpp |   4
-rw-r--r--  gwp_asan/tests/alignment.cpp                                 |  40
-rw-r--r--  gwp_asan/tests/backtrace.cpp                                 |  79
-rw-r--r--  gwp_asan/tests/crash_handler_api.cpp                         |  23
-rw-r--r--  gwp_asan/tests/harness.cpp                                   |  36
-rw-r--r--  gwp_asan/tests/harness.h                                     |  56
-rw-r--r--  gwp_asan/tests/never_allocated.cpp                           |  55
-rw-r--r--  gwp_asan/tests/recoverable.cpp                               | 194
-rw-r--r--  gwp_asan/tests/thread_contention.cpp                         |   4
21 files changed, 685 insertions, 160 deletions
diff --git a/Android.bp b/Android.bp
index 5f8c925..dd135c0 100644
--- a/Android.bp
+++ b/Android.bp
@@ -95,12 +95,6 @@ cc_library_headers {
// in the platform side.
"//apex_available:platform",
"com.android.runtime",
- // GWP-ASan headers are currently referenced by the following additional APEXes
- "com.android.art",
- "com.android.art.debug",
- "com.android.media",
- "com.android.media.swcodec",
- "com.android.virt",
],
}
@@ -139,9 +133,9 @@ cc_library_static {
// GWP-ASan crash handler API. Provides all the tools you need to symbolize a
// GWP-ASan crash.
-cc_library {
+cc_library_static {
name: "gwp_asan_crash_handler",
- defaults: ["gwp_asan_defaults"],
+ defaults: ["gwp_asan_no_libs_defaults"],
header_libs: [
"gwp_asan_headers",
],
@@ -212,6 +206,8 @@ cc_test {
"gwp_asan/tests/iterate.cpp",
"gwp_asan/tests/late_init.cpp",
"gwp_asan/tests/mutex_test.cpp",
+ "gwp_asan/tests/never_allocated.cpp",
+ "gwp_asan/tests/recoverable.cpp",
"gwp_asan/tests/options.cpp",
"gwp_asan/tests/slot_reuse.cpp",
"gwp_asan/tests/thread_contention.cpp",
diff --git a/gwp_asan/common.cpp b/gwp_asan/common.cpp
index b0f6c58..790a331 100644
--- a/gwp_asan/common.cpp
+++ b/gwp_asan/common.cpp
@@ -105,4 +105,8 @@ size_t AllocatorState::getNearestSlot(uintptr_t Ptr) const {
return addrToSlot(this, Ptr + PageSize); // Round up.
}
+uintptr_t AllocatorState::internallyDetectedErrorFaultAddress() const {
+ return GuardedPagePoolEnd - 0x10;
+}
+
} // namespace gwp_asan
diff --git a/gwp_asan/common.h b/gwp_asan/common.h
index 6b238ad..df45102 100644
--- a/gwp_asan/common.h
+++ b/gwp_asan/common.h
@@ -35,7 +35,7 @@ struct AllocatorVersionMagic {
uint8_t Magic[4] = {};
// Update the version number when the AllocatorState or AllocationMetadata
// change.
- static constexpr uint16_t kAllocatorVersion = 1;
+ static constexpr uint16_t kAllocatorVersion = 2;
uint16_t Version = 0;
uint16_t Reserved = 0;
};
@@ -98,6 +98,12 @@ struct AllocationMetadata {
// Whether this allocation has been deallocated yet.
bool IsDeallocated = false;
+
+ // In recoverable mode, whether this allocation has had a crash associated
+ // with it. This has certain side effects, like meaning this allocation will
+ // permanently occupy a slot, and won't ever have another crash reported from
+ // it.
+ bool HasCrashed = false;
};
// This holds the state that's shared between the GWP-ASan allocator and the
@@ -127,6 +133,11 @@ struct AllocatorState {
// must be within memory owned by this pool, else the result is undefined.
bool isGuardPage(uintptr_t Ptr) const;
+ // Returns the address that's used by __gwp_asan_get_internal_crash_address()
+ // and GPA::raiseInternallyDetectedError() to communicate that the SEGV in
+ // question comes from an internally-detected error.
+ uintptr_t internallyDetectedErrorFaultAddress() const;
+
// The number of guarded slots that this pool holds.
size_t MaxSimultaneousAllocations = 0;
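
Editor's note: the reserved fault address lives in the extra page this change appends to the pool. A minimal sketch of how a consumer could classify a fault, assuming an initialized AllocatorState (the function and variable names below are illustrative, not part of the patch):

#include <cstdint>

#include "gwp_asan/common.h"

// Sketch (not part of the patch): classify a SEGV address as either an
// internally raised GWP-ASan fault (double free / invalid free) or a plain
// access into the guarded pool. `State` is the shared AllocatorState.
bool isInternallyRaisedFault(const gwp_asan::AllocatorState *State,
                             uintptr_t FaultAddr) {
  // The allocator touches exactly this address, inside the extra page at the
  // end of the pool, when it wants to report a bad free() it detected itself.
  return FaultAddr == State->internallyDetectedErrorFaultAddress();
}
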
diff --git a/gwp_asan/crash_handler.cpp b/gwp_asan/crash_handler.cpp
index 6b4c39e..555365c 100644
--- a/gwp_asan/crash_handler.cpp
+++ b/gwp_asan/crash_handler.cpp
@@ -31,7 +31,15 @@ bool __gwp_asan_error_is_mine(const gwp_asan::AllocatorState *State,
}
uintptr_t
-__gwp_asan_get_internal_crash_address(const gwp_asan::AllocatorState *State) {
+__gwp_asan_get_internal_crash_address(const gwp_asan::AllocatorState *State,
+ uintptr_t ErrorPtr) {
+ // There can be a race between internally- and externally-raised faults. The
+ // fault address from the signal handler is used to discriminate whether it's
+ // internally- or externally-raised, and the pool maintains a special page at
+ // the end of the GuardedPagePool specifically for the internally-raised
+ // faults.
+ if (ErrorPtr != State->internallyDetectedErrorFaultAddress())
+ return 0u;
return State->FailureAddress;
}
@@ -52,7 +60,14 @@ __gwp_asan_diagnose_error(const gwp_asan::AllocatorState *State,
if (State->FailureType != Error::UNKNOWN)
return State->FailureType;
- // Let's try and figure out what the source of this error is.
+ // Check for use-after-free.
+ if (addrToMetadata(State, Metadata, ErrorPtr)->IsDeallocated)
+ return Error::USE_AFTER_FREE;
+
+ // Check for buffer-overflow. Because of allocation alignment or left/right
+ // page placement, we can have buffer-overflows that don't touch a guarded
+ // page, but these are not possible to detect unless it's also a
+ // use-after-free, which is handled above.
if (State->isGuardPage(ErrorPtr)) {
size_t Slot = State->getNearestSlot(ErrorPtr);
const AllocationMetadata *SlotMeta =
@@ -67,13 +82,6 @@ __gwp_asan_diagnose_error(const gwp_asan::AllocatorState *State,
return Error::BUFFER_UNDERFLOW;
}
- // Access wasn't a guard page, check for use-after-free.
- const AllocationMetadata *SlotMeta =
- addrToMetadata(State, Metadata, ErrorPtr);
- if (SlotMeta->IsDeallocated) {
- return Error::USE_AFTER_FREE;
- }
-
// If we have reached here, the error is still unknown.
return Error::UNKNOWN;
}
diff --git a/gwp_asan/crash_handler.h b/gwp_asan/crash_handler.h
index 4a95069..1ff60ed 100644
--- a/gwp_asan/crash_handler.h
+++ b/gwp_asan/crash_handler.h
@@ -46,12 +46,18 @@ __gwp_asan_diagnose_error(const gwp_asan::AllocatorState *State,
const gwp_asan::AllocationMetadata *Metadata,
uintptr_t ErrorPtr);
-// For internally-detected errors (double free, invalid free), this function
-// returns the pointer that the error occurred at. If the error is unrelated to
-// GWP-ASan, or if the error was caused by a non-internally detected failure,
-// this function returns zero.
+// This function, provided the fault address from the signal handler, returns
+// the following values:
+// 1. If the crash was caused by an internally-detected error (invalid free,
+// double free), this function returns the pointer that was used for the
+// internally-detected bad operation (i.e. the pointer given to free()).
+// 2. For externally-detected crashes (use-after-free, buffer-overflow), this
+// function returns zero.
+// 3. If GWP-ASan wasn't responsible for the crash at all, this function also
+// returns zero.
uintptr_t
-__gwp_asan_get_internal_crash_address(const gwp_asan::AllocatorState *State);
+__gwp_asan_get_internal_crash_address(const gwp_asan::AllocatorState *State,
+ uintptr_t ErrorPtr);
// Returns a pointer to the metadata for the allocation that's responsible for
// the crash. This metadata should not be dereferenced directly due to API
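
Editor's note: a sketch of how a crash-handler-side consumer might use the updated two-argument API, assuming State and Metadata were already copied out of the crashed process (describeCrash is a hypothetical helper, not part of this change):

#include <cinttypes>
#include <cstdio>

#include "gwp_asan/common.h"
#include "gwp_asan/crash_handler.h"

// Hypothetical crash-handler-side consumer of the updated API. In a real
// tool, State and Metadata are copied out of the crashed process; here they
// are simply parameters.
void describeCrash(const gwp_asan::AllocatorState *State,
                   const gwp_asan::AllocationMetadata *Metadata,
                   uintptr_t FaultAddr) {
  if (!__gwp_asan_error_is_mine(State, FaultAddr))
    return; // Not a GWP-ASan fault at all.

  // New in this change: the fault address is passed in, so the handler can
  // tell internally raised faults apart from wild accesses into the pool.
  uintptr_t InternalPtr =
      __gwp_asan_get_internal_crash_address(State, FaultAddr);
  uintptr_t ErrorPtr = InternalPtr ? InternalPtr : FaultAddr;

  gwp_asan::Error E = __gwp_asan_diagnose_error(State, Metadata, ErrorPtr);
  printf("GWP-ASan: %s at 0x%" PRIxPTR "\n", gwp_asan::ErrorToString(E),
         ErrorPtr);
}
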
diff --git a/gwp_asan/guarded_pool_allocator.cpp b/gwp_asan/guarded_pool_allocator.cpp
index 7096b42..9017ab7 100644
--- a/gwp_asan/guarded_pool_allocator.cpp
+++ b/gwp_asan/guarded_pool_allocator.cpp
@@ -8,6 +8,7 @@
#include "gwp_asan/guarded_pool_allocator.h"
+#include "gwp_asan/crash_handler.h"
#include "gwp_asan/options.h"
#include "gwp_asan/utilities.h"
@@ -73,8 +74,15 @@ void GuardedPoolAllocator::init(const options::Options &Opts) {
assert((PageSize & (PageSize - 1)) == 0);
State.PageSize = PageSize;
+ // Number of pages required =
+ // + MaxSimultaneousAllocations * maximumAllocationSize (N pages per slot)
+ // + MaxSimultaneousAllocations (one guard on the left side of each slot)
+ // + 1 (an extra guard page at the end of the pool, on the right side)
+ // + 1 (an extra page that's used for reporting internally-detected crashes,
+ // like double free and invalid free, to the signal handler; see
+ // raiseInternallyDetectedError() for more info)
size_t PoolBytesRequired =
- PageSize * (1 + State.MaxSimultaneousAllocations) +
+ PageSize * (2 + State.MaxSimultaneousAllocations) +
State.MaxSimultaneousAllocations * State.maximumAllocationSize();
assert(PoolBytesRequired % PageSize == 0);
void *GuardedPoolMemory = reserveGuardedPool(PoolBytesRequired);
@@ -258,22 +266,60 @@ void *GuardedPoolAllocator::allocate(size_t Size, size_t Alignment) {
return reinterpret_cast<void *>(UserPtr);
}
-void GuardedPoolAllocator::trapOnAddress(uintptr_t Address, Error E) {
+void GuardedPoolAllocator::raiseInternallyDetectedError(uintptr_t Address,
+ Error E) {
+ // Disable the allocator before setting the internal failure state. In
+ // non-recoverable mode, the allocator will be permanently disabled, and so
+ // things will be accessed without locks.
+ disable();
+
+ // Races between internally- and externally-raised faults can happen. Right
+ // now, in this thread we've locked the allocator in order to raise an
+ // internally-detected fault, and another thread could SIGSEGV to raise an
+ // externally-detected fault. What will happen is that the other thread will
+ // wait in the signal handler, as we hold the allocator's locks from the
+ // disable() above. We'll trigger the signal handler by touching the
+ // internal-signal-raising address below, and the signal handler from our
+ // thread will get to run first as we will continue to hold the allocator
+ // locks until the enable() at the end of this function. Be careful though, if
+ // this thread receives another SIGSEGV after the disable() above, but before
+ // touching the internal-signal-raising address below, then this thread will
+ // get an "externally-raised" SIGSEGV while *also* holding the allocator
+ // locks, which means this thread's signal handler will deadlock. This could
+ // be resolved with a re-entrant lock, but asking platforms to implement this
+ // seems unnecessary given the only way to get a SIGSEGV in this critical
+ // section is either a memory safety bug in the couple lines of code below (be
+ // careful!), or someone outside uses `kill(this_thread, SIGSEGV)`, which
+ // really shouldn't happen.
+
State.FailureType = E;
State.FailureAddress = Address;
- // Raise a SEGV by touching first guard page.
- volatile char *p = reinterpret_cast<char *>(State.GuardedPagePool);
+ // Raise a SEGV by touching a specific address that identifies to the crash
+ // handler that this is an internally-raised fault. Changing this address?
+ // Don't forget to update __gwp_asan_get_internal_crash_address.
+ volatile char *p =
+ reinterpret_cast<char *>(State.internallyDetectedErrorFaultAddress());
*p = 0;
- // Normally, would be __builtin_unreachable(), but because of
- // https://bugs.llvm.org/show_bug.cgi?id=47480, unreachable will DCE the
- // volatile store above, even though it has side effects.
- __builtin_trap();
-}
-void GuardedPoolAllocator::stop() {
- getThreadLocals()->RecursiveGuard = true;
- PoolMutex.tryLock();
+ // This should never be reached in non-recoverable mode. Ensure that the
+ // signal handler called handleRecoverablePostCrashReport(), which was
+ // responsible for re-setting these fields.
+ assert(State.FailureType == Error::UNKNOWN);
+ assert(State.FailureAddress == 0u);
+
+ // In recoverable mode, the signal handler (after dumping the crash) marked
+ // the page containing the InternalFaultSegvAddress as read/writeable, to
+ // allow the second touch to succeed after returning from the signal handler.
+ // Now, we need to mark the page as non-read/write-able again, so future
+ // internal faults can be raised.
+ deallocateInGuardedPool(
+ reinterpret_cast<void *>(getPageAddr(
+ State.internallyDetectedErrorFaultAddress(), State.PageSize)),
+ State.PageSize);
+
+ // And now we're done with patching ourselves back up, enable the allocator.
+ enable();
}
void GuardedPoolAllocator::deallocate(void *Ptr) {
@@ -282,19 +328,25 @@ void GuardedPoolAllocator::deallocate(void *Ptr) {
size_t Slot = State.getNearestSlot(UPtr);
uintptr_t SlotStart = State.slotToAddr(Slot);
AllocationMetadata *Meta = addrToMetadata(UPtr);
+
+ // If this allocation is responsible for a crash, never recycle it. Turn the
+ // deallocate() call into a no-op.
+ if (Meta->HasCrashed)
+ return;
+
if (Meta->Addr != UPtr) {
- // If multiple errors occur at the same time, use the first one.
- ScopedLock L(PoolMutex);
- trapOnAddress(UPtr, Error::INVALID_FREE);
+ raiseInternallyDetectedError(UPtr, Error::INVALID_FREE);
+ return;
+ }
+ if (Meta->IsDeallocated) {
+ raiseInternallyDetectedError(UPtr, Error::DOUBLE_FREE);
+ return;
}
// Intentionally scope the mutex here, so that other threads can access the
// pool during the expensive markInaccessible() call.
{
ScopedLock L(PoolMutex);
- if (Meta->IsDeallocated) {
- trapOnAddress(UPtr, Error::DOUBLE_FREE);
- }
// Ensure that the deallocation is recorded before marking the page as
// inaccessible. Otherwise, a racy use-after-free will have inconsistent
@@ -318,6 +370,62 @@ void GuardedPoolAllocator::deallocate(void *Ptr) {
freeSlot(Slot);
}
+// Thread-compatible, protected by PoolMutex.
+static bool PreviousRecursiveGuard;
+
+void GuardedPoolAllocator::preCrashReport(void *Ptr) {
+ assert(pointerIsMine(Ptr) && "Pointer is not mine!");
+ uintptr_t InternalCrashAddr = __gwp_asan_get_internal_crash_address(
+ &State, reinterpret_cast<uintptr_t>(Ptr));
+ if (!InternalCrashAddr)
+ disable();
+
+ // If something in the signal handler calls malloc() while dumping the
+ // GWP-ASan report (e.g. backtrace_symbols()), make sure that GWP-ASan doesn't
+ // service that allocation. `PreviousRecursiveGuard` is protected by the
+ // allocator locks taken in disable(), either explicitly above for
+ // externally-raised errors, or implicitly in raiseInternallyDetectedError()
+ // for internally-detected errors.
+ PreviousRecursiveGuard = getThreadLocals()->RecursiveGuard;
+ getThreadLocals()->RecursiveGuard = true;
+}
+
+void GuardedPoolAllocator::postCrashReportRecoverableOnly(void *SignalPtr) {
+ uintptr_t SignalUPtr = reinterpret_cast<uintptr_t>(SignalPtr);
+ uintptr_t InternalCrashAddr =
+ __gwp_asan_get_internal_crash_address(&State, SignalUPtr);
+ uintptr_t ErrorUptr = InternalCrashAddr ?: SignalUPtr;
+
+ AllocationMetadata *Metadata = addrToMetadata(ErrorUptr);
+ Metadata->HasCrashed = true;
+
+ allocateInGuardedPool(
+ reinterpret_cast<void *>(getPageAddr(SignalUPtr, State.PageSize)),
+ State.PageSize);
+
+ // Clear the internal state in order to not confuse the crash handler if a
+ // use-after-free or buffer-overflow comes from a different allocation in the
+ // future.
+ if (InternalCrashAddr) {
+ State.FailureType = Error::UNKNOWN;
+ State.FailureAddress = 0;
+ }
+
+ size_t Slot = State.getNearestSlot(ErrorUptr);
+ // If the slot is available, remove it permanently.
+ for (size_t i = 0; i < FreeSlotsLength; ++i) {
+ if (FreeSlots[i] == Slot) {
+ FreeSlots[i] = FreeSlots[FreeSlotsLength - 1];
+ FreeSlotsLength -= 1;
+ break;
+ }
+ }
+
+ getThreadLocals()->RecursiveGuard = PreviousRecursiveGuard;
+ if (!InternalCrashAddr)
+ enable();
+}
+
size_t GuardedPoolAllocator::getSize(const void *Ptr) {
assert(pointerIsMine(Ptr));
ScopedLock L(PoolMutex);
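
Editor's note: a quick sanity check of the new pool-size formula above, using illustrative numbers only (4 KiB pages, 16 slots, one page per slot; none of these values come from the patch):

#include <cassert>
#include <cstddef>

// Worked example of the pool-size formula: 16 left-hand guard pages, 16 slot
// pages, one trailing guard page, and one dedicated page for raising
// internally detected errors.
int main() {
  const size_t PageSize = 0x1000;
  const size_t MaxSimultaneousAllocations = 16;
  const size_t MaximumAllocationSize = PageSize; // one page per slot here

  size_t PoolBytesRequired =
      PageSize * (2 + MaxSimultaneousAllocations) +
      MaxSimultaneousAllocations * MaximumAllocationSize;

  assert(PoolBytesRequired == 34 * PageSize); // 18 guard/extra pages + 16 slots
  return 0;
}
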
diff --git a/gwp_asan/guarded_pool_allocator.h b/gwp_asan/guarded_pool_allocator.h
index 6d2ce25..de07b67 100644
--- a/gwp_asan/guarded_pool_allocator.h
+++ b/gwp_asan/guarded_pool_allocator.h
@@ -67,11 +67,6 @@ public:
// allocate.
void iterate(void *Base, size_t Size, iterate_callback Cb, void *Arg);
- // This function is used to signal the allocator to indefinitely stop
- // functioning, as a crash has occurred. This stops the allocator from
- // servicing any further allocations permanently.
- void stop();
-
// Return whether the allocation should be randomly chosen for sampling.
GWP_ASAN_ALWAYS_INLINE bool shouldSample() {
// NextSampleCounter == 0 means we "should regenerate the counter".
@@ -115,6 +110,12 @@ public:
// Returns a pointer to the AllocatorState region.
const AllocatorState *getAllocatorState() const { return &State; }
+ // Functions that the signal handler is responsible for calling, while
+ // providing the SEGV pointer, prior to dumping the crash, and after dumping
+ // the crash (in recoverable mode only).
+ void preCrashReport(void *Ptr);
+ void postCrashReportRecoverableOnly(void *Ptr);
+
// Exposed as protected for testing.
protected:
// Returns the actual allocation size required to service an allocation with
@@ -185,7 +186,7 @@ private:
// Raise a SEGV and set the corresponding fields in the Allocator's State in
// order to tell the crash handler what happened. Used when errors are
// detected internally (Double Free, Invalid Free).
- void trapOnAddress(uintptr_t Address, Error E);
+ void raiseInternallyDetectedError(uintptr_t Address, Error E);
static GuardedPoolAllocator *getSingleton();
diff --git a/gwp_asan/optional/backtrace_sanitizer_common.cpp b/gwp_asan/optional/backtrace_sanitizer_common.cpp
index e6cce86..f8b9cbd 100644
--- a/gwp_asan/optional/backtrace_sanitizer_common.cpp
+++ b/gwp_asan/optional/backtrace_sanitizer_common.cpp
@@ -72,7 +72,9 @@ static void PrintBacktrace(uintptr_t *Trace, size_t TraceLength,
return;
}
- StackTrace.Print();
+ __sanitizer::InternalScopedString buffer;
+ StackTrace.PrintTo(&buffer);
+ Printf("%s\n", buffer.data());
}
} // anonymous namespace
diff --git a/gwp_asan/optional/segv_handler.h b/gwp_asan/optional/segv_handler.h
index 87d9fe1..72105de 100644
--- a/gwp_asan/optional/segv_handler.h
+++ b/gwp_asan/optional/segv_handler.h
@@ -23,7 +23,8 @@ namespace segv_handler {
// before this function.
void installSignalHandlers(gwp_asan::GuardedPoolAllocator *GPA, Printf_t Printf,
gwp_asan::backtrace::PrintBacktrace_t PrintBacktrace,
- gwp_asan::backtrace::SegvBacktrace_t SegvBacktrace);
+ gwp_asan::backtrace::SegvBacktrace_t SegvBacktrace,
+ bool Recoverable = false);
// Uninstall the signal handlers, test-only.
void uninstallSignalHandlers();
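
Editor's note: a usage sketch of the new Recoverable parameter, assuming the optional backtrace helpers under gwp_asan/optional and a caller-supplied Printf_t callback (the include paths, PrintfToStderr, and installRecoverableHandler are assumptions for illustration, not APIs added by this change):

#include <cstdarg>
#include <cstdio>

#include "gwp_asan/guarded_pool_allocator.h"
#include "gwp_asan/optional/backtrace.h"
#include "gwp_asan/optional/segv_handler.h"

// Minimal logging callback with the Printf_t shape (const char *Format, ...).
static void PrintfToStderr(const char *Format, ...) {
  va_list AP;
  va_start(AP, Format);
  vfprintf(stderr, Format, AP);
  va_end(AP);
}

// Sketch: install the SEGV handler in the new recoverable mode so reports are
// printed without terminating the process.
void installRecoverableHandler(gwp_asan::GuardedPoolAllocator *GPA) {
  gwp_asan::segv_handler::installSignalHandlers(
      GPA, PrintfToStderr,
      gwp_asan::backtrace::getPrintBacktraceFunction(),
      gwp_asan::backtrace::getSegvBacktraceFunction(),
      /* Recoverable */ true);
}
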
diff --git a/gwp_asan/optional/segv_handler_fuchsia.cpp b/gwp_asan/optional/segv_handler_fuchsia.cpp
index 966d7d0..f5ff35e 100644
--- a/gwp_asan/optional/segv_handler_fuchsia.cpp
+++ b/gwp_asan/optional/segv_handler_fuchsia.cpp
@@ -15,7 +15,8 @@ namespace segv_handler {
void installSignalHandlers(gwp_asan::GuardedPoolAllocator * /* GPA */,
Printf_t /* Printf */,
backtrace::PrintBacktrace_t /* PrintBacktrace */,
- backtrace::SegvBacktrace_t /* SegvBacktrace */) {}
+ backtrace::SegvBacktrace_t /* SegvBacktrace */,
+ bool /* Recoverable */) {}
void uninstallSignalHandlers() {}
} // namespace segv_handler
diff --git a/gwp_asan/optional/segv_handler_posix.cpp b/gwp_asan/optional/segv_handler_posix.cpp
index 5c9bb9f..198db5c 100644
--- a/gwp_asan/optional/segv_handler_posix.cpp
+++ b/gwp_asan/optional/segv_handler_posix.cpp
@@ -47,15 +47,12 @@ void printHeader(Error E, uintptr_t AccessPtr,
// appended to a log file automatically per Printf() call.
constexpr size_t kDescriptionBufferLen = 128;
char DescriptionBuffer[kDescriptionBufferLen] = "";
+
+ bool AccessWasInBounds = false;
if (E != Error::UNKNOWN && Metadata != nullptr) {
uintptr_t Address = __gwp_asan_get_allocation_address(Metadata);
size_t Size = __gwp_asan_get_allocation_size(Metadata);
- if (E == Error::USE_AFTER_FREE) {
- snprintf(DescriptionBuffer, kDescriptionBufferLen,
- "(%zu byte%s into a %zu-byte allocation at 0x%zx) ",
- AccessPtr - Address, (AccessPtr - Address == 1) ? "" : "s", Size,
- Address);
- } else if (AccessPtr < Address) {
+ if (AccessPtr < Address) {
snprintf(DescriptionBuffer, kDescriptionBufferLen,
"(%zu byte%s to the left of a %zu-byte allocation at 0x%zx) ",
Address - AccessPtr, (Address - AccessPtr == 1) ? "" : "s", Size,
@@ -65,9 +62,15 @@ void printHeader(Error E, uintptr_t AccessPtr,
"(%zu byte%s to the right of a %zu-byte allocation at 0x%zx) ",
AccessPtr - Address, (AccessPtr - Address == 1) ? "" : "s", Size,
Address);
- } else {
+ } else if (E == Error::DOUBLE_FREE) {
snprintf(DescriptionBuffer, kDescriptionBufferLen,
"(a %zu-byte allocation) ", Size);
+ } else {
+ AccessWasInBounds = true;
+ snprintf(DescriptionBuffer, kDescriptionBufferLen,
+ "(%zu byte%s into a %zu-byte allocation at 0x%zx) ",
+ AccessPtr - Address, (AccessPtr - Address == 1) ? "" : "s", Size,
+ Address);
}
}
@@ -81,10 +84,27 @@ void printHeader(Error E, uintptr_t AccessPtr,
else
snprintf(ThreadBuffer, kThreadBufferLen, "%" PRIu64, ThreadID);
- Printf("%s at 0x%zx %sby thread %s here:\n", gwp_asan::ErrorToString(E),
- AccessPtr, DescriptionBuffer, ThreadBuffer);
+ const char *OutOfBoundsAndUseAfterFreeWarning = "";
+ if (E == Error::USE_AFTER_FREE && !AccessWasInBounds) {
+ OutOfBoundsAndUseAfterFreeWarning =
+ " (warning: buffer overflow/underflow detected on a free()'d "
+ "allocation. This either means you have a buffer-overflow and a "
+ "use-after-free at the same time, or you have a long-lived "
+ "use-after-free bug where the allocation/deallocation metadata below "
+ "has already been overwritten and is likely bogus)";
+ }
+
+ Printf("%s%s at 0x%zx %sby thread %s here:\n", gwp_asan::ErrorToString(E),
+ OutOfBoundsAndUseAfterFreeWarning, AccessPtr, DescriptionBuffer,
+ ThreadBuffer);
}
+static bool HasReportedBadPoolAccess = false;
+static const char *kUnknownCrashText =
+ "GWP-ASan cannot provide any more information about this error. This may "
+ "occur due to a wild memory access into the GWP-ASan pool, or an "
+ "overflow/underflow that is > 512B in length.\n";
+
void dumpReport(uintptr_t ErrorPtr, const gwp_asan::AllocatorState *State,
const gwp_asan::AllocationMetadata *Metadata,
SegvBacktrace_t SegvBacktrace, Printf_t Printf,
@@ -92,29 +112,45 @@ void dumpReport(uintptr_t ErrorPtr, const gwp_asan::AllocatorState *State,
assert(State && "dumpReport missing Allocator State.");
assert(Metadata && "dumpReport missing Metadata.");
assert(Printf && "dumpReport missing Printf.");
+ assert(__gwp_asan_error_is_mine(State, ErrorPtr) &&
+ "dumpReport() called on a non-GWP-ASan error.");
+
+ uintptr_t InternalErrorPtr =
+ __gwp_asan_get_internal_crash_address(State, ErrorPtr);
+ if (InternalErrorPtr)
+ ErrorPtr = InternalErrorPtr;
- if (!__gwp_asan_error_is_mine(State, ErrorPtr))
+ const gwp_asan::AllocationMetadata *AllocMeta =
+ __gwp_asan_get_metadata(State, Metadata, ErrorPtr);
+
+ if (AllocMeta == nullptr) {
+ if (HasReportedBadPoolAccess) return;
+ HasReportedBadPoolAccess = true;
+ Printf("*** GWP-ASan detected a memory error ***\n");
+ ScopedEndOfReportDecorator Decorator(Printf);
+ Printf(kUnknownCrashText);
+ return;
+ }
+
+ // It's unusual for a signal handler to be invoked multiple times for the same
+ // allocation, but it's possible in various scenarios, like:
+ // 1. A double-free or invalid-free was invoked in one thread at the same
+ // time as a buffer-overflow or use-after-free in another thread, or
+ // 2. Two threads do a use-after-free or buffer-overflow at the same time.
+ // In these instances, we've already dumped a report for this allocation, so
+ // skip dumping this issue as well.
+ if (AllocMeta->HasCrashed)
return;
Printf("*** GWP-ASan detected a memory error ***\n");
ScopedEndOfReportDecorator Decorator(Printf);
- uintptr_t InternalErrorPtr = __gwp_asan_get_internal_crash_address(State);
- if (InternalErrorPtr != 0u)
- ErrorPtr = InternalErrorPtr;
-
Error E = __gwp_asan_diagnose_error(State, Metadata, ErrorPtr);
-
if (E == Error::UNKNOWN) {
- Printf("GWP-ASan cannot provide any more information about this error. "
- "This may occur due to a wild memory access into the GWP-ASan pool, "
- "or an overflow/underflow that is > 512B in length.\n");
+ Printf(kUnknownCrashText);
return;
}
- const gwp_asan::AllocationMetadata *AllocMeta =
- __gwp_asan_get_metadata(State, Metadata, ErrorPtr);
-
// Print the error header.
printHeader(E, ErrorPtr, AllocMeta, Printf);
@@ -126,9 +162,6 @@ void dumpReport(uintptr_t ErrorPtr, const gwp_asan::AllocatorState *State,
PrintBacktrace(Trace, TraceLength, Printf);
- if (AllocMeta == nullptr)
- return;
-
// Maybe print the deallocation trace.
if (__gwp_asan_is_deallocated(AllocMeta)) {
uint64_t ThreadID = __gwp_asan_get_deallocation_thread_id(AllocMeta);
@@ -154,23 +187,33 @@ void dumpReport(uintptr_t ErrorPtr, const gwp_asan::AllocatorState *State,
struct sigaction PreviousHandler;
bool SignalHandlerInstalled;
+bool RecoverableSignal;
gwp_asan::GuardedPoolAllocator *GPAForSignalHandler;
Printf_t PrintfForSignalHandler;
PrintBacktrace_t PrintBacktraceForSignalHandler;
SegvBacktrace_t BacktraceForSignalHandler;
static void sigSegvHandler(int sig, siginfo_t *info, void *ucontext) {
- if (GPAForSignalHandler) {
- GPAForSignalHandler->stop();
+ const gwp_asan::AllocatorState *State =
+ GPAForSignalHandler->getAllocatorState();
+ void *FaultAddr = info->si_addr;
+ uintptr_t FaultAddrUPtr = reinterpret_cast<uintptr_t>(FaultAddr);
+
+ if (__gwp_asan_error_is_mine(State, FaultAddrUPtr)) {
+ GPAForSignalHandler->preCrashReport(FaultAddr);
- dumpReport(reinterpret_cast<uintptr_t>(info->si_addr),
- GPAForSignalHandler->getAllocatorState(),
- GPAForSignalHandler->getMetadataRegion(),
+ dumpReport(FaultAddrUPtr, State, GPAForSignalHandler->getMetadataRegion(),
BacktraceForSignalHandler, PrintfForSignalHandler,
PrintBacktraceForSignalHandler, ucontext);
+
+ if (RecoverableSignal) {
+ GPAForSignalHandler->postCrashReportRecoverableOnly(FaultAddr);
+ return;
+ }
}
- // Process any previous handlers.
+ // Process any previous handlers as long as the crash wasn't a GWP-ASan crash
+ // in recoverable mode.
if (PreviousHandler.sa_flags & SA_SIGINFO) {
PreviousHandler.sa_sigaction(sig, info, ucontext);
} else if (PreviousHandler.sa_handler == SIG_DFL) {
@@ -196,7 +239,7 @@ namespace segv_handler {
void installSignalHandlers(gwp_asan::GuardedPoolAllocator *GPA, Printf_t Printf,
PrintBacktrace_t PrintBacktrace,
- SegvBacktrace_t SegvBacktrace) {
+ SegvBacktrace_t SegvBacktrace, bool Recoverable) {
assert(GPA && "GPA wasn't provided to installSignalHandlers.");
assert(Printf && "Printf wasn't provided to installSignalHandlers.");
assert(PrintBacktrace &&
@@ -207,6 +250,7 @@ void installSignalHandlers(gwp_asan::GuardedPoolAllocator *GPA, Printf_t Printf,
PrintfForSignalHandler = Printf;
PrintBacktraceForSignalHandler = PrintBacktrace;
BacktraceForSignalHandler = SegvBacktrace;
+ RecoverableSignal = Recoverable;
struct sigaction Action = {};
Action.sa_sigaction = sigSegvHandler;
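
Editor's note: from the application's point of view, recoverable mode behaves roughly as sketched below, assuming the allocator is initialized and the handler was installed with Recoverable = true (demonstrateRecoverableReport is a hypothetical helper, not part of the patch):

#include "gwp_asan/guarded_pool_allocator.h"

// Sketch of recoverable mode as seen by application code.
void demonstrateRecoverableReport(gwp_asan::GuardedPoolAllocator &GPA) {
  char *P = static_cast<char *>(GPA.allocate(16));
  GPA.deallocate(P);
  // First bad access: one report is dumped, then execution continues.
  *reinterpret_cast<volatile char *>(P) = 42;
  // The slot is now marked HasCrashed, so no further reports are emitted.
  *reinterpret_cast<volatile char *>(P) = 43;
}
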
diff --git a/gwp_asan/options.inc b/gwp_asan/options.inc
index 9900a2a..3a59321 100644
--- a/gwp_asan/options.inc
+++ b/gwp_asan/options.inc
@@ -49,6 +49,16 @@ GWP_ASAN_OPTION(
"the same. Note, if the previously installed SIGSEGV handler is SIG_IGN, "
"we terminate the process after dumping the error report.")
+GWP_ASAN_OPTION(
+ bool, Recoverable, false,
+ "Install GWP-ASan's signal handler in recoverable mode. This means that "
+ "upon GWP-ASan detecting an error, it'll print the error report, but *not* "
+ "crash. Only one crash per sampled allocation will ever be recorded, and "
+ "if a sampled allocation does actually cause a crash, it'll permanently "
+ "occupy a slot in the pool. The recoverable mode also means that "
+ "previously-installed signal handlers will only be triggered for "
+ "non-GWP-ASan errors, as all GWP-ASan errors won't be forwarded.")
+
GWP_ASAN_OPTION(bool, InstallForkHandlers, true,
"Install GWP-ASan atfork handlers to acquire internal locks "
"before fork and release them after.")
diff --git a/gwp_asan/platform_specific/guarded_pool_allocator_posix.cpp b/gwp_asan/platform_specific/guarded_pool_allocator_posix.cpp
index adb7330..c036ebe 100644
--- a/gwp_asan/platform_specific/guarded_pool_allocator_posix.cpp
+++ b/gwp_asan/platform_specific/guarded_pool_allocator_posix.cpp
@@ -98,6 +98,10 @@ size_t GuardedPoolAllocator::getPlatformPageSize() {
}
void GuardedPoolAllocator::installAtFork() {
+ static bool AtForkInstalled = false;
+ if (AtForkInstalled)
+ return;
+ AtForkInstalled = true;
auto Disable = []() {
if (auto *S = getSingleton())
S->disable();
diff --git a/gwp_asan/tests/alignment.cpp b/gwp_asan/tests/alignment.cpp
index 6d1e912..9f15046 100644
--- a/gwp_asan/tests/alignment.cpp
+++ b/gwp_asan/tests/alignment.cpp
@@ -35,13 +35,13 @@ public:
TEST(AlignmentTest, LeftAlignedAllocs) {
// Alignment < Page Size.
EXPECT_EQ(0x4000u, AlignmentTestGPA::alignUp(
- /* Ptr */ 0x4000, /* Alignment */ 0x1));
+ /* Ptr */ 0x4000, /* Alignment */ 0x1));
// Alignment == Page Size.
EXPECT_EQ(0x4000u, AlignmentTestGPA::alignUp(
- /* Ptr */ 0x4000, /* Alignment */ 0x1000));
+ /* Ptr */ 0x4000, /* Alignment */ 0x1000));
// Alignment > Page Size.
EXPECT_EQ(0x4000u, AlignmentTestGPA::alignUp(
- /* Ptr */ 0x4000, /* Alignment */ 0x4000));
+ /* Ptr */ 0x4000, /* Alignment */ 0x4000));
}
TEST(AlignmentTest, SingleByteAllocs) {
@@ -50,21 +50,21 @@ TEST(AlignmentTest, SingleByteAllocs) {
AlignmentTestGPA::getRequiredBackingSize(
/* Size */ 0x1, /* Alignment */ 0x1, /* PageSize */ 0x1000));
EXPECT_EQ(0x7fffu, AlignmentTestGPA::alignDown(
- /* Ptr */ 0x8000 - 0x1, /* Alignment */ 0x1));
+ /* Ptr */ 0x8000 - 0x1, /* Alignment */ 0x1));
// Alignment == Page Size.
EXPECT_EQ(0x1u,
AlignmentTestGPA::getRequiredBackingSize(
/* Size */ 0x1, /* Alignment */ 0x1000, /* PageSize */ 0x1000));
EXPECT_EQ(0x7000u, AlignmentTestGPA::alignDown(
- /* Ptr */ 0x8000 - 0x1, /* Alignment */ 0x1000));
+ /* Ptr */ 0x8000 - 0x1, /* Alignment */ 0x1000));
// Alignment > Page Size.
EXPECT_EQ(0x3001u,
AlignmentTestGPA::getRequiredBackingSize(
/* Size */ 0x1, /* Alignment */ 0x4000, /* PageSize */ 0x1000));
EXPECT_EQ(0x4000u, AlignmentTestGPA::alignDown(
- /* Ptr */ 0x8000 - 0x1, /* Alignment */ 0x4000));
+ /* Ptr */ 0x8000 - 0x1, /* Alignment */ 0x4000));
}
TEST(AlignmentTest, PageSizedAllocs) {
@@ -73,21 +73,21 @@ TEST(AlignmentTest, PageSizedAllocs) {
AlignmentTestGPA::getRequiredBackingSize(
/* Size */ 0x1000, /* Alignment */ 0x1, /* PageSize */ 0x1000));
EXPECT_EQ(0x7000u, AlignmentTestGPA::alignDown(
- /* Ptr */ 0x8000 - 0x1000, /* Alignment */ 0x1));
+ /* Ptr */ 0x8000 - 0x1000, /* Alignment */ 0x1));
// Alignment == Page Size.
EXPECT_EQ(0x1000u, AlignmentTestGPA::getRequiredBackingSize(
- /* Size */ 0x1000, /* Alignment */ 0x1000,
- /* PageSize */ 0x1000));
+ /* Size */ 0x1000, /* Alignment */ 0x1000,
+ /* PageSize */ 0x1000));
EXPECT_EQ(0x7000u, AlignmentTestGPA::alignDown(
- /* Ptr */ 0x8000 - 0x1000, /* Alignment */ 0x1000));
+ /* Ptr */ 0x8000 - 0x1000, /* Alignment */ 0x1000));
// Alignment > Page Size.
EXPECT_EQ(0x4000u, AlignmentTestGPA::getRequiredBackingSize(
- /* Size */ 0x1000, /* Alignment */ 0x4000,
- /* PageSize */ 0x1000));
+ /* Size */ 0x1000, /* Alignment */ 0x4000,
+ /* PageSize */ 0x1000));
EXPECT_EQ(0x4000u, AlignmentTestGPA::alignDown(
- /* Ptr */ 0x8000 - 0x1000, /* Alignment */ 0x4000));
+ /* Ptr */ 0x8000 - 0x1000, /* Alignment */ 0x4000));
}
TEST(AlignmentTest, MoreThanPageAllocs) {
@@ -96,19 +96,19 @@ TEST(AlignmentTest, MoreThanPageAllocs) {
AlignmentTestGPA::getRequiredBackingSize(
/* Size */ 0x2fff, /* Alignment */ 0x1, /* PageSize */ 0x1000));
EXPECT_EQ(0x5001u, AlignmentTestGPA::alignDown(
- /* Ptr */ 0x8000 - 0x2fff, /* Alignment */ 0x1));
+ /* Ptr */ 0x8000 - 0x2fff, /* Alignment */ 0x1));
// Alignment == Page Size.
EXPECT_EQ(0x2fffu, AlignmentTestGPA::getRequiredBackingSize(
- /* Size */ 0x2fff, /* Alignment */ 0x1000,
- /* PageSize */ 0x1000));
+ /* Size */ 0x2fff, /* Alignment */ 0x1000,
+ /* PageSize */ 0x1000));
EXPECT_EQ(0x5000u, AlignmentTestGPA::alignDown(
- /* Ptr */ 0x8000 - 0x2fff, /* Alignment */ 0x1000));
+ /* Ptr */ 0x8000 - 0x2fff, /* Alignment */ 0x1000));
// Alignment > Page Size.
EXPECT_EQ(0x5fffu, AlignmentTestGPA::getRequiredBackingSize(
- /* Size */ 0x2fff, /* Alignment */ 0x4000,
- /* PageSize */ 0x1000));
+ /* Size */ 0x2fff, /* Alignment */ 0x4000,
+ /* PageSize */ 0x1000));
EXPECT_EQ(0x4000u, AlignmentTestGPA::alignDown(
- /* Ptr */ 0x8000 - 0x2fff, /* Alignment */ 0x4000));
+ /* Ptr */ 0x8000 - 0x2fff, /* Alignment */ 0x4000));
}
diff --git a/gwp_asan/tests/backtrace.cpp b/gwp_asan/tests/backtrace.cpp
index a4eb8eb..7cbbcf5 100644
--- a/gwp_asan/tests/backtrace.cpp
+++ b/gwp_asan/tests/backtrace.cpp
@@ -6,46 +6,38 @@
//
//===----------------------------------------------------------------------===//
+#include <regex>
#include <string>
#include "gwp_asan/common.h"
#include "gwp_asan/crash_handler.h"
#include "gwp_asan/tests/harness.h"
-// Optnone to ensure that the calls to these functions are not optimized away,
-// as we're looking for them in the backtraces.
-__attribute((optnone)) void *
-AllocateMemory(gwp_asan::GuardedPoolAllocator &GPA) {
- return GPA.allocate(1);
-}
-__attribute((optnone)) void
-DeallocateMemory(gwp_asan::GuardedPoolAllocator &GPA, void *Ptr) {
- GPA.deallocate(Ptr);
-}
-__attribute((optnone)) void
-DeallocateMemory2(gwp_asan::GuardedPoolAllocator &GPA, void *Ptr) {
- GPA.deallocate(Ptr);
-}
-__attribute__((optnone)) void TouchMemory(void *Ptr) {
- *(reinterpret_cast<volatile char *>(Ptr)) = 7;
-}
-
-TEST_F(BacktraceGuardedPoolAllocatorDeathTest, DoubleFree) {
+TEST_P(BacktraceGuardedPoolAllocatorDeathTest, DoubleFree) {
void *Ptr = AllocateMemory(GPA);
DeallocateMemory(GPA, Ptr);
- std::string DeathRegex = "Double Free.*";
- DeathRegex.append("DeallocateMemory2.*");
-
- DeathRegex.append("was deallocated.*");
- DeathRegex.append("DeallocateMemory.*");
-
- DeathRegex.append("was allocated.*");
- DeathRegex.append("AllocateMemory.*");
- ASSERT_DEATH(DeallocateMemory2(GPA, Ptr), DeathRegex);
+ std::string DeathRegex = "Double Free.*DeallocateMemory2.*";
+ DeathRegex.append("was deallocated.*DeallocateMemory[^2].*");
+ DeathRegex.append("was allocated.*AllocateMemory");
+ if (!Recoverable) {
+ ASSERT_DEATH(DeallocateMemory2(GPA, Ptr), DeathRegex);
+ return;
+ }
+
+ // For recoverable, assert that DeallocateMemory2() doesn't crash.
+ DeallocateMemory2(GPA, Ptr);
+ // Fuchsia's zxtest doesn't have an EXPECT_THAT(testing::MatchesRegex(), ...),
+ // so check the regex manually.
+ EXPECT_TRUE(std::regex_search(
+ GetOutputBuffer(),
+ std::basic_regex(DeathRegex, std::regex_constants::extended)))
+ << "Regex \"" << DeathRegex
+ << "\" was not found in input:\n============\n"
+ << GetOutputBuffer() << "\n============";
}
-TEST_F(BacktraceGuardedPoolAllocatorDeathTest, UseAfterFree) {
+TEST_P(BacktraceGuardedPoolAllocatorDeathTest, UseAfterFree) {
#if defined(__linux__) && __ARM_ARCH == 7
// Incomplete backtrace on Armv7 Linux
GTEST_SKIP();
@@ -54,15 +46,26 @@ TEST_F(BacktraceGuardedPoolAllocatorDeathTest, UseAfterFree) {
void *Ptr = AllocateMemory(GPA);
DeallocateMemory(GPA, Ptr);
- std::string DeathRegex = "Use After Free.*";
- DeathRegex.append("TouchMemory.*");
-
- DeathRegex.append("was deallocated.*");
- DeathRegex.append("DeallocateMemory.*");
-
- DeathRegex.append("was allocated.*");
- DeathRegex.append("AllocateMemory.*");
- ASSERT_DEATH(TouchMemory(Ptr), DeathRegex);
+ std::string DeathRegex = "Use After Free.*TouchMemory.*";
+ DeathRegex.append("was deallocated.*DeallocateMemory[^2].*");
+ DeathRegex.append("was allocated.*AllocateMemory");
+
+ if (!Recoverable) {
+ ASSERT_DEATH(TouchMemory(Ptr), DeathRegex);
+ return;
+ }
+
+ // For recoverable, assert that TouchMemory() doesn't crash.
+ TouchMemory(Ptr);
+ // Fuchsia's zxtest doesn't have an EXPECT_THAT(testing::MatchesRegex(), ...),
+ // so check the regex manually.
+ EXPECT_TRUE(std::regex_search(
+ GetOutputBuffer(),
+ std::basic_regex(DeathRegex, std::regex_constants::extended)))
+ << "Regex \"" << DeathRegex
+ << "\" was not found in input:\n============\n"
+ << GetOutputBuffer() << "\n============";
+ ;
}
TEST(Backtrace, Short) {
diff --git a/gwp_asan/tests/crash_handler_api.cpp b/gwp_asan/tests/crash_handler_api.cpp
index 4cdb569..d270ed8 100644
--- a/gwp_asan/tests/crash_handler_api.cpp
+++ b/gwp_asan/tests/crash_handler_api.cpp
@@ -16,7 +16,7 @@ using GuardedPoolAllocator = gwp_asan::GuardedPoolAllocator;
using AllocationMetadata = gwp_asan::AllocationMetadata;
using AllocatorState = gwp_asan::AllocatorState;
-class CrashHandlerAPITest : public Test {
+class CrashHandlerAPITest : public ::testing::Test {
public:
void SetUp() override { setupState(); }
@@ -40,7 +40,8 @@ protected:
void setupState() {
State.GuardedPagePool = 0x2000;
- State.GuardedPagePoolEnd = 0xb000;
+ State.GuardedPagePoolEnd = 0xc000;
+ InternalFaultAddr = State.GuardedPagePoolEnd - 0x10;
State.MaxSimultaneousAllocations = 4; // 0x3000, 0x5000, 0x7000, 0x9000.
State.PageSize = 0x1000;
}
@@ -100,6 +101,7 @@ protected:
static uintptr_t BacktraceConstants[kNumBacktraceConstants];
AllocatorState State = {};
AllocationMetadata Metadata[4] = {};
+ uintptr_t InternalFaultAddr;
};
uintptr_t CrashHandlerAPITest::BacktraceConstants[kNumBacktraceConstants] = {
@@ -125,7 +127,7 @@ TEST_F(CrashHandlerAPITest, PointerNotAllocated) {
EXPECT_TRUE(__gwp_asan_error_is_mine(&State, FailureAddress));
EXPECT_EQ(Error::UNKNOWN,
__gwp_asan_diagnose_error(&State, Metadata, FailureAddress));
- EXPECT_EQ(0u, __gwp_asan_get_internal_crash_address(&State));
+ EXPECT_EQ(0u, __gwp_asan_get_internal_crash_address(&State, FailureAddress));
EXPECT_EQ(nullptr, __gwp_asan_get_metadata(&State, Metadata, FailureAddress));
}
@@ -140,7 +142,8 @@ TEST_F(CrashHandlerAPITest, DoubleFree) {
EXPECT_TRUE(__gwp_asan_error_is_mine(&State));
EXPECT_EQ(Error::DOUBLE_FREE,
__gwp_asan_diagnose_error(&State, Metadata, 0x0));
- EXPECT_EQ(FailureAddress, __gwp_asan_get_internal_crash_address(&State));
+ EXPECT_EQ(FailureAddress,
+ __gwp_asan_get_internal_crash_address(&State, InternalFaultAddr));
checkMetadata(Index, FailureAddress);
}
@@ -155,7 +158,8 @@ TEST_F(CrashHandlerAPITest, InvalidFree) {
EXPECT_TRUE(__gwp_asan_error_is_mine(&State));
EXPECT_EQ(Error::INVALID_FREE,
__gwp_asan_diagnose_error(&State, Metadata, 0x0));
- EXPECT_EQ(FailureAddress, __gwp_asan_get_internal_crash_address(&State));
+ EXPECT_EQ(FailureAddress,
+ __gwp_asan_get_internal_crash_address(&State, InternalFaultAddr));
checkMetadata(Index, FailureAddress);
}
@@ -168,7 +172,8 @@ TEST_F(CrashHandlerAPITest, InvalidFreeNoMetadata) {
EXPECT_TRUE(__gwp_asan_error_is_mine(&State));
EXPECT_EQ(Error::INVALID_FREE,
__gwp_asan_diagnose_error(&State, Metadata, 0x0));
- EXPECT_EQ(FailureAddress, __gwp_asan_get_internal_crash_address(&State));
+ EXPECT_EQ(FailureAddress,
+ __gwp_asan_get_internal_crash_address(&State, InternalFaultAddr));
EXPECT_EQ(nullptr, __gwp_asan_get_metadata(&State, Metadata, FailureAddress));
}
@@ -180,7 +185,7 @@ TEST_F(CrashHandlerAPITest, UseAfterFree) {
EXPECT_TRUE(__gwp_asan_error_is_mine(&State, FailureAddress));
EXPECT_EQ(Error::USE_AFTER_FREE,
__gwp_asan_diagnose_error(&State, Metadata, FailureAddress));
- EXPECT_EQ(0u, __gwp_asan_get_internal_crash_address(&State));
+ EXPECT_EQ(0u, __gwp_asan_get_internal_crash_address(&State, FailureAddress));
checkMetadata(Index, FailureAddress);
}
@@ -192,7 +197,7 @@ TEST_F(CrashHandlerAPITest, BufferOverflow) {
EXPECT_TRUE(__gwp_asan_error_is_mine(&State, FailureAddress));
EXPECT_EQ(Error::BUFFER_OVERFLOW,
__gwp_asan_diagnose_error(&State, Metadata, FailureAddress));
- EXPECT_EQ(0u, __gwp_asan_get_internal_crash_address(&State));
+ EXPECT_EQ(0u, __gwp_asan_get_internal_crash_address(&State, FailureAddress));
checkMetadata(Index, FailureAddress);
}
@@ -204,6 +209,6 @@ TEST_F(CrashHandlerAPITest, BufferUnderflow) {
EXPECT_TRUE(__gwp_asan_error_is_mine(&State, FailureAddress));
EXPECT_EQ(Error::BUFFER_UNDERFLOW,
__gwp_asan_diagnose_error(&State, Metadata, FailureAddress));
- EXPECT_EQ(0u, __gwp_asan_get_internal_crash_address(&State));
+ EXPECT_EQ(0u, __gwp_asan_get_internal_crash_address(&State, FailureAddress));
checkMetadata(Index, FailureAddress);
}
diff --git a/gwp_asan/tests/harness.cpp b/gwp_asan/tests/harness.cpp
index e668c73..6d41630 100644
--- a/gwp_asan/tests/harness.cpp
+++ b/gwp_asan/tests/harness.cpp
@@ -8,6 +8,8 @@
#include "gwp_asan/tests/harness.h"
+#include <string>
+
namespace gwp_asan {
namespace test {
bool OnlyOnce() {
@@ -16,3 +18,37 @@ bool OnlyOnce() {
}
} // namespace test
} // namespace gwp_asan
+
+// Optnone to ensure that the calls to these functions are not optimized away,
+// as we're looking for them in the backtraces.
+__attribute__((optnone)) char *
+AllocateMemory(gwp_asan::GuardedPoolAllocator &GPA) {
+ return static_cast<char *>(GPA.allocate(1));
+}
+__attribute__((optnone)) void
+DeallocateMemory(gwp_asan::GuardedPoolAllocator &GPA, void *Ptr) {
+ GPA.deallocate(Ptr);
+}
+__attribute__((optnone)) void
+DeallocateMemory2(gwp_asan::GuardedPoolAllocator &GPA, void *Ptr) {
+ GPA.deallocate(Ptr);
+}
+__attribute__((optnone)) void TouchMemory(void *Ptr) {
+ *(reinterpret_cast<volatile char *>(Ptr)) = 7;
+}
+
+void CheckOnlyOneGwpAsanCrash(const std::string &OutputBuffer) {
+ const char *kGwpAsanErrorString = "GWP-ASan detected a memory error";
+ size_t FirstIndex = OutputBuffer.find(kGwpAsanErrorString);
+ ASSERT_NE(FirstIndex, std::string::npos) << "Didn't detect a GWP-ASan crash";
+ ASSERT_EQ(OutputBuffer.find(kGwpAsanErrorString, FirstIndex + 1),
+ std::string::npos)
+ << "Detected more than one GWP-ASan crash:\n"
+ << OutputBuffer;
+}
+
+INSTANTIATE_TEST_SUITE_P(RecoverableTests, BacktraceGuardedPoolAllocator,
+ /* Recoverable */ testing::Values(true));
+INSTANTIATE_TEST_SUITE_P(RecoverableAndNonRecoverableTests,
+ BacktraceGuardedPoolAllocatorDeathTest,
+ /* Recoverable */ testing::Bool());
diff --git a/gwp_asan/tests/harness.h b/gwp_asan/tests/harness.h
index ed91e64..ae39a44 100644
--- a/gwp_asan/tests/harness.h
+++ b/gwp_asan/tests/harness.h
@@ -12,11 +12,18 @@
#include <stdarg.h>
#if defined(__Fuchsia__)
+#define ZXTEST_USE_STREAMABLE_MACROS
#include <zxtest/zxtest.h>
-using Test = ::zxtest::Test;
+namespace testing = zxtest;
+// zxtest defines a different ASSERT_DEATH, taking a lambda and an error message
+// if death didn't occur, versus gtest taking a statement and a string to search
+// for in the dying process. zxtest doesn't define an EXPECT_DEATH, so we use
+// that in the tests below (which works as intended for gtest), and we define
+// EXPECT_DEATH as a wrapper for zxtest's ASSERT_DEATH. Note that zxtest drops
+// the functionality for checking for a particular message in death.
+#define EXPECT_DEATH(X, Y) ASSERT_DEATH(([&] { X; }), "")
#else
#include "gtest/gtest.h"
-using Test = ::testing::Test;
#endif
#include "gwp_asan/guarded_pool_allocator.h"
@@ -39,7 +46,14 @@ bool OnlyOnce();
}; // namespace test
}; // namespace gwp_asan
-class DefaultGuardedPoolAllocator : public Test {
+char *AllocateMemory(gwp_asan::GuardedPoolAllocator &GPA);
+void DeallocateMemory(gwp_asan::GuardedPoolAllocator &GPA, void *Ptr);
+void DeallocateMemory2(gwp_asan::GuardedPoolAllocator &GPA, void *Ptr);
+void TouchMemory(void *Ptr);
+
+void CheckOnlyOneGwpAsanCrash(const std::string &OutputBuffer);
+
+class DefaultGuardedPoolAllocator : public ::testing::Test {
public:
void SetUp() override {
gwp_asan::options::Options Opts;
@@ -58,7 +72,7 @@ protected:
MaxSimultaneousAllocations;
};
-class CustomGuardedPoolAllocator : public Test {
+class CustomGuardedPoolAllocator : public ::testing::Test {
public:
void
InitNumSlots(decltype(gwp_asan::options::Options::MaxSimultaneousAllocations)
@@ -81,7 +95,8 @@ protected:
MaxSimultaneousAllocations;
};
-class BacktraceGuardedPoolAllocator : public Test {
+class BacktraceGuardedPoolAllocator
+ : public ::testing::TestWithParam</* Recoverable */ bool> {
public:
void SetUp() override {
gwp_asan::options::Options Opts;
@@ -91,10 +106,19 @@ public:
Opts.InstallForkHandlers = gwp_asan::test::OnlyOnce();
GPA.init(Opts);
+ // In recoverable mode, capture GWP-ASan logs to an internal buffer so that
+ // we can search it in unit tests. For non-recoverable tests, the default
+ // buffer is fine, as any tests should be EXPECT_DEATH()'d.
+ Recoverable = GetParam();
+ gwp_asan::Printf_t PrintfFunction = PrintfToBuffer;
+ GetOutputBuffer().clear();
+ if (!Recoverable)
+ PrintfFunction = gwp_asan::test::getPrintfFunction();
+
gwp_asan::segv_handler::installSignalHandlers(
- &GPA, gwp_asan::test::getPrintfFunction(),
- gwp_asan::backtrace::getPrintBacktraceFunction(),
- gwp_asan::backtrace::getSegvBacktraceFunction());
+ &GPA, PrintfFunction, gwp_asan::backtrace::getPrintBacktraceFunction(),
+ gwp_asan::backtrace::getSegvBacktraceFunction(),
+ /* Recoverable */ Recoverable);
}
void TearDown() override {
@@ -103,7 +127,23 @@ public:
}
protected:
+ static std::string &GetOutputBuffer() {
+ static std::string Buffer;
+ return Buffer;
+ }
+
+ __attribute__((format(printf, 1, 2))) static void
+ PrintfToBuffer(const char *Format, ...) {
+ va_list AP;
+ va_start(AP, Format);
+ char Buffer[8192];
+ vsnprintf(Buffer, sizeof(Buffer), Format, AP);
+ GetOutputBuffer() += Buffer;
+ va_end(AP);
+ }
+
gwp_asan::GuardedPoolAllocator GPA;
+ bool Recoverable;
};
// https://github.com/google/googletest/blob/master/docs/advanced.md#death-tests-and-threads
diff --git a/gwp_asan/tests/never_allocated.cpp b/gwp_asan/tests/never_allocated.cpp
new file mode 100644
index 0000000..2f695b4
--- /dev/null
+++ b/gwp_asan/tests/never_allocated.cpp
@@ -0,0 +1,55 @@
+//===-- never_allocated.cpp -------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include <string>
+
+#include "gwp_asan/common.h"
+#include "gwp_asan/crash_handler.h"
+#include "gwp_asan/tests/harness.h"
+
+TEST_P(BacktraceGuardedPoolAllocatorDeathTest, NeverAllocated) {
+ SCOPED_TRACE("");
+ void *Ptr = GPA.allocate(0x1000);
+ GPA.deallocate(Ptr);
+
+ std::string DeathNeedle =
+ "GWP-ASan cannot provide any more information about this error";
+
+ // Trigger a guard page in a completely different slot that's never allocated.
+ // Previously, there was a bug that this would result in nullptr-dereference
+ // in the posix crash handler.
+ char *volatile NeverAllocatedPtr = static_cast<char *>(Ptr) + 0x3000;
+ if (!Recoverable) {
+ EXPECT_DEATH(*NeverAllocatedPtr = 0, DeathNeedle);
+ return;
+ }
+
+ *NeverAllocatedPtr = 0;
+ CheckOnlyOneGwpAsanCrash(GetOutputBuffer());
+ ASSERT_NE(std::string::npos, GetOutputBuffer().find(DeathNeedle));
+
+ // Check that subsequent invalid touches of the pool don't print a report.
+ GetOutputBuffer().clear();
+ for (size_t i = 0; i < 100; ++i) {
+ *NeverAllocatedPtr = 0;
+ *(NeverAllocatedPtr + 0x2000) = 0;
+ *(NeverAllocatedPtr + 0x3000) = 0;
+ ASSERT_TRUE(GetOutputBuffer().empty());
+ }
+
+ // Check that reports on the other slots still report a double-free, but only
+ // once.
+ GetOutputBuffer().clear();
+ GPA.deallocate(Ptr);
+ ASSERT_NE(std::string::npos, GetOutputBuffer().find("Double Free"));
+ GetOutputBuffer().clear();
+ for (size_t i = 0; i < 100; ++i) {
+ DeallocateMemory(GPA, Ptr);
+ ASSERT_TRUE(GetOutputBuffer().empty());
+ }
+}
diff --git a/gwp_asan/tests/recoverable.cpp b/gwp_asan/tests/recoverable.cpp
new file mode 100644
index 0000000..2c14ff5
--- /dev/null
+++ b/gwp_asan/tests/recoverable.cpp
@@ -0,0 +1,194 @@
+//===-- recoverable.cpp -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include <atomic>
+#include <mutex>
+#include <regex>
+#include <string>
+#include <thread>
+#include <vector>
+
+#include "gwp_asan/common.h"
+#include "gwp_asan/crash_handler.h"
+#include "gwp_asan/tests/harness.h"
+
+TEST_P(BacktraceGuardedPoolAllocator, MultipleDoubleFreeOnlyOneOutput) {
+ SCOPED_TRACE("");
+ void *Ptr = AllocateMemory(GPA);
+ DeallocateMemory(GPA, Ptr);
+ // First time should generate a crash report.
+ DeallocateMemory(GPA, Ptr);
+ CheckOnlyOneGwpAsanCrash(GetOutputBuffer());
+ ASSERT_NE(std::string::npos, GetOutputBuffer().find("Double Free"));
+
+ // Ensure the crash is only reported once.
+ GetOutputBuffer().clear();
+ for (size_t i = 0; i < 100; ++i) {
+ DeallocateMemory(GPA, Ptr);
+ ASSERT_TRUE(GetOutputBuffer().empty());
+ }
+}
+
+TEST_P(BacktraceGuardedPoolAllocator, MultipleInvalidFreeOnlyOneOutput) {
+ SCOPED_TRACE("");
+ char *Ptr = static_cast<char *>(AllocateMemory(GPA));
+ // First time should generate a crash report.
+ DeallocateMemory(GPA, Ptr + 1);
+ CheckOnlyOneGwpAsanCrash(GetOutputBuffer());
+ ASSERT_NE(std::string::npos, GetOutputBuffer().find("Invalid (Wild) Free"));
+
+ // Ensure the crash is only reported once.
+ GetOutputBuffer().clear();
+ for (size_t i = 0; i < 100; ++i) {
+ DeallocateMemory(GPA, Ptr + 1);
+ ASSERT_TRUE(GetOutputBuffer().empty());
+ }
+}
+
+TEST_P(BacktraceGuardedPoolAllocator, MultipleUseAfterFreeOnlyOneOutput) {
+ SCOPED_TRACE("");
+ void *Ptr = AllocateMemory(GPA);
+ DeallocateMemory(GPA, Ptr);
+ // First time should generate a crash report.
+ TouchMemory(Ptr);
+ ASSERT_NE(std::string::npos, GetOutputBuffer().find("Use After Free"));
+
+ // Ensure the crash is only reported once.
+ GetOutputBuffer().clear();
+ for (size_t i = 0; i < 100; ++i) {
+ TouchMemory(Ptr);
+ ASSERT_TRUE(GetOutputBuffer().empty());
+ }
+}
+
+TEST_P(BacktraceGuardedPoolAllocator, MultipleBufferOverflowOnlyOneOutput) {
+ SCOPED_TRACE("");
+ char *Ptr = static_cast<char *>(AllocateMemory(GPA));
+ // First time should generate a crash report.
+ TouchMemory(Ptr - 16);
+ TouchMemory(Ptr + 16);
+ CheckOnlyOneGwpAsanCrash(GetOutputBuffer());
+ if (GetOutputBuffer().find("Buffer Overflow") == std::string::npos &&
+ GetOutputBuffer().find("Buffer Underflow") == std::string::npos)
+ FAIL() << "Failed to detect buffer underflow/overflow:\n"
+ << GetOutputBuffer();
+
+ // Ensure the crash is only reported once.
+ GetOutputBuffer().clear();
+ for (size_t i = 0; i < 100; ++i) {
+ TouchMemory(Ptr - 16);
+ TouchMemory(Ptr + 16);
+ ASSERT_TRUE(GetOutputBuffer().empty()) << GetOutputBuffer();
+ }
+}
+
+TEST_P(BacktraceGuardedPoolAllocator, OneDoubleFreeOneUseAfterFree) {
+ SCOPED_TRACE("");
+ void *Ptr = AllocateMemory(GPA);
+ DeallocateMemory(GPA, Ptr);
+ // First time should generate a crash report.
+ DeallocateMemory(GPA, Ptr);
+ CheckOnlyOneGwpAsanCrash(GetOutputBuffer());
+ ASSERT_NE(std::string::npos, GetOutputBuffer().find("Double Free"));
+
+ // Ensure the crash is only reported once.
+ GetOutputBuffer().clear();
+ for (size_t i = 0; i < 100; ++i) {
+ DeallocateMemory(GPA, Ptr);
+ ASSERT_TRUE(GetOutputBuffer().empty());
+ }
+}
+
+// We use double-free to detect that each slot can generate a single error.
+// Use-after-free would also be acceptable, but buffer-overflow wouldn't be, as
+// the random left/right alignment means that one right-overflow can disable
+// page protections, and a subsequent left-overflow of a slot that's on the
+// right hand side may not trap.
+TEST_P(BacktraceGuardedPoolAllocator, OneErrorReportPerSlot) {
+ SCOPED_TRACE("");
+ std::vector<void *> Ptrs;
+ for (size_t i = 0; i < GPA.getAllocatorState()->MaxSimultaneousAllocations;
+ ++i) {
+ void *Ptr = AllocateMemory(GPA);
+ ASSERT_NE(Ptr, nullptr);
+ Ptrs.push_back(Ptr);
+ DeallocateMemory(GPA, Ptr);
+ DeallocateMemory(GPA, Ptr);
+ CheckOnlyOneGwpAsanCrash(GetOutputBuffer());
+ ASSERT_NE(std::string::npos, GetOutputBuffer().find("Double Free"));
+ // Ensure the crash from this slot is only reported once.
+ GetOutputBuffer().clear();
+ DeallocateMemory(GPA, Ptr);
+ ASSERT_TRUE(GetOutputBuffer().empty());
+ // Reset the buffer, as we're gonna move to the next allocation.
+ GetOutputBuffer().clear();
+ }
+
+ // All slots should have been used. No further errors should occur.
+ for (size_t i = 0; i < 100; ++i)
+ ASSERT_EQ(AllocateMemory(GPA), nullptr);
+ for (void *Ptr : Ptrs) {
+ DeallocateMemory(GPA, Ptr);
+ TouchMemory(Ptr);
+ }
+ ASSERT_TRUE(GetOutputBuffer().empty());
+}
+
+void singleAllocThrashTask(gwp_asan::GuardedPoolAllocator *GPA,
+ std::atomic<bool> *StartingGun,
+ unsigned NumIterations, unsigned Job, char *Ptr) {
+ while (!*StartingGun) {
+ // Wait for starting gun.
+ }
+
+ for (unsigned i = 0; i < NumIterations; ++i) {
+ switch (Job) {
+ case 0:
+ DeallocateMemory(*GPA, Ptr);
+ break;
+ case 1:
+ DeallocateMemory(*GPA, Ptr + 1);
+ break;
+ case 2:
+ TouchMemory(Ptr);
+ break;
+ case 3:
+ TouchMemory(Ptr - 16);
+ TouchMemory(Ptr + 16);
+ break;
+ default:
+ __builtin_trap();
+ }
+ }
+}
+
+void runInterThreadThrashingSingleAlloc(unsigned NumIterations,
+ gwp_asan::GuardedPoolAllocator *GPA) {
+ std::atomic<bool> StartingGun{false};
+ std::vector<std::thread> Threads;
+ constexpr unsigned kNumThreads = 4;
+
+ char *Ptr = static_cast<char *>(AllocateMemory(*GPA));
+
+ for (unsigned i = 0; i < kNumThreads; ++i) {
+ Threads.emplace_back(singleAllocThrashTask, GPA, &StartingGun,
+ NumIterations, i, Ptr);
+ }
+
+ StartingGun = true;
+
+ for (auto &T : Threads)
+ T.join();
+}
+
+TEST_P(BacktraceGuardedPoolAllocator, InterThreadThrashingSingleAlloc) {
+ SCOPED_TRACE("");
+ constexpr unsigned kNumIterations = 100000;
+ runInterThreadThrashingSingleAlloc(kNumIterations, &GPA);
+ CheckOnlyOneGwpAsanCrash(GetOutputBuffer());
+}
diff --git a/gwp_asan/tests/thread_contention.cpp b/gwp_asan/tests/thread_contention.cpp
index 0992b97..26ccd8e 100644
--- a/gwp_asan/tests/thread_contention.cpp
+++ b/gwp_asan/tests/thread_contention.cpp
@@ -44,12 +44,8 @@ void asyncTask(gwp_asan::GuardedPoolAllocator *GPA,
void runThreadContentionTest(unsigned NumThreads, unsigned NumIterations,
gwp_asan::GuardedPoolAllocator *GPA) {
-
std::atomic<bool> StartingGun{false};
std::vector<std::thread> Threads;
- if (std::thread::hardware_concurrency() < NumThreads) {
- NumThreads = std::thread::hardware_concurrency();
- }
for (unsigned i = 0; i < NumThreads; ++i) {
Threads.emplace_back(asyncTask, GPA, &StartingGun, NumIterations);