//===-- tsan_rtl_report.cc ------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "tsan_report.h"
#include "tsan_sync.h"
#include "tsan_mman.h"
#include "tsan_flags.h"
#include "tsan_fd.h"
namespace __tsan {
using namespace __sanitizer; // NOLINT

static ReportStack *SymbolizeStack(StackTrace trace);

void TsanCheckFailed(const char *file, int line, const char *cond,
                     u64 v1, u64 v2) {
  // There is a high probability that interceptors will check-fail as well;
  // on the other hand, there is no sense in processing interceptors since we
  // are going to die soon.
  ScopedIgnoreInterceptors ignore;
#if !SANITIZER_GO
  cur_thread()->ignore_sync++;
  cur_thread()->ignore_reads_and_writes++;
#endif
  Printf("FATAL: ThreadSanitizer CHECK failed: "
         "%s:%d \"%s\" (0x%zx, 0x%zx)\n",
         file, line, cond, (uptr)v1, (uptr)v2);
  PrintCurrentStackSlow(StackTrace::GetCurrentPc());
  Die();
}

// Can be overridden by an application/test to intercept reports.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnReport(const ReportDesc *rep, bool suppressed);
#else
SANITIZER_WEAK_CXX_DEFAULT_IMPL
bool OnReport(const ReportDesc *rep, bool suppressed) {
  (void)rep;
  return suppressed;
}
#endif

SANITIZER_WEAK_DEFAULT_IMPL
void __tsan_on_report(const ReportDesc *rep) {
  (void)rep;
}
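
// Example (hypothetical test code): because the default above is weak, an
// application or test can provide a strong OnReport definition in namespace
// __tsan to intercept reports, e.g. to veto printing unconditionally:
//   namespace __tsan {
//   bool OnReport(const ReportDesc *rep, bool suppressed) {
//     return true;  // returning true suppresses the report
//   }
//   }  // namespace __tsan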

static void StackStripMain(SymbolizedStack *frames) {
  SymbolizedStack *last_frame = nullptr;
  SymbolizedStack *last_frame2 = nullptr;
  for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
    last_frame2 = last_frame;
    last_frame = cur;
  }

  if (last_frame2 == 0)
    return;
#if !SANITIZER_GO
  const char *last = last_frame->info.function;
  const char *last2 = last_frame2->info.function;
  // Strip frame above 'main'
  if (last2 && 0 == internal_strcmp(last2, "main")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
  // Strip our internal thread start routine.
  } else if (last && 0 == internal_strcmp(last, "__tsan_thread_start_func")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
  // Strip global ctors init.
  } else if (last && 0 == internal_strcmp(last, "__do_global_ctors_aux")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
  // If both are 0, then we probably just failed to symbolize.
  } else if (last || last2) {
    // Ensure that we recovered the stack completely. A trimmed stack can
    // actually happen if we do not instrument some code, so this is only a
    // debug print. However, we must try hard not to miss it due to our fault.
    DPrintf("Bottom stack frame is missed\n");
  }
#else
  // The last frame always points into the runtime (gosched0, goexit0,
  // runtime.main).
  last_frame->ClearAll();
  last_frame2->next = nullptr;
#endif
}

ReportStack *SymbolizeStackId(u32 stack_id) {
  if (stack_id == 0)
    return 0;
  StackTrace stack = StackDepotGet(stack_id);
  if (stack.trace == nullptr)
    return nullptr;
  return SymbolizeStack(stack);
}
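
// Note: SymbolizeCode() may expand one PC into multiple frames (inlined
// functions), and the loop in SymbolizeStack() below prepends each expansion
// to the list, so the input trace is expected outermost-frame-first and the
// finished report shows the innermost frame on top.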
static ReportStack *SymbolizeStack(StackTrace trace) {
  if (trace.size == 0)
    return 0;
  SymbolizedStack *top = nullptr;
  for (uptr si = 0; si < trace.size; si++) {
    const uptr pc = trace.trace[si];
    uptr pc1 = pc;
    // We obtain the return address, but we're interested in the previous
    // instruction.
    if ((pc & kExternalPCBit) == 0)
      pc1 = StackTrace::GetPreviousInstructionPc(pc);
    SymbolizedStack *ent = SymbolizeCode(pc1);
    CHECK_NE(ent, 0);
    SymbolizedStack *last = ent;
    while (last->next) {
      last->info.address = pc;  // restore original pc for report
      last = last->next;
    }
    last->info.address = pc;  // restore original pc for report
    last->next = top;
    top = ent;
  }
  StackStripMain(top);

  ReportStack *stack = ReportStack::New();
  stack->frames = top;
  return stack;
}
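
// ScopedReport is an RAII guard: the constructor allocates the report
// descriptor and takes both report mutexes, and the destructor releases them
// in reverse order and frees the report, so construction and printing of a
// report are serialized across threads.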
ScopedReport::ScopedReport(ReportType typ, uptr tag) {
  ctx->thread_registry->CheckLocked();
  void *mem = internal_alloc(MBlockReport, sizeof(ReportDesc));
  rep_ = new(mem) ReportDesc;
  rep_->typ = typ;
  rep_->tag = tag;
  ctx->report_mtx.Lock();
  CommonSanitizerReportMutex.Lock();
}

ScopedReport::~ScopedReport() {
  CommonSanitizerReportMutex.Unlock();
  ctx->report_mtx.Unlock();
  DestroyAndFree(rep_);
}

void ScopedReport::AddStack(StackTrace stack, bool suppressable) {
  ReportStack **rs = rep_->stacks.PushBack();
  *rs = SymbolizeStack(stack);
  (*rs)->suppressable = suppressable;
}
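
// A Shadow value packs the description of one memory access: tid and epoch
// plus addr0 (offset within the 8-byte shadow granule), size, and the
// is-write/is-atomic bits (see tsan_rtl.h for the authoritative layout).
// AddMemoryAccess() below unpacks it into a human-readable ReportMop.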
void ScopedReport::AddMemoryAccess(uptr addr, uptr external_tag, Shadow s,
                                   StackTrace stack, const MutexSet *mset) {
  void *mem = internal_alloc(MBlockReportMop, sizeof(ReportMop));
  ReportMop *mop = new(mem) ReportMop;
  rep_->mops.PushBack(mop);
  mop->tid = s.tid();
  mop->addr = addr + s.addr0();
  mop->size = s.size();
  mop->write = s.IsWrite();
  mop->atomic = s.IsAtomic();
  mop->stack = SymbolizeStack(stack);
  mop->external_tag = external_tag;
  if (mop->stack)
    mop->stack->suppressable = true;
  for (uptr i = 0; i < mset->Size(); i++) {
    MutexSet::Desc d = mset->Get(i);
    u64 mid = this->AddMutex(d.id);
    ReportMopMutex mtx = {mid, d.write};
    mop->mset.PushBack(mtx);
  }
}

void ScopedReport::AddUniqueTid(int unique_tid) {
  rep_->unique_tids.PushBack(unique_tid);
}

void ScopedReport::AddThread(const ThreadContext *tctx, bool suppressable) {
  for (uptr i = 0; i < rep_->threads.Size(); i++) {
    if ((u32)rep_->threads[i]->id == tctx->tid)
      return;
  }
  void *mem = internal_alloc(MBlockReportThread, sizeof(ReportThread));
  ReportThread *rt = new(mem) ReportThread;
  rep_->threads.PushBack(rt);
  rt->id = tctx->tid;
  rt->os_id = tctx->os_id;
  rt->running = (tctx->status == ThreadStatusRunning);
  rt->name = internal_strdup(tctx->name);
  rt->parent_tid = tctx->parent_tid;
  rt->workerthread = tctx->workerthread;
  rt->stack = 0;
  rt->stack = SymbolizeStackId(tctx->creation_stack_id);
  if (rt->stack)
    rt->stack->suppressable = suppressable;
}

#if !SANITIZER_GO
static bool FindThreadByUidLockedCallback(ThreadContextBase *tctx, void *arg) {
  int unique_id = *(int *)arg;
  return tctx->unique_id == (u32)unique_id;
}

static ThreadContext *FindThreadByUidLocked(int unique_id) {
  ctx->thread_registry->CheckLocked();
  return static_cast<ThreadContext *>(
      ctx->thread_registry->FindThreadContextLocked(
          FindThreadByUidLockedCallback, &unique_id));
}

static ThreadContext *FindThreadByTidLocked(int tid) {
  ctx->thread_registry->CheckLocked();
  return static_cast<ThreadContext*>(
      ctx->thread_registry->GetThreadLocked(tid));
}

static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) {
  uptr addr = (uptr)arg;
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  if (tctx->status != ThreadStatusRunning)
    return false;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  return ((addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size) ||
          (addr >= thr->tls_addr && addr < thr->tls_addr + thr->tls_size));
}

ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
  ctx->thread_registry->CheckLocked();
  ThreadContext *tctx = static_cast<ThreadContext*>(
      ctx->thread_registry->FindThreadContextLocked(IsInStackOrTls,
                                                    (void*)addr));
  if (!tctx)
    return 0;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  *is_stack = (addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size);
  return tctx;
}
#endif

void ScopedReport::AddThread(int unique_tid, bool suppressable) {
#if !SANITIZER_GO
  if (const ThreadContext *tctx = FindThreadByUidLocked(unique_tid))
    AddThread(tctx, suppressable);
#endif
}

void ScopedReport::AddMutex(const SyncVar *s) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->id == s->uid)
      return;
  }
  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
  ReportMutex *rm = new(mem) ReportMutex;
  rep_->mutexes.PushBack(rm);
  rm->id = s->uid;
  rm->addr = s->addr;
  rm->destroyed = false;
  rm->stack = SymbolizeStackId(s->creation_stack_id);
}
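
// The u64 mutex id used below packs the SyncVar's address together with its
// unique id (see SyncVar::GetId/SplitId in tsan_sync.h): the address alone is
// ambiguous because another mutex can later be created at the same address,
// so the uid is rechecked before the mutex is reported as live.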
u64 ScopedReport::AddMutex(u64 id) {
  u64 uid = 0;
  u64 mid = id;
  uptr addr = SyncVar::SplitId(id, &uid);
  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
  // Check that the mutex is still alive.
  // Another mutex can be created at the same address,
  // so check uid as well.
  if (s && s->CheckId(uid)) {
    mid = s->uid;
    AddMutex(s);
  } else {
    AddDeadMutex(id);
  }
  if (s)
    s->mtx.Unlock();
  return mid;
}

void ScopedReport::AddDeadMutex(u64 id) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->id == id)
      return;
  }
  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
  ReportMutex *rm = new(mem) ReportMutex;
  rep_->mutexes.PushBack(rm);
  rm->id = id;
  rm->addr = 0;
  rm->destroyed = true;
  rm->stack = 0;
}

void ScopedReport::AddLocation(uptr addr, uptr size) {
  if (addr == 0)
    return;
#if !SANITIZER_GO
  int fd = -1;
  int creat_tid = kInvalidTid;
  u32 creat_stack = 0;
  if (FdLocation(addr, &fd, &creat_tid, &creat_stack)) {
    ReportLocation *loc = ReportLocation::New(ReportLocationFD);
    loc->fd = fd;
    loc->tid = creat_tid;
    loc->stack = SymbolizeStackId(creat_stack);
    rep_->locs.PushBack(loc);
    ThreadContext *tctx = FindThreadByUidLocked(creat_tid);
    if (tctx)
      AddThread(tctx);
    return;
  }
  MBlock *b = 0;
  Allocator *a = allocator();
  if (a->PointerIsMine((void*)addr)) {
    void *block_begin = a->GetBlockBegin((void*)addr);
    if (block_begin)
      b = ctx->metamap.GetBlock((uptr)block_begin);
  }
  if (b != 0) {
    ThreadContext *tctx = FindThreadByTidLocked(b->tid);
    ReportLocation *loc = ReportLocation::New(ReportLocationHeap);
    loc->heap_chunk_start = (uptr)allocator()->GetBlockBegin((void *)addr);
    loc->heap_chunk_size = b->siz;
    loc->external_tag = b->tag;
    loc->tid = tctx ? tctx->tid : b->tid;
    loc->stack = SymbolizeStackId(b->stk);
    rep_->locs.PushBack(loc);
    if (tctx)
      AddThread(tctx);
    return;
  }
  bool is_stack = false;
  if (ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack)) {
    ReportLocation *loc =
        ReportLocation::New(is_stack ? ReportLocationStack : ReportLocationTLS);
    loc->tid = tctx->tid;
    rep_->locs.PushBack(loc);
    AddThread(tctx);
  }
#endif
  if (ReportLocation *loc = SymbolizeData(addr)) {
    loc->suppressable = true;
    rep_->locs.PushBack(loc);
    return;
  }
}

#if !SANITIZER_GO
void ScopedReport::AddSleep(u32 stack_id) {
  rep_->sleep = SymbolizeStackId(stack_id);
}
#endif

void ScopedReport::SetCount(int count) {
  rep_->count = count;
}

const ReportDesc *ScopedReport::GetReport() const {
  return rep_;
}
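
// Each trace entry is a u64 Event that packs the event type in the top bits
// and the PC (or address) in the low kEventPCBits bits; roughly (see
// tsan_trace.h for the authoritative definitions):
//   Event ev = ((u64)typ << kEventPCBits)
//              | (pc & ((1ull << kEventPCBits) - 1));
// RestoreStack() decodes these events to replay the thread's call stack and
// mutex set up to the epoch of the racy access.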
void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
                  MutexSet *mset, uptr *tag) {
  // This function restores stack trace and mutex set for the thread/epoch.
  // It does so by getting stack trace and mutex set at the beginning of
  // trace part, and then replaying the trace till the given epoch.
  Trace* trace = ThreadTrace(tid);
  ReadLock l(&trace->mtx);
  const int partidx = (epoch / kTracePartSize) % TraceParts();
  TraceHeader* hdr = &trace->headers[partidx];
  if (epoch < hdr->epoch0 || epoch >= hdr->epoch0 + kTracePartSize)
    return;
  CHECK_EQ(RoundDown(epoch, kTracePartSize), hdr->epoch0);
  const u64 epoch0 = RoundDown(epoch, TraceSize());
  const u64 eend = epoch % TraceSize();
  const u64 ebegin = RoundDown(eend, kTracePartSize);
  DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",
          tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx);
  Vector<uptr> stack(MBlockReportStack);
  stack.Resize(hdr->stack0.size + 64);
  for (uptr i = 0; i < hdr->stack0.size; i++) {
    stack[i] = hdr->stack0.trace[i];
    DPrintf2(" #%02zu: pc=%zx\n", i, stack[i]);
  }
  if (mset)
    *mset = hdr->mset0;
  uptr pos = hdr->stack0.size;
  Event *events = (Event*)GetThreadTrace(tid);
  for (uptr i = ebegin; i <= eend; i++) {
    Event ev = events[i];
    EventType typ = (EventType)(ev >> kEventPCBits);
    uptr pc = (uptr)(ev & ((1ull << kEventPCBits) - 1));
    DPrintf2(" %zu typ=%d pc=%zx\n", i, typ, pc);
    if (typ == EventTypeMop) {
      stack[pos] = pc;
    } else if (typ == EventTypeFuncEnter) {
      if (stack.Size() < pos + 2)
        stack.Resize(pos + 2);
      stack[pos++] = pc;
    } else if (typ == EventTypeFuncExit) {
      if (pos > 0)
        pos--;
    }
    if (mset) {
      if (typ == EventTypeLock) {
        mset->Add(pc, true, epoch0 + i);
      } else if (typ == EventTypeUnlock) {
        mset->Del(pc, true);
      } else if (typ == EventTypeRLock) {
        mset->Add(pc, false, epoch0 + i);
      } else if (typ == EventTypeRUnlock) {
        mset->Del(pc, false);
      }
    }
    for (uptr j = 0; j <= pos; j++)
      DPrintf2(" #%zu: %zx\n", j, stack[j]);
  }
  if (pos == 0 && stack[0] == 0)
    return;
  pos++;
  stk->Init(&stack[0], pos);
  ExtractTagFromStack(stk, tag);
}

static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
                             uptr addr_min, uptr addr_max) {
  bool equal_stack = false;
  RacyStacks hash;
  bool equal_address = false;
  RacyAddress ra0 = {addr_min, addr_max};
  {
    ReadLock lock(&ctx->racy_mtx);
    if (flags()->suppress_equal_stacks) {
      hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
      hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
      for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
        if (hash == ctx->racy_stacks[i]) {
          VPrintf(2,
                  "ThreadSanitizer: suppressing report as doubled (stack)\n");
          equal_stack = true;
          break;
        }
      }
    }
    if (flags()->suppress_equal_addresses) {
      for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) {
        RacyAddress ra2 = ctx->racy_addresses[i];
        uptr maxbeg = max(ra0.addr_min, ra2.addr_min);
        uptr minend = min(ra0.addr_max, ra2.addr_max);
        if (maxbeg < minend) {
          VPrintf(2, "ThreadSanitizer: suppressing report as doubled (addr)\n");
          equal_address = true;
          break;
        }
      }
    }
  }
  if (!equal_stack && !equal_address)
    return false;
  if (!equal_stack) {
    Lock lock(&ctx->racy_mtx);
    ctx->racy_stacks.PushBack(hash);
  }
  if (!equal_address) {
    Lock lock(&ctx->racy_mtx);
    ctx->racy_addresses.PushBack(ra0);
  }
  return true;
}
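
// HandleRacyStacks() above consults the registries that AddRacyStacks()
// below populates: with suppress_equal_stacks, a race is dropped when the
// md5 hashes of both stacks match a previously reported pair; with
// suppress_equal_addresses, when its [addr_min, addr_max) range overlaps a
// previously reported one.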
static void AddRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
                          uptr addr_min, uptr addr_max) {
  Lock lock(&ctx->racy_mtx);
  if (flags()->suppress_equal_stacks) {
    RacyStacks hash;
    hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
    hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
    ctx->racy_stacks.PushBack(hash);
  }
  if (flags()->suppress_equal_addresses) {
    RacyAddress ra0 = {addr_min, addr_max};
    ctx->racy_addresses.PushBack(ra0);
  }
}

bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
  if (!flags()->report_bugs || thr->suppress_reports)
    return false;
  atomic_store_relaxed(&ctx->last_symbolize_time_ns, NanoTime());
  const ReportDesc *rep = srep.GetReport();
  CHECK_EQ(thr->current_report, nullptr);
  thr->current_report = rep;
  Suppression *supp = 0;
  uptr pc_or_addr = 0;
  for (uptr i = 0; pc_or_addr == 0 && i < rep->mops.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->mops[i]->stack, &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->stacks.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->stacks[i], &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->threads.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->threads[i]->stack, &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->locs.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->locs[i], &supp);
  if (pc_or_addr != 0) {
    Lock lock(&ctx->fired_suppressions_mtx);
    FiredSuppression s = {srep.GetReport()->typ, pc_or_addr, supp};
    ctx->fired_suppressions.push_back(s);
  }
  {
    bool old_is_freeing = thr->is_freeing;
    thr->is_freeing = false;
    bool suppressed = OnReport(rep, pc_or_addr != 0);
    thr->is_freeing = old_is_freeing;
    if (suppressed) {
      thr->current_report = nullptr;
      return false;
    }
  }
  PrintReport(rep);
  __tsan_on_report(rep);
  ctx->nreported++;
  if (flags()->halt_on_error)
    Die();
  thr->current_report = nullptr;
  return true;
}
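
// When a report matches a suppression, OutputReport() records the matching
// pc/addr in ctx->fired_suppressions; the IsFiredSuppression() overloads
// below let later racy accesses that hit the same pc/addr be dropped early,
// before a full report is built and symbolized.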
bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace) {
  ReadLock lock(&ctx->fired_suppressions_mtx);
  for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
    if (ctx->fired_suppressions[k].type != type)
      continue;
    for (uptr j = 0; j < trace.size; j++) {
      FiredSuppression *s = &ctx->fired_suppressions[k];
      if (trace.trace[j] == s->pc_or_addr) {
        if (s->supp)
          atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
        return true;
      }
    }
  }
  return false;
}

static bool IsFiredSuppression(Context *ctx, ReportType type, uptr addr) {
  ReadLock lock(&ctx->fired_suppressions_mtx);
  for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
    if (ctx->fired_suppressions[k].type != type)
      continue;
    FiredSuppression *s = &ctx->fired_suppressions[k];
    if (addr == s->pc_or_addr) {
      if (s->supp)
        atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
      return true;
    }
  }
  return false;
}
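
// Returns true if the race must be reported even with report_atomic_races=0:
// either no atomic accesses are involved, or an atomic access races with a
// free, which is still reported.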
static bool RaceBetweenAtomicAndFree(ThreadState *thr) {
  Shadow s0(thr->racy_state[0]);
  Shadow s1(thr->racy_state[1]);
  CHECK(!(s0.IsAtomic() && s1.IsAtomic()));
  if (!s0.IsAtomic() && !s1.IsAtomic())
    return true;
  if (s0.IsAtomic() && s1.IsFreed())
    return true;
  if (s1.IsAtomic() && thr->is_freeing)
    return true;
  return false;
}
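
// ReportRace() is the entry point for reporting a detected race: it computes
// the racy address range from the two shadow values, obtains the current
// stack directly and restores the other thread's stack and mutex set from
// its trace (RestoreStack), applies suppression/deduplication checks, and
// only then builds and prints the ScopedReport.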
void ReportRace(ThreadState *thr) {
  CheckNoLocks(thr);

  // Symbolizer makes lots of intercepted calls. If we try to process them,
  // at best it will cause deadlocks on internal mutexes.
  ScopedIgnoreInterceptors ignore;

  if (!flags()->report_bugs)
    return;
  if (!flags()->report_atomic_races && !RaceBetweenAtomicAndFree(thr))
    return;

  bool freed = false;
  {
    Shadow s(thr->racy_state[1]);
    freed = s.GetFreedAndReset();
    thr->racy_state[1] = s.raw();
  }

  uptr addr = ShadowToMem((uptr)thr->racy_shadow_addr);
  uptr addr_min = 0;
  uptr addr_max = 0;
  {
    uptr a0 = addr + Shadow(thr->racy_state[0]).addr0();
    uptr a1 = addr + Shadow(thr->racy_state[1]).addr0();
    uptr e0 = a0 + Shadow(thr->racy_state[0]).size();
    uptr e1 = a1 + Shadow(thr->racy_state[1]).size();
    addr_min = min(a0, a1);
    addr_max = max(e0, e1);
    if (IsExpectedReport(addr_min, addr_max - addr_min))
      return;
  }

  ReportType typ = ReportTypeRace;
  if (thr->is_vptr_access && freed)
    typ = ReportTypeVptrUseAfterFree;
  else if (thr->is_vptr_access)
    typ = ReportTypeVptrRace;
  else if (freed)
    typ = ReportTypeUseAfterFree;

  if (IsFiredSuppression(ctx, typ, addr))
    return;

  const uptr kMop = 2;
  VarSizeStackTrace traces[kMop];
  uptr tags[kMop] = {kExternalTagNone};
  uptr toppc = TraceTopPC(thr);
  if (toppc >> kEventPCBits) {
    // This is a work-around for a known issue.
    // The scenario where this happens is rather elaborate and requires
    // an instrumented __sanitizer_report_error_summary callback and
    // a __tsan_symbolize_external callback and a race during a range memory
    // access larger than 8 bytes. MemoryAccessRange adds the current PC to
    // the trace and starts processing memory accesses. A first memory access
    // triggers a race, we report it and call the instrumented
    // __sanitizer_report_error_summary, which adds more stuff to the trace
    // since it is instrumented. Then a second memory access in
    // MemoryAccessRange also triggers a race and we get here and call
    // TraceTopPC to get the current PC, however now it contains some
    // unrelated events from the callback. Most likely, TraceTopPC will now
    // return an EventTypeFuncExit event. Later we subtract 1 from it (in
    // GetPreviousInstructionPc) and the resulting PC has kExternalPCBit set,
    // so we pass it to __tsan_symbolize_external. __tsan_symbolize_external
    // is within its rights to crash since the PC is completely bogus.
    // test/tsan/double_race.cc contains a test case for this.
    toppc = 0;
  }
  ObtainCurrentStack(thr, toppc, &traces[0], &tags[0]);
  if (IsFiredSuppression(ctx, typ, traces[0]))
    return;

  // MutexSet is too large to live on stack.
  Vector<u64> mset_buffer(MBlockScopedBuf);
  mset_buffer.Resize(sizeof(MutexSet) / sizeof(u64) + 1);
  MutexSet *mset2 = new(&mset_buffer[0]) MutexSet();

  Shadow s2(thr->racy_state[1]);
  RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2, &tags[1]);
  if (IsFiredSuppression(ctx, typ, traces[1]))
    return;

  if (HandleRacyStacks(thr, traces, addr_min, addr_max))
    return;

  // If any of the accesses has a tag, treat this as an "external" race.
  uptr tag = kExternalTagNone;
  for (uptr i = 0; i < kMop; i++) {
    if (tags[i] != kExternalTagNone) {
      typ = ReportTypeExternalRace;
      tag = tags[i];
      break;
    }
  }

  ThreadRegistryLock l0(ctx->thread_registry);
  ScopedReport rep(typ, tag);
  for (uptr i = 0; i < kMop; i++) {
    Shadow s(thr->racy_state[i]);
    rep.AddMemoryAccess(addr, tags[i], s, traces[i],
                        i == 0 ? &thr->mset : mset2);
  }

  for (uptr i = 0; i < kMop; i++) {
    FastState s(thr->racy_state[i]);
    ThreadContext *tctx = static_cast<ThreadContext*>(
        ctx->thread_registry->GetThreadLocked(s.tid()));
    if (s.epoch() < tctx->epoch0 || s.epoch() > tctx->epoch1)
      continue;
    rep.AddThread(tctx);
  }

  rep.AddLocation(addr_min, addr_max - addr_min);

#if !SANITIZER_GO
  {  // NOLINT
    Shadow s(thr->racy_state[1]);
    if (s.epoch() <= thr->last_sleep_clock.get(s.tid()))
      rep.AddSleep(thr->last_sleep_stack_id);
  }
#endif

  if (!OutputReport(thr, rep))
    return;

  AddRacyStacks(thr, traces, addr_min, addr_max);
}

void PrintCurrentStack(ThreadState *thr, uptr pc) {
  VarSizeStackTrace trace;
  ObtainCurrentStack(thr, pc, &trace);
  PrintStack(SymbolizeStack(trace));
}
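
// The unwinder in PrintCurrentStackSlow() below returns frames
// innermost-first, while SymbolizeStack() expects the outermost frame first
// (it prepends while building the report), hence the in-place reversal of
// trace_buffer.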

// Always inlining PrintCurrentStackSlow, because LocatePcInTrace assumes
// __sanitizer_print_stack_trace exists in the actual unwound stack, but a
// tail-call to PrintCurrentStackSlow breaks this assumption because
// __sanitizer_print_stack_trace disappears after the tail-call.
// However, this solution is not reliable enough, please see dvyukov's comment
// http://reviews.llvm.org/D19148#406208
// Also see PR27280 comment 2 and 3 for breaking examples and analysis.
ALWAYS_INLINE
void PrintCurrentStackSlow(uptr pc) {
#if !SANITIZER_GO
  BufferedStackTrace *ptrace =
      new(internal_alloc(MBlockStackTrace, sizeof(BufferedStackTrace)))
          BufferedStackTrace();
  ptrace->Unwind(kStackTraceMax, pc, 0, 0, 0, 0, false);
  for (uptr i = 0; i < ptrace->size / 2; i++) {
    uptr tmp = ptrace->trace_buffer[i];
    ptrace->trace_buffer[i] = ptrace->trace_buffer[ptrace->size - i - 1];
    ptrace->trace_buffer[ptrace->size - i - 1] = tmp;
  }
  PrintStack(SymbolizeStack(*ptrace));
#endif
}
} // namespace __tsan

using namespace __tsan;

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_print_stack_trace() {
  PrintCurrentStackSlow(StackTrace::GetCurrentPc());
}
}  // extern "C"
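
// Example (hypothetical user code): __sanitizer_print_stack_trace can be
// called directly from an instrumented application to print the current
// stack through the sanitizer symbolizer:
//   extern "C" void __sanitizer_print_stack_trace();
//   void DumpStackHere() { __sanitizer_print_stack_trace(); }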