Retro68/gcc/libsanitizer/tsan/tsan_clock.h

//===-- tsan_clock.h --------------------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#ifndef TSAN_CLOCK_H
#define TSAN_CLOCK_H
#include "tsan_defs.h"
#include "tsan_dense_alloc.h"

namespace __tsan {

typedef DenseSlabAlloc<ClockBlock, 1<<16, 1<<10> ClockAlloc;
typedef DenseSlabAllocCache ClockCache;
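
// ClockAlloc hands out 512-byte ClockBlocks addressed by compact u32 indices
// rather than raw pointers; that is what lets SyncClock reference second-level
// blocks through u32 slots. A rough sketch of the two-level index arithmetic
// (illustrative only; member names here are assumptions, see
// tsan_dense_alloc.h for the real implementation):
//
//   template <typename T, uptr kL1Size, uptr kL2Size>
//   T *Map(u32 idx) {
//     return &l1_[idx / kL2Size][idx % kL2Size];  // slab, then slot in slab
//   }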

// The clock that lives in sync variables (mutexes, atomics, etc).
class SyncClock {
 public:
  SyncClock();
  ~SyncClock();

  uptr size() const;

  // These are used only in tests.
  u64 get(unsigned tid) const;
  u64 get_clean(unsigned tid) const;

  void Resize(ClockCache *c, uptr nclk);
  void Reset(ClockCache *c);

  void DebugDump(int(*printf)(const char *s, ...));

  // Clock element iterator.
  // Note: it iterates only over the table without regard to dirty entries.
  class Iter {
   public:
    explicit Iter(SyncClock* parent);
    Iter& operator++();
    bool operator!=(const Iter& other);
    ClockElem &operator*();

   private:
    SyncClock *parent_;
    // [pos_, end_) is the current continuous range of clock elements.
    ClockElem *pos_;
    ClockElem *end_;
    int block_;  // Index of the current second-level block.

    NOINLINE void Next();
  };

  Iter begin();
  Iter end();
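
  // Usage sketch (illustrative, not part of the interface): Iter models just
  // enough of an iterator for range-for, so the whole table can be scanned as:
  //
  //   void scan(SyncClock *sc) {
  //     for (ClockElem &ce : *sc)  // uses begin()/end() declared above
  //       (void)ce.epoch;          // each element is a 64-bit clock entry
  //   }
  //
  // Per the note above, active dirty entries override what such a scan sees.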

 private:
  friend class ThreadClock;
  friend class Iter;
  static const uptr kDirtyTids = 2;

  struct Dirty {
    u64 epoch : kClkBits;
    u64 tid : 64 - kClkBits;  // kInvalidId if not active
  };

  unsigned release_store_tid_;
  unsigned release_store_reused_;
  Dirty dirty_[kDirtyTids];
  // If size_ is 0, tab_ is nullptr.
  // If size_ <= 64 (kClockCount), tab_ points to a single ClockBlock used as
  // an array of 64 ClockElem's (ClockBlock::clock).
  // Otherwise, tab_ points to an array with up to 127 u32 elements,
  // each pointing to a second-level 512-byte block with 64 ClockElem's.
  // Unused space in the first-level ClockBlock is used to store additional
  // clock elements.
  // The last u32 element in the first-level ClockBlock is always used as
  // a reference counter.
  //
  // See the following scheme for details.
  // All memory blocks are 512 bytes (allocated from ClockAlloc).
  // Clock (clk) elements are 64 bits.
  // Idx and ref are 32 bits.
  //
  // tab_
  //  |
  //  \/
  //  +----------------------------------------------------+
  //  | clk128 | clk129 | ...unused... | idx1 | idx0 | ref |
  //  +----------------------------------------------------+
  //                                       |      |
  //                                       |      \/
  //                                       |      +----------------+
  //                                       |      | clk0 ... clk63 |
  //                                       |      +----------------+
  //                                       \/
  //                                       +------------------+
  //                                       | clk64 ... clk127 |
  //                                       +------------------+
  //
  // Note: dirty entries, if active, always override what's stored in the clock.
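  //
  // Illustrative lookup sketch (a simplification, not the real accessor; see
  // get() in tsan_clock.cc for the actual logic): a reader must consult the
  // dirty entries before falling back to the table.
  //
  //   u64 lookup(const SyncClock *sc, unsigned tid) {
  //     for (uptr i = 0; i < kDirtyTids; i++)
  //       if (sc->dirty_[i].tid == tid)  // active dirty entry wins
  //         return sc->dirty_[i].epoch;
  //     return sc->elem(tid).epoch;      // two-level table otherwise
  //   }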
  ClockBlock *tab_;
  u32 tab_idx_;
  u16 size_;
  u16 blocks_;  // Number of second-level blocks.

  void Unshare(ClockCache *c);
  bool IsShared() const;
  bool Cachable() const;
  void ResetImpl();
  void FlushDirty();
  uptr capacity() const;
  u32 get_block(uptr bi) const;
  void append_block(u32 idx);
  ClockElem &elem(unsigned tid) const;
};

// The clock that lives in threads.
class ThreadClock {
 public:
  typedef DenseSlabAllocCache Cache;

  explicit ThreadClock(unsigned tid, unsigned reused = 0);

  u64 get(unsigned tid) const;
  void set(ClockCache *c, unsigned tid, u64 v);
  void set(u64 v);
  void tick();
  uptr size() const;

  void acquire(ClockCache *c, SyncClock *src);
  void release(ClockCache *c, SyncClock *dst);
  void acq_rel(ClockCache *c, SyncClock *dst);
  void ReleaseStore(ClockCache *c, SyncClock *dst);
  void ResetCached(ClockCache *c);

  void DebugReset();
  void DebugDump(int(*printf)(const char *s, ...));
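
  // Vector-clock semantics of the operations above (a sketch distilled from
  // the comment atop tsan_clock.cc; the real implementation layers caching,
  // dirty entries and reference counting on top of this):
  //
  //   acquire(src):      for all i: clk_[i] = max(clk_[i], src[i]);
  //   release(dst):      for all i: dst[i] = max(dst[i], clk_[i]);
  //   ReleaseStore(dst): for all i: dst[i] = clk_[i];
  //   acq_rel(dst):      acquire(dst) followed by release(dst);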

 private:
  static const uptr kDirtyTids = SyncClock::kDirtyTids;
  // Index of the thread associated with the clock ("current thread").
  const unsigned tid_;
  const unsigned reused_;  // tid_ reuse count.
  // Current thread time when it acquired something from other threads.
  u64 last_acquire_;

  // Cached SyncClock (without dirty entries and release_store_tid_).
  // We reuse it for subsequent store-release operations without intervening
  // acquire operations. Since it is shared (and thus constant), the clock
  // value for the current thread is stored in dirty entries of the SyncClock.
  // We hold a reference to the table while it is cached here.
  u32 cached_idx_;
  u16 cached_size_;
  u16 cached_blocks_;

  // Number of active elements in the clk_ table (the rest is zeros).
  uptr nclk_;
  u64 clk_[kMaxTidInClock];  // Fixed size vector clock.

  bool IsAlreadyAcquired(const SyncClock *src) const;
  void UpdateCurrentThread(ClockCache *c, SyncClock *dst) const;
};

ALWAYS_INLINE u64 ThreadClock::get(unsigned tid) const {
  DCHECK_LT(tid, kMaxTidInClock);
  return clk_[tid];
}

ALWAYS_INLINE void ThreadClock::set(u64 v) {
  DCHECK_GE(v, clk_[tid_]);
  clk_[tid_] = v;
}

ALWAYS_INLINE void ThreadClock::tick() {
  clk_[tid_]++;
}

ALWAYS_INLINE uptr ThreadClock::size() const {
  return nclk_;
}
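
// End-to-end sketch of how these pieces are meant to interact (illustrative
// only; in the runtime the ClockCache comes from the Processor and the
// SyncClock lives inside a SyncVar):
//
//   ClockCache cache;
//   ThreadClock t0(/*tid=*/0), t1(/*tid=*/1);
//   SyncClock s;
//   t0.tick();               // t0 advances its own epoch on an event
//   t0.release(&cache, &s);  // e.g. mutex unlock: merge t0's clock into s
//   t1.acquire(&cache, &s);  // mutex lock: t1 learns t0's epoch via s
//   s.Reset(&cache);         // return s's blocks to the cache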

ALWAYS_INLINE SyncClock::Iter SyncClock::begin() {
  return Iter(this);
}

ALWAYS_INLINE SyncClock::Iter SyncClock::end() {
  return Iter(nullptr);
}

ALWAYS_INLINE uptr SyncClock::size() const {
  return size_;
}

ALWAYS_INLINE SyncClock::Iter::Iter(SyncClock* parent)
    : parent_(parent)
    , pos_(nullptr)
    , end_(nullptr)
    , block_(-1) {
  if (parent)
    Next();
}

ALWAYS_INLINE SyncClock::Iter& SyncClock::Iter::operator++() {
  pos_++;
  if (UNLIKELY(pos_ >= end_))
    Next();
  return *this;
}

ALWAYS_INLINE bool SyncClock::Iter::operator!=(const SyncClock::Iter& other) {
  return parent_ != other.parent_;
}

ALWAYS_INLINE ClockElem &SyncClock::Iter::operator*() {
  return *pos_;
}

} // namespace __tsan
#endif // TSAN_CLOCK_H