//===-- sanitizer_quarantine.h ----------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Memory quarantine for AddressSanitizer and potentially other tools.
// Quarantine caches a specified amount of memory in per-thread caches and
// then evicts it to a global FIFO queue. When the queue exceeds a specified
// threshold, the oldest memory is recycled.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_QUARANTINE_H
#define SANITIZER_QUARANTINE_H

#include "sanitizer_internal_defs.h"
#include "sanitizer_mutex.h"
#include "sanitizer_list.h"

namespace __sanitizer {

template<typename Node> class QuarantineCache;
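
// A singly-linked batch of quarantined allocations. kSize appears to be
// chosen so that on typical 64-bit targets the whole struct occupies exactly
// 8Kb (3 * 8 header bytes + 1021 * 8 pointer bytes = 8192), the bound
// enforced by the COMPILER_CHECK below.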
struct QuarantineBatch {
  static const uptr kSize = 1021;
  QuarantineBatch *next;
  uptr size;
  uptr count;
  void *batch[kSize];
};

COMPILER_CHECK(sizeof(QuarantineBatch) <= (1 << 13));  // 8Kb.

// The callback interface is:
// void Callback::Recycle(Node *ptr);
// void *cb.Allocate(uptr size);
// void cb.Deallocate(void *ptr);
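//
// A conforming callback might look like the following hypothetical sketch
// (real tools such as ASan supply their own; ReturnToAllocator is a made-up
// stand-in, while InternalAlloc/InternalFree come from sanitizer_common's
// internal allocator):
//
//   struct MyCallback {
//     void Recycle(Node *ptr) { ReturnToAllocator(ptr); }
//     void *Allocate(uptr size) { return InternalAlloc(size); }
//     void Deallocate(void *ptr) { InternalFree(ptr); }
//   };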
template<typename Callback, typename Node>
class Quarantine {
 public:
  typedef QuarantineCache<Callback> Cache;

  explicit Quarantine(LinkerInitialized)
      : cache_(LINKER_INITIALIZED) {
  }

  void Init(uptr size, uptr cache_size) {
    atomic_store(&max_size_, size, memory_order_release);
    atomic_store(&min_size_, size / 10 * 9,
                 memory_order_release);  // 90% of max size.
    max_cache_size_ = cache_size;
  }

  uptr GetSize() const { return atomic_load(&max_size_, memory_order_acquire); }

  void Put(Cache *c, Callback cb, Node *ptr, uptr size) {
    c->Enqueue(cb, ptr, size);
    if (c->Size() > max_cache_size_)
      Drain(c, cb);
  }

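  // Drains the local cache into the global one and, if the global quarantine
  // then exceeds its limit, recycles the oldest batches. TryLock means that
  // at most one thread performs the expensive recycling at a time; the
  // others simply skip it.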
  void NOINLINE Drain(Cache *c, Callback cb) {
    {
      SpinMutexLock l(&cache_mutex_);
      cache_.Transfer(c);
    }
    if (cache_.Size() > GetSize() && recycle_mutex_.TryLock())
      Recycle(cb);
  }

 private:
  // Read-only data.
  char pad0_[kCacheLineSize];
  atomic_uintptr_t max_size_;
  atomic_uintptr_t min_size_;
  uptr max_cache_size_;
  char pad1_[kCacheLineSize];
  SpinMutex cache_mutex_;
  SpinMutex recycle_mutex_;
  Cache cache_;
  char pad2_[kCacheLineSize];

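  // Moves everything above min_size_ from the global cache into a temporary
  // local cache while holding cache_mutex_, then releases recycle_mutex_ and
  // runs the Recycle() callbacks outside of both locks, so other threads are
  // not blocked while memory is actually returned.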
  void NOINLINE Recycle(Callback cb) {
    Cache tmp;
    uptr min_size = atomic_load(&min_size_, memory_order_acquire);
    {
      SpinMutexLock l(&cache_mutex_);
      while (cache_.Size() > min_size) {
        QuarantineBatch *b = cache_.DequeueBatch();
        tmp.EnqueueBatch(b);
      }
    }
    recycle_mutex_.Unlock();
    DoRecycle(&tmp, cb);
  }

  void NOINLINE DoRecycle(Cache *c, Callback cb) {
    while (QuarantineBatch *b = c->DequeueBatch()) {
      const uptr kPrefetch = 16;
      CHECK(kPrefetch <= ARRAY_SIZE(b->batch));
      for (uptr i = 0; i < kPrefetch; i++)
        PREFETCH(b->batch[i]);
      for (uptr i = 0, count = b->count; i < count; i++) {
        if (i + kPrefetch < count)
          PREFETCH(b->batch[i + kPrefetch]);
        cb.Recycle((Node*)b->batch[i]);
      }
      cb.Deallocate(b);
    }
  }
};
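
// A typical usage sketch (illustrative only: MyCallback/MyNode and the byte
// limits are stand-ins, not part of this header):
//
//   static Quarantine<MyCallback, MyNode> q(LINKER_INITIALIZED);
//   static THREADLOCAL Quarantine<MyCallback, MyNode>::Cache cache;
//   q.Init(1 << 22 /* quarantine size */, 1 << 18 /* per-thread cache */);
//   ...
//   q.Put(&cache, MyCallback(), node, size);  // Quarantine a freed chunk.
//   q.Drain(&cache, MyCallback());            // E.g. at thread shutdown.
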
// Per-thread cache of memory blocks.
template<typename Callback>
class QuarantineCache {
 public:
  explicit QuarantineCache(LinkerInitialized) {
  }

  QuarantineCache()
      : size_() {
    list_.clear();
  }

  uptr Size() const {
    return atomic_load(&size_, memory_order_relaxed);
  }

  void Enqueue(Callback cb, void *ptr, uptr size) {
    if (list_.empty() || list_.back()->count == QuarantineBatch::kSize) {
      AllocBatch(cb);
      size += sizeof(QuarantineBatch);  // Count the batch in Quarantine size.
    }
    QuarantineBatch *b = list_.back();
    CHECK(b);
    b->batch[b->count++] = ptr;
    b->size += size;
    SizeAdd(size);
  }

  void Transfer(QuarantineCache *c) {
    list_.append_back(&c->list_);
    SizeAdd(c->Size());
    atomic_store(&c->size_, 0, memory_order_relaxed);
  }

  void EnqueueBatch(QuarantineBatch *b) {
    list_.push_back(b);
    SizeAdd(b->size);
  }

  QuarantineBatch *DequeueBatch() {
    if (list_.empty())
      return nullptr;
    QuarantineBatch *b = list_.front();
    list_.pop_front();
    SizeSub(b->size);
    return b;
  }

 private:
  IntrusiveList<QuarantineBatch> list_;
  atomic_uintptr_t size_;

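  // The load-modify-store in SizeAdd/SizeSub is deliberately not atomic.
  // This appears safe because a cache is only mutated by its owning thread
  // or under Quarantine's cache_mutex_; the relaxed atomics merely keep
  // unsynchronized readers of Size() (e.g. the unlocked check in Drain())
  // free of data races and torn values.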
  void SizeAdd(uptr add) {
    atomic_store(&size_, Size() + add, memory_order_relaxed);
  }
  void SizeSub(uptr sub) {
    atomic_store(&size_, Size() - sub, memory_order_relaxed);
  }

  NOINLINE QuarantineBatch* AllocBatch(Callback cb) {
    QuarantineBatch *b = (QuarantineBatch *)cb.Allocate(sizeof(*b));
    CHECK(b);
    b->count = 0;
    b->size = 0;
    list_.push_back(b);
    return b;
  }
};

}  // namespace __sanitizer

#endif // SANITIZER_QUARANTINE_H