[Allocator Cleanup] Make the growth of the "slab" size of the BumpPtrAllocator significantly less strange by making it a simple function of the number of slabs allocated rather than a recurrence.

I *think* the previous behavior was essentially that the size of the slabs would be doubled after the first 128 were allocated, and then doubled again each time 64 more were allocated, but only if every allocation packed perfectly into the slab size. If not, the wasted space wouldn't be counted toward increasing the size, but allocations over the size threshold *would*. And since the allocations over the size threshold might be much larger than the slab size, this could have somewhat surprising consequences where we rapidly grow the slab size.

This currently requires adding state to the allocator to track the number of slabs currently allocated, but that isn't too bad. I'm planning further changes to the allocator that will make this state fall out even more naturally.

It still doesn't fully decouple the growth rate from the allocations which are over the size threshold. That fix is coming later. This specific fix will allow making the entire thing into a more stateless device and lifting the parameters into template parameters rather than runtime parameters.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@204993 91177308-0d34-0410-b5e6-96231b3b80d8
parent ec90ab499d
commit 415a008ad2
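For illustration only, here is a minimal standalone sketch of the growth rule introduced by this patch (not code from the patch itself; the base slab size of 4096 and the slab counts are just example values): the size handed to the slab allocator is the base slab size scaled by 2^(NumSlabs / 128), with the exponent capped at 30.

#include <algorithm>
#include <cstddef>
#include <cstdio>

// Standalone sketch of the new scaling rule from StartNewSlab(): the base
// slab size is multiplied by 2^(NumSlabs / 128), saturating the exponent at 30.
static size_t scaledSlabSize(size_t SlabSize, size_t NumSlabs) {
  return SlabSize * ((size_t)1 << std::min<size_t>(30, NumSlabs / 128));
}

int main() {
  const size_t SlabSize = 4096;                      // illustrative base slab size
  const size_t Counts[] = {1, 127, 128, 256, 1024};  // hypothetical slab counts
  for (size_t N : Counts)
    std::printf("slab #%zu allocated at %zu bytes\n", N, scaledSlabSize(SlabSize, N));
  return 0;
}

So slabs 1 through 127 come out at the base size, slab 128 at double the base size, slab 256 at four times, and so on, rather than the old behavior of permanently doubling SlabSize whenever BytesAllocated crossed a threshold.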
include/llvm/Support/Allocator.h
@@ -132,6 +132,12 @@ class BumpPtrAllocator {
   /// Used so that we can compute how much space was wasted.
   size_t BytesAllocated;
 
+  /// \brief How many slabs we've allocated.
+  ///
+  /// Used to scale the size of each slab and reduce the number of allocations
+  /// for extremely heavy memory use scenarios.
+  size_t NumSlabs;
+
   /// \brief Aligns \c Ptr to \c Alignment bytes, rounding up.
   ///
   /// Alignment should be a power of two. This method rounds up, so
@@ -179,7 +185,7 @@ public:
 
   void Deallocate(const void * /*Ptr*/) {}
 
-  unsigned GetNumSlabs() const;
+  size_t GetNumSlabs() const { return NumSlabs; }
 
   void PrintStats() const;
 
lib/Support/Allocator.cpp
@@ -24,11 +24,12 @@ namespace llvm {
 BumpPtrAllocator::BumpPtrAllocator(size_t size, size_t threshold,
                                    SlabAllocator &allocator)
     : SlabSize(size), SizeThreshold(std::min(size, threshold)),
-      Allocator(allocator), CurSlab(0), BytesAllocated(0) { }
+      Allocator(allocator), CurSlab(0), BytesAllocated(0), NumSlabs(0) {}
 
 BumpPtrAllocator::BumpPtrAllocator(size_t size, size_t threshold)
     : SlabSize(size), SizeThreshold(std::min(size, threshold)),
-      Allocator(DefaultSlabAllocator), CurSlab(0), BytesAllocated(0) { }
+      Allocator(DefaultSlabAllocator), CurSlab(0), BytesAllocated(0),
+      NumSlabs(0) {}
 
 BumpPtrAllocator::~BumpPtrAllocator() {
   DeallocateSlabs(CurSlab);
@@ -49,13 +50,18 @@ char *BumpPtrAllocator::AlignPtr(char *Ptr, size_t Alignment) {
 /// StartNewSlab - Allocate a new slab and move the bump pointers over into
 /// the new slab. Modifies CurPtr and End.
 void BumpPtrAllocator::StartNewSlab() {
-  // If we allocated a big number of slabs already it's likely that we're going
-  // to allocate more. Increase slab size to reduce mallocs and possibly memory
-  // overhead. The factors are chosen conservatively to avoid overallocation.
-  if (BytesAllocated >= SlabSize * 128)
-    SlabSize *= 2;
+  ++NumSlabs;
+  // Scale the actual allocated slab size based on the number of slabs
+  // allocated. Every 128 slabs allocated, we double the allocated size to
+  // reduce allocation frequency, but saturate at multiplying the slab size by
+  // 2^30.
+  // FIXME: Currently, this count includes special slabs for objects above the
+  // size threshold. That will be fixed in a subsequent commit to make the
+  // growth even more predictable.
+  size_t AllocatedSlabSize =
+      SlabSize * (1 << std::min<size_t>(30, NumSlabs / 128));
 
-  MemSlab *NewSlab = Allocator.Allocate(SlabSize);
+  MemSlab *NewSlab = Allocator.Allocate(AllocatedSlabSize);
   NewSlab->NextPtr = CurSlab;
   CurSlab = NewSlab;
   CurPtr = (char*)(CurSlab + 1);
@@ -75,6 +81,7 @@ void BumpPtrAllocator::DeallocateSlabs(MemSlab *Slab) {
 #endif
     Allocator.Deallocate(Slab);
     Slab = NextSlab;
+    --NumSlabs;
   }
 }
 
@@ -118,6 +125,7 @@ void *BumpPtrAllocator::Allocate(size_t Size, size_t Alignment) {
   // If Size is really big, allocate a separate slab for it.
   size_t PaddedSize = Size + sizeof(MemSlab) + Alignment - 1;
   if (PaddedSize > SizeThreshold) {
+    ++NumSlabs;
     MemSlab *NewSlab = Allocator.Allocate(PaddedSize);
 
     // Put the new slab after the current slab, since we are not allocating
@@ -140,14 +148,6 @@ void *BumpPtrAllocator::Allocate(size_t Size, size_t Alignment) {
   return Ptr;
 }
 
-unsigned BumpPtrAllocator::GetNumSlabs() const {
-  unsigned NumSlabs = 0;
-  for (MemSlab *Slab = CurSlab; Slab != 0; Slab = Slab->NextPtr) {
-    ++NumSlabs;
-  }
-  return NumSlabs;
-}
-
 size_t BumpPtrAllocator::getTotalMemory() const {
   size_t TotalMemory = 0;
   for (MemSlab *Slab = CurSlab; Slab != 0; Slab = Slab->NextPtr) {
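As a rough usage sketch of the post-patch behavior (assuming only the API visible in the diff above; the sizes and counts are made up for illustration): small allocations pack into bump-allocated slabs, an allocation larger than the size threshold gets its own slab, and both kinds are now counted by the NumSlabs member that the inline GetNumSlabs() returns, instead of being recounted by walking the slab list.

#include "llvm/Support/Allocator.h"
#include <cstdio>

int main() {
  llvm::BumpPtrAllocator Alloc;                              // default slab size and threshold
  for (int i = 0; i < 1000; ++i)
    (void)Alloc.Allocate(/*Size=*/64, /*Alignment=*/8);      // packs into shared slabs
  (void)Alloc.Allocate(/*Size=*/1 << 20, /*Alignment=*/8);   // over-threshold, gets its own slab
  std::printf("slabs: %zu, total memory: %zu\n",
              Alloc.GetNumSlabs(), Alloc.getTotalMemory());  // O(1) after this patch
  return 0;
}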