[Allocator] Switch the BumpPtrAllocator to use a vector of pointers to
slabs rather than embedding a singly linked list in the slabs
themselves. This has a few advantages:

- Better utilization of the slab's memory by not wasting 16 bytes at the
  front.
- Simpler allocation strategy by not having a struct packed at the
  front.
- Avoids paging in every allocated slab just to traverse the slab list
  when deallocating or dumping stats.

The latter is the really nice part. Folks have complained bitterly from
time to time that tearing down a BumpPtrAllocator, even when it doesn't
run any destructors, pages in all of the memory it allocated. Now it
won't. =]
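
To illustrate the shape of the change, here is a minimal sketch of the
two layouts; apart from MemSlab's fields (which the old test code below
uses), the names are illustrative stand-ins rather than the actual LLVM
declarations:

  #include <cstddef>
  #include <utility>
  #include <vector>

  // Before: each slab began with an intrusive header roughly like this,
  // so freeing or walking the slabs touched -- and paged in -- every one.
  struct MemSlab {
    size_t Size;      // size of this slab, header included
    MemSlab *NextPtr; // next slab in the singly linked list
  };

  // After (sketch): the allocator owns plain vectors of slab pointers, so
  // teardown and stat dumping only touch the vectors, not the slabs.
  class BumpAllocatorSketch {
    std::vector<void *> Slabs;                               // default-sized slabs
    std::vector<std::pair<void *, size_t>> CustomSizedSlabs; // oversized requests
  };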

Also resolves a FIXME about the scaling of the slab sizes: the scaling
now disregards the specially sized slabs created for allocations larger
than the threshold.
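
As a rough sketch of that scaling rule (the base size, the doubling
interval, and the cap below are illustrative assumptions, not the exact
constants used):

  #include <algorithm>
  #include <cstddef>

  // Only standard slabs feed the growth formula; oversized requests get
  // their own exactly-sized slab and never bump the scale.
  size_t computeStandardSlabSize(size_t NumStandardSlabs) {
    const size_t BaseSlabSize = 4096;                               // illustrative base
    size_t ScalePow2 = std::min<size_t>(NumStandardSlabs / 128, 30); // double every 128 slabs, capped
    return BaseSlabSize << ScalePow2;
  }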

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@206147 91177308-0d34-0410-b5e6-96231b3b80d8
Chandler Carruth
2014-04-14 03:55:11 +00:00
parent 1b1e4e27e1
commit cb7ead25c2
4 changed files with 161 additions and 149 deletions


@@ -84,7 +84,7 @@ TEST(AllocatorTest, TestOverflow) {
   BumpPtrAllocator Alloc;
 
   // Fill the slab right up until the end pointer.
-  Alloc.Allocate(4096 - sizeof(MemSlab), 0);
+  Alloc.Allocate(4096, 0);
   EXPECT_EQ(1U, Alloc.GetNumSlabs());
 
   // If we don't allocate a new slab, then we will have overflowed.
@@ -103,37 +103,32 @@ TEST(AllocatorTest, TestSmallSlabSize) {
 // Mock slab allocator that returns slabs aligned on 4096 bytes. There is no
 // easy portable way to do this, so this is kind of a hack.
 class MockSlabAllocator : public SlabAllocator {
-  MemSlab *LastSlab;
+  size_t LastSlabSize;
 
 public:
   virtual ~MockSlabAllocator() { }
 
-  virtual MemSlab *Allocate(size_t Size) {
+  virtual void *Allocate(size_t Size) {
     // Allocate space for the alignment, the slab, and a void* that goes right
     // before the slab.
     size_t Alignment = 4096;
     void *MemBase = malloc(Size + Alignment - 1 + sizeof(void*));
 
-    // Make the slab.
-    MemSlab *Slab = (MemSlab*)(((uintptr_t)MemBase+sizeof(void*)+Alignment-1) &
-                               ~(uintptr_t)(Alignment - 1));
-    Slab->Size = Size;
-    Slab->NextPtr = 0;
+    // Find the slab start.
+    void *Slab = alignPtr((char *)MemBase + sizeof(void *), Alignment);
 
     // Hold a pointer to the base so we can free the whole malloced block.
     ((void**)Slab)[-1] = MemBase;
 
-    LastSlab = Slab;
+    LastSlabSize = Size;
     return Slab;
   }
 
-  virtual void Deallocate(MemSlab *Slab) {
+  virtual void Deallocate(void *Slab, size_t Size) {
     free(((void**)Slab)[-1]);
   }
 
-  MemSlab *GetLastSlab() {
-    return LastSlab;
-  }
+  size_t GetLastSlabSize() { return LastSlabSize; }
 };
 
 // Allocate a large-ish block with a really large alignment so that the
@@ -142,9 +137,16 @@ public:
 TEST(AllocatorTest, TestBigAlignment) {
   MockSlabAllocator SlabAlloc;
   BumpPtrAllocator Alloc(SlabAlloc);
-  uintptr_t Ptr = (uintptr_t)Alloc.Allocate(3000, 2048);
-  MemSlab *Slab = SlabAlloc.GetLastSlab();
-  EXPECT_LE(Ptr + 3000, ((uintptr_t)Slab) + Slab->Size);
+
+  // First allocate a tiny bit to ensure we have to re-align things.
+  (void)Alloc.Allocate(1, 0);
+
+  // Now the big chunk with a big alignment.
+  (void)Alloc.Allocate(3000, 2048);
+
+  // We test that the last slab size is not the default 4096 byte slab, but
+  // rather a custom sized slab that is larger.
+  EXPECT_GT(SlabAlloc.GetLastSlabSize(), 4096u);
 }
 
 } // anonymous namespace