Add an ArrayRecycler class.

This is similar to the existing Recycler allocator, but instead of
recycling individual objects from a BumpPtrAllocator, it can allocate
and recycle arrays of different sizes.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@171581 91177308-0d34-0410-b5e6-96231b3b80d8
Jakob Stoklund Olesen 2013-01-05 00:57:11 +00:00
parent 6a40db40ee
commit 8a0631a35e
3 changed files with 253 additions and 0 deletions
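
For readers new to the interface, here is a minimal usage sketch (not part of the commit; the Node type and the useArray function are made up for illustration, and any element type at least the size of a pointer works):

// Illustrative only: typical client code for the new ArrayRecycler class.
#include "llvm/Support/Allocator.h"
#include "llvm/Support/ArrayRecycler.h"

using namespace llvm;

struct Node {
  Node *Next;  // Element type must be at least pointer-sized (see push()).
  int Value;
};

void useArray() {
  BumpPtrAllocator Allocator;
  ArrayRecycler<Node> Recycler;

  // Round the request up to a supported capacity (8 elements for N = 5).
  ArrayRecycler<Node>::Capacity Cap = ArrayRecycler<Node>::Capacity::get(5);
  Node *Nodes = Recycler.allocate(Cap, Allocator);

  // ... use Nodes[0] .. Nodes[Cap.getSize() - 1] ...

  // Return the array so a later allocate() of the same capacity can reuse it.
  Recycler.deallocate(Cap, Nodes);

  // Must be called before the recycler is destroyed.
  Recycler.clear(Allocator);
}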

include/llvm/Support/ArrayRecycler.h
@@ -0,0 +1,143 @@
//==- llvm/Support/ArrayRecycler.h - Recycling of Arrays ---------*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the ArrayRecycler class template which can recycle small
// arrays allocated from one of the allocators in Allocator.h
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_SUPPORT_ARRAY_RECYCLER_H
#define LLVM_SUPPORT_ARRAY_RECYCLER_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/MathExtras.h"

namespace llvm {

class BumpPtrAllocator;

/// Recycle small arrays allocated from a BumpPtrAllocator.
///
/// Arrays are allocated in a small number of fixed sizes. For each supported
/// array size, the ArrayRecycler keeps a free list of available arrays.
///
template<class T, size_t Align = AlignOf<T>::Alignment>
class ArrayRecycler {
  // The free list for a given array size is a simple singly linked list.
  // We can't use iplist or Recycler here since those classes can't be copied.
  struct FreeList {
    FreeList *Next;
  };

  // Keep a free list for each array size.
  SmallVector<FreeList*, 8> Bucket;
  // Remove an entry from the free list in Bucket[Idx] and return it.
  // Return NULL if no entries are available.
  T *pop(unsigned Idx) {
    if (Idx >= Bucket.size())
      return 0;
    FreeList *Entry = Bucket[Idx];
    if (!Entry)
      return 0;
    Bucket[Idx] = Entry->Next;
    return reinterpret_cast<T*>(Entry);
  }

  // Add an entry to the free list at Bucket[Idx].
  void push(unsigned Idx, T *Ptr) {
    assert(Ptr && "Cannot recycle NULL pointer");
    assert(sizeof(T) >= sizeof(FreeList) && "Objects are too small");
    assert(Align >= AlignOf<FreeList>::Alignment && "Object underaligned");
    FreeList *Entry = reinterpret_cast<FreeList*>(Ptr);
    if (Idx >= Bucket.size())
      Bucket.resize(size_t(Idx) + 1);
    Entry->Next = Bucket[Idx];
    Bucket[Idx] = Entry;
  }
public:
  /// The size of an allocated array is represented by a Capacity instance.
  ///
  /// This class is much smaller than a size_t, and it provides methods to work
  /// with the set of legal array capacities.
  class Capacity {
    uint8_t Index;
    explicit Capacity(uint8_t idx) : Index(idx) {}

  public:
    Capacity() : Index(0) {}

    /// Get the capacity of an array that can hold at least N elements.
    static Capacity get(size_t N) {
      return Capacity(N ? Log2_64_Ceil(N) : 0);
    }

    /// Get the number of elements in an array with this capacity.
    size_t getSize() const { return size_t(1u) << Index; }

    /// Get the bucket number for this capacity.
    unsigned getBucket() const { return Index; }

    /// Get the next larger capacity. Large capacities grow exponentially, so
    /// this function can be used to reallocate incrementally growing vectors
    /// in amortized linear time.
    Capacity getNext() const { return Capacity(Index + 1); }
  };
  ~ArrayRecycler() {
    // The client should always call clear() so recycled arrays can be returned
    // to the allocator.
    assert(Bucket.empty() && "Non-empty ArrayRecycler deleted!");
  }

  /// Release all the tracked allocations to the allocator. The recycler must
  /// be free of any tracked allocations before being deleted.
  template<class AllocatorType>
  void clear(AllocatorType &Allocator) {
    for (; !Bucket.empty(); Bucket.pop_back())
      while (T *Ptr = pop(Bucket.size() - 1))
        Allocator.Deallocate(Ptr);
  }

  /// Special case for BumpPtrAllocator which has an empty Deallocate()
  /// function.
  ///
  /// There is no need to traverse the free lists, pulling all the objects into
  /// cache.
  void clear(BumpPtrAllocator&) {
    Bucket.clear();
  }
  /// Allocate an array of at least the requested capacity.
  ///
  /// Return an existing recycled array, or allocate one from Allocator if
  /// none are available for recycling.
  ///
  template<class AllocatorType>
  T *allocate(Capacity Cap, AllocatorType &Allocator) {
    // Try to recycle an existing array.
    if (T *Ptr = pop(Cap.getBucket()))
      return Ptr;
    // Nope, get more memory.
    return static_cast<T*>(Allocator.Allocate(sizeof(T)*Cap.getSize(), Align));
  }

  /// Deallocate an array with the specified Capacity.
  ///
  /// Cap must be the same capacity that was given to allocate().
  ///
  void deallocate(Capacity Cap, T *Ptr) {
    push(Cap.getBucket(), Ptr);
  }
};

} // end llvm namespace

#endif
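
The Capacity::getNext() comment above mentions reallocating incrementally growing vectors in amortized linear time; the following sketch (not part of the commit; growArray is a hypothetical helper) shows that reallocation pattern:

#include "llvm/Support/ArrayRecycler.h"
#include <cstddef>

// Hypothetical helper, illustrative only: grow an array following the
// getNext() pattern documented above. Size is the number of live elements.
template <class T, class AllocatorType>
T *growArray(llvm::ArrayRecycler<T> &Recycler, AllocatorType &Allocator, T *Old,
             typename llvm::ArrayRecycler<T>::Capacity &Cap, size_t Size) {
  // Move to the next (doubled) capacity and copy the live elements over.
  typename llvm::ArrayRecycler<T>::Capacity NewCap = Cap.getNext();
  T *New = Recycler.allocate(NewCap, Allocator);
  for (size_t I = 0; I != Size; ++I)
    New[I] = Old[I];
  // The old array goes back on its free list and can be handed out again by a
  // later allocate() with the same capacity.
  Recycler.deallocate(Cap, Old);
  Cap = NewCap;
  return New;
}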

unittests/Support/ArrayRecyclerTest.cpp
@@ -0,0 +1,109 @@
//===--- unittest/Support/ArrayRecyclerTest.cpp ---------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "llvm/Support/ArrayRecycler.h"
#include "llvm/Support/Allocator.h"
#include "gtest/gtest.h"
#include <cstdlib>
using namespace llvm;
namespace {
struct Object {
int Num;
Object *Other;
};
typedef ArrayRecycler<Object> ARO;
TEST(ArrayRecyclerTest, Capacity) {
  // Capacity size should never be 0.
  ARO::Capacity Cap = ARO::Capacity::get(0);
  EXPECT_LT(0u, Cap.getSize());

  size_t PrevSize = Cap.getSize();
  for (unsigned N = 1; N != 100; ++N) {
    Cap = ARO::Capacity::get(N);
    EXPECT_LE(N, Cap.getSize());
    if (PrevSize >= N)
      EXPECT_EQ(PrevSize, Cap.getSize());
    else
      EXPECT_LT(PrevSize, Cap.getSize());
    PrevSize = Cap.getSize();
  }

  // Check that the buckets are monotonically increasing.
  Cap = ARO::Capacity::get(0);
  PrevSize = Cap.getSize();
  for (unsigned N = 0; N != 20; ++N) {
    Cap = Cap.getNext();
    EXPECT_LT(PrevSize, Cap.getSize());
    PrevSize = Cap.getSize();
  }
}
TEST(ArrayRecyclerTest, Basics) {
  BumpPtrAllocator Allocator;
  ArrayRecycler<Object> DUT;

  ARO::Capacity Cap = ARO::Capacity::get(8);
  Object *A1 = DUT.allocate(Cap, Allocator);
  A1[0].Num = 21;
  A1[7].Num = 17;

  Object *A2 = DUT.allocate(Cap, Allocator);
  A2[0].Num = 121;
  A2[7].Num = 117;

  Object *A3 = DUT.allocate(Cap, Allocator);
  A3[0].Num = 221;
  A3[7].Num = 217;

  EXPECT_EQ(21, A1[0].Num);
  EXPECT_EQ(17, A1[7].Num);
  EXPECT_EQ(121, A2[0].Num);
  EXPECT_EQ(117, A2[7].Num);
  EXPECT_EQ(221, A3[0].Num);
  EXPECT_EQ(217, A3[7].Num);

  DUT.deallocate(Cap, A2);

  // Check that deallocation didn't clobber anything.
  EXPECT_EQ(21, A1[0].Num);
  EXPECT_EQ(17, A1[7].Num);
  EXPECT_EQ(221, A3[0].Num);
  EXPECT_EQ(217, A3[7].Num);

  // Verify recycling.
  Object *A2x = DUT.allocate(Cap, Allocator);
  EXPECT_EQ(A2, A2x);

  DUT.deallocate(Cap, A2x);
  DUT.deallocate(Cap, A1);
  DUT.deallocate(Cap, A3);

  // Objects are not required to be recycled in reverse deallocation order, but
  // that is what the current implementation does.
  Object *A3x = DUT.allocate(Cap, Allocator);
  EXPECT_EQ(A3, A3x);
  Object *A1x = DUT.allocate(Cap, Allocator);
  EXPECT_EQ(A1, A1x);
  Object *A2y = DUT.allocate(Cap, Allocator);
  EXPECT_EQ(A2, A2y);

  // Back to allocation from the BumpPtrAllocator.
  Object *A4 = DUT.allocate(Cap, Allocator);
  EXPECT_NE(A1, A4);
  EXPECT_NE(A2, A4);
  EXPECT_NE(A3, A4);

  DUT.clear(Allocator);
}

} // end anonymous namespace

unittests/Support/CMakeLists.txt
@@ -6,6 +6,7 @@ set(LLVM_LINK_COMPONENTS
add_llvm_unittest(SupportTests
  AlignOfTest.cpp
  AllocatorTest.cpp
  ArrayRecyclerTest.cpp
  BlockFrequencyTest.cpp
  Casting.cpp
  CommandLineTest.cpp