Bye, Bye Compaction Tables. The benefit compaction tables provide doesn't
outweigh their computational cost. This patch removes all compaction table
handling from the bcreader and bcwriter. For the record, here's the
difference between having and not having compaction tables for some tests:

Test             With       Without   Size Chg
Olden/mst       5,602         5,598      +0.1%
viterbi        18,026        17,795      +1.3%
obsequi       162,133       166,663      -2.8%
burg          224,090       228,148      -1.8%
kimwitu++   4,933,263     5,121,159      -3.8%
176.gcc     8,470,424     9,141,539      -7.3%

It seems that compaction tables are more beneficial for larger files, but even
on the largest test case we have (176.gcc) they only amount to an I/O saving
of 7.3%.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@33661 91177308-0d34-0410-b5e6-96231b3b80d8
Reid Spencer 2007-01-30 19:36:46 +00:00
parent 908504347b
commit d2bb887cd1
6 changed files with 34 additions and 721 deletions


@ -100,7 +100,6 @@ public:
bca.BlockSizes[BytecodeFormat::ModuleGlobalInfoBlockID] = 0;
bca.BlockSizes[BytecodeFormat::GlobalTypePlaneBlockID] = 0;
bca.BlockSizes[BytecodeFormat::InstructionListBlockID] = 0;
bca.BlockSizes[BytecodeFormat::CompactionTableBlockID] = 0;
bca.BlockSizes[BytecodeFormat::TypeSymbolTableBlockID] = 0;
}
@ -635,9 +634,6 @@ void PrintBytecodeAnalysis(BytecodeAnalysis& bca, std::ostream& Out )
print(Out, "Instruction List Bytes",
double(bca.BlockSizes[BytecodeFormat::InstructionListBlockID]),
double(bca.byteSize));
print(Out, "Compaction Table Bytes",
double(bca.BlockSizes[BytecodeFormat::CompactionTableBlockID]),
double(bca.byteSize));
print(Out, "Value Symbol Table Bytes",
double(bca.BlockSizes[BytecodeFormat::ValueSymbolTableBlockID]),
double(bca.byteSize));


@ -186,8 +186,8 @@ inline bool BytecodeReader::hasImplicitNull(unsigned TyID) {
return TyID != Type::LabelTyID && TyID != Type::VoidTyID;
}
/// Obtain a type given a typeid and account for things like compaction tables,
/// function level vs module level, and the offsetting for the primitive types.
/// Obtain a type given a typeid and account for things like function level vs
/// module level, and the offsetting for the primitive types.
const Type *BytecodeReader::getType(unsigned ID) {
if (ID <= Type::LastPrimitiveTyID)
if (const Type *T = Type::getPrimitiveType((Type::TypeID)ID))
@ -196,12 +196,6 @@ const Type *BytecodeReader::getType(unsigned ID) {
// Otherwise, derived types need offset...
ID -= Type::FirstDerivedTyID;
if (!CompactionTypes.empty()) {
if (ID >= CompactionTypes.size())
error("Type ID out of range for compaction table!");
return CompactionTypes[ID].first;
}
// Is it a module-level type?
if (ID < ModuleTypes.size())
return ModuleTypes[ID].get();
@ -223,20 +217,11 @@ inline const Type* BytecodeReader::readType() {
}
/// Get the slot number associated with a type accounting for primitive
/// types, compaction tables, and function level vs module level.
/// types and function level vs module level.
unsigned BytecodeReader::getTypeSlot(const Type *Ty) {
if (Ty->isPrimitiveType())
return Ty->getTypeID();
// Scan the compaction table for the type if needed.
if (!CompactionTypes.empty()) {
for (unsigned i = 0, e = CompactionTypes.size(); i != e; ++i)
if (CompactionTypes[i].first == Ty)
return Type::FirstDerivedTyID + i;
error("Couldn't find type specified in compaction table!");
}
// Check the function level types first...
TypeListTy::iterator I = std::find(FunctionTypes.begin(),
FunctionTypes.end(), Ty);
@ -266,84 +251,28 @@ unsigned BytecodeReader::getTypeSlot(const Type *Ty) {
return Type::FirstDerivedTyID + IT->second;
}
/// This is just like getType, but when a compaction table is in use, it is
/// ignored. It also ignores function level types.
/// @see getType
const Type *BytecodeReader::getGlobalTableType(unsigned Slot) {
if (Slot < Type::FirstDerivedTyID) {
const Type *Ty = Type::getPrimitiveType((Type::TypeID)Slot);
if (!Ty)
error("Not a primitive type ID?");
return Ty;
}
Slot -= Type::FirstDerivedTyID;
if (Slot >= ModuleTypes.size())
error("Illegal compaction table type reference!");
return ModuleTypes[Slot];
}
/// This is just like getTypeSlot, but when a compaction table is in use, it
/// is ignored. It also ignores function level types.
unsigned BytecodeReader::getGlobalTableTypeSlot(const Type *Ty) {
if (Ty->isPrimitiveType())
return Ty->getTypeID();
// If we don't have our cache yet, build it now.
if (ModuleTypeIDCache.empty()) {
unsigned N = 0;
ModuleTypeIDCache.reserve(ModuleTypes.size());
for (TypeListTy::iterator I = ModuleTypes.begin(), E = ModuleTypes.end();
I != E; ++I, ++N)
ModuleTypeIDCache.push_back(std::make_pair(*I, N));
std::sort(ModuleTypeIDCache.begin(), ModuleTypeIDCache.end());
}
// Binary search the cache for the entry.
std::vector<std::pair<const Type*, unsigned> >::iterator IT =
std::lower_bound(ModuleTypeIDCache.begin(), ModuleTypeIDCache.end(),
std::make_pair(Ty, 0U));
if (IT == ModuleTypeIDCache.end() || IT->first != Ty)
error("Didn't find type in ModuleTypes.");
return Type::FirstDerivedTyID + IT->second;
}
/// Retrieve a value of a given type and slot number, possibly creating
/// it if it doesn't already exist.
Value * BytecodeReader::getValue(unsigned type, unsigned oNum, bool Create) {
assert(type != Type::LabelTyID && "getValue() cannot get blocks!");
unsigned Num = oNum;
// If there is a compaction table active, it defines the low-level numbers.
// If not, the module values define the low-level numbers.
if (CompactionValues.size() > type && !CompactionValues[type].empty()) {
if (Num < CompactionValues[type].size())
return CompactionValues[type][Num];
Num -= CompactionValues[type].size();
} else {
// By default, the global type id is the type id passed in
unsigned GlobalTyID = type;
// By default, the global type id is the type id passed in
unsigned GlobalTyID = type;
// If the type plane was compactified, figure out the global type ID by
// adding the derived type ids and the distance.
if (!CompactionTypes.empty() && type >= Type::FirstDerivedTyID)
GlobalTyID = CompactionTypes[type-Type::FirstDerivedTyID].second;
if (hasImplicitNull(GlobalTyID)) {
const Type *Ty = getType(type);
if (!isa<OpaqueType>(Ty)) {
if (Num == 0)
return Constant::getNullValue(Ty);
--Num;
}
if (hasImplicitNull(GlobalTyID)) {
const Type *Ty = getType(type);
if (!isa<OpaqueType>(Ty)) {
if (Num == 0)
return Constant::getNullValue(Ty);
--Num;
}
}
if (GlobalTyID < ModuleValues.size() && ModuleValues[GlobalTyID]) {
if (Num < ModuleValues[GlobalTyID]->size())
return ModuleValues[GlobalTyID]->getOperand(Num);
Num -= ModuleValues[GlobalTyID]->size();
}
if (GlobalTyID < ModuleValues.size() && ModuleValues[GlobalTyID]) {
if (Num < ModuleValues[GlobalTyID]->size())
return ModuleValues[GlobalTyID]->getOperand(Num);
Num -= ModuleValues[GlobalTyID]->size();
}
if (FunctionValues.size() > type &&
@ -370,38 +299,6 @@ Value * BytecodeReader::getValue(unsigned type, unsigned oNum, bool Create) {
return 0; // just silence warning, error calls longjmp
}
/// This is just like getValue, but when a compaction table is in use, it
/// is ignored. Also, no forward references or other fancy features are
/// supported.
Value* BytecodeReader::getGlobalTableValue(unsigned TyID, unsigned SlotNo) {
if (SlotNo == 0)
return Constant::getNullValue(getType(TyID));
if (!CompactionTypes.empty() && TyID >= Type::FirstDerivedTyID) {
TyID -= Type::FirstDerivedTyID;
if (TyID >= CompactionTypes.size())
error("Type ID out of range for compaction table!");
TyID = CompactionTypes[TyID].second;
}
--SlotNo;
if (TyID >= ModuleValues.size() || ModuleValues[TyID] == 0 ||
SlotNo >= ModuleValues[TyID]->size()) {
if (TyID >= ModuleValues.size() || ModuleValues[TyID] == 0)
error("Corrupt compaction table entry!"
+ utostr(TyID) + ", " + utostr(SlotNo) + ": "
+ utostr(ModuleValues.size()));
else
error("Corrupt compaction table entry!"
+ utostr(TyID) + ", " + utostr(SlotNo) + ": "
+ utostr(ModuleValues.size()) + ", "
+ utohexstr(reinterpret_cast<uint64_t>(((void*)ModuleValues[TyID])))
+ ", "
+ utostr(ModuleValues[TyID]->size()));
}
return ModuleValues[TyID]->getOperand(SlotNo);
}
/// Just like getValue, except that it returns a null pointer
/// only on error. It always returns a constant (meaning that if the value is
@ -1079,76 +976,6 @@ void BytecodeReader::ParseValueSymbolTable(Function *CurrentFunction,
if (Handler) Handler->handleSymbolTableEnd();
}
/// Read in the types portion of a compaction table.
void BytecodeReader::ParseCompactionTypes(unsigned NumEntries) {
for (unsigned i = 0; i != NumEntries; ++i) {
unsigned TypeSlot = read_vbr_uint();
const Type *Typ = getGlobalTableType(TypeSlot);
CompactionTypes.push_back(std::make_pair(Typ, TypeSlot));
if (Handler) Handler->handleCompactionTableType(i, TypeSlot, Typ);
}
}
/// Parse a compaction table.
void BytecodeReader::ParseCompactionTable() {
// Notify handler that we're beginning a compaction table.
if (Handler) Handler->handleCompactionTableBegin();
// Get the types for the compaction table.
unsigned NumEntries = read_vbr_uint();
ParseCompactionTypes(NumEntries);
// Compaction tables live in separate blocks so we have to loop
// until we've read the whole thing.
while (moreInBlock()) {
// Read the number of Value* entries in the compaction table
unsigned NumEntries = read_vbr_uint();
unsigned Ty = 0;
// Decode the type from value read in. Most compaction table
// planes will have one or two entries in them. If that's the
// case then the length is encoded in the bottom two bits and
// the higher bits encode the type. This saves another VBR value.
if ((NumEntries & 3) == 3) {
// In this case, both low-order bits are set (value 3). This
// is a signal that the typeid follows.
NumEntries >>= 2;
Ty = read_vbr_uint();
} else {
// In this case, the low-order bits specify the number of entries
// and the high order bits specify the type.
Ty = NumEntries >> 2;
NumEntries &= 3;
}
// Make sure we have enough room for the plane.
if (Ty >= CompactionValues.size())
CompactionValues.resize(Ty+1);
// Make sure the plane is empty or we have some kind of error.
if (!CompactionValues[Ty].empty())
error("Compaction table plane contains multiple entries!");
// Notify handler about the plane.
if (Handler) Handler->handleCompactionTablePlane(Ty, NumEntries);
// Push the implicit zero.
CompactionValues[Ty].push_back(Constant::getNullValue(getType(Ty)));
// Read in each of the entries, put them in the compaction table
// and notify the handler that we have a new compaction table value.
for (unsigned i = 0; i != NumEntries; ++i) {
unsigned ValSlot = read_vbr_uint();
Value *V = getGlobalTableValue(Ty, ValSlot);
CompactionValues[Ty].push_back(V);
if (Handler) Handler->handleCompactionTableValue(i, Ty, ValSlot);
}
}
// Notify handler that the compaction table is done.
if (Handler) Handler->handleCompactionTableEnd();
}
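
For reference, the plane-header packing described in the comments above (and
mirrored by the writer's outputCompactionTablePlane later in this patch) can be
illustrated with a small standalone sketch: planes with at most two entries fold
the count into the two low bits of a single word, while larger planes use the
marker value 3 and emit the type id separately. The helper names and the use of
a plain vector in place of the VBR-encoded byte stream are assumptions made for
illustration, not code from this patch:

#include <cassert>
#include <cstdio>
#include <vector>

// Illustrative stand-in for the VBR-encoded stream: each element models one
// vbr_uint that the reader/writer would consume or produce.
typedef std::vector<unsigned> Stream;

// Writer side (cf. outputCompactionTablePlane): 0-2 entries share one word
// with the plane number; otherwise emit the count tagged with 3, then the
// plane number as a separate word.
static void emitPlaneHeader(Stream &S, unsigned PlaneNo, unsigned NumEntries) {
  if (NumEntries <= 2) {
    S.push_back((PlaneNo << 2) | NumEntries);
  } else {
    S.push_back((NumEntries << 2) | 3);
    S.push_back(PlaneNo);
  }
}

// Reader side (cf. ParseCompactionTable): undo the packing above.
static void readPlaneHeader(const Stream &S, unsigned &Pos,
                            unsigned &PlaneNo, unsigned &NumEntries) {
  unsigned Word = S[Pos++];
  if ((Word & 3) == 3) {   // Both low bits set: the plane number follows.
    NumEntries = Word >> 2;
    PlaneNo = S[Pos++];
  } else {                 // Common case: count and plane number share a word.
    NumEntries = Word & 3;
    PlaneNo = Word >> 2;
  }
}

int main() {
  // Round-trip a few representative headers.
  const unsigned Cases[][2] = {{17, 1}, {17, 2}, {42, 9}};
  for (unsigned i = 0; i != sizeof(Cases) / sizeof(Cases[0]); ++i) {
    Stream S;
    unsigned Pos = 0, Plane = 0, Num = 0;
    emitPlaneHeader(S, Cases[i][0], Cases[i][1]);
    readPlaneHeader(S, Pos, Plane, Num);
    assert(Plane == Cases[i][0] && Num == Cases[i][1]);
    std::printf("plane %u, %u entries -> %u word(s)\n", Plane, Num,
                (unsigned)S.size());
  }
  return 0;
}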
// Parse a single type. The typeid is read in first. If its a primitive type
// then nothing else needs to be read, we know how to instantiate it. If its
// a derived type, then additional data is read to fill out the type
@ -1667,8 +1494,7 @@ void BytecodeReader::ParseFunctionBody(Function* F) {
case BytecodeFormat::ConstantPoolBlockID:
if (!InsertedArguments) {
// Insert arguments into the value table before we parse the first basic
// block in the function, but after we potentially read in the
// compaction table.
// block in the function
insertArguments(F);
InsertedArguments = true;
}
@ -1676,14 +1502,9 @@ void BytecodeReader::ParseFunctionBody(Function* F) {
ParseConstantPool(FunctionValues, FunctionTypes, true);
break;
case BytecodeFormat::CompactionTableBlockID:
ParseCompactionTable();
break;
case BytecodeFormat::InstructionListBlockID: {
// Insert arguments into the value table before we parse the instruction
// list for the function, but after we potentially read in the compaction
// table.
// list for the function
if (!InsertedArguments) {
insertArguments(F);
InsertedArguments = true;
@ -1732,8 +1553,6 @@ void BytecodeReader::ParseFunctionBody(Function* F) {
// Clear out function-level types...
FunctionTypes.clear();
CompactionTypes.clear();
CompactionValues.clear();
freeTable(FunctionValues);
if (Handler) Handler->handleFunctionEnd(F);


@ -212,12 +212,6 @@ protected:
/// @brief Parse a function body
void ParseFunctionBody(Function* Func);
/// @brief Parse the type list portion of a compaction table
void ParseCompactionTypes(unsigned NumEntries);
/// @brief Parse a compaction table
void ParseCompactionTable();
/// @brief Parse global types
void ParseGlobalTypes();
@ -275,15 +269,6 @@ private:
///
unsigned char RevisionNum; // The rev # itself
/// CompactionTypes - If a compaction table is active in the current function,
/// this is the mapping that it contains. We keep track of what resolved type
/// it is as well as what global type entry it is.
std::vector<std::pair<const Type*, unsigned> > CompactionTypes;
/// @brief If a compaction table is active in the current function,
/// this is the mapping that it contains.
std::vector<std::vector<Value*> > CompactionValues;
/// @brief This vector is used to deal with forward references to types in
/// a module.
TypeListTy ModuleTypes;
@ -363,23 +348,12 @@ private:
/// @brief Converts a Type* to its type slot number
unsigned getTypeSlot(const Type *Ty);
/// @brief Converts a normal type slot number to a compacted type slot num.
unsigned getCompactionTypeSlot(unsigned type);
/// @brief Gets the global type corresponding to the TypeId
const Type *getGlobalTableType(unsigned TypeId);
/// This is just like getTypeSlot, but when a compaction table is in use,
/// it is ignored.
unsigned getGlobalTableTypeSlot(const Type *Ty);
/// @brief Get a value from its typeid and slot number
Value* getValue(unsigned TypeID, unsigned num, bool Create = true);
/// @brief Get a value from its type and slot number, ignoring compaction
/// tables.
Value *getGlobalTableValue(unsigned TyID, unsigned SlotNo);
/// @brief Get a basic block for current function
BasicBlock *getBasicBlock(unsigned ID);


@ -87,38 +87,7 @@ SlotCalculator::SlotCalculator(const Function *M ) {
incorporateFunction(M); // Start out in incorporated state
}
unsigned SlotCalculator::getGlobalSlot(const Value *V) const {
assert(!CompactionTable.empty() &&
"This method can only be used when compaction is enabled!");
std::map<const Value*, unsigned>::const_iterator I = NodeMap.find(V);
assert(I != NodeMap.end() && "Didn't find global slot entry!");
return I->second;
}
unsigned SlotCalculator::getGlobalSlot(const Type* T) const {
std::map<const Type*, unsigned>::const_iterator I = TypeMap.find(T);
assert(I != TypeMap.end() && "Didn't find global slot entry!");
return I->second;
}
SlotCalculator::TypePlane &SlotCalculator::getPlane(unsigned Plane) {
if (CompactionTable.empty()) { // No compaction table active?
// fall out
} else if (!CompactionTable[Plane].empty()) { // Compaction table active.
assert(Plane < CompactionTable.size());
return CompactionTable[Plane];
} else {
// Final case: compaction table active, but this plane is not
// compactified. If the type plane is compactified, unmap back to the
// global type plane corresponding to "Plane".
if (!CompactionTypes.empty()) {
const Type *Ty = CompactionTypes[Plane];
TypeMapType::iterator It = TypeMap.find(Ty);
assert(It != TypeMap.end() && "Type not in global constant map?");
Plane = It->second;
}
}
// Okay we are just returning an entry out of the main Table. Make sure the
// plane exists and return it.
if (Plane >= Table.size())
@ -284,10 +253,6 @@ void SlotCalculator::incorporateFunction(const Function *F) {
SC_DEBUG("begin processFunction!\n");
// If we emitted all of the function constants, build a compaction table.
if (ModuleContainsAllFunctionConstants)
buildCompactionTable(F);
// Update the ModuleLevel entries to be accurate.
ModuleLevel.resize(getNumPlanes());
for (unsigned i = 0, e = getNumPlanes(); i != e; ++i)
@ -330,11 +295,6 @@ void SlotCalculator::incorporateFunction(const Function *F) {
}
}
// If we are building a compaction table, prune out planes that do not benefit
// from being compactified.
if (!CompactionTable.empty())
pruneCompactionTable();
SC_DEBUG("end processFunction!\n");
}
@ -345,10 +305,6 @@ void SlotCalculator::purgeFunction() {
SC_DEBUG("begin purgeFunction!\n");
// First, free the compaction map if used.
CompactionNodeMap.clear();
CompactionTypeMap.clear();
// Next, remove values from existing type planes
for (unsigned i = 0; i != NumModuleTypes; ++i) {
// Size of plane before function came
@ -371,23 +327,18 @@ void SlotCalculator::purgeFunction() {
ModuleTypeLevel = 0;
// Finally, remove any type planes defined by the function...
CompactionTypes.clear();
if (!CompactionTable.empty()) {
CompactionTable.clear();
} else {
while (Table.size() > NumModuleTypes) {
TypePlane &Plane = Table.back();
SC_DEBUG("Removing Plane " << (Table.size()-1) << " of size "
<< Plane.size() << "\n");
while (Plane.size()) {
assert(!isa<GlobalValue>(Plane.back()) &&
"Functions cannot define globals!");
NodeMap.erase(Plane.back()); // Erase from nodemap
Plane.pop_back(); // Shrink plane
}
Table.pop_back(); // Nuke the plane, we don't like it.
while (Table.size() > NumModuleTypes) {
TypePlane &Plane = Table.back();
SC_DEBUG("Removing Plane " << (Table.size()-1) << " of size "
<< Plane.size() << "\n");
while (Plane.size()) {
assert(!isa<GlobalValue>(Plane.back()) &&
"Functions cannot define globals!");
NodeMap.erase(Plane.back()); // Erase from nodemap
Plane.pop_back(); // Shrink plane
}
Table.pop_back(); // Nuke the plane, we don't like it.
}
SC_DEBUG("end purgeFunction!\n");
@ -397,278 +348,8 @@ static inline bool hasNullValue(const Type *Ty) {
return Ty != Type::LabelTy && Ty != Type::VoidTy && !isa<OpaqueType>(Ty);
}
/// getOrCreateCompactionTableSlot - This method is used to build up the initial
/// approximation of the compaction table.
unsigned SlotCalculator::getOrCreateCompactionTableSlot(const Value *V) {
std::map<const Value*, unsigned>::iterator I =
CompactionNodeMap.lower_bound(V);
if (I != CompactionNodeMap.end() && I->first == V)
return I->second; // Already exists?
// Make sure the type is in the table.
unsigned Ty;
if (!CompactionTypes.empty())
Ty = getOrCreateCompactionTableSlot(V->getType());
else // If the type plane was decompactified, use the global plane ID
Ty = getSlot(V->getType());
if (CompactionTable.size() <= Ty)
CompactionTable.resize(Ty+1);
TypePlane &TyPlane = CompactionTable[Ty];
// Make sure to insert the null entry if the thing we are inserting is not a
// null constant.
if (TyPlane.empty() && hasNullValue(V->getType())) {
Value *ZeroInitializer = Constant::getNullValue(V->getType());
if (V != ZeroInitializer) {
TyPlane.push_back(ZeroInitializer);
CompactionNodeMap[ZeroInitializer] = 0;
}
}
unsigned SlotNo = TyPlane.size();
TyPlane.push_back(V);
CompactionNodeMap.insert(std::make_pair(V, SlotNo));
return SlotNo;
}
/// getOrCreateCompactionTableSlot - This method is used to build up the initial
/// approximation of the compaction table.
unsigned SlotCalculator::getOrCreateCompactionTableSlot(const Type *T) {
CompactionTypeMapType::iterator I = CompactionTypeMap.lower_bound(T);
if (I != CompactionTypeMap.end() && I->first == T)
return I->second; // Already exists?
unsigned SlotNo = CompactionTypes.size();
SC_DEBUG("Inserting Compaction Type #" << SlotNo << ": " << *T << "\n");
CompactionTypes.push_back(T);
CompactionTypeMap.insert(I, std::make_pair(T, SlotNo));
return SlotNo;
}
/// buildCompactionTable - Since all of the function constants and types are
/// stored in the module-level constant table, we don't need to emit a function
/// constant table. Also due to this, the indices for various constants and
/// types might be very large in large programs. In order to avoid blowing up
/// the size of instructions in the bytecode encoding, we build a compaction
/// table, which defines a mapping from function-local identifiers to global
/// identifiers.
void SlotCalculator::buildCompactionTable(const Function *F) {
assert(CompactionNodeMap.empty() && "Compaction table already built!");
assert(CompactionTypeMap.empty() && "Compaction types already built!");
// First step, insert the primitive types.
CompactionTable.resize(Type::LastPrimitiveTyID+1);
for (unsigned i = 0; i <= Type::LastPrimitiveTyID; ++i) {
const Type *PrimTy = Type::getPrimitiveType((Type::TypeID)i);
CompactionTypes.push_back(PrimTy);
CompactionTypeMap[PrimTy] = i;
}
CompactionTypeMap[Type::Int1Ty] = CompactionTypes.size();
CompactionTypes.push_back(Type::Int1Ty);
CompactionTypeMap[Type::Int8Ty] = CompactionTypes.size();
CompactionTypes.push_back(Type::Int8Ty);
CompactionTypeMap[Type::Int16Ty] = CompactionTypes.size();
CompactionTypes.push_back(Type::Int16Ty);
CompactionTypeMap[Type::Int32Ty] = CompactionTypes.size();
CompactionTypes.push_back(Type::Int32Ty);
CompactionTypeMap[Type::Int64Ty] = CompactionTypes.size();
CompactionTypes.push_back(Type::Int64Ty);
// Next, include any types used by function arguments.
for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
I != E; ++I)
getOrCreateCompactionTableSlot(I->getType());
// Next, find all of the types and values that are referred to by the
// instructions in the function.
for (const_inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) {
getOrCreateCompactionTableSlot(I->getType());
for (unsigned op = 0, e = I->getNumOperands(); op != e; ++op)
if (isa<Constant>(I->getOperand(op)) || isa<InlineAsm>(I->getOperand(op)))
getOrCreateCompactionTableSlot(I->getOperand(op));
}
// Now do the constants and global values
const SymbolTable &ST = F->getValueSymbolTable();
for (SymbolTable::plane_const_iterator PI = ST.plane_begin(),
PE = ST.plane_end(); PI != PE; ++PI)
for (SymbolTable::value_const_iterator VI = PI->second.begin(),
VE = PI->second.end(); VI != VE; ++VI)
if (isa<Constant>(VI->second) && !isa<GlobalValue>(VI->second))
getOrCreateCompactionTableSlot(VI->second);
// Now that we have all of the values in the table, and know what types are
// referenced, make sure that there is at least the zero initializer in any
// used type plane. Since the type was used, we will be emitting instructions
// to the plane even if there are no constants in it.
CompactionTable.resize(CompactionTypes.size());
for (unsigned i = 0, e = CompactionTable.size(); i != e; ++i)
if (CompactionTable[i].empty() && (i != Type::VoidTyID) &&
i != Type::LabelTyID) {
const Type *Ty = CompactionTypes[i];
SC_DEBUG("Getting Null Value #" << i << " for Type " << *Ty << "\n");
assert(Ty->getTypeID() != Type::VoidTyID);
assert(Ty->getTypeID() != Type::LabelTyID);
getOrCreateCompactionTableSlot(Constant::getNullValue(Ty));
}
// Okay, now at this point, we have a legal compaction table. Since we want
// to emit the smallest possible binaries, do not compactify the type plane if
// it will not save us anything. Because we have not yet incorporated the
// function body itself yet, we don't know whether or not it's a good idea to
// compactify other planes. We will defer this decision until later.
TypeList &GlobalTypes = Types;
// All of the values types will be scrunched to the start of the types plane
// of the global table. Figure out just how many there are.
assert(!GlobalTypes.empty() && "No global types???");
unsigned NumFCTypes = GlobalTypes.size()-1;
while (!GlobalTypes[NumFCTypes]->isFirstClassType())
--NumFCTypes;
// If there are fewer that 64 types, no instructions will be exploded due to
// the size of the type operands. Thus there is no need to compactify types.
// Also, if the compaction table contains most of the entries in the global
// table, there really is no reason to compactify either.
if (NumFCTypes < 64) {
// Decompactifying types is tricky, because we have to move type planes all
// over the place. At least we don't need to worry about updating the
// CompactionNodeMap for non-types though.
std::vector<TypePlane> TmpCompactionTable;
std::swap(CompactionTable, TmpCompactionTable);
TypeList TmpTypes;
std::swap(TmpTypes, CompactionTypes);
// Move each plane back over to the uncompactified plane
while (!TmpTypes.empty()) {
const Type *Ty = TmpTypes.back();
TmpTypes.pop_back();
CompactionTypeMap.erase(Ty); // Decompactify type!
// Find the global slot number for this type.
int TySlot = getSlot(Ty);
assert(TySlot != -1 && "Type doesn't exist in global table?");
// Now we know where to put the compaction table plane.
if (CompactionTable.size() <= unsigned(TySlot))
CompactionTable.resize(TySlot+1);
// Move the plane back into the compaction table.
std::swap(CompactionTable[TySlot], TmpCompactionTable[TmpTypes.size()]);
// And remove the empty plane we just moved in.
TmpCompactionTable.pop_back();
}
}
}
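
The buildCompactionTable logic above is the heart of what this patch removes:
per function, the (potentially very large) module-level slot numbers of the
values and types the function actually touches are remapped onto a small dense
range, so operand indices stay cheap to VBR-encode. A minimal sketch of that
remapping idea, using hypothetical names and none of the real SlotCalculator
machinery:

#include <cstdio>
#include <map>
#include <vector>

// Hypothetical stand-in for one plane of a compaction table: maps a
// module-level slot number to a small function-local index, assigning local
// indices densely in first-use order.
class CompactionPlane {
  std::map<unsigned, unsigned> LocalSlot; // global slot -> local slot
  std::vector<unsigned> GlobalSlot;       // local slot  -> global slot
public:
  unsigned getOrCreateLocal(unsigned Global) {
    std::map<unsigned, unsigned>::iterator I = LocalSlot.find(Global);
    if (I != LocalSlot.end())
      return I->second;                   // Already compacted.
    unsigned Local = GlobalSlot.size();   // Next dense local index.
    LocalSlot[Global] = Local;
    GlobalSlot.push_back(Global);
    return Local;
  }
  unsigned getGlobal(unsigned Local) const { return GlobalSlot[Local]; }
};

int main() {
  // A function referring to module-level slots 90123, 88 and 4567: locally
  // they become 0, 1 and 2, which encode in far fewer VBR bits.
  CompactionPlane Plane;
  const unsigned Uses[] = {90123, 88, 90123, 4567};
  for (unsigned i = 0; i != sizeof(Uses) / sizeof(Uses[0]); ++i)
    std::printf("global %u -> local %u\n", Uses[i],
                Plane.getOrCreateLocal(Uses[i]));
  return 0;
}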
/// pruneCompactionTable - Once the entire function being processed has been
/// incorporated into the current compaction table, look over the compaction
/// table and check to see if there are any values whose compaction will not
/// save us any space in the bytecode file. If compactifying these values
/// serves no purpose, then we might as well not even emit the compactification
/// information to the bytecode file, saving a bit more space.
///
/// Note that the type plane has already been compactified if possible.
///
void SlotCalculator::pruneCompactionTable() {
TypeList &TyPlane = CompactionTypes;
for (unsigned ctp = 0, e = CompactionTable.size(); ctp != e; ++ctp)
if (!CompactionTable[ctp].empty()) {
TypePlane &CPlane = CompactionTable[ctp];
unsigned GlobalSlot = ctp;
if (!TyPlane.empty())
GlobalSlot = getGlobalSlot(TyPlane[ctp]);
if (GlobalSlot >= Table.size())
Table.resize(GlobalSlot+1);
TypePlane &GPlane = Table[GlobalSlot];
unsigned ModLevel = getModuleLevel(ctp);
unsigned NumFunctionObjs = CPlane.size()-ModLevel;
// If the maximum index required if all entries in this plane were merged
// into the global plane is less than 64, go ahead and eliminate the
// plane.
bool PrunePlane = GPlane.size() + NumFunctionObjs < 64;
// If there are no function-local values defined, and the maximum
// referenced global entry is less than 64, we don't need to compactify.
if (!PrunePlane && NumFunctionObjs == 0) {
unsigned MaxIdx = 0;
for (unsigned i = 0; i != ModLevel; ++i) {
unsigned Idx = NodeMap[CPlane[i]];
if (Idx > MaxIdx) MaxIdx = Idx;
}
PrunePlane = MaxIdx < 64;
}
// Ok, finally, if we decided to prune this plane out of the compaction
// table, do so now.
if (PrunePlane) {
TypePlane OldPlane;
std::swap(OldPlane, CPlane);
// Loop over the function local objects, relocating them to the global
// table plane.
for (unsigned i = ModLevel, e = OldPlane.size(); i != e; ++i) {
const Value *V = OldPlane[i];
CompactionNodeMap.erase(V);
assert(NodeMap.count(V) == 0 && "Value already in table??");
getOrCreateSlot(V);
}
// For compactified global values, just remove them from the compaction
// node map.
for (unsigned i = 0; i != ModLevel; ++i)
CompactionNodeMap.erase(OldPlane[i]);
// Update the new modulelevel for this plane.
assert(ctp < ModuleLevel.size() && "Cannot set modulelevel!");
ModuleLevel[ctp] = GPlane.size()-NumFunctionObjs;
assert((int)ModuleLevel[ctp] >= 0 && "Bad computation!");
}
}
}
/// Determine if the compaction table is actually empty. Because the
/// compaction table always includes the primitive type planes, we
/// can't just check getCompactionTable().size() because it will never
/// be zero. Furthermore, the ModuleLevel factors into whether a given
/// plane is empty or not. This function does the necessary computation
/// to determine if its actually empty.
bool SlotCalculator::CompactionTableIsEmpty() const {
// Check a degenerate case, just in case.
if (CompactionTable.empty())
return true;
// Check each plane
for (unsigned i = 0, e = CompactionTable.size(); i < e; ++i) {
// If the plane is not empty
if (!CompactionTable[i].empty()) {
// If the module level is non-zero then at least the
// first element of the plane is valid and therefore not empty.
unsigned End = getModuleLevel(i);
if (End != 0)
return false;
}
}
// All the compaction table planes are empty so the table is
// considered empty too.
return true;
}
int SlotCalculator::getSlot(const Value *V) const {
// If there is a CompactionTable active...
if (!CompactionNodeMap.empty()) {
std::map<const Value*, unsigned>::const_iterator I =
CompactionNodeMap.find(V);
if (I != CompactionNodeMap.end())
return (int)I->second;
// Otherwise, if it's not in the compaction table, it must be in a
// non-compactified plane.
}
std::map<const Value*, unsigned>::const_iterator I = NodeMap.find(V);
if (I != NodeMap.end())
return (int)I->second;
@ -677,16 +358,6 @@ int SlotCalculator::getSlot(const Value *V) const {
}
int SlotCalculator::getSlot(const Type*T) const {
// If there is a CompactionTable active...
if (!CompactionTypeMap.empty()) {
std::map<const Type*, unsigned>::const_iterator I =
CompactionTypeMap.find(T);
if (I != CompactionTypeMap.end())
return (int)I->second;
// Otherwise, if it's not in the compaction table, it must be in a
// non-compactified plane.
}
std::map<const Type*, unsigned>::const_iterator I = TypeMap.find(T);
if (I != TypeMap.end())
return (int)I->second;
@ -705,8 +376,6 @@ int SlotCalculator::getOrCreateSlot(const Value *V) {
if (!isa<GlobalValue>(V)) // Initializers for globals are handled explicitly
if (const Constant *C = dyn_cast<Constant>(V)) {
assert(CompactionNodeMap.empty() &&
"All needed constants should be in the compaction map already!");
// Do not index the characters that make up constant strings. We emit
// constant strings as special entities that don't require their
@ -741,20 +410,6 @@ int SlotCalculator::insertValue(const Value *D, bool dontIgnore) {
assert(D && "Can't insert a null value!");
assert(getSlot(D) == -1 && "Value is already in the table!");
// If we are building a compaction map, and if this plane is being compacted,
// insert the value into the compaction map, not into the global map.
if (!CompactionNodeMap.empty()) {
if (D->getType() == Type::VoidTy) return -1; // Do not insert void values
assert(!isa<Constant>(D) &&
"Types, constants, and globals should be in global table!");
int Plane = getSlot(D->getType());
assert(Plane != -1 && CompactionTable.size() > (unsigned)Plane &&
"Didn't find value type!");
if (!CompactionTable[Plane].empty())
return getOrCreateCompactionTableSlot(D);
}
// If this node does not contribute to a plane, or if the node has a
// name and we don't want names, then ignore the silly node... Note that types
// do need slot numbers so that we can keep track of where other values land.
@ -773,12 +428,6 @@ int SlotCalculator::insertType(const Type *Ty, bool dontIgnore) {
assert(Ty && "Can't insert a null type!");
assert(getSlot(Ty) == -1 && "Type is already in the table!");
// If we are building a compaction map, and if this plane is being compacted,
// insert the value into the compaction map, not into the global map.
if (!CompactionTypeMap.empty()) {
getOrCreateCompactionTableSlot(Ty);
}
// Insert the current type before any subtypes. This is important because
// recursive types elements are inserted in a bottom up order. Changing
// this here can break things. For example:
@ -818,11 +467,7 @@ int SlotCalculator::doInsertValue(const Value *D) {
// llvm_cerr << "Inserting type '"<<cast<Type>(D)->getDescription() <<"'!\n";
if (Typ->isDerivedType()) {
int ValSlot;
if (CompactionTable.empty())
ValSlot = getSlot(Typ);
else
ValSlot = getGlobalSlot(Typ);
int ValSlot = getSlot(Typ);
if (ValSlot == -1) { // Have we already entered this type?
// Nope, this is the first we have seen the type, process it.
ValSlot = insertType(Typ, true);


@ -61,17 +61,6 @@ class SlotCalculator {
/// is only possible if building information for a bytecode file.
bool ModuleContainsAllFunctionConstants;
/// CompactionTable/NodeMap - When function compaction has been performed,
/// these entries provide a compacted view of the namespace needed to emit
/// instructions in a function body. The 'getSlot()' method automatically
/// returns these entries if applicable, or the global entries if not.
std::vector<TypePlane> CompactionTable;
TypeList CompactionTypes;
typedef std::map<const Value*, unsigned> CompactionNodeMapType;
CompactionNodeMapType CompactionNodeMap;
typedef std::map<const Type*, unsigned> CompactionTypeMapType;
CompactionTypeMapType CompactionTypeMap;
SlotCalculator(const SlotCalculator &); // DO NOT IMPLEMENT
void operator=(const SlotCalculator &); // DO NOT IMPLEMENT
public:
@ -85,24 +74,8 @@ public:
int getSlot(const Value *V) const;
int getSlot(const Type* T) const;
/// getGlobalSlot - Return a slot number from the global table. This can only
/// be used when a compaction table is active.
unsigned getGlobalSlot(const Value *V) const;
unsigned getGlobalSlot(const Type *V) const;
inline unsigned getNumPlanes() const {
if (CompactionTable.empty())
return Table.size();
else
return CompactionTable.size();
}
inline unsigned getNumTypes() const {
if (CompactionTypes.empty())
return Types.size();
else
return CompactionTypes.size();
}
inline unsigned getNumPlanes() const { return Table.size(); }
inline unsigned getNumTypes() const { return Types.size(); }
inline unsigned getModuleLevel(unsigned Plane) const {
return Plane < ModuleLevel.size() ? ModuleLevel[Plane] : 0;
@ -114,11 +87,7 @@ public:
}
TypePlane &getPlane(unsigned Plane);
TypeList& getTypes() {
if (!CompactionTypes.empty())
return CompactionTypes;
return Types;
}
TypeList& getTypes() { return Types; }
/// incorporateFunction/purgeFunction - If you'd like to deal with a function,
/// use these two methods to get its data into the SlotCalculator!
@ -133,15 +102,6 @@ public:
string_iterator string_begin() const { return ConstantStrings.begin(); }
string_iterator string_end() const { return ConstantStrings.end(); }
const std::vector<TypePlane> &getCompactionTable() const {
return CompactionTable;
}
const TypeList& getCompactionTypes() const { return CompactionTypes; }
/// @brief Determine if the compaction table (not types) is empty
bool CompactionTableIsEmpty() const;
private:
// getOrCreateSlot - Values can be crammed into here at will... if
// they haven't been inserted already, they get inserted, otherwise
@ -173,11 +133,6 @@ private:
void processValueSymbolTable(const SymbolTable *ST);
void processSymbolTableConstants(const SymbolTable *ST);
void buildCompactionTable(const Function *F);
unsigned getOrCreateCompactionTableSlot(const Value *V);
unsigned getOrCreateCompactionTableSlot(const Type *V);
void pruneCompactionTable();
// insertPrimitives - helper for constructors to insert primitive types.
void insertPrimitives();
};


@ -1112,14 +1112,7 @@ void BytecodeWriter::outputFunction(const Function *F) {
// Get slot information about the function...
Table.incorporateFunction(F);
if (Table.getCompactionTable().empty()) {
// Output information about the constants in the function if the compaction
// table is not being used.
outputConstants(true);
} else {
// Otherwise, emit the compaction table.
outputCompactionTable();
}
outputConstants(true);
// Output all of the instructions in the body of the function
outputInstructions(F);
@ -1130,75 +1123,6 @@ void BytecodeWriter::outputFunction(const Function *F) {
Table.purgeFunction();
}
void BytecodeWriter::outputCompactionTablePlane(unsigned PlaneNo,
const std::vector<const Value*> &Plane,
unsigned StartNo) {
unsigned End = Table.getModuleLevel(PlaneNo);
if (Plane.empty() || StartNo == End || End == 0) return; // Nothing to emit
assert(StartNo < End && "Cannot emit negative range!");
assert(StartNo < Plane.size() && End <= Plane.size());
// Do not emit the null initializer!
++StartNo;
// Figure out which encoding to use. By far the most common case we have is
// to emit 0-2 entries in a compaction table plane.
switch (End-StartNo) {
case 0: // Avoid emitting two vbr's if possible.
case 1:
case 2:
output_vbr((PlaneNo << 2) | End-StartNo);
break;
default:
// Output the number of things.
output_vbr((unsigned(End-StartNo) << 2) | 3);
output_typeid(PlaneNo); // Emit the type plane this is
break;
}
for (unsigned i = StartNo; i != End; ++i)
output_vbr(Table.getGlobalSlot(Plane[i]));
}
void BytecodeWriter::outputCompactionTypes(unsigned StartNo) {
// Get the compaction type table from the slot calculator
const std::vector<const Type*> &CTypes = Table.getCompactionTypes();
// The compaction types may have been uncompactified back to the
// global types. If so, we just write an empty table
if (CTypes.size() == 0) {
output_vbr(0U);
return;
}
assert(CTypes.size() >= StartNo && "Invalid compaction types start index");
// Determine how many types to write
unsigned NumTypes = CTypes.size() - StartNo;
// Output the number of types.
output_vbr(NumTypes);
for (unsigned i = StartNo; i < StartNo+NumTypes; ++i)
output_typeid(Table.getGlobalSlot(CTypes[i]));
}
void BytecodeWriter::outputCompactionTable() {
// Avoid writing the compaction table at all if there is no content.
if (Table.getCompactionTypes().size() >= Type::FirstDerivedTyID ||
(!Table.CompactionTableIsEmpty())) {
BytecodeBlock CTB(BytecodeFormat::CompactionTableBlockID, *this,
true/*ElideIfEmpty*/);
const std::vector<std::vector<const Value*> > &CT =
Table.getCompactionTable();
// First things first, emit the type compaction table if there is one.
outputCompactionTypes(Type::FirstDerivedTyID);
for (unsigned i = 0, e = CT.size(); i != e; ++i)
outputCompactionTablePlane(i, CT[i], 0);
}
}
void BytecodeWriter::outputTypeSymbolTable(const TypeSymbolTable &TST) {
// Do not output the block for an empty symbol table, it just wastes