For PR1072:

Removing -raise has negligible positive or negative side effects, so we are
opting to remove it. See the PR for comparison details.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@33844 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
Reid Spencer 2007-02-03 23:15:56 +00:00
parent a49fd07ec6
commit 7ba98a9000
42 changed files with 2 additions and 2134 deletions

View File

@ -96,7 +96,6 @@ namespace {
(void) llvm::createDemoteRegisterToMemoryPass();
(void) llvm::createPruneEHPass();
(void) llvm::createRaiseAllocationsPass();
(void) llvm::createRaisePointerReferencesPass();
(void) llvm::createReassociatePass();
(void) llvm::createSCCPPass();
(void) llvm::createScalarReplAggregatesPass();

View File

@ -26,14 +26,6 @@ class PassInfo;
class TerminatorInst;
class TargetLowering;
//===----------------------------------------------------------------------===//
//
// RaisePointerReferences - Try to eliminate as many pointer arithmetic
// expressions as possible, by converting expressions to use getelementptr and
// friends.
//
FunctionPass *createRaisePointerReferencesPass();
//===----------------------------------------------------------------------===//
//
// ConstantPropagation - A worklist driven constant propagation pass

View File

@ -1,998 +0,0 @@
//===- ExprTypeConvert.cpp - Code to change an LLVM Expr Type -------------===//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the part of level raising that checks to see if it is
// possible to coerce an entire expression tree into a different type. If
// convertible, other routines from this file will do the conversion.
//
//===----------------------------------------------------------------------===//
#include "TransformInternals.h"
#include "llvm/Constants.h"
#include "llvm/Instructions.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Debug.h"
#include <algorithm>
using namespace llvm;
static bool OperandConvertibleToType(User *U, Value *V, const Type *Ty,
ValueTypeCache &ConvertedTypes,
const TargetData &TD);
static void ConvertOperandToType(User *U, Value *OldVal, Value *NewVal,
ValueMapCache &VMC, const TargetData &TD);
// ExpressionConvertibleToType - Return true if it is possible to coerce the
// entire expression tree rooted at V into type Ty without changing its
// meaning.  CTMap memoizes (possibly in-progress) answers so circular
// def-use chains terminate.  This is a pure query: no IR is modified.
bool llvm::ExpressionConvertibleToType(Value *V, const Type *Ty,
ValueTypeCache &CTMap, const TargetData &TD) {
// Expression type must be holdable in a register.
if (!Ty->isFirstClassType())
return false;
// Already answered (or currently being answered) for this value?
ValueTypeCache::iterator CTMI = CTMap.find(V);
if (CTMI != CTMap.end()) return CTMI->second == Ty;
// If it's a constant... all constants can be converted to a different
// type.
//
if (isa<Constant>(V) && !isa<GlobalValue>(V))
return true;
// Record the tentative answer BEFORE recursing so cycles see it.
CTMap[V] = Ty;
if (V->getType() == Ty) return true; // Expression already correct type!
Instruction *I = dyn_cast<Instruction>(V);
if (I == 0) return false; // Otherwise, we can't convert!
switch (I->getOpcode()) {
case Instruction::BitCast:
if (!cast<BitCastInst>(I)->isLosslessCast())
return false;
// We do not allow conversion of a cast that casts from a ptr to array
// of X to a *X. For example: cast [4 x %List *] * %val to %List * *
//
if (const PointerType *SPT =
dyn_cast<PointerType>(I->getOperand(0)->getType()))
if (const PointerType *DPT = dyn_cast<PointerType>(I->getType()))
if (const ArrayType *AT = dyn_cast<ArrayType>(SPT->getElementType()))
if (AT->getElementType() == DPT->getElementType())
return false;
// Otherwise it is a lossless cast and we can allow it
break;
case Instruction::Add:
case Instruction::Sub:
// Both operands must be convertible to the target arithmetic type.
if (!Ty->isInteger() && !Ty->isFloatingPoint()) return false;
if (!ExpressionConvertibleToType(I->getOperand(0), Ty, CTMap, TD) ||
!ExpressionConvertibleToType(I->getOperand(1), Ty, CTMap, TD))
return false;
break;
// For shifts, only operand 0 (the shifted value) is retyped; the shift
// amount keeps its original type.
case Instruction::LShr:
case Instruction::AShr:
if (!Ty->isInteger()) return false;
if (!ExpressionConvertibleToType(I->getOperand(0), Ty, CTMap, TD))
return false;
break;
case Instruction::Shl:
if (!Ty->isInteger()) return false;
if (!ExpressionConvertibleToType(I->getOperand(0), Ty, CTMap, TD))
return false;
break;
case Instruction::Load: {
// A load yields Ty iff its pointer operand can become Ty*.
LoadInst *LI = cast<LoadInst>(I);
if (!ExpressionConvertibleToType(LI->getPointerOperand(),
PointerType::get(Ty), CTMap, TD))
return false;
break;
}
case Instruction::PHI: {
PHINode *PN = cast<PHINode>(I);
// Be conservative if we find a giant PHI node.
if (PN->getNumIncomingValues() > 32) return false;
for (unsigned i = 0; i < PN->getNumIncomingValues(); ++i)
if (!ExpressionConvertibleToType(PN->getIncomingValue(i), Ty, CTMap, TD))
return false;
break;
}
case Instruction::GetElementPtr: {
// GetElementPtr's are directly convertible to a pointer type if they have
// a number of zeros at the end. Because removing these values does not
// change the logical offset of the GEP, it is okay and fair to remove them.
// This can change this:
// %t1 = getelementptr %Hosp * %hosp, ubyte 4, ubyte 0 ; <%List **>
// %t2 = cast %List * * %t1 to %List *
// into
// %t2 = getelementptr %Hosp * %hosp, ubyte 4 ; <%List *>
//
GetElementPtrInst *GEP = cast<GetElementPtrInst>(I);
const PointerType *PTy = dyn_cast<PointerType>(Ty);
if (!PTy) return false; // GEP must always return a pointer...
const Type *PVTy = PTy->getElementType();
// Check to see if there are zero elements that we can remove from the
// index array. If there are, check to see if removing them causes us to
// get to the right type...
//
std::vector<Value*> Indices(GEP->idx_begin(), GEP->idx_end());
const Type *BaseType = GEP->getPointerOperand()->getType();
const Type *ElTy = 0;
while (!Indices.empty() &&
Indices.back() == Constant::getNullValue(Indices.back()->getType())){
Indices.pop_back();
ElTy = GetElementPtrInst::getIndexedType(BaseType, Indices, true);
if (ElTy == PVTy)
break; // Found a match!!
ElTy = 0;
}
if (ElTy) break; // Found a number of zeros we can strip off!
// Otherwise, it could be that we have something like this:
// getelementptr [[sbyte] *] * %reg115, long %reg138 ; [sbyte]**
// and want to convert it into something like this:
// getelementptr [[int] *] * %reg115, long %reg138 ; [int]**
//
// This is legal only when both element types occupy the same number of
// bytes, so the GEP arithmetic is unchanged.
if (GEP->getNumOperands() == 2 &&
PTy->getElementType()->isSized() &&
TD.getTypeSize(PTy->getElementType()) ==
TD.getTypeSize(GEP->getType()->getElementType())) {
const PointerType *NewSrcTy = PointerType::get(PVTy);
if (!ExpressionConvertibleToType(I->getOperand(0), NewSrcTy, CTMap, TD))
return false;
break;
}
return false; // No match, maybe next time.
}
case Instruction::Call: {
if (isa<Function>(I->getOperand(0)))
return false; // Don't even try to change direct calls.
// If this is a function pointer, we can convert the return type if we can
// convert the source function pointer.
//
const PointerType *PT = cast<PointerType>(I->getOperand(0)->getType());
const FunctionType *FT = cast<FunctionType>(PT->getElementType());
std::vector<const Type *> ArgTys(FT->param_begin(), FT->param_end());
const FunctionType *NewTy =
FunctionType::get(Ty, ArgTys, FT->isVarArg());
if (!ExpressionConvertibleToType(I->getOperand(0),
PointerType::get(NewTy), CTMap, TD))
return false;
break;
}
default:
return false;
}
// Expressions are only convertible if all of the users of the expression can
// have this value converted. This makes use of the map to avoid infinite
// recursion.
//
for (Value::use_iterator It = I->use_begin(), E = I->use_end(); It != E; ++It)
if (!OperandConvertibleToType(*It, I, Ty, CTMap, TD))
return false;
return true;
}
// ConvertExpressionToType - Convert the expression tree rooted at V to type
// Ty, returning the replacement value.  The caller is expected to have
// established feasibility with ExpressionConvertibleToType first (the
// default case asserts otherwise).  VMC.ExprMap caches already-converted
// expressions; entries are inserted eagerly before recursing so that
// circular expressions resolve to the node under construction.
Value *llvm::ConvertExpressionToType(Value *V, const Type *Ty,
ValueMapCache &VMC, const TargetData &TD) {
if (V->getType() == Ty) return V; // Already where we need to be?
// Reuse a previously-built conversion if one exists.
ValueMapCache::ExprMapTy::iterator VMCI = VMC.ExprMap.find(V);
if (VMCI != VMC.ExprMap.end()) {
assert(VMCI->second->getType() == Ty);
if (Instruction *I = dyn_cast<Instruction>(V))
ValueHandle IHandle(VMC, I); // Remove I if it is unused now!
return VMCI->second;
}
DOUT << "CETT: " << (void*)V << " " << *V;
Instruction *I = dyn_cast<Instruction>(V);
if (I == 0) {
Constant *CPV = cast<Constant>(V);
// Constants are converted by constant folding the cast that is required.
// We assume here that all casts are implemented for constant prop.
// FIXME: This seems to work, but it is unclear why ZEXT is always the
// right choice here.
Instruction::CastOps opcode = CastInst::getCastOpcode(CPV, false, Ty,false);
Value *Result = ConstantExpr::getCast(opcode, CPV, Ty);
// Add the instruction to the expression map
//VMC.ExprMap[V] = Result;
return Result;
}
BasicBlock *BB = I->getParent();
std::string Name = I->getName(); if (!Name.empty()) I->setName("");
Instruction *Res; // Result of conversion
ValueHandle IHandle(VMC, I); // Prevent I from being removed!
// Dummy is a placeholder operand of the new type; real operands are filled
// in after the result node has been registered in the expression map.
Constant *Dummy = Constant::getNullValue(Ty);
switch (I->getOpcode()) {
case Instruction::BitCast: {
assert(VMC.NewCasts.count(ValueHandle(VMC, I)) == 0);
Instruction::CastOps opcode = CastInst::getCastOpcode(I->getOperand(0),
false, Ty, false);
Res = CastInst::create(opcode, I->getOperand(0), Ty, Name);
VMC.NewCasts.insert(ValueHandle(VMC, Res));
break;
}
case Instruction::Add:
case Instruction::Sub:
Res = BinaryOperator::create(cast<BinaryOperator>(I)->getOpcode(),
Dummy, Dummy, Name);
VMC.ExprMap[I] = Res; // Add node to expression eagerly
Res->setOperand(0, ConvertExpressionToType(I->getOperand(0), Ty, VMC, TD));
Res->setOperand(1, ConvertExpressionToType(I->getOperand(1), Ty, VMC, TD));
break;
case Instruction::Shl:
case Instruction::LShr:
case Instruction::AShr:
// Only the shifted value is converted; the shift amount is reused as-is.
Res = BinaryOperator::create(cast<BinaryOperator>(I)->getOpcode(), Dummy,
I->getOperand(1), Name);
VMC.ExprMap[I] = Res;
Res->setOperand(0, ConvertExpressionToType(I->getOperand(0), Ty, VMC, TD));
break;
case Instruction::Load: {
LoadInst *LI = cast<LoadInst>(I);
Res = new LoadInst(Constant::getNullValue(PointerType::get(Ty)), Name);
VMC.ExprMap[I] = Res;
Res->setOperand(0, ConvertExpressionToType(LI->getPointerOperand(),
PointerType::get(Ty), VMC, TD));
assert(Res->getOperand(0)->getType() == PointerType::get(Ty));
assert(Ty == Res->getType());
assert(Res->getType()->isFirstClassType() && "Load of structure or array!");
break;
}
case Instruction::PHI: {
PHINode *OldPN = cast<PHINode>(I);
PHINode *NewPN = new PHINode(Ty, Name);
VMC.ExprMap[I] = NewPN; // Add node to expression eagerly
// Drain the old PHI one incoming edge at a time, converting each incoming
// value.  OldValHandle keeps the detached value alive across conversion.
while (OldPN->getNumOperands()) {
BasicBlock *BB = OldPN->getIncomingBlock(0);
Value *OldVal = OldPN->getIncomingValue(0);
ValueHandle OldValHandle(VMC, OldVal);
OldPN->removeIncomingValue(BB, false);
Value *V = ConvertExpressionToType(OldVal, Ty, VMC, TD);
NewPN->addIncoming(V, BB);
}
Res = NewPN;
break;
}
case Instruction::GetElementPtr: {
// GetElementPtr's are directly convertible to a pointer type if they have
// a number of zeros at the end. Because removing these values does not
// change the logical offset of the GEP, it is okay and fair to remove them.
// This can change this:
// %t1 = getelementptr %Hosp * %hosp, ubyte 4, ubyte 0 ; <%List **>
// %t2 = cast %List * * %t1 to %List *
// into
// %t2 = getelementptr %Hosp * %hosp, ubyte 4 ; <%List *>
//
GetElementPtrInst *GEP = cast<GetElementPtrInst>(I);
// Check to see if there are zero elements that we can remove from the
// index array. If there are, check to see if removing them causes us to
// get to the right type...
//
std::vector<Value*> Indices(GEP->idx_begin(), GEP->idx_end());
const Type *BaseType = GEP->getPointerOperand()->getType();
const Type *PVTy = cast<PointerType>(Ty)->getElementType();
Res = 0;
while (!Indices.empty() &&
Indices.back() == Constant::getNullValue(Indices.back()->getType())){
Indices.pop_back();
if (GetElementPtrInst::getIndexedType(BaseType, Indices, true) == PVTy) {
if (Indices.size() == 0)
// We want to no-op cast this so use BitCast
Res = new BitCastInst(GEP->getPointerOperand(), BaseType);
else
Res = new GetElementPtrInst(GEP->getPointerOperand(), Indices, Name);
break;
}
}
// Otherwise, it could be that we have something like this:
// getelementptr [[sbyte] *] * %reg115, uint %reg138 ; [sbyte]**
// and want to convert it into something like this:
// getelementptr [[int] *] * %reg115, uint %reg138 ; [int]**
//
if (Res == 0) {
const PointerType *NewSrcTy = PointerType::get(PVTy);
std::vector<Value*> Indices(GEP->idx_begin(), GEP->idx_end());
Res = new GetElementPtrInst(Constant::getNullValue(NewSrcTy),
Indices, Name);
VMC.ExprMap[I] = Res;
Res->setOperand(0, ConvertExpressionToType(I->getOperand(0),
NewSrcTy, VMC, TD));
}
assert(Res && "Didn't find match!");
break;
}
case Instruction::Call: {
assert(!isa<Function>(I->getOperand(0)));
// If this is a function pointer, we can convert the return type if we can
// convert the source function pointer.
//
const PointerType *PT = cast<PointerType>(I->getOperand(0)->getType());
const FunctionType *FT = cast<FunctionType>(PT->getElementType());
std::vector<const Type *> ArgTys(FT->param_begin(), FT->param_end());
const FunctionType *NewTy =
FunctionType::get(Ty, ArgTys, FT->isVarArg());
const PointerType *NewPTy = PointerType::get(NewTy);
if (Ty == Type::VoidTy)
Name = ""; // Make sure not to name calls that now return void!
Res = new CallInst(Constant::getNullValue(NewPTy),
std::vector<Value*>(I->op_begin()+1, I->op_end()),
Name);
if (cast<CallInst>(I)->isTailCall())
cast<CallInst>(Res)->setTailCall();
cast<CallInst>(Res)->setCallingConv(cast<CallInst>(I)->getCallingConv());
VMC.ExprMap[I] = Res;
Res->setOperand(0, ConvertExpressionToType(I->getOperand(0),NewPTy,VMC,TD));
break;
}
default:
assert(0 && "Expression convertible, but don't know how to convert?");
return 0;
}
assert(Res->getType() == Ty && "Didn't convert expr to correct type!");
// Place the replacement immediately before the original instruction.
BB->getInstList().insert(I, Res);
// Add the instruction to the expression map
VMC.ExprMap[I] = Res;
//// WTF is this code! FIXME: remove this.
// Walk I's users by index rather than iterator: converting a user can
// erase it from the use list, so the list is re-read after every step and
// the index only advances when the list size did not change.
unsigned NumUses = I->getNumUses();
for (unsigned It = 0; It < NumUses; ) {
unsigned OldSize = NumUses;
Value::use_iterator UI = I->use_begin();
std::advance(UI, It);
ConvertOperandToType(*UI, I, Res, VMC, TD);
NumUses = I->getNumUses();
if (NumUses == OldSize) ++It;
}
DOUT << "ExpIn: " << (void*)I << " " << *I
<< "ExpOut: " << (void*)Res << " " << *Res;
return Res;
}
// ValueConvertibleToType - Determine whether every use of V could accept V
// retyped as Ty.  ConvertedTypes memoizes (possibly in-progress) answers so
// circular def-use chains terminate.
bool llvm::ValueConvertibleToType(Value *V, const Type *Ty,
                                  ValueTypeCache &ConvertedTypes,
                                  const TargetData &TD) {
  // A cached entry (even one still being computed) is authoritative.
  ValueTypeCache::iterator Cached = ConvertedTypes.find(V);
  if (Cached != ConvertedTypes.end())
    return Cached->second == Ty;

  // Record the tentative answer before recursing so cycles see it.
  ConvertedTypes[V] = Ty;

  if (V->getType() == Ty)
    return true; // Already the right type; nothing for users to absorb.

  // It is safe to convert V to Ty IFF every user can accept the new type.
  for (Value::use_iterator UI = V->use_begin(), UE = V->use_end();
       UI != UE; ++UI)
    if (!OperandConvertibleToType(*UI, V, Ty, ConvertedTypes, TD))
      return false;
  return true;
}
// OperandConvertibleToType - Return true if it is possible to convert operand
// V of User (instruction) U to the specified type. This is true iff it is
// possible to change the specified instruction to accept this. CTMap is a map
// of converted types, so that circular definitions will see the future type of
// the expression, not the static current type.
//
// This is a pure query; no IR is modified here.
static bool OperandConvertibleToType(User *U, Value *V, const Type *Ty,
ValueTypeCache &CTMap,
const TargetData &TD) {
// if (V->getType() == Ty) return true; // Operand already the right type?
// Expression type must be holdable in a register.
if (!Ty->isFirstClassType())
return false;
Instruction *I = dyn_cast<Instruction>(U);
if (I == 0) return false; // We can't convert non-instructions!
switch (I->getOpcode()) {
case Instruction::BitCast:
assert(I->getOperand(0) == V);
// We can convert the expr if the cast destination type is losslessly
// convertible to the requested type. Also, do not change a cast that
// is a noop cast. For all intents and purposes it should be eliminated.
if (!cast<BitCastInst>(I)->isLosslessCast() ||
I->getType() == I->getOperand(0)->getType())
return false;
// We also do not allow conversion of a cast that casts from a ptr to array
// of X to a *X. For example: cast [4 x %List *] * %val to %List * *
//
if (const PointerType *SPT =
dyn_cast<PointerType>(I->getOperand(0)->getType()))
if (const PointerType *DPT = dyn_cast<PointerType>(I->getType()))
if (const ArrayType *AT = dyn_cast<ArrayType>(SPT->getElementType()))
if (AT->getElementType() == DPT->getElementType())
return false;
return true;
case Instruction::Add:
case Instruction::Sub: {
// Retyping one operand forces the other operand AND the result to Ty.
if (!Ty->isInteger() && !Ty->isFloatingPoint()) return false;
Value *OtherOp = I->getOperand((V == I->getOperand(0)) ? 1 : 0);
return ValueConvertibleToType(I, Ty, CTMap, TD) &&
ExpressionConvertibleToType(OtherOp, Ty, CTMap, TD);
}
case Instruction::ICmp: {
// Only equality comparisons survive an operand retype; ordered predicates
// are sign/representation sensitive, so they are rejected.
if (cast<ICmpInst>(I)->getPredicate() == ICmpInst::ICMP_EQ ||
cast<ICmpInst>(I)->getPredicate() == ICmpInst::ICMP_NE) {
Value *OtherOp = I->getOperand((V == I->getOperand(0)) ? 1 : 0);
return ExpressionConvertibleToType(OtherOp, Ty, CTMap, TD);
}
return false;
}
case Instruction::Shl:
case Instruction::LShr:
case Instruction::AShr:
if (I->getOperand(1) == V) return false; // Cannot change shift amount type
if (!Ty->isInteger()) return false;
return ValueConvertibleToType(I, Ty, CTMap, TD);
case Instruction::Free:
assert(I->getOperand(0) == V);
return isa<PointerType>(Ty); // Free can free any pointer type!
case Instruction::Load:
// Cannot convert the types of any subscripts...
if (I->getOperand(0) != V) return false;
if (const PointerType *PT = dyn_cast<PointerType>(Ty)) {
LoadInst *LI = cast<LoadInst>(I);
const Type *LoadedTy = PT->getElementType();
// They could be loading the first element of a composite type...
if (const CompositeType *CT = dyn_cast<CompositeType>(LoadedTy)) {
unsigned Offset = 0; // No offset, get first leaf.
std::vector<Value*> Indices; // Discarded...
LoadedTy = getStructOffsetType(CT, Offset, Indices, TD, false);
assert(Offset == 0 && "Offset changed from zero???");
}
if (!LoadedTy->isFirstClassType())
return false;
// The new load must move exactly as many bytes as the old one did.
if (TD.getTypeSize(LoadedTy) != TD.getTypeSize(LI->getType()))
return false;
return ValueConvertibleToType(LI, LoadedTy, CTMap, TD);
}
return false;
case Instruction::Store: {
if (V == I->getOperand(0)) {
ValueTypeCache::iterator CTMI = CTMap.find(I->getOperand(1));
if (CTMI != CTMap.end()) { // Operand #1 is in the table already?
// If so, check to see if it's Ty*, or, more importantly, if it is a
// pointer to a structure where the first element is a Ty... this code
// is necessary because we might be trying to change the source and
// destination type of the store (they might be related) and the dest
// pointer type might be a pointer to structure. Below we allow pointer
// to structures where the 0th element is compatible with the value,
// now we have to support the symmetrical part of this.
//
const Type *ElTy = cast<PointerType>(CTMI->second)->getElementType();
// Already a pointer to what we want? Trivially accept...
if (ElTy == Ty) return true;
// Tricky case now, if the destination is a pointer to structure,
// obviously the source is not allowed to be a structure (cannot copy
// a whole structure at a time), so the level raiser must be trying to
// store into the first field. Check for this and allow it now:
//
if (isa<StructType>(ElTy)) {
unsigned Offset = 0;
std::vector<Value*> Indices;
ElTy = getStructOffsetType(ElTy, Offset, Indices, TD, false);
assert(Offset == 0 && "Offset changed!");
if (ElTy == 0) // Element at offset zero in struct doesn't exist!
return false; // Can only happen for {}*
if (ElTy == Ty) // Looks like the 0th element of structure is
return true; // compatible! Accept now!
// Otherwise we know that we can't work, so just stop trying now.
return false;
}
}
// Can convert the store if we can convert the pointer operand to match
// the new value type...
return ExpressionConvertibleToType(I->getOperand(1), PointerType::get(Ty),
CTMap, TD);
} else if (const PointerType *PT = dyn_cast<PointerType>(Ty)) {
const Type *ElTy = PT->getElementType();
assert(V == I->getOperand(1));
if (isa<StructType>(ElTy)) {
// We can change the destination pointer if we can store our first
// argument into the first element of the structure...
//
unsigned Offset = 0;
std::vector<Value*> Indices;
ElTy = getStructOffsetType(ElTy, Offset, Indices, TD, false);
assert(Offset == 0 && "Offset changed!");
if (ElTy == 0) // Element at offset zero in struct doesn't exist!
return false; // Can only happen for {}*
}
// Must move the same amount of data...
if (!ElTy->isSized() ||
TD.getTypeSize(ElTy) != TD.getTypeSize(I->getOperand(0)->getType()))
return false;
// Can convert store if the incoming value is convertible and if the
// result will preserve semantics...
const Type *Op0Ty = I->getOperand(0)->getType();
if (Op0Ty->isInteger() == ElTy->isInteger() &&
Op0Ty->isFloatingPoint() == ElTy->isFloatingPoint())
return ExpressionConvertibleToType(I->getOperand(0), ElTy, CTMap, TD);
}
return false;
}
case Instruction::PHI: {
PHINode *PN = cast<PHINode>(I);
// Be conservative if we find a giant PHI node.
if (PN->getNumIncomingValues() > 32) return false;
for (unsigned i = 0; i < PN->getNumIncomingValues(); ++i)
if (!ExpressionConvertibleToType(PN->getIncomingValue(i), Ty, CTMap, TD))
return false;
return ValueConvertibleToType(PN, Ty, CTMap, TD);
}
case Instruction::Call: {
User::op_iterator OI = std::find(I->op_begin(), I->op_end(), V);
assert (OI != I->op_end() && "Not using value!");
unsigned OpNum = OI - I->op_begin();
// Are we trying to change the function pointer value to a new type?
if (OpNum == 0) {
const PointerType *PTy = dyn_cast<PointerType>(Ty);
if (PTy == 0) return false; // Can't convert to a non-pointer type...
const FunctionType *FTy = dyn_cast<FunctionType>(PTy->getElementType());
if (FTy == 0) return false; // Can't convert to a non ptr to function...
// Do not allow converting to a call where all of the operands are ...'s
if (FTy->getNumParams() == 0 && FTy->isVarArg())
return false; // Do not permit this conversion!
// Perform sanity checks to make sure that new function type has the
// correct number of arguments...
//
unsigned NumArgs = I->getNumOperands()-1; // Don't include function ptr
// Cannot convert to a type that requires more fixed arguments than
// the call provides...
//
if (NumArgs < FTy->getNumParams()) return false;
// Unless this is a vararg function type, we cannot provide more arguments
// than are desired...
//
if (!FTy->isVarArg() && NumArgs > FTy->getNumParams())
return false;
// Okay, at this point, we know that the call and the function type match
// number of arguments. Now we see if we can convert the arguments
// themselves. Note that we do not require operands to be convertible,
// we can insert casts if they are convertible but not compatible. The
// reason for this is that we prefer to have resolved functions but casted
// arguments if possible.
//
for (unsigned i = 0, NA = FTy->getNumParams(); i < NA; ++i)
if (FTy->getParamType(i) != I->getOperand(i+1)->getType())
return false; // Operands must have compatible types!
// Okay, at this point, we know that all of the arguments can be
// converted. We succeed if we can change the return type if
// necessary...
//
return ValueConvertibleToType(I, FTy->getReturnType(), CTMap, TD);
}
const PointerType *MPtr = cast<PointerType>(I->getOperand(0)->getType());
const FunctionType *FTy = cast<FunctionType>(MPtr->getElementType());
if (!FTy->isVarArg()) return false;
if ((OpNum-1) < FTy->getNumParams())
return false; // It's not in the varargs section...
// If we get this far, we know the value is in the varargs section of the
// function! We can convert if we don't reinterpret the value...
//
return isa<PointerType>(Ty) && isa<PointerType>(V->getType());
}
}
return false;
}
// ConvertValueToNewType - Rewrite every user of V so that it consumes NewVal
// instead.  Converting a user may delete it (or other users), so instead of
// walking a live iterator we index into a freshly re-read use list and only
// advance when the conversion left the list size unchanged.
void llvm::ConvertValueToNewType(Value *V, Value *NewVal, ValueMapCache &VMC,
                                 const TargetData &TD) {
  ValueHandle KeepAlive(VMC, V); // Pin V so conversions cannot delete it.

  // FIXME: This is horrible!
  unsigned Idx = 0;
  unsigned UseCount = V->getNumUses();
  while (Idx < UseCount) {
    const unsigned CountBefore = UseCount;
    Value::use_iterator UI = V->use_begin();
    std::advance(UI, Idx);
    ConvertOperandToType(*UI, V, NewVal, VMC, TD);
    UseCount = V->getNumUses();
    if (UseCount == CountBefore)
      ++Idx; // Use at Idx survived; step past it.
  }
}
// ConvertOperandToType - Rewrite user U (which must be an Instruction) so
// that its operand OldVal is replaced by NewVal, which may have a different
// type.  A replacement instruction is synthesized, inserted before the old
// one, registered in VMC, and the old instruction's uses are forwarded to
// it.  Feasibility must have been established by OperandConvertibleToType
// (the default case asserts otherwise).
static void ConvertOperandToType(User *U, Value *OldVal, Value *NewVal,
ValueMapCache &VMC, const TargetData &TD) {
if (isa<ValueHandle>(U)) return; // Valuehandles don't let go of operands...
// Skip users that have already been (or are being) rewritten.
if (VMC.OperandsMapped.count(U)) return;
VMC.OperandsMapped.insert(U);
ValueMapCache::ExprMapTy::iterator VMCI = VMC.ExprMap.find(U);
if (VMCI != VMC.ExprMap.end())
return;
Instruction *I = cast<Instruction>(U); // Only Instructions convertible
BasicBlock *BB = I->getParent();
assert(BB != 0 && "Instruction not embedded in basic block!");
// Steal the name for the replacement instruction.
std::string Name = I->getName();
I->setName("");
Instruction *Res = 0; // Result of conversion
//cerr << endl << endl << "Type:\t" << Ty << "\nInst: " << I
// << "BB Before: " << BB << endl;
// Prevent I from being removed...
ValueHandle IHandle(VMC, I);
const Type *NewTy = NewVal->getType();
// Dummy is a placeholder operand of the new type, swapped for real
// operands once the replacement is registered in the expression map.
Constant *Dummy = (NewTy != Type::VoidTy) ?
Constant::getNullValue(NewTy) : 0;
switch (I->getOpcode()) {
case Instruction::BitCast: {
Instruction::CastOps opcode = CastInst::getCastOpcode(NewVal, false,
I->getType(), false);
Res = CastInst::create(opcode, NewVal, I->getType(), Name);
break;
}
case Instruction::Add:
case Instruction::Sub: {
Res = BinaryOperator::create(cast<BinaryOperator>(I)->getOpcode(),
Dummy, Dummy, Name);
VMC.ExprMap[I] = Res; // Add node to expression eagerly
// Plug NewVal into OldVal's slot and convert the remaining operand.
unsigned OtherIdx = (OldVal == I->getOperand(0)) ? 1 : 0;
Value *OtherOp = I->getOperand(OtherIdx);
Res->setOperand(!OtherIdx, NewVal);
Value *NewOther = ConvertExpressionToType(OtherOp, NewTy, VMC, TD);
Res->setOperand(OtherIdx, NewOther);
break;
}
case Instruction::ICmp: {
// Only EQ/NE are rewritten, mirroring OperandConvertibleToType.
ICmpInst::Predicate pred = cast<ICmpInst>(I)->getPredicate();
if (pred == ICmpInst::ICMP_EQ || pred == ICmpInst::ICMP_NE) {
Res = new ICmpInst(pred, Dummy, Dummy, Name);
VMC.ExprMap[I] = Res; // Add node to expression eagerly
unsigned OtherIdx = (OldVal == I->getOperand(0)) ? 1 : 0;
Value *OtherOp = I->getOperand(OtherIdx);
Res->setOperand(!OtherIdx, NewVal);
Value *NewOther = ConvertExpressionToType(OtherOp, NewTy, VMC, TD);
Res->setOperand(OtherIdx, NewOther);
}
break;
}
case Instruction::Shl:
case Instruction::LShr:
case Instruction::AShr:
assert(I->getOperand(0) == OldVal);
Res = BinaryOperator::create(cast<BinaryOperator>(I)->getOpcode(), NewVal,
I->getOperand(1), Name);
break;
case Instruction::Free: // Free can free any pointer type!
assert(I->getOperand(0) == OldVal);
Res = new FreeInst(NewVal);
break;
case Instruction::Load: {
assert(I->getOperand(0) == OldVal && isa<PointerType>(NewVal->getType()));
const Type *LoadedTy =
cast<PointerType>(NewVal->getType())->getElementType();
Value *Src = NewVal;
// If the new pointer points at a composite, load its first leaf element
// instead (via a GEP), matching the convertibility check.
if (const CompositeType *CT = dyn_cast<CompositeType>(LoadedTy)) {
std::vector<Value*> Indices;
Indices.push_back(Constant::getNullValue(Type::Int32Ty));
unsigned Offset = 0; // No offset, get first leaf.
LoadedTy = getStructOffsetType(CT, Offset, Indices, TD, false);
assert(LoadedTy->isFirstClassType());
if (Indices.size() != 1) { // Do not generate load X, 0
// Insert the GEP instruction before this load.
Src = new GetElementPtrInst(Src, Indices, Name+".idx", I);
}
}
Res = new LoadInst(Src, Name);
assert(Res->getType()->isFirstClassType() && "Load of structure or array!");
break;
}
case Instruction::Store: {
if (I->getOperand(0) == OldVal) { // Replace the source value
// Check to see if operand #1 has already been converted...
ValueMapCache::ExprMapTy::iterator VMCI =
VMC.ExprMap.find(I->getOperand(1));
if (VMCI != VMC.ExprMap.end()) {
// Comments describing this stuff are in the OperandConvertibleToType
// switch statement for Store...
//
const Type *ElTy =
cast<PointerType>(VMCI->second->getType())->getElementType();
Value *SrcPtr = VMCI->second;
if (ElTy != NewTy) {
std::vector<Value*> Indices;
Indices.push_back(Constant::getNullValue(Type::Int32Ty));
unsigned Offset = 0;
const Type *Ty = getStructOffsetType(ElTy, Offset, Indices, TD,false);
assert(Offset == 0 && "Offset changed!");
assert(NewTy == Ty && "Did not convert to correct type!");
// Insert the GEP instruction before this store.
SrcPtr = new GetElementPtrInst(SrcPtr, Indices,
SrcPtr->getName()+".idx", I);
}
Res = new StoreInst(NewVal, SrcPtr);
VMC.ExprMap[I] = Res;
} else {
// Otherwise, we haven't converted Operand #1 over yet...
const PointerType *NewPT = PointerType::get(NewTy);
Res = new StoreInst(NewVal, Constant::getNullValue(NewPT));
VMC.ExprMap[I] = Res;
Res->setOperand(1, ConvertExpressionToType(I->getOperand(1),
NewPT, VMC, TD));
}
} else { // Replace the source pointer
const Type *ValTy = cast<PointerType>(NewTy)->getElementType();
Value *SrcPtr = NewVal;
if (isa<StructType>(ValTy)) {
std::vector<Value*> Indices;
Indices.push_back(Constant::getNullValue(Type::Int32Ty));
unsigned Offset = 0;
ValTy = getStructOffsetType(ValTy, Offset, Indices, TD, false);
assert(Offset == 0 && ValTy);
// Insert the GEP instruction before this store.
SrcPtr = new GetElementPtrInst(SrcPtr, Indices,
SrcPtr->getName()+".idx", I);
}
Res = new StoreInst(Constant::getNullValue(ValTy), SrcPtr);
VMC.ExprMap[I] = Res;
Res->setOperand(0, ConvertExpressionToType(I->getOperand(0),
ValTy, VMC, TD));
}
break;
}
case Instruction::PHI: {
PHINode *OldPN = cast<PHINode>(I);
PHINode *NewPN = new PHINode(NewTy, Name);
VMC.ExprMap[I] = NewPN;
// Drain the old PHI edge by edge, converting each incoming value.
// OldValHandle keeps the detached value alive across the conversion.
while (OldPN->getNumOperands()) {
BasicBlock *BB = OldPN->getIncomingBlock(0);
Value *OldVal = OldPN->getIncomingValue(0);
ValueHandle OldValHandle(VMC, OldVal);
OldPN->removeIncomingValue(BB, false);
Value *V = ConvertExpressionToType(OldVal, NewTy, VMC, TD);
NewPN->addIncoming(V, BB);
}
Res = NewPN;
break;
}
case Instruction::Call: {
Value *Meth = I->getOperand(0);
std::vector<Value*> Params(I->op_begin()+1, I->op_end());
if (Meth == OldVal) { // Changing the function pointer?
const PointerType *NewPTy = cast<PointerType>(NewVal->getType());
const FunctionType *NewTy = cast<FunctionType>(NewPTy->getElementType());
if (NewTy->getReturnType() == Type::VoidTy)
Name = ""; // Make sure not to name a void call!
// Get an iterator to the call instruction so that we can insert casts for
// operands if need be. Note that we do not require operands to be
// convertible, we can insert casts if they are convertible but not
// compatible. The reason for this is that we prefer to have resolved
// functions but casted arguments if possible.
//
BasicBlock::iterator It = I;
// Convert over all of the call operands to their new types... but only
// convert over the part that is not in the vararg section of the call.
//
for (unsigned i = 0; i != NewTy->getNumParams(); ++i)
if (Params[i]->getType() != NewTy->getParamType(i)) {
// Create a cast to convert it to the right type, we know that this
// is a no-op cast...
//
Params[i] = new BitCastInst(Params[i], NewTy->getParamType(i),
"callarg.cast." +
Params[i]->getName(), It);
}
Meth = NewVal; // Update call destination to new value
} else { // Changing an argument, must be in vararg area
std::vector<Value*>::iterator OI =
std::find(Params.begin(), Params.end(), OldVal);
assert (OI != Params.end() && "Not using value!");
*OI = NewVal;
}
Res = new CallInst(Meth, Params, Name);
if (cast<CallInst>(I)->isTailCall())
cast<CallInst>(Res)->setTailCall();
cast<CallInst>(Res)->setCallingConv(cast<CallInst>(I)->getCallingConv());
break;
}
default:
assert(0 && "Expression convertible, but don't know how to convert?");
return;
}
assert(Res != 0 && "We didn't get a result conversion?");
// If the instruction was newly created, insert it into the instruction
// stream.
//
BasicBlock::iterator It = I;
assert(It != BB->end() && "Instruction not in own basic block??");
BB->getInstList().insert(It, Res); // Keep It pointing to old instruction
DOUT << "COT CREATED: " << (void*)Res << " " << *Res
<< "In: " << (void*)I << " " << *I << "Out: " << (void*)Res
<< " " << *Res;
// Add the instruction to the expression map
VMC.ExprMap[I] = Res;
// Forward the old instruction's remaining uses to the replacement.  If the
// types differ the users themselves must be converted; otherwise a simple
// use-by-use rewrite suffices (ValueHandles are left holding the old value
// so its cleanup logic still fires).
if (I->getType() != Res->getType())
ConvertValueToNewType(I, Res, VMC, TD);
else {
for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
UI != E; )
if (isa<ValueHandle>(*UI)) {
++UI;
} else {
Use &U = UI.getUse();
++UI; // Do not invalidate UI.
U.set(Res);
}
}
}
// ValueHandle - Take an artificial "use" of V for the lifetime of the handle,
// keeping V alive while a conversion is in flight.  The handle is modeled as
// a fake UserOp1 instruction so it sits on V's use list like any real user.
ValueHandle::ValueHandle(ValueMapCache &VMC, Value *V)
  : Instruction(Type::VoidTy, UserOp1, &Op, 1, ""), Op(V, this), Cache(VMC) {
  //DOUT << "VH ACQUIRING: " << (void*)V << " " << V;
}
// Copy constructor - the copy takes its own extra use of the wrapped value,
// but shares the same ValueMapCache as the handle it was copied from.
ValueHandle::ValueHandle(const ValueHandle &VH)
  : Instruction(Type::VoidTy, UserOp1, &Op, 1, ""),
    Op(VH.Op, this), Cache(VH.Cache) {
  //DOUT << "VH ACQUIRING: " << (void*)Op.get() << " " << Op.get();
}
// RecursiveDelete - Delete the dead instruction I, then recursively delete
// any operands that become dead as a result.  An instruction is only deleted
// once it has no remaining uses; every deleted instruction is also purged
// from the conversion caches so stale pointers are never consulted again.
static void RecursiveDelete(ValueMapCache &Cache, Instruction *I) {
  if (!I || !I->use_empty()) return;   // Null, or still referenced: keep it.

  assert(I->getParent() && "Inst not in basic block!");

  //DOUT << "VH DELETING: " << (void*)I << " " << I;

  // Null out each instruction operand first, so operands kept alive only by
  // this instruction become use-free and can themselves be deleted.
  for (User::op_iterator OI = I->op_begin(), OE = I->op_end();
       OI != OE; ++OI)
    if (Instruction *U = dyn_cast<Instruction>(OI)) {
      *OI = 0;                         // Drop our use of the operand...
      RecursiveDelete(Cache, U);
    }

  // Unlink from the basic block before freeing, and scrub the caches.
  I->getParent()->getInstList().remove(I);

  Cache.OperandsMapped.erase(I);
  Cache.ExprMap.erase(I);
  delete I;
}
// ~ValueHandle - Release the handle's use of the wrapped value.  If this
// handle held the *last* use, the value is now dead: delete it (and,
// transitively, its newly-dead operands) here, because ordinary DCE refuses
// to remove some instruction kinds (e.g. stores).
ValueHandle::~ValueHandle() {
  if (Op->hasOneUse()) {               // Our use is the only one remaining.
    Value *V = Op;
    Op.set(0);   // Drop use!  Must happen *before* the delete below so that
                 // RecursiveDelete sees the value as use-free.

    // Now we just need to remove the old instruction so we don't get infinite
    // loops.  Note that we cannot use DCE because DCE won't remove a store
    // instruction, for example.
    //
    RecursiveDelete(Cache, dyn_cast<Instruction>(V));
  } else {
    //DOUT << "VH RELEASING: " << (void*)Operands[0].get() << " "
    //     << Operands[0]->getNumUses() << " " << Operands[0];
  }
}

View File

@ -1,452 +0,0 @@
//===- LevelRaise.cpp - Code to change LLVM to higher level ---------------===//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the 'raising' part of the LevelChange API. This is
// useful because, in general, it makes the LLVM code terser and easier to
// analyze.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "raise"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include "TransformInternals.h"
#include "llvm/Instructions.h"
#include "llvm/Pass.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
using namespace llvm;
// StartInst - This enables the -raise-start-inst=foo option to cause the level
// raising pass to start at instruction "foo", which is immensely useful for
// debugging!
//
static cl::opt<std::string>
StartInst("raise-start-inst", cl::Hidden, cl::value_desc("inst name"),
       cl::desc("Start raise pass at the instruction with the specified name"));

// Pass statistics, reported by opt's -stats flag.
STATISTIC(NumLoadStorePeepholes, "Number of load/store peepholes");
STATISTIC(NumGEPInstFormed,      "Number of other getelementptr's formed");
STATISTIC(NumExprTreesConv,      "Number of expression trees converted");
STATISTIC(NumCastOfCast,         "Number of cast-of-self removed");
STATISTIC(NumDCEorCP,            "Number of insts DCEd or constprop'd");
STATISTIC(NumVarargCallChanges,  "Number of vararg call peepholes");

// PRINT_PEEPHOLE* - Debug-stream helpers that dump the 1..4 instructions
// involved in a peephole, tagged with the peephole's string ID and each
// value's position within it.
#define PRINT_PEEPHOLE(ID, NUM, I) \
  DOUT << "Inst P/H " << ID << "[" << NUM << "] " << I

#define PRINT_PEEPHOLE1(ID, I1) do { PRINT_PEEPHOLE(ID, 0, I1); } while (0)
#define PRINT_PEEPHOLE2(ID, I1, I2) \
  do { PRINT_PEEPHOLE(ID, 0, I1); PRINT_PEEPHOLE(ID, 1, I2); } while (0)
#define PRINT_PEEPHOLE3(ID, I1, I2, I3) \
  do { PRINT_PEEPHOLE(ID, 0, I1); PRINT_PEEPHOLE(ID, 1, I2); \
       PRINT_PEEPHOLE(ID, 2, I3); } while (0)
#define PRINT_PEEPHOLE4(ID, I1, I2, I3, I4) \
  do { PRINT_PEEPHOLE(ID, 0, I1); PRINT_PEEPHOLE(ID, 1, I2); \
       PRINT_PEEPHOLE(ID, 2, I3); PRINT_PEEPHOLE(ID, 3, I4); } while (0)
namespace {
  // RPR - The "Raise Pointer References" function pass.  Repeatedly applies
  // local peepholes and expression-tree type conversion until the function
  // reaches a fixed point.
  struct RPR : public FunctionPass {
    virtual bool runOnFunction(Function &F);

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();          // Only rewrites instructions in place.
      AU.addRequired<TargetData>();  // Needed for struct-layout queries.
    }

  private:
    bool DoRaisePass(Function &F);
    bool PeepholeOptimize(BasicBlock *BB, BasicBlock::iterator &BI);
  };

  // Register under the -raise command-line name.
  RegisterPass<RPR> X("raise", "Raise Pointer References");
}

// createRaisePointerReferencesPass - Public factory for the pass.
FunctionPass *llvm::createRaisePointerReferencesPass() {
  return new RPR();
}
// PeepholeOptimize - Attempt to apply one pointer-raising peephole to the
// instruction at BI.  Returns true if a transformation was performed, in
// which case BI has been updated (possibly reset to the start of the block
// when iterators may have been invalidated).  Returns false if the
// instruction did not match any peephole.
bool RPR::PeepholeOptimize(BasicBlock *BB, BasicBlock::iterator &BI) {
  Instruction *I = BI;
  const TargetData &TD = getAnalysis<TargetData>();

  if (CastInst *CI = dyn_cast<CastInst>(I)) {
    Value *Src = CI->getOperand(0);
    const Type *DestTy = CI->getType();

    // Peephole optimize the following instruction:
    // %V2 = cast <ty> %V to <ty>
    //
    // Into: <nothing>
    //
    if (DestTy == Src->getType()) { // Check for a cast to same type as src!!
      PRINT_PEEPHOLE1("cast-of-self-ty", *CI);
      CI->replaceAllUsesWith(Src);
      if (!Src->hasName() && CI->hasName()) {
        // Transfer the cast's name onto the (previously unnamed) source.
        std::string Name = CI->getName();
        CI->setName("");
        Src->setName(Name);
      }

      // DCE the instruction now, to avoid having the iterative version of DCE
      // have to worry about it.
      //
      BI = BB->getInstList().erase(BI);

      ++NumCastOfCast;
      return true;
    }

    // Check to see if it's a cast of an instruction that does not depend on the
    // specific type of the operands to do it's job.
    if (CI->isLosslessCast()) {
      ValueTypeCache ConvertedTypes;

      // Check to see if we can convert the source of the cast to match the
      // destination type of the cast...
      //
      ConvertedTypes[CI] = CI->getType();  // Make sure the cast doesn't change
      if (ExpressionConvertibleToType(Src, DestTy, ConvertedTypes, TD)) {
        PRINT_PEEPHOLE3("CAST-SRC-EXPR-CONV:in ", *Src, *CI, *BB->getParent());

        DOUT << "\nCONVERTING SRC EXPR TYPE:\n";
        { // ValueMap must be destroyed before function verified!
          ValueMapCache ValueMap;
          Value *E = ConvertExpressionToType(Src, DestTy, ValueMap, TD);

          if (Constant *CPV = dyn_cast<Constant>(E))
            CI->replaceAllUsesWith(CPV);

          PRINT_PEEPHOLE1("CAST-SRC-EXPR-CONV:out", *E);
          DOUT << "DONE CONVERTING SRC EXPR TYPE: \n"
               << *BB->getParent();
        }

        BI = BB->begin();  // Rescan basic block.  BI might be invalidated.
        ++NumExprTreesConv;
        return true;
      }

      // Check to see if we can convert the users of the cast value to match the
      // source type of the cast...
      //
      ConvertedTypes.clear();
      // Make sure the source doesn't change type
      ConvertedTypes[Src] = Src->getType();
      if (ValueConvertibleToType(CI, Src->getType(), ConvertedTypes, TD)) {
        //PRINT_PEEPHOLE3("CAST-DEST-EXPR-CONV:in ", *Src, *CI,
        //                *BB->getParent());

        DOUT << "\nCONVERTING EXPR TYPE:\n";
        { // ValueMap must be destroyed before function verified!
          ValueMapCache ValueMap;
          ConvertValueToNewType(CI, Src, ValueMap, TD);  // This will delete CI!
        }

        PRINT_PEEPHOLE1("CAST-DEST-EXPR-CONV:out", *Src);
        DOUT << "DONE CONVERTING EXPR TYPE: \n\n" << *BB->getParent();

        BI = BB->begin();  // Rescan basic block.  BI might be invalidated.
        ++NumExprTreesConv;
        return true;
      }
    }

    // Check to see if we are casting from a structure pointer to a pointer to
    // the first element of the structure... to avoid munching other peepholes,
    // we only let this happen if there are no add uses of the cast.
    //
    // Peephole optimize the following instructions:
    // %t1 = cast {<...>} * %StructPtr to <ty> *
    //
    // Into: %t2 = getelementptr {<...>} * %StructPtr, <0, 0, 0, ...>
    //       %t1 = cast <eltype> * %t1 to <ty> *
    //
    if (const CompositeType *CTy = getPointedToComposite(Src->getType()))
      if (const PointerType *DestPTy = dyn_cast<PointerType>(DestTy)) {

        // Loop over uses of the cast, checking for add instructions.  If an add
        // exists, this is probably a part of a more complex GEP, so we don't
        // want to mess around with the cast.
        //
        bool HasAddUse = false;
        for (Value::use_iterator I = CI->use_begin(), E = CI->use_end();
             I != E; ++I)
          if (isa<Instruction>(*I) &&
              cast<Instruction>(*I)->getOpcode() == Instruction::Add) {
            HasAddUse = true; break;
          }

        // If it doesn't have an add use, check to see if the dest type is
        // losslessly convertible to one of the types in the start of the struct
        // type.
        //
        if (!HasAddUse) {
          const Type *DestPointedTy = DestPTy->getElementType();
          unsigned Depth = 1;
          const CompositeType *CurCTy = CTy;
          const Type *ElTy = 0;

          // Build the index vector, full of all zeros
          std::vector<Value*> Indices;

          Indices.push_back(Constant::getNullValue(Type::Int32Ty));

          // Walk down the leading members of nested composites until one is
          // losslessly convertible to the destination's pointee type.
          while (CurCTy && !isa<PointerType>(CurCTy)) {
            if (const StructType *CurSTy = dyn_cast<StructType>(CurCTy)) {
              // Check for a zero element struct type... if we have one, bail.
              if (CurSTy->getNumElements() == 0) break;

              // Grab the first element of the struct type, which must lie at
              // offset zero in the struct.
              //
              ElTy = CurSTy->getElementType(0);
            } else {
              ElTy = cast<SequentialType>(CurCTy)->getElementType();
            }

            // Insert a zero to index through this type...
            Indices.push_back(Constant::getNullValue(Type::Int32Ty));

            // Did we find what we're looking for?
            if (ElTy->canLosslesslyBitCastTo(DestPointedTy)) break;

            // Nope, go a level deeper.
            ++Depth;
            CurCTy = dyn_cast<CompositeType>(ElTy);
            ElTy = 0;
          }

          // Did we find what we were looking for? If so, do the transformation
          if (ElTy) {
            PRINT_PEEPHOLE1("cast-for-first:in", *CI);

            std::string Name = CI->getName(); CI->setName("");

            // Insert the new T cast instruction... stealing old T's name
            GetElementPtrInst *GEP = new GetElementPtrInst(Src, Indices,
                                                           Name, BI);

            // Make the old cast instruction reference the new GEP instead of
            // the old src value.
            if (CI->getOperand(0)->getType() == GEP->getType()) {
              // If the source types are the same we can safely replace the
              // first operand of the CastInst because the opcode won't
              // change as a result.
              CI->setOperand(0, GEP);
            } else {
              // The existing and new operand 0 types are different so we must
              // replace CI with a new CastInst so that we are assured to
              // get the correct cast opcode.
              CastInst *NewCI = new BitCastInst(GEP, CI->getType(),
                                                CI->getName(), CI);
              CI->replaceAllUsesWith(NewCI);
              CI->eraseFromParent();
              CI = NewCI;
              BI = NewCI; // Don't let the iterator invalidate
            }

            PRINT_PEEPHOLE2("cast-for-first:out", *GEP, *CI);
            ++NumGEPInstFormed;
            return true;
          }
        }
      }

  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    Value *Val     = SI->getOperand(0);
    Value *Pointer = SI->getPointerOperand();

    // Peephole optimize the following instructions:
    // %t = cast <T1>* %P to <T2> * ;; If T1 is losslessly castable to T2
    // store <T2> %V, <T2>* %t
    //
    // Into:
    // %t = cast <T2> %V to <T1>
    // store <T1> %t2, <T1>* %P
    //
    // Note: This is not taken care of by expr conversion because there might
    // not be a cast available for the store to convert the incoming value of.
    // This code is basically here to make sure that pointers don't have casts
    // if possible.
    //
    if (CastInst *CI = dyn_cast<CastInst>(Pointer))
      if (Value *CastSrc = CI->getOperand(0)) // CSPT = CastSrcPointerType
        if (const PointerType *CSPT = dyn_cast<PointerType>(CastSrc->getType()))
          // convertible types?
          if (Val->getType()->canLosslesslyBitCastTo(CSPT->getElementType())) {
            PRINT_PEEPHOLE3("st-src-cast:in ", *Pointer, *Val, *SI);

            // Insert the new T cast instruction... stealing old T's name
            std::string Name(CI->getName()); CI->setName("");
            CastInst *NCI = CastInst::create(Instruction::BitCast, Val,
                                             CSPT->getElementType(), Name, BI);

            // Replace the old store with a new one!
            ReplaceInstWithInst(BB->getInstList(), BI,
                                SI = new StoreInst(NCI, CastSrc));
            PRINT_PEEPHOLE3("st-src-cast:out", *NCI, *CastSrc, *SI);
            ++NumLoadStorePeepholes;
            return true;
          }

  } else if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    Value *Pointer = LI->getOperand(0);
    const Type *PtrElType =
      cast<PointerType>(Pointer->getType())->getElementType();

    // Peephole optimize the following instructions:
    // %Val = cast <T1>* to <T2>* ;; If T1 is losslessly convertible to T2
    // %t = load <T2>* %P
    //
    // Into:
    // %t = load <T1>* %P
    // %Val = cast <T1> to <T2>
    //
    // Note: This is not taken care of by expr conversion because there might
    // not be a cast available for the store to convert the incoming value of.
    // This code is basically here to make sure that pointers don't have casts
    // if possible.
    //
    if (CastInst *CI = dyn_cast<CastInst>(Pointer))
      if (Value *CastSrc = CI->getOperand(0)) // CSPT = CastSrcPointerType
        if (const PointerType *CSPT = dyn_cast<PointerType>(CastSrc->getType()))
          // convertible types?
          if (PtrElType->canLosslesslyBitCastTo(CSPT->getElementType())) {
            PRINT_PEEPHOLE2("load-src-cast:in ", *Pointer, *LI);

            // Create the new load instruction... loading the pre-casted value
            LoadInst *NewLI = new LoadInst(CastSrc, LI->getName(), BI);

            // Insert the new T cast instruction... stealing old T's name
            CastInst *NCI =
              CastInst::create(Instruction::BitCast, NewLI, LI->getType(),
                               CI->getName());

            // Replace the old store with a new one!
            ReplaceInstWithInst(BB->getInstList(), BI, NCI);
            PRINT_PEEPHOLE3("load-src-cast:out", *NCI, *CastSrc, *NewLI);
            ++NumLoadStorePeepholes;
            return true;
          }

  } else if (CallInst *CI = dyn_cast<CallInst>(I)) {
    // If we have a call with all varargs arguments, convert the call to use the
    // actual argument types present...
    //
    const PointerType *PTy = cast<PointerType>(CI->getCalledValue()->getType());
    const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());

    // Is the call to a vararg variable with no real parameters?
    if (FTy->isVarArg() && FTy->getNumParams() == 0 &&
        !CI->getCalledFunction()) {

      // If so, insert a new cast instruction, casting it to a function type
      // that matches the current arguments...
      //
      std::vector<const Type *> Params;  // Parameter types...
      // Operand 0 is the callee itself; actual arguments start at operand 1.
      for (unsigned i = 1, e = CI->getNumOperands(); i != e; ++i)
        Params.push_back(CI->getOperand(i)->getType());

      FunctionType *NewFT = FunctionType::get(FTy->getReturnType(),
                                              Params, false);
      PointerType *NewPFunTy = PointerType::get(NewFT);

      // Create a new cast, inserting it right before the function call...
      Value *NewCast;
      if (Constant *CS = dyn_cast<Constant>(CI->getCalledValue()))
        NewCast = ConstantExpr::getBitCast(CS, NewPFunTy);
      else
        NewCast = CastInst::create(Instruction::BitCast, CI->getCalledValue(),
                                   NewPFunTy,
                                   CI->getCalledValue()->getName()+"_c", CI);

      // Create a new call instruction...
      CallInst *NewCall = new CallInst(NewCast,
                         std::vector<Value*>(CI->op_begin()+1, CI->op_end()));
      if (CI->isTailCall()) NewCall->setTailCall();
      NewCall->setCallingConv(CI->getCallingConv());
      ++BI;   // Advance past CI before it is replaced and destroyed.
      ReplaceInstWithInst(CI, NewCall);

      ++NumVarargCallChanges;
      return true;
    }
  }

  return false;
}
// DoRaisePass - Make one sweep over every instruction in F, first trying
// trivial DCE/constant-propagation and then the raising peepholes.  Returns
// true if anything at all was changed.  The iterator is advanced manually:
// a successful simplification or peephole repositions BI itself.
bool RPR::DoRaisePass(Function &F) {
  bool MadeChange = false;

  for (Function::iterator BB = F.begin(), BBE = F.end(); BB != BBE; ++BB) {
    BasicBlock::iterator BI = BB->begin();
    while (BI != BB->end()) {
      DOUT << "LevelRaising: " << *BI;
      if (dceInstruction(BI) || doConstantPropagation(BI)) {
        ++NumDCEorCP;
        MadeChange = true;
        DOUT << "***\t\t^^-- Dead code eliminated!\n";
      } else if (PeepholeOptimize(BB, BI)) {
        MadeChange = true;
      } else {
        ++BI;   // Only step forward when nothing was erased or replaced.
      }
    }
  }

  return MadeChange;
}
// runOnFunction - Raise a function representation to a higher level.
// Repeats DoRaisePass until a full sweep makes no change (fixed point).
bool RPR::runOnFunction(Function &F) {
  DOUT << "\n\n\nStarting to work on Function '" << F.getName() << "'\n";

  // Insert casts for all incoming pointer pointer values that are treated as
  // arrays...
  //
  bool Changed = false, LocalChange;

  // If the StartInst option was specified, then Peephole optimize that
  // instruction first if it occurs in this function.
  //
  if (!StartInst.empty()) {
    for (Function::iterator BB = F.begin(), BBE = F.end(); BB != BBE; ++BB)
      for (BasicBlock::iterator BI = BB->begin(); BI != BB->end(); ++BI)
        if (BI->getName() == StartInst) {
          bool SavedDebug = DebugFlag;  // Save the DEBUG() controlling flag.
          DebugFlag = true;             // Turn on DEBUG's
          Changed |= PeepholeOptimize(BB, BI);
          DebugFlag = SavedDebug;       // Restore DebugFlag to previous state
        }
  }

  do {
    DOUT << "Looping: \n" << F;

    // Iterate over the function, refining it, until it converges on a stable
    // state
    LocalChange = false;
    while (DoRaisePass(F)) LocalChange = true;
    Changed |= LocalChange;

  } while (LocalChange);

  return Changed;
}

View File

@ -9,8 +9,6 @@
# LEVEL points at the LLVM top-level source directory (two levels up).
LEVEL = ../..
# Sub-libraries of the Transforms component; independent, so built in parallel.
PARALLEL_DIRS = Utils Instrumentation Scalar IPO
LIBRARYNAME = LLVMTransforms
BUILD_ARCHIVE = 1
include $(LEVEL)/Makefile.common

View File

@ -1,92 +0,0 @@
//===- TransformInternals.cpp - Implement shared functions for transforms -===//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines shared functions used by the different components of the
// Transforms library.
//
//===----------------------------------------------------------------------===//
#include "TransformInternals.h"
#include "llvm/Type.h"
#include "llvm/Function.h"
#include "llvm/Instructions.h"
using namespace llvm;
// getStructOffsetStep - Given a byte Offset that falls inside struct STy,
// locate the member that contains that offset using the target's struct
// layout.  The member's index is appended to Indices, Offset is rounded down
// to the member's starting offset, and the member's type is returned.
static const Type *getStructOffsetStep(const StructType *STy, uint64_t &Offset,
                                       std::vector<Value*> &Indices,
                                       const TargetData &TD) {
  assert(Offset < TD.getTypeSize(STy) && "Offset not in composite!");
  const StructLayout *SL = TD.getStructLayout(STy);

  // This loop terminates always on a 0 <= i < MemberOffsets.size()
  unsigned i;
  for (i = 0; i < SL->MemberOffsets.size()-1; ++i)
    if (Offset >= SL->MemberOffsets[i] && Offset < SL->MemberOffsets[i+1])
      break;

  assert(Offset >= SL->MemberOffsets[i] &&
         (i == SL->MemberOffsets.size()-1 || Offset < SL->MemberOffsets[i+1]));

  // Make sure to save the current index...
  Indices.push_back(ConstantInt::get(Type::Int32Ty, i));
  Offset = SL->MemberOffsets[i];
  return STy->getContainedType(i);
}
// getStructOffsetType - Return a vector of offsets that are to be used to index
// into the specified struct type to get as close as possible to index as we
// can.  Note that it is possible that we cannot get exactly to Offset, in which
// case we update offset to be the offset we actually obtained.  The resultant
// leaf type is returned.
//
// If StopEarly is set to true (the default), the first object with the
// specified type is returned, even if it is a struct type itself.  In this
// case, this routine will not drill down to the leaf type.  Set StopEarly to
// false if you want a leaf.
//
const Type *llvm::getStructOffsetType(const Type *Ty, unsigned &Offset,
                                      std::vector<Value*> &Indices,
                                      const TargetData &TD, bool StopEarly) {
  // Exactly at the start of an object and allowed to stop early (and we have
  // already produced at least one index): this is the object we want.
  if (Offset == 0 && StopEarly && !Indices.empty())
    return Ty;    // Return the leaf type

  uint64_t ThisOffset;
  const Type *NextType;
  if (const StructType *STy = dyn_cast<StructType>(Ty)) {
    // BUGFIX: the guard previously read 'if (STy->getNumElements())', which
    // bailed out for every NON-empty struct and made the member descent
    // below unreachable.  Only an *empty* struct cannot be indexed into.
    if (STy->getNumElements() == 0) {
      Offset = 0;
      return STy;
    }

    // Descend into the member that contains Offset.
    ThisOffset = Offset;
    NextType = getStructOffsetStep(STy, ThisOffset, Indices, TD);
  } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    // BUGFIX: parenthesized the disjunction -- '&&' binds tighter than '||',
    // so the old assert effectively tested only the second comparison.
    assert((Offset == 0 || Offset < TD.getTypeSize(ATy)) &&
           "Offset not in composite!");

    NextType = ATy->getElementType();
    unsigned ChildSize = (unsigned)TD.getTypeSize(NextType);

    // Choose an index constant type wide enough to hold the element index.
    if (ConstantInt::isValueValidForType(Type::Int32Ty,
                                         uint64_t(Offset/ChildSize)))
      Indices.push_back(ConstantInt::get(Type::Int32Ty, Offset/ChildSize));
    else
      Indices.push_back(ConstantInt::get(Type::Int64Ty, Offset/ChildSize));
    ThisOffset = (Offset/ChildSize)*ChildSize;
  } else {
    Offset = 0; // Return the offset that we were able to achieve
    return Ty;  // Return the leaf type
  }

  // Recurse into the chosen member/element with the remaining offset, then
  // report back the absolute offset we actually reached.
  unsigned SubOffs = unsigned(Offset - ThisOffset);
  const Type *LeafTy = getStructOffsetType(NextType, SubOffs,
                                           Indices, TD, StopEarly);
  Offset = unsigned(ThisOffset + SubOffs);
  return LeafTy;
}

View File

@ -1,3 +0,0 @@
# Artifacts produced by the test harness runs; never commit these.
Output
*.log
*.sum

View File

@ -1,19 +0,0 @@
; An invalid assertion killed the level raiser. Fixed.
;
; RUN: llvm-upgrade < %s | llvm-as | opt -raise
; NOTE: pre-2.0 LLVM assembly; llvm-upgrade rewrites it before llvm-as parses.
implementation
declare int "connect_left"()
int "do_merge"()
begin
%reg108 = call int %connect_left( )
%cast1002 = cast ulong 8 to sbyte *
%reg108-idxcast = cast int %reg108 to long
%reg1000 = getelementptr sbyte * %cast1002, long %reg108-idxcast
%cast1003 = cast sbyte * %reg1000 to sbyte * *
%reg112 = load sbyte * * %cast1003
%cast111 = cast sbyte * %reg112 to int
ret int %cast111
end

View File

@ -1,15 +0,0 @@
; Fixed a problem where level raise would try to forward substitute a cast of a
; method pointer type into a call. In doing so, it would have to change the
; types of the arguments to the call, but broke doing so.
;
; RUN: llvm-upgrade < %s | llvm-as | opt -raise
; NOTE: crash regression test; the RUN line only checks -raise exits cleanly.
implementation
void "test"(void (int*) *%Fn, long* %Arg)
begin
%Fn2 = cast void (int*) *%Fn to void(long*) *
call void %Fn2(long *%Arg)
ret void
end

View File

@ -1,24 +0,0 @@
; This testcase found a bug in ConvertableToGEP that could cause an infinite loop
; Note that this code is actually miscompiled from the input source, but despite
; that, level raise should not hang!
;
; RUN: llvm-upgrade < %s | llvm-as | opt -raise
; NOTE: hang regression test -- passes as long as -raise terminates.
%Disjunct = type { \2 *, short, sbyte, sbyte *, { short, short, sbyte, sbyte, \2, sbyte * } *, { short, short, sbyte, sbyte, \2, sbyte * } * }
%chosen_disjuncts = uninitialized global %Disjunct * * ; <%Disjunct * * *> [#uses=1]
implementation
void "build_image_array"()
begin
bb0: ;[#uses=0]
%reg109 = getelementptr %Disjunct * * * %chosen_disjuncts, long 7 ; <%Disjunct * * *> [#uses=1]
%reg108 = load %Disjunct * * * %reg109 ; <%Disjunct * *> [#uses=1]
%reg1000 = getelementptr %Disjunct * * %reg108, long 3 ; <%Disjunct * *> [#uses=1]
%cast1007 = cast %Disjunct * * %reg1000 to sbyte * * ; <sbyte * *> [#uses=1]
%reg110 = load sbyte * * %cast1007 ; <sbyte *> [#uses=1]
%cast1008 = cast ulong 4 to sbyte * ; <sbyte *> [#uses=1]
%A = cast sbyte * %reg110 to ulong
%B = cast sbyte * %cast1008 to ulong
%reg1001 = add ulong %A, %B ; <sbyte *> [#uses=0]
ret void
end

View File

@ -1,20 +0,0 @@
; Level raise is making an incorrect transformation, which causes incorrect
; bytecode to be generated.
;
; RUN: llvm-upgrade < %s | llvm-as | opt -raise | llvm-dis
;
; NOTE: llvm-dis at the end verifies the raised module still round-trips.
%Village = type { [4 x \3 *], \2 *, { \2 *, { int, int, int, \5 * } *, \2 * }, { int, int, int, { \2 *, { int, int, int, \6 * } *, \2 * }, { \2 *, { int, int, int, \6 * } *, \2 * }, { \2 *, { int, int, int, \6 * } *, \2 * }, { \2 *, { int, int, int, \6 * } *, \2 * } }, int, int }
implementation
%Village *"get_results"(%Village * %village)
begin
bb0: ;[#uses=1]
%cast121 = cast int 24 to ulong ; <%Village *> [#uses=1]
%A = cast %Village* %village to ulong
%reg123 = add ulong %A, %cast121 ; <%Village *> [#uses=1]
%reg123 = cast ulong %reg123 to %Village*
%idx = getelementptr %Village * %reg123, long 0, uint 0, long 0 ; <%Village *> [#uses=1]
%reg118 = load %Village** %idx
ret %Village *%reg118
end

View File

@ -1,24 +0,0 @@
; This example should be raised to return a Hash directly without casting.
; LevelRaise should eliminate all cast instructions from this testcase.
;
; XFAIL: *
; RUN: llvm-upgrade < %s &&
; RUN: llvm-upgrade < %s | llvm-as > /dev/null &&
; RUN: llvm-upgrade < %s | llvm-as | opt -raise | llvm-dis | notcast
; NOTE: marked XFAIL -- documents a raising capability that was never achieved.
%Hash = type { { uint, sbyte *, \2 } * *, int (uint) *, int } *
%hash = type { { uint, sbyte *, \2 } * *, int (uint) *, int }
%hash_el = type { uint, sbyte *, \2 } *
implementation
%Hash "MakeHash"(int %size, int (uint) * %map)
begin
%reg112 = malloc sbyte, uint 24 ; <sbyte *> [#uses=5]
%reg115 = malloc sbyte, uint 96 ; <sbyte *> [#uses=1]
%cast237 = bitcast sbyte * %reg112 to sbyte * * ; <sbyte * *> [#uses=1]
store sbyte * %reg115, sbyte * * %cast237
%cast246 = bitcast sbyte * %reg112 to %Hash ; <%Hash> [#uses=1]
ret %Hash %cast246
end

View File

@ -1,29 +0,0 @@
; Problem that occured because of obsolete code that only allowed malloc
; instructions to change type if they were 'array' allocations. This
; prevented reg115 from being able to change.
;
; XFAIL: *
; RUN: llvm-upgrade < %s | llvm-as | opt -raise | llvm-dis | not grep bitcast
; NOTE: XFAIL'd -- expects -raise to eliminate every bitcast, which it cannot.
%Hash = type { { uint, sbyte *, \2 } * *, int (uint) *, int } *
%HashEntry = type { uint, sbyte *, \2 } *
%hash = type { { uint, sbyte *, \2 } * *, int (uint) *, int }
%hash_entry = type { uint, sbyte *, \2 * }
implementation
%Hash "MakeHash"(int %size, int (uint) * %map)
begin
bb1: ;[#uses=0]
%reg112 = malloc sbyte * *, uint 3 ; <sbyte * * *> [#uses=4]
%reg115 = malloc sbyte *, uint 1 ; <sbyte * *> [#uses=1]
store sbyte * * %reg115, sbyte * * * %reg112
%reg121 = load sbyte * * * %reg112 ; <sbyte * *> [#uses=1]
%size-idxcast1 = cast int %size to long ; <uint> [#uses=1]
%reg1221 = getelementptr sbyte * * %reg121, long %size-idxcast1 ; <sbyte * *> [#uses=1]
store sbyte * null, sbyte * * %reg1221
%reg232 = getelementptr sbyte * * * %reg112, long 1 ; <sbyte * * *> [#uses=1]
%cast243 = cast int (uint) * %map to sbyte * * ; <sbyte * *> [#uses=1]
store sbyte * * %cast243, sbyte * * * %reg232
%cast246 = cast sbyte * * * %reg112 to %Hash ; <%Hash> [#uses=1]
ret %Hash %cast246
end

View File

@ -1,19 +0,0 @@
; XFAIL: *
; RUN: llvm-upgrade < %s | llvm-as | opt -raise | llvm-dis | not grep bitcast
; NOTE: XFAIL'd -- expects -raise to retype the variable-sized malloc too.
%Hash = type { { uint, sbyte *, \2 } * *, int (uint) *, int } *
%HashEntry = type { uint, sbyte *, \2 } *
%hash = type { { uint, sbyte *, \2 } * *, int (uint) *, int }
%hash_entry = type { uint, sbyte *, \2 * }
implementation
%Hash "MakeHash"(int %size, int (uint) * %map) {
%reg112 = malloc sbyte * *, uint 3 ; <sbyte * * *> [#uses=5]
%reg107-uint = cast int %size to uint ; <uint> [#uses=1]
%reg115 = malloc sbyte *, uint %reg107-uint ; <sbyte * *> [#uses=1]
store sbyte * * %reg115, sbyte * * * %reg112
%cast246 = cast sbyte * * * %reg112 to %Hash ; <%Hash> [#uses=1]
ret %Hash %cast246
}

View File

@ -1,22 +0,0 @@
; XFAIL: *
; this testcase is distilled from this C source:
; int *foo(unsigned N, unsigned M) {
; unsigned i = (N+1)*sizeof(int);
; unsigned j = (M+1)*sizeof(int);
; return (int*)malloc(i+j);
; }
; RUN: llvm-upgrade < %s | llvm-as | opt -raise | llvm-dis | not grep bitcast
; NOTE: XFAIL'd -- the cast of the byte malloc to int* is not eliminated.
implementation
int* %test(uint %N, uint %M) {
%reg111 = shl uint %N, ubyte 2 ; <uint> [#uses=1]
%reg109 = add uint %reg111, 4 ; <uint> [#uses=1]
%reg114 = shl uint %M, ubyte 2 ; <uint> [#uses=1]
%reg112 = add uint %reg114, 4 ; <uint> [#uses=1]
%reg116 = add uint %reg109, %reg112 ; <uint> [#uses=1]
%reg117 = malloc sbyte, uint %reg116 ; <sbyte*> [#uses=1]
%cast221 = cast sbyte* %reg117 to int* ; <int*> [#uses=1]
ret int* %cast221
}

View File

@ -1,24 +0,0 @@
; This test contains two cast instructions that cannot be eliminated. If the
; input of the "test" function is negative, it should be correctly converted
; to a 32 bit version of the number with all upper 16 bits clear (ushort->uint
; involves no sign extension). Optimizing this to a single cast is invalid!
;
; RUN: llvm-upgrade < %s | llvm-as | opt -raise -q | lli
;
; NOTE: executed under lli; main returns non-zero iff -raise miscompiled test.
implementation
uint "test"(short %argc)
begin
%cast223 = cast short %argc to ushort ; <ushort> [#uses=1]
%cast114 = cast ushort %cast223 to uint ; <uint> [#uses=1]
ret uint %cast114
end
int "main"()
begin
%Ret = call uint %test(short -1)
%test = cast uint %Ret to int
%Res = seteq int %test, -1 ; If it returns -1 as int, it's a failure
%Res = cast bool %Res to int
ret int %Res
end

View File

@ -1,20 +0,0 @@
; This testcase is not level raised properly...
; XFAIL: *
; RUN: llvm-upgrade < %s | llvm-as | opt -raise | llvm-dis | not grep bitcast
; NOTE: XFAIL'd -- the uint-malloc'd node is never retyped to %List.
%List = type { int, %List* }
implementation
%List* "createList"(uint %Depth)
begin
%reg110 = malloc uint, uint 4
store uint %Depth, uint* %reg110
%reg113 = call %List* %createList( uint %Depth )
%reg217 = getelementptr uint* %reg110, long 2
%cast221 = cast uint* %reg217 to %List**
store %List* %reg113, %List** %cast221
%cast222 = cast uint* %reg110 to %List*
ret %List* %cast222
end

View File

@ -1,10 +0,0 @@
; This testcase should have the cast propogated through the load
; just like a store does...
;
; RUN: llvm-upgrade < %s | llvm-as | opt -raise | llvm-dis | not grep 'bitcast uint \*'
; NOTE: passes iff -raise rewrites the load to use the original uint* pointer.
int "test"(uint * %Ptr) {
%P2 = cast uint *%Ptr to int *
%Val = load int * %P2
ret int %Val
}

View File

@ -1,20 +0,0 @@
; XFAIL: *
; RUN: llvm-upgrade < %s | llvm-as | opt -raise | llvm-dis | not grep bitcast
; NOTE: XFAIL'd -- raising must keep the sign-extending sbyte->uint cast.
%FILE = type { int, ubyte*, ubyte*, ubyte, ubyte, uint, uint, uint }
uint %addfile(%FILE* %f) {
%cast255 = cast %FILE* %f to sbyte*
; Addreses a ubyte member in memory...
%reg2421 = getelementptr sbyte* %cast255, long 24
; Loads the ubyte
%reg130 = load sbyte* %reg2421
; Error, cast cannot convert the source operand to ubyte because then
; the sign extension would not be performed. Need to insert a cast.
;
%cast250 = cast sbyte %reg130 to uint ; This is a sign extension instruction
ret uint %cast250
}

View File

@ -1,22 +0,0 @@
; This case fails raise because the store requires that it's argument is of a
; particular type, but the gep is unable to propogate types backwards through
; it, because it doesn't know what type to ask it's operand to be.
;
; This could be fixed by making all stores add themselves to a list, and check
; their arguments are consistent AFTER all other values are propogated.
; XFAIL: *
; RUN: llvm-upgrade < %s | llvm-as | opt -raise | llvm-dis | notcast
; NOTE: XFAIL'd -- documents the known store/GEP back-propagation limitation.
%Tree = type %struct.tree*
%struct.tree = type { int, double, double, %Tree, %Tree, %Tree, %Tree }
void %reverse(%Tree %t) {
bb0: ;[#uses=0]
%cast219 = cast %Tree %t to sbyte*** ; <sbyte***> [#uses=2]
%reg2221 = getelementptr sbyte*** %cast219, long 6 ; <sbyte***> [#uses=1]
%reg108 = load sbyte*** %reg2221 ; <sbyte**> [#uses=2]
%reg247 = getelementptr sbyte*** %cast219, long 5 ; <sbyte***> [#uses=1]
store sbyte** %reg108, sbyte*** %reg247
ret void
}

View File

@ -1,53 +0,0 @@
; RUN: llvm-upgrade < %s | llvm-as | opt -raise
; NOTE: crash regression test -- -raise must survive this multi-block
; pointer-juggling function without asserting.
%Tree = type %struct.tree*
%struct.tree = type { int, double, double, %Tree, %Tree, %Tree, %Tree }
implementation ; Functions:
void %reverse(%Tree %t) {
bb0: ;[#uses=0]
%cast219 = cast %Tree %t to sbyte*** ; <sbyte***> [#uses=7]
%cond220 = seteq sbyte*** %cast219, null ; <bool> [#uses=1]
br bool %cond220, label %bb5, label %bb2
bb2: ;[#uses=3]
%reg2221 = getelementptr sbyte*** %cast219, long 6 ; <sbyte***> [#uses=1]
%reg108 = load sbyte*** %reg2221 ; <sbyte**> [#uses=3]
%reg2251 = getelementptr sbyte** %reg108, long 5 ; <sbyte**> [#uses=1]
store sbyte* null, sbyte** %reg2251
%reg2281 = getelementptr sbyte*** %cast219, long 6 ; <sbyte***> [#uses=1]
store sbyte** null, sbyte*** %reg2281
%reg2311 = getelementptr sbyte*** %cast219, long 5 ; <sbyte***> [#uses=1]
%reg114 = load sbyte*** %reg2311 ; <sbyte**> [#uses=2]
%cond234 = seteq sbyte** %reg114, null ; <bool> [#uses=1]
br bool %cond234, label %bb4, label %bb3
bb3: ;[#uses=4]
%reg115 = phi sbyte*** [ %cast117, %bb3 ], [ %cast219, %bb2 ] ; <sbyte***> [#uses=2]
%reg116 = phi sbyte** [ %cast118, %bb3 ], [ %reg114, %bb2 ] ; <sbyte**> [#uses=4]
%reg236 = getelementptr sbyte** %reg116, long 5 ; <sbyte**> [#uses=1]
%reg110 = load sbyte** %reg236 ; <sbyte*> [#uses=1]
%reg239 = getelementptr sbyte** %reg116, long 5 ; <sbyte**> [#uses=1]
%cast241 = cast sbyte*** %reg115 to sbyte* ; <sbyte*> [#uses=1]
store sbyte* %cast241, sbyte** %reg239
%reg242 = getelementptr sbyte*** %reg115, long 6 ; <sbyte***> [#uses=1]
store sbyte** %reg116, sbyte*** %reg242
%cast117 = cast sbyte** %reg116 to sbyte*** ; <sbyte***> [#uses=1]
%cast118 = cast sbyte* %reg110 to sbyte** ; <sbyte**> [#uses=2]
%cond245 = setne sbyte** %cast118, null ; <bool> [#uses=1]
br bool %cond245, label %bb3, label %bb4
bb4: ;[#uses=2]
%reg247 = getelementptr sbyte*** %cast219, long 5 ; <sbyte***> [#uses=1]
store sbyte** %reg108, sbyte*** %reg247
%reg250 = getelementptr sbyte** %reg108, long 6 ; <sbyte**> [#uses=2]
cast sbyte** %reg250 to sbyte**** ; <sbyte****>:0 [#uses=0]
%cast252 = cast sbyte*** %cast219 to sbyte* ; <sbyte*> [#uses=1]
store sbyte* %cast252, sbyte** %reg250
br label %bb5
bb5: ;[#uses=2]
ret void
}

View File

@ -1,12 +0,0 @@
; This crashes raise, with an cast<> failure
; RUN: llvm-upgrade < %s | llvm-as | opt -raise
; NOTE: crash regression test for adding a pointer-derived value to itself.
implementation
sbyte* %test(int* %ptr) {
%A = cast int* %ptr to sbyte *
%A = cast sbyte* %A to ulong
%B = add ulong %A, %A
%B = cast ulong %B to sbyte*
ret sbyte * %B
}

View File

@ -1,21 +0,0 @@
; This testcase, which was distilled from a HUGE function, causes problems
; because both the source and the destination of the %Y cast are converted
; to a new type, which causes massive problems.
; RUN: llvm-upgrade < %s | llvm-as | opt -raise -raise-start-inst=W
; NOTE: -raise-start-inst pins the pass to begin at instruction %W.
int **%test(sbyte **%S) {
BB0:
br label %Loop
Loop:
%X = phi sbyte* [null , %BB0], [%Z, %Loop]
%Y = cast sbyte *%X to sbyte **
%Z = load sbyte** %Y
br bool true, label %Loop, label %Out
Out:
%W = cast sbyte** %Y to int**
ret int** %W
}

View File

@ -1,11 +0,0 @@
; Looks like we don't raise alloca's like we do mallocs
; XFAIL: *
; RUN: llvm-upgrade < %s | llvm-as | opt -raise | llvm-dis | not grep bitcast
implementation ; Functions:
int *%X() {
%reg107 = alloca ubyte, uint 4
%cast213 = cast ubyte* %reg107 to int*
ret int* %cast213
}

View File

@ -1,26 +0,0 @@
; RUN: llvm-upgrade < %s | llvm-as | opt -raise -raise-start-inst=cast271
%CON_list = type { %CON_list*, %CON_node* }
%CON_node = type { %DIS_list*, %DIS_list*, int }
%DIS_list = type { %DIS_list*, %DIS_node* }
%DIS_node = type { %CON_list*, %List_o_links*, int }
%List_o_links = type { int, int, int, %List_o_links* }
implementation ; Functions:
%CON_node* %build_CON_node(int %reg107) {
br label %bb5
bb2: ;[#uses=3]
%reg126 = phi sbyte* [ %reg126, %bb2 ]
br bool true, label %bb2, label %bb5
bb5: ;[#uses=2]
%reg125 = phi sbyte* [ %reg126, %bb2], [ null, %0 ]
%reg263 = malloc sbyte*, uint 3 ; <sbyte**> [#uses=4]
%reg2641 = getelementptr sbyte** %reg263, long 1 ; <sbyte**> [#uses=1]
store sbyte* %reg125, sbyte** %reg2641
store sbyte* %reg125, sbyte** %reg263
%cast271 = cast sbyte** %reg263 to %CON_node* ; <%CON_node*> [#uses=1]
ret %CON_node* %cast271
}

View File

@ -1,9 +0,0 @@
; RUN: llvm-upgrade < %s | llvm-as | opt -raise
int* %test(int* %P, int* %Q) {
%P = cast int* %P to ulong
%Q = cast int* %Q to ulong
%V = add ulong %P, %Q
%V = cast ulong %V to int*
ret int* %V
}

View File

@ -1,13 +0,0 @@
; RUN: llvm-upgrade < %s | llvm-as | opt -raise | llvm-dis | grep -v uint | not grep 4294967295
%length_code = uninitialized global [256 x ubyte]
ubyte* %test(uint %length) {
%d = add uint 4294967295, %length
%e = cast uint %d to int
%g = cast int %e to ulong
%j = cast [256 x ubyte]* %length_code to ulong
%l = add ulong %j, %g
%m = cast ulong %l to ubyte*
ret ubyte* %m
}

View File

@ -1,9 +0,0 @@
; RUN: llvm-upgrade < %s | llvm-as | opt -raise | llvm-dis | grep call | not grep '\.\.\.'
implementation
void %test(sbyte* %P) {
%Q = cast sbyte* %P to void (...)*
call void (...)* %Q(sbyte* %P)
ret void
}

View File

@ -1,10 +0,0 @@
; Due to a recent change, this testcase now sends the raise pass into an infinite loop
;
; RUN: llvm-upgrade < %s | llvm-as | opt -raise
implementation
void %test(sbyte* %P, void(...) * %F) {
call void (...)* %F(sbyte* %P)
ret void
}

View File

@ -1,12 +0,0 @@
; RUN: llvm-as < 2002-11-04-ConstantSharing.ll | opt -raise | llvm-dis | notcast
implementation
bool %test(int *%X, uint* %Y) {
%A = cast int* %X to sbyte*
%B = cast uint* %Y to sbyte*
%c1 = seteq sbyte* %A, null
%c2 = seteq sbyte* %B, null
%c = and bool %c1, %c2
ret bool %c
}

View File

@ -1,10 +0,0 @@
; This testcase should be able to eliminate at least one of the casts.
;
; RUN: llvm-upgrade < %s | llvm-as | opt -raise | llvm-dis | not grep 'REMOVE'
int %foo(sbyte * %PF) {
%UPF = cast sbyte* %PF to uint()*
%Ret = call uint %UPF()
%REMOVE = cast uint %Ret to int
ret int %REMOVE
}

View File

@ -1,22 +0,0 @@
; Testcase reduced from 197.parser by bugpoint
; RUN: llvm-upgrade < %s | llvm-as | opt -raise -raise-start-inst=cast455 > /dev/null
void %conjunction_prune() {
; <label>:0 ; No predecessors!
br label %bb19
bb19: ; preds = %bb22, %0
%reg205 = phi ulong [ %cast208, %bb22 ], [ 0, %0 ] ; <ulong> [#uses=2]
%reg449 = add ulong %reg205, 10 ; <ulong> [#uses=0]
%cast455 = cast ulong %reg205 to sbyte** ; <sbyte**> [#uses=1]
store sbyte* null, sbyte** %cast455
br label %bb22
bb22: ; preds = %bb19
%cast208 = cast sbyte* null to ulong ; <ulong> [#uses=1]
br bool false, label %bb19, label %bb28
bb28: ; preds = %bb22
ret void
}

View File

@ -1,34 +0,0 @@
; RUN: llvm-upgrade < %s | llvm-as | opt -raise -raise-start-inst=cast459
int %deflateInit2_({ ubyte*, uint, ulong, ubyte*, uint, ulong, sbyte*, { \4, int, ubyte*, ulong, ubyte*, int, int, ubyte, ubyte, int, uint, uint, uint, ubyte*, ulong, ushort*, ushort*, uint, uint, uint, uint, uint, long, uint, uint, int, uint, uint, uint, uint, uint, uint, int, int, uint, int, [573 x { { ushort }, { ushort } }], [61 x { { ushort }, { ushort } }], [39 x { { ushort }, { ushort } }], { { { ushort }, { ushort } }*, int, { int }* }, { { { ushort }, { ushort } }*, int, { int }* }, { { { ushort }, { ushort } }*, int, { int }* }, [16 x ushort], [573 x int], int, int, [573 x ubyte], ubyte*, uint, uint, ushort*, ulong, ulong, uint, int, ushort, int }*, sbyte* (sbyte*, uint, uint)*, void (sbyte*, sbyte*)*, sbyte*, int, ulong, ulong }* %strm, int %level, int %method, int %windowBits, int %memLevel, int %strategy, sbyte* %version, int %stream_size) {
bb0: ; No predecessors!
%reg107 = load { ubyte*, uint, ulong, ubyte*, uint, ulong, sbyte*, { \4, int, ubyte*, ulong, ubyte*, int, int, ubyte, ubyte, int, uint, uint, uint, ubyte*, ulong, ushort*, ushort*, uint, uint, uint, uint, uint, long, uint, uint, int, uint, uint, uint, uint, uint, uint, int, int, uint, int, [573 x { { ushort }, { ushort } }], [61 x { { ushort }, { ushort } }], [39 x { { ushort }, { ushort } }], { { { ushort }, { ushort } }*, int, { int }* }, { { { ushort }, { ushort } }*, int, { int }* }, { { { ushort }, { ushort } }*, int, { int }* }, [16 x ushort], [573 x int], int, int, [573 x ubyte], ubyte*, uint, uint, ushort*, ulong, ulong, uint, int, ushort, int }*, sbyte* (sbyte*, uint, uint)*, void (sbyte*, sbyte*)*, sbyte*, int, ulong, ulong }** null ; <{ ubyte*, uint, ulong, ubyte*, uint, ulong, sbyte*, { \4, int, ubyte*, ulong, ubyte*, int, int, ubyte, ubyte, int, uint, uint, uint, ubyte*, ulong, ushort*, ushort*, uint, uint, uint, uint, uint, long, uint, uint, int, uint, uint, uint, uint, uint, uint, int, int, uint, int, [573 x { { ushort }, { ushort } }], [61 x { { ushort }, { ushort } }], [39 x { { ushort }, { ushort } }], { { { ushort }, { ushort } }*, int, { int }* }, { { { ushort }, { ushort } }*, int, { int }* }, { { { ushort }, { ushort } }*, int, { int }* }, [16 x ushort], [573 x int], int, int, [573 x ubyte], ubyte*, uint, uint, ushort*, ulong, ulong, uint, int, ushort, int }*, sbyte* (sbyte*, uint, uint)*, void (sbyte*, sbyte*)*, sbyte*, int, ulong, ulong }*> [#uses=2]
br bool false, label %bb5, label %UnifiedExitNode
bb5: ; preds = %bb0
br bool false, label %bb22, label %UnifiedExitNode
bb22: ; preds = %bb5
br bool false, label %bb24, label %UnifiedExitNode
bb24: ; preds = %bb22
%reg399 = getelementptr { ubyte*, uint, ulong, ubyte*, uint, ulong, sbyte*, { \4, int, ubyte*, ulong, ubyte*, int, int, ubyte, ubyte, int, uint, uint, uint, ubyte*, ulong, ushort*, ushort*, uint, uint, uint, uint, uint, long, uint, uint, int, uint, uint, uint, uint, uint, uint, int, int, uint, int, [573 x { { ushort }, { ushort } }], [61 x { { ushort }, { ushort } }], [39 x { { ushort }, { ushort } }], { { { ushort }, { ushort } }*, int, { int }* }, { { { ushort }, { ushort } }*, int, { int }* }, { { { ushort }, { ushort } }*, int, { int }* }, [16 x ushort], [573 x int], int, int, [573 x ubyte], ubyte*, uint, uint, ushort*, ulong, ulong, uint, int, ushort, int }*, sbyte* (sbyte*, uint, uint)*, void (sbyte*, sbyte*)*, sbyte*, int, ulong, ulong }* %reg107, long 0, uint 8 ; <sbyte* (sbyte*, uint, uint)**> [#uses=1]
%reg137 = load sbyte* (sbyte*, uint, uint)** %reg399 ; <sbyte* (sbyte*, uint, uint)*> [#uses=1]
%reg402 = call sbyte* %reg137( sbyte* null, uint 0, uint 0 ) ; <sbyte*> [#uses=1]
br bool false, label %bb26, label %UnifiedExitNode
bb26: ; preds = %bb24
%reg457 = getelementptr sbyte* %reg402, long 0 ; <sbyte*> [#uses=1]
%cast459 = cast sbyte* %reg457 to ubyte* ; <ubyte*> [#uses=1]
%reg146 = load ubyte* %cast459 ; <ubyte> [#uses=1]
%reg145 = shl uint 0, ubyte %reg146 ; <uint> [#uses=1]
store uint %reg145, uint* null
%reg647 = call int %deflateEnd( { ubyte*, uint, ulong, ubyte*, uint, ulong, sbyte*, { \4, int, ubyte*, ulong, ubyte*, int, int, ubyte, ubyte, int, uint, uint, uint, ubyte*, ulong, ushort*, ushort*, uint, uint, uint, uint, uint, long, uint, uint, int, uint, uint, uint, uint, uint, uint, int, int, uint, int, [573 x { { ushort }, { ushort } }], [61 x { { ushort }, { ushort } }], [39 x { { ushort }, { ushort } }], { { { ushort }, { ushort } }*, int, { int }* }, { { { ushort }, { ushort } }*, int, { int }* }, { { { ushort }, { ushort } }*, int, { int }* }, [16 x ushort], [573 x int], int, int, [573 x ubyte], ubyte*, uint, uint, ushort*, ulong, ulong, uint, int, ushort, int }*, sbyte* (sbyte*, uint, uint)*, void (sbyte*, sbyte*)*, sbyte*, int, ulong, ulong }* %reg107 ) ; <int> [#uses=0]
br label %UnifiedExitNode
UnifiedExitNode: ; preds = %bb26, %bb24, %bb22, %bb5, %bb0
ret int 0
}
declare int %deflateEnd({ ubyte*, uint, ulong, ubyte*, uint, ulong, sbyte*, { \4, int, ubyte*, ulong, ubyte*, int, int, ubyte, ubyte, int, uint, uint, uint, ubyte*, ulong, ushort*, ushort*, uint, uint, uint, uint, uint, long, uint, uint, int, uint, uint, uint, uint, uint, uint, int, int, uint, int, [573 x { { ushort }, { ushort } }], [61 x { { ushort }, { ushort } }], [39 x { { ushort }, { ushort } }], { { { ushort }, { ushort } }*, int, { int }* }, { { { ushort }, { ushort } }*, int, { int }* }, { { { ushort }, { ushort } }*, int, { int }* }, [16 x ushort], [573 x int], int, int, [573 x ubyte], ubyte*, uint, uint, ushort*, ulong, ulong, uint, int, ushort, int }*, sbyte* (sbyte*, uint, uint)*, void (sbyte*, sbyte*)*, sbyte*, int, ulong, ulong }*)

View File

@ -1,9 +0,0 @@
; RUN: llvm-upgrade < %s | llvm-as | opt -raise
declare void %foo()
void %test() {
%X = cast void()* %foo to int()*
%retval = call int %X()
ret void
}

View File

@ -1,8 +0,0 @@
; RUN: llvm-upgrade < %s | llvm-as | opt -raise | llvm-dis | notcast
void %test(...) { ret void }
void %caller() {
call void (...) *%test()
ret void
}

View File

@ -1,10 +0,0 @@
; RUN: llvm-upgrade < %s | llvm-as | opt -raise -disable-output
%T = type { [0 x ubyte] }
void %test(%T* %tmp.22) {
%tmp.23 = getelementptr %T* %tmp.22, long 0, uint 0
%tmp.24 = cast [0 x ubyte]* %tmp.23 to sbyte**
%tmp.25 = load sbyte** %tmp.24
ret void
}

View File

@ -1,24 +0,0 @@
; The expr analysis routines were being too aggressive across cast instructions!
; RUN: llvm-upgrade < %s | llvm-as | opt -raise | llvm-dis | not grep 4294967295
target endian = big
target pointersize = 64
%struct..istack_struct = type { %struct..istack_struct*, %struct..istk_entry*, uint }
%struct..istk_entry = type { double, int, int, double, double, sbyte* }
implementation ; Functions:
bool %Intersection(%struct..istack_struct* %tmp.0, uint %tmp.12) { ; No predecessors!
%tmp.8 = getelementptr %struct..istack_struct* %tmp.0, long 0, uint 1 ; <%struct..istk_entry**> [#uses=1]
%tmp.9 = load %struct..istk_entry** %tmp.8 ; <%struct..istk_entry*> [#uses=1]
%dec = sub uint %tmp.12, 1 ; <uint> [#uses=1]
%tmp.13 = cast uint %dec to ulong ; <ulong> [#uses=1]
%tmp.14 = mul ulong %tmp.13, 40 ; <ulong> [#uses=1]
%tmp.16 = cast %struct..istk_entry* %tmp.9 to long ; <long> [#uses=1]
%tmp.17 = cast ulong %tmp.14 to long ; <long> [#uses=1]
%tmp.18 = add long %tmp.16, %tmp.17 ; <long> [#uses=1]
%tmp.19 = cast long %tmp.18 to %struct..istk_entry* ; <%struct..istk_entry*> [#uses=1]
%tmp.21 = setne %struct..istk_entry* %tmp.19, null ; <bool> [#uses=1]
ret bool %tmp.21
}

View File

@ -1,10 +0,0 @@
; The program should not just cast 2143289344 to float and store it!
;
; RUN: llvm-upgrade < %s | llvm-as | opt -raise | llvm-dis | not grep 41DFF
void %test() {
%mem_tmp = alloca float
%tmp.0 = cast float* %mem_tmp to uint*
store uint 2143289344, uint* %tmp.0
ret void
}

View File

@ -1,3 +0,0 @@
load_lib llvm-dg.exp
llvm-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.{ll,llx,c,cpp,tr}]] $objdir $srcdir $subdir $target_triplet $llvmgcc $llvmgxx $prcontext $llvmgcc_version

View File

@ -11,7 +11,7 @@ LEVEL = ../..
TOOLNAME = bugpoint
LINK_COMPONENTS := bcreader bcwriter asmparser instrumentation scalaropts ipo \
transforms linker
linker
REQUIRES_EH := 1
include $(LEVEL)/Makefile.common

View File

@ -10,7 +10,6 @@ LEVEL = ../..
TOOLNAME = opt
REQUIRES_EH := 1
LINK_COMPONENTS := bcreader bcwriter instrumentation scalaropts ipo \
transforms
LINK_COMPONENTS := bcreader bcwriter instrumentation scalaropts ipo
include $(LEVEL)/Makefile.common

View File

@ -202,7 +202,6 @@ void AddStandardCompilePasses(PassManager &PM) {
addPass(PM, createFunctionInliningPass()); // Inline small functions
addPass(PM, createArgumentPromotionPass()); // Scalarize uninlined fn args
addPass(PM, createRaisePointerReferencesPass());// Recover type information
addPass(PM, createTailDuplicationPass()); // Simplify cfg by copying code
addPass(PM, createInstructionCombiningPass()); // Cleanup for scalarrepl.
addPass(PM, createCFGSimplificationPass()); // Merge & remove BBs